content stringlengths 5 1.05M |
|---|
from unittest import TestCase
import numpy as np
import numpy.testing as npt
from distancematrix.util import diag_length
from distancematrix.util import diag_indices
from distancematrix.util import diag_indices_of
from distancematrix.util import cut_indices_of
from distancematrix.util import shortest_path_distances
from distancematrix.util import shortest_path
from distancematrix.util import sliding_min
from distancematrix.util import sliding_max
from distancematrix.util import sliding_window_view
class TestUtil(TestCase):
    """Tests for the helper routines in distancematrix.util."""

    def test_diag_length_square_matrix(self):
        # (diagonal index, expected diagonal length) for a 5 x 5 matrix
        for diag, expected in [(0, 5), (1, 4), (-2, 3), (4, 1), (5, 0), (6, 0)]:
            self.assertEqual(diag_length(5, 5, diag), expected)

    def test_diag_length_rect_matrix(self):
        # Tall matrix: 5 rows x 3 columns
        tall_cases = [(0, 3), (1, 2), (2, 1), (3, 0), (4, 0),
                      (-1, 3), (-2, 3), (-3, 2), (-4, 1), (-5, 0), (-6, 0)]
        for diag, expected in tall_cases:
            self.assertEqual(diag_length(5, 3, diag), expected)
        # Wide matrix: 3 rows x 5 columns
        wide_cases = [(0, 3), (1, 3), (2, 3), (3, 2), (4, 1), (5, 0), (6, 0),
                      (-1, 2), (-2, 1), (-3, 0), (-4, 0)]
        for diag, expected in wide_cases:
            self.assertEqual(diag_length(3, 5, diag), expected)

    def test_diag_indices_square(self):
        data = np.arange(1, 10).reshape((3, 3))
        expected_per_diag = {-3: [], -2: [7], -1: [4, 8], 0: [1, 5, 9],
                             1: [2, 6], 2: [3], 3: []}
        for diag, expected in expected_per_diag.items():
            npt.assert_equal(data[diag_indices(3, 3, diag)], expected)

    def test_diag_indices_rect(self):
        data = np.arange(1, 7).reshape((2, 3))
        expected_per_diag = {-2: [], -1: [4], 0: [1, 5], 1: [2, 6], 2: [3], 3: []}
        for diag, expected in expected_per_diag.items():
            npt.assert_equal(data[diag_indices(2, 3, diag)], expected)

    def test_diag_indices_of_rect(self):
        data = np.arange(1, 7).reshape((2, 3))
        expected_per_diag = {-2: [], -1: [4], 0: [1, 5], 1: [2, 6], 2: [3], 3: []}
        for diag, expected in expected_per_diag.items():
            npt.assert_equal(data[diag_indices_of(data, diag)], expected)

    def test_cut_indices_of(self):
        data = np.arange(1, 13).reshape((4, 3))
        # Anti-diagonal "cuts", bottom-left to top-right, one list per cut index.
        expected_per_cut = [[1], [4, 2], [7, 5, 3], [10, 8, 6], [11, 9], [12], []]
        for cut, expected in enumerate(expected_per_cut):
            npt.assert_equal(data[cut_indices_of(data, cut)], expected)
        data = np.arange(10).reshape((2, 5))
        expected_per_cut = [[0], [5, 1], [6, 2], [7, 3], [8, 4], [9]]
        for cut, expected in enumerate(expected_per_cut):
            npt.assert_equal(data[cut_indices_of(data, cut)], expected)

    def test_shortest_path_distances(self):
        data = np.array([
            [1, 2, 1, 0, 3],
            [1, 3, 0, 1, 1],
            [0, 1, 1, 4, 0],
            [2, 5, 5, 2, 2],
            [0, 1, 2, 3, 9]
        ], dtype=float)
        expected = np.array([
            [1, 3, 4, 4, 7],
            [2, 4, 3, 4, 5],
            [2, 3, 4, 7, 4],
            [4, 7, 8, 6, 6],
            [4, 5, 7, 9, 15]
        ], dtype=float)
        # Full matrix, then slices along each axis.
        npt.assert_equal(shortest_path_distances(data), expected)
        npt.assert_equal(shortest_path_distances(data[:3, :]), expected[:3, :])
        npt.assert_equal(shortest_path_distances(data[:, :3]), expected[:, :3])

    def test_shortest_path(self):
        data = np.array([
            [1, 2, 1, 0, 3],
            [1, 3, 3, 1, 1],
            [4, 3, 8, 4, 0],
            [2, 2, 5, 2, 5],
            [0, 1, 1, 3, 2],
            [0, 1, 1, 5, 9]
        ], dtype=float)
        expected_path = [[0, 0], [1, 0], [2, 1], [3, 1], [4, 2], [4, 3], [5, 4]]
        npt.assert_equal(shortest_path(data), expected_path)

    def test_sliding_min(self):
        # (input values, expected window minima) for window size 3
        cases = [
            ([1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6]),
            ([8, 7, 6, 5, 4, 3, 2, 1], [6, 5, 4, 3, 2, 1]),
            ([8, 3, 4, 0, 6, 1, 1, 1, 2, 7, 6, 4, 3, 4],
             [3, 0, 0, 0, 1, 1, 1, 1, 2, 4, 3, 3]),
        ]
        for values, expected in cases:
            npt.assert_equal(sliding_min(np.array(values), 3), expected)

    def test_sliding_max(self):
        # (input values, expected window maxima) for window size 3
        cases = [
            ([1, 2, 3, 4, 5, 6, 7, 8], [3, 4, 5, 6, 7, 8]),
            ([8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3]),
            ([8, 3, 4, 0, 6, 1, 1, 1, 2, 7, 6, 4, 3, 4],
             [8, 4, 6, 6, 6, 1, 2, 7, 7, 7, 6, 4]),
        ]
        for values, expected in cases:
            npt.assert_equal(sliding_max(np.array(values), 3), expected)

    def test_sliding_window_view(self):
        data = np.arange(1, 9)
        npt.assert_equal(
            sliding_window_view(data, [3]),
            [[1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, 8]]
        )
        npt.assert_equal(
            sliding_window_view(data, [3], step=[2]),
            [[1, 2, 3], [3, 4, 5], [5, 6, 7]]
        )
        data = np.arange(1, 10).reshape((3, 3))
        npt.assert_equal(
            sliding_window_view(data, [2, 2]),
            [[[[1, 2], [4, 5]], [[2, 3], [5, 6]]], [[[4, 5], [7, 8]], [[5, 6], [8, 9]]]]
        )
        npt.assert_equal(
            sliding_window_view(data, [1, 3], step=[2, 1]),
            [[[[1, 2, 3]]], [[[7, 8, 9]]]]
        )
|
# -*- coding: utf-8 -*-
from typing import Optional, List, Dict, Any
from overrides import overrides
from docker.models.volumes import Volume
from recc.container.interfaces.container_volume import ContainerVolume
from recc.container.struct.volume_info import VolumeInfo
from recc.container.docker.mixin.docker_base import DockerBase
def _create_volume_info(volume: Volume) -> VolumeInfo:
    """Build a :class:`VolumeInfo` summary from a docker ``Volume`` object.

    Only the id, name and labels are carried over; other attributes
    (CreatedAt, Driver, Mountpoint, Options, Scope) are intentionally
    ignored.
    """
    key = volume.id
    name = volume.name
    raw_labels = volume.attrs["Labels"]
    # Docker reports missing labels as None; normalize to an empty mapping.
    labels = dict() if raw_labels is None else raw_labels
    assert key is not None
    assert name is not None
    assert labels is not None
    return VolumeInfo(key, name, labels)
class DockerVolume(ContainerVolume, DockerBase):
    """Docker-backed implementation of the :class:`ContainerVolume` interface."""

    @overrides
    async def volumes(
        self, filters: Optional[Dict[str, Any]] = None, **kwargs
    ) -> List[VolumeInfo]:
        """List volumes, optionally filtered (passed through to docker's ``volumes.list``)."""
        updated_kwargs = dict()
        updated_kwargs.update(kwargs)
        if filters:
            updated_kwargs["filters"] = filters
        volumes = self.docker.volumes.list(**updated_kwargs)
        return [_create_volume_info(v) for v in volumes]

    def _get_volume(self, key: str) -> Volume:
        # Raises (e.g. docker.errors.NotFound) when the volume does not exist.
        return self.docker.volumes.get(key)

    @overrides
    async def exist_volume(self, key: str) -> bool:
        """Return True if a volume identified by ``key`` exists."""
        try:
            return self._get_volume(key) is not None
        except Exception:
            # FIX: was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; only ordinary lookup failures mean "not found".
            return False

    @overrides
    async def create_volume(self, name: str, **kwargs) -> VolumeInfo:
        """Create a volume named ``name`` and return its summary info."""
        volume = self.docker.volumes.create(name, **kwargs)
        return _create_volume_info(volume)

    @overrides
    async def remove_volume(self, key: str, force=False) -> None:
        """Remove the volume identified by ``key``; ``force`` removes even if in use."""
        self._get_volume(key).remove(force)
|
# Read a line of text from the user.
text = input('Enter a string: ')

# An empty string gets a notice; anything else is echoed in uppercase.
if text == "":
    print('The string is empty.')
else:
    print(text.upper())
import os
import GPUtil
# If you need one GPU, I will pick it here for you
# Auto-selection only runs when the caller has not already pinned devices;
# it must happen before torch/CUDA is imported so the mask takes effect.
if 'CUDA_VISIBLE_DEVICES' not in os.environ:
    # maxMemory=0.2: presumably only GPUs with under 20% memory in use are
    # considered "available" -- TODO confirm against GPUtil docs.
    gpu = [str(g) for g in GPUtil.getAvailable(maxMemory=0.2)]
    assert len(gpu) > 0, 'No available GPUs'
    print('Using GPU', ','.join(gpu))
    # Pin the chosen devices for every framework imported afterwards.
    os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(gpu)
import argparse
import copy
from docopt import docopt
import functools
import json
import logging
import math
import numpy as np
import pprint
import psutil
import random
import runpy
from sacred.arg_parser import get_config_updates
from sacred import Experiment
import subprocess
import sys
import time
import torch
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torchvision.transforms as transforms
import torchvision.utils as tvutils
import torch.nn.functional as F
import torchsummary
from tqdm import tqdm as tqdm
from multiprocessing.pool import ThreadPool
import threading
import warnings
from tlkit.data.synset import synset_arr
from tlkit.models.ewc import EWC
from tlkit.models.student_models import FCN3, FCN4, FCN5, FCN8
from tlkit.models.lifelong_framework import load_submodule
from tlkit.logging_helpers import log, log_image, reset_log, add_classification_specific_logging, get_logger, write_logs
from tlkit.utils import update, var_to_numpy, index_to_image, load_state_dict_from_path
import tlkit.utils
import tlkit.data.datasets.taskonomy_dataset as taskonomy_dataset
import tlkit.data.datasets.fashion_mnist_dataset as fashion_mnist_dataset
import tlkit.data.datasets.imagenet_dataset as imagenet_dataset
import tlkit.data.datasets.icifar_dataset as icifar_dataset
import tlkit.data.splits as splits
from tlkit.utils import LIST_OF_TASKS, TASKS_TO_CHANNELS, SINGLE_IMAGE_TASKS
from evkit.saving.observers import FileStorageObserverWithExUuid
import evkit.saving.checkpoints as checkpoints
from evkit.utils.profiler import Profiler
from evkit.utils.random import set_seed
from evkit.utils.misc import cfg_to_md, count_trainable_parameters, count_total_parameters, search_and_replace_dict
from evkit.utils.parallel import _CustomDataParallel
from evkit.utils.losses import heteroscedastic_double_exponential, heteroscedastic_normal, weighted_mse_loss, softmax_cross_entropy, weighted_l1_loss, perceptual_l1_loss, perceptual_l2_loss, perceptual_cross_entropy_loss, identity_regularizer, transfer_regularizer, perceptual_regularizer, dense_cross_entropy, dense_softmax_cross_entropy, weighted_l2_loss
from evkit.utils.viz.core import pack_images, imagenet_unnormalize
from evkit.models.taskonomy_network import TaskonomyEncoder, TaskonomyDecoder, TaskonomyNetwork
from evkit.models.unet import UNet, UNetHeteroscedasticFull, UNetHeteroscedasticIndep, UNetHeteroscedasticPooled
from tlkit.models.student_models import FCN4Reshaped
from tlkit.models.resnet_cifar import ResnetiCifar44
from tlkit.models.sidetune_architecture import GenericSidetuneNetwork, TransferConv3, PreTransferedDecoder
from tlkit.models.models_additional import BoostedNetwork, ConstantModel
from tlkit.models.lifelong_framework import LifelongSidetuneNetwork
import tnt.torchnet as tnt
from tnt.torchnet.logger import FileLogger
# Prefer GPU when CUDA is available; the whole script moves tensors to this device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
logging.basicConfig(level=logging.DEBUG, format='%(message)s')
logger = logging.getLogger()  # root logger, shared by the whole script
ex = Experiment(name="Train Lifelong Learning agent")
# The first CLI argument is the log directory. It is popped so that the
# sacred/docopt argument parsing in __main__ only sees the remaining args.
LOG_DIR = sys.argv[1]
sys.argv.pop(1)
# Execute the config modules inside this module's global namespace —
# presumably so their sacred config definitions register against `ex`
# above (TODO confirm; the config modules are not visible here).
runpy.run_module('configs.vision_lifelong', init_globals=globals())
runpy.run_module('configs.icifar_cfg', init_globals=globals())
runpy.run_module('configs.seq_taskonomy_cfg', init_globals=globals())
runpy.run_module('configs.seq_taskonomy_cfg_extra', init_globals=globals())
runpy.run_module('configs.shared', init_globals=globals())
@ex.command
def prologue(cfg, uuid):
    """Pre-run setup: create the log dir, optionally wipe old logs, and
    archive the previous run when resuming.

    Invoked explicitly via ``ex.run('prologue', ...)`` before the main command.
    """
    os.makedirs(LOG_DIR, exist_ok=True)
    # Wiping logs and resuming from them are mutually exclusive.
    assert not (cfg['saving']['obliterate_logs'] and cfg['training']['resume_training']), 'Cannot obliterate logs and resume training'
    if cfg['saving']['obliterate_logs']:
        # Guard against running `rm -rf` with an empty path.
        assert LOG_DIR, 'LOG_DIR cannot be empty'
        subprocess.call(f'rm -rf {LOG_DIR}', shell=True)
    if cfg['training']['resume_training']:
        # Keep a copy of the current run's artifacts before they are overwritten.
        checkpoints.archive_current_run(LOG_DIR, uuid)
@ex.main
def train(cfg, uuid):
    """Train a lifelong-learning model according to ``cfg``.

    Sets up logging, builds the model (optionally resuming from a checkpoint),
    constructs dataloaders and loss functions, runs the training loop, and
    optionally runs a final test epoch.

    Args:
        cfg: sacred config dict (top-level keys: 'training', 'learner', 'saving').
        uuid: unique id of this experiment run.
    """
    set_seed(cfg['training']['seed'])

    ############################################################
    # Logger
    ############################################################
    logger.setLevel(logging.INFO)
    logger.info(pprint.pformat(cfg))
    logger.debug(f'Loaded Torch version: {torch.__version__}')
    logger.debug(f'Using device: {device}')
    logger.info(f"Training following tasks: ")
    for i, (s, t) in enumerate(zip(cfg['training']['sources'], cfg['training']['targets'])):
        logger.info(f"\tTask {i}: {s} -> {t}")
    logger.debug(f'Starting data loaders')

    ############################################################
    # Model (and possibly resume from checkpoint)
    ############################################################
    logger.debug(f'Setting up model')
    # Switches to the proper pretrained encoder for the first target task.
    search_and_replace_dict(cfg['learner']['model_kwargs'], cfg['training']['targets'][0][0])
    model = eval(cfg['learner']['model'])(**cfg['learner']['model_kwargs'])
    logger.info(f"Created model. Number of trainable parameters: {count_trainable_parameters(model)}. Number of total parameters: {count_total_parameters(model)}")
    try:
        # Best-effort parameter breakdown; not every model exposes these submodules.
        logger.info(f"Number of trainable transfer parameters: {count_trainable_parameters(model.transfers)}. Number of total transfer parameters: {count_total_parameters(model.transfers)}")
        if isinstance(model.encoder, nn.Module):
            logger.info(f"Number of trainable encoder parameters: {count_trainable_parameters(model.base)}. Number of total encoder parameters: {count_total_parameters(model.base)}")
        if isinstance(model.side_networks, nn.Module):
            logger.info(f"Number of trainable side parameters: {count_trainable_parameters(model.sides)}. Number of total side parameters: {count_total_parameters(model.sides)}")
        if isinstance(model.merge_operators, nn.Module):
            logger.info(f"Number of trainable merge (alpha) parameters: {count_trainable_parameters(model.merge_operators)}. Number of total merge (alpha) parameters: {count_total_parameters(model.merge_operators)}")
    except Exception:
        # FIX: was a bare `except:`; keep best-effort logging but do not
        # swallow SystemExit/KeyboardInterrupt.
        pass

    ckpt_fpath = cfg['training']['resume_from_checkpoint_path']
    loaded_optimizer = None
    # FIX: always bind `checkpoint`; the dataloader-resume logic below reads it
    # even when resume_training is set but no checkpoint file could be loaded
    # (previously a NameError in that case).
    checkpoint = {}
    start_epoch = 0
    if ckpt_fpath is not None and not cfg['training']['resume_training']:
        warnings.warn('Checkpoint path provided but resume_training is set to False, are you sure??')
    if ckpt_fpath is not None and cfg['training']['resume_training']:
        if not os.path.exists(ckpt_fpath):
            logger.warning(f'Trying to resume training, but checkpoint path {ckpt_fpath} does not exist. Starting training from beginning...')
        else:
            model, checkpoint = load_state_dict_from_path(model, ckpt_fpath)
            start_epoch = checkpoint['epoch'] if 'epoch' in checkpoint else 0
            logger.info(f"Loaded model (epoch {start_epoch if 'epoch' in checkpoint else 'unknown'}) from {ckpt_fpath}")
            if 'optimizer' in checkpoint:
                loaded_optimizer = checkpoint['optimizer']
            else:
                warnings.warn('No optimizer in checkpoint, are you sure?')
            try:  # we do not use state_dict, do not let it take up precious CUDA memory
                del checkpoint['state_dict']
            except KeyError:
                pass
    model.to(device)
    if torch.cuda.device_count() > 1:
        logger.info(f"Using {torch.cuda.device_count()} GPUs!")
        assert cfg['learner']['model'] != 'ConstantModel', 'ConstantModel (blind) does not operate with multiple devices'
        model = nn.DataParallel(model, range(torch.cuda.device_count()))
        model.to(device)

    ############################################################
    # Data Loading
    ############################################################
    for key in ['sources', 'targets', 'masks']:
        cfg['training']['dataloader_fn_kwargs'][key] = cfg['training'][key]
    dataloaders = eval(cfg['training']['dataloader_fn'])(**cfg['training']['dataloader_fn_kwargs'])
    if cfg['training']['resume_training']:
        # Restore the dataloader position; fall back to start_epoch when the
        # checkpoint does not carry a usable curr_iter_idx.
        if 'curr_iter_idx' in checkpoint and checkpoint['curr_iter_idx'] == -1:
            warnings.warn(f'curr_iter_idx is -1, Guessing curr_iter_idx to be start_epoch {start_epoch}')
            dataloaders['train'].start_dl = start_epoch
        elif 'curr_iter_idx' in checkpoint:
            logger.info(f"Starting dataloader at {checkpoint['curr_iter_idx']}")
            dataloaders['train'].start_dl = checkpoint['curr_iter_idx']
        else:
            warnings.warn(f'Guessing curr_iter_idx to be start_epoch {start_epoch}')
            dataloaders['train'].start_dl = start_epoch

    ############################################################
    # Loss Functions
    ############################################################
    loss_fn_lst = cfg['training']['loss_fn']
    loss_kwargs_lst = cfg['training']['loss_kwargs']
    if not isinstance(loss_fn_lst, list):
        loss_fn_lst = [loss_fn_lst]
        loss_kwargs_lst = [loss_kwargs_lst]
    elif isinstance(loss_kwargs_lst, dict):
        # One shared kwargs dict for every loss.
        loss_kwargs_lst = [loss_kwargs_lst for _ in range(len(loss_fn_lst))]
    loss_fns = []
    assert len(loss_fn_lst) == len(loss_kwargs_lst), 'number of loss fn/kwargs not the same'
    for loss_fn, loss_kwargs in zip(loss_fn_lst, loss_kwargs_lst):
        if loss_fn == 'perceptual_l1':
            loss_fn = perceptual_l1_loss(cfg['training']['loss_kwargs']['decoder_path'], cfg['training']['loss_kwargs']['bake_decodings'])
        elif loss_fn == 'perceptual_l2':
            loss_fn = perceptual_l2_loss(cfg['training']['loss_kwargs']['decoder_path'], cfg['training']['loss_kwargs']['bake_decodings'])
        elif loss_fn == 'perceptual_cross_entropy':
            loss_fn = perceptual_cross_entropy_loss(cfg['training']['loss_kwargs']['decoder_path'], cfg['training']['loss_kwargs']['bake_decodings'])
        else:
            loss_fn = functools.partial(eval(loss_fn), **loss_kwargs)
        loss_fns.append(loss_fn)
    if len(loss_fns) == 1 and len(cfg['training']['sources']) > 1:
        # Reuse the single loss for every task.
        loss_fns = [loss_fns[0] for _ in range(len(cfg['training']['sources']))]
    if 'regularizer_fn' in cfg['training'] and cfg['training']['regularizer_fn'] is not None:
        assert torch.cuda.device_count() <= 1, 'Regularization does not support multi GPU, unable to access model attributes from DataParallel wrapper'
        bare_model = model.module if torch.cuda.device_count() > 1 else model
        loss_fns = [eval(cfg['training']['regularizer_fn'])(loss_fn=loss_fn, model=bare_model, **cfg['training']['regularizer_kwargs']) for loss_fn in loss_fns]

    ############################################################
    # More Logging
    ############################################################
    flog = tnt.logger.FileLogger(cfg['saving']['results_log_file'], overwrite=True)
    mlog = get_logger(cfg, uuid)
    mlog.add_meter('config', tnt.meter.SingletonMeter(), ptype='text')
    mlog.update_meter(cfg_to_md(cfg, uuid), meters={'config'}, phase='train')
    for task, _ in enumerate(cfg['training']['targets']):
        mlog.add_meter(f'alpha/task_{task}', tnt.meter.ValueSummaryMeter())
        mlog.add_meter(f'output/task_{task}', tnt.meter.ValueSummaryMeter(), ptype='image')
        mlog.add_meter(f'input/task_{task}', tnt.meter.ValueSummaryMeter(), ptype='image')
        # FIX: was the plain string 'weight_histogram/task_{task}' (missing the
        # f-prefix), so every task shared a single literally-named meter.
        mlog.add_meter(f'weight_histogram/task_{task}', tnt.meter.ValueSummaryMeter(), ptype='histogram')
        for loss in cfg['training']['loss_list']:
            mlog.add_meter(f'losses/{loss}_{task}', tnt.meter.ValueSummaryMeter())
        if cfg['training']['task_is_classification'][task]:
            mlog.add_meter(f'accuracy_top1/task_{task}', tnt.meter.ClassErrorMeter(topk=[1], accuracy=True))
            mlog.add_meter(f'accuracy_top5/task_{task}', tnt.meter.ClassErrorMeter(topk=[5], accuracy=True))
            mlog.add_meter(f'perplexity_pred/task_{task}', tnt.meter.ValueSummaryMeter())
            mlog.add_meter(f'perplexity_label/task_{task}', tnt.meter.ValueSummaryMeter())

    ############################################################
    # Training
    ############################################################
    # FIX: always bind `context`; the join below runs even when neither
    # training nor testing produced any background contexts.
    context = []
    try:
        if cfg['training']['train']:
            # Optimizer: reuse the checkpointed one when resuming, otherwise build
            # fresh with merge/alpha/context params exempt from weight decay.
            if cfg['training']['resume_training'] and loaded_optimizer is not None:
                optimizer = loaded_optimizer
            else:
                optimizer = eval(cfg['learner']['optimizer_class'])(
                    [
                        {'params': [param for name, param in model.named_parameters() if 'merge_operator' in name or 'context' in name or 'alpha' in name], 'weight_decay': 0.0},
                        {'params': [param for name, param in model.named_parameters() if 'merge_operator' not in name and 'context' not in name and 'alpha' not in name]},
                    ],
                    lr=cfg['learner']['lr'], **cfg['learner']['optimizer_kwargs']
                )

            # Scheduler
            scheduler = None
            if cfg['learner']['lr_scheduler_method'] is not None:
                scheduler = eval(cfg['learner']['lr_scheduler_method'])(optimizer, **cfg['learner']['lr_scheduler_method_kwargs'])

            model.start_training()  # For PSP variant

            # Mixed precision training
            if cfg['training']['amp']:
                from apex import amp
                model, optimizer = amp.initialize(model, optimizer, opt_level='O1')

            logger.info("Starting training...")
            context = train_model(cfg, model, dataloaders, loss_fns, optimizer, start_epoch=start_epoch,
                                  num_epochs=cfg['training']['num_epochs'], save_epochs=cfg['saving']['save_interval'],
                                  scheduler=scheduler, mlog=mlog, flog=flog)
    finally:
        # Always report memory/GPU state, even on a crash or interrupt.
        print(psutil.virtual_memory())
        GPUtil.showUtilization(all=True)

    ####################
    # Final Test
    ####################
    if cfg['training']['test']:
        run_kwargs = {
            'cfg': cfg,
            'mlog': mlog,
            'flog': flog,
            'optimizer': None,
            'loss_fns': loss_fns,
            'model': model,
            'use_thread': cfg['saving']['in_background'],
        }
        context, _ = run_one_epoch(dataloader=dataloaders['val'], epoch=0, train=False, **run_kwargs)

    logger.info('Waiting up to 10 minutes for all files to save...')
    mlog.flush()
    # `context` holds background saver threads; give each up to 10 minutes.
    [c.join(600) for c in context]
    logger.info('All saving is finished.')
def train_model(cfg, model, dataloaders, loss_fns, optimizer, start_epoch=0, num_epochs=250, save_epochs=25, scheduler=None, mlog=None, flog=None):
    '''
    Main training loop. Multiple tasks might happen in the same epoch.
        0 to 1      random validation only
        1 to 2      train task 0 labeled as epoch 2, validate all
        i to {i+1}  train task {i-1} labeled as epoch {i+1}

    Returns a list of background (saver) contexts the caller should join.
    '''
    checkpoint_dir = os.path.join(cfg['saving']['log_dir'], cfg['saving']['save_dir'])
    # Shared keyword arguments for every run_one_epoch call.
    run_kwargs = {
        'cfg': cfg,
        'mlog': mlog,
        'flog': flog,
        'optimizer': optimizer,
        'loss_fns': loss_fns,
        'model': model,
        'use_thread': cfg['saving']['in_background'],
    }
    context = []  # accumulated background thread handles
    log_interval = cfg['saving']['log_interval']
    # Intervals > 1 are epoch counts (int); fractional intervals mean
    # several logs per epoch and are kept as floats.
    log_interval = int(log_interval) if log_interval > 1 else log_interval
    end_epoch = start_epoch + num_epochs
    print(f'training for {num_epochs} epochs')
    for epoch in range(start_epoch, end_epoch):
        # tlkit.utils.count_open() # Turn on to check for memory leak
        torch.cuda.empty_cache()
        # Checkpoint on the first epoch and every save_epochs thereafter.
        if epoch == 0 or epoch % save_epochs == save_epochs - 1:
            context += save_checkpoint(model, optimizer, epoch, dataloaders, checkpoint_dir, use_thread=cfg['saving']['in_background'])
        should_run_validation = (epoch == 0) or (log_interval <= 1) or ((epoch % log_interval) == (log_interval - 1))
        if should_run_validation:
            assert math.isnan(mlog.peek_meter()['losses/total_0']), 'Loggers are not empty at the beginning of evaluation. Were training logs cleared?'
            context1, loss_dict = run_one_epoch(dataloader=dataloaders['val'], epoch=epoch, train=False, **run_kwargs)
            context += context1
            if scheduler is not None:
                # Plateau-style schedulers take the metric; others take no argument.
                try:
                    scheduler.step(loss_dict['total'])
                except:
                    scheduler.step()
        # training starts logging at epoch 1, val epoch 0 is fully random, each task should only last ONE epoch
        context1, _ = run_one_epoch(dataloader=dataloaders['train'], epoch=epoch+1, train=True, **run_kwargs)
        context += context1
        # Compute needed after the end of an epoch - e.g. EWC computes ~Fisher info matrix
        post_training_epoch(dataloader=dataloaders['train'], epoch=epoch, **run_kwargs)
    # Final validation pass and checkpoint after the last training epoch.
    context1, _ = run_one_epoch(dataloader=dataloaders['val'], epoch=end_epoch, train=False, **run_kwargs)
    context += context1
    context += save_checkpoint(model, optimizer, end_epoch, dataloaders, checkpoint_dir, use_thread=cfg['saving']['in_background'])
    return context
def post_training_epoch(dataloader=None, epoch=-1, model=None, loss_fns=None, **kwargs):
    """Give every loss function a chance to do end-of-epoch work (e.g. EWC's Fisher estimate).

    The loss belonging to the dataloader's current task runs first; the
    remaining losses follow in list order. A shared cache dict is passed to
    every call so the first hook can store results later ones may reuse.
    Loss functions without a ``post_training_epoch`` attribute are skipped.
    """
    shared_cache = {}
    current = dataloader.curr_iter_idx
    ordered = [(current, loss_fns[current])]
    ordered += [(idx, fn) for idx, fn in enumerate(loss_fns) if idx != current]
    for _, fn in ordered:
        if hasattr(fn, 'post_training_epoch'):
            fn.post_training_epoch(model, dataloader, shared_cache, **kwargs)
def run_one_epoch(model: LifelongSidetuneNetwork, dataloader, loss_fns, optimizer, epoch, cfg, mlog, flog, train=True, use_thread=False)->(list,dict):
    """Run a single pass over ``dataloader`` (train or validation).

    Returns ``(context, losses)`` where context is a list of background
    logging handles and losses maps each loss name to its mean over the
    logged steps. Logging steps through the progress of the epoch in
    [epoch, epoch + 1).
    """
    # logs through the progress of the epoch from [epoch, epoch + 1)
    start_time = time.time()
    model.train(train)
    params_with_grad = model.parameters()
    phase = 'train' if train else 'val'
    sources = cfg['training']['sources']
    targets = cfg['training']['targets']
    # Single-image tasks that appear in at least one configured target.
    tasks = [t for t in SINGLE_IMAGE_TASKS if len([tt for tt in cfg['training']['targets'] if t in tt]) > 0]
    cache = {'phase': phase, 'sources': sources, 'targets': targets, 'tasks': tasks}
    context = []
    losses = {x:[] for x in cfg['training']['loss_list']}
    log_steps = []
    log_interval = cfg['saving']['log_interval']
    log_interval = int(log_interval) if log_interval >= 1 else log_interval
    if log_interval < 1 and train:
        # Fractional interval: log several times within a single training epoch.
        num_logs_per_epoch = int(1 // log_interval)
        log_steps = [i * int(len(dataloader)/num_logs_per_epoch) for i in range(1, num_logs_per_epoch)]
    if cfg['training']['post_aggregation_transform_fn'] is not None:
        post_agg_transform = eval(cfg['training']['post_aggregation_transform_fn'])
    if cfg['learner']['use_feedback']:
        # Feedback needs several forward/backward passes over the same batch.
        num_passes = cfg['learner']['feedback_kwargs']['num_feedback_iter']
        backward_kwargs = {'retain_graph': True}
    else:
        num_passes = 1
        backward_kwargs = {}
    if isinstance(model, _CustomDataParallel):
        warnings.warn('DataParallel does not allow you to put part of the model on CPU')
        model.cuda()
    with torch.set_grad_enabled(train):
        seen = set()  # task indices encountered so far in this epoch
        for i, (task_idx, batch_tuple) in enumerate(tqdm(dataloader, desc=f"Epoch {epoch} ({phase})")):
            if cfg['training']['post_aggregation_transform_fn'] is not None:
                batch_tuple = post_agg_transform(batch_tuple, **cfg['training']['post_aggregation_transform_fn_kwargs'])
            # Determine and handle new task
            old_size = len(seen)
            seen.add(task_idx)
            if len(seen) > old_size:
                logger.info(f"Moving to task: {task_idx}")
                model.start_task(task_idx, train, print_alpha=True)
            # Decompose batch, Forward, Compute Loss
            x, label, masks = tlkit.utils.process_batch_tuple(batch_tuple, task_idx, cfg)
            for pass_i in range(num_passes):
                prediction = model(x, task_idx=task_idx, pass_i=pass_i)
                loss_dict = loss_fns[task_idx](prediction, label, masks, cache)
                # If training, Backward
                if train:
                    optimizer.zero_grad()
                    loss_dict['total'].backward(**backward_kwargs)
                    if cfg['learner']['max_grad_norm'] is not None:
                        torch.nn.utils.clip_grad_norm_(params_with_grad, cfg['learner']['max_grad_norm'])
                    optimizer.step()
            # Logging
            mlog.update_meter(model.merge_operator.param, meters={f'alpha/task_{task_idx}'}, phase=phase)
            for loss in cfg['training']['loss_list']:
                assert loss in loss_dict.keys(), f'Promised to report loss {loss}, but missing from loss_dict'
                mlog.update_meter(loss_dict[loss].detach().item(), meters={f'losses/{loss}_{task_idx}'}, phase=phase)
            if cfg['training']['task_is_classification'][task_idx]:
                add_classification_specific_logging(cache, mlog, task_idx, phase)
            if len(seen) > old_size:
                # First batch of a new task: log the input/label/prediction images.
                log_image(mlog, task_idx, cfg, x, label, prediction, masks=masks, cache=cache)
            # for super long epochs where we want some information between epochs
            if i in log_steps:
                step = epoch + i / len(dataloader)
                step = int(np.floor(step * cfg['saving']['ticks_per_epoch']))
                for loss in cfg['training']['loss_list']:
                    losses[loss].append(mlog.peek_meter(phase=phase)[f'losses/{loss}_{task_idx}'].item())
                context += write_logs(mlog, flog, task_idx, step, cfg, cache, to_print=False)
    # NOTE(review): task_idx here is the last task of the loop above; relies on
    # Python's loop-variable leak and on the dataloader being non-empty.
    for loss in cfg['training']['loss_list']:
        losses[loss].append(mlog.peek_meter(phase=phase)[f'losses/{loss}_{task_idx}'].item())
    if log_interval <= 1 or epoch % log_interval == log_interval - 1 or epoch == 0:
        step = epoch + (len(dataloader) - 1) / len(dataloader)
        step = int(np.floor(step * cfg['saving']['ticks_per_epoch']))
        context += write_logs(mlog, flog, task_idx, step, cfg, cache, to_print=True)
    assert len(losses['total']) > 0, 'Need to report loss'
    # Average every logged loss over the epoch.
    for k in losses.keys():
        losses[k] = sum(losses[k]) / len(losses[k])
    loss_str = ''.join([' | ' + k + ' loss: {0:.6f} '.format(v) for k, v in losses.items()])
    duration = int(time.time() - start_time)
    logger.info(f'End of epoch {epoch} ({phase}) ({duration//60}m {duration%60}s) {loss_str}')  # this is cumulative from previous train epochs in the same log_interval
    return context, losses
def save_checkpoint(model, optimizer, epoch, dataloaders, checkpoint_dir, use_thread=False):
    """Persist model/optimizer state plus the train dataloader position.

    Returns an (empty) list of background contexts, matching the interface
    the training loop expects from context-producing calls.
    """
    payload = dict(
        state_dict=model.state_dict(),
        epoch=epoch,
        model=model,
        optimizer=optimizer,
        curr_iter_idx=dataloaders['train'].curr_iter_idx,
    )
    checkpoints.save_checkpoint(payload, checkpoint_dir, epoch)
    return []
if __name__ == '__main__':
    assert LOG_DIR, 'log dir cannot be empty'
    # Manually parse command line opts
    short_usage, usage, internal_usage = ex.get_usage()
    args = docopt(internal_usage, [str(a) for a in sys.argv[1:]], help=False)
    config_updates, named_configs = get_config_updates(args['UPDATE'])
    # Run the setup command first, then attach the file-storage observer and
    # launch the main command with the same CLI arguments.
    ex.run('prologue', config_updates, named_configs, options=args)
    ex.observers.append(FileStorageObserverWithExUuid.create(LOG_DIR))
    ex.run_commandline()
else:
    # Imported as a module (e.g. via runpy); just announce the module name.
    print(__name__)
|
import unittest
from game_of_life.model import World
from game_of_life.model import Pattern
from game_of_life.model import OutOfBoundError
class PatternTestCase(unittest.TestCase):
    """Unit tests for the Pattern model."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_init(self):
        # The "Glider" pattern
        glider = Pattern('Glider', alives=[(1, 0), (0, 1), (-1, -1), (0, -1), (1, -1)])
        self.assertIsInstance(glider, Pattern)
        self.assertEqual(glider.name, 'Glider')

    def test_repr(self):
        pattern = Pattern('FooBar', alives=[(1, 0), (0, 1)])
        self.assertEqual(repr(pattern), '<Pattern "FooBar">: ((1, 0), (0, 1))')

    def test_as_screen_coordinate(self):
        glider = Pattern('Glider', alives=[(1, 0), (0, 1), (-1, -1), (0, -1), (1, -1)])
        # Screen mapping:
        #   x' = x + int(width / 2)
        #   y' = -y + int(height / 2)
        converted = glider.as_screen_coordinate(width=11, height=33)
        self.assertEqual(converted,
                         ((6, 16),
                          (5, 15),
                          (4, 17),
                          (5, 17),
                          (6, 17)))

    def test_as_screen_coordinate_empty_pattern(self):
        empty = Pattern('Glider', alives=[])
        self.assertEqual(empty.as_screen_coordinate(width=11, height=33), tuple())

    def test_as_screen_coordinate_error_size_too_small(self):
        # Minimum accepted size:
        #   width  = 1 + 2 * max(abs(x) for x in all_x)
        #   height = 1 + 2 * max(abs(y) for y in all_y)
        pattern = Pattern('FooBar', alives=[(10, 5), (-8, -10)])
        with self.assertRaisesRegex(ValueError,
                                    r'Size must be larger than width: 21, height: 21.'):
            pattern.as_screen_coordinate(20, 20)
class WorldTestCase(unittest.TestCase):
    """Unit tests for the World model."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_init_x_y(self):
        w = World(x=20, y=30)
        self.assertIsInstance(w, World)
        self.assertEqual(w.size, (20, 30))

    def test_init_x_or_y_error(self):
        # Non-positive dimensions must be rejected.
        for x, y in [(-10, 10), (-0, 10), (10, -10), (10, 0), (-10, -10), (0, 0)]:
            with self.assertRaises(ValueError):
                World(x=x, y=y)

    def test_set_alive(self):
        w = World(20, 30)
        w.set_alive(2, 3)
        self.assertEqual(w.is_alive(2, 3), True)

    def test_set_alive_out_of_bound(self):
        w = World(20, 30)
        with self.assertRaises(OutOfBoundError):
            w.set_alive(20, 30)

    def test_is_alive(self):
        w = World(20, 30)
        w.set_alive(2, 3)
        self.assertEqual(w.is_alive(2, 3), True)

    def test_is_alive_out_of_bound(self):
        w = World(20, 30)
        with self.assertRaises(OutOfBoundError):
            w.is_alive(20, 30)

    def test_set_dead(self):
        w = World(20, 30)
        w.set_alive(2, 3)
        w.set_dead(2, 3)
        self.assertEqual(w.is_alive(2, 3), False)

    def test_set_dead_out_of_bound(self):
        w = World(20, 30)
        with self.assertRaises(OutOfBoundError):
            w.set_dead(20, 30)

    def test_set_dead_for_not_alives(self):
        # Killing an already-dead cell is a no-op.
        w = World(20, 30)
        w.set_dead(3, 4)
        self.assertEqual(w.is_alive(3, 4), False)

    def test_toggle_aliveness_from_dead(self):
        w = World(20, 30)
        w.set_dead(2, 3)
        w.toggle_aliveness(2, 3)
        self.assertEqual(w.is_alive(2, 3), True)

    def test_toggle_aliveness_from_alive(self):
        w = World(20, 30)
        w.set_alive(2, 3)
        w.toggle_aliveness(2, 3)
        self.assertEqual(w.is_alive(2, 3), False)

    def test_toggle_aliveness_out_of_bound(self):
        w = World(20, 30)
        with self.assertRaises(OutOfBoundError):
            w.toggle_aliveness(20, 30)

    def test_get_alives(self):
        w = World(20, 30)
        w.set_alive(2, 3)
        w.set_alive(10, 20)
        self.assertEqual(w.alives, ((2, 3), (10, 20)))

    def test_calc_neighbors(self):
        # Interior cell: all 8 neighbors exist.
        w = World(20, 30)
        neighbors = w._calc_neighbors(3, 4)
        self.assertIsInstance(neighbors, tuple)
        self.assertEqual(len(neighbors), 8)
        self.assertCountEqual(neighbors,
                              ((2, 3), (2, 4), (2, 5), (3, 3),
                               (3, 5), (4, 3), (4, 4), (4, 5)))

    def test_calc_neighbors_at_bottom_left_corners(self):
        w = World(20, 30)
        neighbors = w._calc_neighbors(0, 0)
        self.assertIsInstance(neighbors, tuple)
        self.assertEqual(len(neighbors), 3)
        self.assertCountEqual(neighbors, ((0, 1), (1, 0), (1, 1)))

    def test_calc_neighbors_at_bottom_right_corners(self):
        w = World(20, 30)
        neighbors = w._calc_neighbors(19, 0)
        self.assertIsInstance(neighbors, tuple)
        self.assertEqual(len(neighbors), 3)
        self.assertCountEqual(neighbors, ((18, 0), (18, 1), (19, 1)))

    def test_calc_neighbors_at_upper_left_corners(self):
        w = World(20, 30)
        neighbors = w._calc_neighbors(0, 29)
        self.assertIsInstance(neighbors, tuple)
        self.assertEqual(len(neighbors), 3)
        self.assertCountEqual(neighbors, ((0, 28), (1, 28), (1, 29)))
def test_calc_neighbors_at_upper_right_corners(self):
world = World(20, 30)
neighbors = world._calc_neighbors(19, 29)
self.assertIsInstance(neighbors, tuple)
self.assertCountEqual(neighbors,
((18, 28),
(18, 29),
(19, 28)))
def test_calc_neighbors_on_left_side(self):
world = World(20, 30)
neighbors = world._calc_neighbors(0, 5)
self.assertIsInstance(neighbors, tuple)
self.assertCountEqual(neighbors,
((0, 6),
(1, 6),
(1, 5),
(1, 4),
(0, 4)))
def test_calc_neighbors_on_right_side(self):
world = World(20, 30)
neighbors = world._calc_neighbors(19, 5)
self.assertIsInstance(neighbors, tuple)
self.assertCountEqual(neighbors,
((19, 6),
(18, 6),
(18, 5),
(18, 4),
(19, 4)))
def test_calc_neighbors_on_upper_side(self):
world = World(20, 30)
neighbors = world._calc_neighbors(5, 29)
self.assertIsInstance(neighbors, tuple)
self.assertCountEqual(neighbors,
((4, 29),
(4, 28),
(5, 28),
(6, 28),
(6, 29)))
def test_calc_neighbors_on_bottom_side(self):
world = World(20, 30)
neighbors = world._calc_neighbors(5, 0)
self.assertIsInstance(neighbors, tuple)
self.assertEqual(len(neighbors), 5)
self.assertCountEqual(neighbors,
((4, 0),
(4, 1),
(5, 1),
(6, 1),
(6, 0)))
def test_calc_aliveness_died_by_no_neighbor(self):
world = World(20, 30)
world.set_alive(5, 5)
self.assertEqual(world._calc_aliveness(5, 5), False)
self.assertEqual(world._calc_aliveness(5, 6), False)
def test_calc_aliveness_died_by_one_neighbor(self):
world = World(20, 30)
world.set_alive(5, 5)
world.set_alive(5, 6)
self.assertEqual(world._calc_aliveness(5, 5), False)
self.assertEqual(world._calc_aliveness(5, 6), False)
def test_calc_aliveness_died_by_four_neighbors(self):
world = World(20, 30)
world.set_alive(5, 5)
world.set_alive(5, 6)
world.set_alive(5, 4)
world.set_alive(6, 5)
world.set_alive(6, 4)
self.assertEqual(world._calc_aliveness(5, 5), False)
self.assertEqual(world._calc_aliveness(6, 5), False)
def test_calc_aliveness_died_by_five_neighbors(self):
world = World(20, 30)
world.set_alive(5, 5)
world.set_alive(5, 6)
world.set_alive(5, 4)
world.set_alive(6, 5)
world.set_alive(6, 4)
world.set_alive(6, 6)
self.assertEqual(world._calc_aliveness(5, 5), False)
self.assertEqual(world._calc_aliveness(6, 5), False)
def test_calc_aliveness_survive_by_two_neighbors(self):
world = World(20, 30)
world.set_alive(5, 5)
world.set_alive(6, 6)
world.set_alive(4, 4)
self.assertEqual(world._calc_aliveness(5, 5), True)
def test_calc_aliveness_survive_by_three_neighbors(self):
world = World(20, 30)
world.set_alive(5, 5)
world.set_alive(5, 6)
world.set_alive(6, 6)
world.set_alive(6, 5)
self.assertEqual(world._calc_aliveness(5, 5), True)
self.assertEqual(world._calc_aliveness(5, 6), True)
self.assertEqual(world._calc_aliveness(6, 6), True)
self.assertEqual(world._calc_aliveness(6, 5), True)
def test_calc_aliveness_populated(self):
world = World(20, 30)
world.set_alive(4, 6)
world.set_alive(4, 4)
world.set_alive(6, 5)
self.assertEqual(world._calc_aliveness(5, 5), True)
def test_calc_aliveness_not_populated_with_two_neighbors(self):
world = World(20, 30)
world.set_alive(6, 5)
world.set_alive(4, 5)
self.assertEqual(world._calc_aliveness(5, 5), False)
def test_calc_alivness_not_populated_with_four_neighbors(self):
world = World(20, 30)
world.set_alive(6, 5)
world.set_alive(4, 5)
world.set_alive(5, 6)
world.set_alive(5, 4)
self.assertEqual(world._calc_aliveness(5, 5), False)
def test_calc_aliveness_bottom_left_corner_alive(self):
world = World(10, 10)
world.set_alive(0, 0)
world.set_alive(0, 1)
world.set_alive(1, 0)
self.assertEqual(world._calc_aliveness(0, 0), True)
self.assertEqual(world._calc_aliveness(0, 1), True)
self.assertEqual(world._calc_aliveness(1, 0), True)
self.assertEqual(world._calc_aliveness(1, 1), True)
def test_calc_aliveness_bottom_right_corner_alive(self):
world = World(10, 10)
world.set_alive(9, 0)
world.set_alive(8, 0)
world.set_alive(9, 1)
self.assertEqual(world._calc_aliveness(9, 0), True)
self.assertEqual(world._calc_aliveness(9, 1), True)
self.assertEqual(world._calc_aliveness(8, 0), True)
self.assertEqual(world._calc_aliveness(8, 1), True)
def test_calc_aliveness_upper_left_corner_alive(self):
world = World(10, 10)
world.set_alive(0, 9)
world.set_alive(0, 8)
world.set_alive(1, 9)
self.assertEqual(world._calc_aliveness(0, 9), True)
self.assertEqual(world._calc_aliveness(0, 8), True)
self.assertEqual(world._calc_aliveness(1, 9), True)
self.assertEqual(world._calc_aliveness(1, 8), True)
def test_calc_aliveness_upper_right_corner_alive(self):
world = World(10, 10)
world.set_alive(9, 9)
world.set_alive(8, 9)
world.set_alive(9, 8)
self.assertEqual(world._calc_aliveness(9, 9), True)
self.assertEqual(world._calc_aliveness(8, 9), True)
self.assertEqual(world._calc_aliveness(9, 8), True)
self.assertEqual(world._calc_aliveness(8, 8), True)
def test_advance(self):
world = World(10, 10)
world.set_alive(9, 9)
world.set_alive(8, 9)
world.set_alive(9, 8)
self.assertCountEqual(world.alives,
((9, 9),
(8, 9),
(9, 8)))
world.advance()
self.assertCountEqual(world.alives,
((9, 9),
(8, 9),
(9, 8),
(8, 8)))
|
from setuptools import setup,find_packages
import sys
from os import path
# The package version lives in a VERSION file next to this setup.py;
# only its first line is used.
with open(path.join(path.dirname(__file__), 'VERSION')) as v:
    VERSION = v.readline().strip()
setup(name = "fyplot",
      packages=find_packages('.'),
      version = VERSION,
      description="minimal interface for dynamic time plotting",
      url="https://github.com/intelligent-soft-robots/fyplot",
      long_description="see https://fyplot.readthedocs.io/en/latest/",
      author="Vincent Berenz",
      author_email="vberenz@tuebingen.mpg.de",
      scripts=['bin/fyplot_demo','bin/fyplot_demo2'],
      install_requires = ["pyqtgraph"]
      )
|
# Generated by Django 2.1.3 on 2018-11-20 06:16
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates the Space model — a bookable space listing
    optionally owned by an AUTH_USER_MODEL user."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Space',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(blank=True, max_length=150, null=True)),
                ('summary', models.TextField(blank=True, null=True)),
                ('capacity', models.IntegerField(blank=True, null=True)),
                ('hourly_rate', models.IntegerField(blank=True, null=True)),
                ('daily_rate', models.IntegerField(blank=True, null=True)),
                ('min_booking_hours', models.IntegerField(blank=True, null=True)),
                ('is_hidden', models.BooleanField(default=False)),
                # Listing approval workflow state; defaults to "Not Submitted".
                ('status', models.IntegerField(choices=[(0, 'Not Submitted'), (1, 'Awaiting Listing Approval'), (2, 'Listing Approved')], default=0)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                # SET_NULL keeps the Space row if the owning user is deleted.
                ('owner', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
import matplotlib
# Select the Tk GUI backend before pyplot is imported. NOTE(review): backend
# names are matched case-insensitively, so "TKagg" resolves to TkAgg — confirm
# on the targeted matplotlib version.
matplotlib.use("TKagg")
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
def show():
    """Display the current figure, then clear it so the next plot starts fresh."""
    plt.show()
    plt.clf()
# unsupervised estimator, principal component analysis (PCA)
# Intro to PCA
# let's generate some correlated 2-D data
rng = np.random.RandomState(1)
X = np.dot(rng.rand(2, 2), rng.randn(2, 200)).T
plt.scatter(X[:, 0], X[:, 1])
plt.axis('equal');
show()
# we want to learn the relationship between x and y, not predict values
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
pca.fit(X)
print(pca.components_)
print(pca.explained_variance_)
# let's visualize these numbers as vectors to better understand their meaning
def draw_vector(v0, v1, ax=None):
    """Annotate an arrow from point v0 to point v1 on ax (defaults to current axes)."""
    ax = ax or plt.gca()
    ax.annotate('', v1, v0,
                arrowprops=dict(arrowstyle='->', linewidth=2,
                                shrinkA=0, shrinkB=0))
# plot data
plt.scatter(X[:, 0], X[:, 1], alpha=0.2)
# each principal axis is drawn scaled by 3 standard deviations (sqrt of variance)
for length, vector in zip(pca.explained_variance_, pca.components_):
    v = vector * 3 * np.sqrt(length)
    draw_vector(pca.mean_, pca.mean_ + v)
plt.axis('equal');
show()
# PCA as dimensionality reduction
pca= PCA(n_components=1)
pca.fit(X)
X_pca = pca.transform(X)
print("original shape: ", X.shape)  # (200, 2)
print("transformed shape:", X_pca.shape)  # (200, 1)
# let's inverse transform and then plot with the original data
X_new = pca.inverse_transform(X_pca)
plt.scatter(X[:, 0], X[:, 1], alpha=0.2)
plt.scatter(X_new[:, 0], X_new[:, 1], alpha=0.8)
plt.axis('equal');
show()
# PCA for Visualization: hand-written digits
from sklearn.datasets import load_digits
digits = load_digits()  # data shape (1797, 64): 1797 flattened 8x8 images
# let's reduce this down to 2 dimensions
pca = PCA(2)  # 2 dimensional projection
projected = pca.fit_transform(digits.data)
# now we can plot the 2 principal components, colored by digit class
plt.scatter(projected[:, 0], projected[:, 1],
            c=digits.target, edgecolor='none', alpha=0.5,
            cmap=plt.cm.get_cmap('Spectral', 10))
plt.xlabel('component 1')
plt.ylabel('component 2')
plt.colorbar();
show()
# choosing the number of components
pca = PCA().fit(digits.data)
plt.plot(np.cumsum(pca.explained_variance_ratio_))
plt.xlabel('number of components')
plt.ylabel('cumulative explained variance');
show()  # our 2 components lose a lot of the data; need ~20 for 90% variance
#PCA as noise filtering
def plot_digits(data):
    """Show the first 40 rows of `data` as 8x8 grayscale digits on a 4x10 grid."""
    fig, axes = plt.subplots(4, 10, figsize=(10, 4),
                             subplot_kw={'xticks':[], 'yticks':[]},
                             gridspec_kw=dict(hspace=0.1, wspace=0.1))
    for index, axis in enumerate(axes.flat):
        image = data[index].reshape(8, 8)
        axis.imshow(image, cmap='binary', interpolation='nearest', clim=(0, 16))
plot_digits(digits.data)
show()
# add gaussian noise (sigma = 4) to the pixel values
np.random.seed(42)
noisy = np.random.normal(digits.data, 4)
plot_digits(noisy)
show()
# let's filter: keep just enough components for 50% of the variance
pca = PCA(0.50).fit(noisy)  # ~12 components
# reconstruct with inverse transform
components = pca.transform(noisy)
filtered = pca.inverse_transform(components)
plot_digits(filtered)
show()
# Example: Eigenfaces
# get our face data (NOTE: fetch_lfw_people downloads the dataset on first use)
from sklearn.datasets import fetch_lfw_people
faces = fetch_lfw_people(min_faces_per_person=60)
print(faces.target_names)
print(faces.images.shape)
# we will use randomized PCA since this is a large dataset
# (the alias below exists because sklearn removed the separate RandomizedPCA
# class; plain PCA covers the randomized solver — TODO confirm solver choice)
# we will reduce from near 3000 to 150 components
from sklearn.decomposition import PCA as RandomizedPCA
pca = RandomizedPCA(150)
pca.fit(faces.data)
fig, axes = plt.subplots(3, 8, figsize=(9, 4),
                         subplot_kw={'xticks':[], 'yticks':[]},
                         gridspec_kw=dict(hspace=0.1, wspace=0.1))
for i, ax in enumerate(axes.flat):
    # each component is a 62x47 "eigenface"
    ax.imshow(pca.components_[i].reshape(62, 47), cmap='bone')
show()
# let's check the cumulative variance
plt.plot(np.cumsum(pca.explained_variance_ratio_))
plt.xlabel('number of components')
plt.ylabel('cumulative explained variance');
# 150 turns out to be around 90% of variance
# let's compare to the full data
# Compute the components and projected faces
pca = RandomizedPCA(150).fit(faces.data)
components = pca.transform(faces.data)  # filter
projected = pca.inverse_transform(components)  # rebuild
# Plot the results
fig, ax = plt.subplots(2, 10, figsize=(10, 2.5),
                       subplot_kw={'xticks':[], 'yticks':[]},
                       gridspec_kw=dict(hspace=0.1, wspace=0.1))
for i in range(10):
    ax[0, i].imshow(faces.data[i].reshape(62, 47), cmap='binary_r')
    ax[1, i].imshow(projected[i].reshape(62, 47), cmap='binary_r')
ax[0, 0].set_ylabel('full-dim\ninput')
ax[1, 0].set_ylabel('150-dim\nreconstruction');
show()
|
# #####################################################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed #
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for #
# the specific language governing permissions and limitations under the License. #
# #####################################################################################################################
import logging
import os
import shutil
from dataclasses import dataclass
from pathlib import Path
logger = logging.getLogger("cdk-helper")
@dataclass
class Cleanable:
    """Encapsulates something that can be cleaned by the cleaner"""

    name: str        # human-readable label used in log messages
    file_type: str   # "d" for directories, "f" for files
    pattern: str     # glob pattern matched recursively under the source dir

    def __post_init__(self):
        # Only the two supported filesystem entry kinds are valid.
        if self.file_type not in ("d", "f"):
            raise ValueError("only directories and files are allowed ('d' or 'f')")

    def delete(self, source_dir):
        """Remove every match of `pattern` under `source_dir`, best effort."""
        for match in Path(source_dir).rglob(self.pattern):
            # prevent the module from being unlinked in a dev environment
            if "aws_solutions" in str(match.name):
                continue
            if self.file_type == "d" and match.is_dir():
                logger.info(f"deleting {self.name} directory {match}")
                shutil.rmtree(match, ignore_errors=True)
            if self.file_type == "f" and match.is_file():
                logger.info(f"deleting {self.name} file {match}")
                try:
                    match.unlink()
                except FileNotFoundError:
                    pass
class Cleaner:
    """Encapsulates functions that help clean up the build environment."""

    # Build artifacts to purge; each entry pairs a label with a glob pattern.
    TO_CLEAN = [
        Cleanable("Python bytecode", "f", "*.py[cod]"),
        Cleanable("Python Coverage databases", "f", ".coverage"),
        Cleanable("CDK Cloud Assemblies", "d", "cdk.out"),
        Cleanable("Python egg", "d", "*.egg-info"),
        Cleanable("Python bytecode cache", "d", "__pycache__"),
        Cleanable("Python test cache", "d", ".pytest_cache"),
    ]

    @staticmethod
    def clean_dirs(*args):
        """Recursively remove each of its arguments, then recreate the directory"""
        for dir_to_remove in args:
            # Lazy %-style logging args: formatting is skipped when the
            # level is disabled (was an eager "..." % dir_to_remove).
            logger.info("cleaning %s", dir_to_remove)
            shutil.rmtree(dir_to_remove, ignore_errors=True)
            os.makedirs(dir_to_remove)

    @staticmethod
    def cleanup_source(source_dir):
        """Cleans up all items found in TO_CLEAN"""
        for item in Cleaner.TO_CLEAN:
            item.delete(source_dir)
|
import os
import re
import csv
import pandas as pd
def stream_local_csv(file_path, options):
    """Stream a local CSV file as an iterator of 1000-row DataFrame chunks.

    `options` is a dict of extra keyword arguments forwarded to
    `pandas.read_csv`.
    """
    # With `chunksize`, read_csv returns a TextFileReader iterator; the old
    # `if reader: return reader` truthiness guard was meaningless and made
    # the function implicitly return None on a (unreachable) falsy reader.
    return pd.read_csv(file_path, chunksize=1000, **options)
def stream_csv_from_s3(s3_file_url, options):
    """Stream a CSV stored on S3 as an iterator of 1000-row DataFrame chunks.

    Credentials come from the AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY
    environment variables. `options` holds extra `pandas.read_csv` keyword
    arguments; it was previously accepted but silently ignored — it is now
    forwarded, matching stream_local_csv.
    """
    aws_access_key_id = os.getenv("AWS_ACCESS_KEY_ID")
    aws_secret_access_key = os.getenv("AWS_SECRET_ACCESS_KEY")
    return pd.read_csv(
        s3_file_url,
        storage_options={
            "key": aws_access_key_id,
            "secret": aws_secret_access_key,
        },
        chunksize=1000,
        **options,
    )
def stream_csv(file_path, options=None):
    """Dispatch to the S3 or local CSV streamer based on the path scheme.

    Raises ValueError (an Exception subclass, so existing `except Exception`
    callers still work) when no path is given.
    """
    if not file_path:
        raise ValueError('Not csv file path found')
    # Default applied here rather than via a mutable default argument,
    # which would be shared (and mutable) across calls.
    options = options or {}
    if re.match(r's3://', file_path):
        return stream_csv_from_s3(file_path, options)
    return stream_local_csv(file_path, options)
|
#!/usr/bin/env python3
""" Makes a PR curve based on output from generate_detection_metrics.py """
import argparse
import numpy as np
import matplotlib.pyplot as plt
if __name__=="__main__":
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('data_file')
    parser.add_argument('--output', default="pr.png")
    parser.add_argument("--doubles", action="store_true")
    args = parser.parse_args()
    # Columns of the saved metrics matrix: keep threshold, precision, recall,
    # doubles. NOTE(review): "doubles" is presumably double-detections per
    # truth object — confirm against generate_detection_metrics.py.
    matrix = np.load(args.data_file)
    keep_t = matrix[:,0]
    precision = matrix[:,1]
    recall = matrix[:,2]
    doubles = matrix[:,3]
    fig,axes = plt.subplots(2)
    fig.set_figheight(10)
    fig.set_figwidth(8)
    # Top panel: the PR curve itself.
    ax1 = axes[0]
    ax1.plot(recall, precision, color='blue', label='Precision/Recall')
    ax1.set_xlabel('Recall')
    ax1.set_ylabel('Precision')
    #ax1.plot(recall, keep_t, color='green', label='Keep Threshold')
    ax1.set_xlim(0,1)
    ax1.set_ylim(0,1)
    ax1.grid()
    # Bottom panel: keep-threshold as a function of recall.
    ax2 = axes[1]
    ax2.plot(recall, keep_t,color='green', label='Keep Threshold')
    ax2.set_ylabel('Keep Threshold')
    ax2.set_xlabel('Recall')
    ax2.set_xlim(0,1)
    ax2.set_ylim(0,1)
    ax2.grid()
    if not args.doubles:
        # Overlay the doubles rate on the TOP panel via a twin y-axis.
        # NOTE(review): this rebinds ax2 (shadowing the bottom-panel handle)
        # and triggers when --doubles is ABSENT — confirm the flag polarity.
        ax2 = ax1.twinx()
        ax2.plot(recall, doubles, color='red', label='doubles')
        ax2.set_ylabel('Doubles / Total Truth', color='red')
        ax2.tick_params(axis='y', labelcolor='red')
    fig.legend(loc='lower left')
    fig.tight_layout()
    plt.savefig(args.output)
|
import time
import numpy as np
from concurrent.futures import ThreadPoolExecutor
from ConfigSpace import Configuration
def execute_func(params):
    """Run a single evaluation and time it.

    `params` is a (evaluator, config, subsample_ratio) triple. A ConfigSpace
    Configuration is evaluated as an HPO trial; anything else is passed as a
    feature-engineering data node.

    Returns (score, seconds_taken). Any exception is deliberately mapped to a
    score of np.inf so one failing trial cannot abort a whole batch.
    """
    start_time = time.time()
    evaluator, config, subsample_ratio = params
    try:
        if isinstance(config, Configuration):
            score = evaluator(config, name='hpo', data_subsample_ratio=subsample_ratio)
        else:
            score = evaluator(None, data_node=config, name='fe', data_subsample_ratio=subsample_ratio)
    except Exception:
        # Best-effort penalty score; the unused `as e` binding was dropped.
        score = np.inf
    time_taken = time.time() - start_time
    return score, time_taken
class ParallelEvaluator(object):
    """Evaluates batches of configurations concurrently on a thread pool."""

    def __init__(self, evaluator, n_worker=1):
        """evaluator: callable scoring one configuration; n_worker: pool size."""
        self.evaluator = evaluator
        self.n_worker = n_worker
        self.thread_pool = ThreadPoolExecutor(max_workers=n_worker)

    def update_evaluator(self, evaluator):
        """Swap in a new evaluator for subsequent batches."""
        self.evaluator = evaluator

    def wait_tasks_finish(self, trial_stats):
        """Block until every future in `trial_stats` is done (poll every 100 ms)."""
        # Replaces a hand-rolled all-completed flag/loop with the equivalent
        # (and clearer) all() poll; same 0.1 s sleep between checks.
        while not all(trial.done() for trial in trial_stats):
            time.sleep(0.1)

    def parallel_execute(self, param_list, subsample_ratio=1.):
        """Evaluate `param_list` in batches of n_worker; returns scores in order."""
        n_configuration = len(param_list)
        batch_size = self.n_worker
        # Ceiling division: the final batch may be smaller than batch_size.
        n_batch = -(-n_configuration // batch_size)
        evaluation_result = list()
        for i in range(n_batch):
            execution_stats = [
                self.thread_pool.submit(execute_func,
                                        (self.evaluator, _param, subsample_ratio))
                for _param in param_list[i * batch_size: (i + 1) * batch_size]
            ]
            # wait for the whole batch before submitting the next one
            self.wait_tasks_finish(execution_stats)
            # collect scores; execute_func never raises, it encodes failures as inf
            for trial in execution_stats:
                assert trial.done()
                evaluation_result.append(trial.result()[0])
        return evaluation_result
|
import os
import re
import requests
from invoke import task
def _get_aws_token(c):
token = os.getenv("AWS_TOKEN")
if not token:
token = c.run("aws ecr get-authorization-token --output text "
"--query 'authorizationData[].authorizationToken'", hide=True).stdout.strip()
return token
def _get_gcloud_token(c):
token = os.getenv("GCLOUD_TOKEN")
if not token:
token = c.run("gcloud auth print-access-token", hide=True).stdout.strip()
return token
def _version_to_int(version):
"""Converts a version number into an integer number, so it can be sorted
>>> _version_to_int("0.1.1")
1001
>>> _version_to_int("1.2.3")
1002003
>>> _version_to_int("2001")
2001
>>> _version_to_int("latest")
0
"""
if version == "latest":
return 0
components = version.split(".")
ret = 0
for i, comp in enumerate(components):
ret += int(comp) * (1000 ** (len(components) - (i + 1)))
return ret
def _registry_type(registry):
if "amazonaws" in registry:
return "aws"
elif "gcr.io" in registry:
return "googlecloud"
elif "icr.io" in registry:
return "ibmcloud"
elif registry == "":
return "dockerhub"
else:
return "unknown"
def _join(registry, image):
if not registry:
return image
return "{}/{}".format(registry, image)
def _auth_headers(c, registry):
    """Build the `requests` keyword arguments carrying credentials for `registry`."""
    kind = _registry_type(registry)
    if kind == "aws":
        # ECR expects the raw authorization token as a Basic header.
        return dict(headers={'Authorization': 'Basic {}'.format(_get_aws_token(c))})
    if kind == "googlecloud":
        return dict(auth=("oauth2accesstoken", _get_gcloud_token(c)))
    return {}
def _get_last_version_from_local_docker(c, registry, image):
    """Return the highest tag of the image known to the local Docker daemon."""
    listing = c.run(f"docker image ls {_join(registry, image)}", hide="out")
    # Black magic explanation: skips first line (header), 2nd field is version
    tags = [re.split(" +", line)[1] for line in listing.stdout.splitlines()[1:]]
    return sorted(tags, key=_version_to_int)[-1]
def _get_last_version(c, registry, image):
    """Return the highest existing tag, querying the registry HTTP API when possible."""
    if _registry_type(registry) in ("ibmcloud", "dockerhub"):
        # fallback: no known way to list tags from these registries here
        return _get_last_version_from_local_docker(c, registry, image)
    response = requests.get('https://{}/v2/{}/tags/list'.format(registry, image),
                            **_auth_headers(c, registry))
    response.raise_for_status()
    tags = response.json()['tags']
    # The v2 tags endpoint caps responses at 100 entries; paging is not handled.
    if len(tags) == 100:
        raise RuntimeError(
            "Error, the response has 100 tags, we hit the limit and paging not supported, "
            "you should remove some tags in ECR console"
        )
    return sorted(tags, key=_version_to_int)[-1]
def _get_next_version(c, registry, image):
    """Return the latest version with its final dotted component incremented."""
    registry, image = _default_registry_image(c, registry, image)
    components = _get_last_version(c, registry, image).split('.')
    components[-1] = str(int(components[-1]) + 1)
    return '.'.join(components)
def _default_registry_image(c, registry, image):
if not registry:
registry = c.config.registry
if not image:
image = c.config.image
return registry, image
@task
def last_version(c, registry=None, image=None):
    """Print the most recently published tag for the (defaulted) registry/image."""
    reg, img = _default_registry_image(c, registry, image)
    print(_get_last_version(c, reg, img))

@task
def next_version(c, registry=None, image=None):
    """Print the tag that the next build would be given."""
    reg, img = _default_registry_image(c, registry, image)
    print(_get_next_version(c, reg, img))
def docker_exec(c, command, container=None, pty=True, envs=None, workdir=None):
    """Run `command` inside `container` via `docker exec`.

    Environment variables come from `envs` plus any host variables prefixed
    with DOCKEREXEC_ (prefix stripped). `workdir` sets the working directory
    inside the container. `envs` now defaults to None instead of a shared
    mutable `{}` default.
    """
    container = container or c.config.container
    run_command = "docker exec "
    if pty:
        run_command += "-it "
    if workdir:
        # Bug fix: the trailing space was missing, which glued the next flag
        # onto the path (e.g. "-w /srv--env X=1").
        run_command += f"-w {workdir} "
    for env_var, env_value in (envs or {}).items():
        run_command += f"--env {env_var}={env_value} "
    # Forward host variables like DOCKEREXEC_MY_VAR=... as --env MY_VAR=...
    for key, env_value in os.environ.items():
        if key.startswith("DOCKEREXEC_"):
            env_var = key.split('_', 1)[1]
            run_command += f"--env {env_var}={env_value} "
    c.run("{} {} {}".format(run_command, container, command), pty=pty)
@task
def docker_put(c, source, target, container=None):
    """Copy a file from the host into the container."""
    dest_container = container or c.config.container
    c.run(f"docker cp {source} {dest_container}:{target}")

@task
def docker_get(c, source, target, container=None):
    """Copy a file out of the container onto the host."""
    src_container = container or c.config.container
    c.run(f"docker cp {src_container}:{source} {target}")
def _compose_file():
return os.getenv("COMPOSE_FILE", "docker-compose.yml")
@task
def start_dev(c, compose_files="docker-compose.override.dev.yml,docker-compose.override.local-dev.yml",
              detach=True):
    """Bring the stack up, layering any dev override files that exist locally."""
    extra_param = "".join(f"-f {compose_file} "
                          for compose_file in compose_files.split(",")
                          if os.path.exists(compose_file))
    detach = "-d" if detach else ""
    c.run(f"docker-compose -f {_compose_file()} {extra_param} up --build {detach}")
@task
def start(c, detach=True):
    """Build and start the compose stack."""
    detach = "-d" if detach else ""
    c.run(f"docker-compose -f {_compose_file()} up --build {detach}")

@task
def stop(c):
    """Stop and remove the compose stack."""
    # Bug fix: -f is a global docker-compose option and must precede the
    # subcommand; `docker-compose down -f FILE` is rejected by the CLI.
    c.run(f"docker-compose -f {_compose_file()} down")

@task
def shell(c):
    """Open an interactive OS shell inside the app container."""
    shell = c.config.get("container_shell", "sh")
    docker_exec(c, shell)

@task
def pyshell(c):
    """Open an interactive Python shell inside the app container."""
    pyshell = c.config.get("container_pyshell", "ipython")
    docker_exec(c, pyshell)
@task
def build(c, registry=None, image=None, version=None):
    """Build the Docker image, tagging it with the next version by default."""
    registry, image = _default_registry_image(c, registry, image)
    tag_version = version or _get_next_version(c, registry, image)
    c.run("docker build -t {}:{} .".format(_join(registry, image), tag_version))
@task
def push_image(c, registry=None, image=None, version=None):
    """Push image:version to the registry, logging in to ECR first when needed."""
    registry, image = _default_registry_image(c, registry, image)
    if not version:
        if _registry_type(registry) in ("ibmcloud", "dockerhub"):
            version = _get_last_version_from_local_docker(c, registry, image)
        else:
            version = _get_next_version(c, registry, image)
    if _registry_type(registry) == "aws":
        # aws-cli emits the docker login command for ECR; run it verbatim.
        docker_login_cmd = c.run("aws ecr get-login --no-include-email", hide=True).stdout
        c.run(docker_login_cmd)
    c.run("docker push {}:{}".format(_join(registry, image), version))
|
# Print 0 through 3, one value per line.
print("\n".join(str(value) for value in range(4)))
from django.core.files.base import ContentFile
from django.utils.six import b
from django.test import TestCase
from wagtail.wagtaildocs.models import Document, document_served
from downloadcounter.models import DownloadCount
class TestCounter(TestCase):
    """DownloadCount should start at zero and increment when its document is served."""

    def test_count(self):
        fake_file = ContentFile(b("Example document"))
        fake_file.name = 'test.txt'
        document = Document.objects.create(title="Test document", file=fake_file)
        counter = DownloadCount.objects.create(file=document)
        # Bug fix: `assert counter.count is 0` compared integers by identity,
        # which only works by accident of CPython's small-int caching.
        self.assertEqual(counter.count, 0)
        # Serving the document should bump the persisted counter by one.
        document_served.send(sender=Document, instance=document)
        counter.refresh_from_db()
        self.assertEqual(counter.count, 1)
|
# Bot API token — fill in before running (empty by default). NOTE(review):
# the consuming bot framework is not visible from this file.
bot_token = ""
# User identifiers the bot should act on — presumably chat/user ids; confirm
# against the consumer.
users = [
]
# Websites for the bot to check or link to.
website_links =[
    "https://www.google.com",
]
|
class Solution:
    def sortArrayByParityII(self, a):
        """
        Interleave the even and odd values of `a` so result[i] % 2 == i % 2.
        :type A: List[int]
        :rtype: List[int]
        """
        # Bug fix: `itertools` was referenced without being imported anywhere
        # in this file, raising NameError at runtime; import it locally.
        import itertools
        evens = (x for x in a if x % 2 == 0)
        odds = (x for x in a if x % 2 == 1)
        return list(itertools.chain.from_iterable(zip(evens, odds)))
|
import os
import sys
print("HELLO")
# Give the parent directory top import priority (e.g. to import a sibling
# package when this script runs from a subdirectory), then echo it.
sys.path.insert(0, os.path.join(os.getcwd(), os.pardir))
print(sys.path[0])
# Then promote the current working directory above it.
sys.path.insert(0, os.getcwd())
print(sys.path[0])
|
from arch.unitroot import ADF, PhillipsPerron, KPSS
import pandas as pd
class StationarityTests:
    """
    Stationarity Testing
    Also often called Unit Root tests
    Three commonly used tests to check stationarity of the data
    """

    def __init__(self, significance=.05):
        self.SignificanceLevel = significance  # alpha for rejecting the null
        self.pValue = None        # p-value of the most recent test run
        self.isStationary = None  # conclusion of the most recent test run

    def ADF_Test(self, timeseries, printResults=True):
        """
        Augmented Dickey-Fuller (ADF) Test
        Null Hypothesis is Unit Root
        Reject Null Hypothesis >> Series is stationary >> Use price levels
        Fail to Reject >> Series has a unit root >> Use price returns
        """
        adfTest = ADF(timeseries)
        self.pValue = adfTest.pvalue
        # Small p-value rejects the unit-root null => stationary.
        self.isStationary = self.pValue < self.SignificanceLevel
        if printResults:
            print('Augmented Dickey-Fuller (ADF) Test Results: {}'.format(
                'Stationary' if self.isStationary else 'Not Stationary'))

    def PP_Test(self, timeseries, printResults=True):
        """
        Phillips-Perron (PP) Test
        Null Hypothesis is Unit Root
        Reject Null Hypothesis >> Series is stationary >> Use price levels
        Fail to Reject >> Series has a unit root >> Use price returns
        """
        ppTest = PhillipsPerron(timeseries)
        self.pValue = ppTest.pvalue
        # Small p-value rejects the unit-root null => stationary.
        self.isStationary = self.pValue < self.SignificanceLevel
        if printResults:
            print('Phillips-Perron (PP) Test Results: {}'.format(
                'Stationary' if self.isStationary else 'Not Stationary'))

    def KPSS_Test(self, timeseries, printResults=True):
        """
        Kwiatkowski-Phillips-Schmidt-Shin (KPSS) Test
        Null Hypothesis is Stationarity (note: OPPOSITE of ADF/PP)
        Reject Null Hypothesis >> Series has a unit root >> Use price returns
        Fail to Reject >> Series is stationary >> Use price levels
        """
        # Docstring fix: the original claimed "Null Hypothesis is Unit Root",
        # but KPSS's null is stationarity — which is exactly what the inverted
        # decision below (already correct) implements.
        kpssTest = KPSS(timeseries)
        self.pValue = kpssTest.pvalue
        # Small p-value rejects the stationarity null => NOT stationary.
        self.isStationary = not (self.pValue < self.SignificanceLevel)
        if printResults:
            print('Kwiatkowski-Phillips-Schmidt-Shin (KPSS) Test Results: {}'.format(
                'Stationary' if self.isStationary else 'Not Stationary'))
|
# Generated by Django 2.0.2 on 2018-04-02 20:54
from django.db import migrations, models
import re
# Placeholder CalendarPeriod rows removed/re-created around the data migration
# so the temporary ForeignKey default (9999) always resolves.
CALENDAR_PERIODS = [
    {
        "calendar_period_id": 9999,
        "calendar_period_name": "TemporaryValue",
    },
]
def extract_year(semester_name):
    """Return the leading 4-digit year of a semester name.

    Note: the matched text keeps its trailing space (e.g. '2018 '), which the
    migration relies on Django's IntegerField coercion to absorb.
    """
    matches = re.findall(r'^\d{4} ', semester_name)
    if len(matches) != 1:
        raise ValueError('Error found in year portion of semester name:', semester_name)
    return matches[0]
def extract_calendar_period_name(semester_name):
    """Return the trailing season ('Spring', 'Summer' or 'Fall') of a semester name."""
    matches = re.findall(r' (Spring|Summer|Fall)$', semester_name)
    if len(matches) != 1:
        raise ValueError('Error found in calendar_period portion of semester name:', semester_name)
    return matches[0]
def forward_convert_semester_data(apps, schema_editor):
    """Split each semester_name into the new year + calendar_period fields."""
    CalendarPeriod = apps.get_model('courseinfo', 'CalendarPeriod')
    Semester = apps.get_model('courseinfo', 'Semester')
    for semester in Semester.objects.all():
        semester.year = extract_year(semester.semester_name)
        period_name = extract_calendar_period_name(semester.semester_name)
        semester.calendar_period = CalendarPeriod.objects.get(
            calendar_period_name=period_name
        )
        semester.save()
def reverse_convert_semester_data(apps, schema_editor):
    """Rebuild semester_name and point every row back at the 9999 placeholder."""
    CalendarPeriod = apps.get_model('courseinfo', 'CalendarPeriod')
    Semester = apps.get_model('courseinfo', 'Semester')
    placeholder = CalendarPeriod.objects.get(
        calendar_period_id=9999
    )
    for semester in Semester.objects.all():
        semester.semester_name = str(semester.year) + ' - ' + semester.calendar_period.calendar_period_name
        semester.year = 0
        semester.calendar_period = placeholder
        semester.save()
def remove_temporary_calendar_period_data(apps, schema_editor):
    """Delete the placeholder CalendarPeriod rows listed in CALENDAR_PERIODS."""
    CalendarPeriod = apps.get_model('courseinfo', 'CalendarPeriod')
    for entry in CALENDAR_PERIODS:
        CalendarPeriod.objects.get(
            calendar_period_id=entry['calendar_period_id']
        ).delete()
def add_temporary_calendar_period_data(apps, schema_editor):
    """Re-create the placeholder CalendarPeriod rows (reverse of removal)."""
    CalendarPeriod = apps.get_model('courseinfo', 'CalendarPeriod')
    for entry in CALENDAR_PERIODS:
        CalendarPeriod.objects.create(
            calendar_period_id=entry['calendar_period_id'],
            calendar_period_name=entry['calendar_period_name']
        )
class Migration(migrations.Migration):
    """Replaces Semester.semester_name with (year, calendar_period), converting
    existing rows in place via the RunPython steps defined above."""

    dependencies = [
        ('courseinfo', '0006_calenderperiod'),
    ]

    operations = [
        # Relax semester_name (drop uniqueness) while it still exists.
        migrations.AlterField(
            model_name='semester',
            name='semester_name',
            field=models.CharField(max_length=45, unique=False, default='temporary value'),
        ),
        migrations.AddField(
            model_name='semester',
            name='year',
            field=models.IntegerField(default=0)
        ),
        # Temporary default 9999 points at the placeholder CalendarPeriod.
        migrations.AddField(
            model_name='semester',
            name='calendar_period',
            field=models.ForeignKey(to='courseinfo.calendarperiod', default=9999, on_delete=models.CASCADE)
        ),
        # Populate the new fields from semester_name (reverse rebuilds the name).
        migrations.RunPython(
            forward_convert_semester_data,
            reverse_convert_semester_data
        ),
        # Forward REMOVES the placeholder (presumably created in 0006); the
        # reverse callable re-creates it so the FK default stays valid.
        migrations.RunPython(
            remove_temporary_calendar_period_data,
            add_temporary_calendar_period_data
        ),
        migrations.RemoveField(
            model_name='semester',
            name='semester_name'
        ),
        # Drop the temporary defaults now that every row is populated.
        migrations.AlterField(
            model_name='semester',
            name='year',
            field=models.IntegerField()
        ),
        migrations.AlterField(
            model_name='semester',
            name='calendar_period',
            field=models.ForeignKey(to='courseinfo.calendarperiod', on_delete=models.CASCADE)
        ),
        migrations.AlterUniqueTogether(
            name="semester",
            unique_together=set([('year', 'calendar_period')]),
        ),
    ]
|
from core.viewsets import ModelViewSet
from core.models import Service
from core.services.serializers import ServiceSerializer
from core.permissions import FRONT_DESK, CASE_MANAGER, ADMIN
class ServiceViewSet(ModelViewSet):
    """CRUD API over Service rows; access is gated per action by permission group."""
    queryset = Service.objects.all()
    serializer_class = ServiceSerializer
    # All three staff roles may read and update. NOTE(review): no groups are
    # declared for 'create'/'destroy' — presumably handled by the base
    # ModelViewSet defaults; confirm intended.
    permission_groups = {
        'retrieve': [FRONT_DESK, CASE_MANAGER, ADMIN],
        'list': [FRONT_DESK, CASE_MANAGER, ADMIN],
        'update': [FRONT_DESK, CASE_MANAGER, ADMIN]
    }
|
import streamlit as st
#%% function for the second page
def lenet_page():
    """Render the "Architecture LeNet" page of the Streamlit app (French UI)."""
    #%%% Header
    st.markdown("<h1 style='text-align: center'>Architecture LeNet</h1>", unsafe_allow_html=True)
    st.write("")
    st.write("L’architecture LeNet est introduit par LeCun et al. en 1998")
    st.write("")
    st.image("architecture.png", width=700)
    st.write("")
    st.write("""<body style='text-align: justify'>LeNet est composé des couches de convolution suivis des couches de Pooling,
puis des couches entièrement connectées, avec une dernière couche munie d'une fonction d'activation Softmax.</body>""", unsafe_allow_html=True)
    st.write("Voici le summary de l'architecture LeNet testée dans ce projet:")
    st.write("")
    st.image("summary_lenet.png", width=700)
    st.write("")
    body1 = """<body style='text-align: justify'>Nous avons utilisé la classe ImageDataGenerator pour augmenter le nombre de nos images
et éviter le surapprentissage. Nous avons utiliser la méthode flow_from_dataframe pour créer nos datasets de train et de test.
Sur la figure ci-dessous, vous pouvez voir les plots de l’accuracy et la fonction de loss pour nos datasets de train et de test :</body>"""
    st.markdown(body1, unsafe_allow_html=True)
    st.write("")
    # Fix: this was a bare `st.write()` with no argument, which renders
    # nothing; use the explicit empty-string spacer like everywhere else.
    st.write("")
    st.image("loss_accuracy_lenet.png", width=700)
    st.write("""<body style='text-align: justify'>Nous pouvons constater que nous n’avons pas une amélioration significative de notre accuracy ni
pour notre dataset de train, ni pour celui de test. La fonction de perte ne diminue pas dans les deux cas.</body>""", unsafe_allow_html=True)
    st.markdown("En affichant la matrice de confusion, nous constatons que le modèle prédit tous les labels en tant que label 0 :")
    st.write("")
    st.image("matrice_confusion_lenet.png", width=500)
    st.subheader("Conclusion:")
    st.write("")
    st.markdown("Ces indicateurs nous prouvent que l’architecture LeNet n’est pas une architecture adaptée pour notre problématique de classification des champignons.")
    st.write("")
# %%
|
from . import git_autopep8
|
from vosk import Model, KaldiRecognizer
import os
import pyaudio
import pyttsx3
import json
import core
# Start of speech synthesis setup
engine = pyttsx3.init()
voices = engine.getProperty('voices')
# Picks the second-to-last installed voice — the original comment says this
# guarantees Portuguese output; NOTE(review): machine-dependent, confirm.
engine.setProperty('voice', voices[-2].id)
# Function that speaks the given message
def speak(text):
    """Speak `text` aloud via the module-level pyttsx3 engine; blocks until done."""
    engine.say(text)
    engine.runAndWait()
# End of speech synthesis setup
# Speech recognition setup
model = Model("model")  # Vosk model directory named "model" in the CWD
rec = KaldiRecognizer(model, 16000)  # 16 kHz must match the stream below
p = pyaudio.PyAudio()
# Mono 16-bit microphone stream at 16 kHz, read in 2048-frame chunks.
stream = p.open(format=pyaudio.paInt16, channels=1,
                rate=16000, input=True, frames_per_buffer=2048)
stream.start_stream()
# Speech-recognition main loop
while True:
    data = stream.read(2048)
    if len(data) == 0:
        # Empty read: the microphone stream ended.
        break
    if rec.AcceptWaveform(data):
        # A full utterance was decoded; Result() returns a JSON string.
        result = rec.Result()
        result = json.loads(result)
        # NOTE(review): json.loads only yields None for the input "null",
        # which Vosk does not produce — guard is effectively always true.
        if result is not None:
            text = result['text']
        #text = str(input('Escreva: '))
        # Dispatch on the recognized Portuguese command phrase.
        if text == 'data de hoje':
            speak(core.SystemInfo.get_date())
        if text == 'que horas são':
            speak(core.SystemInfo.get_time())
        if text == 'abrir bloco de notas':
            speak('Abrindo o bloco de notas')
            speak(core.SystemInfo.get_Notepad())
        if text == 'sair':
            speak('Desligando')
            exit()
        if text == 'abrir terminal':
            speak('Abrindo o Prompt de comando')
            speak(core.SystemInfo.get_Prompt())
        if text == 'abrir navegador':
            speak('Abrindo o Google Chrome')
            speak(core.SystemInfo.get_Chrome())
        # Echo what was understood, recognized command or not.
        print(text)
|
"""
Tests for the basic scenarios for the scanner.
"""
import os
import sys
import tempfile
from test.pytest_execute import InProcessExecution
from project_summarizer.__main__ import main
from project_summarizer.main import ProjectSummarizer
JUNIT_COMMAND_LINE_FLAG = "--junit"
COBERTURA_COMMAND_LINE_FLAG = "--cobertura"
PUBLISH_COMMAND_LINE_FLAG = "--publish"
ONLY_CHANGES_COMMAND_LINE_FLAG = "--only-changes"

REPORT_DIRECTORY = "report"
PUBLISH_DIRECTORY = "publish"

JUNIT_RESULTS_FILE_NAME = "tests.xml"
RESULTS_SUMMARY_FILE_NAME = "test-results.json"
COVERAGE_SUMMARY_FILE_NAME = "coverage.json"

__COBERTURA_COVERAGE_FILE_NAME = "coverage.xml"
__COBERTURA_NON_WINDOWS_COVERAGE_FILE_NAME = "coverage-non-windows.xml"


def get_coverage_file_name():
    """
    Get the coverage file for the specific operating system class.

    Windows uses a different file name hierarchy than the other platforms.
    """
    on_windows = sys.platform.startswith("win")
    return (
        __COBERTURA_COVERAGE_FILE_NAME
        if on_windows
        else __COBERTURA_NON_WINDOWS_COVERAGE_FILE_NAME
    )
class MainlineExecutor(InProcessExecution):
    """
    Class to provide for a local instance of a InProcessExecution class.
    """

    def __init__(self, use_module=False, use_main=False):
        """
        Configure which entry point the executor reports and invokes.

        :param use_module: report the module entry point name instead of main.py
        :param use_main: invoke __main__.main() instead of ProjectSummarizer().main()
        """
        super().__init__()
        self.__use_main = use_main
        # NOTE(review): "__main.py__" looks like a typo for "__main__.py" —
        # confirm the expected display name.
        self.__entry_point = "__main.py__" if use_module else "main.py"
        # Tests rely on the repository's test/resources directory existing.
        resource_directory = os.path.join(os.getcwd(), "test", "resources")
        assert os.path.exists(resource_directory)
        assert os.path.isdir(resource_directory)
        self.resource_directory = resource_directory

    def execute_main(self):
        # Run either the module-level main() or the class-based entry point.
        if self.__use_main:
            main()
        else:
            ProjectSummarizer().main()

    def get_main_name(self):
        # Name used by InProcessExecution when reporting usage/errors.
        return self.__entry_point
def test_get_summarizer_version():
    """
    Make sure that we can get information about the version of the summarizer.
    """
    # Arrange
    executor = MainlineExecutor()
    supplied_arguments = ["--version"]
    expected_output = """\
main.py 0.5.0
"""
    expected_error = ""
    expected_return_code = 0

    # Act
    execute_results = executor.invoke_main(arguments=supplied_arguments, cwd=None)

    # Assert
    execute_results.assert_results(
        expected_output, expected_error, expected_return_code
    )
# pylint: disable=consider-using-with
def setup_directories(
    create_report_directory=True,
    create_publish_directory=False,
    temporary_work_directory=None,
):
    """
    Set up a temporary work directory with a "report" subdirectory
    (created by default) and a "publish" subdirectory (not created
    by default), returning all three as a tuple.
    """
    temporary_work_directory = (
        temporary_work_directory or tempfile.TemporaryDirectory()
    )
    base_path = temporary_work_directory.name
    report_directory = os.path.join(base_path, "report")
    publish_directory = os.path.join(base_path, "publish")
    for directory, wanted in (
        (report_directory, create_report_directory),
        (publish_directory, create_publish_directory),
    ):
        if wanted:
            os.makedirs(directory)
    return temporary_work_directory, report_directory, publish_directory


# pylint: enable=consider-using-with
|
from k5test import *
# Synthetic DNS URI records fed to the resolver wrapper.  The last four
# entries (bogus tag / transport / target) must be ignored by the lookup.
entries = ('URI _kerberos.TEST krb5srv::kkdcp:https://kdc1 1 1\n',
           'URI _kerberos.TEST krb5srv::kkdcp:https://kdc3:300/path 3 1\n',
           'URI _kerberos.TEST krb5srv:m:kkdcp:https://kdc2/path 2 1\n',
           'URI _kerberos.TEST KRB5SRV:xMz:UDP:KDC4 4 1\n',
           'URI _kerberos.TEST krb5srv:xyz:tcp:192.168.1.6 6 1\n',
           'URI _kerberos.TEST krb5srv::tcp:kdc5:500 5 1\n',
           'URI _kerberos.TEST krb5srv::tcp:[dead:beef:cafe:7]:700 7 1\n',
           'URI _kerberos.TEST bogustag:m:kkdcp:https://bogus 8 1\n',
           'URI _kerberos.TEST krb5srv:m:bogustrans:https://bogus 10 1\n',
           'URI _kerberos.TEST krb5srv:m:kkdcp:bogus 11 1\n',
           'URI _kerberos.TEST krb5srv:m:bogusnotrans 12 1\n')
# Expected t_locate_kdc output lines: host (h:), transport (t:),
# port (p:), master flag (m:) and path (P:), ordered by URI priority.
expected = ('7 servers:',
            '0: h:kdc1 t:https p:443 m:0 P:',
            '1: h:kdc2 t:https p:443 m:1 P:path',
            '2: h:kdc3 t:https p:300 m:0 P:path',
            '3: h:KDC4 t:udp p:88 m:1 P:',
            '4: h:kdc5 t:tcp p:500 m:0 P:',
            '5: h:192.168.1.6 t:tcp p:88 m:0 P:',
            '6: h:dead:beef:cafe:7 t:tcp p:700 m:0 P:')
# DNS-based KDC lookup must be enabled for URI discovery to run.
conf = {'libdefaults': {'dns_lookup_kdc' : 'true'}}
realm = K5Realm(create_kdb=False, krb5_conf=conf)
# Write the fake records where resolv_wrapper will read them.
hosts_filename = os.path.join(realm.testdir, 'resolv_hosts')
f = open(hosts_filename, 'w')
for line in entries:
    f.write(line)
f.close()
realm.env['LD_PRELOAD'] = 'libresolv_wrapper.so'
realm.env['RESOLV_WRAPPER_HOSTS'] = hosts_filename
out = realm.run(['./t_locate_kdc', 'TEST'], env=realm.env)
l = out.splitlines()
# Compare output lines 4..11 against the expected answers.
# NOTE(review): assumes the server list starts at output line index 4 —
# confirm against t_locate_kdc's output format.
j = 0
for i in range(4, 12):
    if l[i].strip() != expected[j]:
        fail('URI answers do not match')
    j += 1
success('uri discovery tests')
|
from usersimulator.UserModel import GoalGenerator, UMGoal
import yaml
import random
from ontology import Ontology
import copy
class SGDGoalGenerator(GoalGenerator):
    """Generates user-simulator goals for SGD domains from per-domain YAML files."""

    def __init__(self, dstring):
        """Load the YAML goal catalogue for domain ``dstring``."""
        super(SGDGoalGenerator, self).__init__(dstring)
        self.domain = dstring
        self.goals = {}
        # One YAML file of goal templates per domain.
        file_name = 'usersimulator/myUserSim/goals/{}.yml'.format(self.domain)
        with open(file_name, 'r') as f:
            self.goals = yaml.safe_load(f)

    def init_goal(self, otherDomainsConstraints, um_patience):
        """Build a fresh UMGoal; ``otherDomainsConstraints`` is currently unused."""
        um_goal = UMGoal(um_patience, domainString=self.domain)
        goal = self.generate_goal(um_goal)
        return goal

    def generate_goal(self, um_goal):
        """Populate ``um_goal`` from a randomly chosen YAML goal template."""
        goal_name = random.choice(list(self.goals.keys()))
        goal = self.goals[goal_name]
        # NOTE(review): ``goal`` aliases the cached template, so the renames
        # below mutate self.goals in place across calls — confirm intended.
        if self.domain == "Calendar":  # substitute event_name with name
            # Iterate a copy because entries are deleted while looping.
            for slot in copy.deepcopy(goal["inform_slots"]):
                if slot == "event_name":
                    del goal["inform_slots"][slot]
            for intent in goal['goals']['intent']:
                if goal['request_slots'][intent] != None:
                    if "event_name" in goal['request_slots'][intent]:
                        goal['request_slots'][intent]["name"] = "UNK"
                        del goal['request_slots'][intent]["event_name"]
        if self.domain == "Restaurants":  # substitute restaurant_name with name
            for slot in copy.deepcopy(goal["inform_slots"]):
                if slot == "restaurant_name":
                    del goal["inform_slots"][slot]
            for intent in goal['goals']['intent']:
                if goal['request_slots'][intent] != None:
                    if "restaurant_name" in goal['request_slots'][intent]:
                        goal['request_slots'][intent]["name"] = "UNK"
                        del goal['request_slots'][intent]["restaurant_name"]
        um_goal.yaml_goal = goal
        # Constraints: one value per inform slot (first item when a list),
        # replaced by a random ontology value when not valid for the slot.
        for slot in goal['inform_slots']:
            val = goal['inform_slots'][slot]
            if type(val) == list:
                val = val[0]
            slot = slot.replace('_', '')
            if not Ontology.global_ontology.is_value_in_slot(self.domain, val, slot):
                val = Ontology.global_ontology.getRandomValueForSlot(self.domain, slot=slot, nodontcare=True)
            um_goal.add_const(slot=slot, value=val)
        # Intents: each template intent becomes an 'intent' constraint.
        intents = []
        for intent in goal['goals']['intent']:
            intents.append(intent)
            um_goal.add_const(slot='intent', value=intent.lower())
        valid_requests = Ontology.global_ontology.getValidRequestSlotsForTask(self.domain)
        # Requests: keep only slots the ontology declares requestable.
        for intent in intents:
            if goal['request_slots'][intent] != None:
                for slot in goal['request_slots'][intent]:
                    slot = slot.replace('_', '')
                    if slot in valid_requests:
                        um_goal.requests[slot] = None
        # The simulator always asks for the entity name.
        if 'name' not in um_goal.requests:
            um_goal.requests['name'] = None
        return um_goal
|
import sys
from pyspark.sql import SparkSession
from knn.utils import deleteHeader
if __name__ == '__main__':
    if len(sys.argv) == 5:
        # Fix: the knn.* calls below referenced the package without importing
        # it (only knn.utils.deleteHeader was imported), raising NameError.
        import knn

        fileLocation = sys.argv[1]
        numPartitions = int(sys.argv[2])
        d = int(sys.argv[3])  # embedding dimension for the KNN model
        k = int(sys.argv[4])  # number of neighbours
        spark = SparkSession.builder.appName(sys.argv[0]).getOrCreate()
        # Drop the CSV header, keep column 1 as a float series.
        Xpredict = spark.sparkContext.textFile(fileLocation, numPartitions).mapPartitionsWithIndex(deleteHeader).map(lambda x: float(x.split(',')[1])).cache()
        n = Xpredict.count()
        # Tune hyper-parameters, fit on the past, then predict the next value.
        optim = knn.KNN_Optim(Xpredict, d, k, n)
        train = knn.KNN_Past(Xpredict, optim[2], optim[1], n)
        n = train.count()
        prediction = knn.KNN_Next(train, optim[2], optim[1], n)
        print(prediction)
        spark.stop()
    else:
        # Fix: corrected "usagge" typo in the usage message.
        print("usage <fileLocation><numPartitions><d><k>")
from werkzeug.security import check_password_hash, generate_password_hash
from rent_a_car import users_collection,cars_collection, app,session_handler
from flask import jsonify, request,url_for,redirect,session
from bson.json_util import ObjectId,loads
# Registration route
@app.route("/register", methods=["GET","POST"])
def register():
    """Create a new user from JSON {"username", "password"}; rejects duplicates."""
    # Already-authenticated users cannot register again.
    if session_handler():
        return jsonify({"message": "Ya te encuentras loggeado como {}".format(session["username"])})
    if request.method == "POST":
        username = request.json["username"]
        password = request.json["password"]
        found_user = users_collection.find_one({"username": username})
        if not found_user:
            # Passwords are stored hashed; "carro" marks no car rented yet.
            users_collection.insert_one({"username": username, "password": generate_password_hash(password), "carro": False})
            return jsonify({"message": "Usuario Añadido con exito"})
        else:
            return jsonify({"message": "El usuario que intentas registrar no esta disponible. Intenta denuevo"})
    return jsonify({"message": "Necesitas enviar un Post Request a register para registrarte"})
# Login route
@app.route("/login", methods=["GET","POST"])
def login():
    """Authenticate a user from JSON {"username", "password"} and open a session."""
    if session_handler():
        return jsonify({"message": "Ya te encuentras loggeado como {}".format(session["username"])})
    if request.method == "POST":
        username = request.json["username"]
        password = request.json["password"]
        found_user = users_collection.find_one({"username": username})
        if found_user != None and check_password_hash(found_user["password"], password):
            session["username"] = username
            # Bug fix: the Flask attribute is `permanent`; the original set a
            # misspelled `session.permantent`, so sessions never persisted.
            session.permanent = True
            return jsonify({"message": "Logeado correctamente como {}".format(session["username"])})
        return jsonify({"message": "El usuario no ha sido creado o el password es incorrecto. Verificar Credenciales"})
    return jsonify({"message": "Necesitas Iniciar sesion primero mediante a un POST Request"})
# Profile route
@app.route("/user/<username>")
def profile(username):
    """Show the profile of the logged-in user; only the owner may view it."""
    if session_handler() == username:
        obj = users_collection.find_one({"username": username})
        output = []
        # NOTE(review): loads(obj["carro"]) is called even when "carro" is
        # False (no car rented) — confirm bson loads of False is intended.
        output.append({"_id": str(obj["_id"]), "username": obj["username"], "password": obj["password"], "carro": str(loads(obj["carro"]))})
        return jsonify({"message": "Bienvenido a tu perfil {}".format(session["username"])}, {"resultado": output})
    return jsonify({"message": "Este no es tu perfil o no te has loggeado. Verifica tu informacion."})
#Listar Usuarios
@app.route("/user/list_users")
def users():
    """List every user, expanding rented-car references into full documents."""
    if not session_handler():
        return jsonify({"message": "Necesitas Loggearte para ver la lista de usuarios primero"})
    results = []
    for doc in users_collection.find():
        entry = {"username": doc["username"], "password": doc["password"], "carro": doc["carro"]}
        if doc["carro"]:
            # "carro" stores a serialized ObjectId; resolve it to the car document.
            rented_car_id = loads(doc["carro"])
            car = cars_collection.find_one({"_id": ObjectId(rented_car_id)})
            entry["carro"] = {
                "_id": str(car["_id"]),
                "modelo": car["modelo"],
                "marca": car["marca"],
                "año": car["año"],
                "kilometraje": car["kilometraje"],
            }
        results.append(entry)
    return jsonify({"result": results})
#Ruta de Logout
@app.route("/logout")
def logout():
    """Drop the logged-in username from the session, if any."""
    session.pop("username", None)
    return jsonify({"message": "Logout exitoso"})
|
#from random import randint
import random
import time

# Guessing game: the computer picks 0-5, the player guesses once.
print('='*20, 'DESAFIO 028', '='*20)
print('Vou pensar em um número entre 0 e 5. Tente adivinhar....')
computador = random.randint(0, 5)  # inclusive on both ends
resposta = int(input('Em que número pensei? '))
print('PROCESSANDO.....')
time.sleep(3)  # dramatic pause before revealing the result
if resposta == computador:
    print('Parabéns!! Você acertou!!')
else:
    print('Que pena, você errou, pensei no número {}'.format(computador))
print('--FIM--')
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# @Time : 2021/12/2 15:49
import matplotlib.pyplot as plt
from stochastic.processes.continuous import FractionalBrownianMotion
def fbm(t_len, hurst):
    """Sample a fractional Brownian motion path over [0, t_len].

    Uses 100 steps per unit of time; returns (times, samples).
    """
    process = FractionalBrownianMotion(hurst=hurst, t=t_len)
    num_steps = int(t_len) * 100
    return process.times(num_steps), process.sample(num_steps)
# if __name__ == "__main__":
# t1, x1 = fbm(100, 0.75)
# fig1 = plt.figure(1)
# plt.plot(t1, x1)
# plt.xlabel("t")
# plt.ylabel("x")
# fig1.savefig("../figures/fbm1.png")
#
# t2, x2 = fbm(100, 0.45)
# fig2 = plt.figure(2)
# plt.plot(t2, x2)
# plt.xlabel("t")
# plt.ylabel("x")
# fig2.savefig("../figures/fbm2.png")
|
from django.db import models
class Journalist(models.Model):
    """A journalist who can author many Article rows (see Article.author)."""

    first_name = models.CharField(max_length=31)
    last_name = models.CharField(max_length=31)
    bio = models.TextField(blank=True)

    def __unicode__(self):
        # Python 2 legacy repr; kept alongside __str__ for compatibility.
        return "{} {}".format(self.first_name, self.last_name)

    def __str__(self):
        return "ID: {} - {} {}".format(self.id, self.first_name, self.last_name)
class Article(models.Model):
    """A news article written by a Journalist."""

    # Deleting a journalist deletes their articles.
    author = models.ForeignKey(Journalist, on_delete=models.CASCADE, related_name='articles')
    title = models.CharField(max_length=255)
    description = models.TextField()
    body = models.TextField()
    location = models.CharField(max_length=255)
    publication_date = models.DateField()
    active = models.BooleanField(default=True)
    # Audit timestamps: set once on insert / refreshed on every save.
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)

    def __unicode__(self):
        # Python 2 legacy repr; kept alongside __str__ for compatibility.
        return "{} - {}".format(self.author, self.title)

    def __str__(self):
        return "{} - {}".format(self.author, self.title)
|
from django.contrib import admin
from import_export import fields, resources
from import_export.admin import ExportMixin
from remo.featuredrep.models import FeaturedRep
class FeaturedRepResource(resources.ModelResource):
    """django-import-export resource for FeaturedRep exports."""

    # Bug fix: the field was named `user`, but import_export matches
    # dehydrate hooks as `dehydrate_<field_name>`, so `dehydrate_users`
    # below was never invoked.  Name the field `users` to match both the
    # hook and the model's `users` relation.
    users = fields.Field()

    class Meta:
        model = FeaturedRep

    def dehydrate_users(self, featuredrep):
        # Value exported for the `users` column.
        return featuredrep.users.all()
class FeaturedRepAdmin(ExportMixin, admin.ModelAdmin):
    """Admin with export support for FeaturedRep."""

    resource_class = FeaturedRepResource
    model = FeaturedRep
    list_display = ('featured_rep_users', 'created_on')
    search_fields = ['users__first_name', 'users__last_name']

    def featured_rep_users(self, obj):
        # Comma-separated full names for the changelist column.
        return ', '.join([user.get_full_name() for user in obj.users.all()])

    def save_model(self, request, obj, form, change):
        """Save normally, then re-save with an explicit updated_on when the admin form changed it."""
        super(FeaturedRepAdmin, self).save_model(request, obj, form, change)
        # Re-fetch to compare against the value actually persisted.
        featured = FeaturedRep.objects.get(pk=obj.pk)
        if (form.is_valid() and 'updated_on' in form.changed_data
                and form.cleaned_data['updated_on'] != featured.updated_on):
            obj.save(**{'updated_on': form.cleaned_data['updated_on']})


admin.site.register(FeaturedRep, FeaturedRepAdmin)
|
from pyspark import SparkContext
import re
import sys
def main():
    """Search the movies title file for a term given on the command line."""
    # Insure a search term was supplied at the command line
    if len(sys.argv) != 2:
        sys.stderr.write('Usage: {} <search_term>'.format(sys.argv[0]))
        sys.exit()

    # Create the SparkContext
    sc = SparkContext(appName='SparkWordCount')

    # Broadcast the requested term so every executor shares one copy
    requested_movie = sc.broadcast(sys.argv[1])

    # Load the input file
    source_file = sc.textFile('/user/hduser/input/movies')

    # Get the movie title from the second field
    titles = source_file.map(lambda line: line.split('|')[1])

    # Map normalized title (year suffix stripped, lower-cased) to the raw title
    normalized_title = titles.map(lambda title: (re.sub(r'\s*\(\d{4}\)', '', title).lower(), title))

    # Find all movies matching the requested_movie
    matches = normalized_title.filter(lambda x: requested_movie.value in x[0])

    # Collect all the matching titles
    matching_titles = matches.map(lambda x: x[1]).distinct().collect()

    # Display the result.  Using print() calls instead of Python 2 print
    # statements keeps this file importable on both Python 2 and 3.
    print('{} Matching titles found:'.format(len(matching_titles)))
    for title in matching_titles:
        print(title)

    sc.stop()
if __name__ == '__main__':
main() |
def solution(arr1, arr2):
    """Return the element-wise sum of two equally-shaped 2-D lists."""
    summed_rows = []
    for row_a, row_b in zip(arr1, arr2):
        summed_rows.append([x + y for x, y in zip(row_a, row_b)])
    return summed_rows
|
from .Provider import Provider
from .MappingProvider import MappingProvider, PropMappings
from .DefaultFileProvider import DefaultFileProvider
|
# -*- coding: utf-8 -*-
from setuptools import setup

# Long description and license text are read from the repository files.
with open('README.rst') as f:
    readme = f.read()

# NOTE(review): `license` shadows the builtin of the same name at module
# scope — harmless in a setup script, but worth renaming.
with open('LICENSE') as f:
    license = f.read()

setup(
    name='scrapy-statsd-middleware',
    version='0.0.8',
    description='Statsd integration middleware for scrapy',
    long_description=readme,
    author='Zach Goldstein',
    author_email='zachgold@gmail.com',
    url='https://github.com/zachgoldstein/scrapy-statsd',
    license='Apache 2.0',
    packages=['scrapy_statsd_middleware'],
    install_requires=[
        'Scrapy>=1.0.5',
        'statsd==3.2.1'
    ],
    # `pip install .[test]` pulls in the mocking dependency.
    extras_require={
        'test': ['mock==2.0.0'],
    }
)
|
from labrad.gpib import GPIBDeviceWrapper
from twisted.internet.defer import inlineCallbacks, returnValue
class Agilent33210AWrapper(GPIBDeviceWrapper):
    """LabRAD GPIB wrapper for the Agilent 33210A function generator.

    Each method acts as a combined setter/getter: passing a value programs
    the instrument, and the current instrument state is always read back
    and returned.
    """

    # GENERAL
    @inlineCallbacks
    def reset(self):
        """Restore the instrument to its power-on defaults."""
        yield self.write('*RST')

    @inlineCallbacks
    def toggle(self, status):
        """Set (when status is not None) and return the output-enabled state."""
        # setter
        if status is not None:
            yield self.write('OUTP {:d}'.format(status))
        # getter
        resp = yield self.query('OUTP?')
        resp = bool(int(resp))
        returnValue(resp)

    # WAVEFORM
    @inlineCallbacks
    def function(self, shape):
        """Set (when shape is given) and return the waveform shape."""
        if shape:
            shape = shape.upper()
            if shape in ("SIN", "SQU", "RAMP", "PULS", "NOIS", "DC"):
                yield self.write('FUNC {:s}'.format(shape))
            else:
                raise Exception('Error: invalid input. Shape must be one of (SIN, SQU, RAMP, PULS, NOIS, DC).')
        resp = yield self.query('FUNC?')
        returnValue(resp)

    @inlineCallbacks
    def frequency(self, freq):
        """Set (when freq is given) and return the frequency in Hz."""
        # setter; a falsy freq (None or 0) skips programming and only reads back
        if freq:
            if (freq < 1e7) and (freq > 1e-3):
                yield self.write('FREQ {:f}'.format(freq))
            else:
                raise Exception('Error: invalid input. Frequency must be in range [1mHz, 10MHz].')
        # getter
        resp = yield self.query('FREQ?')
        returnValue(float(resp))

    @inlineCallbacks
    def amplitude(self, ampl):
        """Set (when ampl is given) and return the amplitude in Vpp."""
        # setter
        if ampl:
            if (ampl < 1e1) and (ampl > 1e-2):
                yield self.write('VOLT {:f}'.format(ampl))
            else:
                raise Exception('Error: invalid input. Amplitude must be in range [1e-2 Vpp, 1e1 Vpp].')
        # getter
        resp = yield self.query('VOLT?')
        returnValue(float(resp))

    @inlineCallbacks
    def offset(self, off):
        """Set (when off is given) and return the DC offset in volts."""
        # setter
        if off:
            # Bug fix: the old lower bound (off > 1e-2) rejected every
            # negative offset, contradicting the documented [-1e1, 1e1] range.
            if (off < 1e1) and (off > -1e1):
                yield self.write('VOLT:OFFS {:f}'.format(off))
            else:
                raise Exception('Error: invalid input. Amplitude offset must be in range [-1e1 Vpp, 1e1 Vpp].')
        # getter
        resp = yield self.query('VOLT:OFFS?')
        returnValue(float(resp))

    # MODULATION
    # todo

    # SWEEP
    # todo
|
# Copyright 2013-2014 Mitchell Stanton-Cook Licensed under the
# Educational Community License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.osedu.org/licenses/ECL-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS"
# BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
import tablib
import json
import sys
from BanzaiDB import errors
def convert_from_JSON_to_CSV(json_data, header=False):
    """
    Converts a single JSON element to CSV.

    .. note:: this will not handle nested JSON. Will need to use something
        like https://github.com/evidens/json2csv to achieve this

    :param json_data: the JSON
    :param header: [optional] include the and return the header
    """
    dataset = tablib.Dataset()
    dataset.json = '[' + json.dumps(json_data) + ']'
    rows = dataset.csv.split('\n')
    # A brace in the data row means tablib flattened a nested structure.
    if '}' in rows[1]:
        raise errors.NestedJSONError(dataset.json)
    # A header with no data row means tablib could not parse the JSON.
    if rows[0] and rows[1] == '':
        raise errors.CouldNotParseJSONError(dataset.json)
    data_row = rows[1].rstrip()
    return rows[0].rstrip() + "\n" + data_row if header else data_row
def convert_from_csv_to_JSON(csv_data, header=False):
    """
    Converts from CSV to JSON.

    Not implemented yet.

    :param csv_data: csv data
    :param header: [optional]
    :raises NotImplementedError: always, until this is implemented
    """
    # Raising is friendlier to library callers than the old behaviour of
    # writing to stderr and killing the interpreter with sys.exit(1).
    raise NotImplementedError("convert_from_csv_to_JSON is not implemented yet")
|
"""
This script provides tools for creating and updating resources.
Here, a resource is anything that has below properties:
1) it is used by the Flask application;
2) it should not be added to Git repository.
For example, a resource can be SQLite database or a Markdown
file with content that is very close to that of a Jupyter note.
Author: Nikolay Lysenko
"""
import sqlite3
import os
from collections import defaultdict
from typing import List, Dict, Any, Optional
from contextlib import closing
from readingbricks import utils, settings
class DatabaseCreator:
    """
    Creator of SQLite database mapping a tag to a list of notes.

    Namely, tables from the database represent tags and rows of a table
    represent notes tagged with the corresponding tag.

    :param path_to_ipynb_notes:
        path to directory where Jupyter files with notes are located
    :param path_to_db:
        path to SQLite database; if this file already exists, it will
        be overwritten, else the file will be created
    """

    def __init__(self, path_to_ipynb_notes: str, path_to_db: str):
        """Initialize an instance."""
        self.__path_to_ipynb_notes = path_to_ipynb_notes
        self.__path_to_db = path_to_db

    @staticmethod
    def __update_mapping_of_tags_to_notes(
            tag_to_notes: defaultdict,
            cell: Dict[str, Any]
    ) -> defaultdict:
        # Store cell header in the lists that relate to its tags.
        cell_header = cell['source'][0].rstrip('\n')
        # NOTE(review): lstrip('## ') strips any leading run of '#' and ' '
        # characters, not the literal "## " prefix — a title that itself
        # starts with '#' would be over-stripped; confirm titles never do.
        cell_header = cell_header.lstrip('## ')
        # Every note is also listed under the synthetic 'all_notes' tag.
        cell_tags = cell['metadata']['tags'] + ['all_notes']
        for tag in cell_tags:
            tag_to_notes[tag].append(cell_header)
        return tag_to_notes

    def __write_tag_to_notes_mapping_to_db(
            self,
            tag_to_notes: defaultdict
    ) -> None:
        # Write content of `tag_to_notes` to the target DB: one table per
        # tag, one row per note id (compressed note title).
        # NOTE(review): tag names are interpolated into SQL as identifiers;
        # they come from notebook metadata, so a malformed tag would break
        # the statement — confirm tags are validated upstream.
        with closing(sqlite3.connect(self.__path_to_db)) as conn:
            with utils.open_transaction(conn) as cur:
                for k, v in tag_to_notes.items():
                    cur.execute(
                        f"CREATE TABLE IF NOT EXISTS {k} (note_id VARCHAR)"
                    )
                    cur.execute(
                        f"""
                        CREATE UNIQUE INDEX IF NOT EXISTS
                            {k}_index
                        ON
                            {k} (note_id)
                        """
                    )
                    # Clear stale rows before re-inserting current notes.
                    cur.execute(
                        f"DELETE FROM {k}"
                    )
                    for note_title in v:
                        cur.execute(
                            f"INSERT INTO {k} (note_id) VALUES (?)",
                            (utils.compress(note_title),)
                        )
            # Reclaim space from deleted rows.
            with closing(conn.cursor()) as cur:
                cur.execute('VACUUM')

    def create_or_update_db(self) -> None:
        """
        Create SQLite database if it does not exist or update it else.

        :return:
            None
        """
        tag_to_notes = defaultdict(lambda: [])
        for cell in utils.extract_cells(self.__path_to_ipynb_notes):
            tag_to_notes = self.__update_mapping_of_tags_to_notes(
                tag_to_notes, cell
            )
        self.__write_tag_to_notes_mapping_to_db(tag_to_notes)
class MarkdownDirectoryCreator:
    """
    Converter of notes in Jupyter format to Markdown.

    Each note is stored in a separate file within
    a specified directory.
    Also instances of the class can remove files that correspond to
    removed or renamed notes.

    :param path_to_ipynb_notes:
        path to directory where Jupyter files with notes are located
    :param path_to_markdown_notes:
        path to directory where Markdown files created based on
        Jupyter files should be located
    """

    def __init__(self, path_to_ipynb_notes: str, path_to_markdown_notes: str):
        """Initialize an instance."""
        self.__path_to_ipynb_notes = path_to_ipynb_notes
        self.__path_to_markdown_notes = path_to_markdown_notes

    def __provide_empty_directory(self) -> None:  # pragma: no cover
        # Make directory for Markdown files if it does not exist
        # and delete all files from there if it is not empty.
        if not os.path.isdir(self.__path_to_markdown_notes):
            os.mkdir(self.__path_to_markdown_notes)
        for file_name in os.listdir(self.__path_to_markdown_notes):
            file_name = os.path.join(self.__path_to_markdown_notes, file_name)
            if os.path.isfile(file_name):
                os.unlink(file_name)

    @staticmethod
    def __insert_blank_line_before_each_list(content: List[str]) -> List[str]:
        # Insert blank line before each Markdown list when it is needed
        # for Misaka parser.
        list_markers = ['* ', '- ', '+ ', '1. ']
        result = []
        # Walk consecutive line pairs; add a blank line between a non-empty
        # line and a following list item.
        for first, second in zip(content, content[1:]):
            result.append(first)
            if any([second.startswith(x) for x in list_markers]) and first:
                result.append('')
        result.append(content[-1])
        return result

    def __copy_cell_content_to_markdown_file(
            self,
            cell: Dict[str, Any],
    ) -> None:
        # Extract content of cell and save it as Markdown file in the
        # specified directory; file name is the compressed note title.
        content = [line.rstrip('\n') for line in cell['source']]
        content = self.__insert_blank_line_before_each_list(content)
        # NOTE(review): lstrip('## ') strips any leading '#'/' ' characters,
        # not just the "## " prefix — confirm titles never start with '#'.
        note_title = content[0].lstrip('## ')
        file_name = utils.compress(note_title)
        file_path = (
            os.path.join(self.__path_to_markdown_notes, file_name) + '.md'
        )
        with open(file_path, 'w') as destination_file:
            for line in content:
                destination_file.write(line + '\n')

    def create_or_update_directory_with_markdown_notes(self) -> None:
        """
        Manage directory with Markdown notes.

        Delete previous editions of notes in Markdown if there are any
        and create the ones based on the current editions of files from
        directory with notes.

        :return:
            None
        """
        self.__provide_empty_directory()
        for cell in utils.extract_cells(self.__path_to_ipynb_notes):
            self.__copy_cell_content_to_markdown_file(cell)
def provide_resources(
        ipynb_path: Optional[str] = None,
        markdown_path: Optional[str] = None,
        db_path: Optional[str] = None
) -> None:
    """
    Create or update all resources.

    :param ipynb_path:
        source directory with Jupyter notes; defaults to the settings value
    :param markdown_path:
        destination directory for Markdown notes; defaults to the settings value
    :param db_path:
        destination SQLite database; defaults to the settings value
    :return:
        None
    """
    ipynb_path = ipynb_path or settings.get_path_to_ipynb_notes()
    markdown_path = markdown_path or settings.get_path_to_markdown_notes()
    db_path = db_path or settings.get_path_to_db()
    # Markdown files first, then the tag-to-notes database.
    md_creator = MarkdownDirectoryCreator(ipynb_path, markdown_path)
    md_creator.create_or_update_directory_with_markdown_notes()
    db_creator = DatabaseCreator(ipynb_path, db_path)
    db_creator.create_or_update_db()
if __name__ == '__main__':
provide_resources()
|
from typing import List, Optional, Tuple
import matplotlib as mpl
mpl.use("Agg")
from theseus.opt import Opts
import os
import cv2
import torch
import numpy as np
from theseus.opt import Config
from theseus.cps.models import MODEL_REGISTRY
from theseus.cps.augmentations import TRANSFORM_REGISTRY
from theseus.cps.datasets import DATASET_REGISTRY, DATALOADER_REGISTRY
from theseus.utilities.loading import load_state_dict
from theseus.utilities.loggers import LoggerObserver, StdoutLogger
from theseus.utilities.cuda import get_devices_info
from theseus.utilities.getter import get_instance, get_instance_recursively
from theseus.utilities.visualization.visualizer import Visualizer
from .ensembler import SemanticEnsembler
MODEL_REGISTRY.register(SemanticEnsembler)
class TestPipeline(object):
    """Inference pipeline for the ensembled semantic-segmentation model."""

    def __init__(self, opt: Config):
        """Build transforms, dataset, dataloader and the ensembled model from ``opt``."""
        super(TestPipeline, self).__init__()
        self.opt = opt
        self.debug = opt["global"]["debug"]

        # All output (logs, masks, overlays) goes under save_dir.
        self.logger = LoggerObserver.getLogger("main")
        self.savedir = opt["global"]["save_dir"]
        os.makedirs(self.savedir, exist_ok=True)

        stdout_logger = StdoutLogger(__name__, self.savedir, debug=self.debug)
        self.logger.subscribe(stdout_logger)
        self.logger.text(self.opt, level=LoggerObserver.INFO)

        self.transform_cfg = Config.load_yaml(opt["global"]["cfg_transform"])

        self.device_name = opt["global"]["device"]
        self.device = torch.device(self.device_name)
        # One checkpoint path per ensembled sub-model.
        self.weights = opt["global"]["weights"]

        self.transform = get_instance_recursively(
            self.transform_cfg, registry=TRANSFORM_REGISTRY
        )

        # Inference uses the validation transform only.
        self.dataset = get_instance(
            opt["data"]["dataset"],
            registry=DATASET_REGISTRY,
            transform=self.transform["val"],
        )
        CLASSNAMES = self.dataset.classnames

        self.dataloader = get_instance(
            opt["data"]["dataloader"],
            registry=DATALOADER_REGISTRY,
            dataset=self.dataset,
        )

        self.model = get_instance_recursively(
            self.opt["ensembler"],
            registry=MODEL_REGISTRY,
            classnames=CLASSNAMES,
            num_classes=len(CLASSNAMES),
        ).to(self.device)

        # Load each sub-model checkpoint, if weights were configured.
        if self.weights[0] is not None:
            for i, weight_path in enumerate(self.weights):
                state_dict = torch.load(weight_path, map_location=self.device)
                self.model.models[i] = load_state_dict(
                    self.model.models[i], state_dict, "model"
                )

    def infocheck(self):
        """Log device info, dataset size, and the output directory."""
        device_info = get_devices_info(self.device_name)
        self.logger.text("Using " + device_info, level=LoggerObserver.INFO)
        self.logger.text(
            f"Number of test sample: {len(self.dataset)}", level=LoggerObserver.INFO
        )
        self.logger.text(
            f"Everything will be saved to {self.savedir}", level=LoggerObserver.INFO
        )

    @torch.no_grad()
    def inference(self):
        """Run the model over the dataloader, saving predicted masks and overlays."""
        self.infocheck()
        self.logger.text("Inferencing...", level=LoggerObserver.INFO)

        visualizer = Visualizer()
        self.model.eval()

        saved_mask_dir = os.path.join(self.savedir, "masks")
        saved_overlay_dir = os.path.join(self.savedir, "overlays")
        os.makedirs(saved_mask_dir, exist_ok=True)
        os.makedirs(saved_overlay_dir, exist_ok=True)

        for idx, batch in enumerate(self.dataloader):
            inputs = batch["inputs"]
            img_names = batch["img_names"]
            ori_sizes = batch["ori_sizes"]

            outputs = self.model.get_prediction(batch, self.device)
            preds = outputs["masks"]

            for (input, pred, filename, ori_size) in zip(
                inputs, preds, img_names, ori_sizes
            ):
                # Color-decode the class mask and flip channels RGB -> BGR
                # for OpenCV, then restore the original image size.
                decode_pred = visualizer.decode_segmap(pred)[:, :, ::-1]
                resized_decode_mask = cv2.resize(decode_pred, dsize=tuple(ori_size))

                # Save mask
                savepath = os.path.join(saved_mask_dir, filename)
                cv2.imwrite(savepath, resized_decode_mask)

                # Save overlay (70% original image + 30% mask)
                raw_image = visualizer.denormalize(input)
                raw_image = (raw_image * 255).astype(np.uint8)
                ori_image = cv2.resize(raw_image, dsize=tuple(ori_size))
                ori_image = cv2.cvtColor(ori_image, cv2.COLOR_RGB2BGR)
                # NOTE(review): overlay is a float array at this point —
                # confirm the written image is as intended without a uint8 cast.
                overlay = ori_image * 0.7 + resized_decode_mask * 0.3
                savepath = os.path.join(saved_overlay_dir, filename)
                cv2.imwrite(savepath, overlay)

                self.logger.text(f"Save image at {savepath}", level=LoggerObserver.INFO)
if __name__ == "__main__":
opts = Opts().parse_args()
val_pipeline = TestPipeline(opts)
val_pipeline.inference()
|
import device_comms
from device_comms import *
position = 0
def set_VCO(link, varactor_voltage, power_state):
    """Program the VCO varactor voltage and power state (packet id 0)."""
    # Removed unused local `max_supply = 5.0` (never referenced).
    send_size = 0
    send_size = add_float(link, send_size, varactor_voltage)
    send_size = add_float(link, send_size, power_state)
    link.send(send_size, packet_id=0)
    wait_for_response(link)
    # Drain the acknowledgement float so the link buffer stays in sync.
    pos = 0
    val, pos = rx_float(link, pos)
    clear_buffers(link)
def start_amplifier(link):
    """Start the amplifier (packet id 1) and return the device ack value.

    Removed the unused ``max_supply`` local; added the return for consistency.
    NOTE(review): packet_id=1 is also used by sample_turbidity — confirm the
    intended packet ids against the firmware.
    """
    send_size = 0
    send_size = add_float(link, send_size, 0)  # dummy payload
    link.send(send_size, packet_id=1)
    wait_for_response(link)
    pos = 0
    val, pos = rx_float(link, pos)
    clear_buffers(link)
    return val
def sample_turbidity(link):
    """Request a turbidity reading (packet id 1) and return it as a float."""
    # NOTE(review): packet_id=1 is also used by start_amplifier — verify ids.
    size = add_float(link, 0, 0)  # dummy payload
    link.send(size, packet_id=1)
    wait_for_response(link)
    reading, _ = rx_float(link, 0)
    clear_buffers(link)
    return reading
def move_relative(link, direction, distance):
    """Command a relative move (packet id 2); returns the device ack value."""
    size = 0
    for value in (direction, distance):
        size = add_float(link, size, value)
    link.send(size, packet_id=2)
    wait_for_response(link)
    ack, _ = rx_float(link, 0)
    clear_buffers(link)
    return ack
def home(link):
    """Home the positioner (packet id 3) and return the device ack value.

    Added the return of the acknowledgement float for consistency with the
    other movement commands (move_relative, move_absolute).
    """
    send_size = add_float(link, 0, 0)  # dummy payload
    link.send(send_size, packet_id=3)
    wait_for_response(link)
    val, _ = rx_float(link, 0)
    clear_buffers(link)
    return val
# NOTE(review): this is an exact duplicate of move_relative defined earlier in
# this file; the second definition silently rebinds the name. One copy should
# be removed once callers are audited.
def move_relative(link, direction, distance):
    # Pack direction then distance as floats into the outgoing buffer.
    send_size = 0
    send_size = add_float(link, send_size, direction)
    send_size = add_float(link, send_size, distance)
    # Packet id 2: relative move command.
    link.send(send_size, packet_id=2)
    wait_for_response(link)
    pos = 0
    # Single float acknowledgement from the device.
    val, pos = rx_float(link, pos)
    clear_buffers(link)
    return val
def move_absolute(link, position):
    """Command an absolute move to ``position`` (packet id 4); returns ack."""
    size = add_float(link, 0, position)
    link.send(size, packet_id=4)
    wait_for_response(link)
    ack, _ = rx_float(link, 0)
    clear_buffers(link)
    return ack
def move_to_cuvette(link, id, function):
    """Move to cuvette ``id`` and perform ``function`` (packet id 5).

    Returns the device acknowledgement float.
    """
    # `id` shadows the builtin, but the parameter name is part of the API.
    size = 0
    for value in (id, function):
        size = add_float(link, size, value)
    link.send(size, packet_id=5)
    wait_for_response(link)
    ack, _ = rx_float(link, 0)
    clear_buffers(link)
    return ack
def LO_power(link, power):
    """Set local-oscillator power (packet id 6); returns the ack float."""
    link.send(add_float(link, 0, power), packet_id=6)
    wait_for_response(link)
    ack, _ = rx_float(link, 0)
    clear_buffers(link)
    return ack
def LO_tune(link, val):
    """Tune the local oscillator to ``val`` (packet id 7); returns the ack."""
    link.send(add_float(link, 0, val), packet_id=7)
    wait_for_response(link)
    # Use a distinct name for the reply instead of overwriting the parameter.
    response, _ = rx_float(link, 0)
    clear_buffers(link)
    return response
def varactor_voltage_sweep(link, VCO_supply, start_voltage, end_voltage, step, delay):
    """Sweep the varactor voltage from start to end in ``step`` increments.

    BUG FIX: ``set_VCO`` takes (link, varactor_voltage, power_state); the old
    call passed five arguments and raised TypeError on the first iteration.
    TODO(review): confirm that ``VCO_supply`` is the correct value for
    set_VCO's ``power_state`` argument.
    """
    for voltage in np.arange(start_voltage, end_voltage, step):
        set_VCO(link, voltage, VCO_supply)
        print(f"Sweeping through {voltage} V")
        sleep(delay)  # let the VCO settle between points
def pulse(link, duration):
    """Fire a pulse of ``duration`` (packet id 8); returns the ack float."""
    size = add_float(link, 0, duration)
    link.send(size, packet_id=8)
    wait_for_response(link)
    ack, _ = rx_float(link, 0)
    clear_buffers(link)
    return ack
def get_temperatures(link):
    """Read (ambient, max, min) temperatures from the device (packet id 9).

    BUG FIX: the payload referenced an undefined name ``duration`` (NameError
    on every call); send a dummy 0 like the other parameterless commands.
    Also renamed the locals ``max``/``min``, which shadowed builtins.
    """
    send_size = add_float(link, 0, 0)  # dummy payload
    link.send(send_size, packet_id=9)
    wait_for_response(link)
    pos = 0
    ambient, pos = rx_float(link, pos)
    t_max, pos = rx_float(link, pos)
    t_min, pos = rx_float(link, pos)
    clear_buffers(link)
    return ambient, t_max, t_min
def read_multimeter():
    # Query the bench multimeter for its current reading and return it as a
    # float.
    # NOTE(review): relies on a module-level `multimeter` handle (looks like a
    # serial port: write/readline) that is not defined in this file —
    # presumably provided elsewhere or via the wildcard import; verify.
    multimeter.write("val?\r\n".encode())
    val = float(multimeter.readline())
    return val
def measure_spectrum(link, voltage_list, averages, amplifier_supply):
    """Capture a spectrum: for each voltage, average ``averages`` readings.

    BUG FIXES:
    - ``np.zeros_like(voltage_list)`` inherits an integer dtype from integer
      input, so the in-place ``data /= averages`` would raise; allocate a
      float array explicitly instead.
    - ``set_VCO`` takes (link, varactor_voltage, power_state); the old call
      passed five arguments and raised TypeError.
      TODO(review): confirm ``amplifier_supply`` maps to ``power_state``.
    """
    data = np.zeros(len(voltage_list), dtype=float)
    for _ in range(averages):
        for idx, v in enumerate(voltage_list):
            print(f"Capturing spectrum: {v}")
            set_VCO(link, v, amplifier_supply)
            # First reading is discarded — presumably a settling read; verify.
            read_multimeter()
            data[idx] += read_multimeter()
    data /= averages
    return data
|
from django.core.management.base import BaseCommand, AppCommand
from optparse import make_option
from django.contrib.gis.utils import LayerMapping
from django.contrib.gis.gdal import DataSource
from lingcod.studyregion.models import StudyRegion
class Command(BaseCommand):
option_list = AppCommand.option_list + (
make_option('--name', action='store', dest='region_name', default=False,
help='Give a name to the study region, otherwise the name attribute from the shapefile will be used.'),
)
help = "Creates a new study region from a shapefile containing a single multigeometry"
args = '[shapefile]'
def handle(self, shapefile, *args, **options):
ds = DataSource(shapefile)
if len(ds) != 1:
raise Exception("Data source should only contain a single layer. Aborting.")
layer = ds[0]
if len(layer) != 1:
raise Exception("Layer should containing ONLY a single feature")
if not 'polygon' in layer.geom_type.name.lower():
print layer.geom_type.name
raise Exception("Study region must be a multigeometry")
if options.get('region_name'):
mapping = {
'geometry': 'MULTIPOLYGON',
}
else:
mapping = {
'geometry': 'MULTIPOLYGON',
'name': 'name',
}
lm = LayerMapping(StudyRegion, shapefile, mapping, transform=False)
lm.save()
study_region = StudyRegion.objects.order_by('-creation_date')[0]
if options.get('region_name'):
study_region.name = options.get('region_name')
study_region.save()
print ""
print "Study region created: %s, primary key = %s" % (study_region.name, study_region.pk)
print "To switch to this study region, you will need to run 'python manage.py change_study_region %s'" % (study_region.pk, )
print ""
|
import pytest
from hls4ml_ipbb import Port, IOType, VHDLStdLogic, VHDLStdLogicVector
from hls4ml_ipbb import PortPurpose, ValueType
@pytest.mark.parametrize('name', ['test1', 'test2', 'test3'])
def test_port_name_returns_correct_name(name):
    """A Port reports back the name it was constructed with."""
    built = Port(name=name, io_type=IOType.INPUT, value_type=VHDLStdLogic())
    assert built.name == name
@pytest.mark.parametrize('io_type', list(IOType))
def test_port_io_type_returns_correct_io_type(io_type):
    """A Port reports back the IO type it was constructed with."""
    built = Port(name='test', io_type=io_type, value_type=VHDLStdLogic())
    assert built.io_type == io_type
@pytest.mark.parametrize('value_type', [VHDLStdLogic(), VHDLStdLogicVector(10)])
def test_port_value_type_returns_correct_value_type(value_type):
    """A Port reports back the value type it was constructed with."""
    built = Port(name='test', io_type=IOType.INPUT, value_type=value_type)
    assert built.value_type == value_type
@pytest.mark.parametrize(
    'name,expected',
    [('test_input_1', PortPurpose.NET_IN),
     ('abc_abc_in_121', PortPurpose.OTHER),
     ('12_out_z', PortPurpose.NET_OUT),
     ('fc1_input_V', PortPurpose.NET_IN),
     ('fmgjiomc94', PortPurpose.OTHER),
     ('fc1_input_V_ap_vld', PortPurpose.NET_IN_AP_VLD),
     ('abc_out_28', PortPurpose.NET_OUT),
     ('layer13_out_3_V', PortPurpose.NET_OUT),
     ('layer13_out_0_V_ap_vld', PortPurpose.NET_OUT_AP_VLD),
     ('const_1', PortPurpose.CONST),
     ('const_size_in_1', PortPurpose.CONST),
     ('const_size_out_1', PortPurpose.CONST),
     ('ap_vld', PortPurpose.OTHER)])
def test_port_purpose_returns_correct_purpose(name, expected):
    """Port.purpose is inferred correctly from the port's name alone."""
    built = Port(name=name, io_type=IOType.INPUT, value_type=VHDLStdLogic())
    assert built.purpose == expected
def test_valuetype_convert_returns_vhdl_std_logic():
    """ValueType.convert recognises 'std_logic' regardless of case."""
    for spelling in ('std_logic', 'STD_logic', 'STD_LOGIC'):
        assert isinstance(ValueType.convert(spelling), VHDLStdLogic)
@pytest.mark.parametrize(
    'name,expected_size',
    [('std_logic_vector(10 downto 0)', 11),
     ('std_logic_vector(2292 downto 0)', 2293),
     ('STD_LOGIC_VECTOR (1 DOWNto 0)', 2),
     ('STD_LOGIC_VECTOR (7 DOWNTO 0)', 8),
     ('std_logic_vector (0 to 88)', 89),
     ('STD_Logic_Vector(0 to 100)', 101),
     ('STD_LOGIC_VECTOR\t(0 to 204)', 205),
     ('std_logic_vector(0 to 9)', 10)])
def test_valuetype_convert_returns_vhdl_std_logic_vector(name, expected_size):
    """Vector type strings convert to VHDLStdLogicVector of the right width."""
    converted = ValueType.convert(name)
    assert isinstance(converted, VHDLStdLogicVector)
    assert len(converted) == expected_size
@pytest.mark.parametrize(
    'name',
    ['ajoijmo', 'std_logic2', 'std_logic;',
     'std_logic_vector(2 to 1)',
     'STD_LOGIC_VECTOR (89 downto 2)'])
def test_valuetype_convert_returns_none_for_invalid_str(name):
    """Strings that are not valid VHDL type declarations convert to None."""
    assert ValueType.convert(name) is None
|
"""
AASIST
Copyright (c) 2021-present NAVER Corp.
MIT license
"""
import os
if __name__ == "__main__":
    # Download and unpack the ASVspoof 2019 LA dataset.
    # FIXES: the old command embedded \? \= \& — invalid escape sequences in a
    # Python string (SyntaxWarning on modern interpreters); quoting the URL
    # gives the shell the same URL without backslashes. Also fail loudly if
    # either step returns a non-zero exit status instead of continuing.
    cmd = ('curl -o ./LA.zip -# '
           '"https://datashare.ed.ac.uk/bitstream/handle/10283/3336/LA.zip?sequence=3&isAllowed=y"')
    if os.system(cmd) != 0:
        raise SystemExit("Download of LA.zip failed")
    if os.system("unzip LA.zip") != 0:
        raise SystemExit("Extraction of LA.zip failed")
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2021 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The module file for ios_snmp_server
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
---
author:
- Sagar Paul (@KB-perByte)
description:
- This module provides declarative management of SNMP server on Cisco IOS devices.
module: ios_snmp_server
notes:
- Tested against Cisco IOSv Version 15.6.
- This module works with connection C(network_cli).
options:
config:
description: A dictionary of SNMP server configuration
suboptions:
accounting:
description: SNMP Accounting parameters
suboptions:
command:
description: For SNMP set commands
type: str
type: dict
cache:
description: Enable SNMP cache and MIB expiry interval
type: int
chassis_id:
description: String to uniquely identify this chassis (Hexadecimal)
type: str
communities:
description: Community name configuration.
elements: dict
suboptions:
acl_v4:
description: standard access-list name
type: str
acl_v6:
description: IPv6 access list name
type: str
name:
description: Community name (default RO)
type: str
ro:
description: Only reads are permitted
type: bool
rw:
description: Read-write access
type: bool
view:
description: MIB view name
type: str
type: list
contact:
description: Text for mib object sysContact
type: str
context:
description: Create/Delete a context apart from default
elements: str
type: list
drop:
description: Silently drop SNMP packets
suboptions:
unknown_user:
description: Silently drop unknown v3 user packets
type: bool
vrf_traffic:
description: Silently drop SNMP packets that come on VRF interfaces
type: bool
type: dict
engine_id:
description: Configure a local or remote SNMPv3 engineID
elements: dict
suboptions:
id:
description: engine ID octet string
type: str
local:
description: Local SNMP agent
type: bool
remote:
description: Remote SNMP agent
suboptions:
host:
description: Hostname or IP address of remote SNMP notification host
type: str
udp_port:
description: The remote SNMP notification host's UDP port number.
type: int
vrf:
description: The remote notification host's VPN routing instance
type: str
type: dict
type: list
file_transfer:
description: File transfer related commands
suboptions:
access_group:
description: Access control for file transfers
type: str
protocol:
description: Access control protocol for file transfers
type: list
elements: str
type: dict
groups:
description: Define a User Security Model group
elements: dict
suboptions:
context:
description: Specify a context to associate with the group
type: str
version_option:
choices:
- auth
- noauth
- priv
description: SNMPv3 security level for the group.
type: str
group:
description: SNMP group for the user
type: str
notify:
description: View to restrict notifications
type: str
read:
description: View to restrict read access
type: str
version:
choices:
- v1
- v3
- v2c
description: snmp security group version
type: str
write:
description: View to restrict write access
type: str
acl_v4:
description: specify an access-list associated with this group
type: str
acl_v6:
description: specify an access-list associated with this group
type: str
type: list
hosts:
description: Specify hosts to receive SNMP notifications
elements: dict
suboptions:
host:
description: Hostname or IP address of SNMP notification host.
type: str
informs:
description: Use SNMP inform messages.
type: bool
community_string:
description: SNMPv1/v2c community string or SNMPv3 user name
type: str
traps:
description: Use SNMP trap messages
type: list
elements: str
version:
choices:
- '1'
- 2c
- '3'
description: Notification message SNMP version.
type: str
version_option:
choices:
- auth
- noauth
- priv
description: community name to the host.
type: str
vrf:
description: Specify the VRF in which the host is configured
type: str
type: list
if_index:
description: Enable ifindex persistence
type: bool
inform:
description: Configure SNMP Informs options
suboptions:
pending:
description: Set number of unacked informs to hold
type: int
retries:
description: Set retry count for informs
type: int
timeout:
description: Set timeout for informs
type: int
type: dict
ip:
description: IP ToS configuration for SNMP traffic
suboptions:
dscp:
description: IP DSCP value for SNMP traffic
type: int
precedence:
description: IP Precedence value for SNMP traffic
type: int
type: dict
location:
description: Text for mib object sysLocation
type: str
manager:
description: Modify SNMP manager parameters
type: int
packet_size:
description: Largest SNMP packet size
type: int
password_policy:
description: SNMP v3 users password policy
elements: dict
suboptions:
change:
description: Number of Character changes b/w old and new password
type: int
digits:
description: Number of digits
type: int
lower_case:
description: Number of lower-case characters
type: int
max_len:
description: Maximum password length
type: int
min_len:
description: Minimum password length
type: int
policy_name:
description: Name of the policy
type: str
special_char:
description: Number of special case character
type: int
upper_case:
description: Number of upper-case characters
type: int
username:
description: Name of the user
type: str
type: list
queue_length:
description: Message queue length for each TRAP host
type: int
source_interface:
description: Source interface to be used for sending out SNMP notifications.
type: str
system_shutdown:
description: Enable use of the SNMP reload command
type: bool
trap_source:
description: Assign an interface for the source address of all traps
type: str
trap_timeout:
description: Set timeout for TRAP message retransmissions
type: int
traps:
description: Enable SNMP Traps
suboptions:
auth_framework:
description: Enable SNMP CISCO-AUTH-FRAMEWORK-MIB traps
suboptions:
sec_violation:
description: Mode sec_violation
type: bool
enable:
description: Enable/disable auth framework
type: bool
type: dict
bfd:
description: Allow SNMP BFD traps
suboptions:
enable:
description: Enable/disable bfd
type: bool
session_down:
description: Enable BFD session down traps
type: bool
session_up:
description: Enable BFD session up traps
type: bool
type: dict
bgp:
description: Allow bgp traps
suboptions:
cbgp2:
description: Enable BGP MIBv2 traps
type: bool
enable:
description: Enable/disable bgp traps
type: bool
state_changes:
description: Traps for FSM state changes
suboptions:
all:
description: CISCO specific trap for all fsm state changes
type: bool
backward_trans:
description: CISCO specific trap for backward transition
type: bool
limited:
description: Trap for standard backward transition and established
type: bool
enable:
description: Enable/disable bgp state_changes traps
type: bool
type: dict
threshold:
description: Mode threshold
suboptions:
prefix:
description: Enable/disable bgp threshold traps
type: bool
type: dict
type: dict
bridge:
description: Allow bridge related traps
suboptions:
newroot:
description: Enable SNMP STP Bridge MIB newroot traps
type: bool
enable:
description: Enable/disable bridge traps
type: bool
topologychange:
description: Enable SNMP STP Bridge MIB topologychange traps
type: bool
type: dict
casa:
description: Enable SNMP config casa traps
type: bool
cef:
description: Allow cef related traps
suboptions:
inconsistency:
description: Enable SNMP CEF Inconsistency traps
type: bool
peer_fib_state_change:
description: Enable SNMP CEF Peer FIB State change traps
type: bool
peer_state_change:
description: Enable SNMP CEF Peer state change traps
type: bool
resource_failure:
description: Enable SNMP CEF Resource Failure traps
type: bool
enable:
description: Enable/disable cef traps
type: bool
type: dict
cnpd:
description: Enable SNMP cnpd traps
type: bool
config:
description: Enable SNMP config traps
type: bool
config_copy:
description: Enable SNMP config copy traps
type: bool
config_ctid:
description: Enable SNMP config ctid traps
type: bool
cpu:
description: Allow CPU related traps
suboptions:
enable:
description: Enable/disable cpu traps
type: bool
threshold:
description: Mode threshold
type: bool
type: dict
dhcp:
description: Enable SNMP dhcp traps
type: bool
dlsw:
description: Allow dlsw related traps
suboptions:
circuit:
description: Enable SNMP dlsw circuit traps
type: bool
enable:
description: Enable/disable dlsw traps
type: bool
tconn:
description: Enable SNMP dlsw peer transport connection traps
type: bool
type: dict
eigrp:
description: Enable SNMP eigrp traps
type: bool
entity:
description: Enable SNMP entity traps
type: bool
energywise:
description: Enable SNMP energywise traps
type: bool
ethernet:
description: Allow ethernet traps
suboptions:
cfm:
description: Enable SNMP Ethernet CFM traps
suboptions:
alarm:
description: Enable SNMP Ethernet CFM fault alarm trap
type: bool
cc:
description: Enable SNMP Ethernet CC trap
type: dict
suboptions:
config:
description: Enable SNMP Ethernet CFM configuration error traps
type: bool
cross_connect:
description: Enable SNMP Ethernet CFM cross-connect traps
type: bool
loop:
description: Enable SNMP Ethernet CFM loop traps
type: bool
mep_down:
description: Enable SNMP Ethernet CFM CC Down traps
type: bool
mep_up:
description: Enable SNMP Ethernet CFM CC Up traps
type: bool
crosscheck:
description: Enable SNMP Ethernet CC crosscheck trap
type: dict
suboptions:
mep_missing:
description: Enable SNMP Ethernet CC crosscheck missing trap
type: bool
mep_unknown:
description: Enable SNMP Ethernet CC crosscheck unknown traps
type: bool
service_up:
description: Enable SNMP Ethernet CC crosscheck service traps
type: bool
type: dict
evc:
description: Enable SNMP Ethernet EVC traps
suboptions:
create:
description: Enable SNMP Ethernet EVC create traps
type: bool
delete:
description: Enable SNMP Ethernet EVC delete traps
type: bool
status:
description: Enable SNMP Ethernet EVC status traps
type: bool
type: dict
type: dict
event_manager:
description: Enable SNMP event-manager traps
type: bool
flowmon:
description: Enable SNMP flowmon traps
type: bool
firewall:
description: Enable SNMP firewall traps
suboptions:
enable:
description: Enable/disable firewall traps
type: bool
serverstatus:
description: Enable firewall server status change trap
type: bool
type: dict
frame_relay:
description: Allow frame-relay traps
suboptions:
enable:
description: Enable/disable frame-relay traps
type: bool
subif:
description: Enable SNMP frame-relay subinterface traps
suboptions:
count:
description: Maximum number of traps sent per interval
type: int
interval:
description: Interval duration in which to limit the number of traps sent
type: int
enable:
description: Enable/disable subif traps
type: bool
type: dict
type: dict
fru_ctrl:
description: Enable SNMP fru-ctrl traps
type: bool
hsrp:
description: Enable SNMP hsrp traps
type: bool
ike:
description: Allow ike traps
suboptions:
policy:
description: Enable IKE Policy traps
suboptions:
add:
description: Enable IKE Policy add trap
type: bool
delete:
description: Enable IKE Policy delete trap
type: bool
type: dict
tunnel:
description: Enable IKE Tunnel traps
suboptions:
start:
description: Enable IKE Tunnel start trap
type: bool
stop:
description: Enable IKE Tunnel stop trap
type: bool
type: dict
type: dict
ipmulticast:
description: Enable SNMP ip multi cast traps
type: bool
ipsec:
description: Allow ipsec traps
suboptions:
cryptomap:
description: Enable IPsec Cryptomap traps
suboptions:
add:
description: Enable IPsec Cryptomap add trap
type: bool
attach:
description: Enable IPsec Cryptomap Attach trap
type: bool
delete:
description: Enable IPsec Cryptomap delete trap
type: bool
detach:
description: Enable IPsec Cryptomap Detach trap
type: bool
type: dict
too_many_sas:
description: Enable IPsec Tunnel Start trap
type: bool
tunnel:
description: Enable IPsec Tunnel traps
suboptions:
start:
description: Enable IPsec Tunnel start trap
type: bool
stop:
description: Enable IPsec Tunnel stop trap
type: bool
type: dict
type: dict
ipsla:
description: Enable SNMP ipsla traps
type: bool
l2tun:
description: Allow SNMP l2tun traps
suboptions:
pseudowire_status:
description: Enable BFD pseudo wire status traps
type: bool
session:
description: Enable BFD session traps
type: bool
type: dict
msdp:
description: Enable SNMP msdp traps
type: bool
mvpn:
description: Enable SNMP mvpn traps
type: bool
mpls_vpn:
description: Enable SNMP mpls traps
type: bool
ospf:
description: Allow ospf related traps
suboptions:
cisco_specific:
description: Cisco specific traps
suboptions:
error:
description: error traps
type: bool
lsa:
description: Lsa related traps
type: bool
retransmit:
description: Packet retransmit traps
type: bool
state_change:
description: state change traps
suboptions:
nssa_trans_change:
description: Nssa translator state changes
type: bool
shamlink:
description: Config mismatch errors on virtual interfaces
suboptions:
interface:
description: Sham link interface state changes
type: bool
neighbor:
description: Sham link neighbor state changes
type: bool
type: dict
type: dict
type: dict
error:
description: Enable error traps
type: bool
retransmit:
description: Enable/disable ospf retransmit traps
type: bool
lsa:
description: Enable/disable ospf lsa traps
type: bool
state_change:
description: Enable/disable state change traps
type: bool
type: dict
pim:
description: Allow PIM traps
suboptions:
invalid_pim_message:
description: Enable invalid pim message trap
type: bool
neighbor_change:
description: Enable neighbor change trap
type: bool
rp_mapping_change:
description: Enable rp mapping change trap
type: bool
enable:
description: Enable/disable PIM traps
type: bool
type: dict
vrfmib:
description: Allow vrfmib traps
suboptions:
vrf_up:
description: Enable vrf-up trap
type: bool
vrf_down:
description: Enable vrf-down trap
type: bool
vnet_trunk_up:
description: Enable vnet-trunk-up trap
type: bool
vnet_trunk_down:
description: Enable vnet-trunk-down traps
type: bool
type: dict
pki:
description: Enable SNMP pki traps
type: bool
rsvp:
description: Enable SNMP RSVP traps
type: bool
isis:
description: Enable SNMP isis traps
type: bool
pw_vc:
description: Enable SNMP pw vc traps
type: bool
snmp:
description: Enable SNMP traps
suboptions:
authentication:
description: Enable authentication trap
type: bool
coldstart:
description: Enable coldStart trap
type: bool
linkdown:
description: Enable linkDown trap
type: bool
linkup:
description: Enable linkUp trap
type: bool
warmstart:
description: Enable warmStart trap
type: bool
type: dict
syslog:
description: Enable SNMP syslog traps
type: bool
transceiver_all:
description: Enable SNMP transceiver traps
type: bool
tty:
description: Enable SNMP tty TCP connection traps
type: bool
vrrp:
description: Enable SNMP vrrp traps
type: bool
type: dict
users:
description: Define a user who can access the SNMP engine
elements: dict
suboptions:
acl_v6:
description: Access list ipv6 associated
type: str
acl_v4:
description: Access list ipv4 associated
type: str
group:
description: SNMP group for the user.
type: str
remote:
description: System where an SNMPv3 user is hosted
type: str
udp_port:
description: UDP port used by the remote SNMP system
type: int
username:
description: SNMP user name
type: str
version:
choices:
- v1
- v2c
- v3
description: SNMP security version
type: str
version_option:
choices:
- auth
- access
- encrypted
description: community name to the host.
type: str
vrf:
description: The remote SNMP entity's VPN Routing instance
type: str
type: list
views:
description: Define an SNMPv2 MIB view
elements: dict
suboptions:
excluded:
description: MIB family is excluded from the view
type: bool
family_name:
description: MIB view family name
type: str
included:
description: MIB family is included in the view
type: bool
name:
description: Name of the view
type: str
type: list
type: dict
running_config:
description:
- This option is used only with state I(parsed).
- The value of this option should be the output received from the IOS device by
executing the command B(show running-config | include snmp-server).
- The state I(parsed) reads the configuration from C(running_config) option and
transforms it into Ansible structured data as per the resource module's argspec
and the value is then returned in the I(parsed) key within the result.
type: str
state:
choices:
- merged
- replaced
- overridden
- deleted
- parsed
- gathered
- rendered
default: merged
description:
- The state the configuration should be left in.
- Refer to examples for more details.
- The states I(replaced) and I(overridden) have identical
behaviour for this module.
type: str
short_description: snmp_server resource module
version_added: 2.6.0
"""
EXAMPLES = """
# Using state: merged
# Before state:
# -------------
# router-ios#show running-config | section ^snmp-server
# --------------------- EMPTY -----------------
# Merged play:
# ------------
- name: Apply the provided configuration
cisco.ios.ios_snmp_server:
config:
communities:
- acl_v4: testACL
name: mergedComm
rw: true
contact: contact updated using merged
engine_id:
- id: AB0C5342FF0F
remote:
host: 172.16.0.12
udp_port: 25
groups:
- group: mergedGroup
version: v3
version_option: auth
file_transfer:
access_group: test
protocol:
- ftp
hosts:
- community_string: mergedComm
host: 172.16.2.9
informs: true
traps:
- msdp
- stun
- pki
version: 2c
- community_string: mergedComm
host: 172.16.2.9
traps:
- slb
- pki
password_policy:
- change: 3
digits: 23
lower_case: 12
max_len: 24
policy_name: MergedPolicy
special_char: 32
upper_case: 12
- change: 43
min_len: 12
policy_name: MergedPolicy2
special_char: 22
upper_case: 12
- change: 11
digits: 23
max_len: 12
min_len: 12
policy_name: policy3
special_char: 22
upper_case: 12
traps:
cef:
enable: true
inconsistency: true
peer_fib_state_change: true
peer_state_change: true
resource_failure: true
msdp: true
ospf:
cisco_specific:
error: true
lsa: true
retransmit: true
state_change:
nssa_trans_change: true
shamlink:
interface: true
neighbor: true
error: true
lsa: true
retransmit: true
state_change: true
syslog: true
tty: true
users:
- acl_v4: '24'
group: dev
username: userPaul
version: v1
state: merged
# Commands Fired:
# ---------------
# "commands": [
# "snmp-server contact contact updated using merged",
# "snmp-server file-transfer access-group test protocol ftp",
# "snmp-server enable traps msdp",
# "snmp-server enable traps syslog",
# "snmp-server enable traps tty",
# "snmp-server enable traps ospf cisco-specific errors",
# "snmp-server enable traps ospf cisco-specific retransmit",
# "snmp-server enable traps ospf cisco-specific lsa",
# "snmp-server enable traps ospf cisco-specific state-change nssa-trans-change",
# "snmp-server enable traps ospf cisco-specific state-change shamlink interface",
# "snmp-server enable traps ospf cisco-specific state-change shamlink neighbor",
# "snmp-server enable traps ospf errors",
# "snmp-server enable traps ospf retransmit",
# "snmp-server enable traps ospf lsa",
# "snmp-server enable traps ospf state-change",
# "snmp-server enable traps cef resource-failure peer-state-change peer-fib-state-change inconsistency",
# "snmp-server host 172.16.2.9 informs version 2c mergedComm msdp stun pki",
# "snmp-server host 172.16.2.9 mergedComm slb pki",
# "snmp-server group mergedGroup v3 auth",
# "snmp-server engineID remote 172.16.0.12 udp-port 25 AB0C5342FF0F",
# "snmp-server community mergedComm rw testACL",
# "snmp-server password-policy MergedPolicy define max-len 24 upper-case 12 lower-case 12 special-char 32 digits 23 change 3",
# "snmp-server password-policy MergedPolicy2 define min-len 12 upper-case 12 special-char 22 change 43",
# "snmp-server password-policy policy3 define min-len 12 max-len 12 upper-case 12 special-char 22 digits 23 change 11",
# "snmp-server user userPaul dev v1 access 24"
# ],
# After state:
# ------------
# router-ios#show running-config | section ^snmp-server
# snmp-server engineID remote 172.16.0.12 udp-port 25 AB0C5342FF0F
# snmp-server user userPaul dev v1 access 24
# snmp-server group mergedGroup v3 auth
# snmp-server community mergedComm RW testACL
# snmp-server contact contact updated using merged
# snmp-server enable traps tty
# snmp-server enable traps ospf state-change
# snmp-server enable traps ospf errors
# snmp-server enable traps ospf retransmit
# snmp-server enable traps ospf lsa
# snmp-server enable traps ospf cisco-specific state-change nssa-trans-change
# snmp-server enable traps ospf cisco-specific state-change shamlink interface
# snmp-server enable traps ospf cisco-specific state-change shamlink neighbor
# snmp-server enable traps ospf cisco-specific errors
# snmp-server enable traps ospf cisco-specific retransmit
# snmp-server enable traps ospf cisco-specific lsa
# snmp-server enable traps cef resource-failure peer-state-change peer-fib-state-change inconsistency
# snmp-server enable traps msdp
# snmp-server enable traps syslog
# snmp-server host 172.16.2.9 informs version 2c mergedComm msdp stun pki
# snmp-server host 172.16.2.9 mergedComm slb pki
# snmp-server file-transfer access-group test protocol ftp
# snmp-server password-policy MergedPolicy define max-len 24 upper-case 12 lower-case 12 special-char 32 digits 23 change 3
# snmp-server password-policy MergedPolicy2 define min-len 12 upper-case 12 special-char 22 change 43
# snmp-server password-policy policy3 define min-len 12 max-len 12 upper-case 12 special-char 22 digits 23 change 11
# Using state: deleted
# Before state:
# -------------
# router-ios#show running-config | section ^snmp-server
# snmp-server engineID remote 172.16.0.12 udp-port 25 AB0C5342FF0F
# snmp-server user userPaul dev v1 access 24
# snmp-server group mergedGroup v3 auth
# snmp-server community mergedComm RW testACL
# snmp-server contact contact updated using merged
# snmp-server enable traps tty
# snmp-server enable traps ospf state-change
# snmp-server enable traps ospf errors
# snmp-server enable traps ospf retransmit
# snmp-server enable traps ospf lsa
# snmp-server enable traps ospf cisco-specific state-change nssa-trans-change
# snmp-server enable traps ospf cisco-specific state-change shamlink interface
# snmp-server enable traps ospf cisco-specific state-change shamlink neighbor
# snmp-server enable traps ospf cisco-specific errors
# snmp-server enable traps ospf cisco-specific retransmit
# snmp-server enable traps ospf cisco-specific lsa
# snmp-server enable traps cef resource-failure peer-state-change peer-fib-state-change inconsistency
# snmp-server enable traps msdp
# snmp-server enable traps syslog
# snmp-server host 172.16.2.9 informs version 2c mergedComm msdp stun pki
# snmp-server host 172.16.2.9 mergedComm slb pki
# snmp-server file-transfer access-group test protocol ftp
# snmp-server password-policy MergedPolicy define max-len 24 upper-case 12 lower-case 12 special-char 32 digits 23 change 3
# snmp-server password-policy MergedPolicy2 define min-len 12 upper-case 12 special-char 22 change 43
# snmp-server password-policy policy3 define min-len 12 max-len 12 upper-case 12 special-char 22 digits 23 change 11
# Deleted play:
# -------------
- name: Remove all existing configuration
cisco.ios.ios_snmp_server:
state: deleted
# Commands Fired:
# ---------------
# "commands": [
# "no snmp-server contact contact updated using merged",
# "no snmp-server file-transfer access-group test protocol ftp",
# "no snmp-server enable traps msdp",
# "no snmp-server enable traps syslog",
# "no snmp-server enable traps tty",
# "no snmp-server enable traps ospf cisco-specific errors",
# "no snmp-server enable traps ospf cisco-specific retransmit",
# "no snmp-server enable traps ospf cisco-specific lsa",
# "no snmp-server enable traps ospf cisco-specific state-change nssa-trans-change",
# "no snmp-server enable traps ospf cisco-specific state-change shamlink interface",
# "no snmp-server enable traps ospf cisco-specific state-change shamlink neighbor",
# "no snmp-server enable traps ospf errors",
# "no snmp-server enable traps ospf retransmit",
# "no snmp-server enable traps ospf lsa",
# "no snmp-server enable traps ospf state-change",
# "no snmp-server enable traps cef resource-failure peer-state-change peer-fib-state-change inconsistency",
# "no snmp-server host 172.16.2.9 informs version 2c mergedComm msdp stun pki",
# "no snmp-server host 172.16.2.9 mergedComm slb pki",
# "no snmp-server group mergedGroup v3 auth",
# "no snmp-server engineID remote 172.16.0.12 udp-port 25 AB0C5342FF0F",
# "no snmp-server community mergedComm rw testACL",
# "no snmp-server password-policy MergedPolicy define max-len 24 upper-case 12 lower-case 12 special-char 32 digits 23 change 3",
# "no snmp-server password-policy MergedPolicy2 define min-len 12 upper-case 12 special-char 22 change 43",
# "no snmp-server password-policy policy3 define min-len 12 max-len 12 upper-case 12 special-char 22 digits 23 change 11",
# "no snmp-server user userPaul dev v1 access 24"
# ],
# After state:
# ------------
# router-ios#show running-config | section ^snmp-server
# --------------------- EMPTY -----------------
# Using state: overridden
# Before state:
# -------------
# router-ios#show running-config | section ^snmp-server
# snmp-server engineID remote 172.16.0.12 udp-port 25 AB0C5342FF0F
# snmp-server user userPaul dev v1 access 24
# snmp-server group mergedGroup v3 auth
# snmp-server community mergedComm RW testACL
# snmp-server contact contact updated using merged
# snmp-server enable traps tty
# snmp-server enable traps ospf state-change
# snmp-server enable traps ospf errors
# snmp-server enable traps ospf retransmit
# snmp-server enable traps ospf lsa
# snmp-server enable traps ospf cisco-specific state-change nssa-trans-change
# snmp-server enable traps ospf cisco-specific state-change shamlink interface
# snmp-server enable traps ospf cisco-specific state-change shamlink neighbor
# snmp-server enable traps ospf cisco-specific errors
# snmp-server enable traps ospf cisco-specific retransmit
# snmp-server enable traps ospf cisco-specific lsa
# snmp-server enable traps cef resource-failure peer-state-change peer-fib-state-change inconsistency
# snmp-server enable traps msdp
# snmp-server enable traps syslog
# snmp-server host 172.16.2.9 informs version 2c mergedComm msdp stun pki
# snmp-server host 172.16.2.9 mergedComm slb pki
# snmp-server file-transfer access-group test protocol ftp
# snmp-server password-policy MergedPolicy define max-len 24 upper-case 12 lower-case 12 special-char 32 digits 23 change 3
# snmp-server password-policy MergedPolicy2 define min-len 12 upper-case 12 special-char 22 change 43
# snmp-server password-policy policy3 define min-len 12 max-len 12 upper-case 12 special-char 22 digits 23 change 11
# Overridden play:
# ----------------
- name: Override commands with provided configuration
cisco.ios.ios_snmp_server:
config:
location: 'location entry for snmp'
packet_size: 500
communities:
- acl_v4: acl_uq
name: communityOverriden
rw: true
state: overridden
# Commands Fired:
# ---------------
# "commands": [
# "no snmp-server contact contact updated using merged",
# "no snmp-server file-transfer access-group test protocol ftp",
# "snmp-server location location entry for snmp",
# "snmp-server packetsize 500",
# "no snmp-server enable traps msdp",
# "no snmp-server enable traps syslog",
# "no snmp-server enable traps tty",
# "no snmp-server enable traps ospf cisco-specific errors",
# "no snmp-server enable traps ospf cisco-specific retransmit",
# "no snmp-server enable traps ospf cisco-specific lsa",
# "no snmp-server enable traps ospf cisco-specific state-change nssa-trans-change",
# "no snmp-server enable traps ospf cisco-specific state-change shamlink interface",
# "no snmp-server enable traps ospf cisco-specific state-change shamlink neighbor",
# "no snmp-server enable traps ospf errors",
# "no snmp-server enable traps ospf retransmit",
# "no snmp-server enable traps ospf lsa",
# "no snmp-server enable traps ospf state-change",
# "no snmp-server enable traps cef resource-failure peer-state-change peer-fib-state-change inconsistency",
# "no snmp-server host 172.16.2.9 informs version 2c mergedComm msdp stun pki",
# "no snmp-server host 172.16.2.9 mergedComm slb pki",
# "no snmp-server group mergedGroup v3 auth",
# "no snmp-server engineID remote 172.16.0.12 udp-port 25 AB0C5342FF0F",
# "snmp-server community communityOvverriden rw acl_uq",
# "no snmp-server community mergedComm rw testACL",
# "no snmp-server password-policy MergedPolicy define max-len 24 upper-case 12 lower-case 12 special-char 32 digits 23 change 3",
# "no snmp-server password-policy MergedPolicy2 define min-len 12 upper-case 12 special-char 22 change 43",
# "no snmp-server password-policy policy3 define min-len 12 max-len 12 upper-case 12 special-char 22 digits 23 change 11",
# "no snmp-server user userPaul dev v1 access 24"
# ],
# After state:
# ------------
# router-ios#show running-config | section ^snmp-server
# snmp-server community communityOverriden RW acl_uq
# snmp-server packetsize 500
# snmp-server location location entry for snmp
# Using state: replaced
# Before state:
# -------------
# router-ios#show running-config | section ^snmp-server
# snmp-server community communityOverriden RW acl_uq
# snmp-server packetsize 500
# snmp-server location location entry for snmp
# Replaced play:
# --------------
- name: Replace commands with provided configuration
cisco.ios.ios_snmp_server:
config:
location: 'updated location entry'
packet_size: 500
communities:
- acl_v4: acl_uq
name: communityOverriden
rw: true
state: replaced
# Commands Fired:
# ---------------
# "commands": [
# "snmp-server location updated location entry"
# ],
# After state:
# ------------
# router-ios#show running-config | section ^snmp-server
# snmp-server community communityOverriden RW acl_uq
# snmp-server packetsize 500
# snmp-server location updated location entry
# Using state: gathered
# Before state:
# -------------
#router-ios#show running-config | section ^snmp-server
# snmp-server engineID remote 172.16.0.12 udp-port 25 AB0C5342FF0F
# snmp-server user userPaul dev v1 access 24
# snmp-server group mergedGroup v3 auth
# snmp-server community communityOvverriden RW acl_uq
# snmp-server community mergedComm RW testACL
# snmp-server packetsize 500
# snmp-server location updated location entry
# snmp-server contact contact updated using merged
# snmp-server enable traps tty
# snmp-server enable traps ospf state-change
# snmp-server enable traps ospf errors
# snmp-server enable traps ospf retransmit
# snmp-server enable traps ospf lsa
# snmp-server enable traps ospf cisco-specific state-change nssa-trans-change
# snmp-server enable traps ospf cisco-specific state-change shamlink interface
# snmp-server enable traps ospf cisco-specific state-change shamlink neighbor
# snmp-server enable traps ospf cisco-specific errors
# snmp-server enable traps ospf cisco-specific retransmit
# snmp-server enable traps ospf cisco-specific lsa
# snmp-server enable traps cef resource-failure peer-state-change peer-fib-state-change inconsistency
# snmp-server enable traps msdp
# snmp-server enable traps syslog
# snmp-server host 172.16.2.9 informs version 2c mergedComm msdp stun pki
# snmp-server host 172.16.2.9 mergedComm slb pki
# snmp-server file-transfer access-group test protocol ftp
# snmp-server password-policy MergedPolicy define max-len 24 upper-case 12 lower-case 12 special-char 32 digits 23 change 3
# snmp-server password-policy MergedPolicy2 define min-len 12 upper-case 12 special-char 22 change 43
# snmp-server password-policy policy3 define min-len 12 max-len 12 upper-case 12 special-char 22 digits 23 change 11
# Gathered play:
# --------------
- name: Gather listed snmp config
cisco.ios.ios_snmp_server:
state: gathered
# Module Execution Result:
# ------------------------
# "gathered": {
# "communities": [
# {
# "acl_v4": "acl_uq",
# "name": "communityOvverriden",
# "rw": true
# },
# {
# "acl_v4": "testACL",
# "name": "mergedComm",
# "rw": true
# }
# ],
# "contact": "contact updated using merged",
# "engine_id": [
# {
# "id": "AB0C5342FF0F",
# "remote": {
# "host": "172.16.0.12",
# "udp_port": 25
# }
# }
# ],
# "file_transfer": {
# "access_group": "test",
# "protocol": [
# "ftp"
# ]
# },
# "groups": [
# {
# "group": "mergedGroup",
# "version": "v3",
# "version_option": "auth"
# }
# ],
# "hosts": [
# {
# "community_string": "mergedComm",
# "host": "172.16.2.9",
# "informs": true,
# "traps": [
# "msdp",
# "stun",
# "pki"
# ],
# "version": "2c"
# },
# {
# "community_string": "mergedComm",
# "host": "172.16.2.9",
# "traps": [
# "slb",
# "pki"
# ]
# }
# ],
# "location": "updated location entry",
# "packet_size": 500,
# "password_policy": [
# {
# "change": 3,
# "digits": 23,
# "lower_case": 12,
# "max_len": 24,
# "policy_name": "MergedPolicy",
# "special_char": 32,
# "upper_case": 12
# },
# {
# "change": 43,
# "min_len": 12,
# "policy_name": "MergedPolicy2",
# "special_char": 22,
# "upper_case": 12
# },
# {
# "change": 11,
# "digits": 23,
# "max_len": 12,
# "min_len": 12,
# "policy_name": "policy3",
# "special_char": 22,
# "upper_case": 12
# }
# ],
# "traps": {
# "cef": {
# "enable": true,
# "inconsistency": true,
# "peer_fib_state_change": true,
# "peer_state_change": true,
# "resource_failure": true
# },
# "msdp": true,
# "ospf": {
# "cisco_specific": {
# "error": true,
# "lsa": true,
# "retransmit": true,
# "state_change": {
# "nssa_trans_change": true,
# "shamlink": {
# "interface": true,
# "neighbor": true
# }
# }
# },
# "error": true,
# "lsa": true,
# "retransmit": true,
# "state_change": true
# },
# "syslog": true,
# "tty": true
# },
# "users": [
# {
# "acl_v4": "24",
# "group": "dev",
# "username": "userPaul",
# "version": "v1"
# }
# ]
# },
# Using state: rendered
# Rendered play:
# --------------
- name: Render the commands for provided configuration
cisco.ios.ios_snmp_server:
config:
accounting:
command: default
cache: 2
chassis_id: entry for chassis id
communities:
- acl_v6: te
name: test
ro: true
view: terst1
- acl_v4: '1322'
name: wete
ro: true
- acl_v4: paul
name: weteww
rw: true
contact: details contact
context:
- contextA
- contextB
engine_id:
- id: AB0C5342FA0A
local: true
- id: AB0C5342FAAB
remote:
host: 172.16.0.2
udp_port: 23
- id: AB0C5342FAAA
remote:
host: 172.16.0.1
udp_port: 22
file_transfer:
access_group: testAcl
protocol:
- ftp
- rcp
groups:
- group: grpFamily
version: v3
version_option: auth
- context: mycontext
group: grpFamily
version: v1
- acl_v4: '2'
group: grp1
notify: me
version: v1
- group: newtera
version: v3
version_option: priv
- group: relaplacing
version: v3
version_option: noauth
hosts:
- community_string: check
host: 172.16.2.99
informs: true
traps:
- msdp
- stun
version: 2c
- community_string: check
host: 172.16.2.99
traps:
- slb
- pki
- community_string: checktrap
host: 172.16.2.99
traps:
- isis
- hsrp
- community_string: newtera
host: 172.16.2.1
traps:
- rsrb
- pim
- rsvp
- slb
- pki
version: '3'
version_option: priv
- community_string: relaplacing
host: 172.16.2.1
traps:
- slb
- pki
version: '3'
version_option: noauth
- community_string: trapsac
host: 172.16.2.1
traps:
- tty
- bgp
version: 2c
- community_string: www
host: 172.16.1.1
traps:
- tty
- bgp
version: '3'
version_option: auth
inform:
pending: 2
ip:
dscp: 2
location: 'entry for snmp location'
packet_size: 500
password_policy:
- change: 3
digits: 23
lower_case: 12
max_len: 24
policy_name: policy1
special_char: 32
upper_case: 12
- change: 9
min_len: 12
policy_name: policy2
special_char: 22
upper_case: 12
- change: 11
digits: 23
max_len: 12
min_len: 12
policy_name: policy3
special_char: 22
upper_case: 12
queue_length: 2
source_interface: Loopback999
system_shutdown: true
trap_source: GigabitEthernet0/0
trap_timeout: 2
traps:
auth_framework:
enable: true
bgp:
cbgp2: true
enable: true
bfd:
enable: true
session_down: true
session_up: true
bridge:
enable: true
newroot: true
topologychange: true
casa: true
cef:
enable: true
inconsistency: true
peer_fib_state_change: true
peer_state_change: true
resource_failure: true
dlsw:
enable: true
eigrp: true
ethernet:
cfm:
alarm: true
evc:
status: true
event_manager: true
flowmon: true
frame_relay:
enable: true
subif:
enable: true
hsrp: true
ike:
policy:
add: true
delete: true
tunnel:
start: true
stop: true
ipmulticast: true
ipsec:
cryptomap:
add: true
attach: true
delete: true
detach: true
too_many_sas: true
tunnel:
start: true
stop: true
ipsla: true
l2tun:
pseudowire_status: true
session: true
msdp: true
ospf:
cisco_specific:
error: true
lsa: true
retransmit: true
state_change:
nssa_trans_change: true
shamlink:
interface: true
neighbor: true
error: true
lsa: true
retransmit: true
state_change: true
pim:
enable: true
invalid_pim_message: true
neighbor_change: true
rp_mapping_change: true
pki: true
rsvp: true
snmp:
authentication: true
coldstart: true
linkdown: true
linkup: true
warmstart: true
syslog: true
tty: true
users:
- acl_v4: '24'
group: groupFamily
username: paul
version: v1
- acl_v4: ipv6
group: groupFamily
username: domnic
version: v3
- group: relaplacing
username: relaplacing
version: v3
state: rendered
# Module Execution Result:
# ------------------------
# "rendered": [
# "snmp-server accounting commands default",
# "snmp-server cache interval 2",
# "snmp-server chassis-id entry for chassis id",
# "snmp-server contact details contact",
# "snmp-server file-transfer access-group testAcl protocol ftp rcp",
# "snmp-server inform pending 2",
# "snmp-server ip dscp 2",
# "snmp-server location entry for snmp location",
# "snmp-server packetsize 500",
# "snmp-server queue-length 2",
# "snmp-server trap timeout 2",
# "snmp-server source-interface informs Loopback999",
# "snmp-server trap-source GigabitEthernet0/0",
# "snmp-server system-shutdown",
# "snmp-server enable traps auth-framework",
# "snmp-server enable traps bfd session-down session-up",
# "snmp-server enable traps bgp cbgp2",
# "snmp-server enable traps bridge newroot topologychange",
# "snmp-server enable traps casa",
# "snmp-server enable traps eigrp",
# "snmp-server enable traps event-manager",
# "snmp-server enable traps flowmon",
# "snmp-server enable traps hsrp",
# "snmp-server enable traps ipsla",
# "snmp-server enable traps msdp",
# "snmp-server enable traps pki",
# "snmp-server enable traps rsvp",
# "snmp-server enable traps syslog",
# "snmp-server enable traps tty",
# "snmp-server enable traps ipmulticast",
# "snmp-server enable traps ike policy add",
# "snmp-server enable traps ike policy delete",
# "snmp-server enable traps ike tunnel start",
# "snmp-server enable traps ike tunnel stop",
# "snmp-server enable traps ipsec cryptomap add",
# "snmp-server enable traps ipsec cryptomap delete",
# "snmp-server enable traps ipsec cryptomap attach",
# "snmp-server enable traps ipsec cryptomap detach",
# "snmp-server enable traps ipsec tunnel start",
# "snmp-server enable traps ipsec tunnel stop",
# "snmp-server enable traps ipsec too-many-sas",
# "snmp-server enable traps ospf cisco-specific errors",
# "snmp-server enable traps ospf cisco-specific retransmit",
# "snmp-server enable traps ospf cisco-specific lsa",
# "snmp-server enable traps ospf cisco-specific state-change nssa-trans-change",
# "snmp-server enable traps ospf cisco-specific state-change shamlink interface",
# "snmp-server enable traps ospf cisco-specific state-change shamlink neighbor",
# "snmp-server enable traps ospf errors",
# "snmp-server enable traps ospf retransmit",
# "snmp-server enable traps ospf lsa",
# "snmp-server enable traps ospf state-change",
# "snmp-server enable traps l2tun pseudowire status",
# "snmp-server enable traps l2tun session",
# "snmp-server enable traps pim neighbor-change rp-mapping-change invalid-pim-message",
# "snmp-server enable traps snmp authentication linkdown linkup warmstart coldstart",
# "snmp-server enable traps frame-relay",
# "snmp-server enable traps cef resource-failure peer-state-change peer-fib-state-change inconsistency",
# "snmp-server enable traps dlsw",
# "snmp-server enable traps ethernet evc status",
# "snmp-server enable traps ethernet cfm alarm",
# "snmp-server host 172.16.2.99 informs version 2c check msdp stun",
# "snmp-server host 172.16.2.99 check slb pki",
# "snmp-server host 172.16.2.99 checktrap isis hsrp",
# "snmp-server host 172.16.2.1 version 3 priv newtera rsrb pim rsvp slb pki",
# "snmp-server host 172.16.2.1 version 3 noauth relaplacing slb pki",
# "snmp-server host 172.16.2.1 version 2c trapsac tty bgp",
# "snmp-server host 172.16.1.1 version 3 auth www tty bgp",
# "snmp-server group grpFamily v1 context mycontext",
# "snmp-server group grp1 v1 notify me access 2",
# "snmp-server group newtera v3 priv",
# "snmp-server group relaplacing v3 noauth",
# "snmp-server engineID local AB0C5342FA0A",
# "snmp-server engineID remote 172.16.0.2 udp-port 23 AB0C5342FAAB",
# "snmp-server engineID remote 172.16.0.1 udp-port 22 AB0C5342FAAA",
# "snmp-server community test view terst1 ro ipv6 te",
# "snmp-server community wete ro 1322",
# "snmp-server community weteww rw paul",
# "snmp-server context contextA",
# "snmp-server context contextB",
# "snmp-server password-policy policy1 define max-len 24 upper-case 12 lower-case 12 special-char 32 digits 23 change 3",
# "snmp-server password-policy policy2 define min-len 12 upper-case 12 special-char 22 change 9",
# "snmp-server password-policy policy3 define min-len 12 max-len 12 upper-case 12 special-char 22 digits 23 change 11",
# "snmp-server user paul groupFamily v1 access 24",
# "snmp-server user domnic groupFamily v3 access ipv6",
# "snmp-server user relaplacing relaplacing v3"
# ]
# Using state: parsed
# File: parsed.cfg
# ----------------
# snmp-server engineID local AB0C5342FA0A
# snmp-server engineID remote 172.16.0.2 udp-port 23 AB0C5342FAAB
# snmp-server engineID remote 172.16.0.1 udp-port 22 AB0C5342FAAA
# snmp-server user newuser newfamily v1 access 24
# snmp-server user paul familypaul v3 access ipv6 ipv6acl
# snmp-server user replaceUser replaceUser v3
# snmp-server group group0 v3 auth
# snmp-server group group1 v1 notify me access 2
# snmp-server group group2 v3 priv
# snmp-server group replaceUser v3 noauth
# snmp-server community commu1 view view1 RO ipv6 te
# snmp-server community commu2 RO 1322
# snmp-server community commu3 RW paul
# snmp-server trap timeout 2
# snmp-server trap-source GigabitEthernet0/0
# snmp-server source-interface informs Loopback999
# snmp-server packetsize 500
# snmp-server enable traps vrfmib vrf-up vrf-down vnet-trunk-up vnet-trunk-down
# snmp-server host 172.16.2.99 informs version 2c check msdp stun
# snmp-server host 172.16.2.1 version 2c trapsac tty bgp
# snmp-server host 172.16.1.1 version 3 auth group0 tty bgp
# snmp-server context contextWord1
# snmp-server context contextWord2
# snmp-server file-transfer access-group testAcl protocol ftp
# snmp-server file-transfer access-group testAcl protocol rcp
# snmp-server cache interval 2
# snmp-server password-policy policy2 define min-len 12 upper-case 12 special-char 22 change 9
# snmp-server password-policy policy3 define min-len 12 max-len 12 upper-case 12 special-char 22 digits 23 change 11
# snmp-server accounting commands default
# snmp-server inform pending 2
# Parsed play:
# ------------
- name: Parse the provided configuration with the existing running configuration
cisco.ios.ios_snmp_server:
running_config: "{{ lookup('file', 'parsed.cfg') }}"
state: parsed
# Module Execution Result:
# ------------------------
# "parsed": {
# "accounting": {
# "command": "default"
# },
# "cache": 2,
# "communities": [
# {
# "acl_v6": "te",
# "name": "commu1",
# "ro": true,
# "view": "view1"
# },
# {
# "acl_v4": "1322",
# "name": "commu2",
# "ro": true
# },
# {
# "acl_v4": "paul",
# "name": "commu3",
# "rw": true
# }
# ],
# "context": [
# "contextWord1",
# "contextWord2"
# ],
# "engine_id": [
# {
# "id": "AB0C5342FA0A",
# "local": true
# },
# {
# "id": "AB0C5342FAAA",
# "remote": {
# "host": "172.16.0.1",
# "udp_port": 22
# }
# },
# {
# "id": "AB0C5342FAAB",
# "remote": {
# "host": "172.16.0.2",
# "udp_port": 23
# }
# }
# ],
# "file_transfer": {
# "access_group": "testAcl",
# "protocol": [
# "rcp",
# "ftp"
# ]
# },
# "groups": [
# {
# "group": "group0",
# "version": "v3",
# "version_option": "auth"
# },
# {
# "acl_v4": "2",
# "group": "group1",
# "notify": "me",
# "version": "v1"
# },
# {
# "group": "group2",
# "version": "v3",
# "version_option": "priv"
# },
# {
# "group": "replaceUser",
# "version": "v3",
# "version_option": "noauth"
# }
# ],
# "hosts": [
# {
# "community_string": "group0",
# "host": "172.16.1.1",
# "traps": [
# "tty",
# "bgp"
# ],
# "version": "3",
# "version_option": "auth"
# },
# {
# "community_string": "trapsac",
# "host": "172.16.2.1",
# "traps": [
# "tty",
# "bgp"
# ],
# "version": "2c"
# },
# {
# "community_string": "check",
# "host": "172.16.2.99",
# "informs": true,
# "traps": [
# "msdp",
# "stun"
# ],
# "version": "2c"
# }
# ],
# "inform": {
# "pending": 2
# },
# "packet_size": 500,
# "password_policy": [
# {
# "change": 9,
# "min_len": 12,
# "policy_name": "policy2",
# "special_char": 22,
# "upper_case": 12
# },
# {
# "change": 11,
# "digits": 23,
# "max_len": 12,
# "min_len": 12,
# "policy_name": "policy3",
# "special_char": 22,
# "upper_case": 12
# }
# ],
# "source_interface": "Loopback999",
# "trap_source": "GigabitEthernet0/0",
# "trap_timeout": 2,
# "traps": {
# "vrfmib": {
# "vnet_trunk_down": true,
# "vnet_trunk_up": true,
# "vrf_down": true,
# "vrf_up": true
# }
# },
# "users": [
# {
# "acl_v4": "24",
# "group": "newfamily",
# "username": "newuser",
# "version": "v1"
# },
# {
# "acl_v4": "ipv6",
# "group": "familypaul",
# "username": "paul",
# "version": "v3"
# },
# {
# "group": "replaceUser",
# "username": "replaceUser",
# "version": "v3"
# }
# ]
# }
"""
RETURN = """
before:
description: The configuration prior to the module execution.
returned: when I(state) is C(merged), C(replaced), C(overridden), C(deleted) or C(purged)
type: dict
sample: >
This output will always be in the same format as the
module argspec.
after:
description: The resulting configuration after module execution.
returned: when changed
type: dict
sample: >
This output will always be in the same format as the
module argspec.
commands:
description: The set of commands pushed to the remote device.
returned: when I(state) is C(merged), C(replaced), C(overridden), C(deleted) or C(purged)
type: list
sample:
- snmp-server host 172.16.2.99 informs version 2c check msdp stun
- snmp-server engineID remote 172.16.0.2 udp-port 23 AB0C5342FAAB
- snmp-server group grp1 v1 notify me access 2
rendered:
description: The provided configuration in the task rendered in device-native format (offline).
returned: when I(state) is C(rendered)
type: list
sample:
- snmp-server enable traps ipsec cryptomap attach
- snmp-server password-policy policy1 define max-len 24 upper-case 12 lower-case 12 special-char 32 digits 23 change 3
- snmp-server cache interval 2
gathered:
description: Facts about the network resource gathered from the remote device as structured data.
returned: when I(state) is C(gathered)
type: list
sample: >
This output will always be in the same format as the
module argspec.
parsed:
description: The device native config provided in I(running_config) option parsed into structured data as per module argspec.
returned: when I(state) is C(parsed)
type: list
sample: >
This output will always be in the same format as the
module argspec.
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.cisco.ios.plugins.module_utils.network.ios.argspec.snmp_server.snmp_server import (
Snmp_serverArgs,
)
from ansible_collections.cisco.ios.plugins.module_utils.network.ios.config.snmp_server.snmp_server import (
Snmp_server,
)
def main():
    """Entry point: build the AnsibleModule for snmp_server and execute it.

    :returns: nothing; exits the module with the execution result
    """
    # States that require the ``config`` option to be supplied.
    states_needing_config = ("merged", "replaced", "overridden", "rendered")
    required_if = [["state", state, ["config"]] for state in states_needing_config]
    required_if.append(["state", "parsed", ["running_config"]])

    module = AnsibleModule(
        argument_spec=Snmp_serverArgs.argument_spec,
        mutually_exclusive=[["config", "running_config"]],
        required_if=required_if,
        supports_check_mode=True,
    )

    module.exit_json(**Snmp_server(module).execute_module())
# Run the module only when executed directly (how Ansible invokes modules).
if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import rospy
from dvs_slam_msgs.msg import VoxelGrid
from rospy.numpy_msg import numpy_msg
import matplotlib.cm
import colormaps as cmaps
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import cv2
# Shared CvBridge instance for converting OpenCV arrays to ROS Image messages.
cv_bridge = CvBridge()
# Publisher for the rendered confidence visualization.
image_pub = rospy.Publisher("dsi/confidence", Image, queue_size=10)
# Colormap applied to the normalized confidence values.
cmap = matplotlib.cm.get_cmap('coolwarm')
def reorder_axes(arr):
    """Cyclically shift the axes of ``arr`` right, i.e. from (0, 1, 2) to (2, 0, 1).

    @param arr 3-D array whose axes have to be shifted
    @return a view of ``arr`` with result[k, i, j] == arr[i, j, k]
    """
    # A single transpose is equivalent to swapping (0, 2) and then (1, 2).
    return np.transpose(arr, (2, 0, 1))
def normalize(img):
    """
    Scales image values between 0 and 1 (min-max normalization)

    @param img the image to normalize
    @return a new float32 array with values scaled to [0, 1]
    """
    # BUG FIX: cv2.normalize requires the ``dst`` argument as the second
    # positional parameter in the Python binding; omitting it raises a
    # TypeError. Passing None lets OpenCV allocate the output array.
    return cv2.normalize(img, None, alpha=0, beta=1,
                         norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
def publish_confidence(voxel_grid_msg):
    """
    Compute confidence for the received voxel grid and publishes it
    Topic: "voxel_grid"
    @param voxel_grid_msg message containing the voxel grid
    """
    msg = voxel_grid_msg.voxel_grid
    # Grid dimensions as declared in the MultiArray layout (height, width, depth).
    h = msg.layout.dim[0].size
    w = msg.layout.dim[1].size
    N = msg.layout.dim[2].size
    # numpy_msg deserializes into a read-only array; make it writable first.
    msg.data.setflags(write=True)
    arr = reorder_axes(msg.data.reshape((h, w, N)))  # axes: N h w
    # Composite canvas: the h x w max-projection plus two side projections.
    img = np.zeros((h+N, w+N), dtype=np.float32)
    # Max-projections along each axis, negated before normalization —
    # presumably to invert the colormap direction (TODO confirm intent).
    img[0:h, 0:w] = normalize(-np.amax(arr, axis=0))
    img[h:h+N, 0:w] = normalize(-np.amax(arr, axis=1))
    img[0:h, w:w+N] = normalize(-np.amax(arr, axis=2).T)
    # Fill the unused bottom-right corner; 255 saturates the [0, 1] colormap
    # input, so the corner is painted in the colormap's top color.
    img[h+1:h+N, w+1:w+N] = 255
    # Apply the colormap and drop the alpha channel, scaling to 8-bit.
    img = (255. * cmap(img)[:, :, :3]).astype(np.uint8)
    # NOTE(review): cmap() yields RGB channel order but the message is
    # published with encoding "bgr8" — red/blue may be swapped; confirm.
    c = (127, 127, 127)
    # Gray separator lines between the main view and the two projections.
    cv2.line(img, (0, h), (img.shape[1], h), c)
    cv2.line(img, (w, 0), (w, img.shape[0]), c)
    image_pub.publish(cv_bridge.cv2_to_imgmsg(img, encoding="bgr8"))
if __name__ == '__main__':
    # Subscribe with numpy-deserialized messages and publish one confidence
    # image per received voxel grid until the node is shut down.
    topic_name = 'voxel_grid'
    rospy.init_node('publish_confidence')
    rospy.Subscriber(topic_name,
                     numpy_msg(VoxelGrid),
                     publish_confidence)
    rospy.spin()
|
"""
Content Provider: Flickr
ETL Process: Use the API to identify all CC licensed images.
Output: TSV file containing the images and the respective meta-data.
Notes: https://www.flickr.com/help/terms/api
Rate limit: 3600 queries per hour.
"""
import argparse
import calendar
import logging
import os
import time
from datetime import datetime, timedelta
from dateutil import rrule
import modules.etlMods as etl_mods
# NOTE(review): the returned logger is discarded; the module logs via the
# root logger configured below.
logging.getLogger(__name__)

Ts_RANGE = int(5)  # process job using 5 minute intervals
DELAY = 1.0  # time delay (in seconds) between consecutive API requests
FILE = 'flickr_{}.tsv'.format(int(time.time()))  # timestamped output TSV
SIZE = 500  # results per API page (Flickr's maximum per_page)
API_KEY = os.environ['FLICKR_API_KEY']  # raises KeyError when unset
FLICKR = 'flickr'
ENDPOINT = 'https://api.flickr.com/services/rest/?method=flickr.photos.search'

logging.basicConfig(
    format='%(asctime)s: [%(levelname)s - Flickr API] =======> %(message)s',
    level=logging.INFO)
def get_license(_index):
    """Map a Flickr license index to a ``(license, version)`` pair.

    @param _index Flickr license number, or the string 'all' to get the
                  collection of all supported license indices
    @return (name, version) tuple, or (None, None) for unknown indices
    """
    cc_license = {
        1: 'by-nc-sa',
        2: 'by-nc',
        3: 'by-nc-nd',
        4: 'by',
        5: 'by-sa',
        6: 'by-nd',
        9: 'CC0',
        10: 'PDM'
    }

    if _index == 'all':
        return cc_license.keys()

    idx = int(_index)
    name = cc_license.get(idx)
    if name is None:
        # Indices 7, 8, <= 0 and > 10 are not CC licenses.
        logging.warning('License not detected!')
        return None, None

    # CC0 (9) and the Public Domain Mark (10) are version 1.0; the rest 2.0.
    return name, 1.0 if idx in (9, 10) else 2.0
def get_image_url(_data):
    """Pick the best available image URL from a photo record.

    Prefers large, then medium, then small images.

    @param _data raw Flickr photo dict
    @return (url, height, width) tuple; (None, None, None) when no size found
    """
    for suffix in ('l', 'm', 's'):
        url_key = 'url_{}'.format(suffix)
        if url_key in _data:
            return (_data.get(url_key),
                    _data.get('height_{}'.format(suffix)),
                    _data.get('width_{}'.format(suffix)))

    logging.warning('Image not detected!')
    return (None, None, None)
def create_meta_data_dict(_data):
    """Build the meta_data dict (upload date, taken date, description).

    @param _data raw Flickr photo dict
    @return dict with any of 'pub_date', 'date_taken', 'description'
    """
    meta_data = {}
    for src_key, dst_key in (('dateupload', 'pub_date'),
                             ('datetaken', 'date_taken')):
        if src_key in _data:
            meta_data[dst_key] = etl_mods.sanitizeString(_data.get(src_key))

    description = etl_mods.sanitizeString(
        _data.get('description', {}).get('_content'))
    if description:
        meta_data['description'] = description
    return meta_data
def create_tags_list(_data):
    """Build a list of up to 20 unique tag dicts from the photo's tag string.

    @param _data raw Flickr photo dict
    @return list of {'name', 'provider'} dicts, or None when no tags present
    """
    raw = _data.get('tags', '').strip()
    if not raw:
        return None
    max_tags = 20
    # De-duplicate (order is not guaranteed) and cap the number of tags.
    unique_tags = list(set(raw.split()))[:max_tags]
    return [{'name': tag.strip(), 'provider': FLICKR} for tag in unique_tags]
def extract_data(_data):
    """Convert one raw Flickr photo record into a TSV row list.

    Extracts title, creator, URLs, dimensions, license, meta data and tags
    and delegates row formatting to etl_mods.

    @param _data raw Flickr photo dict from the API
    @return the row produced by etl_mods.create_tsv_list_row
    """
    image_url, height, width = get_image_url(_data)
    license_, version = get_license(_data.get('license', -1))

    # Creator page on Flickr, derived from the owner identifier when present.
    if 'owner' in _data:
        creator_url = 'www.flickr.com/photos/{}'.format(_data['owner']).strip()
    else:
        creator_url = None

    foreign_id = _data.get('id')
    # Landing page for the photo itself; requires both creator URL and id.
    foreign_url = (
        '{}/{}'.format(creator_url, foreign_id)
        if foreign_id and creator_url
        else None
    )

    return etl_mods.create_tsv_list_row(
        foreign_identifier=foreign_id or image_url,
        foreign_landing_url=foreign_url,
        image_url=image_url,
        thumbnail=_data.get('url_s'),
        width=width,
        height=height,
        license_=license_,
        license_version=version,
        creator=_data.get('ownername'),
        creator_url=creator_url,
        title=_data.get('title'),
        meta_data=create_meta_data_dict(_data),
        tags=create_tags_list(_data),
        watermarked='f',
        provider=FLICKR,
        source=FLICKR
    )
def construct_api_query_string(
        start_ts,
        end_ts,
        license_,
        cur_page,
        switch_date=False):
    """Build the flickr.photos.search request URL for one result page.

    @param start_ts    interval start timestamp
    @param end_ts      interval end timestamp
    @param license_    Flickr license index to filter on
    @param cur_page    1-based page number to request
    @param switch_date filter on date taken instead of date uploaded
    @return the fully assembled query URL
    """
    date_type = 'taken' if switch_date else 'upload'
    return (
        '{endpoint}&api_key={key}'
        '&min_{dt}_date={start}&max_{dt}_date={end}&license={lic}'
        '&media=photos&content_type=1&extras=description,license,date_upload,'
        'date_taken,owner_name,tags,o_dims,url_t,url_s,url_m,url_l'
        '&per_page={size}&format=json&nojsoncallback=1&page={page}'
    ).format(
        endpoint=ENDPOINT, key=API_KEY, start=start_ts, end=end_ts,
        lic=license_, size=SIZE, page=cur_page, dt=date_type)
def process_images(start_ts, end_ts, license_, switch_date=False):
    """Fetch and persist all images in [start_ts, end_ts] for one license.

    Pages through the flickr.photos.search results, extracts a TSV row per
    photo and appends the rows to FILE, throttling between requests.

    @param start_ts    interval start timestamp
    @param end_ts      interval end timestamp
    @param license_    Flickr license index to filter on
    @param switch_date filter on date taken instead of date uploaded
    @return number of images written
    """
    proc_time = time.time()
    pages = 1
    cur_page = 1
    num_images = 0

    while cur_page <= pages:
        # loop through each page of data
        logging.info('Processing page: {}'.format(cur_page))
        api_query_string = construct_api_query_string(
            start_ts,
            end_ts,
            license_,
            cur_page,
            switch_date
        )
        img_data = etl_mods.requestContent(api_query_string)

        if not img_data or img_data.get('stat') != 'ok':
            # Abort the interval rather than re-requesting the same page
            # indefinitely when the API errors out or the payload is bad.
            logging.warning('Bad API response; aborting interval')
            break

        result = img_data.get('photos', {})
        pages = result.get('pages')  # total number of pages
        cur_page = result.get('page')  # page number confirmed by the API
        photos = result.get('photo')  # image meta data for the current page
        if photos:
            # TODO update to >= python3.8, use walrus assignment
            extracted = [r for r in (extract_data(p) for p in photos) if r]
            num_images += len(extracted)
            etl_mods.writeToFile(extracted, FILE)
        cur_page += 1

        etl_mods.delayProcessing(proc_time, DELAY)  # throttle requests
        proc_time = time.time()

    logging.info('Total pages processed: {}'.format(pages))
    return num_images
def exec_job(license_, start_date, _duration=1, _mode=None):
    """Collect images for one license over a time window.

    Splits [start_date, start_date + _duration hours] into Ts_RANGE-minute
    intervals and processes each interval twice: by upload date and by
    taken date.

    @param license_   Flickr license index
    @param start_date window start, formatted '%Y-%m-%d %H:%M'
    @param _duration  window length in hours
    @param _mode      unused
    """
    total_images = 0
    start_time = datetime.strptime(start_date, '%Y-%m-%d %H:%M')
    end_time = start_time + timedelta(hours=_duration)

    for dt in rrule.rrule(rrule.MINUTELY, dtstart=start_time, until=end_time):
        # Only act on minutes that fall on a Ts_RANGE boundary.
        # NOTE(review): .seconds ignores the days component of the delta,
        # but Ts_RANGE divides 1440 so the modulo stays aligned across days.
        if int((dt - start_time).seconds / 60) % Ts_RANGE != 0:
            continue
        nxt_time = dt + timedelta(minutes=Ts_RANGE)
        logging.info(
            'Processing dates: {} to {}, license: {}'
            .format(dt, nxt_time, get_license(license_)[0]))
        # get the meta data within the time interval
        total_images += process_images(dt, nxt_time, license_)
        total_images += process_images(dt, nxt_time, license_, True)

    logging.info('Total {} images: {}'.format(
        get_license(license_)[0], total_images))
def main():
    """Parse CLI options and run the Flickr ETL job for every CC license."""
    logging.info('Begin: Flickr API requests')
    param = None
    duration = 1  # in hours

    parser = argparse.ArgumentParser(description='Flickr API Job', add_help=True)
    parser.add_argument('--mode', choices=['default'],
                        help='Identify all images that were uploaded in the previous hour [default] \nIdentify all images that were uploaded on a given date [date] or month [month].')
    parser.add_argument('--date', type=lambda dt: datetime.strptime(dt, '%Y-%m-%d'),
                        help='Identify images uploaded on a given date (format: YYYY-MM-DD).')
    parser.add_argument('--month', type=lambda dt: datetime.strptime(dt, '%Y-%m'),
                        help='Identify images uploaded in a given year and month (format: YYYY-MM).')
    args = parser.parse_args()

    if args.date:
        # one whole day
        param = args.date.strftime('%Y-%m-%d %H:%M')
        duration = 24
    elif args.month:
        # one whole calendar month
        param = args.month.strftime('%Y-%m-01 %H:%M')
        days_in_month = calendar.monthrange(args.month.year, args.month.month)[1]
        duration = 24 * int(days_in_month)
    elif args.mode:
        if str(args.mode) == 'default':  # the start of the previous hour
            param = datetime.strftime(datetime.now() - timedelta(hours=1), '%Y-%m-%d %H:00')
        else:
            logging.warning('Invalid option')
            logging.info('Terminating!')

    # run the job and identify images for each CC license
    if param:
        for license_ in get_license('all'):
            exec_job(license_, param, duration)
    logging.info('Terminated!')
# Script entry point.
if __name__ == '__main__':
    main()
|
import inspect
import os
import pickle
import re
import struct
import subprocess as sp
import textwrap
_re_var_name = re.compile(r'^[a-zA-Z_]\w*$', re.UNICODE)
_re_module_name = re.compile(r'^[a-zA-Z_.][\w.]*$', re.UNICODE)
# TODO: Use fd's besides stdin and stdout, so that you don't mess with code that reads or writes to those streams
class MakePython2:
    """Run a Python function inside a separate Python 2 subprocess.

    Arguments and return values cross the process boundary as pickles
    (protocol 2, the highest Python 2 understands) over the child's
    stdin/stdout, each frame length-prefixed with a native unsigned int
    (see _write_pkl / _read_pkl).
    """

    # Highest pickle protocol Python 2 can read.
    pickle_protocol = 2
    # Python 2 bootstrap script shipped next to this module.
    template = os.path.join(*(list(os.path.split(__file__))[:-1] + ['py2_template.py']))

    def __init__(self, func=None, *, imports=None, global_values=None, copy_function_body=True,
                 python2_path='python2'):
        """Make a function execute within a Python 2 instance

        :param func: The function to wrap. If not specified, this class instance behaves like a decorator
        :param imports: Any import statements the function requires. Should be a list, where each element is either a
            string (e.g., ``'sys'`` for ``import sys``)
            or a tuple (e.g., ``('os.path', 'path')`` for ``import os.path as path``)
        :param global_values: A dictionary of global variables the function relies on. Keys must be strings, and values
            must be picklable
        :param copy_function_body: Whether or not to copy the function's source code into the Python 2 instance
        :param python2_path: The path to the Python 2 executable to use
        """
        self.imports = imports or []
        self.globals = global_values or {}
        self.copy_function_body = copy_function_body
        self.python2_path = python2_path
        self.proc = None  # Popen handle, created lazily in __call__

        # Normalise imports to tuples: 'mod' -> ('mod',); dict -> item pairs.
        if isinstance(self.imports, dict):
            self.imports = list(self.imports.items())
        for i, imp in enumerate(self.imports):
            if isinstance(imp, str):
                self.imports[i] = (imp,)
            elif isinstance(imp, (tuple, list)):
                if len(imp) not in [1, 2]:
                    raise ValueError("Imports must be given as 'name', ('name',), or ('pkg', 'name')")
                if not all(isinstance(n, str) and _re_module_name.match(n) for n in imp):
                    raise ValueError("Invalid import name: 'import {}{}'"
                                     .format(imp[0], 'as {}'.format(imp[1]) if len(imp) == 2 else ''))
        # Validate global-variable names before shipping them to the child.
        for k in self.globals.keys():
            if not isinstance(k, str):
                raise ValueError("Global variables must be given as {'name': value}")
            elif not _re_var_name.match(k):
                raise ValueError("Invalid variable name given: '{}'".format(k))
        if func:
            self(func)

    def _write_pkl(self, obj):
        # Length-prefixed pickle frame: native uint length, then payload.
        data = pickle.dumps(obj, protocol=MakePython2.pickle_protocol)
        self.proc.stdin.write(struct.pack('@I', len(data)))
        self.proc.stdin.write(data)
        self.proc.stdin.flush()

    def _read_pkl(self):
        # Counterpart of _write_pkl: read the 4-byte length, then unpickle.
        outp_length = int(struct.unpack('@I', self.proc.stdout.read(4))[0])
        return pickle.loads(self.proc.stdout.read(outp_length))

    def _wrapped_function(self, *args, **kwargs):
        # Ship the call to the child; it answers with (success, result).
        self._write_pkl((args, kwargs))
        success, result = self._read_pkl()
        if success:
            return result
        else:
            raise RuntimeError(result)

    @property
    def function(self):
        """The proxy callable that executes in the Python 2 subprocess."""
        return self._wrapped_function

    def __call__(self, func):
        """Wrap *func* (a callable or an expression-name string); return the proxy."""
        if callable(func):
            function_code = textwrap.dedent(inspect.getsource(func)) if self.copy_function_body else ''
            # Strip our own decorator line so the source compiles under Python 2.
            function_code = '\n'.join(line for line in function_code.split('\n') if not line.startswith('@MakePython2'))
            function_name = func.__name__
        elif isinstance(func, str):
            function_code = ''
            function_name = func
        else:
            raise TypeError("MakePython2 must be given either a function or an expression string to execute")
        self.proc = sp.Popen([self.python2_path, MakePython2.template], executable=self.python2_path,
                             stdin=sp.PIPE, stdout=sp.PIPE)
        # Bootstrap handshake consumed by py2_template.py.
        self._write_pkl((self.imports, self.globals, function_name, function_code))
        return self._wrapped_function

    def __del__(self):
        # Ask the child to exit (None sentinel), then close pipes and reap it.
        if self.proc:
            self._write_pkl(None)
            self.proc.stdin.close()
            self.proc.stdout.close()
            self.proc.terminate()
            self.proc.wait()
|
# # from django.contrib.auth.models import User, Group
# from rest_framework import viewsets
# # from rest_framework import permissions
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from covidmap.serializers import CovidGeneralDataSerializer
from covidmap.utils import get_most_recent_data, timer
class GetTodayCasesView(APIView):
    """
    Return today's covid data, split into world / continent / country buckets.

    * Requires date today
    * Anyone can access this view
    """

    @timer
    def get(self, request, format=None):
        """
        Return covid data based on the most recent snapshot.
        """
        records = get_most_recent_data()
        if not records:
            # nothing recorded for today
            return Response(data={}, status=status.HTTP_204_NO_CONTENT)

        world = {}
        continents = []
        countries = []
        for record in records:
            if record.continent_name != 'other':
                countries.append(CovidGeneralDataSerializer(record).data)
            elif record.location_name == 'World':
                world = CovidGeneralDataSerializer(record).data
            elif record.location_name != "International":
                # 'other' continent rows that aren't World/International
                continents.append(CovidGeneralDataSerializer(record).data)

        payload = {
            "data_world": world,
            "data_cases_continents": continents,
            "data_cases_countries": countries
        }
        return Response(data=payload, status=status.HTTP_200_OK)
|
# coding: utf-8
import pprint
import re
import six
class IncidentSubTypeV2Do:
    """Huawei Cloud SDK model describing an incident sub-type.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the API definition.
    """

    sensitive_list = []

    openapi_types = {
        'incident_sub_type_id': 'str',
        'incident_sub_type_name': 'str',
        'incident_product_category_list': 'list[IncidentProductCategoryV2]'
    }

    attribute_map = {
        'incident_sub_type_id': 'incident_sub_type_id',
        'incident_sub_type_name': 'incident_sub_type_name',
        'incident_product_category_list': 'incident_product_category_list'
    }

    def __init__(self, incident_sub_type_id=None, incident_sub_type_name=None, incident_product_category_list=None):
        """IncidentSubTypeV2Do - a model defined in huaweicloud sdk"""
        self._incident_sub_type_id = None
        self._incident_sub_type_name = None
        self._incident_product_category_list = None
        self.discriminator = None
        # None arguments leave the corresponding attribute unset.
        for attr_name, attr_value in (
                ('incident_sub_type_id', incident_sub_type_id),
                ('incident_sub_type_name', incident_sub_type_name),
                ('incident_product_category_list', incident_product_category_list)):
            if attr_value is not None:
                setattr(self, attr_name, attr_value)

    @property
    def incident_sub_type_id(self):
        """The sub-type id of this work order (工单子类型id).

        :rtype: str
        """
        return self._incident_sub_type_id

    @incident_sub_type_id.setter
    def incident_sub_type_id(self, incident_sub_type_id):
        """Set the sub-type id of this work order.

        :type: str
        """
        self._incident_sub_type_id = incident_sub_type_id

    @property
    def incident_sub_type_name(self):
        """The sub-type name of this work order (工单子类型名称).

        :rtype: str
        """
        return self._incident_sub_type_name

    @incident_sub_type_name.setter
    def incident_sub_type_name(self, incident_sub_type_name):
        """Set the sub-type name of this work order.

        :type: str
        """
        self._incident_sub_type_name = incident_sub_type_name

    @property
    def incident_product_category_list(self):
        """The product category list (产品类型列表).

        :rtype: list[IncidentProductCategoryV2]
        """
        return self._incident_product_category_list

    @incident_product_category_list.setter
    def incident_product_category_list(self, incident_product_category_list):
        """Set the product category list.

        :type: list[IncidentProductCategoryV2]
        """
        self._incident_product_category_list = incident_product_category_list

    def to_dict(self):
        """Return the model properties as a dict."""
        def _convert(val):
            # Recursively convert nested models inside lists and dicts.
            if isinstance(val, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v for v in val]
            if hasattr(val, "to_dict"):
                return val.to_dict()
            return {k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in val.items()}

        out = {}
        for attr_name in self.openapi_types:
            value = getattr(self, attr_name)
            if isinstance(value, (list, dict)) or hasattr(value, "to_dict"):
                out[attr_name] = _convert(value)
            elif attr_name in self.sensitive_list:
                # mask scalar sensitive values, matching the generated SDKs
                out[attr_name] = "****"
            else:
                out[attr_name] = value
        return out

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """True when *other* is the same model type with equal state."""
        if not isinstance(other, IncidentSubTypeV2Do):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
|
# Read an integer and report its double, its triple and its square root.
n1 = int(input('Insira um número: '))
dobro, triplo, raiz = n1 * 2, n1 * 3, n1 ** 0.5
print('O dobro de {} é {}, \nseu triplo é {}, \ne sua raiz quadrada é {:.2f}'.format(n1, dobro, triplo, raiz))
|
from abc import ABC, abstractmethod
import contextlib
from enum import Enum, unique
from typing import Dict, Optional
from mlpiot.base.utils.dataset import auto_detect_dataset, DatasetParams
from mlpiot.proto import TrainerMetadata, VisionPipelineDataset
class Trainer(ABC):
    """Abstract base class for vision-pipeline trainers.

    Subclasses implement initialize / prepare_for_training / train;
    a lifecycle manager drives those hooks in that order.
    """

    @abstractmethod
    def initialize(self, environ: Dict[str, str]) -> None:
        """Validate and store the given parameters for later stages.

        Only light validation belongs here: defer heavy IO and the
        loading of large libraries or models to `prepare_for_training`.

        environ -- a dictionary from parameter name to its string value.
        """
        raise NotImplementedError

    @abstractmethod
    def prepare_for_training(
            self,
            output_metadata: TrainerMetadata):
        """Load the internal components and fill the given metadata object."""
        raise NotImplementedError

    @abstractmethod
    def train(
            self,
            dataset: VisionPipelineDataset,
            validation_dataset: Optional[VisionPipelineDataset] = None)\
            -> None:
        """Run training on *dataset*, optionally validating as it goes."""
        raise NotImplementedError

    def release(self, type_, value, traceback) -> bool:
        """Free held resources; return True to suppress the active exception."""
        return False

    def get_dataset(
            self,
            directory_path: str,
            dataset_params: DatasetParams) -> VisionPipelineDataset:
        """Auto-detect and load a dataset stored under *directory_path*."""
        return auto_detect_dataset(directory_path, dataset_params)
class TrainerLifecycleManager(object):
    """Drives a Trainer through its legal state sequence:
    NOT_INITIALIZED -> INITIALIZED -> PREPARED_FOR_TRAINING ->
    ENTERED_FOR_TRAINING -> RELEASED.
    """

    @unique
    class _State(Enum):
        # Lifecycle states; RELEASED is terminal.
        NOT_INITIALIZED = 0
        INITIALIZED = 1
        PREPARED_FOR_TRAINING = 2
        ENTERED_FOR_TRAINING = 3
        RELEASED = 99

    def __init__(self, implementation: Trainer):
        assert isinstance(implementation, Trainer)
        self.implementation = implementation
        self._metadata = TrainerMetadata()
        # NOTE(review): this records the *manager's* class name, not the
        # wrapped trainer's -- confirm whether
        # implementation.__class__.__name__ was intended.
        self._metadata.name = self.__class__.__name__
        self._state = TrainerLifecycleManager._State.NOT_INITIALIZED

    def initialize(self, environ: Dict[str, str]) -> None:
        # Delegate initialization; legal only from NOT_INITIALIZED.
        assert self._state is \
            TrainerLifecycleManager._State.NOT_INITIALIZED
        self.implementation.initialize(environ)
        self._state = TrainerLifecycleManager._State.INITIALIZED

    def release(self, type_, value, traceback) -> bool:
        """release the resources"""
        # Forward the implementation's decision on suppressing the exception.
        suppress_exception = self.implementation.release(
            type_, value, traceback)
        self._state = TrainerLifecycleManager._State.RELEASED
        return suppress_exception

    def get_dataset(
            self,
            directory_path: str,
            dataset_params: DatasetParams) -> VisionPipelineDataset:
        # Pass-through to the wrapped trainer's dataset loader.
        return self.implementation.get_dataset(
            directory_path, dataset_params)

    class _PreparedForTraining(contextlib.AbstractContextManager):
        # Handle returned by prepare_for_training(): entering transitions to
        # ENTERED_FOR_TRAINING; exiting releases the trainer's resources.
        def __init__(
                self, lifecycle_manager: 'TrainerLifecycleManager'):
            self.lifecycle_manager = lifecycle_manager

        def __enter__(self):
            assert self.lifecycle_manager._state is \
                TrainerLifecycleManager._State.PREPARED_FOR_TRAINING
            self.lifecycle_manager._state = \
                TrainerLifecycleManager._State.ENTERED_FOR_TRAINING
            return self

        def __exit__(self, type_, value, traceback):
            assert self.lifecycle_manager._state is \
                TrainerLifecycleManager._State.ENTERED_FOR_TRAINING
            return self.lifecycle_manager.release(
                type_, value, traceback)

        def train(self, dataset: VisionPipelineDataset,
                  validation_dataset: Optional[VisionPipelineDataset] = None):
            # Only valid while inside the context (ENTERED_FOR_TRAINING).
            assert self.lifecycle_manager._state is \
                TrainerLifecycleManager._State.ENTERED_FOR_TRAINING
            self.lifecycle_manager.implementation.train(
                dataset, validation_dataset=validation_dataset)

    def prepare_for_training(self):
        # Let the trainer fill metadata, then hand back a context manager
        # that must be entered before train() may be called.
        assert self._state is \
            TrainerLifecycleManager._State.INITIALIZED
        self.implementation.prepare_for_training(self._metadata)
        prepared = TrainerLifecycleManager._PreparedForTraining(self)
        self._state = \
            TrainerLifecycleManager._State.PREPARED_FOR_TRAINING
        return prepared
|
from random import randint, seed
import hashkernel.base_x as bx
def random_bytes(sz):
    """Return *sz* random bytes drawn with random.randint (seedable)."""
    return bytes([randint(0, 255) for _ in range(sz)])
b58 = bx.base_x(58)
def test_nulls():
    """Leading zero bytes must round-trip as leading '1' characters."""
    assert b58.encode(b"\0\1") == "12"
    assert b58.decode("12") == b"\x00\x01"
    assert b58.decode(b"12") == b"\x00\x01"
    assert b58.encode(b"\0") == "1"
    assert b58.decode("1") == b"\x00"
    assert b58.encode(b"") == ""
    assert b58.decode("") == b""
    # encode() accepts bytes only; a str must raise TypeError
    try:
        b58.encode("")
    except TypeError:
        pass
    else:
        assert False
def test_randomized():
    """Round-trip random payloads through every configured alphabet."""
    codecs = [bx.base_x(size) for size in bx.alphabets]
    seed(0)
    for length in [1, 2, 0, 3, 1, 77, 513, 732]:
        payload = random_bytes(length)
        for codec in codecs:
            assert codec.decode(codec.encode(payload)) == payload
            assert codec.decode_check(codec.encode_check(payload)) == payload
|
import time
def execute_athena_query(output_location, query, athena_client):
    """Execute *query* against Athena and return its results.

    Keyword arguments:
    output_location -- the s3 location to output the results of the execution to
    query -- the query to execute
    athena_client -- boto3 Athena client used for all API calls

    Raises KeyError when the execution finishes unsuccessfully and a
    generic Exception when Athena returns no QueryExecutionId.
    """
    print(
        f"Executing query and sending output results to '{output_location}'"
    )
    query_start_resp = athena_client.start_query_execution(
        QueryString=query, ResultConfiguration={"OutputLocation": output_location}
    )
    print(f"Query start response {query_start_resp}")

    if 'QueryExecutionId' not in query_start_resp:
        raise Exception("Athena response didn't contain QueryExecutionId")

    execution_id = query_start_resp["QueryExecutionId"]
    execution_state = poll_athena_query_status(execution_id, athena_client)
    if execution_state != "SUCCEEDED":
        print(f"Non successful execution state returned: {execution_state}")
        raise KeyError(
            f"Athena query execution failed with final execution status of '{execution_state}'"
        )
    return athena_client.get_query_results(QueryExecutionId=execution_id)
def poll_athena_query_status(id, athena_client):
    """Poll Athena once a second until query *id* reaches a terminal state.

    Keyword arguments:
    id -- the id of the query in athena
    athena_client -- boto3 Athena client
    Returns the terminal state string (SUCCEEDED / FAILED / CANCELLED).
    """
    print(f"Polling athena query status for id {id}")
    time_taken = 1  # approximate seconds elapsed (one per poll)
    while True:
        execution_resp = athena_client.get_query_execution(QueryExecutionId=id)
        state = execution_resp["QueryExecution"]["Status"]["State"]
        if state not in ("SUCCEEDED", "FAILED", "CANCELLED"):
            time.sleep(1)
            time_taken += 1
            continue
        print(
            f"Athena query execution finished in {str(time_taken)} seconds with status of '{state}'"
        )
        return state
|
from .backend import Backend
class TPUBackend(Backend):
    """TPU execution backend; placeholder with no TPU-specific overrides yet."""
    pass
|
import json
import os
import cv2 as cv
import numpy as np
from keras.applications.inception_resnet_v2 import preprocess_input
from keras.models import load_model
if __name__ == '__main__':
    img_size = 139

    # Trained triplet network; predict() returns embeddings per branch.
    model = load_model('models/model.10-0.0156.hdf5')

    names = [f for f in os.listdir('data') if f.endswith('png')]
    dummy_input = np.zeros((1, img_size, img_size, 3), dtype=np.float32)

    results = []
    for name in names:
        print('processing ' + name)
        alias = name.split('.')[0]
        filename = os.path.join('data', name)
        image_inputs = np.empty((1, img_size, img_size, 3), dtype=np.float32)
        image_bgr = cv.imread(filename)
        # BUG FIX: cv.resize's third *positional* argument is `dst`, not the
        # interpolation flag, so INTER_CUBIC was not applied as intended.
        # Pass it via the `interpolation` keyword.
        image_bgr = cv.resize(image_bgr, (img_size, img_size),
                              interpolation=cv.INTER_CUBIC)
        image_rgb = image_bgr[:, :, ::-1].astype(np.float32)  # BGR -> RGB
        image_inputs[0] = preprocess_input(image_rgb)
        y_pred = model.predict([image_inputs, dummy_input, dummy_input])
        embedding = y_pred[0, 0:128]  # first 128 outputs = embedding vector
        results.append({'alias': alias, 'embedding': embedding.tolist()})

    with open('data/results.json', 'w') as file:
        json.dump(results, file, indent=4)
|
# http://www.codewars.com/kata/5612e743cab69fec6d000077/
# Build the team name by joining the two fragments.
a = "dev"
b = "Lab"
name = "".join((a, b))
|
# Generated by Django 2.1.4 on 2019-09-03 07:42
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the integer field Game.round (default 1)."""

    dependencies = [
        ('game', '0005_auto_20190903_1508'),
    ]

    operations = [
        migrations.AddField(
            model_name='game',
            name='round',
            field=models.IntegerField(default=1),
        ),
    ]
|
from .. import *
from ..bitswap import get_bit, set_bit
#
# Fast, efficient 8-bit math functions specifically
# designed for high-performance LED programming.
#
# Because of the AVR(Arduino) and ARM assembly language
# implementations provided, using these functions often
# results in smaller and faster code than the equivalent
# program using plain "C" arithmetic and logic.
#
#
# Included are:
#
#
# - Saturating unsigned 8-bit add and subtract.
# Instead of wrapping around if an overflow occurs,
#      these routines just 'clamp' the output at a maximum
# of 255, or a minimum of 0. Useful for adding pixel
# values. E.g., qadd8( 200, 100) = 255.
#
# qadd8( i, j) == MIN( (i + j), 0xFF )
# qsub8( i, j) == MAX( (i - j), 0 )
#
# - Saturating signed 8-bit ("7-bit") add.
# qadd7( i, j) == MIN( (i + j), 0x7F)
#
#
# - Scaling (down) of unsigned 8- and 16- bit values.
# Scaledown value is specified in 1/256ths.
# scale8( i, sc) == (i * sc) / 256
# scale16by8( i, sc) == (i * sc) / 256
#
# Example: scaling a 0-255 value down into a
# range from 0-99:
# downscaled = scale8( originalnumber, 100);
#
# A special version of scale8 is provided for scaling
# LED brightness values, to make sure that they don't
# accidentally scale down to total black at low
# dimming levels, since that would look wrong:
# scale8_video( i, sc) = ((i * sc) / 256) +? 1
#
# Example: reducing an LED brightness by a
# dimming factor:
# new_bright = scale8_video( orig_bright, dimming);
#
#
# - Fast 8- and 16- bit unsigned random numbers.
# Significantly faster than Arduino random(), but
# also somewhat less random. You can add entropy.
# random8() == random from 0..255
# random8( n) == random from 0..(N-1)
# random8( n, m) == random from N..(M-1)
#
# random16() == random from 0..65535
# random16( n) == random from 0..(N-1)
# random16( n, m) == random from N..(M-1)
#
# random16_set_seed( k) == seed = k
# random16_add_entropy( k) == seed += k
#
#
# - Absolute value of a signed 8-bit value.
# abs8( i) == abs( i)
#
#
# - 8-bit math operations which return 8-bit values.
# These are provided mostly for completeness,
# not particularly for performance.
# mul8( i, j) == (i * j) & 0xFF
# add8( i, j) == (i + j) & 0xFF
# sub8( i, j) == (i - j) & 0xFF
#
#
# - Fast 16-bit approximations of sin and cos.
# Input angle is a uint16_t from 0-65535.
# Output is a signed int16_t from -32767 to 32767.
# sin16( x) == sin( (x/32768.0) * pi) * 32767
# cos16( x) == cos( (x/32768.0) * pi) * 32767
# Accurate to more than 99% in all cases.
#
# - Fast 8-bit approximations of sin and cos.
# Input angle is a uint8_t from 0-255.
# Output is an UNsigned uint8_t from 0 to 255.
# sin8( x) == (sin( (x/128.0) * pi) * 128) + 128
# cos8( x) == (cos( (x/128.0) * pi) * 128) + 128
# Accurate to within about 2%.
#
#
# - Fast 8-bit "easing in/out" function.
#   ease8InOutCubic(x) == 3(x^2) - 2(x^3)
# ease8InOutApprox(x) ==
# faster, rougher, approximation of cubic easing
# ease8InOutQuad(x) == quadratic (vs cubic) easing
#
# - Cubic, Quadratic, and Triangle wave functions.
#   Input is a uint8_t representing phase within the wave,
# similar to how sin8 takes an angle 'theta'.
# Output is a uint8_t representing the amplitude of
# the wave at that point.
# cubicwave8( x)
# quadwave8( x)
# triwave8( x)
#
# - Square root for 16-bit integers. About three times
# faster and five times smaller than Arduino's built-in
# generic 32-bit sqrt routine.
# sqrt16( uint16_t x ) == sqrt( x)
#
# - Dimming and brightening functions for 8-bit
# light values.
# dim8_video( x) == scale8_video( x, x)
# dim8_raw( x) == scale8( x, x)
# dim8_lin( x) == (x<128) ? ((x+1)/2) : scale8(x,x)
# brighten8_video( x) == 255 - dim8_video( 255 - x)
# brighten8_raw( x) == 255 - dim8_raw( 255 - x)
# brighten8_lin( x) == 255 - dim8_lin( 255 - x)
# The dimming functions in particular are suitable
# for making LED light output appear more 'linear'.
#
#
# - Linear interpolation between two values, with the
# fraction between them expressed as an 8- or 16-bit
# fixed point fraction (fract8 or fract16).
# lerp8by8( fromU8, toU8, fract8 )
# lerp16by8( fromU16, toU16, fract8 )
# lerp15by8( fromS16, toS16, fract8 )
# == from + (( to - from ) * fract8) / 256)
# lerp16by16( fromU16, toU16, fract16 )
# == from + (( to - from ) * fract16) / 65536)
# map8( in, rangeStart, rangeEnd)
# == map( in, 0, 255, rangeStart, rangeEnd);
#
# - Optimized memmove, memcpy, and memset, that are
# faster than standard avr-libc 1.8.
# memmove8( dest, src, bytecount)
# memcpy8( dest, src, bytecount)
# memset8( buf, value, bytecount)
#
# - Beat generators which return sine or sawtooth
# waves in a specified number of Beats Per Minute.
# Sine wave beat generators can specify a low and
# high range for the output. Sawtooth wave beat
# generators always range 0-255 or 0-65535.
# beatsin8( BPM, low8, high8)
# = (sine(beatphase) * (high8-low8)) + low8
# beatsin16( BPM, low16, high16)
# = (sine(beatphase) * (high16-low16)) + low16
# beatsin88( BPM88, low16, high16)
# = (sine(beatphase) * (high16-low16)) + low16
# beat8( BPM) = 8-bit repeating sawtooth wave
# beat16( BPM) = 16-bit repeating sawtooth wave
# beat88( BPM88) = 16-bit repeating sawtooth wave
# BPM is beats per minute in either simple form
# e.g. 120, or Q8.8 fixed-point form.
# BPM88 is beats per minute in ONLY Q8.8 fixed-point
# form.
#
# Lib8tion is pronounced like 'libation': lie-BAY-shun
# Feature flags mirroring FastLED's lib8tion configuration: a value of 1
# selects the portable "C" (here: pure Python) implementation of each
# arithmetic primitive.
QADD8_C = 1
QADD7_C = 1
QSUB8_C = 1
SCALE8_C = 1
SCALE16BY8_C = 1
SCALE16_C = 1
ABS8_C = 1
MUL8_C = 1
QMUL8_C = 1
ADD8_C = 1
SUB8_C = 1
EASE8_C = 1
AVG8_C = 1
AVG7_C = 1
AVG16_C = 1
AVG15_C = 1
BLEND8_C = 1
# @defgroup lib8tion Fast math functions
# A variety of functions for working with numbers.
# @{
# # # # # # # # # # # # # # # # # # # # # # # #
#
# typedefs for fixed-point fractional types.
#
# sfract7 should be interpreted as signed 128ths.
# fract8 should be interpreted as unsigned 256ths.
# sfract15 should be interpreted as signed 32768ths.
# fract16 should be interpreted as unsigned 65536ths.
#
# Example: if a fract8 has the value "64", that should be interpreted
# as 64/256ths, or one-quarter.
#
#
# fract8 range is 0 to 0.99609375
# in steps of 0.00390625
#
# sfract7 range is -0.9921875 to 0.9921875
# in steps of 0.0078125
#
# fract16 range is 0 to 0.99998474121
# in steps of 0.00001525878
#
# sfract15 range is -0.99996948242 to 0.99996948242
# in steps of 0.00003051757
#
# ANSI unsigned short _Fract. range is 0 to 0.99609375
# in steps of 0.00390625
class fract8(int): # < ANSI: unsigned short _Fract
    """Unsigned Q0.8 fraction: value/256, range 0 to 0.99609375."""
    pass
# ANSI: signed short _Fract. range is -0.9921875 to 0.9921875
# in steps of 0.0078125
class sfract7(int): # < ANSI: signed short _Fract
    """Signed Q0.7 fraction: value/128, range -0.9921875 to 0.9921875."""
    pass
# ANSI: unsigned _Fract. range is 0 to 0.99998474121
# in steps of 0.00001525878
class fract16(int): # < ANSI: unsigned _Fract
    """Unsigned Q0.16 fraction: value/65536, range 0 to 0.99998474121."""
    pass
# ANSI: signed _Fract. range is -0.99996948242 to 0.99996948242
# in steps of 0.00003051757
class sfract15(int): # < ANSI: signed _Fract
    """Signed Q0.15 fraction: value/32768, range -0.99996948242 to 0.99996948242."""
    pass
# accumXY types should be interpreted as X bits of integer,
# and Y bits of fraction.
# E.g., accum88 has 8 bits of int, 8 bits of fraction
class accum88(int): # < ANSI: unsigned short _Accum. 8 bits int, 8 bits fraction
    """Unsigned Q8.8 fixed point."""
    pass
class saccum78(int): # < ANSI: signed short _Accum. 7 bits int, 8 bits fraction
    """Signed Q7.8 fixed point."""
    pass
class accum1616(int): # < ANSI: signed _Accum. 16 bits int, 16 bits fraction
    """Q16.16 fixed point."""
    pass
class saccum1516(int): # < ANSI: signed _Accum. 15 bits int, 16 bits fraction
    """Signed Q15.16 fixed point."""
    pass
class accum124(int): # < no direct ANSI counterpart. 12 bits int, 4 bits fraction
    """Q12.4 fixed point."""
    pass
class saccum114(int): # < no direct ANSI counterpart. 1 bit int, 14 bits fraction
    """Signed Q1.14 fixed point."""
    pass
# typedef for IEEE754 "binary32" float type internals
class IEEE754binary32_t(object):
    """Pure-Python stand-in for FastLED's IEEE754binary32_t union.

    Stores the raw int (i) and float (f) views plus the bit-field slices;
    each bit-field argument is masked to its declared width using
    get_bit/set_bit.
    NOTE(review): unlike the C union, the fields here are independent --
    changing one does NOT update the others. Confirm callers rely only on
    the fields they set.
    """
    def __init__(
        self,
        i=0,
        f=0.0,
        mantissa=0,
        exponent=0,
        signbit=0,
        mant7=0,
        mant16=0,
        exp_=0,
        sb_=0,
        mant_lo8=0,
        mant_hi16_exp_lo1=0,
        sb_exphi7=0
    ):
        self.i = i  # raw 32-bit integer view
        self.f = f  # float view

        def _get_bits(value, num_bits):
            # keep only the low num_bits bits of value
            res = 0
            for i in range(num_bits):
                res = set_bit(res, i, get_bit(value, i))
            return res

        self.mantissa = _get_bits(mantissa, 23)
        self.exponent = _get_bits(exponent, 8)
        self.signbit = _get_bits(signbit, 1)
        self.mant7 = _get_bits(mant7, 7)
        self.mant16 = _get_bits(mant16, 16)
        self.exp_ = _get_bits(exp_, 8)
        self.sb_ = _get_bits(sb_, 1)
        self.mant_lo8 = _get_bits(mant_lo8, 8)
        self.mant_hi16_exp_lo1 = _get_bits(mant_hi16_exp_lo1, 16)
        self.sb_exphi7 = _get_bits(sb_exphi7, 8)
from .math8 import *
from .random8 import *
from .scale8 import *
from .trig8 import *
# # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # #
#
# float-to-fixed and fixed-to-float conversions
#
# Note that anything involving a 'float' on AVR will be slower.
# sfract15ToFloat: conversion from sfract15 fixed point to
# IEEE754 32-bit float.
def sfract15ToFloat(y):
    """Convert an sfract15 fixed-point value to a float in (-1, 1)."""
    # 32768 is 2**15, so multiplying by the exact reciprocal equals division.
    return y * (1.0 / 32768.0)
# conversion from IEEE754 float in the range (-1,1)
# to 16-bit fixed point. Note that the extremes of
# one and negative one are NOT representable. The
# representable range is basically
def floatToSfract15(f):
    """Convert a float in (-1, 1) to sfract15 fixed point (returned as float)."""
    return 32768.0 * f
# # # # # # # # # # # # # # # # # # # # # # # #
#
# linear interpolation, such as could be used for Perlin noise, etc.
#
# A note on the structure of the lerp functions:
# The cases for b>a and b<=a are handled separately for
# speed: without knowing the relative order of a and b,
# the value (a-b) might be overflow the width of a or b,
# and have to be promoted to a wider, slower type.
# To avoid that, we separate the two cases, and are able
# to do all the math in the same width as the arguments,
# which is much faster and smaller on AVR.
# linear interpolation between two unsigned 8-bit values,
# with 8-bit fraction
def lerp8by8(a, b, frac):
    """Linear interpolation between two uint8 values with an 8-bit fraction.

    The b>a and b<=a cases are kept separate so the delta never goes
    negative, matching the original 8-bit implementation.
    """
    if b > a:
        return a + scale8(b - a, frac)
    return a - scale8(a - b, frac)
# linear interpolation between two unsigned 16-bit values,
# with 16-bit fraction
def lerp16by16(a, b, frac):
    """Linear interpolation between two uint16 values with a 16-bit fraction."""
    if b > a:
        return a + scale16(b - a, frac)
    return a - scale16(a - b, frac)
# linear interpolation between two unsigned 16-bit values,
# with 8-bit fraction
def lerp16by8(a, b, frac):
    """Linear interpolation between two uint16 values with an 8-bit fraction."""
    if b > a:
        return a + scale16by8(b - a, frac)
    return a - scale16by8(a - b, frac)
# linear interpolation between two signed 15-bit values,
# with 8-bit fraction
def lerp15by8(a, b, frac):
    """Linear interpolation between two signed 15-bit values, 8-bit fraction."""
    if b > a:
        return a + scale16by8(b - a, frac)
    return a - scale16by8(a - b, frac)
# linear interpolation between two signed 15-bit values,
# with 16-bit fraction
def lerp15by16(a, b, frac):
    """Linear interpolation between two signed 15-bit values, 16-bit fraction."""
    if b > a:
        return a + scale16(b - a, frac)
    return a - scale16(a - b, frac)
# map8: map from one full-range 8-bit value into a narrower
# range of 8-bit values, possibly a range of hues.
#
# E.g. map myValue into a hue in the range blue..purple..pink..red
# hue = map8( myValue, HUE_BLUE, HUE_RED);
#
# Combines nicely with the waveform functions (like sin8, etc)
# to produce continuous hue gradients back and forth:
#
# hue = map8( sin8( myValue), HUE_BLUE, HUE_RED);
#
# Mathematically similar to lerp8by8, but arguments are more
# like Arduino's "map"; this function is similar to
#
# map( in, 0, 255, rangeStart, rangeEnd)
#
# but faster and specifically designed for 8-bit values.
def map8(in_, rangeStart, rangeEnd):
    """Map a full-range 8-bit value into the range [rangeStart, rangeEnd)."""
    return rangeStart + scale8(in_, rangeEnd - rangeStart)
# # # # # # # # # # # # # # # # # # # # # # # #
#
# easing functions; see http:# easings.net
#
# ease8InOutQuad: 8-bit quadratic ease-in / ease-out function
# Takes around 13 cycles on AVR
def ease8InOutQuad(i):
    """8-bit quadratic ease-in / ease-out."""
    # Fold the upper half onto the lower half, square, then mirror back.
    j = 255 - i if i & 0x80 else i
    jj2 = scale8(j, j) << 1
    if i & 0x80:
        jj2 = 255 - jj2
    return jj2
# ease16InOutQuad: 16-bit quadratic ease-in / ease-out function
# C implementation at this point
def ease16InOutQuad(i):
    """16-bit quadratic ease-in / ease-out."""
    # Fold the upper half onto the lower half, square, then mirror back.
    j = 65535 - i if i & 0x8000 else i
    jj2 = scale16(j, j) << 1
    if i & 0x8000:
        jj2 = 65535 - jj2
    return jj2
# ease8InOutCubic: 8-bit cubic ease-in / ease-out function
# Takes around 18 cycles on AVR
def ease8InOutCubic(i):
    """8-bit cubic ease-in / ease-out: 3*i^2 - 2*i^3 in 0..255 space."""
    ii = scale8_LEAVING_R1_DIRTY(i, i)
    iii = scale8_LEAVING_R1_DIRTY(ii, i)
    r1 = 3 * ii - 2 * iii
    # Mirrors the AVR asm implementation, which must restore register R1.
    cleanup_R1()
    # a result of "256" must clamp down to 255
    return 255 if r1 & 0x100 else r1
# ease8InOutApprox: fast, rough 8-bit ease-in/ease-out function
# shaped approximately like 'ease8InOutCubic',
# it's never off by more than a couple of percent
# from the actual cubic S-curve, and it executes
# more than twice as fast. Use when the cycles
# are more important than visual smoothness.
# Asm version takes around 7 cycles on AVR.
def ease8InOutApprox(i):
    """Fast, rough 8-bit ease-in / ease-out approximation.

    Piecewise-linear approximation of ease8InOutCubic: slope 1/2 on the
    first and last 64 counts, slope 3/2 (192/128) in the middle. Input and
    output are 0..255.

    BUG FIX: the previous port divided with 2.0, leaking floats out of an
    8-bit integer function; truncating integer arithmetic matches the
    original C/asm implementation.
    """
    if i < 64:
        # start with slope 0.5
        return i // 2
    if i > (255 - 64):
        # end with slope 0.5
        return 255 - ((255 - i) // 2)
    # in the middle, use slope 192/128 = 1.5
    j = i - 64
    return j + (j // 2) + 32
# triwave8: triangle (sawtooth) wave generator. Useful for
# turning a one-byte ever-increasing value into a
# one-byte value that oscillates up and down.
#
# input output
# 0..127 0..254 (positive slope)
# 128..255 254..0 (negative slope)
#
# On AVR this function takes just three cycles.
#
def triwave8(in_):
    """Triangle wave: 0..127 -> 0..254 rising, 128..255 -> 254..0 falling."""
    value = (255 - in_) if in_ & 0x80 else in_
    return value << 1
# quadwave8 and cubicwave8: S-shaped wave generators (like 'sine').
# Useful for turning a one-byte 'counter' value into a
# one-byte oscillating value that moves smoothly up and down,
# with an 'acceleration' and 'deceleration' curve.
#
# These are even faster than 'sin8', and have
# slightly different curve shapes.
#
# quadwave8: quadratic waveform generator. Spends just a little more
# time at the limits than 'sine' does.
def quadwave8(in_):
    """Quadratic 'sine-like' waveform: quad-eased triangle wave."""
    tri = triwave8(in_)
    return ease8InOutQuad(tri)
# cubicwave8: cubic waveform generator. Spends visibly more time
# at the limits than 'sine' does.
def cubicwave8(in_):
    # Like quadwave8 but with the cubic ease curve, which lingers
    # visibly longer at the extremes than a true sine does.
    return ease8InOutCubic(triwave8(in_))
# squarewave8: square wave generator. Useful for
# turning a one-byte ever-increasing value
# into a one-byte value that is either 0 or 255.
# The width of the output 'pulse' is
# determined by the pulsewidth argument:
#
# ~~~
# If pulsewidth is 255, output is always 255.
# If pulsewidth < 255, then
# if input < pulsewidth then output is 255
# if input >= pulsewidth then output is 0
# ~~~
#
# the output looking like:
#
# ~~~
# 255 +--pulsewidth--+
# . | |
# 0 0 +--------(256-pulsewidth)--------
# ~~~
#
# @param in
# @param pulsewidth
# @returns square wave output
def squarewave8(in_, pulsewidth=128):
    """Square wave: 255 while in_ < pulsewidth (or always, if pulsewidth
    is 255), 0 otherwise."""
    return 255 if (pulsewidth == 255 or in_ < pulsewidth) else 0
# Template class for representing fractional fixed-point integers.
class q(object):
    """Fixed-point fractional integer prototype.

    An instance describes a format (T = underlying int type, I integer
    bits, F fraction bits); calling the instance builds a value of that
    format, e.g. ``q88 = q(int, 8, 8); v = q88(1.5)``.
    """
    def __init__(self, T, F, I):
        self._T = T  # underlying integer type
        self._F = F  # number of fraction bits
        self._I = I  # number of integer bits
        self.i = T()
        self.f = T()
    def __call__(self, i, f=None):
        """Build a value: from a single float, or from (int, frac) parts."""
        res = q(self._T, self._F, self._I)
        if isinstance(i, float) and f is None:
            fx = i
            res.i = self._T(fx)
            # BUG FIX: the fraction must be taken relative to the new
            # value's own integer part (res.i), not the prototype's
            # self.i (which is always 0) — otherwise any value >= 1.0
            # got a wildly wrong fractional part.
            res.f = self._T((fx - res.i) * (1 << self._F))
        else:
            if not isinstance(i, self._T):
                i = self._T(i)
            if not isinstance(f, self._T):
                f = self._T(f)
            def _get_bits(value, bit_count):
                # Mask 'value' down to its low 'bit_count' bits.
                ret = 0
                for bit_num in range(bit_count):
                    ret = set_bit(ret, bit_num, get_bit(value, bit_num))
                return ret
            res.i = self._T(_get_bits(i, self._I))
            res.f = self._T(_get_bits(f, self._F))
        return res
    def __mul__(self, v):
        # v * (i + f / 2^F), computed with shifts like the C original.
        return (v * self.i) + ((v * self.f) >> self._F)
# Prototype fixed-point formats; call one to build a value, e.g. q88(1.5).
# A 4.4 integer (4 bits integer, 4 bits fraction)
q44 = q(int, 4, 4)
# A 6.2 integer (6 bits integer, 2 bits fraction)
q62 = q(int, 6, 2)
# A 8.8 integer (8 bits integer, 8 bits fraction)
q88 = q(int, 8, 8)
# A 12.4 integer (12 bits integer, 4 bits fraction)
q124 = q(int, 12, 4)
# Beat generators - These functions produce waves at a given
# number of 'beats per minute'. Internally, they use
# the Arduino function 'millis' to track elapsed time.
# Accuracy is a bit better than one part in a thousand.
#
# beat8( BPM ) returns an 8-bit value that cycles 'BPM' times
# per minute, rising from 0 to 255, resetting to zero,
# rising up again, etc.. The output of this function
# is suitable for feeding directly into sin8, and cos8,
# triwave8, quadwave8, and cubicwave8.
# beat16( BPM ) returns a 16-bit value that cycles 'BPM' times
# per minute, rising from 0 to 65535, resetting to zero,
# rising up again, etc. The output of this function is
# suitable for feeding directly into sin16 and cos16.
# beat88( BPM88) is the same as beat16, except that the BPM88 argument
# MUST be in Q8.8 fixed point format, e.g. 120BPM must
# be specified as 120*256 = 30720.
# beatsin8( BPM, uint8_t low, uint8_t high) returns an 8-bit value that
# rises and falls in a sine wave, 'BPM' times per minute,
# between the values of 'low' and 'high'.
# beatsin16( BPM, uint16_t low, uint16_t high) returns a 16-bit value
# that rises and falls in a sine wave, 'BPM' times per
# minute, between the values of 'low' and 'high'.
# beatsin88( BPM88, ...) is the same as beatsin16, except that the
# BPM88 argument MUST be in Q8.8 fixed point format,
# e.g. 120BPM must be specified as 120*256 = 30720.
#
# BPM can be supplied two ways. The simpler way of specifying BPM is as
# a simple 8-bit integer from 1-255, (e.g., "120").
# The more sophisticated way of specifying BPM allows for fractional
# "Q8.8" fixed point number (an 'accum88') with an 8-bit integer part and
# an 8-bit fractional part. The easiest way to construct this is to multiply
# a floating point BPM value (e.g. 120.3) by 256, (e.g. resulting in 30796
# in this case), and pass that as the 16-bit BPM argument.
# "BPM88" MUST always be specified in Q8.8 format.
#
# Originally designed to make an entire animation project pulse with brightness.
# For that effect, add this line just above your existing call to "FastLED.show()":
#
# uint8_t bright = beatsin8( 60 /*BPM*/, 192 /*dimmest*/, 255 /*brightest*/ ));
# FastLED.setBrightness( bright );
# FastLED.show();
#
# The entire animation will now pulse between brightness 192 and 255 once per second.
# The beat generators need access to a millisecond counter.
# On Arduino, this is "millis()". On other platforms, you'll
# need to provide a function with this signature:
# uint32_t get_millisecond_timer();
# that provides similar functionality.
# You can also force use of the get_millisecond_timer function
# by #defining USE_GET_MILLISECOND_TIMER.
#if (defined(ARDUINO) || defined(SPARK) || defined(FASTLED_HAS_MILLIS)) && !defined(USE_GET_MILLISECOND_TIMER)
# Forward declaration of Arduino function 'millis'.
# uint32_t millis();
def get_millisecond_timer():
    """Return a monotonically increasing millisecond counter (int).

    Prefers MicroPython's utime.ticks_ms(); falls back to the standard
    time module so the library also runs on desktop CPython, where
    'utime' does not exist.
    """
    try:
        import utime
        return utime.ticks_ms()
    except ImportError:
        import time
        return int(time.monotonic() * 1000)
# Alias used by all the beat/time helpers below.
GET_MILLIS = get_millisecond_timer
# beat16 generates a 16-bit 'sawtooth' wave at a given BPM,
# with BPM specified in Q8.8 fixed-point format; e.g.
# for this function, 120 BPM MUST BE specified as
# 120*256 = 30720.
# If you just want to specify "120", use beat16 or beat8.
def beat88(beats_per_minute_88, timebase=0):
    """16-bit sawtooth at a given BPM; BPM MUST be Q8.8 fixed point
    (e.g. 120 BPM -> 120 * 256 = 30720).

    BPM means 'beats per 60000 ms'.  To avoid a division, 60000 ms is
    approximated by 65536 ms via the ratio 280:256 (65536:60000 is
    about 279.62:256), then divided by 65536 with a shift.  Accuracy is
    ~0.05%: asking for 120 BPM yields roughly 119.93.
    """
    elapsed_ms = GET_MILLIS() - timebase
    return (elapsed_ms * beats_per_minute_88 * 280) >> 16
# beat16 generates a 16-bit 'sawtooth' wave at a given BPM
def beat16(beats_per_minute, timebase=0):
    """16-bit sawtooth at a given BPM (plain integer or Q8.8 accum88)."""
    # Promote a plain 8-bit BPM into Q8.8 fixed point when needed.
    bpm88 = beats_per_minute << 8 if beats_per_minute < 256 else beats_per_minute
    return beat88(bpm88, timebase)
# beat8 generates an 8-bit 'sawtooth' wave at a given BPM
def beat8(beats_per_minute, timebase=0):
    # High byte of the 16-bit sawtooth gives an 8-bit 0..255 ramp at the same BPM.
    return beat16(beats_per_minute, timebase) >> 8
# beatsin88 generates a 16-bit sine wave at a given BPM,
# that oscillates within a given range.
# For this function, BPM MUST BE SPECIFIED as
# a Q8.8 fixed-point value; e.g. 120BPM must be
# specified as 120*256 = 30720.
# If you just want to specify "120", use beatsin16 or beatsin8.
def beatsin88(
    beats_per_minute_88,
    lowest=0,
    highest=65535,
    timebase=0,
    phase_offset=0
):
    """16-bit sine at a Q8.8 BPM, oscillating between lowest and highest.

    BPM MUST be Q8.8 fixed point (120 BPM -> 30720); for a plain BPM
    use beatsin16 or beatsin8.
    """
    # sin16 is signed; biasing by 32768 maps it onto 0..65535.
    wave = sin16(beat88(beats_per_minute_88, timebase) + phase_offset) + 32768
    return lowest + scale16(wave, highest - lowest)
# beatsin16 generates a 16-bit sine wave at a given BPM,
# that oscillates within a given range.
def beatsin16(
    beats_per_minute,
    lowest=0,
    highest=65535,
    timebase=0,
    phase_offset=0
):
    """16-bit sine at the given BPM, oscillating between lowest and highest."""
    # sin16 is signed; biasing by 32768 maps it onto 0..65535.
    wave = sin16(beat16(beats_per_minute, timebase) + phase_offset) + 32768
    return lowest + scale16(wave, highest - lowest)
# beatsin8 generates an 8-bit sine wave at a given BPM,
# that oscillates within a given range.
def beatsin8(
    beats_per_minute,
    lowest=0,
    highest=255,
    timebase=0,
    phase_offset=0
):
    """8-bit sine at the given BPM, oscillating between lowest and highest."""
    # sin8 already returns 0..255, so no bias is needed here.
    wave = sin8(beat8(beats_per_minute, timebase) + phase_offset)
    return lowest + scale8(wave, highest - lowest)
# Return the current seconds since boot in a 16-bit value. Used as part of the
# "every N time-periods" mechanism
def seconds16():
    """Current seconds since boot as a 16-bit value (wraps at 65536 s).

    Masked to 16 bits for consistency with minutes16()/hours8() and the
    uint16_t return type of the original C implementation (the old code
    never masked); integer floor division avoids float rounding.
    """
    return (GET_MILLIS() // 1000) & 0xFFFF
# Return the current minutes since boot in a 16-bit value. Used as part of the
# "every N time-periods" mechanism
def minutes16():
    """Current minutes since boot as a 16-bit value (wraps).

    Uses integer floor division instead of int(ms / 60000): true float
    division can lose precision once the tick counter grows very large.
    """
    return (GET_MILLIS() // 60000) & 0xFFFF
# Return the current hours since boot in an 8-bit value. Used as part of the
# "every N time-periods" mechanism
def hours8():
    """Current hours since boot as an 8-bit value (wraps at 256 h).

    Uses integer floor division instead of int(ms / 3600000): true float
    division can lose precision once the tick counter grows very large.
    """
    return (GET_MILLIS() // 3600000) & 0xFF
# Helper routine to divide a 32-bit value by 1024, returning
# only the low 16 bits. You'd think this would be just
# result = (in32 >> 10) & 0xFFFF;
# and on ARM, that's what you want and all is well.
# But on AVR that code turns into a loop that executes
# a four-byte shift ten times: 40 shifts in all, plus loop
# overhead. This routine gets exactly the same result with
# just six shifts (vs 40), and no loop overhead.
# Used to convert millis to 'binary seconds' aka bseconds:
# one bsecond == 1024 millis.
def div1024_32_16(in32):
    """Divide a 32-bit value by 1024, keeping only the low 16 bits.

    Used to turn milliseconds into 'binary seconds' (1 bsecond ==
    1024 ms).  The AVR shift-count rationale in the comment above does
    not apply in Python, but the result is identical.
    """
    return (in32 >> 10) & 0xFFFF
# bseconds16 returns the current time-since-boot in
# "binary seconds", which are actually 1024/1000 of a
# second long.
def bseconds16():
    """Time since boot in 'binary seconds' (1024 ms each), 16 bits."""
    return div1024_32_16(GET_MILLIS())
# Classes to implement "Every N Milliseconds", "Every N Seconds",
# "Every N Minutes", "Every N Hours", and "Every N BSeconds".
#if 1
class _CEveryNTimePeriods(object):
_TIMETYPE = None
_TIMEGETTER = None
def __init__(self, period=None):
self.mPeriod = self._TIMETYPE()
self.mPrevTrigger = self._TIMETYPE()
self.reset()
if period is None:
self.mPeriod = 1
else:
self.setPeriod(period)
def setPeriod(self, period):
self.mPeriod = period
def getTime(self):
return self._TIMETYPE(self._TIMEGETTER())
def getPeriod(self):
return self.mPeriod
def getElapsed(self):
return self.getTime() - self.mPrevTrigger
def getRemaining(self):
return self.mPeriod - self.getElapsed()
def getLastTriggerTime(self):
return self.mPrevTrigger
def ready(self):
isReady = self.getElapsed() >= self.mPeriod
if isReady:
self.reset()
return isReady
def reset(self):
self.mPrevTrigger = self.getTime()
def trigger(self):
self.mPrevTrigger = self.getTime() - self.mPeriod
def __bool__(self):
return self.ready()
def INSTANTIATE_EVERY_N_TIME_PERIODS(NAME, TIMETYPE, TIMEGETTER):
    """Build a named CEveryN... timer class bound to a time type/getter.

    BUG FIX: TIMEGETTER is wrapped in staticmethod.  Storing a plain
    function directly as a class attribute turns it into a bound method
    on instance access, so self._TIMEGETTER() would call it with an
    unwanted 'self' argument.
    """
    return type(
        NAME,
        (_CEveryNTimePeriods,),
        dict(_TIMETYPE=TIMETYPE, _TIMEGETTER=staticmethod(TIMEGETTER)),
    )
# Concrete "every N" timer classes, one per time base (ms, s, bseconds,
# minutes, hours).  Each is truthy once per elapsed period.
CEveryNMillis = INSTANTIATE_EVERY_N_TIME_PERIODS('CEveryNMillis', int, GET_MILLIS)
CEveryNSeconds = INSTANTIATE_EVERY_N_TIME_PERIODS('CEveryNSeconds', int, seconds16)
CEveryNBSeconds = INSTANTIATE_EVERY_N_TIME_PERIODS('CEveryNBSeconds', int, bseconds16)
CEveryNMinutes = INSTANTIATE_EVERY_N_TIME_PERIODS('CEveryNMinutes', int, minutes16)
CEveryNHours = INSTANTIATE_EVERY_N_TIME_PERIODS('CEveryNHours', int, hours8)
def CONCAT_HELPER(x, y):
    """Concatenate the string forms of two values (port of the C ## operator)."""
    return "".join((str(x), str(y)))
def CONCAT_MACRO(x, y):
    # Port of the C two-level CONCAT macro; the extra level forced
    # argument expansion in C and is kept here only for symmetry.
    return CONCAT_HELPER(x, y)
def EVERY_N_MILLIS(N):
    """Return True once every N milliseconds.

    The C macro stamped out a unique static timer per call site using
    __COUNTER__; that cannot be reproduced cheaply in Python, so one
    shared timer is kept per distinct period N.
    """
    return EVERY_N_MILLIS_I('EVERY_N_MILLIS_%s' % N, N)
def EVERY_N_MILLIS_I(NAME, N):
    """Return True once every N milliseconds for the timer named NAME.

    Port of the C macro 'static CEveryNMillis NAME(N); if(NAME)' (the
    previous translation was literal C and not valid Python).  The
    registry stored on the function object stands in for the C static
    local, so the timer persists across calls.
    """
    timers = EVERY_N_MILLIS_I.__dict__.setdefault('_timers', {})
    if NAME not in timers:
        timers[NAME] = CEveryNMillis(N)
    return timers[NAME].ready()
def EVERY_N_SECONDS(N):
    """Return True once every N seconds (one shared timer per distinct N;
    the C macro's per-call-site __COUNTER__ trick has no Python analog)."""
    return EVERY_N_SECONDS_I('EVERY_N_SECONDS_%s' % N, N)
def EVERY_N_SECONDS_I(NAME, N):
    """Return True once every N seconds for the timer named NAME.

    Port of 'static CEveryNSeconds NAME(N); if(NAME)'; the registry on
    the function object emulates the C static local.
    """
    timers = EVERY_N_SECONDS_I.__dict__.setdefault('_timers', {})
    if NAME not in timers:
        timers[NAME] = CEveryNSeconds(N)
    return timers[NAME].ready()
def EVERY_N_BSECONDS(N):
    """Return True once every N binary seconds (1024 ms each); one shared
    timer per distinct N."""
    return EVERY_N_BSECONDS_I('EVERY_N_BSECONDS_%s' % N, N)
def EVERY_N_BSECONDS_I(NAME, N):
    """Return True once every N bseconds for the timer named NAME.

    Port of 'static CEveryNBSeconds NAME(N); if(NAME)'; the registry on
    the function object emulates the C static local.
    """
    timers = EVERY_N_BSECONDS_I.__dict__.setdefault('_timers', {})
    if NAME not in timers:
        timers[NAME] = CEveryNBSeconds(N)
    return timers[NAME].ready()
def EVERY_N_MINUTES(N):
    """Return True once every N minutes (one shared timer per distinct N)."""
    return EVERY_N_MINUTES_I('EVERY_N_MINUTES_%s' % N, N)
def EVERY_N_MINUTES_I(NAME, N):
    """Return True once every N minutes for the timer named NAME.

    Port of 'static CEveryNMinutes NAME(N); if(NAME)'; the registry on
    the function object emulates the C static local.
    """
    timers = EVERY_N_MINUTES_I.__dict__.setdefault('_timers', {})
    if NAME not in timers:
        timers[NAME] = CEveryNMinutes(N)
    return timers[NAME].ready()
def EVERY_N_HOURS(N):
    """Return True once every N hours (one shared timer per distinct N)."""
    return EVERY_N_HOURS_I('EVERY_N_HOURS_%s' % N, N)
def EVERY_N_HOURS_I(NAME, N):
    """Return True once every N hours for the timer named NAME.

    Port of 'static CEveryNHours NAME(N); if(NAME)'; the registry on
    the function object emulates the C static local.
    """
    timers = EVERY_N_HOURS_I.__dict__.setdefault('_timers', {})
    if NAME not in timers:
        timers[NAME] = CEveryNHours(N)
    return timers[NAME].ready()
class CEveryNMilliseconds(CEveryNMillis):
    # Long-name alias kept for API compatibility with FastLED.
    pass
def EVERY_N_MILLISECONDS(N):
    # Long-name alias; FastLED exposes both spellings.
    return EVERY_N_MILLIS(N)
def EVERY_N_MILLISECONDS_I(NAME, N):
    # Long-name alias; FastLED exposes both spellings.
    return EVERY_N_MILLIS_I(NAME, N)
# Seed for the lib8tion 16-bit PRNG; rand16seed is the mutable state.
RAND16_SEED = 1337
rand16seed = RAND16_SEED
# memset8, memcpy8, memmove8:
# optimized avr replacements for the standard "C" library
# routines memset, memcpy, and memmove.
#
# There are two techniques that make these routines
# faster than the standard avr-libc routines.
# First, the loops are unrolled 2X, meaning that
# the average loop overhead is cut in half.
# And second, the compare-and-branch at the bottom
# of each loop decrements the low byte of the
# counter, and if the carry is clear, it branches
# back up immediately. Only if the low byte math
# causes carry do we bother to decrement the high
# byte and check that result for carry as well.
# Results for a 100-byte buffer are 20-40% faster
# than standard avr-libc, at a cost of a few extra
# bytes of code.
#
#
# #if 0
# # TEST / VERIFICATION CODE ONLY BELOW THIS POINT
# #include <Arduino.h>
# #include "lib8tion.h"
#
# void test1abs( int8_t i)
# {
# Serial.print("abs("); Serial.print(i); Serial.print(") = ");
# int8_t j = abs8(i);
# Serial.print(j); Serial.println(" ");
# }
#
# void testabs()
# {
# delay(5000);
# for( int8_t q = -128; q != 127; q++) {
# test1abs(q);
# }
# for(;;){};
# }
#
#
# void testmul8()
# {
# delay(5000);
# byte r, c;
#
# Serial.println("mul8:");
# for( r = 0; r <= 20; r += 1) {
# Serial.print(r); Serial.print(" : ");
# for( c = 0; c <= 20; c += 1) {
# byte t;
# t = mul8( r, c);
# Serial.print(t); Serial.print(' ');
# }
# Serial.println(' ');
# }
# Serial.println("done.");
# for(;;){};
# }
#
#
# void testscale8()
# {
# delay(5000);
# byte r, c;
#
# Serial.println("scale8:");
# for( r = 0; r <= 240; r += 10) {
# Serial.print(r); Serial.print(" : ");
# for( c = 0; c <= 240; c += 10) {
# byte t;
# t = scale8( r, c);
# Serial.print(t); Serial.print(' ');
# }
# Serial.println(' ');
# }
#
# Serial.println(' ');
# Serial.println("scale8_video:");
#
# for( r = 0; r <= 100; r += 4) {
# Serial.print(r); Serial.print(" : ");
# for( c = 0; c <= 100; c += 4) {
# byte t;
# t = scale8_video( r, c);
# Serial.print(t); Serial.print(' ');
# }
# Serial.println(' ');
# }
#
# Serial.println("done.");
# for(;;){};
# }
#
#
#
# void testqadd8()
# {
# delay(5000);
# byte r, c;
# for( r = 0; r <= 240; r += 10) {
# Serial.print(r); Serial.print(" : ");
# for( c = 0; c <= 240; c += 10) {
# byte t;
# t = qadd8( r, c);
# Serial.print(t); Serial.print(' ');
# }
# Serial.println(' ');
# }
# Serial.println("done.");
# for(;;){};
# }
#
# void testnscale8x3()
# {
# delay(5000);
# byte r, g, b, sc;
# for( byte z = 0; z < 10; z++) {
# r = random8(); g = random8(); b = random8(); sc = random8();
#
# Serial.print("nscale8x3_video( ");
# Serial.print(r); Serial.print(", ");
# Serial.print(g); Serial.print(", ");
# Serial.print(b); Serial.print(", ");
# Serial.print(sc); Serial.print(") = [ ");
#
# nscale8x3_video( r, g, b, sc);
#
# Serial.print(r); Serial.print(", ");
# Serial.print(g); Serial.print(", ");
# Serial.print(b); Serial.print("]");
#
# Serial.println(' ');
# }
# Serial.println("done.");
# for(;;){};
# }
#
# #endif
|
from asyncorm.serializers.serializer import ModelSerializer, ModelSerializerMeta, SerializerMethod
__all__ = ("ModelSerializerMeta", "SerializerMethod", "ModelSerializer")
|
# -*- coding:utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from networkapi.admin_permission import AdminPermission
from networkapi.auth import has_perm
from networkapi.infrastructure.xml_utils import dumps_networkapi, loads
from networkapi.infrastructure.ipaddr import IPv6Address
import logging
from networkapi.requisicaovips.models import RequisicaoVipsError
from networkapi.rest import RestResource, UserNotAuthorizedError
from networkapi.ip.models import Ipv6, IpNotFoundError, IpError
from networkapi.util import is_valid_ipv6, is_valid_int_param
from networkapi.exception import InvalidValueError
from django.forms.models import model_to_dict
class RequestAllVipsIPv6Resource(RestResource):
    # REST resource listing VIP requests associated with a given IPv6
    # address.  NOTE: legacy Python 2 module ('except X, e' syntax).
    log = logging.getLogger('RequestAllVipsIPv6Resource')
    def handle_post(self, request, user, *args, **kwargs):
        """
        Handles POST requests to list all the VIPs related to IPv6.
        URL: vip/ipv6/all/

        Expected XML body: <networkapi><vip><ipv6>...</ipv6>
        <all_prop>0|1</all_prop></vip></networkapi>.  With all_prop=1
        the full VIP records are returned; with 0 only their ids.
        """
        self.log.info("Get all the VIPs related to IPv6")
        try:
            # Commons Validations
            # User permission
            if not has_perm(user, AdminPermission.VIPS_REQUEST, AdminPermission.READ_OPERATION):
                self.log.error(
                    u'User does not have permission to perform the operation.')
                raise UserNotAuthorizedError(None)
            # Business Validations
            # Load XML data
            xml_map, attrs_map = loads(request.raw_post_data)
            # XML data format
            networkapi_map = xml_map.get('networkapi')
            if networkapi_map is None:
                return self.response_error(3, u'There is no value to the networkapi tag of XML request.')
            vip_map = networkapi_map.get('vip')
            if vip_map is None:
                return self.response_error(3, u'There is no value to the vip tag of XML request.')
            # Get XML data
            ip_str = str(vip_map['ipv6'])
            all_prop = str(vip_map['all_prop'])
            # Valid IPv6
            if not is_valid_ipv6(ip_str):
                self.log.error(
                    u'Parameter ipv6 is invalid. Value: %s.', ip_str)
                raise InvalidValueError(None, 'ipv6', ip_str)
            # Valid all_prop: must be the integer 0 or 1
            if not is_valid_int_param(all_prop):
                self.log.error(
                    u'Parameter all_prop is invalid. Value: %s.', all_prop)
                raise InvalidValueError(None, 'all_prop', all_prop)
            all_prop = int(all_prop)
            if all_prop not in (0, 1):
                self.log.error(
                    u'Parameter all_prop is invalid. Value: %s.', all_prop)
                raise InvalidValueError(None, 'all_prop', all_prop)
            # Normalize the address into its eight exploded hextets
            blocks = str(IPv6Address(ip_str).exploded).split(':')
            # Find Ipv6 by blocks to check if it exist
            ipv6 = Ipv6.get_by_blocks(blocks[0], blocks[1], blocks[2], blocks[
                                      3], blocks[4], blocks[5], blocks[6], blocks[7])
            # Business Rules
            list_ips = []
            for ip in ipv6:
                ips_map = dict()
                ips_map = model_to_dict(ip)
                # Find all VIPs related to ipv6
                if all_prop == 1:
                    # Full VIP records
                    ips_map["vips"] = ip.requisicaovips_set.all().values()
                else:
                    # Only VIP ids
                    vips = ip.requisicaovips_set.all().values_list(
                        'id', flat=True)
                    ips_map["vips"] = [int(item) for item in vips]
                list_ips.append(ips_map)
            # Return XML
            vips_map = dict()
            vips_map['ips'] = list_ips
            return self.response(dumps_networkapi(vips_map))
        except InvalidValueError, e:
            return self.response_error(269, e.param, e.value)
        except IpNotFoundError, e:
            # Error 119: IP not registered
            return self.response_error(119)
        except UserNotAuthorizedError:
            return self.not_authorized()
        except (RequisicaoVipsError, IpError):
            # Generic failure (error 1) for VIP/IP layer errors
            return self.response_error(1)
|
import os
import shutil
import tempfile
from .query import Query
from .metadata import Metadata
from .system import system
from . import exceptions
from . import helpers
from . import errors
from . import config
class File(Metadata):
    """File representation

    API      | Usage
    -------- | --------
    Public   | `from frictionless import File`

    Under the hood, File uses available loaders so it can open from local, remote,
    and any other supported schemes. The File class inherits from the Metadata class
    all the metadata's functionality

    ```python
    from frictionless import File
    with File('data/text.txt') as file:
        file.read_text()
    ```

    Parameters:
        source (any): file source
        scheme? (str): file scheme
        format? (str): file format
        hashing? (str): file hashing
        encoding? (str): file encoding
        compression? (str): file compression
        compression_path? (str): file compression path
        control? (dict): file control
        dialect? (dict): table dialect
        query? (dict): table query
        newline? (str): python newline e.g. '\n',
        stats? ({hash: str, bytes: int, rows: int}): stats object

    Raises:
        FrictionlessException: if there is a metadata validation error
    """
    def __init__(
        self,
        source,
        *,
        scheme=None,
        format=None,
        hashing=None,
        encoding=None,
        compression=None,
        compression_path=None,
        control=None,
        dialect=None,
        query=None,
        newline=None,
        stats=None,
    ):
        # Set attributes
        self.setinitial("source", source)
        self.setinitial("scheme", scheme)
        self.setinitial("format", format)
        self.setinitial("hashing", hashing)
        self.setinitial("encoding", encoding)
        self.setinitial("compression", compression)
        self.setinitial("compressionPath", compression_path)
        self.setinitial("control", control)
        self.setinitial("dialect", dialect)
        self.setinitial("query", query)
        self.setinitial("newline", newline)
        self.setinitial("stats", stats)
        self.__loader = None
        # Detect attributes (scheme/format from the source; compression from
        # the outer extension, re-detecting on the inner name if compressed)
        detect = helpers.detect_source_scheme_and_format(source)
        self.__detected_compression = config.DEFAULT_COMPRESSION
        self.__detected_compression_path = config.DEFAULT_COMPRESSION_PATH
        if detect[1] in config.COMPRESSION_FORMATS:
            self.__detected_compression = detect[1]
            # Strip the ".<compression>" suffix; assumes a string source here
            source = source[: -len(detect[1]) - 1]
            if compression_path:
                source = os.path.join(source, compression_path)
            detect = helpers.detect_source_scheme_and_format(source)
        self.__detected_scheme = detect[0] or config.DEFAULT_SCHEME
        self.__detected_format = detect[1] or config.DEFAULT_FORMAT
        # Initialize file
        super().__init__()
    def __enter__(self):
        if self.closed:
            self.open()
        return self
    def __exit__(self, type, value, traceback):
        self.close()
    def __iter__(self):
        self.__read_raise_closed()
        # BUG FIX: use the loader's public text_stream property.  The old
        # 'self.__loader.__text_stream' was name-mangled inside this class
        # to '_File__text_stream' and raised AttributeError on the loader.
        return iter(self.__loader.text_stream)
    @property
    def path(self):
        """
        Returns:
            str: file path
        """
        return self.source if isinstance(self.source, str) else "memory"
    @Metadata.property
    def source(self):
        """
        Returns:
            any: file source
        """
        return self.get("source")
    @Metadata.property
    def scheme(self):
        """
        Returns:
            str?: file scheme
        """
        return self.get("scheme", self.__detected_scheme)
    @Metadata.property
    def format(self):
        """
        Returns:
            str?: file format
        """
        return self.get("format", self.__detected_format)
    @Metadata.property
    def hashing(self):
        """
        Returns:
            str?: file hashing
        """
        return self.get("hashing", config.DEFAULT_HASHING)
    @Metadata.property
    def encoding(self):
        """
        Returns:
            str?: file encoding
        """
        return self.get("encoding", config.DEFAULT_ENCODING)
    @Metadata.property
    def compression(self):
        """
        Returns:
            str?: file compression
        """
        return self.get("compression", self.__detected_compression)
    @Metadata.property
    def compression_path(self):
        """
        Returns:
            str?: file compression path
        """
        return self.get("compressionPath", self.__detected_compression_path)
    @Metadata.property
    def control(self):
        """
        Returns:
            Control?: file control
        """
        control = self.get("control")
        if control is None:
            control = system.create_control(self, descriptor=control)
            return self.metadata_attach("control", control)
        return control
    @Metadata.property
    def dialect(self):
        """
        Returns:
            Dialect?: table dialect
        """
        dialect = self.get("dialect")
        if dialect is None:
            dialect = system.create_dialect(self, descriptor=dialect)
            return self.metadata_attach("dialect", dialect)
        return dialect
    @Metadata.property
    def query(self):
        """
        Returns:
            Query?: table query
        """
        query = self.get("query")
        if query is None:
            query = Query()
            return self.metadata_attach("query", query)
        return query
    @Metadata.property
    def newline(self):
        """
        Returns:
            str?: file newline
        """
        return self.get("newline")
    @Metadata.property
    def stats(self):
        """
        Returns:
            dict: file stats
        """
        return self.get("stats")
    @Metadata.property(cache=False)
    def byte_stream(self):
        """File byte stream

        The stream is available after opening the file

        Returns:
            io.ByteStream: file byte stream
        """
        if self.__loader:
            return self.__loader.byte_stream
    @Metadata.property(cache=False)
    def text_stream(self):
        """File text stream

        The stream is available after opening the file

        Returns:
            io.TextStream: file text stream
        """
        if self.__loader:
            return self.__loader.text_stream
    # Expand
    def expand(self):
        """Expand metadata"""
        self.setdefault("scheme", self.scheme)
        self.setdefault("format", self.format)
        self.setdefault("hashing", self.hashing)
        self.setdefault("encoding", self.encoding)
        self.setdefault("compression", self.compression)
        self.setdefault("compressionPath", self.compression_path)
    # Open/close
    def open(self):
        """Open the file as "io.open" does"""
        self.close()
        try:
            self.stats = {"hash": "", "bytes": 0, "rows": 0}
            # NOTE: handle cases like Inline/SQL/etc
            self.__loader = system.create_loader(self)
            self.__loader.open()
            return self
        except Exception:
            # Don't leak a half-open loader on failure
            self.close()
            raise
    def close(self):
        """Close the file as "filelike.close" does"""
        if self.__loader:
            self.__loader.close()
        self.__loader = None
    @property
    def closed(self):
        """Whether the file is closed

        Returns:
            bool: if closed
        """
        return self.__loader is None
    # Read
    def read_bytes(self):
        """Read bytes from the file

        Returns:
            bytes: file bytes
        """
        self.__read_raise_closed()
        return self.__loader.byte_stream.read1()
    def read_text(self):
        """Read text from the file

        Returns:
            str: file text
        """
        result = ""
        self.__read_raise_closed()
        for line in self.__loader.text_stream:
            result += line
        return result
    def __read_raise_closed(self):
        # Guard shared by all read paths: reading requires an open loader
        if not self.__loader:
            note = 'the file has not been opened by "file.open()"'
            raise exceptions.FrictionlessException(errors.Error(note=note))
    # Write
    def write(self, target):
        """Write the file to the target

        Parameters:
            target (str): target path
        """
        with tempfile.NamedTemporaryFile(delete=False) as file:
            shutil.copyfileobj(self.byte_stream, file)
        helpers.move_file(file.name, target)
    # Metadata
    metadata_strict = True
    metadata_profile = {  # type: ignore
        "type": "object",
        "required": ["source"],
        "properties": {
            "source": {},
            "scheme": {"type": "string"},
            "format": {"type": "string"},
            "hashing": {"type": "string"},
            "encoding": {"type": "string"},
            "compression": {"type": "string"},
            "compressionPath": {"type": "string"},
            # BUG FIX: this key was misspelled "contorl", so control
            # descriptors were never validated against the profile.
            "control": {"type": "object"},
            "dialect": {"type": "object"},
            "query": {"type": "object"},
            "newline": {"type": "string"},
            "stats": {
                "type": "object",
                "required": ["hash", "bytes", "rows"],
                "properties": {
                    "hash": {"type": "string"},
                    "bytes": {"type": "number"},
                    "rows": {"type": "number"},
                },
            },
        },
    }
    def metadata_process(self):
        super().metadata_process()
        # Control: materialize a dict descriptor into a Control object
        control = self.get("control")
        if control is not None:
            control = system.create_control(self, descriptor=control)
            dict.__setitem__(self, "control", control)
        # Dialect: materialize a dict descriptor into a Dialect object
        dialect = self.get("dialect")
        if dialect is not None:
            dialect = system.create_dialect(self, descriptor=dialect)
            dict.__setitem__(self, "dialect", dialect)
        # Query: materialize a dict descriptor into a Query object
        query = self.get("query")
        if query is not None:
            query = Query(query)
            dict.__setitem__(self, "query", query)
|
import sys
from typing import Union, Any, Callable
def esc(*codes: Union[int, str]) -> str:
    """Build an ANSI escape sequence from the given SGR codes."""
    # str(code) always yields str, so the bytes-coercion helper used by
    # the original here was a no-op and is omitted.
    return '\x1b[' + ';'.join(str(code) for code in codes) + 'm'
def t_(b: Union[bytes, Any]) -> str:
    """Coerce bytes to str (UTF-8 decode); pass anything else through."""
    if isinstance(b, bytes):
        return b.decode()
    return b
def make_color(start, end: str) -> Callable[[str], str]:
    """Return a function wrapping text in start/end ANSI codes when
    stdout is a TTY (plain pass-through otherwise)."""
    def color_func(s: str) -> str:
        if not sys.stdout.isatty():
            return s
        return start + t_(s) + end
    return color_func
# Closing sequences restoring default foreground, background, and
# highlight (bold/inverse/color) state respectively.
FG_END = esc(39)
BG_END = esc(49)
HL_END = esc(22, 27, 39)
class Color:
    """Namespace of ANSI styling helpers; each attribute is a function
    that wraps a string in the matching escape codes (TTY only)."""
    # Foreground colors (SGR 30-37).
    black = make_color(esc(30), FG_END)
    red = make_color(esc(31), FG_END)
    green = make_color(esc(32), FG_END)
    yellow = make_color(esc(33), FG_END)
    blue = make_color(esc(34), FG_END)
    magenta = make_color(esc(35), FG_END)
    cyan = make_color(esc(36), FG_END)
    white = make_color(esc(37), FG_END)
    # Background colors (SGR 40-47).
    black_bg = make_color(esc(40), BG_END)
    red_bg = make_color(esc(41), BG_END)
    green_bg = make_color(esc(42), BG_END)
    yellow_bg = make_color(esc(43), BG_END)
    blue_bg = make_color(esc(44), BG_END)
    magenta_bg = make_color(esc(45), BG_END)
    cyan_bg = make_color(esc(46), BG_END)
    white_bg = make_color(esc(47), BG_END)
    # Highlight variants: bold + color + inverse video.
    black_hl = make_color(esc(1, 30, 7), HL_END)
    red_hl = make_color(esc(1, 31, 7), HL_END)
    green_hl = make_color(esc(1, 32, 7), HL_END)
    yellow_hl = make_color(esc(1, 33, 7), HL_END)
    blue_hl = make_color(esc(1, 34, 7), HL_END)
    magenta_hl = make_color(esc(1, 35, 7), HL_END)
    cyan_hl = make_color(esc(1, 36, 7), HL_END)
    white_hl = make_color(esc(1, 37, 7), HL_END)
    # Text attributes with their specific reset codes.
    bold = make_color(esc(1), esc(22))
    italic = make_color(esc(3), esc(23))
    underline = make_color(esc(4), esc(24))
    strike = make_color(esc(9), esc(29))
    blink = make_color(esc(5), esc(25))
|
# Generated by Django 3.1.4 on 2021-01-23 07:58
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the 'process' app."""
    dependencies = [
        ('process', '0014_auto_20210123_1610'),
    ]
    operations = [
        # Drop the old 'process' field from the process model.
        migrations.RemoveField(
            model_name='process',
            name='process',
        ),
        # Add boolean flag 曜日ごと ("per day of week"), default off.
        migrations.AddField(
            model_name='process',
            name='曜日ごと',
            field=models.BooleanField(default=False, verbose_name='曜日ごと'),
        ),
        # Add boolean flag 7日平均 ("7-day average"), default off.
        migrations.AddField(
            model_name='process',
            name='7日平均',
            field=models.BooleanField(default=False, verbose_name='7日平均'),
        ),
    ]
|
# -*- coding: utf-8 -*-
"""
forms.py
:copyright: (c) 2014 by Openlabs Technologies & Consulting (P) Limited
:license: BSD, see LICENSE for more details.
"""
from flask_wtf import Form
from wtforms import TextField, TextAreaField, SelectField, DecimalField, \
validators
from wtforms.validators import ValidationError
from nereid import abort
from trytond.pool import Pool
class GiftCardForm(Form):
    """
    A form for purchasing gift cards.

    Bound to a single gift-card product; recipient email becomes
    mandatory for virtual/combined delivery, and an 'open amount' may be
    entered when the product allows it.
    """
    recipient_name = TextField('Recipient Name', [validators.Optional()])
    recipient_email = TextField('Recipient Email')
    message = TextAreaField('Message', [validators.Optional()])
    # choices filled per-product in fill_choices(); 0 means "open amount"
    selected_amount = SelectField('Select Amount', choices=[], coerce=int)
    open_amount = DecimalField('Amount', default=0)
    def __init__(self, product, *args, **kwargs):
        # product: a product.product record; 400 if not, and the search
        # below additionally requires it to be flagged as a gift card.
        super(GiftCardForm, self).__init__(*args, **kwargs)
        Product = Pool().get('product.product')
        if not isinstance(product, Product):
            abort(400)
        try:
            self.gc_product, = Product.search([
                ('id', '=', product.id),
                ('is_gift_card', '=', True)
            ], limit=1)
        except ValueError as e:
            # Unpacking failed: product exists but is not a gift card.
            e.message = 'Expected Gift Card, Got %s' % (product.rec_name)
            raise
        self.fill_choices()
        if self.gc_product.gift_card_delivery_mode in ['virtual', 'combined']:
            # Card is emailed: recipient email is required and validated.
            self.recipient_email.validators = [
                validators.Required(), validators.Email()
            ]
        else:
            self.recipient_email.validators = [
                validators.Optional(), validators.Email()
            ]
    def fill_choices(self):
        # Build the amount dropdown from the product's configured prices,
        # prepending a "Set my Own" entry (id 0) when open amounts are allowed.
        choices = []
        if self.gc_product.allow_open_amount:
            choices = [(0, 'Set my Own')]
        self.selected_amount.choices = choices + [
            (p.id, p.price) for p in self.gc_product.gift_card_prices
        ]
    def validate_open_amount(form, field):
        # wtforms inline validator (validate_<fieldname> convention).
        # Only enforced when "Set my Own" (id 0) is selected.
        if not form.gc_product.allow_open_amount:
            return
        if (form.selected_amount.data == 0) and not (
            form.gc_product.gc_min <= field.data <= form.gc_product.gc_max
        ):
            raise ValidationError(
                "Amount between %s and %s is allowed." % (
                    form.gc_product.gc_min, form.gc_product.gc_max
                )
            )
|
# Distributed under the Apache License, Version 2.0.
# See accompanying NOTICE file for details.
import os
import cv2
import json
import ntpath
import numpy as np
from scipy.spatial import cKDTree
from trailblazer.traffic.vital import Detection
from trailblazer.traffic.vital import Track
from trailblazer.traffic.vital import TrackSet
from trailblazer.traffic.vital import Event
from trailblazer.traffic.base_layer import BaseLayer
from trailblazer.traffic.road_network import RoadNetwork
from trailblazer.utils import path_utils as path_utils, pixel_utils as px
"""
** kw18 format ***
Index Metadata
0 Track-id
1 Track-length (# of detections)
2 Frame-number (-1 if not available)
3-4 Tracking-plane-loc(x,y) (Could be same as World-loc)
5-6 Velocity(x,y)
7-8 Image-loc(x,y)
9-12 Img-bbox(top-left x, top-left y, bottom-rightB x, bottom-right y)
13 Area (0 - when not available)
14-16 World-loc(x,y,z) (longitude, latitude, 0 when not available)
17 Timestamp(-1 if not available)
18 Track-confidence(-1_when_not_available)
"""
def read_kw18(kw18_file):
    """Parse a kw18 tracks file into a dict mapping track id -> Track.

    The column layout is documented in the module docstring above;
    lines starting with '#' are skipped.
    """
    tracks = {}
    with open(kw18_file) as handle:
        for raw_line in handle:
            if raw_line.startswith('#'):
                continue  # header / comment line
            cols = raw_line.split(' ')
            tid = int(cols[0])
            det = Detection()
            det.frame_number = int(cols[2])
            det.tracking_plane_loc_x = float(cols[3])
            det.tracking_plane_loc_y = float(cols[4])
            det.velocity_x = float(cols[5])
            det.velocity_y = float(cols[6])
            det.image_loc_x = float(cols[7])
            det.image_loc_y = float(cols[8])
            det.image_bbox_TL_x = float(cols[9])
            det.image_bbox_TL_y = float(cols[10])
            det.image_bbox_BR_x = float(cols[11])
            det.image_bbox_BR_y = float(cols[12])
            det.area = float(cols[13])
            det.lon = float(cols[14])
            det.lat = float(cols[15])
            if det.lat < -60 or det.lat > 60:
                # Some writers emit lat before lon; swap when the parsed
                # latitude is implausible.  NOTE(review): heuristic — it
                # misfires for genuine |lat| > 60 data; confirm with the
                # data producers.
                det.lon = float(cols[15])
                det.lat = float(cols[14])
            det.alt = float(cols[16])
            det.timestamp = float(cols[17])
            if tid in tracks:
                track = tracks[tid]
            else:
                track = Track()
                track.id = tid
                tracks[tid] = track
            track.detections.append(det)
    for _, track in tracks.items():
        track.compute_bounds()
    return tracks
def write_kw18(track_set, kw18_file):
    """Write a track set to disk in kw18 format.

    One line is emitted per detection, terminated by a ``-1`` confidence
    placeholder.

    Parameters
    ----------
    track_set : dict
        Mapping of track id -> Track; each Track carries an ``id`` and a
        list of ``detections`` with the kw18 per-detection fields.
    kw18_file : str
        Output path for the kw18 file.
    """
    kw18_header_string = '# 1:Track-id 2:Track-length 3:Frame-number' + \
                         ' 4-5:Tracking-plane-loc(x,y) 6-7:velocity(x,y)' + \
                         ' 8-9:Image-loc(x,y)' + \
                         ' 10-13:Img-bbox(TL_x,TL_y,BR_x,BR_y)' + \
                         ' 14:Area 15-17:World-loc(x,y,z) 18:timestamp' + \
                         ' 19:object-type-id 20:activity-type-id\n'
    # Use a context manager so the file is flushed and closed even if a
    # detection is missing an attribute (the original leaked the handle).
    with open(kw18_file, 'w') as kw18:
        kw18.write(kw18_header_string)
        for tid, track in track_set.items():
            for detection in track.detections:
                kw18.write(str(track.id) + ' ' +
                           str(len(track.detections)) + ' ' +
                           str(detection.frame_number) + ' ' +
                           str(detection.tracking_plane_loc_x) + ' ' +
                           str(detection.tracking_plane_loc_y) + ' ' +
                           str(detection.velocity_x) + ' ' +
                           str(detection.velocity_y) + ' ' +
                           str(detection.image_loc_x) + ' ' +
                           str(detection.image_loc_y) + ' ' +
                           str(detection.image_bbox_TL_x) + ' ' +
                           str(detection.image_bbox_TL_y) + ' ' +
                           str(detection.image_bbox_BR_x) + ' ' +
                           str(detection.image_bbox_BR_y) + ' ' +
                           str(detection.area) + ' ' +
                           str(detection.lon) + ' ' +
                           str(detection.lat) + ' ' +
                           str(detection.alt) + ' ' +
                           str(detection.timestamp) + ' -1\n'
                           )
def read_pNeuma(csv_file):
    """Read a pNeuma trajectory CSV into a dict of track id -> Track.

    Each data row describes one vehicle: 4 leading columns
    (id; type; traveled distance; average speed) followed by repeating
    groups of 6 trajectory samples.  Samples are skipped until the first
    one with non-zero values in the d+3/d+4 columns (the stationary
    lead-in), after which every sample is kept.

    Returns
    -------
    dict
        Mapping of int track id -> Track.
    """
    line_cnt = 0
    track_set = {}
    with open(csv_file) as csv:
        for line in csv:
            line_cnt += 1
            # Skip comments and the header row.
            if line.startswith('#') or line_cnt == 1:
                continue
            columns = line.split(';')
            track_id = int(columns[0])
            # Leading metadata columns; parsed (and thereby validated) but
            # otherwise unused.
            _vehicle_type = columns[1]
            _travel_dist = float(columns[2])
            _avg_speed = float(columns[3])
            if track_id not in track_set:
                track = Track()
                track.id = track_id
                track_set[track_id] = track
            else:
                track = track_set[track_id]
            print("Processing track " + str(track_id))
            movement_start = False
            for d in range(4, len(columns) - 1, 6):
                # Skip the stationary lead-in before movement starts.
                if not movement_start and float(columns[d + 3]) == 0 and float(columns[d + 4]) == 0:
                    continue
                movement_start = True
                detection = Detection()
                detection.timestamp = float(columns[d + 5])
                # pNeuma samples at 25 Hz (0.04 s per frame).
                detection.frame_number = int(round(detection.timestamp / 0.04))
                # columns[d+2] is speed
                detection.tracking_plane_loc_x = 0
                detection.tracking_plane_loc_y = 0
                # NOTE(review): columns d+3/d+4 are pNeuma's acceleration
                # columns but are stored here as velocity — confirm intended.
                detection.velocity_x = float(columns[d + 3])
                detection.velocity_y = float(columns[d + 4])
                detection.image_loc_x = 0
                detection.image_loc_y = 0
                detection.image_bbox_TL_x = 0
                detection.image_bbox_TL_y = 0
                detection.image_bbox_BR_x = 0
                detection.image_bbox_BR_y = 0
                detection.area = 0
                detection.lon = float(columns[d + 1])
                detection.lat = float(columns[d])
                detection.alt = 0
                track.detections.append(detection)
    # Compute bounds once per track instead of once per input line.
    for track in track_set.values():
        track.compute_bounds()
    return track_set
def kw18_to_json(kw18_fname, json_fname):
    """Convert a kw18 track file into a JSON list of per-detection dicts.

    The first line of the kw18 file is assumed to be the header and is
    skipped.  The first three columns become ints, the rest floats, and
    each row is keyed by the canonical kw18 column names.
    """
    columns = ('id', 'len', 'frame', 'tx', 'ty', 'vx', 'vy', 'ix', 'iy',
               'left', 'top', 'right', 'bottom', 'area', 'x', 'y', 'alt', 't', 'conf')
    records = []
    with open(kw18_fname) as f:
        for line in f.readlines()[1:]:
            raw = line.split()
            values = [int(v) for v in raw[:3]] + [float(v) for v in raw[3:]]
            records.append(dict(zip(columns, values)))
    with open(json_fname, 'wt') as f:
        json.dump(records, f)
def write_kwe(event_set, kwe_file):
    """Serialize an event set to a kwe file.

    Every event that owns at least one track is written as a fixed header
    (step, type, id, start/stop time and frame, probability, bounding box,
    track count), followed by the member track ids, followed by each
    track's start/stop time and frame.  Events without tracks are skipped.
    """
    with open(kwe_file, 'w') as of:
        for eId, e in event_set.items():
            if len(e.tracks) == 0:
                continue
            e.compute_bounds()
            header = [
                '0',                  # Step (??)
                str(e.type),
                str(e.id),
                str(e.start_time),    # Start time
                str(e.start_frame),   # Start frame
                str(e.end_time),      # Stop time
                str(e.end_frame),     # Stop frame
                '1.0',                # Probability
                str(e.min_x),         # Bounding box min x
                str(e.min_y),         # Bounding box min y
                str(e.max_x),         # Bounding box max x
                str(e.max_y),         # Bounding box max y
                str(len(e.tracks)),   # Number of tracks in event
            ]
            of.write(' '.join(header))
            # Member track ids.
            for track in e.tracks:
                of.write(' ' + str(track.id))
            # Per-track time/frame spans.
            for track in e.tracks:
                of.write(' ' + str(track.start_time) +
                         ' ' + str(track.start_frame) +
                         ' ' + str(track.end_time) +
                         ' ' + str(track.end_frame))
def create_vpView(track_set, out_dir, osm_fname=None, base_layer_fname=None):
    """Prepare a vpView project for ``track_set``.

    The base layer is either loaded from ``base_layer_fname``, rendered
    from an OSM road network, or synthesized from the tracks themselves.
    Detection image coordinates are derived from that layer before the
    project files are written.
    """
    if base_layer_fname is not None:
        base_layer = BaseLayer.open(base_layer_fname)
    elif osm_fname is not None:
        print('Creating base layer from osm file...')
        road_network = RoadNetwork.from_osm(osm_fname)
        base_layer = BaseLayer.from_road_network(road_network)
        base_layer_fname = out_dir + '/base_layer.tif'
        print('Writing out the base image...' + base_layer_fname)
        base_layer.save_geotiff(base_layer_fname)
    else:
        print('Creating image base layer from tracks...')
        base_layer = BaseLayer.from_track_list(track_set)
        base_layer_fname = out_dir + '/base_layer.tif'
        print('Writing out the base image...' + base_layer_fname)
        base_layer.save_geotiff(base_layer_fname)
    # Stamp per-detection image coordinates derived from the base layer.
    print("Converting world coordinates to image coordinates")
    px.world_to_image_coordinates(base_layer, track_set)
    write_vpView_files(track_set, out_dir, base_layer_fname)
def write_vpView_files(track_set, out_dir, base_layer):
    """Write the vpView project files (image list, kw18 tracks, .prj).

    ``base_layer`` is either a BaseLayer instance (saved into ``out_dir``)
    or the path of an already-saved base-layer image.
    """
    if isinstance(base_layer, BaseLayer):
        print('Writing out the base image...')
        base_layer_fname = os.path.abspath(out_dir + '/base_layer.tif').replace('\\', '/')
        base_layer.save_geotiff(base_layer_fname)
    else:
        # Assume it is the string location of the base_layer file.
        base_layer_fname = os.path.abspath(base_layer).replace('\\', '/')
    root, ext = os.path.splitext(base_layer_fname)
    vpView_base_layer_fname = root + ext
    out_dir = os.path.abspath(out_dir).replace('\\', '/')
    path_utils.clean_directory(out_dir)
    # Relative name for use in the vpView image list.
    vpView_base_layer_rname = os.path.relpath(vpView_base_layer_fname, out_dir).replace('\\', '/')
    print('Creating a image list file referencing the base image...')
    imlist_fname = out_dir + '/image_list.txt'
    min_frame, max_frame = TrackSet.frame_bounds(track_set)
    with open(imlist_fname, 'w') as f:
        # One entry per frame, all pointing at the same static base layer.
        f.writelines(vpView_base_layer_rname + '\n' for _ in range(max_frame + 1))
    print('Writing out the new kw18 file with image coordinates...')
    out_kw18_fname = out_dir + '/tracks.kw18'
    write_kw18(track_set, out_kw18_fname)
    print('Writing out the vpView project file...')
    with open(out_dir + '/vpview.prj', 'w') as f:
        f.write('DataSetSpecifier=./' + ntpath.basename(imlist_fname) + '\n')
        f.write('TracksFile=./' + ntpath.basename(out_kw18_fname) + '\n')
    print('Finished!')
def recolor_base_layer(input, output, bg_color, road_color, build_low_color, build_high_color):
    """Re-map the base-layer image onto a new 257-entry color palette.

    Index 0 is the background, indices 1..255 interpolate between the low
    and high building colors, and the last index is the road color.
    """
    img = cv2.imread(input)[..., ::-1]  # reverse channel order
    # Palette index: channel 2, shifted down by one wherever channel 0 is non-zero.
    indexed = img[..., 2].astype(np.int16) - (img[..., 0] != 0)
    ramps = [np.linspace(lo, hi, 256)
             for lo, hi in zip(build_low_color, build_high_color)]
    build_colors = np.stack(ramps, axis=-1)[1:]
    palette = np.concatenate(([bg_color], build_colors, [road_color])).round()
    assert ((0 <= palette) & (palette < 256)).all()
    cv2.imwrite(output, palette.astype(np.uint8)[indexed])
def downsample_track_set(track_set, rate_Hz):
    """Downsample every track to ``rate_Hz``, preserving the originals.

    Detections are copied into new tracks, so the input set is untouched.
    No kinematic interpolation is performed; only detections whose
    timestamp lands exactly on the sampling grid survive, which works best
    when the source data is consistently sampled.
    """
    period = 1 / rate_Hz
    downsampled = {}
    for tid, track in track_set.items():
        thinned = Track()
        thinned.id = tid
        downsampled[tid] = thinned
        for det in track.detections:
            # NOTE(review): exact float modulo — assumes timestamps are exact
            # multiples of the sample period; off-grid samples are dropped.
            # TODO check the time difference and interpolate if the track
            # time is not on the time step.
            if det.timestamp % period != 0:
                continue
            kept = det.copy()
            kept.frame_number = int(round(kept.timestamp / period))
            thinned.detections.append(kept)
        thinned.compute_bounds()
    return downsampled
def pd_track_set(track_set, probability_of_detection):
    """Randomly thin each track's detections.

    Every detection independently survives with probability
    ``probability_of_detection``.  The track set is mutated in place and
    returned as a convenience.
    """
    for track in track_set.values():
        rolls = np.random.rand(len(track.detections))
        keep_mask = rolls <= probability_of_detection
        track.detections = list(np.array(track.detections)[keep_mask])
        track.compute_bounds()
    return track_set
def generate_false_alarms(time_bounds, frame_bounds, base_layer, false_alarm_rate):
    """Generate false-alarm detections over the base layer's extent.

    ``false_alarm_rate`` is expressed in false detections per km^2 per
    minute.  The number of detections drawn is Poisson-distributed around
    rate * area * interval; each detection gets a uniformly random image
    position on a uniformly random frame.

    Returns
    -------
    list
        New Detection objects (not attached to any track).
    """
    ret = base_layer.extent_meters
    area = (ret[1] - ret[0]) * (ret[3] - ret[2]) / 1e6  # km^2
    # Nominal bounding box of a false detection (meters).
    bb_w, bb_h = 2, 2
    gsd = base_layer.gsd[0]
    nframes = frame_bounds[1] - frame_bounds[0] + 1
    times = np.linspace(time_bounds[0], time_bounds[1], nframes)
    # Scale the closed time span up by one frame period.
    # NOTE(review): divides by zero when nframes == 1 — confirm callers
    # always supply at least two frames.
    interval = (time_bounds[1] - time_bounds[0]) * (nframes / (nframes - 1))
    expected = false_alarm_rate * area * interval / 60
    n_false = np.random.poisson(lam=expected)
    out_dets = []
    # Idiom fix: was `for i, _ in enumerate(range(...))` with `i` unused.
    for _ in range(n_false):
        ind = np.random.randint((frame_bounds[1] - frame_bounds[0]) + 1)
        frame_number = frame_bounds[0] + ind
        timestamp = times[ind]
        xc = np.random.rand() * base_layer.res_x
        yc = np.random.rand() * base_layer.res_y
        bbox = [xc - (bb_w / gsd),
                yc + (bb_h / gsd),
                xc + (bb_w / gsd),
                yc - (bb_h / gsd)]
        area = bb_w * bb_h
        lon_lat = base_layer.lon_lat_from_image_coords([xc, yc])
        world_xc, world_yc = base_layer.meters_from_image_coords([xc, yc])
        det = Detection()
        det.frame_number = frame_number
        det.tracking_plane_loc_x = world_xc
        det.tracking_plane_loc_y = world_yc
        det.image_loc_x = xc
        det.image_loc_y = yc
        det.image_bbox_TL_x = bbox[0]
        det.image_bbox_TL_y = bbox[1]
        det.image_bbox_BR_x = bbox[2]
        det.image_bbox_BR_y = bbox[3]
        det.area = area
        det.lon = lon_lat[0]
        det.lat = lon_lat[1]
        det.alt = 0
        det.timestamp = timestamp
        out_dets.append(det)
    return out_dets
def pfa_track_set(start_id, time_bounds, frame_bounds, base_layer, false_alarm_rate):
    """Build a new track set of single-detection false-alarm tracks.

    ``false_alarm_rate`` is in false detections per km^2 per minute; new
    track ids are assigned consecutively starting at ``start_id + 1``.
    """
    fa_dets = generate_false_alarms(time_bounds, frame_bounds,
                                    base_layer, false_alarm_rate)
    pfa_tracks = {}
    next_id = start_id + 1
    for det in fa_dets:
        trk = Track()
        trk.id = next_id
        trk.detections = [det]
        trk.compute_bounds()
        pfa_tracks[next_id] = trk
        next_id += 1
    return pfa_tracks
def add_false_alarms(track_set, base_layer, false_alarm_rate):
    """Merge generated false-alarm tracks into ``track_set``.

    ``false_alarm_rate`` is in false detections per km^2 per minute.  New
    track ids start above the current maximum id.  Mutates ``track_set``
    in place and returns it as a convenience.
    """
    highest_id = max(track_set.keys())
    time_bounds = TrackSet.time_bounds(track_set)
    frame_bounds = TrackSet.frame_bounds(track_set)
    false_tracks = pfa_track_set(highest_id, time_bounds, frame_bounds,
                                 base_layer, false_alarm_rate)
    track_set.update(false_tracks)
    return track_set
def clean_track_set(track_set):
    """Turn all detections into single-detection tracks.

    Three passes:
      1. Speed filter: drop detections that move slower than 0.1 m/s
         relative to the last kept detection of their track.
      2. Merge: detections landing on the same pixel in the same frame are
         averaged together.
      3. Explode: each surviving detection becomes its own one-detection
         track with a fresh sequential id.

    Returns
    -------
    dict
        New mapping of track id -> single-detection Track.
    """
    # Pass 1: copy tracks, keeping only "moving" detections.
    moving_tracks = {}
    for tid, track in track_set.items():
        new_trk = Track()
        new_trk.id = tid
        moving_tracks[tid] = new_trk
        if len(track.detections) == 0:
            # Replaced a leftover debug print with an actionable message.
            print('Warning: track %s has no detections; skipping' % tid)
            continue
        last_det = track.detections[0].copy()
        new_trk.detections.append(last_det)
        for det in track.detections[1:]:
            # Check the average speed between this detection and the last
            # kept detection; drop it when below the threshold.
            dt = det.timestamp - last_det.timestamp
            if dt <= 0:
                # Duplicate or out-of-order timestamp: speed is undefined
                # (previously a ZeroDivisionError); skip the detection.
                continue
            avg_speed = Detection.dist(det, last_det) / dt
            if avg_speed > 0.1:  # m/s
                new_trk.detections.append(det)
                last_det = det.copy()
    # Pass 2: bucket moving detections by frame, merging same-pixel hits.
    frames = {}
    for tid, track in moving_tracks.items():
        for det in track.detections:
            if det.frame_number not in frames:
                frames[det.frame_number] = []
            frame_dets = frames[det.frame_number]
            if len(frame_dets) == 0:  # Always add a detection if frame is empty
                frame_dets.append(det)
                continue
            # Look for a detection already in this frame occupying the same
            # integer pixel; combine them when found.
            px_match = False
            for d in frame_dets:
                if int(d.image_loc_x) == int(det.image_loc_x) and \
                        int(d.image_loc_y) == int(det.image_loc_y):
                    # Same pixel, let's combine them
                    # TODO AVERAGE PIXEL LOCATIONS
                    d.average(det)
                    px_match = True
                    break
            if not px_match:
                frame_dets.append(det)
    # Pass 3: put each detection on its own track.
    new_tid = 0
    det_set = {}
    for fid, frame in frames.items():
        for det in frame:
            trk = Track()
            trk.id = new_tid
            trk.detections.append(det)
            trk.compute_bounds()
            det_set[trk.id] = trk
            new_tid += 1
    return det_set
def occlude_track_set(track_set, occlusion_mask_layer):
    """Drop detections that fall on occluded pixels of a mask layer.

    ``occlusion_mask_layer`` is a layer whose image's blue channel encodes
    building occlusion (0 = not occluded).  Each detection's lon/lat is
    projected into the mask; detections landing on occluded pixels are
    removed.  Mutates ``track_set`` in place and returns it.
    """
    # Boolean mask that is True where the pixel is NOT occluded
    # (blue value of 0 indicates no building occlusion).
    mask = occlusion_mask_layer.image[:, :, 2] == 0
    for tid, track in track_set.items():
        kept_detections = []
        for detection in track.detections:
            im_pt = occlusion_mask_layer.image_coords_from_lon_lat(
                detection.lon,
                detection.lat)
            # Bug fix: np.int was removed in NumPy 1.24; use the builtin int.
            im_pt = np.round(im_pt).astype(int)
            # Clamp to mask bounds; the mask is indexed transposed (x, y).
            clipped_im_pt = im_pt.clip([0, 0], np.array(mask.T.shape) - 1)
            if not (im_pt == clipped_im_pt).all():
                print(f"Warning: clipping coordinates to mask bounds ({im_pt} "
                      f"to {clipped_im_pt})")
            if mask.T[tuple(clipped_im_pt)]:
                # The pixel is not occluded
                kept_detections.append(detection)
        track.detections = kept_detections
        track.compute_bounds()
    return track_set
def pdintersections_track_set(track_set,
                              base_layer,
                              road_network,
                              probability_of_detection=1.0,
                              radius_m=10):
    """Randomly drop detections as tracks pass through road intersections.

    For each track, every intersection gets a single random roll; any
    detection within ``radius_m`` meters of an intersection whose roll
    exceeds ``probability_of_detection`` is dropped.  Mutates
    ``track_set`` in place and returns it as a convenience.
    """
    intersection_xy = [base_layer.meters_from_lon_lat(*xy) for xy in
                       road_network.node_position(
                           road_network._intersection_nodes)]
    tree = cKDTree(intersection_xy)
    for track in track_set.values():
        rolls = {}
        kept = []
        for det in track.detections:
            det_xy = base_layer.meters_from_lon_lat(det.lon, det.lat)
            dropped = False
            for idx in tree.query_ball_point(det_xy, radius_m):
                # One roll per intersection per track.
                if idx not in rolls:
                    rolls[idx] = np.random.rand()
                if rolls[idx] > probability_of_detection:
                    dropped = True
            if not dropped:
                kept.append(det)
        track.detections = kept
        track.compute_bounds()
    return track_set
# Computes detection image coordinates via the provided base layer
# using the detections' lon and lat. Then converts these resampled
# images coordinates back out to world coordinates.
# Modifies the track_set in place and returns it as a convenience
def compute_gsd_world_coordinates(track_set, base_layer):
    """Quantize detection world coordinates to the base layer's GSD.

    Projects each detection's lon/lat into base-layer image coordinates,
    then back out to world coordinates, so the world coordinates pick up
    the rounding imposed by the layer's ground sample distance.
    """
    # Compute the image coordinates based on the world coordinates
    px.world_to_image_coordinates(base_layer, track_set)
    # Now compute the world coordinates based on those image coordinates
    # Depending on the image GSD, the world coordinates can significantly degrade
    px.image_to_world_coordinates(base_layer, track_set)
    return track_set
def main():
    """Scan the pNeuma data directory and report per-file track extents.

    For every ``.csv`` (pNeuma) and ``.kw18`` file found, the tracks are
    read and their lat/lon extents printed.  Additionally, for kw18 files
    inside a ``convoy`` directory that carry a ``sumo_convoy_ids_.txt``
    sidecar, a convoy event (type 7) referencing those track ids is
    written to ``sumo_convoy.kwe``.
    """
    # Find all kw18 files from a directory wild card
    for root, dirs, files in os.walk("C:/Programming/Trailblazer/data/pNeuma/"):
        for name in files:
            if name.endswith('.csv'):
                csv_file = os.path.join(root, name)
                track_set = read_pNeuma(csv_file)
                TrackSet.compute_bounds(track_set)
                lons, lats = TrackSet.lat_lon_bounds(track_set)
                print("csv : " + csv_file)
                print("Lat Extents:" + str(lats))
                print("Lon Extents:" + str(lons))
                print("\n")
            if name.endswith('.kw18'):
                kw18_file = os.path.join(root, name)
                track_set = read_kw18(kw18_file)
                TrackSet.compute_bounds(track_set)
                lons, lats = TrackSet.lat_lon_bounds(track_set)
                print("kw18 : " + kw18_file)
                print("Lat Extents:" + str(lats))
                print("Lon Extents:" + str(lons))
                print("\n")
                if root.find('convoy') > -1:
                    kw18_file = os.path.join(root, name)
                    convoy_ids_file = os.path.join(root, "sumo_convoy_ids_.txt")
                    if os.path.exists(convoy_ids_file):
                        with open(convoy_ids_file) as id_file:
                            ids = id_file.readline().split(' ')
                            track_set = read_kw18(kw18_file)
                            event_set = {}
                            event = Event()
                            event.id = 0
                            event.type = 7
                            event_set[0] = event
                            for token in ids:
                                token = token.strip()
                                if not token:
                                    # Guard against double spaces / trailing newline.
                                    continue
                                # Bug fix: dict indexing raised KeyError before the
                                # `is not None` check could ever run; use .get().
                                track = track_set.get(int(token))
                                if track is not None:
                                    event.tracks.append(track)
                            kwe_file = root + '/sumo_convoy.kwe'
                            write_kwe(event_set, kwe_file)
                            print("Writing " + kwe_file)


if __name__ == "__main__":
    main()
|
from collections.abc import Mapping
def nested_set(d, keys, value):
    """Assign ``value`` at the nested key path ``keys`` inside dict ``d``.

    Intermediate dicts are created on demand.

    Example:
        >>> d = {}
        >>> nested_set(d, ['person', 'address', 'city'], 'New York')
        >>> d
        {'person': {'address': {'city': 'New York'}}}

    From:
    https://stackoverflow.com/questions/13687924/setting-a-value-in-a-nested-python-dictionary-given-a-list-of-indices-and-value
    """
    node = d
    for intermediate in keys[:-1]:
        node = node.setdefault(intermediate, {})
    node[keys[-1]] = value
def nested_update(d, u):
    """Recursively merge mapping ``u`` into dict ``d`` and return ``d``.

    Nested mappings are merged key by key instead of being replaced
    wholesale; all other values overwrite the existing entry.
    """
    for key, value in u.items():
        merged = (nested_update(d.get(key, {}), value)
                  if isinstance(value, Mapping) else value)
        d[key] = merged
    return d
|
import sys
def main():
    """Convert a whitespace-separated edge list to a Pajek-style file.

    ``sys.argv[1]`` is the input (``source target [weight]`` per line) and
    ``sys.argv[2]`` the output.  Nodes get 1-based ids in order of first
    appearance; unweighted edges are written with weight 1.

    Fixes the original's leaked file handles: the input was opened up to
    three times and never closed.  The file is now read once and both
    files are managed by context managers.
    """
    in_path = sys.argv[1]
    out_path = sys.argv[2]
    with open(in_path) as src:
        rows = [line.split() for line in src]
    with open(out_path, 'w') as out:
        out.write('*Nodes\nid*int\tlabel*string\n')
        nodes = {}
        node_id = 0
        # A single 3-column row switches the whole file to weighted mode.
        # NOTE(review): mixed 2- and 3-column rows would crash the weighted
        # pass below, exactly as in the original — confirm inputs are uniform.
        has_weight = any(len(row) == 3 for row in rows)
        for row in rows:
            source, target = row[0:2]
            if source not in nodes:
                node_id += 1
                nodes[source] = node_id
                out.write(f'{node_id} "{source}"\n')
            if target not in nodes:
                node_id += 1
                nodes[target] = node_id
                out.write(f'{node_id} "{target}"\n')
        out.write('*UndirectedEdges\nsource*int\ttarget*int\tweight*float\n')
        if has_weight:
            for row in rows:
                source, target, weight = row[0:3]
                out.write(f'{nodes[source]}\t{nodes[target]}\t{weight.strip()}\n')
        else:
            for row in rows:
                source, target = row[0:2]
                out.write(f'{nodes[source]}\t{nodes[target]}\t1\n')


if __name__ == "__main__":
    main()
|
## @file
# Automate the process of building the various reset vector types
#
# Copyright (c) 2009 - 2017, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
import glob
import os
import subprocess
import sys
def RunCommand(commandLine):
    """Run *commandLine* (a sequence of program arguments) and return its exit code."""
    return subprocess.call(commandLine)
def FixupForRawSection(sectionFile):
    """Pad *sectionFile* in place so the FFS raw section ends 8-byte aligned.

    The raw section adds a 4-byte header in front of the data; NOP bytes
    (0x90) are prepended so that header + data is a multiple of 8 bytes.
    Already-aligned files are left untouched.  Files are now opened via
    context managers (the original read handle was never closed).
    """
    with open(sectionFile, 'rb') as f:
        d = f.read()
    # Target payload size: (header + data) rounded up to 8, minus the header.
    c = ((len(d) + 4 + 7) & ~7) - 4
    if c > len(d):
        pad = c - len(d)
        with open(sectionFile, 'wb') as f:
            f.write(b'\x90' * pad)
            f.write(d)
# Remove any previously built reset vector binaries.
for filename in glob.glob(os.path.join('Bin', '*.raw')):
    os.remove(filename)
# Build configuration: IA32 reset vector with no debug variant; the debug
# type, when set, becomes an extra component of the output file name.
arch = 'ia32'
debugType = None
output = os.path.join('Bin', 'ResetVector')
output += '.' + arch
if debugType is not None:
    output += '.' + debugType
output += '.raw'
# NASM is located via the NASM_PREFIX environment variable (an empty
# prefix means "use nasm from PATH").
nasmpath = os.path.join(os.environ.get ('NASM_PREFIX', ''), 'nasm')
commandLine = (
    nasmpath,
    '-D', 'ARCH_%s' % arch.upper(),
    '-D', 'DEBUG_%s' % str(debugType).upper(),
    '-o', output,
    'Vtf0.nasmb',
    )
# Assemble, then pad the result so the FFS raw section stays 8-byte aligned.
ret = RunCommand(commandLine)
print ('\tASM\t' + output)
if ret != 0: sys.exit(ret)
FixupForRawSection(output)
print ('\tFIXUP\t' + output)
|
"""Middleware that captures all 'eth_sign' requests to the JSON-RPC-Server.
An adaptation of the signing middleware from `web3.py
<https://github.com/ethereum/web3.py/blob/master/web3/middleware/signing.py>`_.
This middleware intercepts all 'eth_sign' requests to
an ethereum JSON RPC-Server and signs messages with a local private key.
"""
from functools import singledispatch
from typing import Dict, List, Set, Tuple, Union
from eth_account import Account, messages
from eth_account.local import LocalAccount
from eth_keys.datatypes import PrivateKey
from hexbytes import HexBytes
@singledispatch
def _to_account(private_key_or_account):
"""Get a `LocalAccount` instance from a private_key or a `LocalAccount`.
Note that this function is overloaded based on the type on input. This
implementation is the base case where none of the supported types are
matched and we throw an exception.
"""
raise TypeError(
"key must be one of the types:"
"eth_keys.datatype.PrivateKey, "
"eth_account.local.LocalAccount, "
"or raw private key as a hex string or byte string. "
"Was of type {0}".format(type(private_key_or_account))
)
def _private_key_to_account(private_key):
    """Return the `LocalAccount` derived from a raw private key.

    `PrivateKey` instances are converted via their own hex encoding; any
    other input is normalized through `HexBytes` first.
    """
    if isinstance(private_key, PrivateKey):
        key_hex = private_key.to_hex()
    else:
        key_hex = HexBytes(private_key).hex()
    return Account().privateKeyToAccount(key_hex)
# Register the concrete coercions for each supported key type: a
# LocalAccount passes through unchanged; PrivateKey, hex strings and raw
# byte strings are converted via _private_key_to_account.
_to_account.register(LocalAccount, lambda x: x)
_to_account.register(PrivateKey, _private_key_to_account)
_to_account.register(str, _private_key_to_account)
_to_account.register(bytes, _private_key_to_account)
def construct_local_message_signer(
    private_key_or_account: Union[
        Union[LocalAccount, PrivateKey, str],
        List[Union[LocalAccount, PrivateKey, str]],
        Tuple[Union[LocalAccount, PrivateKey, str]],
        Set[Union[LocalAccount, PrivateKey, str]],
    ]
):
    """Construct a middleware that signs 'eth_sign' requests locally.

    :param private_key_or_account: a single private key or a tuple, list,
        or set of private keys.  Keys can be any of the following formats:

        - An `eth_account.LocalAccount`:code: object
        - An `eth_keys.PrivateKey`:code: object
        - A raw private key as a hex `string`:code: or as `bytes`:code:

    :returns: callable local_message_signer_middleware
    """
    keys = private_key_or_account
    if not isinstance(keys, (list, tuple, set)):
        keys = [keys]
    # Index the signing accounts by their checksum address.
    address_to_accounts: Dict[str, LocalAccount] = {}
    for entry in keys:
        account = _to_account(entry)
        address_to_accounts[account.address] = account

    def local_message_signer_middleware(
        make_request, web3
    ):  # pylint: disable=unused-argument
        def middleware(method, params):
            # Everything except 'eth_sign' passes straight through.
            if method != "eth_sign":
                return make_request(method, params)
            account_address, raw_message = params[:2]
            signer = address_to_accounts[account_address]
            # We will assume any string which looks like a hex is expected
            # to be converted to hex. Non-hexable strings are forcibly
            # converted by encoding them to utf-8
            try:
                payload = HexBytes(raw_message)
            except Exception:  # pylint: disable=broad-except
                payload = HexBytes(raw_message.encode("utf-8"))
            digest = messages.defunct_hash_message(payload)
            signed = signer.signHash(digest)
            return {"result": signed.signature}

        return middleware

    return local_message_signer_middleware
|
# Legacy Django (<3.2) hook pointing at this package's AppConfig class.
default_app_config = 'councilmatic_core.apps.CouncilmaticConfig'
|
def characterReplacement(s, k):
    """Length of the longest substring of ``s`` that can be made uniform by
    replacing at most ``k`` characters (LeetCode 424).

    O(n) sliding window: a window is feasible iff its size minus the count
    of its most frequent character is <= k.  Fixes the original
    implementation, which stopped extending as soon as ``k`` replacements
    were spent even when the following characters matched (it returned 3
    instead of 4 for ``("AABABBA", 1)``).
    """
    counts = {}
    max_freq = 0  # highest single-character count seen in any window so far
    left = 0
    best = 0
    for right, ch in enumerate(s):
        counts[ch] = counts.get(ch, 0) + 1
        max_freq = max(max_freq, counts[ch])
        # If the window needs more than k replacements, slide it right by
        # one (it never has to shrink below the best size found so far).
        if (right - left + 1) - max_freq > k:
            counts[s[left]] -= 1
            left += 1
        best = max(best, right - left + 1)
    return best
def characterReplacement2(s, k):
    """Quadratic reference implementation of ``characterReplacement``.

    For every start index, grow the window while it remains fixable with
    at most ``k`` replacements (window size minus the max character
    frequency <= k).  Fixes the original, which never counted the
    window's first character and stopped prematurely once the budget was
    spent (it returned 3 instead of 4 for ``("ABAB", 2)``).
    """
    maxLength, s_len = 0, len(s)
    for i in range(s_len):
        counts = {}
        max_freq = 0
        for j in range(i, s_len):
            c = s[j]
            counts[c] = counts.get(c, 0) + 1
            max_freq = max(max_freq, counts[c])
            if (j - i + 1) - max_freq > k:
                # Once infeasible, extending can never restore feasibility.
                break
            maxLength = max(maxLength, j - i + 1)
    return maxLength
|
from django.urls import path, include
from .views import current_user, UserList, CliqueRequests, UserEmailDetails, CliqueIdMembers, InvitationDetails, UserToDos, index, UserDetails, CliqueDetails, CliqueEvents, UserEvents, UserInvitations, CliqueMembers, UserSchedules, ScheduleTimeFrames, CliqueAnnouncements, CliqueCliqueMessages, UserDirectMessagesSent, UserDirectMessagesRecieved, UserCliques, RelatedCliques, ManyRelatedCliques
# URL routes for the clique/user API.  Each route delegates to a DRF
# class-based view (or the two function views for the index and the
# current user).
urlpatterns = [
    path('', index),
    # Authenticated-user helpers.
    path('currentUser/', current_user),
    path('addUsers/', UserList.as_view()),
    path('userDetails/<str:username>/', UserDetails.as_view()),
    path('userEmailDetails/<str:userEmail>/', UserEmailDetails.as_view()),
    # Clique lookup by name or numeric id.
    path('cliqueDetails/<str:name>/', CliqueDetails.as_view()),
    path('cliqueMembers/<str:name>/', CliqueMembers.as_view()),
    path('cliqueidMembers/<int:id>/', CliqueIdMembers.as_view()),
    path('cliqueAnnouncements/<str:name>/', CliqueAnnouncements.as_view()),
    # Invitations and membership requests.
    path('userInvitations/<str:username>/', UserInvitations.as_view()),
    path('invitationDetails/<str:inviteeEmail>/', InvitationDetails.as_view()),
    path('cliqueRequests/<str:cliqueName>/', CliqueRequests.as_view()),
    # Events and schedules.
    path('cliqueEvents/<str:name>/', CliqueEvents.as_view()),
    path('userEvents/<str:username>/', UserEvents.as_view()),
    path('userSchedules/<str:username>/', UserSchedules.as_view()),
    path('scheduleTimeFrames/<int:scheduleId>/', ScheduleTimeFrames.as_view()),
    # Messaging.  NOTE(review): 'cliqueCliqueMesssages' has a triple-s typo;
    # it is part of the public URL surface, so it is left untouched here.
    path('cliqueCliqueMesssages/<str:name>/', CliqueCliqueMessages.as_view()),
    path('userDirectMessagesSent/<str:username>/', UserDirectMessagesSent.as_view()),
    path('userDirectMessagesRecieved/<str:username>/', UserDirectMessagesRecieved.as_view()),
    # To-dos and clique relations.
    path('userToDos/<str:username>/', UserToDos.as_view()),
    path('relatedCliques/<str:name>/', RelatedCliques.as_view()),
    path('manyRelatedCliques/<str:names>/', ManyRelatedCliques.as_view()),
    path('userCliques/<str:username>/', UserCliques.as_view()),
]
|
# coding: utf-8
import time
import os
import tornado.web
from pony import orm
from ._base import BaseHandler
from collipa.helpers import get_year, get_month, gen_random_str, mkdir_p, get_relative_path
from collipa import config
class UploadHandler(BaseHandler):
    # Handles authenticated file uploads; currently only the 'music'
    # category is supported.

    @orm.db_session
    @tornado.web.authenticated
    def post(self, category):
        """Accept an uploaded file for ``category`` and store it on disk.

        Expects the file in the ``myfile`` multipart field.  Writes a JSON
        status dict; the stored file's web path is returned on success.
        """
        if not self.has_permission:
            return
        if not self.request.files or 'myfile' not in self.request.files:
            self.write({"status": "error",
                        "message": "对不起,请选择文件"})
            return
        # Accepted MIME types per category; an empty list means the
        # category itself is unsupported and the request is ignored.
        file_type_list = []
        if category == 'music':
            file_type_list = ['audio/mpeg', 'audio/x-wav', 'audio/mp3']
        if not file_type_list:
            return
        send_file = self.request.files['myfile'][0]
        if send_file['content_type'] not in file_type_list:
            if category == 'music':
                self.write({"status": "error",
                            "message": "对不起,仅支持 mp3, wav 格式的音乐文件"})
            return
        if category == 'music':
            # 20 MB size cap for music uploads.
            if len(send_file['body']) > 20 * 1024 * 1024:
                self.write({"status": "error",
                            "message": "对不起,请上传20M以下的音乐文件"})
                return
        user = self.current_user
        if category == 'music':
            # Uploads are sharded into year/month sub-directories.
            upload_path = os.path.join(config.upload_path, 'music', get_year(), get_month())
        else:
            return
        mkdir_p(upload_path)
        # Unique file name: epoch seconds + random suffix + uploader id.
        timestamp = str(int(time.time())) + gen_random_str() + '_' + str(user.id)
        # NOTE(review): despite the name, this is simply the original file
        # extension (mp3/wav for music uploads).
        image_format = send_file['filename'].split('.').pop().lower()
        filename = timestamp + '.' + image_format
        file_path = os.path.join(upload_path, filename)
        with open(file_path, 'wb') as f:
            f.write(send_file['body'])
        path = '/' + get_relative_path(file_path)
        # Only AJAX callers get the JSON response body.
        if not self.is_ajax:
            return
        return self.write({
            'path': path,
            'status': "success",
            'message': '上传成功',
            'category': category,
            'content_type': send_file['content_type'],
        })
|
import numpy as np
from copulae.copula import Summary, TailDep
from copulae.special.debye import debye_1, debye_2
from copulae.special.optimize import find_root
from copulae.special.special_func import log1mexp, log1pexp, poly_log
from copulae.stats import random_uniform
from copulae.stats.log import random_log_series_ln1p
from copulae.types import Array
from copulae.utility import array_io, as_array
from ._shared import valid_rows_in_u
from .abstract import AbstractArchimedeanCopula
class FrankCopula(AbstractArchimedeanCopula):
    r"""
    A Frank copula is an Archimedean copula. In the bivariate case, its parameters can interpolate between
    a lower limit of :math:`-\infty` (countermonotonicity) and an upper limit of :math:`\infty` (comonotonicity).
    A Frank copula is defined as

    .. math::

        C_\theta (u_1, \dots, u_d) = \frac{1}{\theta}
        \log \left(1 + \frac{ \prod_i (e^{\theta u_i} - 1) }{e^{-\theta} - 1} \right)
    """

    def __init__(self, theta=np.nan, dim=2):
        """
        Creates a Frank copula instance

        Parameters
        ----------
        theta: float, optional
            Number specifying the copula parameter

        dim: int, optional
            Dimension of the copula
        """
        super().__init__(dim, theta, 'Frank')
        # Negative (countermonotone) dependence only exists in the bivariate case.
        assert not (dim != 2 and theta < 0), 'Frank Copula parameter must be >= 0 when dimension == 2'
        self._bounds = (-np.inf if dim == 2 else 0), np.inf

    @array_io
    def dipsi(self, u: Array, degree=1, log=False):
        """First or second derivative of the inverse generator ``ipsi`` at ``u``."""
        assert degree in (1, 2), 'degree can only be 1 or 2'
        # Derivatives alternate in sign; logs and even degrees are positive.
        s = 1 if log or degree % 2 == 0 else -1

        ut = u * self.params
        if degree == 1:
            v = self.params / np.expm1(ut)
        else:
            v = (self.params ** 2 * np.exp(ut)) / np.expm1(ut) ** 2

        return s * (np.log(v) if log else v)

    @array_io(optional=True)
    def drho(self, x=None):  # pragma: no cover
        """Derivative of Spearman's rho with respect to the parameter.

        d(rho)/d(theta) = 12 (theta/(e^theta - 1) - 3 D_2(theta) + 2 D_1(theta)) / theta^2,
        the exact derivative of ``_rho`` below (D_n is the Debye function
        of order n).
        """
        if x is None:
            x = self.params
        # Bug fix: the -3 term must use the order-2 Debye function; the
        # original called debye1 twice (-3*D1 + 2*D1), which collapses to
        # -D1 and is not the derivative of _rho.
        return 12 * (x / np.expm1(x) - 3 * debye2(x) + 2 * debye1(x)) / x ** 2

    @array_io(optional=True)
    def dtau(self, x=None):  # pragma: no cover
        """Derivative of Kendall's tau with respect to the parameter."""
        if x is None:
            x = self.params
        return (x / np.expm1(x) + 1 - debye1(x) / x) * (2 / x) ** 2

    @array_io
    def ipsi(self, u, log=False):
        r"""Inverse generator :math:`-\log((e^{-\theta u} - 1)/(e^{-\theta} - 1))`,
        evaluated branch-wise for numerical stability."""
        r = np.asarray(u) * self.params
        res = np.copy(r)
        res[np.isnan(r)] = np.nan

        em = np.expm1(-self.params)

        # for small inputs, u <= 0.01, direct evaluation is stable
        small_mask = np.abs(r) <= 0.01 * abs(self.params)
        res[small_mask] = -np.log(np.expm1(-r[small_mask]) / em)

        big_mask = np.abs(r) > 0.01 * abs(self.params)
        e = np.exp(-self.params)
        mid_mask = (e > 0) & (np.abs(self.params - r) < 0.5)  # theta * (1 - u) < 0.5

        m1 = big_mask & mid_mask
        m2 = big_mask & ~mid_mask

        # Bug fix: write the large-input branches into `res` as well; the
        # original assigned them back into `r` and returned `r`, silently
        # discarding the small-input results computed above.
        res[m1] = -np.log1p(e * np.expm1((self.params - r[m1])) / em)
        res[m2] = -np.log1p((np.exp(-r[m2]) - e) / em)

        return np.log(res) if log else res

    def irho(self, rho: Array):  # pragma: no cover
        # TODO frank: add inverse rho
        return NotImplemented

    @array_io
    def itau(self, tau):
        """Invert Kendall's tau numerically by root finding on ``_tau``."""
        res = np.array([find_root(lambda x: self._tau(x) - t,
                                  2.2e-16 if t > 0 else -1e20,
                                  1e20 if t > 0 else -2.2e-16) for t in tau.ravel()])
        res = res.reshape(tau.shape)
        # tau == 0 corresponds exactly to theta == 0 (independence).
        res[tau == 0] = tau[tau == 0]
        return res

    @property
    def lambda_(self):  # pragma: no cover
        # The Frank copula has neither lower nor upper tail dependence.
        return TailDep(0, 0)

    @property
    def params(self):
        return self._theta

    @params.setter
    def params(self, theta):
        if self.dim > 2 and theta < 0:
            raise ValueError('theta must be positive when dim > 2')
        self._theta = float(theta)

    @array_io(dim=2)
    def pdf(self, u: Array, log=False):
        """Density (or log density) at the points ``u``; rows are observations."""
        assert not np.isnan(self.params), "Copula must have parameters to calculate parameters"
        n, d = u.shape
        theta = self.params

        # Rows with invalid entries yield nan.
        ok = valid_rows_in_u(u)
        res = np.repeat(np.nan, n)

        u_ = u[ok]
        u_sum = u_.sum(1)
        lp = log1mexp(theta)
        lpu = log1mexp(theta * u_)
        lu = lpu.sum(1)
        li_arg = np.exp(lp + (lpu - lp).sum(1))
        li = poly_log(li_arg, 1 - d, log=True)
        res[ok] = (d - 1) * np.log(theta) + li - theta * u_sum - lu

        return res if log else np.exp(res)

    def psi(self, s):
        """Generator psi; branches are chosen for numerical stability in theta."""
        assert not np.isnan(self.params), "Copula must have parameters to calculate psi"
        s = np.asarray(s)
        if self.params <= -36:
            return -log1pexp(-s - self.params) / self.params
        elif self.params < 0:
            return -np.log1p(np.exp(-s) * np.expm1(-self.params)) / self.params
        elif self.params == 0:
            return np.exp(-s)
        else:
            # NOTE(review): this branch writes into `s` in place, so an
            # ndarray passed by the caller is mutated — confirm callers do
            # not rely on their input surviving.
            const = log1mexp(self.params)
            m = np.less(s, const, where=~np.isnan(s))
            s[m] = np.nan
            s[~m] = -log1mexp(s[~m] - log1mexp(self.params)) / self.params
            return s.item(0) if s.size == 1 else s

    def random(self, n: int, seed: int = None):
        """Draw ``n`` random vectors from the copula."""
        u = random_uniform(n, self.dim, seed)
        # Near-independence: the uniforms are already a valid sample.
        if abs(self.params) < 1e-7:
            return u

        if self.dim == 2:
            # Conditional-inverse sampling for the bivariate case.
            v = u[:, 1]
            a = -abs(self.params)
            v = -1 / a * np.log1p(-v * np.expm1(-a) / (np.exp(-a * u[:, 0]) * (v - 1) - v))
            u[:, 1] = 1 - v if self.params > 0 else v
            return u

        # alpha too large: samples collapse to comonotonicity
        if log1mexp(self.params) == 0:
            return np.ones((n, self.dim))

        # Frailty sampling via a log-series distributed mixing variable.
        fr = random_log_series_ln1p(-self.params, n)[:, None]
        return self.psi(-np.log(u) / fr)

    @property
    def rho(self):
        return self._rho(self.params)

    def summary(self):
        return Summary(self, {"theta": self.params})

    @property
    def tau(self):
        t = self.params
        # tau(theta) ~ theta / 9 as theta -> 0.
        if np.isclose(t, 0):
            return t / 9
        return self._tau(self.params)

    @staticmethod
    def _rho(theta):
        # rho(theta) ~ theta / 6 as theta -> 0.
        if np.isclose(theta, 0):
            return theta / 6
        return 1 + 12 / theta * (debye2(theta) - debye1(theta))

    @staticmethod
    def _tau(theta):
        theta = np.asarray(theta)
        if theta.size == 1:
            theta = float(theta)
        return 1 + 4 * (debye1(theta) - 1) / theta
def debye1(x):
    """
    Custom debye order 1 that takes care of negative numbers or non-finite numbers

    Parameters
    ----------
    x: array_like
        Numeric vector

    Returns
    -------
    ndarray or scalar
        Debye order 1 numbers

    See Also
    --------
    :code:`copulae.special.debye.debye_1`: The debye order 1 function
    """
    x = as_array(x)
    fin = np.isfinite(x)
    # D1(-x) = D1(x) + x/2, so evaluate on |x| and correct negatives at the end.
    d = np.ravel(np.abs(x))
    with np.errstate(invalid='ignore'):
        if np.all(fin):
            d = debye_1(d)
        else:
            # Evaluate only the finite entries; non-finite ones handled below.
            d[fin] = debye_1(d[fin])
    d = np.ravel(d)
    # np.isposinf is equivalent to isinf(x) & (x > 0); also matches debye2.
    pinf = np.isposinf(x)
    if np.any(pinf):
        d[pinf] = 0  # set positive infinity to 0 (but not na, thus can't use ~fin)
    d[x < 0] -= x[x < 0] / 2
    return d.item(0) if d.size == 1 else d
def debye2(x):
    """
    Custom debye order 2 that takes care of negative numbers or non-finite numbers

    Parameters
    ----------
    x: array_like
        Numeric vector

    Returns
    -------
    ndarray or scalar
        Debye order 2 numbers

    See Also
    --------
    :code:`copulae.special.debye.debye_2`: The debye order 2 function
    """
    x = as_array(x)
    fin = np.isfinite(x)
    # D2(-x) = D2(x) + 2x/3, so evaluate on |x| and correct negatives at the end.
    d = np.ravel(np.abs(x))
    with np.errstate(invalid='ignore'):
        if np.all(fin):
            d = debye_2(d)
        else:
            # Evaluate only the finite entries; non-finite ones handled below.
            d[fin] = debye_2(d[fin])
    d = np.ravel(d)
    pinf = np.isposinf(x)
    if np.any(pinf):
        d[pinf] = 0  # set positive infinity to 0 (but not na, thus can't use ~fin)
    d[x < 0] -= 2 / 3 * x[x < 0]
    return d.item(0) if d.size == 1 else d
|
import numpy as np
import torch
import torchvision
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
from collections import namedtuple
from functools import partial
from PIL import Image
import data_transforms
import data_iterators
import pathfinder
import utils
import app
restart_from_save = None
# Fixed seed so training data order/augmentation is reproducible.
rng = np.random.RandomState(37148)
# transformations
p_transform = {'patch_size': (512, 512),
               'channels': 3}
# only lossless augmentations
p_augmentation = {
    'rot90_values': [0, 1, 2, 3],
    'flip': [0, 1]
}
# mean and std values for imagenet
mean = np.asarray([0.485, 0.456, 0.406])
mean = mean[:, None, None]  # (3, 1, 1) so it broadcasts over CHW images
std = np.asarray([0.229, 0.224, 0.225])
std = std[:, None, None]  # (3, 1, 1) for channel-wise normalization
# data preparation function
def data_prep_fun(x, y, random_gen):
    """Prepare one (image, target) pair: downscale by 2, normalize, random-crop.

    `x` and `y` are PIL images (they expose .resize/.width/.height); returns a
    pair of float32 arrays in CHW layout cropped to the configured patch size.
    """
    factor = 2
    x = x.resize((x.width // factor, x.height // factor))
    y = y.resize((y.width // factor, y.height // factor))
    # HWC uint8 -> CHW float32, normalized with the ImageNet statistics.
    x_arr = np.transpose(np.asanyarray(x), (2, 0, 1))
    x_arr = ((x_arr / 255. - mean) / std).astype(np.float32)
    # Target becomes a single-channel float map in [0, 1].
    y_arr = (np.asanyarray(y) / 255.)[None, :, :].astype(np.float32)
    return data_transforms.random_crop_x_y(
        x_arr, y_arr, p_transform['patch_size'][0], p_transform['patch_size'][1], random_gen)
def data_reverse_tf(x):
    """Invert `data_prep_fun`'s normalization: CHW float -> HWC int image in [0, 255]."""
    img = np.clip(255 * (std * x + mean), 0, 255).astype(int)
    # CHW -> HWC
    return np.transpose(img, (1, 2, 0))
train_data_prep_fun = partial(data_prep_fun, random_gen=rng)
# Validation crops use a fixed generator so validation is deterministic.
valid_data_prep_fun = partial(data_prep_fun, random_gen=np.random.RandomState(0))
# data iterators
batch_size = 6
nbatches_chunk = 1
chunk_size = batch_size * nbatches_chunk
# dataset1 = app.get_id_pairs('test_data/test1/trainA', 'test_data/test1_hed/trainA')
dataset1 = app.get_id_pairs('ir2day_3108/trainA', 'hed_ir2day_3108/trainA')
dataset2 = app.get_id_pairs('ir2day_3108/trainB', 'hed_ir2day_3108/trainB')
img_id_pairs = [dataset1, dataset2]
id_pairs = app.train_val_test_split(img_id_pairs, train_fraction=.7, val_fraction=.15, test_fraction=.15)
# Known-bad samples to exclude (currently none).
bad_ids = []
id_pairs['train'] = [x for x in id_pairs['train'] if x not in bad_ids]
id_pairs['valid'] = [x for x in id_pairs['valid'] if x not in bad_ids]
id_pairs['test'] = [x for x in id_pairs['test'] if x not in bad_ids]
# NOTE(review): label_prep_fun is given the same two-argument prep function as
# data_prep_fun - confirm EdgeDataGenerator's expected label signature.
train_data_iterator = data_iterators.EdgeDataGenerator(mode='all',
                                                       batch_size=chunk_size,
                                                       img_id_pairs=id_pairs['train'],
                                                       data_prep_fun=train_data_prep_fun,
                                                       label_prep_fun=train_data_prep_fun,
                                                       rng=rng,
                                                       full_batch=True, random=True, infinite=True)
valid_data_iterator = data_iterators.EdgeDataGenerator(mode='all',
                                                       batch_size=chunk_size,
                                                       img_id_pairs=id_pairs['valid'],
                                                       data_prep_fun=valid_data_prep_fun,
                                                       label_prep_fun=valid_data_prep_fun,
                                                       rng=rng,
                                                       full_batch=False, random=False, infinite=False)
test_data_iterator = data_iterators.EdgeDataGenerator(mode='all',
                                                      batch_size=chunk_size,
                                                      img_id_pairs=id_pairs['test'],
                                                      data_prep_fun=valid_data_prep_fun,
                                                      label_prep_fun=valid_data_prep_fun,
                                                      rng=rng,
                                                      full_batch=False, random=False, infinite=False)
# Training length: 40 epochs expressed in chunks.
nchunks_per_epoch = train_data_iterator.nsamples // chunk_size
max_nchunks = nchunks_per_epoch * 40
print('max_nchunks', max_nchunks)
validate_every = int(0.5 * nchunks_per_epoch)
save_every = int(4 * nchunks_per_epoch)
# Step-wise learning-rate decay over the whole run.
learning_rate_schedule = {
    0: 5e-4,
    int(max_nchunks * 0.3): 2e-4,
    int(max_nchunks * 0.6): 1e-4,
    int(max_nchunks * 0.8): 3e-5,
    int(max_nchunks * 0.9): 1e-5
}
# models
class VGG16features(nn.Module):
    """VGG-16 convolutional trunk used as the HED backbone.

    ``conv1_1`` pads by 33 (1 regular + 32 extra), growing the spatial extent;
    the consumer (HEDNet) crops those 32 border pixels back off at the end.
    """

    def __init__(self, initialize_weights=True, activation=F.relu):
        super(VGG16features, self).__init__()
        self.activation = activation
        # NOTE(review): init_pad is defined but never used in the forward pass;
        # kept for attribute compatibility - confirm before removing.
        self.init_pad = torch.nn.ReflectionPad2d(32)
        self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=(33, 33))
        self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1)
        self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1)
        self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1)
        self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
        self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
        self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
        self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
        self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
        self.pool5 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        if initialize_weights:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    # He initialization: std = sqrt(2 / fan_out).
                    n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                    m.weight.data.normal_(0, (2. / n) ** .5)
                elif isinstance(m, nn.BatchNorm2d):
                    m.weight.data.fill_(1)
                    m.bias.data.zero_()

    def forward_hypercol(self, x):
        """Run the trunk and return each stage's pre-pool activation (c1..c5)."""
        x = self.conv1_1(x)
        x = self.activation(x)
        x = self.conv1_2(x)
        c1 = self.activation(x)
        x = self.pool1(c1)
        x = self.conv2_1(x)
        x = self.activation(x)
        x = self.conv2_2(x)
        c2 = self.activation(x)
        x = self.pool2(c2)
        x = self.conv3_1(x)
        x = self.activation(x)
        x = self.conv3_2(x)
        x = self.activation(x)
        x = self.conv3_3(x)
        c3 = self.activation(x)
        x = self.pool3(c3)
        x = self.conv4_1(x)
        x = self.activation(x)
        x = self.conv4_2(x)
        x = self.activation(x)
        x = self.conv4_3(x)
        c4 = self.activation(x)
        x = self.pool4(c4)
        x = self.conv5_1(x)
        x = self.activation(x)
        x = self.conv5_2(x)
        x = self.activation(x)
        x = self.conv5_3(x)
        c5 = self.activation(x)
        return c1, c2, c3, c4, c5

    def forward(self, x):
        """Full trunk output: final stage activation after pool5.

        Delegates to forward_hypercol instead of duplicating the 30-line trunk
        (the two methods were verbatim copies, prone to drifting apart).
        """
        c1, c2, c3, c4, c5 = self.forward_hypercol(x)
        return self.pool5(c5)
class VGG(nn.Module):
    """VGG classifier: a feature extractor followed by the standard FC head."""

    def __init__(self, features, num_classes=1000, initialize_weights=True):
        super(VGG, self).__init__()
        self.features = features
        # Standard VGG fully-connected head; expects 512x7x7 feature maps.
        self.classifier = nn.Sequential(
            nn.Linear(512 * 7 * 7, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, num_classes),
        )
        if initialize_weights:
            self._initialize_weights()

    def forward(self, x):
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)
        return self.classifier(flat)

    def _initialize_weights(self):
        # He-style init for convs, N(0, 0.01) for linears, unit scale for BN.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
                module.weight.data.normal_(0, (2. / fan_out) ** .5)
                if module.bias is not None:
                    module.bias.data.zero_()
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()
            elif isinstance(module, nn.Linear):
                module.weight.data.normal_(0, 0.01)
                module.bias.data.zero_()
def vgg16(pretrained=False, **kwargs):
    """VGG 16-layer model (configuration "D")

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet

    Returns:
        (model, VGG16fs): the full classifier and its feature trunk, which
        share the same feature modules.
    """
    initialize_weights = False if pretrained else True
    VGG16fs = VGG16features(initialize_weights=initialize_weights)
    model = VGG(VGG16fs, initialize_weights=initialize_weights, **kwargs)
    if pretrained:
        # NOTE(review): torch.utils.model_zoo may need an explicit
        # `import torch.utils.model_zoo` on recent torch versions - verify.
        state_dict = torch.utils.model_zoo.load_url(torchvision.models.vgg.model_urls['vgg16'])
        new_state_dict = {}
        original_layer_ids = set()
        # copy the classifier entries and make a mapping for the feature mappings
        for key in state_dict.keys():
            if 'classifier' in key:
                new_state_dict[key] = state_dict[key]
            elif 'features' in key:
                original_layer_ids.add(int(key.split('.')[1]))
        sorted_original_layer_ids = sorted(list(original_layer_ids))
        layer_ids = set()
        for key in model.state_dict().keys():
            if 'classifier' in key:
                continue
            elif 'features' in key:
                layer_id = key.split('.')[1]
                layer_ids.add(layer_id)
        # NOTE(review): this sorts attribute-name strings lexicographically
        # ('conv1_1' ... 'conv5_3'), which happens to match the numeric order
        # of the torchvision layers for this architecture - re-check if the
        # trunk's layer names ever change.
        sorted_layer_ids = sorted(list(layer_ids))
        # Positional mapping: i-th pretrained feature layer -> i-th local layer.
        for key, value in state_dict.items():
            if 'features' in key:
                original_layer_id = int(key.split('.')[1])
                original_param_id = key.split('.')[2]
                idx = sorted_original_layer_ids.index(original_layer_id)
                new_layer_id = sorted_layer_ids[idx]
                new_key = 'features.' + new_layer_id + '.' + original_param_id
                new_state_dict[new_key] = value
        model.load_state_dict(new_state_dict)
    return model, VGG16fs
class HEDNet(nn.Module):
    """Holistically-nested edge detection head on a pretrained VGG-16 trunk.

    Each VGG stage gets a 1x1 side-output scorer; side outputs are upsampled
    to the stage-1 resolution, squashed with a sigmoid, averaged, and cropped
    back by the 32 pixels of extra padding the trunk introduced.
    """

    def __init__(self, activation=F.relu):
        # NOTE(review): inplanes appears unused in this class - confirm before removing.
        self.inplanes = 64
        super(HEDNet, self).__init__()
        # 1x1 side-output scorers, one per VGG stage (64..512 input channels).
        self.score_dsn1 = nn.Conv2d(64, 1, kernel_size=1, stride=1, padding=0)
        self.score_dsn2 = nn.Conv2d(128, 1, kernel_size=1, stride=1, padding=0)
        self.score_dsn3 = nn.Conv2d(256, 1, kernel_size=1, stride=1, padding=0)
        self.score_dsn4 = nn.Conv2d(512, 1, kernel_size=1, stride=1, padding=0)
        self.score_dsn5 = nn.Conv2d(512, 1, kernel_size=1, stride=1, padding=0)
        # Upsample each side output back to the stage-1 resolution.
        # NOTE(review): bilinear Upsample without align_corners relies on the
        # torch version's default - confirm against the training-time version.
        self.upsample2 = nn.Upsample(scale_factor=2, mode='bilinear')
        self.upsample3 = nn.Upsample(scale_factor=4, mode='bilinear')
        self.upsample4 = nn.Upsample(scale_factor=8, mode='bilinear')
        self.upsample5 = nn.Upsample(scale_factor=16, mode='bilinear')
        # Negative reflection pad = crop the 32-pixel border added by the trunk.
        self.crop = torch.nn.ReflectionPad2d(-32)
        self.drop = nn.Dropout(p=.5)
        self.cd1 = nn.Conv2d(1472, 512, kernel_size=1, stride=1, padding=0)
        self.cd2 = nn.Conv2d(512, 512, kernel_size=1, stride=1, padding=0)
        self.cd3 = nn.Conv2d(512, 1, kernel_size=1, stride=1, padding=0)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # He initialization: std = sqrt(2 / fan_out).
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, (2. / n) ** .5)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        # Attach the pretrained trunk only AFTER the init loop above, so its
        # downloaded weights are not overwritten by the random init.
        __, VGG16fs = vgg16(pretrained=True)
        self.VGG16fs = VGG16fs

    def forward(self, x):
        c1, c2, c3, c4, c5 = self.VGG16fs.forward_hypercol(x)
        s1 = self.score_dsn1(c1)
        s2 = self.score_dsn2(c2)
        s3 = self.score_dsn3(c3)
        s4 = self.score_dsn4(c4)
        s5 = self.score_dsn5(c5)
        s2 = self.upsample2(s2)
        s3 = self.upsample3(s3)
        s4 = self.upsample4(s4)
        s5 = self.upsample5(s5)
        # torch.sigmoid replaces the deprecated F.sigmoid (identical behavior).
        s1 = torch.sigmoid(s1)
        s2 = torch.sigmoid(s2)
        s3 = torch.sigmoid(s3)
        s4 = torch.sigmoid(s4)
        s5 = torch.sigmoid(s5)
        # Uniform average of the five side outputs.
        out = 0.2 * s1 + 0.2 * s2 + 0.2 * s3 + 0.2 * s4 + 0.2 * s5
        return self.crop(out)
def build_model():
    """Construct the HED network wrapped in the project's Model namedtuple."""
    Model = namedtuple('Model', ['l_out'])
    return Model(l_out=HEDNet())
def _assert_no_grad(variable):
assert not variable.requires_grad, \
"nn criterions don't compute the gradient w.r.t. targets - please " \
"mark these variables as volatile or not requiring gradients"
class SimpleBCELoss(nn.Module):
    """Plain binary cross-entropy loss (mean-reduced when size_average=True)."""

    def __init__(self, size_average=True):
        super(SimpleBCELoss, self).__init__()
        # NOTE(review): size_average is deprecated in torch's functional API;
        # kept for backward compatibility with existing callers.
        self.size_average = size_average

    def forward(self, input, target):
        _assert_no_grad(target)
        # Leftover debug print of tensor sizes/min/max removed.
        return F.binary_cross_entropy(input, target, size_average=self.size_average)
class SimpleMSELoss(nn.Module):
    """Mean squared error between prediction and target."""

    def __init__(self):
        super(SimpleMSELoss, self).__init__()

    def forward(self, input, target):
        _assert_no_grad(target)
        return torch.mean((target - input) ** 2)
class WeightedBCELoss(nn.Module):
    """Class-balanced binary cross-entropy.

    With beta = 1 - mean(target), positive pixels are weighted by beta and
    negative pixels by 1 - beta, counterbalancing class imbalance.
    """

    def __init__(self, size_average=True):
        super(WeightedBCELoss, self).__init__()
        self.size_average = size_average

    def forward(self, input, target):
        _assert_no_grad(target)
        beta = 1 - torch.mean(target)
        # Linear in target: equals beta where target == 1, 1 - beta where target == 0.
        pixel_weights = 1 - beta + (2 * beta - 1) * target
        return F.binary_cross_entropy(input, target, pixel_weights, self.size_average)
class WeightedMSELoss(nn.Module):
    """Squared error with direction-dependent class balancing.

    Positive residuals (under-prediction of edges) are weighted by the share
    of non-edge mass and negative residuals by the share of edge mass, so the
    sparse edge pixels are not drowned out by the background.
    """

    def __init__(self):
        super(WeightedMSELoss, self).__init__()

    def forward(self, input, target):
        _assert_no_grad(target)
        residual = target - input
        squared = residual ** 2
        direction = torch.sign(residual)
        # Soft masks: 1 for strictly positive / negative residuals, 0.5 at zero.
        pos_mask = (direction + 1) / 2
        neg_mask = (direction - 1) / -2
        edge_mass = torch.sum(target)
        empty_mass = torch.sum(1 - target)
        total_mass = edge_mass + empty_mass
        pos_part = (empty_mass / total_mass) * pos_mask * squared
        neg_part = (edge_mass / total_mass) * neg_mask * squared
        return torch.mean(neg_part + pos_part)
def build_objective():
    """Primary training loss: class-balanced squared error."""
    return WeightedMSELoss()
def build_objective2():
    """Secondary loss: plain mean squared error."""
    return SimpleMSELoss()
def score(preds, gts):
    """Continuous F-score of predictions against ground truths (delegates to app)."""
    return app.cont_f_score(preds, gts)
def intermediate_valid_predictions(xs, gts, preds, pid, it_valid, n_save=10):
    """Save up to `n_save` (input, ground truth, prediction) triples as JPEGs.

    Files go to METADATA_PATH/checkpoints/<pid>/ and are named
    <it_valid>_<k>_{pred,real,input}.jpg.
    """
    path = pathfinder.METADATA_PATH + '/checkpoints/' + pid
    utils.auto_make_dir(path)
    pred_id = 0
    for batch_x, batch_gt, batch_pred in zip(xs, gts, preds):
        for x, gt, pred in zip(batch_x, batch_gt, batch_pred):
            if pred_id >= n_save:
                # Previously only `break`-ed the inner loop, so the outer loop
                # kept spinning over remaining batches doing nothing.
                return
            prefix = path + '/' + str(it_valid) + '_' + str(pred_id)
            # save pred (single-channel map in [0, 1] -> 8-bit grayscale)
            pred = (255 * pred).astype(int)
            app.save_image(pred[0], prefix + '_pred.jpg', mode='L')
            # save ground truth
            gt = (255 * gt).astype(int)
            app.save_image(gt, prefix + '_real.jpg', mode='L')
            # save input -- BUG FIX: the de-normalized image was computed but
            # discarded, so the raw normalized tensor was saved instead.
            x = data_reverse_tf(x)
            app.save_image(x, prefix + '_input.jpg', mode='RGB')
            pred_id += 1
# # updates
# def build_updates(model, learning_rate):
# return optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9, weight_decay=0.0002)
# updates
def build_updates(model, learning_rate):
    """Create the Adam optimizer over all model parameters."""
    return optim.Adam(params=model.parameters(), lr=learning_rate)
|
import xlwings as xw
import xwpandas as xp
import pandas as pd
import numpy as np
import csv
from xlwings.constants import Constants as C
import importlib
import time
#%%
# Build a small MultiIndex'd frame, then tile it to ~300k rows to benchmark writers.
arrays = [np.array(['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux']),
          np.array(['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two'])]
# NOTE(review): the second level mixes 0 and 'two' (possibly meant 'one'), and
# the `labels` kwarg was renamed `codes` in modern pandas - confirm versions.
index = pd.MultiIndex(levels=[['bar', 'baz', 'foo', 'qux'], [0, 'two']],
                      labels=[[0, 0, 1, 1, 2, 2, 3, 3], [0, 1, 0, 1, 0, 1, 0, 1]],
                      names=['first', 'second'])
mdf = pd.DataFrame(np.random.randn(6, 6), index=index[:6], columns=index[:6])
# Strip '.' and '-' from two columns so they become plain digit strings.
mdf.iloc[:, 2:4] = mdf.iloc[:, 2:4].astype(str).applymap(lambda x: x.replace('.','').replace('-',''))
bigmdf = pd.concat([mdf]*50000)
#%%
def time_elapsed(func, *args, **kwargs):
    """Run `func(*args, **kwargs)`, print the wall-clock duration, return the result."""
    import time
    started = time.time()
    result = func(*args, **kwargs)
    print("--- %s seconds ---" % round(time.time() - started, 2))
    return result
#%%
# Benchmark: xwpandas save vs its raw xlwings conversion vs DataFrame.to_excel.
importlib.reload(xp.core)
time_elapsed(xp.save, bigmdf, '.temp/xl.xlsx')
time_elapsed(xp.core._df_toxlwings, bigmdf)
time_elapsed(lambda x:x.to_excel('.temp/xl.xlsx'), bigmdf)
def write_xlsxwriter(df, path):
    """Write `df` to `path` via pandas' xlsxwriter engine (for timing comparison)."""
    # Context manager replaces the explicit save()/close() pair:
    # ExcelWriter.save() was deprecated and removed in pandas 2.0, and
    # close() already finalizes the file, so the old code double-closed.
    with pd.ExcelWriter(path, engine='xlsxwriter') as writer:
        df.to_excel(writer, sheet_name='Sheet1')
time_elapsed(write_xlsxwriter, bigmdf, '.temp/xl.xlsx')
|
from .models import User
from .auth import orminAuth
from .resources import RegisterResource, LoginResource
|
import time
import itertools as it # NOQA
class LongIterable:
    """An iterable calibrated so one full pass takes roughly `duration` seconds."""

    def __init__(self, duration=20):
        self.duration = duration
        # Calibrate: time n trips through a trivial generator, then scale up.
        n = 1000
        start = time.perf_counter()

        def test_iter():
            for index in range(n):
                yield index

        for _ in test_iter():
            pass
        # Single timer read (the original read the clock twice in a row,
        # discarding the first value).
        stop = time.perf_counter()
        time_per_iter = ((stop - start) / n) * 5  # rough adjustment factor
        self.num_iters = int(self.duration / time_per_iter)

    def __len__(self):
        return self.num_iters

    def __iter__(self):
        for index in range(self.num_iters):
            yield index
def check_performance_versus_tqdm():
    """Compare progress-bar overhead: tqdm vs progiter vs ubelt's ProgIter.

    Iterates the same ~3-second iterable under each progress wrapper and
    prints the three elapsed times.
    """
    import progiter
    import tqdm
    import ubelt as ub
    self = iterable = LongIterable(duration=3)
    # Identical settings for all three wrappers to keep the comparison fair.
    progkw = dict(
        leave=True,
        mininterval=2.0,
    )
    with ub.Timer('tqdm') as timer1:
        Progress = tqdm.tqdm
        prog = Progress(iterable, **progkw)
        for idx in prog:
            pass
    with ub.Timer('progiter.progiter') as timer2:
        Progress = progiter.ProgIter
        prog = Progress(iterable, **progkw)
        for idx in prog:
            pass
    with ub.Timer('ub progiter') as timer3:
        Progress = ub.ProgIter
        prog = Progress(iterable, **progkw)
        for idx in prog:
            pass
    print('timer1.elapsed = {!r}'.format(timer1.elapsed))
    print('timer2.elapsed = {!r}'.format(timer2.elapsed))
    print('timer3.elapsed = {!r}'.format(timer3.elapsed))
|
import os
import cv2
import re
import glob
import math
import torch
import numpy as np
from skimage import io
from torchvision import transforms
from torch.utils.data import Dataset
from config import parser
age_cls_unit = int(parser['RacNet']['age_cls_unit'])

# distribution of IMDB-WIKi dataset I: IMDB-Wiki
imdb_distr = [25, 63, 145, 54, 46, 113, 168, 232, 455, 556,
              752, 1089, 1285, 1654, 1819, 1844, 2334, 2828,
              3346, 4493, 6279, 7414, 7706, 9300, 9512, 11489,
              10481, 12483, 11280, 13096, 12766, 14346, 13296,
              12525, 12568, 12278, 12694, 11115, 12089, 11994,
              9960, 9599, 9609, 8967, 7940, 8267, 7733, 6292,
              6235, 5596, 5129, 4864, 4466, 4278, 3515, 3644,
              3032, 2841, 2917, 2755, 2853, 2380, 2169, 2084,
              1763, 1671, 1556, 1343, 1320, 1121, 1196, 949,
              912, 710, 633, 581, 678, 532, 491, 428, 367,
              339, 298, 203, 258, 161, 136, 134, 121, 63, 63,
              82, 40, 37, 24, 16, 18, 11, 4, 9]
# Collapse every age >= age_cls_unit into the last bucket, then truncate.
imdb_distr[age_cls_unit - 1] = sum(imdb_distr[age_cls_unit - 1:])
imdb_distr = imdb_distr[:age_cls_unit]
imdb_distr = np.array(imdb_distr, dtype='float')

# distribution of test dataset: FG-NET
fg_distr = [10, 10, 10, 10, 10, 10, 10, 10, 10, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10,
            9, 8, 8, 9, 9, 5, 7, 6, 6, 7, 6, 9, 5, 4, 6, 5, 7, 6, 3, 3, 5, 5, 4, 4, 2,
            3, 5, 2, 2, 2, 3, 2, 3, 3, 2, 2, 2, 0, 0, 1, 0, 1, 3, 1, 1, 0, 0, 0, 1, 0, 0]
fg_distr[age_cls_unit - 1] = sum(fg_distr[age_cls_unit - 1:])
fg_distr = fg_distr[:age_cls_unit]
# +1 smoothing avoids zero counts in the numerator below.
fg_distr = np.array(fg_distr, dtype='float') + 1

# step 1: correct different distribution between datasets
loss_weight = fg_distr / imdb_distr

# step 2: normalize the weight so that the expected weight for a random sample
# from training dataset equals to 1, i.e. sum(weight * 1/imdb_distr ) = 1
loss_weight = loss_weight / sum(loss_weight / imdb_distr)

# >>> (loss_weight * 100).astype('int')
# array([1398, 554, 241, 647, 760, 309, 208, 150, 76, 57, 46,
#        32, 27, 21, 19, 18, 14, 12, 10, 7, 4, 3,
#        4, 3, 2, 2, 2, 1, 2, 1, 2, 1, 1,
#        1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1,
#        1, 2, 1, 1, 1, 2, 1, 2, 2, 2, 2,
#        2, 1, 1, 2, 0])
loss_weight = torch.from_numpy(np.array(loss_weight, dtype='float'))
loss_weight = loss_weight.type(torch.FloatTensor)
class FaceDataset(Dataset):
    """ read images from disk dynamically """

    def __init__(self, datapath, transformer):
        """
        init function
        :param datapath: datapath to aligned folder
        :param transformer: image transformer
        """
        if datapath[-1] != '/':
            print("[WARNING] PARAM: datapath SHOULD END WITH '/'")
            datapath += '/'
        self.datapath = datapath
        # Relative file names of all jpgs under datapath.
        self.pics = [f[len(datapath) : ] for f in
                     glob.glob(datapath + "*.jpg")]
        self.transformer = transformer
        # NOTE: attribute name 'age_divde' (sic) is kept - callers may rely on it.
        self.age_divde = float(parser['DATA']['age_divide'])
        self.age_cls_unit = int(parser['RacNet']['age_cls_unit'])
        # Precomputed per-age soft (Gaussian) and hard (one-hot) label vectors.
        self.age_cls = {x: self.GaussianProb(x) for x in range(1, self.age_cls_unit + 1)}
        self.age_cls_zeroone = {x: self.ZeroOneProb(x) for x in range(1, self.age_cls_unit + 1)}

    def __len__(self):
        return len(self.pics)

    def GaussianProb(self, true, var = 2.5):
        """Soft label: Gaussian bump centered at the true age, rescaled to max 1.

        The normalization constant is irrelevant because of the final division
        by probs.max().
        """
        x = np.array(range(1, self.age_cls_unit + 1), dtype='float')
        probs = np.exp(-np.square(x - true) / (2 * var ** 2)) / (var * (2 * np.pi ** .5))
        return probs / probs.max()

    def ZeroOneProb(self, true):
        """Hard label: one-hot vector with a 1 at the true age bucket."""
        x = np.zeros(shape=(self.age_cls_unit, ))
        x[true - 1] = 1
        return x

    def __getitem__(self, idx):
        """
        get images and labels
        :param idx: image index
        :return: image: transformed image, gender: torch.LongTensor, age: torch.FloatTensor
        """
        # read image and labels
        img_name = self.datapath + self.pics[idx]
        img = io.imread(img_name)
        if len(img.shape) == 2:  # gray image
            img = np.repeat(img[:, :, np.newaxis], 3, axis=2)
        # File names encode labels as <age>_<gender>_<rest>.jpg.
        # NOTE(review): the trailing '.' in the pattern is an unescaped regex
        # dot (matches any char) - works for these names, but verify.
        (age, gender) = re.findall(r"([^_]*)_([^_]*)_[^_]*.jpg", self.pics[idx])[0]
        # Clamp age into [1, age_cls_unit].
        age = max(1., min(float(age), float(self.age_cls_unit)))

        # preprcess images
        if self.transformer:
            img = transforms.ToPILImage()(img)
            image = self.transformer(img)
        else:
            image = torch.from_numpy(img)

        # preprocess labels
        gender = float(gender)
        gender = torch.from_numpy(np.array([gender], dtype='float'))
        gender = gender.type(torch.LongTensor)
        # Regression target is the age scaled down by age_divide.
        age_rgs_label = torch.from_numpy(np.array([age / self.age_divde], dtype='float'))
        age_rgs_label = age_rgs_label.type(torch.FloatTensor)
        # Classification target is the precomputed Gaussian soft label.
        age_cls_label = self.age_cls[int(age)]
        # age_cls_label = self.age_cls_zeroone[int(age)]
        age_cls_label = torch.from_numpy(np.array([age_cls_label], dtype='float'))
        age_cls_label = age_cls_label.type(torch.FloatTensor)
        # image of shape [256, 256]
        # gender of shape [,1] and value in {0, 1}
        # age of shape [,1] and value in [0 ~ 10)
        return image, gender, age_rgs_label, age_cls_label
|
"""API Blueprint
This is a subclass of Flask's Blueprint
It provides added features:
- Decorators to specify Marshmallow schema for view functions I/O
- API documentation registration
Documentation process works in several steps:
- At import time
- When a MethodView or a view function is decorated, relevant information
is automatically added to the object's ``_apidoc`` attribute.
- The ``Blueprint.doc`` decorator stores additional information in a separate
``_api_manual_doc``. It allows the user to specify documentation
information that flask-smorest can not - or does not yet - infer from the
code.
- The ``Blueprint.route`` decorator registers the endpoint in the Blueprint
and gathers all information about the endpoint in
``Blueprint._auto_docs[endpoint]`` and
``Blueprint._manual_docs[endpoint]``.
- At initialization time
- Schema instances are replaced either by their reference in the `schemas`
section of the spec if applicable, otherwise by their json representation.
- Automatic documentation is adapted to OpenAPI version and deep-merged with
manual documentation.
- Endpoints documentation is registered in the APISpec object.
"""
from collections import OrderedDict
from functools import wraps
from copy import deepcopy
from flask import Blueprint as FlaskBlueprint
from flask.views import MethodViewType
from .utils import deepupdate, load_info_from_docstring
from .arguments import ArgumentsMixin
from .response import ResponseMixin
from .pagination import PaginationMixin
from .etag import EtagMixin
from .spec import (
DEFAULT_REQUEST_BODY_CONTENT_TYPE, DEFAULT_RESPONSE_CONTENT_TYPE)
class Blueprint(
        FlaskBlueprint,
        ArgumentsMixin, ResponseMixin, PaginationMixin, EtagMixin):
    """Blueprint that registers info in API documentation"""

    # Order in which the methods are presented in the spec
    HTTP_METHODS = ['OPTIONS', 'HEAD', 'GET', 'POST', 'PUT', 'PATCH', 'DELETE']

    # Default content type for request bodies, keyed by argument location.
    DEFAULT_LOCATION_CONTENT_TYPE_MAPPING = {
        "json": "application/json",
        "form": "application/x-www-form-urlencoded",
        "files": "multipart/form-data",
    }

    # Separator between docstring summary/description and ignored trailing text.
    DOCSTRING_INFO_DELIMITER = "---"

    def __init__(self, *args, **kwargs):
        self.description = kwargs.pop('description', '')
        super().__init__(*args, **kwargs)
        # _[manual|auto]_docs are ordered dicts storing endpoints documentation
        # {
        #     endpoint: {
        #         'get': documentation,
        #         'post': documentation,
        #         ...
        #     },
        #     ...
        # }
        self._auto_docs = OrderedDict()
        self._manual_docs = OrderedDict()
        self._endpoints = []

    def route(self, rule, *, parameters=None, **options):
        """Decorator to register url rule in application

        Also stores doc info for later registration

        Use this to decorate a :class:`MethodView <flask.views.MethodView>` or
        a resource function.

        :param str rule: URL rule as string.
        :param str endpoint: Endpoint for the registered URL rule (defaults
            to function name).
        :param list parameters: List of parameters relevant to all operations
            in this path, only used to document the resource.
        :param dict options: Options to be forwarded to the underlying
            :class:`werkzeug.routing.Rule <Rule>` object.
        """
        def decorator(func):
            # By default, endpoint name is function name
            endpoint = options.pop('endpoint', func.__name__)
            # Prevent registering several times the same endpoint
            # by silently renaming the endpoint in case of collision
            if endpoint in self._endpoints:
                endpoint = '{}_{}'.format(endpoint, len(self._endpoints))
            self._endpoints.append(endpoint)
            if isinstance(func, MethodViewType):
                view_func = func.as_view(endpoint)
            else:
                view_func = func
            # Add URL rule in Flask and store endpoint documentation
            self.add_url_rule(rule, endpoint, view_func, **options)
            self._store_endpoint_docs(endpoint, func, parameters, **options)
            return func
        return decorator

    def _store_endpoint_docs(self, endpoint, obj, parameters, **options):
        """Store view or function doc info"""
        endpoint_auto_doc = self._auto_docs.setdefault(
            endpoint, OrderedDict())
        endpoint_manual_doc = self._manual_docs.setdefault(
            endpoint, OrderedDict())

        def store_method_docs(method, function):
            """Add auto and manual doc to table for later registration"""
            # Get auto documentation from decorators
            # and summary/description from docstring
            # Get manual documentation from @doc decorator
            auto_doc = getattr(function, '_apidoc', {})
            auto_doc.update(
                load_info_from_docstring(
                    function.__doc__,
                    delimiter=self.DOCSTRING_INFO_DELIMITER
                )
            )
            manual_doc = getattr(function, '_api_manual_doc', {})
            # Store function auto and manual docs for later registration
            method_l = method.lower()
            endpoint_auto_doc[method_l] = auto_doc
            endpoint_manual_doc[method_l] = manual_doc

        # MethodView (class)
        if isinstance(obj, MethodViewType):
            for method in self.HTTP_METHODS:
                if method in obj.methods:
                    func = getattr(obj, method.lower())
                    store_method_docs(method, func)
        # Function
        else:
            methods = options.pop('methods', None) or ['GET']
            for method in methods:
                store_method_docs(method, obj)

        # Path-level parameters shared by all operations of the endpoint.
        endpoint_auto_doc['parameters'] = parameters

    def register_views_in_doc(self, app, spec):
        """Register views information in documentation

        If a schema in a parameter or a response appears in the spec
        `schemas` section, it is replaced by a reference in the parameter or
        response documentation:

        "schema":{"$ref": "#/components/schemas/MySchema"}
        """
        # This method uses the documentation information associated with each
        # endpoint in self._[auto|manual]_docs to provide documentation for
        # corresponding route to the spec object.
        # Deepcopy to avoid mutating the source
        # Allows registering blueprint multiple times (e.g. when creating
        # multiple apps during tests)
        auto_docs = deepcopy(self._auto_docs)
        for endpoint, endpoint_auto_doc in auto_docs.items():
            parameters = endpoint_auto_doc.pop('parameters')
            doc = OrderedDict()
            for method_l, endpoint_doc in endpoint_auto_doc.items():
                # Format operations documentation in OpenAPI structure
                self._prepare_doc(endpoint_doc, spec.openapi_version)
                # Tag all operations with Blueprint name
                endpoint_doc['tags'] = [self.name]
                # Merge auto_doc and manual_doc into doc
                manual_doc = self._manual_docs[endpoint][method_l]
                doc[method_l] = deepupdate(endpoint_doc, manual_doc)

            # Thanks to self.route, there can only be one rule per endpoint
            full_endpoint = '.'.join((self.name, endpoint))
            rule = next(app.url_map.iter_rules(full_endpoint))
            spec.path(rule=rule, operations=doc, parameters=parameters)

    def _prepare_doc(self, operation, openapi_version):
        """Format operation documentation in OpenAPI structure

        The decorators store all documentation information in a dict structure
        that is close to OpenAPI doc structure, so this information could
        _almost_ be copied as is. Yet, some adjustments may have to be
        performed, especially if the spec structure differs between OpenAPI
        versions: the OpenAPI version is not known when the decorators are
        applied but only at registration time when this method is called.
        """
        # OAS 2
        if openapi_version.major < 3:
            if 'responses' in operation:
                for resp in operation['responses'].values():
                    if 'example' in resp:
                        resp['examples'] = {
                            DEFAULT_RESPONSE_CONTENT_TYPE: resp.pop('example')}
            if 'parameters' in operation:
                for param in operation['parameters']:
                    if param['in'] in (
                            self.DEFAULT_LOCATION_CONTENT_TYPE_MAPPING
                    ):
                        content_type = (
                            param.pop('content_type', None) or
                            self.DEFAULT_LOCATION_CONTENT_TYPE_MAPPING[
                                param['in']]
                        )
                        if content_type != DEFAULT_REQUEST_BODY_CONTENT_TYPE:
                            operation['consumes'] = [content_type, ]
                        # body and formData are mutually exclusive
                        break
        # OAS 3
        else:
            if 'responses' in operation:
                for resp in operation['responses'].values():
                    for field in ('schema', 'example', 'examples'):
                        if field in resp:
                            (
                                resp
                                .setdefault('content', {})
                                .setdefault(DEFAULT_RESPONSE_CONTENT_TYPE, {})
                                [field]
                            ) = resp.pop(field)
            if 'parameters' in operation:
                for param in operation['parameters']:
                    if param['in'] in (
                            self.DEFAULT_LOCATION_CONTENT_TYPE_MAPPING
                    ):
                        # In OAS 3, body-like parameters move to requestBody.
                        request_body = {
                            x: param[x]
                            for x in ('description', 'required')
                            if x in param
                        }
                        fields = {
                            x: param.pop(x)
                            for x in ('schema', 'example', 'examples')
                            if x in param
                        }
                        content_type = (
                            param.pop('content_type', None) or
                            self.DEFAULT_LOCATION_CONTENT_TYPE_MAPPING[
                                param['in']]
                        )
                        request_body['content'] = {content_type: fields}
                        operation['requestBody'] = request_body
                        # There can be only one requestBody
                        operation['parameters'].remove(param)
                        if not operation['parameters']:
                            del operation['parameters']
                        break

    @staticmethod
    def doc(**kwargs):
        """Decorator adding description attributes to a view function

        Values passed as kwargs are copied verbatim in the docs

            Example: ::

                @blp.doc(description="Return pets based on ID",
                         summary="Find pets by ID"
                         )
                def get(...):
                    ...
        """
        def decorator(func):

            @wraps(func)
            def wrapper(*f_args, **f_kwargs):
                return func(*f_args, **f_kwargs)

            # Don't merge manual doc with auto-documentation right now.
            # Store it in a separate attribute to merge it later.
            # The deepcopy avoids modifying the wrapped function doc
            wrapper._api_manual_doc = deepupdate(
                deepcopy(getattr(wrapper, '_api_manual_doc', {})), kwargs)
            return wrapper
        return decorator
|
# coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.help_files import helps # pylint: disable=unused-import
# pylint: disable=line-too-long, too-many-lines
# Help strings for the `az maps` command group, consumed by knack.
# Each value is a YAML document: `type` (group/command), summaries and
# optional parameter/example entries.

helps['maps'] = """
type: group
short-summary: Manage Azure Maps.
"""

helps['maps account'] = """
type: group
short-summary: Manage Azure Maps accounts.
"""

helps['maps account create'] = """
type: command
short-summary: Create a maps account.
parameters:
  - name: --accept-tos
    short-summary: Accept the Terms of Service, and do not prompt for confirmation.
    long-summary: |
        By creating an Azure Maps account, you agree that you have read and agree to the
        License (https://azure.microsoft.com/support/legal/) and
        Privacy Statement (https://privacy.microsoft.com/privacystatement).
"""

helps['maps account delete'] = """
type: command
short-summary: Delete a maps account.
examples:
  - name: Delete a maps account. (autogenerated)
    text: az maps account delete --name MyMapsAccount --resource-group MyResourceGroup
    crafted: true
"""

helps['maps account keys'] = """
type: group
short-summary: Manage Azure Maps account keys.
"""

helps['maps account keys list'] = """
type: command
short-summary: List the keys to use with the Maps APIs.
long-summary: |
    A key is used to authenticate and authorize access to the Maps REST APIs. Only one key is needed at a time; two are given to provide seamless key regeneration.
examples:
  - name: List the keys to use with the Maps APIs (autogenerated)
    text: az maps account keys list --name MyMapsAccount --resource-group MyResourceGroup
    crafted: true
"""

helps['maps account keys renew'] = """
type: command
short-summary: Renew either the primary or secondary key for use with the Maps APIs.
long-summary: |
    This command immediately invalidates old API keys. Only the renewed keys can be used to connect to maps.
"""

helps['maps account list'] = """
type: command
short-summary: Show all maps accounts in a subscription or in a resource group.
"""

helps['maps account show'] = """
type: command
short-summary: Show the details of a maps account.
examples:
  - name: Show the details of a maps account. (autogenerated)
    text: az maps account show --name MyMapsAccount --resource-group MyResourceGroup
    crafted: true
"""

helps['maps account update'] = """
type: command
short-summary: Update the properties of a maps account.
"""
"""
Routines for input and output.
This includes routines to produce, merge and analyse data produced by
simulations.
:Author:
Eric Huang
"""
import numpy as np
import datetime
import os
import json
from .bpauli import bvectors_to_ints
from .utils import sizeof_fmt
def serialize_results(
    i_trial: int, n_trials: int,
    L_list: np.ndarray, p_list: np.ndarray, L_repeats: np.ndarray,
    start_time: datetime.datetime, time_elapsed: datetime.timedelta,
    time_remaining: datetime.timedelta, eta: datetime.datetime,
    p_est: np.ndarray, p_se: np.ndarray,
    n_fail: np.ndarray, n_try: np.ndarray,
    effective_errors: list
) -> dict:
    """Convert results to a JSON-serializable dict.

    Groups the inputs into 'parameters', 'time', 'statistics' and
    'results' sections; numpy arrays are converted to plain lists and
    timestamps to strings.
    """
    parameters = {
        'i_trial': i_trial,
        'n_trials': n_trials,
        'L_list': L_list.tolist(),
        'p_list': p_list.tolist(),
        'L_repeats': L_repeats.tolist(),
        # Number of qubits 3*L^3 for each lattice size L.
        'n_list': [int(3*L**3) for L in L_list],
    }
    timing = {
        'start_time': str(start_time),
        'time_elapsed': str(time_elapsed),
        'time_remaining': str(time_remaining),
        'eta': str(eta),
    }
    statistics = {
        'p_est': p_est.tolist(),
        'p_se': p_se.tolist(),
    }
    # Pack the effective errors, indexed [i_L][i_p], into integers.
    packed_errors = []
    for i_L in range(len(L_list)):
        row = []
        for i_p in range(len(p_list)):
            row.append(bvectors_to_ints(effective_errors[i_L][i_p]))
        packed_errors.append(row)
    results = {
        'n_fail': n_fail.tolist(),
        'n_try': n_try.tolist(),
        'effective_errors': packed_errors,
    }
    return {
        'parameters': parameters,
        'time': timing,
        'statistics': statistics,
        'results': results,
    }
def dump_results(export_json: str, results_dict: dict, verbose: bool = True):
    """Save the results dict to a JSON file.

    When verbose, report the destination path and the file size.
    """
    with open(export_json, 'w') as handle:
        json.dump(results_dict, handle)
    if not verbose:
        return
    size = sizeof_fmt(os.path.getsize(export_json))
    print(f'Results written to {export_json} ({size})')
|
#coding: UTF-8
import ConfigParser
configFile='config.ini'
def get_butlers_from_ini():
parser = ConfigParser.ConfigParser()
try:
parser.readfp(open(configFile))
modules = parser.get("Cases","modules")
return modules.split(",")
except:
print "Read config.ini error"
|
from openerp.osv import osv, fields
from datetime import date
import datetime
import re
class student_ragistration(osv.osv):
    """Student registration record (OpenERP 7 osv model).

    Holds the student's personal details, the parent's details and the
    chosen course/branch, with a simple new -> approve -> confirmed
    workflow driven by the mymod_* button handlers.
    """
    _name = "student_reg_rec"
    _columns = {
        # Workflow status; defaults to 'new' (see _defaults below).
        'state': fields.selection(
            [('new', 'New'),('approve', 'Approve'),('confirmed', 'Confirmed')], 'Status'),
        # Auto-assigned from the 'rec_seq' ir.sequence in create().
        'seq_id_a' : fields.char("Sequence", readonly=True),
        'student_id' : fields.char("Student Id"),
        # Student name split across first/middle/last fields; the blank
        # labels make the three inputs render side by side in the form.
        'student_fname' : fields.char("Name"),
        'student_mname' : fields.char(" "),
        'student_lname' : fields.char(" "),
        'sex' : fields.selection([('m','Male'),('f','Female')],"Gender"),
        'mob_no' : fields.char("Mobile No."),
        # Student address block.
        'street': fields.char('Address'),
        'street2': fields.char(' '),
        'zip': fields.char(' ', change_default=True, size=24),
        'city': fields.char(" "),
        'state_id': fields.many2one("state_reg", ' '),
        'country_id': fields.many2one('country_reg', ' '),
        'date_of_birth': fields.date("DOB"),
        'email' : fields.char("Email Id"),
        'age': fields.char("Age"),
        #------------------parents information address--------------------------------
        'parent_fname' : fields.char("Name"),
        'parent_mname' : fields.char(" "),
        'parent_lname' : fields.char(" "),
        'parent_occupation' : fields.char("Occupation"),
        'sex_' : fields.selection([('m','Male'),('f','Female')],"Gender"),
        'pmob_no' : fields.char("Mobile No."),
        'pstreet': fields.char('Address'),
        'pstreet2': fields.char(' '),
        'pzip': fields.char(' ', change_default=True, size=24),
        'pcity': fields.char(" "),
        'pstate_id': fields.many2one("state_reg", ' '),
        'pcountry_id': fields.many2one('country_reg', ' '),
        'pdate_of_birth': fields.date("DOB"),
        # NOTE(review): this column name contains a space; unconventional
        # for an osv column identifier -- confirm it is intended.
        'pEmail Id' : fields.char("Email Id"),
        #---------------------------
        'course_name' :fields.selection([('mca',"MCA"),('btech','BTECH')],"Course"),
        'branch_name' : fields.selection([("cs","CS"),('it',"IT"),('me','ME')],"Branch"),
        #---------------------------------------------------------------------------------------------------------
    }
    _defaults = {
        'state': 'new',
        'seq_id_a':"Sequence#"
    }

    def create(self, cr, user, vals, context=None):
        # Assign the next value of the 'rec_seq' ir.sequence before storing.
        vals['seq_id_a'] =\
            self.pool.get('ir.sequence').get(cr, user, 'rec_seq')
        return super(student_ragistration, self).create(cr, user, vals, context)

    # Workflow button handlers.
    def mymod_new(self, cr, uid, ids):
        # No-op: the state write is commented out, so this only acks the button.
        #self.write(cr, uid, ids, { 'state' : 'new' })
        return True

    def mymod_approve(self, cr, uid, ids):
        print"------------"
        self.write(cr, uid, ids, { 'state' : 'approve' })
        return True

    def mymod_confirmed(self, cr, uid, ids):
        self.write(cr, uid, ids, { 'state' : 'confirmed' })
        return True

    #---------------methods for constraints-------------------
    def mob_no_cons(self, cr, uid, ids, context=None):
        """Constraint: reject mobile numbers shorter than 10 characters."""
        record = self.browse(cr, uid, ids, context=context)
        print "``````````````````````````````````````", context
        for data in record:
            # NOTE(review): data.mob_no is False when the field is unset, so
            # len() would raise TypeError -- confirm the field is required.
            if len(data.mob_no) <10:
                return False
        return True

    _constraints = [
        (mob_no_cons, 'Error: You have entered an invalid mobile number', ['mob_no']),
    ]

    def onchange_email(self, cr, uid, ids, email):
        # Raises a user-facing error when the address does not match the
        # pattern; returns True (not an onchange dict) when it does.
        if email:
            if re.match("^.+\\@(\\[?)[a-zA-Z0-9\\-\\.]+\\.([a-zA-Z]{2,3}|[0-9]{1,3})(\\]?)$", email) != None:
                return True
            else:
                raise osv.except_osv('Invalid Email',"Please enter a valid email address")

    def onchange_zip_code(self, cr, uid, ids, zip):
        # Only 6-digit zip codes are accepted (Indian PIN format).
        if zip:
            if len(zip)==6:
                return True
            else:
                raise osv.except_osv('Invalid zip_code Code',"Please enter a valid zip_code Code")

    # Disabled draft of an age computation from date_of_birth.
    '''def onchange_getage_id(self,cr,uid,ids,date_of_birth,context=None):
        current_date=datetime.now()
        current_year=current_date.year
        birth_date = parser.parse(date_of_birth)
        current_age=current_year-birth_date.year
        val = {
            'age':current_age
        }
        return {'value': val}'''
#-----------------other relational classes----------------
#-----------------other relational classes----------------
class student_state(osv.osv):
    """Lookup table of states/provinces referenced by registration addresses."""
    _name = "state_reg"
    # NOTE(review): the record-name column identifier contains a space;
    # it works as a dict key but is unconventional -- confirm intended.
    _rec_name='State Name'
    _columns = {
        'State Name':fields.char(""),
    }
class student_country(osv.osv):
    """Lookup table of countries referenced by registration addresses."""
    _name = "country_reg"
    # NOTE(review): column identifier with a space, as in state_reg -- confirm.
    _rec_name='Country Name'
    _columns = {
        'Country Name':fields.char(""),
    }
#--------------------------------------------------
|
class Constraint(object):
    """A typed constraint carrying an implicit membership test.

    The default test rejects every argument combination.
    """

    # TODO - separate the test or keep it here? We could try to plan
    # without the test.
    def __init__(self, types=tuple(), test=lambda *args: False):
        self.types = types
        self.test = test
class ConstraintType(Constraint):
    """Constraint variant for use when combining in ExplicitSet.

    Intended to carry extra attributes about what makes this constraint
    interesting; currently behaviorally identical to Constraint.
    """
    pass
#####################################
class ExplicitSet(object):
    """Base class for sets of out_vars values conditioned on in_vars.

    Stores a mapping from each constraint to the variables it involves.
    """

    def __init__(self, in_vars, out_vars, constraint_pairs):
        self.in_vars = in_vars  # Conditioned variables
        self.out_vars = out_vars  # The order matters here
        # dict() copies the mapping directly: the original dict
        # comprehension only rebuilt the same key/value pairs and used
        # Python 2-only iteritems(), which breaks under Python 3.
        self.constraints = dict(constraint_pairs)
class FiniteSet(ExplicitSet):
    """An explicit set whose conditioned instances are finite lists."""

    def __init__(self, in_vars, out_vars, constraint_pairs):
        super(FiniteSet, self).__init__(in_vars, out_vars, constraint_pairs)

    def construct_set(self, in_values):
        # Returns a set (list) on the fly.
        assert len(in_values) == len(self.in_vars)  # And types match
        raise NotImplementedError()
class CountableSet(ExplicitSet):
    """An explicit set whose conditioned instances are generators."""

    def __init__(self, in_vars, out_vars, constraint_pairs):
        super(CountableSet, self).__init__(in_vars, out_vars, constraint_pairs)

    def construct_set(self, in_values):
        # Returns a generator on the fly.
        assert len(in_values) == len(self.in_vars)  # And types match
        # Placeholder enumeration until a real implementation exists.
        for value in range(4):
            yield value
# NOTE - assume chart contained within zero and one
# NOTE - maybe manifold means something more abstract now
class Manifold(ExplicitSet):
    """An explicit set described by charts into the open cube Y = (0, 1)^d.

    Charts are assumed invertible (phi and phi^-1 exist) with phi(X) a
    subset of Y; "codomain" below really means the range, i.e. the
    codomain of the inclusion map.
    """

    def __init__(self, in_vars, out_vars, constraint_pairs):
        # Dimensions that become latent once conditioned by in_vars.
        self.latent_d = []
        # Don't really need to know the exterior variables.
        # Chart functions phi: X -> (0, 1)^d; calling a chart outside its
        # domain is undefined -- NOTE(review): confirm intended semantics.
        self.chart_fns = []
        # TODO - what if you only want to produce some values from this
        # intersection?  Need to marginalize out the rest.
        super(Manifold, self).__init__(in_vars, out_vars, constraint_pairs)

    def get_chart(self, in_values):
        # Placeholder (returns None).  Should eventually certify that the
        # produced trajectory meets the test.
        return
class MetricSpace(ExplicitSet):
    """Placeholder for an explicit set equipped with a metric."""
    pass
class FiniteUnionSet(ExplicitSet):
    """Placeholder for a finite union of explicit sets."""
    pass
class CountableUnionSet(ExplicitSet):
    """Placeholder for a countable union of explicit sets."""
    pass
#####################################
class Atlas(object):
    """A collection of charts for an n-dimensional manifold embedded in R^m."""

    def __init__(self, m, n, charts):
        # The manifold dimension cannot exceed the ambient dimension.
        assert n <= m
        self.m = m
        self.n = n
        self.charts = charts
# Inclusion map
# InverseChart
# NOTE - assume domain is [0,1]
class Chart(object):
    """Pairs a chart map with its inverse.

    NOTE - the domain is assumed to be [0, 1].
    """

    def __init__(self, codomain_fn, inverse_fn):
        self.codomain_fn = codomain_fn
        self.inverse_fn = inverse_fn
class Foliation(object):
    """Placeholder for a decomposition of a manifold into leaves."""
    pass
|
"""
Copyright notice
================
Copyright (C) 2018
Julian Gruendner <juliangruendner@googlemail.com>
"""
import queue
import socketserver
import threading
import time
import ssl
import uuid
import os
import json
import ipaddress
from ds_http.ds_http import HTTPRequest, HTTPResponse
from logger import Logger
KEY_FILE = "./cert/queuekey.pem"
CERT_FILE = "./cert/queuecert.pem"
CACERT = "./cert/queuecacert.pem"
proxystate = None
class ProxyHandler(socketserver.StreamRequestHandler):
    """Serve one client connection of the request/response queue.

    Two kinds of clients connect: end clients whose requests get queued
    (execQueueRequest) and polling workers that fetch queued requests and
    post back responses (getQueuedRequest / setQueuedResponse).
    """

    def __init__(self, request, client_address, server):
        self.peer = True
        self.target = None
        # Just for debugging
        self._host = None
        self._port = 0
        socketserver.StreamRequestHandler.__init__(self, request, client_address, server)

    def handle(self):
        """Read one HTTP request, enforce the IP allow list, then dispatch."""
        global proxystate
        try:
            req = HTTPRequest.build(self.rfile)
        except Exception as e:
            proxystate.log.debug(e.__str__() + ": Error on reading request message")
            return
        if req is None:
            return
        proxystate.log.debug(req.serialize())
        req = req.clone()
        # X-Real-IP is expected to be set by the fronting reverse proxy.
        orig_ip = req.getHeader("X-Real-IP")
        if len(orig_ip) > 0:
            orig_ip = orig_ip[0]
        if not proxystate.isIPAllowed(orig_ip):
            proxystate.log.error("rejecting ip : " + str(orig_ip))
            return
        self.handleQpRequest(req)

    def handleQpRequest(self, req):
        """Dispatch on the query parameter that names the queue operation."""
        queryParams = req.getQueryParams()
        if 'getQueuedRequest' in queryParams:
            self.getQueuedRequest()
        elif 'setQueuedResponse' in queryParams:
            self.setQueuedResponse(req)
        elif 'resetQueue' in queryParams:
            self.resetQueue()
        elif 'ping' in queryParams:
            self.ping()
        elif 'queueSizes' in queryParams:
            self.get_q_sizes()
        else:
            self.execQueueRequest(req)

    def execQueueRequest(self, req):
        """Queue the request under a fresh UUID and wait for its response."""
        reqUu = str(uuid.uuid4())
        proxystate.log.debug("queueing request with id %s" % (reqUu))
        self.setQueuedRequest(req, reqUu)
        proxystate.log.debug("getting repsonse of queued request with id %s" % (reqUu))
        self.getQueuedResponse(reqUu)

    def setQueuedRequest(self, req, reqUu):
        """Tag req with its id, enqueue it and create its response queue."""
        try:
            req.addHeader('reqId', reqUu)
            proxystate.reqQueue.put(req)
            proxystate.resQueueList[reqUu] = queue.Queue()
        except queue.Full as e:
            proxystate.log.debug(e.__str__())
            return

    def getQueuedRequest(self):
        """Hand one queued request to a polling worker; 204 when none arrives.

        Fix: the original also caught queue.Full here, but Queue.get() only
        ever raises queue.Empty, so that branch was dead code.
        """
        try:
            req = proxystate.reqQueue.get(timeout=proxystate.requestTimeout)
        except queue.Empty:
            # Nothing queued within the timeout: tell the poller to retry.
            res = HTTPResponse('HTTP/1.1', 204, 'NO CONTENT')
            self.sendResponse(res.serialize())
            return
        res = HTTPResponse('HTTP/1.1', 200, 'OK')
        res.body = req.serialize()
        self.sendResponse(res.serialize())

    def setQueuedResponse(self, req):
        """Store a worker's response body for the waiting client."""
        try:
            reqId = req.getQueryParams()['reqId'][0]
            res = req.getBody()
            proxystate.resQueueList[reqId].put(res)
        except queue.Full as e:
            proxystate.log.debug(e.__str__())
            return
        res = HTTPResponse('HTTP/1.1', 200, 'OK')
        self.sendResponse(res.serialize())

    def getQueuedResponse(self, reqId):
        """Relay the response for reqId back to the client; 503 on timeout.

        Fix: on timeout the original fell through after sending the 503 and
        sent the same error response a second time; it also left the stale
        entry in resQueueList.  We now return after the error path and
        always drop the entry.
        """
        try:
            res = proxystate.resQueueList[reqId].get(timeout=proxystate.responseTimeout)
            del proxystate.resQueueList[reqId]
        except Exception:
            # Drop the stale response queue so the dict cannot grow forever.
            proxystate.resQueueList.pop(reqId, None)
            res = HTTPResponse('HTTP/1.1', 503, 'Queue timed out - poll server did not respond in seconds ' + str(proxystate.responseTimeout)).serialize()
            self.sendResponse(res)
            return
        proxystate.log.debug("sending response with id %s back to client" % (reqId))
        self.sendResponse(res)

    def resetQueue(self):
        """Drop all pending requests and response queues."""
        proxystate.resQueueList = {}
        proxystate.reqQueue.queue.clear()
        res = HTTPResponse('HTTP/1.1', 200, 'OK', body="queue reset \n")
        self.sendResponse(res.serialize())

    def ping(self):
        """Liveness probe."""
        proxystate.log.info("Being pinged")
        res = HTTPResponse('HTTP/1.1', 200, 'OK', body="queue is still alive \n")
        self.sendResponse(res.serialize())

    def get_q_sizes(self):
        """Report the request queue size and the ids of pending responses."""
        queue_sizes = {"request_q_size": proxystate.reqQueue.qsize(), "response_q_size": list(proxystate.resQueueList.keys())}
        res = HTTPResponse('HTTP/1.1', 200, 'OK', body=json.dumps(queue_sizes))
        self.sendResponse(res.serialize())

    def sendResponse(self, res):
        """Write a serialized response string to the client socket."""
        self.wfile.write(res.encode('latin-1'))
        self.wfile.flush()  # see if flushing improves performance
class ThreadedHTTPProxyServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    """TCP server that handles each client connection in its own thread."""
    # Allow quick restarts without waiting for TIME_WAIT sockets to expire.
    allow_reuse_address = True
class ProxyServer():
    """Owns the threaded TCP server fronting the request/response queue."""

    def __init__(self, init_state):
        global proxystate
        proxystate = init_state
        self.proxyServer_port = proxystate.listenport
        self.proxyServer_host = proxystate.listenaddr

    def startProxyServer(self):
        """Start serving (optionally TLS-wrapped) and block forever."""
        global proxystate
        self.proxyServer = ThreadedHTTPProxyServer((self.proxyServer_host, self.proxyServer_port), ProxyHandler)
        proxystate.log.info("Starting Queue...")
        if proxystate.https:
            proxystate.log.info("Starting Queue with HTTPS wrapper...")
            # Fix: ssl.wrap_socket() was deprecated in 3.7 and removed in
            # Python 3.12; build an explicit server-side context instead.
            context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
            context.load_cert_chain(certfile=CERT_FILE, keyfile=KEY_FILE)
            if os.path.isfile(CACERT):
                # Optional CA bundle, as in the original ca_certs argument.
                context.load_verify_locations(cafile=CACERT)
            self.proxyServer.socket = context.wrap_socket(self.proxyServer.socket, server_side=True)
        server_thread = threading.Thread(target=self.proxyServer.serve_forever)
        # .daemon replaces the deprecated setDaemon(); the serving thread
        # must not keep the process alive on shutdown.
        server_thread.daemon = True
        proxystate.log.info("Starting queue server, with configurations:" +
                            " port: %d, loglevel: %s, req_timeout: %s, res_timeout: %s, allowed_ips: %s, "
                            % (self.proxyServer_port, proxystate.log.get_level(), proxystate.requestTimeout,
                               proxystate.responseTimeout, proxystate.allowed_ips))
        server_thread.start()
        # Keep the main thread alive; the daemon thread does the serving.
        while True:
            time.sleep(0.1)

    def stopProxyServer(self):
        """Shut the serve_forever loop down."""
        self.proxyServer.shutdown()
class ProxyState:
    """Shared configuration and queue state for the proxy server."""

    def __init__(self, port=8001, addr="0.0.0.0"):
        self.listenport = port
        self.listenaddr = addr
        # Internal state
        self.log = Logger()
        self.redirect = None
        self.reqQueue = queue.Queue()
        self.resQueueList = {}
        self.responseTimeout = None
        self.requestTimeout = None
        self.allowed_ips = None

    @staticmethod
    def getTargetHost(req):
        """Return the host to forward to, honouring any configured redirect."""
        global proxystate
        if proxystate.redirect is not None:
            return proxystate.redirect
        return req.getHost()

    def isIPAllowed(self, ip):
        """Return True when ip matches an allow-list entry (address or network)."""
        for entry in self.allowed_ips:
            try:
                # The entry may be a network in CIDR form.
                if ipaddress.ip_address(ip) in ipaddress.ip_network(entry):
                    return True
            except ValueError:
                try:
                    # The entry may instead be a single address.
                    if ipaddress.ip_address(ip) == ipaddress.ip_address(entry):
                        return True
                except ValueError:
                    # One value is not an IP address; skip this entry.
                    pass
        # IP was neither found as an IP address nor in a network.
        return False
|
# Copyright 2010 New Relic, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from newrelic.packages import six
from newrelic.api.external_trace import ExternalTrace
from newrelic.api.transaction import current_transaction
from newrelic.common.object_wrapper import ObjectWrapper
def httplib_endheaders_wrapper(wrapped, instance, args, kwargs,
        scheme, library):
    """Wrap endheaders(): open an ExternalTrace and inject NR request headers.

    scheme and library are the defaults baked in via functools.partial at
    instrumentation time; a higher-level instrumented library may override
    them per-connection through _nr_library_info.
    """
    transaction = current_transaction()
    if transaction is None:
        # Not inside a monitored transaction: call straight through.
        return wrapped(*args, **kwargs)

    def _connect_unbound(instance, *args, **kwargs):
        return instance

    if instance is None:
        # Unbound call: the connection object is the first positional arg.
        instance = _connect_unbound(*args, **kwargs)

    connection = instance

    if hasattr(connection, '_nr_library_info'):
        library, scheme = connection._nr_library_info

    url = '%s://%s:%s' % (scheme, connection.host, connection.port)

    # Check if the NR headers have already been added. This is just in
    # case a higher level library which uses httplib underneath so
    # happened to have been instrumented to also add the headers.
    try:
        skip_headers = getattr(connection, '_nr_skip_headers', False)

        with ExternalTrace(library=library, url=url) as tracer:
            # Add the tracer to the connection object. The tracer will be
            # used in getresponse() to add back into the external trace,
            # after the trace has already completed, details from the
            # response headers.
            if not skip_headers and hasattr(
                    tracer, 'generate_request_headers'):
                outgoing_headers = tracer.generate_request_headers(transaction)
                for header_name, header_value in outgoing_headers:
                    connection.putheader(header_name, header_value)

            connection._nr_external_tracer = tracer
            return wrapped(*args, **kwargs)
    finally:
        # Always clear the skip flag so a reused connection starts clean.
        try:
            del connection._nr_skip_headers
        except AttributeError:
            pass
def httplib_getresponse_wrapper(wrapped, instance, args, kwargs):
    """Feed response status/headers back into the trace left by endheaders()."""
    transaction = current_transaction()
    if transaction is None:
        return wrapped(*args, **kwargs)

    tracer = getattr(instance, '_nr_external_tracer', None)
    if not tracer:
        # endheaders() never ran (or ran outside a transaction).
        return wrapped(*args, **kwargs)

    response = wrapped(*args, **kwargs)

    # Drop the tracer reference only after the call succeeded so it doesn't
    # hold onto objects: if the call raised, a higher-level library may
    # retry with the same connection object (urllib3/requests on Py2.7).
    del instance._nr_external_tracer

    if hasattr(tracer, 'process_response'):
        tracer.process_response(response.status, response.getheaders())
    return response
def httplib_putheader_wrapper(wrapped, instance, args, kwargs):
    """Flag the connection when the caller sets New Relic headers itself."""
    transaction = current_transaction()
    if transaction is None:
        return wrapped(*args, **kwargs)

    # Remember if we see any NR headers being set. Seeing either one is
    # enough, but they should always both be getting set.
    def _is_nr_header(header, *_args, **_kwargs):
        return header.upper() in (
            'NEWRELIC', 'X-NEWRELIC-ID', 'X-NEWRELIC-TRANSACTION')

    if _is_nr_header(*args, **kwargs):
        instance._nr_skip_headers = True
    return wrapped(*args, **kwargs)
def instrument(module):
    """Wrap httplib/http.client connection methods with external tracing.

    endheaders() starts the external trace (per scheme), getresponse()
    records the response details, putheader() detects externally-set NR
    headers.
    """
    if six.PY2:
        library = 'httplib'
    else:
        library = 'http'

    # Capture the pristine method once.  HTTPSConnection inherits
    # endheaders from HTTPConnection, so the original code -- which
    # re-read module.HTTPConnection.endheaders after the first assignment
    # below -- wrapped the already-wrapped 'http' version for HTTPS and
    # traced every HTTPS request twice.
    original_endheaders = module.HTTPConnection.endheaders

    module.HTTPConnection.endheaders = ObjectWrapper(
        original_endheaders,
        None,
        functools.partial(httplib_endheaders_wrapper, scheme='http',
            library=library))
    module.HTTPSConnection.endheaders = ObjectWrapper(
        original_endheaders,
        None,
        functools.partial(httplib_endheaders_wrapper, scheme='https',
            library=library))
    module.HTTPConnection.getresponse = ObjectWrapper(
        module.HTTPConnection.getresponse,
        None,
        httplib_getresponse_wrapper)
    module.HTTPConnection.putheader = ObjectWrapper(
        module.HTTPConnection.putheader,
        None,
        httplib_putheader_wrapper)
|
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# NOTE(review): the 'seaborn' style name was renamed to 'seaborn-v0_8' in
# matplotlib >= 3.6 -- confirm the matplotlib version in use.
plt.style.use('seaborn')

# Fix: DataFrame.append was removed in pandas 2.0; pd.concat produces the
# same stacked frame (indexes preserved, like append did).
A = pd.concat([
    pd.read_csv('ref.csv', delim_whitespace=True),
    pd.read_csv('file.csv', delim_whitespace=True),
])

plt.figure(figsize=(11, 8))
plt.title('Performance')
plt.cla()

# Top panel: reward; remember its x-limits so all three panels align.
plt.subplot(311)
plt.title('Feedback and Gain')
sns.lineplot(x='Episode', y='Reward', data=A, hue='Control')
plt.xticks(range(0, 100, 10))
lims = plt.xlim()

# Middle panel: gain.
plt.subplot(312)
sns.lineplot(x='Episode', y='Gain', data=A, hue='Control')
plt.xticks(range(0, 100, 10))
plt.xlim(lims)

# Bottom panel: feedback.
plt.subplot(313)
sns.lineplot(x='Episode', y='Feedback', data=A, hue='Control')
plt.xticks(range(0, 100, 10))
plt.xlim(lims)

plt.tight_layout()
plt.savefig('live_view.pdf')
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import logging
import os
import numpy as np
import torch
from core.evaluate import accuracy
from core.inference import get_final_preds
from core.inference import get_max_preds
from utils.transforms import flip_back
from utils.vis import save_debug_images
from utils.utils import get_network_grad_flow
# from utils.vis import save_pretty_debug_images as save_debug_images
logger = logging.getLogger(__name__)
# --------------------------------------------------------------------------------
def train_lambda_012(config, train_loader, model, criterion_lambda, criterion, optimizer, epoch,
                     output_dir, tb_log_dir, writer_dict, print_prefix=''):
    """Train for one epoch with a lambda-conditioned model and three target sets.

    Each batch is forwarded three times, once per 2-d conditioning vector
    (lambda = [0,0], [1,0], [0,1]); run k is supervised by its own target
    set (a, b, c) via criterion_lambda, and each forward pass is followed
    by its own backward pass and optimizer step.

    Notes:
    - `criterion` and `tb_log_dir` are accepted for signature parity with
      sibling trainers but unused here.
    - NOTE(review): AverageMeter is not among this file's visible imports;
      presumably provided elsewhere in the package -- confirm.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acc = AverageMeter()
    model_grads = AverageMeter()
    diversity_losses = AverageMeter()  # kept for parity with sibling trainers; never updated here
    pose_losses = AverageMeter()
    # switch to train mode
    model.train()
    end = time.time()
    for i, (input, target_a, target_weight_a, meta_a, target_b, target_weight_b, meta_b, target_c, target_weight_c, meta_c) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        B, C, H, W = input.shape
        # Build the three 2-d binary conditioning vectors, one per target
        # set.  (The bare lambda_val assignments are vestigial labels and
        # are never read afterwards.)
        lambda_val = 0 ##binary dim0: 0, dim1: 0
        lambda_vec_zero = torch.zeros(B, 2).cuda()
        lambda_val = 1 ##binary dim0: 1, dim1: 0
        lambda_vec_one = torch.zeros(B, 2).cuda()
        lambda_vec_one[:, 0] += 1
        lambda_val = 2 ##binary dim0: 0, dim1: 1
        lambda_vec_two = torch.zeros(B, 2).cuda()
        lambda_vec_two[:, 1] += 1
        # (An earlier variant, removed here, duplicated the batch three
        # times and ran one concatenated forward pass over all lambda
        # vectors with a single combined loss and update.)
        target_a = target_a.cuda(non_blocking=True)
        target_weight_a = target_weight_a.cuda(non_blocking=True)
        target_b = target_b.cuda(non_blocking=True)
        target_weight_b = target_weight_b.cuda(non_blocking=True)
        target_c = target_c.cuda(non_blocking=True)
        target_weight_c = target_weight_c.cuda(non_blocking=True)
        # --------------------------------
        # Pass 1: condition on lambda=[0,0], supervise with target set a.
        outputs_zero = model(input, lambda_vec_zero)
        loss_a_lambda = criterion_lambda(outputs_zero, target_a, target_weight_a) ##size = B
        loss_a = loss_a_lambda.mean()
        optimizer.zero_grad()
        loss_a.backward()
        optimizer.step()
        # --------------------------------
        # Pass 2: condition on lambda=[1,0], supervise with target set b.
        outputs_one = model(input, lambda_vec_one)
        loss_b_lambda = criterion_lambda(outputs_one, target_b, target_weight_b) ##size = B
        loss_b = loss_b_lambda.mean()
        optimizer.zero_grad()
        loss_b.backward()
        optimizer.step()
        # --------------------------------
        # Pass 3: condition on lambda=[0,1], supervise with target set c.
        outputs_two = model(input, lambda_vec_two)
        loss_c_lambda = criterion_lambda(outputs_two, target_c, target_weight_c) ##size = B
        loss_c = loss_c_lambda.mean()
        optimizer.zero_grad()
        loss_c.backward()
        optimizer.step()
        # --------------------------------
        # Stack the three outputs (rows [0:B], [B:2B], [2B:3B]) for
        # accuracy computation and debug images below.
        output = torch.cat([outputs_zero, outputs_one, outputs_two], dim=0)
        loss = loss_a + loss_b + loss_c
        pose_loss = loss
        model_grad = get_network_grad_flow(model)
        model_grads.update(model_grad)
        # measure accuracy and record loss
        losses.update(loss.item(), input.size(0))
        pose_losses.update(pose_loss.item(), input.size(0))
        start_idx = 0; end_idx = start_idx + B
        _, avg_acc_a, cnt_a, pred_a = accuracy(output[start_idx:end_idx].detach().cpu().numpy(),
                                               target_a.detach().cpu().numpy())
        start_idx = B; end_idx = start_idx + B
        _, avg_acc_b, cnt_b, pred_b = accuracy(output[start_idx:end_idx].detach().cpu().numpy(),
                                               target_b.detach().cpu().numpy())
        start_idx = 2*B; end_idx = start_idx + B
        _, avg_acc_c, cnt_c, pred_c = accuracy(output[start_idx:end_idx].detach().cpu().numpy(),
                                               target_c.detach().cpu().numpy())
        # Only head a's accuracy is tracked in the running meter.
        acc.update(avg_acc_a, cnt_a)
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        # Progress line is logged every iteration (not gated by PRINT_FREQ).
        msg = 'Epoch: [{0}][{1}/{2}]\t' \
              'Time {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\t' \
              'Speed {speed:.1f} samples/s\t' \
              'Data {data_time.val:.3f}s ({data_time.avg:.3f}s)\t' \
              'Loss {loss.val:.5f} ({loss.avg:.5f})\t' \
              'Accuracy {acc.val:.3f} ({acc.avg:.3f})\t' \
              'model_grad {model_grad.val:.6f} ({model_grad.avg:.6f})\t' \
              'PoseLoss {pose_loss.val:.5f} ({pose_loss.avg:.5f})\t'.format(
                  epoch, i, len(train_loader), batch_time=batch_time,
                  speed=input.size(0)/batch_time.val,
                  data_time=data_time, loss=losses, acc=acc,
                  model_grad=model_grads,
                  pose_loss=pose_losses)
        logger.info(msg)
        if i % config.PRINT_FREQ == 0:
            save_size = min(16, B)
            writer = writer_dict['writer']
            global_steps = writer_dict['train_global_steps']
            writer.add_scalar('train_loss', losses.val, global_steps)
            writer.add_scalar('train_acc', acc.val, global_steps)
            writer_dict['train_global_steps'] = global_steps + 1
            meta_a['pred_joints_vis'] = torch.ones_like(meta_a['joints_vis'])
            meta_b['pred_joints_vis'] = torch.ones_like(meta_b['joints_vis'])
            meta_c['pred_joints_vis'] = torch.ones_like(meta_c['joints_vis'])
            prefix = '{}_epoch_{:09d}_iter_{}_{}'.format(os.path.join(output_dir, 'train'), epoch, i, print_prefix)
            # Channels reordered [2,1,0] for visualization and predictions
            # scaled by 4 -- presumably BGR->RGB and the heatmap stride;
            # NOTE(review): confirm against the dataset/model config.
            start_idx = 0; end_idx = start_idx + save_size
            save_debug_images(config, input[:save_size, [2,1,0], :, :], meta_a, target_a[:save_size], (pred_a*4)[:save_size], output[start_idx:end_idx], prefix, suffix='a')
            start_idx = B; end_idx = start_idx + save_size
            save_debug_images(config, input[:save_size, [2,1,0], :, :], meta_b, target_b[:save_size], (pred_b*4)[:save_size], output[start_idx:end_idx], prefix, suffix='b')
            start_idx = 2*B; end_idx = start_idx + save_size
            save_debug_images(config, input[:save_size, [2,1,0], :, :], meta_c, target_c[:save_size], (pred_c*4)[:save_size], output[start_idx:end_idx], prefix, suffix='c')
    return
# --------------------------------------------------------------------------------
def train_lambda_0123(config, train_loader, model, criterion_lambda, criterion, optimizer, epoch,
                      output_dir, tb_log_dir, writer_dict, print_prefix=''):
    """Run one training epoch using all four 2-bit lambda conditioning vectors.

    Each batch is forwarded four times — once per conditioning vector
    [0,0], [1,0], [0,1], [1,1] — and every forward pass is optimised against
    its own target set (a/b/c/d) with an independent backward()/step().

    Args:
        config: experiment config (PRINT_FREQ is read here).
        train_loader: yields (input, target/weight/meta) for the a, b, c, d sets.
        model: network called as model(input, lambda_vec).
        criterion_lambda: per-sample loss; returns a vector of size B.
        criterion: unused; kept for signature compatibility with the sibling
            train_* helpers in this file.
        optimizer: optimiser stepped once per conditioning vector.
        epoch: current epoch index (logging / debug-image names only).
        output_dir: directory for debug images.
        tb_log_dir: unused; kept for signature compatibility.
        writer_dict: tensorboard writer plus its 'train_global_steps' counter.
        print_prefix: tag appended to debug-image file names.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acc = AverageMeter()
    model_grads = AverageMeter()
    pose_losses = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()
    for i, (input, target_a, target_weight_a, meta_a, target_b, target_weight_b, meta_b, target_c, target_weight_c, meta_c, target_d, target_weight_d, meta_d) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        B, C, H, W = input.shape

        # Build the four conditioning vectors, one row per sample.
        # (The original's `lambda_val = 0..3` assignments were dead code —
        # the value was never read — and have been removed.)
        lambda_vec_zero = torch.zeros(B, 2).cuda()       # binary dim0: 0, dim1: 0
        lambda_vec_one = torch.zeros(B, 2).cuda()        # binary dim0: 1, dim1: 0
        lambda_vec_one[:, 0] += 1
        lambda_vec_two = torch.zeros(B, 2).cuda()        # binary dim0: 0, dim1: 1
        lambda_vec_two[:, 1] += 1
        lambda_vec_three = torch.zeros(B, 2).cuda()      # binary dim0: 1, dim1: 1
        lambda_vec_three[:, 0] += 1
        lambda_vec_three[:, 1] += 1

        # Move all targets and weights to the GPU.
        target_a = target_a.cuda(non_blocking=True)
        target_weight_a = target_weight_a.cuda(non_blocking=True)
        target_b = target_b.cuda(non_blocking=True)
        target_weight_b = target_weight_b.cuda(non_blocking=True)
        target_c = target_c.cuda(non_blocking=True)
        target_weight_c = target_weight_c.cuda(non_blocking=True)
        target_d = target_d.cuda(non_blocking=True)
        target_weight_d = target_weight_d.cuda(non_blocking=True)

        # One forward/backward/step per conditioning vector.
        outputs_zero = model(input, lambda_vec_zero)
        loss_a_lambda = criterion_lambda(outputs_zero, target_a, target_weight_a)  ##size = B
        loss_a = loss_a_lambda.mean()
        optimizer.zero_grad()
        loss_a.backward()
        optimizer.step()
        # --------------------------------
        outputs_one = model(input, lambda_vec_one)
        loss_b_lambda = criterion_lambda(outputs_one, target_b, target_weight_b)  ##size = B
        loss_b = loss_b_lambda.mean()
        optimizer.zero_grad()
        loss_b.backward()
        optimizer.step()
        # --------------------------------
        outputs_two = model(input, lambda_vec_two)
        loss_c_lambda = criterion_lambda(outputs_two, target_c, target_weight_c)  ##size = B
        loss_c = loss_c_lambda.mean()
        optimizer.zero_grad()
        loss_c.backward()
        optimizer.step()
        # --------------------------------
        outputs_three = model(input, lambda_vec_three)
        loss_d_lambda = criterion_lambda(outputs_three, target_d, target_weight_d)  ##size = B
        loss_d = loss_d_lambda.mean()
        optimizer.zero_grad()
        loss_d.backward()
        optimizer.step()

        # Stack the four outputs so accuracy/debug code can slice by B.
        output = torch.cat([outputs_zero, outputs_one, outputs_two, outputs_three], dim=0)
        loss = loss_a + loss_b + loss_c + loss_d
        pose_loss = loss

        model_grad = get_network_grad_flow(model)
        model_grads.update(model_grad)

        # measure accuracy and record loss
        losses.update(loss.item(), input.size(0))
        pose_losses.update(pose_loss.item(), input.size(0))

        start_idx = 0; end_idx = start_idx + B
        _, avg_acc_a, cnt_a, pred_a = accuracy(output[start_idx:end_idx].detach().cpu().numpy(),
                                               target_a.detach().cpu().numpy())
        start_idx = B; end_idx = start_idx + B
        _, avg_acc_b, cnt_b, pred_b = accuracy(output[start_idx:end_idx].detach().cpu().numpy(),
                                               target_b.detach().cpu().numpy())
        start_idx = 2*B; end_idx = start_idx + B
        _, avg_acc_c, cnt_c, pred_c = accuracy(output[start_idx:end_idx].detach().cpu().numpy(),
                                               target_c.detach().cpu().numpy())
        start_idx = 3*B; end_idx = start_idx + B
        _, avg_acc_d, cnt_d, pred_d = accuracy(output[start_idx:end_idx].detach().cpu().numpy(),
                                               target_d.detach().cpu().numpy())
        # Only the 'a' accuracy feeds the running meter; b/c/d predictions are
        # kept for the debug images below.
        acc.update(avg_acc_a, cnt_a)

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        msg = 'Epoch: [{0}][{1}/{2}]\t' \
              'Time {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\t' \
              'Speed {speed:.1f} samples/s\t' \
              'Data {data_time.val:.3f}s ({data_time.avg:.3f}s)\t' \
              'Loss {loss.val:.5f} ({loss.avg:.5f})\t' \
              'Accuracy {acc.val:.3f} ({acc.avg:.3f})\t' \
              'model_grad {model_grad.val:.6f} ({model_grad.avg:.6f})\t' \
              'PoseLoss {pose_loss.val:.5f} ({pose_loss.avg:.5f})\t'.format(
                  epoch, i, len(train_loader), batch_time=batch_time,
                  speed=input.size(0)/batch_time.val,
                  data_time=data_time, loss=losses, acc=acc,
                  model_grad=model_grads,
                  pose_loss=pose_losses)
        logger.info(msg)

        if i % config.PRINT_FREQ == 0:
            save_size = min(16, B)
            writer = writer_dict['writer']
            global_steps = writer_dict['train_global_steps']
            writer.add_scalar('train_loss', losses.val, global_steps)
            writer.add_scalar('train_acc', acc.val, global_steps)
            writer_dict['train_global_steps'] = global_steps + 1

            meta_a['pred_joints_vis'] = torch.ones_like(meta_a['joints_vis'])
            meta_b['pred_joints_vis'] = torch.ones_like(meta_b['joints_vis'])
            meta_c['pred_joints_vis'] = torch.ones_like(meta_c['joints_vis'])
            meta_d['pred_joints_vis'] = torch.ones_like(meta_d['joints_vis'])
            prefix = '{}_epoch_{:09d}_iter_{}_{}'.format(os.path.join(output_dir, 'train'), epoch, i, print_prefix)
            # input[:, [2,1,0]] reverses the channel order for saving
            # (presumably BGR<->RGB — confirm against the data loader).
            start_idx = 0; end_idx = start_idx + save_size
            save_debug_images(config, input[:save_size, [2,1,0], :, :], meta_a, target_a[:save_size], (pred_a*4)[:save_size], output[start_idx:end_idx], prefix, suffix='a')
            start_idx = B; end_idx = start_idx + save_size
            save_debug_images(config, input[:save_size, [2,1,0], :, :], meta_b, target_b[:save_size], (pred_b*4)[:save_size], output[start_idx:end_idx], prefix, suffix='b')
            start_idx = 2*B; end_idx = start_idx + save_size
            save_debug_images(config, input[:save_size, [2,1,0], :, :], meta_c, target_c[:save_size], (pred_c*4)[:save_size], output[start_idx:end_idx], prefix, suffix='c')
            start_idx = 3*B; end_idx = start_idx + save_size
            save_debug_images(config, input[:save_size, [2,1,0], :, :], meta_d, target_d[:save_size], (pred_d*4)[:save_size], output[start_idx:end_idx], prefix, suffix='d')
    return
# --------------------------------------------------------------------------------
# markdown format output
def _print_name_value(name_value, full_arch_name):
    """Log *name_value* (metric name -> value mapping) as a markdown table.

    Emits a header row, a separator row, and one data row for the given
    architecture; arch names longer than 15 characters are truncated.
    """
    metric_names = list(name_value.keys())
    metric_values = list(name_value.values())
    column_count = len(name_value)

    header = '| Arch ' + ' '.join('| {}'.format(n) for n in metric_names) + ' |'
    logger.info(header)
    logger.info('|---' * (column_count + 1) + '|')

    if len(full_arch_name) > 15:
        full_arch_name = full_arch_name[:8] + '...'
    row = ('| ' + full_arch_name + ' ' +
           ' '.join('| {:.3f}'.format(v) for v in metric_values) + ' |')
    logger.info(row)
class AverageMeter(object):
    """Tracks the latest value and the running average of a series."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        # Guard against division by zero when nothing has been recorded.
        self.avg = self.sum / self.count if self.count else 0
|
#! /usr/bin/env python
import unittest
import rospy
from geometry_msgs.msg import TwistStamped
from gazebo_msgs.srv import DeleteModel
class TwistTopicTest(unittest.TestCase):
    """Checks that TwistStamped feedback is published on <robot_name>/twist
    with the expected frame_id, optionally deleting the Gazebo model after."""

    def __init__(self, robot_name, delete_model):
        super(TwistTopicTest, self).__init__('twistTopicTest')
        self.robot_name = robot_name
        self.twist = TwistStamped()
        self.twist_topic = self.robot_name + '/twist'
        self.delete_model = delete_model

    def setUp(self):
        # Subscribe before the test body starts polling for a message.
        self.sub_twist = rospy.Subscriber(self.twist_topic, TwistStamped, self.__recvTwist)

    def tearDown(self):
        self.sub_twist.unregister()
        if not self.delete_model:
            return
        model_name = 'audibot' if len(self.robot_name) == 0 else self.robot_name
        delete_srv = rospy.ServiceProxy('/gazebo/delete_model', DeleteModel)
        try:
            delete_srv.wait_for_service(1.0)
            delete_srv(model_name=model_name)
        except (rospy.ServiceException,      # service call failed
                rospy.ROSInterruptException,  # ROS shutdown during timeout
                rospy.ROSException):          # timeout expired
            # Model deletion is best-effort cleanup; failures are not fatal.
            pass

    def twistTopicTest(self):
        # Poll (up to 1 s) for a twist feedback sample on the topic.
        deadline = rospy.Time.now() + rospy.Duration(1)
        while not rospy.is_shutdown() and (deadline - rospy.Time.now()).to_sec() > 0:
            if self.twist.header.stamp != rospy.Time(0):
                break
            rospy.sleep(0.01)
        self.assertTrue(self.twist.header.stamp != rospy.Time(0),
                        msg='TwistStamped topic [%s] not received' % self.twist_topic)

        # The frame_id must be namespaced by the robot name when one is set.
        if len(self.robot_name) > 0:
            expected_frame = self.robot_name + '/base_footprint'
        else:
            expected_frame = 'base_footprint'
        self.assertEqual(first=self.twist.header.frame_id,
                         second=expected_frame,
                         msg='TwistStamped frame_id [%s] should be [%s]' % (self.twist.header.frame_id, expected_frame)
                         )

    def __recvTwist(self, msg):
        # Subscriber callback: keep the most recent message.
        self.twist = msg
|
"""
The FixedWidth class definition.
"""
from decimal import Decimal, ROUND_HALF_EVEN
from datetime import datetime
from six import string_types, integer_types
class FixedWidth(object):
    """
    Class for converting between Python dictionaries and fixed-width
    strings.

    Requires a 'config' dictionary.
    Each key of 'config' is the field name.
    Each item of 'config' is itself a dictionary with the following keys:
        required    a boolean; required
        type        a string; required
        value       (will be coerced into 'type'); hard-coded value
        default     (will be coerced into 'type')
        start_pos   an integer; required
        length      an integer
        end_pos     an integer
        format      a string, to format dates, required for date fields

    The following keys are only used when emitting fixed-width strings:
        alignment   a string; required
        padding     a string; required
        precision   an integer, to format decimals numbers
        rounding    a constant ROUND_xxx used when precision is set

    Notes:
        A field must have a start_pos and either an end_pos or a length.
        If both an end_pos and a length are provided, they must not conflict.
        A field may not have a default value if it is required.
        Type may be string, integer, decimal, numeric, or date.
        Alignment and padding are required.

    Fixes relative to the previous revision: `_format_field` no longer uses
    the always-true `if not None` ternary (same behavior, clear code) and no
    longer shadows the builtins `type`; multi-line error messages no longer
    embed raw source indentation via backslash continuations.
    """

    def __init__(self, config, **kwargs):
        """
        Validate *config* and store initial field values.

        Arguments:
            config: required, dict defining fixed-width format
            kwargs: optional, initial field values; 'line_end' (default
                '\\r\\n') is popped off and used as the line terminator

        Raises:
            ValueError: on any malformed field definition.
        """
        # Per-type emitters used by _format_field when building a line.
        self.format_functions = {
            'integer': lambda x: str(self.data[x]),
            'string': lambda x: str(self.data[x]),
            'decimal': self._get_decimal_data,
            'numeric': lambda x: str(self.data[x]),
            'date': self._get_date_data,
        }

        self.line_end = kwargs.pop('line_end', '\r\n')
        self.config = config

        self.data = {}
        if kwargs:
            self.data = kwargs

        # Fields ordered by their starting position in the line.
        self.ordered_fields = sorted(
            [(self.config[x]['start_pos'], x) for x in self.config]
        )

        # Raise exception for bad config
        for key, value in self.config.items():

            # keys required on every field
            if any([x not in value for x in (
                    'type', 'required', 'padding', 'alignment', 'start_pos')]):
                raise ValueError(
                    "Not all required values provided for field %s" % (key,))

            if value['type'] == 'date':
                # date fields must carry a usable strftime/strptime format
                if 'format' in value:
                    try:
                        datetime.now().strftime(value['format'])
                    except Exception:
                        raise ValueError("Incorrect format string provided for field %s" % (key,))
                else:
                    raise ValueError("No format string provided for field %s" % (key,))
            elif value['type'] == 'decimal':
                # NOTE: type(...) != int (unlike isinstance) also rejects bools;
                # a near-duplicate isinstance check runs further below and
                # additionally installs the default rounding mode.
                if 'precision' in value and type(value['precision']) != int:
                    raise ValueError("Precision parameter for field %s must be an int" % (key,))

            # end position or length required
            if 'end_pos' not in value and 'length' not in value:
                raise ValueError("An end position or length is required for field %s" % (key,))

            # end position and length must match if both are specified
            if all([x in value for x in ('end_pos', 'length')]):
                if value['length'] != value['end_pos'] - value['start_pos'] + 1:
                    raise ValueError("Field %s length (%d) does not coincide with "
                                     "its start and end positions." % (key, value['length']))

            # fill in whichever of length / end_pos was omitted
            if 'end_pos' not in value:
                value['end_pos'] = value['start_pos'] + value['length'] - 1
            if 'length' not in value:
                value['length'] = value['end_pos'] - value['start_pos'] + 1

            # end_pos must be greater than start_pos
            if value['end_pos'] < value['start_pos']:
                raise ValueError("%s end_pos must be *after* start_pos." % (key,))

            # make sure an authorized type was provided
            if not value['type'] in ('string', 'integer', 'decimal', 'numeric', 'date'):
                raise ValueError("Field %s has an invalid type (%s). Allowed: "
                                 "'string', 'integer', 'decimal', 'numeric', 'date'" % (key, value['type']))

            # make sure alignment is 'left' or 'right'
            if not value['alignment'] in ('left', 'right'):
                raise ValueError("Field %s has an invalid alignment (%s). "
                                 "Allowed: 'left' or 'right'" % (key, value['alignment']))

            # a default value, if provided, must not violate the rules
            if 'default' in value:
                # can't be required AND have a default value
                if value['required']:
                    raise ValueError("Field %s is required; "
                                     "can not have a default value" % (key,))

                # coerce the default into the declared type where possible
                if value['type'] == 'decimal' and value['default'] is not None:
                    value['default'] = Decimal(value['default'])
                elif value['type'] == 'date' and isinstance(value['default'], string_types):
                    value['default'] = datetime.strptime(value['default'], value['format'])

                types = {'string': string_types, 'integer': int, 'decimal': Decimal,
                         'numeric': str, 'date': datetime}
                if value['default'] is not None and not isinstance(value['default'], types[value['type']]):
                    raise ValueError("Default value for %s is not a valid %s"
                                     % (key, value['type']))

            # precision (decimal only): must be an int; install the default
            # rounding mode when precision is used
            if value['type'] == 'decimal' and 'precision' in value:
                if not isinstance(value['precision'], int):
                    raise ValueError("Precision parameter for field %s "
                                     "must be an int" % (key,))
                value.setdefault('rounding', ROUND_HALF_EVEN)

        # Fields must tile the line exactly: each field starts right after
        # the previous one, beginning at position 1.
        current_pos = 1
        for start_pos, field_name in self.ordered_fields:
            if start_pos != current_pos:
                raise ValueError("Field %s starts at position %d; "
                                 "should be %d (or previous field definition is incorrect)."
                                 % (field_name, start_pos, current_pos))
            current_pos = current_pos + config[field_name]['length']

    def update(self, **kwargs):
        """
        Update self.data using the kwargs sent.
        """
        self.data.update(kwargs)

    def validate(self):
        """
        Ensure the data in self.data is consistent with self.config.

        Returns True on success; raises ValueError otherwise.
        """
        type_tests = {
            'string': lambda x: isinstance(x, string_types),
            'decimal': lambda x: isinstance(x, Decimal),
            'integer': lambda x: isinstance(x, integer_types),
            'numeric': lambda x: str(x).isdigit(),
            'date': lambda x: isinstance(x, datetime),
        }

        for field_name, parameters in self.config.items():

            if field_name in self.data:
                # an explicit None falls back to the default, when one exists
                if self.data[field_name] is None and 'default' in parameters:
                    self.data[field_name] = parameters['default']
                data = self.data[field_name]

                # make sure the passed-in value is of the proper type,
                # but only if a (truthy) value is set
                if data and not type_tests[parameters['type']](data):
                    raise ValueError("%s is defined as a %s, "
                                     "but the value is not of that type."
                                     % (field_name, parameters['type']))

                # ensure the value passed in is not too long for the field
                field_data = self._format_field(field_name)
                if len(str(field_data)) > parameters['length']:
                    raise ValueError("%s is too long (limited to %d "
                                     "characters)." % (field_name, parameters['length']))

                # a hard-coded config value may not be overridden
                if 'value' in parameters \
                        and parameters['value'] != field_data:
                    raise ValueError("%s has a value in the config, "
                                     "and a different value was passed in." % (field_name,))

            else:  # no value passed in
                # if required but not provided
                if parameters['required'] and ('value' not in parameters):
                    raise ValueError("Field %s is required, but was "
                                     "not provided." % (field_name,))

                # if there's a default value
                if 'default' in parameters:
                    self.data[field_name] = parameters['default']

                # if there's a hard-coded value in the config
                if 'value' in parameters:
                    self.data[field_name] = parameters['value']

            if parameters['required'] and self.data[field_name] is None:
                # None gets checked last because it may be set with a default value
                raise ValueError("None value not allowed for %s" % (field_name))

        return True

    def _get_decimal_data(self, field_name):
        """
        Quantize the field to the configured precision (when set) and
        return its string representation.
        """
        if 'precision' in self.config[field_name]:
            return str(Decimal(str(self.data[field_name])).quantize(
                Decimal('0.%s' % ('0' * self.config[field_name]['precision'])),
                self.config[field_name]['rounding']))
        else:
            return str(self.data[field_name])

    def _get_date_data(self, field_name):
        """Format a datetime field using its configured strftime format."""
        return str(self.data[field_name].strftime(self.config[field_name]['format']))

    def _format_field(self, field_name):
        """
        Converts field data and returns it as a string.

        A None value yields '' (empty fields can not be formatted).
        """
        if self.data[field_name] is None:
            return ''
        # Dispatch to the per-type emitter. (The previous revision wrapped
        # this in `X if not None else ''` — the condition was always true,
        # so the behavior is unchanged.)
        field_type = self.config[field_name]['type']
        return str(self.format_functions[field_type](field_name))

    def _build_line(self):
        """
        Returns a fixed-width line made up of self.data, using
        self.config. Validates first; absent fields emit padding only.
        """
        self.validate()
        line = ''
        for field_name in [x[1] for x in self.ordered_fields]:
            if field_name in self.data:
                datum = self._format_field(field_name)
            else:
                datum = ''
            # pad/justify the datum to the field's fixed width
            if self.config[field_name]['alignment'] == 'left':
                justify = datum.ljust
            else:
                justify = datum.rjust
            datum = justify(self.config[field_name]['length'],
                            self.config[field_name]['padding'])
            line += datum
        return line + self.line_end

    # Accessing is_valid runs validate(), which raises on invalid data
    # rather than returning False.
    is_valid = property(validate)

    def _string_to_dict(self, fw_string):
        """
        Take a fixed-width string and use it to
        populate self.data, based on self.config.
        """
        self.data = {}
        for start_pos, field_name in self.ordered_fields:
            conversion = {
                'integer': int,
                'string': lambda x: str(x).strip(),
                'decimal': Decimal,
                'numeric': lambda x: str(x).strip(),
                'date': lambda x: datetime.strptime(x, self.config[field_name]['format']),
            }
            raw = fw_string[start_pos - 1:self.config[field_name]['end_pos']]
            if raw.strip() == '' and 'default' in self.config[field_name]:
                # Use the default value when the slice is blank
                self.data[field_name] = self.config[field_name]['default']
            else:
                self.data[field_name] = conversion[self.config[field_name]['type']](raw)
        return self.data

    # Reading .line builds a fixed-width string; assigning to .line parses one.
    line = property(_build_line, _string_to_dict)
|
import FishEngineInternal
from FishEngine import SceneManager, Material, MeshManager, Vector3, Quaternion, Script
from timing import timing
import demo1, demo2
from collections import OrderedDict
from FishEditor import EditorApplication
import sys, yaml
def Vector3_representer(dumper, v:Vector3):
    """YAML representer: emit a Vector3 as an ordered {x, y, z} mapping."""
    fields = OrderedDict((("x", v.x), ("y", v.y), ("z", v.z)))
    return dumper.represent_dict(fields)
def Quaternion_representer(dumper, q:Quaternion):
    """YAML representer: emit a Quaternion as an ordered {x, y, z, w} mapping."""
    fields = OrderedDict((("x", q.x), ("y", q.y), ("z", q.z), ("w", q.w)))
    return dumper.represent_dict(fields)
def OrderedDict_representer(dumper, data):
    """YAML representer: dump an OrderedDict preserving its key order."""
    items = data.items()
    return dumper.represent_dict(items)
def Start():
    """Register YAML representers for the engine math types, then create and
    activate the default scene and launch demo2."""
    # AssetDataBase.StaticInit()
    for cls, representer in ((OrderedDict, OrderedDict_representer),
                             (Vector3, Vector3_representer),
                             (Quaternion, Quaternion_representer)):
        yaml.add_representer(cls, representer)

    scene = SceneManager.CreateScene("DefaultScene")
    SceneManager.SetActiveScene(scene)
    demo2.Start()

    # project_path = r'D:\workspace\unity\FishEngine\Assets'
    # EditorApplication.OpenProject(project_path)
    # demo1.Start()
# @timing
def Update():
    """Per-frame update hook called from the engine; currently a no-op."""
    pass
def Clean():
    """Release engine-side managed resources (meshes, scripts) on shutdown."""
    # Material.StaticClean()
    MeshManager.StaticClean()
    Script.StaticClean()
def Reload():
    """Hot-reload the FishEngine bindings and the demo2 module."""
    print('Reload from Python.....')
    # One import per line (PEP 8); order of reloads preserved: the engine
    # module first, then the demo that depends on it.
    import importlib
    import FishEngine
    importlib.reload(FishEngine)
    importlib.reload(demo2)
# def Save():
# objs = Object.FindObjectsOfType(GameObject)
# dumper = SceneDumper()
# dumper.Dump(objs)
# scene = SceneManager.CreateScene("RuntimeScene")
# SceneManager.SetActiveScene(scene)
# scene_path = 'FishEngine_demo1.unity'
# sceneImporter = UnitySceneImporter(scene_path)
# sceneImporter.Import() |
"""
Combine speaker and listener for easier training
"""
from torch import nn
class Pair(nn.Module):
    """Bundles a speaker and a listener module so both train as one model."""

    def __init__(self, speaker, listener):
        super().__init__()
        # Registered sub-modules.
        self.speaker = speaker
        self.listener = listener
        # Shared loss functions used during training.
        self.bce_criterion = nn.BCEWithLogitsLoss()
        self.xent_criterion = nn.CrossEntropyLoss()
|
"""COMMAND : .left"""
import asyncio
from telethon import events
from telethon.tl.types import ChannelParticipantsAdmins
from uniborg.util import admin_cmd
@borg.on(admin_cmd(pattern="left ?(.*)"))
async def _(event):
    """Handle the `.left` command: reply with a long article on left-handedness.

    If the command message is itself a reply, the article is sent as a reply
    to that message; otherwise it replies to the command message. The command
    message is deleted afterwards.
    """
    # Ignore forwarded copies of the command.
    if event.fwd_from:
        return
    mentions = "Why Are Some People Left-Handed?\n\n`Being a righty or a lefty could be linked to variations in a network of genes that influence right or left asymmetries in the body and brain.\n\nFor the left-handed people of the world, life isn’t easy.\nThroughout much of history, massive stigmas attached to left-handedness meant they were singled out as everything from unclean to witches.\nIn Medieval times, writing with your left-hand was a surefire way to be accused of being possessed by the devil; after all, the devil himself was thought to be a lefty.\nThe world has gotten progressively more accepting of left-handed folk,\nbut there are still some undeniable bummers associated with a left-handed proclivity: desks and spiral notebooks pose a constant battle,\nscissors are all but impossible to use and–according to some studies–life-expectancy might be lower than for right-handed people.\n\nWhat makes humanity’s bias against lefties all the more unfair is that left-handed people are born that way.\nIn fact, scientists have speculated for years that a single gene could control a left-right preference in humans.\nUnfortunately, they just couldn’t pinpoint exactly where the gene might lie.\n\nNow, in a paper published SEPTEMBER 12, 2013 in PLOS Genetics a group of researchers have identified a network of genes that relate to handedness in humans.\nWhat’s more, they’ve linked this preference to the development of asymmetry in the body and the brain.\n\nIn previous studies, the researchers observed that patients with dyslexia exhibited a correlation between the gene PCSK6 and handedness.\nBecause every gene has two copies (known as alleles), every gene has two chances for mutation;\nwhat the researches found was that dyslexic patients with more variance in PCSK6–meaning that one or both of their PSCK6 alleles had mutated–were more likely to be right-handed.\n\nThe research team found this especially interesting, because they knew that PCSK6 was a gene directly associated with the development of left-right asymmetry in the body.\nThey weren’t sure why this would present itself only in dyslexic patients, as dyslexia and handedness are not related.\nSo the team expanded the study to include more than 2,600 people who don’t have dyslexia.\n\nThe study found that PCSK6 didn’t work alone in affecting handedness in the general population.\nOther genes, also responsible for creating left-right asymmetry in the body, were strongly associated with handedness.\nLike PCSK6, the effect that these genes have on handedness depends on how many mutations the alleles undergo.\nEach gene has the potential for mutation–the more mutations a person has in any one direction (toward right handedness or left handedness)\nthe more likely they are to use that hand as their dominant hand, or so the researchers speculate.\n\nThe hypothesis is a logical response to a key question:\n If handedness is genetic and if right-handedness is such a dominant trait, why hasn’t left-handedness been forced out of the genetic pool?\nIn reality, the research suggests that handedness could be more subtle than simple “dominant” or “recessive” traits–a whole host of genes might play significant roles.\n\nWhat’s especially exciting is that these genes all relate to the development of left-right asymmetry in the body and brain,\ncreating a strong case for correlation between the development of this symmetry and the development of handedness.\nDisrupting any of these genes could lead to serious physical asymmetry,\nlike situs inversus, a condition where the body’s organs are reversed (heart on the right side of the body, for example).\nIn mice, the disruption of PCSK6 resulted in serious abnormal positioning of organs in their bodies.\n\nIf physical asymmetry is related to handedness, then people with situs inversus should favor one hand more often than what you’d find in the general population.\nStudies show that this isn’t the case–individuals with this condition mirror the general population’s split in handedness–leading the researchers to postulate that while these genes certainly influence handedness,\nthere might be other mechanisms in the body that compensate for handedness in the event of major physiological asymmetries.\n\nOther animals, such as polar bears or chimpanzees, also have handedness\n–-chimpanzees have been known to prefer one hand to the other when using tools or looking for food,\nbut the split within a population hangs around 50/50.\nHumans are the only species that show a truly distinct bias toward one hand or the other:\na 90/10 right/left split throughout the population.\n\nOne predominant hypothesis for this bias relates to another distinct human trait: language ability.\nLanguage ability is split between the different hemispheres of the brain,\nmuch like handedness, which suggests that handedness became compartmentalized along with language ability,\nFor most, the parts of the brain that govern language are are present in the left-side of the brain–these people tend to be right-handed.\nThe few that have language skills focused in the right side of the brain tend to be left-handed.\n\nHowever, William Brandler, a PhD student at Oxford University and the paper’s lead author, isn’t convinced that this theory holds much stock,\nas correlations between language and handedness in research aren’t well established.\nBrandler is more interested in learning how the permutations and combinations of genetic mutations play into humans’ likelihood to be right-handed.\n“Through understanding the genetics of handedness, we might be able to understand how it evolved,”\nhe says. “Once we have the full picture of all the genes involved,\nand how they interact with other genes,\nwe might be able to understand how and why there is such a bias.”\n\nAnd he’s confident that even if environmental factors (like the continued hatred of lefties by two-thirds of the world) place pressure on handedness,\nany baseline bias still boils down to genetics.\n“People think it’s just an environmental thing, but you’ve got to think, why is there that initial bias in the first place,\nand why do you see that bias across all societies?\nWhy aren’t there societies where you see a bias to the left?” Brandler asks.\n“There is a genetic component to handedness, hundreds of different genetic variants,\nand each one might push you one way or the other,\nand it’s the type of variance, along with the environment you’re in\nand the pressures acting on you, which affect your handedness.”\n\nBut until a larger population can be tested–hundreds of thousands,\nby Brandler’s estimates–a full genetic map of what controls handedness\nand why our population isn’t evenly split between righties and lefties can’t be determined.\n“It’s going to take a bit of time before these materialize—but it will happen,” Brandler says.\n“There’s been a whole revolution in genetics such that, in a few years time,\nwe’re really going to start to understand the genetic basis of complex traits.”\n\nThe former President United States Barack Obama is left-handed, as well as at least six former presidents.\n\n`"
    chat = await event.get_input_chat()
    # NOTE(review): this loop appends an empty f-string per admin, which is a
    # no-op — it looks like leftover boilerplate from a "mention all admins"
    # snippet; confirm before removing.
    async for x in borg.iter_participants(chat, filter=ChannelParticipantsAdmins):
        mentions += f""
    reply_message = None
    if event.reply_to_msg_id:
        reply_message = await event.get_reply_message()
        await reply_message.reply(mentions)
    else:
        await event.reply(mentions)
    # Remove the triggering command message.
    await event.delete()
|
from enum import IntEnum
class CommandOpcode(IntEnum):
    """Opcode byte for each uplink command in the packet protocol."""
    # 0x0_: basic OBC operations
    PING_OBC = 0x00
    GET_RTC = 0x01
    SET_RTC = 0x02
    READ_OBC_EEPROM = 0x03
    ERASE_OBC_EEPROM = 0x04
    READ_OBC_RAM_BYTE = 0x05
    SET_INDEF_BEACON_ENABLE = 0x06
    # 0x1_: reads of stored data/command blocks and raw memory
    READ_DATA_BLOCK = 0x10
    READ_PRIM_CMD_BLOCKS = 0x11
    READ_SEC_CMD_BLOCKS = 0x12
    READ_REC_STATUS_INFO = 0x13
    READ_REC_LOC_DATA_BLOCK = 0x14
    READ_RAW_MEM_BYTES = 0x15
    # 0x2_: data collection control
    COL_DATA_BLOCK = 0x20
    GET_AUTO_DATA_COL_SETTINGS = 0x21
    SET_AUTO_DATA_COL_ENABLE = 0x22
    SET_AUTO_DATA_COL_PERIOD = 0x23
    RESYNC_AUTO_DATA_COL_TIMERS = 0x24
    # 0x3_: memory layout management and erasure
    GET_CUR_BLOCK_NUMS = 0x30
    SET_CUR_BLOCK_NUM = 0x31
    GET_MEM_SEC_ADDRS = 0x32
    SET_MEM_SEC_START_ADDR = 0x33
    SET_MEM_SEC_END_ADDR = 0x34
    ERASE_MEM_PHY_SECTOR = 0x35
    ERASE_MEM_PHY_BLOCK = 0x36
    ERASE_ALL_MEM = 0x37
    # 0x4_: subsystem passthrough / reset
    SEND_EPS_CAN_MSG = 0x40
    SEND_PAY_CAN_MSG = 0x41
    RESET_SUBSYS = 0x42
class Subsystem(IntEnum):
    """Numeric identifiers for the three subsystems (used e.g. by RESET_SUBSYS)."""
    OBC = 1
    EPS = 2
    PAY = 3
class BlockType(IntEnum):
    """Identifiers for the kinds of stored data blocks."""
    OBC_HK = 1
    EPS_HK = 2
    PAY_HK = 3
    PAY_OPT = 4
    PAY_OPT_OD = 5
    PAY_OPT_FL = 6
    PRIM_CMD_LOG = 7
    SEC_CMD_LOG = 8
class CAN(IntEnum):
    """Message-type identifiers for the CAN control channels."""
    EPS_CTRL = 0x02
    PAY_CTRL = 0x05
class EPS_CTRL(IntEnum):
    """Sub-command numbers carried in EPS control CAN messages."""
    PING = 0
    READ_EEPROM = 1
    ERASE_EEPROM = 2
    READ_RAM_BYTE = 3
    RESET = 4
    # Heater setpoints/thresholds (SP = setpoint, THR = threshold)
    GET_HEAT_SHAD_SP = 5
    SET_HEAT_1_SHAD_SP = 6
    SET_HEAT_2_SHAD_SP = 7
    GET_HEAT_SUN_SP = 8
    SET_HEAT_1_SUN_SP = 9
    SET_HEAT_2_SUN_SP = 10
    GET_HEAT_CUR_THR = 11
    SET_HEAT_LOWER_CUR_THR = 12
    SET_HEAT_UPPER_CUR_THR = 13
class PAY_CTRL(IntEnum):
    """Sub-command numbers carried in PAY control CAN messages."""
    PING = 0
    READ_EEPROM = 1
    ERASE_EEPROM = 2
    READ_RAM_BYTE = 3
    RESET_SSM = 4
    RESET_OPT = 5
    # Power rails
    ENABLE_6V = 6
    DISABLE_6V = 7
    ENABLE_10V = 8
    DISABLE_10V = 9
    # Heater / thermistor management
    GET_HEAT_PARAMS = 10
    SET_HEAT_SP = 11
    SET_INV_THERM_READING = 12
    GET_THERM_READING = 13
    GET_THERM_ERR_CODE = 14
    SET_THERM_ERR_CODE = 15
    # Deployment motor control
    GET_MOTOR_STATUS = 16
    MOTOR_DEP_ROUTINE = 17
    MOTOR_UP = 18
    MOTOR_DOWN = 19
    SEND_OPT_SPI = 20
class PAYThermErrCode(IntEnum):
    """Error codes reported for PAY thermistor readings."""
    NORMAL = 0x00
    BELOW_ULL = 0x01
    ABOVE_UHL = 0x02
    BELOW_MIU = 0x03
    ABOVE_MIU = 0x04
    MANUAL_INVALID = 0x05
    MANUAL_VALID = 0x06
class PAYOptSPIOpcode(IntEnum):
    """Opcodes for SPI messages forwarded to the PAY optical board."""
    GET_READING = 0x01
    GET_POWER = 0x02
    ENTER_SLEEP_MODE = 0x03
    ENTER_NORMAL_MODE = 0x04
class PacketACKStatus(IntEnum):
    """Status byte returned when a received packet is acknowledged."""
    OK = 0x00
    RESET_CMD_ID = 0x01
    INVALID_ENC_FMT = 0x02
    INVALID_LEN = 0x03
    INVALID_CSUM = 0x04
    INVALID_DEC_FMT = 0x05
    INVALID_CMD_ID = 0x06
    DECREMENTED_CMD_ID = 0x07
    REPEATED_CMD_ID = 0x08
    INVALID_OPCODE = 0x09
    INVALID_PWD = 0x0A
    FULL_CMD_QUEUE = 0x0B
class PacketRespStatus(IntEnum):
    """Status byte returned in a command's response packet."""
    OK = 0x00
    INVALID_ARGS = 0x01
    TIMED_OUT = 0x02
    # 0x1_: errors specific to forwarded CAN messages
    INVALID_CAN_OPCODE = 0x11
    INVALID_CAN_FIELD_NUM = 0x12
    INVALID_CAN_DATA = 0x13
|
import unittest
import utilities.src.latlon as latlon
class LatLonTest(unittest.TestCase):
    """Unit tests for the utilities.src.latlon distance and sort-key helpers."""
    p1: latlon.Point
    p2: latlon.Point

    def setUp(self):
        # Two fixed reference points used by every test.
        self.p1 = latlon.Point(1.14, 1.21)
        self.p2 = latlon.Point(1.52, 1.35)

    def test_distance(self):
        """distance() between the fixture points is ~45.03 (tolerance 0.6)."""
        self.assertAlmostEqual(45.03, latlon.distance(self.p1, self.p2), delta=0.6)

    def test_get_lanlonkey(self):
        """Sorting with the key from get_lanlonkey orders points by their
        distance from the reference point.

        Bug fix: this method was named `get_lanlonkey` (no `test_` prefix),
        so unittest discovery never executed it.
        """
        expected = [self.p1, self.p2]
        keyfunction = latlon.get_lanlonkey(latlon.Point(1, 1))
        res = sorted([self.p2, self.p1], key=keyfunction)
        self.assertEqual(expected, res)
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
|
import numpy as np
import pycuda.driver as cuda
import pycuda.autoinit
############### cuda ###############
from pycuda import gpuarray
def getcudadevice():
    """
    Enumerate all visible CUDA devices.

    Bug fix (docs): the old docstring claimed "if found return device, else
    return None", but this function always returns a list (empty when no
    device is present). The unused `compute_capability` computation was
    also removed.

    :return: list of pycuda.driver.Device objects
    """
    devices = []
    for i in range(cuda.Device.count()):
        gpu_device = cuda.Device(i)
        devices.append(gpu_device)
        # print("gpu_device name: ", gpu_device.name())
        # print("compute_capability: ", '%d.%d' % gpu_device.compute_capability())
        # print("total memory: ", gpu_device.total_memory() // (1024 ** 2))
    return devices
def getcudadeviceattributes(gpu_device):
    """Return the device's attribute dict as (attribute, value) items."""
    attributes = gpu_device.get_attributes()
    # for item, count in attributes.items():
    #     print(item, count)
    return attributes.items()
def hasdevice():
    """
    Return True when at least one CUDA device is visible, else False.

    Idiom fix: return the comparison directly instead of the
    `if ...: return True else: return False` pattern.
    """
    device = pycuda.autoinit.device
    return device.count() >= 1
def checkDataDevice(operator):
    """Decorator: before invoking *operator*, sanity-check each argument's
    placement. Args on 'gpu'/'cuda' must have GPUArray data on their child
    nodes; 'cpu' args pass through; any other device string raises."""
    def check(*args):
        checked = []
        for tensor in args:
            device = tensor.device
            if device in ("gpu", "cuda"):
                if tensor.left is not None:
                    assert type(tensor.left.data) == pycuda.gpuarray.GPUArray, \
                        "shell error: arg's left's data type must be GPUArray type. @checkDevice"
                if tensor.right is not None:
                    assert type(tensor.right.data) == pycuda.gpuarray.GPUArray, \
                        "shell error: arg's right's data type must be GPUArray type. @checkDevice"
            elif device == "cpu":
                pass
            else:
                raise Exception
            checked.append(tensor)
        return operator(*checked)
    return check
def checkGradDevice(operator):
    """Decorator: validate gradient placement for "gpu"/"cuda" Tensor arguments.

    For each such argument, its own grad and the grads of its left/right
    child tensors (when present) must be ``pycuda.gpuarray.GPUArray`` or
    ``None``. Arguments on other devices pass through unchecked.
    """
    @functools.wraps(operator)
    def check(*args):
        for arg in args:
            if arg.device in ("gpu", "cuda"):
                # `isinstance` replaces `type(x) == T` (idiomatic, accepts subclasses).
                assert arg.grad is None or isinstance(arg.grad, pycuda.gpuarray.GPUArray), \
                    "shell error: arg's grad must be GPUArray type. @checkDevice"
                if arg.left is not None:
                    assert arg.left.grad is None or isinstance(arg.left.grad, pycuda.gpuarray.GPUArray), \
                        "shell error: arg's left's data type must be GPUArray type. @checkDevice"
                if arg.right is not None:
                    assert arg.right.grad is None or isinstance(arg.right.grad, pycuda.gpuarray.GPUArray), \
                        "shell error: arg's right node grad type must be GPUArray type. @checkDevice"
        # The original copied args into an identical list before the call;
        # forwarding args directly is equivalent.
        return operator(*args)
    return check
def _hosttodevice(data_host):
    """
    Copy a host (CPU) numpy buffer into freshly allocated device (GPU) memory.

    :param data_host: host numpy array; its ``nbytes`` sets the size of the
        device allocation.
    :return: pycuda device allocation holding a copy of ``data_host``.
    """
    # NOTE(review): `assert` is stripped under `python -O`; if this guard must
    # always run, consider raising RuntimeError instead — confirm with owners.
    assert hasdevice(), "host To Device error, must have device number >= 1"
    data_device = cuda.mem_alloc(data_host.nbytes)
    cuda.memcpy_htod(data_device, data_host)
    return data_device
def _devicetohost(data_device, shape=(), dtype=np.float32):
    """
    Copy device (GPU) memory back into a newly allocated host numpy array.

    :param data_device: device allocation to read from.
    :param shape: shape of the host array to allocate. Defaults to a 0-d
        (scalar) array, matching the original ``np.empty_like(1, dtype=np.float32)``.
    :param dtype: numpy dtype of the host array (default ``np.float32``).
    :return: host numpy array filled from ``data_device``.
    """
    # FIX(review): the original always allocated a single float32 scalar,
    # which truncated any transfer larger than 4 bytes. Callers can now pass
    # the real shape/dtype; old call sites keep the old behaviour.
    data_host = np.empty(shape, dtype=dtype)
    cuda.memcpy_dtoh(data_host, data_device)
    return data_host
|
# Copyright (c) 2021. Nicolai Oswald
# Copyright (c) 2021. University of Edinburgh
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from typing import List
from DataObjects.FlowDataTypes.ClassBaseAccess import BaseAccess
from DataObjects.Transitions.ClassTransitionv2 import Event
from Parser.ProtoCCcomTreeFct import objListToStringList
from DataObjects.Transitions.ClassTransitionv2 import Transition_v2
from Debug.Monitor.ClassDebug import Debug
class ProtoCCTablePrinter(Debug):
    """Debug pretty-printer for protocol state-machine transition tables."""

    # Column width hint (presumably consumed by Debug.ptable — TODO confirm).
    Spacing = 20
    # Column headers matching the row layout produced by outtransition().
    TransFormat = ["StartState", "FinalState", "Access", "InMsg", "InEvent", "OutMsg", "OutEvent", "Cond"]

    def __init__(self, debug_enable: bool = True):
        Debug.__init__(self, debug_enable)

    def ptransition(self, transition: Transition_v2):
        """Print a single transition as a one-row table."""
        self.ptable(self.TransFormat, [self.outtransition(transition)])

    def ptransitiontable(self, transitions: List[Transition_v2]):
        """Print a state/transition count header followed by all transitions,
        sorted by their string representation."""
        states = []
        for transition in transitions:
            states.append(str(transition.start_state))
            states.append(str(transition.final_state))
        states = list(set(states))
        self.p_header("#States: " + str(len(states)) + " #Transitions: " + str(len(transitions)))
        output = [self.outtransition(t) for t in sorted(transitions, key=lambda x: str(x))]
        self.ptable(self.TransFormat, output)

    @staticmethod
    def outtransition(transition: Transition_v2) -> List[str]:
        """Format one transition as the row
        [start, final, access, in_msg, in_event, out_msg, out_event, cond]."""
        p_start_state = str(transition.start_state)
        p_final_state = str(transition.final_state)
        p_access: str = ""
        p_in_msg: str = ""
        p_event: str = ""
        # The guard fills exactly one of the three input columns, depending on
        # whether it is an Event, an Access_type, or an input message.
        if isinstance(transition.guard, Event):
            p_event = str(transition.guard)
        elif isinstance(transition.guard, BaseAccess.Access_type):
            p_access = str(transition.guard)
        else:
            p_in_msg = str(transition.guard)
        p_out_msg: str = ""
        p_out_event: str = ""
        for out_msg in transition.out_msg:
            # "; " separates multiple entries within the same cell.
            if p_out_msg != "":
                p_out_msg += "; "
            if p_out_event != "":
                p_out_event += "; "
            if isinstance(out_msg, Event):
                p_out_event += str(out_msg)
            else:
                # Messages are shown together with their virtual channel.
                p_out_msg += str(out_msg) + "@" + str(out_msg.get_vc())
        p_cond = ""
        for cond in transition.dbg_cond_operation():
            if p_cond != "":
                p_cond += "; "
            p_cond += ''.join(cond)
        return [p_start_state, p_final_state, p_access, p_in_msg, p_event, p_out_msg, p_out_event, p_cond]

    @staticmethod
    def ptransaction(transactions):
        """Print the transitions of every transaction.

        Static, so it necessarily uses a default-enabled printer instance.
        """
        for transaction in transactions:
            ProtoCCTablePrinter().ptransitions(transaction.gettransitions())

    def ptransitions(self, transitions):
        """Print each transition followed by its operations."""
        for transition in transitions:
            # FIX(review): the original created a fresh ProtoCCTablePrinter()
            # per transition, silently re-enabling debug output even when
            # `self` was constructed with debug_enable=False. Use `self` so
            # the configured debug state is honoured (consistent with pstates,
            # which already calls self.ptransitions).
            self.ptransition(transition)
            self.pdebug('$')
            ops = objListToStringList(transition.getoperation())
            for entry in ops:
                self.pdebug(entry)
            self.pdebug()

    def pstates(self, states):
        """Print a header and the transition listing for every state."""
        for state in states:
            self.p_header('$$$$' + state.getstatename())
            self.ptransitions(state.gettransitions())
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.