#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import math
import torch
from pytext.config import ConfigBase
from pytext.config.component import Component, ComponentType
from pytext.optimizer import Optimizer
from torch.optim.lr_scheduler import (
CosineAnnealingLR as TorchCosineAnnealingLR,
ExponentialLR as TorchExponentialLR,
ReduceLROnPlateau as TorchReduceLROnPlateau,
StepLR as TorchStepLR,
_LRScheduler,
)
class Scheduler(Component):
"""
    Schedulers adjust the learning rate during training. Scheduler is a wrapper
    class around schedulers available in the torch library as well as custom
    implementations. Two kinds of lr scheduling are supported by this class:
    per-epoch scheduling and per-batch scheduling. In per-epoch scheduling, the
    learning rate is adjusted at the end of each epoch; in per-batch scheduling,
    the learning rate is adjusted after the forward and backward pass through
    one batch during training.
    There are two main methods that need to be implemented by the Scheduler:
    step_epoch() is called at the end of each epoch, and step_batch() is called
    at the end of each batch in the training data.
    The prepare() method can be used by BatchSchedulers to initialize any
    attributes they may need.
"""
__COMPONENT_TYPE__ = ComponentType.SCHEDULER
__EXPANSIBLE__ = True
class Config(ConfigBase):
pass
def step_batch(self, **kwargs) -> None:
pass
def step_epoch(self, **kwargs) -> None:
pass
def prepare(self, train_iter, total_epochs):
pass
class BatchScheduler(Scheduler):
def prepare(self, train_iter, total_epochs):
self.num_epochs = total_epochs
self.steps_per_epoch = getattr(train_iter, "total_num_batches", None)
class LmFineTuning(_LRScheduler, BatchScheduler):
"""
Fine-tuning methods from the paper
"[arXiv:1801.06146]Universal Language Model Fine-tuning for Text Classification".
Specifically, modifies training schedule using slanted triangular learning rates,
discriminative fine-tuning (per-layer learning rates), and gradual unfreezing.
"""
class Config(Scheduler.Config):
        #: The fraction of iterations over which the learning rate is increased. Default 0.1.
cut_frac: float = 0.1
        #: How much smaller the lowest LR is than the maximum LR eta_max.
ratio: int = 32
#: Number of param_groups, starting from the
#: end, that were not pretrained. The default value is 2, since the base Model
        #: class typically supplies the optimizer with one param_group from the embedding
#: and one param_group from its other components.
non_pretrained_param_groups: int = 2
#: Factor to multiply lr for all pretrained layers by.
lm_lr_multiplier: float = 1.0
#: Whether to make each pretrained layer's lr
#: one-half as large as the next (higher) layer.
lm_use_per_layer_lr: bool = False
#: Whether to unfreeze layers one by one (per epoch).
lm_gradual_unfreezing: bool = True
        #: Though the name is `last_epoch`, it really means "last batch update":
        #: last_batch_update = current_epoch_number * num_batches_per_epoch + batch_id.
        #: It is incremented by 1 after each batch update.
last_epoch: int = -1
def __init__(
self,
optimizer,
cut_frac=0.1,
ratio=32,
non_pretrained_param_groups=2,
lm_lr_multiplier=1.0,
lm_use_per_layer_lr=False,
lm_gradual_unfreezing=True,
last_epoch=-1,
):
assert isinstance(optimizer, torch.optim.Adam)
self.num_epochs = None # to be set later by Trainer
self.steps_per_epoch = None # to be set later by Trainer
self.cut_frac = cut_frac
self.ratio = ratio
self.lm_pretrained_layers = (
len(optimizer.param_groups) - non_pretrained_param_groups
)
assert self.lm_pretrained_layers >= 0
assert non_pretrained_param_groups > 0
self.lm_lr_multiplier = lm_lr_multiplier
self.lm_use_per_layer_lr = lm_use_per_layer_lr
self.lm_gradual_unfreezing = lm_gradual_unfreezing
super(LmFineTuning, self).__init__(optimizer, last_epoch)
@classmethod
def from_config(cls, config: Config, optimizer):
return cls(
optimizer,
config.cut_frac,
config.ratio,
config.non_pretrained_param_groups,
config.lm_lr_multiplier,
config.lm_use_per_layer_lr,
config.lm_gradual_unfreezing,
config.last_epoch,
)
def get_lr(self):
if self.num_epochs is None or self.steps_per_epoch is None:
return [1.0] * len(self.base_lrs)
slanted_multiplier = self._slanted_multiplier()
return [
(
slanted_multiplier
* self._lm_layer_multiplier(i)
* self._lm_frozen_multiplier(i)
* base_lr
)
for i, base_lr in enumerate(self.base_lrs)
]
def _slanted_multiplier(self):
phase_step = self.last_epoch
phase_total_steps = self.num_epochs * self.steps_per_epoch
if phase_step > phase_total_steps:
return 1.0 / self.ratio
if self.lm_gradual_unfreezing:
unfreeze_steps = self.lm_pretrained_layers * self.steps_per_epoch
if self.last_epoch > unfreeze_steps:
phase_step -= unfreeze_steps
phase_total_steps -= unfreeze_steps
else:
phase_step %= self.steps_per_epoch
phase_total_steps = self.steps_per_epoch
cut = math.floor(self.cut_frac * phase_total_steps)
if phase_step < cut:
p = phase_step / cut
else:
p = 1.0 - (phase_step - cut) / (phase_total_steps - cut)
return (1.0 + p * (self.ratio - 1.0)) / self.ratio
def _lm_layer_multiplier(self, layer_index):
multiplier = 1.0
if layer_index < self.lm_pretrained_layers:
multiplier *= self.lm_lr_multiplier
if self.lm_use_per_layer_lr:
multiplier *= 2 ** (layer_index - self.lm_pretrained_layers)
return multiplier
def _lm_frozen_multiplier(self, layer_index):
return 0.0 if self._lm_frozen(layer_index) else 1.0
def _lm_frozen(self, layer_index):
if not self.lm_gradual_unfreezing:
return False
if layer_index >= self.lm_pretrained_layers:
return False
epoch = self.last_epoch / self.steps_per_epoch
return epoch < self.lm_pretrained_layers - layer_index
def step_batch(self, metrics=None, epoch=None):
self.step(epoch)
class StepLR(TorchStepLR, Scheduler):
"""
Wrapper around `torch.optim.lr_scheduler.StepLR`
See the original documentation for more details.
"""
class Config(Scheduler.Config):
#: Period of learning rate decay.
step_size: int = 30
#: Multiplicative factor of learning rate decay.
gamma: float = 0.1
@classmethod
def from_config(cls, config: Config, optimizer):
return cls(optimizer, config.step_size, config.gamma)
def step_epoch(self, metrics=None, epoch=None):
self.step(epoch)
class ReduceLROnPlateau(TorchReduceLROnPlateau, Scheduler):
"""
Wrapper around `torch.optim.lr_scheduler.ReduceLROnPlateau`
See the original documentation for more details.
"""
class Config(Scheduler.Config):
#: This indicates the desirable direction in which we would like the
        #: training to proceed. If set to True, the learning rate will be reduced
        #: when the quantity being monitored stops going down.
lower_is_better: bool = True
#: Factor by which the learning rate will be reduced. new_lr = lr * factor
factor: float = 0.1
#: Number of epochs with no improvement after which learning rate will
#: be reduced
patience: int = 5
#: Lower bound on the learning rate of all param groups
min_lr: float = 0
#: Threshold for measuring the new optimum, to only focus on significant
#: changes.
threshold: float = 0.0001
#: One of rel, abs.
#: In rel mode, dynamic_threshold = best * ( 1 + threshold ) in ‘max’ mode
#: or best * ( 1 - threshold ) in min mode.
#: In abs mode, dynamic_threshold = best + threshold in max mode or
#: best - threshold in min mode.
threshold_is_absolute: bool = True
#: Number of epochs to wait before resuming normal operation after
#: lr has been reduced.
cooldown: int = 0
@classmethod
def from_config(cls, config: Config, optimizer: Optimizer):
return cls(
optimizer,
mode="min" if config.lower_is_better else "max",
factor=config.factor,
patience=config.patience,
min_lr=config.min_lr,
threshold=config.threshold,
threshold_mode=("abs" if config.threshold_is_absolute else "rel"),
cooldown=config.cooldown,
)
def step_epoch(self, metrics, epoch):
self.step(metrics, epoch)
class CosineAnnealingLR(TorchCosineAnnealingLR, BatchScheduler):
"""
Wrapper around `torch.optim.lr_scheduler.CosineAnnealingLR`
See the original documentation for more details.
"""
class Config(Scheduler.Config):
#: Maximum number of iterations.
t_max: int = 1000
#: Minimum learning rate
eta_min: float = 0
@classmethod
def from_config(cls, config: Config, optimizer: Optimizer):
return cls(optimizer, config.t_max, config.eta_min)
def step_batch(self, metrics=None, epoch=None):
self.step(epoch)
class ExponentialLR(TorchExponentialLR, Scheduler):
"""
Wrapper around `torch.optim.lr_scheduler.ExponentialLR`
See the original documentation for more details.
"""
class Config(Scheduler.Config):
#: Multiplicative factor of learning rate decay.
gamma: float = 0.1
@classmethod
def from_config(cls, config: Config, optimizer: Optimizer):
return cls(optimizer, config.gamma)
def step_epoch(self, metrics=None, epoch=None):
self.step(epoch)
class WarmupScheduler(_LRScheduler, BatchScheduler):
"""
Scheduler to linearly increase learning rate from 0 to final value at the beginning
of training.
"""
class Config(BatchScheduler.Config):
#: number of training steps over which to increase learning rate
warmup_steps: int = 10000
@classmethod
def from_config(cls, config: Config, optimizer: Optimizer):
return cls(optimizer, config.warmup_steps)
def __init__(self, optimizer, warmup_steps):
assert warmup_steps > 0
self.warmup_steps = warmup_steps
self.current_steps = 0
super().__init__(optimizer)
def prepare(self, train_iter, total_epochs):
super().prepare(train_iter, total_epochs)
self.step_batch() # initialize learning rate
def step_batch(self):
self.current_steps += 1
self.step()
def get_lr(self):
if self.current_steps >= self.warmup_steps:
lr_multiplier = 1.0
else:
lr_multiplier = self.current_steps / self.warmup_steps
return [lr_multiplier * base_lr for base_lr in self.base_lrs]
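# Illustrative sketch (not part of the original module): how a trainer would
# typically drive WarmupScheduler. The tiny model, the optimizer choice and the
# DummyIter class are assumptions made only for this example; the only thing
# BatchScheduler.prepare() reads from the iterator is `total_num_batches`.
def _warmup_scheduler_example():
    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    scheduler = WarmupScheduler(optimizer, warmup_steps=100)

    class DummyIter:
        total_num_batches = 50  # batches per epoch

    scheduler.prepare(DummyIter(), total_epochs=4)
    for _ in range(10):
        optimizer.step()
        scheduler.step_batch()  # lr ramps linearly towards the base lr
    return optimizer.param_groups[0]["lr"]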
class PolynomialDecayScheduler(_LRScheduler, BatchScheduler):
"""
Applies a polynomial decay with lr warmup to the learning rate.
It is commonly observed that a monotonically decreasing learning rate, whose
degree of change is carefully chosen, results in a better performing model.
    This scheduler linearly increases the learning rate from 0 to its final
    value over the first `warmup_steps` of training.
Then it applies a polynomial decay function to an optimizer step, given a
provided `base_lrs` to reach an `end_learning_rate` after `total_steps`.
"""
class Config(BatchScheduler.Config):
#: number of training steps over which to increase learning rate
warmup_steps: int = 0
#: number of training steps for learning rate decay
total_steps: int
#: end learning rate after `total_steps` of training
end_learning_rate: float
#: power used for polynomial decay calculation
power: float = 1.0
@classmethod
def from_config(cls, config: Config, optimizer: Optimizer):
return cls(
optimizer,
config.warmup_steps,
config.total_steps,
config.end_learning_rate,
config.power,
)
def __init__(self, optimizer, warmup_steps, total_steps, end_learning_rate, power):
assert total_steps > warmup_steps >= 0
self.warmup_steps = warmup_steps
self.total_steps = total_steps
self.end_learning_rate = end_learning_rate
self.power = power
self.current_steps = 0
super().__init__(optimizer)
def prepare(self, train_iter, total_epochs):
super().prepare(train_iter, total_epochs)
self.step_batch() # initialize learning rate
def get_lr(self):
        if self.warmup_steps > 0 and self.current_steps <= self.warmup_steps:
            # During warmup the learning rate increases linearly until it
            # reaches base_lr. Guarding on warmup_steps > 0 avoids a division
            # by zero when no warmup is configured.
            warmup_factor = self.current_steps / self.warmup_steps
lrs = [warmup_factor * base_lr for base_lr in self.base_lrs]
elif self.current_steps <= self.total_steps:
# start polynomial weight decay until it reaches end_learning_rate
decay_factor = (
1
- (self.current_steps - self.warmup_steps)
/ (self.total_steps - self.warmup_steps)
) ** self.power
lrs = [
(base_lr - self.end_learning_rate) * decay_factor
+ self.end_learning_rate
for base_lr in self.base_lrs
]
else:
# reach end_learning_rate after total_steps
lrs = [self.end_learning_rate for _ in self.base_lrs]
return lrs
def step_batch(self):
self.current_steps += 1
# update optimizer.param_groups's learning rate
self.step()
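# Illustrative sketch (not part of the original module): driving
# PolynomialDecayScheduler per batch and collecting the learning rate, which
# ramps up linearly for `warmup_steps` batches and then decays polynomially to
# `end_learning_rate`. The model and optimizer are arbitrary assumptions made
# only for this example.
def _polynomial_decay_example():
    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    scheduler = PolynomialDecayScheduler(
        optimizer, warmup_steps=10, total_steps=100, end_learning_rate=0.01, power=1.0
    )
    lrs = []
    for _ in range(100):
        optimizer.step()
        scheduler.step_batch()
        lrs.append(optimizer.param_groups[0]["lr"])
    return lrs  # linear ramp over 10 steps, then linear decay towards 0.01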
|
import os
from functools import namedtuple
import dgl
import dgl.function as fn
import numpy as np
import torch
from dgl.data import PPIDataset
from ogb.nodeproppred import DglNodePropPredDataset, Evaluator
from sklearn.metrics import accuracy_score, f1_score
import scipy.sparse as sp
import json
from networkx.readwrite import json_graph
from utils import load_dgl_graph
def get_ogb_evaluator(dataset):
"""
Get evaluator from Open Graph Benchmark based on dataset
"""
evaluator = Evaluator(name=dataset)
return lambda preds, labels: evaluator.eval({
"y_true": labels.view(-1, 1),
"y_pred": preds.view(-1, 1),
})["acc"]
class ACCEvaluator(object):
def __init__(self):
pass
def __call__(self, y_pred, y_true):
return accuracy_score(y_true.cpu(), y_pred.cpu())
class F1Evaluator(object):
def __init__(self, average='micro'):
self.average = average
def __call__(self, y_pred, y_true):
return f1_score(y_true.cpu(), y_pred.cpu(), average=self.average)
def get_evaluator(name):
if name in ["cora"]:
evaluator = ACCEvaluator()
elif name in ["yelp", "ppi", "ppi_large", "reddit", "flickr"]:
evaluator = F1Evaluator(average="micro")
else:
evaluator = get_ogb_evaluator(name)
return evaluator
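# Illustrative usage sketch (not part of the original file): the evaluators above
# are callables taking (predictions, labels) as tensors. The tensors below are
# made up for the example.
def _evaluator_example():
    y_pred = torch.tensor([0, 1, 1, 0])
    y_true = torch.tensor([0, 1, 0, 0])
    acc = ACCEvaluator()(y_pred, y_true)               # plain accuracy
    f1 = F1Evaluator(average="micro")(y_pred, y_true)  # micro-averaged F1
    return acc, f1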
def load_dataset(device, args):
"""
Load dataset and move graph and features to device
"""
if args.dataset in ["reddit", "cora", "ppi", "ppi_large", "yelp", "flickr"]:
# raise RuntimeError("Dataset {} is not supported".format(name))
if args.dataset == "reddit":
from dgl.data import RedditDataset
data = RedditDataset(self_loop=True)
g = data[0]
g = dgl.add_self_loop(g)
n_classes = data.num_classes
elif args.dataset == "cora":
from dgl.data import CitationGraphDataset
data = CitationGraphDataset('cora', raw_dir=os.path.join(args.data_dir, 'cora'))
g = data[0]
g = dgl.remove_self_loop(g)
g = dgl.add_self_loop(g)
n_classes = data.num_classes
elif args.dataset == "ppi":
data = load_ppi_data(args.data_dir)
g = data.g
n_classes = data.num_classes
elif args.dataset == "ppi_large":
data = load_ppi_large_data()
g = data.g
n_classes = data.num_classes
elif args.dataset == "yelp":
from torch_geometric.datasets import Yelp
pyg_data = Yelp(os.path.join(args.data_dir, 'yelp'))[0]
feat = pyg_data.x
labels = pyg_data.y
u, v = pyg_data.edge_index
g = dgl.graph((u, v))
g.ndata['feat'] = feat
g.ndata['label'] = labels
g.ndata['train_mask'] = pyg_data.train_mask
g.ndata['val_mask'] = pyg_data.val_mask
g.ndata['test_mask'] = pyg_data.test_mask
n_classes = labels.size(1)
elif args.dataset == "flickr":
from torch_geometric.datasets import Flickr
pyg_data = Flickr(os.path.join(args.data_dir, "flickr"))[0]
feat = pyg_data.x
labels = pyg_data.y
# labels = torch.argmax(labels, dim=1)
u, v = pyg_data.edge_index
g = dgl.graph((u, v))
g.ndata['feat'] = feat
g.ndata['label'] = labels
g.ndata['train_mask'] = pyg_data.train_mask
g.ndata['val_mask'] = pyg_data.val_mask
g.ndata['test_mask'] = pyg_data.test_mask
n_classes = labels.max().item() + 1
train_mask = g.ndata['train_mask']
val_mask = g.ndata['val_mask']
test_mask = g.ndata['test_mask']
train_nid = train_mask.nonzero().squeeze().long()
val_nid = val_mask.nonzero().squeeze().long()
test_nid = test_mask.nonzero().squeeze().long()
g = g.to(device)
labels = g.ndata['label']
else:
if args.dataset == 'maxp':
g, labels, train_nid, val_nid, test_nid, node_feat = load_dgl_graph('../../dataset')
g = dgl.to_bidirected(g, copy_ndata=True)
g = dgl.add_self_loop(g)
print("Use node2vec embedding...", flush=True)
emb = torch.load('../../dataset/emb.pt', map_location='cpu')
emb.requires_grad = False
node_feat = torch.cat([node_feat, emb], dim=1)
g.ndata["feat"] = node_feat
train_nid = torch.from_numpy(train_nid)
val_nid = torch.from_numpy(val_nid)
test_nid = torch.from_numpy(test_nid)
n_classes = 23
g = g.to(device)
else:
dataset = DglNodePropPredDataset(name=args.dataset, root=args.data_dir)
splitted_idx = dataset.get_idx_split()
train_nid = splitted_idx["train"]
val_nid = splitted_idx["valid"]
test_nid = splitted_idx["test"]
g, labels = dataset[0]
n_classes = dataset.num_classes
g = g.to(device)
if args.dataset == "ogbn-arxiv":
g = dgl.add_reverse_edges(g, copy_ndata=True)
g = dgl.add_self_loop(g)
g.ndata['feat'] = g.ndata['feat'].float()
elif args.dataset == "ogbn-papers100M":
g = dgl.add_reverse_edges(g, copy_ndata=True)
g.ndata['feat'] = g.ndata['feat'].float()
labels = labels.long()
elif args.dataset == "ogbn-mag":
# MAG is a heterogeneous graph. The task is to make prediction for
# paper nodes
path = os.path.join(args.emb_path, f"{args.pretrain_model}_mag")
labels = labels["paper"]
train_nid = train_nid["paper"]
val_nid = val_nid["paper"]
test_nid = test_nid["paper"]
features = g.nodes['paper'].data['feat']
author_emb = torch.load(os.path.join(path, "author.pt"), map_location=torch.device("cpu")).float()
topic_emb = torch.load(os.path.join(path, "field_of_study.pt"), map_location=torch.device("cpu")).float()
institution_emb = torch.load(os.path.join(path, "institution.pt"), map_location=torch.device("cpu")).float()
g.nodes["author"].data["feat"] = author_emb.to(device)
g.nodes["institution"].data["feat"] = institution_emb.to(device)
g.nodes["field_of_study"].data["feat"] = topic_emb.to(device)
g.nodes["paper"].data["feat"] = features.to(device)
paper_dim = g.nodes["paper"].data["feat"].shape[1]
author_dim = g.nodes["author"].data["feat"].shape[1]
if paper_dim != author_dim:
paper_feat = g.nodes["paper"].data.pop("feat")
rand_weight = torch.Tensor(paper_dim, author_dim).uniform_(-0.5, 0.5)
g.nodes["paper"].data["feat"] = torch.matmul(paper_feat, rand_weight.to(device))
print(f"Randomly project paper feature from dimension {paper_dim} to {author_dim}", flush=True)
labels = labels.to(device).squeeze()
n_classes = int(labels.max() - labels.min()) + 1
else:
g.ndata['feat'] = g.ndata['feat'].float()
labels = labels.squeeze()
if args.dataset == 'maxp':
evaluator = None
else:
evaluator = get_evaluator(args.dataset)
print(f"# Nodes: {g.number_of_nodes()}\n"
f"# Edges: {g.number_of_edges()}\n"
f"# Train: {len(train_nid)}\n"
f"# Val: {len(val_nid)}\n"
f"# Test: {len(test_nid)}\n"
f"# Classes: {n_classes}", flush=True)
return g, labels, n_classes, train_nid, val_nid, test_nid, evaluator
def load_ppi_data(root):
DataType = namedtuple('Dataset', ['num_classes', 'g'])
adj_full = sp.load_npz(os.path.join(root, 'ppi', 'adj_full.npz'))
G = dgl.from_scipy(adj_full)
nodes_num = G.num_nodes()
role = json.load(open(os.path.join(root, 'ppi', 'role.json'), 'r'))
tr = list(role['tr'])
te = list(role['te'])
va = list(role['va'])
mask = np.zeros((nodes_num,), dtype=bool)
train_mask = mask.copy()
train_mask[tr] = True
val_mask = mask.copy()
val_mask[va] = True
test_mask = mask.copy()
test_mask[te] = True
G.ndata['train_mask'] = torch.tensor(train_mask, dtype=torch.bool)
G.ndata['val_mask'] = torch.tensor(val_mask, dtype=torch.bool)
G.ndata['test_mask'] = torch.tensor(test_mask, dtype=torch.bool)
feats = np.load(os.path.join(root, 'ppi', 'feats.npy'))
G.ndata['feat'] = torch.tensor(feats, dtype=torch.float)
class_map = json.load(open(os.path.join(root, 'ppi', 'class_map.json'), 'r'))
labels = np.array([class_map[str(i)] for i in range(nodes_num)])
G.ndata['label'] = torch.tensor(labels, dtype=torch.float)
data = DataType(g=G, num_classes=labels.shape[1])
return data
def load_ppi_large_data():
    '''Wraps DGL's PPIDataset to handle the PPI special case'''
DataType = namedtuple('Dataset', ['num_classes', 'g'])
train_dataset = PPIDataset('train')
train_graph = dgl.batch([train_dataset[i] for i in range(len(train_dataset))], edge_attrs=None, node_attrs=None)
val_dataset = PPIDataset('valid')
val_graph = dgl.batch([val_dataset[i] for i in range(len(val_dataset))], edge_attrs=None, node_attrs=None)
test_dataset = PPIDataset('test')
test_graph = dgl.batch([test_dataset[i] for i in range(len(test_dataset))], edge_attrs=None, node_attrs=None)
G = dgl.batch(
[train_graph, val_graph, test_graph], edge_attrs=None, node_attrs=None)
train_nodes_num = train_graph.number_of_nodes()
test_nodes_num = test_graph.number_of_nodes()
val_nodes_num = val_graph.number_of_nodes()
nodes_num = G.number_of_nodes()
assert(nodes_num == (train_nodes_num + test_nodes_num + val_nodes_num))
# construct mask
mask = np.zeros((nodes_num,), dtype=bool)
train_mask = mask.copy()
train_mask[:train_nodes_num] = True
val_mask = mask.copy()
val_mask[train_nodes_num:-test_nodes_num] = True
test_mask = mask.copy()
test_mask[-test_nodes_num:] = True
G.ndata['train_mask'] = torch.tensor(train_mask, dtype=torch.bool)
G.ndata['val_mask'] = torch.tensor(val_mask, dtype=torch.bool)
G.ndata['test_mask'] = torch.tensor(test_mask, dtype=torch.bool)
data = DataType(g=G, num_classes=train_dataset.num_labels)
return data
|
import pytest
from pybetter.cli import process_file
from pybetter.improvements import FixMutableDefaultArgs
NO_CHANGES_MADE = None
# In these samples we do not use indentation for formatting,
# since this transformer uses the module's inferred indentation
# settings, which may be something like 8 spaces.
NON_MUTABLE_DEFAULTS_IGNORED = (
"""
def f(a=None, b=frozenset(), c=42):
pass
""",
NO_CHANGES_MADE,
)
EMPTY_MUTABLE_DEFAULT_EXTRACTED = (
"""
def f(a=[]):
pass
""",
"""
def f(a=None):
if a is None:
a = []
pass
""",
)
NONEMPTY_MUTABLE_DEFAULT_EXTRACTED = (
"""
def f(a=[42]):
pass
""",
"""
def f(a=None):
if a is None:
a = [42]
pass
""",
)
NESTED_FUNCTIONS_ARE_PROCESSED = (
"""
def outer(a=[53]):
def inner(b=[42]):
pass
""",
"""
def outer(a=None):
if a is None:
a = [53]
def inner(b=None):
if b is None:
b = [42]
pass
""",
)
ARGUMENT_ORDER_PRESERVED = (
"""
def outer(b=[], a={}):
pass
""",
"""
def outer(b=None, a=None):
if b is None:
b = []
if a is None:
a = {}
pass
""",
)
@pytest.mark.parametrize(
"original,expected",
[
NON_MUTABLE_DEFAULTS_IGNORED,
EMPTY_MUTABLE_DEFAULT_EXTRACTED,
NONEMPTY_MUTABLE_DEFAULT_EXTRACTED,
NESTED_FUNCTIONS_ARE_PROCESSED,
ARGUMENT_ORDER_PRESERVED,
],
ids=[
"non-mutable defaults are ignored",
"empty mutable default is extracted",
"non-empty mutable default",
"nested functions with defaults",
"argument order is preserved",
],
)
def test_mutable_defaults_extraction(original, expected):
processed, _ = process_file(original.strip(), [FixMutableDefaultArgs])
assert processed.strip() == (expected or original).strip()
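# Illustrative sketch (not part of the original tests): running the same
# transform outside of pytest on an ad-hoc snippet. As in the test above,
# process_file returns the rewritten source plus extra info that is ignored here.
def _demo_fix_mutable_defaults():
    source = "def greet(names=[]):\n    pass\n"
    processed, _ = process_file(source, [FixMutableDefaultArgs])
    print(processed)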
|
# The goal is to take several SQL queries, completely similar except
# the values, and generalise by doing the inverse operation of binding
# variables to a possibly prepared query.
#
# The results are sets of variables which represent the application data.
# If we can deduce the most important variables: Indexed columns,
# target of "where id=??" statements, we know what are the application
# unique ids, also called "case ids" in process mining.
#
# Some heuristics to differentiate the ids for other values:
# - Column name like "IDxxx"
# - First column of a row.
# - First or unique column of a WHERE statement.
# - If number, uniform distribution of values
#
#
# If suspected identifier is alphanumeric:
# - identical size of all values.
# - non-spellable word, does not exist in any dictionary.
# - same layout of letters-digits for all ids.
#
import sys
import numpy
class induction:
def __init__(self):
self.m_list_samples = []
self.m_map_token_to_index = dict()
@staticmethod
def tokenize(sample):
return []
    # Note: what characterizes a cluster is the substitution.
    # The "average" of two queries consists of the identical tokens, while the
    # differing ones are replaced by "variables" (or the variables already there are kept).
    # Some tokens can be substituted more easily than others: strings (enclosed
    # in quotes) and numbers.
    # It might be quicker to replace them right away.
# This is a kind of Hamming distance.
@staticmethod
def query_distance(sam1,sam2):
len1 = len(sam1)
len2 = len(sam2)
if len1 <= len2:
minLen = len1
deltaLen = len2 - len1
else:
minLen = len2
deltaLen = len1 - len2
median = []
idx = 0
numSubsts = deltaLen
        while idx < minLen:
            elt1 = sam1[idx]
            elt2 = sam2[idx]
            if elt1 != elt2:
                # Differing tokens are generalized into a wildcard placeholder.
                median.append('any letter')
                if isinstance(elt1, str) or isinstance(elt2, str):
                    numSubsts += 1
            else:
                median.append(elt1)
            idx += 1
        # If the lengths differ, apply a heavy penalty.
        numSubsts += 100 * deltaLen
        return numSubsts, median
    # How and why cluster the buffers (aka samples)?
    # We want to split them into homogeneous classes, to separate the data on one side from the code on the other.
    # When a sample is added, we compute its "average" with all the other samples.
    # This "average" represents the tokens shared by two samples: it is a generalization.
    # We keep a map from the "averages" to the samples that produce that average:
    # Samples A and B, "average" operation: *
    # If M = A * B, then M = A * M = M * A = B * M = M * B.
    # The match also returns the number of substitutions needed.
    # The averages worth keeping are those with many participants for the lowest number of substitutions.
    # Indeed, if everything has to be substituted, it is not worth it.
    # We could match a sample against an "average", but that may well create a new "average"
    # if new variables have to be substituted.
    # This new "average" also receives all the samples of the old one, but with a larger number of substitutions,
    # because there are more substitutions.
    # Each "average" stores a list of (number of substitutions, sample).
    # We do not store it if the number of substitutions exceeds a threshold (e.g. 50% of the length).
    # When a new sample arrives, compare it with the averages first.
    # If none of them gives a "satisfactory" result, compare with the samples of the "average"
    # giving the best result?
    # Or else: at the beginning each sample is its own "average", and they are merged as we go?
    # Possibly, store the samples in a tree whose leaves are the samples
    # and whose intermediate nodes are the "averages".
    # When a new sample arrives, traverse the tree breadth-first.
    # We stop at the best score and possibly insert an intermediate node???
    # As soon as there are more than two samples in the same average, delete the samples,
    # stop storing them???
def token_to_index(self,token):
pass
def add_sample(self,sample):
lstToks = induction.tokenize(sample)
lstIndices = [ self.token_to_index(tok) for tok in lstToks ]
        self.m_list_samples.append(lstIndices)
    def clusterize(self):
        num_tokens = len(self.m_map_token_to_index)
        num_samples = len(self.m_list_samples)
        sys.stdout.write("num_tokens=%d num_samples=%d\n" % (num_tokens, num_samples))
        # Pairwise distance matrix between samples (symmetric, zero diagonal).
        distMatrix = numpy.zeros((num_samples, num_samples), dtype=int)
        idx1 = 0
        while idx1 < num_samples:
            sam1 = self.m_list_samples[idx1]
            distMatrix[idx1][idx1] = 0
            idx2 = idx1 + 1
            while idx2 < num_samples:
                sam2 = self.m_list_samples[idx2]
                # query_distance returns (number of substitutions, median query).
                dst, _ = induction.query_distance(sam1, sam2)
                distMatrix[idx1][idx2] = dst
                distMatrix[idx2][idx1] = dst
                idx2 += 1
            idx1 += 1
# http://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html
def get_clusters(self):
pass
    # If periodic:
    # Markov matrix.
    # Detect the states that have the same period.
    # Spot the first complete sequence.
    # The same tokenization rules must be used.
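# Illustrative sketch (not part of the original script): query_distance (defined
# above) on two hand-tokenized queries. The token lists are assumptions for the
# example, since tokenize() is still a stub; only string tokens count as
# substitutions.
def _query_distance_example():
    sam1 = ["insert", "into", "table1", "values", "(", "'id1'", ",", 11, ")"]
    sam2 = ["insert", "into", "table1", "values", "(", "'id2'", ",", 22, ")"]
    num_substs, median = induction.query_distance(sam1, sam2)
    # Differing positions are generalized to a wildcard in the "median" query.
    print(num_substs, median)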
def TestInduction():
tstData = [
"insert into table1 values('id1',11,'aa')",
"insert into table1 values('id2',22,'bb')",
"insert into table1 values('id3',33,'cc')",
"insert into table1 values('id4',44,'dd')",
"update table2 set age=11 where name='id1'",
"update table2 set age=22 where name='id2'",
"update table2 set age=33 where name='id3'",
"update table2 set age=44 where name='id4'",
]
induc = induction()
for tstStr in tstData:
induc.add_sample(tstStr)
induc.clusterize()
    # It should be modular enough so that the creation of CIM entities could be in a separate HTTP server,
# and this could work on tcpdump socket content.
# At the moment, we are planning to detect the file type,
# and then to extract the case ids, and leave the "skeleton" of queries.
# In fact these "skeletons" are much more characteristic of the type of the stream.
if __name__ == '__main__':
TestInduction()
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import time
import json
from base64 import b64encode
import requests
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_v1_5 as Cipher_pkcs1_v1_5
RSA_PUBLIC_KEY = """-----BEGIN PUBLIC KEY-----
MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDC7kw8r6tq43pwApYvkJ5lalja
N9BZb21TAIfT/vexbobzH7Q8SUdP5uDPXEBKzOjx2L28y7Xs1d9v3tdPfKI2LR7P
AzWBmDMn8riHrDDNpUpJnlAGUqJG9ooPn8j7YNpcxCa1iybOlc2kEhmJn5uwoanQ
q+CA6agNkqly2H4j6wIDAQAB
-----END PUBLIC KEY-----"""
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36'
def encrypt_pwd(password, public_key=RSA_PUBLIC_KEY):
rsa_key = RSA.importKey(public_key)
encryptor = Cipher_pkcs1_v1_5.new(rsa_key)
cipher = b64encode(encryptor.encrypt(password.encode('utf-8')))
return cipher.decode('utf-8')
def response_status(resp):
if resp.status_code != requests.codes.OK:
print('Status: %u, Url: %s' % (resp.status_code, resp.url))
return False
return True
def get_current_time():
return time.strftime("[%Y-%m-%d %H:%M:%S]", time.localtime())
def open_image(image_file):
if os.name == "nt":
os.system('start ' + image_file) # for Windows
else:
if os.uname()[0] == "Linux":
os.system("eog " + image_file) # for Linux
else:
os.system("open " + image_file) # for Mac
def save_image(resp, image_file):
with open(image_file, 'wb') as f:
for chunk in resp.iter_content(chunk_size=1024):
f.write(chunk)
def parse_json(s):
begin = s.find('{')
end = s.rfind('}') + 1
return json.loads(s[begin:end])
def get_tag_value(tag, key='', index=0):
if key:
value = tag[index].get(key)
else:
value = tag[index].text
return value.strip(' \t\r\n')
def parse_items_dict(d):
result = ''
for index, key in enumerate(d):
if index < len(d) - 1:
result = result + '{0} x {1}, '.format(key, d[key])
else:
result = result + '{0} x {1}'.format(key, d[key])
return result
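# Illustrative sketch (not part of the original module): exercising the helpers
# above. The password, the JSONP-style response and the item dictionary are all
# made up for the example; encrypt_pwd uses the bundled public key by default.
if __name__ == '__main__':
    print(encrypt_pwd('my-secret-password'))            # base64-encoded RSA ciphertext
    print(parse_json('callback({"success": true});'))   # -> {'success': True}
    print(parse_items_dict({'apple': 2, 'pear': 1}))    # -> 'apple x 2, pear x 1'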
|
#! /usr/bin/env python3
from datetime import datetime
from sys import argv, exit
from time import sleep
from json import load
from notify import notify
## Import Site-Specific Checkers
from websites.newegg import check as newegg
from websites.bestbuy import check as bestbuy
CHECK = {
'newegg': newegg,
'bestbuy': bestbuy,
}
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
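# Illustrative sketch (not part of this script): the shape a checker in the
# websites/ package is assumed to have, based on how CHECK is used in the main
# loop below. It receives one catalog item (a dict with at least a 'url' field)
# and returns True when the product looks purchasable; a parsing failure such as
# a CAPTCHA page surfaces as an exception, which the main loop reports. The
# requests call and the "add to cart" heuristic are assumptions, not the real
# newegg/bestbuy logic.
def example_check(item):
    import requests  # local import so this sketch stays self-contained
    page = requests.get(item['url'], timeout=10)
    page.raise_for_status()
    return 'add to cart' in page.text.lower()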
if __name__ == "__main__":
with open('catalog.json') as catalog_file:
catalog = load(catalog_file)
while True:
for item in catalog:
print(f"{bcolors.HEADER}{str(datetime.now())}{bcolors.ENDC} || {bcolors.OKBLUE}[[ {item['website']} ]]{bcolors.ENDC} {bcolors.BOLD}{item['name']}{bcolors.ENDC} :: ", end="")
try:
if CHECK[item['website']](item):
print(f"{bcolors.OKCYAN}available! notifying now{bcolors.ENDC}")
notify(item['name'], item['website'], item['url'])
else:
print(f"{bcolors.FAIL}not available{bcolors.ENDC}")
except Exception:
print(f"{bcolors.WARNING}CAPTCHA{bcolors.ENDC}")
sleep(10)
|
from tornado import web
from controllers._base import BaseController
class HomeController(BaseController):
@web.authenticated
async def get(self):
self.renderTemplate('home.html')
@web.authenticated
async def post(self):
self.renderJSON({'user': self.current_user.toDict()})
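# Illustrative sketch (not part of this controller): the helpers used above are
# assumed to be thin wrappers defined on BaseController, roughly along these
# lines. ExampleBaseController is a hypothetical stand-in, not the real
# controllers._base implementation.
class ExampleBaseController(web.RequestHandler):
    def renderTemplate(self, template_name, **kwargs):
        # Delegate to Tornado's built-in template rendering.
        self.render(template_name, **kwargs)

    def renderJSON(self, payload):
        # Tornado serializes dicts to JSON and sets the content type.
        self.write(payload)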
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for leaks functions."""
import os
import shutil
import tempfile
import unittest
import mock
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.fuzzing import leak_blacklist
from clusterfuzz._internal.tests.test_libs import test_utils
@test_utils.with_cloud_emulators('datastore')
class LeaksTest(unittest.TestCase):
"""Base class for leaks test cases."""
def setUp(self):
self.data_directory = os.path.join(
os.path.dirname(__file__), 'leak_blacklist_data')
self.temp_directory = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.temp_directory)
def _read_test_data(self, filename):
"""Helper function to read files."""
with open(os.path.join(self.data_directory, filename)) as file_handle:
return file_handle.read()
def _add_dummy_leak_testcase(self):
"""Helper function to add a dummy testcase to Testcase database."""
testcase_item = data_types.Testcase(
crash_type='Direct-leak', crash_state='test_foo\ntest_bar\n')
testcase_item.put()
return testcase_item
def test_single_leak(self):
"""Test highlighting for report with single direct leak."""
data = self._read_test_data('single_direct_leak.txt')
actual_data = leak_blacklist.highlight_first_direct_leak(data)
expected_data = data
self.assertEqual(expected_data, actual_data)
def test_indirect_before_direct_leak(self):
"""Test highlighting when indirect leak precedes first direct leak."""
data = self._read_test_data('indirect_before_direct_leak.txt')
actual_data = leak_blacklist.highlight_first_direct_leak(data)
expected_data = self._read_test_data(
'indirect_before_direct_leak_highlighted.txt')
self.assertEqual(expected_data, actual_data)
def test_multi_direct_leaks(self):
"""Test highlighting for report with multiple direct leaks."""
data = self._read_test_data('multi_direct_leak.txt')
actual_data = leak_blacklist.highlight_first_direct_leak(data)
expected_data = self._read_test_data('multi_direct_leak_expected.txt')
self.assertEqual(expected_data, actual_data)
def test_add_to_global_blacklist(self):
"""Test adding element to global blacklist."""
testcase = self._add_dummy_leak_testcase()
blacklist_item = leak_blacklist.add_crash_to_global_blacklist_if_needed(
testcase)
self.assertTrue(blacklist_item.key.get())
@mock.patch(
'clusterfuzz._internal.fuzzing.leak_blacklist.get_local_blacklist_file_path'
)
def test_copy_global_to_local_blacklist(self,
mock_get_local_blacklist_file_path):
"""Test copying of global to local blacklist."""
local_blacklist_file_path = os.path.join(self.temp_directory,
'lsan_suppressions.txt')
mock_get_local_blacklist_file_path.return_value = local_blacklist_file_path
testcase = self._add_dummy_leak_testcase()
blacklist_item = leak_blacklist.add_crash_to_global_blacklist_if_needed(
testcase)
self.assertTrue(blacklist_item.key.get())
# Test that a reproducible leak gets copied to local blacklist file.
leak_blacklist.copy_global_to_local_blacklist()
blacklist_function = leak_blacklist.get_leak_function_for_blacklist(
testcase)
expected_lsan_suppression_line = (
leak_blacklist.LSAN_SUPPRESSION_LINE.format(function=blacklist_function)
)
self.assertTrue(os.path.isfile(local_blacklist_file_path))
self.assertIn(expected_lsan_suppression_line,
self._read_test_data(local_blacklist_file_path))
# Test that an excluded reproducible leak is not copied to blacklist file.
leak_blacklist.copy_global_to_local_blacklist(excluded_testcase=testcase)
self.assertTrue(os.path.isfile(local_blacklist_file_path))
self.assertNotIn(expected_lsan_suppression_line,
self._read_test_data(local_blacklist_file_path))
def test_clean_up_global_blacklist(self):
"""Test cleaning of global blacklist."""
# Test that a reproducible leak is not cleared from blacklist cleanup.
testcase = self._add_dummy_leak_testcase()
blacklist_item = leak_blacklist.add_crash_to_global_blacklist_if_needed(
testcase)
leak_blacklist.cleanup_global_blacklist()
self.assertTrue(blacklist_item.key.get())
# Test that an unreproducible leak is cleared from blacklist cleanup.
testcase.one_time_crasher_flag = True
testcase.put()
leak_blacklist.cleanup_global_blacklist()
self.assertFalse(blacklist_item.key.get())
# Flip reproducibility flag and verify that testcase is in blacklist.
testcase.one_time_crasher_flag = False
testcase.put()
blacklist_item = leak_blacklist.add_crash_to_global_blacklist_if_needed(
testcase)
self.assertTrue(blacklist_item.key.get())
# Delete testcase and make sure it is removed from blacklist.
testcase.key.delete()
leak_blacklist.cleanup_global_blacklist()
self.assertFalse(blacklist_item.key.get())
if __name__ == '__main__':
unittest.main()
|
from bitmovin_api_sdk.encoding.infrastructure.kubernetes.agent_deployment.agent_deployment_api import AgentDeploymentApi
|
#
# Copyright 2015-2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from collector import (
app,
db,
report_handler,)
from flask import current_app
classification = 'classification'
severity = 'severity'
kernel_version = 'kernel_version'
record_version = 'record_format_version'
machine_id = 'machine_id'
host_type = 'host_type'
arch = 'arch'
build = 'build'
timestamp = 'creation_timestamp'
tid = 'X-Telemetry-Tid'
board_name = 'Board-Name'
cpu_model = 'Cpu-Model'
bios_version = 'Bios-Version'
system_name = 'System-Name'
payload_version = 'Payload-Format-Version'
event_id = 'Event-Id'
REQUIRED_HEADERS_V1 = (
'Arch',
'Build',
'Creation-Timestamp',
'Classification',
'Host-Type',
'Kernel-Version',
'Machine-Id',
'Severity',
'Record-Format-Version',
)
def get_record_v1():
return {
arch: 'x86_64',
build: '550',
timestamp: 1483232401,
classification: 'a/b/c',
host_type: 'LenovoT20',
kernel_version: '3.16.4-123.generic',
machine_id: '1234',
severity: 2,
record_version: 1,
}
def get_record_v2():
v2 = get_record_v1()
v2.update({
record_version: 2,
tid: '6907c830-eed9-4ce9-81ae-76daf8d88f0f',
system_name: 'clear-linux-os',
payload_version: 1
})
return v2
def get_record_v3():
v3 = get_record_v2()
v3.update({
record_version: 3,
board_name: 'D54250WYK|Intel Corporation',
cpu_model: 'Intel(R) Core(TM) i5-4250U CPU @ 1.30GHz',
bios_version: 'WYLPT10H.86A.0041.2015.0720.1108',
})
return v3
def get_record_v4():
v4 = get_record_v3()
v4.update({
record_version: 4,
event_id: '39cc109a1079df96376693ebc7a0f632',
})
return v4
def get_record():
return {
"X-Telemetry-TID": "6907c830-eed9-4ce9-81ae-76daf8d88f0f",
"record_format_version": "2",
"severity": "1",
"classification": "org.clearlinux/hello/world",
"machine_id": "clr-linux-avj01",
"creation_timestamp": "1505235249",
"arch": "x86_64",
"host_type": "blank|blank|blank",
"kernel_version": "4.12.5-374.native",
"system_name": "clear-linux-os",
"build": "17700",
"payload_format_version": "1",
"board_name": "D54250WYK|Intel Corporation",
"cpu_model": "Intel(R) Core(TM) i5-4250U CPU @ 1.30GHz",
"bios_version": "WYLPT10H.86A.0041.2015.0720.1108"
}
class RecordTestCases(unittest.TestCase):
""" Generic object for telemetry record tests """
def setUp(self):
app.testing = True
app.config.from_object('collector.config_local.Testing')
app.debug = False
self.app_context = app.app_context()
self.app_context.push()
db.init_app(current_app)
db.create_all()
self.client = app.test_client()
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_context.pop()
def missing_header(self, header, header_name):
headers = self.get_version_records()
del headers[header_name]
response = self.client.post('/', headers=headers, data='test')
self.assertTrue(response.status_code == 400)
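# Illustrative sketch (not part of the original tests): one way the
# REQUIRED_HEADERS_V1 names map onto the record fields defined above. The helper
# name and the exact mapping are assumptions; the real get_version_records()
# used in missing_header() is defined elsewhere in the test suite.
def example_v1_headers():
    record = get_record_v1()
    return {
        'Arch': str(record[arch]),
        'Build': str(record[build]),
        'Creation-Timestamp': str(record[timestamp]),
        'Classification': str(record[classification]),
        'Host-Type': str(record[host_type]),
        'Kernel-Version': str(record[kernel_version]),
        'Machine-Id': str(record[machine_id]),
        'Severity': str(record[severity]),
        'Record-Format-Version': str(record[record_version]),
    }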
|
import requests
from datetime import datetime, timedelta
from pytz import timezone
from source.classes import Channel, Program
from source.utils import get_epg_datetime
PROGRAMS_URL = "https://nwapi.nhk.jp/nhkworld/epg/v7b/world/s{start}-e{end}.json"
def get_all_channels(): # Hardcode since we are only dealing with one channel
return [Channel("nhk", "nhk.Jp", "NHK World Japan", "")]
def get_programs_by_channel(channel_name, *args):
days = args[0] if args else 1
days = 7 if days > 7 else days
start_temp = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
start_time = int(start_temp.timestamp() * 1000)
end_temp = start_temp + timedelta(days=days)
end_time = int(end_temp.timestamp() * 1000)
url = PROGRAMS_URL.format(start=start_time, end=end_time)
try:
r = requests.get(url)
except requests.exceptions.RequestException as e:
raise SystemExit(e)
if r.status_code != 200:
raise Exception(r.raise_for_status())
output = r.json()["channel"]["item"]
programs = []
for program in output:
start_timestamp = int(int(program["pubDate"]) / 1000)
start_program = datetime.fromtimestamp(
start_timestamp, timezone("UTC"))
end_timestamp = int(int(program["endDate"]) / 1000)
end_program = datetime.fromtimestamp(end_timestamp, timezone("UTC"))
obj = Program(
get_all_channels()[0].tvg_id,
program["title"],
program["description"],
get_epg_datetime(start_program),
get_epg_datetime(end_program),
""
)
programs.append(obj)
return programs
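# Illustrative sketch (not part of the original module): fetch one day of
# listings. This hits the live NHK endpoint, so it is only a manual smoke test;
# the channel id comes from get_all_channels() above.
if __name__ == "__main__":
    programs = get_programs_by_channel("nhk", 1)
    print("Fetched {} programmes for channel {}".format(
        len(programs), get_all_channels()[0].tvg_id))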
|
# NASA EO-Metadata-Tools Python interface for the Common Metadata Repository (CMR)
#
# https://cmr.earthdata.nasa.gov/search/site/docs/search/api.html
#
# Copyright (c) 2020 United States Government as represented by the Administrator
# of the National Aeronautics and Space Administration. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
"""
Test cases for the cmr.auth package
Author: thomas.a.cherry@nasa.gov - NASA
Created: 2020-10-15
"""
#from unittest.mock import Mock
from unittest.mock import patch
import unittest
import os
import uuid
import cmr.util.common as com
# ******************************************************************************
class TestSearch(unittest.TestCase):
"""Test suit for Search API"""
# **********************************************************************
# Util methods
# **********************************************************************
# Tests
def test_conj(self):
"""Test the conj function"""
self.assertEqual([3, 4], com.conj(None, [3, 4]), 'src was None')
self.assertEqual([1, 2, 3, 4], com.conj([1, 2], [3, 4]), 'good src, lists')
self.assertEqual((4, 3, 1, 2), com.conj((1, 2), (3, 4)), 'good src, tuples')
self.assertEqual({'a': 'A', 'b': 'B'}, com.conj({'a':'A'}, {'b':'B'}), 'good src, dict')
def test_always(self):
"""Test the always function"""
self.assertEqual({}, com.always("wrong type"), 'wrong thing')
self.assertEqual({}, com.always([]), 'wrong type')
self.assertEqual({}, com.always({}), 'same type')
self.assertEqual({'a':'b'}, com.always({'a':'b'}), 'populated dict, assumed')
self.assertEqual({'a':'b'}, com.always({'a':'b'}, otype=dict), 'populated dict')
self.assertEqual(['a', 'b'], com.always(['a','b'], otype=list), 'populated list')
self.assertEqual((1,2,3), com.always((1,2,3), otype=tuple), 'populated tuple')
self.assertEqual((1,2,3), com.always((1,2,3), tuple), 'populated tuple, positional')
# None use cases
self.assertEqual({}, com.always(None), 'assumed, none, dict')
self.assertEqual({}, com.always(None, otype=dict), 'None, dict')
self.assertEqual([], com.always(None, otype=list), 'None, list')
self.assertEqual((), com.always(None, otype=tuple), 'None, tuple')
self.assertEqual((), com.always(None, tuple), 'None, tuple, positional')
def test_drop_key_safely(self):
"""Test that values can be dropped safely"""
# pylint: disable=C0301 # lambdas must be on one line
tester = lambda expected, src, key, msg : self.assertEqual(expected, com.drop_key_safely(src, key), msg)
tester({}, {}, "Not existing", "Empty dictionary")
tester({"key":"value"}, {"key": "value"}, "not found", "wrong key, no drop")
tester({}, {"key":"value"}, "key", "drop found key")
def test_write_read_round_trip(self):
"""
Test the read and write functions by doing a full round trip test. Save
some text to a temp file, then read it back, testing both functions at once
"""
path = "/tmp/" + str(uuid.uuid4())
expected = str(uuid.uuid4())
com.write_file(path, expected)
actual = com.read_file(path)
os.remove(path) # cleanup now
self.assertEqual(expected, actual, "Write-Read round trip")
def test_execute_command(self):
"""Execute will run any command, test that it behaves as expected"""
# pylint: disable=C0301 # lambdas must be on one line
tester = lambda expected, given, msg : self.assertEqual(expected, com.execute_command(given), msg)
tester("", "true", "Test a single command response")
tester("_result_", ["printf", '_%s_', 'result'], "Test a command with properties")
@patch('cmr.util.common.execute_command')
def test_security_call(self, execute_command_mock):
"""
test that the code will call an external command and respond as expected
"""
execute_command_mock.return_value = " response info "
self.assertEqual("response info", com.call_security("account", "service"), "Good response")
execute_command_mock.return_value = None
try:
com.call_security("account", "service")
except TypeError as err:
self.assertEqual('account not found in keychain', str(err), "Bad response")
def test_help_format_lambda(self):
"""Test that the lambda function performs as expected"""
cmd = com.help_format_lambda()
self.assertTrue("str(object='') -> str" in cmd("str", ""))
def test_mask_string(self):
"""Test that the mask_diictionary function will clean out sensitive info"""
# pylint: disable=C0301 # lambdas must be on one line
tester = lambda expected, given, msg : self.assertEqual(expected, com.mask_string(given), msg)
tester("", None, "None sent")
tester("", "", "No Letters")
tester("0", "0", "One letter")
tester("01", "01", "Two Letters")
tester("0*2", "012", "Three Letters")
tester('EDL-U123********34567890', 'EDL-U12345678901234567890', "Real example")
def test_mask_dictionary(self):
"""Test that the mask_diictionary function will clean out sensitive info"""
data = {'ignore': 'this',
'token': '012345687', 'cmr-token': 'EDL-U12345678901234567890'}
expected1 = {'ignore': 'this',
'token': '012345687', 'cmr-token': 'EDL-U123********34567890'}
expected2 = {'ignore': 'this',
'token': '012***687', 'cmr-token': 'EDL-U12345678901234567890'}
expected3 = {'ignore': 'this',
'token': '012345687', 'cmr-token': 'EDL-U12345678901234567890'}
expected4 = {'ignore': 'this',
'token': '012***687', 'cmr-token': 'EDL-U123********34567890'}
self.assertEqual(expected1, com.mask_dictionary(data, 'cmr-token'))
self.assertEqual(expected1, com.mask_dictionary(data, ['cmr-token']))
self.assertEqual(expected2, com.mask_dictionary(data, 'token'))
self.assertEqual(expected2, com.mask_dictionary(data, ['token']))
self.assertEqual(expected3, com.mask_dictionary(data, 'cmr'))
self.assertEqual(expected3, com.mask_dictionary(data, ['cmr']))
self.assertEqual(expected4, com.mask_dictionary(data, ['token', 'cmr-token']))
self.assertEqual(data, com.mask_dictionary(data, ''))
self.assertEqual(data, com.mask_dictionary(data, []))
|
'''
Instructions:
1. Run python dino_jump.py - This launches the training tool.
2. Click on the pygame window that opens, to make sure Windows sends the keypresses to that process.
3. Relax the Myo arm, and with your other hand press 0 - This labels the incoming data as class 0
4. Make a fist with your hand and press 1, to label the fist as 1.
5. Try making a closed and open fist and watching the bars change.
6. Once you've gathered enough data, exit the pygame window. This saves the data in data/vals0.dat and vals1.dat
7. If you make a mistake and wrongly classify data, delete vals0 and vals1 and gather the data again.
8. If you're happy it works, change TRAINING_MODE to False.
9. Go to https://trex-runner.com/ and rerun dino_jump.py with TRAINING_MODE set to False.
10. Click in the browser to start the game and tell Windows to send keypresses there.
11. Try making a fist and seeing if the dino jumps
If it doesn't work, feel free to let me know in the discord:
https://discord.com/invite/mG58PVyk83
- PerlinWarp
'''
import pygame
from pygame.locals import *
from pynput.keyboard import Key, Controller
from pyomyo import Myo, emg_mode
import simple_classifier as sc
TRAINING_MODE = True
def dino_handler(pose):
print("Pose detected", pose)
    if pose == 1 and not TRAINING_MODE:
for i in range(0,10):
# Press and release space
keyboard.press(Key.space)
keyboard.release(Key.space)
if __name__ == '__main__':
keyboard = Controller()
pygame.init()
w, h = 800, 320
scr = pygame.display.set_mode((w, h))
font = pygame.font.Font(None, 30)
m = sc.MyoClassifier(sc.Classifier())
hnd = sc.EMGHandler(m)
m.add_emg_handler(hnd)
m.connect()
m.add_raw_pose_handler(dino_handler)
try:
while True:
m.run()
r = m.history_cnt.most_common(1)[0][0]
for ev in pygame.event.get():
if ev.type == QUIT or (ev.type == KEYDOWN and ev.unicode == 'q'):
raise KeyboardInterrupt()
elif ev.type == KEYDOWN:
if K_0 <= ev.key <= K_9:
hnd.recording = ev.key - K_0
elif K_KP0 <= ev.key <= K_KP9:
                        hnd.recording = ev.key - K_KP0
elif ev.unicode == 'r':
hnd.cl.read_data()
elif ev.type == KEYUP:
if K_0 <= ev.key <= K_9 or K_KP0 <= ev.key <= K_KP9:
hnd.recording = -1
scr.fill((0, 0, 0), (0, 0, w, h))
for i in range(10):
x = 0
y = 0 + 30 * i
clr = (0,200,0) if i == r else (255,255,255)
txt = font.render('%5d' % (m.cls.Y == i).sum(), True, (255,255,255))
scr.blit(txt, (x + 20, y))
txt = font.render('%d' % i, True, clr)
scr.blit(txt, (x + 110, y))
scr.fill((0,0,0), (x+130, y + txt.get_height() / 2 - 10, len(m.history) * 20, 20))
scr.fill(clr, (x+130, y + txt.get_height() / 2 - 10, m.history_cnt[i] * 20, 20))
pygame.display.flip()
except KeyboardInterrupt:
pass
finally:
m.disconnect()
print()
pygame.quit()
|
# Copyright (c) 2018 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from UM.Logger import Logger
from UM.i18n import i18nCatalog
from UM.Qt.Duration import DurationFormat
from cura.CuraApplication import CuraApplication
from cura.PrinterOutputDevice import PrinterOutputDevice, ConnectionState
from cura.PrinterOutput.PrinterOutputModel import PrinterOutputModel
from cura.PrinterOutput.PrintJobOutputModel import PrintJobOutputModel
from cura.PrinterOutput.GenericOutputController import GenericOutputController
from .AutoDetectBaudJob import AutoDetectBaudJob
from .AvrFirmwareUpdater import AvrFirmwareUpdater
from serial import Serial, SerialException, SerialTimeoutException
from threading import Thread, Event
from time import time
from queue import Queue
from typing import Union, Optional, List, cast
import re
import functools # Used for reduce
catalog = i18nCatalog("cura")
class USBPrinterOutputDevice(PrinterOutputDevice):
def __init__(self, serial_port: str, baud_rate: Optional[int] = None) -> None:
super().__init__(serial_port)
self.setName(catalog.i18nc("@item:inmenu", "USB printing"))
self.setShortDescription(catalog.i18nc("@action:button Preceded by 'Ready to'.", "Print via USB"))
self.setDescription(catalog.i18nc("@info:tooltip", "Print via USB"))
self.setIconName("print")
self._serial = None # type: Optional[Serial]
self._serial_port = serial_port
self._address = serial_port
self._timeout = 3
# List of gcode lines to be printed
self._gcode = [] # type: List[str]
self._gcode_position = 0
self._use_auto_detect = True
self._baud_rate = baud_rate
self._all_baud_rates = [115200, 250000, 230400, 57600, 38400, 19200, 9600]
        # Instead of using a timer, we really need the update to run in a thread, as reading from serial can block.
self._update_thread = Thread(target = self._update, daemon = True)
self._last_temperature_request = None # type: Optional[int]
self._is_printing = False # A print is being sent.
## Set when print is started in order to check running time.
self._print_start_time = None # type: Optional[float]
self._print_estimated_time = None # type: Optional[int]
self._accepts_commands = True
self._paused = False
self._printer_busy = False # when printer is preheating and waiting (M190/M109), or when waiting for action on the printer
self.setConnectionText(catalog.i18nc("@info:status", "Connected via USB"))
# Queue for commands that need to be sent.
self._command_queue = Queue() # type: Queue
# Event to indicate that an "ok" was received from the printer after sending a command.
self._command_received = Event()
self._command_received.set()
self._firmware_name_requested = False
self._firmware_updater = AvrFirmwareUpdater(self)
CuraApplication.getInstance().getOnExitCallbackManager().addCallback(self._checkActivePrintingUponAppExit)
    # This is a callback function that checks if there is any printing in progress via USB when the application tries
    # to exit. If so, it will show a confirmation before closing.
def _checkActivePrintingUponAppExit(self) -> None:
application = CuraApplication.getInstance()
if not self._is_printing:
# This USB printer is not printing, so we have nothing to do. Call the next callback if exists.
application.triggerNextExitCheck()
return
application.setConfirmExitDialogCallback(self._onConfirmExitDialogResult)
application.showConfirmExitDialog.emit(catalog.i18nc("@label", "A USB print is in progress, closing Cura will stop this print. Are you sure?"))
def _onConfirmExitDialogResult(self, result: bool) -> None:
if result:
application = CuraApplication.getInstance()
application.triggerNextExitCheck()
## Reset USB device settings
#
def resetDeviceSettings(self) -> None:
self._firmware_name = None
## Request the current scene to be sent to a USB-connected printer.
#
# \param nodes A collection of scene nodes to send. This is ignored.
# \param file_name \type{string} A suggestion for a file name to write.
# \param filter_by_machine Whether to filter MIME types by machine. This
# is ignored.
# \param kwargs Keyword arguments.
def requestWrite(self, nodes, file_name = None, filter_by_machine = False, file_handler = None, **kwargs):
if self._is_printing:
            return  # Already printing
# cancel any ongoing preheat timer before starting a print
self._printers[0].getController().stopPreheatTimers()
CuraApplication.getInstance().getController().setActiveStage("MonitorStage")
# find the G-code for the active build plate to print
active_build_plate_id = CuraApplication.getInstance().getMultiBuildPlateModel().activeBuildPlate
gcode_dict = getattr(CuraApplication.getInstance().getController().getScene(), "gcode_dict")
gcode_list = gcode_dict[active_build_plate_id]
self._printGCode(gcode_list)
## Start a print based on a g-code.
# \param gcode_list List with gcode (strings).
def _printGCode(self, gcode_list: List[str]):
self._gcode.clear()
self._paused = False
for layer in gcode_list:
self._gcode.extend(layer.split("\n"))
# Reset line number. If this is not done, first line is sometimes ignored
self._gcode.insert(0, "M110")
self._gcode_position = 0
self._print_start_time = time()
self._print_estimated_time = int(CuraApplication.getInstance().getPrintInformation().currentPrintTime.getDisplayString(DurationFormat.Format.Seconds))
for i in range(0, 4): # Push first 4 entries before accepting other inputs
self._sendNextGcodeLine()
self._is_printing = True
self.writeFinished.emit(self)
def _autoDetectFinished(self, job: AutoDetectBaudJob):
result = job.getResult()
if result is not None:
self.setBaudRate(result)
self.connect() # Try to connect (actually create serial, etc)
def setBaudRate(self, baud_rate: int):
if baud_rate not in self._all_baud_rates:
Logger.log("w", "Not updating baudrate to {baud_rate} as it's an unknown baudrate".format(baud_rate=baud_rate))
return
self._baud_rate = baud_rate
def connect(self):
self._firmware_name = None # after each connection ensure that the firmware name is removed
if self._baud_rate is None:
if self._use_auto_detect:
auto_detect_job = AutoDetectBaudJob(self._serial_port)
auto_detect_job.start()
auto_detect_job.finished.connect(self._autoDetectFinished)
return
if self._serial is None:
try:
self._serial = Serial(str(self._serial_port), self._baud_rate, timeout=self._timeout, writeTimeout=self._timeout)
except SerialException:
Logger.log("w", "An exception occured while trying to create serial connection")
return
CuraApplication.getInstance().globalContainerStackChanged.connect(self._onGlobalContainerStackChanged)
self._onGlobalContainerStackChanged()
self.setConnectionState(ConnectionState.connected)
self._update_thread.start()
def _onGlobalContainerStackChanged(self):
container_stack = CuraApplication.getInstance().getGlobalContainerStack()
num_extruders = container_stack.getProperty("machine_extruder_count", "value")
# Ensure that a printer is created.
controller = GenericOutputController(self)
controller.setCanUpdateFirmware(True)
self._printers = [PrinterOutputModel(output_controller = controller, number_of_extruders = num_extruders)]
self._printers[0].updateName(container_stack.getName())
def close(self):
super().close()
if self._serial is not None:
self._serial.close()
# Re-create the thread so it can be started again later.
self._update_thread = Thread(target=self._update, daemon=True)
self._serial = None
## Send a command to printer.
def sendCommand(self, command: Union[str, bytes]):
if not self._command_received.is_set():
self._command_queue.put(command)
else:
self._sendCommand(command)
def _sendCommand(self, command: Union[str, bytes]):
if self._serial is None or self._connection_state != ConnectionState.connected:
return
new_command = cast(bytes, command) if type(command) is bytes else cast(str, command).encode() # type: bytes
if not new_command.endswith(b"\n"):
new_command += b"\n"
try:
self._command_received.clear()
self._serial.write(new_command)
except SerialTimeoutException:
Logger.log("w", "Timeout when sending command to printer via USB.")
self._command_received.set()
def _update(self):
while self._connection_state == ConnectionState.connected and self._serial is not None:
try:
line = self._serial.readline()
except:
continue
if not self._firmware_name_requested:
self._firmware_name_requested = True
self.sendCommand("M115")
if b"FIRMWARE_NAME:" in line:
self._setFirmwareName(line)
if self._last_temperature_request is None or time() > self._last_temperature_request + self._timeout:
# Timeout, or no request has been sent at all.
if not self._printer_busy: # Don't flood the printer with temperature requests while it is busy
self.sendCommand("M105")
self._last_temperature_request = time()
if re.search(b"[B|T\d*]: ?\d+\.?\d*", line): # Temperature message. 'T:' for extruder and 'B:' for bed
extruder_temperature_matches = re.findall(b"T(\d*): ?(\d+\.?\d*) ?\/?(\d+\.?\d*)?", line)
# Update all temperature values
matched_extruder_nrs = []
for match in extruder_temperature_matches:
extruder_nr = 0
if match[0] != b"":
extruder_nr = int(match[0])
if extruder_nr in matched_extruder_nrs:
continue
matched_extruder_nrs.append(extruder_nr)
if extruder_nr >= len(self._printers[0].extruders):
Logger.log("w", "Printer reports more temperatures than the number of configured extruders")
continue
extruder = self._printers[0].extruders[extruder_nr]
if match[1]:
extruder.updateHotendTemperature(float(match[1]))
if match[2]:
extruder.updateTargetHotendTemperature(float(match[2]))
bed_temperature_matches = re.findall(b"B: ?(\d+\.?\d*) ?\/?(\d+\.?\d*) ?", line)
if bed_temperature_matches:
match = bed_temperature_matches[0]
if match[0]:
self._printers[0].updateBedTemperature(float(match[0]))
if match[1]:
self._printers[0].updateTargetBedTemperature(float(match[1]))
if line == b"":
# An empty line means that the firmware is idle
# Multiple empty lines probably means that the firmware and Cura are waiting
                # for each other due to a missed "ok", so we keep track of empty lines
self._firmware_idle_count += 1
else:
self._firmware_idle_count = 0
if line.startswith(b"ok") or self._firmware_idle_count > 1:
self._printer_busy = False
self._command_received.set()
if not self._command_queue.empty():
self._sendCommand(self._command_queue.get())
elif self._is_printing:
if self._paused:
pass # Nothing to do!
else:
self._sendNextGcodeLine()
if line.startswith(b"echo:busy:"):
self._printer_busy = True
if self._is_printing:
if line.startswith(b'!!'):
Logger.log('e', "Printer signals fatal error. Cancelling print. {}".format(line))
self.cancelPrint()
elif line.lower().startswith(b"resend") or line.startswith(b"rs"):
# A resend can be requested either by Resend, resend or rs.
try:
self._gcode_position = int(line.replace(b"N:", b" ").replace(b"N", b" ").replace(b":", b" ").split()[-1])
except:
if line.startswith(b"rs"):
# In some cases of the RS command it needs to be handled differently.
self._gcode_position = int(line.split()[1])
def _setFirmwareName(self, name):
new_name = re.findall(r"FIRMWARE_NAME:(.*);", str(name))
if new_name:
self._firmware_name = new_name[0]
Logger.log("i", "USB output device Firmware name: %s", self._firmware_name)
else:
self._firmware_name = "Unknown"
Logger.log("i", "Unknown USB output device Firmware name: %s", str(name))
def getFirmwareName(self):
return self._firmware_name
def pausePrint(self):
self._paused = True
def resumePrint(self):
self._paused = False
self._sendNextGcodeLine() #Send one line of g-code next so that we'll trigger an "ok" response loop even if we're not polling temperatures.
def cancelPrint(self):
self._gcode_position = 0
self._gcode.clear()
self._printers[0].updateActivePrintJob(None)
self._is_printing = False
self._paused = False
# Turn off temperatures, fan and steppers
self._sendCommand("M140 S0")
self._sendCommand("M104 S0")
self._sendCommand("M107")
# Home XY to prevent nozzle resting on aborted print
# Don't home bed because it may crash the printhead into the print on printers that home on the bottom
        self._printers[0].homeHead()
self._sendCommand("M84")
def _sendNextGcodeLine(self):
if self._gcode_position >= len(self._gcode):
self._printers[0].updateActivePrintJob(None)
self._is_printing = False
return
line = self._gcode[self._gcode_position]
if ";" in line:
line = line[:line.find(";")]
line = line.strip()
# Don't send empty lines. But we do have to send something, so send M105 instead.
# Don't send the M0 or M1 to the machine, as M0 and M1 are handled as an LCD menu pause.
if line == "" or line == "M0" or line == "M1":
line = "M105"
checksum = functools.reduce(lambda x, y: x ^ y, map(ord, "N%d%s" % (self._gcode_position, line)))
self._sendCommand("N%d%s*%d" % (self._gcode_position, line, checksum))
progress = (self._gcode_position / len(self._gcode))
elapsed_time = int(time() - self._print_start_time)
print_job = self._printers[0].activePrintJob
if print_job is None:
controller = GenericOutputController(self)
controller.setCanUpdateFirmware(True)
print_job = PrintJobOutputModel(output_controller=controller, name=CuraApplication.getInstance().getPrintInformation().jobName)
print_job.updateState("printing")
self._printers[0].updateActivePrintJob(print_job)
print_job.updateTimeElapsed(elapsed_time)
estimated_time = self._print_estimated_time
if progress > .1:
estimated_time = self._print_estimated_time * (1 - progress) + elapsed_time
print_job.updateTimeTotal(estimated_time)
self._gcode_position += 1
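# A minimal standalone sketch of the line framing used in _sendNextGcodeLine above:
# RepRap/Marlin-style hosts send "N<line><gcode>*<checksum>", where the checksum is
# the XOR of every byte of "N<line><gcode>". The helper name and values below are
# illustrative only; functools is already imported by this module, as used above.
def _example_frame_gcode_line(line_number: int, gcode: str) -> str:
    payload = "N%d%s" % (line_number, gcode)
    checksum = functools.reduce(lambda x, y: x ^ y, map(ord, payload))
    return "%s*%d" % (payload, checksum)
# _example_frame_gcode_line(0, "M110") -> "N0M110*3"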
|
import random
from paragen.samplers import AbstractSampler, register_sampler
from paragen.utils.runtime import Environment
@register_sampler
class ShuffleSampler(AbstractSampler):
"""
    ShuffleSampler shuffles the sample order before fetching samples.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._env = Environment()
def build(self, data_source):
"""
Build sampler over data_source
Args:
data_source: a list of data
"""
self._data_source = data_source
self._permutation = [_ for _ in range(len(self._data_source))]
self._length = len(self._permutation)
self.reset(0)
def reset(self, epoch, *args, **kwargs):
"""
        Reset sampler state and shuffle the reading order for the next round of iteration
"""
random.seed(self._env.seed + epoch)
random.shuffle(self._permutation)
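# A minimal usage sketch (assumptions: ShuffleSampler can be constructed without extra
# required arguments and Environment() exposes a `seed` attribute, as the code above implies).
if __name__ == '__main__':
    sampler = ShuffleSampler()
    sampler.build(data_source=['a', 'b', 'c', 'd'])
    sampler.reset(epoch=1)       # reshuffles deterministically from env.seed + epoch
    print(sampler._permutation)  # shuffled index order over the 4 samples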
|
import numpy as np
from icecream import ic
from pathlib import Path
import cv2
data = np.load('data/dance-twirl.npy')[0].astype(np.uint8)
label = np.load('labels/dance-twirl.npy')[0].argmax(axis=-1)
label = label.astype(np.uint8)*255
ic(label.shape)
ic(np.unique(label, return_counts=True))
ic(data.shape)
ic(np.min(data), np.average(data), np.max(data))
ic(data.dtype)
observe_sample_path = Path('observe_samples')
observe_sample_path.mkdir(parents=True, exist_ok=True)
cv2.imwrite(str(observe_sample_path / 'data.png'), data)
cv2.imwrite(str(observe_sample_path / 'label.png'), label)
|
# -*- coding: utf-8 -*-
from time import sleep
from .baseapi import BaseAPI
class Action(BaseAPI):
def __init__(self, *args, **kwargs):
self.id = None
self.token = None
self.status = None
self.type = None
self.started_at = None
self.completed_at = None
self.resource_id = None
self.resource_type = None
self.region = None
self.region_slug = None
# Custom, not provided by the json object.
self.droplet_id = None
super(Action, self).__init__(*args, **kwargs)
@classmethod
def get_object(cls, api_token, action_id, mocked):
"""
        Class method that will return an Action object by ID.
"""
action = cls(token=api_token, id=action_id, mocked=mocked)
action.mock_data = "actions/ipv6_completed.json"
action.load_directly()
return action
def load_directly(self):
self.mock_data = "actions/ipv6_completed.json"
action = self.get_data("actions/%s" % self.id)
if action:
action = action[u'action']
# Loading attributes
for attr in action.keys():
setattr(self, attr, action[attr])
def load(self):
self.mock_data = "actions/ipv6_completed.json"
action = self.get_data(
"droplets/%s/actions/%s" % (
self.droplet_id,
self.id
)
)
if action:
action = action[u'action']
# Loading attributes
for attr in action.keys():
setattr(self, attr, action[attr])
def wait(self, update_every_seconds=1):
"""
Wait until the action is marked as completed or with an error.
It will return True in case of success, otherwise False.
Optional Args:
update_every_seconds - int : number of seconds to wait before
checking if the action is completed.
"""
while self.status == u'in-progress':
sleep(update_every_seconds)
self.load()
return self.status == u'completed'
def __str__(self):
return "%s %s [%s]" % (self.id, self.type, self.status)
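# A minimal usage sketch for the class above; the token and action id are placeholders,
# and mocked=True assumes the mock mode implied by mock_data above.
if __name__ == '__main__':
    action = Action.get_object(api_token='<digitalocean-token>', action_id=12345, mocked=True)
    completed = action.wait(update_every_seconds=2)  # polls load() until status leaves 'in-progress'
    print(action, completed)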
|
# -*- coding: utf-8 -*-
"""Unit tests for contradictory_claims/data."""
|
# pylint: disable=missing-module-docstring
from .wordle_agent import Agent
|
# -*- coding: utf-8 -*-
"""Functions to generate a list of steps to transition from the current state to the desired state."""
from __future__ import (unicode_literals, print_function)
from creds import constants
from creds.ssh import write_authorized_keys
from creds.users import (generate_add_user_command, generate_modify_user_command,
generate_delete_user_command, compare_user, get_user_by_uid)
from creds.utils import execute_command, write_sudoers_entry, remove_sudoers_entry
from external.six import iteritems
def create_plan(existing_users=None, proposed_users=None, purge_undefined=None, protected_users=None,
allow_non_unique_id=None, manage_home=True, manage_keys=True):
"""Determine what changes are required.
args:
existing_users (Users): List of discovered users
proposed_users (Users): List of proposed users
purge_undefined (bool): Remove discovered users that have not been defined in proposed users list
protected_users (list): List of users' names that should not be evaluated as part of the plan creation process
allow_non_unique_id (bool): Allow more than one user to have the same uid
manage_home (bool): Create/remove users' home directories
manage_keys (bool): Add/update/remove users' keys (manage_home must also be true)
returns:
list: Differences between discovered and proposed users with a
list of operations that will achieve the desired state.
"""
plan = list()
proposed_usernames = list()
if not purge_undefined:
purge_undefined = constants.PURGE_UNDEFINED
if not protected_users:
protected_users = constants.PROTECTED_USERS
if not allow_non_unique_id:
allow_non_unique_id = constants.ALLOW_NON_UNIQUE_ID
# Create list of modifications to make based on proposed users compared to existing users
for proposed_user in proposed_users:
proposed_usernames.append(proposed_user.name)
user_matching_name = existing_users.describe_users(users_filter=dict(name=proposed_user.name))
user_matching_id = get_user_by_uid(uid=proposed_user.uid, users=existing_users)
# If user does not exist
if not allow_non_unique_id and user_matching_id and not user_matching_name:
plan.append(
dict(action='fail', error='uid_clash', proposed_user=proposed_user, state='existing', result=None))
elif not user_matching_name:
plan.append(
dict(action='add', proposed_user=proposed_user, state='missing', result=None, manage_home=manage_home,
manage_keys=manage_keys))
# If they do, then compare
else:
user_comparison = compare_user(passed_user=proposed_user, user_list=existing_users)
if user_comparison.get('result'):
plan.append(
dict(action='update', proposed_user=proposed_user, state='existing',
user_comparison=user_comparison, manage_home=manage_home, manage_keys=manage_keys))
    # Applying the proposed user list alone will not remove existing users that are no longer defined
    # If 'PURGE_UNDEFINED' is set, look for existing users that are not in the proposed usernames and mark them for removal
if purge_undefined:
for existing_user in existing_users:
if existing_user.name not in proposed_usernames:
if existing_user.name not in protected_users:
plan.append(
dict(action='delete', username=existing_user.name, state='existing', manage_home=manage_home,
manage_keys=manage_keys))
return plan
def execute_plan(plan=None):
"""Create, Modify or Delete, depending on plan item."""
execution_result = list()
for task in plan:
action = task['action']
if action == 'delete':
command = generate_delete_user_command(username=task.get('username'), manage_home=task['manage_home'])
command_output = execute_command(command)
execution_result.append(dict(task=task, command_output=command_output))
remove_sudoers_entry(username=task.get('username'))
elif action == 'add':
command = generate_add_user_command(proposed_user=task.get('proposed_user'), manage_home=task['manage_home'])
command_output = execute_command(command)
if task['proposed_user'].public_keys and task['manage_home'] and task['manage_keys']:
write_authorized_keys(task['proposed_user'])
if task['proposed_user'].sudoers_entry:
write_sudoers_entry(username=task['proposed_user'].name,
sudoers_entry=task['proposed_user'].sudoers_entry)
execution_result.append(dict(task=task, command_output=command_output))
elif action == 'update':
result = task['user_comparison'].get('result')
# Don't modify user if only keys have changed
action_count = 0
for k, _ in iteritems(result):
if '_action' in k:
action_count += 1
command_output = None
if task['manage_home'] and task['manage_keys'] and action_count == 1 and 'public_keys_action' in result:
write_authorized_keys(task['proposed_user'])
elif action_count == 1 and 'sudoers_entry_action' in result:
write_sudoers_entry(username=task['proposed_user'].name,
sudoers_entry=task['user_comparison']['result']['replacement_sudoers_entry'])
else:
command = generate_modify_user_command(task=task)
command_output = execute_command(command)
if task['manage_home'] and task['manage_keys'] and result.get('public_keys_action'):
write_authorized_keys(task['proposed_user'])
if result.get('sudoers_entry_action'):
write_sudoers_entry(username=task['proposed_user'].name,
sudoers_entry=task['user_comparison']['result']['replacement_sudoers_entry'])
            execution_result.append(dict(task=task, command_output=command_output))
    return execution_result
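# Illustrative only: the shape of the dictionaries that create_plan() emits and
# execute_plan() consumes (field names taken from the functions above; the
# 'proposed_user' value would normally be a creds User object).
_example_plan = [
    dict(action='add', proposed_user=None, state='missing', result=None, manage_home=True, manage_keys=True),
    dict(action='delete', username='olduser', state='existing', manage_home=True, manage_keys=True),
]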
|
# -*- encoding: utf-8 -*-
"""
Copyright (c) 2019 - present AppSeed.us
"""
from app.home import blueprint
from flask import render_template, redirect, url_for, request
from flask_login import login_required, current_user
from app import login_manager
from jinja2 import TemplateNotFound
import razorpay
from bs4 import BeautifulSoup
import requests
client = razorpay.Client(auth=("rzp_test_ui6SHylFxya3wB","mfDawSJMdh75En9EP7xMDZlH"))
@blueprint.route('/index')
@login_required
def index():
return render_template('my-projects.html', segment='index')
@blueprint.route('/colab', methods = ['GET', 'POST'])
def colab():
if request.form:
first = request.form['First_Name']
        last = request.form['Last_Name']  # assumes the form provides a Last_Name field
email = request.form['Email_Address']
location = request.form['Location']
city = request.form['City']
Country = request.form['Country']
Postal = request.form['Postal_Code']
return render_template('try.html', segment='ui_maps',first = first )
return render_template('collaborate.html', segment='ui_maps')
@blueprint.route('/analysis', methods = ['GET', 'POST'])
def analysis():
source = requests.get('https://socialstats.info/report/ashishchanchlani/instagram').text
soup = BeautifulSoup(source,'lxml')
article = soup.find('div',class_='d-flex')
head = article.find_all('p', class_='report-header-number')
y=[]
for i in head:
x = i.text
if " " in x:
x=x.split()[0]
y.append(x)
source = requests.get('https://www.speakrj.com/audit/report/viratkohli/facebook').text
soup = BeautifulSoup(source,'lxml')
article = soup.find('div',class_='justify-content-around')
print(article.prettify())
head = article.find_all('p', class_='report-header-number')
fb=[]
# z=[]
for i in head:
x = i.text
if " " in x:
x=x.split()[0]
fb.append(x)
source = requests.get('https://www.speakrj.com/audit/report/ashchanchlani/twitter').text
soup = BeautifulSoup(source,'lxml')
article = soup.find('div',class_='d-flex')
head = article.find_all('p', class_='report-header-number')
tw=[]
for i in head:
x = i.text
if " " in x:
x=x.split()[0]
tw.append(x)
return render_template('index.html', y = y, fb = fb, tw = tw, segment='index',)
@blueprint.route('/pay', methods = ['GET', 'POST'])
def pay():
name_of_event = 'example'
target_amount = 200 * 100
payment = client.order.create({'amount' : target_amount, 'currency' : 'INR', 'payment_capture' : '1'})
event_details = [name_of_event]
if request.form:
name_of_event = request.form['event_name']
target_amount = int(request.form['target_amount']) * 100
payment = client.order.create({'amount' : target_amount, 'currency' : 'INR', 'payment_capture' : '1'})
event_details = [name_of_event]
return render_template('fund-raiser.html',event_details=event_details,payment=payment)
return render_template('fund-raiser.html',event_details=event_details,payment=payment, segment="ui_tables")
@blueprint.route('/success', methods = ['GET', 'POST'])
def success():
return render_template('includes/success.html')
@blueprint.route('/<template>')
@login_required
def route_template(template):
try:
if not template.endswith( '.html' ):
template += '.html'
# Detect the current page
segment = get_segment( request )
# Serve the file (if exists) from app/templates/FILE.html
return render_template( template, segment=segment )
except TemplateNotFound:
return render_template('page-404.html'), 404
except:
return render_template('page-500.html'), 500
# Helper - Extract current page name from request
def get_segment( request ):
try:
segment = request.path.split('/')[-1]
if segment == '':
segment = 'index'
return segment
except:
return None
|
# Keep the credit if you edit or reuse this module.
# Copy-pasting without credit is not allowed.
# Credits to Sawan (@veryhelpful); learned from Kraken.
import random, re
from uniborg.util import admin_cmd
import asyncio
from telethon import events
@borg.on(admin_cmd(pattern="mst ?(.*)"))
async def _(event):
if not event.text[0].isalpha() and event.text[0] not in ("/", "#", "@", "!"):
await event.edit("mst hu bbro ")
await asyncio.sleep(1)
await event.edit("╔═╦═╗╔══╗╔══╗\n║║║║║║══╣╚╗╔╝\n║║║║║╠══║─║║─\n╚╩═╩╝╚══╝─╚╝─")
@borg.on(admin_cmd(pattern="gm ?(.*)"))
async def _(event):
if not event.text[0].isalpha() and event.text[0] not in ("/", "#", "@", "!"):
await event.edit("good morning ")
await asyncio.sleep(1)
await event.edit("╔══╗╔═╦═╗\n║╔═╣║║║║║\n║╚╗║║║║║║\n╚══╝╚╩═╩╝")
@borg.on(admin_cmd(pattern="good ?(.*)"))
async def _(event):
if not event.text[0].isalpha() and event.text[0] not in ("/", "#", "@", "!"):
await event.edit("╔══╗╔═╗╔═╗╔══╗\n║╔═╣║║║║║║╚╗╗║\n║╚╗║║║║║║║╔╩╝║\n╚══╝╚═╝╚═╝╚══╝")
@borg.on(admin_cmd(pattern="hhlo ?(.*)"))
async def _(event):
if not event.text[0].isalpha() and event.text[0] not in ("/", "#", "@", "!"):
await event.edit("hello,how r u")
await asyncio.sleep(1)
await event.edit("╔╗╔╗╔╗─╔═╗\n║╚╝║║║─║║║\n║╔╗║║╚╗║║║\n╚╝╚╝╚═╝╚═╝")
@borg.on(admin_cmd(pattern="sry ?(.*)"))
async def _(event):
if not event.text[0].isalpha() and event.text[0] not in ("/", "#", "@", "!"):
await event.edit("i'm sorry")
await asyncio.sleep(1)
await event.edit("last time forgive me")
await asyncio.sleep(1)
await event.edit("╔══╗╔═╗╔═╗╔═╗╔═╦╗\n║══╣║║║║╬║║╬║╚╗║║\n╠══║║║║║╗╣║╗╣╔╩╗║\n╚══╝╚═╝╚╩╝╚╩╝╚══╝")
@borg.on(admin_cmd(pattern="thnq ?(.*)"))
async def _(event):
if not event.text[0].isalpha() and event.text[0] not in ("/", "#", "@", "!"):
await event.edit("thanks for your help")
await asyncio.sleep(1)
await event.edit("╔══╗╔╗╔╗╔══╗╔═╦╗╔╦╗╔══╗\n╚╗╔╝║╚╝║║╔╗║║║║║║╔╝║══╣\n─║║─║╔╗║║╠╣║║║║║║╚╗╠══║\n─╚╝─╚╝╚╝╚╝╚╝╚╩═╝╚╩╝╚══╝")
@borg.on(admin_cmd(pattern="ok ?(.*)"))
async def _(event):
if not event.text[0].isalpha() and event.text[0] not in ("/", "#", "@", "!"):
await event.edit("▒▐█▀▀█▌▒▐█▒▐▀\n▒▐█▄▒█▌▒▐██▌░\n▒▐██▄█▌▒▐█▒▐▄")
@borg.on(admin_cmd(pattern="smile ?(.*)"))
async def _(event):
if not event.text[0].isalpha() and event.text[0] not in ("/", "#", "@", "!"):
await event.edit("how sad ")
await asyncio.sleep(1)
await event.edit("╔══╗╔═╦═╗╔══╗╔╗─╔═╗\n║══╣║║║║║╚║║╝║║─║╦╝\n╠══║║║║║║╔║║╗║╚╗║╩╗\n╚══╝╚╩═╩╝╚══╝╚═╝╚═╝")
@borg.on(admin_cmd(pattern="lal ?(.*)"))
async def _(event):
if not event.text[0].isalpha() and event.text[0] not in ("/", "#", "@", "!"):
await event.edit("╔╗─╔═╗╔╗─\n║╚╗║╬║║╚╗\n╚═╝╚═╝╚═╝")
|
# ==========================================================================
# Copyright (C) 2022 Intel Corporation
#
# SPDX-License-Identifier: MIT
# ==========================================================================
import argparse
import os
import subprocess
import sys
import json
def run_cmd(cmd, args=[''], is_root=False):
cmd = ['sudo', cmd] + args if is_root else [cmd] + args
p = subprocess.Popen(cmd,
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
return [p.returncode, out, err]
def accel_get_active_dvices():
ret, out, err = run_cmd(cmd="accel-config", args=["list"])
if ret:
return [ret, err, out]
else:
return [ret, err, json.loads(out)]
def accel_get_all_dvices():
ret, out, err = run_cmd(cmd="accel-config", args=["list", "-i"])
if ret:
return [ret, err, out]
else:
return [ret, err, json.loads(out)]
def accel_load_config(config_file, is_root=False):
ret, out, err = run_cmd(cmd="accel-config", args=["load-config", "-v", "-c", config_file], is_root=is_root)
return [ret, err, out]
def accel_disable_device(device, is_root=False):
ret, out, err = run_cmd(cmd="accel-config", args=["disable-device", "-v", device], is_root=is_root)
return [ret, err, out]
def accel_enable_device(device, is_root=False):
ret, out, err = run_cmd(cmd="accel-config", args=["enable-device", "-v", device], is_root=is_root)
return [ret, err, out]
def accel_enable_wq(device, wq, is_root=False):
ret, out, err = run_cmd(cmd="accel-config", args=["enable-wq", "-v", device + "/" + wq], is_root=is_root)
return [ret, err, out]
def accel_set_block_on_fault(device, wq, bof_flag, is_root=False):
ret, out, err = run_cmd(cmd="accel-config", args=["config-wq", device + "/" + wq, "-b", str(int(bof_flag))], is_root=is_root)
return [ret, err, out]
def get_aggregated(dev_filter):
numas = 0
devices = 0
wqs = 0
engines = 0
ret, err, devices_list = accel_get_active_dvices()
devices_list.sort(key=lambda x: x["numa_node"])
numa = devices_list[0]["numa_node"]
numas = 1
numa_comp = False
devices_names = {}
device1 = devices_list[0]["dev"]
for device in devices_list:
if not dev_filter in device["dev"]:
continue
if numa != device["numa_node"]:
numas += 1
numa = device["numa_node"]
numa_comp = True
if numa_comp:
# TODO
print("Warning: non-uniform numas configuration")
else:
devices += 1
groups = 0
group_idx = 0
group_active = -1
for group in device["groups"]:
if "grouped_workqueues" in group:
groups += 1
if group_active < 0:
group_active = group_idx
group_idx += 1
if groups > 1:
print("Warning: multiple groups for device: " + device["dev"])
if groups == 0:
print("Warning: no groups for device: " + device["dev"])
if wqs:
if wqs != len(device["groups"][group_active]["grouped_workqueues"]) or engines != len(device["groups"][group_active]["grouped_engines"]):
                    print("Warning: non-uniform devices configuration for devices: " + device1 + " and " + device["dev"])
else:
wqs = len(device["groups"][group_active]["grouped_workqueues"])
engines = len(device["groups"][group_active]["grouped_engines"])
return [numas, devices, wqs, engines]
def get_devices_short():
ret, err, devices_list = accel_get_active_dvices()
device_dict = {}
for device in devices_list:
if "iax" in device["dev"]:
device_gen = "iax"
elif "dsa" in device["dev"]:
device_gen = "dsa"
if not device_gen in device_dict:
device_dict[device_gen] = {}
if not device["numa_node"] in device_dict[device_gen]:
device_dict[device_gen][device["numa_node"]] = {}
if not device["dev"] in device_dict[device_gen][device["numa_node"]]:
device_dict[device_gen][device["numa_node"]][device["dev"]] = {}
for group in device["groups"]:
if "grouped_workqueues" in group and "grouped_engines" in group:
if not group["dev"] in device_dict[device_gen][device["numa_node"]][device["dev"]]:
device_dict[device_gen][device["numa_node"]][device["dev"]][group["dev"]] = {'workqueues' : [], "engines" : []}
for wqs in group["grouped_workqueues"]:
device_dict[device_gen][device["numa_node"]][device["dev"]][group["dev"]]["workqueues"].append(wqs["dev"])
for engine in group["grouped_engines"]:
device_dict[device_gen][device["numa_node"]][device["dev"]][group["dev"]]["engines"].append(engine["dev"])
return device_dict
def config_device(conf_file, dev_filter="", bof=False, is_root=False):
print("Filter: " + dev_filter)
if not os.path.exists(conf_file):
raise ValueError(conf_file + " does not exist")
ret, err, active_devices = accel_get_active_dvices()
if len(active_devices):
print("Disabling active devices")
for device in active_devices:
print(" " + device['dev'], end='')
if device['dev'].find(dev_filter) != -1:
ret, err, out = accel_disable_device(device['dev'], is_root=is_root)
if ret:
print(" - error")
else:
print(" - done")
else:
print(" - skipped")
else:
print("No active devices")
print("Loading configuration", end='')
ret, err, out = accel_load_config(conf_file, is_root=is_root)
if ret:
print(" - error")
print("---------")
print(err)
print("---------")
else:
print(" - done")
config_devices = open(conf_file, "r")
config_devices = json.load(config_devices)
print("Additional configuration steps")
print(" Force block on fault: " + str(bof))
for device in config_devices:
if device['dev'].find(dev_filter) != -1:
if device["groups"][0]["grouped_workqueues"]:
for wq in device["groups"][0]["grouped_workqueues"]:
if bof:
ret, err, out = accel_set_block_on_fault(device["dev"], wq["dev"], bof_flag=True, is_root=is_root)
if ret:
print(" - error")
print("---------")
print(err)
print("---------")
print("Enabling configured devices")
for device in config_devices:
print(" " + device["dev"], end='')
if device['dev'].find(dev_filter) != -1:
ret, err, out = accel_enable_device(device["dev"], is_root=is_root)
if ret:
print(" - error")
else:
print(" - done")
if device["groups"][0]["grouped_workqueues"]:
for wq in device["groups"][0]["grouped_workqueues"]:
print(" " + wq["dev"], end='')
ret, err, out = accel_enable_wq(device["dev"], wq["dev"], is_root=is_root)
if ret:
print(" - error")
print("---------")
print(err)
print("---------")
else:
print(" - done")
else:
print(" No work queues configured for the device")
else:
print(" - skipped")
print("Checking configuration")
ret, err, active_devices = accel_get_active_dvices()
if len(active_devices):
active_devices.sort(key=lambda x: x["dev"])
for device in active_devices:
for group in device["groups"]:
if "grouped_workqueues" in group or "grouped_engines" in group:
print(" node: " + str(device['numa_node']) + "; device: " + device['dev'] + "; group: " + group["dev"])
if "grouped_workqueues" in group:
print(" wqs: ", end='')
for wq in group["grouped_workqueues"]:
print(wq["dev"] + " ", end='')
print("")
if "grouped_engines" in group:
print(" engines: ", end='')
for engine in group["grouped_engines"]:
print(engine["dev"] + " ", end='')
print("")
else:
print("No active devices")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Accelerator configurator')
parser.add_argument('--load', default='', metavar='FILE_NAME', help='Configuration file')
parser.add_argument('--filter', default='', metavar='FILTER', help='Device filter')
parser.add_argument('--bof', default=False, action='store_true', help='Set block on fault flag')
parser.add_argument('--root', default=False, action='store_true', help='Use if sudo is required for device configuration')
args = parser.parse_args()
if args.load:
config_device(args.load, args.filter, bof=args.bof, is_root=args.root)
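# Example invocations of this script (file names and device filters are illustrative):
#   python3 accel_config.py --load ./1e1w-s.conf --filter iax --bof --root
#   python3 accel_config.py --load ./dsa_8e8w.conf --filter dsa
# The helpers above can also be used programmatically, e.g.:
#   numas, devices, wqs, engines = get_aggregated(dev_filter="iax")
#   print(get_devices_short())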
|
#!/usr/bin/python3
import tabula
import sys
import json
import getTable
from tableMap import getTableMap, getTableCols
import pdf2pos
def processBedsTable(filename):
# Get Index & TableMap
index = getTable.getIdFromFilename(filename)
tableName = "beds"
tableMap = getTableMap(index,tableName)
colInt, colFloat = getTableCols(tableName)
# Read pdf into list of DataFrame
#if tableMap.get("template"):
# print(f"Using template.")
# df = tabula.read_pdf_with_template(f"./data/{filename}", f"./templates/{tableMap['template']}")
#else:
# df = tabula.read_pdf(f"./data/{filename}", pages=tableMap["page"])
pos = pdf2pos.pdfBedCoordinates(f"./data/{filename}")
#print(pos)
col2str = {'dtype': str}
df = tabula.read_pdf(f"./data/{filename}", pages=pos["page"], area = pos['area'], pandas_options=col2str)
#print(df)
# Select table in page
conf = getTable.checkSize(df,18,len(tableMap["colNames"]))
# Remove header
getTable.rmHeader(conf)
# Rename columns
conf.columns=tableMap["colNames"]
#print(conf)
#colInt = [ "hospital_cur", "icu_cur" ]
#colFloat = [ "hospital_ratio", "icu_ratio" ]
#Convert to Int
getTable.cols2int(conf,colInt)
#Convert to Float
getTable.cols2float(conf,colFloat)
print(conf)
result = json.loads(conf.to_json(orient="records"))
#print(result)
return result
if __name__ == '__main__':
pdf=processBedsTable(sys.argv[1])
|
def main():
print("Is 2 less than 5?: ", (2 < 5))
print("Is 2 greater than 5?:", (2 > 5))
    print("Is 2 equal to 5?: ", (2 == 5))
print("Is 3 greater than or equal to 3?: ", (3 >= 3))
    print("Is 5 less than or equal to 1?: ", (5 <= 1))
print("Is True equal to True?: ", (True == True))
print("Is True equal to False?: ", (True == False))
print("What is the value of True or True?: ", (True or True))
print("What is the value of True or False?: ", (True or False))
print("What is the value of False or False?: ", (False or False))
print("What is the value of False and False?: ", (False and False))
print("What is the value of False and True?: ", (False and True))
    print("What is the value of True and True?: ", (True and True))
print("What is the value of not True and True?: ", (not True and True))
print("What is the value of not True or True?: ", (not True or True))
print("What is the value of not True or not True?: ", (not True or not True))
print("What is the value of True and 0?: ", (True and 0))
print("What is the value of 1 and 0?: ", (1 and 0))
print("What is the value of 1 or 0?: ", (1 or 0))
main()
|
from abc import ABC, abstractmethod
from math import pi
class Person:
def __init__(self, name):
self.name = name
def introduce(self):
print(f'Hello! I am {self.name}')
class Shape(ABC):
@abstractmethod
def calculate_perimeter(self):
pass
@abstractmethod
def calculate_area(self):
pass
class Circle(Shape):
def __init__(self, radius):
self.__radius = radius
def calculate_area(self):
return pi * self.__radius * self.__radius
def calculate_perimeter(self):
return 2 * pi * self.__radius
class Rectangle(Shape):
def __init__(self, height, width):
self.height = height
self.width = width
def calculate_area(self):
return self.height * self.width
def calculate_perimeter(self):
return 2 * (self.height + self.width)
def print_info(obj):
if isinstance(obj, Person):
obj.introduce()
# elif isinstance(obj, Circle) or isinstance(obj, Rectangle):
elif isinstance(obj, Shape):
print(f'Perimeter: {obj.calculate_perimeter()}')
print(f'Area: {obj.calculate_area()}')
else:
print('I don\'t know!')
print_info(Circle(3))
print_info(Rectangle(2, 3))
print_info(Person('Pesho'))
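# A further sketch: any new Shape subclass only needs the two abstract methods to work
# with print_info() unchanged (Square here is illustrative, not part of the original set).
class Square(Shape):
    def __init__(self, side):
        self.side = side
    def calculate_area(self):
        return self.side * self.side
    def calculate_perimeter(self):
        return 4 * self.side
print_info(Square(4))  # Perimeter: 16, Area: 16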
|
"""
This PyQGIS script iterates through all layers with a name matching "_described"
and:
(1) Zooms to and displays all non-path features
(2) Prompts user to confirm deletion
(3) Deletes non-path features upon confirmation
Selecting "No" will end the layer iteration loop.
WARNING: This modifies local shapefiles. Maintain a separate copy for
restoration.
"""
import re
from qgis.PyQt.QtWidgets import QMessageBox
SUFFIX_CLEANED = '_cleaned'
SUFFIX_DESCRIBED = '_described'
def confirm_delete(layer):
"""Prompts the user to confirm if the displayed non-path features of the
given layer should be deleted."""
fids = [
f.id()
for f in layer.getFeatures()
if not re.match('2020', f.attribute('Name'))
]
layer.selectByIds(fids)
description = ''
for f in layer.getFeatures():
if re.match('2020', f.attribute('Name')):
description = f.attribute('descriptio')
box = QMessageBox()
response = QMessageBox.question(box, 'Delete all non-path features?', description)
if response == QMessageBox.Yes:
layer.startEditing()
layer.deleteFeatures(fids)
layer.commitChanges()
if not re.match(SUFFIX_CLEANED, layer.name()):
layer.setName('{}{}'.format(layer.name(), SUFFIX_CLEANED))
set_layer_visibility(layer, False)
return True
set_layer_visibility(layer, False)
return False
def select_layer(layer):
set_layer_visibility(layer, True)
qgis.utils.iface.layerTreeView().setCurrentLayer(layer)
zoom_to_layer(layer)
def set_layer_visibility(layer, visible):
qgis.core.QgsProject.instance().layerTreeRoot().findLayer(layer).setItemVisibilityChecked(visible)
def zoom_to_layer(layer):
fids = [f.id() for f in layer.getFeatures()]
canvas = iface.mapCanvas()
canvas.zoomToFeatureIds(layer, fids)
for layer in qgis.core.QgsProject.instance().mapLayers().values():
# Prompt the user for non-path feature deletion if the layer has been marked
# with an entered description and has not been marked as cleaned already.
if re.search(SUFFIX_DESCRIBED, layer.name()) and not re.search(SUFFIX_CLEANED, layer.name()):
select_layer(layer)
# Exit the loop if a non-path feature deletion confirmation is rejected.
if not confirm_delete(layer):
break
|
# Generated by Django 2.1 on 2018-12-21 07:07
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('articles', '0005_auto_20181219_1816'),
]
operations = [
migrations.CreateModel(
name='CommentEditHistory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('body', models.CharField(max_length=1000)),
('updated_at', models.DateTimeField(auto_now_add=True)),
('comment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='articles.Comment')),
],
options={
'db_table': 'comment_edits',
},
),
]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 29 14:54:12 2018
@author: maximov
"""
import torch, os, sys, cv2
import torch.nn as nn
from torch.nn import init
import functools
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from torch.nn import functional as func
from PIL import Image
import torchvision.transforms as transforms
import numpy as np
#dynamic architecture
class RecurrentBlock(nn.Module):
def __init__(self, input_nc, output_nc, downsampling=False, bottleneck=False, upsampling=False):
super(RecurrentBlock, self).__init__()
self.input_nc = input_nc
self.output_nc = output_nc
self.downsampling = downsampling
self.upsampling = upsampling
self.bottleneck = bottleneck
self.hidden = None
if self.downsampling:
self.l1 = nn.Sequential(
nn.Conv2d(input_nc, output_nc, 3, padding=1),
nn.LeakyReLU(negative_slope=0.1)
)
self.l2 = nn.Sequential(
nn.Conv2d(2 * output_nc, output_nc, 3, padding=1),
nn.LeakyReLU(negative_slope=0.1),
nn.Conv2d(output_nc, output_nc, 3, padding=1),
nn.LeakyReLU(negative_slope=0.1),
)
elif self.upsampling:
self.l1 = nn.Sequential(
nn.Upsample(scale_factor=2, mode='nearest'),
nn.Conv2d(2 * input_nc, output_nc, 3, padding=1),
nn.LeakyReLU(negative_slope=0.1),
nn.Conv2d(output_nc, output_nc, 3, padding=1),
nn.LeakyReLU(negative_slope=0.1),
)
elif self.bottleneck:
self.l1 = nn.Sequential(
nn.Conv2d(input_nc, output_nc, 3, padding=1),
nn.LeakyReLU(negative_slope=0.1)
)
self.l2 = nn.Sequential(
nn.Conv2d(2 * output_nc, output_nc, 3, padding=1),
nn.LeakyReLU(negative_slope=0.1),
nn.Conv2d(output_nc, output_nc, 3, padding=1),
nn.LeakyReLU(negative_slope=0.1),
)
def forward(self, inp):
if self.downsampling:
op1 = self.l1(inp)
op2 = self.l2(torch.cat((op1, self.hidden), dim=1))
self.hidden = op2
return op2
elif self.upsampling:
op1 = self.l1(inp)
return op1
elif self.bottleneck:
op1 = self.l1(inp)
op2 = self.l2(torch.cat((op1, self.hidden), dim=1))
self.hidden = op2
return op2
def reset_hidden(self, inp, dfac):
size = list(inp.size())
size[1] = self.output_nc
size[2] /= dfac
size[3] /= dfac
for s in range(len(size)):
size[s] = int(size[s])
self.hidden_size = size
self.hidden = torch.zeros(*(size)).to('cuda:0')
class RecurrentAE(nn.Module):
def __init__(self, input_nc):
super(RecurrentAE, self).__init__()
self.d1 = RecurrentBlock(input_nc=input_nc, output_nc=32, downsampling=True)
self.d2 = RecurrentBlock(input_nc=32, output_nc=43, downsampling=True)
self.d3 = RecurrentBlock(input_nc=43, output_nc=57, downsampling=True)
self.d4 = RecurrentBlock(input_nc=57, output_nc=76, downsampling=True)
self.d5 = RecurrentBlock(input_nc=76, output_nc=101, downsampling=True)
self.bottleneck = RecurrentBlock(input_nc=101, output_nc=101, bottleneck=True)
self.u5 = RecurrentBlock(input_nc=101, output_nc=76, upsampling=True)
self.u4 = RecurrentBlock(input_nc=76, output_nc=57, upsampling=True)
self.u3 = RecurrentBlock(input_nc=57, output_nc=43, upsampling=True)
self.u2 = RecurrentBlock(input_nc=43, output_nc=32, upsampling=True)
self.u1 = RecurrentBlock(input_nc=32, output_nc=1, upsampling=True)
def set_input(self, inp):
self.inp = inp['A']
def forward(self):
d1 = func.max_pool2d(input=self.d1(self.inp), kernel_size=2)
d2 = func.max_pool2d(input=self.d2(d1), kernel_size=2)
d3 = func.max_pool2d(input=self.d3(d2), kernel_size=2)
d4 = func.max_pool2d(input=self.d4(d3), kernel_size=2)
d5 = func.max_pool2d(input=self.d5(d4), kernel_size=2)
b = self.bottleneck(d5)
u5 = self.u5(torch.cat((b, d5), dim=1))
u4 = self.u4(torch.cat((u5, d4), dim=1))
u3 = self.u3(torch.cat((u4, d3), dim=1))
u2 = self.u2(torch.cat((u3, d2), dim=1))
u1 = self.u1(torch.cat((u2, d1), dim=1))
return u1
def reset_hidden(self):
self.d1.reset_hidden(self.inp, dfac=1)
self.d2.reset_hidden(self.inp, dfac=2)
self.d3.reset_hidden(self.inp, dfac=4)
self.d4.reset_hidden(self.inp, dfac=8)
self.d5.reset_hidden(self.inp, dfac=16)
self.bottleneck.reset_hidden(self.inp, dfac=32)
self.u4.reset_hidden(self.inp, dfac=16)
self.u3.reset_hidden(self.inp, dfac=8)
self.u5.reset_hidden(self.inp, dfac=4)
self.u2.reset_hidden(self.inp, dfac=2)
self.u1.reset_hidden(self.inp, dfac=1)
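# A minimal forward-pass sketch for RecurrentAE. Shapes are illustrative; the spatial
# size must be divisible by 32 (five pooling stages), and reset_hidden() above places
# hidden state on 'cuda:0', so a GPU is assumed.
if __name__ == '__main__':
    if torch.cuda.is_available():
        model = RecurrentAE(input_nc=3).to('cuda:0')
        frame = torch.randn(1, 3, 128, 128, device='cuda:0')
        model.set_input({'A': frame})
        model.reset_hidden()   # allocate per-block hidden state for this resolution
        out = model.forward()  # -> torch.Size([1, 1, 128, 128])
        print(out.shape)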
|
class Console:
def __init__(self, input_path):
self.instructions = []
self._read_program(input_path)
self.accumulator = 0
self.loop = False # switches to True when revisiting an instruction
def _read_program(self, input_path):
        with open(input_path) as f:
            lines = [line.strip() for line in f]
for line in lines:
action, value = line.split(' ')
self.instructions.append({
'action': action,
'value': int(value),
})
def run(self):
seen = set()
ind = 0
while ind < len(self.instructions):
if ind in seen:
self.loop = True
break
seen.add(ind)
action = self.instructions[ind]['action']
value = self.instructions[ind]['value']
if action == 'nop':
ind += 1
elif action == 'acc':
self.accumulator += value
ind += 1
elif action == 'jmp':
ind += value
else:
raise RuntimeError("Invalid action")
def day08a(input_path):
console = Console(input_path)
console.run()
return console.accumulator
def test08a():
assert 5 == day08a('test_input.txt')
def day08b(input_path):
console = Console(input_path)
jmp_inds = [ind for ind, inst in enumerate(console.instructions) if inst['action'] == 'jmp']
for ind in jmp_inds:
console = Console(input_path)
console.instructions[ind]['action'] = 'nop'
console.run()
if not console.loop:
return console.accumulator
nop_inds = [ind for ind, inst in enumerate(console.instructions) if inst['action'] == 'nop']
for ind in nop_inds:
console = Console(input_path)
console.instructions[ind]['action'] = 'jmp'
console.run()
if not console.loop:
return console.accumulator
print('No solution found')
def test08b():
assert 8 == day08b('test_input.txt')
if __name__ == '__main__':
test08a()
print('Day 08a:', day08a('day08_input.txt'))
test08b()
print('Day 08b:', day08b('day08_input.txt'))
|
import itertools
from typing import List
class Solution:
def ambiguousCoordinates(self, s: str) -> List[str]:
def make(frag):
N = len(frag)
for d in range(1, N + 1):
left, right = frag[:d], frag[d:]
if ((not left.startswith('0') or left == '0') and (not right.endswith('0'))):
yield left + ('.' if d != N else '') + right
S = s[1: -1]
return ["({}, {})".format(*cand) for i in range(1, len(S)) for cand in itertools.product(make(S[:i]), make(S[i:]))]
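# A quick check of the method above on the example from LeetCode 816 ("Ambiguous
# Coordinates"); printed order may differ from the problem statement.
if __name__ == '__main__':
    print(Solution().ambiguousCoordinates("(123)"))
    # -> ['(1, 2.3)', '(1, 23)', '(1.2, 3)', '(12, 3)']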
|
#!/usr/bin/env python3
# coding=utf-8
import os
import subprocess
import requests
import yaml
import json
import sys
from pathlib import Path
from loguru import logger
from yuque import YuQue
from lake2md import lake_to_md
from dotenv import dotenv_values
user_config = dotenv_values(".env")
class Config:
def __init__(self, prefix):
try:
pwd = Path(__file__).absolute()
file_path = Path(pwd).parent / 'config.json'
logger.info(file_path)
with open(file_path, 'r') as f:
config = json.load(f)
if prefix in config.keys():
self.token = config[prefix]['token']
# self.namespace = config[prefix]['token']
self.basedir = config[prefix]['basedir']
self.desdir = Path(self.basedir, config[prefix]['desdir'])
self.workdir = Path(self.basedir, config[prefix]['workdir'])
self.cmd = config[prefix]['cmd']
self.conf = config[prefix]['hugo']
else:
                logger.debug("Invalid configuration")
except OSError as e:
logger.exception(e)
def deploy(self):
if self.cmd != '':
os.chdir(self.workdir)
return subprocess.Popen(self.cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE,encoding="utf-8")
else:
            logger.debug("Command is empty")
def init_doc(prefix, namespace):
config = Config(prefix)
yq = YuQue(config.token)
trees = yq.get_info(namespace)
    logger.debug("Document tree contents: {}", trees)
    # Write the table of contents locally so it can be used later for updates and deletions
tree_path = Path(config.desdir, prefix + '.json')
with open(tree_path, 'w+') as f:
json.dump(trees, f, indent=6)
    # Walk the document tree
for tree in trees:
path = Path(config.desdir, tree['path'])
if Path(path).exists():
pass
else:
Path(path).mkdir(parents=True, exist_ok=True)
doc = yq.get_doc(namespace, tree['slug'])
if doc['format'] == 'lake':
file = Path(config.desdir, Path(tree['path']), tree['title'] + '.md')
with open(file, 'w') as f:
md_doc = lake_to_md(doc['body'], doc['title'])
f.writelines(md_doc)
            logger.info("{} --- downloaded", doc['title'])
        else:
            logger.info("{doc_title} is in format {doc_format} --- unsupported format, skipping", doc_title=doc['title'], doc_format=doc['format'])
def init_config(token, prefix):
pwd = Path(__file__).absolute()
file_path = Path(pwd).parent / 'config.json'
with open(file_path, 'r') as f:
config = json.load(f)
basedir = user_config.get('BASEDIR', Path.home())
desdir = Path(basedir, prefix, user_config.get('DESDIR', 'content'))
workdir = Path(basedir, prefix)
if workdir.exists():
os.chdir(workdir)
else:
workdir.mkdir(parents=True, exist_ok=True)
os.chdir(workdir)
subprocess.call("hugo new site .",shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE,encoding="utf-8")
desdir.mkdir(parents=True, exist_ok=True)
conf = {
prefix: {
"token": token,
"basedir": str(basedir),
"desdir": str(desdir),
"workdir": str(workdir),
"cmd": "hugo",
"hugo": {
"html": True,
"shortcode": False
}
}
}
if prefix not in config:
config.update(conf)
logger.debug(config)
with open(file_path, 'w') as f:
json.dump(config, f, indent = 6)
        logger.debug("{} has been added to the configuration", prefix)
else:
        logger.debug("{} already exists", prefix)
def publish_doc(slug, doc, title, prefix, namespace):
config = Config(prefix)
    # Fetch the table of contents
yq = YuQue(config.token)
trees = yq.get_info(namespace)
tree_path = Path(config.desdir, prefix + '.json')
with open(tree_path, 'w+') as f:
json.dump(trees, f, indent=6)
for tree in trees:
if tree['slug'] == slug:
path = Path(config.desdir, tree['path'])
if Path(path).exists():
pass
else:
Path(path).mkdir(parents=True)
doc = yq.get_doc(namespace, tree['slug'])
file = Path(config.desdir, Path(tree['path']), tree['title'] + '.md')
with open(file, 'w') as f:
# md_doc = hugo_lake_to_md(doc, tree['title'], html=config.html)
md_doc = lake_to_md(doc['body'], tree['title'])
f.writelines(md_doc)
title = tree['title']
else:
pass
config.deploy()
    logger.info("Knowledge base {} published an article titled <<{}>> and it has been deployed!", namespace, title)
def delete_doc(slug, prefix, namespace):
config = Config(prefix)
tree_path = Path(config.desdir, prefix + '.json')
with open(tree_path, 'r') as f:
trees = json.load(f)
for tree in trees:
if tree['slug'] == slug:
path = Path(config.desdir, tree['path'], tree['title'] + '.md')
Path(path).unlink()
title = tree['title']
config.deploy()
    logger.info("Knowledge base {} deleted an article titled <<{}>>!", namespace, title)
def update_doc(slug, doc, title, prefix, namespace):
config = Config(prefix)
yq = YuQue(config.token)
tree_path = Path(config.desdir, prefix + '.json')
with open(tree_path, 'r') as f:
trees = json.load(f)
for tree in trees:
if tree['slug'] == slug:
path = Path(config.desdir, tree['path'])
if Path(path).exists():
pass
else:
Path(path).mkdir(parents=True)
            logger.debug("Document was modified or moved; overwriting directly")
file = Path(path, tree['title'] + '.md')
doc = yq.get_doc(namespace, tree['slug'])
with open(file, 'w') as f:
md_doc = lake_to_md(doc['body'], tree['title'])
f.writelines(md_doc)
title = tree['title']
else:
pass
config.deploy()
    logger.info("Knowledge base {} updated an article titled <<{}>>!", namespace, title)
if __name__ == '__main__':
# init_doc('zjan-bwcmnq')
init_config('cccc', 'abcd')
|
import base64
from rdflib import URIRef
def http_formatter(namespace, value):
""" Formats a namespace and ending value into a full RDF URI format with NO
'<' and '>' encapsulation
args:
namespace: RdfNamespace or tuple in the format of (prefix, uri,)
value: end value to attach to the namespace
"""
return "%s%s" % (namespace[1], value)
def uri_formatter(namespace, value):
""" Formats a namespace and ending value into a full RDF URI format with
'<' and '>' encapsulation
args:
namespace: RdfNamespace or tuple in the format of (prefix, uri,)
value: end value to attach to the namespace
"""
return "<%s%s>" % (namespace[1], value)
def ttl_formatter(namespace, value):
    """ Formats an RdfNamespace in RDF Turtle format if possible, otherwise
        returns the full RDF URI format
        args:
            namespace: RdfNamespace or tuple in the format of (prefix, uri,)
value: end value to attach to the RdfNamespace
"""
    # if the namespace prefix exists, format in ttl format
if namespace[0]:
return "%s:%s" % (namespace[0], value)
else:
# else return in full Uri format
return uri_formatter(namespace, value)
def pyuri_formatter(namespace, value):
""" Formats a namespace and ending value into a python friendly format
args:
namespace: RdfNamespace or tuple in the format of (prefix, uri,)
value: end value to attach to the namespace
"""
if namespace[0]:
return "%s_%s" %(namespace[0], value)
else:
return "pyuri_%s_%s" % (base64.b64encode(bytes(namespace[1],
"utf-8")).decode(),
value)
def rdflib_formatter(namespace, value):
""" formats the URI as an 'rdflib' URIRef
args:
namespace: RdfNamespace or tuple in the format of (prefix, uri,)
value: end value to attach to the namespace
"""
return URIRef(http_formatter(namespace, value))
def xmletree_formatter(namespace, value):
    """ formats the URI in xml.etree Clark notation, i.e. {namespace_uri}value
args:
namespace: RdfNamespace or tuple in the format of (prefix, uri,)
value: end value to attach to the namespace
"""
return "{%s}%s" % (namespace[1], value)
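# Example outputs of the formatters above for the FOAF namespace given as a
# (prefix, uri) tuple:
if __name__ == '__main__':
    ns = ("foaf", "http://xmlns.com/foaf/0.1/")
    print(http_formatter(ns, "name"))      # http://xmlns.com/foaf/0.1/name
    print(uri_formatter(ns, "name"))       # <http://xmlns.com/foaf/0.1/name>
    print(ttl_formatter(ns, "name"))       # foaf:name
    print(pyuri_formatter(ns, "name"))     # foaf_name
    print(xmletree_formatter(ns, "name"))  # {http://xmlns.com/foaf/0.1/}name
    print(rdflib_formatter(ns, "name"))    # rdflib URIRef of the full IRI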
|
#
# This file is part of PKPDApp (https://github.com/pkpdapp-team/pkpdapp) which
# is released under the BSD 3-clause license. See accompanying LICENSE.md for
# copyright notice and full license details.
#
"""
Django settings for pkpdapp project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/.
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/.
"""
import os
import dj_database_url
# Set BASE_DIR to two directories up
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
SECRET_KEY = os.environ.get("SECRET_KEY", default='foo')
DEBUG = int(os.environ.get("DEBUG", default=0))
ALLOWED_HOSTS = ['localhost', '127.0.0.1', 'pkpdapp.herokuapp.com']
# Application definition - to use any of those you need to run `manage.py
# migrate` first
INSTALLED_APPS = [
# standard Django apps
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# external apps
'dpd_static_support',
'django_extensions',
'djoser',
'rest_framework',
'rest_framework.authtoken',
# internal apps
'pkpdapp',
]
DJOSER = {
'PASSWORD_RESET_CONFIRM_URL': 'reset-password/{uid}/{token}',
'ACTIVATION_URL': 'activate/{uid}/{token}',
'SEND_ACTIVATION_EMAIL': True,
'SEND_CONFIRMATION_EMAIL': True,
'PASSWORD_CHANGED_EMAIL_CONFIRMATION': True,
'SERIALIZERS': {},
'PERMISSIONS': {
'activation': ['rest_framework.permissions.AllowAny'],
'password_reset': ['rest_framework.permissions.AllowAny'],
'password_reset_confirm': ['rest_framework.permissions.AllowAny'],
'set_password': ['djoser.permissions.CurrentUserOrAdmin'],
'username_reset': ['rest_framework.permissions.AllowAny'],
'username_reset_confirm': ['rest_framework.permissions.AllowAny'],
'set_username': ['djoser.permissions.CurrentUserOrAdmin'],
'user_create': ['rest_framework.permissions.AllowAny'],
'user_delete': ['djoser.permissions.CurrentUserOrAdmin'],
'user': ['djoser.permissions.CurrentUserOrAdmin'],
'user_list': ['djoser.permissions.CurrentUserOrAdmin'],
'token_create': ['rest_framework.permissions.AllowAny'],
'token_destroy': ['rest_framework.permissions.IsAuthenticated'],
},
}
# django rest framework library
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAuthenticated',
]
}
CRISPY_TEMPLATE_PACK = 'bootstrap4'
MARKDOWNIFY_MARKDOWN_EXTENSIONS = [
'mdx_math',
]
MARKDOWNIFY_WHITELIST_TAGS = [
'a',
'abbr',
'acronym',
'b',
'blockquote',
'em',
'i',
'li',
'ol',
'p',
'strong',
'ul',
'h',
'script',
]
MARKDOWNIFY_WHITELIST_ATTRS = [
'href',
'src',
'alt',
'type',
]
MARKDOWNIFY_WHITELIST_STYLES = [
'color',
'font-weight',
]
MIDDLEWARE = [
"corsheaders.middleware.CorsMiddleware",
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
CORS_ALLOWED_ORIGINS = [
"http://127.0.0.1:3000",
]
ROOT_URLCONF = 'pkpdapp.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'pkpdapp', 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
X_FRAME_OPTIONS = 'SAMEORIGIN'
WSGI_APPLICATION = 'pkpdapp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
DATABASE_URL = os.environ.get('DATABASE_URL')
db_from_env = dj_database_url.config(
default=DATABASE_URL, conn_max_age=500, ssl_require=False
)
DATABASES['default'].update(db_from_env)
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME':
'django.contrib.auth.password_validation.'
'UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.'
'MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.'
'CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.'
'NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Media files (such as data sets and model files)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = [
]
# Staticfiles finders for locating dash app assets and related files
STATICFILES_FINDERS = [
# Django default finders
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
# Forever cachable files and compression support
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# Redirect to home URL after login (Default redirects to /accounts/profile/)
LOGIN_REDIRECT_URL = '/'
EMAIL_HOST = os.environ.get("EMAIL_HOST", default=None)
if EMAIL_HOST is None:
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
else:
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_PORT = os.environ.get("EMAIL_PORT", default='foo')
EMAIL_HOST_USER = os.environ.get("EMAIL_HOST_USER", default='foo')
EMAIL_HOST_PASSWORD = os.environ.get("EMAIL_HOST_PASSWORD", default='foo')
DEFAULT_FROM_EMAIL = os.environ.get("DEFAULT_FROM_EMAIL",
default='webmaster@localhost')
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
}
}
|
# -*- coding: utf8 -*-
# Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tencentcloud.common.abstract_model import AbstractModel
class DataPoint(AbstractModel):
    """A monitoring data point
"""
def __init__(self):
"""
        :param Dimensions: Dimension combination of the instance object
        :type Dimensions: list of Dimension
        :param Timestamps: Array of timestamps indicating which points in time have data; a missing timestamp means there is no data point for that time (the point was dropped)
        :type Timestamps: list of float
        :param Values: Array of monitoring values, corresponding one-to-one with Timestamps
        :type Values: list of float
"""
self.Dimensions = None
self.Timestamps = None
self.Values = None
def _deserialize(self, params):
if params.get("Dimensions") is not None:
self.Dimensions = []
for item in params.get("Dimensions"):
obj = Dimension()
obj._deserialize(item)
self.Dimensions.append(obj)
self.Timestamps = params.get("Timestamps")
self.Values = params.get("Values")
class DescribeBaseMetricsRequest(AbstractModel):
    """DescribeBaseMetrics request parameters structure
"""
def __init__(self):
"""
        :param Namespace: Business namespace
        :type Namespace: str
        :param MetricName: Metric name
        :type MetricName: str
"""
self.Namespace = None
self.MetricName = None
def _deserialize(self, params):
self.Namespace = params.get("Namespace")
self.MetricName = params.get("MetricName")
class DescribeBaseMetricsResponse(AbstractModel):
    """DescribeBaseMetrics response parameters structure
"""
def __init__(self):
"""
        :param MetricSet: List of metric descriptions returned by the query
        :type MetricSet: list of MetricSet
        :param RequestId: Unique request ID, returned with every request. The RequestId is needed to locate a problem with a request.
        :type RequestId: str
"""
self.MetricSet = None
self.RequestId = None
def _deserialize(self, params):
if params.get("MetricSet") is not None:
self.MetricSet = []
for item in params.get("MetricSet"):
obj = MetricSet()
obj._deserialize(item)
self.MetricSet.append(obj)
self.RequestId = params.get("RequestId")
class Dimension(AbstractModel):
"""实例对象的维度组合
"""
def __init__(self):
"""
:param Name: 实例维度名称
:type Name: str
:param Value: 实例维度值
:type Value: str
"""
self.Name = None
self.Value = None
def _deserialize(self, params):
self.Name = params.get("Name")
self.Value = params.get("Value")
class DimensionsDesc(AbstractModel):
"""维度信息
"""
def __init__(self):
"""
:param Dimensions: 维度名数组
:type Dimensions: list of str
"""
self.Dimensions = None
def _deserialize(self, params):
self.Dimensions = params.get("Dimensions")
class GetMonitorDataRequest(AbstractModel):
"""GetMonitorData请求参数结构体
"""
def __init__(self):
"""
:param Namespace: 命名空间,每个云产品会有一个命名空间
:type Namespace: str
:param MetricName: 指标名称,各个云产品的详细指标说明请参阅各个产品[监控接口](https://cloud.tencent.com/document/product/248/30384)文档
:type MetricName: str
:param Instances: 实例对象的维度组合
:type Instances: list of Instance
:param Period: 监控统计周期。默认为取值为300,单位为s
:type Period: int
:param StartTime: 起始时间,如2018-09-22T19:51:23+08:00
:type StartTime: str
:param EndTime: 结束时间,默认为当前时间。 EndTime不能小于EtartTime
:type EndTime: str
"""
self.Namespace = None
self.MetricName = None
self.Instances = None
self.Period = None
self.StartTime = None
self.EndTime = None
def _deserialize(self, params):
self.Namespace = params.get("Namespace")
self.MetricName = params.get("MetricName")
if params.get("Instances") is not None:
self.Instances = []
for item in params.get("Instances"):
obj = Instance()
obj._deserialize(item)
self.Instances.append(obj)
self.Period = params.get("Period")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
class GetMonitorDataResponse(AbstractModel):
"""GetMonitorData返回参数结构体
"""
def __init__(self):
"""
:param Period: 统计周期
:type Period: int
:param MetricName: 指标名
:type MetricName: str
:param DataPoints: 数据点数组
:type DataPoints: list of DataPoint
:param StartTime: 开始时间
:type StartTime: str
:param EndTime: 结束时间
:type EndTime: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Period = None
self.MetricName = None
self.DataPoints = None
self.StartTime = None
self.EndTime = None
self.RequestId = None
def _deserialize(self, params):
self.Period = params.get("Period")
self.MetricName = params.get("MetricName")
if params.get("DataPoints") is not None:
self.DataPoints = []
for item in params.get("DataPoints"):
obj = DataPoint()
obj._deserialize(item)
self.DataPoints.append(obj)
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.RequestId = params.get("RequestId")
class Instance(AbstractModel):
"""实例维度组合数组
"""
def __init__(self):
"""
:param Dimensions: 实例的维度组合
:type Dimensions: list of Dimension
"""
self.Dimensions = None
def _deserialize(self, params):
if params.get("Dimensions") is not None:
self.Dimensions = []
for item in params.get("Dimensions"):
obj = Dimension()
obj._deserialize(item)
self.Dimensions.append(obj)
class MetricObjectMeaning(AbstractModel):
"""指标数据的解释
"""
def __init__(self):
"""
:param En: 指标英文解释
:type En: str
:param Zh: 指标中文解释
:type Zh: str
"""
self.En = None
self.Zh = None
def _deserialize(self, params):
self.En = params.get("En")
self.Zh = params.get("Zh")
class MetricSet(AbstractModel):
"""对业务指标的单位及支持统计周期的描述
"""
def __init__(self):
"""
:param Namespace: 命名空间,每个云产品会有一个命名空间
:type Namespace: str
:param MetricName: 指标名称
:type MetricName: str
:param Unit: 指标使用的单位
:type Unit: str
:param UnitCname: 指标使用的单位
:type UnitCname: str
:param Period: 指标支持的统计周期,单位是秒,如60、300
:type Period: list of int
:param Periods: 统计周期内指标方式
:type Periods: list of PeriodsSt
:param Meaning: 统计指标含义解释
:type Meaning: :class:`tencentcloud.monitor.v20180724.models.MetricObjectMeaning`
:param Dimensions: 维度描述信息
:type Dimensions: list of DimensionsDesc
"""
self.Namespace = None
self.MetricName = None
self.Unit = None
self.UnitCname = None
self.Period = None
self.Periods = None
self.Meaning = None
self.Dimensions = None
def _deserialize(self, params):
self.Namespace = params.get("Namespace")
self.MetricName = params.get("MetricName")
self.Unit = params.get("Unit")
self.UnitCname = params.get("UnitCname")
self.Period = params.get("Period")
if params.get("Periods") is not None:
self.Periods = []
for item in params.get("Periods"):
obj = PeriodsSt()
obj._deserialize(item)
self.Periods.append(obj)
if params.get("Meaning") is not None:
self.Meaning = MetricObjectMeaning()
self.Meaning._deserialize(params.get("Meaning"))
if params.get("Dimensions") is not None:
self.Dimensions = []
for item in params.get("Dimensions"):
obj = DimensionsDesc()
obj._deserialize(item)
self.Dimensions.append(obj)
class PeriodsSt(AbstractModel):
"""周期内的统计方式
"""
def __init__(self):
"""
:param Period: 周期
:type Period: str
:param StatType: 统计方式
:type StatType: list of str
"""
self.Period = None
self.StatType = None
def _deserialize(self, params):
self.Period = params.get("Period")
self.StatType = params.get("StatType")
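# A minimal deserialization sketch (illustrative payload, not taken from a real
# API response): each model is populated from a plain dict via _deserialize(),
# which is how JSON responses are mapped onto these classes.
#
#     point = DataPoint()
#     point._deserialize({
#         "Dimensions": [{"Name": "InstanceId", "Value": "ins-123"}],
#         "Timestamps": [1514736120.0, 1514736180.0],
#         "Values": [0.1, 0.2],
#     })
#     # point.Dimensions[0].Name == "InstanceId"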
|
import time
class log:
"""
This module is used to track the progress of events
and write it into a log file.
"""
def __init__(self,filepath,init_message):
self.message = ''
self.filepath = filepath
self.progress = {'task':[init_message],'time':[time.process_time()]}
print(self.progress['task'][-1]
+ ': {:.4f}'.format(self.progress['time'][-1]))
def time_event(self,message):
self.progress['task'].append(message)
self.progress['time'].append(time.process_time())
print(self.progress['task'][-1]
+ ': {:.4f}'.format(self.progress['time'][-1]
- self.progress['time'][-2]))
def record(self,message):
self.message = message
def save(self):
progress = self.progress
with open(self.filepath,'w') as logfile:
for idx in range(1,len(progress['task'])):
logfile.write(progress['task'][idx]
+ ': {:.4f}\n'.format(progress['time'][idx]
- progress['time'][idx - 1]))
logfile.write(self.message)
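# A minimal usage sketch for the log helper above (file path and messages are
# illustrative only):
#
#     tracker = log('run.log', 'start')
#     # ... do some work ...
#     tracker.time_event('finished preprocessing')
#     tracker.record('all steps completed without errors')
#     tracker.save()   # writes the per-step durations plus the recorded message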
|
# coding:utf-8
__author__ = 'timmyliang'
__email__ = '820472580@qq.com'
__date__ = '2020-03-24 13:44:48'
"""
"""
import sys
import inspect
# NOTE https://stackoverflow.com/questions/4214936/
# intercept a function and retrieve the modified values
def get_modified_values(target):
def wrapper(*args, **kwargs):
# get the applied args
kargs = getcallargs(target, *args, **kwargs)
print kargs
# # get the source code
# src = inspect.getsource(target)
# lines = src.split('\n')
# # oh noes string patching of the function
# unindent = len(lines[0]) - len(lines[0].lstrip())
# indent = lines[0][:len(lines[0]) - len(lines[0].lstrip())]
# lines[0] = ''
# lines[1] = indent + 'def _temp(_args, ' + lines[1].split('(')[1]
# setter = []
# for k in kargs.keys():
# setter.append('_args["%s"] = %s' % (k, k))
# i = 0
# while i < len(lines):
# indent = lines[i][:len(lines[i]) - len(lines[i].lstrip())]
# if lines[i].find('return ') != -1 or lines[i].find('return\n') != -1:
# for e in setter:
# lines.insert(i, indent + e)
# i += len(setter)
# elif i == len(lines) - 2:
# for e in setter:
# lines.insert(i + 1, indent + e)
# break
# i += 1
# for i in range(0, len(lines)):
# lines[i] = lines[i][unindent:]
# data = '\n'.join(lines) + "\n"
# # setup variables
# frame = inspect.currentframe()
# loc = inspect.getouterframes(frame)[1][0].f_locals
# glob = inspect.getouterframes(frame)[1][0].f_globals
# loc['_temp'] = None
# # compile patched function and call it
# func = compile(data, '<witchstuff>', 'exec')
# eval(func, glob, loc)
# loc['_temp'](kargs, *args, **kwargs)
# # there you go....
# print kargs
# # >> {'a': 10, 'b': 1223, 'f': 'Hello World'}
return wrapper
# from python 2.7 inspect module
def getcallargs(func, *positional, **named):
"""Get the mapping of arguments to values.
A dict is returned, with keys the function argument names (including the
names of the * and ** arguments, if any), and values the respective bound
values from 'positional' and 'named'."""
args, varargs, varkw, defaults = inspect.getargspec(func)
f_name = func.__name__
arg2value = {}
# The following closures are basically because of tuple parameter unpacking.
assigned_tuple_params = []
def assign(arg, value):
if isinstance(arg, str):
arg2value[arg] = value
else:
assigned_tuple_params.append(arg)
value = iter(value)
for i, subarg in enumerate(arg):
try:
subvalue = next(value)
except StopIteration:
raise ValueError('need more than %d %s to unpack' %
(i, 'values' if i > 1 else 'value'))
assign(subarg,subvalue)
try:
next(value)
except StopIteration:
pass
else:
raise ValueError('too many values to unpack')
def is_assigned(arg):
if isinstance(arg,str):
return arg in arg2value
return arg in assigned_tuple_params
if inspect.ismethod(func) and func.im_self is not None:
# implicit 'self' (or 'cls' for classmethods) argument
positional = (func.im_self,) + positional
num_pos = len(positional)
num_total = num_pos + len(named)
num_args = len(args)
num_defaults = len(defaults) if defaults else 0
for arg, value in zip(args, positional):
assign(arg, value)
if varargs:
if num_pos > num_args:
assign(varargs, positional[-(num_pos-num_args):])
else:
assign(varargs, ())
elif 0 < num_args < num_pos:
raise TypeError('%s() takes %s %d %s (%d given)' % (
f_name, 'at most' if defaults else 'exactly', num_args,
'arguments' if num_args > 1 else 'argument', num_total))
elif num_args == 0 and num_total:
raise TypeError('%s() takes no arguments (%d given)' %
(f_name, num_total))
for arg in args:
if isinstance(arg, str) and arg in named:
if is_assigned(arg):
raise TypeError("%s() got multiple values for keyword "
"argument '%s'" % (f_name, arg))
else:
assign(arg, named.pop(arg))
if defaults: # fill in any missing values with the defaults
for arg, value in zip(args[-num_defaults:], defaults):
if not is_assigned(arg):
assign(arg, value)
if varkw:
assign(varkw, named)
elif named:
unexpected = next(iter(named))
if isinstance(unexpected, unicode):
unexpected = unexpected.encode(sys.getdefaultencoding(), 'replace')
raise TypeError("%s() got an unexpected keyword argument '%s'" %
(f_name, unexpected))
unassigned = num_args - len([arg for arg in args if is_assigned(arg)])
if unassigned:
num_required = num_args - num_defaults
raise TypeError('%s() takes %s %d %s (%d given)' % (
f_name, 'at least' if defaults else 'exactly', num_required,
'arguments' if num_required > 1 else 'argument', num_total))
return arg2value
def main():
@get_modified_values
def foo(a, f, b):
print a, f, b
a = 10
if a == 2:
return a
f = 'Hello World'
b = 1223
e = 1
c = 2
foo(e, 1000, b = c)
main()
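# This file targets Python 2 (print statements, unicode, im_self). On Python 2.7
# and later the same argument-to-value mapping is available directly from the
# standard library as inspect.getcallargs, so the hand-copied helper above can be
# replaced by the built-in (sketch):
#
#     import inspect
#     def foo(a, f, b):
#         return a + b
#     inspect.getcallargs(foo, 1, 1000, b=2)   # -> {'a': 1, 'f': 1000, 'b': 2}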
|
class Solution:
def canConstruct(self, ransomNote, magazine):
"""
:type ransomNote: str
:type magazine: str
:rtype: bool
"""
ransomNote = list(ransomNote)
magazine = list(magazine)
r_len = len(ransomNote)
m_len = len(magazine)
if r_len > m_len:
return False
if r_len == 0:
return True
i = 0
k = 0
while i < r_len and k < m_len:
if ransomNote[i] == magazine[k]:
magazine.pop(k)
m_len -= 1
k = 0
i += 1
else:
k += 1
return i == r_len
if __name__ == '__main__':
solution = Solution()
print(solution.canConstruct("a", "b"))
print(solution.canConstruct("aa", "ab"))
print(solution.canConstruct("aa", "aa"))
print(solution.canConstruct("bjaajgea", \
"affhiiicabhbdchbidghccijjbfjfhjeddgggbajhidhjchiedhdibgeaecffbbbefiabjdhggihccec"))
else:
pass
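# For reference, the same check can be written with collections.Counter, which
# counts letters once instead of repeatedly scanning the magazine (a sketch, not
# part of the original solution above):
#
#     from collections import Counter
#     def can_construct(ransom_note, magazine):
#         # every letter needed by ransom_note must be available in magazine
#         return not (Counter(ransom_note) - Counter(magazine))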
|
# -*- coding: utf-8 -*-
"""Test C-loop and BLAS ufuncs
"""
import hypothesis as hy
import numpy as np
import numpy_linalg.gufuncs._gufuncs_cloop as gfc
import numpy_linalg.gufuncs._gufuncs_blas as gfb
import numpy_linalg.testing.unittest_numpy as utn
import numpy_linalg.testing.hypothesis_numpy as hn
from numpy_linalg.testing import main, TestCaseNumpy
# =============================================================================
# pylint: disable=missing-function-docstring
errstate = np.errstate(invalid='raise')
hy.settings.register_profile("slow",
suppress_health_check=(hy.HealthCheck.too_slow,))
hy.settings.load_profile('slow')
np.set_printoptions(precision=2, threshold=10, edgeitems=2)
# =============================================================================
__all__ = ['TestBlas', 'TestBlasVectors', 'TestCloop']
# =============================================================================
# =============================================================================
# Test BLAS ufuncs
# =============================================================================
class TestBlas(TestCaseNumpy):
"""Testing norm, matmul and rmatmul"""
def setUp(self):
super().setUp()
self.gfm = gfb
@hy.given(hn.matrices_b)
def test_norm_returns_expected_shapes(self, m_bs):
v_s = m_bs[(0,) * (m_bs.ndim - 1)]
tall = m_bs.shape
self.assertArrayShape(self.gfm.norm(m_bs), tall[:-1])
self.assertArrayShape(self.gfm.norm(m_bs, axis=-2), utn.drop(tall))
self.assertArrayShape(self.gfm.norm(v_s, keepdims=True), (1,))
self.assertArrayShape(self.gfm.norm(v_s), ())
@hy.given(hn.broadcastable('(a,b)', None))
def test_norm_returns_expected_values(self, m_bs: np.ndarray):
nout = np.empty_like(m_bs[..., 0].real)
nrms = self.gfm.norm(m_bs, out=nout)
norms = np.sqrt((np.abs(m_bs)**2).sum(-1))
self.assertArrayAllClose(nrms, norms)
self.assertArrayAllClose(nout, norms)
@hy.given(hn.broadcastable('(a,b),(b,c)', 'd'))
def test_matmul_returns_expected_shapes(self, arrays):
m_sb, m_bs = arrays
hy.assume(hn.nonsquare(m_sb))
hy.assume(hn.nonsquare(m_bs))
expect = utn.array_return_shape('(a,b),(b,c)->(a,c)', m_sb, m_bs)
self.assertArrayShape(self.gfm.matmul(m_sb, m_bs), expect)
with self.assertRaisesRegex(*utn.core_dim_err):
self.gfm.matmul(m_bs, m_bs)
with self.assertRaisesRegex(*utn.broadcast_err):
self.gfm.matmul(*utn.make_bad_broadcast(m_sb, m_bs))
@hy.given(hn.broadcastable('(a,b),(b,c)', None))
def test_matmul_returns_expected_values(self, arrays):
m_sb, m_bs = arrays
expect = utn.array_return_shape('(a,b),(b,c)->(a,c)', m_sb, m_bs)
pout = np.empty(expect, m_sb.dtype)
pres = self.gfm.matmul(m_sb, m_bs, out=pout)
prod = np.matmul(m_sb, m_bs)
self.assertArrayAllClose(pres, prod)
self.assertArrayAllClose(pout, prod)
@hy.given(hn.broadcastable('(a,b),(b,c)', 'd'))
def test_rmatmul_returns_expected_shapes(self, arrays):
m_sb, m_bs = arrays
hy.assume(hn.nonsquare(m_sb))
hy.assume(hn.nonsquare(m_bs))
expect = utn.array_return_shape('(a,b),(b,c)->(a,c)', m_sb, m_bs)
self.assertArrayShape(self.gfm.rmatmul(m_bs, m_sb), expect)
with self.assertRaisesRegex(*utn.core_dim_err):
self.gfm.rmatmul(m_bs, m_bs)
with self.assertRaisesRegex(*utn.broadcast_err):
self.gfm.rmatmul(*utn.make_bad_broadcast(m_bs, m_sb))
@hy.given(hn.broadcastable('(a,b),(b,c)', None))
def test_rmatmul_returns_expected_values(self, arrays):
m_sb, m_bs = arrays
expect = utn.array_return_shape('(a,b),(b,c)->(a,c)', m_sb, m_bs)
pout = np.empty(expect, m_sb.dtype)
pres = self.gfm.rmatmul(m_bs, m_sb, out=pout)
prod = np.matmul(m_sb, m_bs)
self.assertArrayAllClose(pres, prod)
self.assertArrayAllClose(pout, prod)
class TestBlasVectors(TestCaseNumpy):
"""Testing matmul and rmatmul"""
def setUp(self):
super().setUp()
self.gfm = gfb
@hy.given(hn.broadcastable('(a,a),(a,b),(b,b),(b,a),(a),(b)', 'd'))
def test_matmul_flexible_signature_with_vectors(self, arrays):
m_ss, m_sb, m_bb, m_bs = arrays[:-2]
v_s, v_b = hn.core_only(*arrays[-2:], dims=1)
smol, wide, big, tall = [arr.shape for arr in arrays[:-2]]
hy.assume(hn.nonsquare(m_sb))
off_b, y_one = utn.make_off_by_one(m_sb, m_sb)
# with self.subTest('matrix-vector'):
self.assertArrayShape(self.gfm.matmul(m_sb, v_b), wide[:-1])
self.assertArrayShape(self.gfm.matmul(m_bs, v_s), tall[:-1])
with self.assertRaisesRegex(*utn.core_dim_err):
self.gfm.matmul(m_sb, v_s)
with self.assertRaisesRegex(*utn.core_dim_err):
# This would succeed/broadcast error if interpreted as Mv:
self.gfm.matmul(m_sb[off_b], m_sb[y_one])
# with self.subTest('vector-matrix'):
self.assertArrayShape(self.gfm.matmul(v_s, m_ss), smol[:-1])
self.assertArrayShape(self.gfm.matmul(v_b, m_bb), big[:-1])
with self.assertRaisesRegex(*utn.core_dim_err):
self.gfm.matmul(v_b, m_sb)
with self.assertRaisesRegex(*utn.core_dim_err):
# This would succeed/broadcast error if interpreted as vM:
self.gfm.matmul(m_sb[y_one], m_sb[off_b])
# with self.subTest('vector-vector'):
self.assertArrayShape(self.gfm.matmul(v_s, v_s), ())
with self.assertRaisesRegex(*utn.core_dim_err):
self.gfm.matmul(v_s, v_b)
@hy.given(hn.broadcastable('(a,a),(a,b),(b,b),(b,a),(a),(b)', 'd'))
def test_rmatmul_flexible_signature_with_vectors(self, arrays):
m_ss, m_sb, m_bb, m_bs = arrays[:-2]
v_s, v_b = hn.core_only(*arrays[-2:], dims=1)
smol, wide, big, tall = [arr.shape for arr in arrays[:-2]]
hy.assume(hn.nonsquare(m_sb))
off_b, y_one = utn.make_off_by_one(m_sb, m_sb)
# with self.subTest('matrix-vector'):
self.assertArrayShape(self.gfm.rmatmul(v_s, m_bs), tall[:-1])
self.assertArrayShape(self.gfm.rmatmul(v_b, m_sb), wide[:-1])
with self.assertRaisesRegex(*utn.core_dim_err):
self.gfm.rmatmul(v_b, m_bs)
with self.assertRaisesRegex(*utn.core_dim_err):
# This would succeed/broadcast error if interpreted as Mv:
self.gfm.rmatmul(m_sb[y_one], m_sb[off_b])
        # with self.subTest('vector-matrix'):
self.assertArrayShape(self.gfm.rmatmul(m_ss, v_s), smol[:-1])
self.assertArrayShape(self.gfm.rmatmul(m_bb, v_b), big[:-1])
with self.assertRaisesRegex(*utn.core_dim_err):
self.gfm.rmatmul(m_bs, v_s)
with self.assertRaisesRegex(*utn.core_dim_err):
# This would succeed/broadcast error if interpreted as vM:
self.gfm.rmatmul(m_sb[off_b], m_sb[y_one])
# with self.subTest('vector-vector'):
self.assertArrayShape(self.gfm.rmatmul(v_b, v_b), ())
with self.assertRaisesRegex(*utn.core_dim_err):
self.gfm.rmatmul(v_b, v_s)
# =============================================================================
# Test cloop ufuncs
# =============================================================================
class TestCloop(TestBlas):
"""Testing norm, matmul, rmatmul and rtrue_tdivide
"""
def setUp(self):
super().setUp()
self.gfm = gfc
@hy.given(hn.broadcastable('(a,b),(a,b)', 'd'))
def test_rtrue_divide_returns_expected_shapes(self, arrays):
a_bs, m_bs = arrays
a_bs[np.abs(a_bs) < 1e-5] += 1.
expect = utn.array_return_shape('(),()->()', a_bs, m_bs)
self.assertArrayShape(self.gfm.rtrue_divide(a_bs, m_bs), expect)
with self.assertRaisesRegex(*utn.broadcast_err):
self.gfm.rtrue_divide(*utn.make_bad_broadcast(m_bs, a_bs))
@hy.given(hn.broadcastable('(a,b),(a,b)', None))
def test_rtrue_divide_returns_expected_values(self, arrays):
a_bs, m_bs = arrays
expect = utn.array_return_shape('(),()->()', a_bs, m_bs)
zout = np.empty(expect, m_bs.dtype)
a_bs[np.abs(a_bs) < 1e-5] += 1.
zres = self.gfm.rtrue_divide(a_bs, m_bs, out=zout)
zzz = m_bs / a_bs
self.assertArrayAllClose(zres, zzz)
self.assertArrayAllClose(zout, zzz)
# =============================================================================
if __name__ == '__main__':
main(verbosity=2)
|
from read_inputs import read_file
fileName = "d7_inputs.txt"
# fileName = "d7_test.txt"
# fileName = "real_inputs7.txt"
counter = 0
max_num = 0
min_num = 0
import numpy as np
def diff(test_point, data):
    # determine the total distance between test_point and every value in data
    # e.g. diff(2, [0, 1, 1, 2, 2, 2, 4, 7, 14, 16]) == 37
difference = 0
# print("data: ", data)
for x in data:
difference += abs(x-test_point)
# print(f"differnce between {x} and {test_point}: {difference}")
return difference
def decide_next_move(x_pos_testing, data):
x_pos_testing = int(x_pos_testing)
global counter, max_num, min_num
if counter > 50:
return 0
counter += 1
#assume all values coming in are good
print(f"Testing value: {x_pos_testing}")
x_left = x_pos_testing - 1
x_right = x_pos_testing + 1
x_center = x_pos_testing
left_diff = diff(x_left, data)
right_diff = diff(x_right, data)
center_diff = diff(x_center, data)
if left_diff > center_diff and right_diff > center_diff:
print("found minima at:", x_pos_testing)
return x_pos_testing
elif left_diff > right_diff:
# print("negative slope:", left_diff - right_diff)
return decide_next_move(min([int(x_center * 1.5), max_num]), data)
elif left_diff < right_diff:
# print("positive slope:", left_diff - right_diff)
return decide_next_move(max([min_num, int(x_center / 2)]), data)
def classify_slope(index, data):
    # returns whether the point is on a negative or positive slope, or is the minimum
    # (unfinished helper; not called from main())
    x_left = index - 1
    x_right = index + 1
    x_center = index
slope = ()
left_diff = diff(x_left, data)
right_diff = diff(x_right, data)
center_diff = diff(x_center, data)
def decide_next_state(step_size, ind_to_check, data):
ind_to_check += step_size
decision = classify_slope(ind_to_check, data)
def main():
global max_num, min_num
numbers = read_file(fileName) # returns list of contents of file as strings
numbers = numbers[0]
numbers = np.asarray(list(map(int, numbers.split(","))))
numbers.sort()
print(numbers)
max_num = max(numbers)
min_num = min(numbers)
step_size = max_num / 2
e = []
for x in range(max_num+1):
total = diff(x, numbers)
print(f"total diff for {x}: {total}")
e.append(total)
print("min (trivial):", min(e))
print("\n\n\n")
tracker = np.zeros(max_num + 1)
    for x in range(max_num + 1):  # propagate initial state
tracker[x] = len(numbers[numbers==x])
# print(decide_next_move(h_pos_test, numbers)) #start recursion
total = 0
for value, x in enumerate(tracker):
total += (value+1) * x
print(total)
# for x in numbers:
print(tracker)
main()
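# Side note (sketch, reusing the `diff` helper and `numbers` array above): the
# total-distance cost computed by diff() is minimised at the median of the
# positions, so the brute-force scan over every candidate position can be
# replaced by a direct computation:
#
#     def min_fuel(numbers):
#         median = int(np.median(numbers))
#         return diff(median, numbers)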
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
import csv
import copy
import xlrd
import operator
from functools import reduce
from django.db.models import Q
from django.utils.timezone import datetime
from idcops.lib.utils import shared_queryset
from idcops.lib.tasks import device_post_save
from idcops.models import (
Option, Rack, Client, Unit, Pdu, User, Online, Device
)
CreatorId = 1
def import_online_for_csv(filename, onidc_id):
with open(filename, 'r') as csvfile:
csvreader = csv.reader(csvfile)
field_describe = [d for d in next(csvreader) if d]
fieldnames = [f for f in next(csvreader) if f]
rows = csv.DictReader(csvfile, fieldnames=fieldnames)
handler_error = []
handler_warning = []
handler_success = []
index = 0
for index, row in enumerate(rows, 1):
if index > 500:
                # only process 500 rows per import
msg = "一次最多导入500条数据"
handler_error.append(msg)
break
raw = copy.copy(row)
try:
_created = '-'.join(re.split(r'-|/', row.get('created')))
created = datetime.strptime(_created, '%Y-%m-%d')
except:
msg = "第{}行:日期格式不正确,跳过处理本行".format(index)
handler_error.append(msg)
continue
raw.update(**dict(created=created))
verify = Device.objects.filter(name=raw.get('name'))
if verify.exists():
msg = "第{}行:{}设备已存在".format(index, raw.get('name'))
handler_error.append(msg)
continue
style = get_or_create_style(raw.get('style'), onidc_id)
creator = get_creator(raw.get('creator'))
            # look up the rack
rack, err = get_rack(raw.get('rack'), onidc_id)
if not rack:
msg = "第{}行:{}".format(index, err)
handler_error.append(msg)
continue
            # look up or create the client
client, err = get_or_create_client(raw.get('client'), onidc_id)
if not client:
msg = "第{}行:{}".format(index, err)
handler_error.append(msg)
continue
            # instantiate the online device
instance = Online(
created=created, style=style, creator=creator,
rack=rack, client=client, name=raw.get('name'),
sn=raw.get('sn'), ipaddr=raw.get('ipaddr'),
model=raw.get('model'), onidc_id=onidc_id
)
instance.save()
            # save the U positions (units)
units, err = clean_units(raw.get('units'), rack.pk)
if units:
for u in units:
instance.units.add(u)
units.update(actived=False)
instance.save()
else:
msg = "第{}行:{}".format(index, err)
handler_error.append(msg)
                # invalid U positions, delete this instance
instance.delete()
continue
handler_success.append(instance.name)
            # save the PDUs
pdus, err = clean_pdus(raw.get('pdus'), rack.pk)
if pdus:
for p in pdus:
instance.pdus.add(p)
pdus.update(actived=False)
instance.save()
else:
msg = "第{}行:{}".format(index, err)
handler_warning.append(msg)
            # save the tags
tags = clean_tags(raw.get('tags'), onidc_id, creator.pk)
if tags:
for t in tags:
instance.tags.add(t)
instance.save()
device_post_save(instance.pk, True)
return handler_error, handler_warning, handler_success, index
def import_online(path, onidc_id):
fileds = [
'name', 'creator', 'rack', 'client', 'created', 'onidc',
'sn', 'model', 'ipaddr', 'style', 'units', 'pdus', 'tags'
]
workbook = xlrd.open_workbook(path)
sheets = workbook.sheet_names()
worksheet = workbook.sheet_by_name(sheets[0])
    # collect import errors, warnings and successes for reporting
handler_error = []
handler_warning = []
handler_success = []
index = 0
headers = None
for index, row in enumerate(worksheet.get_rows(), 1):
# header = index
if index == 1:
            # skip the header row
continue
if index == 2:
            # read the field names
headers = [h.value for h in row]
continue
if index > 502:
            # only process 500 rows per import
msg = "一次最多导入500条数据"
handler_error.append(msg)
break
data = dict(zip(headers, [k.value for k in row]))
raw = {k: str(data.get(k)) for k in fileds}
_created = '-'.join(re.split(r'-|/', raw.get('created')))
created = datetime.strptime(_created, '%Y-%m-%d')
raw.update(**dict(created=created))
verify = Device.objects.filter(name=raw.get('name'))
if verify.exists():
msg = "第{}行:{}设备已存在".format(index, raw.get('name'))
handler_error.append(msg)
continue
else:
style = get_or_create_style(raw.get('style'), onidc_id)
creator = get_creator(raw.get('creator'))
            # look up the rack
rack, err = get_rack(raw.get('rack'), onidc_id)
if not rack:
msg = "第{}行:{}".format(index, err)
handler_error.append(msg)
continue
            # look up or create the client
client, err = get_or_create_client(raw.get('client'), onidc_id)
if not client:
msg = "第{}行:{}".format(index, err)
handler_error.append(msg)
continue
            # instantiate the online device
instance = Online(
created=created, style=style, creator=creator,
rack=rack, client=client, name=raw.get('name'),
sn=raw.get('sn'), ipaddr=raw.get('ipaddr'),
model=raw.get('model'), onidc_id=onidc_id
)
instance.save()
            # save the U positions (units)
units, err = clean_units(raw.get('units'), rack.pk)
if units:
for u in units:
instance.units.add(u)
units.update(actived=False)
instance.save()
else:
msg = "第{}行:{}".format(index, err)
handler_error.append(msg)
                # invalid U positions, delete this instance
instance.delete()
continue
handler_success.append(instance.name)
            # save the PDUs
pdus, err = clean_pdus(raw.get('pdus'), rack.pk)
if pdus:
for p in pdus:
instance.pdus.add(p)
pdus.update(actived=False)
instance.save()
else:
msg = "第{}行:{}".format(index, err)
handler_warning.append(msg)
            # save the tags
tags = clean_tags(raw.get('tags'), onidc_id, creator.pk)
if tags:
for t in tags:
instance.tags.add(t)
instance.save()
device_post_save(instance.pk, True)
total = (index-2)
return handler_error, handler_warning, handler_success, total
def import_rack(path, onidc_id):
fileds = [
'name', 'cname', 'zone', 'client', 'style',
'status', 'unitc', 'pduc', 'cpower', 'tags'
]
workbook = xlrd.open_workbook(path)
sheets = workbook.sheet_names()
worksheet = workbook.sheet_by_name(sheets[0])
    # collect import errors, warnings and successes for reporting
handler_error = []
handler_warning = []
handler_success = []
index = 0
headers = None
for index, row in enumerate(worksheet.get_rows(), 1):
# header = index
if index == 1:
            # skip the header row
continue
if index == 2:
            # read the field names
headers = [h.value for h in row]
continue
if index > 1002:
            # only process 1000 rows per import
msg = "一次最多导入1000条数据"
handler_error.append(msg)
break
data = dict(zip(headers, [k.value for k in row]))
raw = {k: data.get(k) for k in fileds}
zone, err = get_rack_zone(raw.get('zone'), onidc_id)
if not zone:
msg = "第{}行:{}".format(index, err)
handler_error.append(msg)
continue
name = raw.get('name')
verify = Rack.objects.filter(name=name, zone=zone)
if verify.exists():
msg = "第{}行:{}机柜已存在".format(index, name)
handler_error.append(msg)
continue
else:
            # handle the rack alias (cname)
cname = raw.get('cname') if raw.get('cname') else name
            # get the rack style and rack status
style = get_or_create_option(
raw.get('style'), onidc_id, flag='Rack-Style'
)
status = get_or_create_option(
raw.get('status'), onidc_id, flag='Rack-Status', create=True
)
            # look up or create the client
if raw.get('client'):
actived = True
client, err = get_or_create_client(raw.get('client'), onidc_id)
if not client:
msg = "第{}行:{}".format(index, err)
handler_error.append(msg)
continue
else:
actived = False
client = None
unitc = int(raw.get('unitc'))
pduc = int(raw.get('pduc'))
cpower = int(raw.get('cpower'))
            # instantiate the rack
instance = Rack(
name=name, cname=cname, zone=zone, client=client,
style=style, status=status, actived=actived,
creator_id=CreatorId, unitc=unitc, pduc=pduc,
cpower=cpower, onidc_id=onidc_id
)
instance.save()
handler_success.append(instance.name)
            # save the tags
tags = get_or_create_tags(
raw.get('tags'), onidc_id, CreatorId, 'Rack-Tags'
)
if tags:
for t in tags:
instance.tags.add(t)
instance.save()
total = (index-2)
return handler_error, handler_warning, handler_success, total
def get_creator(username):
fields = ['first_name', 'username', 'mobile']
query = [Q(**{k: username.strip()}) for k in fields]
query_str = reduce(operator.or_, query)
user = User.objects.filter(query_str)
if user.exists():
return user.first()
else:
return User.objects.filter().order_by('pk').first()
def get_or_create_style(name, onidc_id):
f = dict(
onidc_id=onidc_id, flag='Device-Style', text=name.strip()
)
qs = shared_queryset(Option.objects.filter(**f), onidc_id)
if qs.exists():
instance = qs.first()
else:
extra = dict(
description=name.strip(),
creator_id=CreatorId
)
f.update(**extra)
instance = Option.objects.create(**f)
return instance
def get_or_create_option(name, onidc_id, flag, create=False):
    if not name.strip():
        return None
f = dict(
onidc_id=onidc_id, flag=flag, text=name.strip()
)
qs = shared_queryset(Option.objects.filter(**f), onidc_id)
if qs.exists():
instance = qs.first()
else:
if create and name.strip():
extra = dict(
description=name.strip(),
creator_id=CreatorId
)
f.update(**extra)
instance = Option.objects.create(**f)
else:
instance = None
return instance
def get_or_create_client(name, onidc_id):
qs = Client.objects.filter(name=name.strip())
if qs.exists():
instance = qs.first()
else:
types = Option.objects.filter(
onidc_id=onidc_id, flag='Client-Style'
)
if types.exists():
default = types.filter(master=True)
if default.exists():
style = default.first()
else:
style = types.first()
else:
return None, "客户类型不能为空"
instance = Client.objects.create(
onidc_id=onidc_id, creator_id=CreatorId,
name=name.strip(), style=style
)
return instance, None
def get_rack_zone(name, onidc_id):
"""
Return: (instance, error)
"""
qs = Option.objects.filter(text=name.strip(), onidc_id=onidc_id)
if qs.exists():
return qs.first(), None
else:
return None, "找不到指定机房区域,请新建"
def get_rack(name, onidc_id):
"""
Return: (instance, error)
"""
qs = Rack.objects.filter(name=name.strip(), onidc_id=onidc_id)
if qs.filter(actived=True).exists():
return qs.first(), None
elif qs.filter(actived=False).exists():
return None, "该机柜未分配使用"
else:
return None, "找不到该机柜"
def clean_units(data, rack_id):
units = sorted([int(i) for i in data.split('|') if len(i) != 0])
units_list = [
str(x).zfill(2) for x in range(units[0], units[-1]+1)
]
instances = Unit.objects.filter(rack_id=rack_id, name__in=units_list)
if instances.exists():
used = instances.filter(actived=False)
if used.count() > 0:
return None, "有U位被占用中"
return instances, None
else:
return None, "找不到U位信息"
def clean_pdus(data, rack_id):
pdus = re.split('[, |]', data)
pdus_list = [x.strip() for x in pdus if x]
instances = Pdu.objects.filter(rack_id=rack_id, name__in=pdus_list)
if instances.exists():
used = instances.filter(actived=False)
if used.count() > 0:
return instances.filter(actived=True), "部分PDU位被占用中"
return instances, None
else:
return None, "找不到PDU位信息"
def clean_tags(tags, onidc_id, creator_id):
tags = re.split('[, |]', tags)
tags_list = [x.strip() for x in tags if x]
default = dict(onidc_id=onidc_id, flag='Device-Tags')
instances = []
for tag in tags_list:
default.update(**dict(text=tag))
verify = Option.objects.filter(**default)
if verify.exists():
instance = verify.first()
else:
default.update(**dict(creator_id=creator_id))
instance = Option.objects.create(**default)
instances.append(instance)
return instances
def get_or_create_tags(tags, onidc_id, creator_id, flag):
tags = re.split('[, |]', tags)
tags_list = [x.strip() for x in tags if x]
default = dict(onidc_id=onidc_id, flag=flag)
instances = []
for tag in tags_list:
default.update(**dict(text=tag))
verify = Option.objects.filter(**default)
if verify.exists():
instance = verify.first()
else:
default.update(**dict(creator_id=creator_id))
instance = Option.objects.create(**default)
instances.append(instance)
return instances
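# A minimal calling sketch for the importers above (file names and the onidc id
# are illustrative; each importer returns the same 4-tuple of error messages,
# warnings, successfully imported names and the number of processed rows):
#
#     errors, warnings, created, total = import_online('online.xls', onidc_id=1)
#     errors, warnings, created, total = import_rack('racks.xls', onidc_id=1)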
|
import socket
from django.conf import settings
# noinspection PyUnusedLocal
def from_settings(request):
if not hasattr(from_settings, 'env_name'):
from_settings.env_name = settings.ENVIRONMENT_NAME if hasattr(
settings,
'ENVIRONMENT_NAME') else None
from_settings.env_colour = settings.ENVIRONMENT_COLOR if hasattr(
settings,
'ENVIRONMENT_COLOR') else None
if settings.DEBUG:
if not from_settings.env_name:
from_settings.env_name = f"Developing on {socket.gethostname()}"
if not from_settings.env_colour:
from_settings.env_colour = "green"
else:
from_settings.env_name = f'Production on {socket.gethostname()}'
from_settings.env_colour = "red"
return {
'ENVIRONMENT_NAME': from_settings.env_name,
'ENVIRONMENT_COLOR': from_settings.env_colour,
}
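# To expose ENVIRONMENT_NAME / ENVIRONMENT_COLOR in templates, this context
# processor must be registered under TEMPLATES in settings (sketch; the dotted
# path 'myproject.context_processors.from_settings' is hypothetical and depends
# on where this module actually lives):
#
#     TEMPLATES = [{
#         'BACKEND': 'django.template.backends.django.DjangoTemplates',
#         'OPTIONS': {
#             'context_processors': [
#                 'django.template.context_processors.request',
#                 'myproject.context_processors.from_settings',
#             ],
#         },
#     }]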
|
CN_DS_VICTORIAMETRICS = 1
CN_DS_POSTGRESQL = 0
|
import sys
import urllib2
import urllib
class Py3status:
urls = [
'youUrl.com']
status = ''
def websitesStatus(self):
self.status = ''
color = self.py3.COLOR_LOW
error = 0
for url in self.urls:
req = urllib2.Request('http://'+url)
            resCode = ''
try:
response = urllib2.urlopen(req)
except urllib2.URLError as e:
resCode = '404'
if resCode == '404':
if (not error):
self.status = ''
color = self.py3.COLOR_HIGH
self.status = '[' + url + ']' + self.status
error=1
else:
if(not error):
self.status = 'All websites are up.'
return {
'full_text':self.status,
'color': color,
'cached_until': 10
}
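# Note: urllib2 only exists on Python 2. A Python 3 port of the request above
# would use urllib.request / urllib.error instead (sketch):
#
#     from urllib.request import Request, urlopen
#     from urllib.error import URLError
#     try:
#         urlopen(Request('http://' + url))
#     except URLError:
#         resCode = '404'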
|
from dataclasses import dataclass
from typing import Iterable
from rxbp.init.initsubscription import init_subscription
from rxbp.mixins.flowablemixin import FlowableMixin
from rxbp.observables.fromiteratorobservable import FromIteratorObservable
from rxbp.subscriber import Subscriber
from rxbp.subscription import Subscription
from rxbp.typing import ValueType
@dataclass
class FromIterableFlowable(FlowableMixin):
iterable: Iterable[ValueType]
def unsafe_subscribe(self, subscriber: Subscriber) -> Subscription:
iterator = iter(self.iterable)
return init_subscription(
observable=FromIteratorObservable(
iterator=iterator,
subscribe_scheduler=subscriber.subscribe_scheduler,
scheduler=subscriber.scheduler,
),
)
|
import os
from livestyled.schemas.ticket_integration import TicketIntegrationSchema
FIXTURES_DIR = os.path.join(os.path.dirname(__file__), 'fixtures')
TEST_API_DOMAIN = 'test.livestyled.com'
def test_deserialize_ticket_integration():
with open(os.path.join(FIXTURES_DIR, 'example_ticket_integration.json'), 'r') as fixture_file:
ticket_integration = fixture_file.read()
deserialized_ticket_integration = TicketIntegrationSchema().loads(ticket_integration)
assert deserialized_ticket_integration == {
'adapter': 'XXXXXXds',
'auth_required': False,
'can_share': False,
'config_payload': None,
'endpoint_url': 'XXXXXXsd',
'id': 17,
'default': False,
'label': 'XXXXXXdsds',
'login_request': 'XXXXXXXdsds',
'module': 'SHARE',
'name': 'SeatGeek',
}
|
import processing
import detection
import classification
import numpy as np
import cv2
if __name__ == "__main__":
#Run for image files
img_filenames = ['1.png', '2.png', '3.png', '4.png', '5.png']
CNNmodel = None
for img_file in img_filenames:
#TODO Step 1 Processing
numpy_save = False
model_file = "VGG_Transfer_model.h5"
weights_file = None
#Read Image, Turn to grayscale and subtract mean
image = np.array(cv2.imread(img_file))
temp_img = np.copy(image) - np.mean(image)
pyramid = processing.create_pyramid(temp_img)
wndw_imgs, wndw_loc = processing.window_cutouts(pyramid)
print("WINDOW IMAGE SHAPE", wndw_imgs.shape)
print("WINDOW LOC SHAPE", wndw_loc.shape)
if numpy_save:
np.save("wndw_imgs.npy", wndw_imgs)
np.save("wndw_loc.npy", wndw_loc)
#TODO Step 2 DETECTION
print("TEST IMG SIZES", wndw_imgs.shape)
if CNNmodel is None:
predictions, CNNmodel = detection.CNN_model(test_dataset=wndw_imgs, model_file=model_file)
else:
predictions = np.array(CNNmodel.predict(x=wndw_imgs, batch_size=128))
print("PREDICTIONS Finished")
if numpy_save:
np.save("predictions.npy", predictions)
# TODO Step 3 CLASSIFICATION
final_image = classification.run(img_file=img_file, predictions=predictions, \
wndw_loc=wndw_loc, model = CNNmodel)
|
# -*- coding: utf-8 -*-
from qiniu import QiniuMacAuth, http, urlsafe_base64_encode
import json
def createTemplate(access_key, secret_key, body):
"""
创建模板
https://developer.qiniu.com/qvs/api/6721/create-template
:param access_key: 公钥
:param secret_key: 私钥
:param body: 请求体
{
"name": 必填,模版名称,格式为 1 ~ 100个字符,可包含小写字母、数字、中划线
"desc":非必填,模板描述
"bucket": 必填,模版对应的对象存储的bucket
"deleteAfterDays": 必填,存储过期时间,默认永久不过期
"fileType": 必填,文件存储类型,取值:0(普通存储),1(低频存储)
"recordFileFormat": 非必填,录制文件存储格式,取值:0(m3u8格式存储)
"templateType": 必填,模板类型,取值:0(录制模版), 1(截图模版)
"recordType":templateType为0时须指定,录制模式, 0(不录制),1(实时录制)
"jpgOverwriteStatus": templateType为1时须指定,开启覆盖式截图(一般用于流封面)
"jpgSequenceStatus":templateType为1时须指定,开启序列式截图
"jpgOnDemandStatus":templateType为1时须指定,开启按需截图
"recordInterval":非必填,录制文件时长 单位为秒,600~3600
"snapInterval": 非必填,截图间隔, 单位为秒, 10~600
}
:return:
{
}
"""
auth = QiniuMacAuth(access_key, secret_key)
    # request URL
url = "http://qvs.qiniuapi.com/v1/templates"
    # send the POST request
ret, res = http._post_with_qiniu_mac(url, body, auth)
headers = {"code": res.status_code, "reqid": res.req_id, "xlog": res.x_log}
    # format the response body
Headers = json.dumps(headers, indent=4, ensure_ascii=False)
result = json.dumps(ret, indent=4, ensure_ascii=False)
return Headers, result
# Qiniu account AK / SK
access_key = '<access_key>'
secret_key = '<secret_key>'
# request body
body = {
"name": "test0013",
"desc": "this is a test",
"delateAfterDays": 7,
"interval": 5,
"templateType": 1,
"bucket": "yangjunren",
"jpgOverwriteStatus": True,
"jpgSequenceStatus": True,
"jpgOnDemandStatus": True
}
headers, result = createTemplate(access_key, secret_key, body)
print(f'{headers}\n{result}')
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( mat , N ) :
for i in range ( N ) :
for j in range ( N ) :
if ( mat [ i ] [ j ] != mat [ j ] [ i ] ) :
return False
return True
#TOFILL
if __name__ == '__main__':
param = [
([[29]],0,),
([[ 1, 3, 5 ], [ 3, 2, 4 ], [ 5, 4, 1 ]], 3,),
([[ 1, 2, 5 ], [ 3, 2, 4 ], [ 5, 4, 1 ]], 3,),
([[37, 56, 39, 95, 78, 69, 89, 45, 66, 99, 20, 10, 6, 33, 78, 26, 86, 61, 78, 36, 62, 23, 80, 89, 83], [42, 75, 30, 64, 25, 95, 17, 90, 6, 11, 1, 77, 16, 75, 86, 96, 67, 27, 80, 27, 99, 2, 82, 48, 25], [36, 83, 89, 85, 38, 40, 12, 29, 71, 29, 96, 75, 37, 79, 90, 66, 62, 29, 68, 98, 99, 74, 98, 88, 94], [69, 5, 52, 10, 35, 63, 75, 55, 17, 45, 65, 56, 70, 52, 61, 94, 61, 35, 13, 51, 1, 23, 77, 34, 80], [64, 11, 91, 93, 65, 22, 41, 25, 7, 85, 26, 82, 97, 51, 24, 10, 13, 95, 18, 11, 58, 32, 21, 41, 60], [90, 46, 56, 8, 17, 36, 86, 73, 5, 56, 59, 14, 45, 75, 51, 58, 95, 71, 39, 85, 57, 29, 75, 13, 44], [40, 43, 89, 50, 56, 62, 55, 30, 28, 68, 41, 84, 12, 77, 90, 38, 53, 23, 42, 84, 67, 11, 94, 10, 77], [74, 31, 44, 37, 25, 93, 21, 15, 11, 98, 75, 45, 8, 98, 26, 21, 52, 50, 24, 96, 82, 26, 41, 51, 16], [41, 52, 57, 84, 51, 59, 79, 68, 40, 16, 76, 35, 26, 73, 80, 59, 79, 84, 3, 5, 40, 55, 77, 48, 93], [71, 53, 72, 27, 73, 96, 36, 36, 39, 75, 57, 36, 7, 21, 15, 46, 88, 47, 59, 61, 41, 54, 23, 73, 12], [8, 22, 50, 34, 84, 96, 13, 20, 8, 70, 99, 97, 25, 14, 97, 59, 51, 53, 16, 67, 38, 74, 45, 97, 16], [95, 25, 78, 52, 46, 8, 73, 56, 13, 78, 63, 15, 53, 55, 5, 39, 13, 67, 97, 19, 31, 96, 53, 66, 80], [5, 30, 49, 58, 18, 36, 38, 50, 49, 28, 56, 33, 2, 2, 32, 12, 39, 51, 6, 15, 96, 47, 45, 45, 15], [10, 79, 97, 99, 12, 35, 4, 10, 84, 43, 31, 31, 31, 20, 73, 77, 37, 59, 24, 89, 59, 94, 88, 73, 29], [74, 78, 86, 45, 12, 8, 60, 67, 26, 20, 81, 31, 77, 42, 50, 32, 6, 32, 32, 43, 32, 1, 11, 12, 21], [21, 10, 9, 12, 32, 85, 18, 50, 39, 69, 5, 71, 56, 78, 22, 97, 99, 93, 79, 31, 92, 18, 8, 33, 15], [7, 35, 36, 40, 77, 41, 11, 87, 51, 23, 46, 4, 42, 34, 46, 7, 37, 20, 99, 29, 97, 36, 71, 56, 96], [5, 57, 15, 64, 45, 54, 2, 56, 40, 1, 16, 6, 72, 80, 47, 76, 6, 48, 2, 75, 61, 11, 10, 98, 75], [98, 75, 99, 62, 8, 10, 96, 52, 95, 4, 3, 45, 30, 75, 47, 34, 67, 57, 21, 41, 75, 75, 88, 53, 99], [40, 77, 89, 69, 74, 98, 68, 89, 99, 22, 23, 24, 74, 67, 11, 60, 34, 16, 26, 43, 19, 28, 48, 52, 85], [81, 50, 42, 81, 54, 72, 87, 27, 47, 26, 83, 34, 15, 4, 1, 92, 40, 74, 92, 61, 36, 18, 3, 43, 46], [80, 28, 65, 52, 79, 12, 96, 25, 80, 36, 21, 10, 76, 78, 63, 51, 27, 18, 53, 55, 98, 75, 79, 5, 37], [52, 98, 60, 25, 33, 97, 15, 1, 38, 45, 7, 12, 68, 26, 10, 72, 50, 25, 96, 64, 54, 43, 27, 16, 92], [61, 86, 67, 38, 64, 43, 82, 14, 64, 95, 63, 92, 69, 49, 72, 52, 82, 23, 32, 48, 12, 58, 24, 3, 86], [2, 88, 8, 1, 46, 4, 72, 89, 32, 16, 31, 5, 43, 13, 8, 11, 67, 33, 4, 22, 58, 60, 98, 99, 81]],21,),
([[32, 53, 61, 4, 94, 83, 17, 81, 12, 79, 93, 11, 91, 14, 15], [13, 34, 5, 70, 47, 93, 43, 97, 24, 44, 49, 93, 33, 2, 34], [94, 82, 63, 86, 67, 80, 10, 15, 76, 76, 39, 51, 15, 91, 20], [71, 90, 63, 91, 53, 14, 13, 78, 84, 44, 96, 39, 66, 80, 82], [60, 33, 64, 97, 47, 93, 89, 32, 10, 64, 77, 3, 60, 87, 26], [69, 81, 93, 32, 34, 95, 76, 38, 85, 22, 30, 53, 84, 86, 2], [71, 38, 57, 33, 49, 92, 28, 63, 54, 6, 62, 95, 36, 74, 19], [6, 34, 8, 6, 41, 89, 15, 22, 4, 73, 86, 56, 18, 24, 99], [67, 18, 89, 84, 39, 89, 61, 77, 78, 94, 44, 28, 30, 51, 33], [82, 64, 52, 28, 73, 14, 69, 99, 54, 49, 7, 44, 60, 1, 51], [99, 38, 66, 68, 74, 99, 59, 98, 62, 39, 63, 32, 21, 85, 23], [15, 1, 29, 94, 19, 33, 88, 70, 10, 46, 47, 55, 18, 71, 10], [92, 59, 34, 42, 98, 91, 42, 67, 7, 15, 35, 53, 1, 14, 90], [22, 84, 62, 36, 99, 16, 63, 6, 22, 7, 95, 17, 80, 50, 59], [42, 40, 14, 73, 80, 53, 8, 91, 78, 59, 66, 88, 72, 71, 63]],13,),
([[93, 91, 59, 11, 73, 34, 33, 29, 78, 95, 52, 61, 39, 63, 91, 82, 75, 35, 18, 71, 19, 42, 64], [92, 7, 2, 46, 32, 22, 94, 78, 67, 73, 52, 15, 70, 89, 48, 40, 60, 4, 21, 67, 60, 67, 39], [94, 67, 26, 74, 69, 58, 14, 10, 9, 3, 75, 67, 48, 38, 39, 41, 43, 78, 67, 6, 46, 78, 16], [25, 44, 27, 86, 54, 56, 75, 43, 59, 83, 83, 80, 94, 72, 94, 56, 8, 51, 29, 14, 12, 13, 12], [78, 10, 44, 59, 8, 24, 37, 43, 89, 8, 64, 77, 67, 73, 40, 74, 46, 83, 92, 18, 82, 72, 8], [59, 36, 96, 21, 3, 88, 16, 83, 55, 22, 22, 77, 12, 60, 92, 72, 9, 84, 79, 68, 24, 48, 45], [6, 64, 87, 15, 30, 84, 27, 27, 98, 97, 58, 10, 73, 72, 78, 1, 74, 4, 59, 82, 94, 41, 90], [43, 14, 29, 73, 37, 22, 88, 99, 36, 95, 58, 15, 61, 7, 99, 91, 42, 98, 25, 64, 44, 6, 4], [66, 14, 4, 35, 77, 93, 34, 26, 56, 90, 68, 78, 75, 3, 87, 8, 44, 90, 78, 5, 58, 86, 78], [12, 67, 94, 20, 3, 33, 77, 18, 75, 26, 7, 90, 3, 1, 17, 12, 73, 81, 82, 23, 91, 2, 27], [55, 15, 44, 69, 95, 49, 63, 35, 19, 53, 92, 2, 52, 20, 59, 3, 8, 40, 30, 12, 49, 17, 66], [23, 39, 27, 57, 19, 44, 66, 32, 33, 43, 23, 14, 80, 57, 98, 57, 58, 62, 40, 44, 47, 84, 46], [53, 29, 49, 53, 9, 73, 25, 47, 81, 50, 71, 16, 37, 18, 39, 78, 56, 82, 8, 57, 89, 20, 57], [1, 88, 13, 75, 52, 97, 30, 81, 57, 5, 22, 51, 79, 74, 1, 46, 79, 42, 42, 93, 64, 21, 79], [99, 69, 19, 14, 15, 51, 83, 91, 16, 83, 53, 55, 23, 36, 18, 45, 88, 71, 89, 45, 7, 69, 88], [84, 85, 20, 74, 87, 46, 33, 15, 34, 79, 5, 9, 91, 64, 60, 28, 9, 50, 36, 9, 31, 45, 55], [78, 15, 41, 66, 63, 96, 27, 64, 60, 56, 71, 14, 60, 93, 40, 20, 51, 5, 82, 72, 50, 71, 88], [60, 86, 20, 27, 20, 6, 8, 79, 22, 35, 42, 77, 92, 20, 93, 69, 3, 27, 69, 60, 20, 23, 96], [12, 55, 49, 96, 80, 27, 65, 51, 76, 77, 72, 44, 29, 39, 16, 5, 43, 57, 97, 20, 36, 96, 48], [50, 2, 12, 39, 53, 63, 12, 34, 34, 12, 17, 6, 30, 86, 37, 87, 80, 26, 48, 40, 31, 46, 66], [67, 88, 91, 37, 17, 94, 68, 59, 82, 40, 27, 95, 12, 31, 28, 26, 13, 82, 17, 41, 32, 22, 99], [80, 50, 3, 22, 59, 95, 16, 66, 40, 56, 86, 56, 78, 14, 62, 69, 27, 47, 80, 68, 87, 74, 95], [17, 27, 51, 59, 59, 79, 24, 54, 99, 13, 14, 70, 70, 52, 96, 85, 21, 30, 54, 86, 19, 59, 47]],12,),
([[1, 88, 52, 21, 60, 48, 74, 12, 87, 76, 80, 55, 3, 66, 6, 22, 67, 73, 21, 37, 33, 1, 65, 71, 37, 40, 63, 52, 76, 32, 27, 42, 52], [29, 46, 66, 46, 83, 25, 99, 65, 57, 28, 18, 63, 18, 24, 51, 29, 19, 31, 95, 86, 29, 20, 66, 68, 46, 19, 7, 42, 16, 52, 33, 39, 43], [84, 46, 4, 15, 43, 30, 39, 43, 14, 70, 86, 18, 46, 79, 21, 76, 91, 61, 75, 95, 65, 25, 89, 81, 71, 32, 48, 89, 82, 35, 90, 76, 78], [8, 22, 76, 32, 46, 13, 33, 1, 92, 67, 80, 50, 32, 10, 1, 71, 47, 7, 62, 52, 68, 4, 57, 89, 5, 71, 55, 67, 57, 99, 75, 76, 39], [80, 43, 71, 85, 10, 82, 29, 26, 30, 65, 38, 15, 89, 19, 28, 97, 15, 78, 61, 38, 99, 32, 78, 77, 41, 85, 76, 15, 88, 84, 63, 1, 43], [14, 2, 8, 11, 20, 44, 59, 17, 12, 84, 74, 21, 67, 4, 88, 54, 27, 95, 74, 68, 76, 79, 90, 34, 1, 59, 52, 45, 18, 73, 50, 34, 54], [54, 52, 30, 4, 53, 24, 50, 45, 61, 90, 7, 45, 85, 78, 34, 10, 11, 45, 49, 40, 51, 71, 99, 28, 62, 15, 38, 49, 1, 50, 14, 13, 22], [57, 85, 41, 37, 82, 73, 95, 5, 31, 65, 86, 57, 15, 90, 29, 54, 41, 91, 34, 85, 76, 35, 55, 98, 33, 42, 87, 8, 83, 99, 91, 30, 84], [92, 74, 42, 25, 14, 65, 30, 13, 89, 12, 24, 70, 73, 38, 87, 52, 70, 35, 28, 5, 42, 84, 80, 20, 22, 51, 87, 76, 47, 97, 39, 28, 68], [47, 72, 21, 48, 50, 49, 76, 62, 35, 80, 72, 5, 76, 90, 37, 73, 41, 92, 40, 58, 72, 2, 50, 86, 94, 80, 48, 24, 91, 33, 70, 94, 42], [26, 78, 95, 16, 21, 2, 59, 8, 7, 90, 21, 18, 82, 1, 91, 8, 92, 2, 22, 20, 78, 35, 60, 31, 41, 67, 72, 90, 24, 15, 38, 79, 99], [38, 81, 95, 66, 5, 2, 2, 90, 38, 37, 10, 91, 72, 74, 99, 24, 24, 95, 4, 40, 14, 26, 12, 27, 6, 27, 14, 22, 49, 20, 3, 73, 80], [73, 49, 96, 98, 25, 27, 91, 2, 22, 66, 48, 53, 1, 54, 39, 10, 12, 37, 46, 17, 3, 85, 76, 59, 27, 15, 45, 41, 67, 5, 34, 63, 98], [85, 13, 89, 14, 82, 61, 3, 3, 45, 96, 18, 32, 96, 44, 93, 37, 99, 27, 40, 24, 56, 36, 99, 6, 71, 78, 17, 61, 27, 44, 70, 3, 39], [35, 66, 83, 87, 17, 9, 9, 35, 9, 12, 67, 85, 57, 92, 97, 98, 43, 22, 60, 30, 31, 80, 99, 65, 73, 65, 87, 37, 82, 4, 10, 27, 2], [55, 68, 40, 97, 8, 15, 61, 7, 94, 24, 20, 55, 5, 7, 2, 74, 77, 21, 3, 53, 14, 53, 80, 63, 54, 72, 24, 78, 50, 6, 88, 93, 26], [34, 44, 69, 98, 98, 77, 67, 5, 86, 85, 91, 88, 39, 53, 8, 68, 36, 70, 95, 69, 6, 2, 1, 62, 29, 87, 18, 3, 80, 31, 22, 8, 22], [77, 29, 80, 10, 46, 34, 56, 59, 33, 78, 96, 23, 15, 25, 26, 12, 64, 19, 49, 19, 96, 74, 91, 23, 56, 63, 52, 64, 18, 99, 50, 13, 66], [36, 22, 84, 7, 12, 79, 93, 8, 23, 13, 97, 5, 83, 7, 68, 9, 19, 89, 65, 68, 82, 71, 83, 52, 87, 28, 93, 6, 44, 27, 46, 4, 87], [30, 45, 58, 62, 54, 24, 96, 75, 30, 90, 80, 57, 53, 70, 89, 84, 10, 1, 44, 59, 11, 76, 20, 76, 60, 44, 16, 79, 62, 90, 56, 75, 3], [2, 44, 83, 96, 87, 44, 24, 13, 1, 39, 5, 13, 8, 51, 49, 49, 48, 40, 30, 44, 92, 93, 53, 36, 84, 69, 71, 30, 38, 7, 75, 75, 84], [33, 79, 68, 51, 10, 38, 40, 3, 24, 2, 23, 51, 59, 42, 19, 8, 26, 82, 44, 48, 73, 36, 9, 97, 11, 41, 62, 88, 24, 32, 33, 81, 90], [45, 33, 2, 66, 78, 21, 87, 22, 65, 32, 29, 69, 36, 25, 22, 69, 52, 67, 24, 97, 92, 47, 85, 80, 11, 6, 51, 83, 61, 82, 44, 10, 76], [33, 64, 15, 76, 50, 5, 1, 38, 98, 12, 30, 11, 73, 44, 46, 71, 81, 52, 63, 26, 27, 97, 39, 5, 73, 87, 94, 36, 1, 52, 8, 1, 74], [7, 38, 59, 60, 67, 7, 8, 34, 40, 42, 96, 32, 69, 91, 13, 55, 12, 74, 1, 85, 7, 10, 81, 37, 48, 65, 42, 13, 23, 57, 92, 19, 32], [10, 82, 8, 16, 35, 58, 81, 48, 48, 23, 26, 55, 23, 50, 23, 54, 56, 45, 71, 12, 22, 17, 77, 48, 78, 71, 50, 83, 59, 39, 71, 60, 91], [17, 34, 75, 9, 39, 67, 23, 40, 4, 57, 16, 59, 85, 25, 5, 1, 96, 20, 11, 97, 32, 83, 39, 45, 57, 82, 36, 42, 88, 96, 9, 24, 79], [47, 46, 86, 98, 59, 10, 2, 42, 7, 
1, 9, 42, 26, 79, 57, 22, 87, 3, 11, 56, 86, 62, 40, 78, 16, 98, 5, 53, 72, 66, 11, 45, 62], [87, 65, 74, 6, 67, 83, 29, 79, 87, 49, 8, 89, 88, 52, 12, 1, 4, 94, 98, 60, 43, 97, 44, 30, 40, 13, 30, 19, 20, 38, 63, 68, 23], [89, 11, 31, 76, 41, 98, 57, 30, 80, 96, 82, 8, 95, 36, 77, 82, 62, 35, 27, 6, 64, 74, 37, 47, 44, 71, 80, 66, 43, 57, 47, 89, 90], [90, 18, 20, 92, 67, 57, 1, 74, 95, 84, 56, 8, 48, 58, 64, 71, 57, 51, 99, 40, 84, 3, 63, 11, 58, 76, 46, 12, 8, 45, 86, 84, 15], [49, 31, 46, 94, 40, 31, 20, 2, 6, 78, 26, 97, 87, 89, 37, 92, 99, 71, 59, 66, 64, 17, 91, 48, 66, 12, 80, 32, 18, 62, 16, 5, 24], [49, 75, 64, 46, 42, 88, 78, 1, 90, 26, 68, 90, 4, 96, 16, 80, 40, 84, 81, 49, 84, 96, 42, 11, 62, 93, 55, 27, 85, 29, 32, 41, 12]],22,),
([[97, 17, 59, 40, 18, 53, 65, 84, 85, 42, 38, 32, 22, 61, 89, 32, 31, 99, 58, 77, 80, 56, 83, 41, 15, 46, 97, 59, 65, 51, 13, 24, 87, 93, 16, 49, 32, 16, 43, 88, 53, 21, 33, 59, 60], [27, 29, 33, 50, 32, 46, 28, 51, 26, 48, 58, 47, 63, 47, 70, 19, 79, 81, 98, 65, 19, 67, 81, 46, 78, 75, 80, 54, 94, 91, 82, 87, 49, 27, 56, 44, 75, 77, 44, 23, 90, 42, 64, 34, 99], [43, 84, 88, 96, 26, 2, 13, 3, 12, 27, 14, 74, 38, 76, 40, 75, 50, 66, 95, 62, 10, 6, 55, 42, 61, 22, 47, 19, 74, 47, 91, 92, 10, 45, 60, 17, 79, 43, 12, 84, 64, 80, 47, 84, 50], [27, 22, 91, 13, 59, 69, 81, 98, 22, 94, 67, 71, 15, 71, 3, 29, 6, 49, 91, 65, 54, 34, 58, 8, 89, 15, 38, 11, 73, 27, 77, 76, 11, 58, 35, 44, 57, 87, 21, 28, 7, 77, 95, 35, 81], [88, 86, 74, 80, 6, 12, 1, 16, 98, 63, 58, 91, 5, 83, 11, 37, 63, 75, 8, 53, 16, 95, 11, 65, 47, 81, 49, 25, 55, 26, 34, 2, 16, 31, 22, 86, 32, 70, 2, 71, 11, 10, 16, 51, 1], [35, 39, 74, 59, 99, 77, 78, 76, 44, 3, 38, 75, 98, 25, 87, 72, 64, 27, 50, 4, 62, 88, 60, 63, 13, 31, 64, 14, 84, 86, 76, 67, 96, 5, 96, 76, 92, 91, 87, 68, 69, 45, 9, 9, 93], [57, 81, 83, 66, 96, 54, 15, 2, 78, 96, 49, 90, 12, 90, 36, 76, 97, 90, 87, 13, 37, 40, 92, 34, 54, 83, 89, 99, 85, 70, 16, 24, 51, 16, 94, 28, 74, 17, 84, 48, 24, 80, 20, 55, 26], [29, 22, 20, 96, 29, 87, 57, 98, 76, 83, 17, 86, 10, 82, 69, 1, 90, 89, 77, 39, 46, 12, 20, 6, 18, 2, 73, 33, 54, 1, 75, 22, 68, 21, 29, 20, 69, 51, 27, 97, 18, 22, 41, 37, 18], [21, 6, 28, 2, 79, 11, 11, 26, 91, 43, 87, 56, 8, 63, 46, 59, 84, 98, 26, 65, 63, 88, 53, 41, 93, 11, 8, 30, 79, 82, 25, 64, 60, 11, 48, 51, 73, 32, 12, 42, 23, 88, 83, 74, 82], [15, 94, 47, 98, 42, 39, 13, 42, 23, 45, 22, 60, 27, 52, 69, 11, 40, 6, 67, 32, 74, 40, 20, 18, 98, 82, 2, 13, 56, 46, 62, 77, 47, 59, 90, 64, 12, 12, 12, 23, 18, 24, 47, 91, 70], [40, 45, 67, 62, 58, 95, 96, 92, 54, 9, 34, 60, 27, 27, 60, 25, 83, 78, 40, 83, 76, 95, 36, 25, 58, 61, 52, 6, 14, 7, 93, 90, 34, 36, 51, 75, 76, 81, 87, 31, 82, 53, 61, 26, 87], [50, 8, 23, 75, 95, 19, 22, 41, 81, 49, 57, 91, 31, 17, 17, 98, 99, 11, 84, 60, 4, 58, 3, 72, 36, 43, 83, 20, 5, 90, 86, 55, 26, 50, 74, 88, 52, 96, 61, 89, 15, 53, 34, 16, 47], [64, 74, 70, 61, 41, 85, 45, 2, 49, 19, 38, 87, 17, 6, 54, 48, 44, 59, 34, 15, 91, 22, 35, 83, 2, 44, 20, 45, 62, 61, 97, 81, 56, 56, 2, 12, 82, 23, 19, 54, 69, 21, 60, 20, 80], [6, 59, 90, 96, 99, 23, 54, 18, 42, 85, 48, 13, 28, 14, 94, 37, 99, 47, 53, 41, 40, 22, 35, 77, 9, 80, 77, 18, 53, 73, 8, 19, 80, 75, 43, 92, 32, 19, 7, 24, 23, 7, 40, 79, 23], [79, 72, 73, 91, 22, 22, 20, 21, 14, 85, 22, 33, 78, 13, 86, 90, 85, 15, 75, 12, 6, 32, 24, 17, 98, 88, 25, 60, 63, 86, 23, 86, 84, 45, 76, 81, 53, 27, 65, 45, 56, 1, 37, 78, 43], [90, 67, 47, 22, 16, 72, 11, 25, 17, 50, 89, 84, 15, 7, 22, 32, 89, 15, 10, 5, 81, 6, 3, 31, 43, 72, 33, 23, 43, 12, 10, 33, 13, 48, 6, 24, 27, 92, 63, 99, 24, 55, 10, 20, 22], [45, 52, 19, 18, 80, 74, 48, 70, 47, 13, 8, 88, 84, 89, 5, 68, 90, 35, 15, 35, 75, 33, 40, 68, 60, 21, 67, 96, 35, 1, 18, 6, 19, 31, 48, 60, 56, 49, 8, 70, 87, 68, 12, 15, 51], [68, 10, 30, 46, 76, 42, 39, 8, 59, 61, 70, 81, 87, 50, 7, 97, 53, 7, 96, 93, 30, 77, 54, 38, 82, 30, 85, 30, 18, 62, 98, 29, 49, 45, 51, 20, 31, 47, 83, 13, 77, 45, 70, 57, 87], [28, 1, 55, 6, 63, 56, 56, 97, 48, 21, 77, 81, 95, 80, 48, 64, 45, 45, 17, 72, 42, 89, 64, 95, 92, 52, 40, 64, 8, 51, 66, 73, 50, 20, 68, 99, 60, 54, 64, 43, 32, 9, 30, 49, 1], [49, 96, 37, 62, 18, 86, 55, 83, 16, 85, 49, 64, 57, 39, 68, 15, 12, 80, 64, 93, 89, 77, 20, 34, 19, 75, 93, 92, 19, 82, 49, 29, 20, 28, 8, 40, 46, 56, 99, 69, 
41, 89, 84, 71, 28], [25, 56, 58, 92, 77, 94, 72, 67, 80, 80, 87, 10, 6, 83, 38, 90, 18, 91, 20, 6, 81, 30, 16, 25, 51, 16, 70, 37, 64, 71, 60, 96, 55, 52, 56, 17, 27, 3, 92, 98, 29, 4, 27, 84, 76], [99, 74, 14, 56, 22, 24, 90, 11, 84, 72, 29, 73, 38, 70, 92, 90, 9, 45, 26, 89, 52, 6, 21, 60, 59, 21, 91, 11, 20, 17, 98, 51, 64, 55, 86, 16, 85, 77, 98, 54, 54, 56, 7, 96, 13], [96, 83, 88, 44, 40, 69, 28, 81, 40, 94, 62, 59, 50, 11, 15, 60, 10, 20, 30, 35, 99, 96, 59, 51, 58, 12, 46, 7, 64, 18, 28, 11, 98, 35, 76, 76, 15, 54, 40, 19, 40, 53, 10, 72, 22], [21, 20, 69, 1, 27, 36, 33, 90, 63, 14, 86, 32, 11, 93, 93, 74, 65, 49, 84, 94, 34, 61, 56, 95, 39, 50, 30, 14, 35, 25, 53, 56, 29, 40, 65, 53, 99, 64, 21, 81, 14, 10, 74, 1, 12], [79, 15, 42, 97, 70, 30, 28, 31, 17, 97, 85, 50, 51, 87, 67, 49, 92, 28, 81, 14, 80, 89, 3, 69, 70, 95, 68, 67, 60, 68, 99, 44, 74, 55, 69, 78, 34, 2, 79, 34, 4, 12, 13, 73, 4], [31, 44, 56, 6, 71, 62, 82, 94, 22, 78, 12, 48, 46, 72, 25, 42, 75, 55, 25, 80, 81, 54, 92, 68, 98, 26, 6, 52, 85, 64, 58, 57, 72, 68, 75, 34, 2, 83, 39, 67, 73, 95, 76, 12, 73], [39, 32, 69, 72, 32, 22, 88, 51, 91, 41, 50, 17, 45, 59, 44, 32, 48, 30, 28, 83, 18, 20, 74, 11, 60, 34, 39, 38, 17, 49, 87, 71, 6, 56, 24, 60, 72, 4, 81, 66, 22, 51, 51, 16, 85], [40, 8, 71, 64, 71, 4, 25, 59, 70, 82, 79, 85, 16, 55, 24, 11, 71, 42, 3, 41, 22, 26, 4, 16, 63, 17, 19, 79, 7, 66, 55, 45, 87, 72, 1, 17, 39, 8, 57, 85, 50, 55, 26, 95, 53], [33, 30, 94, 36, 21, 41, 37, 21, 29, 8, 52, 39, 69, 14, 85, 38, 15, 30, 71, 27, 72, 35, 41, 53, 61, 95, 45, 30, 91, 1, 33, 78, 7, 62, 22, 51, 69, 85, 55, 31, 54, 27, 44, 79, 87], [60, 53, 17, 94, 36, 66, 2, 97, 20, 10, 69, 58, 81, 47, 63, 39, 62, 82, 60, 73, 74, 32, 63, 39, 18, 24, 2, 16, 79, 51, 84, 54, 56, 62, 71, 82, 89, 77, 60, 75, 72, 91, 20, 64, 98], [68, 79, 77, 49, 86, 26, 52, 61, 9, 5, 30, 4, 31, 14, 25, 28, 15, 67, 95, 77, 9, 66, 23, 48, 33, 28, 63, 8, 36, 2, 24, 22, 79, 24, 69, 91, 97, 53, 85, 81, 58, 35, 55, 26, 46], [25, 85, 11, 24, 78, 24, 73, 2, 6, 25, 81, 3, 5, 32, 48, 55, 93, 36, 36, 25, 56, 28, 35, 13, 79, 60, 27, 75, 6, 56, 27, 42, 94, 97, 38, 55, 19, 86, 13, 68, 6, 29, 94, 89, 61], [15, 12, 21, 82, 25, 38, 69, 76, 49, 29, 62, 42, 22, 95, 48, 28, 23, 53, 16, 60, 40, 97, 39, 68, 6, 47, 11, 10, 31, 71, 14, 59, 6, 58, 18, 33, 30, 84, 92, 1, 57, 81, 59, 26, 53], [18, 24, 18, 39, 79, 36, 90, 32, 84, 70, 91, 72, 39, 86, 37, 38, 71, 73, 34, 98, 28, 63, 73, 30, 41, 95, 8, 8, 78, 9, 98, 25, 9, 64, 3, 96, 27, 74, 66, 82, 59, 40, 24, 23, 41], [53, 49, 66, 61, 64, 34, 27, 64, 60, 35, 53, 72, 71, 58, 13, 76, 77, 53, 17, 57, 60, 15, 78, 19, 35, 18, 17, 84, 25, 37, 23, 23, 75, 46, 84, 7, 87, 62, 23, 91, 85, 21, 58, 96, 50], [28, 66, 93, 9, 35, 61, 68, 86, 23, 6, 84, 69, 12, 59, 65, 39, 41, 3, 42, 43, 85, 66, 96, 29, 47, 92, 97, 26, 15, 45, 90, 73, 61, 85, 20, 49, 27, 65, 9, 58, 51, 38, 84, 19, 44], [11, 78, 89, 76, 45, 7, 3, 80, 62, 1, 15, 44, 11, 1, 3, 22, 43, 6, 22, 50, 28, 78, 96, 29, 5, 35, 11, 1, 7, 3, 86, 31, 3, 17, 18, 79, 99, 80, 94, 99, 17, 79, 42, 27, 65], [30, 30, 69, 65, 4, 11, 58, 13, 10, 88, 84, 18, 87, 42, 99, 44, 62, 91, 79, 24, 30, 65, 41, 67, 24, 32, 63, 4, 98, 1, 21, 8, 46, 12, 1, 22, 78, 89, 28, 72, 64, 40, 89, 55, 87], [60, 41, 80, 59, 68, 36, 33, 94, 45, 75, 50, 47, 77, 44, 68, 88, 33, 97, 76, 21, 97, 46, 97, 73, 31, 62, 94, 16, 12, 54, 9, 35, 53, 43, 70, 89, 56, 64, 28, 87, 29, 86, 58, 24, 20], [27, 97, 19, 90, 38, 60, 3, 23, 59, 91, 91, 74, 24, 56, 52, 41, 66, 98, 22, 66, 28, 88, 38, 86, 67, 58, 37, 2, 57, 87, 77, 79, 97, 45, 53, 77, 84, 
7, 77, 39, 68, 63, 46, 91, 96], [2, 15, 5, 3, 16, 49, 90, 6, 35, 38, 84, 86, 64, 85, 32, 1, 48, 23, 18, 17, 31, 93, 54, 77, 60, 66, 73, 96, 86, 18, 18, 83, 63, 31, 29, 88, 97, 83, 80, 51, 32, 21, 30, 7, 38], [12, 59, 92, 14, 71, 17, 23, 77, 20, 5, 6, 13, 3, 53, 31, 3, 8, 71, 50, 71, 75, 88, 59, 21, 20, 93, 74, 49, 80, 74, 38, 33, 69, 59, 12, 8, 70, 87, 48, 67, 38, 93, 34, 4, 7], [85, 74, 96, 89, 77, 85, 83, 59, 8, 61, 50, 84, 8, 16, 48, 62, 56, 28, 74, 21, 44, 79, 70, 41, 35, 56, 85, 17, 26, 63, 74, 34, 71, 32, 4, 10, 79, 56, 35, 33, 25, 47, 11, 34, 36], [17, 12, 80, 97, 26, 74, 13, 82, 85, 87, 87, 36, 69, 45, 79, 88, 12, 83, 97, 89, 38, 77, 88, 67, 76, 66, 20, 40, 34, 22, 15, 97, 66, 35, 98, 91, 31, 77, 53, 94, 90, 88, 57, 65, 38], [38, 86, 10, 46, 27, 42, 2, 58, 19, 62, 11, 14, 57, 33, 44, 18, 29, 30, 3, 32, 15, 49, 87, 60, 98, 46, 80, 50, 6, 80, 20, 49, 28, 26, 56, 48, 6, 53, 59, 80, 33, 12, 78, 39, 2]],34,),
([[19, 98, 9, 31, 79, 4, 63, 46, 32, 81, 5, 39, 97, 92, 13, 68, 28, 13, 92, 57, 99, 24, 9, 7, 22, 3, 72, 4, 42, 2, 53, 46, 6, 57, 86, 3, 17, 74, 88, 60, 39, 28, 45, 94], [92, 4, 82, 39, 3, 65, 97, 16, 46, 94, 40, 55, 97, 36, 60, 95, 36, 36, 47, 48, 10, 22, 28, 36, 32, 13, 34, 63, 65, 80, 91, 22, 31, 48, 93, 22, 71, 55, 40, 4, 78, 43, 81, 65], [2, 82, 3, 56, 85, 77, 49, 27, 60, 67, 69, 37, 48, 66, 94, 70, 27, 77, 5, 52, 58, 25, 91, 62, 16, 48, 71, 52, 67, 15, 81, 67, 61, 66, 69, 24, 95, 44, 71, 25, 20, 89, 66, 66], [10, 50, 70, 11, 93, 30, 85, 27, 42, 36, 45, 97, 27, 56, 37, 70, 39, 8, 76, 47, 67, 54, 9, 43, 12, 40, 3, 97, 77, 12, 37, 7, 70, 41, 4, 87, 4, 67, 38, 27, 11, 93, 93, 37], [58, 8, 32, 78, 84, 88, 93, 60, 65, 10, 19, 39, 45, 48, 18, 71, 88, 86, 16, 6, 71, 82, 99, 49, 88, 80, 19, 83, 65, 22, 31, 14, 30, 95, 51, 32, 43, 17, 92, 98, 62, 17, 61, 6], [93, 9, 31, 30, 59, 73, 10, 64, 33, 3, 93, 53, 41, 78, 15, 10, 80, 97, 92, 39, 24, 79, 13, 83, 11, 13, 40, 59, 96, 54, 61, 90, 59, 80, 17, 13, 13, 15, 11, 1, 35, 82, 44, 58], [1, 86, 52, 66, 94, 53, 82, 65, 3, 74, 48, 15, 67, 77, 62, 88, 30, 43, 32, 99, 35, 55, 15, 34, 98, 82, 99, 23, 32, 50, 50, 83, 93, 40, 44, 12, 68, 22, 43, 79, 85, 42, 99, 19], [72, 79, 4, 25, 51, 60, 37, 26, 73, 44, 55, 50, 31, 70, 25, 60, 6, 19, 5, 69, 59, 54, 5, 49, 20, 54, 77, 73, 78, 13, 97, 48, 87, 94, 63, 82, 82, 43, 78, 12, 39, 91, 57, 93], [71, 79, 83, 9, 84, 62, 22, 36, 96, 3, 82, 16, 3, 76, 88, 58, 75, 23, 33, 68, 61, 14, 38, 73, 98, 53, 61, 33, 83, 67, 56, 61, 38, 27, 40, 6, 96, 48, 18, 32, 84, 36, 79, 23], [14, 85, 46, 3, 7, 17, 68, 58, 50, 99, 70, 96, 99, 46, 59, 22, 72, 91, 28, 2, 59, 54, 66, 63, 27, 7, 12, 8, 9, 86, 18, 92, 38, 34, 70, 95, 15, 61, 68, 5, 87, 77, 61, 27], [45, 58, 95, 19, 30, 63, 94, 5, 62, 75, 74, 41, 65, 79, 85, 86, 96, 26, 77, 69, 78, 54, 55, 68, 8, 9, 95, 3, 27, 9, 93, 98, 29, 74, 77, 65, 40, 78, 96, 80, 56, 26, 33, 95], [72, 25, 97, 94, 1, 1, 27, 68, 37, 24, 44, 88, 6, 39, 65, 93, 88, 77, 92, 15, 64, 31, 86, 76, 17, 26, 77, 53, 41, 45, 81, 26, 51, 92, 38, 50, 42, 42, 32, 85, 9, 80, 5, 38], [9, 70, 79, 82, 69, 41, 74, 80, 27, 40, 53, 23, 92, 75, 4, 68, 80, 28, 29, 58, 17, 70, 18, 13, 64, 60, 61, 35, 89, 55, 35, 42, 11, 76, 54, 38, 32, 78, 25, 97, 98, 59, 70, 57], [41, 4, 7, 99, 19, 31, 20, 21, 25, 12, 98, 17, 96, 1, 79, 65, 63, 25, 71, 34, 44, 70, 1, 79, 77, 21, 77, 40, 17, 17, 76, 34, 39, 75, 14, 79, 87, 4, 33, 25, 41, 86, 32, 1], [63, 88, 53, 7, 43, 37, 70, 15, 34, 63, 65, 72, 35, 76, 46, 24, 1, 77, 79, 34, 37, 13, 16, 36, 70, 98, 76, 54, 44, 38, 47, 49, 36, 64, 63, 24, 68, 89, 11, 46, 3, 7, 54, 11], [65, 41, 55, 59, 26, 54, 14, 47, 16, 12, 93, 59, 32, 10, 93, 83, 55, 73, 89, 19, 39, 9, 17, 91, 8, 87, 55, 77, 41, 8, 13, 77, 55, 81, 20, 69, 25, 16, 43, 82, 59, 73, 35, 10], [99, 19, 13, 89, 69, 81, 34, 43, 87, 67, 10, 32, 97, 71, 13, 38, 11, 15, 87, 83, 8, 49, 88, 66, 30, 44, 54, 97, 83, 31, 24, 86, 39, 93, 34, 61, 4, 50, 53, 81, 28, 38, 4, 16], [42, 43, 64, 31, 79, 9, 68, 83, 34, 88, 11, 35, 28, 92, 11, 38, 98, 15, 61, 8, 65, 24, 50, 10, 17, 78, 1, 11, 41, 3, 17, 64, 75, 88, 33, 32, 25, 91, 47, 43, 81, 81, 57, 40], [68, 82, 75, 41, 40, 76, 37, 74, 15, 58, 58, 11, 98, 99, 8, 31, 15, 93, 79, 64, 31, 7, 94, 89, 79, 77, 74, 19, 49, 15, 3, 18, 22, 96, 95, 74, 45, 21, 34, 93, 74, 28, 54, 10], [32, 78, 32, 52, 30, 56, 72, 19, 22, 88, 28, 41, 43, 69, 73, 72, 59, 56, 82, 40, 77, 70, 16, 18, 42, 81, 2, 82, 64, 11, 55, 2, 2, 57, 18, 86, 16, 27, 17, 54, 17, 6, 97, 13], [6, 90, 83, 19, 61, 90, 86, 11, 86, 96, 7, 86, 6, 15, 38, 41, 56, 18, 35, 
98, 45, 29, 69, 88, 32, 94, 5, 44, 98, 50, 82, 21, 22, 61, 39, 85, 99, 5, 33, 71, 24, 39, 72, 15], [70, 5, 87, 48, 20, 76, 21, 86, 89, 12, 66, 30, 7, 58, 18, 60, 18, 92, 48, 34, 72, 83, 6, 45, 60, 71, 84, 24, 93, 92, 69, 17, 62, 33, 62, 6, 3, 74, 54, 11, 87, 46, 4, 7], [26, 97, 35, 28, 41, 50, 99, 39, 80, 10, 71, 7, 25, 69, 90, 30, 11, 71, 39, 26, 57, 55, 22, 12, 64, 86, 66, 60, 62, 52, 62, 76, 65, 15, 40, 7, 55, 37, 86, 97, 33, 29, 19, 69], [14, 9, 5, 35, 85, 28, 45, 2, 6, 31, 32, 75, 59, 14, 74, 59, 1, 55, 31, 59, 8, 66, 99, 95, 12, 31, 99, 96, 81, 57, 8, 19, 53, 11, 57, 69, 59, 28, 2, 11, 64, 18, 47, 53], [5, 19, 5, 40, 83, 76, 92, 48, 99, 23, 55, 34, 87, 97, 58, 77, 98, 93, 30, 61, 82, 56, 99, 5, 4, 69, 39, 79, 73, 50, 72, 74, 22, 88, 24, 73, 22, 34, 48, 76, 81, 4, 57, 63], [30, 65, 97, 91, 78, 4, 35, 33, 51, 12, 68, 98, 78, 2, 91, 95, 33, 91, 45, 56, 28, 98, 30, 34, 1, 52, 13, 82, 40, 65, 9, 70, 72, 72, 88, 49, 25, 26, 26, 40, 34, 8, 2, 82], [16, 92, 72, 63, 18, 39, 42, 83, 32, 62, 32, 85, 93, 69, 84, 22, 27, 1, 13, 97, 6, 13, 78, 72, 67, 37, 76, 8, 93, 20, 62, 23, 68, 25, 32, 58, 25, 69, 10, 64, 31, 4, 57, 71], [34, 21, 83, 7, 98, 58, 33, 42, 53, 85, 55, 50, 38, 81, 46, 81, 15, 8, 49, 53, 37, 83, 93, 38, 97, 28, 61, 97, 7, 99, 72, 7, 59, 21, 25, 67, 32, 48, 55, 75, 85, 96, 66, 23], [45, 10, 78, 55, 60, 9, 83, 3, 32, 54, 87, 83, 76, 23, 14, 36, 48, 67, 10, 86, 68, 79, 52, 99, 49, 44, 5, 92, 91, 15, 94, 8, 55, 20, 77, 6, 1, 46, 42, 82, 70, 49, 90, 34], [57, 17, 89, 63, 61, 59, 92, 79, 4, 91, 33, 20, 21, 41, 74, 44, 32, 64, 37, 61, 26, 22, 40, 59, 50, 77, 96, 73, 39, 16, 98, 74, 88, 10, 45, 90, 34, 63, 68, 93, 86, 89, 11, 84], [88, 95, 25, 69, 31, 57, 87, 53, 81, 66, 56, 66, 91, 22, 81, 53, 57, 33, 5, 13, 17, 43, 84, 84, 92, 12, 84, 71, 56, 69, 29, 25, 11, 41, 11, 96, 38, 82, 62, 79, 81, 24, 44, 19], [37, 5, 4, 1, 94, 17, 43, 50, 30, 64, 82, 36, 1, 69, 82, 29, 81, 85, 66, 36, 62, 20, 83, 54, 82, 13, 47, 75, 97, 28, 55, 43, 44, 21, 94, 53, 47, 96, 87, 25, 96, 41, 31, 13], [6, 1, 8, 56, 62, 87, 69, 93, 22, 64, 69, 17, 18, 45, 54, 39, 65, 95, 88, 54, 16, 69, 32, 26, 35, 53, 43, 41, 24, 44, 79, 23, 75, 94, 45, 94, 55, 70, 69, 64, 14, 30, 4, 6], [39, 18, 51, 56, 89, 57, 59, 61, 17, 97, 38, 76, 81, 89, 37, 17, 91, 31, 14, 53, 36, 86, 5, 40, 70, 69, 88, 22, 14, 25, 84, 65, 49, 35, 52, 92, 29, 58, 72, 82, 31, 21, 6, 9], [30, 18, 30, 84, 60, 55, 10, 13, 41, 2, 5, 33, 65, 37, 61, 58, 12, 41, 28, 82, 36, 94, 42, 54, 54, 38, 85, 71, 69, 58, 99, 79, 9, 48, 18, 12, 27, 78, 77, 94, 36, 49, 9, 34], [76, 50, 89, 50, 22, 5, 15, 18, 77, 15, 89, 98, 66, 21, 87, 81, 61, 4, 48, 1, 7, 61, 53, 95, 35, 21, 60, 76, 5, 3, 59, 76, 10, 46, 50, 62, 59, 94, 17, 56, 44, 19, 18, 26], [28, 49, 32, 20, 85, 46, 58, 16, 76, 1, 46, 32, 14, 14, 83, 65, 25, 42, 13, 53, 68, 60, 84, 68, 41, 6, 26, 91, 22, 29, 40, 66, 36, 87, 19, 16, 88, 34, 63, 25, 75, 69, 84, 14], [21, 90, 44, 52, 79, 85, 80, 75, 48, 78, 85, 62, 80, 2, 42, 66, 28, 5, 8, 73, 81, 83, 42, 26, 95, 98, 93, 74, 58, 11, 97, 95, 22, 54, 93, 41, 85, 40, 12, 16, 43, 26, 94, 87], [97, 88, 6, 98, 19, 23, 25, 93, 16, 2, 93, 58, 97, 18, 44, 54, 9, 2, 55, 5, 20, 4, 5, 17, 5, 50, 72, 96, 25, 25, 89, 42, 31, 92, 47, 79, 51, 55, 60, 27, 39, 78, 13, 96], [35, 48, 14, 36, 53, 39, 5, 72, 10, 2, 95, 39, 25, 34, 79, 56, 81, 22, 33, 70, 58, 82, 30, 63, 67, 95, 12, 10, 62, 63, 36, 56, 6, 31, 33, 74, 63, 38, 26, 16, 24, 24, 73, 25], [23, 54, 67, 32, 74, 47, 35, 86, 14, 25, 59, 54, 79, 94, 95, 78, 8, 8, 95, 3, 97, 12, 32, 96, 21, 74, 41, 42, 57, 90, 77, 62, 73, 97, 95, 56, 12, 56, 58, 
23, 89, 93, 33, 18], [41, 12, 62, 58, 4, 13, 31, 22, 39, 58, 30, 34, 95, 6, 90, 49, 45, 77, 93, 50, 26, 39, 86, 52, 4, 35, 5, 28, 21, 73, 10, 55, 33, 40, 5, 73, 81, 33, 81, 70, 91, 91, 78, 5], [81, 4, 71, 37, 78, 13, 29, 98, 98, 39, 48, 89, 35, 62, 20, 95, 59, 44, 54, 89, 58, 93, 52, 50, 46, 98, 10, 19, 11, 40, 40, 36, 87, 55, 44, 89, 44, 45, 85, 63, 91, 2, 6, 99], [73, 20, 55, 97, 47, 93, 27, 1, 13, 67, 65, 84, 58, 90, 76, 70, 50, 9, 55, 36, 20, 10, 10, 31, 84, 89, 45, 31, 9, 88, 4, 45, 24, 78, 72, 91, 53, 94, 78, 40, 58, 82, 77, 29]],37,),
([[91, 36, 24, 57], [88, 3, 45, 19], [49, 9, 86, 22], [55, 16, 72, 81]],3,),
([[27, 35, 35, 78, 52, 41, 22, 22, 75, 96, 91, 20, 46, 34, 83, 62, 10, 13, 92, 8, 86, 54, 92, 16, 17, 40, 49, 62, 19, 49, 38, 82, 62, 37, 93, 15, 85], [61, 56, 7, 36, 86, 37, 70, 40, 78, 17, 1, 44, 66, 42, 45, 46, 55, 21, 5, 84, 41, 86, 40, 87, 65, 13, 88, 89, 92, 68, 23, 4, 40, 61, 58, 98, 84], [17, 30, 92, 24, 95, 96, 38, 59, 63, 93, 64, 71, 52, 54, 15, 56, 70, 54, 81, 97, 61, 44, 1, 63, 59, 3, 13, 11, 61, 12, 82, 80, 33, 41, 4, 88, 47], [46, 54, 71, 9, 83, 93, 70, 36, 58, 86, 86, 38, 43, 67, 25, 78, 5, 18, 28, 30, 70, 95, 18, 25, 34, 72, 92, 71, 63, 98, 25, 65, 59, 66, 98, 96, 63], [12, 44, 54, 26, 54, 86, 31, 97, 22, 48, 8, 80, 28, 78, 68, 24, 83, 25, 47, 17, 66, 91, 8, 62, 37, 5, 46, 4, 59, 70, 29, 8, 48, 74, 99, 61, 53], [74, 64, 16, 76, 25, 79, 64, 78, 60, 70, 67, 27, 17, 89, 35, 69, 62, 94, 82, 84, 27, 44, 81, 63, 98, 56, 8, 57, 76, 61, 99, 3, 47, 14, 45, 79, 39], [67, 24, 62, 2, 69, 68, 2, 62, 11, 17, 12, 83, 77, 83, 84, 21, 56, 31, 31, 69, 40, 2, 11, 52, 24, 48, 62, 95, 2, 90, 17, 60, 55, 49, 75, 55, 42], [77, 90, 94, 20, 72, 64, 84, 75, 28, 75, 73, 36, 27, 6, 28, 13, 87, 47, 11, 85, 39, 24, 75, 45, 90, 48, 42, 84, 59, 29, 68, 82, 46, 58, 12, 32, 95], [8, 89, 11, 26, 41, 60, 19, 48, 17, 63, 10, 34, 93, 51, 45, 28, 18, 96, 36, 5, 82, 80, 3, 6, 97, 60, 80, 44, 66, 66, 69, 92, 52, 1, 5, 68, 93], [66, 79, 5, 59, 95, 26, 14, 41, 75, 83, 74, 52, 42, 81, 82, 60, 89, 15, 47, 33, 95, 37, 47, 36, 70, 46, 52, 72, 75, 26, 29, 2, 24, 18, 33, 85, 86], [33, 32, 33, 40, 62, 14, 45, 26, 27, 10, 71, 81, 43, 68, 97, 16, 24, 21, 93, 50, 79, 62, 92, 52, 18, 8, 9, 59, 44, 70, 98, 67, 18, 83, 73, 13, 40], [69, 47, 24, 37, 44, 46, 44, 75, 60, 74, 3, 17, 51, 5, 35, 82, 91, 90, 57, 31, 77, 60, 80, 50, 22, 80, 72, 32, 18, 33, 64, 45, 38, 30, 64, 42, 13], [77, 68, 42, 6, 79, 27, 96, 53, 7, 31, 88, 66, 72, 71, 65, 8, 53, 68, 30, 83, 61, 37, 84, 45, 53, 13, 32, 62, 2, 77, 8, 96, 48, 14, 85, 33, 36], [85, 59, 70, 69, 48, 30, 28, 41, 76, 58, 41, 11, 6, 20, 91, 29, 73, 48, 71, 85, 82, 15, 2, 97, 75, 53, 55, 70, 13, 44, 58, 17, 41, 25, 69, 14, 29], [52, 30, 12, 91, 95, 93, 91, 69, 9, 26, 27, 15, 79, 98, 14, 2, 46, 70, 80, 73, 80, 44, 86, 19, 72, 44, 45, 85, 67, 79, 66, 22, 17, 58, 80, 47, 14], [41, 69, 55, 21, 80, 31, 32, 80, 9, 37, 9, 21, 56, 8, 24, 80, 95, 20, 5, 50, 2, 67, 58, 96, 89, 99, 30, 15, 93, 2, 70, 93, 22, 70, 93, 62, 81], [96, 82, 25, 18, 46, 75, 69, 63, 54, 27, 44, 62, 70, 75, 29, 96, 4, 69, 60, 82, 72, 23, 38, 62, 12, 85, 22, 96, 58, 92, 61, 18, 67, 94, 77, 65, 35], [39, 26, 17, 50, 32, 22, 39, 89, 32, 88, 59, 8, 44, 30, 77, 23, 64, 77, 30, 70, 94, 98, 17, 88, 73, 54, 19, 31, 25, 97, 38, 55, 50, 37, 35, 96, 60], [86, 67, 75, 88, 98, 30, 15, 75, 84, 88, 74, 39, 99, 42, 95, 27, 5, 76, 98, 75, 29, 62, 91, 56, 43, 80, 79, 13, 97, 5, 94, 50, 49, 90, 73, 69, 99], [55, 59, 1, 67, 9, 26, 66, 92, 20, 90, 14, 2, 21, 59, 19, 46, 15, 32, 36, 78, 35, 9, 98, 95, 25, 41, 44, 74, 98, 49, 55, 15, 66, 62, 26, 42, 35], [45, 32, 62, 64, 52, 96, 43, 92, 55, 44, 91, 79, 59, 54, 88, 85, 1, 85, 87, 22, 50, 31, 50, 29, 39, 1, 65, 50, 18, 49, 75, 37, 70, 76, 35, 72, 43], [65, 43, 66, 35, 34, 42, 80, 8, 6, 40, 68, 23, 63, 14, 89, 58, 36, 34, 76, 21, 45, 58, 15, 45, 17, 50, 88, 55, 92, 31, 31, 85, 97, 10, 66, 53, 11], [56, 79, 89, 34, 87, 43, 92, 68, 3, 14, 29, 85, 17, 70, 45, 53, 50, 48, 69, 65, 74, 5, 28, 96, 71, 42, 60, 2, 22, 92, 97, 95, 98, 10, 28, 88, 78], [36, 61, 2, 51, 34, 35, 43, 11, 32, 38, 47, 81, 85, 95, 5, 64, 86, 53, 29, 1, 30, 26, 86, 10, 13, 25, 15, 1, 75, 44, 35, 13, 19, 48, 12, 73, 84], [82, 64, 25, 6, 5, 
38, 12, 55, 66, 67, 26, 51, 31, 6, 30, 96, 82, 39, 9, 99, 73, 63, 70, 99, 4, 30, 45, 26, 74, 70, 31, 26, 71, 8, 61, 85, 38], [48, 62, 97, 16, 3, 62, 56, 67, 99, 87, 12, 88, 55, 13, 15, 7, 24, 13, 19, 67, 5, 50, 74, 64, 48, 49, 84, 80, 63, 7, 98, 34, 79, 5, 57, 74, 42], [72, 85, 45, 71, 40, 9, 64, 93, 60, 20, 17, 39, 63, 22, 71, 45, 28, 6, 81, 66, 61, 8, 7, 80, 66, 22, 43, 49, 71, 26, 98, 54, 39, 12, 41, 99, 2], [52, 93, 84, 53, 55, 19, 26, 37, 13, 87, 25, 58, 47, 23, 3, 51, 78, 79, 35, 78, 17, 6, 58, 84, 48, 10, 14, 27, 68, 83, 52, 51, 45, 66, 57, 27, 47], [88, 42, 63, 58, 68, 66, 46, 22, 85, 54, 78, 84, 98, 84, 33, 73, 42, 38, 77, 13, 55, 69, 97, 58, 49, 50, 46, 1, 91, 39, 6, 52, 68, 73, 63, 90, 2], [61, 24, 64, 5, 65, 50, 55, 35, 71, 4, 50, 85, 73, 90, 58, 1, 20, 75, 32, 13, 28, 10, 2, 5, 71, 97, 71, 66, 14, 85, 18, 14, 13, 83, 21, 30, 35], [96, 51, 55, 58, 82, 71, 12, 74, 38, 3, 46, 73, 57, 71, 26, 46, 48, 18, 63, 44, 57, 59, 82, 62, 46, 18, 85, 15, 6, 60, 59, 82, 23, 32, 35, 55, 35], [2, 24, 90, 62, 90, 44, 4, 22, 51, 16, 56, 30, 66, 37, 18, 19, 94, 9, 31, 82, 69, 74, 86, 49, 40, 80, 23, 94, 60, 10, 75, 92, 30, 25, 27, 72, 74], [98, 93, 17, 27, 23, 91, 74, 80, 70, 1, 89, 49, 17, 33, 32, 14, 4, 96, 62, 17, 89, 14, 6, 11, 28, 9, 72, 30, 60, 44, 38, 80, 64, 84, 74, 62, 53], [99, 7, 63, 10, 21, 94, 70, 34, 12, 75, 55, 68, 87, 33, 33, 14, 2, 3, 52, 18, 35, 68, 8, 71, 37, 44, 26, 11, 57, 81, 69, 77, 20, 99, 82, 14, 77], [86, 13, 54, 5, 89, 15, 79, 15, 86, 36, 85, 17, 13, 59, 94, 16, 60, 16, 50, 99, 49, 2, 8, 91, 69, 92, 58, 52, 5, 23, 42, 74, 26, 71, 82, 83, 2], [89, 44, 88, 67, 64, 70, 91, 85, 18, 33, 46, 80, 57, 85, 66, 51, 45, 2, 39, 3, 80, 28, 28, 97, 31, 44, 20, 11, 11, 39, 6, 64, 63, 60, 63, 31, 38], [99, 18, 9, 42, 28, 67, 23, 10, 5, 2, 25, 60, 87, 67, 53, 17, 41, 33, 92, 5, 87, 73, 70, 6, 73, 81, 13, 3, 73, 14, 67, 36, 84, 46, 82, 1, 20]],36,)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param)))
|
import re
def response(hey_bob):
hey_bob = re.sub(r"[^a-zA-Z0-9\?]", "", hey_bob)
hey_bob_list = list(hey_bob)
flag_lowercase = False
flag_uppercase = False
flag_question = False
if len(hey_bob) == 0: return "Fine. Be that way!"
if hey_bob[-1] == "?": flag_question = True
for letter in hey_bob_list:
if letter.islower(): flag_lowercase = True
if letter.isupper(): flag_uppercase = True
if flag_uppercase and flag_lowercase is False and flag_question is False: return "Whoa, chill out!"
if flag_uppercase and flag_lowercase is False and flag_question: return "Calm down, I know what I'm doing!"
if flag_question: return "Sure."
return "Whatever."
|
""" Skill Labeller Preprocessor API endpoint """
import json
import falcon
import random
import logging
try:
from skilloracle import SkillOracle
except:
from ..skilloracle import SkillOracle
class SkillOracleEndpoint(object):
def __init__(self, fetcher=None):
self.host = "skilloracle"
self.oracle = SkillOracle(host=self.host, port=7000)
self.put_valid_keys = { 'name', 'context', 'label'}
self.fetcher = fetcher
if not fetcher:
            self.fetcher = None  # TODO: what kind of default would we use here?
def on_put(self, req, resp):
query = falcon.uri.parse_query_string(req.query_string)
# ^ just use req.params.items or, below, req.params.keys()
query_keys = set(query.keys())
#if self.put_valid_keys.issuperset(query_keys):
if query_keys.issubset(self.put_valid_keys):
label = query['label']\
if 'label' in query else None
name = query['name']\
if 'name' in query else None
context = query['context']\
if 'context' in query else None
response = self.oracle.PUT(label=label,
name=name,
context=context)
            resp.body = json.dumps(response)  # should this be versioned?
resp.status = falcon.HTTP_200
def on_get(self, req, resp):
response = self.oracle.GET()
response["response"] = json.loads(response["response"])
print(response)
        # Not tested to date; need to resolve fetcher/db access
name = response['response']['name']
context = response['response']['context']
importance = response['importance']
number = response['number of candidates']
resp.body = json.dumps({'name':name,
'context':context,
'importance':importance,
'number of candidates': number})
resp.status = falcon.HTTP_200
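# Minimal wiring sketch (hypothetical; assumes a Falcon WSGI app defined
# elsewhere and a SkillOracle service reachable on the "skilloracle" host):
#
#   import falcon
#   app = falcon.API()
#   app.add_route('/oracle', SkillOracleEndpoint())
#
#   # PUT /oracle?name=...&context=...&label=...  pushes a labelled example
#   # GET /oracle                                 fetches the next candidate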
|
import os
class UrlList:
DEFAULT_URL_LIST = 'config/url_list.txt'
def __init__(self, url_list_path=DEFAULT_URL_LIST):
if not os.path.exists(url_list_path):
raise FileNotFoundError()
self.url_list_path = url_list_path
def read(self):
urls = []
with open(self.url_list_path) as f:
urls = [l.strip() for l in f.readlines()]
return urls
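# Usage sketch (assumes the default config/url_list.txt exists, one URL per line):
#
#   urls = UrlList().read()
#   for url in urls:
#       print(url)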
|
from django import template
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from timetable.models import RoomLink
register = template.Library()
@register.filter
def for_event(weeks, event):
result = []
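    # Seed the "previous booking" with a sentinel that can never equal a real
    # room (or None), so the first week always starts a new run; the bogus
    # leading entry this creates is dropped by the final `result[1:]`.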
oldBookingTxt = True
bookingTxt = None
run = 1
for week in weeks:
try:
bookingTxt = week.booking_set.filter(event=event)[0].room
except IndexError:
bookingTxt = None
if bookingTxt == oldBookingTxt:
run += 1
else:
result.append((run, oldBookingTxt))
run = 1
oldBookingTxt = bookingTxt
if bookingTxt == oldBookingTxt:
result.append((run, oldBookingTxt))
return result[1:]
affixes = '*^+-!&%$#@=?'
affix_map = {ord(i): None for i in affixes}
@register.simple_tag
def room_link(r, _class):
    # Strip the affix characters (e.g. '*', '^', '+') from each room name before lookup.
rooms = r.split(",")
rooms_simple = [i.strip().translate(affix_map) for i in rooms]
results = []
for (room, simple) in zip(rooms, rooms_simple):
roomlink = None
# print(room)
if len(simple) > 0:
try:
roomlink = RoomLink.objects.get(room__iexact=simple)
except (RoomLink.DoesNotExist, RoomLink.MultipleObjectsReturned):
roomlink = RoomLink.objects.filter(
room__icontains=simple).first()
if roomlink is None:
results.append(room)
else:
results.append(format_html('<a target="_blank" rel="noopener noreferrer" href="{}" class="{}">{}</a>',
roomlink.url, _class, room))
return mark_safe(",".join(results))
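# Hypothetical template usage (the {% load %} name depends on this module's
# file name, which is not shown here):
#
#   {% for run, room in weeks|for_event:event %} {{ run }} week(s) in {{ room }} {% endfor %}
#   {% room_link booking.room "room-link" %}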
|
"""
This script contains unit tests for the embedded
Pinterest pin extractor.
Use pytest to run this script.
Command to run: /stampify$ python -m pytest
"""
import pytest
from data_models.embedded_pinterest_pin import EPinterestPin
from extraction.content_extractors import embedded_pinterest_pin_extractor
from tests.test_extraction import unit_test_utils as test_utils
__EXTRACTOR = embedded_pinterest_pin_extractor.EPinterestPinExtractor()
__soup = test_utils.soup('pinterest_pin.html')
expected_output_1 \
= EPinterestPin('https://www.pinterest.com/pin/99360735500167749/')
acceptable_test_data = [(__soup.find('a', class_='a_tag1'),
expected_output_1), ]
non_acceptable_test_data = [(__soup.find('a', class_='a_tag2'), None),
(__soup.find('a', class_='a_tag3'), None),
(__soup.find('a', class_='a_tag4'), None),
(__soup.find('img'), None), ]
@pytest.mark.parametrize("input_node, expected", acceptable_test_data)
def test_tag_should_return_epinterestpin_object(input_node, expected):
actual_pinterest_pin_content = __EXTRACTOR.validate_and_extract(input_node)
__assert_pinterest_pin(actual_pinterest_pin_content, expected)
@pytest.mark.parametrize("input_node, expected", non_acceptable_test_data)
def test_tag_should_return_none(input_node, expected):
actual_pinterest_pin_content = __EXTRACTOR.validate_and_extract(input_node)
assert actual_pinterest_pin_content is expected
def __assert_pinterest_pin(actual_pinterest_pin, expected_pinterest_pin):
"""This is custom assert to compare actual and
expected pinterest pin content"""
assert isinstance(actual_pinterest_pin, EPinterestPin)
assert actual_pinterest_pin.pin_url == expected_pinterest_pin.pin_url
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import datetime
import hashlib
import logging
import time
from bisect import bisect_right
from collections import OrderedDict, defaultdict
from enum import Enum
from typing import List
import numpy as np
import torch
from fairseq.data import FairseqDataset, data_utils
from fairseq.distributed import utils as distributed_utils
def get_time_gap(s, e):
return (
datetime.datetime.fromtimestamp(e) - datetime.datetime.fromtimestamp(s)
).__str__()
logger = logging.getLogger(__name__)
def default_virtual_size_func(datasets, ratios, max_scale_up=1.5):
sizes = [len(d) for d in datasets]
if ratios is None:
return sum(sizes)
largest_idx = np.argmax(sizes)
largest_r = ratios[largest_idx]
largest_s = sizes[largest_idx]
# set virtual sizes relative to the largest dataset
virtual_sizes = [(r / largest_r) * largest_s for r in ratios]
vsize = sum(virtual_sizes)
max_size = sum(sizes) * max_scale_up
return int(vsize if vsize < max_size else max_size)
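# Worked example: for datasets of sizes [100, 10] and ratios [1, 1], the sizes
# are scaled to the largest dataset, giving virtual_sizes = [100, 100] and
# vsize = 200; the cap is sum(sizes) * 1.5 = 165, so 165 is returned.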
class CollateFormat(Enum):
single = 1
ordered_dict = 2
class SampledMultiDataset(FairseqDataset):
"""Samples from multiple sub-datasets according to given sampling ratios.
Args:
datasets (
List[~torch.utils.data.Dataset]
or OrderedDict[str, ~torch.utils.data.Dataset]
): datasets
        sampling_ratios (List[float]): list of probabilities for each dataset to be sampled
            (default: None, which corresponds to concatenating all datasets together).
seed (int): RNG seed to use (default: 2).
epoch (int): starting epoch number (default: 1).
eval_key (str, optional): a key used at evaluation time that causes
this instance to pass-through batches from *datasets[eval_key]*.
collate_format (CollateFormat): collater output format, either CollateFormat.ordered_dict or
CollateFormat.single (default: CollateFormat.single) where CollateFormat.single configures
the collater to output batches of data mixed from all sub-datasets,
and CollateFormat.ordered_dict configures the collater to output a dictionary of batches indexed by keys
of sub-datasets.
            Note that not all sub-datasets will be present in a single batch in both formats.
virtual_size (int, or callable): the expected virtual size of the dataset (default: default_virtual_size_func).
split (str): the split of the data, e.g. 'train', 'valid' or 'test'.
        shared_collater (bool): whether or not all sub-datasets have the same collater.
shuffle (bool): whether or not to shuffle data (default: True).
"""
def __init__(
self,
datasets,
sampling_ratios=None,
seed=2,
epoch=1,
eval_key=None,
collate_format=CollateFormat.single,
virtual_size=default_virtual_size_func,
split="",
shared_collater=False,
shuffle=True,
):
super().__init__()
self.shared_collater = shared_collater
self.shuffle = shuffle
if isinstance(datasets, OrderedDict):
self.keys = list(datasets.keys())
datasets = list(datasets.values())
elif isinstance(datasets, List):
self.keys = list(range(len(datasets)))
else:
raise AssertionError()
self.datasets = datasets
self.split = split
self.eval_key = eval_key
if self.eval_key is not None:
self.collate_format = CollateFormat.single
else:
self.collate_format = collate_format
self.seed = seed
self._cur_epoch = None
self.cumulated_sizes = None
# self.datasets[k][self._cur_indices[i]] is the data item i in this sampled dataset
# namely, data item i is sampled from the kth sub-dataset self.datasets[k]
# where self.cumulated_sizes[k-1] <= i < self.cumulated_sizes[k]
self._cur_indices = None
self._sizes = None
self.virtual_size_per_dataset = None
# caching properties
self._reset_cached_properties()
self.setup_sampling(sampling_ratios, virtual_size)
self.set_epoch(epoch)
def _clean_if_not_none(self, var_list):
for v in var_list:
if v is not None:
del v
def _reset_cached_properties(self):
self._clean_if_not_none([self._sizes, self._cur_indices])
self._sizes = None
self._cur_indices = None
def setup_sampling(self, sample_ratios, virtual_size):
sizes = [len(d) for d in self.datasets]
if sample_ratios is None:
            # default back to concatenating datasets
self.sample_ratios = None
self.virtual_size = sum(sizes)
else:
if not isinstance(sample_ratios, np.ndarray):
sample_ratios = np.array(sample_ratios)
self.sample_ratios = sample_ratios
virtual_size = (
default_virtual_size_func if virtual_size is None else virtual_size
)
self.virtual_size = (
virtual_size(self.datasets, self.sample_ratios)
if callable(virtual_size)
else virtual_size
)
def adjust_sampling(self, epoch, sampling_ratios, virtual_size):
if sampling_ratios is not None:
sampling_ratios = self._sync_sample_ratios(sampling_ratios)
self.setup_sampling(sampling_ratios, virtual_size)
def _sync_sample_ratios(self, ratios):
# in case the ratios are not precisely the same across processes
        # also to ensure every process updates the ratios at the same pace
ratios = torch.DoubleTensor(ratios)
if torch.distributed.is_initialized():
if torch.cuda.is_available():
distributed_utils.all_reduce(
ratios.cuda(), group=distributed_utils.get_data_parallel_group()
)
else:
distributed_utils.all_reduce(
ratios, group=distributed_utils.get_data_parallel_group()
)
ret = ratios.cpu()
ret = ret.numpy()
return ret
def random_choice_in_dataset(self, rng, dataset, choice_size):
if hasattr(dataset, "random_choice_in_dataset"):
return dataset.random_choice_in_dataset(rng, choice_size)
dataset_size = len(dataset)
return rng.choice(
dataset_size, choice_size, replace=(choice_size > dataset_size)
)
def get_virtual_indices(self, rng, datasets, sample_ratios, virtual_size):
def get_counts(sample_ratios):
counts = np.array([virtual_size * r for r in sample_ratios], dtype=np.int64)
diff = virtual_size - counts.sum()
assert diff >= 0
# due to round-offs, the size might not match the desired sizes
if diff > 0:
dataset_indices = rng.choice(
len(sample_ratios), size=diff, p=sample_ratios
)
for i in dataset_indices:
counts[i] += 1
return counts
def get_in_dataset_indices(datasets, sizes, sample_ratios):
counts = get_counts(sample_ratios)
            # uniformly sample the desired counts for each dataset;
            # if a desired count exceeds the dataset size, sample with replacement:
indices = [
self.random_choice_in_dataset(rng, d, c)
for c, d in zip(counts, datasets)
]
return indices
sizes = [len(d) for d in datasets]
if sample_ratios is None:
            # default back to concatenating datasets
in_dataset_indices = [list(range(s)) for s in sizes]
virtual_sizes_per_dataset = sizes
else:
ratios = sample_ratios / sample_ratios.sum()
in_dataset_indices = get_in_dataset_indices(datasets, sizes, ratios)
virtual_sizes_per_dataset = [len(d) for d in in_dataset_indices]
virtual_sizes_per_dataset = np.array(virtual_sizes_per_dataset, np.int64)
cumulative_sizes = np.cumsum(virtual_sizes_per_dataset)
assert sum(virtual_sizes_per_dataset) == virtual_size
assert cumulative_sizes[-1] == virtual_size
if virtual_size < sum(sizes):
logger.warning(
f"virtual data size ({virtual_size}) is less than real data size ({sum(sizes)})."
" If virtual size << real data size, there could be data coverage issue."
)
in_dataset_indices = np.hstack(in_dataset_indices)
return in_dataset_indices, cumulative_sizes, virtual_sizes_per_dataset
def _get_dataset_and_index(self, index):
i = bisect_right(self.cumulated_sizes, index)
return i, self._cur_indices[index]
def __getitem__(self, index):
# self.__getitem__(index) returns self.datasets[k][self._cur_indices[index]]
        # where k satisfies self.cumulated_sizes[k - 1] <= index < self.cumulated_sizes[k]
ds_idx, ds_sample_idx = self._get_dataset_and_index(index)
ret = (ds_idx, self.datasets[ds_idx][ds_sample_idx])
return ret
def num_tokens(self, index):
return self.sizes[index].max()
def num_tokens_vec(self, indices):
sizes_vec = self.sizes[np.array(indices)]
# max across all dimensions but first one
return np.amax(sizes_vec, axis=tuple(range(1, len(sizes_vec.shape))))
def size(self, index):
return self.sizes[index]
def __len__(self):
return self.virtual_size
def collater(self, samples, **extra_args):
"""Merge a list of samples to form a mini-batch."""
if len(samples) == 0:
return None
if self.collate_format == "ordered_dict":
collect_samples = [[] for _ in range(len(self.datasets))]
for (i, sample) in samples:
collect_samples[i].append(sample)
batch = OrderedDict(
[
(self.keys[i], dataset.collater(collect_samples[i]))
for i, (key, dataset) in enumerate(zip(self.keys, self.datasets))
if len(collect_samples[i]) > 0
]
)
elif self.shared_collater:
batch = self.datasets[0].collater([s for _, s in samples])
else:
samples_dict = defaultdict(list)
pad_to_length = (
defaultdict(int)
if "pad_to_length" not in extra_args
else extra_args["pad_to_length"]
)
for ds_idx, s in samples:
pad_to_length["source"] = max(
pad_to_length["source"], s["source"].size(0)
)
if s["target"] is not None:
pad_to_length["target"] = max(
pad_to_length["target"], s["target"].size(0)
)
samples_dict[ds_idx].append(s)
batches = [
self.datasets[i].collater(samples_dict[i], pad_to_length=pad_to_length)
for i in range(len(self.datasets))
if len(samples_dict[i]) > 0
]
def straight_data(tensors):
batch = torch.cat(tensors, dim=0)
return batch
src_lengths = straight_data(
[b["net_input"]["src_lengths"] for b in batches]
)
src_lengths, sort_order = src_lengths.sort(descending=True)
def straight_order(tensors):
batch = straight_data(tensors)
return batch.index_select(0, sort_order)
batch = {
"id": straight_order([b["id"] for b in batches]),
"nsentences": sum(b["nsentences"] for b in batches),
"ntokens": sum(b["ntokens"] for b in batches),
"net_input": {
"src_tokens": straight_order(
[b["net_input"]["src_tokens"] for b in batches]
),
"src_lengths": src_lengths,
},
"target": straight_order([b["target"] for b in batches])
if batches[0]["target"] is not None
else None,
}
if "prev_output_tokens" in batches[0]["net_input"]:
batch["net_input"]["prev_output_tokens"] = straight_order(
[b["net_input"]["prev_output_tokens"] for b in batches]
)
if "src_lang_id" in batches[0]["net_input"]:
batch["net_input"]["src_lang_id"] = straight_order(
[b["net_input"]["src_lang_id"] for b in batches]
)
if "tgt_lang_id" in batches[0]:
batch["tgt_lang_id"] = straight_order(
[b["tgt_lang_id"] for b in batches]
)
return batch
@property
def sizes(self):
if self._sizes is not None:
return self._sizes
start_time = time.time()
in_sub_dataset_indices = [
self._cur_indices[
0 if i == 0 else self.cumulated_sizes[i - 1] : self.cumulated_sizes[i]
]
for i in range(len(self.datasets))
]
sub_dataset_sizes = [
d.sizes[indices]
for d, indices in zip(self.datasets, in_sub_dataset_indices)
]
self._sizes = np.vstack(sub_dataset_sizes)
logger.info(f"sizes() calling time: {get_time_gap(start_time, time.time())}")
return self._sizes
def ordered_indices(self):
if self.shuffle:
indices = np.random.permutation(len(self))
else:
indices = np.arange(len(self))
sizes = self.sizes
tgt_sizes = sizes[:, 1] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else None
src_sizes = (
sizes[:, 0] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else sizes
)
# sort by target length, then source length
if tgt_sizes is not None:
indices = indices[np.argsort(tgt_sizes[indices], kind="mergesort")]
sort_indices = indices[np.argsort(src_sizes[indices], kind="mergesort")]
return sort_indices
def prefetch(self, indices):
prefetch_indices = [[] for _ in range(len(self.datasets))]
for i in indices:
ds_idx, ds_sample_idx = self._get_dataset_and_index(i)
prefetch_indices[ds_idx].append(ds_sample_idx)
for i in range(len(prefetch_indices)):
self.datasets[i].prefetch(prefetch_indices[i])
@property
def can_reuse_epoch_itr_across_epochs(self):
return False
def set_epoch(self, epoch):
super().set_epoch(epoch)
if epoch == self._cur_epoch:
# re-enter so return
return
for d in self.datasets:
if hasattr(d, "set_epoch"):
d.set_epoch(epoch)
self._cur_epoch = epoch
self._establish_virtual_datasets()
def _establish_virtual_datasets(self):
if self.sample_ratios is None and self._cur_indices is not None:
            # not a sampling dataset, no need to resample if indices are already established
return
self._reset_cached_properties()
start_time = time.time()
# Generate a weighted sample of indices as a function of the
# random seed and the current epoch.
rng = np.random.RandomState(
[
int(
hashlib.sha1(
str(self.__class__.__name__).encode("utf-8")
).hexdigest(),
16,
)
% (2**32),
self.seed % (2**32), # global seed
self._cur_epoch, # epoch index,
]
)
self._clean_if_not_none(
[self.cumulated_sizes, self.virtual_size_per_dataset, self._sizes]
)
self._sizes = None
indices, cumulated_sizes, virtual_size_per_dataset = self.get_virtual_indices(
rng, self.datasets, self.sample_ratios, self.virtual_size
)
self._cur_indices = indices
self.cumulated_sizes = cumulated_sizes
self.virtual_size_per_dataset = virtual_size_per_dataset
raw_sizes = [len(d) for d in self.datasets]
sampled_sizes = self.virtual_size_per_dataset
logger.info(
f"[{self.split}] Raw sizes: {str(dict(zip(self.keys, raw_sizes)))}; "
f"raw total size: {sum(raw_sizes)}"
)
logger.info(
f"[{self.split}] Resampled sizes: {str(dict(zip(self.keys, sampled_sizes)))}; "
f"resampled total size: {sum(sampled_sizes)}"
)
if self.sample_ratios is not None:
logger.info(
f"[{self.split}] Upsampling ratios: {str(dict(zip(self.keys, self.sample_ratios)))}"
)
else:
logger.info(f"[{self.split}] A concat dataset")
logger.info(
f"[{self.split}] virtual dataset established time: {get_time_gap(start_time, time.time())}"
)
def filter_indices_by_size(self, indices, max_sizes):
"""Filter a list of sample indices. Remove those that are longer
than specified in max_sizes.
Args:
indices (np.array): original array of sample indices
max_sizes (int or list[int] or tuple[int]): max sample size,
can be defined separately for src and tgt (then list or tuple)
Returns:
np.array: filtered sample array
list: list of removed indices
"""
sizes = self.sizes
tgt_sizes = sizes[:, 1] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else None
src_sizes = (
sizes[:, 0] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else sizes
)
return data_utils.filter_paired_dataset_indices_by_size(
src_sizes, tgt_sizes, indices, max_sizes
)
|
# Copyright 2017 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from tensorforce.contrib.remote_environment import RemoteEnvironment, MsgPackNumpyProtocol
from tensorforce.contrib.state_settable_environment import StateSettableEnvironment
from tensorforce import TensorForceError
from cached_property import cached_property
import re
import time
import itertools
import logging
class UE4Environment(RemoteEnvironment, StateSettableEnvironment):
"""
A special RemoteEnvironment for UE4 game connections.
Communicates with the remote to receive information on the definitions of action- and observation spaces.
    Sends UE4 Action- and Axis-mappings as RL-actions and receives back observations defined by ducandu plugin Observer
    objects placed in the Game
    (these could be camera pixels or other observations, e.g. an x/y/z position of some game actor).
"""
def __init__(
self,
host="localhost",
port=6025,
connect=True,
discretize_actions=False,
delta_time=1/60,
num_ticks=4
):
"""
Args:
host (str): The hostname to connect to.
port (int): The port to connect to.
connect (bool): Whether to connect already in this c'tor.
discretize_actions (bool): Whether to treat axis-mappings defined in UE4 game as discrete actions.
This would be necessary e.g. for agents that use q-networks where the output are q-values per discrete
state-action pair.
delta_time (float): The fake delta time to use for each single game tick.
num_ticks (int): The number of ticks to be executed in this step (each tick will repeat the same given
actions).
"""
RemoteEnvironment.__init__(self, host, port)
# RemoteEnvironment should send a name of the game upon connection.
self.game_name = None
self.action_space_desc = None
self.observation_space_desc = None
self.discretize_actions = discretize_actions
self.discretized_actions = None
self.delta_time = delta_time
self.num_ticks = num_ticks
# Our tcp messaging protocol to use (simple len-header + msgpack-numpy-body).
self.protocol = MsgPackNumpyProtocol()
if connect:
self.connect()
def __str__(self):
return "UE4Environment({}:{}{})".format(self.host, self.port, "[connected; {}]".
format(self.game_name) if self.socket else "")
def connect(self):
RemoteEnvironment.connect(self)
# Get action- and state-specs from our game.
self.protocol.send({"cmd": "get_spec"}, self.socket)
response = self.protocol.recv(self.socket)
if "observation_space_desc" not in response or "action_space_desc" not in response:
raise TensorForceError("ERROR in UE4Environment.connect: no observation- or action-space-desc sent "
"by remote server!")
# Observers
self.observation_space_desc = response["observation_space_desc"]
# Action-mappings
self.action_space_desc = response["action_space_desc"]
if self.discretize_actions:
self.discretize_action_space_desc()
# Invalidate our states- and actions caches.
if "states" in self.__dict__:
del self.__dict__["states"]
if "actions" in self.__dict__:
del self.__dict__["actions"]
def seed(self, seed=None):
if not seed:
seed = time.time()
# Send command.
self.protocol.send({"cmd": "seed", "value": int(seed)}, self.socket)
# Wait for response.
response = self.protocol.recv(self.socket)
if "status" not in response:
raise RuntimeError("Message without field 'status' received!")
elif response["status"] != "ok":
raise RuntimeError("Message 'status' for seed command is not 'ok' ({})!".format(response["status"]))
return seed
def reset(self):
"""
same as step (no kwargs to pass), but needs to block and return observation_dict
- stores the received observation in self.last_observation
"""
# Send command.
self.protocol.send({"cmd": "reset"}, self.socket)
# Wait for response.
response = self.protocol.recv(self.socket)
# Extract observations.
return self.extract_observation(response)
def set_state(self, setters, **kwargs):
if "cmd" in kwargs:
raise TensorForceError("Key 'cmd' must not be present in **kwargs to method `set`!")
# Forward kwargs to remote (only add command: set).
message = kwargs
message["cmd"] = "set"
# Sanity check given setters.
# Solve single tuple with prop-name and value -> should become a list (len=1) of this tuple.
if len(setters) >= 2 and not isinstance(setters[1], (list, tuple)):
setters = list((setters,))
for set_cmd in setters:
if not re.match(r'\w+(:\w+)*', set_cmd[0]):
raise TensorForceError("ERROR: property ({}) in setter-command does not match correct pattern!".
format(set_cmd[0]))
if len(set_cmd) == 3 and not isinstance(set_cmd[2], bool):
raise TensorForceError("ERROR: 3rd item in setter-command must be of type bool ('is_relative' flag)!")
message["setters"] = setters
self.protocol.send(message, self.socket)
# Wait for response.
response = self.protocol.recv(self.socket)
return self.extract_observation(response)
def execute(self, actions):
"""
Executes a single step in the UE4 game. This step may be comprised of one or more actual game ticks for all of
which the same given
action- and axis-inputs (or action number in case of discretized actions) are repeated.
UE4 distinguishes between action-mappings, which are boolean actions (e.g. jump or dont-jump) and axis-mappings,
which are continuous actions
like MoveForward with values between -1.0 (run backwards) and 1.0 (run forwards), 0.0 would mean: stop.
"""
action_mappings, axis_mappings = [], []
        # TODO: what if more than one action is passed?
# Discretized -> each action is an int
if self.discretize_actions:
# Pull record from discretized_actions, which will look like: [A, Right, SpaceBar].
combination = self.discretized_actions[actions]
# Translate to {"axis_mappings": [('A', 1.0), (Right, 1.0)], "action_mappings": [(SpaceBar, True)]}
for key, value in combination:
# Action mapping (True or False).
if isinstance(value, bool):
action_mappings.append((key, value))
# Axis mapping: always use 1.0 as value as UE4 already multiplies with the correct scaling factor.
else:
axis_mappings.append((key, value))
# Non-discretized: Each action is a dict of action- and axis-mappings defined in UE4 game's input settings.
# Re-translate Incoming action names into keyboard keys for the server.
elif actions:
try:
action_mappings, axis_mappings = self.translate_abstract_actions_to_keys(actions)
except KeyError as e:
raise TensorForceError("Action- or axis-mapping with name '{}' not defined in connected UE4 game!".
format(e))
# message = {"cmd": "step", 'delta_time': 0.33,
# 'actions': [('X', True), ('Y', False)],
# 'axes': [('Left': 1.0), ('Up': -1.0)]
# }
message = dict(
cmd="step",
delta_time=self.delta_time,
num_ticks=self.num_ticks,
actions=action_mappings,
axes=axis_mappings
)
self.protocol.send(message, self.socket)
# Wait for response (blocks).
response = self.protocol.recv(self.socket)
r = response.pop("_reward", 0.0)
is_terminal = response.pop("_is_terminal", False)
obs = self.extract_observation(response)
# Cache last observation
self.last_observation = obs
return obs, is_terminal, r
@cached_property
def states(self):
observation_space = {}
# Derive observation space from observation_space_desc.
if self.observation_space_desc:
for key, desc in self.observation_space_desc.items():
type_ = desc["type"]
if type_ == "Bool":
space = dict(type="float", shape=())
elif type_ == "IntBox":
space = dict(
type="float",
shape=desc.get("shape", ()),
min_value=desc.get("min", None),
max_value=desc.get("max", None)
)
elif type_ == "Continuous":
space = dict(
type="float",
shape=desc.get("shape", ()),
min_value=desc.get("min", None),
max_value=desc.get("max", None)
)
# TODO: Enums
else:
raise TensorForceError("Unsupported space type {} coming from Environment ("
"observation_space_desc)!".format(type_))
observation_space[key] = space
# Simplest case: if only one observer -> use that one.
if len(observation_space) == 1:
observation_space = list(observation_space.values())[0]
return observation_space
@cached_property
def actions(self):
# Derive action space from action_space_desc.
if not self.action_space_desc:
return {}
# Discretize all mappings. Pretend that each single mapping and combination thereof is its own discrete action.
# E.g. MoveForward=Up(1.0)+Down(-1.0) MoveRight=Right(1.0)+Left(-1.0) -> UpRight, UpLeft, Right, Left, Up, Down,
# DownRight, DownLeft, Idle
if self.discretize_actions:
return dict(type="int", num_actions=len(self.discretized_actions))
# Leave each mapping as independent action, which may be continuous and can be combined with all other actions
# in any way.
else:
action_space = {}
for action_name, properties in self.action_space_desc.items():
# UE4 action mapping -> bool
if properties["type"] == "action":
action_space[action_name] = dict(type="int", num_actions=2)
# UE4 axis mapping -> continuous (float) unless we have discretized axes
else:
min_ = 0.0
max_ = 0.0
for mapping in properties["keys"]:
if mapping[1] > max_:
max_ = mapping[1]
if mapping[1] < min_:
min_ = mapping[1]
action_space[action_name] = dict(type="float", shape=(), min_value=min_, max_value=max_)
return action_space
def translate_abstract_actions_to_keys(self, abstract):
"""
Translates a list of tuples ([pretty mapping], [value]) to a list of tuples ([some key], [translated value])
each single item in abstract will undergo the following translation:
Example1:
we want: "MoveRight": 5.0
possible keys for the action are: ("Right", 1.0), ("Left", -1.0)
result: "Right": 5.0 * 1.0 = 5.0
Example2:
we want: "MoveRight": -0.5
possible keys for the action are: ("Left", -1.0), ("Right", 1.0)
result: "Left": -0.5 * -1.0 = 0.5 (same as "Right": -0.5)
"""
# Solve single tuple with name and value -> should become a list (len=1) of this tuple.
if len(abstract) >= 2 and not isinstance(abstract[1], (list, tuple)):
abstract = list((abstract,))
# Now go through the list and translate each axis into an actual keyboard key (or mouse event/etc..).
actions, axes = [], []
for a in abstract:
# first_key = key-name (action mapping or discretized axis mapping) OR tuple (key-name, scale) (continuous
# axis mapping)
first_key = self.action_space_desc[a[0]]["keys"][0]
# action mapping
if isinstance(first_key, (bytes, str)):
actions.append((first_key, a[1]))
# axis mapping
elif isinstance(first_key, tuple):
axes.append((first_key[0], a[1] * first_key[1]))
else:
raise TensorForceError("action_space_desc contains unsupported type for key {}!".format(a[0]))
return actions, axes
def discretize_action_space_desc(self):
"""
Creates a list of discrete action(-combinations) in case we want to learn with a discrete set of actions,
but only have action-combinations (maybe even continuous) available from the env.
E.g. the UE4 game has the following action/axis-mappings:
```javascript
{
'Fire':
{'type': 'action', 'keys': ('SpaceBar',)},
'MoveRight':
{'type': 'axis', 'keys': (('Right', 1.0), ('Left', -1.0), ('A', -1.0), ('D', 1.0))},
}
```
-> this method will discretize them into the following 6 discrete actions:
```javascript
[
[(Right, 0.0),(SpaceBar, False)],
[(Right, 0.0),(SpaceBar, True)]
[(Right, -1.0),(SpaceBar, False)],
[(Right, -1.0),(SpaceBar, True)],
[(Right, 1.0),(SpaceBar, False)],
[(Right, 1.0),(SpaceBar, True)],
]
```
"""
# Put all unique_keys lists in one list and itertools.product that list.
unique_list = []
for nice, record in self.action_space_desc.items():
list_for_record = []
if record["type"] == "axis":
# The main key for this record (always the first one)
head_key = record["keys"][0][0]
# The reference value (divide by this one to get the others)
head_value = record["keys"][0][1]
# The zero key (idle action; axis scale=0.0)
list_for_record.append((head_key, 0.0))
set_ = set()
for key_and_scale in self.action_space_desc[nice]["keys"]:
# Build unique lists of mappings (each axis value should only be represented once).
if key_and_scale[1] not in set_:
list_for_record.append((head_key, key_and_scale[1] / head_value))
set_.add(key_and_scale[1])
else:
# Action-mapping
list_for_record = [(record["keys"][0], False), (record["keys"][0], True)]
unique_list.append(list_for_record)
def so(in_):
# in_ is List[Tuple[str,any]] -> sort by concat'd sequence of str(any's)
st = ""
for i in in_:
st += str(i[1])
return st
# Then sort and get the entire list of all possible sorted meaningful key-combinations.
combinations = list(itertools.product(*unique_list))
combinations = list(map(lambda x: sorted(list(x), key=lambda y: y[0]), combinations))
combinations = sorted(combinations, key=so)
# Store that list as discretized_actions.
self.discretized_actions = combinations
@staticmethod
def extract_observation(message):
if "obs_dict" not in message:
raise TensorForceError("Message without field 'obs_dict' received!")
ret = message["obs_dict"]
# Only one observer -> use that one (no dict of dicts).
if len(ret) == 1:
ret = list(ret.values())[0]
return ret
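# Connection sketch (hypothetical; assumes a UE4 game running the ducandu
# plugin is listening on localhost:6025):
#
#   env = UE4Environment(host="localhost", port=6025, connect=True,
#                        discretize_actions=True, delta_time=1/60, num_ticks=4)
#   env.seed(10)
#   first_obs = env.reset()
#   obs, is_terminal, reward = env.execute(actions=0)  # index into the discretized actions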
|
import glob, os
from variables import *
# Reads .wav files in training_data directory, processes them, and outputs a .tdata file
# with the aggregated training data that can be used by the model for training later on
def prepare_training_data():
print("==PREPARE TRAINING DATA==")
from training_data import TrainingData
training_data = TrainingData()
training_data.prepare()
training_data.save(in_models_dir=True)
# Trains Keras/Tensorflow model by loading .tdata file from disk, fitting, saving, and finally
# evaluating the resulting model accuracy
def train():
print("==TRAIN ALL MODELS==")
from model import Model
model = Model()
model.train()
model.save(in_models_dir=True)
Model.evaluate()
def evaluate():
print("==EVALUATE MODELS==")
from model import Model
Model.evaluate()
# Processes recordings in the recordings folder using the saved Keras model and outputs
# call labels in .csv format and .txt format (tab-delimited Audacity readable) into the results directory.
# A new results directory is created each time this is run, as a sub-directory within 'results'.
# Results directories are named according to the date and time of the run, like this:
# [YYYYMMDD]_[HHMMSS]_[recording filename]
# It will process .wav files in the recordings directory regardless of whether they have been
# processed before, but will store unique results files for each time processed
def process():
print("==PROCESS RECORDINGS==")
from scanner import Scanner
from exporter import Exporter
from recording import Recording
home_dir = os.getcwd()
os.chdir(Vars.RECORDINGS_DIR)
recordings = []
wavefiles = glob.glob('*.wav')
for wavefile in wavefiles:
recordings.append(Recording(wavefile))
os.chdir(home_dir)
print('processing the following recordings: ' + str(wavefiles))
model = Scanner().model
for recording in recordings:
print(' ')
print('processing ' + recording.file)
scanner = Scanner(preload=False)
scanner.model = model
exporter = Exporter()
scanner.process(recording)
exporter.process(recording)
print(' ')
# List all command line interface options
def shortcuts_all():
sc = [
('prepare training data', prepare_training_data),
('train models', train),
('process recordings', process),
('evaluate models', evaluate),
('exit', None)
]
return sc
# Controller function for the command line interface
def controller():
shortcuts = None
shortcuts = shortcuts_all()
while True:
print(' ')
print("==ANIMAL CALL DETECTION AND CLASSIFICATION (ACDC)==")
for i in range(len(shortcuts)):
print(str(i) + ') ' + shortcuts[i][0])
selection = input('enter number for the action you would like to perform: ')
print(' ')
selection = int(selection)
if shortcuts[selection][0] == 'exit':
break
shortcuts[selection][1]()
if __name__ == "__main__":
controller()
|
from stage import *
class Active(Stage):
def __init__(self,
activeFn,
inputNames,
outputDim,
defaultValue=0.0,
outputdEdX=True,
name=None):
Stage.__init__(self,
name=name,
inputNames=inputNames,
outputDim=outputDim,
defaultValue=defaultValue,
outputdEdX=outputdEdX)
self.activeFn = activeFn
def forward(self, X):
self.Y = self.activeFn.forward(X)
return self.Y
def backward(self, dEdY):
self.dEdW = 0
return self.activeFn.backward(dEdY, self.Y, 0)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import frappe
from datetime import datetime
__version__ = '0.0.1'
@frappe.whitelist(allow_guest=True)
def authenticate(user, password):
print "Esta ingresando", password, frappe.db.sql("""select company_name from `tabCompany` where company_name = 'Nodux'""", as_dict=1)
print "Mal ", frappe.db.sql("""select company_name from `tabCompany` where pass_auth = %s""", password, as_dict=1)
company = frappe.db.sql("""select company_name from `tabCompany` where pass_auth = %s""", password, as_dict=1)
c = 0
a = None
print "company", company
if company != []:
flag = '1'
flag_c = '0'
flag_a = '0'
c = company.formato
a = company.date_active
else:
print "Pasa 1.1"
flag = '0'
flag_c = '0'
flag_a = '0'
print "Pasa 1"
if c == 1:
flag_c = '1'
else:
flag_c = '0'
print "Pasa 2"
if a != None:
date= datetime.now()
limit= (date-a).days
if limit > 5:
flag_a = '1'
else:
flag_a = '0'
return flag, flag_c, flag_a
#return flag, flag_c, flag_a
|
from django.db import models
# Create your models here.
class song(models.Model):
song = models.CharField(max_length=122)
|
# -*- coding: utf-8 -*-
"""Test constants for BEL Commons."""
import os
__all__ = [
'dir_path',
]
dir_path = os.path.dirname(os.path.realpath(__file__))
|
import os
def dot2svg(filename, engine='dot'):
data = {
'engine': engine,
'file': filename.replace(".dot", "")
}
command = "{engine} -Tsvg {file}.dot > {file}.svg".format(**data)
print(command)
os.system(command)
def browser(filename):
data = {
'engine': 'python -m webbrowser',
'file': filename
}
if 'file:' not in filename and 'http' not in filename:
os.system("python -m webbrowser -t file:///{file}".format(**data))
else:
os.system("python -m webbrowser -t {file}".format(**data))
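# Usage sketch (hypothetical file names):
#
#   dot2svg('pipeline.dot')   # runs: dot -Tsvg pipeline.dot > pipeline.svg
#   browser('pipeline.svg')   # opens file:///pipeline.svg in the default browser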
|
import logging
import werkzeug.urls
import urlparse
import urllib2
import simplejson
import openerp
from openerp.addons.auth_signup.res_users import SignupError
from openerp.osv import osv, fields
from openerp import SUPERUSER_ID
_logger = logging.getLogger(__name__)
class res_users(osv.Model):
_inherit = 'res.users'
_columns = {
'oauth_provider_id': fields.many2one('auth.oauth.provider', 'OAuth Provider'),
'oauth_uid': fields.char('OAuth User ID', help="Oauth Provider user_id", copy=False),
'oauth_access_token': fields.char('OAuth Access Token', readonly=True, copy=False),
}
_sql_constraints = [
('uniq_users_oauth_provider_oauth_uid', 'unique(oauth_provider_id, oauth_uid)', 'OAuth UID must be unique per provider'),
]
def _auth_oauth_rpc(self, cr, uid, endpoint, access_token, context=None):
params = werkzeug.url_encode({'access_token': access_token})
if urlparse.urlparse(endpoint)[4]:
url = endpoint + '&' + params
else:
url = endpoint + '?' + params
f = urllib2.urlopen(url)
response = f.read()
return simplejson.loads(response)
def _auth_oauth_validate(self, cr, uid, provider, access_token, context=None):
""" return the validation data corresponding to the access token """
p = self.pool.get('auth.oauth.provider').browse(cr, uid, provider, context=context)
validation = self._auth_oauth_rpc(cr, uid, p.validation_endpoint, access_token)
if validation.get("error"):
raise Exception(validation['error'])
if p.data_endpoint:
data = self._auth_oauth_rpc(cr, uid, p.data_endpoint, access_token)
validation.update(data)
return validation
def _auth_oauth_signin(self, cr, uid, provider, validation, params, context=None):
""" retrieve and sign in the user corresponding to provider and validated access token
:param provider: oauth provider id (int)
:param validation: result of validation of access token (dict)
:param params: oauth parameters (dict)
:return: user login (str)
:raise: openerp.exceptions.AccessDenied if signin failed
This method can be overridden to add alternative signin methods.
"""
try:
oauth_uid = validation['user_id']
user_ids = self.search(cr, uid, [("oauth_uid", "=", oauth_uid), ('oauth_provider_id', '=', provider)])
if not user_ids:
raise openerp.exceptions.AccessDenied()
assert len(user_ids) == 1
user = self.browse(cr, uid, user_ids[0], context=context)
user.write({'oauth_access_token': params['access_token']})
return user.login
except openerp.exceptions.AccessDenied, access_denied_exception:
if context and context.get('no_user_creation'):
return None
state = simplejson.loads(params['state'])
token = state.get('t')
oauth_uid = validation['user_id']
email = validation.get('email', 'provider_%s_user_%s' % (provider, oauth_uid))
name = validation.get('name', email)
values = {
'name': name,
'login': email,
'email': email,
'oauth_provider_id': provider,
'oauth_uid': oauth_uid,
'oauth_access_token': params['access_token'],
'active': True,
}
try:
_, login, _ = self.signup(cr, uid, values, token, context=context)
return login
except SignupError:
raise access_denied_exception
def auth_oauth(self, cr, uid, provider, params, context=None):
# Advice by Google (to avoid Confused Deputy Problem)
# if validation.audience != OUR_CLIENT_ID:
# abort()
# else:
# continue with the process
access_token = params.get('access_token')
validation = self._auth_oauth_validate(cr, uid, provider, access_token)
# required check
if not validation.get('user_id'):
raise openerp.exceptions.AccessDenied()
# retrieve and sign in user
login = self._auth_oauth_signin(cr, uid, provider, validation, params, context=context)
if not login:
raise openerp.exceptions.AccessDenied()
# return user credentials
return (cr.dbname, login, access_token)
def check_credentials(self, cr, uid, password):
try:
return super(res_users, self).check_credentials(cr, uid, password)
except openerp.exceptions.AccessDenied:
res = self.search(cr, SUPERUSER_ID, [('id', '=', uid), ('oauth_access_token', '=', password)])
if not res:
raise
#
|
# -*- coding: utf-8 -*-
#############################################################################
#
# Copyright © Dragon Dollar Limited
# contact: contact@dragondollar.com
#
# This software is a collection of webservices designed to provide a secure
# and scalable framework to build e-commerce websites.
#
# This software is governed by the CeCILL-B license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/ or redistribute the software under the terms of the CeCILL-B
# license as circulated by CEA, CNRS and INRIA at the following URL
# " http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided only
# with a limited warranty and the software's author, the holder of the
# economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards their
# requirements in conditions enabling the security of their systems and/or
# data to be ensured and, more generally, to use and operate it in the
# same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL-B license and that you accept its terms.
#
#############################################################################
import ais
import os
import sys
import time
sys.path.append('.')
import settings
from B2SUtils import db_utils
from B2SUtils.db_utils import get_conn
from B2SUtils.db_utils import init_db_pool
from common.constants import VESSEL_STATUS
nav_status_mapping = {
0: VESSEL_STATUS.UNDER_WAY_USING_ENGINE,
1: VESSEL_STATUS.AT_ANCHOR,
2: VESSEL_STATUS.NOT_UNDER_COMMAND,
3: VESSEL_STATUS.SPECIAL_POS_REPORT,
5: VESSEL_STATUS.MOORED,
}
ship_type_mapping = {
69: 'Passenger, No additional information',
70: 'Cargo, all ships of this type',
71: 'Cargo, Hazardous category A',
72: 'Cargo, Hazardous category B',
73: 'Cargo, Hazardous category C',
74: 'Cargo, Hazardous category D',
79: 'Cargo, No additional information'
}
def extract_advdm_info(msgs, msg_type):
aismsg = ''
for idx, msg in enumerate(msgs):
nmeamsg = msg.split(',')
if nmeamsg[0] != '!AIVDM':
raise Exception('Wrong message: %s' % msg)
assert int(nmeamsg[1]) == len(msgs)
assert int(nmeamsg[2]) == idx + 1
aismsg += nmeamsg[5]
checksum = nmeamsg[6]
return decode_advdm(aismsg, msg_type)
def decode_advdm(aismsg, msg_type):
    bv = ais.binary.ais6tobitvec(aismsg)
    # the getattr default must be the decoder module itself, not its name as a string
    return getattr(ais, 'ais_msg_%s' % msg_type, ais.ais_msg_1).decode(bv)
def extract_extra_info(msg):
info = {}
msg, checksum = msg.split('*')
for one in msg.split(','):
if one.startswith('s:'):
info['source'] = one[2:]
elif one.startswith('c:'):
info['time'] = format_epoch_time(int(one[2:]))
elif one.startswith('g:'):
info['index'], info['total'], info['id'] = one[2:].split('-')
info['index'] = int(info['index'])
info['total'] = int(info['total'])
return info
def format_epoch_time(seconds, format='%Y-%m-%d %H:%M'):
return time.strftime(format, time.gmtime(seconds))
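# Worked example (hypothetical metadata block preceding the !AIVDM payload):
#
#   extract_extra_info("s:station1,c:1420070400,g:1-2-42*5A")
#   returns {'source': 'station1', 'time': '2015-01-01 00:00',
#            'index': 1, 'total': 2, 'id': '42'}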
def process_file(filename):
with open(filename) as f:
init_db_pool(settings.DATABASE)
line = f.readline()
while line:
data = {}
advdm_msgs = []
while True:
_, extra_msg, advdm_msg = line.split('\\')
info = extract_extra_info(extra_msg)
data.update(info)
advdm_msgs.append(advdm_msg)
if 'total' in info and info['index'] < info['total']:
line = f.readline()
else:
break
data['ais_pos'] = extract_advdm_info(advdm_msgs, 1)
if len(advdm_msgs) > 1:
data['ais_ship'] = extract_advdm_info(advdm_msgs, 5)
with get_conn() as conn:
try:
save_vessel_data(conn, data)
except Exception, e:
conn.rollback()
raise
# next
line = f.readline()
def save_vessel_data(conn, data):
time = data['time']
ais_pos = data['ais_pos']
mmsi = str(ais_pos['UserID'])
lon = ais_pos['longitude']
lat = ais_pos['latitude']
heading = ais_pos['TrueHeading']
speed = ais_pos['SOG']
nav_status = nav_status_mapping.get(ais_pos['NavigationStatus'],
ais_pos['NavigationStatus'])
vessel_values = {
'mmsi': mmsi,
}
if 'ais_ship' in data:
ais_ship = data['ais_ship']
vessel_values.update({
'name': ais_ship['name'].strip(),
'imo': ais_ship['IMOnumber'],
'cs': ais_ship['callsign'],
'type': ship_type_mapping.get(ais_ship['shipandcargo'],
ais_ship['shipandcargo']),
})
vessels = db_utils.select(conn, "vessel",
columns=("id", ),
where={'mmsi': mmsi},
limit=1)
if len(vessels) > 0:
id_vessel = vessels[0][0]
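        # NOTE: this update call passes no filter; if db_utils.update accepts a
        # where= argument (as db_utils.select does above), restricting it to
        # where={'mmsi': mmsi} would avoid rewriting every vessel row.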
db_utils.update(conn, "vessel", values=vessel_values)
else:
id_vessel = db_utils.insert(conn, "vessel",
values=vessel_values, returning='id')[0]
pos_values = {
'id_vessel': id_vessel,
'location': '',
'longitude': lon,
'latitude': lat,
'heading': heading,
'speed': speed,
'time': time,
'status': nav_status,
}
existings = db_utils.select(conn, "vessel_position",
columns=("id", ),
where={'id_vessel': id_vessel,
'time': time},
limit=1)
if len(existings) == 0:
print "inserting: ", pos_values
db_utils.insert(conn, "vessel_position", values=pos_values)
else:
print "existing: ", pos_values
if __name__ == '__main__':
if len(sys.argv) != 2:
print "missing NMEA filename"
sys.exit(1)
else:
filename = sys.argv[1]
if not os.path.isfile(filename):
print "wrong NMEA filename: ", filename
sys.exit(1)
process_file(filename)
|
from register import find_max_correlation, extract_patches, Registrator
from numpy import testing
import numpy as np
from skimage.color import rgb2gray
class TestRegister():
def setup(self):
self.image = np.zeros((8, 8))
self.image[3:5, 3:5] = 1
self.template = np.zeros((3, 3))
self.template[:2, :2] = 0.5
self.colour = np.tile(self.image[..., np.newaxis], [1, 1, 3])
patch = np.zeros((2, 2, 3))
patches = [patch.copy(), patch.copy(), patch.copy()]
patches[0][1, 1, :] = 1
patches[1][0, 0, :] = 1
patches[2][0, 1, :] = 1
self.patches = list(map(rgb2gray, patches))
self.windows = np.array([[2, 2], [4, 4],
[4, 4], [6, 6],
[4, 2], [6, 4]])
def test_correlation(self):
"""Test the correlation template returns the correct location"""
point = find_max_correlation(self.image, self.template)
testing.assert_equal(point, (3, 3))
def test_window_no_pad(self):
""" Test the window extraction grabs correct location """
extracted = extract_patches(self.colour, self.windows)
print(extracted)
testing.assert_equal(extracted, self.patches)
def test_window_with_pad(self):
""" Test the padding creates the correct size patch """
extracted, windows = extract_patches(self.colour, self.windows, pad=2)
testing.assert_equal(extracted[0].shape, [6, 6])
def test_window_edges(self):
""" Test that the extracted patches clip at the boundary. """
extracted, windows = extract_patches(self.colour, self.windows, pad=10)
testing.assert_equal(extracted[0].shape, [8, 8])
def test_Registrator(self):
registerer = Registrator(self.windows, self.colour, pad=1)
matched, tform = registerer(self.colour)
testing.assert_allclose(matched, self.colour, rtol=1e-6, atol=1e-6)
if __name__ == "__main__":
testing.run_module_suite()
|
# coding: utf-8
# ## Intro
# This notebook aggregates the environmental data by event whereas before we were looking at the data by date.
# ### Calculate number of locations that flooded
# In[1]:
get_ipython().magic(u'matplotlib inline')
import os
import sys
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
from focus_intersection import subset_floods, flood_df, subset_locations
from main_db_script import data_dir, db_filename
from hr_db_scripts.main_db_script import get_table_for_variable_code, get_db_table_as_df
import pandas as pd
import numpy as np
import re
import sqlite3
import math
con = sqlite3.connect(db_filename)
pd.options.mode.chained_assignment = None # default='warn'
# In this case we are just focusing on the subset of points that is in the downtown area, hence the "subset_floods."
# In[2]:
flood_events = pd.read_csv('flood_events.csv')
flood_events['event_date'] = pd.to_datetime(flood_events['event_date'])
flood_events['event_name'] = flood_events['event_name'].str.strip()
flood_events['dates'] = pd.to_datetime(flood_events['dates'])
# In[3]:
grouped = flood_events.groupby(['event_date', 'event_name'])
# Get the number of dates the event spanned, the number of unique locations that were flooded during the event and the total number of locations flooded on all event dates.
# In[4]:
event_total_flooded = grouped.size()
event_dates = grouped['dates'].unique()
num_event_dates = grouped['dates'].nunique()
num_locations = grouped['location'].nunique()
# In[5]:
event_df = pd.concat([event_dates, event_total_flooded, num_event_dates, num_locations], axis=1)
event_df.columns = ['dates', 'num_flooded', 'num_dates', 'num_locations']
# In[6]:
event_df.tail()
# ### Where num_flooded does not equal num_locations _investigation_
# Let's check out one of the events where num_flooded is greater than num_locations. I would expect this to mean that one location was flooded on multiple days of the same event. But for '2014-07-24' the event is only on one day, so that isn't what I expected.
# In[7]:
idx = pd.IndexSlice
event_df.sort_index(inplace=True)
event_df.loc[idx['2014-07-24', :], :]
# In[8]:
fl_724 = flood_events[flood_events['dates'] == '2014-07-24']
fl_724[fl_724['location'].duplicated(keep=False)]
# So _here's_ what is happening. The location name is the same in two rows but there are two different event types: "flooded street" and "flooded underpass."
# Now that I think about it, that may explain all the differences between the num_locations and num_flooded columns. Let's try another one, this time one that spans more than one day: Irene.
# In[9]:
event_df.sort_index(inplace=True)
event_df.loc[idx[:, 'Irene-2011-08-27'], :]
# In[10]:
irene = flood_events[flood_events['event_name'].str.contains('Irene')].sort_values('location')
irene[irene['location'].duplicated(keep=False)]
# Looks like that's it. Which is not what I was hoping to show. I was thinking that would tell me something about the variety of locations that were flooded over the days, but that's not the case.
# Let's try this one more time with Hurricane Joaquin
# In[11]:
jqn = flood_df[flood_df['event'].str.contains('Joaquin')]
# In[12]:
jqn[jqn['location'].duplicated(keep=False)]
# So that is interesting. Even though hurricanes Matthew and Joaquin spanned seven and six days respectively, none
# of the flooded locations were reported twice for one event. Very interesting. So to me, this means we really should be looking at these things by 'event' and not by '\_date'. It also means that the num_locations col doesn't add any information. So I'm going to delete that.
# In[13]:
del event_df['num_locations']
# ### Looking into date in "event" column versus dates in "\_date" column
# Sometimes the date listed in the "event" column is quite different from the date(s) listed in the "\_date" column. A good example of this is the event "unnamed (2/25/2016)", where the dates in the "\_date" column are 2016-05-05, 2016-05-06, and 2016-05-31.
# In[14]:
flood_df[flood_df['event'].str.contains('2/25/2016')]
# So to look at this more closely, I will calculate the difference in days between the "event" column date and the dates in the "\_date" column.
# When I tried to calculate the time between the 'event_date' and the 'dates' to see how far off these were, I found that two events had the same 'event_date'. So I think it's appropriate to drop the 'unnamed' one, based on the fact that the dates in the "\_date" column are further from the "event_date".
# In[15]:
event_df.sort_index(inplace=True)
event_df.loc['2016-07-30']
# In[16]:
i = event_df.loc['2016-07-30', 'unnamed-2016-07-30'].name
event_df.drop(i, inplace=True)
i = event_df.loc['2014-09-13', "NAPSG-2014-09-13"].name
event_df.drop(i, inplace=True)
# In[17]:
event_df.reset_index(inplace=True)
event_df.set_index('event_date', inplace=True)
event_df
# In[18]:
days_away = []
max_days = []
for d in event_df.index:
try:
ar = event_df.loc[d, 'dates'] - np.datetime64(d)
ar = ar.astype('timedelta64[D]')
days = ar / np.timedelta64(1, 'D')
days_away.append(days)
max_days.append(days.max())
except ValueError:
print d
event_df['days_away_from_event'] = days_away
event_df['max_days_away'] = max_days
print event_df.shape
event_df.head()
# I don't trust the events that have higher days away, so I will disregard any event with a "max_days_away" greater than 10. Five events fall under this category.
# In[19]:
# event_df = event_df[event_df['max_days_away']<10]
print event_df.shape
event_df
# In[20]:
feature_df = get_db_table_as_df('nor_daily_observations', dbfilename=db_filename)
feature_df = pd.read_csv('nor_daily_observations.csv')
feature_df['Datetime'] = pd.to_datetime(feature_df['Datetime'])
feature_df.set_index('Datetime', inplace = True)
feature_df.head()
# ### Combine env. data with event data
# In[21]:
def add_event_data(evnt_data, evnt_df, col_name, func, idx):
res = func(evnt_data[col_name])
evnt_df.loc[idx, col_name] = res
return evnt_df
# Now for each event we get an aggregate of the different variables for the given dates
# In[22]:
event_df = pd.concat([event_df, pd.DataFrame(columns=feature_df.columns)])
for ind in event_df.index:
# get the dates of the event and include the date in the "event" column
ds = event_df.loc[ind, 'dates']
ind = np.datetime64(ind)
ds = np.append(ds, ind) if not ind in ds else ds
event_data = feature_df.loc[ds]
# combining data on event scale
# get max over the event for these features
max_cols = ['rhrmx', 'r15mx', 'wind_vel_daily_avg', 'wind_vel_hourly_max_avg', 'ht', 'hht', 'lt', 'llt']
# get mean over the event for these features
mean_cols = ['W', 'td', 'gw', 'AWDR', 'AWND']
# get sum over the event for these features
sum_cols = ['rd']
for feat in feature_df.columns:
if any(feat.startswith(col) for col in max_cols):
event_df = add_event_data(event_data, event_df, feat, np.max, ind)
elif any(feat.startswith(col) for col in mean_cols):
event_df = add_event_data(event_data, event_df, feat, np.mean, ind)
elif any(feat.startswith(col) for col in sum_cols):
event_df = add_event_data(event_data, event_df, feat, np.sum, ind)
elif feat.startswith('r3d'):
event_df.loc[ind, feat] = event_data.loc[ind, feat]
elif re.search(re.compile(r'r\w{2}-\d+_td-\d+'), feat):
feat_spl = feat.split('-')
var = '{}mx-{}'.format(feat_spl[0], feat_spl[1].split('_')[0])
max_ind = event_data[var].idxmax()
if isinstance(max_ind, float):
if math.isnan(max_ind):
event_df.loc[ind, feat] = np.nan
else:
val = event_data.loc[max_ind, feat]
event_df.loc[ind, feat] = event_data.loc[max_ind, feat]
event_df.head()
# In[23]:
event_df.shape
# In[24]:
event_df.head()
# In[25]:
cols = event_df.columns.tolist()
lft_cols = ['event_name', 'dates', 'num_flooded', 'days_away_from_event', 'max_days_away', 'num_dates']
lft_cols.reverse()
for c in lft_cols:
cols.insert(0, cols.pop(cols.index(c)))
event_df = event_df.loc[:, cols]
event_df_for_storage = event_df.reset_index()
event_df_for_storage['dates'] = event_df_for_storage['dates'].apply(str)
event_df_for_storage['days_away_from_event'] = event_df_for_storage['days_away_from_event'].apply(str)
event_df_for_storage.rename(columns={'index':'event_date'}, inplace=True)
event_df_for_storage.head()
# In[26]:
event_df_for_storage.to_csv('{}event_data.csv'.format(data_dir), index=False)
event_df_for_storage.to_sql(name='event_data', con=con, if_exists='replace', index=False)
# ### Combining with the non-flooding event data
# First we have to combine all the dates in the "dates" column of the event_df into one array so we can filter those out of the overall dataset.
# In[27]:
flooded_dates = [np.datetime64(i) for i in event_df.index]
flooded_dates = np.array(flooded_dates)
fl_event_dates = np.concatenate(event_df['dates'].tolist())
all_fl_dates = np.concatenate([fl_event_dates, flooded_dates])
# In[28]:
non_flooded_records = feature_df[feature_df.index.isin(all_fl_dates) != True]
non_flooded_records['num_flooded'] = 0
non_flooded_records['flooded'] = False
non_flooded_records['event_name'] = np.nan
non_flooded_records['event_date'] = non_flooded_records.index
non_flooded_records.reset_index(drop=True, inplace=True)
non_flooded_records.head()
# Combine with flooded events
# In[29]:
event_df.reset_index(inplace=True)
flooded_records = event_df
flooded_records['event_date'] = event_df['index']
flooded_records['flooded'] = True
flooded_records.head()
# In[30]:
reformat = pd.concat([flooded_records, non_flooded_records], join='inner')
reformat.reset_index(inplace=True, drop=True)
reformat.head()
# In[31]:
reformat.to_sql(name="for_model", con=con, index=False, if_exists='replace')
# ## Make average table
# In[32]:
cols = pd.Series(feature_df.columns)
cols_splt = cols.str.split('-', expand=True)
# do this to make sure the tide-at-time-of-max columns tied to the hourly and 15-min max rains get unique col names
for a in cols_splt.iterrows():
if a[1].str.contains('\d_td').sum() == 1:
cols_splt.loc[a[0], 0] += "_td"
col_vars = cols_splt[0].unique()
col_vars
# In[33]:
avdf = pd.DataFrame()
for v in col_vars:
if v not in ['r15_td', 'rhr_td']:
avdf[v] = reformat[[a for a in feature_df.columns if a.startswith(v)]].mean(axis=1)
else:
avdf[v] = reformat[cols[cols.str.contains(r'{}-\d+_td-\d+'.format(v.split('_')[0]))]].mean(axis=1)
# In[34]:
avdf = pd.concat([reformat[['event_date', 'event_name', 'num_flooded']], avdf], axis=1)
# In[35]:
avdf.head()
# In[36]:
avdf['ht'] = np.where(avdf['ht'].isnull(), avdf['hht'], avdf['ht'])
avdf['hht'] = np.where(avdf['hht'].isnull(), avdf['ht'], avdf['hht'])
avdf['lt'] = np.where(avdf['lt'].isnull(), avdf['llt'], avdf['lt'])
avdf['llt'] = np.where(avdf['llt'].isnull(), avdf['lt'], avdf['llt'])
avdf['WGF6'] = np.where(avdf['WGF6'].isnull(), avdf['AWND'], avdf['WGF6'])
# In[37]:
avdf.to_sql(name='for_model_avgs', con=con, index=False, if_exists='replace')
avdf.to_csv('for_model_avgs.csv')
# In[38]:
avdf
|
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for audio_record."""
import numpy as np
import tensorflow as tf
import unittest
from tensorflow_lite_support.python.task.audio.core import audio_record
_mock = unittest.mock
_CHANNELS = 2
_SAMPLING_RATE = 16000
_BUFFER_SIZE = 15600
class AudioRecordTest(tf.test.TestCase):
def setUp(self):
super().setUp()
# Mock sounddevice.InputStream
with _mock.patch("sounddevice.InputStream") as mock_input_stream_new_method:
self.mock_input_stream = _mock.MagicMock()
mock_input_stream_new_method.return_value = self.mock_input_stream
self.record = audio_record.AudioRecord(_CHANNELS, _SAMPLING_RATE,
_BUFFER_SIZE)
# Save the initialization arguments of InputStream for later assertion.
_, self.init_args = mock_input_stream_new_method.call_args
def test_init_args(self):
# Assert parameters of InputStream initialization
self.assertEqual(
self.init_args["channels"], _CHANNELS,
"InputStream's channels doesn't match the initialization argument.")
self.assertEqual(
self.init_args["samplerate"], _SAMPLING_RATE,
"InputStream's samplerate doesn't match the initialization argument.")
def test_life_cycle(self):
# Assert start recording routine.
self.record.start_recording()
self.mock_input_stream.start.assert_called_once()
# Assert stop recording routine.
self.record.stop()
self.mock_input_stream.stop.assert_called_once()
def test_read_succeeds_with_valid_sample_size(self):
callback_fn = self.init_args["callback"]
# Create dummy data to feed to the AudioRecord instance.
chunk_size = int(_BUFFER_SIZE * 0.5)
input_data = []
for _ in range(3):
dummy_data = np.random.rand(chunk_size, _CHANNELS).astype(float)
input_data.append(dummy_data)
callback_fn(dummy_data)
# Assert read data of a single chunk.
recorded_audio_data = self.record.read(chunk_size)
self.assertAllClose(recorded_audio_data, input_data[-1])
# Assert read all data in buffer.
recorded_audio_data = self.record.read(chunk_size * 2)
print(input_data[-2].shape)
expected_data = np.concatenate(input_data[-2:])
self.assertAllClose(recorded_audio_data, expected_data)
def test_read_fails_with_invalid_sample_size(self):
callback_fn = self.init_args["callback"]
# Create dummy data to feed to the AudioRecord instance.
dummy_data = np.zeros([_BUFFER_SIZE, 1], dtype=float)
callback_fn(dummy_data)
# Assert exception if read too much data.
with self.assertRaises(ValueError):
self.record.read(_BUFFER_SIZE + 1)
if __name__ == "__main__":
tf.test.main()
|
import crypto_tools
def caesar_little_doc():
return "encrypt/decrypt using caesar algo"
def caesar_full_doc():
return """
    Just the Caesar algorithm.
    Uses the dictionary from alphabets.json and moves characters
    left and right.
"""
def caesar_processing(data, lang, key):
caesar_dict = crypto_tools.get_param_json_data("alphabets.json", lang)
result = ""
data = crypto_tools.utf_decoder(data)
for char in data:
try:
index = caesar_dict.index(char)
except ValueError:
err_str = "There is no " + char + " character in alphabet"
raise ValueError(err_str)
index = (index + key) % len(caesar_dict)
result = result + caesar_dict[index]
return result
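# A minimal illustration of the shift arithmetic above (assuming the language
# entry in alphabets.json is the 26-letter Latin alphabet; the actual file is
# not inspected here):
#   key = 3:  'a' -> index 0  -> (0 + 3) % 26  -> 'd'
#             'y' -> index 24 -> (24 + 3) % 26 -> index 1 -> 'b'
#   decryption simply negates the key, as done in caesar() below.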
@crypto_tools.file_manipulation()
def caesar(data):
lang = crypto_tools.cterm('input', 'Data language: ', 'ans')
key = int(crypto_tools.cterm('input', 'Enter key(int): ', 'ans'))
encrypt = crypto_tools.cterm('input',
'You want encrypt or decrypt: ', 'ans')
if encrypt == "decrypt":
key = key * -1
elif encrypt != "encrypt":
raise ValueError("Incorrect type")
return caesar_processing(data, lang, key)
caesar.little_doc = caesar_little_doc
caesar.full_doc = caesar_full_doc
caesar.processor = caesar_processing
|
import math
r = float(input())
area = math.pi*r*r
perimeter = 2*math.pi*r
print('Area = ' + str(area) +
'\nPerimeter = ' + str(perimeter))
|
DISPLAY_DIR = '.pypastry'
DISPLAY_PATH = DISPLAY_DIR + '/display.txt'
RESULTS_PATH = 'results'
REPO_PATH = '.'
|
#------------------------------------#
# Author: Yueh-Lin Tsou #
# Update: 7/17/2019 #
# E-mail: hank630280888@gmail.com #
#------------------------------------#
"""------------------
- Image Contours
------------------"""
# Import OpenCV Library, numpy and command line interface
import cv2
import numpy as np
import argparse
from matplotlib import pyplot as plt
# ------------------- Function to draw contour points ------------------- #
def draw_points(image, contours):
temp_Image = image.copy()
for i in range(len(contours)):
for p in range(len(contours[i])):
cv2.circle(temp_Image,(contours[i][p][0][0], contours[i][p][0][1]), 2, (255, 0, 0), -1)
return temp_Image
# ------------------- Function to find image contours ------------------- #
def Image_Contour(image):
# convert to grayscale image
gray_img = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
# apply threshold to the image
ret,thresh = cv2.threshold(gray_img,250,255,cv2.THRESH_BINARY_INV)
# find contours
_, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
contour_img_1 = draw_points(image, contours)
_, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
contour_img_2 = draw_points(image, contours)
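    # Note: the three-value unpacking above assumes the OpenCV 3.x API, where
    # cv2.findContours returns (image, contours, hierarchy); OpenCV 4.x returns
    # only (contours, hierarchy). CHAIN_APPROX_SIMPLE keeps only the end points
    # of straight segments, while CHAIN_APPROX_NONE keeps every boundary point,
    # which is why the third plot shows many more contour points drawn.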
# draw contours
contour_img = cv2.drawContours(image, contours, -1, (255,0,0), 3)
# show result
plt.subplot(131),plt.imshow(cv2.cvtColor(contour_img, cv2.COLOR_BGR2RGB))
plt.title('draw Image'), plt.xticks([]), plt.yticks([])
plt.subplot(132),plt.imshow(cv2.cvtColor(contour_img_1, cv2.COLOR_BGR2RGB))
plt.title('simple Image'), plt.xticks([]), plt.yticks([])
plt.subplot(133),plt.imshow(cv2.cvtColor(contour_img_2, cv2.COLOR_BGR2RGB))
plt.title('contours Image'), plt.xticks([]), plt.yticks([])
plt.show()
# -------------------------- main -------------------------- #
if __name__ == '__main__':
# read one input from terminal
# (1) command line >> python Image_Contours.py -i shapes.png
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="Path to the input image")
args = vars(ap.parse_args())
# Read image
image = cv2.imread(args["image"])
## Functions
Image_Contour(image)
# Reference:
# Website: OpenCV-Python Document
# Link: https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_pyramids/py_pyramids.html
|
# Copyright (c) 2020 fortiss GmbH
#
# Authors: Julian Bernhard, Klemens Esterle, Patrick Hart and
# Tobias Kessler
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
from bark.core.world.agent import Agent
from bark.core.world import World
from bark.core.world.map import MapInterface
from bark.runtime.commons.parameters import ParameterServer
from bark.runtime.commons.xodr_parser import XodrParser
import copy
import os
from pathlib import Path
# Module variable to maintain map directory
__MAPFILE_DIRECTORY = None
def SetMapfileDirectory(dir):
global __MAPFILE_DIRECTORY
__MAPFILE_DIRECTORY = dir
def GetMapfileDirectory():
global __MAPFILE_DIRECTORY
return __MAPFILE_DIRECTORY
class Scenario:
def __init__(self,
agent_list=None,
eval_agent_ids=None,
map_file_name=None,
json_params=None,
map_interface=None):
self._agent_list = agent_list or []
self._eval_agent_ids = eval_agent_ids or []
self._map_file_name = map_file_name
self._json_params = json_params
self._map_interface = map_interface
@property
def map_file_name(self):
return self._map_file_name
@property
def full_map_file_name(self):
if GetMapfileDirectory():
return os.path.join(GetMapfileDirectory(), self._map_file_name)
return self._map_file_name
@property
def json_params(self):
return self._json_params
@property
def eval_agent_ids(self):
return self._eval_agent_ids
@property
def map_interface(self):
return self._map_interface
@map_interface.setter
def map_interface(self, map_interface):
self._map_interface = map_interface
def GetWorldState(self):
"""get initial world state of scenario to start simulation from here
Returns:
[bark.core.world.World]
"""
return self._build_world_state()
def copy(self):
return Scenario(agent_list=copy.deepcopy(self._agent_list),
eval_agent_ids=self._eval_agent_ids.copy(),
map_file_name=self._map_file_name,
json_params=self._json_params.copy(),
map_interface=self._map_interface)
def _build_world_state(self):
param_server = ParameterServer(json=self._json_params)
world = World(param_server)
if self._map_interface is None:
self.CreateMapInterface(self.full_map_file_name)
world.SetMap(self._map_interface)
else:
world.SetMap(self._map_interface)
for agent in self._agent_list:
agent.GenerateRoadCorridor(self._map_interface)
world.AddAgent(agent)
world.UpdateAgentRTree()
return world
def __getstate__(self):
odict = self.__dict__.copy()
del odict['_map_interface']
return odict
def __setstate__(self, sdict):
sdict['_map_interface'] = None
self.__dict__.update(sdict)
# TODO(@hart): should be a commons function
def CreateMapInterface(self, map_file_name):
map_file_load_test = Path(map_file_name)
if map_file_load_test.is_file():
xodr_parser = XodrParser(map_file_name)
else:
print("Searching for map file {}".format(map_file_name))
objects_found = sorted(Path().rglob(map_file_name))
if len(objects_found) == 0:
raise ValueError("No Map found")
elif len(objects_found) > 1:
raise ValueError("Multiple Maps found")
else:
xodr_parser = XodrParser(objects_found[0].as_posix())
map_interface = MapInterface()
map_interface.SetOpenDriveMap(xodr_parser.map)
self._map_interface = map_interface
def GetDatasetScenarioDescription(self):
# only relevant for scenarios from dataset
try:
track_id_ego = self.eval_agent_ids[0]
track_file_name = self.json_params["track_file"]
dataset_scenario_desc = {
'TrackIdEgo': track_id_ego, 'TrackFileName': track_file_name}
except:
dataset_scenario_desc = {}
return dataset_scenario_desc
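# A minimal usage sketch (hypothetical file names and parameters; assumes an
# agent list built elsewhere with the BARK runtime helpers):
#
#   SetMapfileDirectory("/path/to/maps")
#   scenario = Scenario(agent_list=my_agents,
#                       eval_agent_ids=[0],
#                       map_file_name="some_map.xodr",
#                       json_params={})
#   world = scenario.GetWorldState()   # builds the map interface lazily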
|
MAP_HEIGHT_MIN = 20
MAP_HEIGHT_MAX = 50
MAP_WIDTH_MIN = 20
MAP_WIDTH_MAX = 50
MAP_KARBONITE_MIN = 0
MAP_KARBONITE_MAX = 50
ASTEROID_ROUND_MIN = 10
ASTEROID_ROUND_MAX = 20
ASTEROID_KARB_MIN = 20
ASTEROID_KARB_MAX = 100
ORBIT_FLIGHT_MIN = 50
ORBIT_FLIGHT_MAX = 200
ROUND_LIMIT = 1000
def validate_map_dims(h, w):
return (MAP_HEIGHT_MAX >= h >= MAP_HEIGHT_MIN) and (MAP_WIDTH_MAX >= w >= MAP_WIDTH_MIN)
def validate_num_bots(bot_list):
return 0 < len(bot_list) <= 6 and (len(bot_list) % 2 == 0)
def validate_asteroid_pattern(asteroid_list):
rounds = [i[0] for i in asteroid_list]
karb = [i[1] for i in asteroid_list]
for i in range(len(rounds)):
if rounds[i] < 1 or rounds[i] > ROUND_LIMIT:
print("fail round limit check")
return False
if karb[i] < ASTEROID_KARB_MIN or karb[i] > ASTEROID_KARB_MAX:
print("fail karb limit check")
return False
rounds.sort()
if rounds[0] > ASTEROID_ROUND_MAX:
print("fail minimum round")
return False
if ROUND_LIMIT - rounds[-1] > ASTEROID_ROUND_MAX:
print("fail maximum round")
return False
for i in range(len(rounds) - 1):
diff = rounds[i+1] - rounds[i]
if diff < ASTEROID_ROUND_MIN or diff > ASTEROID_ROUND_MAX:
print("fail diff {}".format(diff))
return False
return True
def validate_orbital_pattern(amplitude, period, center):
if center - amplitude < ORBIT_FLIGHT_MIN:
return False
if center + amplitude > ORBIT_FLIGHT_MAX:
return False
return True
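# A few illustrative sanity checks for the validators above (hypothetical
# values; not part of the original module):
if __name__ == '__main__':
    assert validate_map_dims(30, 40)                      # inside both ranges
    assert not validate_map_dims(10, 40)                  # height below MAP_HEIGHT_MIN
    assert validate_orbital_pattern(amplitude=50, period=40, center=120)
    assert not validate_orbital_pattern(amplitude=60, period=40, center=100)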
|
import os
import paddle
import paddle.nn as nn
import numpy as np
from U2Net.u2net import U2NET
from U2Net.processor import Processor
from paddlehub.module.module import moduleinfo
@moduleinfo(
name="U2Net", # 模型名称
type="CV", # 模型类型
author="jm12138", # 作者名称
author_email="jm12138@qq.com", # 作者邮箱
summary="U2Net", # 模型介绍
version="1.0.0" # 版本号
)
class U2Net(nn.Layer):
def __init__(self):
super(U2Net, self).__init__()
self.model = U2NET(3,1)
state_dict = paddle.load(os.path.join(self.directory, 'u2net.pdparams'))
self.model.set_dict(state_dict)
self.model.eval()
def predict(self, input_datas):
outputs = []
for data in input_datas:
data = paddle.to_tensor(data, 'float32')
d1,d2,d3,d4,d5,d6,d7= self.model(data)
outputs.append(d1.numpy())
outputs = np.concatenate(outputs, 0)
return outputs
def Segmentation(
self,
images=None,
paths=None,
batch_size=1,
input_size=320,
output_dir='output',
visualization=False):
        # initialize the data processor
        processor = Processor(paths, images, batch_size, input_size)
        # run model prediction
        outputs = self.predict(processor.input_datas)
        # post-process the prediction results
        results = processor.postprocess(outputs, visualization=visualization, output_dir=output_dir)
return results
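# A minimal usage sketch through PaddleHub (assumes the module is installed and
# 'test.jpg' exists; parameter values mirror the defaults of Segmentation()):
#
#   import paddlehub as hub
#   model = hub.Module(name='U2Net')
#   results = model.Segmentation(paths=['test.jpg'], batch_size=1,
#                                input_size=320, output_dir='output',
#                                visualization=True)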
|
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
from EEGNAS.imported_models.Chrononet.base import ModelBase
from EEGNAS.imported_models.Chrononet.lasso_feature_selection import LassoFeatureSelection
import logging
logger = logging.getLogger(__name__)
class PytorchModelBase(nn.Module, ModelBase):
Skip_None = 'none'
Skip_Add = 'add'
Skip_Concat = 'concat'
Norm_None = 'none'
Norm_Batch = 'batch_norm'
Norm_Layer = 'layer_norm'
def __init__(self, input_size, output_size, context_size, rnn_normalization, skip_mode, lasso_selection,
use_context, **kwargs):
super().__init__()
self.input_size = input_size
self.output_size = output_size
self.context_size = context_size
self.rnn_normalization = rnn_normalization
self.skip_mode = skip_mode
self.use_context = use_context
self.state_tuple_dim = 1
self.lasso_module = LassoFeatureSelection(input_size, lasso_selection)
@staticmethod
def add_arguments(parser):
parser.section('model')
parser.add_argument("rnn_normalization", type=str, default='none', choices=['none', 'batch_norm', 'layer_norm'],
help="Whether to use batch norm or not", )
parser.add_argument("skip_mode", type=str, default='none',
choices=['none', 'add', 'concat'],
help="Whether to skip connections")
parser.add_argument("lasso_selection", type=float, default=0.0, help="TODO")
parser.add_argument("use_context", type=int, choices=[0, 1], default=0,
help="If 1 then context information will be used.")
return parser
def save_model(self, path):
torch.save(self.state_dict(), path)
def load_model(self, path):
self.load_state_dict(torch.load(path, map_location=lambda storage, loc: storage))
def count_params(self):
pp = 0
for p in list(self.parameters()):
nn = 1
for s in list(p.size()):
nn = nn * s
pp += nn
return pp
# Dummy method for non RNN models that do not require hidden state. Reimplemented in RnnBase.
def initial_state(self):
return 0
# Dummy method for non RNN models that do not require hidden state. Reimplemented in RnnBase.
def export_state(self, states):
return [None for x in range(states[0])]
# Dummy method for non RNN models that do not require hidden state. Reimplemented in RnnBase.
def import_state(self, states):
return [len(states)]
class RnnBase(PytorchModelBase):
Initial_State_Random = 'random'
Initial_State_Zero = 'zero'
cell_mapper = {
'LSTM': nn.LSTM,
'GRU': nn.GRU,
'RNN': nn.RNN,
'IndRNN': IndRNN,
'IndGRU': IndGRU
}
@staticmethod
def add_arguments(parser):
PytorchModelBase.add_arguments(parser)
parser.add_argument("rnn_initial_state", type=str, default='random', choices=[RnnBase.Initial_State_Random,
RnnBase.Initial_State_Zero],
help="Initial state for RNN.")
parser.add_argument("rnn_dilation", type=int, default=1,
help="Dilation applied to the RNN cells. Assumes that sequence can be split into equal "
"dilation chunks in each layer.")
parser.add_argument("rnn_hidden_size", type=int, default=128,
help="Number of neurons in the RNN layer.")
parser.add_argument("rnn_num_layers", type=int, default=3,
help="Number of layers in the RNN network.")
parser.add_argument("dropout_f", type=float, default=0.0,
help="Dropout value in the forward direction.")
parser.add_argument("dropout_h", type=float, default=0.0,
help="Dropout value from hidden to hidden.")
parser.add_argument("dropout_i", type=float, default=0.0,
help="Dropout value on the input.")
parser.add_argument("rnn_cell_type", type=str, choices=RnnBase.cell_mapper.keys(),
default='GRU',
help="RNN cell type.")
parser.add_argument("skip_first", type=int, choices=[0, 1], default=0,
help="If skip connection should be applied in the first RNN layer")
parser.add_argument("skip_last", type=int, choices=[0, 1], default=0,
help="If skip connection should be applied in the last RNN layer")
parser.add_argument("use_mc_dropout", type=int, choices=[0, 1], default=0,
help="If set to 1 then during testing and validation will apply random dropout instead of "
"expected values to the weights")
return parser
def __init__(self, rnn_initial_state, rnn_dilation, rnn_hidden_size, rnn_num_layers, dropout_f, dropout_h, dropout_i,
rnn_cell_type, skip_first, skip_last, use_mc_dropout, **kwargs):
super().__init__(**kwargs)
self.rnn_initial_state = rnn_initial_state
self.rnn_dilation = rnn_dilation
self.rnn_hidden_size = rnn_hidden_size
self.rnn_num_layers = rnn_num_layers
self.dropout_f = dropout_f
self.dropout_h = dropout_h
self.dropout_i = dropout_i
self.rnn_cell_type = rnn_cell_type
self.skip_first = bool(skip_first)
self.skip_last = bool(skip_last)
self.use_mc_dropout = use_mc_dropout
if self.rnn_cell_type == "LSTM":
self.state_tuple_dim = 2
elif self.rnn_cell_type in ["GRU", "RNN", "IndRNN", "IndGRU"]:
self.state_tuple_dim = 1
else:
raise NotImplementedError("Cell type %s not recognized" % self.rnn_cell_type)
# Initial hidden state for the model. List of hidden_states for each layer. Layer hidden state represented as
# a numpy array ([dilation, 1, hidden_size]) for GRU or as a tuple of numpy arrays for LSTM
def initial_state(self):
if self.state_tuple_dim == 1:
return self._initial_state()
else:
return tuple(self._initial_state() for _ in range(self.state_tuple_dim))
# Create a new hidden state for one sample.
def _initial_state(self):
layer_state_list = []
for i_layer in range(self.rnn_num_layers):
cumulative_dilation = self.rnn_dilation ** i_layer
shape = (cumulative_dilation, 1, self.rnn_hidden_size)
if self.rnn_initial_state == self.Initial_State_Random:
h = np.array(np.random.normal(0, 1.0, shape), dtype=np.float32)
h = np.clip(h, -1, 1).astype(dtype=np.float32)
elif self.rnn_initial_state == self.Initial_State_Zero:
h = np.zeros(shape, np.float32)
else:
raise NotImplementedError()
layer_state_list.append(h)
return layer_state_list
# Zips together states from multiple samples
def import_state(self, sample_state_list):
if self.state_tuple_dim == 1:
layer_batch_state_list = self._import_state(sample_state_list)
return layer_batch_state_list
else:
layer_batch_state_list = []
for state_index in range(self.state_tuple_dim):
# Extract samples for specific state
sample_state_i_list = [s[state_index] for s in sample_state_list]
layer_batch_state_list.append(self._import_state(sample_state_i_list))
# Convert it such that Layers are first and then tuples with states describe each layer state
return [tuple(s) for s in zip(*layer_batch_state_list)]
# States come from different samples, merge them into one single minibatch,
# Each element from sample_state_list is a list of layer states of the shape [dilation, 1, hidden_size]
# We need to transform it into a variable with format [1, batch_size*dilation, hidden_size]
def _import_state(self, sample_state_list):
layer_batch_state_list = []
for i_layer in range(self.rnn_num_layers):
layer_sample_state_list = [s[i_layer] for s in sample_state_list]
# Concatenate samples for this layer,
# the shape after this operation should be [batch_size*dilation, 1, hidden_size]
layer_batch_state = np.concatenate(layer_sample_state_list)
# Pytorch expects the shape to be [Num_layers=1, batch_size*dilation, hidden_size],
# so we swap axes
layer_batch_state = np.swapaxes(layer_batch_state, 1, 0)
layer_batch_state = Variable(torch.from_numpy(layer_batch_state), requires_grad=False)
layer_batch_state_list.append(layer_batch_state)
return layer_batch_state_list
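    # Shape walk-through for _import_state above (illustrative numbers): with
    # batch_size=2, hidden_size=128 and dilation=1 in the first layer, each
    # sample contributes a (1, 1, 128) array; np.concatenate gives (2, 1, 128)
    # and the axis swap yields the (1, 2, 128) tensor that PyTorch expects as
    # the hidden state for that layer.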
# Converts PyTorch hidden state representation into numpy arrays that can be used by the data reader class
def export_state(self, layer_batch_state_list):
if self.state_tuple_dim == 1:
sample_state_list = self._export_state(layer_batch_state_list)
return sample_state_list
else:
sample_state_list = []
for state_index in range(self.state_tuple_dim):
layer_batch_state_i_list = [s[state_index] for s in layer_batch_state_list]
sample_state_list.append(self._export_state(layer_batch_state_i_list))
# Convert it such that samples are first then states and then layers
return [tuple(s_l) for s_l in zip(*sample_state_list)]
# As an input we have a list of states for each layer in the RNN
# Each layer state will have the shape [1, batch_size*dilation, hidden_size]
# We need to extract hidden state for each sample and output a list where each element
# represents hidden state for one data sample, hidden state should be a list where each element
# represents one layer hidden_state
def _export_state(self, layer_batch_state_list):
# Input is a list where each element is hidden state for a layer, shape [1, batch_size*dilation, hidden_size]
# We need to make batch_size*dilation as a first dimension
layer_batch_state_list = [torch.transpose(s, 1, 0) for s in layer_batch_state_list]
# Because the first layer always has dilation of 1 we can find out batch_size
batch_size = layer_batch_state_list[0].size(0)
# Placeholder for samples
# Here we will store for each sample list with hidden states for each RNN layer
sample_state_list = [[] for _ in range(batch_size)]
for i_layer in range(self.rnn_num_layers):
dilation = self.rnn_dilation**i_layer
layer_batch_state = layer_batch_state_list[i_layer]
# layer_batch_state has dimension [batch_size*dilation, 1, hidden_size]
# We split it into a list of layer_states for each sample,
# those will have dimension [dilation, 1, hidden_size]
layer_sample_state_list = torch.split(layer_batch_state, dilation)
# We save hidden states from each sample into the correct place
for s, st in zip(sample_state_list, layer_sample_state_list):
s.append(st.cpu().data.numpy())
# At the end samples should be a list where each element represents hidden state for a given sample
# This hidden state should be a list where each element represents hidden state for a given layer
# Hidden state for one layer should have dimensions [dilation, 1, hidden_size]
return sample_state_list
def offset_size(self, sequence_size):
return 0
# Some simple tests
if __name__ == '__main__':
for cell_type in ['LSTM']:
for dilation in [2]:
rnn = RnnBase(rnn_initial_state='random',
rnn_dilation=dilation,
rnn_hidden_size=2,
rnn_num_layers=2,
dropout_f=0.0,
dropout_h=0.0,
dropout_i=0.0,
rnn_cell_type=cell_type,
skip_first=0,
skip_last=0,
use_mc_dropout=0,
input_size=5,
output_size=2,
context_size=0,
rnn_normalization='none',
skip_mode='none',
lasso_selection=0.0,
use_context=0)
initial_states = [rnn.initial_state() for s in range(2)]
print('Initial states %d' % len(initial_states))
for i_s in initial_states:
if isinstance(i_s, tuple):
for i_s_i in i_s:
for i_l in range(rnn.rnn_num_layers):
print(i_s_i[i_l])
else:
for i_l in range(rnn.rnn_num_layers):
print(i_s[i_l])
imported_states = rnn.import_state(initial_states)
for layer_i in range(rnn.rnn_num_layers):
print('States imported to pytorch ( layer_i %d)' % layer_i)
print(imported_states[layer_i])
exported_states = rnn.export_state(imported_states)
            print('States exported from pytorch')
for e_s in exported_states:
if isinstance(e_s, tuple):
for e_s_i in e_s:
for i_l in range(rnn.rnn_num_layers):
print(e_s_i[i_l])
else:
for i_l in range(rnn.rnn_num_layers):
print(e_s[i_l])
for i_s, e_s in zip(initial_states, exported_states):
print('Compare sample')
if isinstance(i_s, tuple):
for i_s_i, e_s_i in zip(i_s, e_s):
for i_l in range(rnn.rnn_num_layers):
print(i_s_i[i_l])
print(e_s_i[i_l])
assert np.array_equal(i_s_i[i_l], e_s_i[i_l])
else:
for i_l in range(rnn.rnn_num_layers):
print(i_s[i_l])
print(e_s[i_l])
assert np.array_equal(i_s[i_l], e_s[i_l])
|
import pandas as pd
import pickle
from sklearn.linear_model import LogisticRegression
class Trainer:
def __init__(self):
pass
def Dataset(self):
self.dataset = pd.read_csv(r'dataset.csv')
print(self.dataset.head())
def trainModel(self):
self.Dataset()
self.X_train = self.dataset.drop('Label',axis=1)
self.y_train = self.dataset['Label']
self.model = LogisticRegression()
self.model.fit(self.X_train,self.y_train)
self.fileName = "classifier.pkl"
print("Saving Classifier.....")
try:
with open(self.fileName,'wb') as file:
pickle.dump(self.model,file)
        except OSError as e:
            print("Failed to save classifier:", e)
obj = Trainer()
obj.trainModel()
|
import inject
import os
from mycloud.common import to_generator
from mycloud.photos.photos_client import PhotosClient
class FsPhotosClient:
photos_client: PhotosClient = inject.attr(PhotosClient)
async def add(self, local_path, name):
filename = os.path.basename(local_path)
with open(local_path, 'rb') as f:
generator = to_generator(f)
await self.photos_client.add(name, generator, filename)
|
# Fltk-widgets
#
# (c) 2022 Kevin Routley
#
# build a cross-ref between the test programs and the includes used
# basically a means to determine which widgets are exercised in which test
#
# There are two different possible outputs. (Un-)comment as desired.
# 1. Outputs markdown which is the table of widgets/images/test-programs
# to go in the readme.md
# 2. A list of FLTK header files which are NOT used by any test program.
#
import glob
dict = {}
def addToDict(filename, incl):
if incl not in dict:
dict[incl] = []
dict[incl].append(filename)
def processOne(filename):
with open(filename) as file:
lines = file.readlines()
# looking for lines of the form "#include <FL/Fl_xxxx.H>"
# turn that into "Fl_xxxx.H"
# need to account for e.g. fl_ask.H
for line in lines:
if line.startswith("#include"):
parts = line.split("<")
if len(parts) > 1:
str = parts[1].lower()
if (str.startswith("fl/fl_")):
nm = (parts[1][3:].split(">"))[0]
addToDict(filename, nm)
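# Example of the transformation performed above on a hypothetical line:
#   '#include <FL/Fl_Button.H>'  ->  lower-cased check on 'fl/fl_button.h>'
#                                ->  key 'Fl_Button.H' mapped to this test file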
hxx = glob.glob("./fltk/test/*.h")
for file in hxx:
processOne(file)
cxx = glob.glob("./fltk/test/*.cxx")
for file in cxx:
processOne(file)
# outputs markdown table to incorporate into the readme.md in the repository
print("|Widget|Picture|Tests|")
print("|---|---|---|")
for key in sorted(dict.keys()):
print("|{0}||{1}|".format(key, ", ".join(sorted(dict[key])),key[:-2]))
# identify missing
#master = glob.glob("../FL/?l_*.H")
#for incl in sorted(master):
# test = incl[6:]
# if test not in dict:
# print(test)
|
# -*- coding: utf-8 -*-
"""Base Site class."""
from typing import ClassVar
from typing import Dict
from typing import List
from mrsimulator.utils.parseable import Parseable
from pydantic import validator
from .tensors import AntisymmetricTensor
from .tensors import SymmetricTensor
__author__ = "Deepansh Srivastava"
__email__ = "srivastava.89@osu.edu"
class Coupling(Parseable):
"""Base class representing a two-site coupled nuclear spin interaction tensor
parameters, which include the J-coupling and dipolar tensor.
.. rubric:: Attribute Documentation
Attributes
----------
site_index: list of int (required).
A list of two integers, each corresponding to the index of the coupled sites.
Example
-------
>>> coupling = Coupling(site_index=[0, 1])
isotropic_j: float (optional).
The isotropic j-coupling, in Hz, between the coupled sites. The default is 0.
Example
-------
>>> coupling.isotropic_j = 43.3
j_symmetric: :ref:`sy_api` or equivalent dict object (optional).
The attribute represents the parameters of the irreducible second-rank traceless
symmetric part of the J-coupling tensor. The default value is None.
The allowed attributes of the :ref:`sy_api` class for `j_symmetric` are
``zeta``, ``eta``, ``alpha``, ``beta``, and ``gamma``, where ``zeta`` is the
J anisotropy, in Hz, and ``eta`` is the J asymmetry parameter defined using the
Haeberlen convention. The Euler angles ``alpha``, ``beta``, and ``gamma`` are
in radians.
Example
-------
>>> coupling.j_symmetric = {'zeta': 10, 'eta': 0.5}
>>> # or equivalently
>>> coupling.j_symmetric = SymmetricTensor(zeta=10, eta=0.5)
j_antisymmetric: :ref:`asy_api` or equivalent dict object (optional).
The attribute represents the parameters of the irreducible first-rank
antisymmetric part of the J tensor. The default value is None.
The allowed attributes of the :ref:`asy_api` class for `j_antisymmetric` are
``zeta``, ``alpha``, and ``beta``, where ``zeta`` is the anisotropy parameter
of the anti-symmetric first-rank tensor given in Hz. The angles ``alpha`` and
``beta`` are in radians.
Example
-------
>>> coupling.j_antisymmetric = {'zeta': 20}
>>> # or equivalently
>>> coupling.j_antisymmetric = AntisymmetricTensor(zeta=20)
dipolar: :ref:`sy_api` or equivalent dict object (optional).
The attribute represents the parameters of the irreducible second-rank traceless
symmetric part of the direct-dipolar coupling tensor. The default value is None.
The allowed attributes of the :ref:`sy_api` class for `dipolar` are ``D``,
``alpha``, ``beta``, and ``gamma``, where ``D`` is the dipolar coupling
constant, in Hz. The Euler angles ``alpha``, ``beta``, and ``gamma`` are in
radians.
Example
-------
>>> coupling.dipolar = {'D': 320}
>>> # or equivalently
>>> coupling.dipolar = SymmetricTensor(D=320)
name: str (optional).
The name or id of the coupling. The default value is None.
Example
-------
>>> coupling.name = '1H-1H'
>>> coupling.name
'1H-1H'
label: str (optional).
The label for the coupling. The default value is None.
Example
-------
>>> coupling.label = 'Weak coupling'
>>> coupling.label
'Weak coupling'
description: str (optional).
A description of the coupling. The default value is None.
Example
-------
>>> coupling.description = 'An example coupled sites.'
>>> coupling.description
'An example coupled sites.'
Example
-------
The following are a few examples of setting the site object.
>>> coupling1 = Coupling(
... site_index=[0, 1],
... isotropic_j=20, # in Hz
... j_symmetric={
... "zeta": 10, # in Hz
... "eta": 0.5
... },
... dipolar={"D": 5.1e3}, # in Hz
... )
Using SymmetricTensor objects.
>>> coupling1 = Coupling(
... site_index=[0, 1],
... isotropic_j=20, # in Hz
... j_symmetric=SymmetricTensor(zeta=10, eta=0.5),
... dipolar=SymmetricTensor(D=5.1e3), # in Hz
... )
"""
site_index: List[int]
isotropic_j: float = 0.0
j_symmetric: SymmetricTensor = None
j_antisymmetric: AntisymmetricTensor = None
dipolar: SymmetricTensor = None
property_unit_types: ClassVar[Dict] = {"isotropic_j": "frequency"}
property_default_units: ClassVar[Dict] = {"isotropic_j": "Hz"}
property_units: Dict = {"isotropic_j": "Hz"}
test_vars: ClassVar[Dict] = {"site_index": [0, 1]}
@validator("dipolar")
def dipolar_must_not_contain_Cq_and_zeta(cls, v, values):
if v is None:
return v
_ = [
v.property_units.pop(item) if item in v.property_units else None
for item in ["Cq", "zeta"]
]
return v
@validator("j_symmetric", "j_antisymmetric")
def j_symmetric_must_not_contain_Cq_and_D(cls, v, values):
if v is None:
return v
_ = [
v.property_units.pop(item) if item in v.property_units else None
for item in ["Cq", "D"]
]
v.property_units["zeta"] = "Hz"
return v
@validator("site_index", always=True)
def validate_site_index(cls, v, *, values, **kwargs):
if len(v) != 2:
raise ValueError("Site index must a list of two integers.")
if v[0] == v[1]:
raise ValueError("The two site indexes must be unique integers.")
return v
class Config:
extra = "forbid"
validate_assignment = True
@classmethod
def parse_dict_with_units(cls, py_dict: dict):
"""Parse the physical quantity from a dictionary representation of the Coupling
object, where the physical quantity is expressed as a string with a number and
a unit.
Args:
dict py_dict: A required python dict object.
Returns:
:ref:`site_api` object.
Example
-------
>>> coupling_dict = {
... "site_index": [1, 2],
... "isotropic_j": "20 Hz",
... "j_symmetric": {"zeta": "10 Hz", "eta":0.5}
... }
>>> coupling1 = Coupling.parse_dict_with_units(coupling_dict)
"""
prop_mapping = {
"j_symmetric": SymmetricTensor,
"j_antisymmetric": AntisymmetricTensor,
"dipolar": SymmetricTensor,
}
for k, v in prop_mapping.items():
if k in py_dict:
py_dict[k] = v.parse_dict_with_units(py_dict[k])
if py_dict[k].property_units["zeta"] == "ppm" and k != "dipolar":
raise ValueError(
f"Error enforcing units for {k}.zeta: ppm. Use frequency units."
)
return super().parse_dict_with_units(py_dict)
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import absolute_import, division
import os
import sys
import logging
import tensorflow as tf
import tensorflow_hub as hub
tf.logging.set_verbosity(0)
# Set up logger
logging.basicConfig(format='%(asctime)s : %(message)s', level=logging.DEBUG)
logging.info("GOOGLE USE MODEL (params: Path to Data & # of Hidden Layers[optional] )")
logging.info("\n\n\nPATH_TO_DATA: " + str(sys.argv[1])+ "\n\n")
# Set PATHs
PATH_TO_SENTEVAL = '../'
PATH_TO_DATA = sys.argv[1] # '../data'
# import SentEval
sys.path.insert(0, PATH_TO_SENTEVAL)
import senteval
# tensorflow session
session = tf.Session()
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# SentEval prepare and batcher
def prepare(params, samples):
return
def batcher(params, batch):
batch = [' '.join(sent) if sent != [] else '.' for sent in batch]
embeddings = params['google_use'](batch)
return embeddings
def make_embed_fn(module):
with tf.Graph().as_default():
sentences = tf.placeholder(tf.string)
embed = hub.Module(module)
embeddings = embed(sentences)
session = tf.train.MonitoredSession()
return lambda x: session.run(embeddings, {sentences: x})
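# Illustration of how the embedding function produced above is used by
# batcher() (sentences are hypothetical; the USE-large module returns
# 512-dimensional vectors):
#   embed = make_embed_fn("https://tfhub.dev/google/universal-sentence-encoder-large/3")
#   vectors = embed(["a first sentence", "a second sentence"])  # shape (2, 512)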
# define senteval params
params_senteval = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 10}
if (len(sys.argv)>3):
nhid = int(sys.argv[3])
else:
nhid=0
#params_senteval['classifier'] = {'nhid':nhid , 'optim': 'rmsprop', 'batch_size': 128,'tenacity': 3, 'epoch_size': 2}
params_senteval['classifier'] ={'nhid': nhid, 'optim': 'adam','batch_size': 64, 'tenacity': 5,'epoch_size': 4}
# Start TF session and load Google Universal Sentence Encoder
encoder = make_embed_fn("https://tfhub.dev/google/universal-sentence-encoder-large/3")
params_senteval['google_use'] = encoder
if __name__ == "__main__":
se = senteval.engine.SE(params_senteval, batcher, prepare)
transfer_tasks = ['MEDNLI','ClinicalSTS','BIOSSES','ClinicalSTS2']
results = se.eval(transfer_tasks)
print(results)
|
"""
Simple multithreaded algorithm to show how the 4 phases of a genetic algorithm works
(Evaluation, Selection, Crossover and Mutation)
https://en.wikipedia.org/wiki/Genetic_algorithm
Author: D4rkia
"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation for evolution; the selection takes
# place from the best to the worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes; this
# guarantees that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# just a seed to improve randomness required by the algorithm
random.seed(random.randint(0, 1000))
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
"""
Verify that the target contains no genes besides the ones inside genes variable.
>>> from string import ascii_lowercase
>>> basic("doctest", ascii_lowercase, debug=False)[2]
'doctest'
>>> genes = list(ascii_lowercase)
>>> genes.remove("e")
>>> basic("test", genes)
Traceback (most recent call last):
...
ValueError: ['e'] is not in genes list, evolution cannot converge
>>> genes.remove("s")
>>> basic("test", genes)
Traceback (most recent call last):
...
ValueError: ['e', 's'] is not in genes list, evolution cannot converge
>>> genes.remove("t")
>>> basic("test", genes)
Traceback (most recent call last):
...
ValueError: ['e', 's', 't'] is not in genes list, evolution cannot converge
"""
# Verify if N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
raise ValueError(f"{N_POPULATION} must be bigger than {N_SELECTED}")
# Verify that the target contains no genes besides the ones inside genes variable.
not_in_genes_list = sorted({c for c in target if c not in genes})
if not_in_genes_list:
raise ValueError(
f"{not_in_genes_list} is not in genes list, evolution cannot converge"
)
# Generate random starting population
population = []
for _ in range(N_POPULATION):
population.append("".join([random.choice(genes) for i in range(len(target))]))
# Just some logs to know what the algorithms is doing
generation, total_population = 0, 0
# This loop will end when we will find a perfect match for our target
while True:
generation += 1
total_population += len(population)
# Random population created now it's time to evaluate
def evaluate(item: str, main_target: str = target) -> tuple[str, float]:
"""
Evaluate how similar the item is with the target by just
counting each char in the right position
>>> evaluate("Helxo Worlx", Hello World)
["Helxo Worlx", 9]
"""
score = len(
[g for position, g in enumerate(item) if g == main_target[position]]
)
return (item, float(score))
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
        # but with a simple algorithm like this it will probably be slower
# we just need to call evaluate for every item inside population
population_score = [evaluate(item) for item in population]
# Check if there is a matching evolution
population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the Best result every 10 generation
# just to know that the algorithm is working
if debug and generation % 10 == 0:
print(
f"\nGeneration: {generation}"
f"\nTotal Population:{total_population}"
f"\nBest score: {population_score[0][1]}"
f"\nBest string: {population_score[0][0]}"
)
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
population_best = population[: int(N_POPULATION / 3)]
population.clear()
population.extend(population_best)
# Normalize population score from 0 to 1
population_score = [
(item, score / len(target)) for item, score in population_score
]
# Select, Crossover and Mutate a new population
def select(parent_1: tuple[str, float]) -> list[str]:
"""Select the second parent and generate new population"""
pop = []
# Generate more child proportionally to the fitness score
child_n = int(parent_1[1] * 100) + 1
child_n = 10 if child_n >= 10 else child_n
for _ in range(child_n):
parent_2 = population_score[random.randint(0, N_SELECTED)][0]
child_1, child_2 = crossover(parent_1[0], parent_2)
# Append new string to the population list
pop.append(mutate(child_1))
pop.append(mutate(child_2))
return pop
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
"""Slice and combine two string in a random point"""
random_slice = random.randint(0, len(parent_1) - 1)
child_1 = parent_1[:random_slice] + parent_2[random_slice:]
child_2 = parent_2[:random_slice] + parent_1[random_slice:]
return (child_1, child_2)
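        # Illustration (assuming random_slice == 2):
        #   crossover("abcd", "wxyz") -> ("abyz", "wxcd")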
def mutate(child: str) -> str:
"""Mutate a random gene of a child with another one from the list"""
child_list = list(child)
if random.uniform(0, 1) < MUTATION_PROBABILITY:
                child_list[random.randint(0, len(child) - 1)] = random.choice(genes)
return "".join(child_list)
# This is Selection
for i in range(N_SELECTED):
population.extend(select(population_score[int(i)]))
# Check if the population has already reached the maximum value and if so,
# break the cycle. if this check is disabled the algorithm will take
# forever to compute large strings but will also calculate small string in
# a lot fewer generations
if len(population) > N_POPULATION:
break
if __name__ == "__main__":
target_str = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
genes_list = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
print(
"\nGeneration: %s\nTotal Population: %s\nTarget: %s"
% basic(target_str, genes_list)
)
|
import c4d
from .Const import Const
class UD():
"""
    Class for handling our UserData. Since we can't access curve data from a resource file in Python,
    the only solution is to build our object menu ourselves via UserData.
    It's a bit tricky since we have to rebuild everything from scratch every time.
    But users don't want to lose their settings if they open/close C4D, so we store all data in our tag's BaseContainer.
    If there is no BaseContainer we build it from scratch.
"""
def __init__(self, obj):
self.obj = obj
#List who gonna store our id of our UserData
# 0 = R // 1 = G // 2 = B
self.idCycle = [None] * 1
self.idGroups = [None] * 7
self.idSplines = [None] * 3
self.idNbPoints = [None] * 4
self.idInvert = [None] * 6
self.idSeparator = [None] * 3
self.idStringInput = [None] * 7
self.idButtons = [None] * 3
self.idGradient = [None] * 4
self.oldCycle = None
self.oldNbPoints = [None] * 4
self.oldInvert = [None] * 6
def create_group(self, groupId, name, parentGroup=None, columns=None, shortname=None, titleBar=True, Open=False):
"""
Create a Group
:param groupId: int => ID of the created groupID
:param name: str => name of the group
:param parentGroup: int => groupID of the parent group
:param columns: int => number of columns
:param shortname: str => display name of the group if we don't want to display name
:param titleBar: bool => if we can hide the group
:param Open: bool => If the group is opened or not
:return: The list of groups with the newly created one
"""
if (self.idGroups[groupId]) is None:
if shortname is None: shortname = name
bc = c4d.GetCustomDatatypeDefault(c4d.DTYPE_GROUP)
bc[c4d.DESC_NAME] = name
bc[c4d.DESC_SHORT_NAME] = shortname
bc[c4d.DESC_TITLEBAR] = int(titleBar)
bc[c4d.DESC_GUIOPEN] = int(Open)
if parentGroup is not None:
bc[c4d.DESC_PARENTGROUP] = parentGroup
if columns is not None:
# DESC_COLUMNS VALUE IS WRONG IN 15.057 - SHOULD BE 22
bc[22] = columns
self.idGroups[groupId] = self.obj.AddUserData(bc)
return self.idGroups
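    # A minimal usage sketch (hypothetical names/ids; assumes `op` is the
    # object hosting the UserData):
    #   ud = UD(op)
    #   ud.create_group(0, "Colors", columns=2, shortname="Col")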
def create_separator(self, separatorId, parentGroup=None, name=""):
"""
Create a separator
:param separatorId: int => ID of the created separator
:param parentGroup: int => groupID of the parent group
:param name: name but can be empty string
:return: The list of separator with the newly created one
"""
if (self.idSeparator[separatorId]) is None:
bc = c4d.GetCustomDataTypeDefault(c4d.DTYPE_SEPARATOR)
bc[c4d.DESC_NAME] = name
bc[c4d.DESC_SHORT_NAME] = name
bc[c4d.DESC_PARENTGROUP] = parentGroup
bc.SetLong(c4d.DESC_CUSTOMGUI, c4d.DTYPE_SEPARATOR)
bc.SetBool(c4d.DESC_SEPARATORLINE, False)
self.idSeparator[separatorId] = self.obj.AddUserData(bc)
return self.idSeparator
def create_button(self, buttonId, parentGroup=None, name=""):
"""
Create a button
:param buttonId: int => ID of the created button
:param parentGroup: int => groupID of the parent group
:param name: name but can be empty string
:return: The list of button with the newly created one
"""
if (self.idButtons[buttonId]) is None:
bc = c4d.GetCustomDatatypeDefault(c4d.DTYPE_BUTTON)
bc[c4d.DESC_NAME] = name
bc[c4d.DESC_SHORT_NAME] = name
bc[c4d.DESC_PARENTGROUP] = parentGroup
bc[c4d.DESC_CUSTOMGUI] = c4d.CUSTOMGUI_BUTTON
self.idButtons[buttonId] = self.obj.AddUserData(bc)
def create_string_input(self, floatId, value, parentGroup=None, name=""):
"""
Create an input string
:param floatId: int => ID of the created InputField
:param value: str => value of the InputField
:param parentGroup: int => groupID of the parent group
:param name: str => name of the inputField
:return: The list of InputField with the newly created one
"""
if (self.idStringInput[floatId]) is None:
bc = c4d.GetCustomDatatypeDefault(c4d.DTYPE_STRING)
bc[c4d.DESC_NAME] = name
bc[c4d.DESC_PARENTGROUP] = parentGroup
self.idStringInput[floatId] = self.obj.AddUserData(bc)
self.obj[self.idStringInput[floatId]] = value
return self.idStringInput
else:
self.obj[self.idStringInput[floatId]] = value
return self.idStringInput
def create_int_slider(self, sliderId, value, parentGroup=None, sliderText=""):
"""
Create a slider of integer
:param sliderId: int => ID of the created Slider
:param value: int => default value of the slider
:param parentGroup: int => groupID of the parent group
:param sliderText: the name of the slider
:return: The list of IntSlider with the newly created one
"""
if (self.idNbPoints[sliderId]) is None:
bc = c4d.GetCustomDatatypeDefault(c4d.DTYPE_LONG)
bc[c4d.DESC_CUSTOMGUI] = c4d.CUSTOMGUI_LONGSLIDER
bc[c4d.DESC_NAME] = sliderText
bc[c4d.DESC_MIN] = 10
bc[c4d.DESC_MAX] = 100
bc[c4d.DESC_PARENTGROUP] = parentGroup
self.idNbPoints[sliderId] = self.obj.AddUserData(bc) # Add userdata container
self.obj[self.idNbPoints[sliderId]] = value
return self.idNbPoints
else:
self.obj[self.idNbPoints[sliderId]] = value
return self.idNbPoints[sliderId]
def create_cycle(self, cycleId, data, parentGroup=None, cycleText=""):
"""
Create a cycle
:param cycleId: int => ID of the created cycle
        :param data: list => ordered list of entry labels
        :param parentGroup: int => groupID of the parent group
        :param cycleText: str => the name of the cycle
:return: The list of Cycle with the newly created one
"""
if (self.idCycle[cycleId]) is None:
bc = c4d.GetCustomDatatypeDefault(c4d.DTYPE_LONG)
bc[c4d.DESC_NAME] = cycleText
bc[c4d.DESC_CUSTOMGUI] = c4d.CUSTOMGUI_CYCLE
bc[c4d.DESC_PARENTGROUP] = parentGroup
bc[c4d.DESC_MIN] = 0
bc[c4d.DESC_MAX] = len(data) - 1
cycle = c4d.BaseContainer()
for i in xrange(0, len(data)):
cycle.SetString(i, data[i])
bc.SetContainer(c4d.DESC_CYCLE, cycle)
self.idCycle[cycleId] = self.obj.AddUserData(bc)
return self.idCycle
else:
for id, bc in self.obj.GetUserDataContainer():
if id == self.idCycle[cycleId]:
cycle = c4d.BaseContainer()
for i in xrange(0, len(data)):
cycle.SetString(i, data[i])
bc[c4d.DESC_MAX] = len(data) - 1
bc.SetContainer(c4d.DESC_CYCLE, cycle)
self.obj.SetUserDataContainer(id, bc)
return self.idCycle
def create_gradient(self, colorId, gradientData, parentGroup=None, gradientText=""):
"""
Create a Gradient Ui
        :param colorId: int => ID of the created gradient
        :param gradientData: c4d.GradientData => data for the gradient
        :param parentGroup: int => groupID of the parent group
        :param gradientText: str => the name of the gradient
        :return: The list of gradients with the newly created one
"""
if (self.idGradient[colorId]) is None:
bc = c4d.GetCustomDatatypeDefault(c4d.CUSTOMDATATYPE_GRADIENT) # Create default container
bc[c4d.DESC_NAME] = gradientText # Rename the entry
bc[c4d.DESC_PARENTGROUP] = parentGroup
self.idGradient[colorId] = self.obj.AddUserData(bc) # Add userdata container
self.obj[self.idGradient[colorId]] = gradientData
return self.idGradient
else:
self.obj[self.idGradient[colorId]] = gradientData
return self.idGradient[colorId]
def create_spline(self, colorId, splineData, parentGroup=None, splineText=""):
"""
Create a Spline Ui
        :param colorId: int => ID of the created spline
:param splineData: c4d.SplineData => Data for the spline
:param parentGroup: int => groupID of the parent group
        :param splineText: str => the name of the spline
        :return: The list of splines with the newly created one
"""
if (self.idSplines[colorId]) is None:
bc = c4d.GetCustomDatatypeDefault(c4d.CUSTOMDATATYPE_SPLINE) # Create default container
bc[c4d.DESC_NAME] = splineText # Rename the entry
bc[c4d.DESC_PARENTGROUP] = parentGroup
self.idSplines[colorId] = self.obj.AddUserData(bc) # Add userdata container
self.obj[self.idSplines[colorId]] = splineData
return self.idSplines
else:
self.obj[self.idSplines[colorId]] = splineData
return self.idSplines[colorId]
def create_bool(self, id, value, parentGroup, boolText=""):
"""
Create a checkbox
:param id: int => Id of the created Checkbox
:param value: bool => Default value
:param parentGroup: int => groupID of the parent group
        :param boolText: str => the name of the checkbox
        :return: The list of checkboxes with the newly created one
"""
if (self.idInvert[id]) is None:
bc = c4d.GetCustomDatatypeDefault(c4d.DTYPE_BOOL) # Create default container
bc[c4d.DESC_NAME] = boolText # Rename the entry
bc[c4d.DESC_PARENTGROUP] = parentGroup
self.idInvert[id] = self.obj.AddUserData(bc) # Add userdata container
self.obj[self.idInvert[id]] = value
return self.idInvert
else:
self.obj[self.idInvert[id]] = value
return self.idInvert[id]
def get_current_data(self):
"""
        Read all values from the UserData into Python variables.
        :return:
        [list of point counts: red, green, blue, rgb],
        [list of invert states],  # 6 entries: INVERT_X and INVERT_Y for each RGB component
        [list of cycle values]    # always contains a single value
"""
bufferCycle = list()
bufferCycle.append(self.obj[self.idCycle[Const.UI_CYCLE]])
bufferNbPts = list()
bufferNbPts.append(self.obj[self.idNbPoints[Const.UI_SLIDER_RED]])
bufferNbPts.append(self.obj[self.idNbPoints[Const.UI_SLIDER_GREEN]])
bufferNbPts.append(self.obj[self.idNbPoints[Const.UI_SLIDER_BLUE]])
bufferNbPts.append(self.obj[self.idNbPoints[Const.UI_SLIDER_RGB]])
bufferInvert = list()
bufferInvert.append(self.obj[self.idInvert[Const.UI_BOOL_RED_X]])
bufferInvert.append(self.obj[self.idInvert[Const.UI_BOOL_GREEN_X]])
bufferInvert.append(self.obj[self.idInvert[Const.UI_BOOL_BLUE_X]])
bufferInvert.append(self.obj[self.idInvert[Const.UI_BOOL_RED_Y]])
bufferInvert.append(self.obj[self.idInvert[Const.UI_BOOL_GREEN_Y]])
bufferInvert.append(self.obj[self.idInvert[Const.UI_BOOL_BLUE_Y]])
return bufferNbPts, bufferInvert, bufferCycle
def set_old_value(self):
"""
        Store the values currently entered in the UserData so they can be compared later
"""
self.oldNbPoints = list()
self.oldInvert = list()
for i in xrange(0, len(self.idNbPoints)):
self.oldNbPoints.append(self.obj[self.idNbPoints[i]])
for i in xrange(0, len(self.idInvert)):
self.oldInvert.append(self.obj[self.idInvert[i]])
def get_mode_inverted(self, id):
"""
        Return the combined invert state of a channel
        :param id: the channel id to read (0 = R, 1 = G, 2 = B)
:return: 0 if X and Y not inverted
1 if X inverted and Y not inverted
2 if X not inverted and Y inverted
3 if X and Y inverted
"""
buffer = 0
if self.obj[self.idInvert[id]]:
buffer += Const.X_INVERTED
if self.obj[self.idInvert[id + 3]]:
buffer += Const.Y_INVERTED
return buffer
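    # Example (sketch): for the red channel (id 0, see the mapping in __init__),
    # ticking only the Y checkbox makes this return Const.Y_INVERTED, and
    # ticking both checkboxes makes it return Const.X_INVERTED + Const.Y_INVERTED.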
def get_current_cycle_text(self, cycleId):
"""
        Get the text of the currently selected entry of a cycle
        :param cycleId: the cycle id to read the data from
        :return: str => the string representation of the current value
"""
for id, bc in self.obj.GetUserDataContainer():
if id[1].id == self.idCycle[cycleId][1].id:
bcCycle = bc.GetContainer(c4d.DESC_CYCLE)
return bcCycle.GetString(self.obj[self.idCycle[cycleId]])
def create_bc_from_list(self, liste):
"""
        Create a c4d.BaseContainer representation of a list.
        Allows us to store the current UserData IDs in the object for later recreation.
        Inverse function of create_list_from_bc.
:param liste:
:return: c4d.BaseContainer => the basecontainer representation of our list
"""
buffer = c4d.BaseContainer()
for i in xrange(0, len(liste)):
buffer[i] = liste[i]
return buffer
def create_bc_from_id(self):
"""
        Store all of our UserData IDs in the self.obj[Const.PLUGIN_ID] container.
        Inverse function of populate_id_from_bc.
"""
bc = c4d.BaseContainer()
bc[0] = self.create_bc_from_list(self.idCycle)
bc[1] = self.create_bc_from_list(self.idGroups)
bc[2] = self.create_bc_from_list(self.idSplines)
bc[3] = self.create_bc_from_list(self.idNbPoints)
bc[4] = self.create_bc_from_list(self.idInvert)
bc[5] = self.create_bc_from_list(self.idSeparator)
bc[6] = self.create_bc_from_list(self.idStringInput)
bc[7] = self.create_bc_from_list(self.idButtons)
bc[8] = self.create_bc_from_list(self.idGradient)
currentBC = self.obj.GetDataInstance()
currentBC[Const.PLUGIN_ID] = bc
def create_list_from_bc(self, bc):
"""
        Create a list from a c4d.BaseContainer.
        Inverse function of create_bc_from_list.
:param bc: c4d.BaseContainer => The basecontainer to read value from
:return: list => The list representation of our basecontainer
"""
buffer = list()
i = 0
while i < len(bc):
buffer.append(bc[i])
i += 1
return buffer
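    # Hedged round-trip sketch (runs only inside Cinema 4D's Python, where the
    # c4d module is available): create_bc_from_list and create_list_from_bc are
    # intended to be inverses of each other for flat lists of storable values.
    #
    #   ud = UD(op)                                   # 'op' is some tagged object
    #   bc = ud.create_bc_from_list([10, 20, 30])
    #   assert ud.create_list_from_bc(bc) == [10, 20, 30]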
def populate_id_from_bc(self):
"""
        Read all IDs from the self.obj[Const.PLUGIN_ID] container and restore the UserData ID lists from them
"""
bc = self.obj.GetDataInstance().GetContainer(Const.PLUGIN_ID)
self.idCycle = self.create_list_from_bc(bc[0])
self.idGroups = self.create_list_from_bc(bc[1])
self.idSplines = self.create_list_from_bc(bc[2])
self.idNbPoints = self.create_list_from_bc(bc[3])
self.idInvert = self.create_list_from_bc(bc[4])
self.idSeparator = self.create_list_from_bc(bc[5])
self.idStringInput = self.create_list_from_bc(bc[6])
self.idButtons = self.create_list_from_bc(bc[7])
self.idGradient = self.create_list_from_bc(bc[8])
|
#!/usr/bin/env python
"""
Explore encoding settings on a platform.
"""
from __future__ import print_function
import sys
import platform
import locale
from behave.textutil import select_best_encoding
def explore_platform_encoding():
python_version = platform.python_version()
print("python %s (platform: %s, %s, %s)" % (python_version, sys.platform,
platform.python_implementation(),
platform.platform()))
print("sys.getfilesystemencoding(): %s" % sys.getfilesystemencoding())
print("locale.getpreferredencoding(): %s" % locale.getpreferredencoding())
print("behave.textutil.select_best_encoding(): %s" % select_best_encoding())
return 0
if __name__ == "__main__":
sys.exit(explore_platform_encoding())
|
import numpy as np
from ._CFunctions import _CInternalField,_CInternalFieldDeg
def Field(p0,p1,p2,MaxDeg=None):
'''
Return the internal magnetic field vector(s). Check the model
config using JupiterMag.Internal.Config() to see whether Cartesian or
polar coordinates are used for input/output and to set the model.
Inputs
======
p0 : float
Array/scalar containing x or r right-handed System III
coordinate
p1 : float
Array/scalar containing y or theta right-handed System III
coordinate
p2 : float
Array/scalar containing z or phi right-handed System III
coordinate
MaxDeg : None|int
Maximum model degree to use. If None then the default value
(model dependant) will be used.
Returns
=======
B0 : float
Either Bx or Br in nT
B1 : float
Either By or Btheta in nT
B2 : float
Either Bz or Bphi in nT
'''
#make sure that the inputs are the correct type
if (hasattr(p0,'__iter__') == False):
_p0 = np.array([p0]).astype('float64')
else:
_p0 = np.array(p0).astype('float64')
if (hasattr(p1,'__iter__') == False):
_p1 = np.array([p1]).astype('float64')
else:
_p1 = np.array(p1).astype('float64')
if (hasattr(p2,'__iter__') == False):
_p2 = np.array([p2]).astype('float64')
else:
_p2 = np.array(p2).astype('float64')
_l = np.int32(np.size(_p0))
_B0 = np.zeros(_l,dtype='float64')
_B1 = np.zeros(_l,dtype='float64')
_B2 = np.zeros(_l,dtype='float64')
#call the model
if MaxDeg is None:
_CInternalField(_l,_p0,_p1,_p2,_B0,_B1,_B2)
else:
_MaxDeg = np.int32(MaxDeg)
_CInternalFieldDeg(_l,_p0,_p1,_p2,_MaxDeg,_B0,_B1,_B2)
return _B0,_B1,_B2
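# Hedged usage sketch (assumes this module is exposed as JupiterMag.Internal and
# that the compiled _CInternalField library is importable):
#
#   import numpy as np
#   import JupiterMag as jm
#
#   # a single position (Cartesian or polar, depending on the configured model)
#   B0, B1, B2 = jm.Internal.Field(10.0, 0.0, 0.0)
#
#   # an array of positions, limiting the expansion to degree 10
#   r = np.linspace(2.0, 30.0, 10)
#   B0, B1, B2 = jm.Internal.Field(r, np.zeros(10), np.zeros(10), MaxDeg=10)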
|
"""
Send SMS through ASPSMS
Adapted from repoze.sendmail: https://github.com/repoze/repoze.sendmail
Usage:
    qp = SmsQueueProcessor(sms_directory, username, password)
qp.send_messages()
"""
import errno
import json
import logging
import os
import pycurl
import stat
import time
from io import BytesIO
log = logging.getLogger('onegov.election_day')
# The below diagram depicts the operations performed while sending a message.
# This sequence of operations will be performed for each file in the maildir
# on which ``send_message`` is called.
#
# Any error conditions not depicted on the diagram will provoke the catch-all
# exception logging of the ``send_message`` method.
#
# In the diagram the "message file" is the file in the maildir's "cur"
# directory that contains the message and "tmp file" is a hard link to the
# message file created in the maildir's "tmp" directory.
#
# ( start trying to deliver a message )
# |
# |
# V
# +-----( get tmp file mtime )
# | |
# | | file exists
# | V
# | ( check age )-----------------------------+
# tmp file | | file is new |
# does not | | file is old |
# exist | | |
# | ( unlink tmp file )-----------------------+ |
# | | file does | |
# | | file unlinked not exist | |
# | V | |
# +---->( touch message file )------------------+ | |
# | file does | | |
# | not exist | | |
# V | | |
# ( link message file to tmp file )----------+ | | |
# | tmp file | | | |
# | already exists | | | |
# | | | | |
# V V V V V
# ( send message ) ( skip this message )
# |
# V
# ( unlink message file )---------+
# | |
# | file unlinked | file no longer exists
# | |
# | +-----------------+
# | |
# | V
# ( unlink tmp file )------------+
# | |
# | file unlinked | file no longer exists
# V |
# ( message delivered )<---------+
# The longest time sending a file is expected to take. Longer than this and
# the send attempt will be assumed to have failed. This means that sending
# very large files or using very slow mail servers could result in duplicate
# messages sent.
MAX_SEND_TIME = 60 * 60 * 3
class SmsQueueProcessor(object):
def __init__(self, path, username, password, originator=None):
self.path = path
self.username = username
self.password = password
self.originator = originator or "OneGov"
# Keep a pycurl object around, to use HTTP keep-alive - though pycurl
        # is much worse in terms of its API, the performance is *much* better
# than requests and it supports modern features like HTTP/2 or HTTP/3
self.url = 'https://json.aspsms.com/SendSimpleTextSMS'
self.curl = pycurl.Curl()
self.curl.setopt(pycurl.TCP_KEEPALIVE, 1)
self.curl.setopt(pycurl.URL, self.url)
self.curl.setopt(pycurl.HTTPHEADER, ['Content-Type:application/json'])
self.curl.setopt(pycurl.POST, 1)
def split(self, filename):
""" Returns the path, the name and the suffix of the given path. """
if '/' in filename:
path, name = filename.rsplit('/', 1)
else:
path = ''
name = filename
if '.' in name:
name, suffix = name.split('.', 1)
else:
suffix = ''
return path, name, suffix
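    # For example (sketch):
    #   split('+41764033314.1571822840.745629')
    #   -> ('', '+41764033314', '1571822840.745629')
    # so the suffix after the first dot acts as the sortable timestamp used by
    # message_files() below.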
def message_files(self):
""" Returns a tuple of full paths that need processing.
The file names in the directory usually look like this:
* +41764033314.1571822840.745629
* +41764033314.1571822743.595377
The part before the first dot is the number, the rest is the suffix.
The messages are sorted by suffix, so by default the sorting
happens from oldest to newest message.
"""
files = []
for f in os.scandir(self.path):
            if not f.is_file():
continue
            # we expect the messages to be in E.164 format, e.g. '+41780000000'
if not f.name.startswith('+'):
continue
files.append(f)
files.sort(key=lambda i: self.split(i.name)[-1])
return tuple(os.path.join(self.path, f.name) for f in files)
def send(self, number, content):
code, body = self.send_request({
"UserName": self.username,
"Password": self.password,
"Originator": self.originator,
"Recipients": (number, ),
"MessageText": content,
})
if 400 <= code < 600:
raise RuntimeError(f"{code} calling {self.url}: {body}")
result = json.loads(body)
if result.get('StatusInfo') != 'OK' or result.get('StatusCode') != '1':
raise RuntimeError(f'Sending SMS failed, got: "{result}"')
def send_request(self, parameters):
""" Performes the API request using the given parameters. """
body = BytesIO()
self.curl.setopt(pycurl.WRITEDATA, body)
self.curl.setopt(pycurl.POSTFIELDS, json.dumps(parameters))
self.curl.perform()
code = self.curl.getinfo(pycurl.RESPONSE_CODE)
body.seek(0)
body = body.read().decode('utf-8')
return code, body
def parse(self, filename):
number = self.split(filename)[1].lstrip('+')
if not number.isdigit():
return None, None
with open(filename) as f:
return number, f.read()
def send_messages(self):
for filename in self.message_files():
self.send_message(filename)
def send_message(self, filename):
head, tail = os.path.split(filename)
tmp_filename = os.path.join(head, f'.sending-{tail}')
rejected_filename = os.path.join(head, f'.rejected-{tail}')
# perform a series of operations in an attempt to ensure
# that no two threads/processes send this message
# simultaneously as well as attempting to not generate
# spurious failure messages in the log; a diagram that
# represents these operations is included in a
# comment above this class
try:
# find the age of the tmp file (if it exists)
mtime = os.stat(tmp_filename)[stat.ST_MTIME]
except OSError as e:
if e.errno == errno.ENOENT:
# file does not exist
                # the tmp file could not be stat()ed because it
# doesn't exist, that's fine, keep going
age = None
else:
                # the tmp file could not be stat()ed for some reason
# other than not existing; we'll report the error
raise
else:
age = time.time() - mtime
        # if the tmp file exists, check its age
if age is not None:
try:
if age > MAX_SEND_TIME:
# the tmp file is "too old"; this suggests
                    # that during an attempt to send it, the
# process died; remove the tmp file so we
# can try again
os.remove(tmp_filename)
else:
# the tmp file is "new", so someone else may
# be sending this message, try again later
return
# if we get here, the file existed, but was too
# old, so it was unlinked
except OSError as e:
if e.errno == errno.ENOENT:
# file does not exist
# it looks like someone else removed the tmp
# file, that's fine, we'll try to deliver the
# message again later
return
# now we know that the tmp file doesn't exist, we need to
# "touch" the message before we create the tmp file so the
# mtime will reflect the fact that the file is being
# processed (there is a race here, but it's OK for two or
# more processes to touch the file "simultaneously")
try:
os.utime(filename, None)
except OSError as e:
if e.errno == errno.ENOENT:
# file does not exist
# someone removed the message before we could
# touch it, no need to complain, we'll just keep
# going
return
else:
                # Some other error, propagate it
raise
# creating this hard link will fail if another process is
# also sending this message
try:
os.link(filename, tmp_filename)
except OSError as e:
if e.errno == errno.EEXIST:
# file exists, *nix
# it looks like someone else is sending this
# message too; we'll try again later
return
else:
                # Some other error, propagate it
raise
# read message file and send contents
number, message = self.parse(filename)
if number and message:
self.send(number, message)
else:
log.error(
"Discarding SMS {} due to invalid content/number".format(
filename
)
)
os.link(filename, rejected_filename)
try:
os.remove(filename)
except OSError as e:
if e.errno == errno.ENOENT:
# file does not exist
# someone else unlinked the file; oh well
pass
else:
                # something bad happened, log it
raise
try:
os.remove(tmp_filename)
except OSError as e:
if e.errno == errno.ENOENT:
# file does not exist
# someone else unlinked the file; oh well
pass
else:
# something bad happened, log it
raise
log.info("SMS to {} sent.".format(number))
|
from setuptools import setup
setup(name='python-ecb-daily',
version='0.3',
description='Python ECB Daily Rates Wrapper',
url='https://github.com/fatihsucu/python-ecb-daily',
author='Fatih Sucu',
author_email='fatihsucu0@gmail.com',
license='MIT',
packages=['ecb'],
install_requires=[
"feedparser"
],
zip_safe=False
)
|
from .tf_sensaturban_dataset import SensatUrbanDataset
|
import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="neo_force_scheme",
version="0.0.1",
author="Leonardo Christino",
author_email="christinoleo@dal.ca",
description="NeoForceSceme, an extention of the original ForceScheme with performance improvements",
license="MIT",
keywords="gpu numba forcescheme projection dimenstionality reduction",
url="https://github.com/visml/neo_force_scheme",
packages=['neo_force_scheme'],
long_description=read('README.md'),
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Environment :: GPU :: NVIDIA CUDA",
"Programming Language :: Python",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development :: Libraries :: Python Modules",
"License :: OSI Approved :: MIT License",
],
)
|
from typing import List
from . import movie_list, title_list
from .common import SamuraiListBaseType
class SamuraiContentsList(SamuraiListBaseType):
titles: List[title_list.SamuraiListTitle]
movies: List[movie_list.SamuraiListMovie]
def _read_list(self, xml):
assert xml.tag == 'contents'
self.titles = []
self.movies = []
for content in xml.content:
if hasattr(content, 'title'):
self.titles.append(title_list.SamuraiListTitle._parse(content.title))
elif hasattr(content, 'movie'):
self.movies.append(movie_list.SamuraiListMovie._parse(content.movie))
else:
raise ValueError(content.getchildren()[0].tag)
|
# ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from chaco.api import AbstractOverlay
from numpy import uint8, asarray
from skimage.color import gray2rgb
from skimage.transform import resize
from traits.api import Any
# ============= standard library imports ========================
# ============= local library imports ==========================
class VideoUnderlay(AbstractOverlay):
"""
video only needs to be an object the implements
get_image_data([,size=(w,h)])
returns ndarray
"""
video = Any
_cached_image = None
def overlay(self, component, gc, *args, **kw):
""" """
if self.video:
with gc:
img = self.video.get_image_data()
if img is not None:
x, y, w, h = (
component.x,
component.y,
component.width,
component.height,
)
gc.clip_to_rect(x, y, w, h)
gc.translate_ctm(x, y)
try:
gc.draw_image(
asarray(
resize(img, (int(h), int(w)), preserve_range=True),
dtype=uint8,
)
)
except IndexError:
pass
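# Minimal sketch of an object satisfying the 'video' protocol described in the
# class docstring (a static grey frame; shape and dtype are assumptions made
# purely for illustration):
#
#   from numpy import full, uint8
#
#   class StaticFrame:
#       def get_image_data(self, size=(640, 480)):
#           w, h = size
#           return full((h, w, 3), 128, dtype=uint8)
#
#   underlay = VideoUnderlay(video=StaticFrame())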
# ============= EOF ====================================
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._operations import Operations
from ._resource_pools_operations import ResourcePoolsOperations
from ._vcenters_operations import VCentersOperations
from ._virtual_machines_operations import VirtualMachinesOperations
from ._virtual_machine_templates_operations import VirtualMachineTemplatesOperations
from ._virtual_networks_operations import VirtualNetworksOperations
from ._inventory_items_operations import InventoryItemsOperations
from ._hybrid_identity_metadata_operations import HybridIdentityMetadataOperations
from ._machine_extensions_operations import MachineExtensionsOperations
from ._guest_agent_operations import GuestAgentOperations
__all__ = [
'Operations',
'ResourcePoolsOperations',
'VCentersOperations',
'VirtualMachinesOperations',
'VirtualMachineTemplatesOperations',
'VirtualNetworksOperations',
'InventoryItemsOperations',
'HybridIdentityMetadataOperations',
'MachineExtensionsOperations',
'GuestAgentOperations',
]
|
import sqlite3 as lite
con = lite.connect('JobDetails.db')
cur = con.cursor()
cs = {}
amounts = {}
# for table in ['Reviews']:
# cur.execute("SELECT Country FROM " + table)
# countries = [each[0] for each in cur.fetchall()]
countries = []
for table in ['Jobs', 'ReviewJobs']:
cur.execute("SELECT CountryOfPoster FROM " + table)
countries += [each[0] for each in cur.fetchall()]
# for i in range(len(countries)):
# country = countries[i]
# num = cs.get(country)
# if num is None:
# cs.update({country: 1})
# else:
# cs.update({country: num + 1})
# cur.execute("SELECT Country, ConvertedCurrency FROM Reviews WHERE ConvertedCurrency != '' AND ConvertedCurrency != 'None' AND ConvertedCurrency != 'Unavailable'")
# results = [list(each) for each in cur.fetchall()]
#
# for result in results:
# country = result[0]
# paid = float(result[1])
# num = amounts.get(country)
# if num is None:
# amounts.update({country: paid})
# else:
# amounts.update({country: num + paid})
cur.execute("SELECT CountryOfPoster, ConvertedFinalCost FROM Jobs WHERE ConvertedFinalCost != '' AND ConvertedFinalCost != 'None' AND ConvertedFinalCost != 'Unavailable'")
results = [list(each) for each in cur.fetchall()]
print(len(results))
for result in results:
country = result[0]
paid = float(result[1])
num = amounts.get(country)
if num is None:
amounts.update({country: paid})
else:
amounts.update({country: num + paid})
num = cs.get(country)
if num is None:
cs.update({country: 1})
else:
cs.update({country: num + 1})
cur.execute("SELECT CountryOfPoster, ConvertedFinalCost FROM ReviewJobs WHERE ConvertedFinalCost != '' AND ConvertedFinalCost != 'None' AND ConvertedFinalCost != 'Unavailable'")
results = [list(each) for each in cur.fetchall()]
print(len(results))
for result in results:
country = result[0]
paid = float(result[1])
num = amounts.get(country)
if num is None:
amounts.update({country: paid})
else:
amounts.update({country: num + paid})
num = cs.get(country)
if num is None:
cs.update({country: 1})
else:
cs.update({country: num + 1})
countryNums = sorted(list(cs.values()))[-5:]
uniqueCs = list(cs.keys())
topCountries = []
theCountries = []
for i in range(len(uniqueCs)):
country = uniqueCs[i]
print("Country " + str(i + 1) + "/" + str(len(cs.keys())))
if cs.get(country) in countryNums:
topCountries.append([country + ": " + str(cs.get(country))])
theCountries.append(country)
print("\n\nCountries: " + str(topCountries) + "\n\n")
for country in theCountries:
print(country + ": " + '${0:,.2f}'.format(amounts.get(country)) + ' - Average: $' + '{0:,.2f}'.format(amounts.get(country) / cs.get(country)))
|
#!/usr/bin/env python3
# suspenders.py - keep your pants on
import os.path
import functools
from .util import elements
PANTS_TARGETS = [
'android_binary',
'android_dependency',
'android_library',
'android_resources',
'annotation_processor',
'benchmark',
'confluence',
'contrib_plugin',
'cpp_binary',
'cpp_library',
'create_datasets',
'create_thrift_libraries',
'credentials',
'go_binary',
'go_library',
'go_remote_libraries',
'go_remote_library',
'go_thrift_library',
'hadoop_binary',
'heron_binary',
'idl_jar_thrift_library',
'jar_library',
'java_agent',
'java_antlr_library',
'java_library',
'java_protobuf_library',
'java_ragel_library',
'java_tests',
'java_thrift_library',
'java_thriftstore_dml_library',
'java_wire_library',
'jaxb_library',
'junit_tests',
'jvm_app',
'jvm_binary',
'jvm_prep_command',
'managed_jar_dependencies',
'netrc_credentials',
'node_module',
'node_packer_module',
'node_preinstalled_module',
'node_remote_module',
'node_test',
'page',
'pants_plugin',
'pants_plugin_requirement_library',
'prep_command',
'python_antlr_library',
'python_binary',
'python_library',
'python_requirement_library',
'python_tests',
'python_thrift_library',
'resources',
    'ruby_thrift_library',
    'scala_js_binary',
'scala_js_library',
'scala_library',
'scalac_plugin',
'spindle_thrift_library',
'storm_binary',
'target',
'testbox_tests',
'thrift_jar',
'unpacked_jars',
]
PANTS_GLOBALS = [
'artifact',
'artifactory',
'buildfile_path',
'bundle',
'ConfluencePublish',
'contrib_setup_py',
'developer',
'DirectoryReMapper',
'Duplicate',
'exclude',
'from_target',
'get_buildroot',
'github',
'globs',
'intransitive',
'jar',
'jar_rules',
'license',
'make_lib',
'managed_jar_libraries',
'netrc',
'ossrh',
'pants_library',
'pants_requirement',
'pants_setup_py',
'pants_version',
'provided',
'public',
'python_artifact',
'python_requirement',
'python_requirements',
'repository',
'rglobs',
'scala_artifact',
'scala_jar',
'scm',
'scoped',
'setup_py',
'shading_exclude',
'shading_exclude_package',
'shading_keep',
'shading_keep_package',
'shading_relocate',
'shading_relocate_package',
'shading_zap',
'shading_zap_package',
'Skip',
'testing',
'Wiki',
'wiki_artifact',
'zglobs',
]
ROOT_TARGET_KIND = '<root>'
class Any:
""" Stub object that visually tracks simple operations """
def __init__(self, name):
self.n = name
def __call__(self, *args, **kwargs):
a = [str(a) for a in args] + ['{!r}={!r}'.format(k, v) for k, v in kwargs.items()]
return Any('{}({})'.format(self.n, ', '.join(a)))
def __getattr__(self, attr):
return Any('{}.{}'.format(self.n, attr))
def __add__(self, other):
return Any('{} + {!r}'.format(self.n, other))
def __sub__(self, other):
return Any('{} - {!r}'.format(self.n, other))
def __radd__(self, other):
return Any('{!r} + {}'.format(other, self.n))
def __rsub__(self, other):
return Any('{!r} - {}'.format(other, self.n))
def __str__(self):
return 'Any({})'.format(self.n)
def __iter__(self):
yield Any('{}[]'.format(self.n))
def __repr__(self):
return self.n
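# For example (sketch): repr(Any('jar').org) == 'jar.org', and calling or adding
# to an Any simply records the operation in its name, so BUILD files that use
# unknown globals still parse instead of raising NameError.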
class BuildTarget:
""" Pants build target """
def __init__(self, kind, tid, deps=[], sources=[]):
self.kind = kind
self.tid = tid
self.dependencies = deps
self.sources = sources
def is_toplvl(self):
""" A "top level" build target is one which in not in a */src/* folder. """
return not self.tid.contains("/src/")
class BuildFile:
""" Result of parsing a build file """
def __init__(self, buildpath):
self.buildpath = buildpath
self.targets = {}
class PantsEnv:
""" Fake, fast BUILD file parsing environment. Not threadsafe. A small effort
was made to avoid unnecessary function calls during parse.
TODO: Handle modules. Some BUILD files import extra things. Ugh.
"""
GLOB_FMT = "|{kind}|{pattern}"
@staticmethod
def split_target(target):
""" Split a target in to (path, target) """
path, _, target = target.partition(':')
return (path, target)
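    # For example (sketch):
    #   PantsEnv.split_target('src/java/foo:bar') -> ('src/java/foo', 'bar')
    #   PantsEnv.split_target('src/java/foo')     -> ('src/java/foo', '')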
@staticmethod
def root(path):
""" Find the pants root of a path, if any """
abspath = os.path.abspath(path)
path_pieces = abspath.split('/')
while path_pieces:
potential_root = os.path.join('/', *path_pieces)
if os.path.isfile(os.path.join(potential_root, 'pants.ini')):
return potential_root
path_pieces.pop()
return None
@classmethod
def from_path(cls, path):
root = PantsEnv.root(path)
if not root:
raise ValueError("No pants root found in {}".format(path))
return cls(root)
def __init__(self, root):
self.root = root
self.env = self.make_env(PANTS_TARGETS, PANTS_GLOBALS)
self.cache = {}
self._bf = None # Parsing state
def _glob(self, kind, args, kwargs):
globs = [self.GLOB_FMT.format(kind=kind, pattern=p) for p in args]
excludes = [e + '-' for e in elements(kwargs.get('exclude', []))]
return globs + excludes
def _new_target(self, kind, *args, **kwargs):
""" Generate a new target, assign a name and resolve relative dependency paths """
name_keys = ('name', 'basename')
# Extract name
name = None
for n in name_keys:
if n in kwargs:
name = kwargs[n]
break
if not name:
name = 'NO-NAME-{}'.format(len(self._bf.targets))
# Generate ID
tid = '{}:{}'.format(self._bf.buildpath, name)
# Resolve relative dependencies & sources
deps = [self._bf.buildpath + d if d.startswith(':') else d for d in kwargs.get('dependencies', [])]
for d in deps:
if not d:
print('empty dep in ' + self._bf.buildpath)
srcs = [os.path.join(self._bf.buildpath, s) for s in kwargs.get('sources', [])]
self._bf.targets[tid] = BuildTarget(kind, tid, deps, srcs)
def _parse(self, buildpath):
try:
self._bf = BuildFile(buildpath)
file = os.path.join(self.root, buildpath, 'BUILD')
with open(file, 'r') as f:
compiled = compile(f.read(), file, 'exec')
exec(compiled, self.env.copy())
# Make a root target that depends on all found targets in this file
self._bf.targets[buildpath] = BuildTarget(
kind=ROOT_TARGET_KIND,
tid=buildpath,
deps=list(self._bf.targets.keys())
)
return self._bf
finally:
self._bf = None
def make_env(self, targets, stubs):
env = {}
env.update({t: functools.partial(self._new_target, t) for t in targets})
env.update({s: Any(s) for s in stubs})
env.update({
'globs': lambda *a, **kw: self._glob('g', a, kw),
'rglobs': lambda *a, **kw: self._glob('r', a, kw),
'zglobs': lambda *a, **kw: self._glob('z', a, kw),
'pants_version': lambda: 20,
            'buildfile_path': lambda: self._bf.buildpath,
'get_buildroot': lambda: self.root,
})
return env
def parse(self, buildpath):
if buildpath not in self.cache:
self.cache[buildpath] = self._parse(buildpath)
return self.cache.get(buildpath, None)
def graph(self, buildpaths, depth=2, _graph=None):
""" Generate a mapping of targetId -> target, containing dependencies of at least
`depth`, for the given list of buildpaths
"""
graph = _graph if _graph else {}
to_parse = set()
for b in (self.parse(bp) for bp in buildpaths):
new_targets = b.targets
graph.update(new_targets)
deps = (PantsEnv.split_target(d)[0] for d in elements(t.dependencies for t in new_targets.values()))
[to_parse.add(d) for d in deps if d not in graph]
return graph if depth <= 0 else self.graph(to_parse, depth - 1, graph)
def resolve_glob(self, globstr):
raise NotImplementedError
def flush_cache(self):
self.cache.clear()
if __name__ == '__main__':
import sys
import os
def test(args):
import subprocess
import time
pants = PantsEnv.from_path(os.getcwd())
print('Generating list of all buildfiles... (~30 seconds)')
clk = time.time()
cmd = ["/usr/bin/find", ".", "-not", "-path", "*/\.*", "-name", "BUILD"]
results = subprocess.check_output(cmd, universal_newlines=True, cwd=pants.root)
results = [r.replace('/BUILD', '') for r in results.strip().split('\n')]
print('Found {} buildfiles in {:.1f} seconds. Now parsing'.format(len(results), time.time() - clk))
clk = time.time()
failed = []
ok = []
for bp in results:
try:
ok.append(pants.parse(bp))
except Exception as e:
failed.append((bp, e))
print('Done parsing {} in {:.1f} seconds'.format(len(results), time.time() - clk))
print('Failures')
for f in failed:
print('{}: {}'.format(f[0], f[1]))
print('Successfully parsed {}'.format(len(ok)))
def targets(args):
pants = PantsEnv.from_path(os.getcwd())
for buildpath in args:
for target in sorted(pants.parse(buildpath).targets.keys()):
print(target)
def dependencies(args):
pants = PantsEnv.from_path(os.getcwd())
tid = args[0]
bp, target = PantsEnv.split_target(tid)
depth = int(args[1]) if len(args) > 1 else 2
graph = pants.graph([bp], depth=depth)
deps = elements(target.dependencies for target in graph.values())
deps = set(PantsEnv.split_target(d)[0] for d in deps)
print('\n'.join(deps))
def print_help(args):
print("""
suspenders.py - keep your pants on
commands are test, targets, dependencies
""")
sys.exit(1)
commands = {
'test': test,
'targets': targets,
'deps': dependencies
}
cmd = sys.argv[1]
commands.get(cmd, print_help)(sys.argv[2:])
|
def fibonacci(n):
result = []
x, y = 0, 1
while x < n:
result.append(x)
x, y = y, y + x
return result
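# A quick, hedged usage check (not part of the original snippet): prints every
# Fibonacci number strictly below the given bound.
if __name__ == "__main__":
    print(fibonacci(50))  # [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]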
|
import os
import tensorflow as tf
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing.image import array_to_img, img_to_array, load_img
from keras.preprocessing.image import DirectoryIterator,DataFrameIterator
import numpy as np
class TimeDistributedImageDataGenerator(ImageDataGenerator):
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-6,
rotation_range=0,
width_shift_range=0.,
height_shift_range=0.,
brightness_range=None,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format=None,
validation_split=0.0,
dtype=None,
time_steps = 5):
self.time_steps = time_steps
super().__init__(featurewise_center=featurewise_center,
samplewise_center=samplewise_center,
featurewise_std_normalization=featurewise_std_normalization,
samplewise_std_normalization=samplewise_std_normalization,
zca_whitening=zca_whitening,
zca_epsilon=zca_epsilon,
rotation_range=rotation_range,
width_shift_range=width_shift_range,
height_shift_range=height_shift_range,
brightness_range=brightness_range,
shear_range=shear_range,
zoom_range=zoom_range,
channel_shift_range=channel_shift_range,
fill_mode=fill_mode,
cval=cval,
horizontal_flip=horizontal_flip,
vertical_flip=vertical_flip,
rescale=rescale,
preprocessing_function=preprocessing_function,
data_format=data_format,
validation_split=validation_split,
dtype = dtype)
"""Takes the path to a directory & generates batches of augmented data.
# Arguments
directory: string, path to the target directory.
It should contain one subdirectory per class.
Any PNG, JPG, BMP, PPM or TIF images
inside each of the subdirectories directory tree
will be included in the generator.
See [this script](
https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)
for more details.
target_size: Tuple of integers `(height, width)`,
default: `(256, 256)`.
The dimensions to which all images found will be resized.
color_mode: One of "grayscale", "rgb", "rgba". Default: "rgb".
Whether the images will be converted to
have 1, 3, or 4 channels.
classes: Optional list of class subdirectories
(e.g. `['dogs', 'cats']`). Default: None.
If not provided, the list of classes will be automatically
inferred from the subdirectory names/structure
under `directory`, where each subdirectory will
be treated as a different class
(and the order of the classes, which will map to the label
indices, will be alphanumeric).
The dictionary containing the mapping from class names to class
indices can be obtained via the attribute `class_indices`.
class_mode: One of "categorical", "binary", "sparse",
"input", or None. Default: "categorical".
Determines the type of label arrays that are returned:
- "categorical" will be 2D one-hot encoded labels,
- "binary" will be 1D binary labels,
"sparse" will be 1D integer labels,
- "input" will be images identical
to input images (mainly used to work with autoencoders).
- If None, no labels are returned
(the generator will only yield batches of image data,
which is useful to use with `model.predict_generator()`).
Please note that in case of class_mode None,
the data still needs to reside in a subdirectory
of `directory` for it to work correctly.
batch_size: Size of the batches of data (default: 32).
shuffle: Whether to shuffle the data (default: True)
If set to False, sorts the data in alphanumeric order.
seed: Optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None).
This allows you to optionally specify
a directory to which to save
the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str. Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: One of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
follow_links: Whether to follow symlinks inside
class subdirectories (default: False).
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
interpolation: Interpolation method used to
resample the image if the
target size is different from that of the loaded image.
Supported methods are `"nearest"`, `"bilinear"`,
and `"bicubic"`.
If PIL version 1.1.3 or newer is installed, `"lanczos"` is also
supported. If PIL version 3.4.0 or newer is installed,
`"box"` and `"hamming"` are also supported.
By default, `"nearest"` is used.
# Returns
A `DirectoryIterator` yielding tuples of `(x, y)`
where `x` is a numpy array containing a batch
of images with shape `(batch_size, *target_size, channels)`
and `y` is a numpy array of corresponding labels.
"""
def flow_from_directory(self,
directory,
target_size=(256, 256),
color_mode='rgb',
classes=None,
class_mode='categorical',
batch_size=32,
shuffle=True,
seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
return TimeDistributedDirectoryIterator(
directory,
self,
target_size=target_size,
color_mode=color_mode,
classes=classes,
class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size,
shuffle=shuffle,
seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links,
subset=subset,
interpolation=interpolation
)
def flow_from_dataframe(self,
dataframe,
directory=None,
x_col="filename",
y_col="class",
weight_col=None,
target_size=(256, 256),
color_mode='rgb',
classes=None,
class_mode='categorical',
batch_size=32,
shuffle=True,
seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
subset=None,
interpolation='nearest',
validate_filenames=True,
**kwargs):
return TimeDistributedDataFrameIterator(
dataframe,
directory,
self,
x_col=x_col,
y_col=y_col,
weight_col=weight_col,
target_size=target_size,
color_mode=color_mode,
classes=classes,
class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size,
shuffle=shuffle,
seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
subset=subset,
interpolation=interpolation,
validate_filenames=validate_filenames,
**kwargs
)
class TimeDistributedDataFrameIterator(DataFrameIterator):
def _get_batches_of_transformed_samples(self, index_array):
"""Gets a batch of transformed samples.
# Arguments
index_array: Array of sample indices to include in batch.
# Returns
A batch of transformed samples.
"""
TimeSteps = self.image_data_generator.time_steps
batch_x = np.zeros((len(index_array),) + (TimeSteps,) + self.image_shape, dtype=self.dtype)#KJ
# build batch of image data
# self.filepaths is dynamic, is better to call it once outside the loop
filepaths = self.filepaths
for i, j in enumerate(index_array):
for k in reversed(range(0,TimeSteps)):
try:
img = load_img(filepaths[j-k],
color_mode=self.color_mode,
target_size=self.target_size,
interpolation=self.interpolation)
x = img_to_array(img, data_format=self.data_format)
except:
pass
# Pillow images should be closed after `load_img`,
# but not PIL images.
if hasattr(img, 'close'):
img.close()
if self.image_data_generator:
params = self.image_data_generator.get_random_transform(x.shape)
x = self.image_data_generator.apply_transform(x, params)
x = self.image_data_generator.standardize(x)
batch_x[i][k] = x
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e7),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
# build batch of labels
if self.class_mode == 'input':
batch_y = batch_x.copy()
elif self.class_mode in {'binary', 'sparse'}:
batch_y = np.empty(len(batch_x), dtype=self.dtype)
for i, n_observation in enumerate(index_array):
batch_y[i] = self.classes[n_observation]
elif self.class_mode == 'categorical':
batch_y = np.zeros((len(batch_x), TimeSteps, len(self.class_indices)),
dtype=self.dtype)
for i, n_observation in enumerate(index_array):
for q in reversed(range(0,TimeSteps)):
batch_y[i,q,self.classes[n_observation-q]] = 1.
elif self.class_mode == 'multi_output':
batch_y = [output[index_array] for output in self.labels]
elif self.class_mode == 'raw':
batch_y = self.labels[index_array]
else:
return batch_x
if self.sample_weight is None:
return batch_x, batch_y
else:
return batch_x, batch_y, self.sample_weight[index_array]
class TimeDistributedDirectoryIterator(DirectoryIterator):
def _get_batches_of_transformed_samples(self, index_array):
"""Gets a batch of transformed samples.
# Arguments
index_array: Array of sample indices to include in batch.
# Returns
A batch of transformed samples.
"""
TimeSteps = self.image_data_generator.time_steps
batch_x = np.zeros((len(index_array),) + (TimeSteps,) + self.image_shape, dtype=self.dtype)#KJ
# build batch of image data
# self.filepaths is dynamic, is better to call it once outside the loop
filepaths = self.filepaths
for i, j in enumerate(index_array):
for k in reversed(range(0,TimeSteps)):
try:
img = load_img(filepaths[j-k],
color_mode=self.color_mode,
target_size=self.target_size,
interpolation=self.interpolation)
x = img_to_array(img, data_format=self.data_format)
except:
pass
# Pillow images should be closed after `load_img`,
# but not PIL images.
if hasattr(img, 'close'):
img.close()
if self.image_data_generator:
params = self.image_data_generator.get_random_transform(x.shape)
x = self.image_data_generator.apply_transform(x, params)
x = self.image_data_generator.standardize(x)
batch_x[i][k] = x
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e7),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
# build batch of labels
if self.class_mode == 'input':
batch_y = batch_x.copy()
elif self.class_mode in {'binary', 'sparse'}:
batch_y = np.empty(len(batch_x), dtype=self.dtype)
for i, n_observation in enumerate(index_array):
batch_y[i] = self.classes[n_observation]
elif self.class_mode == 'categorical':
batch_y = np.zeros((len(batch_x), TimeSteps, len(self.class_indices)),
dtype=self.dtype)
for i, n_observation in enumerate(index_array):
for q in reversed(range(0,TimeSteps)):
batch_y[i,q,self.classes[n_observation-q]] = 1.
elif self.class_mode == 'multi_output':
batch_y = [output[index_array] for output in self.labels]
elif self.class_mode == 'raw':
batch_y = self.labels[index_array]
else:
return batch_x
if self.sample_weight is None:
return batch_x, batch_y
else:
return batch_x, batch_y, self.sample_weight[index_array]
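# Hedged usage sketch (the directory layout, image size and time_steps value are
# placeholder assumptions, not values taken from the original code):
#
#   datagen = TimeDistributedImageDataGenerator(rescale=1. / 255, time_steps=5)
#   train_gen = datagen.flow_from_directory(
#       'data/train', target_size=(128, 128), batch_size=8,
#       class_mode='categorical')
#   x, y = next(train_gen)
#   # x.shape -> (8, 5, 128, 128, 3); y.shape -> (8, 5, num_classes)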
|
import itertools
from . import kernel_defs
_kern_base = [kernel_defs.LinKernel, kernel_defs.SEKernel, kernel_defs.PerKernel]
_kern_op = [kernel_defs.SumKernel, kernel_defs.ProdKernel]
_ext_pairs = list(itertools.product(_kern_base, _kern_op))
def replace(old_kernel, new_kernel):
parent = old_kernel.parent
parent.rem_child(old_kernel)
parent.add_child(new_kernel)
def extend(old_kernel, new_kernel, op_rule):
parent = old_kernel.parent
parent.rem_child(old_kernel)
parent.add_child(op_rule([old_kernel, new_kernel]))
def remove(old_kernel):
parent = old_kernel.parent
parent.rem_child(old_kernel)
def mutation_generator(root):
yield root.clone()
for i, root_k in enumerate(root.kernels):
if root_k.is_operator:
continue
# REPLACE
for base_rule in _kern_base:
root_copy = root.clone()
replacement = base_rule()
if root_copy.kernels[i].name != replacement.name:
replace(root_copy.kernels[i], replacement)
else:
continue
yield root_copy
# EXPAND
for (base_rule, op_rule) in _ext_pairs:
root_copy = root.clone()
extend(root_copy.kernels[i], base_rule(), op_rule)
yield root_copy
# REMOVE
root_copy = root.clone()
if root_copy.kernels[i].is_toplevel:
continue
remove(root_copy.kernels[i])
yield root_copy
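# Hedged sketch of how the generator is meant to be consumed (the kernel tree
# API -- clone(), .kernels, .parent, .is_operator, .is_toplevel -- is assumed
# from kernel_defs; 'score_kernel' is a hypothetical evaluation function):
#
#   best = None
#   for candidate in mutation_generator(current_root):
#       score = score_kernel(candidate)
#       if best is None or score > best[0]:
#           best = (score, candidate)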
|
import graphene
from levels.types import Power
from levels.models import PowerModel
from badges.models import BadgeModel
from levels.types import Level, Host
from levels.models import LevelModel, HostModel
from common.fields import CustomMongoengineConnectionField
from pprint import pprint
from inspect import getmembers
class PowerQuery(graphene.ObjectType):
# powers = graphene.Field(Power)
powers = CustomMongoengineConnectionField(Power, minpower = graphene.Int())
# powers = graphene.List('levels.types.Power',
# blockchain=graphene.String(required=True),
# minpower=graphene.Int(),
# first=graphene.Int(),
# last=graphene.Int())
badges = graphene.List('badges.types.Badge', first=graphene.Int(),
last=graphene.Int(),
host=graphene.String()
)
def resolve_badges(self, info, host):
if (host):
qs = BadgeModel.objects(blockchain = self.blockchain, username = self.username, host = host)
else:
qs = BadgeModel.objects(blockchain = self.blockchain, username = self.username)
#TODO find badges and construct
        return qs
def resolve_powers(self, info, args):
minpower = args.get('minpower', 0)
# row = {f'power': {'$gte': minpower}}
# print(row)
qs = PowerModel.objects(__raw__={f'power': {'$gte': minpower}})
# qs = PowerModel.objects()
# qs = qs.filter(
# __raw__={f'power': {'$gte': power}}
# )
        return qs
class HostQuery(graphene.ObjectType):
hosts = CustomMongoengineConnectionField(Host)
def resolve_hosts(self, info, args):
qs = HostModel.objects()
        return qs
class LevelQuery(graphene.ObjectType):
level = graphene.Field(Level, username=graphene.String(), blockchain=graphene.String(required = True))
levels = CustomMongoengineConnectionField(Level)
# powers = CustomMongoengineConnectionField(Power)
# def resolve_powers(self, info, args):
# qs = PowerModel.objects()
# return qs;
# lev = graphene.Int(0)
# def resolve_accounts(self, info, args):
# qs = AccountModel.objects()
# meta = args.get('meta', {})
# not_null = meta.get('not_null')
# if not_null:
# qs = qs.filter(
# __raw__={f'json_metadata.{not_null}': {'$exists': True}}
# )
# return qs
# def resolve_lev(self, info, args):
# if self.lev:
# return self.lev + 1
# else:
# return 1
def resolve_level(self, info, username, blockchain):
return LevelModel.objects(username=username, blockchain = blockchain).first()
def resolve_levels(self, info, args):
qs = LevelModel.objects()
# sort_type = args.get('sorttype', 'is1min')
# qs = qs.filter(is1min = True)
# qs = qs.filter(
# __raw__={f'cost': {'$exists': True}}
# )
# print(qs)
# for key in qs:
# pprint(getmembers(key))
# # qs = qs.filter(
# # __raw__={f'json_metadata.{not_null}': {'$exists': True}}
# # )
        return qs
# def resolve_account_authority(self, info, account):
# return AccountAuthorityModel.objects(account=account).first()
|
"""
# Definition for a Node.
class Node:
def __init__(self, val: int = 0, left: 'Node' = None, right: 'Node' = None, next: 'Node' = None):
self.val = val
self.left = left
self.right = right
self.next = next
"""
class Solution:
def connect(self, root):
def recursive_connect(node):
left, right = node.left, node.right
if left and right:
left.next = right
elif left is None and right is None:
# leaf, return.
return
# link 'to_link' to its next node in tree or
# None (i.e leave as is) if node.next is None
if uncle := node.next:
# ugly or cute?
(right or left).next = uncle.left or uncle.right
# visit children.
if left:
recursive_connect(left)
if right:
recursive_connect(right)
# recurse if root.
if root:
recursive_connect(root)
return root
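# Hedged local sketch (the Node class mirrors the commented-out LeetCode
# definition above and is an assumption, not part of the original submission):
#
#   class Node:
#       def __init__(self, val=0, left=None, right=None, next=None):
#           self.val, self.left, self.right, self.next = val, left, right, next
#
#   root = Node(1, Node(2, Node(4), Node(5)), Node(3, Node(6), Node(7)))
#   Solution().connect(root)
#   assert root.left.next is root.right                # 2 -> 3
#   assert root.left.right.next is root.right.left     # 5 -> 6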
|
import discord
import asyncio
import os
import json
import requests
from discord.ext import commands
from discord.ext.commands import has_permissions
import random
green = discord.Color.green()
class helpp(commands.Cog):
def __init__(self,client):
self.client=client
@commands.command()
async def help(self,ctx):
        embed = discord.Embed(title="Welcome to promerator", description="Here are the commands to use the bot. Make sure to type ';;' before each command. Example: ;;help", color=green)
embed.add_field(name="clear", value="Deletes the amount of messages you specify limit is 500 messages. To use this type: clear 20(You can replace 20 with whatver number less then 500.) (Only works if you have the Manage Messages role." , inline=False)
embed.add_field(name="Source", value="Source code for this bot is shown here." , inline=False)
embed.add_field(name="wiki_summary", value="Shows the first couple sentences of a Wikipedia article about your term, if nothing returns, it is likely that the term was not found as an article and try a similar word such as 'gaming' => 'video game' aliase wiki can be used.", inline=True)
embed.add_field(name="help_contact", value="How you can contact me for any support." , inline=True)
embed.add_field(name="search", value="Uses the WolframAplpha API to get the asnwer to your question that coem after. Example: ;;search popualation of Russia, or ;;search 5x+10=30" , inline=True)
embed.add_field(name="ping_check", value="Check latency of the bot" , inline=True)
embed.add_field(name="setprefix", value="Change the prefix that calls the bot." , inline=True)
embed.add_field(name="eight_ball, coin flip", value="Flip a coin or use an 8 ball to determine your decisions! (That was a joke, the 8ball decisions are purely random.)" , inline=True)
embed.add_field(name="quote", value="Returns a random quote." , inline=True)
embed.add_field(name="run,code,evaL", value="executes code in Jdoodle API and sends back to user, . EX: ;;run python3 . print('hello World') supported languages include Python3, C and C++(GCC 11.1.0),Ruby,Go,Scala,csharp(mono 4.2.2), Swift,Rust 1.10.0, And all version 4 languages on https://docs.jdoodle.com/compiler-api/compiler-api. Please type ;;help_code for details" , inline=False)
embed.add_field(name="latency",value=f'{round(self.client.latency * 1000)}ms')
await ctx.send(embed=embed)
@commands.command()
async def ping_check(self,ctx):
embed=discord.Embed(title="My ping is ",description=f'{round(self.client.latency * 1000)}ms',color=green)
await ctx.send(embed=embed)
@commands.command()
async def source(self,ctx):
embed=discord.Embed(title="Source", url="https://github.com/zt07/ZT-s-Music", color=green)
await ctx.send(embed=embed)
    @commands.command(brief="test command", description="test commanddesc")
async def test(self,ctx):
embed= discord.Embed(title=f"Check!", color = green)
await ctx.send(embed=embed)
@commands.command()
async def help_contact(self,ctx):
        embed= discord.Embed(title="Help Contact:", description="For any help you can dm me at zut0_7 on Instagram, or email me at zaheeb072@gmial.com", color = green)
await ctx.send(embed=embed)
def setup(client):
client.add_cog(helpp(client))
|
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="PyRestart",
version="1.0.0",
author="MXPSQL",
author_email="2000onetechguy@gmail.com",
description="Restart module",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/MXP2095onetechguy/PyRestart",
project_urls={
"Bug Tracker": "https://github.com/MXP2095onetechguy/PyRestartissues",
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
package_dir={"": "src"},
packages=setuptools.find_packages(where="src"),
python_requires=">=3.6",
)
|