# Source: mrcnn/model.py (ashvegeta/Mask-R-CNN, MIT License)
"""
Mask R-CNN
The main Mask R-CNN model implementation.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
"""
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import datetime
import re
import math
from collections import OrderedDict
import multiprocessing
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
import tensorflow.keras.backend as K
import tensorflow.keras.layers as KL
import tensorflow.keras.layers as KE
import tensorflow.keras.utils as KU
from tensorflow.python.eager import context
import tensorflow.keras.models as KM
from mrcnn import utils
# Requires TensorFlow 2.0+
from distutils.version import LooseVersion
assert LooseVersion(tf.__version__) >= LooseVersion("2.0")
tf.compat.v1.disable_eager_execution()
############################################################
# Utility Functions
############################################################
def log(text, array=None):
"""Prints a text message. And, optionally, if a Numpy array is provided it
prints it's shape, min, and max values.
"""
if array is not None:
text = text.ljust(25)
text += ("shape: {:20} ".format(str(array.shape)))
if array.size:
text += ("min: {:10.5f} max: {:10.5f}".format(array.min(),array.max()))
else:
text += ("min: {:10} max: {:10}".format("",""))
text += " {}".format(array.dtype)
print(text)
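# Usage sketch for log() (illustrative, not part of the original module):
#
#   log("gt_boxes", np.array([[10, 20, 30, 40]]))
#   # prints roughly: gt_boxes  shape: (1, 4)  min: 10.00000  max: 40.00000  int64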
class BatchNorm(KL.BatchNormalization):
"""Extends the Keras BatchNormalization class to allow a central place
to make changes if needed.
Batch normalization has a negative effect on training if batches are small
so this layer is often frozen (via setting in Config class) and functions
as linear layer.
"""
def call(self, inputs, training=None):
"""
Note about training values:
None: Train BN layers. This is the normal mode
False: Freeze BN layers. Good when batch size is small
True: (don't use). Set layer in training mode even when making inferences
"""
return super(self.__class__, self).call(inputs, training=training)
def compute_backbone_shapes(config, image_shape):
"""Computes the width and height of each stage of the backbone network.
Returns:
[N, (height, width)]. Where N is the number of stages
"""
if callable(config.BACKBONE):
return config.COMPUTE_BACKBONE_SHAPE(image_shape)
# Currently supports ResNet only
assert config.BACKBONE in ["resnet50", "resnet101"]
return np.array(
[[int(math.ceil(image_shape[0] / stride)),
int(math.ceil(image_shape[1] / stride))]
for stride in config.BACKBONE_STRIDES])
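# Example (illustrative): with the default BACKBONE_STRIDES of [4, 8, 16, 32, 64]
# and a 1024x1024 input image, compute_backbone_shapes() returns
# [[256, 256], [128, 128], [64, 64], [32, 32], [16, 16]], i.e. the spatial sizes
# of the P2 through P6 feature maps.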
############################################################
# Resnet Graph
############################################################
# Code adopted from:
# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py
def identity_block(input_tensor, kernel_size, filters, stage, block,
use_bias=True, train_bn=True):
"""The identity_block is the block that has no conv layer at shortcut
# Arguments
input_tensor: input tensor
kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the nb_filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
use_bias: Boolean. To use or not use a bias in conv layers.
train_bn: Boolean. Train or freeze Batch Norm layers
"""
nb_filter1, nb_filter2, nb_filter3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',
use_bias=use_bias)(input_tensor)
x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
name=conv_name_base + '2b', use_bias=use_bias)(x)
x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',
use_bias=use_bias)(x)
x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)
x = KL.Add()([x, input_tensor])
x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
return x
def conv_block(input_tensor, kernel_size, filters, stage, block,
strides=(2, 2), use_bias=True, train_bn=True):
"""conv_block is the block that has a conv layer at shortcut
# Arguments
input_tensor: input tensor
kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the nb_filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
use_bias: Boolean. To use or not use a bias in conv layers.
train_bn: Boolean. Train or freeze Batch Norm layers
    Note that from stage 3, the first conv layer at the main path uses strides=(2, 2),
    and the shortcut should use strides=(2, 2) as well.
"""
nb_filter1, nb_filter2, nb_filter3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = KL.Conv2D(nb_filter1, (1, 1), strides=strides,
name=conv_name_base + '2a', use_bias=use_bias)(input_tensor)
x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
name=conv_name_base + '2b', use_bias=use_bias)(x)
x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base +
'2c', use_bias=use_bias)(x)
x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)
shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides,
name=conv_name_base + '1', use_bias=use_bias)(input_tensor)
shortcut = BatchNorm(name=bn_name_base + '1')(shortcut, training=train_bn)
x = KL.Add()([x, shortcut])
x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
return x
def resnet_graph(input_image, architecture, stage5=False, train_bn=True):
"""Build a ResNet graph.
architecture: Can be resnet50 or resnet101
stage5: Boolean. If False, stage5 of the network is not created
train_bn: Boolean. Train or freeze Batch Norm layers
"""
assert architecture in ["resnet50", "resnet101"]
# Stage 1
x = KL.ZeroPadding2D((3, 3))(input_image)
x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)
x = BatchNorm(name='bn_conv1')(x, training=train_bn)
x = KL.Activation('relu')(x)
C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
# Stage 2
x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), train_bn=train_bn)
x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', train_bn=train_bn)
C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', train_bn=train_bn)
# Stage 3
x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', train_bn=train_bn)
x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', train_bn=train_bn)
x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', train_bn=train_bn)
C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', train_bn=train_bn)
# Stage 4
x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', train_bn=train_bn)
block_count = {"resnet50": 5, "resnet101": 22}[architecture]
for i in range(block_count):
x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i), train_bn=train_bn)
C4 = x
# Stage 5
if stage5:
x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', train_bn=train_bn)
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', train_bn=train_bn)
C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', train_bn=train_bn)
else:
C5 = None
return [C1, C2, C3, C4, C5]
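# Usage sketch (illustrative, not part of the original module): building the
# backbone on a fixed-size input and inspecting the stage outputs.
#
#   input_image = KL.Input(shape=[1024, 1024, 3])
#   C1, C2, C3, C4, C5 = resnet_graph(input_image, "resnet101", stage5=True, train_bn=False)
#   # C2..C5 have strides 4, 8, 16 and 32 relative to the input, i.e. spatial
#   # sizes 256, 128, 64 and 32 for a 1024x1024 image.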
############################################################
# Proposal Layer
############################################################
def apply_box_deltas_graph(boxes, deltas):
"""Applies the given deltas to the given boxes.
boxes: [N, (y1, x1, y2, x2)] boxes to update
deltas: [N, (dy, dx, log(dh), log(dw))] refinements to apply
"""
# Convert to y, x, h, w
height = boxes[:, 2] - boxes[:, 0]
width = boxes[:, 3] - boxes[:, 1]
center_y = boxes[:, 0] + 0.5 * height
center_x = boxes[:, 1] + 0.5 * width
# Apply deltas
center_y += deltas[:, 0] * height
center_x += deltas[:, 1] * width
height *= tf.exp(deltas[:, 2])
width *= tf.exp(deltas[:, 3])
# Convert back to y1, x1, y2, x2
y1 = center_y - 0.5 * height
x1 = center_x - 0.5 * width
y2 = y1 + height
x2 = x1 + width
result = tf.stack([y1, x1, y2, x2], axis=1, name="apply_box_deltas_out")
return result
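# Worked example (illustrative): a box [0.1, 0.1, 0.5, 0.5] has height = width = 0.4
# and center (0.3, 0.3). Deltas [0.1, 0.0, 0.0, 0.0] shift the center to
# y = 0.3 + 0.1 * 0.4 = 0.34 and leave the size unchanged, giving [0.14, 0.1, 0.54, 0.5].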
def clip_boxes_graph(boxes, window):
"""
boxes: [N, (y1, x1, y2, x2)]
window: [4] in the form y1, x1, y2, x2
"""
# Split
wy1, wx1, wy2, wx2 = tf.split(window, 4)
y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)
# Clip
y1 = tf.maximum(tf.minimum(y1, wy2), wy1)
x1 = tf.maximum(tf.minimum(x1, wx2), wx1)
y2 = tf.maximum(tf.minimum(y2, wy2), wy1)
x2 = tf.maximum(tf.minimum(x2, wx2), wx1)
clipped = tf.concat([y1, x1, y2, x2], axis=1, name="clipped_boxes")
clipped.set_shape((clipped.shape[0], 4))
return clipped
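# Example (illustrative): clipping to the unit window used by ProposalLayer below,
#
#   clip_boxes_graph(tf.constant([[-0.1, 0.2, 1.3, 0.8]]),
#                    tf.constant([0.0, 0.0, 1.0, 1.0]))
#   # -> [[0.0, 0.2, 1.0, 0.8]]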
class ProposalLayer(KE.Layer):
"""Receives anchor scores and selects a subset to pass as proposals
to the second stage. Filtering is done based on anchor scores and
non-max suppression to remove overlaps. It also applies bounding
box refinement deltas to anchors.
Inputs:
rpn_probs: [batch, num_anchors, (bg prob, fg prob)]
rpn_bbox: [batch, num_anchors, (dy, dx, log(dh), log(dw))]
anchors: [batch, num_anchors, (y1, x1, y2, x2)] anchors in normalized coordinates
Returns:
Proposals in normalized coordinates [batch, rois, (y1, x1, y2, x2)]
"""
def __init__(self, proposal_count, nms_threshold, config=None, **kwargs):
super(ProposalLayer, self).__init__(**kwargs)
self.config = config
self.proposal_count = proposal_count
self.nms_threshold = nms_threshold
def get_config(self):
config = super(ProposalLayer, self).get_config()
config["config"] = self.config.to_dict()
config["proposal_count"] = self.proposal_count
config["nms_threshold"] = self.nms_threshold
return config
def call(self, inputs):
# Box Scores. Use the foreground class confidence. [Batch, num_rois, 1]
scores = inputs[0][:, :, 1]
# Box deltas [batch, num_rois, 4]
deltas = inputs[1]
deltas = deltas * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4])
# Anchors
anchors = inputs[2]
# Improve performance by trimming to top anchors by score
# and doing the rest on the smaller subset.
pre_nms_limit = tf.minimum(self.config.PRE_NMS_LIMIT, tf.shape(input=anchors)[1])
ix = tf.nn.top_k(scores, pre_nms_limit, sorted=True,
name="top_anchors").indices
scores = utils.batch_slice([scores, ix], lambda x, y: tf.gather(x, y),
self.config.IMAGES_PER_GPU)
deltas = utils.batch_slice([deltas, ix], lambda x, y: tf.gather(x, y),
self.config.IMAGES_PER_GPU)
pre_nms_anchors = utils.batch_slice([anchors, ix], lambda a, x: tf.gather(a, x),
self.config.IMAGES_PER_GPU,
names=["pre_nms_anchors"])
# Apply deltas to anchors to get refined anchors.
# [batch, N, (y1, x1, y2, x2)]
boxes = utils.batch_slice([pre_nms_anchors, deltas],
lambda x, y: apply_box_deltas_graph(x, y),
self.config.IMAGES_PER_GPU,
names=["refined_anchors"])
# Clip to image boundaries. Since we're in normalized coordinates,
# clip to 0..1 range. [batch, N, (y1, x1, y2, x2)]
window = np.array([0, 0, 1, 1], dtype=np.float32)
boxes = utils.batch_slice(boxes,
lambda x: clip_boxes_graph(x, window),
self.config.IMAGES_PER_GPU,
names=["refined_anchors_clipped"])
# Filter out small boxes
# According to Xinlei Chen's paper, this reduces detection accuracy
# for small objects, so we're skipping it.
# Non-max suppression
def nms(boxes, scores):
indices = tf.image.non_max_suppression(
boxes, scores, self.proposal_count,
self.nms_threshold, name="rpn_non_max_suppression")
proposals = tf.gather(boxes, indices)
# Pad if needed
padding = tf.maximum(self.proposal_count - tf.shape(input=proposals)[0], 0)
proposals = tf.pad(tensor=proposals, paddings=[(0, padding), (0, 0)])
return proposals
proposals = utils.batch_slice([boxes, scores], nms,
self.config.IMAGES_PER_GPU)
if not context.executing_eagerly():
# Infer the static output shape:
out_shape = self.compute_output_shape(None)
proposals.set_shape(out_shape)
return proposals
def compute_output_shape(self, input_shape):
return None, self.proposal_count, 4
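# Usage sketch (illustrative; this mirrors how the layer is typically wired into
# the full model, with the values taken from the config):
#
#   rpn_rois = ProposalLayer(proposal_count=config.POST_NMS_ROIS_TRAINING,
#                            nms_threshold=config.RPN_NMS_THRESHOLD,
#                            config=config)([rpn_probs, rpn_bbox, anchors])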
############################################################
# ROIAlign Layer
############################################################
def log2_graph(x):
"""Implementation of Log2. TF doesn't have a native implementation."""
return tf.math.log(x) / tf.math.log(2.0)
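# Example (illustrative): log2_graph(tf.constant(8.0)) evaluates to 3.0.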
class PyramidROIAlign(KE.Layer):
"""Implements ROI Pooling on multiple levels of the feature pyramid.
Params:
- pool_shape: [pool_height, pool_width] of the output pooled regions. Usually [7, 7]
Inputs:
- boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized
coordinates. Possibly padded with zeros if not enough
boxes to fill the array.
- image_meta: [batch, (meta data)] Image details. See compose_image_meta()
- feature_maps: List of feature maps from different levels of the pyramid.
Each is [batch, height, width, channels]
Output:
Pooled regions in the shape: [batch, num_boxes, pool_height, pool_width, channels].
    The width and height are those specified in pool_shape in the layer
constructor.
"""
def __init__(self, pool_shape, **kwargs):
super(PyramidROIAlign, self).__init__(**kwargs)
self.pool_shape = tuple(pool_shape)
def get_config(self):
config = super(PyramidROIAlign, self).get_config()
config['pool_shape'] = self.pool_shape
return config
def call(self, inputs):
# Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords
boxes = inputs[0]
# Image meta
# Holds details about the image. See compose_image_meta()
image_meta = inputs[1]
# Feature Maps. List of feature maps from different level of the
# feature pyramid. Each is [batch, height, width, channels]
feature_maps = inputs[2:]
# Assign each ROI to a level in the pyramid based on the ROI area.
y1, x1, y2, x2 = tf.split(boxes, 4, axis=2)
h = y2 - y1
w = x2 - x1
# Use shape of first image. Images in a batch must have the same size.
image_shape = parse_image_meta_graph(image_meta)['image_shape'][0]
# Equation 1 in the Feature Pyramid Networks paper. Account for
# the fact that our coordinates are normalized here.
# e.g. a 224x224 ROI (in pixels) maps to P4
image_area = tf.cast(image_shape[0] * image_shape[1], tf.float32)
roi_level = log2_graph(tf.sqrt(h * w) / (224.0 / tf.sqrt(image_area)))
roi_level = tf.minimum(5, tf.maximum(
2, 4 + tf.cast(tf.round(roi_level), tf.int32)))
roi_level = tf.squeeze(roi_level, 2)
# Loop through levels and apply ROI pooling to each. P2 to P5.
pooled = []
box_to_level = []
for i, level in enumerate(range(2, 6)):
ix = tf.compat.v1.where(tf.equal(roi_level, level))
level_boxes = tf.gather_nd(boxes, ix)
# Box indices for crop_and_resize.
box_indices = tf.cast(ix[:, 0], tf.int32)
# Keep track of which box is mapped to which level
box_to_level.append(ix)
            # Stop gradient propagation to ROI proposals
level_boxes = tf.stop_gradient(level_boxes)
box_indices = tf.stop_gradient(box_indices)
# Crop and Resize
# From Mask R-CNN paper: "We sample four regular locations, so
# that we can evaluate either max or average pooling. In fact,
# interpolating only a single value at each bin center (without
# pooling) is nearly as effective."
#
# Here we use the simplified approach of a single value per bin,
# which is how it's done in tf.crop_and_resize()
# Result: [batch * num_boxes, pool_height, pool_width, channels]
pooled.append(tf.image.crop_and_resize(
feature_maps[i], level_boxes, box_indices, self.pool_shape,
method="bilinear"))
# Pack pooled features into one tensor
pooled = tf.concat(pooled, axis=0)
# Pack box_to_level mapping into one array and add another
# column representing the order of pooled boxes
box_to_level = tf.concat(box_to_level, axis=0)
box_range = tf.expand_dims(tf.range(tf.shape(input=box_to_level)[0]), 1)
box_to_level = tf.concat([tf.cast(box_to_level, tf.int32), box_range],
axis=1)
# Rearrange pooled features to match the order of the original boxes
# Sort box_to_level by batch then box index
# TF doesn't have a way to sort by two columns, so merge them and sort.
sorting_tensor = box_to_level[:, 0] * 100000 + box_to_level[:, 1]
ix = tf.nn.top_k(sorting_tensor, k=tf.shape(
input=box_to_level)[0]).indices[::-1]
ix = tf.gather(box_to_level[:, 2], ix)
pooled = tf.gather(pooled, ix)
# Re-add the batch dimension
shape = tf.concat([tf.shape(input=boxes)[:2], tf.shape(input=pooled)[1:]], axis=0)
pooled = tf.reshape(pooled, shape)
return pooled
def compute_output_shape(self, input_shape):
return input_shape[0][:2] + self.pool_shape + (input_shape[2][-1], )
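# Worked example for the level assignment in call() above (illustrative): for a
# 1024x1024 image, 224 / sqrt(image_area) = 224 / 1024, so a ROI whose normalized
# sqrt(h * w) equals 224 / 1024 (a 224x224 ROI in pixels) gives log2(1) = 0 and
# roi_level = 4 + 0 = 4, i.e. it is pooled from P4. Larger ROIs map to P5 and
# smaller ones to P3/P2, clipped to the [2, 5] range.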
############################################################
# Detection Target Layer
############################################################
def overlaps_graph(boxes1, boxes2):
"""Computes IoU overlaps between two sets of boxes.
boxes1, boxes2: [N, (y1, x1, y2, x2)].
"""
# 1. Tile boxes2 and repeat boxes1. This allows us to compare
# every boxes1 against every boxes2 without loops.
# TF doesn't have an equivalent to np.repeat() so simulate it
# using tf.tile() and tf.reshape.
b1 = tf.reshape(tf.tile(tf.expand_dims(boxes1, 1),
[1, 1, tf.shape(input=boxes2)[0]]), [-1, 4])
b2 = tf.tile(boxes2, [tf.shape(input=boxes1)[0], 1])
# 2. Compute intersections
b1_y1, b1_x1, b1_y2, b1_x2 = tf.split(b1, 4, axis=1)
b2_y1, b2_x1, b2_y2, b2_x2 = tf.split(b2, 4, axis=1)
y1 = tf.maximum(b1_y1, b2_y1)
x1 = tf.maximum(b1_x1, b2_x1)
y2 = tf.minimum(b1_y2, b2_y2)
x2 = tf.minimum(b1_x2, b2_x2)
intersection = tf.maximum(x2 - x1, 0) * tf.maximum(y2 - y1, 0)
# 3. Compute unions
b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)
b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)
union = b1_area + b2_area - intersection
# 4. Compute IoU and reshape to [boxes1, boxes2]
iou = intersection / union
overlaps = tf.reshape(iou, [tf.shape(input=boxes1)[0], tf.shape(input=boxes2)[0]])
return overlaps
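# Example (illustrative):
#
#   boxes1 = tf.constant([[0.0, 0.0, 1.0, 1.0]])
#   boxes2 = tf.constant([[0.0, 0.0, 1.0, 1.0], [0.5, 0.5, 1.0, 1.0]])
#   overlaps_graph(boxes1, boxes2)  # -> [[1.0, 0.25]]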
def detection_targets_graph(proposals, gt_class_ids, gt_boxes, gt_masks, config):
"""Generates detection targets for one image. Subsamples proposals and
generates target class IDs, bounding box deltas, and masks for each.
Inputs:
proposals: [POST_NMS_ROIS_TRAINING, (y1, x1, y2, x2)] in normalized coordinates. Might
be zero padded if there are not enough proposals.
gt_class_ids: [MAX_GT_INSTANCES] int class IDs
gt_boxes: [MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized coordinates.
gt_masks: [height, width, MAX_GT_INSTANCES] of boolean type.
Returns: Target ROIs and corresponding class IDs, bounding box shifts,
and masks.
rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized coordinates
class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs. Zero padded.
deltas: [TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw))]
masks: [TRAIN_ROIS_PER_IMAGE, height, width]. Masks cropped to bbox
boundaries and resized to neural network output size.
Note: Returned arrays might be zero padded if not enough target ROIs.
"""
# Assertions
asserts = [
tf.Assert(tf.greater(tf.shape(input=proposals)[0], 0), [proposals],
name="roi_assertion"),
]
with tf.control_dependencies(asserts):
proposals = tf.identity(proposals)
# Remove zero padding
proposals, _ = trim_zeros_graph(proposals, name="trim_proposals")
gt_boxes, non_zeros = trim_zeros_graph(gt_boxes, name="trim_gt_boxes")
gt_class_ids = tf.boolean_mask(tensor=gt_class_ids, mask=non_zeros,
name="trim_gt_class_ids")
gt_masks = tf.gather(gt_masks, tf.compat.v1.where(non_zeros)[:, 0], axis=2,
name="trim_gt_masks")
# Handle COCO crowds
# A crowd box in COCO is a bounding box around several instances. Exclude
# them from training. A crowd box is given a negative class ID.
crowd_ix = tf.compat.v1.where(gt_class_ids < 0)[:, 0]
non_crowd_ix = tf.compat.v1.where(gt_class_ids > 0)[:, 0]
crowd_boxes = tf.gather(gt_boxes, crowd_ix)
gt_class_ids = tf.gather(gt_class_ids, non_crowd_ix)
gt_boxes = tf.gather(gt_boxes, non_crowd_ix)
gt_masks = tf.gather(gt_masks, non_crowd_ix, axis=2)
# Compute overlaps matrix [proposals, gt_boxes]
overlaps = overlaps_graph(proposals, gt_boxes)
# Compute overlaps with crowd boxes [proposals, crowd_boxes]
crowd_overlaps = overlaps_graph(proposals, crowd_boxes)
crowd_iou_max = tf.reduce_max(input_tensor=crowd_overlaps, axis=1)
no_crowd_bool = (crowd_iou_max < 0.001)
# Determine positive and negative ROIs
roi_iou_max = tf.reduce_max(input_tensor=overlaps, axis=1)
# 1. Positive ROIs are those with >= 0.5 IoU with a GT box
positive_roi_bool = (roi_iou_max >= 0.5)
positive_indices = tf.compat.v1.where(positive_roi_bool)[:, 0]
# 2. Negative ROIs are those with < 0.5 with every GT box. Skip crowds.
negative_indices = tf.compat.v1.where(tf.logical_and(roi_iou_max < 0.5, no_crowd_bool))[:, 0]
# Subsample ROIs. Aim for 33% positive
# Positive ROIs
positive_count = int(config.TRAIN_ROIS_PER_IMAGE *
config.ROI_POSITIVE_RATIO)
positive_indices = tf.random.shuffle(positive_indices)[:positive_count]
positive_count = tf.shape(input=positive_indices)[0]
# Negative ROIs. Add enough to maintain positive:negative ratio.
r = 1.0 / config.ROI_POSITIVE_RATIO
negative_count = tf.cast(r * tf.cast(positive_count, tf.float32), tf.int32) - positive_count
negative_indices = tf.random.shuffle(negative_indices)[:negative_count]
# Gather selected ROIs
positive_rois = tf.gather(proposals, positive_indices)
negative_rois = tf.gather(proposals, negative_indices)
# Assign positive ROIs to GT boxes.
positive_overlaps = tf.gather(overlaps, positive_indices)
roi_gt_box_assignment = tf.cond(
pred=tf.greater(tf.shape(input=positive_overlaps)[1], 0),
true_fn=lambda: tf.argmax(input=positive_overlaps, axis=1),
false_fn=lambda: tf.cast(tf.constant([]), tf.int64)
)
roi_gt_boxes = tf.gather(gt_boxes, roi_gt_box_assignment)
roi_gt_class_ids = tf.gather(gt_class_ids, roi_gt_box_assignment)
# Compute bbox refinement for positive ROIs
deltas = utils.box_refinement_graph(positive_rois, roi_gt_boxes)
deltas /= config.BBOX_STD_DEV
# Assign positive ROIs to GT masks
# Permute masks to [N, height, width, 1]
transposed_masks = tf.expand_dims(tf.transpose(a=gt_masks, perm=[2, 0, 1]), -1)
# Pick the right mask for each ROI
roi_masks = tf.gather(transposed_masks, roi_gt_box_assignment)
# Compute mask targets
boxes = positive_rois
if config.USE_MINI_MASK:
# Transform ROI coordinates from normalized image space
# to normalized mini-mask space.
y1, x1, y2, x2 = tf.split(positive_rois, 4, axis=1)
gt_y1, gt_x1, gt_y2, gt_x2 = tf.split(roi_gt_boxes, 4, axis=1)
gt_h = gt_y2 - gt_y1
gt_w = gt_x2 - gt_x1
y1 = (y1 - gt_y1) / gt_h
x1 = (x1 - gt_x1) / gt_w
y2 = (y2 - gt_y1) / gt_h
x2 = (x2 - gt_x1) / gt_w
boxes = tf.concat([y1, x1, y2, x2], 1)
box_ids = tf.range(0, tf.shape(input=roi_masks)[0])
masks = tf.image.crop_and_resize(tf.cast(roi_masks, tf.float32), boxes,
box_ids,
config.MASK_SHAPE)
# Remove the extra dimension from masks.
masks = tf.squeeze(masks, axis=3)
# Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with
# binary cross entropy loss.
masks = tf.round(masks)
# Append negative ROIs and pad bbox deltas and masks that
# are not used for negative ROIs with zeros.
rois = tf.concat([positive_rois, negative_rois], axis=0)
N = tf.shape(input=negative_rois)[0]
P = tf.maximum(config.TRAIN_ROIS_PER_IMAGE - tf.shape(input=rois)[0], 0)
rois = tf.pad(tensor=rois, paddings=[(0, P), (0, 0)])
roi_gt_boxes = tf.pad(tensor=roi_gt_boxes, paddings=[(0, N + P), (0, 0)])
roi_gt_class_ids = tf.pad(tensor=roi_gt_class_ids, paddings=[(0, N + P)])
deltas = tf.pad(tensor=deltas, paddings=[(0, N + P), (0, 0)])
masks = tf.pad(tensor=masks, paddings=[[0, N + P], (0, 0), (0, 0)])
return rois, roi_gt_class_ids, deltas, masks
class DetectionTargetLayer(KE.Layer):
"""Subsamples proposals and generates target box refinement, class_ids,
and masks for each.
Inputs:
proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates. Might
be zero padded if there are not enough proposals.
gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs.
gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized
coordinates.
gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type
Returns: Target ROIs and corresponding class IDs, bounding box shifts,
and masks.
rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized
coordinates
target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
    target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw))]
target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width]
Masks cropped to bbox boundaries and resized to neural
network output size.
Note: Returned arrays might be zero padded if not enough target ROIs.
"""
def __init__(self, config, **kwargs):
super(DetectionTargetLayer, self).__init__(**kwargs)
self.config = config
def get_config(self):
config = super(DetectionTargetLayer, self).get_config()
config["config"] = self.config.to_dict()
return config
def call(self, inputs):
proposals = inputs[0]
gt_class_ids = inputs[1]
gt_boxes = inputs[2]
gt_masks = inputs[3]
# Slice the batch and run a graph for each slice
# TODO: Rename target_bbox to target_deltas for clarity
names = ["rois", "target_class_ids", "target_bbox", "target_mask"]
outputs = utils.batch_slice(
[proposals, gt_class_ids, gt_boxes, gt_masks],
lambda w, x, y, z: detection_targets_graph(
w, x, y, z, self.config),
self.config.IMAGES_PER_GPU, names=names)
return outputs
def compute_output_shape(self, input_shape):
return [
(None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # rois
(None, self.config.TRAIN_ROIS_PER_IMAGE), # class_ids
(None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # deltas
(None, self.config.TRAIN_ROIS_PER_IMAGE, self.config.MASK_SHAPE[0],
self.config.MASK_SHAPE[1]) # masks
]
def compute_mask(self, inputs, mask=None):
return [None, None, None, None]
############################################################
# Detection Layer
############################################################
def refine_detections_graph(rois, probs, deltas, window, config):
"""Refine classified proposals and filter overlaps and return final
detections.
Inputs:
rois: [N, (y1, x1, y2, x2)] in normalized coordinates
probs: [N, num_classes]. Class probabilities.
deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific
bounding box deltas.
window: (y1, x1, y2, x2) in normalized coordinates. The part of the image
that contains the image excluding the padding.
Returns detections shaped: [num_detections, (y1, x1, y2, x2, class_id, score)] where
coordinates are normalized.
"""
# Class IDs per ROI
class_ids = tf.argmax(input=probs, axis=1, output_type=tf.int32)
# Class probability of the top class of each ROI
indices = tf.stack([tf.range(probs.shape[0]), class_ids], axis=1)
class_scores = tf.gather_nd(probs, indices)
# Class-specific bounding box deltas
deltas_specific = tf.gather_nd(deltas, indices)
# Apply bounding box deltas
# Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates
refined_rois = apply_box_deltas_graph(
rois, deltas_specific * config.BBOX_STD_DEV)
# Clip boxes to image window
refined_rois = clip_boxes_graph(refined_rois, window)
# TODO: Filter out boxes with zero area
# Filter out background boxes
keep = tf.compat.v1.where(class_ids > 0)[:, 0]
# Filter out low confidence boxes
if config.DETECTION_MIN_CONFIDENCE:
conf_keep = tf.compat.v1.where(class_scores >= config.DETECTION_MIN_CONFIDENCE)[:, 0]
keep = tf.sets.intersection(tf.expand_dims(keep, 0),
tf.expand_dims(conf_keep, 0))
keep = tf.sparse.to_dense(keep)[0]
# Apply per-class NMS
# 1. Prepare variables
pre_nms_class_ids = tf.gather(class_ids, keep)
pre_nms_scores = tf.gather(class_scores, keep)
pre_nms_rois = tf.gather(refined_rois, keep)
unique_pre_nms_class_ids = tf.unique(pre_nms_class_ids)[0]
def nms_keep_map(class_id):
"""Apply Non-Maximum Suppression on ROIs of the given class."""
# Indices of ROIs of the given class
ixs = tf.compat.v1.where(tf.equal(pre_nms_class_ids, class_id))[:, 0]
# Apply NMS
class_keep = tf.image.non_max_suppression(
tf.gather(pre_nms_rois, ixs),
tf.gather(pre_nms_scores, ixs),
max_output_size=config.DETECTION_MAX_INSTANCES,
iou_threshold=config.DETECTION_NMS_THRESHOLD)
# Map indices
class_keep = tf.gather(keep, tf.gather(ixs, class_keep))
# Pad with -1 so returned tensors have the same shape
gap = config.DETECTION_MAX_INSTANCES - tf.shape(input=class_keep)[0]
class_keep = tf.pad(tensor=class_keep, paddings=[(0, gap)],
mode='CONSTANT', constant_values=-1)
# Set shape so map_fn() can infer result shape
class_keep.set_shape([config.DETECTION_MAX_INSTANCES])
return class_keep
# 2. Map over class IDs
nms_keep = tf.map_fn(nms_keep_map, unique_pre_nms_class_ids,
dtype=tf.int64)
# 3. Merge results into one list, and remove -1 padding
nms_keep = tf.reshape(nms_keep, [-1])
nms_keep = tf.gather(nms_keep, tf.compat.v1.where(nms_keep > -1)[:, 0])
# 4. Compute intersection between keep and nms_keep
keep = tf.sets.intersection(tf.expand_dims(keep, 0),
tf.expand_dims(nms_keep, 0))
keep = tf.sparse.to_dense(keep)[0]
# Keep top detections
roi_count = config.DETECTION_MAX_INSTANCES
class_scores_keep = tf.gather(class_scores, keep)
num_keep = tf.minimum(tf.shape(input=class_scores_keep)[0], roi_count)
top_ids = tf.nn.top_k(class_scores_keep, k=num_keep, sorted=True)[1]
keep = tf.gather(keep, top_ids)
# Arrange output as [N, (y1, x1, y2, x2, class_id, score)]
# Coordinates are normalized.
detections = tf.concat([
tf.gather(refined_rois, keep),
tf.dtypes.cast(tf.gather(class_ids, keep), tf.float32)[..., tf.newaxis],
tf.gather(class_scores, keep)[..., tf.newaxis]
], axis=1)
# Pad with zeros if detections < DETECTION_MAX_INSTANCES
gap = config.DETECTION_MAX_INSTANCES - tf.shape(input=detections)[0]
detections = tf.pad(tensor=detections, paddings=[(0, gap), (0, 0)], mode="CONSTANT")
return detections
class DetectionLayer(KE.Layer):
"""Takes classified proposal boxes and their bounding box deltas and
returns the final detection boxes.
Returns:
[batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] where
coordinates are normalized.
"""
def __init__(self, config=None, **kwargs):
super(DetectionLayer, self).__init__(**kwargs)
self.config = config
def get_config(self):
config = super(DetectionLayer, self).get_config()
config["config"] = self.config.to_dict()
return config
def call(self, inputs):
rois = inputs[0]
mrcnn_class = inputs[1]
mrcnn_bbox = inputs[2]
image_meta = inputs[3]
# Get windows of images in normalized coordinates. Windows are the area
# in the image that excludes the padding.
# Use the shape of the first image in the batch to normalize the window
# because we know that all images get resized to the same size.
m = parse_image_meta_graph(image_meta)
image_shape = m['image_shape'][0]
window = norm_boxes_graph(m['window'], image_shape[:2])
# Run detection refinement graph on each item in the batch
detections_batch = utils.batch_slice(
[rois, mrcnn_class, mrcnn_bbox, window],
lambda x, y, w, z: refine_detections_graph(x, y, w, z, self.config),
self.config.IMAGES_PER_GPU)
# Reshape output
# [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] in
# normalized coordinates
return tf.reshape(
detections_batch,
[self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6])
def compute_output_shape(self, input_shape):
return (None, self.config.DETECTION_MAX_INSTANCES, 6)
############################################################
# Region Proposal Network (RPN)
############################################################
def rpn_graph(feature_map, anchors_per_location, anchor_stride):
"""Builds the computation graph of Region Proposal Network.
feature_map: backbone features [batch, height, width, depth]
anchors_per_location: number of anchors per pixel in the feature map
anchor_stride: Controls the density of anchors. Typically 1 (anchors for
every pixel in the feature map), or 2 (every other pixel).
Returns:
rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)
rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.
rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be
applied to anchors.
"""
# TODO: check if stride of 2 causes alignment issues if the feature map
# is not even.
# Shared convolutional base of the RPN
shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu',
strides=anchor_stride,
name='rpn_conv_shared')(feature_map)
# Anchor Score. [batch, height, width, anchors per location * 2].
x = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid',
activation='linear', name='rpn_class_raw')(shared)
# Reshape to [batch, anchors, 2]
rpn_class_logits = KL.Lambda(
lambda t: tf.reshape(t, [tf.shape(input=t)[0], -1, 2]))(x)
# Softmax on last dimension of BG/FG.
rpn_probs = KL.Activation(
"softmax", name="rpn_class_xxx")(rpn_class_logits)
# Bounding box refinement. [batch, H, W, anchors per location * depth]
# where depth is [x, y, log(w), log(h)]
x = KL.Conv2D(anchors_per_location * 4, (1, 1), padding="valid",
activation='linear', name='rpn_bbox_pred')(shared)
# Reshape to [batch, anchors, 4]
rpn_bbox = KL.Lambda(lambda t: tf.reshape(t, [tf.shape(input=t)[0], -1, 4]))(x)
return [rpn_class_logits, rpn_probs, rpn_bbox]
def build_rpn_model(anchor_stride, anchors_per_location, depth):
"""Builds a Keras model of the Region Proposal Network.
It wraps the RPN graph so it can be used multiple times with shared
weights.
anchors_per_location: number of anchors per pixel in the feature map
anchor_stride: Controls the density of anchors. Typically 1 (anchors for
every pixel in the feature map), or 2 (every other pixel).
depth: Depth of the backbone feature map.
Returns a Keras Model object. The model outputs, when called, are:
rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)
rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.
rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be
applied to anchors.
"""
input_feature_map = KL.Input(shape=[None, None, depth],
name="input_rpn_feature_map")
outputs = rpn_graph(input_feature_map, anchors_per_location, anchor_stride)
return KM.Model([input_feature_map], outputs, name="rpn_model")
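# Usage sketch (illustrative): with the typical config values RPN_ANCHOR_STRIDE=1,
# three anchor ratios, and TOP_DOWN_PYRAMID_SIZE=256,
#
#   rpn = build_rpn_model(anchor_stride=1, anchors_per_location=3, depth=256)
#   rpn_class_logits, rpn_probs, rpn_bbox = rpn([p2_feature_map])
#
# The same model (shared weights) is then applied to every FPN level P2-P6.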
############################################################
# Feature Pyramid Network Heads
############################################################
def fpn_classifier_graph(rois, feature_maps, image_meta,
pool_size, num_classes, train_bn=True,
fc_layers_size=1024):
"""Builds the computation graph of the feature pyramid network classifier
and regressor heads.
rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
coordinates.
feature_maps: List of feature maps from different layers of the pyramid,
[P2, P3, P4, P5]. Each has a different resolution.
image_meta: [batch, (meta data)] Image details. See compose_image_meta()
pool_size: The width of the square feature map generated from ROI Pooling.
num_classes: number of classes, which determines the depth of the results
train_bn: Boolean. Train or freeze Batch Norm layers
fc_layers_size: Size of the 2 FC layers
Returns:
logits: [batch, num_rois, NUM_CLASSES] classifier logits (before softmax)
probs: [batch, num_rois, NUM_CLASSES] classifier probabilities
bbox_deltas: [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))] Deltas to apply to
proposal boxes
"""
# ROI Pooling
# Shape: [batch, num_rois, POOL_SIZE, POOL_SIZE, channels]
x = PyramidROIAlign([pool_size, pool_size],
name="roi_align_classifier")([rois, image_meta] + feature_maps)
# Two 1024 FC layers (implemented with Conv2D for consistency)
x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (pool_size, pool_size), padding="valid"),
name="mrcnn_class_conv1")(x)
x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn1')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (1, 1)),
name="mrcnn_class_conv2")(x)
x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn2')(x, training=train_bn)
x = KL.Activation('relu')(x)
shared = KL.Lambda(lambda x: K.squeeze(K.squeeze(x, 3), 2),
name="pool_squeeze")(x)
# Classifier head
mrcnn_class_logits = KL.TimeDistributed(KL.Dense(num_classes),
name='mrcnn_class_logits')(shared)
mrcnn_probs = KL.TimeDistributed(KL.Activation("softmax"),
name="mrcnn_class")(mrcnn_class_logits)
# BBox head
# [batch, num_rois, NUM_CLASSES * (dy, dx, log(dh), log(dw))]
x = KL.TimeDistributed(KL.Dense(num_classes * 4, activation='linear'),
name='mrcnn_bbox_fc')(shared)
# Reshape to [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))]
s = K.int_shape(x)
if s[1] is None:
mrcnn_bbox = KL.Reshape((-1, num_classes, 4), name="mrcnn_bbox")(x)
else:
mrcnn_bbox = KL.Reshape((s[1], num_classes, 4), name="mrcnn_bbox")(x)
return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox
def build_fpn_mask_graph(rois, feature_maps, image_meta,
pool_size, num_classes, train_bn=True):
"""Builds the computation graph of the mask head of Feature Pyramid Network.
rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
coordinates.
feature_maps: List of feature maps from different layers of the pyramid,
[P2, P3, P4, P5]. Each has a different resolution.
image_meta: [batch, (meta data)] Image details. See compose_image_meta()
pool_size: The width of the square feature map generated from ROI Pooling.
num_classes: number of classes, which determines the depth of the results
train_bn: Boolean. Train or freeze Batch Norm layers
Returns: Masks [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, NUM_CLASSES]
"""
# ROI Pooling
# Shape: [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, channels]
x = PyramidROIAlign([pool_size, pool_size],
name="roi_align_mask")([rois, image_meta] + feature_maps)
# Conv layers
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv1")(x)
x = KL.TimeDistributed(BatchNorm(),
name='mrcnn_mask_bn1')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv2")(x)
x = KL.TimeDistributed(BatchNorm(),
name='mrcnn_mask_bn2')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv3")(x)
x = KL.TimeDistributed(BatchNorm(),
name='mrcnn_mask_bn3')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv4")(x)
x = KL.TimeDistributed(BatchNorm(),
name='mrcnn_mask_bn4')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation="relu"),
name="mrcnn_mask_deconv")(x)
x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation="sigmoid"),
name="mrcnn_mask")(x)
return x
############################################################
# Loss Functions
############################################################
def smooth_l1_loss(y_true, y_pred):
"""Implements Smooth-L1 loss.
y_true and y_pred are typically: [N, 4], but could be any shape.
"""
diff = K.abs(y_true - y_pred)
less_than_one = K.cast(K.less(diff, 1.0), "float32")
loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)
return loss
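# Worked example (illustrative): for a difference of 0.5 the loss is
# 0.5 * 0.5**2 = 0.125 (quadratic region); for a difference of 2.0 it is
# 2.0 - 0.5 = 1.5 (linear region), which is what makes smooth-L1 less sensitive
# to outliers than a plain L2 loss.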
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
"""RPN anchor classifier loss.
rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
-1=negative, 0=neutral anchor.
rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for BG/FG.
"""
# Squeeze last dim to simplify
rpn_match = tf.squeeze(rpn_match, -1)
# Get anchor classes. Convert the -1/+1 match to 0/1 values.
anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
# Positive and Negative anchors contribute to the loss,
# but neutral anchors (match value = 0) don't.
indices = tf.compat.v1.where(K.not_equal(rpn_match, 0))
# Pick rows that contribute to the loss and filter out the rest.
rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
anchor_class = tf.gather_nd(anchor_class, indices)
# Cross entropy loss
loss = K.sparse_categorical_crossentropy(target=anchor_class,
output=rpn_class_logits,
from_logits=True)
loss = K.switch(tf.size(input=loss) > 0, K.mean(loss), tf.constant(0.0))
return loss
def rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox):
"""Return the RPN bounding box loss graph.
config: the model config object.
target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].
        Uses 0 padding to fill in unused bbox deltas.
rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
-1=negative, 0=neutral anchor.
rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]
"""
# Positive anchors contribute to the loss, but negative and
# neutral anchors (match value of 0 or -1) don't.
rpn_match = K.squeeze(rpn_match, -1)
indices = tf.compat.v1.where(K.equal(rpn_match, 1))
# Pick bbox deltas that contribute to the loss
rpn_bbox = tf.gather_nd(rpn_bbox, indices)
# Trim target bounding box deltas to the same length as rpn_bbox.
batch_counts = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1)
target_bbox = batch_pack_graph(target_bbox, batch_counts,
config.IMAGES_PER_GPU)
loss = smooth_l1_loss(target_bbox, rpn_bbox)
loss = K.switch(tf.size(input=loss) > 0, K.mean(loss), tf.constant(0.0))
return loss
def mrcnn_class_loss_graph(target_class_ids, pred_class_logits,
active_class_ids):
"""Loss for the classifier head of Mask RCNN.
target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero
padding to fill in the array.
pred_class_logits: [batch, num_rois, num_classes]
active_class_ids: [batch, num_classes]. Has a value of 1 for
classes that are in the dataset of the image, and 0
for classes that are not in the dataset.
"""
# During model building, Keras calls this function with
# target_class_ids of type float32. Unclear why. Cast it
# to int to get around it.
target_class_ids = tf.cast(target_class_ids, 'int64')
# Find predictions of classes that are not in the dataset.
pred_class_ids = tf.argmax(input=pred_class_logits, axis=2)
# TODO: Update this line to work with batch > 1. Right now it assumes all
# images in a batch have the same active_class_ids
pred_active = tf.gather(active_class_ids[0], pred_class_ids)
# Loss
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=target_class_ids, logits=pred_class_logits)
# Erase losses of predictions of classes that are not in the active
# classes of the image.
loss = loss * pred_active
    # Compute loss mean. Use only predictions that contribute
# to the loss to get a correct mean.
loss = tf.reduce_sum(input_tensor=loss) / tf.reduce_sum(input_tensor=pred_active)
return loss
def mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):
"""Loss for Mask R-CNN bounding box refinement.
target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]
target_class_ids: [batch, num_rois]. Integer class IDs.
pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]
"""
# Reshape to merge batch and roi dimensions for simplicity.
target_class_ids = K.reshape(target_class_ids, (-1,))
target_bbox = K.reshape(target_bbox, (-1, 4))
pred_bbox = K.reshape(pred_bbox, (-1, K.int_shape(pred_bbox)[2], 4))
# Only positive ROIs contribute to the loss. And only
# the right class_id of each ROI. Get their indices.
positive_roi_ix = tf.compat.v1.where(target_class_ids > 0)[:, 0]
positive_roi_class_ids = tf.cast(
tf.gather(target_class_ids, positive_roi_ix), tf.int64)
indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)
# Gather the deltas (predicted and true) that contribute to loss
target_bbox = tf.gather(target_bbox, positive_roi_ix)
pred_bbox = tf.gather_nd(pred_bbox, indices)
# Smooth-L1 Loss
loss = K.switch(tf.size(input=target_bbox) > 0,
smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox),
tf.constant(0.0))
loss = K.mean(loss)
return loss
def mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks):
"""Mask binary cross-entropy loss for the masks head.
target_masks: [batch, num_rois, height, width].
A float32 tensor of values 0 or 1. Uses zero padding to fill array.
target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.
pred_masks: [batch, proposals, height, width, num_classes] float32 tensor
with values from 0 to 1.
"""
# Reshape for simplicity. Merge first two dimensions into one.
target_class_ids = K.reshape(target_class_ids, (-1,))
mask_shape = tf.shape(input=target_masks)
target_masks = K.reshape(target_masks, (-1, mask_shape[2], mask_shape[3]))
pred_shape = tf.shape(input=pred_masks)
pred_masks = K.reshape(pred_masks,
(-1, pred_shape[2], pred_shape[3], pred_shape[4]))
# Permute predicted masks to [N, num_classes, height, width]
pred_masks = tf.transpose(a=pred_masks, perm=[0, 3, 1, 2])
# Only positive ROIs contribute to the loss. And only
# the class specific mask of each ROI.
positive_ix = tf.compat.v1.where(target_class_ids > 0)[:, 0]
positive_class_ids = tf.cast(
tf.gather(target_class_ids, positive_ix), tf.int64)
indices = tf.stack([positive_ix, positive_class_ids], axis=1)
# Gather the masks (predicted and true) that contribute to loss
y_true = tf.gather(target_masks, positive_ix)
y_pred = tf.gather_nd(pred_masks, indices)
# Compute binary cross entropy. If no positive ROIs, then return 0.
# shape: [batch, roi, num_classes]
loss = K.switch(tf.size(input=y_true) > 0,
K.binary_crossentropy(target=y_true, output=y_pred),
tf.constant(0.0))
loss = K.mean(loss)
return loss
############################################################
# Data Generator
############################################################
def load_image_gt(dataset, config, image_id, augmentation=None):
"""Load and return ground truth data for an image (image, mask, bounding boxes).
augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.
For example, passing imgaug.augmenters.Fliplr(0.5) flips images
right/left 50% of the time.
Returns:
image: [height, width, 3]
shape: the original shape of the image before resizing and cropping.
class_ids: [instance_count] Integer class IDs
bbox: [instance_count, (y1, x1, y2, x2)]
mask: [height, width, instance_count]. The height and width are those
of the image unless use_mini_mask is True, in which case they are
defined in MINI_MASK_SHAPE.
"""
# Load image and mask
image = dataset.load_image(image_id)
mask, class_ids = dataset.load_mask(image_id)
original_shape = image.shape
image, window, scale, padding, crop = utils.resize_image(
image,
min_dim=config.IMAGE_MIN_DIM,
min_scale=config.IMAGE_MIN_SCALE,
max_dim=config.IMAGE_MAX_DIM,
mode=config.IMAGE_RESIZE_MODE)
mask = utils.resize_mask(mask, scale, padding, crop)
# Augmentation
# This requires the imgaug lib (https://github.com/aleju/imgaug)
if augmentation:
import imgaug
# Augmenters that are safe to apply to masks
# Some, such as Affine, have settings that make them unsafe, so always
# test your augmentation on masks
MASK_AUGMENTERS = ["Sequential", "SomeOf", "OneOf", "Sometimes",
"Fliplr", "Flipud", "CropAndPad",
"Affine", "PiecewiseAffine"]
def hook(images, augmenter, parents, default):
"""Determines which augmenters to apply to masks."""
return augmenter.__class__.__name__ in MASK_AUGMENTERS
# Store shapes before augmentation to compare
image_shape = image.shape
mask_shape = mask.shape
# Make augmenters deterministic to apply similarly to images and masks
det = augmentation.to_deterministic()
image = det.augment_image(image)
# Change mask to np.uint8 because imgaug doesn't support np.bool
mask = det.augment_image(mask.astype(np.uint8),
hooks=imgaug.HooksImages(activator=hook))
# Verify that shapes didn't change
assert image.shape == image_shape, "Augmentation shouldn't change image size"
assert mask.shape == mask_shape, "Augmentation shouldn't change mask size"
# Change mask back to bool
        mask = mask.astype(bool)  # np.bool was removed in newer NumPy versions
    # Some boxes might be all zeros if the corresponding mask got cropped out,
    # so filter those instances out here.
_idx = np.sum(mask, axis=(0, 1)) > 0
mask = mask[:, :, _idx]
class_ids = class_ids[_idx]
# Bounding boxes. Note that some boxes might be all zeros
# if the corresponding mask got cropped out.
# bbox: [num_instances, (y1, x1, y2, x2)]
bbox = utils.extract_bboxes(mask)
# Active classes
# Different datasets have different classes, so track the
# classes supported in the dataset of this image.
active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)
source_class_ids = dataset.source_class_ids[dataset.image_info[image_id]["source"]]
active_class_ids[source_class_ids] = 1
# Resize masks to smaller size to reduce memory usage
if config.USE_MINI_MASK:
mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)
# Image meta data
image_meta = compose_image_meta(image_id, original_shape, image.shape,
window, scale, active_class_ids)
return image, image_meta, class_ids, bbox, mask
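# Usage sketch (illustrative; `dataset` stands for any utils.Dataset subclass on
# which prepare() has been called, e.g. a COCO or custom dataset wrapper):
#
#   image, image_meta, class_ids, bbox, mask = load_image_gt(
#       dataset, config, image_id=0, augmentation=None)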
def build_detection_targets(rpn_rois, gt_class_ids, gt_boxes, gt_masks, config):
"""Generate targets for training Stage 2 classifier and mask heads.
This is not used in normal training. It's useful for debugging or to train
the Mask RCNN heads without using the RPN head.
Inputs:
rpn_rois: [N, (y1, x1, y2, x2)] proposal boxes.
gt_class_ids: [instance count] Integer class IDs
gt_boxes: [instance count, (y1, x1, y2, x2)]
gt_masks: [height, width, instance count] Ground truth masks. Can be full
size or mini-masks.
Returns:
rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)]
class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
bboxes: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (y, x, log(h), log(w))]. Class-specific
bbox refinements.
    masks: [TRAIN_ROIS_PER_IMAGE, height, width, NUM_CLASSES]. Class-specific masks cropped
to bbox boundaries and resized to neural network output size.
"""
assert rpn_rois.shape[0] > 0
assert gt_class_ids.dtype == np.int32, "Expected int but got {}".format(
gt_class_ids.dtype)
assert gt_boxes.dtype == np.int32, "Expected int but got {}".format(
gt_boxes.dtype)
assert gt_masks.dtype == np.bool_, "Expected bool but got {}".format(
gt_masks.dtype)
# It's common to add GT Boxes to ROIs but we don't do that here because
# according to XinLei Chen's paper, it doesn't help.
# Trim empty padding in gt_boxes and gt_masks parts
instance_ids = np.where(gt_class_ids > 0)[0]
assert instance_ids.shape[0] > 0, "Image must contain instances."
gt_class_ids = gt_class_ids[instance_ids]
gt_boxes = gt_boxes[instance_ids]
gt_masks = gt_masks[:, :, instance_ids]
# Compute areas of ROIs and ground truth boxes.
rpn_roi_area = (rpn_rois[:, 2] - rpn_rois[:, 0]) * \
(rpn_rois[:, 3] - rpn_rois[:, 1])
gt_box_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * \
(gt_boxes[:, 3] - gt_boxes[:, 1])
# Compute overlaps [rpn_rois, gt_boxes]
overlaps = np.zeros((rpn_rois.shape[0], gt_boxes.shape[0]))
for i in range(overlaps.shape[1]):
gt = gt_boxes[i]
overlaps[:, i] = utils.compute_iou(
gt, rpn_rois, gt_box_area[i], rpn_roi_area)
# Assign ROIs to GT boxes
rpn_roi_iou_argmax = np.argmax(overlaps, axis=1)
rpn_roi_iou_max = overlaps[np.arange(
overlaps.shape[0]), rpn_roi_iou_argmax]
# GT box assigned to each ROI
rpn_roi_gt_boxes = gt_boxes[rpn_roi_iou_argmax]
rpn_roi_gt_class_ids = gt_class_ids[rpn_roi_iou_argmax]
# Positive ROIs are those with >= 0.5 IoU with a GT box.
fg_ids = np.where(rpn_roi_iou_max > 0.5)[0]
# Negative ROIs are those with max IoU 0.1-0.5 (hard example mining)
# TODO: To hard example mine or not to hard example mine, that's the question
# bg_ids = np.where((rpn_roi_iou_max >= 0.1) & (rpn_roi_iou_max < 0.5))[0]
bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]
# Subsample ROIs. Aim for 33% foreground.
# FG
fg_roi_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO)
if fg_ids.shape[0] > fg_roi_count:
keep_fg_ids = np.random.choice(fg_ids, fg_roi_count, replace=False)
else:
keep_fg_ids = fg_ids
# BG
remaining = config.TRAIN_ROIS_PER_IMAGE - keep_fg_ids.shape[0]
if bg_ids.shape[0] > remaining:
keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
else:
keep_bg_ids = bg_ids
# Combine indices of ROIs to keep
keep = np.concatenate([keep_fg_ids, keep_bg_ids])
# Need more?
remaining = config.TRAIN_ROIS_PER_IMAGE - keep.shape[0]
if remaining > 0:
# Looks like we don't have enough samples to maintain the desired
# balance. Reduce requirements and fill in the rest. This is
# likely different from the Mask RCNN paper.
# There is a small chance we have neither fg nor bg samples.
if keep.shape[0] == 0:
# Pick bg regions with easier IoU threshold
bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]
assert bg_ids.shape[0] >= remaining
keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
assert keep_bg_ids.shape[0] == remaining
keep = np.concatenate([keep, keep_bg_ids])
else:
# Fill the rest with repeated bg rois.
keep_extra_ids = np.random.choice(
keep_bg_ids, remaining, replace=True)
keep = np.concatenate([keep, keep_extra_ids])
assert keep.shape[0] == config.TRAIN_ROIS_PER_IMAGE, \
"keep doesn't match ROI batch size {}, {}".format(
keep.shape[0], config.TRAIN_ROIS_PER_IMAGE)
# Reset the gt boxes assigned to BG ROIs.
rpn_roi_gt_boxes[keep_bg_ids, :] = 0
rpn_roi_gt_class_ids[keep_bg_ids] = 0
# For each kept ROI, assign a class_id, and for FG ROIs also add bbox refinement.
rois = rpn_rois[keep]
roi_gt_boxes = rpn_roi_gt_boxes[keep]
roi_gt_class_ids = rpn_roi_gt_class_ids[keep]
roi_gt_assignment = rpn_roi_iou_argmax[keep]
# Class-aware bbox deltas. [y, x, log(h), log(w)]
bboxes = np.zeros((config.TRAIN_ROIS_PER_IMAGE,
config.NUM_CLASSES, 4), dtype=np.float32)
pos_ids = np.where(roi_gt_class_ids > 0)[0]
bboxes[pos_ids, roi_gt_class_ids[pos_ids]] = utils.box_refinement(
rois[pos_ids], roi_gt_boxes[pos_ids, :4])
# Normalize bbox refinements
bboxes /= config.BBOX_STD_DEV
# Generate class-specific target masks
masks = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.MASK_SHAPE[0], config.MASK_SHAPE[1], config.NUM_CLASSES),
dtype=np.float32)
for i in pos_ids:
class_id = roi_gt_class_ids[i]
assert class_id > 0, "class id must be greater than 0"
gt_id = roi_gt_assignment[i]
class_mask = gt_masks[:, :, gt_id]
if config.USE_MINI_MASK:
# Create a mask placeholder, the size of the image
placeholder = np.zeros(config.IMAGE_SHAPE[:2], dtype=bool)
# GT box
gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[gt_id]
gt_w = gt_x2 - gt_x1
gt_h = gt_y2 - gt_y1
# Resize mini mask to size of GT box
placeholder[gt_y1:gt_y2, gt_x1:gt_x2] = \
np.round(utils.resize(class_mask, (gt_h, gt_w))).astype(bool)
            # Place the mini mask in the placeholder
class_mask = placeholder
# Pick part of the mask and resize it
y1, x1, y2, x2 = rois[i].astype(np.int32)
m = class_mask[y1:y2, x1:x2]
mask = utils.resize(m, config.MASK_SHAPE)
masks[i, :, :, class_id] = mask
return rois, roi_gt_class_ids, bboxes, masks
def build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config):
"""Given the anchors and GT boxes, compute overlaps and identify positive
anchors and deltas to refine them to match their corresponding GT boxes.
anchors: [num_anchors, (y1, x1, y2, x2)]
gt_class_ids: [num_gt_boxes] Integer class IDs.
gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)]
Returns:
rpn_match: [N] (int32) matches between anchors and GT boxes.
1 = positive anchor, -1 = negative anchor, 0 = neutral
rpn_bbox: [N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
"""
# RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral
rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)
# RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]
rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4))
# Handle COCO crowds
# A crowd box in COCO is a bounding box around several instances. Exclude
# them from training. A crowd box is given a negative class ID.
crowd_ix = np.where(gt_class_ids < 0)[0]
if crowd_ix.shape[0] > 0:
# Filter out crowds from ground truth class IDs and boxes
non_crowd_ix = np.where(gt_class_ids > 0)[0]
crowd_boxes = gt_boxes[crowd_ix]
gt_class_ids = gt_class_ids[non_crowd_ix]
gt_boxes = gt_boxes[non_crowd_ix]
# Compute overlaps with crowd boxes [anchors, crowds]
crowd_overlaps = utils.compute_overlaps(anchors, crowd_boxes)
crowd_iou_max = np.amax(crowd_overlaps, axis=1)
no_crowd_bool = (crowd_iou_max < 0.001)
else:
        # No anchors intersect a crowd
no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool)
# Compute overlaps [num_anchors, num_gt_boxes]
overlaps = utils.compute_overlaps(anchors, gt_boxes)
# Match anchors to GT Boxes
# If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.
# If an anchor overlaps a GT box with IoU < 0.3 then it's negative.
# Neutral anchors are those that don't match the conditions above,
# and they don't influence the loss function.
# However, don't keep any GT box unmatched (rare, but happens). Instead,
# match it to the closest anchor (even if its max IoU is < 0.3).
#
# 1. Set negative anchors first. They get overwritten below if a GT box is
# matched to them. Skip boxes in crowd areas.
anchor_iou_argmax = np.argmax(overlaps, axis=1)
anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]
rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1
# 2. Set an anchor for each GT box (regardless of IoU value).
    # If multiple anchors have the same IoU, match all of them
gt_iou_argmax = np.argwhere(overlaps == np.max(overlaps, axis=0))[:,0]
rpn_match[gt_iou_argmax] = 1
# 3. Set anchors with high overlap as positive.
rpn_match[anchor_iou_max >= 0.7] = 1
# Subsample to balance positive and negative anchors
# Don't let positives be more than half the anchors
ids = np.where(rpn_match == 1)[0]
extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)
if extra > 0:
# Reset the extra ones to neutral
ids = np.random.choice(ids, extra, replace=False)
rpn_match[ids] = 0
# Same for negative proposals
ids = np.where(rpn_match == -1)[0]
extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE -
np.sum(rpn_match == 1))
if extra > 0:
        # Reset the extra ones to neutral
ids = np.random.choice(ids, extra, replace=False)
rpn_match[ids] = 0
# For positive anchors, compute shift and scale needed to transform them
# to match the corresponding GT boxes.
ids = np.where(rpn_match == 1)[0]
ix = 0 # index into rpn_bbox
# TODO: use box_refinement() rather than duplicating the code here
for i, a in zip(ids, anchors[ids]):
# Closest gt box (it might have IoU < 0.7)
gt = gt_boxes[anchor_iou_argmax[i]]
# Convert coordinates to center plus width/height.
# GT Box
gt_h = gt[2] - gt[0]
gt_w = gt[3] - gt[1]
gt_center_y = gt[0] + 0.5 * gt_h
gt_center_x = gt[1] + 0.5 * gt_w
# Anchor
a_h = a[2] - a[0]
a_w = a[3] - a[1]
a_center_y = a[0] + 0.5 * a_h
a_center_x = a[1] + 0.5 * a_w
# Compute the bbox refinement that the RPN should predict.
rpn_bbox[ix] = [
(gt_center_y - a_center_y) / a_h,
(gt_center_x - a_center_x) / a_w,
np.log(gt_h / a_h),
np.log(gt_w / a_w),
]
# Normalize
rpn_bbox[ix] /= config.RPN_BBOX_STD_DEV
ix += 1
return rpn_match, rpn_bbox
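# Illustrative sketch (not part of the original code): how build_rpn_targets()
# is typically fed. The anchors come from utils.generate_pyramid_anchors()
# using the backbone feature-map shapes; `config`, `gt_class_ids` and
# `gt_boxes` are placeholders for a Config object and one image's ground truth.
def _example_build_rpn_targets(config, gt_class_ids, gt_boxes):
    """Compute RPN match/bbox targets for a single image (illustrative only)."""
    backbone_shapes = compute_backbone_shapes(config, config.IMAGE_SHAPE)
    anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,
                                             config.RPN_ANCHOR_RATIOS,
                                             backbone_shapes,
                                             config.BACKBONE_STRIDES,
                                             config.RPN_ANCHOR_STRIDE)
    rpn_match, rpn_bbox = build_rpn_targets(config.IMAGE_SHAPE, anchors,
                                            gt_class_ids, gt_boxes, config)
    return rpn_match, rpn_bbox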
def generate_random_rois(image_shape, count, gt_class_ids, gt_boxes):
"""Generates ROI proposals similar to what a region proposal network
would generate.
image_shape: [Height, Width, Depth]
count: Number of ROIs to generate
gt_class_ids: [N] Integer ground truth class IDs
gt_boxes: [N, (y1, x1, y2, x2)] Ground truth boxes in pixels.
Returns: [count, (y1, x1, y2, x2)] ROI boxes in pixels.
"""
# placeholder
rois = np.zeros((count, 4), dtype=np.int32)
# Generate random ROIs around GT boxes (90% of count)
rois_per_box = int(0.9 * count / gt_boxes.shape[0])
for i in range(gt_boxes.shape[0]):
gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[i]
h = gt_y2 - gt_y1
w = gt_x2 - gt_x1
# random boundaries
r_y1 = max(gt_y1 - h, 0)
r_y2 = min(gt_y2 + h, image_shape[0])
r_x1 = max(gt_x1 - w, 0)
r_x2 = min(gt_x2 + w, image_shape[1])
# To avoid generating boxes with zero area, we generate double what
# we need and filter out the extra. If we get fewer valid boxes
# than we need, we loop and try again.
while True:
y1y2 = np.random.randint(r_y1, r_y2, (rois_per_box * 2, 2))
x1x2 = np.random.randint(r_x1, r_x2, (rois_per_box * 2, 2))
# Filter out zero area boxes
threshold = 1
y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=
threshold][:rois_per_box]
x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=
threshold][:rois_per_box]
if y1y2.shape[0] == rois_per_box and x1x2.shape[0] == rois_per_box:
break
# Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape
# into x1, y1, x2, y2 order
x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
box_rois = np.hstack([y1, x1, y2, x2])
rois[rois_per_box * i:rois_per_box * (i + 1)] = box_rois
# Generate random ROIs anywhere in the image (10% of count)
remaining_count = count - (rois_per_box * gt_boxes.shape[0])
# To avoid generating boxes with zero area, we generate double what
# we need and filter out the extra. If we get fewer valid boxes
# than we need, we loop and try again.
while True:
y1y2 = np.random.randint(0, image_shape[0], (remaining_count * 2, 2))
x1x2 = np.random.randint(0, image_shape[1], (remaining_count * 2, 2))
# Filter out zero area boxes
threshold = 1
y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=
threshold][:remaining_count]
x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=
threshold][:remaining_count]
if y1y2.shape[0] == remaining_count and x1x2.shape[0] == remaining_count:
break
# Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape
# into x1, y1, x2, y2 order
x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
global_rois = np.hstack([y1, x1, y2, x2])
rois[-remaining_count:] = global_rois
return rois
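# Illustrative sketch (not part of the original code): generate_random_rois()
# and build_detection_targets() are normally only used together by
# DataGenerator when training the heads without the RPN (random_rois > 0 and
# detection_targets=True). `dataset`, `config` and `image_id` are placeholders.
def _example_head_targets_from_random_rois(dataset, config, image_id):
    """Build classifier/mask head targets for one image (illustrative only)."""
    image, image_meta, gt_class_ids, gt_boxes, gt_masks = \
        load_image_gt(dataset, config, image_id)
    rpn_rois = generate_random_rois(image.shape, 1000, gt_class_ids, gt_boxes)
    rois, class_ids, bboxes, masks = build_detection_targets(
        rpn_rois, gt_class_ids, gt_boxes, gt_masks, config)
    return rois, class_ids, bboxes, masks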
class DataGenerator(KU.Sequence):
"""An iterable that returns images and corresponding target class ids,
    bounding box deltas, and masks. It inherits from keras.utils.Sequence to avoid data redundancy
    when use_multiprocessing=True.
dataset: The Dataset object to pick data from
config: The model config object
shuffle: If True, shuffles the samples before every epoch
augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.
For example, passing imgaug.augmenters.Fliplr(0.5) flips images
right/left 50% of the time.
random_rois: If > 0 then generate proposals to be used to train the
network classifier and mask heads. Useful if training
the Mask RCNN part without the RPN.
detection_targets: If True, generate detection targets (class IDs, bbox
deltas, and masks). Typically for debugging or visualizations because
        in training detection targets are generated by DetectionTargetLayer.
Returns a Python iterable. Upon calling __getitem__() on it, the
iterable returns two lists, inputs and outputs. The contents
of the lists differ depending on the received arguments:
inputs list:
- images: [batch, H, W, C]
- image_meta: [batch, (meta data)] Image details. See compose_image_meta()
- rpn_match: [batch, N] Integer (1=positive anchor, -1=negative, 0=neutral)
- rpn_bbox: [batch, N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
- gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs
- gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)]
- gt_masks: [batch, height, width, MAX_GT_INSTANCES]. The height and width
are those of the image unless use_mini_mask is True, in which
case they are defined in MINI_MASK_SHAPE.
outputs list: Usually empty in regular training. But if detection_targets
is True then the outputs list contains target class_ids, bbox deltas,
and masks.
"""
def __init__(self, dataset, config, shuffle=True, augmentation=None,
random_rois=0, detection_targets=False):
self.image_ids = np.copy(dataset.image_ids)
self.dataset = dataset
self.config = config
# Anchors
# [anchor_count, (y1, x1, y2, x2)]
self.backbone_shapes = compute_backbone_shapes(config, config.IMAGE_SHAPE)
self.anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,
config.RPN_ANCHOR_RATIOS,
self.backbone_shapes,
config.BACKBONE_STRIDES,
config.RPN_ANCHOR_STRIDE)
self.shuffle = shuffle
self.augmentation = augmentation
self.random_rois = random_rois
self.batch_size = self.config.BATCH_SIZE
self.detection_targets = detection_targets
def __len__(self):
return int(np.ceil(len(self.image_ids) / float(self.batch_size)))
def __getitem__(self, idx):
b = 0
image_index = -1
while b < self.batch_size:
# Increment index to pick next image. Shuffle if at the start of an epoch.
image_index = (image_index + 1) % len(self.image_ids)
if self.shuffle and image_index == 0:
np.random.shuffle(self.image_ids)
# Get GT bounding boxes and masks for image.
image_id = self.image_ids[image_index]
image, image_meta, gt_class_ids, gt_boxes, gt_masks = \
load_image_gt(self.dataset, self.config, image_id,
augmentation=self.augmentation)
# Skip images that have no instances. This can happen in cases
# where we train on a subset of classes and the image doesn't
# have any of the classes we care about.
if not np.any(gt_class_ids > 0):
continue
# RPN Targets
rpn_match, rpn_bbox = build_rpn_targets(image.shape, self.anchors,
gt_class_ids, gt_boxes, self.config)
# Mask R-CNN Targets
if self.random_rois:
rpn_rois = generate_random_rois(
image.shape, self.random_rois, gt_class_ids, gt_boxes)
if self.detection_targets:
rois, mrcnn_class_ids, mrcnn_bbox, mrcnn_mask = \
build_detection_targets(
rpn_rois, gt_class_ids, gt_boxes, gt_masks, self.config)
# Init batch arrays
if b == 0:
batch_image_meta = np.zeros(
(self.batch_size,) + image_meta.shape, dtype=image_meta.dtype)
batch_rpn_match = np.zeros(
[self.batch_size, self.anchors.shape[0], 1], dtype=rpn_match.dtype)
batch_rpn_bbox = np.zeros(
[self.batch_size, self.config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4], dtype=rpn_bbox.dtype)
batch_images = np.zeros(
(self.batch_size,) + image.shape, dtype=np.float32)
batch_gt_class_ids = np.zeros(
(self.batch_size, self.config.MAX_GT_INSTANCES), dtype=np.int32)
batch_gt_boxes = np.zeros(
(self.batch_size, self.config.MAX_GT_INSTANCES, 4), dtype=np.int32)
batch_gt_masks = np.zeros(
(self.batch_size, gt_masks.shape[0], gt_masks.shape[1],
self.config.MAX_GT_INSTANCES), dtype=gt_masks.dtype)
if self.random_rois:
batch_rpn_rois = np.zeros(
(self.batch_size, rpn_rois.shape[0], 4), dtype=rpn_rois.dtype)
if self.detection_targets:
batch_rois = np.zeros(
(self.batch_size,) + rois.shape, dtype=rois.dtype)
batch_mrcnn_class_ids = np.zeros(
(self.batch_size,) + mrcnn_class_ids.shape, dtype=mrcnn_class_ids.dtype)
batch_mrcnn_bbox = np.zeros(
(self.batch_size,) + mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype)
batch_mrcnn_mask = np.zeros(
(self.batch_size,) + mrcnn_mask.shape, dtype=mrcnn_mask.dtype)
# If more instances than fits in the array, sub-sample from them.
if gt_boxes.shape[0] > self.config.MAX_GT_INSTANCES:
ids = np.random.choice(
np.arange(gt_boxes.shape[0]), self.config.MAX_GT_INSTANCES, replace=False)
gt_class_ids = gt_class_ids[ids]
gt_boxes = gt_boxes[ids]
gt_masks = gt_masks[:, :, ids]
# Add to batch
batch_image_meta[b] = image_meta
batch_rpn_match[b] = rpn_match[:, np.newaxis]
batch_rpn_bbox[b] = rpn_bbox
batch_images[b] = mold_image(image.astype(np.float32), self.config)
batch_gt_class_ids[b, :gt_class_ids.shape[0]] = gt_class_ids
batch_gt_boxes[b, :gt_boxes.shape[0]] = gt_boxes
batch_gt_masks[b, :, :, :gt_masks.shape[-1]] = gt_masks
if self.random_rois:
batch_rpn_rois[b] = rpn_rois
if self.detection_targets:
batch_rois[b] = rois
batch_mrcnn_class_ids[b] = mrcnn_class_ids
batch_mrcnn_bbox[b] = mrcnn_bbox
batch_mrcnn_mask[b] = mrcnn_mask
b += 1
inputs = [batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox,
batch_gt_class_ids, batch_gt_boxes, batch_gt_masks]
outputs = []
if self.random_rois:
inputs.extend([batch_rpn_rois])
if self.detection_targets:
inputs.extend([batch_rois])
# Keras requires that output and targets have the same number of dimensions
batch_mrcnn_class_ids = np.expand_dims(
batch_mrcnn_class_ids, -1)
outputs.extend(
[batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask])
return inputs, outputs
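# Illustrative sketch (not part of the original code): DataGenerator is a
# keras.utils.Sequence, so a single molded batch can be pulled by indexing.
# `dataset` and `config` are placeholders for a prepared Dataset and a Config.
def _example_data_generator_batch(dataset, config):
    """Fetch one training batch and unpack its inputs (illustrative only)."""
    generator = DataGenerator(dataset, config, shuffle=True)
    inputs, outputs = generator[0]
    (batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox,
     batch_gt_class_ids, batch_gt_boxes, batch_gt_masks) = inputs
    return batch_images.shape, batch_rpn_match.shape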
############################################################
# MaskRCNN Class
############################################################
class MaskRCNN(object):
"""Encapsulates the Mask RCNN model functionality.
The actual Keras model is in the keras_model property.
"""
def __init__(self, mode, config, model_dir):
"""
mode: Either "training" or "inference"
config: A Sub-class of the Config class
model_dir: Directory to save training logs and trained weights
"""
assert mode in ['training', 'inference']
self.mode = mode
self.config = config
self.model_dir = model_dir
self.set_log_dir()
self.keras_model = self.build(mode=mode, config=config)
def build(self, mode, config):
"""Build Mask R-CNN architecture.
        mode: Either "training" or "inference". The inputs and
        outputs of the model differ accordingly.
        config: A Sub-class of the Config class with the model configuration.
"""
assert mode in ['training', 'inference']
        # Image size must be divisible by 2 multiple times
        h, w = config.IMAGE_SHAPE[:2]
        if h / 2**6 != int(h / 2**6) or w / 2**6 != int(w / 2**6):
            raise Exception("Image size must be divisible by 2 at least 6 times "
                            "to avoid fractions when downscaling and upscaling. "
                            "For example, use 256, 320, 384, 448, 512, ... etc.")
# Inputs
input_image = KL.Input(
shape=[None, None, config.IMAGE_SHAPE[2]], name="input_image")
input_image_meta = KL.Input(shape=[config.IMAGE_META_SIZE],
name="input_image_meta")
if mode == "training":
# RPN GT
input_rpn_match = KL.Input(
shape=[None, 1], name="input_rpn_match", dtype=tf.int32)
input_rpn_bbox = KL.Input(
shape=[None, 4], name="input_rpn_bbox", dtype=tf.float32)
# Detection GT (class IDs, bounding boxes, and masks)
# 1. GT Class IDs (zero padded)
input_gt_class_ids = KL.Input(
shape=[None], name="input_gt_class_ids", dtype=tf.int32)
# 2. GT Boxes in pixels (zero padded)
# [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in image coordinates
input_gt_boxes = KL.Input(
shape=[None, 4], name="input_gt_boxes", dtype=tf.float32)
# Normalize coordinates
gt_boxes = KL.Lambda(lambda x: norm_boxes_graph(
x, K.shape(input_image)[1:3]))(input_gt_boxes)
# 3. GT Masks (zero padded)
# [batch, height, width, MAX_GT_INSTANCES]
if config.USE_MINI_MASK:
input_gt_masks = KL.Input(
shape=[config.MINI_MASK_SHAPE[0],
config.MINI_MASK_SHAPE[1], None],
name="input_gt_masks", dtype=bool)
else:
input_gt_masks = KL.Input(
shape=[config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1], None],
name="input_gt_masks", dtype=bool)
elif mode == "inference":
# Anchors in normalized coordinates
input_anchors = KL.Input(shape=[None, 4], name="input_anchors")
# Build the shared convolutional layers.
# Bottom-up Layers
# Returns a list of the last layers of each stage, 5 in total.
        # Stage 5 (C5) is built as well here because stage5=True is passed below.
if callable(config.BACKBONE):
_, C2, C3, C4, C5 = config.BACKBONE(input_image, stage5=True,
train_bn=config.TRAIN_BN)
else:
_, C2, C3, C4, C5 = resnet_graph(input_image, config.BACKBONE,
stage5=True, train_bn=config.TRAIN_BN)
# Top-down Layers
        # TODO: add assert to verify feature map sizes match what's in config
P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c5p5')(C5)
P4 = KL.Add(name="fpn_p4add")([
KL.UpSampling2D(size=(2, 2), name="fpn_p5upsampled")(P5),
KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c4p4')(C4)])
P3 = KL.Add(name="fpn_p3add")([
KL.UpSampling2D(size=(2, 2), name="fpn_p4upsampled")(P4),
KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c3p3')(C3)])
P2 = KL.Add(name="fpn_p2add")([
KL.UpSampling2D(size=(2, 2), name="fpn_p3upsampled")(P3),
KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c2p2')(C2)])
# Attach 3x3 conv to all P layers to get the final feature maps.
P2 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p2")(P2)
P3 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p3")(P3)
P4 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p4")(P4)
P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p5")(P5)
# P6 is used for the 5th anchor scale in RPN. Generated by
# subsampling from P5 with stride of 2.
P6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name="fpn_p6")(P5)
# Note that P6 is used in RPN, but not in the classifier heads.
rpn_feature_maps = [P2, P3, P4, P5, P6]
mrcnn_feature_maps = [P2, P3, P4, P5]
# Anchors
if mode == "training":
anchors = self.get_anchors(config.IMAGE_SHAPE)
# Duplicate across the batch dimension because Keras requires it
# TODO: can this be optimized to avoid duplicating the anchors?
anchors = np.broadcast_to(anchors, (config.BATCH_SIZE,) + anchors.shape)
# A hack to get around Keras's bad support for constants
            # Original TF1 code:
            # anchors = tf.keras.layers.Lambda(lambda x: tf.Variable(anchors), name="anchors")(input_image)
            # Keras has no constant-tensor layer, and the Lambda-layer hack above
            # no longer works in TF2, so a small custom layer that simply returns
            # a constant tensor is used instead.
class ConstLayer(tf.keras.layers.Layer):
def __init__(self, x, name=None):
super(ConstLayer, self).__init__(name=name)
self.x = tf.Variable(x)
def call(self, input):
return self.x
anchors = ConstLayer(anchors, name="anchors")(input_image)
else:
anchors = input_anchors
# RPN Model
rpn = build_rpn_model(config.RPN_ANCHOR_STRIDE,
len(config.RPN_ANCHOR_RATIOS), config.TOP_DOWN_PYRAMID_SIZE)
# Loop through pyramid layers
layer_outputs = [] # list of lists
for p in rpn_feature_maps:
layer_outputs.append(rpn([p]))
# Concatenate layer outputs
# Convert from list of lists of level outputs to list of lists
# of outputs across levels.
# e.g. [[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]
output_names = ["rpn_class_logits", "rpn_class", "rpn_bbox"]
outputs = list(zip(*layer_outputs))
outputs = [KL.Concatenate(axis=1, name=n)(list(o))
for o, n in zip(outputs, output_names)]
rpn_class_logits, rpn_class, rpn_bbox = outputs
# Generate proposals
# Proposals are [batch, N, (y1, x1, y2, x2)] in normalized coordinates
# and zero padded.
proposal_count = config.POST_NMS_ROIS_TRAINING if mode == "training"\
else config.POST_NMS_ROIS_INFERENCE
rpn_rois = ProposalLayer(
proposal_count=proposal_count,
nms_threshold=config.RPN_NMS_THRESHOLD,
name="ROI",
config=config)([rpn_class, rpn_bbox, anchors])
if mode == "training":
# Class ID mask to mark class IDs supported by the dataset the image
# came from.
active_class_ids = KL.Lambda(
lambda x: parse_image_meta_graph(x)["active_class_ids"]
)(input_image_meta)
if not config.USE_RPN_ROIS:
# Ignore predicted ROIs and use ROIs provided as an input.
input_rois = KL.Input(shape=[config.POST_NMS_ROIS_TRAINING, 4],
name="input_roi", dtype=np.int32)
# Normalize coordinates
target_rois = KL.Lambda(lambda x: norm_boxes_graph(
x, K.shape(input_image)[1:3]))(input_rois)
else:
target_rois = rpn_rois
# Generate detection targets
# Subsamples proposals and generates target outputs for training
# Note that proposal class IDs, gt_boxes, and gt_masks are zero
# padded. Equally, returned rois and targets are zero padded.
rois, target_class_ids, target_bbox, target_mask =\
DetectionTargetLayer(config, name="proposal_targets")([
target_rois, input_gt_class_ids, gt_boxes, input_gt_masks])
# Network Heads
# TODO: verify that this handles zero padded ROIs
mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\
fpn_classifier_graph(rois, mrcnn_feature_maps, input_image_meta,
config.POOL_SIZE, config.NUM_CLASSES,
train_bn=config.TRAIN_BN,
fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)
mrcnn_mask = build_fpn_mask_graph(rois, mrcnn_feature_maps,
input_image_meta,
config.MASK_POOL_SIZE,
config.NUM_CLASSES,
train_bn=config.TRAIN_BN)
            # TODO: clean up (use tf.identity if necessary)
output_rois = KL.Lambda(lambda x: x * 1, name="output_rois")(rois)
# Losses
rpn_class_loss = KL.Lambda(lambda x: rpn_class_loss_graph(*x), name="rpn_class_loss")(
[input_rpn_match, rpn_class_logits])
rpn_bbox_loss = KL.Lambda(lambda x: rpn_bbox_loss_graph(config, *x), name="rpn_bbox_loss")(
[input_rpn_bbox, input_rpn_match, rpn_bbox])
class_loss = KL.Lambda(lambda x: mrcnn_class_loss_graph(*x), name="mrcnn_class_loss")(
[target_class_ids, mrcnn_class_logits, active_class_ids])
bbox_loss = KL.Lambda(lambda x: mrcnn_bbox_loss_graph(*x), name="mrcnn_bbox_loss")(
[target_bbox, target_class_ids, mrcnn_bbox])
mask_loss = KL.Lambda(lambda x: mrcnn_mask_loss_graph(*x), name="mrcnn_mask_loss")(
[target_mask, target_class_ids, mrcnn_mask])
# Model
inputs = [input_image, input_image_meta,
input_rpn_match, input_rpn_bbox, input_gt_class_ids, input_gt_boxes, input_gt_masks]
if not config.USE_RPN_ROIS:
inputs.append(input_rois)
outputs = [rpn_class_logits, rpn_class, rpn_bbox,
mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_mask,
rpn_rois, output_rois,
rpn_class_loss, rpn_bbox_loss, class_loss, bbox_loss, mask_loss]
model = KM.Model(inputs, outputs, name='mask_rcnn')
else:
# Network Heads
# Proposal classifier and BBox regressor heads
mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\
fpn_classifier_graph(rpn_rois, mrcnn_feature_maps, input_image_meta,
config.POOL_SIZE, config.NUM_CLASSES,
train_bn=config.TRAIN_BN,
fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)
# Detections
# output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in
# normalized coordinates
detections = DetectionLayer(config, name="mrcnn_detection")(
[rpn_rois, mrcnn_class, mrcnn_bbox, input_image_meta])
# Create masks for detections
detection_boxes = KL.Lambda(lambda x: x[..., :4])(detections)
mrcnn_mask = build_fpn_mask_graph(detection_boxes, mrcnn_feature_maps,
input_image_meta,
config.MASK_POOL_SIZE,
config.NUM_CLASSES,
train_bn=config.TRAIN_BN)
model = KM.Model([input_image, input_image_meta, input_anchors],
[detections, mrcnn_class, mrcnn_bbox,
mrcnn_mask, rpn_rois, rpn_class, rpn_bbox],
name='mask_rcnn')
# Add multi-GPU support.
if config.GPU_COUNT > 1:
from mrcnn.parallel_model import ParallelModel
model = ParallelModel(model, config.GPU_COUNT)
return model
def find_last(self):
"""Finds the last checkpoint file of the last trained model in the
model directory.
Returns:
The path of the last checkpoint file
"""
# Get directory names. Each directory corresponds to a model
dir_names = next(os.walk(self.model_dir))[1]
key = self.config.NAME.lower()
dir_names = filter(lambda f: f.startswith(key), dir_names)
dir_names = sorted(dir_names)
if not dir_names:
import errno
raise FileNotFoundError(
errno.ENOENT,
"Could not find model directory under {}".format(self.model_dir))
# Pick last directory
dir_name = os.path.join(self.model_dir, dir_names[-1])
# Find the last checkpoint
checkpoints = next(os.walk(dir_name))[2]
checkpoints = filter(lambda f: f.startswith("mask_rcnn"), checkpoints)
checkpoints = sorted(checkpoints)
if not checkpoints:
import errno
raise FileNotFoundError(
errno.ENOENT, "Could not find weight files in {}".format(dir_name))
checkpoint = os.path.join(dir_name, checkpoints[-1])
return checkpoint
def load_weights(self, filepath, by_name=False, exclude=None):
"""Modified version of the corresponding Keras function with
the addition of multi-GPU support and the ability to exclude
some layers from loading.
exclude: list of layer names to exclude
"""
import h5py
from tensorflow.python.keras.saving import hdf5_format
if exclude:
by_name = True
if h5py is None:
raise ImportError('`load_weights` requires h5py.')
with h5py.File(filepath, mode='r') as f:
if 'layer_names' not in f.attrs and 'model_weights' in f:
f = f['model_weights']
# In multi-GPU training, we wrap the model. Get layers
# of the inner model because they have the weights.
keras_model = self.keras_model
layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
else keras_model.layers
# Exclude some layers
if exclude:
layers = filter(lambda l: l.name not in exclude, layers)
if by_name:
hdf5_format.load_weights_from_hdf5_group_by_name(f, layers)
else:
hdf5_format.load_weights_from_hdf5_group(f, layers)
# Update the log directory
self.set_log_dir(filepath)
def get_imagenet_weights(self):
"""Downloads ImageNet trained weights from Keras.
Returns path to weights file.
"""
        from tensorflow.keras.utils import get_file
TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/'\
'releases/download/v0.2/'\
'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
TF_WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
md5_hash='a268eb855778b3df3c7506639542a6af')
return weights_path
def compile(self, learning_rate, momentum):
"""Gets the model ready for training. Adds losses, regularization, and
metrics. Then calls the Keras compile() function.
"""
# Optimizer object
optimizer = keras.optimizers.SGD(
            learning_rate=learning_rate, momentum=momentum,
clipnorm=self.config.GRADIENT_CLIP_NORM)
# Add Losses
loss_names = [
"rpn_class_loss", "rpn_bbox_loss",
"mrcnn_class_loss", "mrcnn_bbox_loss", "mrcnn_mask_loss"]
for name in loss_names:
layer = self.keras_model.get_layer(name)
if layer.output in self.keras_model.losses:
continue
loss = (
tf.reduce_mean(input_tensor=layer.output, keepdims=True)
* self.config.LOSS_WEIGHTS.get(name, 1.))
self.keras_model.add_loss(loss)
# Add L2 Regularization
# Skip gamma and beta weights of batch normalization layers.
reg_losses = [
keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(input=w), tf.float32)
for w in self.keras_model.trainable_weights
if 'gamma' not in w.name and 'beta' not in w.name]
self.keras_model.add_loss(tf.add_n(reg_losses))
# Compile
self.keras_model.compile(
optimizer=optimizer,
loss=[None] * len(self.keras_model.outputs))
# Add metrics for losses
for name in loss_names:
if name in self.keras_model.metrics_names:
continue
layer = self.keras_model.get_layer(name)
self.keras_model.metrics_names.append(name)
loss = (
tf.reduce_mean(input_tensor=layer.output, keepdims=True)
* self.config.LOSS_WEIGHTS.get(name, 1.))
self.keras_model.add_metric(loss, name=name, aggregation='mean')
def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):
"""Sets model layers as trainable if their names match
the given regular expression.
"""
# Print message on the first call (but not on recursive calls)
if verbose > 0 and keras_model is None:
log("Selecting layers to train")
keras_model = keras_model or self.keras_model
# In multi-GPU training, we wrap the model. Get layers
# of the inner model because they have the weights.
layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
else keras_model.layers
for layer in layers:
# Is the layer a model?
if layer.__class__.__name__ == 'Model':
print("In model: ", layer.name)
self.set_trainable(
layer_regex, keras_model=layer, indent=indent + 4)
continue
if not layer.weights:
continue
# Is it trainable?
trainable = bool(re.fullmatch(layer_regex, layer.name))
# Update layer. If layer is a container, update inner layer.
if layer.__class__.__name__ == 'TimeDistributed':
layer.layer.trainable = trainable
else:
layer.trainable = trainable
# Print trainable layer names
if trainable and verbose > 0:
log("{}{:20} ({})".format(" " * indent, layer.name,
layer.__class__.__name__))
def set_log_dir(self, model_path=None):
"""Sets the model log directory and epoch counter.
model_path: If None, or a format different from what this code uses
then set a new log directory and start epochs from 0. Otherwise,
extract the log directory and the epoch counter from the file
name.
"""
# Set date and epoch counter as if starting a new model
self.epoch = 0
now = datetime.datetime.now()
# If we have a model path with date and epochs use them
if model_path:
            # Continue from where we left off. Get epoch and date from the file name
# A sample model path might look like:
# \path\to\logs\coco20171029T2315\mask_rcnn_coco_0001.h5 (Windows)
# /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5 (Linux)
regex = r".*[/\\][\w-]+(\d{4})(\d{2})(\d{2})T(\d{2})(\d{2})[/\\]mask\_rcnn\_[\w-]+(\d{4})\.h5"
# Use string for regex since we might want to use pathlib.Path as model_path
m = re.match(regex, str(model_path))
if m:
now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)),
int(m.group(4)), int(m.group(5)))
# Epoch number in file is 1-based, and in Keras code it's 0-based.
# So, adjust for that then increment by one to start from the next epoch
self.epoch = int(m.group(6)) - 1 + 1
print('Re-starting from epoch %d' % self.epoch)
# Directory for training logs
self.log_dir = os.path.join(self.model_dir, "{}{:%Y%m%dT%H%M}".format(
self.config.NAME.lower(), now))
# Path to save after each epoch. Include placeholders that get filled by Keras.
self.checkpoint_path = os.path.join(self.log_dir, "mask_rcnn_{}_*epoch*.h5".format(
self.config.NAME.lower()))
self.checkpoint_path = self.checkpoint_path.replace(
"*epoch*", "{epoch:04d}")
def train(self, train_dataset, val_dataset, learning_rate, epochs, layers,
augmentation=None, custom_callbacks=None, no_augmentation_sources=None):
"""Train the model.
train_dataset, val_dataset: Training and validation Dataset objects.
learning_rate: The learning rate to train with
        epochs: Number of training epochs. Note that previous training epochs
            are considered to be done already, so this actually determines
            the epochs to train in total rather than in this particular
            call.
        layers: Allows selecting which layers to train. It can be:
- A regular expression to match layer names to train
- One of these predefined values:
heads: The RPN, classifier and mask heads of the network
all: All the layers
3+: Train Resnet stage 3 and up
4+: Train Resnet stage 4 and up
5+: Train Resnet stage 5 and up
augmentation: Optional. An imgaug (https://github.com/aleju/imgaug)
augmentation. For example, passing imgaug.augmenters.Fliplr(0.5)
flips images right/left 50% of the time. You can pass complex
augmentations as well. This augmentation applies 50% of the
time, and when it does it flips images right/left half the time
and adds a Gaussian blur with a random sigma in range 0 to 5.
augmentation = imgaug.augmenters.Sometimes(0.5, [
imgaug.augmenters.Fliplr(0.5),
imgaug.augmenters.GaussianBlur(sigma=(0.0, 5.0))
])
        custom_callbacks: Optional. Add custom callbacks to be called
            with the keras fit method. Must be a list of
            keras.callbacks.Callback instances.
        no_augmentation_sources: Optional. List of sources to exclude for
            augmentation. A source is a string that identifies a dataset and is
            defined in the Dataset class.
"""
assert self.mode == "training", "Create model in training mode."
# Pre-defined layer regular expressions
layer_regex = {
# all layers but the backbone
"heads": r"(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
# From a specific Resnet stage and up
"3+": r"(res3.*)|(bn3.*)|(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
"4+": r"(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
"5+": r"(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
# All layers
"all": ".*",
}
if layers in layer_regex.keys():
layers = layer_regex[layers]
# Data generators
train_generator = DataGenerator(train_dataset, self.config, shuffle=True,
augmentation=augmentation)
val_generator = DataGenerator(val_dataset, self.config, shuffle=True)
# Create log_dir if it does not exist
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
# Callbacks
callbacks = [
keras.callbacks.TensorBoard(log_dir=self.log_dir,
histogram_freq=0, write_graph=True, write_images=False),
keras.callbacks.ModelCheckpoint(self.checkpoint_path,
verbose=0, save_weights_only=True),
]
# Add custom callbacks to the list
if custom_callbacks:
callbacks += custom_callbacks
# Train
log("\nStarting at epoch {}. LR={}\n".format(self.epoch, learning_rate))
log("Checkpoint Path: {}".format(self.checkpoint_path))
self.set_trainable(layers)
self.compile(learning_rate, self.config.LEARNING_MOMENTUM)
# Work-around for Windows: Keras fails on Windows when using
# multiprocessing workers. See discussion here:
# https://github.com/matterport/Mask_RCNN/issues/13#issuecomment-353124009
if os.name == 'nt':
workers = 0
else:
workers = multiprocessing.cpu_count()
self.keras_model.fit(
train_generator,
initial_epoch=self.epoch,
epochs=epochs,
steps_per_epoch=self.config.STEPS_PER_EPOCH,
callbacks=callbacks,
validation_data=val_generator,
validation_steps=self.config.VALIDATION_STEPS,
max_queue_size=100,
workers=workers,
use_multiprocessing=workers > 1,
)
self.epoch = max(self.epoch, epochs)
def mold_inputs(self, images):
"""Takes a list of images and modifies them to the format expected
as an input to the neural network.
images: List of image matrices [height,width,depth]. Images can have
different sizes.
Returns 3 Numpy matrices:
molded_images: [N, h, w, 3]. Images resized and normalized.
image_metas: [N, length of meta data]. Details about each image.
windows: [N, (y1, x1, y2, x2)]. The portion of the image that has the
original image (padding excluded).
"""
molded_images = []
image_metas = []
windows = []
for image in images:
# Resize image
# TODO: move resizing to mold_image()
molded_image, window, scale, padding, crop = utils.resize_image(
image,
min_dim=self.config.IMAGE_MIN_DIM,
min_scale=self.config.IMAGE_MIN_SCALE,
max_dim=self.config.IMAGE_MAX_DIM,
mode=self.config.IMAGE_RESIZE_MODE)
molded_image = mold_image(molded_image, self.config)
# Build image_meta
image_meta = compose_image_meta(
0, image.shape, molded_image.shape, window, scale,
np.zeros([self.config.NUM_CLASSES], dtype=np.int32))
# Append
molded_images.append(molded_image)
windows.append(window)
image_metas.append(image_meta)
# Pack into arrays
molded_images = np.stack(molded_images)
image_metas = np.stack(image_metas)
windows = np.stack(windows)
return molded_images, image_metas, windows
def unmold_detections(self, detections, mrcnn_mask, original_image_shape,
image_shape, window):
"""Reformats the detections of one image from the format of the neural
network output to a format suitable for use in the rest of the
application.
detections: [N, (y1, x1, y2, x2, class_id, score)] in normalized coordinates
mrcnn_mask: [N, height, width, num_classes]
original_image_shape: [H, W, C] Original image shape before resizing
image_shape: [H, W, C] Shape of the image after resizing and padding
window: [y1, x1, y2, x2] Pixel coordinates of box in the image where the real
image is excluding the padding.
Returns:
boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels
class_ids: [N] Integer class IDs for each bounding box
scores: [N] Float probability scores of the class_id
masks: [height, width, num_instances] Instance masks
"""
# How many detections do we have?
# Detections array is padded with zeros. Find the first class_id == 0.
zero_ix = np.where(detections[:, 4] == 0)[0]
N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0]
# Extract boxes, class_ids, scores, and class-specific masks
boxes = detections[:N, :4]
class_ids = detections[:N, 4].astype(np.int32)
scores = detections[:N, 5]
masks = mrcnn_mask[np.arange(N), :, :, class_ids]
# Translate normalized coordinates in the resized image to pixel
# coordinates in the original image before resizing
window = utils.norm_boxes(window, image_shape[:2])
wy1, wx1, wy2, wx2 = window
shift = np.array([wy1, wx1, wy1, wx1])
wh = wy2 - wy1 # window height
ww = wx2 - wx1 # window width
scale = np.array([wh, ww, wh, ww])
# Convert boxes to normalized coordinates on the window
boxes = np.divide(boxes - shift, scale)
# Convert boxes to pixel coordinates on the original image
boxes = utils.denorm_boxes(boxes, original_image_shape[:2])
# Filter out detections with zero area. Happens in early training when
# network weights are still random
exclude_ix = np.where(
(boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]
if exclude_ix.shape[0] > 0:
boxes = np.delete(boxes, exclude_ix, axis=0)
class_ids = np.delete(class_ids, exclude_ix, axis=0)
scores = np.delete(scores, exclude_ix, axis=0)
masks = np.delete(masks, exclude_ix, axis=0)
N = class_ids.shape[0]
# Resize masks to original image size and set boundary threshold.
full_masks = []
for i in range(N):
# Convert neural network mask to full size mask
full_mask = utils.unmold_mask(masks[i], boxes[i], original_image_shape)
full_masks.append(full_mask)
full_masks = np.stack(full_masks, axis=-1)\
if full_masks else np.empty(original_image_shape[:2] + (0,))
return boxes, class_ids, scores, full_masks
def detect(self, images, verbose=0):
"""Runs the detection pipeline.
images: List of images, potentially of different sizes.
Returns a list of dicts, one dict per image. The dict contains:
rois: [N, (y1, x1, y2, x2)] detection bounding boxes
class_ids: [N] int class IDs
scores: [N] float probability scores for the class IDs
masks: [H, W, N] instance binary masks
"""
assert self.mode == "inference", "Create model in inference mode."
assert len(
images) == self.config.BATCH_SIZE, "len(images) must be equal to BATCH_SIZE"
if verbose:
log("Processing {} images".format(len(images)))
for image in images:
log("image", image)
# Mold inputs to format expected by the neural network
molded_images, image_metas, windows = self.mold_inputs(images)
# Validate image sizes
# All images in a batch MUST be of the same size
image_shape = molded_images[0].shape
for g in molded_images[1:]:
assert g.shape == image_shape,\
"After resizing, all images must have the same size. Check IMAGE_RESIZE_MODE and image sizes."
# Anchors
anchors = self.get_anchors(image_shape)
# Duplicate across the batch dimension because Keras requires it
# TODO: can this be optimized to avoid duplicating the anchors?
anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
if verbose:
log("molded_images", molded_images)
log("image_metas", image_metas)
log("anchors", anchors)
# Run object detection
detections, _, _, mrcnn_mask, _, _, _ =\
self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)
# Process detections
results = []
for i, image in enumerate(images):
final_rois, final_class_ids, final_scores, final_masks =\
self.unmold_detections(detections[i], mrcnn_mask[i],
image.shape, molded_images[i].shape,
windows[i])
results.append({
"rois": final_rois,
"class_ids": final_class_ids,
"scores": final_scores,
"masks": final_masks,
})
return results
def detect_molded(self, molded_images, image_metas, verbose=0):
"""Runs the detection pipeline, but expect inputs that are
molded already. Used mostly for debugging and inspecting
the model.
molded_images: List of images loaded using load_image_gt()
image_metas: image meta data, also returned by load_image_gt()
Returns a list of dicts, one dict per image. The dict contains:
rois: [N, (y1, x1, y2, x2)] detection bounding boxes
class_ids: [N] int class IDs
scores: [N] float probability scores for the class IDs
masks: [H, W, N] instance binary masks
"""
assert self.mode == "inference", "Create model in inference mode."
assert len(molded_images) == self.config.BATCH_SIZE,\
"Number of images must be equal to BATCH_SIZE"
if verbose:
log("Processing {} images".format(len(molded_images)))
for image in molded_images:
log("image", image)
# Validate image sizes
# All images in a batch MUST be of the same size
image_shape = molded_images[0].shape
for g in molded_images[1:]:
assert g.shape == image_shape, "Images must have the same size"
# Anchors
anchors = self.get_anchors(image_shape)
# Duplicate across the batch dimension because Keras requires it
# TODO: can this be optimized to avoid duplicating the anchors?
anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
if verbose:
log("molded_images", molded_images)
log("image_metas", image_metas)
log("anchors", anchors)
# Run object detection
detections, _, _, mrcnn_mask, _, _, _ =\
self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)
# Process detections
results = []
for i, image in enumerate(molded_images):
window = [0, 0, image.shape[0], image.shape[1]]
final_rois, final_class_ids, final_scores, final_masks =\
self.unmold_detections(detections[i], mrcnn_mask[i],
image.shape, molded_images[i].shape,
window)
results.append({
"rois": final_rois,
"class_ids": final_class_ids,
"scores": final_scores,
"masks": final_masks,
})
return results
def get_anchors(self, image_shape):
"""Returns anchor pyramid for the given image size."""
backbone_shapes = compute_backbone_shapes(self.config, image_shape)
# Cache anchors and reuse if image shape is the same
if not hasattr(self, "_anchor_cache"):
self._anchor_cache = {}
if not tuple(image_shape) in self._anchor_cache:
# Generate Anchors
a = utils.generate_pyramid_anchors(
self.config.RPN_ANCHOR_SCALES,
self.config.RPN_ANCHOR_RATIOS,
backbone_shapes,
self.config.BACKBONE_STRIDES,
self.config.RPN_ANCHOR_STRIDE)
# Keep a copy of the latest anchors in pixel coordinates because
# it's used in inspect_model notebooks.
        # TODO: Remove this after the notebooks are refactored to not use it
self.anchors = a
# Normalize coordinates
self._anchor_cache[tuple(image_shape)] = utils.norm_boxes(a, image_shape[:2])
return self._anchor_cache[tuple(image_shape)]
def ancestor(self, tensor, name, checked=None):
"""Finds the ancestor of a TF tensor in the computation graph.
tensor: TensorFlow symbolic tensor.
name: Name of ancestor tensor to find
checked: For internal use. A list of tensors that were already
searched to avoid loops in traversing the graph.
"""
checked = checked if checked is not None else []
# Put a limit on how deep we go to avoid very long loops
if len(checked) > 500:
return None
# Convert name to a regex and allow matching a number prefix
# because Keras adds them automatically
if isinstance(name, str):
name = re.compile(name.replace("/", r"(\_\d+)*/"))
parents = tensor.op.inputs
for p in parents:
if p in checked:
continue
if bool(re.fullmatch(name, p.name)):
return p
checked.append(p)
a = self.ancestor(p, name, checked)
if a is not None:
return a
return None
def find_trainable_layer(self, layer):
"""If a layer is encapsulated by another layer, this function
digs through the encapsulation and returns the layer that holds
the weights.
"""
if layer.__class__.__name__ == 'TimeDistributed':
return self.find_trainable_layer(layer.layer)
return layer
def get_trainable_layers(self):
"""Returns a list of layers that have weights."""
layers = []
# Loop through all layers
for l in self.keras_model.layers:
# If layer is a wrapper, find inner trainable layer
l = self.find_trainable_layer(l)
# Include layer if it has weights
if l.get_weights():
layers.append(l)
return layers
def run_graph(self, images, outputs, image_metas=None):
"""Runs a sub-set of the computation graph that computes the given
outputs.
image_metas: If provided, the images are assumed to be already
molded (i.e. resized, padded, and normalized)
outputs: List of tuples (name, tensor) to compute. The tensors are
symbolic TensorFlow tensors and the names are for easy tracking.
Returns an ordered dict of results. Keys are the names received in the
input and values are Numpy arrays.
"""
model = self.keras_model
# Organize desired outputs into an ordered dict
outputs = OrderedDict(outputs)
for o in outputs.values():
assert o is not None
# Build a Keras function to run parts of the computation graph
inputs = model.inputs
# if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
# inputs += [K.learning_phase()]
kf = K.function(model.inputs, list(outputs.values()))
# Prepare inputs
if image_metas is None:
molded_images, image_metas, _ = self.mold_inputs(images)
else:
molded_images = images
image_shape = molded_images[0].shape
# Anchors
anchors = self.get_anchors(image_shape)
# Duplicate across the batch dimension because Keras requires it
# TODO: can this be optimized to avoid duplicating the anchors?
anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
model_in = [molded_images, image_metas, anchors]
# Run inference
# if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
# model_in.append(0.)
outputs_np = kf(model_in)
        # Pack the generated Numpy arrays into a dict and log the results.
outputs_np = OrderedDict([(k, v)
for k, v in zip(outputs.keys(), outputs_np)])
for k, v in outputs_np.items():
log(k, v)
return outputs_np
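# Illustrative usage sketch (not part of the original code): typical training
# and inference entry points of the MaskRCNN wrapper. `config`, the datasets
# and `weights_path` are placeholders; inference assumes config.BATCH_SIZE == 1
# so that a single image can be passed to detect().
def _example_mask_rcnn_usage(config, train_dataset, val_dataset, weights_path,
                             model_dir="./logs"):
    """Train the heads, then run detection on one image (illustrative only)."""
    # Training
    model = MaskRCNN(mode="training", config=config, model_dir=model_dir)
    model.load_weights(weights_path, by_name=True)
    model.train(train_dataset, val_dataset,
                learning_rate=config.LEARNING_RATE,
                epochs=1, layers="heads")
    # Inference
    inference_model = MaskRCNN(mode="inference", config=config,
                               model_dir=model_dir)
    inference_model.load_weights(model.find_last(), by_name=True)
    image = val_dataset.load_image(val_dataset.image_ids[0])
    results = inference_model.detect([image], verbose=0)
    return results[0]  # dict with "rois", "class_ids", "scores", "masks"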
############################################################
# Data Formatting
############################################################
def compose_image_meta(image_id, original_image_shape, image_shape,
window, scale, active_class_ids):
"""Takes attributes of an image and puts them in one 1D array.
image_id: An int ID of the image. Useful for debugging.
original_image_shape: [H, W, C] before resizing or padding.
image_shape: [H, W, C] after resizing and padding
window: (y1, x1, y2, x2) in pixels. The area of the image where the real
image is (excluding the padding)
scale: The scaling factor applied to the original image (float32)
active_class_ids: List of class_ids available in the dataset from which
the image came. Useful if training on images from multiple datasets
where not all classes are present in all datasets.
"""
meta = np.array(
[image_id] + # size=1
list(original_image_shape) + # size=3
list(image_shape) + # size=3
        list(window) +                  # size=4 (y1, x1, y2, x2) in image coordinates
[scale] + # size=1
list(active_class_ids) # size=num_classes
)
return meta
def parse_image_meta(meta):
"""Parses an array that contains image attributes to its components.
See compose_image_meta() for more details.
meta: [batch, meta length] where meta length depends on NUM_CLASSES
Returns a dict of the parsed values.
"""
image_id = meta[:, 0]
original_image_shape = meta[:, 1:4]
image_shape = meta[:, 4:7]
    window = meta[:, 7:11]  # (y1, x1, y2, x2) window of image in pixels
scale = meta[:, 11]
active_class_ids = meta[:, 12:]
return {
"image_id": image_id.astype(np.int32),
"original_image_shape": original_image_shape.astype(np.int32),
"image_shape": image_shape.astype(np.int32),
"window": window.astype(np.int32),
"scale": scale.astype(np.float32),
"active_class_ids": active_class_ids.astype(np.int32),
}
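# Illustrative sketch (not part of the original code): compose_image_meta()
# and parse_image_meta() round-trip an image's metadata. The shapes, window
# and scale below are made-up values for illustration only.
def _example_image_meta_roundtrip(num_classes=81):
    """Pack one image_meta vector and parse it back (illustrative only)."""
    meta = compose_image_meta(
        image_id=0,
        original_image_shape=(480, 640, 3),
        image_shape=(1024, 1024, 3),
        window=(128, 0, 896, 1024),   # area of the real image inside padding
        scale=1.6,
        active_class_ids=np.ones([num_classes], dtype=np.int32))
    parsed = parse_image_meta(meta[np.newaxis, :])  # parser expects a batch dim
    return parsed["image_shape"], parsed["scale"]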
def parse_image_meta_graph(meta):
"""Parses a tensor that contains image attributes to its components.
See compose_image_meta() for more details.
meta: [batch, meta length] where meta length depends on NUM_CLASSES
Returns a dict of the parsed tensors.
"""
image_id = meta[:, 0]
original_image_shape = meta[:, 1:4]
image_shape = meta[:, 4:7]
    window = meta[:, 7:11]  # (y1, x1, y2, x2) window of image in pixels
scale = meta[:, 11]
active_class_ids = meta[:, 12:]
return {
"image_id": image_id,
"original_image_shape": original_image_shape,
"image_shape": image_shape,
"window": window,
"scale": scale,
"active_class_ids": active_class_ids,
}
def mold_image(images, config):
"""Expects an RGB image (or array of images) and subtracts
the mean pixel and converts it to float. Expects image
colors in RGB order.
"""
return images.astype(np.float32) - config.MEAN_PIXEL
def unmold_image(normalized_images, config):
"""Takes a image normalized with mold() and returns the original."""
return (normalized_images + config.MEAN_PIXEL).astype(np.uint8)
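# Illustrative sketch (not part of the original code): mold_image() and
# unmold_image() are inverses of each other up to float/uint8 conversion.
def _example_mold_unmold(image, config):
    """Zero-center an image and then restore it (illustrative only)."""
    molded = mold_image(image, config)       # float32, mean pixel subtracted
    restored = unmold_image(molded, config)  # uint8, original colors
    return molded.dtype, restored.dtype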
############################################################
#  Miscellaneous Graph Functions
############################################################
def trim_zeros_graph(boxes, name='trim_zeros'):
"""Often boxes are represented with matrices of shape [N, 4] and
are padded with zeros. This removes zero boxes.
    boxes: [N, 4] matrix of boxes.
    Returns:
        boxes: [M, 4] the non-zero boxes.
        non_zeros: [N] a 1D boolean mask identifying the rows to keep.
"""
non_zeros = tf.cast(tf.reduce_sum(input_tensor=tf.abs(boxes), axis=1), tf.bool)
boxes = tf.boolean_mask(tensor=boxes, mask=non_zeros, name=name)
return boxes, non_zeros
def batch_pack_graph(x, counts, num_rows):
"""Picks different number of values from each row
in x depending on the values in counts.
"""
outputs = []
for i in range(num_rows):
outputs.append(x[i, :counts[i]])
return tf.concat(outputs, axis=0)
def norm_boxes_graph(boxes, shape):
"""Converts boxes from pixel coordinates to normalized coordinates.
boxes: [..., (y1, x1, y2, x2)] in pixel coordinates
shape: [..., (height, width)] in pixels
Note: In pixel coordinates (y2, x2) is outside the box. But in normalized
coordinates it's inside the box.
Returns:
[..., (y1, x1, y2, x2)] in normalized coordinates
"""
h, w = tf.split(tf.cast(shape, tf.float32), 2)
scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)
shift = tf.constant([0., 0., 1., 1.])
return tf.divide(boxes - shift, scale)
def denorm_boxes_graph(boxes, shape):
"""Converts boxes from normalized coordinates to pixel coordinates.
boxes: [..., (y1, x1, y2, x2)] in normalized coordinates
shape: [..., (height, width)] in pixels
Note: In pixel coordinates (y2, x2) is outside the box. But in normalized
coordinates it's inside the box.
Returns:
[..., (y1, x1, y2, x2)] in pixel coordinates
"""
h, w = tf.split(tf.cast(shape, tf.float32), 2)
scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)
shift = tf.constant([0., 0., 1., 1.])
return tf.cast(tf.round(tf.multiply(boxes, scale) + shift), tf.int32)
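# Illustrative sketch (not part of the original code): the same pixel <->
# normalized conversion used by the graph functions above is available as
# NumPy helpers in utils, which is often more convenient outside the TF graph.
def _example_box_normalization(image_shape=(1024, 1024)):
    """Round-trip a box through normalized coordinates (illustrative only)."""
    boxes = np.array([[10, 20, 200, 400]], dtype=np.int32)  # (y1, x1, y2, x2)
    normalized = utils.norm_boxes(boxes, image_shape)
    restored = utils.denorm_boxes(normalized, image_shape)
    return normalized, restored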
############################################################
# Appended dataset row
# Path: dedupsqlfs/db/mysql/table/hash_sizes.py
# Repo: tabulon-ext/dedupsqlfs @ 9dfbed17450e7f2a499a7381e0368d08ae3c700d
# License: MIT
############################################################
# -*- coding: utf8 -*-
__author__ = 'sergey'
from dedupsqlfs.db.mysql.table import Table
class TableHashSizes( Table ):
_table_name = "hash_sizes"
def create( self ):
cur = self.getCursor()
# Create table
cur.execute(
"CREATE TABLE IF NOT EXISTS `%s` (" % self.getName()+
"`hash_id` BIGINT UNSIGNED PRIMARY KEY, "+
"`writed_size` INT UNSIGNED NOT NULL, "+
"`compressed_size` INT UNSIGNED NOT NULL "+
")"+
self._getCreationAppendString()
)
return
def insert( self, hash_id, writed_size, compressed_size):
"""
:return: int
"""
self.startTimer()
cur = self.getCursor()
cur.execute(
"INSERT INTO `%s` " % self.getName()+
" (`hash_id`, `writed_size`, `compressed_size`) VALUES (%(id)s, %(ws)s, %(cs)s)",
{
"id": hash_id,
"ws": writed_size,
"cs": compressed_size
}
)
item = cur.lastrowid
self.stopTimer('insert')
return item
def update( self, hash_id, writed_size, compressed_size):
"""
:return: int
"""
self.startTimer()
cur = self.getCursor()
cur.execute(
"UPDATE `%s` " % self.getName() +
" SET `compressed_size`=%(cs)s, `writed_size`=%(ws)s WHERE `hash_id`=%(id)s",
{
"cs": compressed_size,
"ws": writed_size,
"id": hash_id
}
)
count = cur.rowcount
self.stopTimer('update')
return count
def get( self, hash_id):
"""
:param hash_id: int
:return: Row
"""
self.startTimer()
cur = self.getCursor()
cur.execute(
"SELECT * FROM `%s` " % self.getName()+
" WHERE `hash_id`=%(id)s",
{
"id": hash_id
}
)
item = cur.fetchone()
self.stopTimer('get')
return item
def remove_by_ids(self, id_str):
self.startTimer()
count = 0
if id_str:
cur = self.getCursor()
cur.execute("DELETE FROM `%s` " % self.getName()+
" WHERE `hash_id` IN (%s)" % (id_str,))
count = cur.rowcount
self.stopTimer('remove_by_ids')
return count
def get_sizes_by_hash_ids(self, id_str):
self.startTimer()
items = {}
if id_str:
cur = self.getCursor()
cur.execute("SELECT * FROM `%s` " % self.getName()+
" WHERE `hash_id` IN (%s)" % (id_str,))
for _i in cur:
items[ _i["hash_id"] ] = (_i["writed_size"], _i["compressed_size"],)
self.stopTimer('get_sizes_by_hash_ids')
return items
def get_median_compressed_size(self):
self.startTimer()
self.stopTimer('get_median_compressed_size')
return 0
def get_mean_compressed_size(self):
self.startTimer()
self.stopTimer('get_mean_compressed_size')
return 0
pass
############################################################
# Appended dataset row
# Path: MerakiGuestPW.py
# Repo: donthor/MerakiGuestPW @ 1ecfd89a4e7681c11b4fe05dfdb4548635a97086
# License: BSD-3-Clause
############################################################
#!/usr/local/bin/python3
import requests
import json
import os
import click
import random
import string
import sys
meraki_key = os.environ.get('MERAKI_API_KEY')
merakinetwork = os.environ.get('MY_MERAKI_NETWORK')
webexBearerToken = os.environ.get('WEBEX_BEARER_TOKEN')
roomID = os.environ.get('WEBEX_PAROCKHO_BOT')
baseurl = "https://dashboard.meraki.com/api/v1/networks/"
def randomize_pw():
pw_chars = string.ascii_letters + string.digits
pw = ''.join(random.choice(pw_chars) for i in range(8))
return pw
@click.command()
@click.option('--pw', type=str, default='', help="Password for SSID - use 'random' to generate 8 character password")
@click.option('--ssid', type=int, default=14, help="SSID number (0-14)")
@click.option('--force/--no-force', default=False, help="Continue without y/n verification")
@click.option('--webex/--no-webex', default=False, help="Send new password to Webex room of your choice")
def changePass(pw, ssid, force, webex):
if pw == 'random':
pw = randomize_pw()
url = baseurl + str(merakinetwork) + '/wireless/ssids/'
payload = json.dumps(
{"psk": pw
})
headers = {
'X-Cisco-Meraki-API-Key': meraki_key,
'Content-Type': 'application/json'
}
response_get = requests.get(url + str(ssid), headers=headers)
data = response_get.json()
if force:
response = requests.request("PUT", url + str(ssid), headers=headers, data=payload)
else:
print(f'Please confirm change to SSID: {data["name"]} with password: {pw}')
while (answer := input("Do you want to continue? (Enter y/n)").lower() ) not in {"y", "n"}:
pass
if answer == 'y':
response = requests.request("PUT", url + str(ssid), headers=headers, data=payload)
# print(response.status_code)
if response.status_code == 200:
click.echo('Password Modified')
if answer == 'n':
sys.exit(0)
#Post in Webex using '--webex' option
if webex:
#get SSID and password and set it to the 'message' variable
message = f'The new password for SSID {data["name"]} is {pw}'
#set the 'url' variable to the webex url
url = "https://api.ciscospark.com/v1/messages"
#define body and header data
payload="{\r\n \"roomId\" : \"" + roomID + "\",\r\n \"text\" : \"" + message + "\"\r\n}"
headers = {
'Authorization': 'Bearer ' + webexBearerToken,
'Content-Type': 'application/json'
}
#make the API call and save the response into the 'response' variable
response = requests.request("POST", url, headers=headers, data=payload)
#evaluate if the response was successful
# if response.status_code == 200:
# print("Success!")
# else:
# print("Message failed. Code: ", response.status_code)
# print(response.text)
if __name__ == '__main__':
changePass()
############################################################
# Appended dataset row
# Path: examples/laserScanningMicroscope.py
# Repo: gregsadetsky/RayTracing @ 3d11ed91014a47bddc797495ca2af059005e810d
# License: MIT
############################################################
import envexamples
from raytracing import *
import matplotlib.pyplot as plt
import numpy as np
"""
In a laser scanning system, the scanning components define
the covered field-of-view (FOV) at the sample plane. Here,
we show a one-dimensional example with a polygonal mirror
of 36 facets that rotates rapidly to scan
the beam along the horizontal direction. It produces a mechanical
sweep of 10 degrees, or about 0.175 rad, between facets.
Therefore, the laser beam covers a total optical scan angle of 20 degrees.
In the following example, the object is considered to be the
laser beam at the polygonal mirror plane.
The output profile shows on its x-axis the width of the FOV under the objective.
"""
# List of the scan angles of the rays making it through the system.
thetas = []
# List of 1 corresponding to the number of elements in heights
# so that plt.plot() doesn't freak out.
positions = []
# Radius of the laser beam at the scanning element.
objectHalfHeight = 0.000250
# Angle produced by the scanning element.
scanAngle = 10*np.pi/180
# Number of total rays considered in these calculations.
nRays = 10000
# Production of rays in the angle range of the scanning element.
scanRays = UniformRays(yMax=0, thetaMax=scanAngle, M=1, N=nRays)
class UISUPLAPO60XW(Objective):
def __init__(self):
super(UISUPLAPO60XW, self).__init__(f=180/60,
NA=1.2,
focusToFocusLength=40,
backAperture=7,
workingDistance=0.28,
magnification=60,
fieldNumber=22,
label='UISUPLAPO60XW Objective')
def illuminationPath():
illumination = ImagingPath()
# The object in this situation is the laser beam at the scanning element.
illumination.objectHeight = objectHalfHeight*2
illumination.rayNumber = 3
illumination.fanNumber = 3
illumination.fanAngle = 0
illumination.append(System4f(f1=40, f2=75, diameter1=24.5, diameter2=24.5))
illumination.append(System4f(f1=100, f2=100, diameter1=24.5, diameter2=24.5))
illumination.append(Space(d=180/40))
illumination.append(UISUPLAPO60XW())
illumination.append(Space(d=180/40))
return illumination
path = illuminationPath()
outputRays = path.traceManyThrough(scanRays)
for i in range(len(outputRays)):
thetas.append(scanRays[i].theta*180/np.pi)
positions.append(outputRays[i].y*1000)
scanRays.displayProgress()
plt.plot(positions, thetas)
plt.ylabel('Scan angle (degrees)')
plt.xlabel('Scanning position of the focal spot (µm)')
plt.show()
| 28.8125
| 81
| 0.670644
|
d548bc779d8e42b407e3f34d848185eec318cd18
| 6,143
|
py
|
Python
|
comptox_ai/scripts/make_tabular_dataset.py
|
van-truong/comptox_ai
|
393b05c617822e30f54c967ef07ec53ba4b09688
|
[
"MIT"
] | 6
|
2020-03-09T17:27:34.000Z
|
2022-03-15T15:37:15.000Z
|
comptox_ai/scripts/make_tabular_dataset.py
|
van-truong/comptox_ai
|
393b05c617822e30f54c967ef07ec53ba4b09688
|
[
"MIT"
] | 11
|
2021-05-04T18:46:51.000Z
|
2022-03-01T01:06:49.000Z
|
comptox_ai/scripts/make_tabular_dataset.py
|
van-truong/comptox_ai
|
393b05c617822e30f54c967ef07ec53ba4b09688
|
[
"MIT"
] | 2
|
2020-02-23T14:11:10.000Z
|
2021-10-04T18:27:58.000Z
|
#!/usr/bin/env python3
"""
make_tabular_dataset.py
Standalone Python job to generate a tabular dataset for predictive toxicology.
Briefly, the user specifies a node type and node features, as well as a target
feature or relationship.
This returns a file that is suitable for QSAR or similar analyses.
The algorithm for extracting a tabular dataset is roughly as follows: First,
the user creates a new 'in memory' graph in the Neo4j GDS library's graph
catalog. This is basically a subgraph of the complete graph database,
originally meant for providing an environment in which users can efficiently
run graph algorithms on relevant portions of the database. However, we co-opt
this functionality to allow rapid exporting of those same subgraphs for use in
external data analysis frameworks (like scikit-learn, networkx, DGL, etc.).
Specifically, we perform a *native projection*, where we supply the entire set
of node types and relationship types in the desired subgraph, along with either
specific node features or all defined node features for those node types. We
then call a routine to stream the graph into local Python data structures.
"""
from comptox_ai.db.graph_db import Graph, GraphDB
from yaml import load, Loader
from pathlib import Path
import pandas as pd
import ipdb
import os
import datetime as dt
def _get_default_config_file():
root_dir = Path(__file__).resolve().parents[2]
if os.path.exists(os.path.join(root_dir, 'CONFIG.yaml')):
default_config_file = os.path.join(root_dir, 'CONFIG.yaml')
else:
default_config_file = os.path.join(root_dir, 'CONFIG-default.yaml')
return default_config_file
def _make_timestamped_output_directory(parent_dir, prefixes=['subgraph', 'tsv'], suffixes=None):
ts = dt.datetime.now().strftime('%Y%m%d_%H%M%S')
pre = f"{'-'.join(prefixes)}_" if prefixes else ""
post = f"_{'-'.join(suffixes)}" if suffixes else ""
dirname = f"{pre}{ts}{post}"
full_path = os.path.join(parent_dir, dirname)
os.makedirs(full_path)
return full_path
def make_node_table(db, node_label, node_properties = None):
if node_properties is None:
# No filters on properties, so we will just fetch all of them
res = db.fetch_nodes(node_label)
res_df = pd.DataFrame(res)
del(res)
else:
raise NotImplementedError
return res_df
def make_relationship_table(db, relationship_type, from_label, to_label, relationship_properties = None):
if relationship_properties is not None:
# TODO: Figure out how/if to handle relationship properties if it ever
# looks like it would be useful.
raise NotImplementedError
res = db.fetch_relationships(relationship_type, from_label, to_label)
res_df = pd.DataFrame(res, columns=['s', 'r', 'o'])
del(res)
return res_df
# connect to running Neo4j instance
db = GraphDB()
# Ideally, we'd be able to tap into GDS' subgraph projection functionality, but
# it unfortunately isn't compatible with non-numeric node or relationship
# properties. E.g., we store MACCS bitstrings as a string, because Neo4j
# doesn't have a better datatype. Therefore, we couldn't export them using GDS
# alone. We'll be keeping an eye on this, because it should drastically speed
# up the subgraph dataset generation procedure if they can get this to work.
# # Build the graph using a native projection
# TEST_NODE_PROJ = ['Chemical', 'Gene', 'Assay']
# TEST_REL_PROJ = ['CHEMICALBINDSGENE', 'CHEMICALDECREASESEXPRESSION', 'CHEMICALHASACTIVEASSAY']
# db.build_graph_native_projection(
# 'testgraph',
# TEST_NODE_PROJ,
# TEST_REL_PROJ
# )
NODE_LABELS = [
'Gene',
'Pathway',
'Assay'
]
OUTPUT_TYPE = 'tsv'
# Instead of what we described above, we will instead let Python do much of the
# heavy lifting. First we get the meta structure of the graph using APOC, we
# then determine the minimum spanning subtree over the metagraph, and then
# export all of the node types and relationship types contained in that tree.
metagraph = db.get_metagraph()
node_tables = dict()
rel_tables = dict()
# get node data
for nl in metagraph.node_labels:
if nl in NODE_LABELS:
# print(f"Loading {nl} nodes...")
node_table = make_node_table(db, nl)
print(f" Adding node table: {nl}")
node_tables[nl] = node_table
# get relationship data
for rt in metagraph.relationship_types:
this_rel_table = pd.DataFrame(columns=['s', 'r', 'o'])
for rt_s in metagraph.relationship_path_schema[rt]:
from_label = rt_s['from']
to_label = rt_s['to']
# The induced subgraph only includes edges where both the subject and the
# object of the relationship are members of NODE_LABELS
if (from_label in NODE_LABELS) and (to_label in NODE_LABELS):
print(f" Loading relationships for rel type {rt}...")
rel_table = make_relationship_table(db, rt, from_label, to_label)
# Note that we need to make a copy of the data and then reassign. Pandas
# doesn't have decent support for large-scale in-place operations. A
# possible solution is to somehow use HDFStore tables during the process.
this_rel_table = this_rel_table.append(rel_table, ignore_index=True)
# Only hold onto the dataframe if it contains rows
if this_rel_table.shape[0] > 0:
print("Adding relationship table: {from_label}-[{rt}]->{to_label}")
rel_tables[rt] = this_rel_table
else:
del(this_rel_table)
print()
print("Finished parsing nodes and relationships; now writing to disk...")
# export the results
config_file_path = _get_default_config_file()
with open(config_file_path, 'r') as fp:
cnf = load(fp, Loader=Loader)
output_directory = cnf['data']['output_dir']
if OUTPUT_TYPE == 'tsv':
output_dir = _make_timestamped_output_directory(output_directory)
for k_node, v_node in node_tables.items():
node_fname = os.path.join(output_dir, f"node_{k_node}.tsv")
v_node.to_csv(node_fname, sep="\t", index=None)
print(f"Wrote node file: {node_fname}")
for k_rel, v_rel in rel_tables.items():
rel_fname = os.path.join(output_dir, f"edge_{k_rel}.tsv")
v_rel.to_csv(rel_fname, sep="\t", index=None)
print(f"Wrote edge file: {rel_fname}")
| 35.304598
| 105
| 0.740843
|
a6955237a979d7b679ae37164f1e054c247c6940
| 3,775
|
py
|
Python
|
indico/modules/events/persons/util_test.py
|
salevajo/indico
|
6f9cbabc20d1641caea907099388ae2b04965cf8
|
[
"MIT"
] | 1
|
2021-12-27T17:51:27.000Z
|
2021-12-27T17:51:27.000Z
|
indico/modules/events/persons/util_test.py
|
salevajo/indico
|
6f9cbabc20d1641caea907099388ae2b04965cf8
|
[
"MIT"
] | 5
|
2021-04-08T19:26:47.000Z
|
2022-01-24T16:30:18.000Z
|
indico/modules/events/persons/util_test.py
|
salevajo/indico
|
6f9cbabc20d1641caea907099388ae2b04965cf8
|
[
"MIT"
] | 2
|
2019-02-24T17:29:10.000Z
|
2021-04-08T19:23:27.000Z
|
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import pytest
from indico.modules.events.persons.util import create_event_person, get_event_person, get_event_person_for_user
def test_get_person_for_user(db, dummy_event, dummy_user):
person = get_event_person_for_user(dummy_event, dummy_user)
# EventPerson created from scratch
assert person.id is None
assert person.user == dummy_user
db.session.add(person)
db.session.flush()
person = get_event_person_for_user(dummy_event, dummy_user)
# Already existing EventPerson
assert person.id is not None
assert person.user == dummy_user
def test_create_event_person(db, dummy_event):
data = {
'email': 'test@acme.com',
'firstName': 'John',
'familyName': 'Doe',
'affiliation': 'ACME Inc.'
}
person = create_event_person(dummy_event, **data)
assert person.event == dummy_event
assert person.email == 'test@acme.com'
assert person.full_name == 'John Doe'
assert person.affiliation == 'ACME Inc.'
def test_get_event_person(db, dummy_event, dummy_user):
data = {
'email': 'test@acme.com',
'firstName': 'John',
'familyName': 'Doe',
'affiliation': 'ACME Inc.'
}
person_1 = get_event_person(dummy_event, data)
# Person doesn't exist in the DB
assert person_1.id is None
# User neither
assert person_1.user is None
# save in the DB for later
db.session.add(person_1)
db.session.flush()
data = {
'email': '1337@example.com',
'firstName': 'Sea',
'familyName': 'Pig',
'affiliation': 'ACME Inc.'
}
person_2 = get_event_person(dummy_event, data)
# Person not in the DB either
assert person_2.id is None
# User exists, however (matched by e-mail)
assert person_2.user == dummy_user
assert person_2.full_name == 'Guinea Pig'
db.session.add(person_2)
db.session.flush()
person = get_event_person(dummy_event, data)
# Retrieved person should now be in the DB
assert person.id == person_2.id
# User for whom there is already an EventPerson in this event
data = {
'email': 'test@acme.com',
'firstName': 'JOHN',
'familyName': 'DOE',
'affiliation': 'ACME'
}
person_3 = get_event_person(dummy_event, data)
# We should get the first person
assert person_3.id == person_1.id
assert person_3.user is None
assert person_3.full_name == 'John Doe'
data = {
'firstName': 'Foo',
'familyName': 'Bar'
}
person_4 = get_event_person(dummy_event, data)
# We should get a new person
assert person_4.id is None
assert person_4.user is None
assert person_4.email == ''
assert person_4.full_name == 'Foo Bar'
def test_get_event_person_edit(db, dummy_event, dummy_user):
data = {
'email': 'test@acme.com',
'firstName': 'John',
'familyName': 'Doe',
'affiliation': 'ACME Inc.'
}
person_1 = get_event_person(dummy_event, dict(data, _type='Avatar', identifier=f'User:{dummy_user.id}'))
assert person_1.id is None
assert person_1.user == dummy_user
db.session.add(person_1)
db.session.flush()
person_2 = get_event_person(dummy_event, dict(data, _type='EventPerson', id=person_1.id))
assert person_2 == person_1
person_3 = get_event_person(dummy_event, dict(data, _type='PersonLink', personId=person_1.id))
assert person_3 == person_1
with pytest.raises(ValueError):
get_event_person(dummy_event, dict(data, _type='UnsupportedPersonType'))
| 31.198347
| 111
| 0.664901
|
7a4eaa2838241d97035cb71f36eee3cc992ca115
| 2,698
|
py
|
Python
|
vision.py
|
cemakd/makeathon-autonomous-vehicle
|
480e46d5e351c02ead1b5980f8c421b57c2f39fd
|
[
"MIT"
] | null | null | null |
vision.py
|
cemakd/makeathon-autonomous-vehicle
|
480e46d5e351c02ead1b5980f8c421b57c2f39fd
|
[
"MIT"
] | null | null | null |
vision.py
|
cemakd/makeathon-autonomous-vehicle
|
480e46d5e351c02ead1b5980f8c421b57c2f39fd
|
[
"MIT"
] | null | null | null |
'''
Makeathon National Instruments Autonomous Vehicle Challenge
Visual Traffic Signal and Sign Recognition
'''
#!/usr/bin/env python
import sys
import time
from array import array
import numpy as np
import argparse
import imutils
sys.path.append('/usr/local/lib/python2.7/site-packages')
import cv2
import glob
class Vision(object):
"""docstring for Vision."""
cap = None
show_mode = False
current_ret = None
current_frame = None
blob_detector = None
blob_params = None
prototypeImg = cv2.imread("./stopPrototype.png")
def __init__(self):
super(Vision, self).__init__()
# Blob Detector Parameters:
self.blob_params = cv2.SimpleBlobDetector_Params()
# Change thresholds
self.blob_params.minThreshold = 10
self.blob_params.maxThreshold = 200
# Filter by Area.
self.blob_params.filterByArea = True
self.blob_params.minArea = 500
# Filter by Circularity
self.blob_params.filterByCircularity = True
self.blob_params.minCircularity = 0.80
# Filter by Convexity
self.blob_params.filterByConvexity = True
self.blob_params.minConvexity = 0.80
# Filter by Inertia
self.blob_params.filterByInertia = True
self.blob_params.minInertiaRatio = 0.01
# Create a detector with the parameters
ver = (cv2.__version__).split('.')
if int(ver[0]) < 3 :
self.blob_detector = cv2.SimpleBlobDetector(self.blob_params)
else :
self.blob_detector = cv2.SimpleBlobDetector_create(self.blob_params)
def __del__(self):
# When everything done, release the capture
self.cap.release()
cv2.destroyAllWindows()
def connect(self):
# Connect to PS3 Eye:
print("Connection to camera")
self.cap = cv2.VideoCapture(1)
print("Connection Established")
def check_sign(self):
print("Checking Sign")
if self.current_ret == False:
self.connect()
# Read:
self.current_ret, self.current_frame = self.cap.read(1)
self.current_frame = cv2.medianBlur(self.current_frame, 5)
# Detect blobs.
keypoints = self.blob_detector.detect(self.current_frame)
# Draw detected blobs as red circles.
# cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures
# the size of the circle corresponds to the size of blob
print(keypoints)
im_with_keypoints = cv2.drawKeypoints(self.current_frame, keypoints, np.array([]), (0,255,0), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
# Show blobs
cv2.imshow("Keypoints", im_with_keypoints)
cv2.waitKey(0)
# Show:
if self.show_mode:
cv2.imshow('current_frame', self.current_frame)
            cv2.waitKey(0)
def check_light(self):
#
i = 1
def set_show(self, arg):
        self.show_mode = arg
v = Vision()
v.connect()
v.set_show(True)
v.check_sign()
cv2.destroyAllWindows()
| 22.483333
| 139
| 0.742031
|
bf2d6dd9bbfcdc36dc39e0b3f451cb3333a04eb5
| 100
|
py
|
Python
|
tweet_display/apps.py
|
aashaka/twitter-analyser
|
313ea4e342d100dac78b0fd07822cac7457450f9
|
[
"MIT"
] | null | null | null |
tweet_display/apps.py
|
aashaka/twitter-analyser
|
313ea4e342d100dac78b0fd07822cac7457450f9
|
[
"MIT"
] | null | null | null |
tweet_display/apps.py
|
aashaka/twitter-analyser
|
313ea4e342d100dac78b0fd07822cac7457450f9
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class TweetDisplayConfig(AppConfig):
name = 'tweet_display'
| 16.666667
| 36
| 0.78
|
f6c4edf923f89dd360215125bc4ed2b2bb20d9bc
| 9,731
|
py
|
Python
|
src/pandas_transformers/transformers.py
|
NedimB911/pandas-transformers
|
9f5f29c9aebe103b9ca81889d51b144ea21ee180
|
[
"MIT"
] | null | null | null |
src/pandas_transformers/transformers.py
|
NedimB911/pandas-transformers
|
9f5f29c9aebe103b9ca81889d51b144ea21ee180
|
[
"MIT"
] | null | null | null |
src/pandas_transformers/transformers.py
|
NedimB911/pandas-transformers
|
9f5f29c9aebe103b9ca81889d51b144ea21ee180
|
[
"MIT"
] | null | null | null |
# pylint: disable=missing-function-docstring
# pylint: disable=arguments-differ
# pylint: disable=unused-argument
from typing import List, Iterable
from sklearn.utils.validation import check_is_fitted
from sklearn.base import TransformerMixin
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd
import numpy as np
from .utils import check_columns_exist, check_if_dataframe
def test():
print("hello world")
class PandasOneHotEncoder(TransformerMixin):
"""
This one-hot encoder preserves the dataframe structure.
It works the same as pd.get_dummies(), however pd.get_dummies() cannot deal with
unseen categories during transformation (e.g. when a category appears in the
test set but not in the train set)
https://github.com/pandas-dev/pandas/issues/8918
https://pandas.pydata.org/pandas-docs/stable/user_guide/categorical.html
Parameters
----------
min_frequency :
Any category of a categorical column that appears less than 'min_frequency',
will be ignored (no extra dummy column for that category)
columns :
The list of columns to one-hot-encode. The default is None, in which
case all columns of type 'object' or 'category' will be one-hot-encoded.
dummy_na :
Add a column to indicate NaNs, if False NaNs are ignored (default)
drop_first:
Whether to get k-1 dummies out of k categorical levels by removing
the first level.
"""
def __init__(
self,
min_frequency: int = -1,
columns: List[str] = None,
dummy_na: bool = False,
drop_first: bool = False,
handle_unknown: str = "ignore",
):
super().__init__()
self.min_frequency = min_frequency
self.dummy_na = dummy_na
self.drop_first = drop_first
if handle_unknown not in {"ignore", "error"}:
raise ValueError(
"handle_unknown must be either 'ignore' or 'error'."
f" Got {handle_unknown}."
)
self.handle_unknown = handle_unknown
        if (columns is not None) and (not isinstance(columns, list)):
raise ValueError(
f"'columns' must be a list (of strings). Got {type(columns)}"
)
self.columns = columns
def fit(self, X: pd.DataFrame, y: pd.Series = None) -> "PandasOneHotEncoder":
dtypes_to_encode = ["object", "category"]
check_if_dataframe(X)
if self.columns is None:
self.columns = X.select_dtypes(include=dtypes_to_encode).columns.tolist()
else:
check_columns_exist(X, self.columns)
self.categories_ = {}
self.categories_unfiltered_ = {}
for col in self.columns:
counts = X[col].value_counts(dropna=True)
self.categories_[col] = list(
set(counts[counts >= self.min_frequency].index.tolist())
)
self.categories_unfiltered_[col] = set(counts.index.tolist())
return self
def transform(self, X: pd.DataFrame) -> pd.DataFrame:
check_is_fitted(self)
check_if_dataframe(X)
check_columns_exist(X, self.columns)
if self.handle_unknown == "error":
self._check_unknown_categories_all_columns(X)
# pylint: disable=cell-var-from-loop
for col in self.columns:
cat = pd.CategoricalDtype(self.categories_[col], ordered=True)
X = X.assign(**{f"{col}": lambda _df: _df[col].astype(cat)})
return pd.get_dummies(
X, columns=self.columns, dummy_na=self.dummy_na, drop_first=self.drop_first
)
def _find_unseen_categories(self, X, col):
"""
We check whether X has any categories that were not seen during training
for a single column.
NOTE: We also consider categories that we have ignored during training
due to 'min_frequency'.
"""
seen_categories = set(self.categories_unfiltered_[col])
new_categories = set(X[col].value_counts(dropna=False).index)
unseen_categories = new_categories - seen_categories
return unseen_categories
def _check_unknown_categories_all_columns(self, X):
"""
We check whether X has any categories that were not seen during training
for *ALL* columns.
NOTE: We also consider categories that we have ignored during training
due to 'min_frequency'.
"""
unseen_categories_dict = {}
for col in self.columns:
unseen_categories = self._find_unseen_categories(X, col)
if unseen_categories:
unseen_categories_dict[col] = unseen_categories
if unseen_categories_dict:
raise ValueError(
f"Encountered categories not seen during fitting:"
f"{unseen_categories_dict}"
)
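# Added sketch (not part of the original module): minimal usage of
# PandasOneHotEncoder on a toy dataframe. The column name "colour" and the toy
# data below are made up purely for illustration.
def _example_one_hot_usage():
    df_train = pd.DataFrame({"colour": ["red", "blue", "red"], "x": [1, 2, 3]})
    df_test = pd.DataFrame({"colour": ["green", "red"], "x": [4, 5]})
    enc = PandasOneHotEncoder(columns=["colour"], handle_unknown="ignore")
    enc.fit(df_train)
    # The unseen category "green" is silently ignored; the result keeps the
    # dataframe structure, with colour_* dummy columns next to "x".
    return enc.transform(df_test)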
class PandasTfidfVectorizer(TfidfVectorizer):
"""
PandasTfidfVectorizer
A pandas version for sklearn's tf-idf vectorizer. The PandasTfidfVectorizer
converts the sparse array returned by sklearn's tf-idf vectorizer into a dense
array and uses the feature names to convert it into a dataframe.
# https://www.datacamp.com/community/tutorials
# /super-multiple-inheritance-diamond-problem
# https://stackoverflow.com/questions/3277367
# /how-does-pythons-super-work-with-multiple-inheritance
Parameters
----------
column: str (optional)
Column which we wish to apply the tf-idf vectorizer to. If no column is given,
then the input dataframe should be a pd.Series or a np.ndarray.
**kwargs
Keyword arguments for sklearn.feature_extraction.text.TfidfVectorizer
"""
def __init__(self, column: str = None, **kwargs):
super().__init__(**kwargs)
self.column = column
# pylint: disable = arguments-differ
def fit(self, X: pd.DataFrame, y=None):
"""
Fits the tf-idf vectorizer.
Parameters
----------
X : pd.DataFrame or 1-d Iterable[str]
y : optional
"""
if self.column is not None:
# In this case the input must be a dataframe
self._init_input_validation()
check_if_dataframe(X)
check_columns_exist(X, [self.column])
raw_documents = X[self.column]
else:
# if no column is given, the input must be a 1-d iterable
# (pd.Series or np array)
self._check_if_1d_series_or_np_array(X)
raw_documents = X
self._check_missing(raw_documents)
return super().fit(raw_documents, y)
# pylint: disable = arguments-differ
def transform(self, X: pd.DataFrame):
"""
Transforms the input dataframe using the fitted tf-idf vectorizer.
Parameters
----------
X : pd.DataFrame
Returns
-------
pd.DataFrame
"""
if self.column is not None:
# In this case the input must be a dataframe
check_if_dataframe(X)
check_columns_exist(X, [self.column])
raw_documents = X[self.column]
else:
# if no column is given, the input must be a 1-d iterable
# (pd.Series or np array)
self._check_if_1d_series_or_np_array(X)
raw_documents = X
self._check_missing(raw_documents)
transformed = super().transform(raw_documents)
transformed_df = pd.DataFrame(
transformed.toarray(), columns=self.get_feature_names(), index=X.index
)
if self.column is not None:
transformed_df = pd.concat(
[X.drop(columns=[self.column]), transformed_df], axis=1
)
return transformed_df
def fit_transform(self, X: pd.DataFrame, y=None):
"""
fit_transform
Parameters
----------
X : pd.DataFrame
y : optional
Returns
-------
pd.DataFrame
"""
self.fit(X, y)
return self.transform(X)
def _init_input_validation(self):
"""
Validates the __init__() inputs.
"""
if not isinstance(self.column, str):
raise TypeError(
f"'column' argument should be a string. Got {type(self.column)}"
)
def _check_missing(self, raw_documents: Iterable[str]):
"""
Checks whether the raw_documents have any missing values. If so, return
a ValueError.
Parameters
----------
raw_documents : Iterable[str]
Raises
------
ValueError
"""
if raw_documents.isnull().any():
raise ValueError(
f"The {self.column} column contains None's. TfidfVectorizer requires"
" the column to not have any None's."
)
def _check_if_1d_series_or_np_array(self, obj):
if isinstance(obj, (pd.Series, np.ndarray)):
if obj.ndim == 1:
return True
raise TypeError(
"Input is of the correct type (pd.Series or np.ndarray)"
" However, not of the correct dimension. It should be 1d."
)
raise TypeError(
f"If no column is given, the input should be either a pd.Series or a "
f"np.ndarray. Got {type(obj)} instead."
)
def _get_param_names(self):
tfidf_param_names = TfidfVectorizer._get_param_names()
current_param_names = super()._get_param_names()
param_names = tfidf_param_names + current_param_names
return param_names
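# Added sketch (not part of the original module): minimal usage of
# PandasTfidfVectorizer on a dataframe with a free-text column. The column
# name "text" and the toy data below are made up purely for illustration.
def _example_tfidf_usage():
    df = pd.DataFrame({"text": ["hello world", "hello pandas"], "label": [0, 1]})
    vec = PandasTfidfVectorizer(column="text")
    # Returns a dataframe indexed like the input: the original columns minus
    # "text", plus one tf-idf column per vocabulary term.
    return vec.fit_transform(df)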
| 31.089457
| 87
| 0.611653
|
4f7febed329bdfd42b8e3b29ac1840a4a010dc3f
| 351
|
py
|
Python
|
src/airfly/_vendor/airflow/providers/apache/hive/transfers/vertica_to_hive.py
|
ryanchao2012/airfly
|
230ddd88885defc67485fa0c51f66c4a67ae98a9
|
[
"MIT"
] | 7
|
2021-09-27T11:38:48.000Z
|
2022-02-01T06:06:24.000Z
|
src/airfly/_vendor/airflow/providers/apache/hive/transfers/vertica_to_hive.py
|
ryanchao2012/airfly
|
230ddd88885defc67485fa0c51f66c4a67ae98a9
|
[
"MIT"
] | null | null | null |
src/airfly/_vendor/airflow/providers/apache/hive/transfers/vertica_to_hive.py
|
ryanchao2012/airfly
|
230ddd88885defc67485fa0c51f66c4a67ae98a9
|
[
"MIT"
] | null | null | null |
# Auto generated by 'inv collect-airflow'
from airfly._vendor.airflow.models.baseoperator import BaseOperator
class VerticaToHiveOperator(BaseOperator):
sql: "_empty"
hive_table: "_empty"
create: "_empty"
recreate: "_empty"
partition: "_empty"
delimiter: "_empty"
vertica_conn_id: "_empty"
hive_cli_conn_id: "_empty"
| 25.071429
| 67
| 0.723647
|
50fae1f0b12d2448f5766a0c02738d794ad928e9
| 16,638
|
py
|
Python
|
dataset/tiny_vid.py
|
pengzhiliang/object-localization
|
a1472b69e33af5be3aea0476f9f467c3994be4d4
|
[
"MIT"
] | 3
|
2019-03-29T07:41:00.000Z
|
2021-08-15T06:48:57.000Z
|
dataset/tiny_vid.py
|
pengzhiliang/object-localization
|
a1472b69e33af5be3aea0476f9f467c3994be4d4
|
[
"MIT"
] | null | null | null |
dataset/tiny_vid.py
|
pengzhiliang/object-localization
|
a1472b69e33af5be3aea0476f9f467c3994be4d4
|
[
"MIT"
] | 3
|
2019-09-20T03:17:26.000Z
|
2021-08-15T06:48:58.000Z
|
#-*-coding:utf-8-*-
'''
Created on Oct 29,2018
@author: pengzhiliang
'''
from __future__ import print_function
import torch,os,sys,random,cv2
import numpy as np
from PIL import Image,ImageFont,ImageDraw
from matplotlib import pyplot as plt
from encoder import DataEncoder
from torch.utils import data
from torchvision import transforms
from os.path import join as pjoin
import sys
sys.path.insert(0,'../')
#=============================================================================
#
# Create train_images.txt and test_images.txt
#
#=============================================================================
def train_test_txt(defualt_path='/home/pzl/Data/tiny_vid'):
"""
    Write one line per image to a text file in the following format:
    /home/pzl/Data/tiny_vid/turtle/000151.JPEG 1 29 38 108 84 2
    where:
    /home/pzl/Data/tiny_vid/turtle/000151.JPEG: path to the image
    1 : a single object (convenient for reusing the SSD pipeline)
    29 38 108 84: xmin, ymin, xmax, ymax of the bounding box
    2 : class index
"""
classes = {'car':0, 'bird':1, 'turtle':2, 'dog':3, 'lizard':4}
for dirname in classes.keys():
bbox_dic = {}
with open(pjoin(defualt_path,dirname+'_gt.txt'),'r') as f:
for n,line in enumerate(f.readlines()):
line = line.strip().split()
bbox_dic[line[0]] = line[1:]
if n == 179:
break
with open(pjoin(defualt_path,'train_images.txt'),'a') as f:
for i in range(1,151):
imgname = '000000'
pad0 = 6 - len(str(i))
imgname = imgname[:pad0]+str(i)+'.JPEG'
imgpath = pjoin(pjoin(defualt_path,dirname),imgname)
imageclass = str(classes[dirname])
imgbbox = ' '.join(bbox_dic[str(i)])
f.write('\t'.join([imgpath,'1',imgbbox,imageclass])+'\n')
with open(pjoin(defualt_path,'test_images.txt'),'a') as f:
for i in range(151,181):
imgname = '000000'
pad0 = 6 - len(str(i))
imgname = imgname[:pad0]+str(i)+'.JPEG'
imgpath = pjoin(pjoin(defualt_path,dirname),imgname)
imageclass = str(classes[dirname])
imgbbox = ' '.join(bbox_dic[str(i)])
f.write('\t'.join([imgpath,'1',imgbbox,imageclass])+'\n')
#=============================================================================
#
# Create a data loader for two-output net(not ssd),don't need to encode
#
#=============================================================================
class tiny_vid_loader(data.Dataset):
"""
    Purpose:
        Build a Dataset iterator for the tiny_vid dataset.
    Parameters: see __init__.
"""
img_size =128
def __init__(self,defualt_path='/home/pzl/Data/tiny_vid',mode='train',transform='some augmentation'):
"""
        defualt_path: e.g. '/home/pzl/Data/tiny_vid'
mode : 'train' or 'test'
"""
if not (os.path.exists(pjoin(defualt_path,'train_images.txt')) and os.path.exists(pjoin(defualt_path,'test_images.txt'))):
train_test_txt(defualt_path)
self.filelist=[]
self.class_coor = []
self.mode = True if mode =='train' else False
# /home/pzl/Data/tiny_vid/turtle/000151.JPEG 1 29 38 108 84 2
with open(pjoin(defualt_path,mode+'_images.txt')) as f:
for line in f.readlines():
line = line.strip().split()
self.filelist.append(line[0])
self.class_coor.append([int(i) for i in line[2:]])
self.ToTensor = transforms.ToTensor()
self.Normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
self.data_encoder = DataEncoder()
self.transform = transform
def random_distort( self,
img,
brightness_delta=32/255.,
contrast_delta=0.5,
saturation_delta=0.5,
hue_delta=0.1):
'''A color related data augmentation used in SSD.
Args:
img: (PIL.Image) image to be color augmented.
brightness_delta: (float) shift of brightness, range from [1-delta,1+delta].
contrast_delta: (float) shift of contrast, range from [1-delta,1+delta].
saturation_delta: (float) shift of saturation, range from [1-delta,1+delta].
hue_delta: (float) shift of hue, range from [-delta,delta].
Returns:
img: (PIL.Image) color augmented image.
'''
def brightness(img, delta):
if random.random() < 0.5:
img = transforms.ColorJitter(brightness=delta)(img)
return img
def contrast(img, delta):
if random.random() < 0.5:
img = transforms.ColorJitter(contrast=delta)(img)
return img
def saturation(img, delta):
if random.random() < 0.5:
img = transforms.ColorJitter(saturation=delta)(img)
return img
def hue(img, delta):
if random.random() < 0.5:
img = transforms.ColorJitter(hue=delta)(img)
return img
img = brightness(img, brightness_delta)
if random.random() < 0.5:
img = contrast(img, contrast_delta)
img = saturation(img, saturation_delta)
img = hue(img, hue_delta)
else:
img = saturation(img, saturation_delta)
img = hue(img, hue_delta)
img = contrast(img, contrast_delta)
return img
def random_flip(self, img, boxes):
'''Randomly flip the image and adjust the bbox locations.
        Only flips in the horizontal direction.
For bbox (xmin, ymin, xmax, ymax), the flipped bbox is:
(w-xmax, ymin, w-xmin, ymax).
Args:
img: (PIL.Image) image.
boxes: (tensor) bbox locations, sized [#obj, 4].
Returns:
img: (PIL.Image) randomly flipped image.
boxes: (tensor) randomly flipped bbox locations, sized [#obj, 4].
'''
if random.random() < 0.5:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
w = img.width
xmin = w - boxes[2]
xmax = w - boxes[0]
boxes[0] = xmin
boxes[2] = xmax
return img, boxes
def random_crop(self, img, boxes, labels):
'''Randomly crop the image and adjust the bbox locations.
For more details, see 'Chapter2.2: Data augmentation' of the paper.
Args:
img: (PIL.Image) image.
boxes: (tensor) bbox locations, sized [4,].
labels: (tensor) bbox labels, sized [1,].
Returns:
img: (PIL.Image) cropped image.
selected_boxes: (tensor) selected bbox locations.
labels: (tensor) selected bbox labels.
'''
imw, imh = img.size
boxes = torch.unsqueeze(boxes,dim=0) # expand [1,4]
# print(boxes)
while True:
min_iou = random.choice([None, 0.1, 0.3, 0.5, 0.7, 0.9])
if min_iou is None:
return img, boxes, labels
for _ in range(100):
                # random.randrange(min, max) includes min but excludes max
w = random.randrange(int(0.1*imw), imw)
h = random.randrange(int(0.1*imh), imh)
if h > 2*w or w > 2*h:
continue
x = random.randrange(imw - w)
y = random.randrange(imh - h)
roi = torch.Tensor([[x, y, x+w, y+h]])
center = (boxes[:,:2] + boxes[:,2:]) / 2 # [N,2]
roi2 = roi.expand(len(center), 4) # [N,4]
mask = (center > roi2[:,:2]) & (center < roi2[:,2:]) # [N,2]
mask = mask[:,0] & mask[:,1] #[N,]
if not mask.any():
continue
selected_boxes = boxes.index_select(0, mask.nonzero().squeeze(1))
iou = self.data_encoder.iou(selected_boxes, roi)
if iou.min() < min_iou:
continue
img = img.crop((x, y, x+w, y+h))
selected_boxes[:,0].add_(-x).clamp_(min=0, max=w)
selected_boxes[:,1].add_(-y).clamp_(min=0, max=h)
selected_boxes[:,2].add_(-x).clamp_(min=0, max=w)
selected_boxes[:,3].add_(-y).clamp_(min=0, max=h)
# print(selected_boxes, mask)
return img, selected_boxes, labels#labels[mask]
def __getitem__(self, index):
imgpath = self.filelist[index]
gt_class = np.array(self.class_coor[index][-1],dtype = np.float32)
gt_bbox = np.array(self.class_coor[index][:-1],dtype = np.float32)
# print('1:',gt_bbox)
img = Image.open(imgpath).convert('RGB')
if self.transform is not None:
gt_class , gt_bbox = torch.Tensor(gt_class),torch.Tensor(gt_bbox)
# print('2:',gt_class)
if self.mode:
img = self.random_distort(img)
img , gt_bbox = self.random_flip(img,gt_bbox)
img, gt_bbox, gt_class = self.random_crop(img, gt_bbox, gt_class)
w,h = img.size
gt_bbox /= torch.Tensor([w,h,w,h]).expand_as(gt_bbox)
# print('3:',gt_bbox*128)
img = transforms.Resize((128,128))(img)
img = self.ToTensor(img)
img = self.Normalize(img)
else:
img,gt_class , gt_bbox = self.ToTensor(img),torch.Tensor(gt_class),torch.Tensor(gt_bbox/128.)
img = self.Normalize(img)
return img,gt_class.long(),(gt_bbox*128).squeeze()
def __len__(self):
return len(self.filelist)
class ListDataset(data.Dataset):
img_size = 300
def __init__(self, root, list_file, train, transform):
'''
Args:
root: (str) ditectory to images.
list_file: (str) path to index file.
train: (boolean) train or test.
transform: ([transforms]) image transforms.
'''
self.root = root
self.train = train
self.transform = transform
self.fnames = []
self.boxes = []
self.labels = []
self.data_encoder = DataEncoder()
with open(list_file) as f:
lines = f.readlines()
self.num_samples = len(lines)
for line in lines:
splited = line.strip().split()
self.fnames.append(splited[0])
num_objs = int(splited[1])
box = []
label = []
for i in range(num_objs):
xmin = splited[2+5*i]
ymin = splited[3+5*i]
xmax = splited[4+5*i]
ymax = splited[5+5*i]
c = splited[6+5*i]
box.append([float(xmin),float(ymin),float(xmax),float(ymax)])
label.append(int(c))
self.boxes.append(torch.Tensor(box))
self.labels.append(torch.LongTensor(label))
def __getitem__(self, idx):
'''Load a image, and encode its bbox locations and class labels.
Args:
idx: (int) image index.
Returns:
img: (tensor) image tensor.
loc_target: (tensor) location targets, sized [8732,4].
conf_target: (tensor) label targets, sized [8732,].
'''
# Load image and bbox locations.
fname = self.fnames[idx]
img = Image.open(fname)
boxes = self.boxes[idx].clone()
labels = self.labels[idx]
# Data augmentation while training.
if self.train:
img =self.random_distort(img)
img, boxes = self.random_flip(img, boxes)
img, boxes, labels = self.random_crop(img, boxes, labels)
# Scale bbox locaitons to [0,1].
w,h = img.size
boxes /= torch.Tensor([w,h,w,h]).expand_as(boxes)
img = img.resize((self.img_size,self.img_size))
img = self.transform(img)
# Encode loc & conf targets.
loc_target, conf_target = self.data_encoder.encode(boxes, labels)
return img, loc_target, conf_target,boxes,labels
def random_distort(self,
img,
brightness_delta=32/255.,
contrast_delta=0.5,
saturation_delta=0.5,
hue_delta=0.1):
'''A color related data augmentation used in SSD.
Args:
img: (PIL.Image) image to be color augmented.
brightness_delta: (float) shift of brightness, range from [1-delta,1+delta].
contrast_delta: (float) shift of contrast, range from [1-delta,1+delta].
saturation_delta: (float) shift of saturation, range from [1-delta,1+delta].
hue_delta: (float) shift of hue, range from [-delta,delta].
Returns:
img: (PIL.Image) color augmented image.
'''
def brightness(img, delta):
if random.random() < 0.5:
img = transforms.ColorJitter(brightness=delta)(img)
return img
def contrast(img, delta):
if random.random() < 0.5:
img = transforms.ColorJitter(contrast=delta)(img)
return img
def saturation(img, delta):
if random.random() < 0.5:
img = transforms.ColorJitter(saturation=delta)(img)
return img
def hue(img, delta):
if random.random() < 0.5:
img = transforms.ColorJitter(hue=delta)(img)
return img
img = brightness(img, brightness_delta)
if random.random() < 0.5:
img = contrast(img, contrast_delta)
img = saturation(img, saturation_delta)
img = hue(img, hue_delta)
else:
img = saturation(img, saturation_delta)
img = hue(img, hue_delta)
img = contrast(img, contrast_delta)
return img
def random_flip(self, img, boxes):
'''Randomly flip the image and adjust the bbox locations.
For bbox (xmin, ymin, xmax, ymax), the flipped bbox is:
(w-xmax, ymin, w-xmin, ymax).
Args:
img: (PIL.Image) image.
boxes: (tensor) bbox locations, sized [#obj, 4].
Returns:
img: (PIL.Image) randomly flipped image.
boxes: (tensor) randomly flipped bbox locations, sized [#obj, 4].
'''
if random.random() < 0.5:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
w = img.width
xmin = w - boxes[:,2]
xmax = w - boxes[:,0]
boxes[:,0] = xmin
boxes[:,2] = xmax
return img, boxes
def random_crop(self, img, boxes, labels):
'''Randomly crop the image and adjust the bbox locations.
For more details, see 'Chapter2.2: Data augmentation' of the paper.
Args:
img: (PIL.Image) image.
boxes: (tensor) bbox locations, sized [#obj, 4].
labels: (tensor) bbox labels, sized [#obj,].
Returns:
img: (PIL.Image) cropped image.
selected_boxes: (tensor) selected bbox locations.
labels: (tensor) selected bbox labels.
'''
imw, imh = img.size
while True:
min_iou = random.choice([None, 0.1, 0.3, 0.5, 0.7, 0.9])
if min_iou is None:
return img, boxes, labels
for _ in range(100):
                # random.randrange(min, max) includes min but excludes max
w = random.randrange(int(0.1*imw), imw)
h = random.randrange(int(0.1*imh), imh)
if h > 2*w or w > 2*h:
continue
x = random.randrange(imw - w)
y = random.randrange(imh - h)
roi = torch.Tensor([[x, y, x+w, y+h]])
center = (boxes[:,:2] + boxes[:,2:]) / 2 # [N,2]
roi2 = roi.expand(len(center), 4) # [N,4]
mask = (center > roi2[:,:2]) & (center < roi2[:,2:]) # [N,2]
mask = mask[:,0] & mask[:,1] #[N,]
if not mask.any():
continue
selected_boxes = boxes.index_select(0, mask.nonzero().squeeze(1))
iou = self.data_encoder.iou(selected_boxes, roi)
if iou.min() < min_iou:
continue
img = img.crop((x, y, x+w, y+h))
selected_boxes[:,0].add_(-x).clamp_(min=0, max=w)
selected_boxes[:,1].add_(-y).clamp_(min=0, max=h)
selected_boxes[:,2].add_(-x).clamp_(min=0, max=w)
selected_boxes[:,3].add_(-y).clamp_(min=0, max=h)
return img, selected_boxes, labels[mask]
def __len__(self):
return self.num_samples
| 36.169565
| 130
| 0.527888
|
8d4d8b72d469e693a8dc373e1611aa8109966b9c
| 3,652
|
py
|
Python
|
dispatcher/qube.py
|
plinecom/JobManager
|
ce2c85fa740d5dce2d582e694bb3adc9176101d7
|
[
"MIT"
] | null | null | null |
dispatcher/qube.py
|
plinecom/JobManager
|
ce2c85fa740d5dce2d582e694bb3adc9176101d7
|
[
"MIT"
] | null | null | null |
dispatcher/qube.py
|
plinecom/JobManager
|
ce2c85fa740d5dce2d582e694bb3adc9176101d7
|
[
"MIT"
] | null | null | null |
import abstract
import getpass
import subprocess
import xml.etree.ElementTree
class Qube(abstract.DispatcherBase):
def __init__(self):
abstract.DispatcherBase.__init__(self)
class Qube6(Qube):
def __init__(self, configdict):
Qube.__init__(self)
print configdict
self._configdict = configdict
self.setvalue("executable", self._configdict["submitter"])
self.setvalue("server", "172.29.115.99")
self.setvalue("dispatch_software", "Qube6")
cmd = self._configdict["qbhosts"]+" --xml"
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout_data, stderr_data = p.communicate()
qb_xml_document = xml.etree.ElementTree.fromstring("<doc>"+stdout_data+"</doc>")
# print stdout_data
group_dic = {}
for e in qb_xml_document.findall("object/item/object/groups"):
if e.text is not None:
group_dic[e.text.encode("utf-8")] = "1"
self.setvalue("groups", sorted(group_dic.keys()))
# print group_dic.keys()
cluster_dic = {}
for e in qb_xml_document.findall("object/item/object/cluster"):
cluster_dic[e.text.encode("utf-8")] = "1"
cluster_dic2 = {}
for cluster in cluster_dic.keys():
cluster_level = cluster.split("/")
for i in range(0,len(cluster_level)+1):
cluster_dic2["/".join(cluster_level[0:i])] = 1
# print cluster_dic2
# group_dic[e.text]="1"
self.setvalue("pools", sorted(cluster_dic2.keys()))
# print cluster_dic.keys()
# self.setvalue("dispatherObj", self)
def addJob(self, job):
self._job.append(job)
def submmision(self):
self._buildCmd()
def getDispatcherName(self):
return "Qube 6"
def submit(self, jobObj):
print jobObj.getparam()
# job = jobObj.getparam();
cmd = self.getvalue("executable") + ' --name ' + jobObj.getvalue("jobName")\
+ ' --priority '+jobObj.getvalue("priority") \
+ ' --range '+jobObj.getvalue("startFrame") + '-' + jobObj.getvalue("endFrame")\
+ ' --chunk 10'
if isinstance(jobObj.getvalue("selected_groups"), list):
if len(jobObj.getvalue("selected_groups"))>0:
cmd += ' --groups '+','.join(jobObj.getvalue("selected_groups"))
cmd += ' /usr/autodesk/maya2014-x64/bin/Render -s QB_FRAME_START -e QB_FRAME_END ' \
+ jobObj.getvalue("filePath")
# --groups string
# --cluster string
# --processors int
# --supervisor
# --range 1-100 --chunk 10
print cmd
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout_data, stderr_data = p.communicate()
print stdout_data
print stderr_data
def _buildCmd(self):
for job in self._job:
# if job.__class__.__name__ == "JobMayaSw":
cmd = self.getvalue("executable") + ' -s ' + self.getvalue("server") + ' -u admin -b -department ' + getpass.getuser() + ' -e 566 -n ' + job.getvalue("job") + ' -f ' + job.getvalue("filePath") + ' -proj ' + job.getvalue("proj") + ' -sf ' + job.getvalue("startFrame") + " -ef " + job.getvalue("endFrame") + " -bf " + job.getvalue("byFrameSteps") + " -se 0 -st 1 -attr MAYADIGITS 10 1 -max 1"
print cmd
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
(stdout_data, stderr_data) = p.communicate()
print stdout_data
print stderr_data
| 37.265306
| 407
| 0.588719
|
576044306c647959738d6a9eab632407e88a3740
| 1,251
|
py
|
Python
|
src/health/admin.py
|
kensonman/health
|
d2b54faa2f6b6c7ce1a21ca64622737739797c66
|
[
"Apache-2.0"
] | null | null | null |
src/health/admin.py
|
kensonman/health
|
d2b54faa2f6b6c7ce1a21ca64622737739797c66
|
[
"Apache-2.0"
] | null | null | null |
src/health/admin.py
|
kensonman/health
|
d2b54faa2f6b6c7ce1a21ca64622737739797c66
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# File: health/admin.py
# Date: 2019-03-04 20:52
# Author: Kenson Man <kenson@kenson.idv.hk>
# Desc: Define the admin-tools for the health project;
from django.contrib import admin
from django.utils.translation import gettext_lazy as _
from .models import *
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
list_display=('id', 'name', 'desc', 'fmt', 'minimum', 'maximum', 'user', 'lmb', 'lmd')
@admin.register(Tag)
class TagAdmin(admin.ModelAdmin):
list_display=('id', 'category_user', 'category_name', 'name', 'lmb', 'lmd')
def category_user(self, record):
return record.category.user
category_user.short_description=_('Category.user')
def category_name(self, record):
return record.category.name
category_name.short_description=_('Category.name')
@admin.register(Index)
class IndexAdmin(admin.ModelAdmin):
list_display=('id', 'category_user', 'category_name', 'value', 'lmb', 'lmd')
def category_user(self, record):
return record.category.user
category_user.short_description=_('Category.user')
def category_name(self, record):
return record.category.name
category_name.short_description=_('Category.name')
| 32.076923
| 89
| 0.709832
|
126d2f097f5d88423eb0290e061608d2dc0ad56f
| 21,850
|
py
|
Python
|
heat/tests/openstack/aodh/test_gnocchi_alarm.py
|
grebennikov/heat1
|
6a11bd0b5984c8f880d1a24ed324620020032b5a
|
[
"Apache-2.0"
] | null | null | null |
heat/tests/openstack/aodh/test_gnocchi_alarm.py
|
grebennikov/heat1
|
6a11bd0b5984c8f880d1a24ed324620020032b5a
|
[
"Apache-2.0"
] | null | null | null |
heat/tests/openstack/aodh/test_gnocchi_alarm.py
|
grebennikov/heat1
|
6a11bd0b5984c8f880d1a24ed324620020032b5a
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import mox
from heat.common import exception
from heat.common import template_format
from heat.engine.clients.os import aodh
from heat.engine.resources.openstack.aodh.gnocchi import (
alarm as gnocchi)
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
gnocchi_resources_alarm_template = '''
heat_template_version: 2013-05-23
description: Gnocchi Resources Alarm Test
resources:
GnoResAlarm:
type: OS::Aodh::GnocchiResourcesAlarm
properties:
description: Do stuff with gnocchi
metric: cpu_util
aggregation_method: mean
granularity: 60
evaluation_periods: 1
threshold: 50
alarm_actions: []
resource_type: instance
resource_id: 5a517ceb-b068-4aca-9eb9-3e4eb9b90d9a
comparison_operator: gt
'''
gnocchi_aggregation_by_metrics_alarm_template = '''
heat_template_version: 2013-05-23
description: Gnocchi Aggregation by Metrics Alarm Test
resources:
GnoAggregationByMetricsAlarm:
type: OS::Aodh::GnocchiAggregationByMetricsAlarm
properties:
description: Do stuff with gnocchi metrics
metrics: ["911fce07-e0d7-4210-8c8c-4a9d811fcabc",
"2543d435-fe93-4443-9351-fb0156930f94"]
aggregation_method: mean
granularity: 60
evaluation_periods: 1
threshold: 50
alarm_actions: []
comparison_operator: gt
'''
gnocchi_aggregation_by_resources_alarm_template = '''
heat_template_version: 2013-05-23
description: Gnocchi Aggregation by Resources Alarm Test
resources:
GnoAggregationByResourcesAlarm:
type: OS::Aodh::GnocchiAggregationByResourcesAlarm
properties:
description: Do stuff with gnocchi aggregation by resource
aggregation_method: mean
granularity: 60
evaluation_periods: 1
threshold: 50
metric: cpu_util
alarm_actions: []
resource_type: instance
query: '{"=": {"server_group": "my_autoscaling_group"}}'
comparison_operator: gt
'''
FakeAodhAlarm = {'other_attrs': 'val',
'alarm_id': 'foo'}
class GnocchiResourcesAlarmTest(common.HeatTestCase):
def setUp(self):
super(GnocchiResourcesAlarmTest, self).setUp()
self.fc = mock.Mock()
def create_alarm(self):
self.patchobject(aodh.AodhClientPlugin,
'_create').return_value = self.fc
self.m.StubOutWithMock(self.fc.alarm, 'create')
self.fc.alarm.create(
{
'alarm_actions': [],
'description': u'Do stuff with gnocchi',
'enabled': True,
'insufficient_data_actions': [],
'ok_actions': [],
'name': mox.IgnoreArg(),
'type': 'gnocchi_resources_threshold',
'repeat_actions': True,
'gnocchi_resources_threshold_rule': {
"metric": "cpu_util",
"aggregation_method": "mean",
"granularity": 60,
"evaluation_periods": 1,
"threshold": 50,
"resource_type": "instance",
"resource_id": "5a517ceb-b068-4aca-9eb9-3e4eb9b90d9a",
"comparison_operator": "gt",
},
'time_constraints': [],
'severity': 'low'
}).AndReturn(FakeAodhAlarm)
self.tmpl = template_format.parse(gnocchi_resources_alarm_template)
self.stack = utils.parse_stack(self.tmpl)
resource_defns = self.stack.t.resource_definitions(self.stack)
return gnocchi.AodhGnocchiResourcesAlarm(
'GnoResAlarm', resource_defns['GnoResAlarm'], self.stack)
def test_update(self):
rsrc = self.create_alarm()
self.m.StubOutWithMock(self.fc.alarm, 'update')
self.fc.alarm.update(
'foo',
{
'alarm_actions': [],
'description': u'Do stuff with gnocchi',
'enabled': True,
'insufficient_data_actions': [],
'ok_actions': [],
'repeat_actions': True,
'gnocchi_resources_threshold_rule': {
"metric": "cpu_util",
"aggregation_method": "mean",
"granularity": 60,
"evaluation_periods": 1,
"threshold": 50,
"resource_type": "instance",
"resource_id": "d3d6c642-921e-4fc2-9c5f-15d9a5afb598",
"comparison_operator": "gt",
},
'time_constraints': [],
'severity': 'low'
}
)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
props = self.tmpl['resources']['GnoResAlarm']['properties']
props['resource_id'] = 'd3d6c642-921e-4fc2-9c5f-15d9a5afb598'
update_template = rsrc.t.freeze(properties=props)
scheduler.TaskRunner(rsrc.update, update_template)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def _prepare_check_resource(self):
snippet = template_format.parse(gnocchi_resources_alarm_template)
self.stack = utils.parse_stack(snippet)
res = self.stack['GnoResAlarm']
res.client = mock.Mock()
mock_alarm = mock.Mock(enabled=True, state='ok')
res.client().alarm.get.return_value = mock_alarm
return res
def test_create(self):
rsrc = self.create_alarm()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.assertEqual('foo', rsrc.resource_id)
self.m.VerifyAll()
def test_suspend(self):
rsrc = self.create_alarm()
self.m.StubOutWithMock(self.fc.alarm, 'update')
self.fc.alarm.update('foo', {'enabled': False})
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
scheduler.TaskRunner(rsrc.suspend)()
self.assertEqual((rsrc.SUSPEND, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_resume(self):
rsrc = self.create_alarm()
self.m.StubOutWithMock(self.fc.alarm, 'update')
self.fc.alarm.update('foo', {'enabled': True})
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
rsrc.state_set(rsrc.SUSPEND, rsrc.COMPLETE)
scheduler.TaskRunner(rsrc.resume)()
self.assertEqual((rsrc.RESUME, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_check(self):
res = self._prepare_check_resource()
scheduler.TaskRunner(res.check)()
self.assertEqual((res.CHECK, res.COMPLETE), res.state)
def test_check_failure(self):
res = self._prepare_check_resource()
res.client().alarm.get.side_effect = Exception('Boom')
self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(res.check))
self.assertEqual((res.CHECK, res.FAILED), res.state)
self.assertIn('Boom', res.status_reason)
def test_show_resource(self):
res = self._prepare_check_resource()
res.client().alarm.create.return_value = FakeAodhAlarm
res.client().alarm.get.return_value = FakeAodhAlarm
scheduler.TaskRunner(res.create)()
self.assertEqual(FakeAodhAlarm, res.FnGetAtt('show'))
def test_gnocchi_alarm_live_state(self):
snippet = template_format.parse(gnocchi_resources_alarm_template)
self.stack = utils.parse_stack(snippet)
resource_defns = self.stack.t.resource_definitions(self.stack)
self.rsrc_defn = resource_defns['GnoResAlarm']
self.client = mock.Mock()
self.patchobject(gnocchi.AodhGnocchiResourcesAlarm, 'client',
return_value=self.client)
alarm_res = gnocchi.AodhGnocchiResourcesAlarm(
'alarm', self.rsrc_defn, self.stack)
alarm_res.create()
value = {
'description': 'Do stuff with gnocchi',
'alarm_actions': [],
'time_constraints': [],
'gnocchi_resources_threshold_rule': {
'resource_id': '5a517ceb-b068-4aca-9eb9-3e4eb9b90d9a',
'metric': 'cpu_util',
'evaluation_periods': 1,
'aggregation_method': 'mean',
'granularity': 60,
'threshold': 50,
'comparison_operator': 'gt',
'resource_type': 'instance'
}
}
self.client.alarm.get.return_value = value
expected_data = {
'description': 'Do stuff with gnocchi',
'alarm_actions': [],
'resource_id': '5a517ceb-b068-4aca-9eb9-3e4eb9b90d9a',
'metric': 'cpu_util',
'evaluation_periods': 1,
'aggregation_method': 'mean',
'granularity': 60,
'threshold': 50,
'comparison_operator': 'gt',
'resource_type': 'instance',
'insufficient_data_actions': None,
'enabled': None,
'ok_actions': None,
'repeat_actions': None,
'severity': None
}
reality = alarm_res.get_live_state(alarm_res.properties)
self.assertEqual(expected_data, reality)
class GnocchiAggregationByMetricsAlarmTest(GnocchiResourcesAlarmTest):
def create_alarm(self):
self.patchobject(aodh.AodhClientPlugin,
'_create').return_value = self.fc
self.m.StubOutWithMock(self.fc.alarm, 'create')
self.fc.alarm.create(
{
'alarm_actions': [],
'description': u'Do stuff with gnocchi metrics',
'enabled': True,
'insufficient_data_actions': [],
'ok_actions': [],
'name': mox.IgnoreArg(),
'type': 'gnocchi_aggregation_by_metrics_threshold',
'repeat_actions': True,
'gnocchi_aggregation_by_metrics_threshold_rule': {
"aggregation_method": "mean",
"granularity": 60,
"evaluation_periods": 1,
"threshold": 50,
"comparison_operator": "gt",
"metrics": ["911fce07-e0d7-4210-8c8c-4a9d811fcabc",
"2543d435-fe93-4443-9351-fb0156930f94"],
},
'time_constraints': [],
'severity': 'low'}
).AndReturn(FakeAodhAlarm)
self.tmpl = template_format.parse(
gnocchi_aggregation_by_metrics_alarm_template)
self.stack = utils.parse_stack(self.tmpl)
resource_defns = self.stack.t.resource_definitions(self.stack)
return gnocchi.AodhGnocchiAggregationByMetricsAlarm(
'GnoAggregationByMetricsAlarm',
resource_defns['GnoAggregationByMetricsAlarm'], self.stack)
def test_update(self):
rsrc = self.create_alarm()
self.m.StubOutWithMock(self.fc.alarm, 'update')
self.fc.alarm.update(
'foo',
{
'alarm_actions': [],
'description': u'Do stuff with gnocchi metrics',
'enabled': True,
'insufficient_data_actions': [],
'ok_actions': [],
'repeat_actions': True,
'gnocchi_aggregation_by_metrics_threshold_rule': {
"aggregation_method": "mean",
"granularity": 60,
"evaluation_periods": 1,
"threshold": 50,
"comparison_operator": "gt",
'metrics': ['d3d6c642-921e-4fc2-9c5f-15d9a5afb598',
'bc60f822-18a0-4a0c-94e7-94c554b00901']
},
'time_constraints': [],
'severity': 'low'
}
)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
snippet = self.tmpl['resources']['GnoAggregationByMetricsAlarm']
props = snippet['properties'].copy()
props['metrics'] = ['d3d6c642-921e-4fc2-9c5f-15d9a5afb598',
'bc60f822-18a0-4a0c-94e7-94c554b00901']
update_template = rsrc.t.freeze(properties=props)
scheduler.TaskRunner(rsrc.update, update_template)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def _prepare_check_resource(self):
snippet = template_format.parse(
gnocchi_aggregation_by_metrics_alarm_template)
self.stack = utils.parse_stack(snippet)
res = self.stack['GnoAggregationByMetricsAlarm']
res.client = mock.Mock()
mock_alarm = mock.Mock(enabled=True, state='ok')
res.client().alarm.get.return_value = mock_alarm
return res
def test_show_resource(self):
res = self._prepare_check_resource()
res.client().alarm.create.return_value = FakeAodhAlarm
res.client().alarm.get.return_value = FakeAodhAlarm
scheduler.TaskRunner(res.create)()
self.assertEqual(FakeAodhAlarm, res.FnGetAtt('show'))
def test_gnocchi_alarm_aggr_by_metrics_live_state(self):
snippet = template_format.parse(
gnocchi_aggregation_by_metrics_alarm_template)
self.stack = utils.parse_stack(snippet)
resource_defns = self.stack.t.resource_definitions(self.stack)
self.rsrc_defn = resource_defns['GnoAggregationByMetricsAlarm']
self.client = mock.Mock()
self.patchobject(gnocchi.AodhGnocchiAggregationByMetricsAlarm,
'client', return_value=self.client)
alarm_res = gnocchi.AodhGnocchiAggregationByMetricsAlarm(
'alarm', self.rsrc_defn, self.stack)
alarm_res.create()
value = {
'description': 'Do stuff with gnocchi metrics',
'alarm_actions': [],
'time_constraints': [],
'gnocchi_aggregation_by_metrics_threshold_rule': {
'metrics': ['911fce07-e0d7-4210-8c8c-4a9d811fcabc',
'2543d435-fe93-4443-9351-fb0156930f94'],
'evaluation_periods': 1,
'aggregation_method': 'mean',
'granularity': 60,
'threshold': 50,
'comparison_operator': 'gt'
}
}
self.client.alarm.get.return_value = value
expected_data = {
'description': 'Do stuff with gnocchi metrics',
'alarm_actions': [],
'metrics': ['911fce07-e0d7-4210-8c8c-4a9d811fcabc',
'2543d435-fe93-4443-9351-fb0156930f94'],
'evaluation_periods': 1,
'aggregation_method': 'mean',
'granularity': 60,
'threshold': 50,
'comparison_operator': 'gt',
'insufficient_data_actions': None,
'enabled': None,
'ok_actions': None,
'repeat_actions': None,
'severity': None
}
reality = alarm_res.get_live_state(alarm_res.properties)
self.assertEqual(expected_data, reality)
class GnocchiAggregationByResourcesAlarmTest(GnocchiResourcesAlarmTest):
def create_alarm(self):
self.patchobject(aodh.AodhClientPlugin,
'_create').return_value = self.fc
self.m.StubOutWithMock(self.fc.alarm, 'create')
self.fc.alarm.create(
{
'alarm_actions': [],
'description': 'Do stuff with gnocchi aggregation by resource',
'enabled': True,
'insufficient_data_actions': [],
'ok_actions': [],
'name': mox.IgnoreArg(),
'type': 'gnocchi_aggregation_by_resources_threshold',
'repeat_actions': True,
'gnocchi_aggregation_by_resources_threshold_rule': {
"aggregation_method": "mean",
"granularity": 60,
"evaluation_periods": 1,
"threshold": 50,
"comparison_operator": "gt",
"metric": "cpu_util",
"resource_type": "instance",
"query": '{"=": {"server_group": "my_autoscaling_group"}}',
},
'time_constraints': [],
'severity': 'low'}
).AndReturn(FakeAodhAlarm)
self.tmpl = template_format.parse(
gnocchi_aggregation_by_resources_alarm_template)
self.stack = utils.parse_stack(self.tmpl)
resource_defns = self.stack.t.resource_definitions(self.stack)
return gnocchi.AodhGnocchiAggregationByResourcesAlarm(
'GnoAggregationByResourcesAlarm',
resource_defns['GnoAggregationByResourcesAlarm'], self.stack)
def test_update(self):
rsrc = self.create_alarm()
self.m.StubOutWithMock(self.fc.alarm, 'update')
self.fc.alarm.update(
'foo',
{
'alarm_actions': [],
'description': 'Do stuff with gnocchi aggregation by resource',
'enabled': True,
'insufficient_data_actions': [],
'ok_actions': [],
'repeat_actions': True,
'gnocchi_aggregation_by_resources_threshold_rule': {
"aggregation_method": "mean",
"granularity": 60,
"evaluation_periods": 1,
"threshold": 50,
"comparison_operator": "gt",
"metric": "cpu_util",
"resource_type": "instance",
"query": '{"=": {"server_group": "my_new_group"}}',
},
'time_constraints': [],
'severity': 'low'
}
)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
snippet = self.tmpl['resources']['GnoAggregationByResourcesAlarm']
props = snippet['properties'].copy()
props['query'] = '{"=": {"server_group": "my_new_group"}}'
update_template = rsrc.t.freeze(properties=props)
scheduler.TaskRunner(rsrc.update, update_template)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def _prepare_check_resource(self):
snippet = template_format.parse(
gnocchi_aggregation_by_resources_alarm_template)
self.stack = utils.parse_stack(snippet)
res = self.stack['GnoAggregationByResourcesAlarm']
res.client = mock.Mock()
mock_alarm = mock.Mock(enabled=True, state='ok')
res.client().alarm.get.return_value = mock_alarm
return res
def test_show_resource(self):
res = self._prepare_check_resource()
res.client().alarm.create.return_value = FakeAodhAlarm
res.client().alarm.get.return_value = FakeAodhAlarm
scheduler.TaskRunner(res.create)()
self.assertEqual(FakeAodhAlarm, res.FnGetAtt('show'))
def test_gnocchi_alarm_aggr_by_resources_live_state(self):
snippet = template_format.parse(
gnocchi_aggregation_by_resources_alarm_template)
self.stack = utils.parse_stack(snippet)
resource_defns = self.stack.t.resource_definitions(self.stack)
self.rsrc_defn = resource_defns['GnoAggregationByResourcesAlarm']
self.client = mock.Mock()
self.patchobject(gnocchi.AodhGnocchiAggregationByResourcesAlarm,
'client', return_value=self.client)
alarm_res = gnocchi.AodhGnocchiAggregationByResourcesAlarm(
'alarm', self.rsrc_defn, self.stack)
alarm_res.create()
value = {
'description': 'Do stuff with gnocchi aggregation by resource',
'alarm_actions': [],
'time_constraints': [],
'gnocchi_aggregation_by_resources_threshold_rule': {
'metric': 'cpu_util',
'resource_type': 'instance',
'query': "{'=': {'server_group': 'my_autoscaling_group'}}",
'evaluation_periods': 1,
'aggregation_method': 'mean',
'granularity': 60,
'threshold': 50,
'comparison_operator': 'gt'
}
}
self.client.alarm.get.return_value = value
expected_data = {
'description': 'Do stuff with gnocchi aggregation by resource',
'alarm_actions': [],
'metric': 'cpu_util',
'resource_type': 'instance',
'query': "{'=': {'server_group': 'my_autoscaling_group'}}",
'evaluation_periods': 1,
'aggregation_method': 'mean',
'granularity': 60,
'threshold': 50,
'comparison_operator': 'gt',
'insufficient_data_actions': None,
'enabled': None,
'ok_actions': None,
'repeat_actions': None,
'severity': None
}
reality = alarm_res.get_live_state(alarm_res.properties)
self.assertEqual(expected_data, reality)
# === modules/common/age_filter.py (BurraAbhishek/VirtualElections_v2, Apache-2.0) ===
def user_age_filter(age: int, setting: dict) -> bool:
if not setting["boolRequired"]:
return True
elif setting["minAge"] <= 0 and setting["maxAge"] >= 100:
return True
else:
if setting["minAge"] <= 0:
if age <= setting["maxAge"]:
return True
else:
return False
elif setting["maxAge"] >= 100:
if age >= setting["minAge"]:
return True
else:
return False
else:
if setting["minAge"] <= age <= setting["maxAge"]:
return True
else:
return False
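# Illustrative usage of user_age_filter (added example, not part of the original
# module). The settings dict below is hypothetical; its keys mirror the ones the
# function reads: "boolRequired" toggles the filter, "minAge"/"maxAge" bound the range.
if __name__ == "__main__":
    example_setting = {"boolRequired": True, "minAge": 18, "maxAge": 100}
    print(user_age_filter(25, example_setting))  # True: at least minAge; maxAge >= 100 lifts the cap
    print(user_age_filter(16, example_setting))  # False: below minAge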
# === devlib/utils/serial_port.py (valschneider/devlib, Apache-2.0) ===
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from contextlib import contextmanager
from logging import Logger
import serial
import pexpect
from distutils.version import StrictVersion as V
if V(pexpect.__version__) < V('4.0.0'):
import fdpexpect
else:
from pexpect import fdpexpect
# Adding pexpect exceptions into this module's namespace
from pexpect import EOF, TIMEOUT # NOQA pylint: disable=W0611
from devlib.exception import HostError
class SerialLogger(Logger):
write = Logger.debug
def flush(self):
pass
def pulse_dtr(conn, state=True, duration=0.1):
"""Set the DTR line of the specified serial connection to the specified state
    for the specified duration (note: the initial state of the line is *not* checked)."""
conn.setDTR(state)
time.sleep(duration)
conn.setDTR(not state)
def get_connection(timeout, init_dtr=None, logcls=SerialLogger,
*args, **kwargs):
if init_dtr is not None:
kwargs['dsrdtr'] = True
try:
conn = serial.Serial(*args, **kwargs)
except serial.SerialException as e:
raise HostError(e.message)
if init_dtr is not None:
conn.setDTR(init_dtr)
conn.nonblocking()
conn.flushOutput()
target = fdpexpect.fdspawn(conn.fileno(), timeout=timeout)
target.logfile_read = logcls('read')
target.logfile_send = logcls('send')
# Monkey-patching sendline to introduce a short delay after
    # characters are sent to the serial device. If two sendline calls are issued
# one after another the second one might start putting characters
# into the serial device before the first one has finished, causing
# corruption. The delay prevents that.
tsln = target.sendline
def sendline(x):
tsln(x)
time.sleep(0.1)
target.sendline = sendline
return target, conn
def write_characters(conn, line, delay=0.05):
"""Write a single line out to serial charcter-by-character. This will ensure that nothing will
be dropped for longer lines."""
line = line.rstrip('\r\n')
for c in line:
conn.send(c)
time.sleep(delay)
conn.sendline('')
@contextmanager
def open_serial_connection(timeout, get_conn=False, init_dtr=None,
logcls=SerialLogger, *args, **kwargs):
"""
Opens a serial connection to a device.
:param timeout: timeout for the fdpexpect spawn object.
    :param get_conn: ``bool`` that specifies whether the underlying connection
        object should be yielded as well.
    :param init_dtr: specifies the initial DTR state that should be set.
All arguments are passed into the __init__ of serial.Serial. See
pyserial documentation for details:
http://pyserial.sourceforge.net/pyserial_api.html#serial.Serial
:returns: a pexpect spawn object connected to the device.
See: http://pexpect.sourceforge.net/pexpect.html
"""
target, conn = get_connection(timeout, init_dtr=init_dtr,
logcls=logcls, *args, **kwargs)
if get_conn:
yield target, conn
else:
yield target
target.close() # Closes the file descriptor used by the conn.
del conn
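# Minimal usage sketch (added example, not part of the original module). The
# device path '/dev/ttyUSB0', baud rate and prompt patterns are placeholders for
# whatever serial console is attached; extra keyword arguments are forwarded to
# serial.Serial via get_connection.
if __name__ == '__main__':
    with open_serial_connection(timeout=30, port='/dev/ttyUSB0',
                                baudrate=115200) as target:
        target.sendline('')                        # wake the console
        target.expect([r'\$ ', '# '], timeout=10)  # wait for a shell prompt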
# === tests/test_cloudformation/test_cloudformation_stack_crud.py (harveywi/moto, Apache-2.0) ===
from __future__ import unicode_literals
import os
import json
import boto
import boto.s3
import boto.s3.key
import boto.cloudformation
from boto.exception import BotoServerError
import sure # noqa
# Ensure 'assert_raises' context manager support for Python 2.6
import tests.backport_assert_raises # noqa
from nose.tools import assert_raises
from moto import mock_cloudformation_deprecated, mock_s3_deprecated, mock_route53_deprecated
from moto.cloudformation import cloudformation_backends
dummy_template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "Stack 1",
"Resources": {},
}
dummy_template2 = {
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "Stack 2",
"Resources": {},
}
# template with resource which has no delete attribute defined
dummy_template3 = {
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "Stack 3",
"Resources": {
"VPC": {
"Properties": {
"CidrBlock": "192.168.0.0/16",
},
"Type": "AWS::EC2::VPC"
}
},
}
dummy_template_json = json.dumps(dummy_template)
dummy_template_json2 = json.dumps(dummy_template2)
dummy_template_json3 = json.dumps(dummy_template3)
@mock_cloudformation_deprecated
def test_create_stack():
conn = boto.connect_cloudformation()
conn.create_stack(
"test_stack",
template_body=dummy_template_json,
)
stack = conn.describe_stacks()[0]
stack.stack_name.should.equal('test_stack')
stack.get_template().should.equal({
'GetTemplateResponse': {
'GetTemplateResult': {
'TemplateBody': dummy_template_json,
'ResponseMetadata': {
'RequestId': '2d06e36c-ac1d-11e0-a958-f9382b6eb86bEXAMPLE'
}
}
}
})
@mock_cloudformation_deprecated
@mock_route53_deprecated
def test_create_stack_hosted_zone_by_id():
conn = boto.connect_cloudformation()
dummy_template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "Stack 1",
"Parameters": {
},
"Resources": {
"Bar": {
"Type" : "AWS::Route53::HostedZone",
"Properties" : {
"Name" : "foo.bar.baz",
}
},
},
}
dummy_template2 = {
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "Stack 2",
"Parameters": {
"ZoneId": { "Type": "String" }
},
"Resources": {
"Foo": {
"Properties": {
"HostedZoneId": {"Ref": "ZoneId"},
"RecordSets": []
},
"Type": "AWS::Route53::RecordSetGroup"
}
},
}
conn.create_stack(
"test_stack",
template_body=json.dumps(dummy_template),
parameters={}.items()
)
r53_conn = boto.connect_route53()
zone_id = r53_conn.get_zones()[0].id
conn.create_stack(
"test_stack",
template_body=json.dumps(dummy_template2),
parameters={"ZoneId": zone_id}.items()
)
stack = conn.describe_stacks()[0]
assert stack.list_resources()
@mock_cloudformation_deprecated
def test_creating_stacks_across_regions():
west1_conn = boto.cloudformation.connect_to_region("us-west-1")
west1_conn.create_stack("test_stack", template_body=dummy_template_json)
west2_conn = boto.cloudformation.connect_to_region("us-west-2")
west2_conn.create_stack("test_stack", template_body=dummy_template_json)
list(west1_conn.describe_stacks()).should.have.length_of(1)
list(west2_conn.describe_stacks()).should.have.length_of(1)
@mock_cloudformation_deprecated
def test_create_stack_with_notification_arn():
conn = boto.connect_cloudformation()
conn.create_stack(
"test_stack_with_notifications",
template_body=dummy_template_json,
notification_arns='arn:aws:sns:us-east-1:123456789012:fake-queue'
)
stack = conn.describe_stacks()[0]
[n.value for n in stack.notification_arns].should.contain(
'arn:aws:sns:us-east-1:123456789012:fake-queue')
@mock_cloudformation_deprecated
@mock_s3_deprecated
def test_create_stack_from_s3_url():
s3_conn = boto.s3.connect_to_region('us-west-1')
bucket = s3_conn.create_bucket("foobar")
key = boto.s3.key.Key(bucket)
key.key = "template-key"
key.set_contents_from_string(dummy_template_json)
key_url = key.generate_url(expires_in=0, query_auth=False)
conn = boto.cloudformation.connect_to_region('us-west-1')
conn.create_stack('new-stack', template_url=key_url)
stack = conn.describe_stacks()[0]
stack.stack_name.should.equal('new-stack')
stack.get_template().should.equal(
{
'GetTemplateResponse': {
'GetTemplateResult': {
'TemplateBody': dummy_template_json,
'ResponseMetadata': {
'RequestId': '2d06e36c-ac1d-11e0-a958-f9382b6eb86bEXAMPLE'
}
}
}
})
@mock_cloudformation_deprecated
def test_describe_stack_by_name():
conn = boto.connect_cloudformation()
conn.create_stack(
"test_stack",
template_body=dummy_template_json,
)
stack = conn.describe_stacks("test_stack")[0]
stack.stack_name.should.equal('test_stack')
@mock_cloudformation_deprecated
def test_describe_stack_by_stack_id():
conn = boto.connect_cloudformation()
conn.create_stack(
"test_stack",
template_body=dummy_template_json,
)
stack = conn.describe_stacks("test_stack")[0]
stack_by_id = conn.describe_stacks(stack.stack_id)[0]
stack_by_id.stack_id.should.equal(stack.stack_id)
stack_by_id.stack_name.should.equal("test_stack")
@mock_cloudformation_deprecated
def test_describe_deleted_stack():
conn = boto.connect_cloudformation()
conn.create_stack(
"test_stack",
template_body=dummy_template_json,
)
stack = conn.describe_stacks("test_stack")[0]
stack_id = stack.stack_id
conn.delete_stack(stack.stack_id)
stack_by_id = conn.describe_stacks(stack_id)[0]
stack_by_id.stack_id.should.equal(stack.stack_id)
stack_by_id.stack_name.should.equal("test_stack")
stack_by_id.stack_status.should.equal("DELETE_COMPLETE")
@mock_cloudformation_deprecated
def test_get_template_by_name():
conn = boto.connect_cloudformation()
conn.create_stack(
"test_stack",
template_body=dummy_template_json,
)
template = conn.get_template("test_stack")
template.should.equal({
'GetTemplateResponse': {
'GetTemplateResult': {
'TemplateBody': dummy_template_json,
'ResponseMetadata': {
'RequestId': '2d06e36c-ac1d-11e0-a958-f9382b6eb86bEXAMPLE'
}
}
}
})
@mock_cloudformation_deprecated
def test_list_stacks():
conn = boto.connect_cloudformation()
conn.create_stack(
"test_stack",
template_body=dummy_template_json,
)
conn.create_stack(
"test_stack2",
template_body=dummy_template_json,
)
stacks = conn.list_stacks()
stacks.should.have.length_of(2)
stacks[0].template_description.should.equal("Stack 1")
@mock_cloudformation_deprecated
def test_delete_stack_by_name():
conn = boto.connect_cloudformation()
conn.create_stack(
"test_stack",
template_body=dummy_template_json,
)
conn.describe_stacks().should.have.length_of(1)
conn.delete_stack("test_stack")
conn.describe_stacks().should.have.length_of(0)
@mock_cloudformation_deprecated
def test_delete_stack_by_id():
conn = boto.connect_cloudformation()
stack_id = conn.create_stack(
"test_stack",
template_body=dummy_template_json,
)
conn.describe_stacks().should.have.length_of(1)
conn.delete_stack(stack_id)
conn.describe_stacks().should.have.length_of(0)
with assert_raises(BotoServerError):
conn.describe_stacks("test_stack")
conn.describe_stacks(stack_id).should.have.length_of(1)
@mock_cloudformation_deprecated
def test_delete_stack_with_resource_missing_delete_attr():
conn = boto.connect_cloudformation()
conn.create_stack(
"test_stack",
template_body=dummy_template_json3,
)
conn.describe_stacks().should.have.length_of(1)
conn.delete_stack("test_stack")
conn.describe_stacks().should.have.length_of(0)
@mock_cloudformation_deprecated
def test_bad_describe_stack():
conn = boto.connect_cloudformation()
with assert_raises(BotoServerError):
conn.describe_stacks("bad_stack")
@mock_cloudformation_deprecated()
def test_cloudformation_params():
dummy_template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "Stack 1",
"Resources": {},
"Parameters": {
"APPNAME": {
"Default": "app-name",
"Description": "The name of the app",
"Type": "String"
}
}
}
dummy_template_json = json.dumps(dummy_template)
cfn = boto.connect_cloudformation()
cfn.create_stack('test_stack1', template_body=dummy_template_json, parameters=[
('APPNAME', 'testing123')])
stack = cfn.describe_stacks('test_stack1')[0]
stack.parameters.should.have.length_of(1)
param = stack.parameters[0]
param.key.should.equal('APPNAME')
param.value.should.equal('testing123')
@mock_cloudformation_deprecated
def test_cloudformation_params_conditions_and_resources_are_distinct():
dummy_template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "Stack 1",
"Conditions": {
"FooEnabled": {
"Fn::Equals": [
{
"Ref": "FooEnabled"
},
"true"
]
},
"FooDisabled": {
"Fn::Not": [
{
"Fn::Equals": [
{
"Ref": "FooEnabled"
},
"true"
]
}
]
}
},
"Parameters": {
"FooEnabled": {
"Type": "String",
"AllowedValues": [
"true",
"false"
]
}
},
"Resources": {
"Bar": {
"Properties": {
"CidrBlock": "192.168.0.0/16",
},
"Condition": "FooDisabled",
"Type": "AWS::EC2::VPC"
}
}
}
dummy_template_json = json.dumps(dummy_template)
cfn = boto.connect_cloudformation()
cfn.create_stack('test_stack1', template_body=dummy_template_json, parameters=[('FooEnabled', 'true')])
stack = cfn.describe_stacks('test_stack1')[0]
resources = stack.list_resources()
assert not [resource for resource in resources if resource.logical_resource_id == 'Bar']
@mock_cloudformation_deprecated
def test_stack_tags():
conn = boto.connect_cloudformation()
conn.create_stack(
"test_stack",
template_body=dummy_template_json,
tags={"foo": "bar", "baz": "bleh"},
)
stack = conn.describe_stacks()[0]
dict(stack.tags).should.equal({"foo": "bar", "baz": "bleh"})
@mock_cloudformation_deprecated
def test_update_stack():
conn = boto.connect_cloudformation()
conn.create_stack(
"test_stack",
template_body=dummy_template_json,
)
conn.update_stack("test_stack", dummy_template_json2)
stack = conn.describe_stacks()[0]
stack.stack_status.should.equal("UPDATE_COMPLETE")
stack.get_template().should.equal({
'GetTemplateResponse': {
'GetTemplateResult': {
'TemplateBody': dummy_template_json2,
'ResponseMetadata': {
'RequestId': '2d06e36c-ac1d-11e0-a958-f9382b6eb86bEXAMPLE'
}
}
}
})
@mock_cloudformation_deprecated
def test_update_stack_with_previous_template():
conn = boto.connect_cloudformation()
conn.create_stack(
"test_stack",
template_body=dummy_template_json,
)
conn.update_stack("test_stack", use_previous_template=True)
stack = conn.describe_stacks()[0]
stack.stack_status.should.equal("UPDATE_COMPLETE")
stack.get_template().should.equal({
'GetTemplateResponse': {
'GetTemplateResult': {
'TemplateBody': dummy_template_json,
'ResponseMetadata': {
'RequestId': '2d06e36c-ac1d-11e0-a958-f9382b6eb86bEXAMPLE'
}
}
}
})
@mock_cloudformation_deprecated
def test_update_stack_with_parameters():
dummy_template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "Stack",
"Resources": {
"VPC": {
"Properties": {
"CidrBlock": {"Ref": "Bar"}
},
"Type": "AWS::EC2::VPC"
}
},
"Parameters": {
"Bar": {
"Type": "String"
}
}
}
dummy_template_json = json.dumps(dummy_template)
conn = boto.connect_cloudformation()
conn.create_stack(
"test_stack",
template_body=dummy_template_json,
parameters=[("Bar", "192.168.0.0/16")]
)
conn.update_stack(
"test_stack",
template_body=dummy_template_json,
parameters=[("Bar", "192.168.0.1/16")]
)
stack = conn.describe_stacks()[0]
assert stack.parameters[0].value == "192.168.0.1/16"
@mock_cloudformation_deprecated
def test_update_stack_replace_tags():
conn = boto.connect_cloudformation()
conn.create_stack(
"test_stack",
template_body=dummy_template_json,
tags={"foo": "bar"},
)
conn.update_stack(
"test_stack",
template_body=dummy_template_json,
tags={"foo": "baz"},
)
stack = conn.describe_stacks()[0]
stack.stack_status.should.equal("UPDATE_COMPLETE")
# since there is one tag it doesn't come out as a list
dict(stack.tags).should.equal({"foo": "baz"})
@mock_cloudformation_deprecated
def test_update_stack_when_rolled_back():
conn = boto.connect_cloudformation()
stack_id = conn.create_stack(
"test_stack", template_body=dummy_template_json)
cloudformation_backends[conn.region.name].stacks[
stack_id].status = 'ROLLBACK_COMPLETE'
with assert_raises(BotoServerError) as err:
conn.update_stack("test_stack", dummy_template_json)
ex = err.exception
ex.body.should.match(
r'is in ROLLBACK_COMPLETE state and can not be updated')
ex.error_code.should.equal('ValidationError')
ex.reason.should.equal('Bad Request')
ex.status.should.equal(400)
@mock_cloudformation_deprecated
def test_describe_stack_events_shows_create_update_and_delete():
conn = boto.connect_cloudformation()
stack_id = conn.create_stack(
"test_stack", template_body=dummy_template_json)
conn.update_stack(stack_id, template_body=dummy_template_json2)
conn.delete_stack(stack_id)
# assert begins and ends with stack events
events = conn.describe_stack_events(stack_id)
events[0].resource_type.should.equal("AWS::CloudFormation::Stack")
events[-1].resource_type.should.equal("AWS::CloudFormation::Stack")
# testing ordering of stack events without assuming resource events will not exist
# the AWS API returns events in reverse chronological order
stack_events_to_look_for = iter([
("DELETE_COMPLETE", None),
("DELETE_IN_PROGRESS", "User Initiated"),
("UPDATE_COMPLETE", None),
("UPDATE_IN_PROGRESS", "User Initiated"),
("CREATE_COMPLETE", None),
("CREATE_IN_PROGRESS", "User Initiated"),
])
try:
for event in events:
event.stack_id.should.equal(stack_id)
event.stack_name.should.equal("test_stack")
event.event_id.should.match(r"[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}")
if event.resource_type == "AWS::CloudFormation::Stack":
event.logical_resource_id.should.equal("test_stack")
event.physical_resource_id.should.equal(stack_id)
status_to_look_for, reason_to_look_for = next(
stack_events_to_look_for)
event.resource_status.should.equal(status_to_look_for)
if reason_to_look_for is not None:
event.resource_status_reason.should.equal(
reason_to_look_for)
except StopIteration:
assert False, "Too many stack events"
list(stack_events_to_look_for).should.be.empty
@mock_cloudformation_deprecated
def test_create_stack_lambda_and_dynamodb():
conn = boto.connect_cloudformation()
dummy_template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "Stack Lambda Test 1",
"Parameters": {
},
"Resources": {
"func1": {
"Type" : "AWS::Lambda::Function",
"Properties" : {
"Code": {
"S3Bucket": "bucket_123",
"S3Key": "key_123"
},
"FunctionName": "func1",
"Handler": "handler.handler",
"Role": "role1",
"Runtime": "python2.7",
"Description": "descr",
"MemorySize": 12345,
}
},
"func1version": {
"Type": "AWS::Lambda::Version",
"Properties": {
"FunctionName": {
"Ref": "func1"
}
}
},
"tab1": {
"Type" : "AWS::DynamoDB::Table",
"Properties" : {
"TableName": "tab1",
"KeySchema": [{
"AttributeName": "attr1",
"KeyType": "HASH"
}],
"AttributeDefinitions": [{
"AttributeName": "attr1",
"AttributeType": "string"
}],
"ProvisionedThroughput": {
"ReadCapacityUnits": 10,
"WriteCapacityUnits": 10
}
}
},
"func1mapping": {
"Type": "AWS::Lambda::EventSourceMapping",
"Properties": {
"FunctionName": {
"Ref": "func1"
},
"EventSourceArn": "arn:aws:dynamodb:region:XXXXXX:table/tab1/stream/2000T00:00:00.000",
"StartingPosition": "0",
"BatchSize": 100,
"Enabled": True
}
}
},
}
validate_s3_before = os.environ.get('VALIDATE_LAMBDA_S3', '')
try:
os.environ['VALIDATE_LAMBDA_S3'] = 'false'
conn.create_stack(
"test_stack_lambda_1",
template_body=json.dumps(dummy_template),
parameters={}.items()
)
finally:
os.environ['VALIDATE_LAMBDA_S3'] = validate_s3_before
stack = conn.describe_stacks()[0]
resources = stack.list_resources()
assert len(resources) == 4
@mock_cloudformation_deprecated
def test_create_stack_kinesis():
conn = boto.connect_cloudformation()
dummy_template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "Stack Kinesis Test 1",
"Parameters": {},
"Resources": {
"stream1": {
"Type" : "AWS::Kinesis::Stream",
"Properties" : {
"Name": "stream1",
"ShardCount": 2
}
}
}
}
conn.create_stack(
"test_stack_kinesis_1",
template_body=json.dumps(dummy_template),
parameters={}.items()
)
stack = conn.describe_stacks()[0]
resources = stack.list_resources()
assert len(resources) == 1
# === app/app/settings.py (bigmaho/recipe-app-api, MIT) ===
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 3.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'j#g+f@s6+t3h%t8#*%v1j^x-gh=q)rqq55w#%ubw=07*adss7d'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'core',
'user',
'recipe',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ.get('DB_HOST'),
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASS'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = '/vol/web/media'
STATIC_ROOT = '/vol/web/static'
AUTH_USER_MODEL = 'core.User'
# === tensorflow/python/kernel_tests/dynamic_stitch_op_test.py (tianyapiaozi/tensorflow, Apache-2.0) ===
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.data_flow_ops.{,parallel_}dynamic_stitch."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gradients_impl
import tensorflow.python.ops.data_flow_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class DynamicStitchTestBase(object):
def __init__(self, stitch_op):
self.stitch_op = stitch_op
def testScalar(self):
with self.test_session(use_gpu=True):
indices = [constant_op.constant(0), constant_op.constant(1)]
data = [constant_op.constant(40), constant_op.constant(60)]
for step in -1, 1:
stitched_t = self.stitch_op(indices[::step], data)
stitched_val = stitched_t.eval()
self.assertAllEqual([40, 60][::step], stitched_val)
# Dimension 0 is max(flatten(indices))+1.
self.assertEqual([2], stitched_t.get_shape().as_list())
def testShapeInferenceForScalarWithNonConstantIndices(self):
with self.test_session(use_gpu=True):
indices = [
array_ops.placeholder(dtype=dtypes.int32),
constant_op.constant(1)
]
data = [constant_op.constant(40), constant_op.constant(60)]
for step in -1, 1:
stitched_t = self.stitch_op(indices[::step], data)
# Dimension 0 is max(flatten(indices))+1, but the first indices input is
# not a constant tensor, so we can only infer it as a vector of unknown
# length.
self.assertEqual([None], stitched_t.get_shape().as_list())
def testSimpleOneDimensional(self):
with self.test_session(use_gpu=True):
indices = [
constant_op.constant([0, 4, 7]),
constant_op.constant([1, 6, 2, 3, 5])
]
data = [
constant_op.constant([0, 40, 70]),
constant_op.constant([10, 60, 20, 30, 50])
]
stitched_t = self.stitch_op(indices, data)
stitched_val = stitched_t.eval()
self.assertAllEqual([0, 10, 20, 30, 40, 50, 60, 70], stitched_val)
# Dimension 0 is max(flatten(indices))+1.
self.assertEqual([8], stitched_t.get_shape().as_list())
def testOneListOneDimensional(self):
with self.test_session(use_gpu=True):
indices = [constant_op.constant([1, 6, 2, 3, 5, 0, 4, 7])]
data = [constant_op.constant([10, 60, 20, 30, 50, 0, 40, 70])]
stitched_t = self.stitch_op(indices, data)
stitched_val = stitched_t.eval()
self.assertAllEqual([0, 10, 20, 30, 40, 50, 60, 70], stitched_val)
# Dimension 0 is max(flatten(indices))+1.
self.assertEqual([8], stitched_t.get_shape().as_list())
def testSimpleTwoDimensional(self):
with self.test_session(use_gpu=True):
indices = [
constant_op.constant([0, 4, 7]),
constant_op.constant([1, 6]),
constant_op.constant([2, 3, 5])
]
data = [
constant_op.constant([[0, 1], [40, 41], [70, 71]]),
constant_op.constant([[10, 11], [60, 61]]),
constant_op.constant([[20, 21], [30, 31], [50, 51]])
]
stitched_t = self.stitch_op(indices, data)
stitched_val = stitched_t.eval()
self.assertAllEqual([[0, 1], [10, 11], [20, 21], [30, 31], [40, 41],
[50, 51], [60, 61], [70, 71]], stitched_val)
# Dimension 0 is max(flatten(indices))+1.
self.assertEqual([8, 2], stitched_t.get_shape().as_list())
def testHigherRank(self):
with self.test_session(use_gpu=True) as sess:
indices = [
constant_op.constant(6),
constant_op.constant([4, 1]),
constant_op.constant([[5, 2], [0, 3]])
]
data = [
constant_op.constant([61., 62.]),
constant_op.constant([[41., 42.], [11., 12.]]),
constant_op.constant([[[51., 52.], [21., 22.]],
[[1., 2.], [31., 32.]]])
]
stitched_t = self.stitch_op(indices, data)
stitched_val = stitched_t.eval()
correct = 10. * np.arange(7)[:, None] + [1., 2.]
self.assertAllEqual(correct, stitched_val)
self.assertEqual([7, 2], stitched_t.get_shape().as_list())
# Test gradients
stitched_grad = 7. * stitched_val
grads = gradients_impl.gradients(stitched_t, indices + data,
stitched_grad)
self.assertEqual(grads[:3], [None] * 3) # Indices have no gradients
for datum, grad in zip(data, sess.run(grads[3:])):
self.assertAllEqual(7. * datum.eval(), grad)
def testErrorIndicesMultiDimensional(self):
indices = [
constant_op.constant([0, 4, 7]),
constant_op.constant([[1, 6, 2, 3, 5]])
]
data = [
constant_op.constant([[0, 40, 70]]),
constant_op.constant([10, 60, 20, 30, 50])
]
with self.assertRaises(ValueError):
self.stitch_op(indices, data)
def testErrorDataNumDimsMismatch(self):
indices = [
constant_op.constant([0, 4, 7]),
constant_op.constant([1, 6, 2, 3, 5])
]
data = [
constant_op.constant([0, 40, 70]),
constant_op.constant([[10, 60, 20, 30, 50]])
]
with self.assertRaises(ValueError):
self.stitch_op(indices, data)
def testErrorDataDimSizeMismatch(self):
indices = [
constant_op.constant([0, 4, 5]),
constant_op.constant([1, 6, 2, 3])
]
data = [
constant_op.constant([[0], [40], [70]]),
constant_op.constant([[10, 11], [60, 61], [20, 21], [30, 31]])
]
with self.assertRaises(ValueError):
self.stitch_op(indices, data)
def testErrorDataAndIndicesSizeMismatch(self):
indices = [
constant_op.constant([0, 4, 7]),
constant_op.constant([1, 6, 2, 3, 5])
]
data = [
constant_op.constant([0, 40, 70]),
constant_op.constant([10, 60, 20, 30])
]
with self.assertRaises(ValueError):
self.stitch_op(indices, data)
class DynamicStitchTest(DynamicStitchTestBase, test.TestCase):
def __init__(self, *test_case_args):
test.TestCase.__init__(self, *test_case_args)
DynamicStitchTestBase.__init__(self, data_flow_ops.dynamic_stitch)
class ParallelDynamicStitchTest(DynamicStitchTestBase, test.TestCase):
def __init__(self, *test_case_args):
test.TestCase.__init__(self, *test_case_args)
DynamicStitchTestBase.__init__(self, data_flow_ops.parallel_dynamic_stitch)
def testScalar(self):
with self.test_session(use_gpu=True):
indices = [constant_op.constant(0), constant_op.constant(1)]
data = [constant_op.constant(40.0), constant_op.constant(60.0)]
for step in -1, 1:
stitched_t = data_flow_ops.dynamic_stitch(indices[::step], data)
stitched_val = stitched_t.eval()
self.assertAllEqual([40.0, 60.0][::step], stitched_val)
# Dimension 0 is max(flatten(indices))+1.
self.assertEqual([2], stitched_t.get_shape().as_list())
def testHigherRank(self):
with self.test_session(use_gpu=True) as sess:
indices = [
constant_op.constant(6),
constant_op.constant([4, 1]),
constant_op.constant([[5, 2], [0, 3]])
]
data = [
constant_op.constant([61, 62], dtype=dtypes.float32),
constant_op.constant([[41, 42], [11, 12]], dtype=dtypes.float32),
constant_op.constant(
[[[51, 52], [21, 22]], [[1, 2], [31, 32]]], dtype=dtypes.float32)
]
stitched_t = data_flow_ops.dynamic_stitch(indices, data)
stitched_val = stitched_t.eval()
correct = 10 * np.arange(7)[:, None] + [1.0, 2.0]
self.assertAllEqual(correct, stitched_val)
self.assertEqual([7, 2], stitched_t.get_shape().as_list())
# Test gradients
stitched_grad = 7 * stitched_val
grads = gradients_impl.gradients(stitched_t, indices + data,
stitched_grad)
self.assertEqual(grads[:3], [None] * 3) # Indices have no gradients
for datum, grad in zip(data, sess.run(grads[3:])):
self.assertAllEqual(7.0 * datum.eval(), grad)
# GPU version unit tests
def testScalarGPU(self):
with self.test_session():
indices = [constant_op.constant(0), constant_op.constant(1)]
data = [constant_op.constant(40.0), constant_op.constant(60.0)]
for step in -1, 1:
stitched_t = data_flow_ops.dynamic_stitch(indices[::step], data)
stitched_val = stitched_t.eval()
self.assertAllEqual([40.0, 60.0][::step], stitched_val)
# Dimension 0 is max(flatten(indices))+1.
self.assertEqual([2], stitched_t.get_shape().as_list())
def testHigherRankGPU(self):
with self.test_session() as sess:
indices = [
constant_op.constant(6),
constant_op.constant([4, 1]),
constant_op.constant([[5, 2], [0, 3]])
]
data = [
constant_op.constant([61, 62], dtype=dtypes.float32),
constant_op.constant([[41, 42], [11, 12]], dtype=dtypes.float32),
constant_op.constant(
[[[51, 52], [21, 22]], [[1, 2], [31, 32]]], dtype=dtypes.float32)
]
stitched_t = data_flow_ops.dynamic_stitch(indices, data)
stitched_val = stitched_t.eval()
correct = 10 * np.arange(7)[:, None] + [1.0, 2.0]
self.assertAllEqual(correct, stitched_val)
self.assertEqual([7, 2], stitched_t.get_shape().as_list())
# Test gradients
stitched_grad = 7 * stitched_val
grads = gradients_impl.gradients(stitched_t, indices + data,
stitched_grad)
self.assertEqual(grads[:3], [None] * 3) # Indices have no gradients
for datum, grad in zip(data, sess.run(grads[3:])):
self.assertAllEqual(7.0 * datum.eval(), grad)
if __name__ == "__main__":
test.main()
# === exercicio103.py (juniooor/Exercicios-python, MIT) ===
# Write a program with a function named somaImposto. The function takes two formal parameters: taxaImposto, the sales tax expressed as a percentage, and custo, the cost of an item before tax. The function "changes" the cost value to include the sales tax.
print('-----CHALLENGE 103-----')
def somaimposto(taxa,prod):
taxa=taxa/100
juros=prod*taxa
soma=juros+prod
return soma
produto=float(input('Enter the product price: '))
taxa=float(input('Enter the tax rate [%]: '))
result=somaimposto(taxa,produto)
print(f'The product with tax comes to R${result}')
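# Worked example with illustrative values (added, not part of the original
# exercise): a 25% tax on a 200.00 item yields 250.00.
assert somaimposto(25, 200.0) == 250.0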
# === server/view/auth/signUp.py (DSMFOREST/Forest.NewBackend, MIT) ===
import random
from flask import request, Response, url_for, render_template
from flasgger import swag_from
from flask_restful import Resource
from view import send_email
from view.auth.confirm import Confirm
from docs.auth import SIGN_UP_POST
from model.user import UserModel
class SignUp(Resource):
@swag_from(SIGN_UP_POST)
def post(self):
payload = request.json
        ID = payload['userId']  # user ID
EM = payload['email']
PW = payload['password']
if UserModel.objects(userId=ID).first():
return {"status": "The ID already exists."}, 409
else:
UserModel(
userId=ID,
password=PW,
name=payload['name'],
student_number=payload['student_number'],
email=EM,
token=int(random.randint(100000, 999999)),
confirmed=False,
admin=False
).save()
code = UserModel.objects(userId=ID).first().token
send_email(
to=EM,
link="http://aws.jaehoon.kim:5002/confirm/" + str(ID) + "." + str(code)
)
return {"status": "successfully processed!"}, 201
# === cicl/apps.py (chitzuchang/CICL, MIT) ===
from django.apps import AppConfig
class CICLConfig(AppConfig):
name = "cicl"
# === python/pip.bzl (Cecilwang/rules_python, Apache-2.0) ===
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Import pip requirements into Bazel."""
def _pip_import_impl(repository_ctx):
"""Core implementation of pip_import."""
# Add an empty top-level BUILD file.
# This is because Bazel requires BUILD files along all paths accessed
# via //this/sort/of:path and we wouldn't be able to load our generated
# requirements.bzl without it.
repository_ctx.file("BUILD", "")
# To see the output, pass: quiet=False
result = repository_ctx.execute([
repository_ctx.attr.python_interpreter,
repository_ctx.path(repository_ctx.attr._script),
"--python_interpreter",
repository_ctx.attr.python_interpreter,
"--name",
repository_ctx.attr.name,
"--input",
repository_ctx.path(repository_ctx.attr.requirements),
"--output",
repository_ctx.path("requirements.bzl"),
"--directory",
repository_ctx.path(""),
])
if result.return_code:
fail("pip_import failed: %s (%s)" % (result.stdout, result.stderr))
pip_import = repository_rule(
attrs = {
"python_interpreter": attr.string(default="python"),
"requirements": attr.label(
mandatory = True,
allow_single_file = True,
),
"_script": attr.label(
executable = True,
default = Label("//tools:piptool.par"),
cfg = "host",
),
},
implementation = _pip_import_impl,
)
"""A rule for importing <code>requirements.txt</code> dependencies into Bazel.
This rule imports a <code>requirements.txt</code> file and generates a new
<code>requirements.bzl</code> file. This is used via the <code>WORKSPACE</code>
pattern:
<pre><code>pip_import(
name = "foo",
requirements = ":requirements.txt",
)
load("@foo//:requirements.bzl", "pip_install")
pip_install()
</code></pre>
You can then reference imported dependencies from your <code>BUILD</code>
file with:
<pre><code>load("@foo//:requirements.bzl", "requirement")
py_library(
name = "bar",
...
deps = [
"//my/other:dep",
requirement("futures"),
requirement("mock"),
],
)
</code></pre>
Or alternatively:
<pre><code>load("@foo//:requirements.bzl", "all_requirements")
py_binary(
name = "baz",
...
deps = [
":foo",
] + all_requirements,
)
</code></pre>
Args:
requirements: The label of a requirements.txt file.
"""
def pip_repositories():
"""Pull in dependencies needed for pulling in pip dependencies.
A placeholder method that will eventually pull in any dependencies
needed to install pip dependencies.
"""
pass
# === notification/notifier/slack.py (gueux/openduty, MIT) ===
from slacker import Slacker
class SlackNotifier:
def __init__(self, config):
self.__config = config
def notify(self, notification):
slack = Slacker(self.__config['apikey'])
response = slack.chat.post_message(notification.user_to_notify.profile.slack_room_name, notification.message,
username="Openduty", icon_url="https://slack.global.ssl.fastly.net/1937/img/services/pagerduty_48.png")
if not response.error:
print "Slack message sent"
else:
print "Failed to send Slack message"
# === philbinss/flipflops.py (martinohanlon/PhilbinSS, MIT) ===
from interfaces import Interface
from logicgates import Nor, And, Not, Or, Nand
from components import Split
from primitives import Cathode
from mixins import InputSetMixin, InputResetMixin, InputDMixin, InputJMixin, InputKMixin, InputClockMixin, OutputQ_Mixin, OutputQMixin
from mixins import InputAMixin, InputBMixin, InputCMixin, OutputMixin
class SRFlipFlop(Interface, InputSetMixin, InputResetMixin, OutputQ_Mixin, OutputQMixin):
"""
    The implementation of an SR (set / reset) flip flop
"""
def __init__(self):
inputs = {}
outputs = {}
n1 = Nor()
n2 = Nor()
inputs["set"] = n1.input_a
inputs["reset"] = n2.input_b
output_q_ = Cathode()
output_q = Cathode()
n1_split = Split()
n2_split = Split()
n1.output.connect(n1_split.input)
n1_split.connect(n2.input_a)
n1_split.connect(output_q_)
n2.output.connect(n2_split.input)
n2_split.connect(n1.input_b)
n2_split.connect(output_q)
outputs["output_q_"] = output_q_
outputs["output_q"] = output_q
super(SRFlipFlop, self).__init__(inputs, outputs)
def __str__(self):
return "SRFlipFlop: " + super(SRFlipFlop, self).__str__()
class JKFlipFlop(Interface, InputJMixin, InputKMixin, InputClockMixin, OutputQ_Mixin, OutputQMixin):
"""
The implementation of a JK flip flop
"""
def __init__(self):
inputs = {}
outputs = {}
aj1 = And()
aj2 = And()
ak1 = And()
ak2 = And()
sr = SRFlipFlop()
clk_split = Split()
q_split = Split()
qsplit = Split()
#connect up the inputs
inputs["input_j"] = aj1.input_a
inputs["clock"] = clk_split.input
clk_split.connect(aj1.input_b)
clk_split.connect(ak1.input_a)
inputs["input_k"] = ak1.input_b
#connect the 2nd AND gates to the SR flip flop
aj1.output.connect(aj2.input_b)
ak1.output.connect(ak2.input_a)
aj2.output.connect(sr.set)
ak2.output.connect(sr.reset)
#connect up the sr outputs
output_q_ = Cathode()
output_q = Cathode()
sr.output_q_.connect(q_split.input)
q_split.connect(aj2.input_a)
q_split.connect(output_q_)
sr.output_q.connect(qsplit.input)
qsplit.connect(ak2.input_b)
qsplit.connect(output_q)
outputs["output_q_"] = output_q_
outputs["output_q"] = output_q
super(JKFlipFlop, self).__init__(inputs, outputs)
def __str__(self):
return "JKFlipFlop: " + super(JKFlipFlop, self).__str__()
class ThreeInputNand(Interface, InputAMixin, InputBMixin, InputCMixin, OutputMixin):
"""
    The implementation of a Nand gate; it accepts three inputs and has a single output
"""
def __init__(self):
a1 = And()
a2 = And()
n = Not()
inputs = {}
inputs["input_a"] = a1.input_a
inputs["input_b"] = a1.input_b
inputs["input_c"] = a2.input_a
a1.output.connect(a2.input_b)
a2.output.connect(n.input)
outputs = {}
outputs["output"] = n.output
super(ThreeInputNand, self).__init__(inputs, outputs)
def __str__(self):
return "ThreeInputNand: " + super(ThreeInputNand, self).__str__()
class MasterSlaveJKFlipFlop(Interface, InputJMixin, InputKMixin, InputClockMixin, OutputQ_Mixin, OutputQMixin):
"""
    The implementation of a master-slave JK flip flop
"""
def __init__(self):
inputs = {}
outputs = {}
n1 = ThreeInputNand()
n2 = ThreeInputNand()
n3 = Nand()
n4 = Nand()
n5 = Nand()
n6 = Nand()
n7 = Nand()
n8 = Nand()
n = Not()
clk_split = Split()
n3_split = Split()
n4_split = Split()
n_split = Split()
n7_split = Split()
n8_split = Split()
output_q_ = Cathode()
output_q = Cathode()
self.components = {}
self.components["clk_split"] = clk_split
self.components["n3_split"] = n3_split
self.components["n4_split"] = n4_split
self.components["n_split"] = n_split
# inputs
inputs["input_j"] = n1.input_b
inputs["clock"] = clk_split.input
inputs["input_k"] = n2.input_b
# clock split
clk_split.connect(n1.input_c)
clk_split.connect(n2.input_a)
clk_split.connect(n.input)
# nand 1
n1.output.connect(n3.input_a)
# nand 2
n2.output.connect(n4.input_b)
# nand 3
n3.output.connect(n3_split.input)
# nand 4
n4.output.connect(n4_split.input)
# not
n.output.connect(n_split.input)
# nand 3 split
n3_split.connect(n4.input_a)
n3_split.connect(n5.input_a)
# nand 4 split
n4_split.connect(n3.input_b)
n4_split.connect(n6.input_b)
# not split
n_split.connect(n5.input_b)
n_split.connect(n6.input_a)
# nand 5
n5.output.connect(n7.input_a)
# nand 6
n6.output.connect(n8.input_b)
# nand 7
n7.output.connect(n7_split.input)
# nand 8
n8.output.connect(n8_split.input)
# nand 7 split
n7_split.connect(n8.input_a)
n7_split.connect(output_q)
n7_split.connect(n2.input_c)
# nand 8 split
n8_split.connect(n7.input_b)
n8_split.connect(output_q_)
n8_split.connect(n1.input_a)
outputs["output_q_"] = output_q_
outputs["output_q"] = output_q
super(MasterSlaveJKFlipFlop, self).__init__(inputs, outputs)
def __str__(self):
return "MasterSlaveJKFlipFlop: " + super(MasterSlaveJKFlipFlop, self).__str__()
class DFlipFlop(Interface, InputDMixin, InputClockMixin, OutputQ_Mixin, OutputQMixin):
"""
The implementation of a D flip flop
"""
def __init__(self):
inputs = {}
outputs = {}
n = Not()
a1 = And()
a2 = And()
sr = SRFlipFlop()
clk_split = Split()
d_split = Split()
#connect up the inputs
inputs["input_d"] = d_split.input
d_split.connect(n.input)
d_split.connect(a2.input_b)
n.output.connect(a1.input_a)
inputs["clock"] = clk_split.input
clk_split.connect(a1.input_b)
clk_split.connect(a2.input_a)
a1.output.connect(sr.set)
a2.output.connect(sr.reset)
outputs["output_q_"] = sr.output_q_
outputs["output_q"] = sr.output_q
super(DFlipFlop, self).__init__(inputs, outputs)
def __str__(self):
return "DFlipFlop: " + super(DFlipFlop, self).__str__()
# === WebUtils/Tests/TestHTMLTag.py (PeaceWorksTechnologySolutions/w4py, MIT) ===
import os
import sys
import unittest
sys.path.insert(1, os.path.abspath('../..'))
from MiscUtils import StringIO
from WebUtils.HTMLTag import HTMLReader
class HTMLTagTest(unittest.TestCase):
def setUp(self):
self._html = """\
<html>
<head>
<title>Example</title>
</head>
<body lang="en">
<p>What's up, <i>doc</i>?</p>
<hr>
<table id="dataTable">
<tr> <th> x </th> <th> y </th> </tr>
<tr><td class="datum">0</td><td class="datum">0</td></tr>
</table>
</body>
</html>"""
def checkBasics(self):
reader = HTMLReader()
tag = reader.readString('<html> </html>')
self.assertEqual(tag.name(), 'html')
self.assertEqual(reader.rootTag(), tag)
self.assertTrue(reader.filename() is None)
out = StringIO()
tag.pprint(out)
self.assertEqual(out.getvalue(), '<html>\n</html>\n')
def checkReuseReader(self):
reader = HTMLReader()
reader.readString('<html> </html>')
tag = reader.readString('<html> <body> </body> </html>')
self.assertFalse(reader.rootTag() is None)
self.assertEqual(reader.rootTag(), tag)
tag = reader.readString('<html> </html>', retainRootTag=0)
self.assertFalse(tag is None)
self.assertTrue(reader.rootTag() is None)
def checkAccess(self):
html = HTMLReader().readString(self._html)
# Name
self.assertEqual(html.name(), 'html')
# Attrs
self.assertEqual(html.numAttrs(), 0)
self.assertFalse(html.hasAttr('foo'))
self.assertRaises(KeyError, html.attr, 'foo')
self.assertTrue(html.attr('foo', None) is None)
# Children and subtags, when both are the same.
for numFoos, fooAt, foos in [
[html.numChildren, html.childAt, html.children],
[html.numSubtags, html.subtagAt, html.subtags]]:
self.assertEqual(numFoos(), 2)
self.assertEqual(len(foos()), 2)
self.assertEqual(fooAt(0).name(), 'head')
self.assertEqual(fooAt(1).name(), 'body')
# Children and subtags when they're different
body = html.subtagAt(1)
p = body.subtagAt(0)
self.assertEqual(p.name(), 'p')
self.assertEqual(p.numChildren(), 3)
self.assertEqual(p.numSubtags(), 1)
def checkMatchingAttr(self):
html = HTMLReader().readString(self._html)
self.assertEqual(
html.tagWithMatchingAttr('lang', 'en').name(), 'body')
self.assertEqual(
html.tagWithMatchingAttr('id', 'dataTable').name(), 'table')
self.assertEqual(html.tagWithId('dataTable').name(), 'table')
def checkInvalidHTML(self):
from WebUtils.HTMLTag import (
HTMLTagUnbalancedError, HTMLTagIncompleteError)
reader = HTMLReader()
html = '<html> <body> <table> </body> </html>'
self.assertRaises(HTMLTagUnbalancedError, reader.readString, html)
html = '<html> <body>'
self.assertRaises(HTMLTagIncompleteError, reader.readString, html)
def tearDown(self):
del self._html
def makeTestSuite():
cases = ['Basics', 'ReuseReader', 'Access', 'MatchingAttr', 'InvalidHTML']
tests = [HTMLTagTest('check'+case) for case in cases]
return unittest.TestSuite(tests)
if __name__ == '__main__':
runner = unittest.TextTestRunner(stream=sys.stdout)
unittest.main(defaultTest='makeTestSuite', testRunner=runner)
# === skka/settings.py (McCarthyCode/Susan-Kill-Kegan-and-Associates, MIT) ===
"""
Django settings for skka project.
Generated by 'django-admin startproject' using Django 3.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
import pytz
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Retrieve production stage environment variable
class MissingEnvironmentVariable(Exception):
pass
class InvalidEnvironmentVariable(Exception):
pass
try:
STAGE = os.environ['STAGE']
except KeyError:
raise MissingEnvironmentVariable(
'Environment variable STAGE is not defined.')
# SECURITY WARNING: don't run with debug turned on in production!
if STAGE == 'development' or STAGE == 'staging':
DEBUG = True
elif STAGE == 'production':
DEBUG = False
else:
raise InvalidEnvironmentVariable(
'The value of environment variable STAGE is not valid.')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY_FILE = '%s/auth/secret.txt' % BASE_DIR
with open(SECRET_KEY_FILE, 'r', encoding='utf8') as f:
content = f.readline()
SECRET_KEY = content[:-1]
if STAGE == 'development':
ALLOWED_HOSTS = [
'localhost',
]
elif STAGE == 'staging':
ALLOWED_HOSTS = [
'staging.skkeganlandscapes.com',
]
elif STAGE == 'production':
ALLOWED_HOSTS = [
'skkeganlandscapes.com',
'www.skkeganlandscapes.com',
]
# Application definition
INSTALLED_APPS = [
'home',
'gallery',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'skka.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'skka.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
if STAGE == 'development':
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
else:
PGPASSWORD_FILE = '%s/auth/.pgpass' % BASE_DIR
with open(PGPASSWORD_FILE, 'r', encoding='utf8') as f:
content = f.readline()
PGPASSWORD = content[14:-1]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'skka',
'USER': 'skka',
'PASSWORD': PGPASSWORD,
'HOST': 'localhost',
'PORT': '',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME':
'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Chicago'
TZ = pytz.timezone(TIME_ZONE)
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles/')
# Media files
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Additional variables
NAME = 'Susan Kill Kegan & Associates'
| 25.529412
| 83
| 0.664851
|
c6fc1b7dc38c271496ee13679cd5c310a441fc61
| 16,745
|
py
|
Python
|
test/functional/rpc_decodescript.py
|
gingfinger/divi99
|
3b0602b41bf35fb1e30c12b1bf06ef1da58935eb
|
[
"MIT"
] | null | null | null |
test/functional/rpc_decodescript.py
|
gingfinger/divi99
|
3b0602b41bf35fb1e30c12b1bf06ef1da58935eb
|
[
"MIT"
] | null | null | null |
test/functional/rpc_decodescript.py
|
gingfinger/divi99
|
3b0602b41bf35fb1e30c12b1bf06ef1da58935eb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Divi Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test decoding scripts via decodescript RPC command."""
from test_framework.messages import CTransaction, sha256
from test_framework.test_framework import DiviTestFramework
from test_framework.util import assert_equal, bytes_to_hex_str, hex_str_to_bytes
from io import BytesIO
class DecodeScriptTest(DiviTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def decodescript_script_sig(self):
signature = '304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001'
push_signature = '48' + signature
public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2'
push_public_key = '21' + public_key
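        # '48' and '21' are raw push opcodes: push 0x48 = 72 bytes (the DER signature plus
        # the sighash byte) and 0x21 = 33 bytes (a compressed public key)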
# below are test cases for all of the standard transaction types
# 1) P2PK scriptSig
# the scriptSig of a public key scriptPubKey simply pushes a signature onto the stack
rpc_result = self.nodes[0].decodescript(push_signature)
assert_equal(signature, rpc_result['asm'])
# 2) P2PKH scriptSig
rpc_result = self.nodes[0].decodescript(push_signature + push_public_key)
assert_equal(signature + ' ' + public_key, rpc_result['asm'])
# 3) multisig scriptSig
# this also tests the leading portion of a P2SH multisig scriptSig
# OP_0 <A sig> <B sig>
rpc_result = self.nodes[0].decodescript('00' + push_signature + push_signature)
assert_equal('0 ' + signature + ' ' + signature, rpc_result['asm'])
# 4) P2SH scriptSig
# an empty P2SH redeemScript is valid and makes for a very simple test case.
# thus, such a spending scriptSig would just need to pass the outer redeemScript
# hash test and leave true on the top of the stack.
rpc_result = self.nodes[0].decodescript('5100')
assert_equal('1 0', rpc_result['asm'])
# 5) null data scriptSig - no such thing because null data scripts can not be spent.
# thus, no test case for that standard transaction type is here.
def decodescript_script_pub_key(self):
public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2'
push_public_key = '21' + public_key
public_key_hash = '5dd1d3a048119c27b28293056724d9522f26d945'
push_public_key_hash = '14' + public_key_hash
uncompressed_public_key = '04b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb25e01fc8fde47c96c98a4f3a8123e33a38a50cf9025cc8c4494a518f991792bb7'
push_uncompressed_public_key = '41' + uncompressed_public_key
p2wsh_p2pk_script_hash = 'd8590cf8ea0674cf3d49fd7ca249b85ef7485dea62c138468bddeb20cd6519f7'
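        # '41' is the push opcode for 0x41 = 65 bytes (an uncompressed public key); the
        # P2WSH value above is the SHA256 of a P2PK script, used in the P2WSH test below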
# below are test cases for all of the standard transaction types
# 1) P2PK scriptPubKey
# <pubkey> OP_CHECKSIG
rpc_result = self.nodes[0].decodescript(push_public_key + 'ac')
assert_equal(public_key + ' OP_CHECKSIG', rpc_result['asm'])
# P2PK is translated to P2WPKH
assert_equal('0 ' + public_key_hash, rpc_result['segwit']['asm'])
# 2) P2PKH scriptPubKey
# OP_DUP OP_HASH160 <PubKeyHash> OP_EQUALVERIFY OP_CHECKSIG
rpc_result = self.nodes[0].decodescript('76a9' + push_public_key_hash + '88ac')
assert_equal('OP_DUP OP_HASH160 ' + public_key_hash + ' OP_EQUALVERIFY OP_CHECKSIG', rpc_result['asm'])
# P2PKH is translated to P2WPKH
assert_equal('0 ' + public_key_hash, rpc_result['segwit']['asm'])
# 3) multisig scriptPubKey
# <m> <A pubkey> <B pubkey> <C pubkey> <n> OP_CHECKMULTISIG
# just imagine that the pub keys used below are different.
# for our purposes here it does not matter that they are the same even though it is unrealistic.
multisig_script = '52' + push_public_key + push_public_key + push_public_key + '53ae'
rpc_result = self.nodes[0].decodescript(multisig_script)
assert_equal('2 ' + public_key + ' ' + public_key + ' ' + public_key + ' 3 OP_CHECKMULTISIG', rpc_result['asm'])
# multisig in P2WSH
multisig_script_hash = bytes_to_hex_str(sha256(hex_str_to_bytes(multisig_script)))
assert_equal('0 ' + multisig_script_hash, rpc_result['segwit']['asm'])
# 4) P2SH scriptPubKey
# OP_HASH160 <Hash160(redeemScript)> OP_EQUAL.
# push_public_key_hash here should actually be the hash of a redeem script.
# but this works the same for purposes of this test.
rpc_result = self.nodes[0].decodescript('a9' + push_public_key_hash + '87')
assert_equal('OP_HASH160 ' + public_key_hash + ' OP_EQUAL', rpc_result['asm'])
        # P2SH does not work in segwit scripts. decodescript should not return a result for it.
assert 'segwit' not in rpc_result
# 5) null data scriptPubKey
# use a signature look-alike here to make sure that we do not decode random data as a signature.
# this matters if/when signature sighash decoding comes along.
# would want to make sure that no such decoding takes place in this case.
signature_imposter = '48304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001'
# OP_RETURN <data>
rpc_result = self.nodes[0].decodescript('6a' + signature_imposter)
assert_equal('OP_RETURN ' + signature_imposter[2:], rpc_result['asm'])
# 6) a CLTV redeem script. redeem scripts are in-effect scriptPubKey scripts, so adding a test here.
# OP_NOP2 is also known as OP_CHECKLOCKTIMEVERIFY.
# just imagine that the pub keys used below are different.
# for our purposes here it does not matter that they are the same even though it is unrealistic.
#
# OP_IF
# <receiver-pubkey> OP_CHECKSIGVERIFY
# OP_ELSE
# <lock-until> OP_CHECKLOCKTIMEVERIFY OP_DROP
# OP_ENDIF
# <sender-pubkey> OP_CHECKSIG
#
# lock until block 500,000
cltv_script = '63' + push_public_key + 'ad670320a107b17568' + push_public_key + 'ac'
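        # byte-by-byte: 63=OP_IF, ad=OP_CHECKSIGVERIFY, 67=OP_ELSE, 03 20a107=push 500000
        # (little-endian), b1=OP_CHECKLOCKTIMEVERIFY, 75=OP_DROP, 68=OP_ENDIF, ac=OP_CHECKSIG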
rpc_result = self.nodes[0].decodescript(cltv_script)
assert_equal('OP_IF ' + public_key + ' OP_CHECKSIGVERIFY OP_ELSE 500000 OP_CHECKLOCKTIMEVERIFY OP_DROP OP_ENDIF ' + public_key + ' OP_CHECKSIG', rpc_result['asm'])
# CLTV script in P2WSH
cltv_script_hash = bytes_to_hex_str(sha256(hex_str_to_bytes(cltv_script)))
assert_equal('0 ' + cltv_script_hash, rpc_result['segwit']['asm'])
# 7) P2PK scriptPubKey
# <pubkey> OP_CHECKSIG
rpc_result = self.nodes[0].decodescript(push_uncompressed_public_key + 'ac')
assert_equal(uncompressed_public_key + ' OP_CHECKSIG', rpc_result['asm'])
# uncompressed pubkeys are invalid for checksigs in segwit scripts.
# decodescript should not return a P2WPKH equivalent.
assert 'segwit' not in rpc_result
# 8) multisig scriptPubKey with an uncompressed pubkey
# <m> <A pubkey> <B pubkey> <n> OP_CHECKMULTISIG
# just imagine that the pub keys used below are different.
# the purpose of this test is to check that a segwit script is not returned for bare multisig scripts
# with an uncompressed pubkey in them.
rpc_result = self.nodes[0].decodescript('52' + push_public_key + push_uncompressed_public_key +'52ae')
assert_equal('2 ' + public_key + ' ' + uncompressed_public_key + ' 2 OP_CHECKMULTISIG', rpc_result['asm'])
# uncompressed pubkeys are invalid for checksigs in segwit scripts.
# decodescript should not return a P2WPKH equivalent.
assert 'segwit' not in rpc_result
# 9) P2WPKH scriptpubkey
# 0 <PubKeyHash>
rpc_result = self.nodes[0].decodescript('00' + push_public_key_hash)
assert_equal('0 ' + public_key_hash, rpc_result['asm'])
# segwit scripts do not work nested into each other.
# a nested segwit script should not be returned in the results.
assert 'segwit' not in rpc_result
# 10) P2WSH scriptpubkey
# 0 <ScriptHash>
# even though this hash is of a P2PK script which is better used as bare P2WPKH, it should not matter
# for the purpose of this test.
rpc_result = self.nodes[0].decodescript('0020' + p2wsh_p2pk_script_hash)
assert_equal('0 ' + p2wsh_p2pk_script_hash, rpc_result['asm'])
# segwit scripts do not work nested into each other.
# a nested segwit script should not be returned in the results.
assert 'segwit' not in rpc_result
def decoderawtransaction_asm_sighashtype(self):
"""Test decoding scripts via RPC command "decoderawtransaction".
This test is in with the "decodescript" tests because they are testing the same "asm" script decodes.
"""
# this test case uses a random plain vanilla mainnet transaction with a single P2PKH input and output
tx = '0100000001696a20784a2c70143f634e95227dbdfdf0ecd51647052e70854512235f5986ca010000008a47304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb014104d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536ffffffff0100e1f505000000001976a914eb6c6e0cdb2d256a32d97b8df1fc75d1920d9bca88ac00000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb[ALL] 04d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536', rpc_result['vin'][0]['scriptSig']['asm'])
# this test case uses a mainnet transaction that has a P2SH input and both P2PKH and P2SH outputs.
# it's from James D'Angelo's awesome introductory videos about multisig: https://www.youtube.com/watch?v=zIbUSaZBJgU and https://www.youtube.com/watch?v=OSA1pwlaypc
# verify that we have not altered scriptPubKey decoding.
tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914dc863734a218bfe83ef770ee9d41a27f824a6e5688acee2a02000000000017a9142a5edea39971049a540474c6a99edf0aa4074c588700000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('8e3730608c3b0bb5df54f09076e196bc292a8e39a78e73b44b6ba08c78f5cbb0', rpc_result['txid'])
assert_equal('0 3045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea[ALL] 3045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75[ALL] 5221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53ae', rpc_result['vin'][0]['scriptSig']['asm'])
assert_equal('OP_DUP OP_HASH160 dc863734a218bfe83ef770ee9d41a27f824a6e56 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])
assert_equal('OP_HASH160 2a5edea39971049a540474c6a99edf0aa4074c58 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])
txSave = CTransaction()
txSave.deserialize(BytesIO(hex_str_to_bytes(tx)))
# make sure that a specifically crafted op_return value will not pass all the IsDERSignature checks and then get decoded as a sighash type
tx = '01000000015ded05872fdbda629c7d3d02b194763ce3b9b1535ea884e3c8e765d42e316724020000006b48304502204c10d4064885c42638cbff3585915b322de33762598321145ba033fc796971e2022100bb153ad3baa8b757e30a2175bd32852d2e1cb9080f84d7e32fcdfd667934ef1b012103163c0ff73511ea1743fb5b98384a2ff09dd06949488028fd819f4d83f56264efffffffff0200000000000000000b6a0930060201000201000180380100000000001976a9141cabd296e753837c086da7a45a6c2fe0d49d7b7b88ac00000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('OP_RETURN 300602010002010001', rpc_result['vout'][0]['scriptPubKey']['asm'])
# verify that we have not altered scriptPubKey processing even of a specially crafted P2PKH pubkeyhash and P2SH redeem script hash that is made to pass the der signature checks
tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914301102070101010101010102060101010101010188acee2a02000000000017a91430110207010101010101010206010101010101018700000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('OP_DUP OP_HASH160 3011020701010101010101020601010101010101 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])
assert_equal('OP_HASH160 3011020701010101010101020601010101010101 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])
# some more full transaction tests of varying specific scriptSigs. used instead of
# tests in decodescript_script_sig because the decodescript RPC is specifically
# for working on scriptPubKeys (argh!).
push_signature = bytes_to_hex_str(txSave.vin[0].scriptSig)[2:(0x48*2+4)]
signature = push_signature[2:]
der_signature = signature[:-2]
signature_sighash_decoded = der_signature + '[ALL]'
signature_2 = der_signature + '82'
push_signature_2 = '48' + signature_2
signature_2_sighash_decoded = der_signature + '[NONE|ANYONECANPAY]'
# 1) P2PK scriptSig
txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature)
rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
assert_equal(signature_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
# make sure that the sighash decodes come out correctly for a more complex / lesser used case.
txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature_2)
rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
assert_equal(signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
# 2) multisig scriptSig
txSave.vin[0].scriptSig = hex_str_to_bytes('00' + push_signature + push_signature_2)
rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
assert_equal('0 ' + signature_sighash_decoded + ' ' + signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
# 3) test a scriptSig that contains more than push operations.
# in fact, it contains an OP_RETURN with data specially crafted to cause improper decode if the code does not catch it.
txSave.vin[0].scriptSig = hex_str_to_bytes('6a143011020701010101010101020601010101010101')
rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
assert_equal('OP_RETURN 3011020701010101010101020601010101010101', rpc_result['vin'][0]['scriptSig']['asm'])
def run_test(self):
self.decodescript_script_sig()
self.decodescript_script_pub_key()
self.decoderawtransaction_asm_sighashtype()
if __name__ == '__main__':
DecodeScriptTest().main()
| 71.255319
| 761
| 0.760287
|
0dceb2da1992b75d3ed0c0ea2cd7c49fae19753f
| 2,599
|
py
|
Python
|
src/graia/amnesia/builtins/uvicorn.py
|
GraiaProject/Amnesia
|
d48d3084f776f788767939d73774146086358887
|
[
"MIT"
] | 2
|
2022-03-13T09:12:44.000Z
|
2022-03-28T10:53:06.000Z
|
src/graia/amnesia/builtins/uvicorn.py
|
GraiaProject/Amnesia
|
d48d3084f776f788767939d73774146086358887
|
[
"MIT"
] | null | null | null |
src/graia/amnesia/builtins/uvicorn.py
|
GraiaProject/Amnesia
|
d48d3084f776f788767939d73774146086358887
|
[
"MIT"
] | null | null | null |
import asyncio
import logging
from launart.manager import Launart
from launart.service import Service
from launart.utilles import wait_fut
from loguru import logger
from uvicorn import Config, Server
from graia.amnesia.builtins.common import ASGIHandlerProvider
class LoguruHandler(logging.Handler):
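    # Forwards records from the standard logging module to loguru, keeping the original level and caller location.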
def emit(self, record: logging.LogRecord) -> None:
try:
level = logger.level(record.levelname).name
except ValueError:
level = str(record.levelno)
frame, depth = logging.currentframe(), 2
while frame and frame.f_code.co_filename == logging.__file__:
frame = frame.f_back
depth += 1
logger.opt(depth=depth, exception=record.exc_info).log(
level,
record.getMessage(),
)
class WithoutSigHandlerServer(Server):
def install_signal_handlers(self) -> None:
return
class UvicornService(Service):
supported_interface_types = set()
supported_description_types = set()
id = "http.asgi_runner"
server: Server
host: str
port: int
def __init__(self, host: str = "127.0.0.1", port: int = 8000):
self.host = host
self.port = port
super().__init__()
def get_interface(self, interface_type):
pass
@property
def required(self):
return {"http.universal_server"}
@property
def stages(self):
return {"preparing", "cleanup"}
async def launch(self, mgr: Launart):
async with self.stage("preparing"):
asgi_handler = mgr.get_interface(ASGIHandlerProvider).get_asgi_handler()
self.server = WithoutSigHandlerServer(Config(asgi_handler, host=self.host, port=self.port))
            # TODO: give users more control over the uvicorn Config options.
PATCHES = "uvicorn.error", "uvicorn.asgi", "uvicorn.access", ""
level = logging.getLevelName(20) # default level for uvicorn
logging.basicConfig(handlers=[LoguruHandler()], level=level)
for name in PATCHES:
target = logging.getLogger(name)
target.handlers = [LoguruHandler(level=level)]
target.propagate = False
serve_task = asyncio.create_task(self.server.serve())
async with self.stage("cleanup"):
logger.warning("try to shutdown uvicorn server...")
self.server.should_exit = True
await wait_fut([serve_task, asyncio.sleep(10)], return_when=asyncio.FIRST_COMPLETED)
if not serve_task.done():
logger.warning("timeout, force exit uvicorn server...")
| 32.898734
| 103
| 0.640246
|
92c597bc083c0998370be284591335cb2f8261cf
| 1,973
|
py
|
Python
|
SEM-internal-cell-texture/analysis_scripts/3_feature_collection.py
|
ocarmo/EMP1-trafficking_PTP7-analysis
|
54138d63ed6bc60033d3e2412d373530f888ed86
|
[
"MIT"
] | null | null | null |
SEM-internal-cell-texture/analysis_scripts/3_feature_collection.py
|
ocarmo/EMP1-trafficking_PTP7-analysis
|
54138d63ed6bc60033d3e2412d373530f888ed86
|
[
"MIT"
] | null | null | null |
SEM-internal-cell-texture/analysis_scripts/3_feature_collection.py
|
ocarmo/EMP1-trafficking_PTP7-analysis
|
54138d63ed6bc60033d3e2412d373530f888ed86
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# import seaborn as sns
import skimage.io
from skimage import measure
import functools
from loguru import logger
logger.info('Import OK')
image_folder = f'SEM-internal-cell-texture/python_results/SEM-internal-cell-texture_Analyzed/initial_cleanup_zoom/'
mask_folder = f'SEM-internal-cell-texture/python_results/SEM-internal-cell-texture_Analyzed/napari_masking/'
output_folder = f'SEM-internal-cell-texture/python_results/SEM-internal-cell-texture_Analyzed/feature_collection/'
if not os.path.exists(output_folder):
os.makedirs(output_folder)
def feature_extractor(mask, properties=False):
if not properties:
properties = ['area', 'coords', 'centroid', 'convex_area', 'eccentricity', 'euler_number', 'label', 'local_centroid', 'major_axis_length', 'minor_axis_length', 'orientation', 'perimeter', 'solidity']
return pd.DataFrame(skimage.measure.regionprops_table(mask, properties=properties))
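# Note: regionprops_table yields one row per labelled region, so e.g.
# feature_extractor(mask)[['label', 'area', 'eccentricity']] lists the label, pixel area
# and eccentricity of every masked knob (illustrative only).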
# ----------------Initialise file lists----------------
# read in masks
filtered_masks = {masks.replace('_mask.npy', ''): np.load(
f'{mask_folder}{masks}') for masks in os.listdir(f'{mask_folder}') if '.npy' in masks}
# ----------------collect feature information----------------
feature_information = []
for image_name, stack in filtered_masks.items():
logger.info(f'Processing {image_name}')
mask = (stack[1, :, :]).astype(int)
feature_properties = feature_extractor(mask)
feature_properties['ROI_type'] = 'knob'
# properties = pd.concat([feature_properties])
feature_properties['image_name'] = image_name
feature_information.append(feature_properties)
feature_information = pd.concat(feature_information)
logger.info('Completed feature collection')
feature_information.drop('coords', axis=1, inplace=True)
# ----------------save to csv----------------
feature_information.to_csv(f'{output_folder}feature_summary.csv')
| 38.686275
| 207
| 0.732387
|
67334cb6867d53406d0246c0bdfa59bacd9fac78
| 5,409
|
py
|
Python
|
src/server/fitbit.py
|
gyevnarb/IntelligentSleepAssistant
|
a167066df71d0f352e474fcd806857525a32e4f0
|
[
"MIT"
] | 1
|
2018-03-11T12:22:19.000Z
|
2018-03-11T12:22:19.000Z
|
src/server/fitbit.py
|
gyevnarb/IntelligentSleepAssistant
|
a167066df71d0f352e474fcd806857525a32e4f0
|
[
"MIT"
] | null | null | null |
src/server/fitbit.py
|
gyevnarb/IntelligentSleepAssistant
|
a167066df71d0f352e474fcd806857525a32e4f0
|
[
"MIT"
] | null | null | null |
"""
A Python library for accessing the FitBit API.
This library provides a wrapper to the FitBit API and does not provide storage of tokens or caching if that is required.
Most of the code has been adapted from: https://groups.google.com/group/fitbit-api/browse_thread/thread/0a45d0ebed3ebccb
5/22/2012 - JCF - Updated to work with python-oauth2 https://github.com/dgouldin/python-oauth2
10/22/2015 - JG - Removed use of oauth2 library (singing is not necessary anymore),
updated to use /oauth2/ authentication infrastructure to get access to more stats.
"""
import os, base64, requests, urllib.parse
class Fitbit():
# All information must be as on the https://dev.fitbit.com/apps page.
CLIENT_ID = '22CNND'
CLIENT_SECRET = 'd50713c7425870e331710a35954fb293'
REDIRECT_URI = 'http://127.0.0.1:5000'
# Decide which information the FitBit.py should have access to.
# Options: 'activity', 'heartrate', 'location', 'nutrition',
# 'profile', 'settings', 'sleep', 'social', 'weight'
API_SCOPES = ('activity', 'heartrate', 'location', 'nutrition', 'profile', 'settings', 'sleep', 'social', 'weight')
# These settings should probably not be changed.
API_SERVER = 'api.fitbit.com'
WWW_SERVER = 'www.fitbit.com'
AUTHORIZE_URL = 'https://%s/oauth2/authorize' % WWW_SERVER
TOKEN_URL = 'https://%s/oauth2/token' % API_SERVER
def GetAuthorizationUri(self):
# Parameters for authorization, make sure to select
params = {
'client_id': self.CLIENT_ID,
'response_type': 'code',
'scope': ' '.join(self.API_SCOPES),
'redirect_uri': self.REDIRECT_URI
}
# Encode parameters and construct authorization url to be returned to user.
        urlparams = urllib.parse.urlencode(params)
return "%s?%s" % (self.AUTHORIZE_URL, urlparams)
    # Tokens are requested based on the access code. The access code must be fresh (10 minutes)
def GetAccessToken(self, access_code):
# Construct the authentication header
        auth_header = base64.b64encode(
            (self.CLIENT_ID + ':' + self.CLIENT_SECRET).encode('utf-8')).decode('utf-8')
headers = {
'Authorization': 'Basic %s' % auth_header,
'Content-Type' : 'application/x-www-form-urlencoded'
}
        # Parameters for requesting tokens (auth + refresh)
params = {
'code': access_code,
'grant_type': 'authorization_code',
'client_id': self.CLIENT_ID,
'redirect_uri': self.REDIRECT_URI
}
# Place request
resp = requests.post(self.TOKEN_URL, data=params, headers=headers)
status_code = resp.status_code
resp = resp.json()
if status_code != 200:
raise Exception("Something went wrong exchanging code for token (%s): %s" % (resp['errors'][0]['errorType'], resp['errors'][0]['message']))
# Strip the goodies
token = dict()
token['access_token'] = resp['access_token']
token['refresh_token'] = resp['refresh_token']
return token
    # Get new tokens if the authentication token has expired
def RefAccessToken(self, token):
# Construct the authentication header
        auth_header = base64.b64encode(
            (self.CLIENT_ID + ':' + self.CLIENT_SECRET).encode('utf-8')).decode('utf-8')
headers = {
'Authorization': 'Basic %s' % auth_header,
'Content-Type' : 'application/x-www-form-urlencoded'
}
# Set up parameters for refresh request
params = {
'grant_type': 'refresh_token',
'refresh_token': token['refresh_token']
}
# Place request
resp = requests.post(self.TOKEN_URL, data=params, headers=headers)
status_code = resp.status_code
resp = resp.json()
if status_code != 200:
raise Exception("Something went wrong refreshing (%s): %s" % (resp['errors'][0]['errorType'], resp['errors'][0]['message']))
# Distil
token['access_token'] = resp['access_token']
token['refresh_token'] = resp['refresh_token']
return token
# Place api call to retrieve data
def ApiCall(self, token, apiCall='/1/user/-/activities/log/steps/date/today/1d.json'):
# Other API Calls possible, or read the FitBit documentation for the full list
# (https://dev.fitbit.com/docs/), e.g.:
# apiCall = '/1/user/-/devices.json'
# apiCall = '/1/user/-/profile.json'
# apiCall = '/1/user/-/activities/date/2015-10-22.json'
headers = {
'Authorization': 'Bearer %s' % token['access_token']
}
final_url = 'https://' + self.API_SERVER + apiCall
resp = requests.get(final_url, headers=headers)
status_code = resp.status_code
resp = resp.json()
resp['token'] = token
if status_code == 200:
return resp
elif status_code == 401:
print "The access token you provided has been expired let me refresh that for you."
# Refresh the access token with the refresh token if expired. Access tokens should be good for 1 hour.
token = self.RefAccessToken(token)
return self.ApiCall(token, apiCall)
else:
raise Exception("Something went wrong requesting (%s): %s" % (resp['errors'][0]['errorType'], resp['errors'][0]['message']))
| 38.635714
| 151
| 0.622111
|
329399076b083b5ff57cf7b7ebe4fb2d666411ed
| 698
|
py
|
Python
|
ros/src/pybullet_ros/plugins/joint_state_pub.py
|
domire8/pybullet_ros
|
3edc1e2bf8a8a9a9f3557e283ade2602d3905e2c
|
[
"MIT"
] | null | null | null |
ros/src/pybullet_ros/plugins/joint_state_pub.py
|
domire8/pybullet_ros
|
3edc1e2bf8a8a9a9f3557e283ade2602d3905e2c
|
[
"MIT"
] | 1
|
2021-05-20T12:32:00.000Z
|
2021-05-20T12:32:00.000Z
|
ros/src/pybullet_ros/plugins/joint_state_pub.py
|
domire8/pybullet_ros
|
3edc1e2bf8a8a9a9f3557e283ade2602d3905e2c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
Query robot state and publish position, velocity and effort values to /robot_name/joint_states.
"""
import rospy
from sensor_msgs.msg import JointState
class JointStatePub:
def __init__(self, pybullet, robot):
self._pb = pybullet
self._robot = robot
self._publisher = rospy.Publisher(robot.namespace + "joint_states", JointState, queue_size=1)
def execute(self):
"""
Execute the plugin. This function is called from main update loop in the pybullet ros node.
"""
joint_msg = self._robot.get_joint_state_msg()
joint_msg.header.stamp = rospy.Time.now()
self._publisher.publish(joint_msg)
| 29.083333
| 101
| 0.687679
|
d8757ff1a9769875527b8b6429f74bb123ecda56
| 9,374
|
py
|
Python
|
bode.py
|
vanish125/DS1054_BodePlotter
|
569d3b97d8a0a657dd27d7b30b152fa78203995b
|
[
"MIT"
] | null | null | null |
bode.py
|
vanish125/DS1054_BodePlotter
|
569d3b97d8a0a657dd27d7b30b152fa78203995b
|
[
"MIT"
] | null | null | null |
bode.py
|
vanish125/DS1054_BodePlotter
|
569d3b97d8a0a657dd27d7b30b152fa78203995b
|
[
"MIT"
] | null | null | null |
# bode.py
# Program to plot bode diagrams using a DS1054Z and a jds6600
# Jan Böhmer (c) 2019
# published under MIT license. See file "LICENSE" for full license text
# from jds6600 import * 1st try
import fygen
import numpy as np
import time
from ds1054z import DS1054Z
import argparse
import dft
import matplotlib.pyplot as plt
import scipy.signal
parser = argparse.ArgumentParser(description="This program plots Bode diagrams of a DUT using a JDS6600 and a Rigol DS1054Z")
parser.add_argument('MIN_FREQ', metavar='min', type=float, help="The minimum frequency that should be tested")
parser.add_argument('MAX_FREQ', metavar='max', type=float, help="The maximum frequency that should be tested")
parser.add_argument('COUNT', metavar='N', nargs="?", default=50, type=int, help='The number of frequencies that should be probed')
parser.add_argument("--awg_port", dest="AWG_PORT", default="/dev/ttyUSB0", help="The serial port where the AWG is connected to")
parser.add_argument("--ds_ip", default="auto", dest="OSC_IP", help="The IP address of the DS1054Z. Set to auto, to auto discover the oscilloscope via Zeroconf")
parser.add_argument("--linear", dest="LINEAR", action="store_true", help="Set this flag to use a linear scale")
parser.add_argument("--awg_voltage", dest="VOLTAGE", default=5, type=float, help="The amplitude of the signal used for the generator")
parser.add_argument("--awg_offset", dest="OFFSET", default=0, type=float, help="The offset of the signal used for the generator")
parser.add_argument("--step_time", dest="TIMEOUT", default=0.00, type=float, help="The pause between to measurements in ms.")
parser.add_argument("--phase", dest="PHASE", action="store_true", help="Set this flag if you want to plot the Phase diagram too")
parser.add_argument("--no_smoothing", dest="SMOOTH", action="store_false", help="Set this to disable the smoothing of the data with a Savitzky–Golay filter")
parser.add_argument("--use_manual_settings", dest="MANUAL_SETTINGS", action="store_true", help="When this option is set, the options on the oscilloscope for voltage and time base are not changed by this program.")
parser.add_argument("--output", dest="file", type=argparse.FileType("w"), help="Write the measured data to the given CSV file.")
parser.add_argument("--no_plots", dest="PLOTS", action="store_false", help="When this option is set no plots are shown. Useful in combination with --output")
parser.add_argument("--normalize", dest="NORMALIZE", action="store_true", help="Set this option if you dont want to get the absolute voltage levels on the output, but the value normalized on the input level.")
parser.add_argument("--use_dft", dest="DFT", action="store_true", help="Use Discrete Fourier Transform on raw data; more accurate but slower.")
args = parser.parse_args()
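# Example invocation (illustrative only): sweep 50 points from 100 Hz to 1 MHz, plot the
# phase as well, and write the measurements to sweep.csv:
#   python bode.py 100 1e6 50 --awg_port /dev/ttyUSB0 --phase --output sweep.csv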
if args.OSC_IP == "auto":
import ds1054z.discovery
results = ds1054z.discovery.discover_devices()
if not results:
print("No Devices found! Try specifying the IP Address manually.")
exit()
OSC_IP = results[0].ip
print("Found Oscilloscope! Using IP Address " + OSC_IP)
else:
OSC_IP = args.OSC_IP
DEFAULT_PORT = args.AWG_PORT
MIN_FREQ = args.MIN_FREQ
MAX_FREQ = args.MAX_FREQ
STEP_COUNT = args.COUNT
# Do some validity checks
if MIN_FREQ < 0 or MAX_FREQ < 0:
exit("Frequencies has to be greater 0!")
if MIN_FREQ >= MAX_FREQ:
exit("MAX_FREQ has to be greater then min frequency")
if STEP_COUNT <= 0:
exit("The step count has to be positive")
TIMEOUT = args.TIMEOUT
AWG_CHANNEL = 0 # //channel 1
AWG_VOLT = args.VOLTAGE
AWG_OFFSET = args.OFFSET
print("Init AWG")
# awg = jds6600(DEFAULT_PORT)
awg = fygen.FYGen(DEFAULT_PORT)
# AWG_MAX_FREQ = awg.getinfo_devicetype()
AWG_MODEL = awg.get_model()
AWG_MAX_FREQ = float(AWG_MODEL[7:9])
print("Maximum Generator Frequency: %d MHz"% AWG_MAX_FREQ)
if MAX_FREQ > AWG_MAX_FREQ * 1e6:
exit("Your MAX_FREQ is higher than your AWG can achieve!")
# We use sine for sweep
# awg.setwaveform(AWG_CHANNEL, "sine")
awg.set(AWG_CHANNEL, enable=True, wave='sin')
# Init scope
scope = DS1054Z(OSC_IP)
# Set some options for the oscilloscope
if not args.MANUAL_SETTINGS:
# Center vertically
scope.set_channel_offset(1, 0)
scope.set_channel_offset(2, 0)
# Display one period in 2 divs
period = (1/MIN_FREQ) / 2
scope.timebase_scale = period
scope.run()
# Set the sensitivity according to the selected voltage
scope.set_channel_scale(1, args.VOLTAGE / 3, use_closest_match=True)
# Be a bit more pessimistic for the default voltage, because we run into problems if it is too confident
scope.set_channel_scale(2, args.VOLTAGE / 2, use_closest_match=True)
freqs = np.linspace(MIN_FREQ, MAX_FREQ, num=STEP_COUNT)
if not args.LINEAR:
freqs = np.logspace(np.log10(MIN_FREQ), np.log10(MAX_FREQ), num=STEP_COUNT)
else:
freqs = np.linspace(MIN_FREQ, MAX_FREQ, num=STEP_COUNT)
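# Without --linear the sweep points are spaced evenly in log10(frequency); with --linear they are spaced evenly in frequency.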
# Set amplitude
awg.set(AWG_CHANNEL, volts=AWG_VOLT, offset_volts=AWG_OFFSET, enable=True)
volts = list()
phases = list()
# We have to wait a bit before we measure the first value
awg.set(AWG_CHANNEL, freq_hz=float(freqs[0]), enable=True)
time.sleep(0.3)
if not args.MANUAL_SETTINGS:  # initialize voltage reading to see if scope is set in correct vertical scale, in case vout is bigger than vin
scope.display_channel(1, enable=True)
scope.display_channel(2, enable=True)
volt = scope.get_channel_measurement(2, 'vpp')
vscalelist = [0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10]
scopevscale = scope.get_channel_scale(2)
index = vscalelist.index(scopevscale)
while volt is None: # increase voltage scale until vpp is read
#print("vscale ", vscalelist[index])
scope.set_channel_scale(2, vscalelist[index] , use_closest_match=True)
time.sleep(1)
volt = scope.get_channel_measurement(2, 'vpp')
print("vpp: ", volt)
if index < 9:
index = index + 3
else:
index = 12
for freq in freqs:
awg.set(AWG_CHANNEL, freq_hz=float(freq), enable=True)
time.sleep(TIMEOUT)
if args.DFT:
volt0, volt, phase = dft.measure_with_dft(scope, freq)
phases.append(phase)
else:
volt0 = scope.get_channel_measurement(1, 'vpp')
volt = scope.get_channel_measurement(2, 'vpp')
phase = scope.get_channel_measurement('CHAN1, CHAN2', 'rphase')
if phase:
phase = -1*phase
phases.append(phase)
if not args.NORMALIZE:
volts.append(volt)
else:
if volt0 < 0.01:
print("Input voltage is very low, check your connections and retry")
exit()
else:
volts.append(volt/volt0)
# Use a better timebase
if not args.MANUAL_SETTINGS:
# Display one period in 2 divs
period = (1/freq) / 2
scope.timebase_scale = period
# Use better voltage scale for next time
if volt:
scope.set_channel_scale(2, volt / 2, use_closest_match=True)
else:
scope.set_channel_scale(2, AWG_VOLT / 2, use_closest_match=True)
print(freq, " volt0: ", volt0, " volt: ", volt, " phase: ", phase)
# Write data to file if needed
if args.file:
if args.PHASE:
args.file.write("Frequency in Hz; Amplitude in V; Phase in Degree\n")
else:
args.file.write("Frequency in Hz; Amplitude in V\n")
for n in range(0, len(freqs)):
if volts[n]:
volt = volts[n]
else:
volt = float("nan")
if args.PHASE:
if phases[n]:
phase = phases[n]
else:
                phase = float("nan")
args.file.write("%f;%f;%f \n"%(freqs[n], volt, phase))
else:
args.file.write("%f;%f \n"%(freqs[n], volt))
args.file.close()
# Plot graphics
if not args.PLOTS:
exit()
plt.plot(freqs, volts, label="Measured data")
if args.SMOOTH:
try:
        yhat = scipy.signal.savgol_filter(volts, 9, 3)  # window size 9, polynomial order 3
plt.plot(freqs, yhat, "--", color="red", label="Smoothed data")
except:
print("Error during smoothing amplitude data")
plt.title("Amplitude diagram (N=%d)"%STEP_COUNT)
plt.xlabel("Frequency [Hz]")
plt.ylabel("Voltage Peak-Peak [V]")
plt.legend()
# Set log x axis
if not args.LINEAR:
plt.xscale("log")
plt.show()
if args.PHASE:
try:
plt.plot(freqs, phases)
plt.title("Phase diagram (N=%d)"%STEP_COUNT)
plt.ylabel("Phase [°]")
plt.xlabel("Frequency [Hz]")
except:
print("Phase was not correctly measured, check your connections")
if args.SMOOTH:
try:
            yhat = scipy.signal.savgol_filter(phases, 9, 3)  # window size 9, polynomial order 3
plt.plot(freqs, yhat, "--", color="red", label="Smoothed data")
except:
print("Error during smoothing phase data")
# Set log x axis
if not args.LINEAR:
plt.xscale("log")
plt.show()
| 36.053846
| 214
| 0.65607
|
9a9849da8f7906bc7a6a0f801a3574242e9b1eb0
| 2,521
|
py
|
Python
|
docs/conf.py
|
mozkzki/moz-youtube
|
e74cb17a618d5e73af5cf83d50ec349ac75e5113
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
mozkzki/moz-youtube
|
e74cb17a618d5e73af5cf83d50ec349ac75e5113
|
[
"MIT"
] | 1
|
2021-09-18T20:08:07.000Z
|
2021-09-18T20:08:07.000Z
|
docs/conf.py
|
mozkzki/moz-youtube
|
e74cb17a618d5e73af5cf83d50ec349ac75e5113
|
[
"MIT"
] | null | null | null |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
import sys
sys.path.insert(0, "../src/moz_youtube")
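# Note: this relative path is resolved against the directory sphinx-build is run from;
# os.path.abspath (as suggested above) would make it independent of the working directory.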
# -- Project information -----------------------------------------------------
project = "moz_youtube"
copyright = "2021, mozkzki"
author = "mozkzki"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.viewcode",
"sphinx.ext.todo",
"sphinx.ext.napoleon",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "en"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = "alabaster"
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
| 33.613333
| 79
| 0.652519
|
4fc1ac5d29967dda5fac5876c6d7667916683f8f
| 106,763
|
py
|
Python
|
pysnmp_mibs/DOCS-CABLE-DEVICE-MIB.py
|
jackjack821/pysnmp-mibs
|
9835ea0bb2420715caf4ee9aaa07d59bb263acd6
|
[
"BSD-2-Clause"
] | 6
|
2017-04-21T13:48:08.000Z
|
2022-01-06T19:42:52.000Z
|
pysnmp_mibs/DOCS-CABLE-DEVICE-MIB.py
|
jackjack821/pysnmp-mibs
|
9835ea0bb2420715caf4ee9aaa07d59bb263acd6
|
[
"BSD-2-Clause"
] | 1
|
2020-05-05T16:42:25.000Z
|
2020-05-05T16:42:25.000Z
|
pysnmp_mibs/DOCS-CABLE-DEVICE-MIB.py
|
jackjack821/pysnmp-mibs
|
9835ea0bb2420715caf4ee9aaa07d59bb263acd6
|
[
"BSD-2-Clause"
] | 6
|
2020-02-08T20:28:49.000Z
|
2021-09-14T13:36:46.000Z
|
#
# PySNMP MIB module DOCS-CABLE-DEVICE-MIB (http://pysnmp.sf.net)
# ASN.1 source http://mibs.snmplabs.com:80/asn1/DOCS-CABLE-DEVICE-MIB
# Produced by pysmi-0.0.7 at Sun Feb 14 00:09:24 2016
# On host bldfarm platform Linux version 4.1.13-100.fc21.x86_64 by user goose
# Using Python version 3.5.0 (default, Jan 5 2016, 17:11:52)
#
( ObjectIdentifier, Integer, OctetString, ) = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
( NamedValues, ) = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
( ConstraintsIntersection, SingleValueConstraint, ConstraintsUnion, ValueSizeConstraint, ValueRangeConstraint, ) = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "SingleValueConstraint", "ConstraintsUnion", "ValueSizeConstraint", "ValueRangeConstraint")
( diffServMIBCounterGroup, diffServClfrStatus, diffServMIBMultiFieldClfrGroup, diffServAlgDropStatus, diffServMIBDscpMarkActGroup, diffServClfrElementStorage, diffServDataPathStatus, diffServMIBActionGroup, diffServMultiFieldClfrDstAddr, diffServMIBDataPathGroup, diffServActionStorage, diffServClfrStorage, diffServClfrElementStatus, diffServMultiFieldClfrStorage, diffServMIBAlgDropGroup, diffServMultiFieldClfrSrcAddr, diffServMIBClfrGroup, diffServDataPathStorage, diffServAlgDropType, diffServAlgDropStorage, diffServMultiFieldClfrAddrType, diffServCountActStorage, diffServMIBClfrElementGroup, ) = mibBuilder.importSymbols("DIFFSERV-MIB", "diffServMIBCounterGroup", "diffServClfrStatus", "diffServMIBMultiFieldClfrGroup", "diffServAlgDropStatus", "diffServMIBDscpMarkActGroup", "diffServClfrElementStorage", "diffServDataPathStatus", "diffServMIBActionGroup", "diffServMultiFieldClfrDstAddr", "diffServMIBDataPathGroup", "diffServActionStorage", "diffServClfrStorage", "diffServClfrElementStatus", "diffServMultiFieldClfrStorage", "diffServMIBAlgDropGroup", "diffServMultiFieldClfrSrcAddr", "diffServMIBClfrGroup", "diffServDataPathStorage", "diffServAlgDropType", "diffServAlgDropStorage", "diffServMultiFieldClfrAddrType", "diffServCountActStorage", "diffServMIBClfrElementGroup")
( InterfaceIndexOrZero, ) = mibBuilder.importSymbols("IF-MIB", "InterfaceIndexOrZero")
( InetAddress, InetAddressType, ) = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddress", "InetAddressType")
( ZeroBasedCounter32, ) = mibBuilder.importSymbols("RMON2-MIB", "ZeroBasedCounter32")
( SnmpAdminString, ) = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
( ObjectGroup, NotificationGroup, ModuleCompliance, ) = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "NotificationGroup", "ModuleCompliance")
( mib_2, Counter64, MibIdentifier, ModuleIdentity, Unsigned32, TimeTicks, Counter32, ObjectIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, zeroDotZero, iso, Integer32, NotificationType, Bits, IpAddress, Gauge32, ) = mibBuilder.importSymbols("SNMPv2-SMI", "mib-2", "Counter64", "MibIdentifier", "ModuleIdentity", "Unsigned32", "TimeTicks", "Counter32", "ObjectIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "zeroDotZero", "iso", "Integer32", "NotificationType", "Bits", "IpAddress", "Gauge32")
( StorageType, TextualConvention, DateAndTime, RowStatus, DisplayString, RowPointer, TruthValue, ) = mibBuilder.importSymbols("SNMPv2-TC", "StorageType", "TextualConvention", "DateAndTime", "RowStatus", "DisplayString", "RowPointer", "TruthValue")
docsDev = ModuleIdentity((1, 3, 6, 1, 2, 1, 69)).setRevisions(("2006-12-20 00:00", "1999-08-19 00:00",))
if mibBuilder.loadTexts: docsDev.setLastUpdated('200612200000Z')
if mibBuilder.loadTexts: docsDev.setOrganization('IETF IP over Cable Data Network\n Working Group')
if mibBuilder.loadTexts: docsDev.setContactInfo(' Rich Woundy\n Postal: Comcast Cable\n 27 Industrial Avenue\n Chelmsford, MA 01824 U.S.A.\n Phone: +1 978 244 4010\n E-mail: richard_woundy@cable.comcast.com\n\n Kevin Marez\n Postal: Motorola Corporation\n 6450 Sequence Drive\n San Diego, CA 92121 U.S.A.\n Phone: +1 858 404 3785\n E-mail: kevin.marez@motorola.com\n\n IETF IPCDN Working Group\n General Discussion: ipcdn@ietf.org\n Subscribe: http://www.ietf.org/mailman/listinfo/ipcdn\n Archive: ftp://ftp.ietf.org/ietf-mail-archive/ipcdn\n Co-chairs: Richard Woundy,\n richard_woundy@cable.comcast.com\n Jean-Francois Mule,\n jf.mule@cablelabs.com')
if mibBuilder.loadTexts: docsDev.setDescription('This is the MIB Module for DOCSIS-compliant cable modems\n\n and cable-modem termination systems.\n\n Copyright (C) The IETF Trust (2006). This version\n of this MIB module was published in RFC 4639; for full\n legal notices see the RFC itself.')
docsDevMIBObjects = MibIdentifier((1, 3, 6, 1, 2, 1, 69, 1))
docsDevBase = MibIdentifier((1, 3, 6, 1, 2, 1, 69, 1, 1))
docsDevRole = MibScalar((1, 3, 6, 1, 2, 1, 69, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3,))).clone(namedValues=NamedValues(("cm", 1), ("cmtsActive", 2), ("cmtsBackup", 3),))).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsDevRole.setDescription("Defines the current role of this device. cm(1) is a\n Cable Modem, cmtsActive(2) is a Cable Modem Termination\n System that is controlling the system of cable modems,\n and cmtsBackup(3) is a CMTS that is currently connected\n but is not controlling the system (not currently used).\n\n In general, if this device is a 'cm', its role will not\n change during operation or between reboots. If the\n device is a 'cmts' it may change between cmtsActive and\n cmtsBackup and back again during normal operation. NB:\n At this time, the DOCSIS standards do not support the\n concept of a backup CMTS, but cmtsBackup is included for\n completeness.")
docsDevDateTime = MibScalar((1, 3, 6, 1, 2, 1, 69, 1, 1, 2), DateAndTime()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: docsDevDateTime.setDescription("The current date and time, with time zone information\n (if known).\n\n If the real data and time cannot be determined, this\n shall represent elapsed time from boot relative to\n the standard epoch '1970-1-1,0:0:0.0'. In other\n words, if this agent has been up for 3 minutes and\n not been able to determine what the actual date and\n time are, this object will return the value\n '1970-1-1,0:03:0.0'.")
docsDevResetNow = MibScalar((1, 3, 6, 1, 2, 1, 69, 1, 1, 3), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: docsDevResetNow.setDescription('Setting this object to true(1) causes the device to\n reset. Reading this object always returns false(2).')
docsDevSerialNumber = MibScalar((1, 3, 6, 1, 2, 1, 69, 1, 1, 4), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsDevSerialNumber.setDescription("The manufacturer's serial number for this device.")
docsDevSTPControl = MibScalar((1, 3, 6, 1, 2, 1, 69, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3,))).clone(namedValues=NamedValues(("stEnabled", 1), ("noStFilterBpdu", 2), ("noStPassBpdu", 3),)).clone('noStFilterBpdu')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: docsDevSTPControl.setDescription('This object controls operation of the spanning tree\n protocol (as distinguished from transparent bridging).\n\n If set to stEnabled(1), then the spanning tree protocol\n is enabled, subject to bridging constraints.\n\n If noStFilterBpdu(2), then spanning tree is not active,\n and Bridge PDUs received are discarded.\n\n If noStPassBpdu(3), then spanning tree is not active,\n and Bridge PDUs are transparently forwarded.\n\n Note that a device need not implement all of these\n options, but that noStFilterBpdu(2) is required.')
docsDevIgmpModeControl = MibScalar((1, 3, 6, 1, 2, 1, 69, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2,))).clone(namedValues=NamedValues(("passive", 1), ("active", 2),)).clone('passive')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: docsDevIgmpModeControl.setDescription('This object controls the IGMP mode of operation for\n the CM or CMTS. In passive mode, the device forwards\n IGMP between interfaces as based on knowledge of\n Multicast Session activity on the subscriber side\n interface and the rules defined in the DOCSIS RFI\n specification. In active mode, the device terminates\n at and initiates IGMP through its interfaces as based\n on the knowledge of Multicast Session activity on the\n subscriber side interface.')
docsDevMaxCpe = MibScalar((1, 3, 6, 1, 2, 1, 69, 1, 1, 7), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0,255))).setUnits('CPEs').setMaxAccess("readonly")
if mibBuilder.loadTexts: docsDevMaxCpe.setDescription('The maximum number of CPEs that can be granted access\n through a CM during a CM epoch. This value can be\n obtained from the CM configuration file; however,\n it may be adjusted by the CM according to hardware or\n software limitations that have been imposed on the\n implementation.')
docsDevNmAccessTable = MibTable((1, 3, 6, 1, 2, 1, 69, 1, 2), )
if mibBuilder.loadTexts: docsDevNmAccessTable.setDescription('This table controls access to SNMP objects by network\n management stations. If the table is empty, access to\n SNMP objects is unrestricted. The objects in this table\n MUST NOT persist across reboots. The objects in this\n table are only accessible from cable devices that are\n not capable of operating in SNMP Coexistence mode\n (RFC 3584) or in SNMPv3 mode (RFC 3410).\n See the conformance section for\n details. Note that some devices are required by other\n specifications (e.g., the DOCSIS OSSIv1.1 specification)\n to support the legacy SNMPv1/v2c docsDevNmAccess mode\n for backward compatibility.\n\n This table is deprecated. Instead, use the SNMP\n coexistence MIBs from RFC 3584, the TARGET and\n NOTIFICATION MIBs from RFC 3413, and\n the View-Based Access Control Model (VACM) MIBs for\n all SNMP protocol versions from RFC 3415.')
docsDevNmAccessEntry = MibTableRow((1, 3, 6, 1, 2, 1, 69, 1, 2, 1), ).setIndexNames((0, "DOCS-CABLE-DEVICE-MIB", "docsDevNmAccessIndex"))
if mibBuilder.loadTexts: docsDevNmAccessEntry.setDescription('An entry describing access to SNMP objects by a\n particular network management station. An entry in\n this table is not readable unless the management station\n has read-write permission (either implicit if the table\n is empty, or explicit through an entry in this table).\n Entries are ordered by docsDevNmAccessIndex. The first\n\n matching entry (e.g., matching IP address and community\n string) is used to derive access.')
docsDevNmAccessIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1,2147483647)))
if mibBuilder.loadTexts: docsDevNmAccessIndex.setDescription('Index used to order the application of access\n entries.')
docsDevNmAccessIp = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 2, 1, 2), IpAddress().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsDevNmAccessIp.setDescription('The IP address (or subnet) of the network management\n station. The address 0.0.0.0 is defined to mean\n any Network Management Station (NMS). If traps are\n enabled for this entry, then the value must be the\n address of a specific device. Implementations MAY\n recognize 255.255.255.255 as equivalent to 0.0.0.0.')
docsDevNmAccessIpMask = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 2, 1, 3), IpAddress().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsDevNmAccessIpMask.setDescription('The IP subnet mask of the network management stations.\n If traps are enabled for this entry, then the value must\n be 0.0.0.0. Implementations MAY recognize\n 255.255.255.255 as equivalent to 0.0.0.0.')
docsDevNmAccessCommunity = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 2, 1, 4), OctetString().clone('public')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsDevNmAccessCommunity.setDescription('The community string to be matched for access by this\n entry. If set to a zero-length string, then any\n community string will match. When read, this object\n SHOULD return a zero-length string.')
docsDevNmAccessControl = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6,))).clone(namedValues=NamedValues(("none", 1), ("read", 2), ("readWrite", 3), ("roWithTraps", 4), ("rwWithTraps", 5), ("trapsOnly", 6),)).clone('read')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsDevNmAccessControl.setDescription("Specifies the type of access allowed to this NMS.\n Setting this object to none(1) causes the table entry\n to be destroyed. Read(2) allows access by 'get' and\n 'get-next' PDUs. ReadWrite(3) allows access by 'set' as\n well. RoWithtraps(4), rwWithTraps(5), and trapsOnly(6)\n control distribution of Trap PDUs transmitted by this\n device.")
docsDevNmAccessInterfaces = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 2, 1, 6), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1,32))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsDevNmAccessInterfaces.setDescription("Specifies the set of interfaces from which requests from\n this NMS will be accepted. Each octet within\n the value of this object specifies a set of eight\n\n interfaces, the first octet specifying ports 1\n through 8, the second octet specifying interfaces 9\n through 16, etc. Within each octet, the most\n significant bit represents the lowest numbered\n interface, and the least significant bit represents the\n highest numbered interface. Thus, each interface is\n represented by a single bit within the value of this\n object. If that bit has a value of '1' then that\n interface is included in the set.\n\n Note that entries in this table apply only to link-layer\n interfaces (e.g., Ethernet and CATV MAC). Bits\n representing upstream and downstream channel interfaces\n MUST NOT be set to '1'.\n\n Note that if bits corresponding to non-existing\n interfaces are set, the result is implementation\n specific.\n\n Note that according to the DOCSIS OSSIv1.1\n specification, when ifIndex '1' is included in the\n set, then this row applies to all CPE\n (customer-facing) interfaces.\n\n The size of this object is the minimum required to\n represent all configured interfaces for this device.")
docsDevNmAccessStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 2, 1, 7), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsDevNmAccessStatus.setDescription('Controls and reflects the status of rows in this\n table. Rows in this table may be created by either the\n create-and-go or create-and-wait paradigm. There is no\n restriction on changing values in a row of this table\n while the row is active.\n\n The following objects MUST have valid values before this\n object can be set to active: docsDevNmAccessIp,\n docsDevNmAccessStatus, docsDevNmAccessIpMask,\n docsDevNmAccessCommunity, docsDevNmAccessControl, and\n docsDevNmAccessInterfaces.')
docsDevNmAccessTrapVersion = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 2, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2,))).clone(namedValues=NamedValues(("disableSNMPv2trap", 1), ("enableSNMPv2trap", 2),)).clone('disableSNMPv2trap')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsDevNmAccessTrapVersion.setDescription('Specifies the TRAP version that is sent to this NMS.\n Setting this object to disableSNMPv2trap (1) causes the\n trap in SNMPv1 format to be sent to a particular NMS.\n Setting this object to enableSNMPv2trap (2) causes the\n trap in SNMPv2 format be sent to a particular NMS.')
docsDevSoftware = MibIdentifier((1, 3, 6, 1, 2, 1, 69, 1, 3))
docsDevSwServer = MibScalar((1, 3, 6, 1, 2, 1, 69, 1, 3, 1), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: docsDevSwServer.setDescription('The address of the TFTP server used for software\n upgrades. If the TFTP server is unknown or is a\n non-IPv4 address, return 0.0.0.0.\n\n This object is deprecated. See docsDevSwServerAddress\n for its replacement. This object will have its value\n modified, given a valid SET to docsDevSwServerAddress.')
docsDevSwFilename = MibScalar((1, 3, 6, 1, 2, 1, 69, 1, 3, 2), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0,64))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: docsDevSwFilename.setDescription('The filename of the software image to be downloaded via\n TFTP, or the abs_path (as defined in RFC 2616) of the\n software image to be downloaded via HTTP.\n\n Unless set via SNMP, this is the filename or abs_path\n specified by the provisioning server during the boot\n process that corresponds to the software version that\n\n is desired for this device.\n\n If unknown, the value of this object is the zero-length\n string.')
docsDevSwAdminStatus = MibScalar((1, 3, 6, 1, 2, 1, 69, 1, 3, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3,))).clone(namedValues=NamedValues(("upgradeFromMgt", 1), ("allowProvisioningUpgrade", 2), ("ignoreProvisioningUpgrade", 3),)).clone('allowProvisioningUpgrade')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: docsDevSwAdminStatus.setDescription('If set to upgradeFromMgt(1), the device will initiate a\n TFTP or HTTP software image download. After\n successfully receiving an image, the device will set\n its state to ignoreProvisioningUpgrade(3) and reboot.\n If the download process is interrupted (e.g., by a reset\n or power failure), the device will load the previous\n image and, after re-initialization, continue to attempt\n loading the image specified in docsDevSwFilename.\n\n If set to allowProvisioningUpgrade(2), the device will\n use the software version information supplied by the\n provisioning server when next rebooting (this does not\n cause a reboot).\n\n When set to ignoreProvisioningUpgrade(3), the device\n will disregard software image upgrade information\n from the provisioning server.\n\n Note that reading this object can return\n upgradeFromMgt(1). This indicates that a software\n download is currently in progress, and that the device\n will reboot after successfully receiving an image.')
docsDevSwOperStatus = MibScalar((1, 3, 6, 1, 2, 1, 69, 1, 3, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5,))).clone(namedValues=NamedValues(("inProgress", 1), ("completeFromProvisioning", 2), ("completeFromMgt", 3), ("failed", 4), ("other", 5),))).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsDevSwOperStatus.setDescription('InProgress(1) indicates that a TFTP or HTTP download is\n underway, either as a result of a version mismatch at\n provisioning or as a result of a upgradeFromMgt request.\n No other docsDevSw* objects can be modified in\n this state.\n\n CompleteFromProvisioning(2) indicates that the last\n software upgrade was a result of version mismatch at\n provisioning.\n\n CompleteFromMgt(3) indicates that the last software\n upgrade was a result of setting docsDevSwAdminStatus to\n upgradeFromMgt.\n\n Failed(4) indicates that the last attempted download\n failed, ordinarily due to TFTP or HTTP timeout.')
docsDevSwCurrentVers = MibScalar((1, 3, 6, 1, 2, 1, 69, 1, 3, 5), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsDevSwCurrentVers.setDescription("The software version currently operating in this device.\n This string's syntax is that used by the\n individual vendor to identify software versions.\n For a CM, this string will describe the current\n software load. For a CMTS, this object SHOULD contain\n a human-readable representation either of the vendor\n specific designation of the software for the chassis,\n or of the software for the control processor. If\n neither of these is applicable, the value MUST be a\n zero-length string.")
docsDevSwServerAddressType = MibScalar((1, 3, 6, 1, 2, 1, 69, 1, 3, 6), InetAddressType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: docsDevSwServerAddressType.setDescription('The type of address of the TFTP or HTTP server used for\n\n software upgrades.\n\n If docsDevSwServerTransportProtocol is currently set to\n tftp(1), attempting to set this object to dns(16) MUST\n result in an error.')
docsDevSwServerAddress = MibScalar((1, 3, 6, 1, 2, 1, 69, 1, 3, 7), InetAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: docsDevSwServerAddress.setDescription('The address of the TFTP or HTTP server used for software\n upgrades.\n\n If the TFTP/HTTP server is unknown, return the zero-\n length address string (see the TextualConvention).\n\n If docsDevSwServer is also implemented in this agent,\n this object is tied to it. A set of this object to an\n IPv4 address will result in also setting the value of\n docsDevSwServer to that address. If this object is set\n to an IPv6 address, docsDevSwServer is set to 0.0.0.0.\n If docsDevSwServer is set, this object is also set to\n that value. Note that if both are set in the same\n action, the order of which one sets the other is\n undefined.')
docsDevSwServerTransportProtocol = MibScalar((1, 3, 6, 1, 2, 1, 69, 1, 3, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2,))).clone(namedValues=NamedValues(("tftp", 1), ("http", 2),)).clone('tftp')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: docsDevSwServerTransportProtocol.setDescription('This object specifies the transport protocol (TFTP or\n HTTP) to be used for software upgrades.\n\n If the value of this object is tftp(1), then the cable\n device uses TFTP (RFC 1350) read request packets to\n download the docsDevSwFilename from the\n docsDevSwServerAddress in octet mode.\n\n If the value of this object is http(2), then the cable\n device uses HTTP 1.0 (RFC 1945) or HTTP 1.1 (RFC 2616)\n GET requests sent to host docsDevSwServerAddress to\n\n download the software image from path docsDevSwFilename.\n\n If docsDevSwServerAddressType is currently set to\n dns(16), attempting to set this object to tftp(1) MUST\n result in an error.')
docsDevServer = MibIdentifier((1, 3, 6, 1, 2, 1, 69, 1, 4))
docsDevServerBootState = MibScalar((1, 3, 6, 1, 2, 1, 69, 1, 4, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10,))).clone(namedValues=NamedValues(("operational", 1), ("disabled", 2), ("waitingForDhcpOffer", 3), ("waitingForDhcpResponse", 4), ("waitingForTimeServer", 5), ("waitingForTftp", 6), ("refusedByCmts", 7), ("forwardingDenied", 8), ("other", 9), ("unknown", 10),))).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsDevServerBootState.setDescription('If operational(1), the device has completed loading and\n processing of configuration parameters, and the CMTS has\n completed the Registration exchange.\n\n If disabled(2), then the device was administratively\n disabled, possibly by being refused network access in\n the configuration file.\n\n If waitingForDhcpOffer(3), then a Dynamic Host\n Configuration Protocol (DHCP) Discover has been\n transmitted, and no offer has yet been received.\n\n If waitingForDhcpResponse(4), then a DHCP Request has\n been transmitted, and no response has yet been received.\n\n If waitingForTimeServer(5), then a Time Request has been\n transmitted, and no response has yet been received.\n\n If waitingForTftp(6), then a request to the TFTP\n parameter server has been made, and no response\n received.\n\n If refusedByCmts(7), then the Registration\n Request/Response exchange with the CMTS failed.\n\n If forwardingDenied(8), then the registration process\n was completed, but the network access option in the\n received configuration file prohibits forwarding.\n\n If other(9), then the registration process reached a\n point that does not fall into one of the above\n categories.\n\n If unknown(10), then the device has not yet begun the\n registration process or is in some other indeterminate\n state.')
docsDevServerDhcp = MibScalar((1, 3, 6, 1, 2, 1, 69, 1, 4, 2), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsDevServerDhcp.setDescription('The IP address of the DHCP server that assigned an IP\n address to this device. Returns 0.0.0.0 if DHCP is not\n used for IP address assignment, or if this agent is\n not assigned an IPv4 address.\n\n This object is deprecated and is replaced by\n docsDevServerDhcpAddress.')
docsDevServerTime = MibScalar((1, 3, 6, 1, 2, 1, 69, 1, 4, 3), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsDevServerTime.setDescription('The IP address of the Time server (RFC 0868). Returns\n 0.0.0.0 if the time server IP address is unknown, or if\n the time server is not an IPv4 server.\n\n This object is deprecated and is replaced by\n\n docsDevServerTimeAddress.')
docsDevServerTftp = MibScalar((1, 3, 6, 1, 2, 1, 69, 1, 4, 4), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsDevServerTftp.setDescription('The IP address of the TFTP server responsible for\n downloading provisioning and configuration parameters\n to this device. Returns 0.0.0.0 if the TFTP server\n address is unknown or is not an IPv4 address.\n\n This object is deprecated and is replaced by\n docsDevServerConfigTftpAddress.')
docsDevServerConfigFile = MibScalar((1, 3, 6, 1, 2, 1, 69, 1, 4, 5), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsDevServerConfigFile.setDescription('The name of the device configuration file read from\n the TFTP server. Returns a zero-length string if\n the configuration file name is unknown.')
docsDevServerDhcpAddressType = MibScalar((1, 3, 6, 1, 2, 1, 69, 1, 4, 6), InetAddressType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsDevServerDhcpAddressType.setDescription('The type of address of docsDevServerDhcpAddress. If\n DHCP was not used, this value should return\n unknown(0).')
docsDevServerDhcpAddress = MibScalar((1, 3, 6, 1, 2, 1, 69, 1, 4, 7), InetAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsDevServerDhcpAddress.setDescription('The internet address of the DHCP server that assigned\n an IP address to this device. Returns the zero length\n octet string if DHCP was not used for IP address\n assignment.')
docsDevServerTimeAddressType = MibScalar((1, 3, 6, 1, 2, 1, 69, 1, 4, 8), InetAddressType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsDevServerTimeAddressType.setDescription('The type of address of docsDevServerTimeAddress. If\n no time server exists, this value should return\n unknown(0).')
docsDevServerTimeAddress = MibScalar((1, 3, 6, 1, 2, 1, 69, 1, 4, 9), InetAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsDevServerTimeAddress.setDescription('The Internet address of the RFC 868 Time server,\n as provided by DHCP option 4.\n\n Note that if multiple values are provided to the\n CM in DHCP option 4, the value of this MIB object\n MUST be the Time server address from which the Time\n of Day reference was acquired as based on the DOCSIS\n RFI specification. During the period of time where\n the Time of Day have not been acquired, the Time\n server address reported by the CM may report the\n first address value in the DHCP option value or the\n last server address the CM attempted to get the Time\n of day value.\n\n Returns the zero-length octet string if the time server\n IP address is not provisioned.')
docsDevServerConfigTftpAddressType = MibScalar((1, 3, 6, 1, 2, 1, 69, 1, 4, 10), InetAddressType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsDevServerConfigTftpAddressType.setDescription('The type of address of docsDevServerConfigTftpAddress.\n If no TFTP server exists, this value should return\n unknown(0).')
docsDevServerConfigTftpAddress = MibScalar((1, 3, 6, 1, 2, 1, 69, 1, 4, 11), InetAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsDevServerConfigTftpAddress.setDescription('The internet address of the TFTP server responsible for\n downloading provisioning and configuration parameters\n to this device. Returns the zero-length octet string if\n the config server address is unknown. There are certain\n security risks that are involved with using TFTP.')
docsDevEvent = MibIdentifier((1, 3, 6, 1, 2, 1, 69, 1, 5))
docsDevEvControl = MibScalar((1, 3, 6, 1, 2, 1, 69, 1, 5, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2,))).clone(namedValues=NamedValues(("resetLog", 1), ("useDefaultReporting", 2),))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: docsDevEvControl.setDescription('Setting this object to resetLog(1) empties the event\n log. All data is deleted. Setting it to\n useDefaultReporting(2) returns all event priorities to\n their factory-default reporting. Reading this object\n always returns useDefaultReporting(2).')
docsDevEvSyslog = MibScalar((1, 3, 6, 1, 2, 1, 69, 1, 5, 2), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: docsDevEvSyslog.setDescription('The IP address of the Syslog server. If 0.0.0.0, either\n syslog transmission is inhibited, or the Syslog server\n address is not an IPv4 address.\n\n This object is deprecated and is replaced by\n docsDevEvSyslogAddress.')
docsDevEvThrottleAdminStatus = MibScalar((1, 3, 6, 1, 2, 1, 69, 1, 5, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4,))).clone(namedValues=NamedValues(("unconstrained", 1), ("maintainBelowThreshold", 2), ("stopAtThreshold", 3), ("inhibited", 4),)).clone('unconstrained')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: docsDevEvThrottleAdminStatus.setDescription('Controls the transmission of traps and syslog messages\n with respect to the trap pacing threshold.\n\n unconstrained(1) causes traps and syslog messages to be\n transmitted without regard to the threshold settings.\n\n maintainBelowThreshold(2) causes trap transmission and\n syslog messages to be suppressed if the number of traps\n would otherwise exceed the threshold.\n\n stopAtThreshold(3) causes trap transmission to cease at\n the threshold and not to resume until directed to do so.\n\n inhibited(4) causes all trap transmission and syslog\n messages to be suppressed.\n\n A single event is always treated as a single event for\n threshold counting. That is, an event causing both a\n trap and a syslog message is still treated as a single\n event.\n\n Writing to this object resets the thresholding state.')
docsDevEvThrottleInhibited = MibScalar((1, 3, 6, 1, 2, 1, 69, 1, 5, 4), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsDevEvThrottleInhibited.setDescription('If true(1), trap and syslog transmission is currently\n inhibited due to thresholds and/or the current setting\n of docsDevEvThrottleAdminStatus. In addition, this is\n true(1) when transmission is inhibited because no\n syslog (docsDevEvSyslog) or trap (docsDevNmAccessEntry)\n destinations have been set.\n\n This object is deprecated and is replaced by\n docsDevEvThrottleThresholdExceeded.')
docsDevEvThrottleThreshold = MibScalar((1, 3, 6, 1, 2, 1, 69, 1, 5, 5), Unsigned32()).setUnits('events').setMaxAccess("readwrite")
if mibBuilder.loadTexts: docsDevEvThrottleThreshold.setDescription('Number of events per docsDevEvThrottleInterval permitted\n before throttling is to occur.\n\n A single event, whether the notification could result in\n messages transmitted using syslog, SNMP, or both\n protocols, and regardless of the number of destinations,\n (including zero) is always treated as a single event for\n threshold counting. For example, an event causing both\n a trap and a syslog message is still treated as a single\n event.\n\n All system notifications that occur within the device\n should be taken into consideration when calculating\n and monitoring the threshold.')
docsDevEvThrottleInterval = MibScalar((1, 3, 6, 1, 2, 1, 69, 1, 5, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1,2147483647)).clone(1)).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: docsDevEvThrottleInterval.setDescription('The interval over which docsDevEvThrottleThreshold\n applies.')
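# --- Illustrative sketch (not part of the generated MIB bindings) ---
# docsDevEvThrottleThreshold and docsDevEvThrottleInterval describe a simple
# per-interval event budget: each event counts once, no matter how many trap
# or syslog messages it produces, and transmission is inhibited once the
# budget is exceeded. A hypothetical stand-in for that bookkeeping, assuming
# `now` is a monotonically increasing timestamp in seconds:
class _EventThrottle(object):
    def __init__(self, threshold, interval_seconds):
        self.threshold = threshold
        self.interval = interval_seconds
        self.window_start = None
        self.count = 0

    def allow(self, now):
        """Count one event; return True if it may still be transmitted."""
        if self.window_start is None or now - self.window_start >= self.interval:
            self.window_start, self.count = now, 0  # start a new interval
        self.count += 1
        return self.count <= self.threshold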
docsDevEvControlTable = MibTable((1, 3, 6, 1, 2, 1, 69, 1, 5, 7), )
if mibBuilder.loadTexts: docsDevEvControlTable.setDescription('This table allows control of the reporting of event\n classes. For each event priority, a combination of\n\n logging and reporting mechanisms may be chosen. The\n mapping of event types to priorities is\n vendor dependent. Vendors may also choose to allow\n the user to control that mapping through proprietary\n means. Table entries MUST persist across reboots for\n CMTS devices and MUST NOT persist across reboots for CM\n devices.')
docsDevEvControlEntry = MibTableRow((1, 3, 6, 1, 2, 1, 69, 1, 5, 7, 1), ).setIndexNames((0, "DOCS-CABLE-DEVICE-MIB", "docsDevEvPriority"))
if mibBuilder.loadTexts: docsDevEvControlEntry.setDescription('Allows configuration of the reporting mechanisms for a\n particular event priority.')
docsDevEvPriority = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 5, 7, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8,))).clone(namedValues=NamedValues(("emergency", 1), ("alert", 2), ("critical", 3), ("error", 4), ("warning", 5), ("notice", 6), ("information", 7), ("debug", 8),)))
if mibBuilder.loadTexts: docsDevEvPriority.setDescription('The priority level that is controlled by this\n entry. These are ordered from most (emergency) to least\n (debug) critical. Each event with a CM or CMTS has a\n particular priority level associated with it (as defined\n by the vendor).\n\n emergency(1) events indicate vendor-specific fatal\n hardware or software errors that prevent normal system\n operation.\n\n alert(2) events indicate a serious failure that causes\n the reporting system to reboot but is not caused by\n hardware or software malfunctioning.\n\n critical(3) events indicate a serious failure that\n requires attention and prevents the device from\n transmitting data but that could be recovered without\n rebooting the system.\n\n error(4) and warning(5) events indicate that a failure\n occurred that could interrupt the normal data flow but\n that does not cause the device to re-register.\n\n notice(6) and information(7) events indicate a\n milestone or checkpoint in normal operation that could\n be of particular importance for troubleshooting.\n\n debug(8) events are reserved for vendor-specific\n events.\n\n During normal operation, no event more\n critical than notice(6) should be generated. Events\n between warning and emergency should be generated at\n appropriate levels of problems (e.g., emergency when the\n box is about to crash).')
docsDevEvReporting = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 5, 7, 1, 2), Bits().clone(namedValues=NamedValues(("local", 0), ("traps", 1), ("syslog", 2), ("localVolatile", 8), ("stdInterface", 9),))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: docsDevEvReporting.setDescription('Defines the action to be taken on occurrence of this\n event class. Implementations may not necessarily\n support all options for all event classes but at\n minimum must allow traps and syslogging to be\n disabled.\n\n If the local(0) bit is set, then log to the internal\n log and update non-volatile store, for backward\n compatibility with the original RFC 2669 definition.\n If the traps(1) bit is set, then generate\n an SNMP trap; if the syslog(2) bit is set, then\n send a syslog message (assuming that the syslog address\n is set). If the localVolatile(8) bit is set, then\n log to the internal log without updating non-volatile\n store. If the stdInterface(9) bit is set, then the\n agent ignores all other bits except the local(0),\n syslog(2), and localVolatile(8) bits. Setting the\n stdInterface(9) bit indicates that RFC3413 and\n RFC3014 are being used to control event reporting\n mechanisms.')
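# --- Illustrative sketch (not part of the generated MIB bindings) ---
# docsDevEvReporting is a BITS value; on the wire it is an octet string in
# which bit 0 is the most significant bit of the first octet. A hypothetical
# decoder for the named bits listed above, assuming `octets` is the raw bytes
# value read via SNMP:
_EV_REPORTING_BITS = {0: 'local', 1: 'traps', 2: 'syslog',
                      8: 'localVolatile', 9: 'stdInterface'}

def _decode_ev_reporting(octets):
    """Return the set of reporting mechanisms enabled in a BITS value."""
    enabled = set()
    for bit, name in _EV_REPORTING_BITS.items():
        octet_index, bit_index = divmod(bit, 8)
        if octet_index < len(octets) and octets[octet_index] & (0x80 >> bit_index):
            enabled.add(name)
    return enabled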
docsDevEventTable = MibTable((1, 3, 6, 1, 2, 1, 69, 1, 5, 8), )
if mibBuilder.loadTexts: docsDevEventTable.setDescription('Contains a log of network and device events that may be\n of interest in fault isolation and troubleshooting.\n If the local(0) bit is set in docsDevEvReporting,\n entries in this table MUST persist across reboots.')
docsDevEventEntry = MibTableRow((1, 3, 6, 1, 2, 1, 69, 1, 5, 8, 1), ).setIndexNames((0, "DOCS-CABLE-DEVICE-MIB", "docsDevEvIndex"))
if mibBuilder.loadTexts: docsDevEventEntry.setDescription('Describes a network or device event that may be of\n interest in fault isolation and troubleshooting.\n Multiple sequential identical events are represented by\n incrementing docsDevEvCounts and setting\n docsDevEvLastTime to the current time rather than\n creating multiple rows.\n\n Entries are created with the first occurrence of an\n event. docsDevEvControl can be used to clear the\n table. Individual events cannot be deleted.')
docsDevEvIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 5, 8, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1,2147483647)))
if mibBuilder.loadTexts: docsDevEvIndex.setDescription('Provides relative ordering of the objects in the event\n log. This object will always increase except when\n (a) the log is reset via docsDevEvControl,\n (b) the device reboots and does not implement\n non-volatile storage for this log, or (c) it reaches\n the value 2^31. The next entry for all the above\n cases is 1.')
docsDevEvFirstTime = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 5, 8, 1, 2), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsDevEvFirstTime.setDescription('The value of docsDevDateTime at the time this entry was\n created.')
docsDevEvLastTime = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 5, 8, 1, 3), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsDevEvLastTime.setDescription('When an entry reports only one event, this object will\n have the same value as the corresponding instance of\n docsDevEvFirstTime. When an entry reports multiple\n events, this object will record the value that\n docsDevDateTime had when the most recent event for this\n entry occurred.')
docsDevEvCounts = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 5, 8, 1, 4), Counter32()).setUnits('events').setMaxAccess("readonly")
if mibBuilder.loadTexts: docsDevEvCounts.setDescription('The number of consecutive event instances reported by\n this entry. This starts at 1 with the creation of this\n row and increments by 1 for each subsequent duplicate\n event.')
docsDevEvLevel = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 5, 8, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8,))).clone(namedValues=NamedValues(("emergency", 1), ("alert", 2), ("critical", 3), ("error", 4), ("warning", 5), ("notice", 6), ("information", 7), ("debug", 8),))).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsDevEvLevel.setDescription('The priority level of this event, as defined by the\n vendor. These are ordered from most serious (emergency)\n to least serious (debug).\n\n emergency(1) events indicate vendor-specific fatal\n hardware or software errors that prevent normal system\n operation.\n\n alert(2) events indicate a serious failure that causes\n the reporting system to reboot but that is not caused by\n hardware or software malfunctioning.\n\n critical(3) events indicate a serious failure that\n requires attention and prevents the device from\n transmitting data but that could be recovered without\n rebooting the system.\n\n error(4) and warning(5) events indicate that a failure\n occurred that could interrupt the normal data flow but\n that does not cause the device to re-register.\n\n notice(6) and information(7) events indicate a\n milestone or checkpoint in normal operation that could\n be of particular importance for troubleshooting.\n\n debug(8) events are reserved for vendor-specific\n\n events.\n\n During normal operation, no event more\n critical than notice(6) should be generated. Events\n between warning and emergency should be generated at\n appropriate levels of problems (e.g., emergency when the\n box is about to crash).')
docsDevEvId = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 5, 8, 1, 6), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsDevEvId.setDescription('For this product, uniquely identifies the type of event\n that is reported by this entry.')
docsDevEvText = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 5, 8, 1, 7), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsDevEvText.setDescription('Provides a human-readable description of the event,\n including all relevant context (interface numbers,\n etc.).')
docsDevEvSyslogAddressType = MibScalar((1, 3, 6, 1, 2, 1, 69, 1, 5, 9), InetAddressType().clone('unknown')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: docsDevEvSyslogAddressType.setDescription('The type of address of docsDevEvSyslogAddress. If\n no syslog server exists, this value should return\n unknown(0).')
docsDevEvSyslogAddress = MibScalar((1, 3, 6, 1, 2, 1, 69, 1, 5, 10), InetAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: docsDevEvSyslogAddress.setDescription('The Internet address of the Syslog server, as provided\n by DHCP option 7 or set via SNMP management. If the\n address of the server is set to the zero-length\n string, the 0.0.0.0 IPv4 address, or the 0: IPv6\n address, Syslog transmission is inhibited.\n\n Note that if multiple values are provided to the CM in\n DHCP option 7, the value of this MIB object MUST be the\n first Syslog server address received.\n\n By default at agent boot, this object returns the zero\n length string.')
docsDevEvThrottleThresholdExceeded = MibScalar((1, 3, 6, 1, 2, 1, 69, 1, 5, 11), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsDevEvThrottleThresholdExceeded.setDescription('If true(1), trap and syslog transmission is currently\n inhibited due to exceeding the trap/syslog event\n threshold in the current interval.')
docsDevFilter = MibIdentifier((1, 3, 6, 1, 2, 1, 69, 1, 6))
docsDevFilterLLCUnmatchedAction = MibScalar((1, 3, 6, 1, 2, 1, 69, 1, 6, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2,))).clone(namedValues=NamedValues(("discard", 1), ("accept", 2),)).clone('accept')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: docsDevFilterLLCUnmatchedAction.setDescription('LLC (Link Level Control) filters can be defined on an\n inclusive or exclusive basis: CMs can be configured to\n forward only packets matching a set of layer three\n protocols, or to drop packets matching a set of layer\n three protocols. Typical use of these filters is to\n\n filter out possibly harmful (given the context of a\n large metropolitan LAN) protocols.\n\n If set to discard(1), any L2 packet that does not match\n at least one filter in the docsDevFilterLLCTable will be\n discarded. If set to accept(2), any L2 packet that\n does not match at least one filter in the\n docsDevFilterLLCTable will be accepted for further\n processing (e.g., bridging). In other words, if the\n packet does not match an entry in the table, it takes\n this action; if it does match an entry in the table, it\n takes the opposite of this action.')
docsDevFilterLLCTable = MibTable((1, 3, 6, 1, 2, 1, 69, 1, 6, 2), )
if mibBuilder.loadTexts: docsDevFilterLLCTable.setDescription('A list of filters to apply to (bridged) LLC\n traffic. The filters in this table are applied to\n incoming traffic on the appropriate interface(s) prior\n to any further processing (e.g., before the packet\n is handed off for level 3 processing, or for bridging).\n The specific action taken when no filter is matched is\n controlled by docsDevFilterLLCUnmatchedAction. Table\n entries MUST NOT persist across reboots for any device.')
docsDevFilterLLCEntry = MibTableRow((1, 3, 6, 1, 2, 1, 69, 1, 6, 2, 1), ).setIndexNames((0, "DOCS-CABLE-DEVICE-MIB", "docsDevFilterLLCIndex"))
if mibBuilder.loadTexts: docsDevFilterLLCEntry.setDescription('Describes a single filter to apply to (bridged) LLC\n traffic received on a specified interface. ')
docsDevFilterLLCIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 6, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1,2147483647)))
if mibBuilder.loadTexts: docsDevFilterLLCIndex.setDescription('Index used for the identification of filters (note that\n LLC filter order is irrelevant).')
docsDevFilterLLCStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 6, 2, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsDevFilterLLCStatus.setDescription('Controls and reflects the status of rows in this\n table. There is no restriction on changing any of the\n associated columns for this row while this object is set\n to active.\n\n Specifying only this object (with the\n appropriate index) on a CM is sufficient to create a\n filter row that matches all inbound packets on the\n ethernet interface and results in the packets being\n discarded. docsDevFilterLLCIfIndex (at least) must be\n specified on a CMTS to create a row.')
docsDevFilterLLCIfIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 6, 2, 1, 3), InterfaceIndexOrZero()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsDevFilterLLCIfIndex.setDescription("The entry interface to which this filter applies. The\n value corresponds to ifIndex for either a CATV MAC or\n another network interface. If the value is zero, the\n filter applies to all interfaces. In Cable Modems, the\n default value is the customer side interface(s). In\n CMTSs, this object has to be specified to\n create a row in this table.\n\n Note that according to the DOCSIS OSSIv1.1\n specification, ifIndex '1' in the CM means that this\n row applies to all Cable Modem-to-CPE Interfaces\n (CMCI).")
docsDevFilterLLCProtocolType = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 6, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2,))).clone(namedValues=NamedValues(("ethertype", 1), ("dsap", 2),)).clone('ethertype')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsDevFilterLLCProtocolType.setDescription('The format of the value in docsDevFilterLLCProtocol:\n either a two-byte Ethernet Ethertype, or a one-byte\n 802.2 Service Access Point (SAP) value. ethertype(1)\n also applies to Standard Network Access Protocol\n (SNAP) encapsulated frames.')
docsDevFilterLLCProtocol = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 6, 2, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0,65535))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsDevFilterLLCProtocol.setDescription('The layer-three protocol for which this filter applies.\n The protocol value format depends on\n docsDevFilterLLCProtocolType. Note that for SNAP\n frames, ethertype filtering is performed rather than\n Destination Service Access Point (DSAP) =0xAA.')
docsDevFilterLLCMatches = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 6, 2, 1, 6), Counter32()).setUnits('matches').setMaxAccess("readonly")
if mibBuilder.loadTexts: docsDevFilterLLCMatches.setDescription('Counts the number of times this filter was matched.')
docsDevFilterIpDefault = MibScalar((1, 3, 6, 1, 2, 1, 69, 1, 6, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2,))).clone(namedValues=NamedValues(("discard", 1), ("accept", 2),)).clone('accept')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: docsDevFilterIpDefault.setDescription('The default behavior for (bridged) packets that do not\n match IP filters (or Internet filters, if implemented)\n is defined by docsDevFilterIpDefault.\n\n If set to discard(1), all packets not matching an IP\n filter in docsDevFilterIpTable will be discarded. If\n set to accept(2), all packets not matching an IP filter\n or an Internet filter will be accepted for further\n processing (e.g., bridging).')
docsDevFilterIpTable = MibTable((1, 3, 6, 1, 2, 1, 69, 1, 6, 4), )
if mibBuilder.loadTexts: docsDevFilterIpTable.setDescription('An ordered list of filters or classifiers to apply to\n IP traffic. Filter application is ordered by the filter\n index, rather than by a best match algorithm (note that\n this implies that the filter table may have gaps in the\n index values). Packets that match no filters will have\n policy 0 in the docsDevFilterPolicyTable applied to\n them, if it exists. Otherwise, Packets that match no\n filters are discarded or forwarded according to the\n setting of docsDevFilterIpDefault.\n\n Any IP packet can theoretically match multiple rows of\n this table. When considering a packet, the table is\n scanned in row index order (e.g., filter 10 is checked\n before filter 20). If the packet matches that filter\n (which means that it matches ALL criteria for that row),\n actions appropriate to docsDevFilterIpControl and\n docsDevFilterPolicyId are taken. If the packet was\n discarded processing is complete. If\n docsDevFilterIpContinue is set to true, the filter\n comparison continues with the next row in the table,\n looking for additional matches.\n\n If the packet matches no filter in the table, the packet\n is accepted or dropped for further processing\n according to the setting of docsDevFilterIpDefault.\n If the packet is accepted, the actions specified by\n policy group 0 (e.g., the rows in\n docsDevFilterPolicyTable that have a value of 0 for\n docsDevFilterPolicyId) are taken, if that policy\n\n group exists.\n\n Logically, this table is consulted twice during the\n processing of any IP packet: once upon its acceptance\n from the L2 entity, and once upon its transmission to\n the L2 entity. In actuality, for cable modems, IP\n filtering is generally the only IP processing done for\n transit traffic. This means that inbound and outbound\n filtering can generally be done at the same time with\n one pass through the filter table.\n\n The objects in this table are only accessible from cable\n devices that are not operating in DiffServ MIB mode\n (RFC 3289). See the conformance section for details.\n\n Note that some devices are required by other\n specifications (e.g., the DOCSIS OSSIv1.1 specification)\n to support the legacy SNMPv1/v2c docsDevFilter mode\n for backward compatibility.\n\n Table entries MUST NOT persist across reboots for any\n device.\n\n This table is deprecated. Instead, use the DiffServ MIB\n from RFC 3289.')
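# --- Illustrative sketch (not part of the generated MIB bindings) ---
# The docsDevFilterIpTable description above spells out the evaluation order:
# scan rows by ascending index, act on docsDevFilterIpControl for each match,
# keep scanning only if docsDevFilterIpContinue is true, and fall back to
# docsDevFilterIpDefault when nothing matches. The `rules` list and its
# 'index'/'matches'/'control'/'continue' keys below are hypothetical stand-ins
# for the real row data:
def _evaluate_ip_filters(packet, rules, default_accept=True):
    """Return True to accept the packet, False to discard it."""
    matched = False
    for rule in sorted(rules, key=lambda r: r['index']):
        if not rule['matches'](packet):
            continue
        matched = True
        if rule['control'] == 'discard':
            return False                 # discard ends processing immediately
        # 'accept' or 'policy': the packet remains accepted so far
        if not rule.get('continue', False):
            return True                  # stop scanning after a terminal match
    return True if matched else default_accept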
docsDevFilterIpEntry = MibTableRow((1, 3, 6, 1, 2, 1, 69, 1, 6, 4, 1), ).setIndexNames((0, "DOCS-CABLE-DEVICE-MIB", "docsDevFilterIpIndex"))
if mibBuilder.loadTexts: docsDevFilterIpEntry.setDescription('Describes a filter to apply to IP traffic received on a\n specified interface. All identity objects in this table\n (e.g., source and destination address/mask, protocol,\n source/dest port, TOS/mask, interface and direction)\n must match their respective fields in the packet for\n any given filter to match.\n\n To create an entry in this table, docsDevFilterIpIfIndex\n must be specified.')
docsDevFilterIpIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 6, 4, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1,2147483647)))
if mibBuilder.loadTexts: docsDevFilterIpIndex.setDescription('Index used to order the application of filters.\n The filter with the lowest index is always applied\n first.')
docsDevFilterIpStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 6, 4, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsDevFilterIpStatus.setDescription('Controls and reflects the status of rows in this\n table. Specifying only this object (with the\n appropriate index) on a CM is sufficient to create a\n filter row that matches all inbound packets on the\n ethernet interface and results in the packets being\n discarded. docsDevFilterIpIfIndex (at least) must be\n specified on a CMTS to create a row. Creation of the\n rows may be done via either create-and-wait or\n create-and-go, but the filter is not applied until this\n object is set to (or changes to) active. There is no\n restriction in changing any object in a row while this\n object is set to active.')
docsDevFilterIpControl = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 6, 4, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3,))).clone(namedValues=NamedValues(("discard", 1), ("accept", 2), ("policy", 3),)).clone('discard')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsDevFilterIpControl.setDescription('If set to discard(1), all packets matching this filter\n will be discarded, and scanning of the remainder of the\n filter list will be aborted. If set to accept(2), all\n packets matching this filter will be accepted for\n further processing (e.g., bridging). If\n docsDevFilterIpContinue is set to true, see if there\n are other matches; otherwise, done. If set to\n policy (3), execute the policy entries\n matched by docsDevFilterIpPolicyId in\n docsDevFilterPolicyTable.\n\n If docsDevFilterIpContinue is set to true, continue\n scanning the table for other matches; otherwise, done.')
docsDevFilterIpIfIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 6, 4, 1, 4), InterfaceIndexOrZero()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsDevFilterIpIfIndex.setDescription("The entry interface to which this filter applies. The\n value corresponds to ifIndex for either a CATV MAC or\n another interface. If the value is zero, the\n filter applies to all interfaces. Default value in CMs\n is the index of the customer-side (e.g., ethernet)\n interface(s). In CMTSes, this object MUST be\n specified to create a row in this table.\n\n Note that according to the DOCSIS OSSIv1.1\n specification, ifIndex '1' in the Cable Modem means\n that this row applies to all CMCI (customer-facing)\n interfaces.")
docsDevFilterIpDirection = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 6, 4, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3,))).clone(namedValues=NamedValues(("inbound", 1), ("outbound", 2), ("both", 3),)).clone('inbound')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsDevFilterIpDirection.setDescription('Determines whether the filter is applied to inbound(1)\n traffic, outbound(2) traffic, or traffic in both(3)\n directions.')
docsDevFilterIpBroadcast = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 6, 4, 1, 6), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsDevFilterIpBroadcast.setDescription('If set to true(1), the filter only applies to multicast\n and broadcast traffic. If set to false(2), the filter\n applies to all traffic.')
docsDevFilterIpSaddr = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 6, 4, 1, 7), IpAddress().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsDevFilterIpSaddr.setDescription('The source IP address, or portion thereof, that is to be\n matched for this filter. The source address is first\n masked (ANDed) against docsDevFilterIpSmask before\n being compared to this value. A value of 0 for this\n object and 0 for the mask matches all IP addresses.')
docsDevFilterIpSmask = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 6, 4, 1, 8), IpAddress().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsDevFilterIpSmask.setDescription('A bit mask that is to be applied to the source address\n prior to matching. This mask is not necessarily the\n same as a subnet mask, but 1s bits must be leftmost and\n contiguous.')
docsDevFilterIpDaddr = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 6, 4, 1, 9), IpAddress().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsDevFilterIpDaddr.setDescription('The destination IP address, or portion thereof, that is\n to be matched for this filter. The destination address\n is first masked (ANDed) against docsDevFilterIpDmask\n before being compared to this value. A value of\n 00000000 for this object and 00000000 for the mask\n matches all IP addresses.')
docsDevFilterIpDmask = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 6, 4, 1, 10), IpAddress().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsDevFilterIpDmask.setDescription('A bit mask that is to be applied to the destination\n address prior to matching. This mask is not necessarily\n the same as a subnet mask, but 1s bits MUST be leftmost\n and contiguous.')
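# --- Illustrative sketch (not part of the generated MIB bindings) ---
# The matching described for docsDevFilterIpSaddr/Smask and
# docsDevFilterIpDaddr/Dmask is "mask, then compare", with the extra rule that
# the mask's 1 bits must be leftmost and contiguous. Hypothetical helpers,
# assuming dotted-quad IPv4 strings:
def _ipv4_to_int(addr):
    """Convert a dotted-quad IPv4 string to a 32-bit integer."""
    a, b, c, d = (int(x) for x in addr.split('.'))
    return (a << 24) | (b << 16) | (c << 8) | d

def _mask_is_contiguous(mask):
    """True if the mask's 1 bits are leftmost and contiguous (e.g. 255.255.0.0)."""
    inverted = (~_ipv4_to_int(mask)) & 0xFFFFFFFF
    return (inverted & (inverted + 1)) == 0

def _address_matches(packet_addr, filter_addr, mask):
    """Apply the AND-then-compare rule used by the IP filter columns."""
    m = _ipv4_to_int(mask)
    return (_ipv4_to_int(packet_addr) & m) == (_ipv4_to_int(filter_addr) & m)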
docsDevFilterIpProtocol = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 6, 4, 1, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0,256)).clone(256)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsDevFilterIpProtocol.setDescription('The IP protocol value that is to be matched. For\n example, icmp is 1, tcp is 6, and udp is 17. A value of\n 256 matches ANY protocol.')
docsDevFilterIpSourcePortLow = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 6, 4, 1, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0,65535))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsDevFilterIpSourcePortLow.setDescription('This is the inclusive lower bound of the transport-layer\n source port range that is to be matched. If the IP\n protocol of the packet is neither UDP nor TCP, this\n\n object is ignored during matching.')
docsDevFilterIpSourcePortHigh = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 6, 4, 1, 13), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0,65535)).clone(65535)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsDevFilterIpSourcePortHigh.setDescription('This is the inclusive upper bound of the transport-layer\n source port range that is to be matched. If the IP\n protocol of the packet is neither UDP nor TCP, this\n object is ignored during matching.')
docsDevFilterIpDestPortLow = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 6, 4, 1, 14), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0,65535))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsDevFilterIpDestPortLow.setDescription('This is the inclusive lower bound of the transport-layer\n destination port range that is to be matched. If the IP\n protocol of the packet is neither UDP nor TCP, this\n object is ignored during matching.')
docsDevFilterIpDestPortHigh = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 6, 4, 1, 15), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0,65535)).clone(65535)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsDevFilterIpDestPortHigh.setDescription('This is the inclusive upper bound of the transport-layer\n destination port range that is to be matched. If the IP\n protocol of the packet is neither UDP nor TCP, this\n object is ignored during matching.')
docsDevFilterIpMatches = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 6, 4, 1, 16), ZeroBasedCounter32()).setUnits('matches').setMaxAccess("readonly")
if mibBuilder.loadTexts: docsDevFilterIpMatches.setDescription('Counts the number of times this filter was matched.\n This object is initialized to 0 at boot, or at row\n creation, and is reset only upon reboot.')
docsDevFilterIpTos = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 6, 4, 1, 17), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1,1)).setFixedLength(1).clone(hexValue="00")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsDevFilterIpTos.setDescription("This is the value to be matched to the packet's\n TOS (Type of Service) value (after the TOS value\n is ANDed with docsDevFilterIpTosMask). A value for this\n object of 0 and a mask of 0 matches all TOS values.")
docsDevFilterIpTosMask = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 6, 4, 1, 18), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1,1)).setFixedLength(1).clone(hexValue="00")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsDevFilterIpTosMask.setDescription("The mask to be applied to the packet's TOS value before\n matching.")
docsDevFilterIpContinue = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 6, 4, 1, 19), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsDevFilterIpContinue.setDescription('If this value is set to true and docsDevFilterIpControl\n is anything but discard (1), continue scanning and\n applying policies. See Section 3.3.3 for more\n details.')
docsDevFilterIpPolicyId = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 6, 4, 1, 20), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0,2147483647))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsDevFilterIpPolicyId.setDescription('This object points to an entry in\n docsDevFilterPolicyTable. If docsDevFilterIpControl\n\n is set to policy (3), execute all matching policies\n in docsDevFilterPolicyTable. If no matching policy\n exists, treat as if docsDevFilterIpControl were set\n to accept (1). If this object is set to the value of\n 0, there is no matching policy, and\n docsDevFilterPolicyTable MUST NOT be consulted.')
docsDevFilterPolicyTable = MibTable((1, 3, 6, 1, 2, 1, 69, 1, 6, 5), )
if mibBuilder.loadTexts: docsDevFilterPolicyTable.setDescription('A Table that maps between a policy group ID and a set\n of pointers to policies to be applied. All rows with\n the same docsDevFilterPolicyId are part of the same\n group of policy pointers and are applied in the order\n in this table. docsDevFilterPolicyTable exists to\n allow multiple policy actions (referenced by policy\n pointers) to be applied to any given classified packet.\n The policy actions are applied in index order.\n For example:\n\n Index ID Type Action\n 1 1 TOS 1\n 9 5 TOS 1\n 12 1 IPSEC 3\n\n This says that a packet that matches a filter with\n policy id 1 first has TOS policy 1 applied (which might\n set the TOS bits to enable a higher priority) and next\n has the IPSEC policy 3 applied (which may result in the\n packets being dumped into a secure VPN to a remote\n encryptor).\n\n Policy ID 0 is reserved for default actions and is\n applied only to packets that match no filters in\n docsDevFilterIpTable.\n\n Table entries MUST NOT persist across reboots for any\n device.\n\n This table is deprecated. Instead, use the DiffServ MIB\n\n from RFC 3289.')
docsDevFilterPolicyEntry = MibTableRow((1, 3, 6, 1, 2, 1, 69, 1, 6, 5, 1), ).setIndexNames((0, "DOCS-CABLE-DEVICE-MIB", "docsDevFilterPolicyIndex"))
if mibBuilder.loadTexts: docsDevFilterPolicyEntry.setDescription('An entry in the docsDevFilterPolicyTable. Entries are\n created by Network Management. To create an entry,\n docsDevFilterPolicyId MUST be specified.')
docsDevFilterPolicyIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 6, 5, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1,2147483647)))
if mibBuilder.loadTexts: docsDevFilterPolicyIndex.setDescription('Index value for the table.')
docsDevFilterPolicyId = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 6, 5, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0,2147483647))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsDevFilterPolicyId.setDescription('Policy ID for this entry. If a policy ID can apply to\n multiple rows of this table, all relevant policies are\n executed. Policy 0 (if populated) is applied to all\n packets that do not match any of the filters. N.B. If\n docsDevFilterIpPolicyId is set to 0, it DOES NOT match\n policy 0 of this table.')
docsDevFilterPolicyStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 6, 5, 1, 5), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsDevFilterPolicyStatus.setDescription('Object used to create an entry in this table. There is\n no restriction in changing any object in a row while\n this object is set to active.\n The following object MUST have a valid value before this\n object can be set to active: docsDevFilterPolicyPtr.')
docsDevFilterPolicyPtr = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 6, 5, 1, 6), RowPointer().clone((0, 0))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsDevFilterPolicyPtr.setDescription('This object points to a row in an applicable filter\n policy table. Currently, the only standard policy\n table is docsDevFilterTosTable.\n\n Per the textual convention, this object points to the\n first accessible object in the row; e.g., to point to a\n row in docsDevFilterTosTable with an index of 21, the\n value of this object would be the object identifier\n docsDevTosStatus.21.\n\n Vendors are recommended to adhere to the same convention\n when adding vendor-specific policy table extensions.\n\n If this pointer references an empty or non-existent\n row, then no policy action is taken.\n\n The default upon row creation is a null pointer that\n results in no policy action being taken.')
docsDevFilterTosTable = MibTable((1, 3, 6, 1, 2, 1, 69, 1, 6, 6), )
if mibBuilder.loadTexts: docsDevFilterTosTable.setDescription('Table used to describe Type of Service (TOS) bits\n\n processing.\n\n This table is an adjunct to the docsDevFilterIpTable\n and the docsDevFilterPolicy table. Entries in the\n latter table can point to specific rows in this (and\n other) tables and cause specific actions to be taken.\n This table permits the manipulation of the value of the\n Type of Service bits in the IP header of the matched\n packet as follows:\n\n Set the tosBits of the packet to\n (tosBits & docsDevFilterTosAndMask) |\n docsDevFilterTosOrMask\n\n This construct allows you to do a clear and set of all\n the TOS bits in a flexible manner.\n\n Table entries MUST NOT persist across reboots for any\n device.\n\n This table is deprecated. Instead, use the DiffServ MIB\n from RFC 3289.')
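# --- Illustrative sketch (not part of the generated MIB bindings) ---
# The docsDevFilterTosTable description above rewrites a matched packet's TOS
# byte as (tosBits & docsDevFilterTosAndMask) | docsDevFilterTosOrMask. A
# minimal helper using the column defaults (AND 0xff, OR 0x00) and assuming
# integer byte values in the range 0..255:
def _apply_tos_policy(tos_bits, and_mask=0xFF, or_mask=0x00):
    """Clear the bits not covered by and_mask, then set the bits in or_mask."""
    return (tos_bits & and_mask) | or_mask
# For example, _apply_tos_policy(0x20, and_mask=0x1F, or_mask=0xA0) == 0xA0.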
docsDevFilterTosEntry = MibTableRow((1, 3, 6, 1, 2, 1, 69, 1, 6, 6, 1), ).setIndexNames((0, "DOCS-CABLE-DEVICE-MIB", "docsDevFilterTosIndex"))
if mibBuilder.loadTexts: docsDevFilterTosEntry.setDescription('A TOS policy entry.')
docsDevFilterTosIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 6, 6, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1,2147483647)))
if mibBuilder.loadTexts: docsDevFilterTosIndex.setDescription('The unique index for this row. There are no ordering\n requirements for this table, and any valid index may be\n specified.')
docsDevFilterTosStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 6, 6, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsDevFilterTosStatus.setDescription('The object used to create and delete entries in this\n table. A row created by specifying just this object\n results in a row that specifies no change to the TOS\n bits. A row may be created using either the\n create-and-go or create-and-wait paradigms. There is\n no restriction on the ability to change values in this\n row while the row is active.')
docsDevFilterTosAndMask = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 6, 6, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1,1)).setFixedLength(1).clone(hexValue="ff")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsDevFilterTosAndMask.setDescription("This value is bitwise ANDed with the matched packet's\n TOS bits.")
docsDevFilterTosOrMask = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 6, 6, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1,1)).setFixedLength(1).clone(hexValue="00")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsDevFilterTosOrMask.setDescription("This value is bitwise ORed with the result from the\n AND procedure (tosBits & docsDevFilterTosAndMask).\n The result then replaces the packet's TOS bits.")
docsDevCpe = MibIdentifier((1, 3, 6, 1, 2, 1, 69, 1, 7))
docsDevCpeEnroll = MibScalar((1, 3, 6, 1, 2, 1, 69, 1, 7, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2,))).clone(namedValues=NamedValues(("none", 1), ("any", 2),)).clone('any')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: docsDevCpeEnroll.setDescription('This object controls the population of\n docsDevFilterCpeTable.\n If set to none, the filters must be set manually\n by a network management action (either configuration\n or SNMP set).\n If set to any, the CM wiretaps the packets originating\n from the ethernet and enrolls up to docsDevCpeIpMax\n addresses as based on the source IPv4 or v6 addresses of\n those packets.')
docsDevCpeIpMax = MibScalar((1, 3, 6, 1, 2, 1, 69, 1, 7, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1,2147483647)).clone(-1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: docsDevCpeIpMax.setDescription('This object controls the maximum number of CPEs allowed\n to be learned behind this device. If set to zero, any\n number of CPEs may connect up to the maximum permitted\n for the device.\n If set to -1, no filtering is done on CPE source\n addresses, and no entries are made in the\n docsDevFilterCpeTable via learning. If an attempt is\n made to set this to a number greater than that\n permitted for the device, it is set to that maximum.')
docsDevCpeTable = MibTable((1, 3, 6, 1, 2, 1, 69, 1, 7, 3), )
if mibBuilder.loadTexts: docsDevCpeTable.setDescription('This table lists the IPv4 addresses seen (or permitted)\n as source addresses in packets originating from the\n customer interface on this device. In addition, this\n table can be provisioned with the specific addresses\n permitted for the CPEs via the normal row creation\n mechanisms. Table entries MUST NOT persist across\n reboots for any device.\n\n N.B. Management action can add entries in this table\n and in docsDevCpeIpTable past the value of\n\n docsDevCpeIpMax. docsDevCpeIpMax ONLY restricts the\n ability of the CM to add learned addresses\n automatically.\n\n This table is deprecated and is replaced by\n docsDevCpeInetTable.')
docsDevCpeEntry = MibTableRow((1, 3, 6, 1, 2, 1, 69, 1, 7, 3, 1), ).setIndexNames((0, "DOCS-CABLE-DEVICE-MIB", "docsDevCpeIp"))
if mibBuilder.loadTexts: docsDevCpeEntry.setDescription('An entry in the docsDevFilterCpeTable. There is one\n entry for each IPv4 CPE seen or provisioned. If\n docsDevCpeIpMax is set to -1, this table is ignored;\n otherwise, upon receipt of an IP packet from the\n customer interface of the CM, the source IP address is\n checked against this table. If the address is in the\n table, packet processing continues. If the address is\n not in the table but docsDevCpeEnroll is set to any\n and the sum of the table sizes of docsDevCpeTable and\n docsDevCpeInetTable is less than docsDevCpeIpMax, the\n address is added to the table, and packet processing\n continues. Otherwise, the packet is dropped.\n\n The filtering actions specified by this table occur\n after any LLC filtering (docsDevFilterLLCTable), but\n prior to any IP filtering (docsDevFilterIpTable,\n docsDevNmAccessTable).')
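# --- Illustrative sketch (not part of the generated MIB bindings) ---
# The docsDevCpeEntry description above amounts to: with docsDevCpeIpMax == -1
# the table is ignored; known source addresses pass; unknown ones are learned
# only while docsDevCpeEnroll is any(2) and there is room under
# docsDevCpeIpMax; everything else is dropped. A hypothetical stand-in (the
# "0 means device maximum" case from docsDevCpeIpMax is not modelled here):
def _cpe_source_allowed(src_addr, learned, enroll_any=True, ip_max=-1):
    """Return True if a packet with this CPE source address may be forwarded."""
    if ip_max == -1:
        return True                      # filtering disabled; table ignored
    if src_addr in learned:
        return True
    if enroll_any and len(learned) < ip_max:
        learned.add(src_addr)            # learn the new CPE address
        return True
    return False                         # unknown address and no room to learn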
docsDevCpeIp = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 7, 3, 1, 1), IpAddress())
if mibBuilder.loadTexts: docsDevCpeIp.setDescription('The IPv4 address to which this entry applies.\n\n N.B. Attempts to set all zeros or all ones address\n values MUST be rejected.')
docsDevCpeSource = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 7, 3, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3,))).clone(namedValues=NamedValues(("other", 1), ("manual", 2), ("learned", 3),))).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsDevCpeSource.setDescription('This object describes how this entry was created. If\n the value is manual(2), this row was created by a\n network management action (either configuration or\n SNMP set). If set to learned(3), then it was found via\n looking at the source IPv4 address of a received packet.\n The value other(1) is used for any entries that do not\n meet manual(2) or learned(3) criteria.')
docsDevCpeStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 7, 3, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsDevCpeStatus.setDescription('Standard object to manipulate rows. To create a row in\n this table, one only needs to specify this object.\n Management stations SHOULD use the create-and-go\n mechanism for creating rows in this table.')
docsDevCpeInetTable = MibTable((1, 3, 6, 1, 2, 1, 69, 1, 7, 4), )
if mibBuilder.loadTexts: docsDevCpeInetTable.setDescription('This table lists the IP addresses seen (or permitted) as\n source addresses in packets originating from the\n customer interface on this device. In addition, this\n table can be provisioned with the specific addresses\n permitted for the CPEs via the normal row creation\n mechanisms.\n\n N.B. Management action can add entries in this table\n and in docsDevCpeIpTable past the value of\n docsDevCpeIpMax. docsDevCpeIpMax ONLY restricts the\n ability of the CM to add learned addresses\n automatically.\n\n Table entries MUST NOT persist across reboots for any\n device.\n\n This table exactly mirrors docsDevCpeTable and applies\n to IPv4 and IPv6 addresses.')
docsDevCpeInetEntry = MibTableRow((1, 3, 6, 1, 2, 1, 69, 1, 7, 4, 1), ).setIndexNames((0, "DOCS-CABLE-DEVICE-MIB", "docsDevCpeInetType"), (0, "DOCS-CABLE-DEVICE-MIB", "docsDevCpeInetAddr"))
if mibBuilder.loadTexts: docsDevCpeInetEntry.setDescription('An entry in the docsDevFilterCpeInetTable. There is one\n entry for each IP CPE seen or provisioned. If\n docsDevCpeIpMax is set to -1, this table is ignored;\n otherwise, upon receipt of an IP packet from the\n customer interface of the CM, the source IP address is\n checked against this table. If the address is in the\n table, packet processing continues. If the address is\n not in the table but docsDevCpeEnroll is set to any and\n the sum of the table sizes for docsDevCpeTable and\n docsDevCpeInetTable is less than docsDevCpeIpMax, the\n address is added to the table, and packet processing\n continues. Otherwise, the packet is dropped.\n\n The filtering actions specified by this table occur\n after any LLC filtering (docsDevFilterLLCTable), but\n prior to any IP filtering (docsDevFilterIpTable,\n docsDevNmAccessTable).\n\n When an agent (cable modem) restarts, then all\n dynamically created rows are lost.')
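# Illustrative sketch (added; not part of the generated MIB): the packet
# admission logic described above for docsDevCpeTable / docsDevCpeInetTable,
# using hypothetical in-memory stand-ins for the two tables and for the
# docsDevCpeEnroll / docsDevCpeIpMax settings.
def _cpe_admits_packet(src_addr, cpe_table, cpe_inet_table, cpe_enroll, cpe_ip_max):
    if cpe_ip_max == -1:
        return True  # the table is ignored entirely
    if src_addr in cpe_table or src_addr in cpe_inet_table:
        return True  # known CPE address: packet processing continues
    if cpe_enroll == "any" and len(cpe_table) + len(cpe_inet_table) < cpe_ip_max:
        cpe_inet_table.add(src_addr)  # learn the new source address
        return True
    return False  # otherwise the packet is dropped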
docsDevCpeInetType = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 7, 4, 1, 1), InetAddressType())
if mibBuilder.loadTexts: docsDevCpeInetType.setDescription('The type of internet address of docsDevCpeInetAddr.')
docsDevCpeInetAddr = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 7, 4, 1, 2), InetAddress())
if mibBuilder.loadTexts: docsDevCpeInetAddr.setDescription('The Internet address to which this entry applies.\n\n Implementors need to be aware that if the size of\n docsDevCpeInetAddr exceeds 114 octets OIDs of\n instances of columns in this row will have more\n than 128 sub-identifiers and cannot be accessed\n using SNMPv1, SNMPv2c, or SNMPv3. Only unicast\n address are allowed for this object.')
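# Note (added for clarity): the 128 sub-identifier limit is reached because the
# column OID prefix (e.g. 1.3.6.1.2.1.69.1.7.4.1.2) already has 12 sub-identifiers,
# the docsDevCpeInetType index adds 1, and a non-IMPLIED variable-length
# InetAddress index is encoded as 1 length sub-identifier plus 1 per octet:
# 12 + 1 + (1 + 114) = 128.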
docsDevCpeInetSource = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 7, 4, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 3,))).clone(namedValues=NamedValues(("manual", 2), ("learned", 3),))).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsDevCpeInetSource.setDescription('This object describes how this entry was created. If\n the value is manual(2), this row was created by a\n network management action (either configuration or\n SNMP set). If set to learned(3), then it was found\n via looking at the source IP address of a received\n packet.')
docsDevCpeInetRowStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 69, 1, 7, 4, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsDevCpeInetRowStatus.setDescription('Standard object to manipulate rows. To create a row in\n this table, one only needs to specify this object.\n Management stations SHOULD use the create-and-go\n mechanism for creating rows in this table.')
docsDevNotification = MibIdentifier((1, 3, 6, 1, 2, 1, 69, 2))
docsDevNotifications = MibIdentifier((1, 3, 6, 1, 2, 1, 69, 0))
docsDevConformance = MibIdentifier((1, 3, 6, 1, 2, 1, 69, 3))
docsDevGroups = MibIdentifier((1, 3, 6, 1, 2, 1, 69, 3, 1))
docsDevCompliances = MibIdentifier((1, 3, 6, 1, 2, 1, 69, 3, 2))
docsDevBasicCompliance = ModuleCompliance((1, 3, 6, 1, 2, 1, 69, 3, 2, 1)).setObjects(*(("DOCS-CABLE-DEVICE-MIB", "docsDevBaseGroup"), ("DOCS-CABLE-DEVICE-MIB", "docsDevEventGroup"), ("DOCS-CABLE-DEVICE-MIB", "docsDevFilterGroup"), ("DOCS-CABLE-DEVICE-MIB", "docsDevNmAccessGroup"), ("DOCS-CABLE-DEVICE-MIB", "docsDevServerGroup"), ("DOCS-CABLE-DEVICE-MIB", "docsDevSoftwareGroup"), ("DOCS-CABLE-DEVICE-MIB", "docsDevCpeGroup"),))
if mibBuilder.loadTexts: docsDevBasicCompliance.setDescription('The RFC 2669 compliance statement for MCNS/DOCSIS\n Cable Modems and Cable Modem Termination Systems.')
docsDevBaseGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 69, 3, 1, 1)).setObjects(*(("DOCS-CABLE-DEVICE-MIB", "docsDevRole"), ("DOCS-CABLE-DEVICE-MIB", "docsDevDateTime"), ("DOCS-CABLE-DEVICE-MIB", "docsDevResetNow"), ("DOCS-CABLE-DEVICE-MIB", "docsDevSerialNumber"), ("DOCS-CABLE-DEVICE-MIB", "docsDevSTPControl"),))
if mibBuilder.loadTexts: docsDevBaseGroup.setDescription('A collection of objects providing device status and\n control.')
docsDevNmAccessGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 69, 3, 1, 2)).setObjects(*(("DOCS-CABLE-DEVICE-MIB", "docsDevNmAccessIp"), ("DOCS-CABLE-DEVICE-MIB", "docsDevNmAccessIpMask"), ("DOCS-CABLE-DEVICE-MIB", "docsDevNmAccessCommunity"), ("DOCS-CABLE-DEVICE-MIB", "docsDevNmAccessControl"), ("DOCS-CABLE-DEVICE-MIB", "docsDevNmAccessInterfaces"), ("DOCS-CABLE-DEVICE-MIB", "docsDevNmAccessStatus"),))
if mibBuilder.loadTexts: docsDevNmAccessGroup.setDescription('A collection of objects for controlling access to SNMP\n objects on cable devices.\n\n This group has been deprecated because all the\n objects have been deprecated in favor of SNMPv3 and\n Coexistence MIBs.')
docsDevSoftwareGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 69, 3, 1, 3)).setObjects(*(("DOCS-CABLE-DEVICE-MIB", "docsDevSwServer"), ("DOCS-CABLE-DEVICE-MIB", "docsDevSwFilename"), ("DOCS-CABLE-DEVICE-MIB", "docsDevSwAdminStatus"), ("DOCS-CABLE-DEVICE-MIB", "docsDevSwOperStatus"), ("DOCS-CABLE-DEVICE-MIB", "docsDevSwCurrentVers"),))
if mibBuilder.loadTexts: docsDevSoftwareGroup.setDescription('A collection of objects for controlling software\n downloads.\n\n This group has been deprecated and replaced by\n docsDevSoftwareGroupV2. Object docsDevSwServer\n has been replaced by docsDevSwServerAddressType\n and docsDevSwServerAddress, and\n docsDevSwServerTransportProtocol has been added to\n support TFTP and HTTP firmware downloads.')
docsDevServerGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 69, 3, 1, 4)).setObjects(*(("DOCS-CABLE-DEVICE-MIB", "docsDevServerBootState"), ("DOCS-CABLE-DEVICE-MIB", "docsDevServerDhcp"), ("DOCS-CABLE-DEVICE-MIB", "docsDevServerTime"), ("DOCS-CABLE-DEVICE-MIB", "docsDevServerTftp"), ("DOCS-CABLE-DEVICE-MIB", "docsDevServerConfigFile"),))
if mibBuilder.loadTexts: docsDevServerGroup.setDescription('A collection of objects providing status about server\n provisioning.\n\n This group has been deprecated and replaced by\n docsDevServerGroupV2. The objects docsDevServerDhcp,\n docsDevServerTime, and docsDevServerTftp have\n been replaced by docsDevServerDhcpAddressType,\n docsDevServerDhcpAddress, docsDevServerTimeAddressType,\n docsDevServerTimeAddress,\n docsDevServerConfigTftpAddressType, and\n docsDevServerConfigTftpAddress.')
docsDevEventGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 69, 3, 1, 5)).setObjects(*(("DOCS-CABLE-DEVICE-MIB", "docsDevEvControl"), ("DOCS-CABLE-DEVICE-MIB", "docsDevEvSyslog"), ("DOCS-CABLE-DEVICE-MIB", "docsDevEvThrottleAdminStatus"), ("DOCS-CABLE-DEVICE-MIB", "docsDevEvThrottleInhibited"), ("DOCS-CABLE-DEVICE-MIB", "docsDevEvThrottleThreshold"), ("DOCS-CABLE-DEVICE-MIB", "docsDevEvThrottleInterval"), ("DOCS-CABLE-DEVICE-MIB", "docsDevEvReporting"), ("DOCS-CABLE-DEVICE-MIB", "docsDevEvFirstTime"), ("DOCS-CABLE-DEVICE-MIB", "docsDevEvLastTime"), ("DOCS-CABLE-DEVICE-MIB", "docsDevEvCounts"), ("DOCS-CABLE-DEVICE-MIB", "docsDevEvLevel"), ("DOCS-CABLE-DEVICE-MIB", "docsDevEvId"), ("DOCS-CABLE-DEVICE-MIB", "docsDevEvText"),))
if mibBuilder.loadTexts: docsDevEventGroup.setDescription('A collection of objects used to control and monitor\n events.\n\n This group has been deprecated and replaced by\n docsDevEventGroupV2. The object docsDevEvSyslog has\n\n been replaced by docsDevEvSyslogAddressType and\n docsDevEvSyslogAddress, and docsDevEvThrottleInhibited\n has been replaced by\n docsDevEvThrottleThresholdExceeded.')
docsDevFilterGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 69, 3, 1, 6)).setObjects(*(("DOCS-CABLE-DEVICE-MIB", "docsDevFilterLLCUnmatchedAction"), ("DOCS-CABLE-DEVICE-MIB", "docsDevFilterIpDefault"), ("DOCS-CABLE-DEVICE-MIB", "docsDevFilterLLCStatus"), ("DOCS-CABLE-DEVICE-MIB", "docsDevFilterLLCIfIndex"), ("DOCS-CABLE-DEVICE-MIB", "docsDevFilterLLCProtocolType"), ("DOCS-CABLE-DEVICE-MIB", "docsDevFilterLLCProtocol"), ("DOCS-CABLE-DEVICE-MIB", "docsDevFilterLLCMatches"), ("DOCS-CABLE-DEVICE-MIB", "docsDevFilterIpControl"), ("DOCS-CABLE-DEVICE-MIB", "docsDevFilterIpIfIndex"), ("DOCS-CABLE-DEVICE-MIB", "docsDevFilterIpStatus"), ("DOCS-CABLE-DEVICE-MIB", "docsDevFilterIpDirection"), ("DOCS-CABLE-DEVICE-MIB", "docsDevFilterIpBroadcast"), ("DOCS-CABLE-DEVICE-MIB", "docsDevFilterIpSaddr"), ("DOCS-CABLE-DEVICE-MIB", "docsDevFilterIpSmask"), ("DOCS-CABLE-DEVICE-MIB", "docsDevFilterIpDaddr"), ("DOCS-CABLE-DEVICE-MIB", "docsDevFilterIpDmask"), ("DOCS-CABLE-DEVICE-MIB", "docsDevFilterIpProtocol"), ("DOCS-CABLE-DEVICE-MIB", "docsDevFilterIpSourcePortLow"), ("DOCS-CABLE-DEVICE-MIB", "docsDevFilterIpSourcePortHigh"), ("DOCS-CABLE-DEVICE-MIB", "docsDevFilterIpDestPortLow"), ("DOCS-CABLE-DEVICE-MIB", "docsDevFilterIpDestPortHigh"), ("DOCS-CABLE-DEVICE-MIB", "docsDevFilterIpMatches"), ("DOCS-CABLE-DEVICE-MIB", "docsDevFilterIpTos"), ("DOCS-CABLE-DEVICE-MIB", "docsDevFilterIpTosMask"), ("DOCS-CABLE-DEVICE-MIB", "docsDevFilterIpContinue"), ("DOCS-CABLE-DEVICE-MIB", "docsDevFilterIpPolicyId"), ("DOCS-CABLE-DEVICE-MIB", "docsDevFilterPolicyId"), ("DOCS-CABLE-DEVICE-MIB", "docsDevFilterPolicyStatus"), ("DOCS-CABLE-DEVICE-MIB", "docsDevFilterPolicyPtr"), ("DOCS-CABLE-DEVICE-MIB", "docsDevFilterTosStatus"), ("DOCS-CABLE-DEVICE-MIB", "docsDevFilterTosAndMask"), ("DOCS-CABLE-DEVICE-MIB", "docsDevFilterTosOrMask"),))
if mibBuilder.loadTexts: docsDevFilterGroup.setDescription('A collection of objects to specify filters at the link\n layer and IPv4 layer.\n\n This group has been deprecated and replaced by various\n groups from the DiffServ MIB.')
docsDevCpeGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 69, 3, 1, 7)).setObjects(*(("DOCS-CABLE-DEVICE-MIB", "docsDevCpeEnroll"), ("DOCS-CABLE-DEVICE-MIB", "docsDevCpeIpMax"), ("DOCS-CABLE-DEVICE-MIB", "docsDevCpeSource"), ("DOCS-CABLE-DEVICE-MIB", "docsDevCpeStatus"),))
if mibBuilder.loadTexts: docsDevCpeGroup.setDescription('A collection of objects used to control the number\n and specific values of IPv4 addresses allowed for\n associated Customer Premises Equipment (CPE).\n\n This group has been deprecated and replaced by\n docsDevInetCpeGroup. The object docsDevCpeSource has\n been replaced by docsDevCpeInetSource, and\n docsDevCpeStatus has been replaced by\n docsDevCpeInetRowStatus.')
docsDevGroupsV2 = MibIdentifier((1, 3, 6, 1, 2, 1, 69, 3, 3))
docsDevCompliancesV2 = MibIdentifier((1, 3, 6, 1, 2, 1, 69, 3, 4))
docsDevCmCompliance = ModuleCompliance((1, 3, 6, 1, 2, 1, 69, 3, 4, 1)).setObjects(*(("DIFFSERV-MIB", "diffServMIBDataPathGroup"), ("DIFFSERV-MIB", "diffServMIBClfrGroup"), ("DIFFSERV-MIB", "diffServMIBClfrElementGroup"), ("DIFFSERV-MIB", "diffServMIBMultiFieldClfrGroup"), ("DIFFSERV-MIB", "diffServMIBActionGroup"), ("DIFFSERV-MIB", "diffServMIBDscpMarkActGroup"), ("DIFFSERV-MIB", "diffServMIBCounterGroup"), ("DIFFSERV-MIB", "diffServMIBAlgDropGroup"), ("DOCS-CABLE-DEVICE-MIB", "docsDevBaseGroup"), ("DOCS-CABLE-DEVICE-MIB", "docsDevBaseIgmpGroup"), ("DOCS-CABLE-DEVICE-MIB", "docsDevBaseMaxCpeGroup"), ("DOCS-CABLE-DEVICE-MIB", "docsDevSoftwareGroupV2"), ("DOCS-CABLE-DEVICE-MIB", "docsDevServerGroupV2"), ("DOCS-CABLE-DEVICE-MIB", "docsDevEventGroupV2"), ("DOCS-CABLE-DEVICE-MIB", "docsDevFilterLLCGroup"), ("DOCS-CABLE-DEVICE-MIB", "docsDevInetCpeGroup"),))
if mibBuilder.loadTexts: docsDevCmCompliance.setDescription('The compliance statement for DOCSIS Cable Modems.\n\n This compliance statement applies to implementations\n of DOCSIS versions that are not IPv6 capable.')
docsDevCmtsCompliance = ModuleCompliance((1, 3, 6, 1, 2, 1, 69, 3, 4, 2)).setObjects(*(("DOCS-CABLE-DEVICE-MIB", "docsDevBaseGroup"), ("DOCS-CABLE-DEVICE-MIB", "docsDevBaseIgmpGroup"), ("DOCS-CABLE-DEVICE-MIB", "docsDevBaseMaxCpeGroup"), ("DOCS-CABLE-DEVICE-MIB", "docsDevSoftwareGroupV2"), ("DOCS-CABLE-DEVICE-MIB", "docsDevServerGroupV2"), ("DOCS-CABLE-DEVICE-MIB", "docsDevEventGroupV2"), ("DOCS-CABLE-DEVICE-MIB", "docsDevFilterLLCGroup"), ("DOCS-CABLE-DEVICE-MIB", "docsDevInetCpeGroup"),))
if mibBuilder.loadTexts: docsDevCmtsCompliance.setDescription('The compliance statement for DOCSIS Cable Modem\n Termination Systems.\n\n This compliance statement applies to implementations\n of DOCSIS versions that are not IPv6 capable.')
docsDevBaseIgmpGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 69, 3, 3, 1)).setObjects(*(("DOCS-CABLE-DEVICE-MIB", "docsDevIgmpModeControl"),))
if mibBuilder.loadTexts: docsDevBaseIgmpGroup.setDescription('An object providing cable device IGMP status and\n control.')
docsDevBaseMaxCpeGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 69, 3, 3, 2)).setObjects(*(("DOCS-CABLE-DEVICE-MIB", "docsDevMaxCpe"),))
if mibBuilder.loadTexts: docsDevBaseMaxCpeGroup.setDescription('An object providing management of the maximum number of\n CPEs permitted access through a cable modem.')
docsDevNmAccessExtGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 69, 3, 3, 3)).setObjects(*(("DOCS-CABLE-DEVICE-MIB", "docsDevNmAccessTrapVersion"),))
if mibBuilder.loadTexts: docsDevNmAccessExtGroup.setDescription('An object, in addition to the objects in\n docsDevNmAccessGroup, for controlling access to\n SNMP objects on cable devices.\n\n This group is included in this MIB due to existing\n implementations of docsDevNmAccessTrapVersion in\n DOCSIS cable modems.\n\n This group has been deprecated because the object has\n been deprecated in favor of SNMPv3 and Coexistence\n MIBs.')
docsDevSoftwareGroupV2 = ObjectGroup((1, 3, 6, 1, 2, 1, 69, 3, 3, 4)).setObjects(*(("DOCS-CABLE-DEVICE-MIB", "docsDevSwFilename"), ("DOCS-CABLE-DEVICE-MIB", "docsDevSwAdminStatus"), ("DOCS-CABLE-DEVICE-MIB", "docsDevSwOperStatus"), ("DOCS-CABLE-DEVICE-MIB", "docsDevSwCurrentVers"), ("DOCS-CABLE-DEVICE-MIB", "docsDevSwServerAddressType"), ("DOCS-CABLE-DEVICE-MIB", "docsDevSwServerAddress"), ("DOCS-CABLE-DEVICE-MIB", "docsDevSwServerTransportProtocol"),))
if mibBuilder.loadTexts: docsDevSoftwareGroupV2.setDescription('A collection of objects for controlling software\n downloads. This group replaces docsDevSoftwareGroup.')
docsDevServerGroupV2 = ObjectGroup((1, 3, 6, 1, 2, 1, 69, 3, 3, 5)).setObjects(*(("DOCS-CABLE-DEVICE-MIB", "docsDevServerBootState"), ("DOCS-CABLE-DEVICE-MIB", "docsDevServerDhcpAddressType"), ("DOCS-CABLE-DEVICE-MIB", "docsDevServerDhcpAddress"), ("DOCS-CABLE-DEVICE-MIB", "docsDevServerTimeAddressType"), ("DOCS-CABLE-DEVICE-MIB", "docsDevServerTimeAddress"), ("DOCS-CABLE-DEVICE-MIB", "docsDevServerConfigTftpAddressType"), ("DOCS-CABLE-DEVICE-MIB", "docsDevServerConfigTftpAddress"), ("DOCS-CABLE-DEVICE-MIB", "docsDevServerConfigFile"),))
if mibBuilder.loadTexts: docsDevServerGroupV2.setDescription('A collection of objects providing status about server\n provisioning. This group replaces docsDevServerGroup.')
docsDevEventGroupV2 = ObjectGroup((1, 3, 6, 1, 2, 1, 69, 3, 3, 6)).setObjects(*(("DOCS-CABLE-DEVICE-MIB", "docsDevEvControl"), ("DOCS-CABLE-DEVICE-MIB", "docsDevEvThrottleAdminStatus"), ("DOCS-CABLE-DEVICE-MIB", "docsDevEvThrottleThreshold"), ("DOCS-CABLE-DEVICE-MIB", "docsDevEvThrottleInterval"), ("DOCS-CABLE-DEVICE-MIB", "docsDevEvReporting"), ("DOCS-CABLE-DEVICE-MIB", "docsDevEvFirstTime"), ("DOCS-CABLE-DEVICE-MIB", "docsDevEvLastTime"), ("DOCS-CABLE-DEVICE-MIB", "docsDevEvCounts"), ("DOCS-CABLE-DEVICE-MIB", "docsDevEvLevel"), ("DOCS-CABLE-DEVICE-MIB", "docsDevEvId"), ("DOCS-CABLE-DEVICE-MIB", "docsDevEvText"), ("DOCS-CABLE-DEVICE-MIB", "docsDevEvSyslogAddressType"), ("DOCS-CABLE-DEVICE-MIB", "docsDevEvSyslogAddress"), ("DOCS-CABLE-DEVICE-MIB", "docsDevEvThrottleThresholdExceeded"),))
if mibBuilder.loadTexts: docsDevEventGroupV2.setDescription('A collection of objects used to control and monitor\n events. This group replaces docsDevEventGroup.\n The event reporting mechanism, and more specifically\n docsDevEvReporting, can be used to take advantage of\n the event reporting features of RFC3413 and RFC3014.')
docsDevFilterLLCGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 69, 3, 3, 7)).setObjects(*(("DOCS-CABLE-DEVICE-MIB", "docsDevFilterLLCUnmatchedAction"), ("DOCS-CABLE-DEVICE-MIB", "docsDevFilterLLCStatus"), ("DOCS-CABLE-DEVICE-MIB", "docsDevFilterLLCIfIndex"), ("DOCS-CABLE-DEVICE-MIB", "docsDevFilterLLCProtocolType"), ("DOCS-CABLE-DEVICE-MIB", "docsDevFilterLLCProtocol"), ("DOCS-CABLE-DEVICE-MIB", "docsDevFilterLLCMatches"),))
if mibBuilder.loadTexts: docsDevFilterLLCGroup.setDescription('A collection of objects to specify link layer filters.')
docsDevInetCpeGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 69, 3, 3, 8)).setObjects(*(("DOCS-CABLE-DEVICE-MIB", "docsDevCpeEnroll"), ("DOCS-CABLE-DEVICE-MIB", "docsDevCpeIpMax"), ("DOCS-CABLE-DEVICE-MIB", "docsDevCpeInetSource"), ("DOCS-CABLE-DEVICE-MIB", "docsDevCpeInetRowStatus"),))
if mibBuilder.loadTexts: docsDevInetCpeGroup.setDescription('A collection of objects used to control the number\n and specific values of Internet (e.g., IPv4 and IPv6)\n addresses allowed for associated Customer Premises\n Equipment (CPE).')
mibBuilder.exportSymbols("DOCS-CABLE-DEVICE-MIB", docsDevBase=docsDevBase, docsDevEventEntry=docsDevEventEntry, docsDevNmAccessGroup=docsDevNmAccessGroup, docsDevFilterLLCMatches=docsDevFilterLLCMatches, docsDevCpeTable=docsDevCpeTable, docsDevNmAccessControl=docsDevNmAccessControl, docsDevCpeInetSource=docsDevCpeInetSource, docsDevFilterIpTable=docsDevFilterIpTable, docsDevCmtsCompliance=docsDevCmtsCompliance, docsDevBasicCompliance=docsDevBasicCompliance, docsDevServerDhcpAddress=docsDevServerDhcpAddress, docsDevNotification=docsDevNotification, docsDevFilterLLCEntry=docsDevFilterLLCEntry, docsDevGroups=docsDevGroups, docsDevFilterIpSmask=docsDevFilterIpSmask, docsDevServerBootState=docsDevServerBootState, docsDevFilterLLCTable=docsDevFilterLLCTable, docsDevEvSyslog=docsDevEvSyslog, docsDevEvThrottleInhibited=docsDevEvThrottleInhibited, docsDevServerGroupV2=docsDevServerGroupV2, docsDevEventGroupV2=docsDevEventGroupV2, docsDevCpeGroup=docsDevCpeGroup, docsDevNmAccessInterfaces=docsDevNmAccessInterfaces, docsDevEvId=docsDevEvId, docsDevFilterIpDefault=docsDevFilterIpDefault, docsDevCompliances=docsDevCompliances, docsDevFilterPolicyTable=docsDevFilterPolicyTable, docsDevEventTable=docsDevEventTable, docsDevFilterLLCIfIndex=docsDevFilterLLCIfIndex, docsDevNotifications=docsDevNotifications, docsDevEvReporting=docsDevEvReporting, docsDevServerConfigFile=docsDevServerConfigFile, docsDevFilterLLCProtocol=docsDevFilterLLCProtocol, docsDevCpeInetType=docsDevCpeInetType, docsDevNmAccessIp=docsDevNmAccessIp, docsDevConformance=docsDevConformance, docsDevCpeInetEntry=docsDevCpeInetEntry, docsDevServerTimeAddressType=docsDevServerTimeAddressType, docsDevEvControl=docsDevEvControl, docsDevCmCompliance=docsDevCmCompliance, docsDevEvLastTime=docsDevEvLastTime, docsDevSwServerAddressType=docsDevSwServerAddressType, docsDevNmAccessIndex=docsDevNmAccessIndex, docsDevFilterTosTable=docsDevFilterTosTable, docsDevFilterIpSourcePortLow=docsDevFilterIpSourcePortLow, docsDevFilterTosIndex=docsDevFilterTosIndex, docsDevNmAccessTrapVersion=docsDevNmAccessTrapVersion, docsDevFilterIpControl=docsDevFilterIpControl, docsDevSwFilename=docsDevSwFilename, docsDevFilterTosAndMask=docsDevFilterTosAndMask, docsDevMIBObjects=docsDevMIBObjects, docsDevNmAccessExtGroup=docsDevNmAccessExtGroup, docsDevSwCurrentVers=docsDevSwCurrentVers, docsDevCpeInetAddr=docsDevCpeInetAddr, docsDevFilterTosStatus=docsDevFilterTosStatus, docsDevEvThrottleInterval=docsDevEvThrottleInterval, docsDevGroupsV2=docsDevGroupsV2, docsDevEvControlTable=docsDevEvControlTable, docsDevFilterGroup=docsDevFilterGroup, docsDevFilterLLCStatus=docsDevFilterLLCStatus, docsDevNmAccessTable=docsDevNmAccessTable, docsDevSwServer=docsDevSwServer, docsDevFilterIpDestPortHigh=docsDevFilterIpDestPortHigh, docsDevServerGroup=docsDevServerGroup, docsDevNmAccessIpMask=docsDevNmAccessIpMask, docsDevSTPControl=docsDevSTPControl, docsDevEvControlEntry=docsDevEvControlEntry, docsDevMaxCpe=docsDevMaxCpe, PYSNMP_MODULE_ID=docsDev, docsDevEvent=docsDevEvent, docsDevEvFirstTime=docsDevEvFirstTime, docsDevNmAccessStatus=docsDevNmAccessStatus, docsDevSerialNumber=docsDevSerialNumber, docsDevEvThrottleThreshold=docsDevEvThrottleThreshold, docsDevCpeIp=docsDevCpeIp, docsDev=docsDev, docsDevCpeEnroll=docsDevCpeEnroll, docsDevBaseMaxCpeGroup=docsDevBaseMaxCpeGroup, docsDevEventGroup=docsDevEventGroup, docsDevBaseIgmpGroup=docsDevBaseIgmpGroup, docsDevFilterIpEntry=docsDevFilterIpEntry, docsDevSoftwareGroup=docsDevSoftwareGroup, docsDevEvPriority=docsDevEvPriority, 
docsDevEvThrottleThresholdExceeded=docsDevEvThrottleThresholdExceeded, docsDevEvSyslogAddress=docsDevEvSyslogAddress, docsDevNmAccessCommunity=docsDevNmAccessCommunity, docsDevEvThrottleAdminStatus=docsDevEvThrottleAdminStatus, docsDevEvCounts=docsDevEvCounts, docsDevFilterIpIndex=docsDevFilterIpIndex, docsDevResetNow=docsDevResetNow, docsDevSwServerAddress=docsDevSwServerAddress, docsDevNmAccessEntry=docsDevNmAccessEntry, docsDevDateTime=docsDevDateTime, docsDevSwServerTransportProtocol=docsDevSwServerTransportProtocol, docsDevServerTime=docsDevServerTime, docsDevServerDhcpAddressType=docsDevServerDhcpAddressType, docsDevInetCpeGroup=docsDevInetCpeGroup, docsDevServerConfigTftpAddress=docsDevServerConfigTftpAddress, docsDevFilterIpBroadcast=docsDevFilterIpBroadcast, docsDevFilterTosEntry=docsDevFilterTosEntry, docsDevRole=docsDevRole, docsDevFilterIpDaddr=docsDevFilterIpDaddr, docsDevFilterIpMatches=docsDevFilterIpMatches, docsDevCompliancesV2=docsDevCompliancesV2, docsDevEvLevel=docsDevEvLevel, docsDevFilterIpContinue=docsDevFilterIpContinue, docsDevCpe=docsDevCpe, docsDevEvSyslogAddressType=docsDevEvSyslogAddressType, docsDevServerDhcp=docsDevServerDhcp, docsDevFilterIpDestPortLow=docsDevFilterIpDestPortLow, docsDevFilterPolicyIndex=docsDevFilterPolicyIndex, docsDevServerTftp=docsDevServerTftp, docsDevFilterPolicyEntry=docsDevFilterPolicyEntry, docsDevFilterLLCProtocolType=docsDevFilterLLCProtocolType, docsDevEvIndex=docsDevEvIndex, docsDevFilterPolicyStatus=docsDevFilterPolicyStatus, docsDevServerConfigTftpAddressType=docsDevServerConfigTftpAddressType, docsDevFilterPolicyPtr=docsDevFilterPolicyPtr, docsDevSoftwareGroupV2=docsDevSoftwareGroupV2, docsDevServer=docsDevServer, docsDevFilterLLCIndex=docsDevFilterLLCIndex, docsDevEvText=docsDevEvText, docsDevCpeIpMax=docsDevCpeIpMax, docsDevFilterIpSourcePortHigh=docsDevFilterIpSourcePortHigh, docsDevBaseGroup=docsDevBaseGroup, docsDevFilterPolicyId=docsDevFilterPolicyId, docsDevFilterLLCUnmatchedAction=docsDevFilterLLCUnmatchedAction, docsDevFilterIpSaddr=docsDevFilterIpSaddr, docsDevFilterTosOrMask=docsDevFilterTosOrMask, docsDevCpeEntry=docsDevCpeEntry, docsDevFilterIpIfIndex=docsDevFilterIpIfIndex, docsDevFilterIpPolicyId=docsDevFilterIpPolicyId, docsDevCpeInetRowStatus=docsDevCpeInetRowStatus, docsDevServerTimeAddress=docsDevServerTimeAddress, docsDevSoftware=docsDevSoftware, docsDevFilterIpTos=docsDevFilterIpTos, docsDevCpeSource=docsDevCpeSource, docsDevFilterIpProtocol=docsDevFilterIpProtocol, docsDevIgmpModeControl=docsDevIgmpModeControl, docsDevCpeStatus=docsDevCpeStatus, docsDevFilterIpTosMask=docsDevFilterIpTosMask, docsDevFilterIpDmask=docsDevFilterIpDmask, docsDevSwOperStatus=docsDevSwOperStatus, docsDevFilterLLCGroup=docsDevFilterLLCGroup, docsDevFilter=docsDevFilter, docsDevSwAdminStatus=docsDevSwAdminStatus, docsDevFilterIpStatus=docsDevFilterIpStatus, docsDevFilterIpDirection=docsDevFilterIpDirection, docsDevCpeInetTable=docsDevCpeInetTable)
| 350.042623
| 6,497
| 0.704139
|
c6e04bc0342dca732e1080ec65ab77a24c9e06e4
| 11,513
|
py
|
Python
|
samples/openapi3/client/petstore/python/petstore_api/model/some_object_with_self_attr.py
|
rotty3000/openapi-generator
|
40d3331e789412d8f42df0148cc089a9d330b759
|
[
"Apache-2.0"
] | 1
|
2022-01-11T15:49:34.000Z
|
2022-01-11T15:49:34.000Z
|
samples/openapi3/client/petstore/python/petstore_api/model/some_object_with_self_attr.py
|
rotty3000/openapi-generator
|
40d3331e789412d8f42df0148cc089a9d330b759
|
[
"Apache-2.0"
] | 9
|
2021-11-01T08:59:31.000Z
|
2022-03-31T08:31:57.000Z
|
samples/openapi3/client/petstore/python/petstore_api/model/some_object_with_self_attr.py
|
rotty3000/openapi-generator
|
40d3331e789412d8f42df0148cc089a9d330b759
|
[
"Apache-2.0"
] | 1
|
2022-02-19T21:56:04.000Z
|
2022-02-19T21:56:04.000Z
|
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from petstore_api.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from petstore_api.exceptions import ApiAttributeError
class SomeObjectWithSelfAttr(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
          and for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
          and for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
        of type self; it must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'_self': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'_self': 'self', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""SomeObjectWithSelfAttr - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
_self (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', True)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""SomeObjectWithSelfAttr - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
_self (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
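# Illustrative usage (added; not part of the generated module): the pythonic
# attribute name `_self` maps to the JSON key "self" through attribute_map,
# so instances are constructed with the `_self` keyword argument.
if __name__ == "__main__":
    _example = SomeObjectWithSelfAttr(_self="example")
    assert _example._self == "example"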
| 43.609848
| 174
| 0.560149
|
f328284d63a7544b1d3bb113e149cf446d9cd2ce
| 2,492
|
py
|
Python
|
src/the_tale/the_tale/blogs/migrations/0001_initial.py
|
devapromix/the-tale
|
2a10efd3270734f8cf482b4cfbc5353ef8f0494c
|
[
"BSD-3-Clause"
] | 1
|
2020-04-02T11:51:20.000Z
|
2020-04-02T11:51:20.000Z
|
src/the_tale/the_tale/blogs/migrations/0001_initial.py
|
devapromix/the-tale
|
2a10efd3270734f8cf482b4cfbc5353ef8f0494c
|
[
"BSD-3-Clause"
] | null | null | null |
src/the_tale/the_tale/blogs/migrations/0001_initial.py
|
devapromix/the-tale
|
2a10efd3270734f8cf482b4cfbc5353ef8f0494c
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.db import models, migrations
import rels.django
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('forum', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('caption', models.CharField(max_length=256)),
('text', models.TextField(default=b'', blank=True)),
('state', rels.django.RelationIntegerField(db_index=True)),
('votes', models.IntegerField(default=0)),
('author', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True)),
('forum_thread', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='forum.Thread', null=True)),
('moderator', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to=settings.AUTH_USER_MODEL, null=True)),
],
options={
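                # The escaped label below is Russian for "Can edit users' posts".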
'permissions': (('moderate_post', '\u041c\u043e\u0436\u0435\u0442 \u0440\u0435\u0434\u0430\u043a\u0442\u0438\u0440\u043e\u0432\u0430\u0442\u044c \u0441\u043e\u043e\u0431\u0449\u0435\u043d\u0438\u044f \u043f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u0435\u0439'),),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Vote',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('post', models.ForeignKey(related_name='+', to='blogs.Post')),
('voter', models.ForeignKey(related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='vote',
unique_together=set([('voter', 'post')]),
),
]
| 46.148148
| 299
| 0.60634
|
5a598cef6f499f4f2bcb63e8cf4de64ebac72c7a
| 4,224
|
py
|
Python
|
build_windows/gui_interface.py
|
grigorievich/Viy
|
c78cfb55e548ce40b9fe3756e4ce2a923b84a552
|
[
"Apache-2.0"
] | 14
|
2017-09-08T09:20:37.000Z
|
2020-09-20T10:56:22.000Z
|
build_windows/gui_interface.py
|
grigorievich/Viy
|
c78cfb55e548ce40b9fe3756e4ce2a923b84a552
|
[
"Apache-2.0"
] | null | null | null |
build_windows/gui_interface.py
|
grigorievich/Viy
|
c78cfb55e548ce40b9fe3756e4ce2a923b84a552
|
[
"Apache-2.0"
] | 2
|
2018-02-14T16:52:33.000Z
|
2018-06-20T22:16:02.000Z
|
from __future__ import print_function, division
import math
import ui
import messages_from_ui
class GtkUI(object):
"""Gtk+ UI class."""
def __init__(self, font):
"""Initialize the UI instance."""
pass
def start(self, bridge):
"""Start the UI event loop."""
bridge.attach(80, 24, True)
self._bridge = bridge
def register_poller(method, period):
import time
from threading import Thread
def poller():
while True:
time.sleep(period)
method()
t = Thread(target = poller, daemon = True)
t.start()
def input_poll():
inp = messages_from_ui.check_for_user_input()
if inp != '':
self._bridge.input(inp)
def resize_poll():
new_size = messages_from_ui.check_for_resize()
'''expects (cols, rows) format'''
if new_size[0] != -1:
self._bridge.resize(new_size[0], new_size[1])
def quit_poll():
need_to_quit = messages_from_ui.check_for_quit()
if need_to_quit == 1:
print("calling self._bridge.exit()")
self._bridge.exit()
register_poller(input_poll, 0.001)
register_poller(resize_poll, 0.1)
register_poller(quit_poll, 0.1)
def quit(self):
ui.quit()
def switch_to_navigator(self):
ui.switch_to_navigator()
def _nvim_resize(self, columns, rows):
'''expects (rows, columns) format'''
ui.resize(rows, columns)
def _nvim_clear(self):
ui.clear()
def _nvim_lock_update_mutex(self):
ui.lock_update_mutex()
def _nvim_unlock_update_mutex(self):
ui.unlock_update_mutex()
def _nvim_redraw(self):
ui.schedule_redraw()
def _nvim_eol_clear(self):
ui.eol_clear()
def _nvim_cursor_goto(self, row, col):
ui.cursor_goto(row, col)
def _nvim_busy_start(self):
pass
def _nvim_busy_stop(self):
pass
def _nvim_mouse_on(self):
pass
def _nvim_mouse_off(self):
pass
def _nvim_mode_change(self, mode):
if mode == 'normal':
ui.change_mode(0)
if mode == 'insert':
ui.change_mode(1)
def _nvim_set_scroll_region(self, top, bot, left, right):
ui.set_scroll_region(top, bot, left, right)
def _nvim_scroll(self, count):
ui.scroll(count)
def _nvim_highlight_set(self, attrs):
if attrs == {}:
'''reset highlight set'''
ui.highlight_set(-1, 0)
return
attributeIDs = {'bold' : 0,
'underline' : 1,
'undercurl' : 2,
'italic' : 3,
'reverse' : 4,
'foreground' : 5,
'background' : 6,
'special' : 7}
for attr, value in attrs.items():
            if isinstance(value, bool):
                cvalue = 1 if value else 0
ui.highlight_set(attributeIDs[attr], cvalue)
#RPC(5, intToBytes(attributeIDs[attr]) + intToBytes(cvalue))
else:
ui.highlight_set(attributeIDs[attr], value)
#RPC(5, intToBytes(attributeIDs[attr]) + intToBytes(value))
def _nvim_put(self, text):
ui.put(text)
def _nvim_bell(self):
pass
def _nvim_visual_bell(self):
pass
def _nvim_update_fg(self, fg):
'''
fg could be negative, but update_fg receives an unsigned int.
The resulting conversion is what is apparently expected
by nvim, so simply passing should work.
'''
if fg < 0: fg = 0
ui.update_fg(fg)
def _nvim_update_bg(self, bg):
#bg could be negative
if bg < 0: bg = 0xFFFFFF
ui.update_bg(bg)
def _nvim_suspend(self):
pass
def _nvim_set_title(self, title):
ui.set_title(title)
def _nvim_set_icon(self, icon):
pass
| 26.236025
| 76
| 0.533617
|
fad35c486de48f087787b3e7b228c44cc14f5e49
| 632
|
py
|
Python
|
nima/train/emd_loss.py
|
jjmao-cs/nima.pytorch
|
62f1dda7b146d47084bb414933380c324487fb4e
|
[
"MIT"
] | 73
|
2020-10-15T06:38:56.000Z
|
2022-03-24T07:32:10.000Z
|
nima/train/emd_loss.py
|
jjmao-cs/nima.pytorch
|
62f1dda7b146d47084bb414933380c324487fb4e
|
[
"MIT"
] | 11
|
2020-12-17T06:21:27.000Z
|
2022-02-04T04:56:57.000Z
|
nima/train/emd_loss.py
|
jjmao-cs/nima.pytorch
|
62f1dda7b146d47084bb414933380c324487fb4e
|
[
"MIT"
] | 10
|
2020-10-16T13:11:34.000Z
|
2022-03-24T12:19:43.000Z
|
import torch
import torch.nn as nn
from torch.autograd import Variable
class EDMLoss(nn.Module):
def __init__(self):
super(EDMLoss, self).__init__()
def forward(self, p_target: Variable, p_estimate: Variable):
assert p_target.shape == p_estimate.shape
# cdf for values [1, 2, ..., 10]
cdf_target = torch.cumsum(p_target, dim=1)
# cdf for values [1, 2, ..., 10]
cdf_estimate = torch.cumsum(p_estimate, dim=1)
cdf_diff = cdf_estimate - cdf_target
samplewise_emd = torch.sqrt(torch.mean(torch.pow(torch.abs(cdf_diff), 2)))
return samplewise_emd.mean()
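# Illustrative usage (added; not part of the original module): the loss expects
# two batches of normalized score distributions, e.g. softmax outputs over the
# ten NIMA score buckets; shapes and values below are made up for the example.
if __name__ == "__main__":
    torch.manual_seed(0)
    p_target = torch.softmax(torch.randn(4, 10), dim=1)
    p_estimate = torch.softmax(torch.randn(4, 10), dim=1)
    print(EDMLoss()(p_target, p_estimate).item())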
| 33.263158
| 82
| 0.653481
|
2c399793654938b9bfa06a8ac3bd0bbc7b8657de
| 1,474
|
py
|
Python
|
python_schools/iterators.py
|
nussbrot/code-exchange
|
3427798c4391e5f9c17cd19399ac56997c091ee2
|
[
"MIT"
] | null | null | null |
python_schools/iterators.py
|
nussbrot/code-exchange
|
3427798c4391e5f9c17cd19399ac56997c091ee2
|
[
"MIT"
] | null | null | null |
python_schools/iterators.py
|
nussbrot/code-exchange
|
3427798c4391e5f9c17cd19399ac56997c091ee2
|
[
"MIT"
] | null | null | null |
# the iterator protocol needs functions __iter__() and __next__()
class iterator_protocol(object):
def __init__(self, *args):
self._data = args
self._idx = 0
def __iter__(self):
return self
def __next__(self):
if self._idx < len(self._data):
tmp = self._data[self._idx]
self._idx += 1
return tmp
else:
raise StopIteration
iterator = iterator_protocol(1, "Suppe", None, 3.12)
for item in iterator:
print(item)
# the same iterator implemented as generator function
def generator(*args):
for item in args:
yield item
for item in generator(1, "Suppe", None, 3.12):
print(item)
# Generators can also be created by putting list comprehensions in round brackets
print("\nSquares")
squares = (x*x for x in range(10))
for item in squares:
print(item)
# iterators also work recursively.
# let's take a look at Guido's binary tree inorder traversal:
def inorder(t):
if t:
for x in inorder(t.left):
yield x
yield t.dat
for x in inorder(t.right):
yield x
class tree(object):
def __init__(self, dat, left=None, right=None):
self.dat = dat
self.left = left
self.right = right
# A small test tree:
# 10
# /\
# 7 13
# /\
# 5 9
my_tree = tree(10, tree(7, tree(5), tree(9)), tree(13))
print("\nBinary tree traversal")
for node in inorder(my_tree):
print(node)
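# Added note: since Python 3.3 a recursive generator like inorder() can delegate
# with `yield from`, which is equivalent to the explicit inner loops above.
def inorder_yield_from(t):
    if t:
        yield from inorder_yield_from(t.left)
        yield t.dat
        yield from inorder_yield_from(t.right)
print("\nBinary tree traversal with yield from")
for node in inorder_yield_from(my_tree):
    print(node)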
| 21.676471
| 81
| 0.612619
|
42fe8f6b717c18e981e9e6dd510736e9d42f38a2
| 8,310
|
py
|
Python
|
unsupervised_learning/clustering/OPTICS/sample_scratch.py
|
niektuytel/Machine_Learning
|
0cd5656ca8076c383fd81c5e32a49969a20ad042
|
[
"MIT"
] | 11
|
2021-07-05T15:51:35.000Z
|
2022-03-19T15:17:37.000Z
|
unsupervised_learning/clustering/OPTICS/sample_scratch.py
|
niektuytel/Machine_Learning
|
0cd5656ca8076c383fd81c5e32a49969a20ad042
|
[
"MIT"
] | null | null | null |
unsupervised_learning/clustering/OPTICS/sample_scratch.py
|
niektuytel/Machine_Learning
|
0cd5656ca8076c383fd81c5e32a49969a20ad042
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import operator, math
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
# data
X = np.array([[15, 39], [15, 81], [16, 6], [16, 77], [17, 40], [17, 76], [18, 6], [18, 94], [19, 3], [19, 72], [19, 14], [19, 99], [20, 15], [20, 77], [20, 13], [20, 79], [21, 35], [21, 66], [23, 29], [23, 98], [24, 35], [24, 73], [25, 5], [25, 73], [28, 14], [28, 82], [28, 32], [28, 61], [29, 31], [29, 87], [30, 4], [30, 73], [33, 4], [33, 92], [33, 14], [33, 81], [34, 17], [34, 73], [37, 26], [37, 75], [38, 35], [38, 92], [39, 36], [39, 61], [39, 28], [39, 65], [40, 55], [40, 47], [40, 42], [40, 42], [42, 52], [42, 60], [43, 54], [43, 60], [43, 45], [43, 41], [44, 50], [44, 46], [46, 51], [46, 46], [46, 56], [46, 55], [47, 52], [47, 59], [48, 51], [48, 59], [48, 50], [48, 48], [48, 59], [48, 47], [49, 55], [49, 42], [50, 49], [50, 56], [54, 47], [54, 54], [54, 53], [54, 48], [54, 52], [54, 42], [54, 51], [54, 55], [54, 41], [54, 44], [54, 57], [54, 46], [57, 58], [57, 55], [58, 60], [58, 46], [59, 55], [59, 41], [60, 49], [60, 40], [60, 42], [60, 52], [60, 47], [60, 50], [61, 42], [61, 49], [62, 41], [62, 48], [62, 59], [62, 55], [62, 56], [62, 42], [63, 50], [63, 46], [63, 43], [63, 48], [63, 52], [63, 54], [64, 42], [64, 46], [65, 48], [65, 50], [65, 43], [65, 59], [67, 43], [67, 57], [67, 56], [67, 40], [69, 58], [69, 91], [70, 29], [70, 77], [71, 35], [71, 95], [71, 11], [71, 75], [71, 9], [71, 75], [72, 34], [72, 71], [73, 5], [73, 88], [73, 7], [73, 73], [74, 10], [74, 72], [75, 5], [75, 93], [76, 40], [76, 87], [77, 12], [77, 97], [77, 36], [77, 74], [78, 22], [78, 90], [78, 17], [78, 88], [78, 20], [78, 76], [78, 16], [78, 89], [78, 1], [78, 78], [78, 1], [78, 73], [79, 35], [79, 83], [81, 5], [81, 93], [85, 26], [85, 75], [86, 20], [86, 95], [87, 27], [87, 63], [87, 13], [87, 75], [87, 10], [87, 92], [88, 13], [88, 86], [88, 15], [88, 69], [93, 14], [93, 90], [97, 32], [97, 86], [98, 15], [98, 88], [99, 39], [99, 97], [101, 24], [101, 68], [103, 17], [103, 85], [103, 23], [103, 69], [113, 8], [113, 91], [120, 16], [120, 79], [126, 28], [126, 74], [137, 18], [137, 83]])
# Algorithms
def euclidean_distance(x1, x2):
""" Calculates the l2 distance between two vectors """
distance = 0
# Squared distance between each coordinate
for i in range(len(x1)):
distance += pow((x1[i] - x2[i]), 2)
return math.sqrt(distance)
class OPTICS():
def __init__(self, epsilon=10, min_samples=10, metric="euclidean"):
self.epsilon=epsilon
self.min_samples=min_samples
self.metric=metric
def _get_core_distances(self):
        # index (0-based) of the min_samples-th nearest neighbour
        min_index = self.min_samples - 1
        # distance from every sample to its min_samples-th nearest neighbour
temp = self.adjacency_matrix[
np.arange(self.adjacency_matrix.shape[0]),
np.argsort(self.adjacency_matrix)[:, min_index]
]
        # keep core distances that are <= self.epsilon; everything else becomes -1
return np.where(temp <= self.epsilon, temp, -1)
def _get_neighbors(self, sample_i):
"""
        Return a list of indexes of neighboring samples.
        sample_2 is considered a neighbor of sample_1 if the distance
        between them is at most self.epsilon.
        """
        data = self.X
        neighbors = []
        # Iterate over the original indexes (skipping the sample itself) so the
        # returned indexes line up with rows of the adjacency matrix.
        for i in range(len(data)):
            if i == sample_i:
                continue
            distance = euclidean_distance(data[sample_i], data[i])
            if distance <= self.epsilon:
                neighbors.append(i)
        return np.array(neighbors)
    def _update_reachable_distances(self, sample_i, neighbors, seeds=None):
        # Avoid a shared mutable default argument; callers may pass an existing dict.
        if seeds is None:
            seeds = dict()
        # Iterate through the neighbors and update their reachability distances
for neighbor in neighbors:
if self.visited_samples[neighbor]:
continue
            # reachability distance of this neighbor with respect to sample_i
new_reach_dist = max(
self.core_distances[sample_i],
self.adjacency_matrix[sample_i][neighbor]
)
seeds[neighbor] = self.reachable_distances[neighbor] = min(
self.reachable_distances[neighbor],
new_reach_dist
)
return seeds
def _get_cluster_labels(self, orders):
        # positions in the ordered list whose reachability distance is within
        # epsilon; these positions index into the ordered sample list
clusters = np.where(self.reachable_distances[orders] <= self.epsilon)[0]
        # Consecutive positions belong to the same cluster; a gap of more than
        # one position starts a new cluster.
pre = clusters[0] - 1
clusterId = 0
# Will make sure all outliers have same cluster label
labels = np.full(shape=self.X.shape[0], fill_value=0)
for cluster_i, cluster in enumerate(clusters):
            # A gap of more than one position from the previous cluster member
            # starts a new cluster.
if(cluster - pre != 1):
clusterId = clusterId + 1
labels[orders[cluster]]=clusterId
pre=cluster
return labels
def fit_predict(self, X, is_adjacency_matrix=False):
self.X = X
# Compute the adjacency matrix
if not is_adjacency_matrix:
dist = pdist(X, metric=self.metric)
self.adjacency_matrix = squareform(dist)
else:
self.adjacency_matrix = X
self.visited_samples = np.zeros(self.X.shape[0])
self.reachable_distances = np.full(self.X.shape[0], np.inf)
self.core_distances = self._get_core_distances()
        # for each sample, count how many samples lie within epsilon of it
summed_matches = np.sum(np.where(self.adjacency_matrix <= self.epsilon, 1, 0), axis=1)
core_samples = np.where(summed_matches >= self.min_samples)[0]
used_samples = []
        # Iterate through the core samples, expanding from each unvisited one,
        # so that all clusters are grouped by the end
for core_sample in core_samples:
if self.visited_samples[core_sample]:
continue
            # mark as visited and record the processing order
self.visited_samples[core_sample] = int(True)
used_samples.append(core_sample)
# Find all points (samples) in epsilon range
neighbors = self._get_neighbors(core_sample)
nearest_samples = self._update_reachable_distances(core_sample, neighbors)
# check closest sample from core point
while len(nearest_samples) > 0:
closest_sample = sorted(nearest_samples.items(), key=operator.itemgetter(1))[0][0]
del nearest_samples[closest_sample]
                # mark as visited and record the processing order
self.visited_samples[closest_sample] = int(True)
used_samples.append(closest_sample)
# Find all points (samples) in epsilon range
sample_neighbors = self._get_neighbors(closest_sample)
if len(sample_neighbors) >= self.min_samples:
nearest_samples = self._update_reachable_distances(closest_sample, sample_neighbors, nearest_samples)
cluster_labels = self._get_cluster_labels(used_samples)
return cluster_labels
if __name__ == "__main__":
# define model
model = OPTICS(epsilon=23, min_samples=15)
# assign cluster to each sample
yhat = model.fit_predict(X)
# retrieve unique clusters
clusters = np.unique(yhat)
# create scatter plot for samples from each cluster
for cluster in clusters:
# get row indexes for samples with this cluster
row_ix = np.where(yhat == cluster)
# create scatter of these samples
plt.scatter(X[row_ix, 0], X[row_ix, 1])
# show the plot
plt.show()
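    # Optional cross-check (added; assumes scikit-learn is installed): its
    # OPTICS implementation exposes a similar fit_predict interface and can be
    # used to sanity-check the scratch implementation above.
    try:
        from sklearn.cluster import OPTICS as SklearnOPTICS
        sk_labels = SklearnOPTICS(max_eps=23, min_samples=15).fit_predict(X)
        print("scikit-learn OPTICS labels:", np.unique(sk_labels))
    except ImportError:
        pass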
| 47.485714
| 2,014
| 0.570517
|
d28a5b3b01dc07a7aeaaa89942a0c20a00da447f
| 68,272
|
py
|
Python
|
src/twisted/web/test/test_static.py
|
ndg63276/twisted
|
f672a20395e8beece6350631a70514f06c391bae
|
[
"Unlicense",
"MIT"
] | 2
|
2021-05-30T16:35:00.000Z
|
2021-06-03T12:23:33.000Z
|
src/twisted/web/test/test_static.py
|
ndg63276/twisted
|
f672a20395e8beece6350631a70514f06c391bae
|
[
"Unlicense",
"MIT"
] | 20
|
2021-05-03T18:02:23.000Z
|
2022-03-12T12:01:04.000Z
|
src/twisted/web/test/test_static.py
|
ndg63276/twisted
|
f672a20395e8beece6350631a70514f06c391bae
|
[
"Unlicense",
"MIT"
] | 2
|
2021-05-29T21:12:22.000Z
|
2021-05-30T04:56:50.000Z
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.web.static}.
"""
import errno
import inspect
import mimetypes
import os
import re
import sys
import warnings
from io import BytesIO as StringIO
from unittest import skipIf
from zope.interface.verify import verifyObject
from twisted.internet import abstract, interfaces
from twisted.python.runtime import platform
from twisted.python.filepath import FilePath
from twisted.python import compat, log
from twisted.python.compat import networkString
from twisted.trial.unittest import TestCase
from twisted.web import static, http, script, resource
from twisted.web.server import UnsupportedMethod
from twisted.web.test.requesthelper import DummyRequest
from twisted.web.test._util import _render
from twisted.web._responses import FOUND
class StaticDataTests(TestCase):
"""
Tests for L{Data}.
"""
def test_headRequest(self):
"""
L{Data.render} returns an empty response body for a I{HEAD} request.
"""
data = static.Data(b"foo", "bar")
request = DummyRequest([""])
request.method = b"HEAD"
d = _render(data, request)
def cbRendered(ignored):
self.assertEqual(b"".join(request.written), b"")
d.addCallback(cbRendered)
return d
def test_invalidMethod(self):
"""
L{Data.render} raises L{UnsupportedMethod} in response to a non-I{GET},
non-I{HEAD} request.
"""
data = static.Data(b"foo", b"bar")
request = DummyRequest([b""])
request.method = b"POST"
self.assertRaises(UnsupportedMethod, data.render, request)
class StaticFileTests(TestCase):
"""
Tests for the basic behavior of L{File}.
"""
def _render(self, resource, request):
return _render(resource, request)
def test_ignoredExtTrue(self):
"""
Passing C{1} as the value to L{File}'s C{ignoredExts} argument
issues a warning and sets the ignored extensions to the
wildcard C{"*"}.
"""
with warnings.catch_warnings(record=True) as caughtWarnings:
file = static.File(self.mktemp(), ignoredExts=1)
self.assertEqual(file.ignoredExts, ["*"])
self.assertEqual(len(caughtWarnings), 1)
def test_ignoredExtFalse(self):
"""
        Passing C{0} as the value to L{File}'s C{ignoredExts} argument
issues a warning and sets the ignored extensions to the empty
list.
"""
with warnings.catch_warnings(record=True) as caughtWarnings:
file = static.File(self.mktemp(), ignoredExts=0)
self.assertEqual(file.ignoredExts, [])
self.assertEqual(len(caughtWarnings), 1)
def test_allowExt(self):
"""
Passing C{1} as the value to L{File}'s C{allowExt} argument
issues a warning and sets the ignored extensions to the
wildcard C{*}.
"""
with warnings.catch_warnings(record=True) as caughtWarnings:
file = static.File(self.mktemp(), ignoredExts=True)
self.assertEqual(file.ignoredExts, ["*"])
self.assertEqual(len(caughtWarnings), 1)
def test_invalidMethod(self):
"""
L{File.render} raises L{UnsupportedMethod} in response to a non-I{GET},
non-I{HEAD} request.
"""
request = DummyRequest([b""])
request.method = b"POST"
path = FilePath(self.mktemp())
path.setContent(b"foo")
file = static.File(path.path)
self.assertRaises(UnsupportedMethod, file.render, request)
def test_notFound(self):
"""
If a request is made which encounters a L{File} before a final segment
which does not correspond to any file in the path the L{File} was
created with, a not found response is sent.
"""
base = FilePath(self.mktemp())
base.makedirs()
file = static.File(base.path)
request = DummyRequest([b"foobar"])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 404)
d.addCallback(cbRendered)
return d
def test_emptyChild(self):
"""
The C{''} child of a L{File} which corresponds to a directory in the
filesystem is a L{DirectoryLister}.
"""
base = FilePath(self.mktemp())
base.makedirs()
file = static.File(base.path)
request = DummyRequest([b""])
child = resource.getChildForRequest(file, request)
self.assertIsInstance(child, static.DirectoryLister)
self.assertEqual(child.path, base.path)
def test_emptyChildUnicodeParent(self):
"""
The C{u''} child of a L{File} which corresponds to a directory
whose path is text is a L{DirectoryLister} that renders to a
binary listing.
@see: U{https://twistedmatrix.com/trac/ticket/9438}
"""
textBase = FilePath(self.mktemp()).asTextMode()
textBase.makedirs()
textBase.child("text-file").open("w").close()
textFile = static.File(textBase.path)
request = DummyRequest([b""])
child = resource.getChildForRequest(textFile, request)
self.assertIsInstance(child, static.DirectoryLister)
nativePath = compat.nativeString(textBase.path)
self.assertEqual(child.path, nativePath)
response = child.render(request)
self.assertIsInstance(response, bytes)
def test_securityViolationNotFound(self):
"""
If a request is made which encounters a L{File} before a final segment
which cannot be looked up in the filesystem due to security
considerations, a not found response is sent.
"""
base = FilePath(self.mktemp())
base.makedirs()
file = static.File(base.path)
request = DummyRequest([b".."])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 404)
d.addCallback(cbRendered)
return d
@skipIf(platform.isWindows(), "Cannot remove read permission on Windows")
def test_forbiddenResource(self):
"""
If the file in the filesystem which would satisfy a request cannot be
read, L{File.render} sets the HTTP response code to I{FORBIDDEN}.
"""
base = FilePath(self.mktemp())
base.setContent(b"")
# Make sure we can delete the file later.
self.addCleanup(base.chmod, 0o700)
# Get rid of our own read permission.
base.chmod(0)
file = static.File(base.path)
request = DummyRequest([b""])
d = self._render(file, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 403)
d.addCallback(cbRendered)
return d
def test_undecodablePath(self):
"""
A request whose path cannot be decoded as UTF-8 receives a not
found response, and the failure is logged.
"""
path = self.mktemp()
if isinstance(path, bytes):
path = path.decode("ascii")
base = FilePath(path)
base.makedirs()
file = static.File(base.path)
request = DummyRequest([b"\xff"])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 404)
self.assertEqual(len(self.flushLoggedErrors(UnicodeDecodeError)), 1)
d.addCallback(cbRendered)
return d
def test_forbiddenResource_default(self):
"""
L{File.forbidden} defaults to L{resource.ForbiddenResource}.
"""
self.assertIsInstance(static.File(b".").forbidden, resource.ForbiddenResource)
def test_forbiddenResource_customize(self):
"""
The resource rendered for forbidden requests is stored as a class
member so that users can customize it.
"""
base = FilePath(self.mktemp())
base.setContent(b"")
markerResponse = b"custom-forbidden-response"
def failingOpenForReading():
raise OSError(errno.EACCES, "")
class CustomForbiddenResource(resource.Resource):
def render(self, request):
return markerResponse
class CustomStaticFile(static.File):
forbidden = CustomForbiddenResource()
fileResource = CustomStaticFile(base.path)
fileResource.openForReading = failingOpenForReading
request = DummyRequest([b""])
result = fileResource.render(request)
self.assertEqual(markerResponse, result)
def test_indexNames(self):
"""
If a request is made which encounters a L{File} before a final empty
segment, a file in the L{File} instance's C{indexNames} list which
exists in the path the L{File} was created with is served as the
response to the request.
"""
base = FilePath(self.mktemp())
base.makedirs()
base.child("foo.bar").setContent(b"baz")
file = static.File(base.path)
file.indexNames = ["foo.bar"]
request = DummyRequest([b""])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(b"".join(request.written), b"baz")
self.assertEqual(
request.responseHeaders.getRawHeaders(b"content-length")[0], b"3"
)
d.addCallback(cbRendered)
return d
def test_staticFile(self):
"""
If a request is made which encounters a L{File} before a final segment
which names a file in the path the L{File} was created with, that file
is served as the response to the request.
"""
base = FilePath(self.mktemp())
base.makedirs()
base.child("foo.bar").setContent(b"baz")
file = static.File(base.path)
request = DummyRequest([b"foo.bar"])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(b"".join(request.written), b"baz")
self.assertEqual(
request.responseHeaders.getRawHeaders(b"content-length")[0], b"3"
)
d.addCallback(cbRendered)
return d
@skipIf(
sys.getfilesystemencoding().lower() not in ("utf-8", "mcbs"),
"Cannot write unicode filenames with file system encoding of"
" {}".format(sys.getfilesystemencoding()),
)
def test_staticFileUnicodeFileName(self):
"""
A request for an existing unicode file path encoded as UTF-8
returns the contents of that file.
"""
name = "\N{GREEK SMALL LETTER ETA WITH PERISPOMENI}"
content = b"content"
base = FilePath(self.mktemp())
base.makedirs()
base.child(name).setContent(content)
file = static.File(base.path)
request = DummyRequest([name.encode("utf-8")])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(b"".join(request.written), content)
self.assertEqual(
request.responseHeaders.getRawHeaders(b"content-length")[0],
networkString(str(len(content))),
)
d.addCallback(cbRendered)
return d
def test_staticFileDeletedGetChild(self):
"""
A L{static.File} created for a directory which does not exist should
return childNotFound from L{static.File.getChild}.
"""
staticFile = static.File(self.mktemp())
request = DummyRequest([b"foo.bar"])
child = staticFile.getChild(b"foo.bar", request)
self.assertEqual(child, staticFile.childNotFound)
def test_staticFileDeletedRender(self):
"""
A L{static.File} created for a file which does not exist should render
its C{childNotFound} page.
"""
staticFile = static.File(self.mktemp())
request = DummyRequest([b"foo.bar"])
request2 = DummyRequest([b"foo.bar"])
d = self._render(staticFile, request)
d2 = self._render(staticFile.childNotFound, request2)
def cbRendered2(ignored):
def cbRendered(ignored):
self.assertEqual(b"".join(request.written), b"".join(request2.written))
d.addCallback(cbRendered)
return d
d2.addCallback(cbRendered2)
return d2
def test_getChildChildNotFound_customize(self):
"""
The resource rendered for child-not-found requests can be customized
using a class member.
"""
base = FilePath(self.mktemp())
base.setContent(b"")
markerResponse = b"custom-child-not-found-response"
class CustomChildNotFoundResource(resource.Resource):
def render(self, request):
return markerResponse
class CustomStaticFile(static.File):
childNotFound = CustomChildNotFoundResource()
fileResource = CustomStaticFile(base.path)
request = DummyRequest([b"no-child.txt"])
child = fileResource.getChild(b"no-child.txt", request)
result = child.render(request)
self.assertEqual(markerResponse, result)
def test_headRequest(self):
"""
L{static.File.render} returns an empty response body for I{HEAD}
requests.
"""
path = FilePath(self.mktemp())
path.setContent(b"foo")
file = static.File(path.path)
request = DummyRequest([b""])
request.method = b"HEAD"
d = _render(file, request)
def cbRendered(ignored):
self.assertEqual(b"".join(request.written), b"")
d.addCallback(cbRendered)
return d
def test_processors(self):
"""
If a request is made which encounters a L{File} before a final segment
which names a file with an extension which is in the L{File}'s
C{processors} mapping, the processor associated with that extension is
used to serve the response to the request.
"""
base = FilePath(self.mktemp())
base.makedirs()
base.child("foo.bar").setContent(
b"from twisted.web.static import Data\n"
b"resource = Data(b'dynamic world', 'text/plain')\n"
)
file = static.File(base.path)
file.processors = {".bar": script.ResourceScript}
request = DummyRequest([b"foo.bar"])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(b"".join(request.written), b"dynamic world")
self.assertEqual(
request.responseHeaders.getRawHeaders(b"content-length")[0], b"13"
)
d.addCallback(cbRendered)
return d
def test_ignoreExt(self):
"""
The list of ignored extensions can be set by passing a value to
L{File.__init__} or by calling L{File.ignoreExt} later.
"""
file = static.File(b".")
self.assertEqual(file.ignoredExts, [])
file.ignoreExt(".foo")
file.ignoreExt(".bar")
self.assertEqual(file.ignoredExts, [".foo", ".bar"])
file = static.File(b".", ignoredExts=(".bar", ".baz"))
self.assertEqual(file.ignoredExts, [".bar", ".baz"])
def test_ignoredExtensionsIgnored(self):
"""
A request for the I{base} child of a L{File} succeeds with a resource
for the I{base<extension>} file in the path the L{File} was created
with if such a file exists and the L{File} has been configured to
ignore the I{<extension>} extension.
"""
base = FilePath(self.mktemp())
base.makedirs()
base.child("foo.bar").setContent(b"baz")
base.child("foo.quux").setContent(b"foobar")
file = static.File(base.path, ignoredExts=(".bar",))
request = DummyRequest([b"foo"])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(b"".join(request.written), b"baz")
d.addCallback(cbRendered)
return d
def test_directoryWithoutTrailingSlashRedirects(self):
"""
A request for a path which is a directory but does not have a trailing
slash will be redirected to a URL which does have a slash by L{File}.
"""
base = FilePath(self.mktemp())
base.makedirs()
base.child("folder").makedirs()
file = static.File(base.path)
request = DummyRequest([b"folder"])
request.uri = b"http://dummy/folder#baz?foo=bar"
child = resource.getChildForRequest(file, request)
self.successResultOf(self._render(child, request))
self.assertEqual(request.responseCode, FOUND)
self.assertEqual(
request.responseHeaders.getRawHeaders(b"location"),
[b"http://dummy/folder/#baz?foo=bar"],
)
def _makeFilePathWithStringIO(self):
"""
Create a L{File} that when opened for reading, returns a L{StringIO}.
@return: 2-tuple of the opened "file" and the L{File}.
@rtype: L{tuple}
"""
fakeFile = StringIO()
path = FilePath(self.mktemp())
path.touch()
file = static.File(path.path)
# Open our file instead of a real one
file.open = lambda: fakeFile
return fakeFile, file
def test_HEADClosesFile(self):
"""
A HEAD request opens the file, gets the size, and then closes it after
the request.
"""
fakeFile, file = self._makeFilePathWithStringIO()
request = DummyRequest([""])
request.method = b"HEAD"
self.successResultOf(_render(file, request))
self.assertEqual(b"".join(request.written), b"")
self.assertTrue(fakeFile.closed)
def test_cachedRequestClosesFile(self):
"""
A GET request that is cached closes the file after the request.
"""
fakeFile, file = self._makeFilePathWithStringIO()
request = DummyRequest([""])
request.method = b"GET"
# This request will always return saying that it is cached
request.setLastModified = lambda _: http.CACHED
self.successResultOf(_render(file, request))
self.assertEqual(b"".join(request.written), b"")
self.assertTrue(fakeFile.closed)
class StaticMakeProducerTests(TestCase):
"""
Tests for L{File.makeProducer}.
"""
def makeResourceWithContent(self, content, type=None, encoding=None):
"""
Make a L{static.File} resource that has C{content} for its content.
@param content: The L{bytes} to use as the contents of the resource.
@param type: Optional value for the content type of the resource.
@param encoding: Optional value for the content encoding of the resource.
"""
fileName = FilePath(self.mktemp())
fileName.setContent(content)
resource = static.File(fileName._asBytesPath())
resource.encoding = encoding
resource.type = type
return resource
def contentHeaders(self, request):
"""
Extract the content-* headers from the L{DummyRequest} C{request}.
This returns the subset of C{request.responseHeaders} whose names
start with 'content-'.
"""
contentHeaders = {}
for k, v in request.responseHeaders.getAllRawHeaders():
if k.lower().startswith(b"content-"):
contentHeaders[k.lower()] = v[0]
return contentHeaders
def test_noRangeHeaderGivesNoRangeStaticProducer(self):
"""
makeProducer when no Range header is set returns an instance of
NoRangeStaticProducer.
"""
resource = self.makeResourceWithContent(b"")
request = DummyRequest([])
with resource.openForReading() as file:
producer = resource.makeProducer(request, file)
self.assertIsInstance(producer, static.NoRangeStaticProducer)
def test_noRangeHeaderSets200OK(self):
"""
makeProducer when no Range header is set sets the responseCode on the
request to 'OK'.
"""
resource = self.makeResourceWithContent(b"")
request = DummyRequest([])
with resource.openForReading() as file:
resource.makeProducer(request, file)
self.assertEqual(http.OK, request.responseCode)
def test_noRangeHeaderSetsContentHeaders(self):
"""
makeProducer when no Range header is set sets the Content-* headers
for the response.
"""
length = 123
contentType = "text/plain"
contentEncoding = "gzip"
resource = self.makeResourceWithContent(
b"a" * length, type=contentType, encoding=contentEncoding
)
request = DummyRequest([])
with resource.openForReading() as file:
resource.makeProducer(request, file)
self.assertEqual(
{
b"content-type": networkString(contentType),
b"content-length": b"%d" % (length,),
b"content-encoding": networkString(contentEncoding),
},
self.contentHeaders(request),
)
def test_singleRangeGivesSingleRangeStaticProducer(self):
"""
makeProducer when the Range header requests a single byte range
returns an instance of SingleRangeStaticProducer.
"""
request = DummyRequest([])
request.requestHeaders.addRawHeader(b"range", b"bytes=1-3")
resource = self.makeResourceWithContent(b"abcdef")
with resource.openForReading() as file:
producer = resource.makeProducer(request, file)
self.assertIsInstance(producer, static.SingleRangeStaticProducer)
def test_singleRangeSets206PartialContent(self):
"""
makeProducer when the Range header requests a single, satisfiable byte
range sets the response code on the request to 'Partial Content'.
"""
request = DummyRequest([])
request.requestHeaders.addRawHeader(b"range", b"bytes=1-3")
resource = self.makeResourceWithContent(b"abcdef")
with resource.openForReading() as file:
resource.makeProducer(request, file)
self.assertEqual(http.PARTIAL_CONTENT, request.responseCode)
def test_singleRangeSetsContentHeaders(self):
"""
makeProducer when the Range header requests a single, satisfiable byte
range sets the Content-* headers appropriately.
"""
request = DummyRequest([])
request.requestHeaders.addRawHeader(b"range", b"bytes=1-3")
contentType = "text/plain"
contentEncoding = "gzip"
resource = self.makeResourceWithContent(
b"abcdef", type=contentType, encoding=contentEncoding
)
with resource.openForReading() as file:
resource.makeProducer(request, file)
self.assertEqual(
{
b"content-type": networkString(contentType),
b"content-encoding": networkString(contentEncoding),
b"content-range": b"bytes 1-3/6",
b"content-length": b"3",
},
self.contentHeaders(request),
)
def test_singleUnsatisfiableRangeReturnsSingleRangeStaticProducer(self):
"""
makeProducer still returns an instance of L{SingleRangeStaticProducer}
when the Range header requests a single unsatisfiable byte range.
"""
request = DummyRequest([])
request.requestHeaders.addRawHeader(b"range", b"bytes=4-10")
resource = self.makeResourceWithContent(b"abc")
with resource.openForReading() as file:
producer = resource.makeProducer(request, file)
self.assertIsInstance(producer, static.SingleRangeStaticProducer)
def test_singleUnsatisfiableRangeSets416ReqestedRangeNotSatisfiable(self):
"""
makeProducer sets the response code of the request to 'Requested
Range Not Satisfiable' when the Range header requests a single
unsatisfiable byte range.
"""
request = DummyRequest([])
request.requestHeaders.addRawHeader(b"range", b"bytes=4-10")
resource = self.makeResourceWithContent(b"abc")
with resource.openForReading() as file:
resource.makeProducer(request, file)
self.assertEqual(http.REQUESTED_RANGE_NOT_SATISFIABLE, request.responseCode)
def test_singleUnsatisfiableRangeSetsContentHeaders(self):
"""
makeProducer when the Range header requests a single, unsatisfiable
byte range sets the Content-* headers appropriately.
"""
request = DummyRequest([])
request.requestHeaders.addRawHeader(b"range", b"bytes=4-10")
contentType = "text/plain"
resource = self.makeResourceWithContent(b"abc", type=contentType)
with resource.openForReading() as file:
resource.makeProducer(request, file)
self.assertEqual(
{
b"content-type": b"text/plain",
b"content-length": b"0",
b"content-range": b"bytes */3",
},
self.contentHeaders(request),
)
def test_singlePartiallyOverlappingRangeSetsContentHeaders(self):
"""
makeProducer when the Range header requests a single byte range that
partly overlaps the resource sets the Content-* headers appropriately.
"""
request = DummyRequest([])
request.requestHeaders.addRawHeader(b"range", b"bytes=2-10")
contentType = "text/plain"
resource = self.makeResourceWithContent(b"abc", type=contentType)
with resource.openForReading() as file:
resource.makeProducer(request, file)
self.assertEqual(
{
b"content-type": b"text/plain",
b"content-length": b"1",
b"content-range": b"bytes 2-2/3",
},
self.contentHeaders(request),
)
def test_multipleRangeGivesMultipleRangeStaticProducer(self):
"""
makeProducer when the Range header requests multiple byte ranges
returns an instance of MultipleRangeStaticProducer.
"""
request = DummyRequest([])
request.requestHeaders.addRawHeader(b"range", b"bytes=1-3,5-6")
resource = self.makeResourceWithContent(b"abcdef")
with resource.openForReading() as file:
producer = resource.makeProducer(request, file)
self.assertIsInstance(producer, static.MultipleRangeStaticProducer)
def test_multipleRangeSets206PartialContent(self):
"""
makeProducer when the Range header requests multiple satisfiable
byte ranges sets the response code on the request to 'Partial
Content'.
"""
request = DummyRequest([])
request.requestHeaders.addRawHeader(b"range", b"bytes=1-3,5-6")
resource = self.makeResourceWithContent(b"abcdef")
with resource.openForReading() as file:
resource.makeProducer(request, file)
self.assertEqual(http.PARTIAL_CONTENT, request.responseCode)
def test_mutipleRangeSetsContentHeaders(self):
"""
makeProducer when the Range header requests multiple satisfiable byte
ranges sets the Content-* headers appropriately.
"""
request = DummyRequest([])
request.requestHeaders.addRawHeader(b"range", b"bytes=1-3,5-6")
resource = self.makeResourceWithContent(b"abcdefghijkl", encoding="gzip")
with resource.openForReading() as file:
producer = resource.makeProducer(request, file)
contentHeaders = self.contentHeaders(request)
# The only content-* headers set are content-type and content-length.
self.assertEqual(
{b"content-length", b"content-type"}, set(contentHeaders.keys())
)
# The content-length depends on the boundary used in the response.
expectedLength = 5
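# (The 5 above is the number of payload bytes covered by the two requested
# ranges: bytes 1-3 of b"abcdefghijkl" are b"bcd" (3 bytes) and bytes 5-6 are
# b"fg" (2 bytes); the boundary lengths are added on top of this below.)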
for boundary, offset, size in producer.rangeInfo:
expectedLength += len(boundary)
self.assertEqual(
b"%d" % (expectedLength,), contentHeaders[b"content-length"]
)
# Content-type should be set to a value indicating a multipart
# response and the boundary used to separate the parts.
self.assertIn(b"content-type", contentHeaders)
contentType = contentHeaders[b"content-type"]
self.assertNotIdentical(
None,
re.match(br'multipart/byteranges; boundary="[^"]*"\Z', contentType),
)
# Content-encoding is not set in the response to a multiple range
# response, which is a bit wussy but works well enough with the way
# static.File does content-encodings...
self.assertNotIn(b"content-encoding", contentHeaders)
def test_multipleUnsatisfiableRangesReturnsMultipleRangeStaticProducer(self):
"""
makeProducer still returns an instance of L{MultipleRangeStaticProducer}
when the Range header requests multiple ranges, none of which are
satisfiable.
"""
request = DummyRequest([])
request.requestHeaders.addRawHeader(b"range", b"bytes=10-12,15-20")
resource = self.makeResourceWithContent(b"abc")
with resource.openForReading() as file:
producer = resource.makeProducer(request, file)
self.assertIsInstance(producer, static.MultipleRangeStaticProducer)
def test_multipleUnsatisfiableRangesSets416ReqestedRangeNotSatisfiable(self):
"""
makeProducer sets the response code of the request to 'Requested
Range Not Satisfiable' when the Range header requests multiple ranges,
none of which are satisfiable.
"""
request = DummyRequest([])
request.requestHeaders.addRawHeader(b"range", b"bytes=10-12,15-20")
resource = self.makeResourceWithContent(b"abc")
with resource.openForReading() as file:
resource.makeProducer(request, file)
self.assertEqual(http.REQUESTED_RANGE_NOT_SATISFIABLE, request.responseCode)
def test_multipleUnsatisfiableRangeSetsContentHeaders(self):
"""
makeProducer when the Range header requests multiple ranges, none of
which are satisfiable, sets the Content-* headers appropriately.
"""
request = DummyRequest([])
request.requestHeaders.addRawHeader(b"range", b"bytes=4-10")
contentType = "text/plain"
request.requestHeaders.addRawHeader(b"range", b"bytes=10-12,15-20")
resource = self.makeResourceWithContent(b"abc", type=contentType)
with resource.openForReading() as file:
resource.makeProducer(request, file)
self.assertEqual(
{
b"content-length": b"0",
b"content-range": b"bytes */3",
b"content-type": b"text/plain",
},
self.contentHeaders(request),
)
def test_oneSatisfiableRangeIsEnough(self):
"""
makeProducer when the Range header requests multiple ranges, at least
one of which matches, sets the response code to 'Partial Content'.
"""
request = DummyRequest([])
request.requestHeaders.addRawHeader(b"range", b"bytes=1-3,100-200")
resource = self.makeResourceWithContent(b"abcdef")
with resource.openForReading() as file:
resource.makeProducer(request, file)
self.assertEqual(http.PARTIAL_CONTENT, request.responseCode)
class StaticProducerTests(TestCase):
"""
Tests for the abstract L{StaticProducer}.
"""
def test_stopProducingClosesFile(self):
"""
L{StaticProducer.stopProducing} closes the file object the producer is
producing data from.
"""
fileObject = StringIO()
producer = static.StaticProducer(None, fileObject)
producer.stopProducing()
self.assertTrue(fileObject.closed)
def test_stopProducingSetsRequestToNone(self):
"""
L{StaticProducer.stopProducing} sets the request instance variable to
None, which indicates to subclasses' resumeProducing methods that no
more data should be produced.
"""
fileObject = StringIO()
producer = static.StaticProducer(DummyRequest([]), fileObject)
producer.stopProducing()
self.assertIdentical(None, producer.request)
class NoRangeStaticProducerTests(TestCase):
"""
Tests for L{NoRangeStaticProducer}.
"""
def test_implementsIPullProducer(self):
"""
L{NoRangeStaticProducer} implements L{IPullProducer}.
"""
verifyObject(interfaces.IPullProducer, static.NoRangeStaticProducer(None, None))
def test_resumeProducingProducesContent(self):
"""
L{NoRangeStaticProducer.resumeProducing} writes content from the
resource to the request.
"""
request = DummyRequest([])
content = b"abcdef"
producer = static.NoRangeStaticProducer(request, StringIO(content))
# start calls registerProducer on the DummyRequest, which pulls all
# output from the producer and so we just need this one call.
producer.start()
self.assertEqual(content, b"".join(request.written))
def test_resumeProducingBuffersOutput(self):
"""
L{NoRangeStaticProducer.start} writes at most
C{abstract.FileDescriptor.bufferSize} bytes of content from the
resource to the request at once.
"""
request = DummyRequest([])
bufferSize = abstract.FileDescriptor.bufferSize
content = b"a" * (2 * bufferSize + 1)
producer = static.NoRangeStaticProducer(request, StringIO(content))
# start calls registerProducer on the DummyRequest, which pulls all
# output from the producer and so we just need this one call.
producer.start()
expected = [
content[0:bufferSize],
content[bufferSize : 2 * bufferSize],
content[2 * bufferSize :],
]
self.assertEqual(expected, request.written)
def test_finishCalledWhenDone(self):
"""
L{NoRangeStaticProducer.resumeProducing} calls finish() on the request
after it is done producing content.
"""
request = DummyRequest([])
finishDeferred = request.notifyFinish()
callbackList = []
finishDeferred.addCallback(callbackList.append)
producer = static.NoRangeStaticProducer(request, StringIO(b"abcdef"))
# start calls registerProducer on the DummyRequest, which pulls all
# output from the producer and so we just need this one call.
producer.start()
self.assertEqual([None], callbackList)
class SingleRangeStaticProducerTests(TestCase):
"""
Tests for L{SingleRangeStaticProducer}.
"""
def test_implementsIPullProducer(self):
"""
L{SingleRangeStaticProducer} implements L{IPullProducer}.
"""
verifyObject(
interfaces.IPullProducer,
static.SingleRangeStaticProducer(None, None, None, None),
)
def test_resumeProducingProducesContent(self):
"""
L{SingleRangeStaticProducer.resumeProducing} writes the given amount
of content, starting at the given offset, from the resource to the
request.
"""
request = DummyRequest([])
content = b"abcdef"
producer = static.SingleRangeStaticProducer(request, StringIO(content), 1, 3)
# DummyRequest.registerProducer pulls all output from the producer, so
# we just need to call start.
producer.start()
self.assertEqual(content[1:4], b"".join(request.written))
def test_resumeProducingBuffersOutput(self):
"""
L{SingleRangeStaticProducer.start} writes at most
C{abstract.FileDescriptor.bufferSize} bytes of content from the
resource to the request at once.
"""
request = DummyRequest([])
bufferSize = abstract.FileDescriptor.bufferSize
content = b"abc" * bufferSize
producer = static.SingleRangeStaticProducer(
request, StringIO(content), 1, bufferSize + 10
)
# DummyRequest.registerProducer pulls all output from the producer, so
# we just need to call start.
producer.start()
expected = [
content[1 : bufferSize + 1],
content[bufferSize + 1 : bufferSize + 11],
]
self.assertEqual(expected, request.written)
def test_finishCalledWhenDone(self):
"""
L{SingleRangeStaticProducer.resumeProducing} calls finish() on the
request after it is done producing content.
"""
request = DummyRequest([])
finishDeferred = request.notifyFinish()
callbackList = []
finishDeferred.addCallback(callbackList.append)
producer = static.SingleRangeStaticProducer(request, StringIO(b"abcdef"), 1, 1)
# start calls registerProducer on the DummyRequest, which pulls all
# output from the producer and so we just need this one call.
producer.start()
self.assertEqual([None], callbackList)
class MultipleRangeStaticProducerTests(TestCase):
"""
Tests for L{MultipleRangeStaticProducer}.
"""
def test_implementsIPullProducer(self):
"""
L{MultipleRangeStaticProducer} implements L{IPullProducer}.
"""
verifyObject(
interfaces.IPullProducer,
static.MultipleRangeStaticProducer(None, None, None),
)
def test_resumeProducingProducesContent(self):
"""
L{MultipleRangeStaticProducer.resumeProducing} writes the requested
chunks of content from the resource to the request, with the supplied
boundaries in between each chunk.
"""
request = DummyRequest([])
content = b"abcdef"
producer = static.MultipleRangeStaticProducer(
request, StringIO(content), [(b"1", 1, 3), (b"2", 5, 1)]
)
# DummyRequest.registerProducer pulls all output from the producer, so
# we just need to call start.
producer.start()
self.assertEqual(b"1bcd2f", b"".join(request.written))
def test_resumeProducingBuffersOutput(self):
"""
L{MultipleRangeStaticProducer.start} writes about
C{abstract.FileDescriptor.bufferSize} bytes of content from the
resource to the request at once.
To be specific about the 'about' above: it can write slightly more,
for example in the case where the first boundary plus the first chunk
is less than C{bufferSize} but first boundary plus the first chunk
plus the second boundary is more, but this is unimportant as in
practice the boundaries are fairly small. On the other side, it is
important for performance to bundle up several small chunks into one
call to request.write.
"""
request = DummyRequest([])
content = b"0123456789" * 2
producer = static.MultipleRangeStaticProducer(
request, StringIO(content), [(b"a", 0, 2), (b"b", 5, 10), (b"c", 0, 0)]
)
producer.bufferSize = 10
# DummyRequest.registerProducer pulls all output from the producer, so
# we just need to call start.
producer.start()
expected = [
b"a" + content[0:2] + b"b" + content[5:11],
content[11:15] + b"c",
]
self.assertEqual(expected, request.written)
def test_finishCalledWhenDone(self):
"""
L{MultipleRangeStaticProducer.resumeProducing} calls finish() on the
request after it is done producing content.
"""
request = DummyRequest([])
finishDeferred = request.notifyFinish()
callbackList = []
finishDeferred.addCallback(callbackList.append)
producer = static.MultipleRangeStaticProducer(
request, StringIO(b"abcdef"), [(b"", 1, 2)]
)
# start calls registerProducer on the DummyRequest, which pulls all
# output from the producer and so we just need this one call.
producer.start()
self.assertEqual([None], callbackList)
class RangeTests(TestCase):
"""
Tests for I{Range-Header} support in L{twisted.web.static.File}.
@type file: L{file}
@ivar file: Temporary (binary) file containing the content to be served.
@type resource: L{static.File}
@ivar resource: A leaf web resource using C{file} as content.
@type request: L{DummyRequest}
@ivar request: A fake request, requesting C{resource}.
@type catcher: L{list}
@ivar catcher: List which gathers all log information.
"""
def setUp(self):
"""
Create a temporary file with a fixed payload of 64 bytes. Create a
resource for that file and create a request which will be for that
resource. Each test can set a different range header to test different
aspects of the implementation.
"""
path = FilePath(self.mktemp())
# This is just a jumble of random stuff. It's supposed to be a good
# set of data for this test, particularly in order to avoid
# accidentally seeing the right result by having a byte sequence
# repeated at different locations or by having byte values which are
# somehow correlated with their position in the string.
self.payload = (
b"\xf8u\xf3E\x8c7\xce\x00\x9e\xb6a0y0S\xf0\xef\xac\xb7"
b"\xbe\xb5\x17M\x1e\x136k{\x1e\xbe\x0c\x07\x07\t\xd0"
b"\xbckY\xf5I\x0b\xb8\x88oZ\x1d\x85b\x1a\xcdk\xf2\x1d"
b"&\xfd%\xdd\x82q/A\x10Y\x8b"
)
path.setContent(self.payload)
self.file = path.open()
self.resource = static.File(self.file.name)
self.resource.isLeaf = 1
self.request = DummyRequest([b""])
self.request.uri = self.file.name
self.catcher = []
log.addObserver(self.catcher.append)
def tearDown(self):
"""
Clean up the resource file and the log observer.
"""
self.file.close()
log.removeObserver(self.catcher.append)
def _assertLogged(self, expected):
"""
Asserts that a given log message occurred with an expected message.
"""
logItem = self.catcher.pop()
self.assertEqual(logItem["message"][0], expected)
self.assertEqual(
self.catcher, [], "An additional log occurred: {!r}".format(logItem)
)
def test_invalidRanges(self):
"""
L{File._parseRangeHeader} raises L{ValueError} when passed
syntactically invalid byte ranges.
"""
f = self.resource._parseRangeHeader
# there's no =
self.assertRaises(ValueError, f, b"bytes")
# unknown isn't a valid Bytes-Unit
self.assertRaises(ValueError, f, b"unknown=1-2")
# there's no - in =stuff
self.assertRaises(ValueError, f, b"bytes=3")
# both start and end are empty
self.assertRaises(ValueError, f, b"bytes=-")
# start isn't an integer
self.assertRaises(ValueError, f, b"bytes=foo-")
# end isn't an integer
self.assertRaises(ValueError, f, b"bytes=-foo")
# end isn't equal to or greater than start
self.assertRaises(ValueError, f, b"bytes=5-4")
def test_rangeMissingStop(self):
"""
A single bytes range without an explicit stop position is parsed into a
two-tuple giving the start position and L{None}.
"""
self.assertEqual(self.resource._parseRangeHeader(b"bytes=0-"), [(0, None)])
def test_rangeMissingStart(self):
"""
A single bytes range without an explicit start position is parsed into
a two-tuple of L{None} and the end position.
"""
self.assertEqual(self.resource._parseRangeHeader(b"bytes=-3"), [(None, 3)])
def test_range(self):
"""
A single bytes range with explicit start and stop positions is parsed
into a two-tuple of those positions.
"""
self.assertEqual(self.resource._parseRangeHeader(b"bytes=2-5"), [(2, 5)])
def test_rangeWithSpace(self):
"""
A single bytes range with whitespace in allowed places is parsed in
the same way as it would be without the whitespace.
"""
self.assertEqual(self.resource._parseRangeHeader(b" bytes=1-2 "), [(1, 2)])
self.assertEqual(self.resource._parseRangeHeader(b"bytes =1-2 "), [(1, 2)])
self.assertEqual(self.resource._parseRangeHeader(b"bytes= 1-2"), [(1, 2)])
self.assertEqual(self.resource._parseRangeHeader(b"bytes=1 -2"), [(1, 2)])
self.assertEqual(self.resource._parseRangeHeader(b"bytes=1- 2"), [(1, 2)])
self.assertEqual(self.resource._parseRangeHeader(b"bytes=1-2 "), [(1, 2)])
def test_nullRangeElements(self):
"""
If there are multiple byte ranges but only one is non-null, the
non-null range is parsed and its start and stop returned.
"""
self.assertEqual(
self.resource._parseRangeHeader(b"bytes=1-2,\r\n, ,\t"), [(1, 2)]
)
def test_multipleRanges(self):
"""
If multiple byte ranges are specified their starts and stops are
returned.
"""
self.assertEqual(
self.resource._parseRangeHeader(b"bytes=1-2,3-4"), [(1, 2), (3, 4)]
)
def test_bodyLength(self):
"""
A correct response to a range request is as long as the length of the
requested range.
"""
self.request.requestHeaders.addRawHeader(b"range", b"bytes=0-43")
self.resource.render(self.request)
self.assertEqual(len(b"".join(self.request.written)), 44)
def test_invalidRangeRequest(self):
"""
An incorrect range request (RFC 2616 defines a correct range request as
a Bytes-Unit followed by a '=' character followed by a specific range.
Only 'bytes' is defined) results in the range header value being logged
and a normal 200 response being sent.
"""
range = b"foobar=0-43"
self.request.requestHeaders.addRawHeader(b"range", range)
self.resource.render(self.request)
expected = "Ignoring malformed Range header {!r}".format(range.decode())
self._assertLogged(expected)
self.assertEqual(b"".join(self.request.written), self.payload)
self.assertEqual(self.request.responseCode, http.OK)
self.assertEqual(
self.request.responseHeaders.getRawHeaders(b"content-length")[0],
b"%d" % (len(self.payload),),
)
def parseMultipartBody(self, body, boundary):
"""
Parse C{body} as a multipart MIME response separated by C{boundary}.
Note that this will fail the calling test on certain syntactic
problems.
"""
sep = b"\r\n--" + boundary
parts = body.split(sep)
self.assertEqual(b"", parts[0])
self.assertEqual(b"--\r\n", parts[-1])
parsed_parts = []
for part in parts[1:-1]:
before, header1, header2, blank, partBody = part.split(b"\r\n", 4)
headers = header1 + b"\n" + header2
self.assertEqual(b"", before)
self.assertEqual(b"", blank)
partContentTypeValue = re.search(
b"^content-type: (.*)$", headers, re.I | re.M
).group(1)
start, end, size = re.search(
b"^content-range: bytes ([0-9]+)-([0-9]+)/([0-9]+)$",
headers,
re.I | re.M,
).groups()
parsed_parts.append(
{
b"contentType": partContentTypeValue,
b"contentRange": (start, end, size),
b"body": partBody,
}
)
return parsed_parts
def test_multipleRangeRequest(self):
"""
The response to a request for multiple bytes ranges is a MIME-ish
multipart response.
"""
startEnds = [(0, 2), (20, 30), (40, 50)]
rangeHeaderValue = b",".join(
[networkString("{}-{}".format(s, e)) for (s, e) in startEnds]
)
self.request.requestHeaders.addRawHeader(b"range", b"bytes=" + rangeHeaderValue)
self.resource.render(self.request)
self.assertEqual(self.request.responseCode, http.PARTIAL_CONTENT)
boundary = re.match(
b'^multipart/byteranges; boundary="(.*)"$',
self.request.responseHeaders.getRawHeaders(b"content-type")[0],
).group(1)
parts = self.parseMultipartBody(b"".join(self.request.written), boundary)
self.assertEqual(len(startEnds), len(parts))
for part, (s, e) in zip(parts, startEnds):
self.assertEqual(networkString(self.resource.type), part[b"contentType"])
start, end, size = part[b"contentRange"]
self.assertEqual(int(start), s)
self.assertEqual(int(end), e)
self.assertEqual(int(size), self.resource.getFileSize())
self.assertEqual(self.payload[s : e + 1], part[b"body"])
def test_multipleRangeRequestWithRangeOverlappingEnd(self):
"""
The response to a request for multiple bytes ranges is a MIME-ish
multipart response, even when one of the ranges falls off the end of
the resource.
"""
startEnds = [(0, 2), (40, len(self.payload) + 10)]
rangeHeaderValue = b",".join(
[networkString("{}-{}".format(s, e)) for (s, e) in startEnds]
)
self.request.requestHeaders.addRawHeader(b"range", b"bytes=" + rangeHeaderValue)
self.resource.render(self.request)
self.assertEqual(self.request.responseCode, http.PARTIAL_CONTENT)
boundary = re.match(
b'^multipart/byteranges; boundary="(.*)"$',
self.request.responseHeaders.getRawHeaders(b"content-type")[0],
).group(1)
parts = self.parseMultipartBody(b"".join(self.request.written), boundary)
self.assertEqual(len(startEnds), len(parts))
for part, (s, e) in zip(parts, startEnds):
self.assertEqual(networkString(self.resource.type), part[b"contentType"])
start, end, size = part[b"contentRange"]
self.assertEqual(int(start), s)
self.assertEqual(int(end), min(e, self.resource.getFileSize() - 1))
self.assertEqual(int(size), self.resource.getFileSize())
self.assertEqual(self.payload[s : e + 1], part[b"body"])
def test_implicitEnd(self):
"""
If the end byte position is omitted, the requested range extends to the
last byte of the resource.
"""
self.request.requestHeaders.addRawHeader(b"range", b"bytes=23-")
self.resource.render(self.request)
self.assertEqual(b"".join(self.request.written), self.payload[23:])
self.assertEqual(len(b"".join(self.request.written)), 41)
self.assertEqual(self.request.responseCode, http.PARTIAL_CONTENT)
self.assertEqual(
self.request.responseHeaders.getRawHeaders(b"content-range")[0],
b"bytes 23-63/64",
)
self.assertEqual(
self.request.responseHeaders.getRawHeaders(b"content-length")[0], b"41"
)
def test_implicitStart(self):
"""
If the start byte position is omitted but the end byte position is
supplied, then the range is treated as requesting the last -N bytes of
the resource, where N is the end byte position.
"""
self.request.requestHeaders.addRawHeader(b"range", b"bytes=-17")
self.resource.render(self.request)
self.assertEqual(b"".join(self.request.written), self.payload[-17:])
self.assertEqual(len(b"".join(self.request.written)), 17)
self.assertEqual(self.request.responseCode, http.PARTIAL_CONTENT)
self.assertEqual(
self.request.responseHeaders.getRawHeaders(b"content-range")[0],
b"bytes 47-63/64",
)
self.assertEqual(
self.request.responseHeaders.getRawHeaders(b"content-length")[0], b"17"
)
def test_explicitRange(self):
"""
A correct response to a bytes range header request from A to B starts
with the A'th byte and ends with (including) the B'th byte. The first
byte of a page is numbered with 0.
"""
self.request.requestHeaders.addRawHeader(b"range", b"bytes=3-43")
self.resource.render(self.request)
written = b"".join(self.request.written)
self.assertEqual(written, self.payload[3:44])
self.assertEqual(self.request.responseCode, http.PARTIAL_CONTENT)
self.assertEqual(
self.request.responseHeaders.getRawHeaders(b"content-range")[0],
b"bytes 3-43/64",
)
self.assertEqual(
b"%d" % (len(written),),
self.request.responseHeaders.getRawHeaders(b"content-length")[0],
)
def test_explicitRangeOverlappingEnd(self):
"""
A correct response to a bytes range header request from A to B when B
is past the end of the resource starts with the A'th byte and ends
with the last byte of the resource. The first byte of a page is
numbered with 0.
"""
self.request.requestHeaders.addRawHeader(b"range", b"bytes=40-100")
self.resource.render(self.request)
written = b"".join(self.request.written)
self.assertEqual(written, self.payload[40:])
self.assertEqual(self.request.responseCode, http.PARTIAL_CONTENT)
self.assertEqual(
self.request.responseHeaders.getRawHeaders(b"content-range")[0],
b"bytes 40-63/64",
)
self.assertEqual(
b"%d" % (len(written),),
self.request.responseHeaders.getRawHeaders(b"content-length")[0],
)
def test_statusCodeRequestedRangeNotSatisfiable(self):
"""
If a range is syntactically invalid due to the start being greater than
the end, the range header is ignored (the request is responded to as if
it were not present).
"""
self.request.requestHeaders.addRawHeader(b"range", b"bytes=20-13")
self.resource.render(self.request)
self.assertEqual(self.request.responseCode, http.OK)
self.assertEqual(b"".join(self.request.written), self.payload)
self.assertEqual(
self.request.responseHeaders.getRawHeaders(b"content-length")[0],
b"%d" % (len(self.payload),),
)
def test_invalidStartBytePos(self):
"""
If a range is unsatisfiable due to the start not being less than the
length of the resource, the response is 416 (Requested range not
satisfiable) and no data is written to the response body (RFC 2616,
section 14.35.1).
"""
self.request.requestHeaders.addRawHeader(b"range", b"bytes=67-108")
self.resource.render(self.request)
self.assertEqual(
self.request.responseCode, http.REQUESTED_RANGE_NOT_SATISFIABLE
)
self.assertEqual(b"".join(self.request.written), b"")
self.assertEqual(
self.request.responseHeaders.getRawHeaders(b"content-length")[0], b"0"
)
# Sections 10.4.17 and 14.16
self.assertEqual(
self.request.responseHeaders.getRawHeaders(b"content-range")[0],
networkString("bytes */%d" % (len(self.payload),)),
)
class DirectoryListerTests(TestCase):
"""
Tests for L{static.DirectoryLister}.
"""
def _request(self, uri):
request = DummyRequest([b""])
request.uri = uri
return request
def test_renderHeader(self):
"""
L{static.DirectoryLister} prints the request uri as header of the
rendered content.
"""
path = FilePath(self.mktemp())
path.makedirs()
lister = static.DirectoryLister(path.path)
data = lister.render(self._request(b"foo"))
self.assertIn(b"<h1>Directory listing for foo</h1>", data)
self.assertIn(b"<title>Directory listing for foo</title>", data)
def test_renderUnquoteHeader(self):
"""
L{static.DirectoryLister} unquotes the request URI before printing it.
"""
path = FilePath(self.mktemp())
path.makedirs()
lister = static.DirectoryLister(path.path)
data = lister.render(self._request(b"foo%20bar"))
self.assertIn(b"<h1>Directory listing for foo bar</h1>", data)
self.assertIn(b"<title>Directory listing for foo bar</title>", data)
def test_escapeHeader(self):
"""
L{static.DirectoryLister} escapes "&", "<" and ">" after unquoting the
request URI.
"""
path = FilePath(self.mktemp())
path.makedirs()
lister = static.DirectoryLister(path.path)
data = lister.render(self._request(b"foo%26bar"))
self.assertIn(b"<h1>Directory listing for foo&bar</h1>", data)
self.assertIn(b"<title>Directory listing for foo&bar</title>", data)
def test_renderFiles(self):
"""
L{static.DirectoryLister} is able to list all the files inside a
directory.
"""
path = FilePath(self.mktemp())
path.makedirs()
path.child("file1").setContent(b"content1")
path.child("file2").setContent(b"content2" * 1000)
lister = static.DirectoryLister(path.path)
data = lister.render(self._request(b"foo"))
body = b"""<tr class="odd">
<td><a href="file1">file1</a></td>
<td>8B</td>
<td>[text/html]</td>
<td></td>
</tr>
<tr class="even">
<td><a href="file2">file2</a></td>
<td>7K</td>
<td>[text/html]</td>
<td></td>
</tr>"""
self.assertIn(body, data)
def test_renderDirectories(self):
"""
L{static.DirectoryLister} is able to list all the directories inside
a directory.
"""
path = FilePath(self.mktemp())
path.makedirs()
path.child("dir1").makedirs()
path.child("dir2 & 3").makedirs()
lister = static.DirectoryLister(path.path)
data = lister.render(self._request(b"foo"))
body = b"""<tr class="odd">
<td><a href="dir1/">dir1/</a></td>
<td></td>
<td>[Directory]</td>
<td></td>
</tr>
<tr class="even">
<td><a href="dir2%20%26%203/">dir2 & 3/</a></td>
<td></td>
<td>[Directory]</td>
<td></td>
</tr>"""
self.assertIn(body, data)
def test_renderFiltered(self):
"""
L{static.DirectoryLister} takes an optional C{dirs} argument that
filters which directories and files are listed.
"""
path = FilePath(self.mktemp())
path.makedirs()
path.child("dir1").makedirs()
path.child("dir2").makedirs()
path.child("dir3").makedirs()
lister = static.DirectoryLister(path.path, dirs=["dir1", "dir3"])
data = lister.render(self._request(b"foo"))
body = b"""<tr class="odd">
<td><a href="dir1/">dir1/</a></td>
<td></td>
<td>[Directory]</td>
<td></td>
</tr>
<tr class="even">
<td><a href="dir3/">dir3/</a></td>
<td></td>
<td>[Directory]</td>
<td></td>
</tr>"""
self.assertIn(body, data)
def test_oddAndEven(self):
"""
L{static.DirectoryLister} alternates the row class between odd and
even rows in the table.
"""
lister = static.DirectoryLister(None)
elements = [
{"href": "", "text": "", "size": "", "type": "", "encoding": ""}
for i in range(5)
]
content = lister._buildTableContent(elements)
self.assertEqual(len(content), 5)
self.assertTrue(content[0].startswith('<tr class="odd">'))
self.assertTrue(content[1].startswith('<tr class="even">'))
self.assertTrue(content[2].startswith('<tr class="odd">'))
self.assertTrue(content[3].startswith('<tr class="even">'))
self.assertTrue(content[4].startswith('<tr class="odd">'))
def test_contentType(self):
"""
L{static.DirectoryLister} produces a MIME-type that indicates that it is
HTML, and includes its charset (UTF-8).
"""
path = FilePath(self.mktemp())
path.makedirs()
lister = static.DirectoryLister(path.path)
req = self._request(b"")
lister.render(req)
self.assertEqual(
req.responseHeaders.getRawHeaders(b"content-type")[0],
b"text/html; charset=utf-8",
)
def test_mimeTypeAndEncodings(self):
"""
L{static.DirectoryLister} is able to detect mimetype and encoding of
listed files.
"""
path = FilePath(self.mktemp())
path.makedirs()
path.child("file1.txt").setContent(b"file1")
path.child("file2.py").setContent(b"python")
path.child("file3.conf.gz").setContent(b"conf compressed")
path.child("file4.diff.bz2").setContent(b"diff compressed")
directory = os.listdir(path.path)
directory.sort()
contentTypes = {
".txt": "text/plain",
".py": "text/python",
".conf": "text/configuration",
".diff": "text/diff",
}
lister = static.DirectoryLister(path.path, contentTypes=contentTypes)
dirs, files = lister._getFilesAndDirectories(directory)
self.assertEqual(dirs, [])
self.assertEqual(
files,
[
{
"encoding": "",
"href": "file1.txt",
"size": "5B",
"text": "file1.txt",
"type": "[text/plain]",
},
{
"encoding": "",
"href": "file2.py",
"size": "6B",
"text": "file2.py",
"type": "[text/python]",
},
{
"encoding": "[gzip]",
"href": "file3.conf.gz",
"size": "15B",
"text": "file3.conf.gz",
"type": "[text/configuration]",
},
{
"encoding": "[bzip2]",
"href": "file4.diff.bz2",
"size": "15B",
"text": "file4.diff.bz2",
"type": "[text/diff]",
},
],
)
@skipIf(not platform._supportsSymlinks(), "No symlink support")
def test_brokenSymlink(self):
"""
If a file in the listing points to a broken symlink, it should not
be returned by L{static.DirectoryLister._getFilesAndDirectories}.
"""
path = FilePath(self.mktemp())
path.makedirs()
file1 = path.child("file1")
file1.setContent(b"file1")
file1.linkTo(path.child("file2"))
file1.remove()
lister = static.DirectoryLister(path.path)
directory = os.listdir(path.path)
directory.sort()
dirs, files = lister._getFilesAndDirectories(directory)
self.assertEqual(dirs, [])
self.assertEqual(files, [])
def test_childrenNotFound(self):
"""
Any child resource of L{static.DirectoryLister} renders an HTTP
I{NOT FOUND} response code.
"""
path = FilePath(self.mktemp())
path.makedirs()
lister = static.DirectoryLister(path.path)
request = self._request(b"")
child = resource.getChildForRequest(lister, request)
result = _render(child, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, http.NOT_FOUND)
result.addCallback(cbRendered)
return result
def test_repr(self):
"""
L{static.DirectoryLister.__repr__} gives the path of the lister.
"""
path = FilePath(self.mktemp())
lister = static.DirectoryLister(path.path)
self.assertEqual(repr(lister), "<DirectoryLister of {!r}>".format(path.path))
self.assertEqual(str(lister), "<DirectoryLister of {!r}>".format(path.path))
def test_formatFileSize(self):
"""
L{static.formatFileSize} formats a number of bytes into a more readable
form.
"""
self.assertEqual(static.formatFileSize(0), "0B")
self.assertEqual(static.formatFileSize(123), "123B")
self.assertEqual(static.formatFileSize(4567), "4K")
self.assertEqual(static.formatFileSize(8900000), "8M")
self.assertEqual(static.formatFileSize(1234000000), "1G")
self.assertEqual(static.formatFileSize(1234567890000), "1149G")
class LoadMimeTypesTests(TestCase):
"""
Tests for the MIME type loading routine.
@cvar UNSET: A sentinel to signify that C{self.paths} has not been set by
the mock init.
"""
UNSET = object()
def setUp(self):
self.paths = self.UNSET
def _fakeInit(self, paths):
"""
A mock L{mimetypes.init} that records the value of the passed C{paths}
argument.
@param paths: The paths that will be recorded.
"""
self.paths = paths
def test_defaultArgumentIsNone(self):
"""
By default, L{None} is passed to C{mimetypes.init}.
"""
static.loadMimeTypes(init=self._fakeInit)
self.assertIdentical(self.paths, None)
def test_extraLocationsWork(self):
"""
Passed MIME type files are passed to C{mimetypes.init}.
"""
paths = ["x", "y", "z"]
static.loadMimeTypes(paths, init=self._fakeInit)
self.assertIdentical(self.paths, paths)
def test_usesGlobalInitFunction(self):
"""
By default, C{mimetypes.init} is called.
"""
# Checking mimetypes.inited doesn't always work, because
# something, somewhere, calls mimetypes.init. Yay global
# mutable state :)
if getattr(inspect, "signature", None):
signature = inspect.signature(static.loadMimeTypes)
self.assertIs(signature.parameters["init"].default, mimetypes.init)
else:
args, _, _, defaults = inspect.getargspec(static.loadMimeTypes)
defaultInit = defaults[args.index("init")]
self.assertIs(defaultInit, mimetypes.init)
class StaticDeprecationTests(TestCase):
def test_addSlashDeprecated(self):
"""
L{twisted.web.static.addSlash} is deprecated.
"""
from twisted.web.static import addSlash
addSlash(DummyRequest([b""]))
warnings = self.flushWarnings([self.test_addSlashDeprecated])
self.assertEqual(len(warnings), 1)
self.assertEqual(
warnings[0]["message"],
"twisted.web.static.addSlash was deprecated in Twisted 16.0.0",
)
| 37.409315 | 88 | 0.613634 |
2340742f71f589e3c3bace832d0090d0e4289bea | 9,008 | py | Python | pennylane/tape/tapes/reversible.py | theRoughCode/pennylane | 317f82ef00c752beeef7d2412b88119a753467b4 | ["Apache-2.0"] | null | null | null | pennylane/tape/tapes/reversible.py | theRoughCode/pennylane | 317f82ef00c752beeef7d2412b88119a753467b4 | ["Apache-2.0"] | 1 | 2020-10-04T22:45:45.000Z | 2020-10-04T22:45:45.000Z | pennylane/tape/tapes/reversible.py | theRoughCode/pennylane | 317f82ef00c752beeef7d2412b88119a753467b4 | ["Apache-2.0"] | null | null | null |
# Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Quantum tape that implements reversible backpropagation.
"""
# pylint: disable=attribute-defined-outside-init,protected-access
from copy import copy
from functools import reduce
from string import ascii_letters as ABC
import numpy as np
import pennylane as qml
from .tape import QuantumTape
ABC_ARRAY = np.array(list(ABC))
class ReversibleTape(QuantumTape):
r"""Quantum tape for computing gradients via reversible analytic differentiation.
.. note::
The reversible analytic differentiation method has the following restrictions:
* As it requires knowledge of the statevector, only statevector simulator devices can be used.
* Differentiation is only supported for the parametrized quantum operations
:class:`~.RX`, :class:`~.RY`, :class:`~.RZ`, and :class:`~.Rot`.
This class extends the :class:`~.jacobian` method of the quantum tape to support analytic
gradients of qubit operations using reversible analytic differentiation. This gradient method
returns *exact* gradients, but requires the use of a statevector simulator. Simply create
the tape, and then call the Jacobian method:
>>> tape.jacobian(dev)
For more details on the quantum tape, please see :class:`~.QuantumTape`.
**Reversible analytic differentiation**
Assume a circuit has a gate :math:`G(\theta)` that we want to differentiate.
Without loss of generality, we can write the circuit in the form of three unitaries: :math:`UGV`.
Starting from the initial state :math:`\vert 0\rangle`, the quantum state is evolved up to the
"pre-measurement" state :math:`\vert\psi\rangle=UGV\vert 0\rangle`, which is saved
(this can be reused for each variable being differentiated).
We then apply the unitary :math:`V^{-1}` to evolve this state backwards in time
until just after the gate :math:`G` (hence the name "reversible").
The generator of :math:`G` is then applied as a gate, and we evolve forward using :math:`V` again.
At this stage, the state of the simulator is proportional to
:math:`\frac{\partial}{\partial\theta}\vert\psi\rangle`.
Some further post-processing of this gives the derivative
:math:`\frac{\partial}{\partial\theta} \langle \hat{O} \rangle` for any observable O.
The reversible approach is similar to backpropagation, but trades off extra computation for
enhanced memory efficiency. Where backpropagation caches the state tensors at each step during
a forward pass, the reversible method only caches the final pre-measurement state.
Compared to the parameter-shift rule, the reversible method can
be faster or slower, depending on the density and location of parametrized gates in a circuit
(circuits with higher density of parametrized gates near the end of the circuit will see a
benefit).
"""
def _grad_method(self, idx, use_graph=True, default_method="A"):
return super()._grad_method(idx, use_graph=use_graph, default_method=default_method)
@staticmethod
def _matrix_elem(vec1, obs, vec2, device):
r"""Computes the matrix element of an observable.
That is, given two basis states :math:`\mathbf{i}`, :math:`\mathbf{j}`,
this method returns :math:`\langle \mathbf{i} \vert \hat{O} \vert \mathbf{j} \rangle`.
Unmeasured wires are contracted, and a scalar is returned.
Args:
vec1 (array[complex]): a length :math:`2^N` statevector
obs (.Observable): a PennyLane observable
vec2 (array[complex]): a length :math:`2^N` statevector
device (.QubitDevice): the device used to compute the matrix elements
"""
# pylint: disable=protected-access
mat = device._reshape(obs.matrix, [2] * len(obs.wires) * 2)
wires = obs.wires
vec1_indices = ABC[: device.num_wires]
obs_in_indices = "".join(ABC_ARRAY[wires.tolist()].tolist())
obs_out_indices = ABC[device.num_wires : device.num_wires + len(wires)]
obs_indices = "".join([obs_in_indices, obs_out_indices])
vec2_indices = reduce(
lambda old_string, idx_pair: old_string.replace(idx_pair[0], idx_pair[1]),
zip(obs_in_indices, obs_out_indices),
vec1_indices,
)
einsum_str = "{vec1_indices},{obs_indices},{vec2_indices}->".format(
vec1_indices=vec1_indices,
obs_indices=obs_indices,
vec2_indices=vec2_indices,
)
return device._einsum(einsum_str, device._conj(vec1), mat, vec2)
def jacobian(self, device, params=None, **options):
# The parameter_shift_var method needs to evaluate the circuit
# at the unshifted parameter values; the pre-rotated statevector is then stored
# in the self._state attribute. Here, we set the value of the attribute to None
# before each Jacobian call, so that the statevector is calculated only once.
self._state = None
return super().jacobian(device, params, **options)
def analytic_pd(self, idx, device, params=None, **options):
t_idx = list(self.trainable_params)[idx]
op = self._par_info[t_idx]["op"]
p_idx = self._par_info[t_idx]["p_idx"]
# The reversible tape only supports differentiating
# expectation values of observables for now.
for m in self.measurements:
if (
m.return_type is qml.operation.Variance
or m.return_type is qml.operation.Probability
):
raise ValueError(
f"{m.return_type} is not supported with the reversible gradient method"
)
# The reversible tape only supports the RX, RY, RZ, and Rot operations for now:
#
# * CRX, CRY, CRZ ops have a non-unitary matrix as generator.
#
# * PauliRot, MultiRZ, U2, and U3 do not have generators specified.
#
# TODO: the controlled rotations can be supported by multiplying ``state``
# directly by these generators within this function
# (or by allowing non-unitary matrix multiplies in the simulator backends)
if op.name not in ["RX", "RY", "RZ", "Rot"]:
raise ValueError(
"The {} gate is not currently supported with the "
"reversible gradient method.".format(op.name)
)
if self._state is None:
self.execute_device(params, device)
self._state = device._pre_rotated_state
self.set_parameters(params)
# create a new circuit which rewinds the pre-measurement state to just after `op`,
# applies the generator of `op`, and then plays forward back to
# pre-measurement step
wires = op.wires
op_idx = self.operations.index(op)
# TODO: likely better to use circuitgraph to determine minimally necessary ops
between_ops = self.operations[op_idx + 1 :]
if op.name == "Rot":
decomp = op.decomposition(*op.parameters, wires=wires)
generator, multiplier = decomp[p_idx].generator
between_ops = decomp[p_idx + 1 :] + between_ops
else:
generator, multiplier = op.generator
generator = generator(wires)
diff_circuit = QuantumTape()
diff_circuit._ops = [copy(op).inv() for op in between_ops[::-1]] + [generator] + between_ops
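# For example, if the original operations are [RX(a), RY(b), RZ(c)] and we are
# differentiating RX(a), then between_ops == [RY(b), RZ(c)] and the new circuit
# applies RZ(c)^-1, RY(b)^-1, PauliX, RY(b), RZ(c) to the saved state.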
# set the simulator state to be the pre-measurement state
device._state = self._state
# evolve the pre-measurement state under this new circuit
device.execute(diff_circuit)
dstate = device._pre_rotated_state # TODO: this will only work for QubitDevices
# compute matrix element <d(state)|O|state> for each observable O
matrix_elems = device._asarray(
[self._matrix_elem(dstate, ob, self._state, device) for ob in self.observables]
# TODO: if all observables act on same number of wires, could
# do all at once with einsum
)
# reset state back to pre-measurement value
device._pre_rotated_state = self._state
return 2 * multiplier * device._imag(matrix_elems)
| 43.941463 | 103 | 0.658637 |
fe4e8b1a8acfdf7aaa8e2d98cac419a3504f8ce3 | 45,630 | py | Python | third_party/python/Parser/asdl_c.py | appotry/cosmopolitan | af4687cc3f2331a23dc336183ab58fe001cda082 | ["ISC"] | null | null | null | third_party/python/Parser/asdl_c.py | appotry/cosmopolitan | af4687cc3f2331a23dc336183ab58fe001cda082 | ["ISC"] | null | null | null | third_party/python/Parser/asdl_c.py | appotry/cosmopolitan | af4687cc3f2331a23dc336183ab58fe001cda082 | ["ISC"] | null | null | null |
#! /usr/bin/env python
"""Generate C code from an ASDL description."""
import os, sys
import asdl
TABSIZE = 4
MAX_COL = 80
def get_c_type(name):
"""Return a string for the C name of the type.
This function special cases the default types provided by asdl.
"""
if name in asdl.builtin_types:
return name
else:
return "%s_ty" % name
def reflow_lines(s, depth):
"""Reflow the line s indented depth tabs.
Return a sequence of lines where no line extends beyond MAX_COL
when properly indented. The first line is properly indented based
exclusively on depth * TABSIZE. All following lines -- these are
the reflowed lines generated by this function -- start at the same
column as the first character beyond the opening { in the first
line.
"""
size = MAX_COL - depth * TABSIZE
if len(s) < size:
return [s]
lines = []
cur = s
padding = ""
while len(cur) > size:
i = cur.rfind(' ', 0, size)
# XXX this should be fixed for real
if i == -1 and 'GeneratorExp' in cur:
i = size + 3
assert i != -1, "Impossible line %d to reflow: %r" % (size, s)
lines.append(padding + cur[:i])
if len(lines) == 1:
# find new size based on brace
j = cur.find('{', 0, i)
if j >= 0:
j += 2 # account for the brace and the space after it
size -= j
padding = " " * j
else:
j = cur.find('(', 0, i)
if j >= 0:
j += 1 # account for the paren (no space after it)
size -= j
padding = " " * j
cur = cur[i+1:]
else:
lines.append(padding + cur)
return lines
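# Example: a depth-0 prototype longer than MAX_COL is split at a space, and each
# continuation line is padded so it starts one column past the first '(' (or two
# columns past the first '{') on the first line, e.g.
#   reflow_lines("stmt_ty _Py_Assign(asdl_seq * targets, expr_ty value, int lineno, int col_offset, PyArena *arena);", 0)
# returns two lines, the second indented to the column just after "_Py_Assign(".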
def is_simple(sum):
"""Return True if a sum is a simple.
A sum is simple if its types have no fields, e.g.
unaryop = Invert | Not | UAdd | USub
"""
for t in sum.types:
if t.fields:
return False
return True
class EmitVisitor(asdl.VisitorBase):
"""Visit that emits lines"""
def __init__(self, file):
self.file = file
self.identifiers = set()
super(EmitVisitor, self).__init__()
def emit_identifier(self, name):
name = str(name)
if name in self.identifiers:
return
self.emit("_Py_IDENTIFIER(%s);" % name, 0)
self.identifiers.add(name)
def emit(self, s, depth, reflow=True):
# XXX reflow long lines?
if reflow:
lines = reflow_lines(s, depth)
else:
lines = [s]
for line in lines:
line = (" " * TABSIZE * depth) + line + "\n"
self.file.write(line)
class TypeDefVisitor(EmitVisitor):
def visitModule(self, mod):
for dfn in mod.dfns:
self.visit(dfn)
def visitType(self, type, depth=0):
self.visit(type.value, type.name, depth)
def visitSum(self, sum, name, depth):
if is_simple(sum):
self.simple_sum(sum, name, depth)
else:
self.sum_with_constructors(sum, name, depth)
def simple_sum(self, sum, name, depth):
enum = []
for i in range(len(sum.types)):
type = sum.types[i]
enum.append("%s=%d" % (type.name, i + 1))
enums = ", ".join(enum)
ctype = get_c_type(name)
s = "typedef enum _%s { %s } %s;" % (name, enums, ctype)
self.emit(s, depth)
self.emit("", depth)
def sum_with_constructors(self, sum, name, depth):
ctype = get_c_type(name)
s = "typedef struct _%(name)s *%(ctype)s;" % locals()
self.emit(s, depth)
self.emit("", depth)
def visitProduct(self, product, name, depth):
ctype = get_c_type(name)
s = "typedef struct _%(name)s *%(ctype)s;" % locals()
self.emit(s, depth)
self.emit("", depth)
class StructVisitor(EmitVisitor):
"""Visitor to generate typedefs for AST."""
def visitModule(self, mod):
for dfn in mod.dfns:
self.visit(dfn)
def visitType(self, type, depth=0):
self.visit(type.value, type.name, depth)
def visitSum(self, sum, name, depth):
if not is_simple(sum):
self.sum_with_constructors(sum, name, depth)
def sum_with_constructors(self, sum, name, depth):
def emit(s, depth=depth):
self.emit(s % sys._getframe(1).f_locals, depth)
enum = []
for i in range(len(sum.types)):
type = sum.types[i]
enum.append("%s_kind=%d" % (type.name, i + 1))
emit("enum _%(name)s_kind {" + ", ".join(enum) + "};")
emit("struct _%(name)s {")
emit("enum _%(name)s_kind kind;", depth + 1)
emit("union {", depth + 1)
for t in sum.types:
self.visit(t, depth + 2)
emit("} v;", depth + 1)
for field in sum.attributes:
# rudimentary attribute handling
type = str(field.type)
assert type in asdl.builtin_types, type
emit("%s %s;" % (type, field.name), depth + 1);
emit("};")
emit("")
def visitConstructor(self, cons, depth):
if cons.fields:
self.emit("struct {", depth)
for f in cons.fields:
self.visit(f, depth + 1)
self.emit("} %s;" % cons.name, depth)
self.emit("", depth)
def visitField(self, field, depth):
# XXX need to lookup field.type, because it might be something
# like a builtin...
ctype = get_c_type(field.type)
name = field.name
if field.seq:
if field.type == 'cmpop':
self.emit("asdl_int_seq *%(name)s;" % locals(), depth)
else:
self.emit("asdl_seq *%(name)s;" % locals(), depth)
else:
self.emit("%(ctype)s %(name)s;" % locals(), depth)
def visitProduct(self, product, name, depth):
self.emit("struct _%(name)s {" % locals(), depth)
for f in product.fields:
self.visit(f, depth + 1)
for field in product.attributes:
# rudimentary attribute handling
type = str(field.type)
assert type in asdl.builtin_types, type
self.emit("%s %s;" % (type, field.name), depth + 1);
self.emit("};", depth)
self.emit("", depth)
class PrototypeVisitor(EmitVisitor):
"""Generate function prototypes for the .h file"""
def visitModule(self, mod):
for dfn in mod.dfns:
self.visit(dfn)
def visitType(self, type):
self.visit(type.value, type.name)
def visitSum(self, sum, name):
if is_simple(sum):
pass # XXX
else:
for t in sum.types:
self.visit(t, name, sum.attributes)
def get_args(self, fields):
"""Return list of C argument into, one for each field.
Argument info is 3-tuple of a C type, variable name, and flag
that is true if type can be NULL.
"""
args = []
unnamed = {}
for f in fields:
if f.name is None:
name = f.type
c = unnamed[name] = unnamed.get(name, 0) + 1
if c > 1:
name = "name%d" % (c - 1)
else:
name = f.name
# XXX should extend get_c_type() to handle this
if f.seq:
if f.type == 'cmpop':
ctype = "asdl_int_seq *"
else:
ctype = "asdl_seq *"
else:
ctype = get_c_type(f.type)
args.append((ctype, name, f.opt or f.seq))
return args
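# e.g. a sequence field "expr* targets" yields ("asdl_seq *", "targets", True),
# while a required scalar field "expr value" yields ("expr_ty", "value", False).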
def visitConstructor(self, cons, type, attrs):
args = self.get_args(cons.fields)
attrs = self.get_args(attrs)
ctype = get_c_type(type)
self.emit_function(cons.name, ctype, args, attrs)
def emit_function(self, name, ctype, args, attrs, union=True):
args = args + attrs
if args:
argstr = ", ".join(["%s %s" % (atype, aname)
for atype, aname, opt in args])
argstr += ", PyArena *arena"
else:
argstr = "PyArena *arena"
margs = "a0"
for i in range(1, len(args)+1):
margs += ", a%d" % i
self.emit("#define %s(%s) _Py_%s(%s)" % (name, margs, name, margs), 0,
reflow=False)
self.emit("%s _Py_%s(%s);" % (ctype, name, argstr), False)
def visitProduct(self, prod, name):
self.emit_function(name, get_c_type(name),
self.get_args(prod.fields),
self.get_args(prod.attributes),
union=False)
class FunctionVisitor(PrototypeVisitor):
"""Visitor to generate constructor functions for AST."""
def emit_function(self, name, ctype, args, attrs, union=True):
def emit(s, depth=0, reflow=True):
self.emit(s, depth, reflow)
argstr = ", ".join(["%s %s" % (atype, aname)
for atype, aname, opt in args + attrs])
if argstr:
argstr += ", PyArena *arena"
else:
argstr = "PyArena *arena"
self.emit("%s" % ctype, 0)
emit("%s(%s)" % (name, argstr))
emit("{")
emit("%s p;" % ctype, 1)
for argtype, argname, opt in args:
if not opt and argtype != "int":
emit("if (!%s) {" % argname, 1)
emit("PyErr_SetString(PyExc_ValueError,", 2)
msg = "field %s is required for %s" % (argname, name)
emit(' "%s");' % msg,
2, reflow=False)
emit('return NULL;', 2)
emit('}', 1)
emit("p = (%s)PyArena_Malloc(arena, sizeof(*p));" % ctype, 1);
emit("if (!p)", 1)
emit("return NULL;", 2)
if union:
self.emit_body_union(name, args, attrs)
else:
self.emit_body_struct(name, args, attrs)
emit("return p;", 1)
emit("}")
emit("")
def emit_body_union(self, name, args, attrs):
def emit(s, depth=0, reflow=True):
self.emit(s, depth, reflow)
emit("p->kind = %s_kind;" % name, 1)
for argtype, argname, opt in args:
emit("p->v.%s.%s = %s;" % (name, argname, argname), 1)
for argtype, argname, opt in attrs:
emit("p->%s = %s;" % (argname, argname), 1)
def emit_body_struct(self, name, args, attrs):
def emit(s, depth=0, reflow=True):
self.emit(s, depth, reflow)
for argtype, argname, opt in args:
emit("p->%s = %s;" % (argname, argname), 1)
for argtype, argname, opt in attrs:
emit("p->%s = %s;" % (argname, argname), 1)
class PickleVisitor(EmitVisitor):
def visitModule(self, mod):
for dfn in mod.dfns:
self.visit(dfn)
def visitType(self, type):
self.visit(type.value, type.name)
def visitSum(self, sum, name):
pass
def visitProduct(self, sum, name):
pass
def visitConstructor(self, cons, name):
pass
def visitField(self, sum):
pass
class Obj2ModPrototypeVisitor(PickleVisitor):
def visitProduct(self, prod, name):
code = "static int obj2ast_%s(PyObject* obj, %s* out, PyArena* arena);"
self.emit(code % (name, get_c_type(name)), 0)
visitSum = visitProduct
class Obj2ModVisitor(PickleVisitor):
def funcHeader(self, name):
ctype = get_c_type(name)
self.emit("int", 0)
self.emit("obj2ast_%s(PyObject* obj, %s* out, PyArena* arena)" % (name, ctype), 0)
self.emit("{", 0)
self.emit("int isinstance;", 1)
self.emit("", 0)
def sumTrailer(self, name, add_label=False):
self.emit("", 0)
# there's really nothing more we can do if this fails ...
error = "expected some sort of %s, but got %%R" % name
format = "PyErr_Format(PyExc_TypeError, \"%s\", obj);"
self.emit(format % error, 1, reflow=False)
if add_label:
self.emit("failed:", 1)
self.emit("Py_XDECREF(tmp);", 1)
self.emit("return 1;", 1)
self.emit("}", 0)
self.emit("", 0)
def simpleSum(self, sum, name):
self.funcHeader(name)
for t in sum.types:
line = ("isinstance = PyObject_IsInstance(obj, "
"(PyObject *)%s_type);")
self.emit(line % (t.name,), 1)
self.emit("if (isinstance == -1) {", 1)
self.emit("return 1;", 2)
self.emit("}", 1)
self.emit("if (isinstance) {", 1)
self.emit("*out = %s;" % t.name, 2)
self.emit("return 0;", 2)
self.emit("}", 1)
self.sumTrailer(name)
def buildArgs(self, fields):
return ", ".join(fields + ["arena"])
def complexSum(self, sum, name):
self.funcHeader(name)
self.emit("PyObject *tmp = NULL;", 1)
for a in sum.attributes:
self.visitAttributeDeclaration(a, name, sum=sum)
self.emit("", 0)
# XXX: should we only do this for 'expr'?
self.emit("if (obj == Py_None) {", 1)
self.emit("*out = NULL;", 2)
self.emit("return 0;", 2)
self.emit("}", 1)
for a in sum.attributes:
self.visitField(a, name, sum=sum, depth=1)
for t in sum.types:
line = "isinstance = PyObject_IsInstance(obj, (PyObject*)%s_type);"
self.emit(line % (t.name,), 1)
self.emit("if (isinstance == -1) {", 1)
self.emit("return 1;", 2)
self.emit("}", 1)
self.emit("if (isinstance) {", 1)
for f in t.fields:
self.visitFieldDeclaration(f, t.name, sum=sum, depth=2)
self.emit("", 0)
for f in t.fields:
self.visitField(f, t.name, sum=sum, depth=2)
args = [f.name for f in t.fields] + [a.name for a in sum.attributes]
self.emit("*out = %s(%s);" % (t.name, self.buildArgs(args)), 2)
self.emit("if (*out == NULL) goto failed;", 2)
self.emit("return 0;", 2)
self.emit("}", 1)
self.sumTrailer(name, True)
def visitAttributeDeclaration(self, a, name, sum=sum):
ctype = get_c_type(a.type)
self.emit("%s %s;" % (ctype, a.name), 1)
def visitSum(self, sum, name):
if is_simple(sum):
self.simpleSum(sum, name)
else:
self.complexSum(sum, name)
def visitProduct(self, prod, name):
ctype = get_c_type(name)
self.emit("int", 0)
self.emit("obj2ast_%s(PyObject* obj, %s* out, PyArena* arena)" % (name, ctype), 0)
self.emit("{", 0)
self.emit("PyObject* tmp = NULL;", 1)
for f in prod.fields:
self.visitFieldDeclaration(f, name, prod=prod, depth=1)
for a in prod.attributes:
self.visitFieldDeclaration(a, name, prod=prod, depth=1)
self.emit("", 0)
for f in prod.fields:
self.visitField(f, name, prod=prod, depth=1)
for a in prod.attributes:
self.visitField(a, name, prod=prod, depth=1)
args = [f.name for f in prod.fields]
args.extend([a.name for a in prod.attributes])
self.emit("*out = %s(%s);" % (name, self.buildArgs(args)), 1)
self.emit("return 0;", 1)
self.emit("failed:", 0)
self.emit("Py_XDECREF(tmp);", 1)
self.emit("return 1;", 1)
self.emit("}", 0)
self.emit("", 0)
def visitFieldDeclaration(self, field, name, sum=None, prod=None, depth=0):
ctype = get_c_type(field.type)
if field.seq:
if self.isSimpleType(field):
self.emit("asdl_int_seq* %s;" % field.name, depth)
else:
self.emit("asdl_seq* %s;" % field.name, depth)
else:
ctype = get_c_type(field.type)
self.emit("%s %s;" % (ctype, field.name), depth)
def isSimpleSum(self, field):
# XXX can the members of this list be determined automatically?
return field.type in ('expr_context', 'boolop', 'operator',
'unaryop', 'cmpop')
def isNumeric(self, field):
return get_c_type(field.type) in ("int", "bool")
def isSimpleType(self, field):
return self.isSimpleSum(field) or self.isNumeric(field)
def visitField(self, field, name, sum=None, prod=None, depth=0):
ctype = get_c_type(field.type)
if field.opt:
check = "exists_not_none(obj, &PyId_%s)" % (field.name,)
else:
check = "_PyObject_HasAttrId(obj, &PyId_%s)" % (field.name,)
self.emit("if (%s) {" % (check,), depth, reflow=False)
self.emit("int res;", depth+1)
if field.seq:
self.emit("Py_ssize_t len;", depth+1)
self.emit("Py_ssize_t i;", depth+1)
self.emit("tmp = _PyObject_GetAttrId(obj, &PyId_%s);" % field.name, depth+1)
self.emit("if (tmp == NULL) goto failed;", depth+1)
if field.seq:
self.emit("if (!PyList_Check(tmp)) {", depth+1)
self.emit("PyErr_Format(PyExc_TypeError, \"%s field \\\"%s\\\" must "
"be a list, not a %%.200s\", tmp->ob_type->tp_name);" %
(name, field.name),
depth+2, reflow=False)
self.emit("goto failed;", depth+2)
self.emit("}", depth+1)
self.emit("len = PyList_GET_SIZE(tmp);", depth+1)
if self.isSimpleType(field):
self.emit("%s = _Py_asdl_int_seq_new(len, arena);" % field.name, depth+1)
else:
self.emit("%s = _Py_asdl_seq_new(len, arena);" % field.name, depth+1)
self.emit("if (%s == NULL) goto failed;" % field.name, depth+1)
self.emit("for (i = 0; i < len; i++) {", depth+1)
self.emit("%s val;" % ctype, depth+2)
self.emit("res = obj2ast_%s(PyList_GET_ITEM(tmp, i), &val, arena);" %
field.type, depth+2, reflow=False)
self.emit("if (res != 0) goto failed;", depth+2)
self.emit("if (len != PyList_GET_SIZE(tmp)) {", depth+2)
self.emit("PyErr_SetString(PyExc_RuntimeError, \"%s field \\\"%s\\\" "
"changed size during iteration\");" %
(name, field.name),
depth+3, reflow=False)
self.emit("goto failed;", depth+3)
self.emit("}", depth+2)
self.emit("asdl_seq_SET(%s, i, val);" % field.name, depth+2)
self.emit("}", depth+1)
else:
self.emit("res = obj2ast_%s(tmp, &%s, arena);" %
(field.type, field.name), depth+1)
self.emit("if (res != 0) goto failed;", depth+1)
self.emit("Py_CLEAR(tmp);", depth+1)
self.emit("} else {", depth)
if not field.opt:
message = "required field \\\"%s\\\" missing from %s" % (field.name, name)
format = "PyErr_SetString(PyExc_TypeError, \"%s\");"
self.emit(format % message, depth+1, reflow=False)
self.emit("return 1;", depth+1)
else:
if self.isNumeric(field):
self.emit("%s = 0;" % field.name, depth+1)
elif not self.isSimpleType(field):
self.emit("%s = NULL;" % field.name, depth+1)
else:
raise TypeError("could not determine the default value for %s" % field.name)
self.emit("}", depth)
class MarshalPrototypeVisitor(PickleVisitor):
def prototype(self, sum, name):
ctype = get_c_type(name)
self.emit("static int marshal_write_%s(PyObject **, int *, %s);"
% (name, ctype), 0)
visitProduct = visitSum = prototype
class PyTypesDeclareVisitor(PickleVisitor):
def visitProduct(self, prod, name):
self.emit("static PyTypeObject *%s_type;" % name, 0)
self.emit("static PyObject* ast2obj_%s(void*);" % name, 0)
if prod.attributes:
for a in prod.attributes:
self.emit_identifier(a.name)
self.emit("static char *%s_attributes[] = {" % name, 0)
for a in prod.attributes:
self.emit('"%s",' % a.name, 1)
self.emit("};", 0)
if prod.fields:
for f in prod.fields:
self.emit_identifier(f.name)
self.emit("static char *%s_fields[]={" % name,0)
for f in prod.fields:
self.emit('"%s",' % f.name, 1)
self.emit("};", 0)
def visitSum(self, sum, name):
self.emit("static PyTypeObject *%s_type;" % name, 0)
if sum.attributes:
for a in sum.attributes:
self.emit_identifier(a.name)
self.emit("static char *%s_attributes[] = {" % name, 0)
for a in sum.attributes:
self.emit('"%s",' % a.name, 1)
self.emit("};", 0)
ptype = "void*"
if is_simple(sum):
ptype = get_c_type(name)
tnames = []
for t in sum.types:
tnames.append(str(t.name)+"_singleton")
tnames = ", *".join(tnames)
self.emit("static PyObject *%s;" % tnames, 0)
self.emit("static PyObject* ast2obj_%s(%s);" % (name, ptype), 0)
for t in sum.types:
self.visitConstructor(t, name)
def visitConstructor(self, cons, name):
self.emit("static PyTypeObject *%s_type;" % cons.name, 0)
if cons.fields:
for t in cons.fields:
self.emit_identifier(t.name)
self.emit("static char *%s_fields[]={" % cons.name, 0)
for t in cons.fields:
self.emit('"%s",' % t.name, 1)
self.emit("};",0)
class PyTypesVisitor(PickleVisitor):
def visitModule(self, mod):
self.emit("""
typedef struct {
PyObject_HEAD
PyObject *dict;
} AST_object;
static void
ast_dealloc(AST_object *self)
{
/* bpo-31095: UnTrack is needed before calling any callbacks */
PyObject_GC_UnTrack(self);
Py_CLEAR(self->dict);
Py_TYPE(self)->tp_free(self);
}
static int
ast_traverse(AST_object *self, visitproc visit, void *arg)
{
Py_VISIT(self->dict);
return 0;
}
static int
ast_clear(AST_object *self)
{
Py_CLEAR(self->dict);
return 0;
}
static int
ast_type_init(PyObject *self, PyObject *args, PyObject *kw)
{
_Py_IDENTIFIER(_fields);
Py_ssize_t i, numfields = 0;
int res = -1;
PyObject *key, *value, *fields;
fields = _PyObject_GetAttrId((PyObject*)Py_TYPE(self), &PyId__fields);
if (!fields)
PyErr_Clear();
if (fields) {
numfields = PySequence_Size(fields);
if (numfields == -1)
goto cleanup;
}
res = 0; /* if no error occurs, this stays 0 to the end */
if (PyTuple_GET_SIZE(args) > 0) {
if (numfields != PyTuple_GET_SIZE(args)) {
PyErr_Format(PyExc_TypeError, "%.400s constructor takes %s"
"%zd positional argument%s",
Py_TYPE(self)->tp_name,
numfields == 0 ? "" : "either 0 or ",
numfields, numfields == 1 ? "" : "s");
res = -1;
goto cleanup;
}
for (i = 0; i < PyTuple_GET_SIZE(args); i++) {
/* cannot be reached when fields is NULL */
PyObject *name = PySequence_GetItem(fields, i);
if (!name) {
res = -1;
goto cleanup;
}
res = PyObject_SetAttr(self, name, PyTuple_GET_ITEM(args, i));
Py_DECREF(name);
if (res < 0)
goto cleanup;
}
}
if (kw) {
i = 0; /* needed by PyDict_Next */
while (PyDict_Next(kw, &i, &key, &value)) {
res = PyObject_SetAttr(self, key, value);
if (res < 0)
goto cleanup;
}
}
cleanup:
Py_XDECREF(fields);
return res;
}
/* Pickling support */
static PyObject *
ast_type_reduce(PyObject *self, PyObject *unused)
{
PyObject *res;
_Py_IDENTIFIER(__dict__);
PyObject *dict = _PyObject_GetAttrId(self, &PyId___dict__);
if (dict == NULL) {
if (PyErr_ExceptionMatches(PyExc_AttributeError))
PyErr_Clear();
else
return NULL;
}
if (dict) {
res = Py_BuildValue("O()O", Py_TYPE(self), dict);
Py_DECREF(dict);
return res;
}
return Py_BuildValue("O()", Py_TYPE(self));
}
static PyMethodDef ast_type_methods[] = {
{"__reduce__", ast_type_reduce, METH_NOARGS, NULL},
{NULL}
};
static PyGetSetDef ast_type_getsets[] = {
{"__dict__", PyObject_GenericGetDict, PyObject_GenericSetDict},
{NULL}
};
static PyTypeObject AST_type = {
PyVarObject_HEAD_INIT(&PyType_Type, 0)
"_ast.AST",
sizeof(AST_object),
0,
(destructor)ast_dealloc, /* tp_dealloc */
0, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
0, /* tp_reserved */
0, /* tp_repr */
0, /* tp_as_number */
0, /* tp_as_sequence */
0, /* tp_as_mapping */
0, /* tp_hash */
0, /* tp_call */
0, /* tp_str */
PyObject_GenericGetAttr, /* tp_getattro */
PyObject_GenericSetAttr, /* tp_setattro */
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC, /* tp_flags */
0, /* tp_doc */
(traverseproc)ast_traverse, /* tp_traverse */
(inquiry)ast_clear, /* tp_clear */
0, /* tp_richcompare */
0, /* tp_weaklistoffset */
0, /* tp_iter */
0, /* tp_iternext */
ast_type_methods, /* tp_methods */
0, /* tp_members */
ast_type_getsets, /* tp_getset */
0, /* tp_base */
0, /* tp_dict */
0, /* tp_descr_get */
0, /* tp_descr_set */
offsetof(AST_object, dict),/* tp_dictoffset */
(initproc)ast_type_init, /* tp_init */
PyType_GenericAlloc, /* tp_alloc */
PyType_GenericNew, /* tp_new */
PyObject_GC_Del, /* tp_free */
};
static PyTypeObject* make_type(char *type, PyTypeObject* base, char**fields, int num_fields)
{
PyObject *fnames, *result;
int i;
fnames = PyTuple_New(num_fields);
if (!fnames) return NULL;
for (i = 0; i < num_fields; i++) {
PyObject *field = PyUnicode_FromString(fields[i]);
if (!field) {
Py_DECREF(fnames);
return NULL;
}
PyTuple_SET_ITEM(fnames, i, field);
}
result = PyObject_CallFunction((PyObject*)&PyType_Type, "s(O){sOss}",
type, base, "_fields", fnames, "__module__", "_ast");
Py_DECREF(fnames);
return (PyTypeObject*)result;
}
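/* The "s(O){sOss}" format above is roughly equivalent to calling
   type(<name>, (base,), {"_fields": fnames, "__module__": "_ast"})
   from Python: a name string, a one-element bases tuple, and a
   namespace dict are handed to PyType_Type to build each node class. */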
static int add_attributes(PyTypeObject* type, char**attrs, int num_fields)
{
int i, result;
_Py_IDENTIFIER(_attributes);
PyObject *s, *l = PyTuple_New(num_fields);
if (!l)
return 0;
for (i = 0; i < num_fields; i++) {
s = PyUnicode_FromString(attrs[i]);
if (!s) {
Py_DECREF(l);
return 0;
}
PyTuple_SET_ITEM(l, i, s);
}
result = _PyObject_SetAttrId((PyObject*)type, &PyId__attributes, l) >= 0;
Py_DECREF(l);
return result;
}
/* Conversion AST -> Python */
static PyObject* ast2obj_list(asdl_seq *seq, PyObject* (*func)(void*))
{
Py_ssize_t i, n = asdl_seq_LEN(seq);
PyObject *result = PyList_New(n);
PyObject *value;
if (!result)
return NULL;
for (i = 0; i < n; i++) {
value = func(asdl_seq_GET(seq, i));
if (!value) {
Py_DECREF(result);
return NULL;
}
PyList_SET_ITEM(result, i, value);
}
return result;
}
static PyObject* ast2obj_object(void *o)
{
if (!o)
o = Py_None;
Py_INCREF((PyObject*)o);
return (PyObject*)o;
}
#define ast2obj_singleton ast2obj_object
#define ast2obj_constant ast2obj_object
#define ast2obj_identifier ast2obj_object
#define ast2obj_string ast2obj_object
#define ast2obj_bytes ast2obj_object
static PyObject* ast2obj_int(long b)
{
return PyLong_FromLong(b);
}
/* Conversion Python -> AST */
static int obj2ast_singleton(PyObject *obj, PyObject** out, PyArena* arena)
{
if (obj != Py_None && obj != Py_True && obj != Py_False) {
PyErr_SetString(PyExc_ValueError,
"AST singleton must be True, False, or None");
return 1;
}
*out = obj;
return 0;
}
static int obj2ast_object(PyObject* obj, PyObject** out, PyArena* arena)
{
if (obj == Py_None)
obj = NULL;
if (obj) {
if (PyArena_AddPyObject(arena, obj) < 0) {
*out = NULL;
return -1;
}
Py_INCREF(obj);
}
*out = obj;
return 0;
}
static int obj2ast_constant(PyObject* obj, PyObject** out, PyArena* arena)
{
if (obj) {
if (PyArena_AddPyObject(arena, obj) < 0) {
*out = NULL;
return -1;
}
Py_INCREF(obj);
}
*out = obj;
return 0;
}
static int obj2ast_identifier(PyObject* obj, PyObject** out, PyArena* arena)
{
if (!PyUnicode_CheckExact(obj) && obj != Py_None) {
PyErr_SetString(PyExc_TypeError, "AST identifier must be of type str");
return 1;
}
return obj2ast_object(obj, out, arena);
}
static int obj2ast_string(PyObject* obj, PyObject** out, PyArena* arena)
{
if (!PyUnicode_CheckExact(obj) && !PyBytes_CheckExact(obj)) {
PyErr_SetString(PyExc_TypeError, "AST string must be of type str");
return 1;
}
return obj2ast_object(obj, out, arena);
}
static int obj2ast_bytes(PyObject* obj, PyObject** out, PyArena* arena)
{
if (!PyBytes_CheckExact(obj)) {
PyErr_SetString(PyExc_TypeError, "AST bytes must be of type bytes");
return 1;
}
return obj2ast_object(obj, out, arena);
}
static int obj2ast_int(PyObject* obj, int* out, PyArena* arena)
{
int i;
if (!PyLong_Check(obj)) {
PyErr_Format(PyExc_ValueError, "invalid integer value: %R", obj);
return 1;
}
i = _PyLong_AsInt(obj);
if (i == -1 && PyErr_Occurred())
return 1;
*out = i;
return 0;
}
static int add_ast_fields(void)
{
PyObject *empty_tuple, *d;
if (PyType_Ready(&AST_type) < 0)
return -1;
d = AST_type.tp_dict;
empty_tuple = PyTuple_New(0);
if (!empty_tuple ||
PyDict_SetItemString(d, "_fields", empty_tuple) < 0 ||
PyDict_SetItemString(d, "_attributes", empty_tuple) < 0) {
Py_XDECREF(empty_tuple);
return -1;
}
Py_DECREF(empty_tuple);
return 0;
}
static int exists_not_none(PyObject *obj, _Py_Identifier *id)
{
int isnone;
PyObject *attr = _PyObject_GetAttrId(obj, id);
if (!attr) {
PyErr_Clear();
return 0;
}
isnone = attr == Py_None;
Py_DECREF(attr);
return !isnone;
}
""", 0, reflow=False)
self.emit("static int init_types(void)",0)
self.emit("{", 0)
self.emit("static int initialized;", 1)
self.emit("if (initialized) return 1;", 1)
self.emit("if (add_ast_fields() < 0) return 0;", 1)
for dfn in mod.dfns:
self.visit(dfn)
self.emit("initialized = 1;", 1)
self.emit("return 1;", 1);
self.emit("}", 0)
def visitProduct(self, prod, name):
if prod.fields:
fields = name+"_fields"
else:
fields = "NULL"
self.emit('%s_type = make_type("%s", &AST_type, %s, %d);' %
(name, name, fields, len(prod.fields)), 1)
self.emit("if (!%s_type) return 0;" % name, 1)
if prod.attributes:
self.emit("if (!add_attributes(%s_type, %s_attributes, %d)) return 0;" %
(name, name, len(prod.attributes)), 1)
else:
self.emit("if (!add_attributes(%s_type, NULL, 0)) return 0;" % name, 1)
def visitSum(self, sum, name):
self.emit('%s_type = make_type("%s", &AST_type, NULL, 0);' %
(name, name), 1)
self.emit("if (!%s_type) return 0;" % name, 1)
if sum.attributes:
self.emit("if (!add_attributes(%s_type, %s_attributes, %d)) return 0;" %
(name, name, len(sum.attributes)), 1)
else:
self.emit("if (!add_attributes(%s_type, NULL, 0)) return 0;" % name, 1)
simple = is_simple(sum)
for t in sum.types:
self.visitConstructor(t, name, simple)
def visitConstructor(self, cons, name, simple):
if cons.fields:
fields = cons.name+"_fields"
else:
fields = "NULL"
self.emit('%s_type = make_type("%s", %s_type, %s, %d);' %
(cons.name, cons.name, name, fields, len(cons.fields)), 1)
self.emit("if (!%s_type) return 0;" % cons.name, 1)
if simple:
self.emit("%s_singleton = PyType_GenericNew(%s_type, NULL, NULL);" %
(cons.name, cons.name), 1)
self.emit("if (!%s_singleton) return 0;" % cons.name, 1)
class ASTModuleVisitor(PickleVisitor):
def visitModule(self, mod):
self.emit("static struct PyModuleDef _astmodule = {", 0)
self.emit(' PyModuleDef_HEAD_INIT, "_ast"', 0)
self.emit("};", 0)
self.emit("PyMODINIT_FUNC", 0)
self.emit("PyInit__ast(void)", 0)
self.emit("{", 0)
self.emit("PyObject *m, *d;", 1)
self.emit("if (!init_types()) return NULL;", 1)
self.emit('m = PyModule_Create(&_astmodule);', 1)
self.emit("if (!m) return NULL;", 1)
self.emit("d = PyModule_GetDict(m);", 1)
self.emit('if (PyDict_SetItemString(d, "AST", (PyObject*)&AST_type) < 0) return NULL;', 1)
self.emit('if (PyModule_AddIntMacro(m, PyCF_ONLY_AST) < 0)', 1)
self.emit("return NULL;", 2)
for dfn in mod.dfns:
self.visit(dfn)
self.emit("return m;", 1)
self.emit("}", 0)
def visitProduct(self, prod, name):
self.addObj(name)
def visitSum(self, sum, name):
self.addObj(name)
for t in sum.types:
self.visitConstructor(t, name)
def visitConstructor(self, cons, name):
self.addObj(cons.name)
def addObj(self, name):
self.emit('if (PyDict_SetItemString(d, "%s", (PyObject*)%s_type) < 0) return NULL;' % (name, name), 1)
_SPECIALIZED_SEQUENCES = ('stmt', 'expr')
def find_sequence(fields, doing_specialization):
"""Return True if any field uses a sequence."""
for f in fields:
if f.seq:
if not doing_specialization:
return True
if str(f.type) not in _SPECIALIZED_SEQUENCES:
return True
return False
def has_sequence(types, doing_specialization):
for t in types:
if find_sequence(t.fields, doing_specialization):
return True
return False
class StaticVisitor(PickleVisitor):
CODE = '''Very simple, always emit this static code. Override CODE'''
def visit(self, object):
self.emit(self.CODE, 0, reflow=False)
class ObjVisitor(PickleVisitor):
def func_begin(self, name):
ctype = get_c_type(name)
self.emit("PyObject*", 0)
self.emit("ast2obj_%s(void* _o)" % (name), 0)
self.emit("{", 0)
self.emit("%s o = (%s)_o;" % (ctype, ctype), 1)
self.emit("PyObject *result = NULL, *value = NULL;", 1)
self.emit('if (!o) {', 1)
self.emit("Py_INCREF(Py_None);", 2)
self.emit('return Py_None;', 2)
self.emit("}", 1)
self.emit('', 0)
def func_end(self):
self.emit("return result;", 1)
self.emit("failed:", 0)
self.emit("Py_XDECREF(value);", 1)
self.emit("Py_XDECREF(result);", 1)
self.emit("return NULL;", 1)
self.emit("}", 0)
self.emit("", 0)
def visitSum(self, sum, name):
if is_simple(sum):
self.simpleSum(sum, name)
return
self.func_begin(name)
self.emit("switch (o->kind) {", 1)
for i in range(len(sum.types)):
t = sum.types[i]
self.visitConstructor(t, i + 1, name)
self.emit("}", 1)
for a in sum.attributes:
self.emit("value = ast2obj_%s(o->%s);" % (a.type, a.name), 1)
self.emit("if (!value) goto failed;", 1)
self.emit('if (_PyObject_SetAttrId(result, &PyId_%s, value) < 0)' % a.name, 1)
self.emit('goto failed;', 2)
self.emit('Py_DECREF(value);', 1)
self.func_end()
def simpleSum(self, sum, name):
self.emit("PyObject* ast2obj_%s(%s_ty o)" % (name, name), 0)
self.emit("{", 0)
self.emit("switch(o) {", 1)
for t in sum.types:
self.emit("case %s:" % t.name, 2)
self.emit("Py_INCREF(%s_singleton);" % t.name, 3)
self.emit("return %s_singleton;" % t.name, 3)
self.emit("default:", 2)
self.emit('/* should never happen, but just in case ... */', 3)
code = "PyErr_Format(PyExc_SystemError, \"unknown %s found\");" % name
self.emit(code, 3, reflow=False)
self.emit("return NULL;", 3)
self.emit("}", 1)
self.emit("}", 0)
def visitProduct(self, prod, name):
self.func_begin(name)
self.emit("result = PyType_GenericNew(%s_type, NULL, NULL);" % name, 1);
self.emit("if (!result) return NULL;", 1)
for field in prod.fields:
self.visitField(field, name, 1, True)
for a in prod.attributes:
self.emit("value = ast2obj_%s(o->%s);" % (a.type, a.name), 1)
self.emit("if (!value) goto failed;", 1)
self.emit('if (_PyObject_SetAttrId(result, &PyId_%s, value) < 0)' % a.name, 1)
self.emit('goto failed;', 2)
self.emit('Py_DECREF(value);', 1)
self.func_end()
def visitConstructor(self, cons, enum, name):
self.emit("case %s_kind:" % cons.name, 1)
self.emit("result = PyType_GenericNew(%s_type, NULL, NULL);" % cons.name, 2);
self.emit("if (!result) goto failed;", 2)
for f in cons.fields:
self.visitField(f, cons.name, 2, False)
self.emit("break;", 2)
def visitField(self, field, name, depth, product):
def emit(s, d):
self.emit(s, depth + d)
if product:
value = "o->%s" % field.name
else:
value = "o->v.%s.%s" % (name, field.name)
self.set(field, value, depth)
emit("if (!value) goto failed;", 0)
emit('if (_PyObject_SetAttrId(result, &PyId_%s, value) == -1)' % field.name, 0)
emit("goto failed;", 1)
emit("Py_DECREF(value);", 0)
def emitSeq(self, field, value, depth, emit):
emit("seq = %s;" % value, 0)
emit("n = asdl_seq_LEN(seq);", 0)
emit("value = PyList_New(n);", 0)
emit("if (!value) goto failed;", 0)
emit("for (i = 0; i < n; i++) {", 0)
self.set("value", field, "asdl_seq_GET(seq, i)", depth + 1)
emit("if (!value1) goto failed;", 1)
emit("PyList_SET_ITEM(value, i, value1);", 1)
emit("value1 = NULL;", 1)
emit("}", 0)
def set(self, field, value, depth):
if field.seq:
# XXX should really check for is_simple, but that requires a symbol table
if field.type == "cmpop":
# While the sequence elements are stored as void*,
# ast2obj_cmpop expects an enum
self.emit("{", depth)
self.emit("Py_ssize_t i, n = asdl_seq_LEN(%s);" % value, depth+1)
self.emit("value = PyList_New(n);", depth+1)
self.emit("if (!value) goto failed;", depth+1)
self.emit("for(i = 0; i < n; i++)", depth+1)
# This cannot fail, so no need for error handling
self.emit("PyList_SET_ITEM(value, i, ast2obj_cmpop((cmpop_ty)asdl_seq_GET(%s, i)));" % value,
depth+2, reflow=False)
self.emit("}", depth)
else:
self.emit("value = ast2obj_list(%s, ast2obj_%s);" % (value, field.type), depth)
else:
ctype = get_c_type(field.type)
self.emit("value = ast2obj_%s(%s);" % (field.type, value), depth, reflow=False)
class PartingShots(StaticVisitor):
CODE = """
PyObject* PyAST_mod2obj(mod_ty t)
{
if (!init_types())
return NULL;
return ast2obj_mod(t);
}
/* mode is 0 for "exec", 1 for "eval" and 2 for "single" input */
mod_ty PyAST_obj2mod(PyObject* ast, PyArena* arena, int mode)
{
mod_ty res;
PyObject *req_type[3];
char *req_name[] = {"Module", "Expression", "Interactive"};
int isinstance;
req_type[0] = (PyObject*)Module_type;
req_type[1] = (PyObject*)Expression_type;
req_type[2] = (PyObject*)Interactive_type;
assert(0 <= mode && mode <= 2);
if (!init_types())
return NULL;
isinstance = PyObject_IsInstance(ast, req_type[mode]);
if (isinstance == -1)
return NULL;
if (!isinstance) {
PyErr_Format(PyExc_TypeError, "expected %s node, got %.400s",
req_name[mode], Py_TYPE(ast)->tp_name);
return NULL;
}
if (obj2ast_mod(ast, &res, arena) != 0)
return NULL;
else
return res;
}
int PyAST_Check(PyObject* obj)
{
if (!init_types())
return -1;
return PyObject_IsInstance(obj, (PyObject*)&AST_type);
}
"""
class ChainOfVisitors:
def __init__(self, *visitors):
self.visitors = visitors
def visit(self, object):
for v in self.visitors:
v.visit(object)
v.emit("", 0)
def main(srcfile, dump_module=False):
argv0 = sys.argv[0]
components = argv0.split(os.sep)
argv0 = os.sep.join(components[-2:])
mod = asdl.parse(srcfile)
if dump_module:
print('Parsed Module:')
print(mod)
if not asdl.check(mod):
sys.exit(1)
if H_FILE:
with open(H_FILE, "w") as f:
f.write("\
#ifndef COSMOPOLITAN_THIRD_PARTY_PYTHON_INCLUDE_PYTHON_AST_H_\n\
#define COSMOPOLITAN_THIRD_PARTY_PYTHON_INCLUDE_PYTHON_AST_H_\n\
#include \"third_party/python/Include/asdl.h\"\n\
#if !(__ASSEMBLER__ + __LINKER__ + 0)\n\
COSMOPOLITAN_C_START_\n\
/* clang-format off */\n\
/* File automatically generated by %s. */\n\
\n\
" % argv0)
c = ChainOfVisitors(TypeDefVisitor(f),
StructVisitor(f),
PrototypeVisitor(f),
)
c.visit(mod)
f.write("\
PyObject* PyAST_mod2obj(mod_ty t);\n\
mod_ty PyAST_obj2mod(PyObject* ast, PyArena* arena, int mode);\n\
int PyAST_Check(PyObject* obj);\n\
\n\
COSMOPOLITAN_C_END_\n\
#endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */\n\
#endif /* COSMOPOLITAN_THIRD_PARTY_PYTHON_INCLUDE_PYTHON_AST_H_ */\n")
if C_FILE:
with open(C_FILE, "w") as f:
f.write('\
#include "third_party/python/Include/%s-ast.h"\n\
#include "third_party/python/Include/abstract.h"\n\
#include "third_party/python/Include/boolobject.h"\n\
#include "third_party/python/Include/descrobject.h"\n\
#include "third_party/python/Include/dictobject.h"\n\
#include "third_party/python/Include/listobject.h"\n\
#include "third_party/python/Include/longobject.h"\n\
#include "third_party/python/Include/modsupport.h"\n\
#include "third_party/python/Include/object.h"\n\
#include "third_party/python/Include/objimpl.h"\n\
#include "third_party/python/Include/pyerrors.h"\n\
#include "third_party/python/Include/pythonrun.h"\n\
#include "third_party/python/Include/tupleobject.h"\n\
#include "third_party/python/Include/yoink.h"\n\
/* clang-format off */\n\
\n\
PYTHON_PROVIDE("_ast");\n\
\n\
/* File automatically generated by %s. */\n\
\n\
static PyTypeObject AST_type;\n\
' % (mod.name, argv0))
v = ChainOfVisitors(
PyTypesDeclareVisitor(f),
PyTypesVisitor(f),
Obj2ModPrototypeVisitor(f),
FunctionVisitor(f),
ObjVisitor(f),
Obj2ModVisitor(f),
ASTModuleVisitor(f),
PartingShots(f),
)
v.visit(mod)
if __name__ == "__main__":
import getopt
H_FILE = ''
C_FILE = ''
dump_module = False
opts, args = getopt.getopt(sys.argv[1:], "dh:c:")
for o, v in opts:
if o == '-h':
H_FILE = v
if o == '-c':
C_FILE = v
if o == '-d':
dump_module = True
if H_FILE and C_FILE:
print('Must specify exactly one output file')
sys.exit(1)
elif len(args) != 1:
print('Must specify single input file')
sys.exit(1)
main(args[0], dump_module)
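# Typical invocations (paths are illustrative): generate the header with
#   python asdl_c.py -h Python-ast.h Python.asdl
# or the C implementation with
#   python asdl_c.py -c Python-ast.c Python.asdl
# Exactly one of -h/-c may be given per run, plus a single .asdl input file.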
| 33.379663 | 110 | 0.539667 |
59d05a4fabb908c5d24ef1c8efff8300a1df26c3 | 362 | py | Python | mptt_graph/admin.py | synw/django-mptt-graph | e4ff6b1d77f43b70fa2d58b3ba9a3155e279a1e2 | ["MIT"] | 4 | 2019-04-14T11:00:44.000Z | 2021-06-06T09:56:44.000Z | mptt_graph/admin.py | synw/django-mptt-graph | e4ff6b1d77f43b70fa2d58b3ba9a3155e279a1e2 | ["MIT"] | 3 | 2017-08-21T10:53:12.000Z | 2020-01-02T12:36:15.000Z | mptt_graph/admin.py | synw/django-mptt-graph | e4ff6b1d77f43b70fa2d58b3ba9a3155e279a1e2 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
from django.contrib import admin
from mptt.admin import MPTTModelAdmin
from mptt_graph.models import GraphModel, TreeNode
@admin.register(GraphModel)
class UrlTreeAdmin(admin.ModelAdmin):
list_display = ["title", "model_path", "model_pk"]
@admin.register(TreeNode)
class TreeNodeAdmin(MPTTModelAdmin):
mptt_level_indent = 30
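# With these registrations, GraphModel rows appear as a flat changelist showing
# title, model_path and model_pk, while TreeNode rows are rendered by
# MPTTModelAdmin with each tree level indented by 30 pixels.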
| 22.625 | 54 | 0.765193 |
f2c0be66df871726673ea1149e9d3a6832043b3e | 539 | py | Python | Python3/0029-Divide-Two-Integers/soln.py | wyaadarsh/LeetCode-Solutions | 3719f5cb059eefd66b83eb8ae990652f4b7fd124 | ["MIT"] | 5 | 2020-07-24T17:48:59.000Z | 2020-12-21T05:56:00.000Z | Python3/0029-Divide-Two-Integers/soln.py | zhangyaqi1989/LeetCode-Solutions | 2655a1ffc8678ad1de6c24295071308a18c5dc6e | ["MIT"] | null | null | null | Python3/0029-Divide-Two-Integers/soln.py | zhangyaqi1989/LeetCode-Solutions | 2655a1ffc8678ad1de6c24295071308a18c5dc6e | ["MIT"] | 2 | 2020-07-24T17:49:01.000Z | 2020-08-31T19:57:35.000Z |
class Solution:
def divide(self, dividend, divisor):
"""
:type dividend: int
:type divisor: int
:rtype: int
"""
sign = -1 if dividend * divisor < 0 else +1
a, b = abs(dividend), abs(divisor)
res = 0
while a >= b:
temp, unit = b, 1
while a >= temp:
a -= temp
res += unit
temp *= 10
unit *= 10
res *= sign
return res if -2**31 <= res <= 2**31 - 1 else 2**31 - 1
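# Illustrative behaviour of the repeated-subtraction-with-powers-of-ten-scaling loop:
#   Solution().divide(10, 3)      -> 3
#   Solution().divide(7, -3)      -> -2          (truncates toward zero)
#   Solution().divide(-2**31, -1) -> 2**31 - 1   (clamped to the signed 32-bit range)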
| 28.368421 | 63 | 0.408163 |
356b22c632cf5199319e3d46ba764f04833632c3 | 50,030 | py | Python | python/paddle/nn/layer/loss.py | wangna11BD/Paddle | bc379ca3d5895eadbc1748bc5b71606011563ee1 | ["Apache-2.0"] | 1 | 2021-04-28T13:47:27.000Z | 2021-04-28T13:47:27.000Z | python/paddle/nn/layer/loss.py | wangna11BD/Paddle | bc379ca3d5895eadbc1748bc5b71606011563ee1 | ["Apache-2.0"] | null | null | null | python/paddle/nn/layer/loss.py | wangna11BD/Paddle | bc379ca3d5895eadbc1748bc5b71606011563ee1 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: define loss functions of neural network
import numpy as np
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle
from .. import functional as F
from paddle.fluid.framework import core, in_dygraph_mode, _varbase_creator
class BCEWithLogitsLoss(fluid.dygraph.Layer):
r"""
This operator combines the sigmoid layer and the :ref:`api_nn_loss_BCELoss` layer.
Also, it can be seen as the combination of the ``sigmoid_cross_entropy_with_logits``
layer and some reduce operations.
This measures the element-wise probability error in classification tasks
in which each class is independent.
This can be thought of as predicting labels for a data-point, where labels
are not mutually exclusive. For example, a news article can be about
politics, technology or sports at the same time or none of these.
First, this operator calculates the loss function as follows:
.. math::
Out = -Labels * \\log(\\sigma(Logit)) - (1 - Labels) * \\log(1 - \\sigma(Logit))
We know that :math:`\\sigma(Logit) = \\frac{1}{1 + e^{-Logit}}`. By substituting this we get:
.. math::
Out = Logit - Logit * Labels + \\log(1 + e^{-Logit})
For stability and to prevent overflow of :math:`e^{-Logit}` when Logit < 0,
we reformulate the loss as follows:
.. math::
Out = \\max(Logit, 0) - Logit * Labels + \\log(1 + e^{-|Logit|})
Then, if ``weight`` or ``pos_weight`` is not None, this operator multiplies the
loss `Out` by the weight tensor. The ``weight`` tensor attaches a different
weight to every item in the batch. The ``pos_weight`` attaches a different
weight to the positive label of each class.
Finally, this operator applies reduce operation on the loss.
If :attr:`reduction` set to ``'none'``, the operator will return the original loss `Out`.
If :attr:`reduction` set to ``'mean'``, the reduced mean loss is :math:`Out = MEAN(Out)`.
If :attr:`reduction` set to ``'sum'``, the reduced sum loss is :math:`Out = SUM(Out)`.
Note that the target labels ``label`` should be numbers between 0 and 1.
Args:
weight (Tensor, optional): A manual rescaling weight given to the loss of each
batch element. If given, it has to be a 1D Tensor whose size is `[N, ]`,
The data type is float32, float64. Default is ``'None'``.
reduction (str, optional): Indicate how to average the loss by batch_size,
the candidates are ``'none'`` | ``'mean'`` | ``'sum'``.
If :attr:`reduction` is ``'none'``, the unreduced loss is returned;
If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned;
If :attr:`reduction` is ``'sum'``, the summed loss is returned.
Default is ``'mean'``.
pos_weight (Tensor, optional): A weight of positive examples. Must be a vector
with length equal to the number of classes. The data type is float32, float64.
Default is ``'None'``.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Shapes:
logit (Tensor): The input predictions tensor. 2-D tensor with shape: [N, *],
N is batch_size, `*` means number of additional dimensions. The ``logit``
is usually the output of Linear layer. Available dtype is float32, float64.
label (Tensor): The target labels tensor. 2-D tensor with the same shape as
``logit``. The target labels which values should be numbers between 0 and 1.
Available dtype is float32, float64.
output (Tensor): If ``reduction`` is ``'none'``, the shape of output is
same as ``logit`` , else the shape of output is scalar.
Returns:
A callable object of BCEWithLogitsLoss.
Examples:
.. code-block:: python
import paddle
logit = paddle.to_tensor([5.0, 1.0, 3.0], dtype="float32")
label = paddle.to_tensor([1.0, 0.0, 1.0], dtype="float32")
bce_logit_loss = paddle.nn.BCEWithLogitsLoss()
output = bce_logit_loss(logit, label)
print(output.numpy()) # [0.45618808]
"""
def __init__(self,
weight=None,
reduction='mean',
pos_weight=None,
name=None):
if reduction not in ['sum', 'mean', 'none']:
raise ValueError(
"The value of 'reduction' in BCEWithLogitsLoss should be 'sum', 'mean' or 'none', but "
"received %s, which is not allowed." % reduction)
super(BCEWithLogitsLoss, self).__init__()
self.weight = weight
self.reduction = reduction
self.pos_weight = pos_weight
self.name = name
def forward(self, logit, label):
out = paddle.nn.functional.binary_cross_entropy_with_logits(
logit, label, self.weight, self.reduction, self.pos_weight,
self.name)
return out
class CrossEntropyLoss(fluid.dygraph.Layer):
r"""
By default, this operator implements the cross entropy loss function with softmax. This function
combines the calculation of the softmax operation and the cross entropy loss function
to provide a more numerically stable computation.
This operator will calculate the cross entropy loss function without softmax when use_softmax=False.
By default, this operator will calculate the mean of the result, and you can also affect
the default behavior by using the reduction parameter. Please refer to the part of
parameters for details.
This operator can be used to calculate the softmax cross entropy loss with soft and hard labels.
Here, hard labels mean the actual label values, e.g. 0, 1, 2, while soft labels
mean the probabilities of the actual labels, e.g. 0.6, 0.8, 0.2.
The calculation of this operator includes the following two steps.
- **I.softmax cross entropy**
1. Hard label (each sample can only be assigned into one category)
1.1. when use_softmax=True
.. math::
\\loss_j=-\text{logits}_{label_j}+\log\left(\sum_{i=0}^{C}\exp(\text{logits}_i)\right) , j = 1,...,N
where, N is the number of samples and C is the number of categories.
1.2. when use_softmax=False
.. math::
\\loss_j=-\log\left({P}_{label_j}\right) , j = 1,...,N
where, N is the number of samples and C is the number of categories, P is input(the output of softmax).
2. Soft label (each sample is assigned to multiple categories with a certain probability, and the probability sum is 1).
2.1. when use_softmax=True
.. math::
\\loss_j=-\sum_{i=0}^{C}\text{label}_i\left(\text{logits}_i-\log\left(\sum_{i=0}^{C}\exp(\text{logits}_i)\right)\right) , j = 1,...,N
where, N is the number of samples and C is the number of categories.
2.2. when use_softmax=False
.. math::
\\loss_j=-\sum_{j=0}^{C}\left({label}_j*\log\left({P}_{label_j}\right)\right) , j = 1,...,N
where, N is the number of samples and C is the number of categories, P is input(the output of softmax).
- **II.Weight and reduction processing**
1. Weight
If the ``weight`` parameter is ``None`` , go to the next step directly.
If the ``weight`` parameter is not ``None`` , the cross entropy of each sample is weighted by weight
according to soft_label = False or True as follows.
1.1. Hard labels (soft_label = False)
.. math::
\\loss_j=loss_j*weight[label_j]
1.2. Soft labels (soft_label = True)
.. math::
\\loss_j=loss_j*\sum_{i}\left(weight[label_i]*logits_i\right)
2. reduction
2.1 if the ``reduction`` parameter is ``none``
Return the previous result directly
2.2 if the ``reduction`` parameter is ``sum``
Return the sum of the previous results
.. math::
\\loss=\sum_{j}loss_j
2.3 if the ``reduction`` parameter is ``mean`` , it will be processed according to
the ``weight`` parameter as follows.
2.3.1. If the ``weight`` parameter is ``None``
Return the average value of the previous results
.. math::
\\loss=\sum_{j}loss_j/N
where, N is the number of samples and C is the number of categories.
2.3.2. If the 'weight' parameter is not 'None', the weighted average value of the previous result will be returned
1. Hard labels (soft_label = False)
.. math::
\\loss=\sum_{j}loss_j/\sum_{j}weight[label_j]
2. Soft labels (soft_label = True)
.. math::
\\loss=\sum_{j}loss_j/\sum_{j}\left(\sum_{i}weight[label_i]\right)
Parameters:
- **weight** (Tensor, optional)
a manual rescaling weight given to each class.
If given, has to be a Tensor of size C and the data type is float32, float64.
Default is ``'None'`` .
- **ignore_index** (int64, optional)
Specifies a target value that is ignored
and does not contribute to the loss. A negative value means that no label
value needs to be ignored. Only valid when soft_label = False.
Default is ``-100`` .
- **reduction** (str, optional)
Indicate how to average the loss by batch_size,
the candidates are ``'none'`` | ``'mean'`` | ``'sum'``.
If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned;
If :attr:`reduction` is ``'sum'``, the reduced sum loss is returned.
If :attr:`reduction` is ``'none'``, the unreduced loss is returned.
Default is ``'mean'``.
- **soft_label** (bool, optional)
Indicate whether label is soft.
If soft_label=False, the label is hard. If soft_label=True, the label is soft.
Default is ``False``.
- **axis** (int, optional)
The index of dimension to perform softmax calculations.
It should be in range :math:`[-1, rank - 1]`, where :math:`rank` is the number
of dimensions of input :attr:`input`.
Default is ``-1`` .
- **use_softmax** (bool, optional)
Indicate whether compute softmax before cross_entropy.
Default is ``True``.
- **name** (str, optional)
The name of the operator. Default is ``None`` .
For more information, please refer to :ref:`api_guide_Name` .
Shape:
- **input** (Tensor)
Input tensor, the data type is float32, float64. Shape is
:math:`[N_1, N_2, ..., N_k, C]`, where C is number of classes , ``k >= 1`` .
Note:
1. when use_softmax=True, it expects unscaled logits. This operator should not be used with the
output of softmax operator, which will produce incorrect results.
2. when use_softmax=False, it expects the output of softmax operator.
- **label** (Tensor)
1. If soft_label=False, the shape is
:math:`[N_1, N_2, ..., N_k]` or :math:`[N_1, N_2, ..., N_k, 1]`, k >= 1.
the data type is int32, int64, float32, float64, where each value is [0, C-1].
2. If soft_label=True, the shape and data type should be same with ``input`` ,
and the sum of the labels for each sample should be 1.
- **output** (Tensor)
Return the softmax cross_entropy loss of ``input`` and ``label``.
The data type is the same as input.
If :attr:`reduction` is ``'mean'`` or ``'sum'`` , the dimension of return value is ``1``.
If :attr:`reduction` is ``'none'``:
1. If soft_label = False, the dimension of return value is the same with ``label`` .
2. if soft_label = True, the dimension of return value is :math:`[N_1, N_2, ..., N_k, 1]` .
Example1(hard labels):
.. code-block:: python
import paddle
paddle.seed(99999)
N=100
C=200
reduction='mean'
input = paddle.rand([N, C], dtype='float64')
label = paddle.randint(0, C, shape=[N], dtype='int64')
weight = paddle.rand([C], dtype='float64')
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
weight=weight, reduction=reduction)
dy_ret = cross_entropy_loss(
input,
label)
print(dy_ret.numpy()) #[5.41993642]
Example2(soft labels):
.. code-block:: python
import paddle
paddle.seed(99999)
axis = -1
ignore_index = -100
N = 4
C = 3
shape = [N, C]
reduction='mean'
weight = None
logits = paddle.uniform(shape, dtype='float64', min=0.1, max=1.0)
labels = paddle.uniform(shape, dtype='float64', min=0.1, max=1.0)
labels /= paddle.sum(labels, axis=axis, keepdim=True)
paddle_loss_mean = paddle.nn.functional.cross_entropy(
logits,
labels,
soft_label=True,
axis=axis,
weight=weight,
reduction=reduction)
print(paddle_loss_mean.numpy()) #[1.12908343]
"""
def __init__(self,
weight=None,
ignore_index=-100,
reduction='mean',
soft_label=False,
axis=-1,
use_softmax=True,
name=None):
super(CrossEntropyLoss, self).__init__()
self.weight = weight
self.reduction = reduction
self.ignore_index = ignore_index
self.soft_label = soft_label
self.axis = axis
self.use_softmax = use_softmax
self.name = name
def forward(self, input, label):
ret = paddle.nn.functional.cross_entropy(
input,
label,
weight=self.weight,
ignore_index=self.ignore_index,
reduction=self.reduction,
soft_label=self.soft_label,
axis=self.axis,
use_softmax=self.use_softmax,
name=self.name)
return ret
class HSigmoidLoss(fluid.dygraph.Layer):
"""
Hierarchical Sigmoid Layer.
The hierarchical sigmoid organizes the classes into a complete binary tree to reduce the computational complexity
and speed up the model training, especially the training of language model.
Each leaf node of the complete binary tree represents a class(word) and each non-leaf node acts as a binary classifier.
For each class(word), there's a unique path from root to itself, hsigmoid calculate the cost for each non-leaf node on
the path, and sum them to get a total cost.
Compared to softmax, the OP can reduce the computational complexity from :math:`O(N)` to :math:`O(logN)`, where :math:`N`
represents the number of classes or the size of word dict.
The OP supports default tree and custom tree. For the default tree, you can refer to `Hierarchical Probabilistic Neural
Network Language Model <http://www.iro.umontreal.ca/~lisa/pointeurs/hierarchical-nnlm-aistats05.pdf>_`. For the custom
tree, you need to set :attr:`is_custom` to True, and do the following steps (take the language model as an example):
1. Using a custom word dict to build a binary tree, each leaf node should be an word in the word dict.
2. Creating a dict map word_id -> path that from the word to the root node, we call it path_table.
3. Creating a dict map word_id -> code of path that from the word to the root node, we call it path_code.
Code means the label of each binary classifier, 1 indicate true, 0 indicate false.
4. Now, each word should have its path and code along the path; you can pass a batch of paths and codes related
to the same batch of inputs.
Parameters:
feature_size (int): The number of features.
num_classes (int): The number of classes or the size of word dict, must be greater than 2.
If the default tree is used (:attr:`is_custom` is set to False), :attr:`num_classes`
should not be None. If the custom tree is used (:attr:`is_custom` is set to True),
:attr:`num_classes` should be the number of non-leaf nodes, which indicates the num of
classes using by the binary classifier.
weight_attr (ParamAttr, optional): The parameter attribute for the learnable weights
of hsigmoid. If it is set to None or one attribute of ParamAttr, hsigmoid will create a
ParamAttr as param_attr. If the Initializer of the param_attr is not set, the parameter is
initialized with Xavier. Default is None.
bias_attr (ParamAttr|bool, optional): The parameter attribute for the bias of hsigmoid. If it
is set to False, no bias will be added. If it is set to None or one attribute of ParamAttr,
hsigmoid will create a ParamAttr as bias_attr. If the Initializer of the bias_attr is not
set, the bias is initialized zero. Default is None.
is_custom (bool, optional): Whether use custom binary tree. If it's True, `path_table` and
`path_code` should be passed to its forward method, otherwise `path_table` and `path_code`
should not be passed to its forward method. Default is False.
is_sparse (bool, optional): Whether use sparse updating instead of dense updating, if it's True,
the gradient of weight and input will be sparse. Default is False.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Shape:
input (Tensor): The input tensor. The shape is [N, D], where N is batch size and D is feature size. Its data type should be float32 or float64.
label (Tensor): Its shape is [N, 1]. Its data type should be int64.
output (Tensor): The HSigmoid Loss of ``input`` and ``label``. Shape is [N, 1].
Examples:
.. code-block:: python
import paddle
paddle.set_device('cpu')
input = paddle.uniform([2, 3])
# [[-0.2820413 0.9528898 -0.81638825] # random
# [-0.6733154 -0.33866507 0.25770962]] # random
label = paddle.to_tensor([0, 1, 4, 5])
m = paddle.nn.HSigmoidLoss(3, 5)
out = m(input, label)
# [[2.4543471]
# [1.9359267]]
"""
def __init__(self,
feature_size,
num_classes,
weight_attr=None,
bias_attr=None,
is_custom=False,
is_sparse=False,
name=None):
super(HSigmoidLoss, self).__init__()
if (num_classes < 2) and (not is_custom):
raise ValueError(
"num_classes must not be less than 2 with default tree")
if (not is_custom) and (is_sparse):
print("Sparse mode should not be used without custom tree")
is_sparse = False
self._feature_size = feature_size
self._num_classes = num_classes
self._is_custom = is_custom
self._is_sparse = is_sparse
self._weight_attr = weight_attr
self._bias_attr = bias_attr
self._name = name
self._dtype = paddle.get_default_dtype()
remote_prefetch = is_sparse
print("With sparse mode, if your models has only"
" small parameter prefetch may cause speed down")
C = self._num_classes if is_custom else self._num_classes - 1
self.weight = self.create_parameter(
[C, self._feature_size],
attr=self._weight_attr,
is_bias=False,
dtype=self._dtype)
self.bias = self.create_parameter(
[C, 1], attr=self._bias_attr, is_bias=True, dtype=self._dtype)
def forward(self, input, label, path_table=None, path_code=None):
out = F.hsigmoid_loss(
input,
label,
self._num_classes,
self.weight,
self.bias,
path_table=path_table,
path_code=path_code,
is_sparse=self._is_sparse,
name=self._name)
return out
class MSELoss(fluid.dygraph.layers.Layer):
r"""
**Mean Square Error Loss**
Computes the mean square error (squared L2 norm) of given input and label.
If :attr:`reduction` is set to ``'none'``, loss is calculated as:
.. math::
Out = (input - label)^2
If :attr:`reduction` is set to ``'mean'``, loss is calculated as:
.. math::
Out = \operatorname{mean}((input - label)^2)
If :attr:`reduction` is set to ``'sum'``, loss is calculated as:
.. math::
Out = \operatorname{sum}((input - label)^2)
where `input` and `label` are `float32` tensors of same shape.
Parameters:
reduction (string, optional): The reduction method for the output,
could be 'none' | 'mean' | 'sum'.
If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned.
If :attr:`reduction` is ``'sum'``, the reduced sum loss is returned.
If :attr:`reduction` is ``'none'``, the unreduced loss is returned.
Default is ``'mean'``.
Shape:
input (Tensor): Input tensor, the data type is float32 or float64
label (Tensor): Label tensor, the data type is float32 or float64
output (Tensor): output tensor storing the MSE loss of input and label, the data type is same as input.
Examples:
.. code-block:: python
import numpy as np
import paddle
input_data = np.array([1.5]).astype("float32")
label_data = np.array([1.7]).astype("float32")
mse_loss = paddle.nn.loss.MSELoss()
input = paddle.to_tensor(input_data)
label = paddle.to_tensor(label_data)
output = mse_loss(input, label)
print(output)
# [0.04000002]
"""
def __init__(self, reduction='mean'):
super(MSELoss, self).__init__()
if reduction not in ['sum', 'mean', 'none']:
raise ValueError(
"'reduction' in 'MSELoss' should be 'sum', 'mean' or 'none', "
"but received {}.".format(reduction))
self.reduction = reduction
def forward(self, input, label):
if not fluid.framework.in_dygraph_mode():
fluid.data_feeder.check_variable_and_dtype(
input, 'input', ['float32', 'float64'], 'MSELoss')
fluid.data_feeder.check_variable_and_dtype(
label, 'label', ['float32', 'float64'], 'MSELoss')
square_out = fluid.layers.square(
fluid.layers.elementwise_sub(input, label))
if self.reduction == 'none':
return square_out
reduce_op = 'reduce_mean'
if self.reduction == 'sum':
reduce_op = 'reduce_sum'
return getattr(fluid.layers, reduce_op)(square_out)
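# Illustrative sketch (not part of the library): how the three reduction modes of
# MSELoss relate on the same data. The tensor values below are made up purely for
# demonstration.
def _mse_loss_reduction_demo():
    import paddle
    input = paddle.to_tensor([1.5, 0.8, 2.0])
    label = paddle.to_tensor([1.7, 1.0, 1.5])
    per_element = paddle.nn.MSELoss(reduction='none')(input, label)  # (input - label)^2 per element
    mean_loss = paddle.nn.MSELoss(reduction='mean')(input, label)    # mean of the per-element losses
    sum_loss = paddle.nn.MSELoss(reduction='sum')(input, label)      # sum of the per-element losses
    return per_element, mean_loss, sum_loss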
class L1Loss(fluid.dygraph.Layer):
r"""
This interface is used to construct a callable object of the ``L1Loss`` class.
The L1Loss layer calculates the L1 Loss of ``input`` and ``label`` as follows.
If `reduction` set to ``'none'``, the loss is:
.. math::
Out = \lvert input - label\rvert
If `reduction` set to ``'mean'``, the loss is:
.. math::
Out = MEAN(\lvert input - label\rvert)
If `reduction` set to ``'sum'``, the loss is:
.. math::
Out = SUM(\lvert input - label\rvert)
Parameters:
reduction (str, optional): Indicate the reduction to apply to the loss,
the candidates are ``'none'`` | ``'mean'`` | ``'sum'``.
If `reduction` is ``'none'``, the unreduced loss is returned;
If `reduction` is ``'mean'``, the reduced mean loss is returned.
If `reduction` is ``'sum'``, the reduced sum loss is returned.
Default is ``'mean'``.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Shape:
input (Tensor): The input tensor. The shape is [N, *], where N is batch size and `*` means any number of additional dimensions. Its data type should be float32, float64, int32 or int64.
label (Tensor): Label tensor. The shape is [N, *], the same shape as ``input``. Its data type should be float32, float64, int32 or int64.
output (Tensor): The L1 Loss of ``input`` and ``label``.
If `reduction` is ``'none'``, the shape of output loss is [N, *], the same as ``input`` .
If `reduction` is ``'mean'`` or ``'sum'``, the shape of output loss is [1].
Examples:
.. code-block:: python
import paddle
import numpy as np
input_data = np.array([[1.5, 0.8], [0.2, 1.3]]).astype("float32")
label_data = np.array([[1.7, 1], [0.4, 0.5]]).astype("float32")
input = paddle.to_tensor(input_data)
label = paddle.to_tensor(label_data)
l1_loss = paddle.nn.L1Loss()
output = l1_loss(input, label)
print(output.numpy())
# [0.35]
l1_loss = paddle.nn.L1Loss(reduction='sum')
output = l1_loss(input, label)
print(output.numpy())
# [1.4]
l1_loss = paddle.nn.L1Loss(reduction='none')
output = l1_loss(input, label)
print(output)
# [[0.20000005 0.19999999]
# [0.2 0.79999995]]
"""
def __init__(self, reduction='mean', name=None):
if reduction not in ['sum', 'mean', 'none']:
raise ValueError(
"The value of 'reduction' in L1Loss should be 'sum', 'mean' or 'none', but "
"received %s, which is not allowed." % reduction)
super(L1Loss, self).__init__()
self.reduction = reduction
self.name = name
def forward(self, input, label):
return paddle.nn.functional.l1_loss(
input, label, self.reduction, name=self.name)
class BCELoss(fluid.dygraph.Layer):
"""
This interface is used to construct a callable object of the ``BCELoss`` class.
The BCELoss layer measures the binary_cross_entropy loss between input predictions ``input``
and target labels ``label`` . The binary_cross_entropy loss can be described as:
If :attr:`weight` is set, the loss is:
.. math::
Out = -1 * weight * (label * log(input) + (1 - label) * log(1 - input))
If :attr:`weight` is None, the loss is:
.. math::
Out = -1 * (label * log(input) + (1 - label) * log(1 - input))
If :attr:`reduction` set to ``'none'``, the interface will return the original loss `Out`.
If :attr:`reduction` set to ``'mean'``, the reduced mean loss is:
.. math::
Out = MEAN(Out)
If :attr:`reduction` set to ``'sum'``, the reduced sum loss is:
.. math::
Out = SUM(Out)
Note that the input predictions ``input`` should always be the output of a sigmoid, and the target labels ``label``
should be numbers between 0 and 1.
Parameters:
weight (Tensor, optional): A manual rescaling weight given to the loss of each
batch element. If given, has to be a Tensor of size nbatch and the data type
is float32, float64. Default is ``'None'``.
reduction (str, optional): Indicate how to average the loss by batch_size,
the candidates are ``'none'`` | ``'mean'`` | ``'sum'``.
If :attr:`reduction` is ``'none'``, the unreduced loss is returned;
If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned;
If :attr:`reduction` is ``'sum'``, the summed loss is returned.
Default is ``'mean'``.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Shape:
input (Tensor): 2-D tensor with shape: [N, *], N is batch_size, `*` means
number of additional dimensions. The input ``input`` should always
be the output of sigmoid. Available dtype is float32, float64.
label (Tensor): 2-D tensor with the same shape as ``input``. The target
labels which values should be numbers between 0 and 1. Available
dtype is float32, float64.
output (Tensor): If ``reduction`` is ``'none'``, the shape of output is
same as ``input`` , else the shape of output is scalar.
Returns:
A callable object of BCELoss.
Examples:
.. code-block:: python
import numpy as np
import paddle
input_data = np.array([0.5, 0.6, 0.7]).astype("float32")
label_data = np.array([1.0, 0.0, 1.0]).astype("float32")
input = paddle.to_tensor(input_data)
label = paddle.to_tensor(label_data)
bce_loss = paddle.nn.BCELoss()
output = bce_loss(input, label)
print(output) # [0.65537095]
"""
def __init__(self, weight=None, reduction='mean', name=None):
if reduction not in ['sum', 'mean', 'none']:
raise ValueError(
"The value of 'reduction' in bce_loss should be 'sum', 'mean' or 'none', but "
"received %s, which is not allowed." % reduction)
super(BCELoss, self).__init__()
self.weight = weight
self.reduction = reduction
self.name = name
def forward(self, input, label):
out = paddle.nn.functional.binary_cross_entropy(
input, label, self.weight, self.reduction, self.name)
return out
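# Illustrative sketch (not part of the library): BCELoss with a manual rescaling
# weight. The inputs must already be sigmoid outputs in (0, 1); all values below
# are made up purely for demonstration.
def _bce_loss_weight_demo():
    import paddle
    input = paddle.to_tensor([0.5, 0.6, 0.7])   # probabilities from a sigmoid
    label = paddle.to_tensor([1.0, 0.0, 1.0])
    weight = paddle.to_tensor([2.0, 1.0, 1.0])  # up-weight the first element
    weighted = paddle.nn.BCELoss(weight=weight, reduction='none')(input, label)
    unweighted = paddle.nn.BCELoss(reduction='none')(input, label)
    return weighted, unweighted  # weighted[0] is twice unweighted[0]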
class NLLLoss(fluid.dygraph.Layer):
r"""
:alias_main: paddle.nn.NLLLoss
:alias: paddle.nn.NLLLoss,paddle.nn.layer.NLLLoss,paddle.nn.layer.loss.NLLLoss
This class accepts input and target label and returns negative log likelihood
loss. It is useful to train a classification problem with C classes.
The input for the loss is expected to contain log-probabilities of
each classes. It has to be a Tensor of size either (batch_size, C) or
(batch_size, C, d1, d2, ..., dK) with K >= 1 for the K-dimensional case.
The label for the loss should be a class index in the range [0, C-1]
where C is the number of classes. If ignore_index is specified, the
specified target value does not contribute to the input gradient.
If the optional argument `weight` is provided, it should be a 1D Tensor
assigning weight to each of the classes. This is particularly useful
when you have an unbalanced training set.
The loss is calculated as follows.
The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as:
.. math::
\ell(x, y) = L = \{l_1,\dots,l_N\}^\\top, \quad
l_n = - w_{y_n} x_{n,y_n}, \quad
w_{c} = \\text{weight}[c] \cdot \mathbb{1}\{c \\not= \\text{ignore\\_index}\},
where :math:`N` is the batch size. If :attr:`reduction` is not ``'none'``
(default ``'mean'``), then
.. math::
\ell(x, y) = \\begin{cases}
\\sum_{n=1}^N \\frac{1}{\\sum_{n=1}^N w_{y_n}} l_n, &
\\text{if reduction} = \\text{'mean';}\\\\
\\sum_{n=1}^N l_n, &
\\text{if reduction} = \\text{'sum'.}
\\end{cases}
Parameters:
weight (Tensor, optional): Weight tensor, a manual rescaling weight given
to each class. If given, it has to be a 1D Tensor whose size is `[C, ]`. Otherwise,
it is treated as if having all ones. The data type is
float32 or float64. Default is ``'None'``.
ignore_index (int64, optional): Specifies a target value that is ignored
and does not contribute to the input gradient.
reduction (str, optional): Indicate how to average the loss,
the candidates are ``'none'`` | ``'mean'`` | ``'sum'``.
If `reduction` is ``'mean'``, the reduced mean loss is returned;
if `reduction` is ``'sum'``, the reduced sum loss is returned;
if `reduction` is ``'none'``, no reduction will be applied.
Default is ``'mean'``.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Shape:
input (Tensor): Input tensor, the shape is :math:`[N, C]`, `C` is the number of classes.
But in K-dimension situation, the shape is :math:`[N, C, d_1, d_2, ..., d_K]`.
The data type is float32, float64.
label (Tensor): Label tensor, the shape is :math:`[N,]` or :math:`[N, d_1, d_2, ..., d_K]`.
The data type is int64.
output (Tensor): the `negative log likelihood loss` between input `x` and `label`.
If `reduction` is `'none'`, the shape is `[N, *]`.
If `reduction` is `'sum'` or `'mean'`, the shape is `[1]`.
Examples:
.. code-block:: python
import paddle
nll_loss = paddle.nn.loss.NLLLoss()
log_softmax = paddle.nn.LogSoftmax(axis=1)
input = paddle.to_tensor([[0.88103855, 0.9908683 , 0.6226845 ],
[0.53331435, 0.07999352, 0.8549948 ],
[0.25879037, 0.39530203, 0.698465 ],
[0.73427284, 0.63575995, 0.18827209],
[0.05689114, 0.0862954 , 0.6325046 ]], "float32")
log_out = log_softmax(input)
label = paddle.to_tensor([0, 2, 1, 1, 0], "int64")
result = nll_loss(log_out, label)
print(result) # Tensor(shape=[1], dtype=float32, place=CPUPlace, stop_gradient=True, [1.07202101])
"""
def __init__(self,
weight=None,
ignore_index=-100,
reduction='mean',
name=None):
if reduction not in ['sum', 'mean', 'none']:
raise ValueError(
"The value of 'reduction' in nll_loss should be 'sum', 'mean' or "
"'none', but received %s, which is not allowed." % reduction)
super(NLLLoss, self).__init__()
self._weight = weight
self._ignore_index = ignore_index
self._reduction = reduction
self._name = name
def forward(self, input, label):
return F.nll_loss(
input,
label,
weight=self._weight,
ignore_index=self._ignore_index,
reduction=self._reduction,
name=self._name)
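# Illustrative sketch (not part of the library): NLLLoss with per-class weights.
# With reduction='mean' the picked log-probabilities are weighted and divided by
# the sum of the selected class weights, as in the formula above. All values are
# made up purely for demonstration.
def _nll_loss_weight_demo():
    import paddle
    logits = paddle.to_tensor([[0.2, 1.0, 0.5], [1.5, 0.3, 0.1]])
    log_probs = paddle.nn.LogSoftmax(axis=1)(logits)
    label = paddle.to_tensor([1, 0], "int64")
    weight = paddle.to_tensor([1.0, 2.0, 1.0])  # class 1 counts twice as much
    return paddle.nn.NLLLoss(weight=weight)(log_probs, label)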
class KLDivLoss(fluid.dygraph.Layer):
r"""
This interface calculates the Kullback-Leibler divergence loss
between Input(X) and Input(Target). Note that Input(X) is the
log-probability and Input(Target) is the probability.
KL divergence loss is calculated as follows:
$$l(x, y) = y * (\log(y) - x)$$
Parameters:
reduction (str, optional): Indicate how to average the loss,
the candidates are ``'none'`` | ``'batchmean'`` | ``'mean'`` | ``'sum'``.
If `reduction` is ``'mean'``, the reduced mean loss is returned;
If `reduction` is ``'batchmean'``, the sum loss divided by batch size is returned;
if `reduction` is ``'sum'``, the reduced sum loss is returned;
if `reduction` is ``'none'``, no reduction will be applied.
Default is ``'mean'``.
Shape:
- input (Tensor): (N, *), where * means, any number of additional dimensions.
- label (Tensor): (N, *), same shape as input.
- output (Tensor): tensor with shape: [1] by default.
Examples:
.. code-block:: python
import paddle
import numpy as np
import paddle.nn as nn
shape = (5, 20)
x = np.random.uniform(-10, 10, shape).astype('float32')
target = np.random.uniform(-10, 10, shape).astype('float32')
# 'batchmean' reduction, loss shape will be [1]
kldiv_criterion = nn.KLDivLoss(reduction='batchmean')
pred_loss = kldiv_criterion(paddle.to_tensor(x),
paddle.to_tensor(target))
# shape=[1]
# 'mean' reduction, loss shape will be [1]
kldiv_criterion = nn.KLDivLoss(reduction='mean')
pred_loss = kldiv_criterion(paddle.to_tensor(x),
paddle.to_tensor(target))
# shape=[1]
# 'sum' reduction, loss shape will be [1]
kldiv_criterion = nn.KLDivLoss(reduction='sum')
pred_loss = kldiv_criterion(paddle.to_tensor(x),
paddle.to_tensor(target))
# shape=[1]
# 'none' reduction, loss shape is same with X shape
kldiv_criterion = nn.KLDivLoss(reduction='none')
pred_loss = kldiv_criterion(paddle.to_tensor(x),
paddle.to_tensor(target))
# shape=[5, 20]
"""
def __init__(self, reduction='mean'):
super(KLDivLoss, self).__init__()
self.reduction = reduction
def forward(self, input, label):
out = F.kl_div(input, label, self.reduction)
return out
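# Illustrative sketch (not part of the library): KLDivLoss expects log-probabilities
# as the first argument and probabilities as the second, so a common pattern is
# log_softmax for the input and softmax for the target. Shapes and values below are
# made up purely for demonstration.
def _kl_div_loss_demo():
    import paddle
    import paddle.nn.functional as F
    student_logits = paddle.uniform([4, 10])
    teacher_logits = paddle.uniform([4, 10])
    criterion = paddle.nn.KLDivLoss(reduction='batchmean')
    return criterion(F.log_softmax(student_logits, axis=-1),
                     F.softmax(teacher_logits, axis=-1))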
class MarginRankingLoss(fluid.dygraph.Layer):
r"""
This interface is used to construct a callable object of the ``MarginRankingLoss`` class.
The MarginRankingLoss layer calculates the margin rank loss between the input, other and label,
using the formula below.
.. math::
margin\_rank\_loss = max(0, -label * (input - other) + margin)
If :attr:`reduction` set to ``'mean'``, the reduced mean loss is:
.. math::
Out = MEAN(margin\_rank\_loss)
If :attr:`reduction` set to ``'sum'``, the reduced sum loss is:
.. math::
Out = SUM(margin\_rank\_loss)
If :attr:`reduction` set to ``'none'``, just return the origin ``margin_rank_loss``.
Parameters:
margin (float, optional): The margin value to add, default value is 0;
reduction (str, optional): Indicate the reduction to apply to the loss. The candidates are ``'none'``, ``'mean'`` and ``'sum'``. If :attr:`reduction` is ``'none'``, the unreduced loss is returned; if :attr:`reduction` is ``'mean'``, the reduced mean loss is returned; if :attr:`reduction` is ``'sum'``, the reduced sum loss is returned. Default is ``'mean'``.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Shape:
input: N-D Tensor, the shape is [N, \*], N is batch size and `\*` means any number of additional dimensions, available dtype is float32, float64.
other: N-D Tensor, `other` has the same shape and dtype as `input`.
label: N-D Tensor, `label` has the same shape and dtype as `input`.
output: If :attr:`reduction` is ``'mean'`` or ``'sum'``, the output shape is :math:`[1]`; otherwise the shape is the same as `input`. The output has the same dtype as the input tensor.
Returns:
A callable object of MarginRankingLoss.
Examples:
.. code-block:: python
import paddle
input = paddle.to_tensor([[1, 2], [3, 4]], dtype="float32")
other = paddle.to_tensor([[2, 1], [2, 4]], dtype="float32")
label = paddle.to_tensor([[1, -1], [-1, -1]], dtype="float32")
margin_rank_loss = paddle.nn.MarginRankingLoss()
loss = margin_rank_loss(input, other, label)
print(loss)
# [0.75]
"""
def __init__(self, margin=0.0, reduction='mean', name=None):
if reduction not in ['sum', 'mean', 'none']:
raise ValueError(
"The value of 'reduction' in MarginRankingLoss should be 'sum', 'mean' or 'none', but "
"received %s, which is not allowed." % reduction)
super(MarginRankingLoss, self).__init__()
self.margin = margin
self.reduction = reduction
self.name = name
def forward(self, input, other, label):
out = paddle.nn.functional.margin_ranking_loss(
input, other, label, self.margin, self.reduction, self.name)
return out
class CTCLoss(fluid.dygraph.Layer):
"""
An operator integrating the open source Warp-CTC library (https://github.com/baidu-research/warp-ctc)
to compute Connectionist Temporal Classification (CTC) loss.
It can be aliased as softmax with CTC, since a native softmax activation
is integrated into the Warp-CTC library to normalize values for each row of the input tensor.
Parameters:
blank (int, optional): The blank label index of Connectionist Temporal Classification (CTC) loss, which is in the half-opened interval [0, num_classes + 1). The data type must be int32. Default is 0.
reduction (string, optional): Indicate how to average the loss, the candidates are ``'none'`` | ``'mean'`` | ``'sum'``. If :attr:`reduction` is ``'mean'``, the output loss will be divided by the label_lengths, and then return the mean of quotient; If :attr:`reduction` is ``'sum'``, return the sum of loss; If :attr:`reduction` is ``'none'``, no reduction will be applied. Default is ``'mean'``.
Shape:
log_probs (Tensor): The unscaled probability sequence with padding, which is a 3-D Tensor. The tensor shape is [max_logit_length, batch_size, num_classes + 1], where max_logit_length is the longest length of input logit sequence. The data type should be float32 or float64.
labels (Tensor): The ground truth sequence with padding, which must be a 3-D Tensor. The tensor shape is [batch_size, max_label_length], where max_label_length is the longest length of label sequence. The data type must be int32.
input_lengths (Tensor): The length for each input sequence, it should have shape [batch_size] and dtype int64.
label_lengths (Tensor): The length for each label sequence, it should have shape [batch_size] and dtype int64.
norm_by_times (bool, optional): Whether to normalize the gradients by the number of time-steps, which is also the sequence's length. There is no need to normalize the gradients if the reduction mode is ``'mean'``. Default is False.
Returns:
Tensor, The Connectionist Temporal Classification (CTC) loss between ``log_probs`` and ``labels``. If :attr:`reduction` is ``'none'``, the shape of loss is [batch_size], otherwise, the shape of loss is [1]. Data type is the same as ``log_probs``.
Examples:
.. code-block:: python
# declarative mode
import numpy as np
import paddle
# length of the longest logit sequence
max_seq_length = 4
#length of the longest label sequence
max_label_length = 3
# number of logit sequences
batch_size = 2
# class num
class_num = 3
np.random.seed(1)
log_probs = np.array([[[4.17021990e-01, 7.20324516e-01, 1.14374816e-04],
[3.02332580e-01, 1.46755889e-01, 9.23385918e-02]],
[[1.86260208e-01, 3.45560730e-01, 3.96767467e-01],
[5.38816750e-01, 4.19194520e-01, 6.85219526e-01]],
[[2.04452246e-01, 8.78117442e-01, 2.73875929e-02],
[6.70467496e-01, 4.17304814e-01, 5.58689833e-01]],
[[1.40386939e-01, 1.98101491e-01, 8.00744593e-01],
[9.68261600e-01, 3.13424170e-01, 6.92322612e-01]],
[[8.76389146e-01, 8.94606650e-01, 8.50442126e-02],
[3.90547849e-02, 1.69830427e-01, 8.78142476e-01]]]).astype("float32")
labels = np.array([[1, 2, 2],
[1, 2, 2]]).astype("int32")
input_lengths = np.array([5, 5]).astype("int64")
label_lengths = np.array([3, 3]).astype("int64")
log_probs = paddle.to_tensor(log_probs)
labels = paddle.to_tensor(labels)
input_lengths = paddle.to_tensor(input_lengths)
label_lengths = paddle.to_tensor(label_lengths)
loss = paddle.nn.CTCLoss(blank=0, reduction='none')(log_probs, labels,
input_lengths,
label_lengths)
print(loss) #[3.9179852 2.9076521]
loss = paddle.nn.CTCLoss(blank=0, reduction='mean')(log_probs, labels,
input_lengths,
label_lengths)
print(loss) #[1.1376063]
"""
def __init__(self, blank=0, reduction='mean'):
super(CTCLoss, self).__init__()
self.blank = blank
self.reduction = reduction
def forward(self,
log_probs,
labels,
input_lengths,
label_lengths,
norm_by_times=False):
return paddle.nn.functional.ctc_loss(
log_probs,
labels,
input_lengths,
label_lengths,
self.blank,
self.reduction,
norm_by_times=norm_by_times)
class SmoothL1Loss(fluid.dygraph.Layer):
r"""
This operator calculates smooth_l1_loss. It creates a criterion that uses a squared
term if the absolute element-wise error falls below delta and an L1 term otherwise.
In some cases it can prevent exploding gradients, and it is more robust and less
sensitive to outliers. Also known as the Huber loss:
.. math::
loss(x,y) = \\frac{1}{n}\\sum_{i}z_i
where z_i is given by:
.. math::
\\mathop{z_i} = \\left\\{\\begin{array}{rcl}
0.5(x_i - y_i)^2 & & {if |x_i - y_i| < delta} \\\\
delta * |x_i - y_i| - 0.5 * delta^2 & & {otherwise}
\\end{array} \\right.
Parameters:
reduction (str, optional): Indicate how to average the loss by batch_size,
the candidates are ``'none'`` | ``'mean'`` | ``'sum'``.
If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned;
If :attr:`reduction` is ``'sum'``, the reduced sum loss is returned.
If :attr:`reduction` is ``'none'``, the unreduced loss is returned.
Default is ``'mean'``.
delta (float, optional): Specifies the hyperparameter delta to be used.
The value determines how large the errors need to be to use L1. Errors
smaller than delta are minimized with L2. Parameter is ignored for
negative/zero values. Default = 1.0
name (str, optional): Name for the operation (optional, default is
None). For more information, please refer to :ref:`api_guide_Name`.
Call Parameters:
input (Tensor): Input tensor, the data type is float32 or float64. Shape is
(N, C), where C is number of classes, and if shape is more than 2D, this
is (N, C, D1, D2,..., Dk), k >= 1.
label (Tensor): Label tensor, the data type is float32 or float64. The shape of label
is the same as the shape of input.
Returns:
The tensor storing the smooth_l1_loss of input and label.
Return type: Tensor.
Examples:
.. code-block:: python
import paddle
import numpy as np
input_data = np.random.rand(3,3).astype("float32")
label_data = np.random.rand(3,3).astype("float32")
input = paddle.to_tensor(input_data)
label = paddle.to_tensor(label_data)
loss = paddle.nn.SmoothL1Loss()
output = loss(input, label)
print(output)
"""
def __init__(self, reduction='mean', delta=1.0, name=None):
super(SmoothL1Loss, self).__init__()
self.reduction = reduction
self.delta = delta
self.name = name
def forward(self, input, label):
return F.smooth_l1_loss(
input,
label,
reduction=self.reduction,
delta=self.delta,
name=self.name)
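# Illustrative sketch (not part of the library): how delta changes SmoothL1Loss.
# Errors smaller than delta are penalized quadratically, larger errors linearly.
# All values below are made up purely for demonstration.
def _smooth_l1_loss_delta_demo():
    import paddle
    input = paddle.to_tensor([[0.0, 0.0]])
    label = paddle.to_tensor([[0.5, 3.0]])  # one small error, one large error
    small_delta = paddle.nn.SmoothL1Loss(delta=1.0, reduction='none')(input, label)
    large_delta = paddle.nn.SmoothL1Loss(delta=5.0, reduction='none')(input, label)
    return small_delta, large_delta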
| 41.656953 | 403 | 0.589246 |
9a53e42f5c7bf6242872577e9fdbe58ab9869727 | 29,916 | py | Python | core/platform/search/gae_search_services_test.py | steve7158/oppia | e2cae72fa5d3503c64d195f09d3460507697730c | ["Apache-2.0"] | 2 | 2018-12-14T05:46:31.000Z | 2019-01-04T21:52:44.000Z | core/platform/search/gae_search_services_test.py | steve7158/oppia | e2cae72fa5d3503c64d195f09d3460507697730c | ["Apache-2.0"] | 5 | 2018-06-09T02:05:45.000Z | 2018-09-20T13:53:42.000Z | core/platform/search/gae_search_services_test.py | steve7158/oppia | e2cae72fa5d3503c64d195f09d3460507697730c | ["Apache-2.0"] | null | null | null |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS-IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the appengine search api wrapper."""
import datetime
import time
from core.platform.search import gae_search_services
from core.tests import test_utils
from google.appengine.api import search
class SearchAddToIndexTests(test_utils.GenericTestBase):
"""Test inserting documents into search indexes."""
def test_insert_document_with_id(self):
date = datetime.date(year=2015, month=3, day=14)
datetime_value = datetime.datetime(
year=1991, month=6, day=20, hour=16, minute=30, second=14)
doc_id = 'abcdefghijklmnop'
doc = {
'id': doc_id,
'numberfield': 5,
'stringfield': 'abc',
'datefield': date,
'datetimefield': datetime_value
}
result = gae_search_services.add_documents_to_index([doc], 'my_index')
self.assertEqual(result, [doc_id])
result_doc = search.Index('my_index').get(doc_id)
self.assertEqual(result_doc.doc_id, doc_id)
self.assertEqual(result_doc.field('numberfield').value, 5)
self.assertEqual(result_doc.field('stringfield').value, 'abc')
# The search api returns date fields as datetime fields with
# time at midnight.
self.assertEqual(
result_doc.field('datefield').value,
datetime.datetime.combine(
date=date,
time=datetime.datetime.min.time()))
self.assertEqual(
result_doc.field('datetimefield').value, datetime_value)
def test_insert_document_without_id(self):
doc = {'abc': 'def'}
result = gae_search_services.add_documents_to_index([doc], 'my_index')
retrieved_doc = search.Index('my_index').get(result[0])
self.assertEqual(retrieved_doc.field('abc').value, 'def')
def test_insert_multiple_with_id(self):
docs = [{'id': 'id%d' % n, 'name': 'doc%d' % n} for n in range(5)]
result = gae_search_services.add_documents_to_index(docs, 'my_index')
index = search.Index('my_index')
for ind in range(5):
retrieved_doc = index.get('id%d' % ind)
self.assertEqual(retrieved_doc.field('name').value, 'doc%d' % ind)
self.assertEqual(result[ind], 'id%d' % ind)
def test_insert_document_with_multi_valued_property(self):
doc = {'id': 'doc', 'prop': ['val1', 'val2', 'val3']}
gae_search_services.add_documents_to_index([doc], 'index')
resultdoc = search.Index('index').get('doc')
values = set([field.value for field in resultdoc['prop']])
self.assertEqual(values, set(['val1', 'val2', 'val3']))
def test_disallow_unsupported_value_types(self):
with self.assertRaises(ValueError):
doc = {'abc': set('xyz')}
gae_search_services.add_documents_to_index(doc, 'my_index')
def test_add_document_with_rank(self):
doc = {'id': 'my_doc', 'field': 'value', 'rank': 42}
gae_search_services.add_documents_to_index([doc], 'my_index')
index = search.Index('my_index')
self.assertEqual(index.get('my_doc').rank, 42)
def test_add_document_with_existing_id_updates_it(self):
doc1 = {'id': 'doc', 'version': 1, 'rank': 10}
doc2 = {'id': 'doc', 'version': 2, 'rank': 20}
gae_search_services.add_documents_to_index([doc1], 'my_index')
index = search.Index('my_index')
self.assertEqual(index.get('doc').field('version').value, 1)
self.assertEqual(index.get('doc').rank, 10)
gae_search_services.add_documents_to_index([doc2], 'my_index')
self.assertEqual(index.get('doc').field('version').value, 2)
self.assertEqual(index.get('doc').rank, 20)
def test_validate_list_values(self):
doc1 = {'f': ['a', 'b', ['c', 'd']]}
doc2 = {'f': ['a', 'b', 3, set([4, 5, 6])]}
# The str() of list and set are passed in to ensure that the error
# message mentions the type the user passed in.
with self.assertRaisesRegexp(ValueError, str(list)):
gae_search_services.add_documents_to_index([doc1], 'my_index')
with self.assertRaisesRegexp(ValueError, str(set)):
gae_search_services.add_documents_to_index([doc2], 'my_index')
def test_index_must_be_string(self):
index = search.Index('test')
# Check that the error message mentions the type the user passed in.
with self.assertRaisesRegexp(ValueError, str(type(index))):
gae_search_services.add_documents_to_index(
{'id': 'one', 'key': 'value'}, index)
def _get_put_error(self, num_res, transient=None):
"""returns a PutError. with num_res results.
If transient is given, it should be an index in the
results array. The result at that index will have a transient
error code.
"""
non_trans_code = search.OperationResult.INVALID_REQUEST
trans_code = search.OperationResult.TRANSIENT_ERROR
results = [
search.PutResult(code=non_trans_code) for _ in range(num_res)]
if transient is not None:
results[transient] = search.PutResult(code=trans_code)
return search.PutError('lol', results)
def test_use_default_num_retries(self):
doc = {'id': 'doc', 'prop': 'val'}
exception = self._get_put_error(1, transient=0)
failing_put = test_utils.FailingFunction(
search.Index.put,
exception,
gae_search_services.DEFAULT_NUM_RETRIES,
)
add_docs_counter = test_utils.CallCounter(
gae_search_services.add_documents_to_index)
put_ctx = self.swap(search.Index, 'put', failing_put)
add_docs_ctx = self.swap(
gae_search_services,
'add_documents_to_index',
add_docs_counter)
assert_raises_ctx = self.assertRaises(
gae_search_services.SearchFailureError)
with put_ctx, add_docs_ctx, assert_raises_ctx as context_mgr:
gae_search_services.add_documents_to_index([doc], 'my_index')
self.assertEqual(context_mgr.exception.original_exception, exception)
self.assertEqual(
add_docs_counter.times_called,
gae_search_services.DEFAULT_NUM_RETRIES)
def test_use_custom_number_of_retries(self):
doc = {'id': 'doc', 'prop': 'val'}
exception = self._get_put_error(1, transient=0)
failing_put = test_utils.FailingFunction(
search.Index.put,
exception,
42)
add_docs_counter = test_utils.CallCounter(
gae_search_services.add_documents_to_index)
put_ctx = self.swap(search.Index, 'put', failing_put)
add_docs_ctx = self.swap(
gae_search_services, 'add_documents_to_index', add_docs_counter)
assert_raises_ctx = self.assertRaises(
gae_search_services.SearchFailureError)
with put_ctx, add_docs_ctx, assert_raises_ctx:
gae_search_services.add_documents_to_index(
[doc], 'my_index', retries=42)
self.assertEqual(add_docs_counter.times_called, 42)
def test_arguments_are_preserved_in_retries(self):
doc = {'id': 'doc', 'prop': 'val'}
exception = self._get_put_error(1, transient=0)
failing_put = test_utils.FailingFunction(
search.Index.put,
exception,
3
)
add_docs_counter = test_utils.CallCounter(
gae_search_services.add_documents_to_index)
put_ctx = self.swap(search.Index, 'put', failing_put)
add_docs_ctx = self.swap(
gae_search_services,
'add_documents_to_index',
add_docs_counter)
with put_ctx, add_docs_ctx:
gae_search_services.add_documents_to_index(
[doc], 'my_index', retries=4)
self.assertEqual(add_docs_counter.times_called, 4)
result = search.Index('my_index').get('doc')
self.assertEqual(result.field('prop').value, 'val')
def test_put_error_with_transient_result(self):
docs = [{'id': 'doc1', 'prop': 'val1'},
{'id': 'doc2', 'prop': 'val2'},
{'id': 'doc3', 'prop': 'val3'}]
error = self._get_put_error(3, transient=1)
failing_put = test_utils.FailingFunction(
search.Index.put,
error,
4)
add_docs_counter = test_utils.CallCounter(
gae_search_services.add_documents_to_index)
put_ctx = self.swap(search.Index, 'put', failing_put)
add_docs_ctx = self.swap(
gae_search_services,
'add_documents_to_index',
add_docs_counter)
with put_ctx, add_docs_ctx:
gae_search_services.add_documents_to_index(
docs, 'my_index', retries=5)
self.assertEqual(add_docs_counter.times_called, 5)
for i in xrange(1, 4):
result = search.Index('my_index').get('doc' + str(i))
self.assertEqual(result.field('prop').value, 'val' + str(i))
def test_put_error_without_transient_result(self):
docs = [{'id': 'doc1', 'prop': 'val1'},
{'id': 'doc2', 'prop': 'val2'},
{'id': 'doc3', 'prop': 'val3'}]
error = self._get_put_error(3)
failing_put = test_utils.FailingFunction(search.Index.put, error, 1)
add_docs_counter = test_utils.CallCounter(
gae_search_services.add_documents_to_index)
add_docs_ctx = self.swap(
gae_search_services,
'add_documents_to_index',
add_docs_counter)
put_ctx = self.swap(search.Index, 'put', failing_put)
assert_raises_ctx = self.assertRaises(
gae_search_services.SearchFailureError)
with add_docs_ctx, put_ctx, assert_raises_ctx as e:
gae_search_services.add_documents_to_index(docs, 'my_index')
# Assert that the method only gets called once, since the error is not
# transient.
self.assertEqual(add_docs_counter.times_called, 1)
self.assertEqual(e.exception.original_exception, error)
class SearchRemoveFromIndexTests(test_utils.GenericTestBase):
"""Test deleting documents from search indexes."""
def test_delete_single_document(self):
doc = search.Document(doc_id='doc_id', fields=[
search.TextField(name='k', value='v')])
index = search.Index('my_index')
index.put([doc])
gae_search_services.delete_documents_from_index(['doc_id'], 'my_index')
self.assertIsNone(index.get('doc_id'))
def test_delete_multiple_documents(self):
index = search.Index('my_index')
for i in xrange(10):
field = search.TextField(name='k', value='v%d' % i)
doc = search.Document(doc_id='doc%d' % i, fields=[field])
index.put([doc])
gae_search_services.delete_documents_from_index(
['doc' + str(i) for i in xrange(10)], 'my_index')
for i in xrange(10):
self.assertIsNone(index.get('doc%d' % i))
def test_doc_ids_must_be_strings(self):
with self.assertRaisesRegexp(ValueError, str(dict)):
gae_search_services.delete_documents_from_index(
['d1', {'id': 'd2'}],
'index')
def test_index_must_be_string(self):
with self.assertRaises(ValueError):
gae_search_services.delete_documents_from_index(
['doc_id'], search.Index('ind'))
def _get_delete_error(self, num_res, transient=None):
"""returns a DeleteError. with num_res results.
If transient is given, it should be an index in the
results array. The result at that index will have a transient
error code.
"""
non_trans_code = search.OperationResult.INVALID_REQUEST
trans_code = search.OperationResult.TRANSIENT_ERROR
results = [
search.DeleteResult(code=non_trans_code) for _ in range(num_res)]
if transient is not None:
results[transient] = search.DeleteResult(code=trans_code)
return search.DeleteError('lol', results=results)
def test_use_default_num_retries(self):
exception = self._get_delete_error(1, transient=0)
failing_delete = test_utils.FailingFunction(
search.Index.delete,
exception,
gae_search_services.DEFAULT_NUM_RETRIES
)
delete_docs_counter = test_utils.CallCounter(
gae_search_services.delete_documents_from_index)
delete_ctx = self.swap(search.Index, 'delete', failing_delete)
delete_docs_ctx = self.swap(
gae_search_services,
'delete_documents_from_index',
delete_docs_counter)
assert_raises_ctx = self.assertRaises(
gae_search_services.SearchFailureError)
with delete_ctx, delete_docs_ctx, assert_raises_ctx as context_mgr:
gae_search_services.delete_documents_from_index(
['doc'], 'my_index')
self.assertEqual(context_mgr.exception.original_exception, exception)
self.assertEqual(
delete_docs_counter.times_called,
gae_search_services.DEFAULT_NUM_RETRIES)
def test_use_custom_number_of_retries(self):
exception = self._get_delete_error(1, transient=0)
failing_delete = test_utils.FailingFunction(
search.Index.delete, exception, 42)
delete_docs_counter = test_utils.CallCounter(
gae_search_services.delete_documents_from_index)
delete_ctx = self.swap(search.Index, 'delete', failing_delete)
delete_docs_ctx = self.swap(
gae_search_services,
'delete_documents_from_index',
delete_docs_counter)
assert_raises_ctx = self.assertRaises(
gae_search_services.SearchFailureError)
with delete_ctx, delete_docs_ctx, assert_raises_ctx:
gae_search_services.delete_documents_from_index(
['id'], 'index', retries=42)
self.assertEqual(delete_docs_counter.times_called, 42)
def test_arguments_are_preserved_in_retries(self):
index = search.Index('index')
index.put([search.Document(doc_id='doc', fields=[
search.TextField(name='prop', value='val')
])])
exception = self._get_delete_error(1, transient=0)
failing_delete = test_utils.FailingFunction(
search.Index.delete, exception, 3)
delete_docs_counter = test_utils.CallCounter(
gae_search_services.delete_documents_from_index)
index_ctx = self.swap(search.Index, 'delete', failing_delete)
delete_docs_ctx = self.swap(
gae_search_services,
'delete_documents_from_index',
delete_docs_counter)
with index_ctx, delete_docs_ctx:
gae_search_services.delete_documents_from_index(
['doc'], 'index', retries=4)
self.assertEqual(delete_docs_counter.times_called, 4)
result = search.Index('my_index').get('doc')
self.assertIsNone(result)
def test_delete_error_with_transient_result(self):
error = self._get_delete_error(3, transient=1)
failing_delete = test_utils.FailingFunction(
search.Index.delete,
error,
4)
delete_docs_counter = test_utils.CallCounter(
gae_search_services.delete_documents_from_index)
index = search.Index('my_index')
for i in xrange(3):
index.put(search.Document(doc_id='d' + str(i), fields=[
search.TextField(name='prop', value='value')
]))
delete_ctx = self.swap(search.Index, 'delete', failing_delete)
delete_docs_ctx = self.swap(
gae_search_services,
'delete_documents_from_index',
delete_docs_counter)
with delete_ctx, delete_docs_ctx:
gae_search_services.delete_documents_from_index(
['d0', 'd1', 'd2'],
'my_index',
retries=5)
self.assertEqual(delete_docs_counter.times_called, 5)
for i in xrange(3):
result = search.Index('my_index').get('d' + str(i))
self.assertIsNone(result)
def test_put_error_without_transient_result(self):
error = self._get_delete_error(3)
delete_spy = test_utils.FailingFunction(search.Index.delete, error, 1)
delete_docs_counter = test_utils.CallCounter(
gae_search_services.delete_documents_from_index)
delete_docs_ctx = self.swap(
gae_search_services,
'delete_documents_from_index',
delete_docs_counter)
delete_ctx = self.swap(search.Index, 'delete', delete_spy)
assert_raises_ctx = self.assertRaises(
gae_search_services.SearchFailureError)
with delete_docs_ctx, delete_ctx, assert_raises_ctx as e:
gae_search_services.delete_documents_from_index(
['a', 'b', 'c'],
'my_index')
# Assert that the method only gets called once, since the error is not
# transient.
self.assertEqual(delete_docs_counter.times_called, 1)
self.assertEqual(e.exception.original_exception, error)
class SearchQueryTests(test_utils.GenericTestBase):
"""Test searching for documents in an index."""
def test_search_all_documents(self):
doc1 = search.Document(doc_id='doc1', language='en', rank=1, fields=[
search.TextField(name='k', value='abc def ghi')])
doc2 = search.Document(doc_id='doc2', language='en', rank=2, fields=[
search.TextField(name='k', value='abc jkl mno')])
doc3 = search.Document(doc_id='doc3', language='en', rank=3, fields=[
search.TextField(name='k', value='abc jkl ghi')])
index = search.Index('my_index')
index.put([doc1, doc2, doc3])
result = gae_search_services.search('k:abc', 'my_index')[0]
self.assertIn({
'id': 'doc1', 'k': 'abc def ghi', 'rank': 1, 'language_code': 'en'
}, result)
self.assertIn({
'id': 'doc2', 'k': 'abc jkl mno', 'rank': 2, 'language_code': 'en'
}, result)
self.assertIn({
'id': 'doc3', 'k': 'abc jkl ghi', 'rank': 3, 'language_code': 'en'
}, result)
def test_respect_search_query(self):
doc1 = search.Document(doc_id='doc1', rank=1, language='en', fields=[
search.TextField(name='k', value='abc def ghi')])
doc2 = search.Document(doc_id='doc2', rank=1, language='en', fields=[
search.TextField(name='k', value='abc jkl mno')])
doc3 = search.Document(doc_id='doc3', rank=1, language='en', fields=[
search.TextField(name='k', value='abc jkl ghi')])
index = search.Index('my_index')
index.put([doc1, doc2, doc3])
result = gae_search_services.search('k:jkl', 'my_index')[0]
self.assertNotIn({
'id': 'doc1', 'k': 'abc def ghi', 'language_code': 'en', 'rank': 1
}, result)
self.assertIn({
'id': 'doc2', 'k': 'abc jkl mno', 'language_code': 'en', 'rank': 1
}, result)
self.assertIn({
'id': 'doc3', 'k': 'abc jkl ghi', 'language_code': 'en', 'rank': 1
}, result)
def test_respect_limit(self):
doc1 = search.Document(doc_id='doc1', fields=[
search.TextField(name='k', value='abc def ghi')])
doc2 = search.Document(doc_id='doc2', fields=[
search.TextField(name='k', value='abc jkl mno')])
doc3 = search.Document(doc_id='doc3', fields=[
search.TextField(name='k', value='abc jkl ghi')])
index = search.Index('my_index')
index.put([doc1, doc2, doc3])
result = gae_search_services.search('k:abc', 'my_index', limit=2)[0]
self.assertEqual(len(result), 2)
def test_use_cursor(self):
doc1 = search.Document(doc_id='doc1', language='en', rank=1, fields=[
search.TextField(name='k', value='abc def ghi')])
doc2 = search.Document(doc_id='doc2', language='en', rank=1, fields=[
search.TextField(name='k', value='abc jkl mno')])
doc3 = search.Document(doc_id='doc3', language='en', rank=1, fields=[
search.TextField(name='k', value='abc jkl ghi')])
index = search.Index('my_index')
index.put([doc1, doc2, doc3])
result1, cursor = gae_search_services.search(
'k:abc', 'my_index', limit=2)
result2, cursor = gae_search_services.search(
'k:abc', 'my_index', cursor=cursor)
self.assertEqual(len(result1), 2)
self.assertEqual(len(result2), 1)
dict1 = {'id': 'doc1', 'k': 'abc def ghi', 'language_code': 'en',
'rank': 1}
self.assertIn(dict1, result1 + result2)
dict2 = {'id': 'doc2', 'k': 'abc jkl mno', 'language_code': 'en',
'rank': 1}
self.assertIn(dict2, result1 + result2)
dict3 = {'id': 'doc3', 'k': 'abc jkl ghi', 'language_code': 'en',
'rank': 1}
self.assertIn(dict3, result1 + result2)
def test_ids_only(self):
doc1 = search.Document(doc_id='doc1', fields=[
search.TextField(name='k', value='abc def ghi')])
doc2 = search.Document(doc_id='doc2', fields=[
search.TextField(name='k', value='abc jkl mno')])
doc3 = search.Document(doc_id='doc3', fields=[
search.TextField(name='k', value='abc jkl ghi')])
index = search.Index('my_index')
index.put([doc1, doc2, doc3])
result = gae_search_services.search(
'k:abc', 'my_index', ids_only=True)[0]
self.assertIn('doc1', result)
self.assertIn('doc2', result)
self.assertIn('doc3', result)
def test_cursor_is_none_if_no_more_results(self):
doc1 = search.Document(doc_id='doc1', fields=[
search.TextField(name='k', value='abc def ghi')])
doc2 = search.Document(doc_id='doc2', fields=[
search.TextField(name='k', value='abc jkl mno')])
doc3 = search.Document(doc_id='doc3', fields=[
search.TextField(name='k', value='abc jkl ghi')])
index = search.Index('my_index')
index.put([doc1, doc2, doc3])
cursor = gae_search_services.search('k:abc', 'my_index')[1]
self.assertIsNone(cursor)
def test_default_rank_is_descending_date(self):
# Time is only saved with 1 second accuracy,
# so I'm putting a 1 second delay between puts.
dict1 = {'id': 'doc1', 'k': 'abc def'}
dict2 = {'id': 'doc2', 'k': 'abc ghi'}
dict3 = {'id': 'doc3', 'k': 'abc jkl'}
gae_search_services.add_documents_to_index([dict1], 'my_index')
time.sleep(1)
gae_search_services.add_documents_to_index([dict2], 'my_index')
time.sleep(1)
gae_search_services.add_documents_to_index([dict3], 'my_index')
result = gae_search_services.search(
'k:abc', index='my_index', ids_only=True)[0]
self.assertEqual(result, ['doc3', 'doc2', 'doc1'])
def test_search_with_custom_rank_and_language(self):
doc1 = {'id': 'doc1', 'k': 'abc def', 'rank': 3, 'language_code': 'en'}
doc2 = {'id': 'doc2', 'k': 'abc ghi', 'rank': 1, 'language_code': 'fr'}
doc3 = {'id': 'doc3', 'k': 'abc jkl', 'rank': 2, 'language_code': 'nl'}
gae_search_services.add_documents_to_index([doc1, doc2, doc3], 'index')
result = gae_search_services.search('k:abc', index='index')[0]
self.assertEqual(result, [doc1, doc3, doc2])
def test_search_using_single_sort_expression(self):
doc1 = {'id': 'doc1', 'k': 'abc ghi'}
doc2 = {'id': 'doc2', 'k': 'abc def'}
doc3 = {'id': 'doc3', 'k': 'abc jkl'}
gae_search_services.add_documents_to_index([doc1, doc2, doc3], 'index')
result = gae_search_services.search('k:abc', 'index', sort='+k')[0]
self.assertEqual(result[0].get('id'), 'doc2')
self.assertEqual(result[1].get('id'), 'doc1')
self.assertEqual(result[2].get('id'), 'doc3')
result = gae_search_services.search('k:abc', 'index', sort='-k')[0]
self.assertEqual(result[0].get('id'), 'doc3')
self.assertEqual(result[1].get('id'), 'doc1')
self.assertEqual(result[2].get('id'), 'doc2')
def test_search_using_multiple_sort_expressions(self):
doc1 = {'id': 'doc1', 'k1': 2, 'k2': 'abc ghi'}
doc2 = {'id': 'doc2', 'k1': 1, 'k2': 'abc def'}
doc3 = {'id': 'doc3', 'k1': 1, 'k2': 'abc jkl'}
gae_search_services.add_documents_to_index([doc1, doc2, doc3], 'index')
result = gae_search_services.search(
'k2:abc', 'index', sort='+k1 -k2')[0]
self.assertEqual(result[0].get('id'), 'doc3')
self.assertEqual(result[1].get('id'), 'doc2')
self.assertEqual(result[2].get('id'), 'doc1')
def test_use_default_num_retries(self):
exception = search.TransientError('oops')
failing_index_search = test_utils.FailingFunction(
search.Index.search,
exception,
gae_search_services.DEFAULT_NUM_RETRIES)
search_counter = test_utils.CallCounter(gae_search_services.search)
search_ctx = self.swap(search.Index, 'search', failing_index_search)
search_counter_ctx = self.swap(
gae_search_services, 'search', search_counter)
assert_raises_ctx = self.assertRaises(
gae_search_services.SearchFailureError)
with search_ctx, search_counter_ctx, assert_raises_ctx as context_mgr:
gae_search_services.search('query', 'my_index')
self.assertEqual(context_mgr.exception.original_exception, exception)
self.assertEqual(
search_counter.times_called,
gae_search_services.DEFAULT_NUM_RETRIES)
def test_use_custom_number_of_retries(self):
exception = search.TransientError('oops')
failing_index_search = test_utils.FailingFunction(
search.Index.search,
exception,
3)
search_counter = test_utils.CallCounter(gae_search_services.search)
index_ctx = self.swap(search.Index, 'search', failing_index_search)
search_counter_ctx = self.swap(
gae_search_services, 'search', search_counter)
assert_raises_ctx = self.assertRaises(
gae_search_services.SearchFailureError)
with index_ctx, search_counter_ctx, assert_raises_ctx:
gae_search_services.search('query', 'my_index', retries=3)
self.assertEqual(search_counter.times_called, 3)
def test_arguments_are_preserved_in_retries(self):
for i in xrange(3):
doc = search.Document(doc_id='doc%d' % i, fields=[
search.TextField('prop', 'val'),
search.NumberField('index', i)
])
search.Index('my_index').put(doc)
exception = search.TransientError('oops')
failing_index_search = test_utils.FailingFunction(
search.Index.search, exception, 3)
search_counter = test_utils.CallCounter(gae_search_services.search)
gae_search_ctx = self.swap(
search.Index, 'search', failing_index_search)
search_counter_ctx = self.swap(
gae_search_services, 'search', search_counter)
with gae_search_ctx, search_counter_ctx:
result, cursor = gae_search_services.search(
'prop:val',
'my_index',
sort='-index',
limit=2,
ids_only=True,
retries=4)
failing_index_search2 = test_utils.FailingFunction(
search.Index.search,
exception,
3)
search_counter2 = test_utils.CallCounter(gae_search_services.search)
gae_search_ctx2 = self.swap(
search.Index, 'search', failing_index_search2)
search_counter_ctx2 = self.swap(
gae_search_services, 'search', search_counter2)
with gae_search_ctx2, search_counter_ctx2:
result2, cursor = gae_search_services.search(
'prop:val',
'my_index',
sort='-index',
limit=2,
cursor=cursor,
ids_only=True,
retries=4)
self.assertEqual(search_counter.times_called, 4)
self.assertEqual(result, ['doc2', 'doc1'])
# Also check that the cursor is preserved.
self.assertEqual(search_counter2.times_called, 4)
self.assertEqual(result2, ['doc0'])
class SearchGetFromIndexTests(test_utils.GenericTestBase):
def test_get_document_from_index(self):
document = search.Document(doc_id='my_doc', fields=[
search.TextField(name='my_field', value='value')
])
search.Index('my_index').put(document)
result = gae_search_services.get_document_from_index(
'my_doc', 'my_index')
self.assertEqual(result.get('id'), 'my_doc')
self.assertEqual(result.get('my_field'), 'value')
| 42.314003 | 79 | 0.623813 |
f4a9328cbae72f8ad4383d107f73cfd06c30a1c1 | 3,928 | py | Python | MaxLatestVersion/gettoken.py | getlove555/getbotline | 639e157495849e12ac7dd4bae6012841cf511892 | ["MIT"] | 6 | 2020-05-23T21:47:52.000Z | 2021-03-30T00:19:08.000Z | MaxLatestVersion/gettoken.py | getlove555/getbotline | 639e157495849e12ac7dd4bae6012841cf511892 | ["MIT"] | 4 | 2020-08-01T10:10:14.000Z | 2021-01-03T00:55:05.000Z | MaxLatestVersion/gettoken.py | LOUREN03/lourenelle | 5448a8634d438f35df98e43ad135f232cf74d2b1 | ["MIT"] | 20 | 2020-05-11T08:53:30.000Z | 2021-07-16T09:50:20.000Z |
from thrift.protocol import TCompactProtocol
from thrift.transport import THttpClient
from TEAM_BOT_MAX.ttypes import IdentityProvider, LoginResultType, LoginRequest, LoginType
import json, requests, livejson, LineService
def loggedIn(func):
def checkLogin(*args, **kwargs):
if args[0].isLogin:
return func(*args, **kwargs)
else:
args[0].callback.other('You want to call the function, you must login to TEAMBOTMAXv2')
return checkLogin
class getToken:
isLogin = False
def __init__(self):
self.isLogin = True
@loggedIn
def DESKTOPMAC(self):
Headers = {
'User-Agent': "Line/8.3.2",
'X-Line-Application': "DESKTOPMAC\t5.10.0\tRynTokens\tTools\t10.13.2",
"x-lal": "ja-US_US",
}
return Headers
@loggedIn
def DESKTOPWIN(self):
Headers = {
'User-Agent': "Line/8.3.2",
'X-Line-Application': "DESKTOPWIN\t5.10.0\tRynTokens\tTools\t10.13.2",
"x-lal": "ja-US_US",
}
return Headers
@loggedIn
def IOSIPAD(self):
Headers = {
'User-Agent': "Line/8.3.2",
'X-Line-Application': "IOSIPAD\t8.14.2\tRynTokens\tTools\t11.2.5",
"x-lal": "ja-US_US",
}
return Headers
@loggedIn
def CHROMEOS(self):
Headers = {
'User-Agent': "Line/8.3.2",
'X-Line-Application': "CHROMEOS\t2.1.5\tRynTokens\tTools\t11.2.5",
"x-lal": "ja-US_US",
}
return Headers
@loggedIn
def WIN10(self):
Headers = {
'User-Agent': "Line/8.3.2",
'X-Line-Application': "WIN10\t5.5.5\tRynTokens\tTools\t11.2.5",
"x-lal": "ja-US_US",
}
return Headers
@loggedIn
def token(self,to,token,msg_id,sender,nametoken):
try:
a = token
a.update({'x-lpqs' : '/api/v4/TalkService.do'})
transport = THttpClient.THttpClient('https://gd2.line.naver.jp/api/v4/TalkService.do')
transport.setCustomHeaders(a)
protocol = TCompactProtocol.TCompactProtocol(transport)
clienttoken = LineService.Client(protocol)
qr = clienttoken.getAuthQrcode(keepLoggedIn=1, systemName='RynTokens')
link = "line://au/q/" + qr.verifier
#self.sendReplyMessage(msg_id, to, "Click This Link Only For 2 Minute :)\n\n{}".format(link))
data = {
"type": "template",
"altText": "Token",
"template": {
"type": "buttons",
"title": "Token %s" % nametoken,
"text": "Click This Button\nOnly For 2 Minutes",
"actions": [
{
"type": "uri",
"label": "Click Me",
"uri": link
},
{
"type": "uri",
"label": "Link ?",
"uri": 'line://app/1603968955-ORWb9RdY/?type=text&text=%s' % link
}
]
}
}
self.postTemplate(to, data)
a.update({"x-lpqs" : '/api/v4/TalkService.do', 'X-Line-Access': qr.verifier})
json.loads(requests.session().get('https://gd2.line.naver.jp/Q', headers=a).text)
a.update({'x-lpqs' : '/api/v4p/rs'})
transport = THttpClient.THttpClient('https://gd2.line.naver.jp/api/v4p/rs')
transport.setCustomHeaders(a)
protocol = TCompactProtocol.TCompactProtocol(transport)
clienttoken = LineService.Client(protocol)
req = LoginRequest()
req.type = 1
req.verifier = qr.verifier
req.e2eeVersion = 1
res = clienttoken.loginZ(req)
try:
settings = livejson.File('setting.json', True, True, 4)
settings['token']['token'] = res.authToken
settings['token']['status'] = True
self.sendMessage(to, 'Success get your token,\nCek Your Private Chat')
except Exception as e:
self.sendMessage(to, str(e))
except Exception as error:
self.sendMessage(to, "Login Kampank")
self.sendMessage(to, str(error))
| 32.46281 | 102 | 0.581721 |
397db2baaf5a72e97303d97bab28509dd6d9d086 | 6,018 | py | Python | tests/test_run.py | fingul/psdash | 67638b8df867cf32868b282d574e54945f677ee3 | ["CC0-1.0"] | null | null | null | tests/test_run.py | fingul/psdash | 67638b8df867cf32868b282d574e54945f677ee3 | ["CC0-1.0"] | null | null | null | tests/test_run.py | fingul/psdash | 67638b8df867cf32868b282d574e54945f677ee3 | ["CC0-1.0"] | null | null | null |
import os
from psdash.run import PsDashRunner
from psdash.node import LocalNode
import gevent
import socket
import unittest
import tempfile
import time
class TestRunner(unittest.TestCase):
def test_args_log(self):
_, filename = tempfile.mkstemp()
r = PsDashRunner(args=['-l', filename])
self.assertEqual(r.app.config['PSDASH_LOGS'][0], filename)
def test_args_bind(self):
r = PsDashRunner(args=['-b', '10.0.0.1'])
self.assertEqual(r.app.config['PSDASH_BIND_HOST'], '10.0.0.1')
def test_args_port(self):
r = PsDashRunner(args=['-p', '5555'])
self.assertEqual(r.app.config['PSDASH_PORT'], 5555)
def test_args_debug(self):
r = PsDashRunner(args=['-d'])
self.assertTrue(r.app.debug)
def test_default_args_dont_override_config(self):
_, filename = tempfile.mkstemp()
with open(filename, "w") as f:
f.write("PSDASH_LOGS = ['/var/log/boot.log', '/var/log/dmesg']\n")
f.flush()
os.environ['PSDASH_CONFIG'] = filename
r = PsDashRunner()
self.assertEquals(r.app.config['PSDASH_LOGS'], ['/var/log/boot.log', '/var/log/dmesg'])
del os.environ['PSDASH_CONFIG']
def test_reload_logs(self):
_, filename = tempfile.mkstemp()
r = PsDashRunner(args=['-l', filename])
pre_count = len(r.get_local_node().logs.available)
r.get_local_node().logs.add_patterns(r.app.config['PSDASH_LOGS'])
post_count = len(r.get_local_node().logs.available)
self.assertEqual(pre_count, post_count)
def test_update_net_io_counters(self):
r = PsDashRunner()
socket.getaddrinfo('example.org', 80)
counters = r.get_local_node().net_io_counters.update()
for c in counters.itervalues():
if c['rx_per_sec'] > 0 and c['tx_per_sec'] > 0:
break
else:
self.fail("Didn't find any changed network interface")
def test_local_node_is_added(self):
r = PsDashRunner()
self.assertIsInstance(r.get_local_node(), LocalNode)
def test_register_node_creates_proper_node_dict(self):
r = PsDashRunner()
now = int(time.time())
node = r.register_node('examplehost', 'example.org', 5000)
self.assertEqual(node.host, 'example.org')
self.assertEqual(node.port, 5000)
self.assertEqual(node.last_registered, now)
def test_reregister_node(self):
r = PsDashRunner()
now = int(time.time())
r.register_node('examplehost', 'example.org', 5000)
node = r.register_node('examplehost', 'example.org', 5000)
self.assertEqual(node.host, 'example.org')
self.assertEqual(node.port, 5000)
self.assertEqual(node.last_registered, now)
def test_get_all_nodes(self):
r = PsDashRunner()
r.register_node('examplehost', 'example.org', 5000)
self.assertEqual(len(r.get_nodes()), 2) # local + registered
def test_nodes_from_config(self):
config = {
'PSDASH_NODES': [
{
'name': 'test-node',
'host': 'remotehost.org',
'port': 5000
}
]
}
r = PsDashRunner(config)
self.assertEqual(len(r.get_nodes()), 2)
self.assertIn('remotehost.org:5000', r.get_nodes())
self.assertEqual(r.get_nodes()['remotehost.org:5000'].name, 'test-node')
self.assertEqual(r.get_nodes()['remotehost.org:5000'].host, 'remotehost.org')
self.assertEqual(r.get_nodes()['remotehost.org:5000'].port, 5000)
def test_register_agent(self):
jobs = []
agent_options = {
'PSDASH_AGENT': True,
'PSDASH_PORT': 5001,
'PSDASH_REGISTER_TO': 'http://localhost:5000',
'PSDASH_REGISTER_AS': 'the_agent'
}
r = PsDashRunner()
agent = PsDashRunner(agent_options)
jobs.append(gevent.spawn(r.run))
gevent.sleep(0.3)
jobs.append(gevent.spawn(agent.run))
gevent.sleep(0.3)
self.assertIn('127.0.0.1:5001', r.get_nodes())
self.assertEquals(r.get_node('127.0.0.1:5001').name, 'the_agent')
self.assertEquals(r.get_node('127.0.0.1:5001').port, 5001)
r.server.close()
agent.server.close()
gevent.killall(jobs)
def test_register_agent_without_name_defaults_to_hostname(self):
agent_options = {
'PSDASH_AGENT': True,
'PSDASH_PORT': 5001,
'PSDASH_REGISTER_TO': 'http://localhost:5000'
}
r = PsDashRunner()
agent = PsDashRunner(agent_options)
jobs = []
jobs.append(gevent.spawn(r.run))
gevent.sleep(0.3)
jobs.append(gevent.spawn(agent.run))
gevent.sleep(0.3)
self.assertIn('127.0.0.1:5001', r.get_nodes())
self.assertEquals(r.get_node('127.0.0.1:5001').name, socket.gethostname())
self.assertEquals(r.get_node('127.0.0.1:5001').port, 5001)
r.server.close()
agent.server.close()
gevent.killall(jobs)
def test_register_agent_to_auth_protected_host(self):
r = PsDashRunner({
'PSDASH_AUTH_USERNAME': 'user',
'PSDASH_AUTH_PASSWORD': 'pass'
})
agent = PsDashRunner({
'PSDASH_AGENT': True,
'PSDASH_PORT': 5001,
'PSDASH_REGISTER_TO': 'http://localhost:5000',
'PSDASH_AUTH_USERNAME': 'user',
'PSDASH_AUTH_PASSWORD': 'pass'
})
jobs = []
jobs.append(gevent.spawn(r.run))
gevent.sleep(0.3)
jobs.append(gevent.spawn(agent.run))
gevent.sleep(0.3)
self.assertIn('127.0.0.1:5001', r.get_nodes())
self.assertEquals(r.get_node('127.0.0.1:5001').name, socket.gethostname())
self.assertEquals(r.get_node('127.0.0.1:5001').port, 5001)
r.server.close()
agent.server.close()
gevent.killall(jobs)
| 34.988372 | 95 | 0.599202 |
3fdd50d19c7d5aa1fd7a245a82630fddd521c667 | 16,984 | py | Python | torchvision/datasets/video_utils.py | jdsgomes/vision | c890a7e75ebeaaa75ae9ace4c203b7fc145df068 | ["BSD-3-Clause"] | 1 | 2022-02-14T09:16:02.000Z | 2022-02-14T09:16:02.000Z | torchvision/datasets/video_utils.py | jdsgomes/vision | c890a7e75ebeaaa75ae9ace4c203b7fc145df068 | ["BSD-3-Clause"] | null | null | null | torchvision/datasets/video_utils.py | jdsgomes/vision | c890a7e75ebeaaa75ae9ace4c203b7fc145df068 | ["BSD-3-Clause"] | null | null | null |
import bisect
import math
import warnings
from fractions import Fraction
from typing import Any, Dict, List, Optional, Callable, Union, Tuple, TypeVar, cast
import torch
from torchvision.io import (
_probe_video_from_file,
_read_video_from_file,
read_video,
read_video_timestamps,
)
from .utils import tqdm
T = TypeVar("T")
def pts_convert(pts: int, timebase_from: Fraction, timebase_to: Fraction, round_func: Callable = math.floor) -> int:
"""convert pts between different time bases
Args:
pts: presentation timestamp, int
timebase_from: original timebase. Fraction
timebase_to: new timebase. Fraction
round_func: rounding function.
"""
new_pts = Fraction(pts, 1) * timebase_from / timebase_to
return round_func(new_pts)
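# Illustrative sketch (not part of torchvision): pts_convert rescales a timestamp
# between two timebases. 90000 ticks in a 1/90000 s timebase span one second,
# which corresponds to 30 ticks in a 1/30 s timebase.
def _pts_convert_demo() -> int:
    return pts_convert(90000, Fraction(1, 90000), Fraction(1, 30))  # == 30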
def unfold(tensor: torch.Tensor, size: int, step: int, dilation: int = 1) -> torch.Tensor:
"""
similar to tensor.unfold, but with the dilation
and specialized for 1d tensors
Returns all consecutive windows of `size` elements, with
`step` between windows. The distance between each element
in a window is given by `dilation`.
"""
if tensor.dim() != 1:
raise ValueError(f"tensor should have 1 dimension instead of {tensor.dim()}")
o_stride = tensor.stride(0)
numel = tensor.numel()
new_stride = (step * o_stride, dilation * o_stride)
new_size = ((numel - (dilation * (size - 1) + 1)) // step + 1, size)
if new_size[0] < 1:
new_size = (0, size)
return torch.as_strided(tensor, new_size, new_stride)
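# Illustrative sketch (not part of torchvision): unfold on a ten-element 1-D tensor
# with size=3 and step=2 produces four overlapping windows.
def _unfold_demo() -> torch.Tensor:
    # tensor([[0, 1, 2], [2, 3, 4], [4, 5, 6], [6, 7, 8]])
    return unfold(torch.arange(10), size=3, step=2)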
class _VideoTimestampsDataset:
"""
Dataset used to parallelize the reading of the timestamps
of a list of videos, given their paths in the filesystem.
Used in VideoClips and defined at top level so it can be
pickled when forking.
"""
def __init__(self, video_paths: List[str]) -> None:
self.video_paths = video_paths
def __len__(self) -> int:
return len(self.video_paths)
def __getitem__(self, idx: int) -> Tuple[List[int], Optional[float]]:
return read_video_timestamps(self.video_paths[idx])
def _collate_fn(x: T) -> T:
"""
Dummy collate function to be used with _VideoTimestampsDataset
"""
return x
class VideoClips:
"""
Given a list of video files, computes all consecutive subvideos of size
`clip_length_in_frames`, where the distance between each subvideo in the
same video is defined by `frames_between_clips`.
If `frame_rate` is specified, it will also resample all the videos to have
the same frame rate, and the clips will refer to this frame rate.
Creating this instance the first time is time-consuming, as it needs to
decode all the videos in `video_paths`. It is recommended that you
cache the results after instantiation of the class.
Recreating the clips for different clip lengths is fast, and can be done
with the `compute_clips` method.
Args:
video_paths (List[str]): paths to the video files
clip_length_in_frames (int): size of a clip in number of frames
frames_between_clips (int): step (in frames) between each clip
frame_rate (int, optional): if specified, it will resample the video
so that it has `frame_rate`, and then the clips will be defined
on the resampled video
num_workers (int): how many subprocesses to use for data loading.
0 means that the data will be loaded in the main process. (default: 0)
output_format (str): The format of the output video tensors. Can be either "THWC" (default) or "TCHW".
"""
def __init__(
self,
video_paths: List[str],
clip_length_in_frames: int = 16,
frames_between_clips: int = 1,
frame_rate: Optional[int] = None,
_precomputed_metadata: Optional[Dict[str, Any]] = None,
num_workers: int = 0,
_video_width: int = 0,
_video_height: int = 0,
_video_min_dimension: int = 0,
_video_max_dimension: int = 0,
_audio_samples: int = 0,
_audio_channels: int = 0,
output_format: str = "THWC",
) -> None:
self.video_paths = video_paths
self.num_workers = num_workers
# these options are not valid for pyav backend
self._video_width = _video_width
self._video_height = _video_height
self._video_min_dimension = _video_min_dimension
self._video_max_dimension = _video_max_dimension
self._audio_samples = _audio_samples
self._audio_channels = _audio_channels
self.output_format = output_format.upper()
if self.output_format not in ("THWC", "TCHW"):
raise ValueError(f"output_format should be either 'THWC' or 'TCHW', got {output_format}.")
if _precomputed_metadata is None:
self._compute_frame_pts()
else:
self._init_from_metadata(_precomputed_metadata)
self.compute_clips(clip_length_in_frames, frames_between_clips, frame_rate)
def _compute_frame_pts(self) -> None:
self.video_pts = []
self.video_fps = []
# strategy: use a DataLoader to parallelize read_video_timestamps
# so need to create a dummy dataset first
import torch.utils.data
dl: torch.utils.data.DataLoader = torch.utils.data.DataLoader(
_VideoTimestampsDataset(self.video_paths), # type: ignore[arg-type]
batch_size=16,
num_workers=self.num_workers,
collate_fn=_collate_fn,
)
with tqdm(total=len(dl)) as pbar:
for batch in dl:
pbar.update(1)
clips, fps = list(zip(*batch))
# we need to specify dtype=torch.long because for empty list,
# torch.as_tensor will use torch.float as default dtype. This
# happens when decoding fails and no pts is returned in the list.
clips = [torch.as_tensor(c, dtype=torch.long) for c in clips]
self.video_pts.extend(clips)
self.video_fps.extend(fps)
def _init_from_metadata(self, metadata: Dict[str, Any]) -> None:
self.video_paths = metadata["video_paths"]
assert len(self.video_paths) == len(metadata["video_pts"])
self.video_pts = metadata["video_pts"]
assert len(self.video_paths) == len(metadata["video_fps"])
self.video_fps = metadata["video_fps"]
@property
def metadata(self) -> Dict[str, Any]:
_metadata = {
"video_paths": self.video_paths,
"video_pts": self.video_pts,
"video_fps": self.video_fps,
}
return _metadata
def subset(self, indices: List[int]) -> "VideoClips":
video_paths = [self.video_paths[i] for i in indices]
video_pts = [self.video_pts[i] for i in indices]
video_fps = [self.video_fps[i] for i in indices]
metadata = {
"video_paths": video_paths,
"video_pts": video_pts,
"video_fps": video_fps,
}
return type(self)(
video_paths,
self.num_frames,
self.step,
self.frame_rate,
_precomputed_metadata=metadata,
num_workers=self.num_workers,
_video_width=self._video_width,
_video_height=self._video_height,
_video_min_dimension=self._video_min_dimension,
_video_max_dimension=self._video_max_dimension,
_audio_samples=self._audio_samples,
_audio_channels=self._audio_channels,
)
@staticmethod
def compute_clips_for_video(
video_pts: torch.Tensor, num_frames: int, step: int, fps: int, frame_rate: Optional[int] = None
) -> Tuple[torch.Tensor, Union[List[slice], torch.Tensor]]:
if fps is None:
            # if for some reason the video doesn't have an fps (because it doesn't have a video stream)
# set the fps to 1. The value doesn't matter, because video_pts is empty anyway
fps = 1
if frame_rate is None:
frame_rate = fps
total_frames = len(video_pts) * (float(frame_rate) / fps)
_idxs = VideoClips._resample_video_idx(int(math.floor(total_frames)), fps, frame_rate)
video_pts = video_pts[_idxs]
clips = unfold(video_pts, num_frames, step)
if not clips.numel():
warnings.warn(
"There aren't enough frames in the current video to get a clip for the given clip length and "
"frames between clips. The video (and potentially others) will be skipped."
)
idxs: Union[List[slice], torch.Tensor]
if isinstance(_idxs, slice):
idxs = [_idxs] * len(clips)
else:
idxs = unfold(_idxs, num_frames, step)
return clips, idxs
def compute_clips(self, num_frames: int, step: int, frame_rate: Optional[int] = None) -> None:
"""
Compute all consecutive sequences of clips from video_pts.
Always returns clips of size `num_frames`, meaning that the
last few frames in a video can potentially be dropped.
Args:
num_frames (int): number of frames for the clip
step (int): distance between two clips
frame_rate (int, optional): The frame rate
"""
self.num_frames = num_frames
self.step = step
self.frame_rate = frame_rate
self.clips = []
self.resampling_idxs = []
for video_pts, fps in zip(self.video_pts, self.video_fps):
clips, idxs = self.compute_clips_for_video(video_pts, num_frames, step, fps, frame_rate)
self.clips.append(clips)
self.resampling_idxs.append(idxs)
clip_lengths = torch.as_tensor([len(v) for v in self.clips])
self.cumulative_sizes = clip_lengths.cumsum(0).tolist()
def __len__(self) -> int:
return self.num_clips()
def num_videos(self) -> int:
return len(self.video_paths)
def num_clips(self) -> int:
"""
Number of subclips that are available in the video list.
"""
return self.cumulative_sizes[-1]
def get_clip_location(self, idx: int) -> Tuple[int, int]:
"""
Converts a flattened representation of the indices into a video_idx, clip_idx
representation.
"""
video_idx = bisect.bisect_right(self.cumulative_sizes, idx)
if video_idx == 0:
clip_idx = idx
else:
clip_idx = idx - self.cumulative_sizes[video_idx - 1]
return video_idx, clip_idx
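    # Illustrative sketch (assumed values): with cumulative_sizes == [5, 8], a flat
    # index of 6 falls in the second video, and 6 - 5 == 1 selects its second clip,
    # so get_clip_location(6) would return (1, 1).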
@staticmethod
def _resample_video_idx(num_frames: int, original_fps: int, new_fps: int) -> Union[slice, torch.Tensor]:
step = float(original_fps) / new_fps
if step.is_integer():
# optimization: if step is integer, don't need to perform
# advanced indexing
step = int(step)
return slice(None, None, step)
idxs = torch.arange(num_frames, dtype=torch.float32) * step
idxs = idxs.floor().to(torch.int64)
return idxs
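    # Illustrative sketch (assumed frame rates): resampling a 30 fps video to 15 fps
    # hits the integer fast path and returns a plain slice, while 30 fps -> 20 fps
    # needs fractional steps and returns an index tensor.
    #
    #   >>> VideoClips._resample_video_idx(6, original_fps=30, new_fps=15)
    #   slice(None, None, 2)
    #   >>> VideoClips._resample_video_idx(4, original_fps=30, new_fps=20)
    #   tensor([0, 1, 3, 4])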
def get_clip(self, idx: int) -> Tuple[torch.Tensor, torch.Tensor, Dict[str, Any], int]:
"""
Gets a subclip from a list of videos.
Args:
idx (int): index of the subclip. Must be between 0 and num_clips().
Returns:
video (Tensor)
audio (Tensor)
info (Dict)
video_idx (int): index of the video in `video_paths`
"""
if idx >= self.num_clips():
raise IndexError(f"Index {idx} out of range ({self.num_clips()} number of clips)")
video_idx, clip_idx = self.get_clip_location(idx)
video_path = self.video_paths[video_idx]
clip_pts = self.clips[video_idx][clip_idx]
from torchvision import get_video_backend
backend = get_video_backend()
if backend == "pyav":
# check for invalid options
if self._video_width != 0:
raise ValueError("pyav backend doesn't support _video_width != 0")
if self._video_height != 0:
raise ValueError("pyav backend doesn't support _video_height != 0")
if self._video_min_dimension != 0:
raise ValueError("pyav backend doesn't support _video_min_dimension != 0")
if self._video_max_dimension != 0:
raise ValueError("pyav backend doesn't support _video_max_dimension != 0")
if self._audio_samples != 0:
raise ValueError("pyav backend doesn't support _audio_samples != 0")
if backend == "pyav":
start_pts = clip_pts[0].item()
end_pts = clip_pts[-1].item()
video, audio, info = read_video(video_path, start_pts, end_pts)
else:
_info = _probe_video_from_file(video_path)
video_fps = _info.video_fps
audio_fps = None
video_start_pts = cast(int, clip_pts[0].item())
video_end_pts = cast(int, clip_pts[-1].item())
audio_start_pts, audio_end_pts = 0, -1
audio_timebase = Fraction(0, 1)
video_timebase = Fraction(_info.video_timebase.numerator, _info.video_timebase.denominator)
if _info.has_audio:
audio_timebase = Fraction(_info.audio_timebase.numerator, _info.audio_timebase.denominator)
audio_start_pts = pts_convert(video_start_pts, video_timebase, audio_timebase, math.floor)
audio_end_pts = pts_convert(video_end_pts, video_timebase, audio_timebase, math.ceil)
audio_fps = _info.audio_sample_rate
video, audio, _ = _read_video_from_file(
video_path,
video_width=self._video_width,
video_height=self._video_height,
video_min_dimension=self._video_min_dimension,
video_max_dimension=self._video_max_dimension,
video_pts_range=(video_start_pts, video_end_pts),
video_timebase=video_timebase,
audio_samples=self._audio_samples,
audio_channels=self._audio_channels,
audio_pts_range=(audio_start_pts, audio_end_pts),
audio_timebase=audio_timebase,
)
info = {"video_fps": video_fps}
if audio_fps is not None:
info["audio_fps"] = audio_fps
if self.frame_rate is not None:
resampling_idx = self.resampling_idxs[video_idx][clip_idx]
if isinstance(resampling_idx, torch.Tensor):
resampling_idx = resampling_idx - resampling_idx[0]
video = video[resampling_idx]
info["video_fps"] = self.frame_rate
assert len(video) == self.num_frames, f"{video.shape} x {self.num_frames}"
if self.output_format == "TCHW":
# [T,H,W,C] --> [T,C,H,W]
video = video.permute(0, 3, 1, 2)
return video, audio, info, video_idx
def __getstate__(self) -> Dict[str, Any]:
video_pts_sizes = [len(v) for v in self.video_pts]
# To be back-compatible, we convert data to dtype torch.long as needed
# because for empty list, in legacy implementation, torch.as_tensor will
# use torch.float as default dtype. This happens when decoding fails and
# no pts is returned in the list.
video_pts = [x.to(torch.int64) for x in self.video_pts]
# video_pts can be an empty list if no frames have been decoded
if video_pts:
video_pts = torch.cat(video_pts) # type: ignore[assignment]
# avoid bug in https://github.com/pytorch/pytorch/issues/32351
# TODO: Revert it once the bug is fixed.
video_pts = video_pts.numpy() # type: ignore[attr-defined]
# make a copy of the fields of self
d = self.__dict__.copy()
d["video_pts_sizes"] = video_pts_sizes
d["video_pts"] = video_pts
# delete the following attributes to reduce the size of dictionary. They
# will be re-computed in "__setstate__()"
del d["clips"]
del d["resampling_idxs"]
del d["cumulative_sizes"]
# for backwards-compatibility
d["_version"] = 2
return d
def __setstate__(self, d: Dict[str, Any]) -> None:
# for backwards-compatibility
if "_version" not in d:
self.__dict__ = d
return
video_pts = torch.as_tensor(d["video_pts"], dtype=torch.int64)
video_pts = torch.split(video_pts, d["video_pts_sizes"], dim=0)
# don't need this info anymore
del d["video_pts_sizes"]
d["video_pts"] = video_pts
self.__dict__ = d
# recompute attributes "clips", "resampling_idxs" and other derivative ones
self.compute_clips(self.num_frames, self.step, self.frame_rate)
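# A hedged end-to-end sketch of how VideoClips is typically driven (the paths and
# clip parameters below are assumptions for illustration, not values used by
# torchvision itself):
#
#   >>> paths = ["/data/videos/a.mp4", "/data/videos/b.mp4"]  # hypothetical files
#   >>> clips = VideoClips(paths, clip_length_in_frames=16, frames_between_clips=8)
#   >>> cached = clips.metadata  # cache this to skip re-decoding next time
#   >>> clips2 = VideoClips(paths, 32, 16, _precomputed_metadata=cached)
#   >>> video, audio, info, video_idx = clips2.get_clip(0)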
| 40.056604
| 116
| 0.628062
|
b90882c596aaba10157cb2c8fc31b9401d948ac4
| 1,265
|
py
|
Python
|
modules/tools/navigation/planning/provider_localization.py
|
DavidSplit/apollo-3.0
|
9f82838e857e4c9146952946cbc34b9f35098deb
|
[
"Apache-2.0"
] | 6
|
2019-10-11T07:57:49.000Z
|
2022-02-23T15:23:41.000Z
|
modules/tools/navigation/planning/provider_localization.py
|
DavidSplit/apollo-3.0
|
9f82838e857e4c9146952946cbc34b9f35098deb
|
[
"Apache-2.0"
] | null | null | null |
modules/tools/navigation/planning/provider_localization.py
|
DavidSplit/apollo-3.0
|
9f82838e857e4c9146952946cbc34b9f35098deb
|
[
"Apache-2.0"
] | 12
|
2019-10-11T07:57:49.000Z
|
2022-03-16T05:13:00.000Z
|
#!/usr/bin/env python
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
# Modifications Copyright (c) 2018 LG Electronics, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
class LocalizationProvider:
def __init__(self):
self.localization_pb = None
self.x = 0
self.y = 0
self.heading = 0
def update(self, localization_pb):
self.localization_pb = localization_pb
self.x = self.localization_pb.pose.position.x
self.y = self.localization_pb.pose.position.y
self.heading = self.localization_pb.pose.heading
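# Minimal usage sketch (assumed, not part of the original tool): the provider simply
# caches the latest localization message, so a subscriber callback could do
#
#   provider = LocalizationProvider()
#   provider.update(localization_pb)  # localization_pb: a localization protobuf assumed to expose pose.position and pose.heading
#   print(provider.x, provider.y, provider.heading)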
| 38.333333
| 79
| 0.633202
|
409460ada1d5007d3b9de62aa4aa7a04ae4bf7de
| 1,468
|
py
|
Python
|
core/sentence_embeddings.py
|
venomousboxer/chabot-backend
|
1d9c4b0da5f0cfd3572edb0619a43b9d207bbd1e
|
[
"MIT"
] | 1
|
2019-08-30T05:42:40.000Z
|
2019-08-30T05:42:40.000Z
|
core/sentence_embeddings.py
|
venomousboxer/chabot-backend
|
1d9c4b0da5f0cfd3572edb0619a43b9d207bbd1e
|
[
"MIT"
] | 15
|
2020-01-28T22:39:27.000Z
|
2022-02-10T00:10:30.000Z
|
core/sentence_embeddings.py
|
venomousboxer/chabot-backend
|
1d9c4b0da5f0cfd3572edb0619a43b9d207bbd1e
|
[
"MIT"
] | 1
|
2020-04-25T06:22:30.000Z
|
2020-04-25T06:22:30.000Z
|
from InferSent.models import InferSent
import torch
import numpy as np
def cosine(u, v):
return np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))
def Start_chatbot():
model_version = 1
MODEL_PATH = "../InferSent/encoder/infersent%s.pkl" % model_version
params_model = {'bsize': 64, 'word_emb_dim': 300, 'enc_lstm_dim': 2048,
'pool_type': 'max', 'dpout_model': 0.0, 'version': model_version}
model = InferSent(params_model)
model.load_state_dict(torch.load(MODEL_PATH))
use_cuda = False
model = model.cuda() if use_cuda else model
W2V_PATH = '../data/glove.6B.300d.txt' if model_version == 1 else '../dataset/fastText/crawl-300d-2M.vec'
model.set_w2v_path(W2V_PATH)
model.build_vocab_k_words(K=570000)
dict = {}
embeddings = {}
questions = []
answers = []
with open('../data/questions.txt') as f:
content = f.readlines()
questions = [x.strip() for x in content]
with open('../data/answers.txt') as f:
content = f.readlines()
answers = [x.strip() for x in content]
for i in range(len(questions)):
dict[questions[i]] = answers[i]
embeddings[questions[i]] = model.encode([questions[i]])[0]
return model, dict, embeddings
def give_similarity_score(u, v, model, embeddings):
return cosine(embeddings[u], model.encode([v])[0])
#print(give_similarity_score('how are you today?', 'would you tell me how you are today?'))
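# Hedged usage sketch (assumes the data files referenced above exist and that the
# stored question with the highest cosine similarity is an acceptable match):
#
#   model, qa_dict, embeddings = Start_chatbot()
#   query = "how are you doing today?"
#   best_q = max(embeddings, key=lambda q: give_similarity_score(q, query, model, embeddings))
#   print(qa_dict[best_q])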
| 31.913043
| 109
| 0.651226
|
1bf1c2bf68b98b0feaf20f65334a22472f98e1b1
| 2,562
|
py
|
Python
|
tb_rest_client/models/models_pe/coap_device_type_configuration.py
|
samson0v/python_tb_rest_client
|
08ff7898740f7cec2170e85d5c3c89e222e967f7
|
[
"Apache-2.0"
] | 30
|
2020-06-19T06:42:50.000Z
|
2021-08-23T21:16:36.000Z
|
tb_rest_client/models/models_pe/coap_device_type_configuration.py
|
samson0v/python_tb_rest_client
|
08ff7898740f7cec2170e85d5c3c89e222e967f7
|
[
"Apache-2.0"
] | 25
|
2021-08-30T01:17:27.000Z
|
2022-03-16T14:10:14.000Z
|
tb_rest_client/models/models_pe/coap_device_type_configuration.py
|
samson0v/python_tb_rest_client
|
08ff7898740f7cec2170e85d5c3c89e222e967f7
|
[
"Apache-2.0"
] | 23
|
2020-07-06T13:41:54.000Z
|
2021-08-23T21:04:50.000Z
|
# coding: utf-8
"""
ThingsBoard REST API
ThingsBoard Professional Edition IoT platform REST API documentation. # noqa: E501
OpenAPI spec version: 3.3.3PAAS-RC1
Contact: info@thingsboard.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class CoapDeviceTypeConfiguration(object):
"""NOTE: This class is auto generated by the swagger code generator program.
from tb_rest_client.api_client import ApiClient
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
def __init__(self): # noqa: E501
"""CoapDeviceTypeConfiguration - a model defined in Swagger""" # noqa: E501
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(CoapDeviceTypeConfiguration, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CoapDeviceTypeConfiguration):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
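# Brief usage sketch (an assumption for illustration only; this generated model has
# no declared attributes, so its serialization is essentially empty):
#
#   cfg = CoapDeviceTypeConfiguration()
#   cfg.to_dict()                          # -> {}
#   cfg == CoapDeviceTypeConfiguration()   # -> True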
| 30.141176
| 88
| 0.57299
|
ce95eff40ac06c7a0b24ab49ce51ae527ca9a8a8
| 417
|
py
|
Python
|
exericios_faculdade/vetores_matrizes/questao_02.py
|
Ssousuke/python
|
869bb1cc385d4f54961a131d6db46a7bf99fd743
|
[
"MIT"
] | null | null | null |
exericios_faculdade/vetores_matrizes/questao_02.py
|
Ssousuke/python
|
869bb1cc385d4f54961a131d6db46a7bf99fd743
|
[
"MIT"
] | null | null | null |
exericios_faculdade/vetores_matrizes/questao_02.py
|
Ssousuke/python
|
869bb1cc385d4f54961a131d6db46a7bf99fd743
|
[
"MIT"
] | null | null | null |
# Program that fills a vector with 10 real numbers, then computes and shows
# the count of negative numbers and the sum of the positive numbers in that vector.
vetor = []
for i in range(0, 10):
    valor = float(input())
    vetor.append(valor)
negativos = 0
soma_positivos = 0
for i in vetor:
    if i < 0:
        negativos += 1
    else:
        soma_positivos += i
print(f'Sum of positives: {soma_positivos}, Negatives: {negativos}')
| 23.166667
| 79
| 0.661871
|
37537a9d932fcfb7edbf0b1c1523a1f73919cfd9
| 2,027
|
py
|
Python
|
test/test_utils.py
|
fengzhongye/face-alignment
|
6a7731168dbb1a15f9ecd5fe4c79c992f179a622
|
[
"BSD-3-Clause"
] | 2
|
2021-02-22T12:16:34.000Z
|
2021-05-02T15:23:10.000Z
|
test/test_utils.py
|
fengzhongye/face-alignment
|
6a7731168dbb1a15f9ecd5fe4c79c992f179a622
|
[
"BSD-3-Clause"
] | 3
|
2021-09-08T02:24:48.000Z
|
2022-03-12T00:44:53.000Z
|
test/test_utils.py
|
fengzhongye/face-alignment
|
6a7731168dbb1a15f9ecd5fe4c79c992f179a622
|
[
"BSD-3-Clause"
] | 1
|
2020-02-10T05:13:12.000Z
|
2020-02-10T05:13:12.000Z
|
import sys
sys.path.append('.')
import unittest
from face_alignment.utils import *
import numpy as np
import torch
class Tester(unittest.TestCase):
def test_flip_is_label(self):
# Generate the points
heatmaps = torch.from_numpy(np.random.randint(1, high=250, size=(68, 64, 64)).astype('float32'))
flipped_heatmaps = flip(flip(heatmaps.clone(), is_label=True), is_label=True)
assert np.allclose(heatmaps.numpy(), flipped_heatmaps.numpy())
def test_flip_is_image(self):
        fake_image = torch.rand(3, 256, 256)
        flipped_fake_image = flip(flip(fake_image.clone()))
        assert np.allclose(fake_image.numpy(), flipped_fake_image.numpy())
def test_getpreds(self):
pts = torch.from_numpy(np.random.randint(1, high=63, size=(68, 2)).astype('float32'))
heatmaps = np.zeros((68, 256, 256))
for i in range(68):
if pts[i, 0] > 0:
heatmaps[i] = draw_gaussian(heatmaps[i], pts[i], 2)
heatmaps = torch.from_numpy(np.expand_dims(heatmaps, axis=0))
preds, _ = get_preds_fromhm(heatmaps)
assert np.allclose(pts.numpy(), preds.numpy(), atol=5)
def test_create_heatmaps(self):
reference_scale = 195
target_landmarks = torch.randint(0, 255, (1, 68, 2)).type(torch.float) # simulated dataset
bb = create_bounding_box(target_landmarks)
centers = torch.stack([bb[:, 2] - (bb[:, 2] - bb[:, 0]) / 2.0, bb[:, 3] - (bb[:, 3] - bb[:, 1]) / 2.0], dim=1)
centers[:, 1] = centers[:, 1] - (bb[:, 3] - bb[:, 1]) * 0.12 # Not sure where 0.12 comes from
scales = (bb[:, 2] - bb[:, 0] + bb[:, 3] - bb[:, 1]) / reference_scale
heatmaps = create_target_heatmap(target_landmarks, centers, scales)
preds = get_preds_fromhm(heatmaps, centers.squeeze(), scales.squeeze())[1]
assert np.allclose(preds.numpy(), target_landmarks.numpy(), atol=5)
if __name__ == '__main__':
unittest.main()
| 39.745098
| 119
| 0.610261
|
795962c0027739c3411dab6366b3e1c4af16695b
| 5,457
|
py
|
Python
|
tests/model_connectors/test_time_series_heating_indirect.py
|
mingzhe37/geojson-modelica-translator
|
23c969fa5a1b776dfd6dd773b9dd8f6e3a0ce28b
|
[
"BSD-3-Clause-LBNL"
] | 11
|
2019-08-19T16:58:23.000Z
|
2022-01-25T14:23:49.000Z
|
tests/model_connectors/test_time_series_heating_indirect.py
|
mingzhe37/geojson-modelica-translator
|
23c969fa5a1b776dfd6dd773b9dd8f6e3a0ce28b
|
[
"BSD-3-Clause-LBNL"
] | 331
|
2019-07-24T16:15:52.000Z
|
2022-03-10T04:58:15.000Z
|
tests/model_connectors/test_time_series_heating_indirect.py
|
mingzhe37/geojson-modelica-translator
|
23c969fa5a1b776dfd6dd773b9dd8f6e3a0ce28b
|
[
"BSD-3-Clause-LBNL"
] | 10
|
2019-07-12T22:21:32.000Z
|
2022-02-22T06:30:25.000Z
|
"""
****************************************************************************************************
:copyright (c) 2019-2021 URBANopt, Alliance for Sustainable Energy, LLC, and other contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted
provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of conditions
and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this list of conditions
and the following disclaimer in the documentation and/or other materials provided with the
distribution.
Neither the name of the copyright holder nor the names of its contributors may be used to endorse
or promote products derived from this software without specific prior written permission.
Redistribution of this software, without modification, must refer to the software by the same
designation. Redistribution of a modified version of this software (i) may not refer to the
modified version by the same designation, or by any confusingly similar designation, and
(ii) must refer to the underlying software originally provided by Alliance as “URBANopt”. Except
to comply with the foregoing, the term “URBANopt”, or any confusingly similar designation may
not be used to refer to any modified version of this software or any modified version of the
underlying software originally provided by Alliance without the prior written consent of Alliance.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
****************************************************************************************************
"""
import os
import pytest
from geojson_modelica_translator.geojson.urbanopt_geojson import (
UrbanOptGeoJson
)
from geojson_modelica_translator.model_connectors.couplings.coupling import (
Coupling
)
from geojson_modelica_translator.model_connectors.couplings.graph import (
CouplingGraph
)
from geojson_modelica_translator.model_connectors.districts.district import (
District
)
from geojson_modelica_translator.model_connectors.energy_transfer_systems.ets_cold_water_stub import (
EtsColdWaterStub
)
from geojson_modelica_translator.model_connectors.energy_transfer_systems.heating_indirect import (
HeatingIndirect
)
from geojson_modelica_translator.model_connectors.load_connectors.time_series import (
TimeSeries
)
from geojson_modelica_translator.model_connectors.networks.network_heated_water_stub import (
NetworkHeatedWaterStub
)
from geojson_modelica_translator.system_parameters.system_parameters import (
SystemParameters
)
from ..base_test_case import TestCaseBase
@pytest.mark.simulation
class DistrictSystemTest(TestCaseBase):
def test_district_system(self):
project_name = "time_series_heating_indirect"
self.data_dir, self.output_dir = self.set_up(os.path.dirname(__file__), project_name)
# load in the example geojson with a single office building
filename = os.path.join(self.data_dir, "time_series_ex1.json")
self.gj = UrbanOptGeoJson(filename)
# load system parameter data
filename = os.path.join(self.data_dir, "time_series_system_params_ets.json")
sys_params = SystemParameters(filename)
# Create the time series load, ets and their coupling
time_series_load = TimeSeries(sys_params, self.gj.buildings[0])
geojson_load_id = self.gj.buildings[0].feature.properties["id"]
heating_indirect_system = HeatingIndirect(sys_params, geojson_load_id)
ts_hi_coupling = Coupling(time_series_load, heating_indirect_system)
# create heated water stub for the ets
heated_water_stub = NetworkHeatedWaterStub(sys_params)
hi_hw_coupling = Coupling(heating_indirect_system, heated_water_stub)
# create cold water stub for the load
cold_water_stub = EtsColdWaterStub(sys_params)
ts_cw_coupling = Coupling(time_series_load, cold_water_stub)
graph = CouplingGraph([
ts_hi_coupling,
hi_hw_coupling,
ts_cw_coupling,
])
district = District(
root_dir=self.output_dir,
project_name=project_name,
system_parameters=sys_params,
coupling_graph=graph
)
district.to_modelica()
root_path = os.path.abspath(os.path.join(district._scaffold.districts_path.files_dir))
self.run_and_assert_in_docker(os.path.join(root_path, 'DistrictEnergySystem.mo'),
project_path=district._scaffold.project_path,
project_name=district._scaffold.project_name)
| 45.857143
| 102
| 0.741983
|
5181d54812305017e672fb7764c09948bc888f6c
| 1,311
|
py
|
Python
|
python/graphscope/nx/readwrite/__init__.py
|
doudoubobo/GraphScope
|
ec000ba93033bb3097b5a9407b7037115f1f2a4c
|
[
"Apache-2.0"
] | null | null | null |
python/graphscope/nx/readwrite/__init__.py
|
doudoubobo/GraphScope
|
ec000ba93033bb3097b5a9407b7037115f1f2a4c
|
[
"Apache-2.0"
] | null | null | null |
python/graphscope/nx/readwrite/__init__.py
|
doudoubobo/GraphScope
|
ec000ba93033bb3097b5a9407b7037115f1f2a4c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from graphscope.nx.readwrite.adjlist import *
from graphscope.nx.readwrite.edgelist import *
from graphscope.nx.readwrite.gexf import *
from graphscope.nx.readwrite.gml import *
from graphscope.nx.readwrite.gpickle import *
from graphscope.nx.readwrite.graph6 import *
from graphscope.nx.readwrite.graphml import *
from graphscope.nx.readwrite.json_graph import *
from graphscope.nx.readwrite.leda import *
from graphscope.nx.readwrite.multiline_adjlist import *
from graphscope.nx.readwrite.nx_shp import *
from graphscope.nx.readwrite.nx_yaml import *
from graphscope.nx.readwrite.pajek import *
from graphscope.nx.readwrite.sparse6 import *
| 39.727273
| 74
| 0.78566
|
7454c70d55b8a3b478eb708a873d34590d8050c6
| 194,266
|
py
|
Python
|
python/paddle/fluid/framework.py
|
laipaang/Paddle
|
d7f35434b761707a8479b75636546a624399369a
|
[
"Apache-2.0"
] | 1
|
2020-06-24T14:53:24.000Z
|
2020-06-24T14:53:24.000Z
|
python/paddle/fluid/framework.py
|
laipaang/Paddle
|
d7f35434b761707a8479b75636546a624399369a
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/framework.py
|
laipaang/Paddle
|
d7f35434b761707a8479b75636546a624399369a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import collections
from collections import defaultdict
from collections import Iterable
import contextlib
from .wrapped_decorator import signature_safe_contextmanager, wrap_decorator
import os
import re
import traceback
import six
import numpy as np
import subprocess
import multiprocessing
import sys
import logging
from .. import compat as cpt
from .proto import framework_pb2
from . import core
from . import unique_name
import paddle.version as fluid_version
import warnings
__all__ = [
'Program',
'default_startup_program',
'default_main_program',
'program_guard',
'name_scope',
'cuda_places',
'cpu_places',
'cuda_pinned_places',
'in_dygraph_mode',
'is_compiled_with_cuda',
'Variable',
'ComplexVariable',
'load_op_library',
'require_version',
'device_guard',
'set_flags',
'get_flags',
]
EMPTY_VAR_NAME = core.kEmptyVarName()
TEMP_VAR_NAME = core.kTempVarName()
GRAD_VAR_SUFFIX = core.kGradVarSuffix()
ZERO_VAR_SUFFIX = core.kZeroVarSuffix()
CONTROL_DEP_VAR_PREFIX = core.kControlDepVarName()
_dygraph_tracer_ = None
_dygraph_current_expected_place_ = None
_current_device = None
global_prog_seed = 0
def require_version(min_version, max_version=None):
"""
Check if the installed version of PaddlePaddle is in [min_version, max_version],
if the installed version is lower than ``min_version`` or higher than ``max_version``,
an exception will be thrown, NO returns if the installed version is satisfied.
Args:
min_version (str): the minimum version required (like '1.4.0').
max_version (str, optional): the max version required (like '1.6.0'), default is None,
meaning any version equal or higher than ``min_version`` is acceptable.
Returns:
None.
Raises:
TypeError: if the type of ``min_version`` is not str.
TypeError: if the type of ``max_version`` is not str or type(None).
ValueError: if the value of ``min_version`` is not in version format.
ValueError: if the value of ``max_version`` is not in version format or None.
Exception: if the installed version is lower than ``min_version`` or higher than ``max_version``.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# any version >= 0.1.0 is acceptable.
fluid.require_version('0.1.0')
# if 0.1.0 <= version <= 10.0.0, it is acceptable.
fluid.require_version(min_version='0.1.0', max_version='10.0.0')
"""
if not isinstance(min_version, str):
raise TypeError(
"The type of 'min_version' in require_version must be str, but received %s."
% (type(min_version)))
if not isinstance(max_version, (str, type(None))):
raise TypeError(
"The type of 'max_version' in require_version must be str or type(None), but received %s."
% (type(max_version)))
check_format = re.match(r'\d+(\.\d+){0,3}', min_version)
if check_format is None or check_format.group() != min_version:
raise ValueError(
"The value of 'min_version' in require_version must be in format '\\d+(\\.\\d+){0,3}', "
"like '1.5.2.0', but received %s" % min_version)
if max_version is not None:
check_format = re.match(r'\d+(\.\d+){0,3}', max_version)
if check_format is None or check_format.group() != max_version:
raise ValueError(
"The value of 'max_version' in require_version must be in format '\\d+(\\.\\d+){0,3}', "
"like '1.5.2.0', but received %s" % max_version)
version_installed = [
fluid_version.major, fluid_version.minor, fluid_version.patch,
fluid_version.rc
]
zero_version = ['0', '0', '0', '0']
def version_cmp(ver_a, ver_b):
for i in six.moves.range(len(ver_a)):
if int(ver_a[i]) > int(ver_b[i]):
return 1
elif int(ver_a[i]) < int(ver_b[i]):
return -1
return 0
if version_cmp(version_installed, zero_version) == 0:
if max_version is not None:
warnings.warn(
"PaddlePaddle version in [%s, %s] required, but %s installed. "
"Maybe you are using a develop version, "
"please make sure the version is good with your code." %
(min_version, max_version, fluid_version.full_version))
else:
warnings.warn(
"PaddlePaddle version %s or higher is required, but %s installed, "
"Maybe you are using a develop version, "
"please make sure the version is good with your code." %
(min_version, fluid_version.full_version))
return
min_version_split = min_version.split('.')
min_version_to_check = min_version_split + zero_version[len(
min_version_split):]
if max_version is not None:
max_version_split = max_version.split('.')
max_version_to_check = max_version_split + zero_version[len(
max_version_split):]
if version_cmp(version_installed,
max_version_to_check) > 0 or version_cmp(
version_installed, min_version_to_check) < 0:
raise Exception(
"VersionError: PaddlePaddle version in [%s, %s] required, but %s installed."
% (min_version, max_version, fluid_version.full_version))
else:
if version_cmp(version_installed, min_version_to_check) < 0:
raise Exception(
"VersionError: PaddlePaddle version %s or higher is required, but %s installed, "
"please upgrade your PaddlePaddle to %s or other higher version."
% (min_version, fluid_version.full_version, min_version))
def in_dygraph_mode():
"""
:alias_main: paddle.in_dygraph_mode
:alias: paddle.in_dygraph_mode
:old_api: paddle.fluid.framework.in_dygraph_mode
This function checks whether the program runs in dynamic graph mode or not.
You can enter dynamic graph mode with :ref:`api_fluid_dygraph_guard` api,
or enable and disable dynamic graph mode with :ref:`api_fluid_dygraph_enable`
and :ref:`api_fluid_dygraph_disable` api .
Returns:
bool: Whether the program is running in dynamic graph mode.
Examples:
.. code-block:: python
import paddle.fluid as fluid
            fluid.enable_dygraph() # Now we are in dygraph mode
print(fluid.in_dygraph_mode()) # True
fluid.disable_dygraph()
print(fluid.in_dygraph_mode()) # False
"""
return _dygraph_tracer_ is not None
def _dygraph_not_support_(func):
def __impl__(*args, **kwargs):
assert not in_dygraph_mode(
), "We don't support %s in imperative mode" % func.__name__
return func(*args, **kwargs)
return __impl__
def _dygraph_only_(func):
def __impl__(*args, **kwargs):
assert in_dygraph_mode(
), "We Only support %s in imperative mode, please use fluid.dygraph.guard() as context to run it in imperative Mode" % func.__name__
return func(*args, **kwargs)
return __impl__
# NOTE(zhiqiu): This decorator is used for the APIs of Variable which are only
# used to make Variable and VarBase have the same interfaces, like numpy. Since VarBase is not exposed in our
# official documents, we want to keep VarBase and Variable logically consistent. While, actually,
# in our implementation, there are some APIs not supported, like numpy, because Variable contains the desc.
# So, those APIs are listed under class Variable to generate docs only.
# TODO(zhiqiu): We should make VarBase consistent with Variable in the future, for example, by inheriting from the
# same base class.
def _fake_interface_only_(func):
def __impl__(*args, **kwargs):
raise AssertionError(
"'%s' should be called by imperative Varible in imperative mode, please use fluid.dygraph.guard() as context to run it in imperative mode"
% func.__name__)
return __impl__
dygraph_not_support = wrap_decorator(_dygraph_not_support_)
dygraph_only = wrap_decorator(_dygraph_only_)
fake_interface_only = wrap_decorator(_fake_interface_only_)
def _dygraph_tracer():
return _dygraph_tracer_
def _current_expected_place():
return _dygraph_current_expected_place_
# TODO(zhiqiu): remove this function.
def _var_base_to_np(var_base):
"""
    Convert a VarBase to numpy.
Args:
var_base(VarBase) : the VarBase to convert
Returns (np.ndarray): the np.ndarray contain the value of VarBase
"""
warnings.warn(
"paddle.fluid.framework._var_base_to_np is deprecated, please use var_base.numpy() instead of _var_base_to_np(var_base)."
)
return var_base.numpy()
def _cpu_num():
if "CPU_NUM" not in os.environ.keys():
if multiprocessing.cpu_count() > 1:
sys.stderr.write(
'!!! The CPU_NUM is not specified, you should set CPU_NUM in the environment variable list.\n'
                'CPU_NUM indicates how many CPUPlaces are used in the current task.\n'
                'And if this parameter is set to N (equal to the number of physical CPU cores), the program may be faster.\n\n'
                'export CPU_NUM={} # for example, set CPU_NUM to the number of physical CPU cores, which is {}.\n\n'
'!!! The default number of CPU_NUM=1.\n'.format(
multiprocessing.cpu_count(), multiprocessing.cpu_count()))
os.environ['CPU_NUM'] = str(1)
cpu_num = os.environ.get('CPU_NUM')
return int(cpu_num)
def _cuda_ids():
gpus_env = os.getenv("FLAGS_selected_gpus")
if gpus_env:
device_ids = [int(s) for s in gpus_env.split(",")]
else:
device_ids = six.moves.range(core.get_cuda_device_count())
return device_ids
def is_compiled_with_cuda():
"""
Whether this whl package can be used to run the model on GPU.
Returns (bool): support gpu or not.
Examples:
.. code-block:: python
import paddle.fluid as fluid
support_gpu = fluid.is_compiled_with_cuda()
"""
return core.is_compiled_with_cuda()
def cuda_places(device_ids=None):
"""
**Note**:
For multi-card tasks, please use `FLAGS_selected_gpus` environment variable to set the visible GPU device.
The next version will fix the problem with `CUDA_VISIBLE_DEVICES` environment variable.
This function creates a list of :code:`fluid.CUDAPlace` objects.
If :code:`device_ids` is None, environment variable of
:code:`FLAGS_selected_gpus` would be checked first. For example, if
:code:`FLAGS_selected_gpus=0,1,2`, the returned list would
be [fluid.CUDAPlace(0), fluid.CUDAPlace(1), fluid.CUDAPlace(2)].
If :code:`FLAGS_selected_gpus` is not set, all visible
gpu places would be returned according to the :code:`CUDA_VISIBLE_DEVICES` environment variable.
If :code:`device_ids` is not None, it should be the device
ids of GPUs. For example, if :code:`device_ids=[0,1,2]`,
the returned list would be
[fluid.CUDAPlace(0), fluid.CUDAPlace(1), fluid.CUDAPlace(2)].
Parameters:
device_ids (list or tuple of int, optional): list of GPU device ids.
Returns:
list of fluid.CUDAPlace: Created GPU place list.
Examples:
.. code-block:: python
import paddle.fluid as fluid
cuda_places = fluid.cuda_places()
"""
assert core.is_compiled_with_cuda(), \
"Not compiled with CUDA"
if device_ids is None:
device_ids = _cuda_ids()
elif not isinstance(device_ids, (list, tuple)):
device_ids = [device_ids]
return [core.CUDAPlace(dev_id) for dev_id in device_ids]
def cpu_places(device_count=None):
"""
This function creates a list of :code:`fluid.CPUPlace` objects, and returns the created list.
If :code:`device_count` is None, the device count would
be determined by environment variable :code:`CPU_NUM`.
If :code:`CPU_NUM` is not set, the default value is 1,
i.e. CPU_NUM=1.
:code:`CPU_NUM` indicates the number of devices used in the current task.
The running of the program can be accelerated if :code:`CPU_NUM` is the same as the number of physical cores.
Parameters:
device_count (int, optional): device number. Default: None.
Returns:
list of fluid.CPUPlace: Created list of CPU places.
Examples:
.. code-block:: python
import paddle.fluid as fluid
cpu_places = fluid.cpu_places()
"""
if device_count is None:
device_count = _cpu_num()
return [core.CPUPlace()] * device_count
def cuda_pinned_places(device_count=None):
"""
This function creates a list of :code:`fluid.CUDAPinnedPlace` objects.
If :code:`device_count` is None, the device count would
be determined by environment variable :code:`CPU_NUM`.
If :code:`CPU_NUM` is not set, the default value is 1,
i.e. CPU_NUM=1.
:code:`CPU_NUM` indicates the number of devices used in the current task.
The running of the program can be accelerated if :code:`CPU_NUM` is the same as the number of physical cores.
Parameters:
device_count (int, optional): device number. Default: None.
Returns:
list of fluid.CUDAPinnedPlace: Created list of CUDA pinned places.
Examples:
.. code-block:: python
import paddle.fluid as fluid
cuda_pinned_places_cpu_num = fluid.cuda_pinned_places()
# or
cuda_pinned_places = fluid.cuda_pinned_places(1)
"""
assert core.is_compiled_with_cuda(), \
"Not compiled with CUDA"
if device_count is None:
device_count = len(_cuda_ids())
return [core.CUDAPinnedPlace()] * device_count
class NameScope(object):
def __init__(self, name="", parent=None):
self._children = dict()
self._name = name
self._parent = parent
def child(self, prefix):
if prefix not in self._children:
new_child = NameScope(prefix, self)
self._children[prefix] = [new_child]
else:
new_child = NameScope(prefix + "_%d" % len(self._children[prefix]),
self)
self._children[prefix].append(new_child)
return new_child
def parent(self):
return self._parent
def name(self):
return self._name
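# Illustrative sketch (assumed usage): repeated children with the same prefix get a
# numeric suffix, which is what produces scopes like '/s1/' and '/s1_1/' in the
# name_scope() example further below.
#
#   >>> root = NameScope()
#   >>> root.child("s1").name()
#   's1'
#   >>> root.child("s1").name()
#   's1_1'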
_name_scope = NameScope()
@signature_safe_contextmanager
def name_scope(prefix=None):
"""
:api_attr: Static Graph
Generate hierarchical name prefix for the operators.
Note:
This should only used for debugging and visualization purpose.
Don't use it for serious analysis such as graph/program transformations.
Args:
prefix(str, optional): prefix. Default is none.
Examples:
.. code-block:: python
import paddle.fluid as fluid
with fluid.name_scope("s1"):
a = fluid.data(name='data', shape=[None, 1], dtype='int32')
b = a + 1
with fluid.name_scope("s2"):
c = b * 1
with fluid.name_scope("s3"):
d = c / 1
with fluid.name_scope("s1"):
f = fluid.layers.pow(d, 2.0)
with fluid.name_scope("s4"):
g = f - 1
# Op are created in the default main program.
for op in fluid.default_main_program().block(0).ops:
# elementwise_add is created in /s1/
if op.type == 'elementwise_add':
assert op.desc.attr("op_namescope") == '/s1/'
# elementwise_mul is created in '/s1/s2'
elif op.type == 'elementwise_mul':
assert op.desc.attr("op_namescope") == '/s1/s2/'
# elementwise_div is created in '/s1/s3'
elif op.type == 'elementwise_div':
assert op.desc.attr("op_namescope") == '/s1/s3/'
# elementwise_sum is created in '/s4'
elif op.type == 'elementwise_sub':
assert op.desc.attr("op_namescope") == '/s4/'
# pow is created in /s1_1/
elif op.type == 'pow':
assert op.desc.attr("op_namescope") == '/s1_1/'
"""
# TODO(panyx0718): Only [0-9a-z].
# in dygraph we don't need namescope since it will cause mem leak
if in_dygraph_mode():
yield
else:
assert prefix, "namescope prefix can not be empty."
global _name_scope
_name_scope = _name_scope.child(prefix)
try:
yield
finally:
_name_scope = _name_scope.parent()
def _full_name_scope():
global _name_scope
scope = _name_scope
name = ""
while scope:
name = scope.name() + "/" + name
scope = scope.parent()
return name
def generate_control_dev_var_name():
import random
return CONTROL_DEP_VAR_PREFIX + "@" + str(random.random())
def grad_var_name(var_name):
"""
Returns:
str: gradient name for a certain var name
"""
return var_name + GRAD_VAR_SUFFIX
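# Small illustrative note (the concrete suffix comes from core.kGradVarSuffix(),
# which is typically "@GRAD"): grad_var_name("fc_0.w_0") would then give
# "fc_0.w_0@GRAD".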
def convert_np_dtype_to_dtype_(np_dtype):
"""
Convert the data type in numpy to the data type in Paddle
Args:
np_dtype(np.dtype): the data type in numpy.
Returns:
core.VarDesc.VarType: the data type in Paddle.
"""
dtype = np.dtype(np_dtype)
if dtype == np.float32:
return core.VarDesc.VarType.FP32
elif dtype == np.float64:
return core.VarDesc.VarType.FP64
elif dtype == np.float16:
return core.VarDesc.VarType.FP16
elif dtype == np.int32:
return core.VarDesc.VarType.INT32
elif dtype == np.int16:
return core.VarDesc.VarType.INT16
elif dtype == np.int64:
return core.VarDesc.VarType.INT64
elif dtype == np.bool:
return core.VarDesc.VarType.BOOL
elif dtype == np.uint16:
return core.VarDesc.VarType.INT16
elif dtype == np.uint8:
return core.VarDesc.VarType.UINT8
elif dtype == np.int8:
return core.VarDesc.VarType.INT8
else:
raise ValueError("Not supported numpy dtype %s" % dtype)
def dtype_is_floating(dtype):
"""
Check the data type is floating or not.
Args:
dtype(np.dtype|core.VarDesc.VarType): data type.
Could be numpy format or Paddle format
Returns(bool): True if data type is a float value
"""
if not isinstance(dtype, core.VarDesc.VarType):
dtype = convert_np_dtype_to_dtype_(dtype)
return dtype in [
core.VarDesc.VarType.FP16, core.VarDesc.VarType.FP32,
core.VarDesc.VarType.FP64
]
def _debug_string_(proto, throw_on_error=True):
"""
Get the debug string of a protobuf message. The message could be not
initialized.
Args:
proto(google.protobuf.message.Message): The protobuf message
throw_on_error(bool): True if raise an error when the protobuf message
is not initialized.
Returns(str): The debug string of the protobuf message
"""
error_fields = list()
if not proto.IsInitialized(error_fields) and throw_on_error:
raise ValueError("{0} are not initialized.\nThe message is {1}:\n".
format(error_fields, proto))
return proto.__str__()
def _varbase_creator(type=core.VarDesc.VarType.LOD_TENSOR,
name=None,
shape=None,
dtype=None,
persistable=None,
**kwargs):
if dtype is not None:
if not isinstance(dtype, core.VarDesc.VarType):
dtype = convert_np_dtype_to_dtype_(dtype)
return core.VarBase(dtype if dtype else core.VarDesc.VarType.FP32,
list(shape) if shape else [], name, type
if type else core.VarDesc.VarType.LOD_TENSOR, True
if persistable else False)
class VariableMetaClass(type):
@classmethod
def __instancecheck__(cls, instance):
t = type(instance)
if in_dygraph_mode():
return issubclass(t, core.VarBase)
else:
return issubclass(t, Variable)
class ParameterMetaClass(VariableMetaClass):
@classmethod
def __instancecheck__(cls, instance):
t = type(instance)
if in_dygraph_mode():
return issubclass(t, ParamBase)
else:
return issubclass(t, Parameter)
def _getitem_impl_(var, item):
"""
Slice the variable.
Args:
item(int/slice/tuple) : the index.
Returns:
Sliced variable
"""
if not isinstance(item, tuple):
item = [item]
decrease_axis = []
slice_axis = []
slice_start = []
slice_end = []
slice_step = []
use_strided_slice = False
reverse_axis = []
target_block = default_main_program().current_block()
def fill_constant(shape, value, force_cpu=False, out=None):
var.block.append_op(
type='fill_constant',
inputs={},
outputs={'Out': [out]},
attrs={
'shape': shape,
'dtype': out.dtype,
'value': float(value),
'force_cpu': force_cpu
})
out.stop_gradient = True
return out
for dim, slice_item in enumerate(item):
if isinstance(slice_item, slice):
start = slice_item.start
end = slice_item.stop
step = slice_item.step
if start is None and end is None and step is None:
continue
if step is None:
step = 1
if start is None and end is None:
assert (step == -1)
reverse_axis.append(dim)
continue
if start is None:
start = 0
if end is None:
end = 10000000
if step != 1:
use_strided_slice = True
slice_axis.append(dim)
slice_start.append(start)
slice_end.append(end)
slice_step.append(step)
else:
decrease_axis.append(dim)
slice_axis.append(dim)
slice_start.append(slice_item)
slice_step.append(1)
if isinstance(slice_item, Variable):
temp_1 = var.block.create_var(dtype=slice_item.dtype)
fill_constant([1], 1, force_cpu=True, out=temp_1)
temp_end = target_block.create_var(dtype=slice_item.dtype)
target_block.append_op(
type='elementwise_add',
inputs={'X': slice_item,
'Y': temp_1},
outputs={'Out': temp_end},
attrs={'axis': -1})
slice_end.append(temp_end)
else:
slice_end.append(slice_item + 1
if slice_item != -1 else 10000000)
def contain_var(one_list):
for ele in one_list:
if isinstance(ele, Variable):
return True
return False
def get_new_list_tensor(old_list):
new_list_tensor = []
for dim in old_list:
if isinstance(dim, Variable):
dim.stop_gradient = True
new_list_tensor.append(dim)
else:
assert (isinstance(dim, int))
temp_out = var.block.create_var(dtype='int32')
fill_constant([1], dim, force_cpu=True, out=temp_out)
new_list_tensor.append(temp_out)
return new_list_tensor
inputs = {'Input': [var]}
attrs = {
'axes': slice_axis,
'starts': [],
'ends': [],
'decrease_axis': decrease_axis
}
if (use_strided_slice == True):
attrs['strides'] = []
infer_flags = list(1 for i in range(len(slice_axis)))
# starts
if contain_var(slice_start):
inputs['StartsTensorList'] = get_new_list_tensor(slice_start)
for i, dim in enumerate(slice_start):
if isinstance(dim, Variable):
attrs['starts'].append(-1)
infer_flags[i] = -1
else:
attrs['starts'].append(dim)
else:
attrs['starts'] = slice_start
# ends
if contain_var(slice_end):
inputs['EndsTensorList'] = get_new_list_tensor(slice_end)
for i, dim in enumerate(slice_end):
if isinstance(dim, Variable):
attrs['ends'].append(-1)
infer_flags[i] = -1
else:
attrs['ends'].append(dim)
else:
attrs['ends'] = slice_end
# strides
if use_strided_slice == True:
if contain_var(slice_step):
inputs['StridesTensorList'] = get_new_list_tensor(slice_step)
for i, dim in enumerate(slice_step):
if isinstance(dim, Variable):
attrs['strides'].append(-1)
infer_flags[i] = -1
else:
attrs['strides'].append(dim)
else:
attrs['strides'] = slice_step
# infer_flags
attrs['infer_flags'] = infer_flags
out = var
if use_strided_slice == False and len(slice_axis) > 0:
# append slice_op here
slice_out_var = target_block.create_var(
name=unique_name.generate_with_ignorable_key(var.name + "_slice"),
dtype=var.dtype)
target_block.append_op(
type="slice",
inputs=inputs,
outputs={'Out': [slice_out_var]},
attrs=attrs)
out = slice_out_var
elif use_strided_slice == True and len(slice_axis) > 0:
strided_slice_out_var = target_block.create_var(
name=unique_name.generate_with_ignorable_key(var.name +
"_strided_slice"),
dtype=var.dtype)
target_block.append_op(
type="strided_slice",
inputs=inputs,
outputs={'Out': [strided_slice_out_var]},
attrs=attrs)
out = strided_slice_out_var
if len(reverse_axis) > 0:
reverse_out_var = target_block.create_var(
name=unique_name.generate_with_ignorable_key(var.name +
"_slice_reverse"),
dtype=var.dtype)
target_block.append_op(
type="reverse",
inputs={'X': out},
outputs={'Out': [reverse_out_var]},
attrs={'axis': reverse_axis})
out = reverse_out_var
return out
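# Hedged sketch of what this helper backs (static-graph slicing on a Variable; the
# names and shapes below are assumptions for illustration):
#
#   x = fluid.data(name="x", shape=[3, 4, 5], dtype="float32")
#   y = x[1:3]    # unit step: a slice op is appended
#   z = x[0:3:2]  # non-unit step with explicit bounds: strided_slice is appended
#   w = x[::-1]   # start/end omitted with step -1: handled by a reverse op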
@six.add_metaclass(VariableMetaClass)
class Variable(object):
"""
**Notes**:
**The constructor of Variable should not be invoked directly.**
**In Static Graph Mode: Please use** `Block.create_var` **to create a Static variable which has no data until being feed.**
**In Dygraph Mode: Please use** :ref:`api_fluid_dygraph_to_variable` **to create a dygraph variable with real data**
In Fluid, every input and output of an OP is a variable. In most
cases, variables are used for holding different kinds of data or training
    labels. A variable belongs to a :ref:`api_guide_Block_en` . Every variable has its own name, and
two variables in different :ref:`api_guide_Block_en` could have the same name.
There are many kinds of variables. Each kind of them has its own attributes
and usages. Please refer to the `framework.proto <https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/framework/framework.proto>`_ for details.
    Most of a Variable's member variables can be set to None, which means
    the value is not available or will be specified later.
Examples:
In Static Graph Mode:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
new_variable = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
In `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ Mode:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
with fluid.dygraph.guard():
new_variable = fluid.dygraph.to_variable(np.arange(10))
"""
def __init__(self,
block,
type=core.VarDesc.VarType.LOD_TENSOR,
name=None,
shape=None,
dtype=None,
lod_level=None,
capacity=None,
persistable=None,
error_clip=None,
stop_gradient=False,
is_data=False,
need_check_feed=False,
belong_to_optimizer=False,
**kwargs):
self.block = block
if name is None:
name = unique_name.generate('_generated_var')
if dtype is not None:
if not isinstance(dtype, core.VarDesc.VarType):
dtype = convert_np_dtype_to_dtype_(dtype)
self.belong_to_optimizer = belong_to_optimizer
self.error_clip = error_clip
is_new_var = False
name = cpt.to_text(name)
self.desc = self.block.desc.find_var(cpt.to_bytes(name))
if self.desc is None:
self.desc = self.block.desc.var(cpt.to_bytes(name))
is_new_var = True
if is_new_var:
self.desc.set_type(type)
elif self.desc.type() != type:
raise ValueError("Variable {0} has been created before. The "
"previous type is {1}; the new type is {2}. They"
" are not matched".format(self.name,
self.desc.type(), type))
if shape is not None:
if is_new_var:
self.desc.set_shape(shape)
else:
old_shape = self.shape
shape = tuple(shape)
if shape != old_shape:
raise ValueError(
"Variable {0} has been created before. the previous "
"shape is {1}; the new shape is {2}. They are not "
"matched.".format(self.name, old_shape, shape))
if dtype is not None:
if is_new_var:
self.desc.set_dtype(dtype)
else:
old_dtype = self.dtype
if dtype != old_dtype:
raise ValueError("Variable {0} has been created before. "
"The previous data type is {1}; the new "
"data type is {2}. They are not "
"matched.".format(self.name, old_dtype,
dtype))
if lod_level is not None:
if is_new_var:
self.desc.set_lod_level(lod_level)
else:
if lod_level != self.lod_level:
raise ValueError("Variable {0} has been created before. "
"The previous lod_level is {1}; the new "
"lod_level is {2}. They are not "
"matched".format(self.name, self.lod_level,
lod_level))
if persistable is not None:
if is_new_var:
self.desc.set_persistable(persistable)
else:
if persistable != self.persistable:
raise ValueError(
"Variable {0} has been created before."
"The previous persistable is {1}; the new "
"persistable is {2}. They are not matched".format(
self.name, self.persistable, persistable))
if need_check_feed and is_new_var:
self.desc.set_need_check_feed(need_check_feed)
if capacity is not None:
if is_new_var:
self.desc.set_capacity(capacity)
else:
# TODO(abhinavarora) : Compare with set capacity once,
# get_capacity is implemented
pass
self.block.vars[name] = self
self.op = None
self._stop_gradient = stop_gradient
self.is_data = is_data
@fake_interface_only
def detach(self):
"""
**Notes**:
**This API is ONLY available in Dygraph mode**
Returns a new Variable, detached from the current graph.
Returns:
( :ref:`api_guide_Variable_en` | dtype is same as current Variable): The detached Variable.
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.dygraph import Linear
import numpy as np
data = np.random.uniform(-1, 1, [30, 10, 32]).astype('float32')
with fluid.dygraph.guard():
linear = Linear(32, 64)
data = to_variable(data)
x = linear(data)
y = x.detach()
"""
pass
@fake_interface_only
def numpy(self):
"""
**Notes**:
**This API is ONLY available in Dygraph mode**
Returns a numpy array shows the value of current :ref:`api_guide_Variable_en`
Returns:
ndarray: The numpy value of current Variable.
Returns type:
ndarray: dtype is same as current Variable
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.dygraph import Linear
import numpy as np
data = np.random.uniform(-1, 1, [30, 10, 32]).astype('float32')
with fluid.dygraph.guard():
linear = Linear(32, 64)
data = to_variable(data)
x = linear(data)
print(x.numpy())
"""
pass
@fake_interface_only
def set_value(self, value):
"""
**Notes**:
**This API is ONLY available in Dygraph mode**
Set a new value for this Variable.
Args:
value (Variable|np.ndarray): the new value.
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.dygraph import Linear
import numpy as np
data = np.ones([3, 1024], dtype='float32')
with fluid.dygraph.guard():
linear = fluid.dygraph.Linear(1024, 4)
t = to_variable(data)
linear(t) # call with default weight
custom_weight = np.random.randn(1024, 4).astype("float32")
linear.weight.set_value(custom_weight) # change existing weight
out = linear(t) # call with different weight
"""
pass
@fake_interface_only
def backward(self, backward_strategy=None):
"""
**Notes**:
**This API is ONLY available in Dygraph mode**
Run backward of current Graph which starts from current Variable
Args:
backward_strategy( :ref:`api_fluid_dygraph_BackwardStrategy` ): The Backward Strategy to run backward
Returns:
NoneType: None
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
x = np.ones([2, 2], np.float32)
with fluid.dygraph.guard():
inputs2 = []
for _ in range(10):
tmp = fluid.dygraph.base.to_variable(x)
                        # if we don't set tmp's stop_gradient to False, no path to the
                        # loss will have a gradient, since nothing would require one.
tmp.stop_gradient=False
inputs2.append(tmp)
ret2 = fluid.layers.sums(inputs2)
loss2 = fluid.layers.reduce_sum(ret2)
backward_strategy = fluid.dygraph.BackwardStrategy()
backward_strategy.sort_sum_gradient = True
loss2.backward(backward_strategy)
"""
pass
@fake_interface_only
def gradient(self):
"""
**Notes**:
**This API is ONLY available in Dygraph mode**
Get the Gradient of Current Variable
Returns:
            ndarray or tuple of ndarray: if the Variable's type is LoDTensor, returns the numpy value of the gradient of the current Variable; if the Variable's type is SelectedRows, returns a tuple of ndarrays whose first element is the numpy value of the gradient and whose second element is the numpy value of the rows of the current Variable.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
# example1: return ndarray
x = np.ones([2, 2], np.float32)
with fluid.dygraph.guard():
inputs2 = []
for _ in range(10):
tmp = fluid.dygraph.base.to_variable(x)
tmp.stop_gradient=False
inputs2.append(tmp)
ret2 = fluid.layers.sums(inputs2)
loss2 = fluid.layers.reduce_sum(ret2)
backward_strategy = fluid.dygraph.BackwardStrategy()
backward_strategy.sort_sum_gradient = True
loss2.backward(backward_strategy)
print(loss2.gradient())
# example2: return tuple of ndarray
with fluid.dygraph.guard():
embedding = fluid.dygraph.Embedding(
size=[20, 32],
param_attr='emb.w',
is_sparse=True)
x_data = np.arange(12).reshape(4, 3).astype('int64')
x_data = x_data.reshape((-1, 3, 1))
x = fluid.dygraph.base.to_variable(x_data)
out = embedding(x)
out.backward()
print(embedding.weight.gradient())
"""
pass
@fake_interface_only
def clear_gradient(self):
"""
**Notes**:
**1. This API is ONLY available in Dygraph mode**
        **2. Use it only when the Variable has a gradient; normally we use this for Parameters, since other temporary Variables will be deleted by Python's GC**
Clear (set to ``0`` ) the Gradient of Current Variable
Returns: None
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
x = np.ones([2, 2], np.float32)
with fluid.dygraph.guard():
inputs2 = []
for _ in range(10):
tmp = fluid.dygraph.base.to_variable(x)
tmp.stop_gradient=False
inputs2.append(tmp)
ret2 = fluid.layers.sums(inputs2)
loss2 = fluid.layers.reduce_sum(ret2)
backward_strategy = fluid.dygraph.BackwardStrategy()
backward_strategy.sort_sum_gradient = True
loss2.backward(backward_strategy)
print(loss2.gradient())
loss2.clear_gradient()
print("After clear {}".format(loss2.gradient()))
"""
pass
def __str__(self):
return self._to_readable_code()
def _to_readable_code(self):
"""
Get readable debug string of Variable.
.. note::
If you want to get the debug string in protobuf format,
please use :code:`to_string` method.
Returns:
string: The formatted Variable string.
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
new_variable = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
print(new_variable._to_readable_code())
"""
        if self.type == core.VarDesc.VarType.SELECTED_ROWS or self.type == core.VarDesc.VarType.LOD_TENSOR:
            var_str = "{name} : fluid.{type}.shape{shape}.astype({dtype})".\
                format(name=self.name, type=self.type, shape=self.shape, dtype=self.dtype)
        else:
            var_str = "{name} : fluid.{type}".\
                format(name=self.name, type=self.type)
if type(self) == Parameter:
if self.trainable:
var_str = "trainable param " + var_str
else:
var_str = "param " + var_str
else:
var_str = "var " + var_str
if self.persistable:
var_str = "persist " + var_str
return var_str
def to_string(self, throw_on_error, with_details=False):
"""
Get debug string.
Args:
            throw_on_error (bool): whether to raise an exception when self is not initialized.
            with_details (bool): more details about variables and parameters (e.g. trainable, optimize_attr, ...) will be printed when with_details is True. Default value is False.
Returns:
str: The debug string.
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
new_variable = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
print(new_variable.to_string(True))
print("=============with detail===============")
print(new_variable.to_string(True, True))
"""
assert isinstance(throw_on_error, bool) and isinstance(with_details,
bool)
protostr = self.desc.serialize_to_string()
proto = framework_pb2.VarDesc.FromString(six.binary_type(protostr))
res_str = _debug_string_(proto, throw_on_error)
if with_details:
additional_attr = ("error_clip", "stop_gradient")
for attr_name in additional_attr:
res_str += "%s: %s\n" % (attr_name,
cpt.to_text(getattr(self, attr_name)))
return res_str
__repr__ = __str__
@property
def stop_gradient(self):
"""
        Indicating whether gradient computation is stopped at the current Variable
**Notes: This Property has default value as** ``True`` **in** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode, while Parameter's default value is False. However, in Static Graph Mode all Variable's default stop_gradient value is** ``False``
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
with fluid.dygraph.guard():
value0 = np.arange(26).reshape(2, 13).astype("float32")
value1 = np.arange(6).reshape(2, 3).astype("float32")
value2 = np.arange(10).reshape(2, 5).astype("float32")
linear = fluid.Linear(13, 5, dtype="float32")
linear2 = fluid.Linear(3, 3, dtype="float32")
a = fluid.dygraph.to_variable(value0)
b = fluid.dygraph.to_variable(value1)
c = fluid.dygraph.to_variable(value2)
out1 = linear(a)
out2 = linear2(b)
out1.stop_gradient = True
out = fluid.layers.concat(input=[out1, out2, c], axis=1)
out.backward()
assert linear.weight.gradient() is None
assert (out1.gradient() == 0).all()
"""
return self._stop_gradient
@stop_gradient.setter
def stop_gradient(self, s):
self._stop_gradient = s
@property
def persistable(self):
"""
        Indicating whether the current Variable should be kept alive long-term (persistable)
        **Notes: This Property will be deprecated; this API exists only to help users understand the concept**
            **1. persistable is** ``False`` **for all Variables except Parameters.**
**2. In** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode, this property should not be changed**
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
new_variable = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
print("persistable of current Var is: {}".format(new_variable.persistable))
"""
return self.desc.persistable()
@persistable.setter
def persistable(self, p):
self.desc.set_persistable(p)
@property
def name(self):
"""
Indicating name of current Variable
        **Notes: If two or more Variables share the same name in the same** :ref:`api_guide_Block_en` **, they will share content in non-** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode. This is how Parameter sharing is achieved**
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
new_variable = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
print("name of current Var is: {}".format(new_variable.name))
"""
return cpt.to_text(self.desc.name())
@property
def grad_name(self):
"""
Indicating name of the gradient Variable of current Variable.
**Notes: This is a read-only property. It simply returns name of
gradient Variable from a naming convention but doesn't guarantee
the gradient exists.**
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name="x", shape=[-1, 23, 48], dtype='float32')
print(x.grad_name) # output is "x@GRAD"
"""
return self.name + "@GRAD"
@name.setter
def name(self, new_name):
self.desc.set_name(new_name)
@property
def shape(self):
"""
Indicating shape of current Variable
**Notes: This is a read-only property**
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
new_variable = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
print("shape of current Var is: {}".format(new_variable.shape))
"""
# convert to tuple, make it as same as numpy API.
return tuple(self.desc.shape())
@property
def dtype(self):
"""
Indicating data type of current Variable
**Notes: This is a read-only property**
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
new_variable = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
print("Dtype of current Var is: {}".format(new_variable.dtype))
"""
return self.desc.dtype()
@property
def lod_level(self):
"""
Indicating ``LoD`` info of current Variable, please refer to :ref:`api_fluid_LoDTensor_en` to check the meaning
of ``LoD``
**Notes**:
**1. This is a read-only property**
            **2. This property is not supported in** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode; its value should be** ``0(int)``
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
new_variable = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
print("LoD Level of current Var is: {}".format(new_variable.lod_level))
"""
if self.type == core.VarDesc.VarType.SELECTED_ROWS:
raise Exception("SelectedRows DO NOT supprt lod")
return self.desc.lod_level()
@property
def type(self):
"""
Indicating Type of current Variable
**Notes: This is a read-only property**
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
new_variable = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
print("Type of current Var is: {}".format(new_variable.type))
"""
return self.desc.type()
def _set_error_clip(self, error_clip):
"""
Set the error_clip.
Args:
error_clip(BaseErrorClipAttr) : The new error_clip.
Returns:
None
"""
self.error_clip = error_clip
def _set_info(self, key, value):
"""
Set key-value information for this variable.
Args:
key(str): Key for this information.
value(object): The value associated to the key.
Returns:
None
"""
if not hasattr(self, "_info"):
self._info = {}
self._info[key] = value
def _get_info(self, key):
"""
Get the information of this variable corresponding to key.
Args:
key(str): Key for this information.
Returns:
object
"""
if hasattr(self, "_info") and key in self._info:
return self._info[key]
return None
def _slice_indices(self, slice, length):
"""
Reference implementation for the slice.indices method.
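        Examples:
            A small illustrative sketch: this helper mirrors the behavior of
            Python's built-in ``slice.indices``; the numbers below are made up.
            .. code-block:: python
                # the built-in returns the same (start, stop, step) triple
                # that this reference implementation computes
                s = slice(None, None, 2)
                print(s.indices(10))  # (0, 10, 2)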
"""
# Compute step and length as integers.
step = 1 if slice.step is None else slice.step
# Raise ValueError for negative length or zero step.
if length < 0:
raise ValueError("length should not be negative")
if step == 0:
raise ValueError("slice step can not be zero")
# Find lower and upper bounds for start and stop.
lower = -1 if step < 0 else 0
upper = length - 1 if step < 0 else length
# Compute start.
if slice.start is None:
start = upper if step < 0 else lower
else:
start = slice.start
start = max(start + length, lower) if start < 0 else min(start,
upper)
# Compute stop.
if slice.stop is None:
stop = lower if step < 0 else upper
else:
stop = slice.stop
stop = max(stop + length, lower) if stop < 0 else min(stop, upper)
return start, stop, step
def _detectEllipsis(self, item):
has_ellipsis = False
start = 0
end = len(self.shape)
for index, o in enumerate(item):
if o is Ellipsis:
if has_ellipsis:
raise ValueError("Index can have one ellipsis only.")
has_ellipsis = True
start = index
else:
if has_ellipsis:
end = index
return has_ellipsis, start, end
def _reconstructSliceinfo(self, item):
has_ellipsis, start, end = self._detectEllipsis(item)
if has_ellipsis:
newitem = []
for i in range(start):
newitem.append(item[i])
for i in range(start, end):
newitem.append(slice(None, None, None))
for i in range(end, len(item)):
newitem.append(item[i])
return newitem
else:
return None
def _detectContinuesSlice(self, item):
starts = []
ends = []
for index, o in enumerate(item):
if isinstance(o, int):
start = int(o)
                if (start > 0 and start >= self.shape[index]) \
                        or (start < 0 and (start + self.shape[index]) < 0):
raise IndexError("invalid index")
start = max(start + self.shape[index], 0) if start < 0 else min(
start, self.shape[index])
starts.append(start)
ends.append(start + 1)
elif isinstance(o, slice):
start, stop, step = self._slice_indices(o, self.shape[index])
if step == 1 or step == -1:
starts.append(start)
ends.append(stop)
else:
return False, None
else:
raise IndexError("Valid index accept int or slice or ellipsis")
return True, [starts, ends]
def _cloneVar(self, copy=False):
if not copy:
return self.block.create_var(
name=unique_name.generate_with_ignorable_key(self.name),
dtype=self.dtype)
else:
return self
def _sliceVar(self, axes, starts, ends):
new_var = self._cloneVar()
self.block.append_op(
type="slice",
inputs={'Input': [self]},
outputs={'Out': [new_var]},
attrs={'axes': axes,
'starts': starts,
'ends': ends})
return new_var
def _concatVar(self, inputs, axis):
new_var = self._cloneVar()
self.block.append_op(
type="concat",
inputs={'X': inputs},
outputs={'Out': [new_var]},
attrs={'axis': axis, })
return new_var
def _sliceAndConcatVar(self, item, axis):
if isinstance(item, slice):
if self.shape[axis] < 0:
return self._cloneVar(True)
start, stop, step = self._slice_indices(item, self.shape[axis])
if step == 1:
return self._sliceVar([axis], [start], [stop])
else:
vars = []
if step > 0:
while start < stop:
vars.append(
self._sliceVar([axis], [start], [start + 1]))
start += step
else:
while start > stop:
vars.append(
self._sliceVar([axis], [start], [start + 1]))
start += step
return self._concatVar(vars, axis)
elif isinstance(item, int):
if self.shape[axis] < 0:
return self._cloneVar(True)
index = int(item)
if (index > 0 and index >= self.shape[axis]) \
or (index < 0 and (index + self.shape[axis]) < 0):
raise IndexError("invalid index")
return self._sliceVar([axis], [index], [index + 1])
else:
raise IndexError("Valid index accept int or slice or tuple")
def __getitem__(self, item):
return _getitem_impl_(self, item)
def get_all_op_protos():
"""
Get all registered op proto from PaddlePaddle C++ end.
Returns:
list: list of OpProto.
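    Examples:
        A minimal sketch; it only iterates the returned protos and assumes
        nothing beyond this module's own API.
        .. code-block:: python
            from paddle.fluid import framework
            protos = framework.get_all_op_protos()
            # each element is a framework_pb2.OpProto describing one registered op
            print(len(protos) > 0)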
"""
protostrs = core.get_all_op_protos()
ret_values = []
for pbstr in protostrs:
op_proto = framework_pb2.OpProto.FromString(six.binary_type(pbstr))
ret_values.append(op_proto)
return ret_values
class ComplexVariable(object):
"""
The Variable defined on the complex number domain. It contains two common
real number Variables as its members, :attr:`real` and :attr:`imag`
holding the real part and imaginary part of complex numbers respectively.
**Notes**:
**The constructor of ComplexVariable should not be invoked directly.**
**Only support dygraph mode at present. Please use** :ref:`api_fluid_dygraph_to_variable` **to create a dygraph ComplexVariable with complex number data.**
Args:
real (Variable): The Variable holding real-part data.
        imag (Variable): The Variable holding imaginary-part data.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
a = np.array([1.0+2.0j, 0.2])
with fluid.dygraph.guard():
var = fluid.dygraph.to_variable(a, name="new_var")
print(var.name, var.dtype, var.shape)
# ({'real': u'new_var.real', 'imag': u'new_var.imag'}, 'complex128', [2L])
print(var.numpy())
# [1. +2.j 0.2+0.j]
"""
def __init__(self, real, imag):
assert real.shape == imag.shape, "The real part and imaginary part " \
"of a ComplexVariable should have the same shape!"
assert real.dtype == imag.dtype, "The real part and imaginary part " \
"of a ComplexVariable should have the same data type!"
self.real = real
self.imag = imag
if self.real.dtype in [
core.VarDesc.VarType.FP16, core.VarDesc.VarType.FP32
]:
self._dtype = "complex64"
else:
self._dtype = "complex128"
self._shape = self.real.shape
@property
def dtype(self):
return self._dtype
@property
def shape(self):
return self._shape
@property
def name(self):
return {"real": self.real.name, "imag": self.imag.name}
@name.setter
def name(self, name):
# rename
if isinstance(name, str):
self.real.name = name + ".real"
self.imag.name = name + ".imag"
elif (isinstance(name, tuple) or isinstance(name,
list)) and len(name) == 2:
self.real.name, self.imag.name = name[0], name[1]
else:
raise ValueError(
"An invalid name assigned to the ComplexVariable, "
"which must be a string, or a tuple or a list with length 2!")
def numpy(self):
return self.real.numpy() + 1j * self.imag.numpy()
def __str__(self):
return "REAL: " + self.real.__str__() + "IMAG: " + self.imag.__str__()
__repr__ = __str__
class OpProtoHolder(object):
"""
A global variable to hold all OpProtos from C++ as a map
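    Examples:
        A minimal sketch of the singleton usage; "sum" is just one example of
        a registered operator type.
        .. code-block:: python
            from paddle.fluid.framework import OpProtoHolder
            # "sum" is an illustrative, registered op type
            proto = OpProtoHolder.instance().get_op_proto("sum")
            print(proto.type)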
"""
@classmethod
def instance(cls):
if not hasattr(cls, '_instance'):
cls._instance = cls()
return cls._instance
def __init__(self):
assert not hasattr(
self.__class__,
'_instance'), 'Please use `instance()` to get OpProtoHolder object!'
op_protos = get_all_op_protos()
self.op_proto_map = {}
for proto in op_protos:
self.op_proto_map[proto.type] = proto
def get_op_proto(self, type):
"""
Get OpProto by a type string.
Args:
type(str): The type that operator registered in C++ side.
Returns(framework_pb2.OpProto): The OpProto
"""
if type not in self.op_proto_map:
raise ValueError("Operator \"%s\" has not been registered." % type)
return self.op_proto_map[type]
def update_op_proto(self):
op_protos = get_all_op_protos()
for proto in op_protos:
if proto.type not in self.op_proto_map:
self.op_proto_map[proto.type] = proto
@staticmethod
def generated_op_attr_names():
return {
core.op_proto_and_checker_maker.kOpRoleAttrName(),
core.op_proto_and_checker_maker.kOpRoleVarAttrName(),
core.op_proto_and_checker_maker.kOpNameScopeAttrName(),
core.op_proto_and_checker_maker.kOpCreationCallstackAttrName(),
core.op_proto_and_checker_maker.kOpDeviceAttrName()
}
class Operator(object):
"""
    In Fluid, all operations are represented by Operators, and an Operator
    is regarded as a built-in instruction of a Block. Users can use these
    built-in instructions to describe their neural networks.
Args:
block(Block): The block has the current operator.
desc(core.OpDesc): The protobuf description of Operator.
type(str): The type of operator. Default None.
        inputs(dict): The inputs of this Operator. It is a dictionary: for every
            element, the key is the input parameter name, and the value is a list of
            variables. Default None.
        outputs(dict): The outputs of this Operator. It is a dictionary: for
            every element, the key is the output parameter name, and the value is a list
            of variables. Default None.
        attrs(dict): The attributes of this Operator. It is a dictionary: for
            every element, the key is the attribute name, and the value is the attribute value.
            The attribute type should be the same as the type registered in the C++ side.
            Default None.
Returns:
Operator: The initialized Operator.
Raises:
        ValueError: If the passed inputs, outputs and attrs don't match those
            registered for the initializing Operator in the C++ side.
Notes:
The constructor of operator should not be invoked directly. Use
Block.append_op or Block._prepend_op instead.
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
            # create the variables used below so the example is complete
            var1 = cur_block.create_var(name="var1", shape=[-1, 23, 48], dtype='float32')
            var2 = cur_block.create_var(name="var2", shape=[-1, 23, 48], dtype='float32')
            var3 = cur_block.create_var(name="var3", shape=[-1, 23, 48], dtype='float32')
            # var1 += var2 + var3
            cur_block.append_op(type="sum",
                                inputs={"X": [var1, var2, var3]},
                                outputs={"Out": [var1]})
"""
OP_WITHOUT_KERNEL_SET = {
'feed', 'fetch', 'recurrent', 'go', 'rnn_memory_helper_grad',
'conditional_block', 'while', 'send', 'recv', 'listen_and_serv',
'fl_listen_and_serv', 'ncclInit', 'select', 'checkpoint_notify',
'gen_nccl_id', 'c_gen_nccl_id', 'c_comm_init', 'c_sync_calc_stream',
'c_sync_comm_stream', 'queue_generator', 'dequeue', 'enqueue'
}
def __init__(self,
block,
desc,
type=None,
inputs=None,
outputs=None,
attrs=None):
if in_dygraph_mode():
if type is None:
raise ValueError(
"`type` to initialized an Operator can not be None.")
self._type = type
self.attrs = attrs if attrs else {}
else:
self.block = block
self.desc = desc
# note: not add self.attrs here:
# https://github.com/PaddlePaddle/Paddle/pull/12583#pullrequestreview-145093173
op_attrs = attrs
if op_attrs is None:
op_attrs = dict()
del attrs
op_maker = core.op_proto_and_checker_maker
if op_maker.kOpRoleAttrName() not in op_attrs:
op_attrs[op_maker.kOpRoleAttrName(
)] = self.block.program._op_role
role_var_name = op_maker.kOpRoleVarAttrName()
if len(self.block.program.
_op_role_var) != 0 and role_var_name not in op_attrs:
op_attrs[role_var_name] = self.block.program._op_role_var
if role_var_name in op_attrs and len(op_attrs[role_var_name]) == 0:
del op_attrs[role_var_name]
if len(self.desc.type()) != 0:
return
if type is None:
raise ValueError(
"`type` to initialized an Operator can not be None.")
else:
callstack_var_name = op_maker.kOpCreationCallstackAttrName()
op_attrs[callstack_var_name] = list(
reversed(traceback.format_stack()))[1:]
self.desc.set_type(type)
proto = OpProtoHolder.instance().get_op_proto(type)
namescope_var_name = op_maker.kOpNameScopeAttrName()
op_attrs[namescope_var_name] = _full_name_scope()
# set device for op with kernels, give warning for op without kernels
# when force_cpu and device_guard are used at the same time, a warning will be given.
# TODO(zhangting2020): when force_cpu is removed, clear warning below.
if _current_device is not None:
if self._has_kernel(type):
op_device = op_maker.kOpDeviceAttrName()
op_attrs[op_device] = _current_device
else:
warnings.warn("The Op(%s) is not support to set device." %
type)
if 'force_cpu' in op_attrs:
                if (type == 'less_than' and op_attrs['force_cpu'] is not None
                    ) or op_attrs['force_cpu'] != False:
warnings.warn(
"The Attr(force_cpu) of Op(%s) will be deprecated in the future, "
"please use 'device_guard' instead. 'device_guard' has higher priority when they are "
"used at the same time." % type)
def find_name(var_list, name):
for var_name in var_list:
if var_list[var_name] is not None and var_name == name:
return True
return False
if inputs is not None:
for in_proto in proto.inputs:
found = find_name(inputs, in_proto.name)
assert found or in_proto.dispensable, "Input {} not found".format(
in_proto.name)
if found:
in_args = inputs[in_proto.name]
if not isinstance(in_args, list):
in_args = [in_args]
if not in_proto.duplicable and len(in_args) > 1:
raise ValueError(
"Input %s expects only one input, but %d are given."
% (in_proto.name, len(in_args)))
in_arg_names = []
for index, arg in enumerate(in_args):
if isinstance(arg, six.string_types):
in_arg_names.append(arg)
elif isinstance(arg, six.binary_type):
in_arg_names.append(arg.decode())
elif isinstance(arg, (Variable, core.VarBase)):
in_arg_names.append(cpt.to_text(arg.name))
else:
raise TypeError(
"The type of '%s' in operator %s should be "
"one of [basestring(), str, Varibale] in python2, "
"or one of [str, bytes, Variable] in python3."
"but received : %s" %
(in_proto.name, type, arg))
self.desc.set_input(in_proto.name, in_arg_names)
else:
self.desc.set_input(in_proto.name, [])
if outputs is not None:
for m in proto.outputs:
if (m.name not in outputs) and m.dispensable:
continue
if not ((m.name in outputs) or m.dispensable):
raise ValueError(("Incorrect setting for output(s) of "
"operator \"%s\", should set: [%s].")
% (type, m.name))
for out_proto in proto.outputs:
if out_proto.name not in outputs:
continue
out_args = outputs[out_proto.name]
if not isinstance(out_args, list):
out_args = [out_args]
if not out_proto.duplicable and len(out_args) > 1:
raise ValueError(
"Output %s expects only one output, but %d are given."
% (out_proto.name, len(out_args)))
out_arg_names = []
for arg in out_args:
out_arg_names.append(cpt.to_text(arg.name))
# TODO(minqiyang): could we remove variable's op in static mode?
if not in_dygraph_mode():
arg.op = self
self.desc.set_output(out_proto.name, out_arg_names)
if op_attrs is not None:
if not isinstance(op_attrs, dict):
raise TypeError("'attrs' should be a dict.")
for attr in proto.attrs:
attr_name = attr.name
if (attr_name not in op_attrs) or (
op_attrs[attr_name] is None):
continue
attr_val = op_attrs[attr_name]
self._update_desc_attr(attr_name, attr_val)
self.desc.check_attrs()
if self._has_kernel(type):
self.desc.infer_var_type(self.block.desc)
self.desc.infer_shape(self.block.desc)
def _has_kernel(self, op_type):
return op_type not in self.OP_WITHOUT_KERNEL_SET
def to_string(self, throw_on_error):
"""
Get debug string.
Args:
throw_on_error(bool): Whether to raise exception if self is not
initialized.
Returns:
str: The debug string.
"""
protostr = self.desc.serialize_to_string()
proto = framework_pb2.OpDesc.FromString(six.binary_type(protostr))
return _debug_string_(proto, throw_on_error)
def _to_readable_code(self, skip_op_callstack=True):
"""
Get readable debug string of Operator.
.. note::
If you want to get the debug string in protobuf format,
please use :code:`to_string` method.
Args:
skip_op_callstack(bool): whether to skip parsing Operator's attribute
op_callstack, default value is True
Returns:
string: The formatted Operator string.
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
var = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
new_op = cur_block.append_op(type="abs",
inputs={"X": [var]},
outputs={"Out": [var]})
print(new_op._to_readable_code())
"""
        assert isinstance(
            skip_op_callstack, bool
        ), "skip_op_callstack parameter's type is wrong, expected bool, received {}".format(
            type(skip_op_callstack))
outputs_str = "{"
for i in range(0, len(self.output_names)):
outputs_str += "{name}=".format(name=self.output_names[i])
o = self.output(self.output_names[i])
outputs_str += "{value}".format(value=o)
if i != len(self.output_names) - 1:
outputs_str += ", "
outputs_str += "}"
inputs_str = "{"
for i in range(0, len(self.input_names)):
inputs_str += "{name}=".format(name=self.input_names[i])
o = self.input(self.input_names[i])
inputs_str += "{value}".format(value=o)
if i != len(self.input_names) - 1:
inputs_str += ", "
inputs_str += "}"
attr_names = sorted(self.attr_names)
attrs_str = ""
for i in range(0, len(attr_names)):
name = attr_names[i]
if skip_op_callstack and name == "op_callstack":
continue
attr_type = self.desc.attr_type(name)
if attr_type == core.AttrType.BLOCK:
a = "{name} = block[{value}]".format(
name=name, type=attr_type, value=self._block_attr_id(name))
attrs_str += a
if i != len(attr_names) - 1:
attrs_str += ", "
continue
if attr_type == core.AttrType.BLOCKS:
a = "{name} = blocks{value}".format(
name=name,
type=attr_type,
value=self._blocks_attr_ids(name))
attrs_str += a
if i != len(attr_names) - 1:
attrs_str += ", "
continue
a = "{name} = {value}".format(
name=name, type=attr_type, value=self.desc.attr(name))
attrs_str += a
if i != len(attr_names) - 1:
attrs_str += ", "
if outputs_str != "{}":
op_str = "{outputs} = {op_type}(inputs={inputs}, {attrs})".\
format(outputs = outputs_str, op_type=self.type, inputs=inputs_str, attrs=attrs_str)
else:
op_str = "{op_type}(inputs={inputs}, {attrs})".\
format(op_type=self.type, inputs=inputs_str, attrs=attrs_str)
return op_str
def __str__(self):
return self._to_readable_code()
__repr__ = __str__
@property
def type(self):
return self.desc.type()
def input(self, name):
"""
Get the input arguments according to the input parameter name.
Args:
name(str): The input parameter name.
Returns:
            list: the list of argument names associated with \
                the specific parameter name.
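        Examples:
            A minimal sketch; it reuses the ``abs`` pattern shown elsewhere in
            this file, and the variable name "X" is illustrative only.
            .. code-block:: python
                import paddle.fluid as fluid
                cur_program = fluid.Program()
                cur_block = cur_program.current_block()
                var = cur_block.create_var(name="X", shape=[-1, 23, 48], dtype='float32')
                op = cur_block.append_op(type="abs",
                                         inputs={"X": [var]},
                                         outputs={"Out": [var]})
                print(op.input("X"))  # ['X']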
"""
return self.desc.input(name)
def _rename_input(self, old_name, new_name):
"""
Rename the `old_name` to `new_name`.
Args:
old_name(str): The old name of the Operator's input.
new_name(str): The new name of the Operator's input.
Returns:
None
"""
self.desc._rename_input(old_name, new_name)
def _rename_output(self, old_name, new_name):
"""
Rename the `old_name` to `new_name`.
Args:
old_name(str): The old name of the Operator's output.
new_name(str): The new name of the Operator's output.
Returns:
None
"""
self.desc._rename_output(old_name, new_name)
@property
def input_names(self):
return self.desc.input_names()
@property
def input_arg_names(self):
return self.desc.input_arg_names()
@property
def output_arg_names(self):
return self.desc.output_arg_names()
def output(self, name):
"""
Get output arguments by the output parameter name.
Args:
name(str): The output parameter name.
Returns:
list: return the list of argument names associated with \
the specific parameter name.
"""
return self.desc.output(name)
@property
def output_names(self):
return self.desc.output_names()
@property
def idx(self):
for i, op in enumerate(self.block.ops):
if op == self:
return i
        raise ValueError(
            "Can't find op itself in its block. It could be a bug of Paddle.")
def has_attr(self, name):
"""
Whether this Operator has the attribute with name or not.
Args:
name(str): the attribute name.
Returns:
bool: True if has this attribute.
"""
return self.desc.has_attr(name)
def attr_type(self, name):
"""
Get the type of attribute by attribute's name.
Args:
name(str): the attribute name.
Returns:
core.AttrType: the attribute type.
"""
return self.desc.attr_type(name)
def _set_attr(self, name, val):
"""
Set the value of attribute by attribute's name.
Args:
name(str): the attribute name.
val(bool|int|str|float|list): the value of the attribute.
Raises:
ValueError: If the type of value doesn't match with desc.attr_type(name).
"""
self._update_desc_attr(name, val)
def _remove_attr(self, name):
self.desc.remove_attr(name)
def _update_desc_attr(self, name, val):
"""
Update the value of desc's attribute by attribute's name.
Args:
name(str): the attribute name.
val(bool|int|str|float|list): the value of the attribute.
Raises:
ValueError: If the type of value doesn't match with desc.attr_type(name).
"""
if isinstance(val, Block):
self.desc.set_block_attr(name, val.desc)
elif isinstance(val, list) and val and all(
isinstance(v, Block) for v in val):
self.desc.set_blocks_attr(name, [v.desc for v in val])
elif isinstance(val, core.BlockDesc) or \
isinstance(val, core.ProgramDesc):
self.desc.set_serialized_attr(name, val.serialize_to_string())
else:
self.desc._set_attr(name, val)
@property
def attr_names(self):
return self.desc.attr_names()
def attr(self, name):
"""
Get the attribute by name.
Args:
name(str): the attribute name.
Returns:
bool|int|str|float|list: The attribute value. The return value
can be any valid attribute type.
"""
return self.desc.attr(name)
def _block_attr_id(self, name):
"""
Get the block attribute's id by name.
Args:
name(str): the attribute name.
Returns:
int: the block index.
"""
return self.desc._block_attr_id(name)
def _block_attr(self, name):
"""
Get the block attribute by name.
Args:
name(str): the attribute name.
Returns:
block: the block attribute.
"""
id = self._block_attr_id(name)
assert (id >= 0 and id < len(self.block.program.blocks))
return self.block.program.blocks[id]
def _blocks_attr(self, name):
"""
Get the blocks attribute by name.
Args:
name(str): the attribute name.
Returns:
list: list of the blocks attribute.
"""
attrs = []
for i in self._blocks_attr_ids(name):
assert (i >= 0 and i < len(self.block.program.blocks))
attrs.append(self.block.program.blocks[i])
return attrs
def _blocks_attr_ids(self, name):
"""
Get the blocks attribute's ids by name.
Args:
name(str): the attribute name.
Returns:
list: list of the blocks ids.
"""
return self.desc._blocks_attr_ids(name)
def all_attrs(self):
"""
Get the attribute dict.
Returns:
dict: The Operator's attribute dict, name->attr.
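        Examples:
            A minimal sketch; it reuses the ``abs`` pattern shown elsewhere in
            this file and does not assert the exact framework-added attribute names.
            .. code-block:: python
                import paddle.fluid as fluid
                cur_program = fluid.Program()
                cur_block = cur_program.current_block()
                var = cur_block.create_var(name="X", shape=[-1, 23, 48], dtype='float32')
                op = cur_block.append_op(type="abs",
                                         inputs={"X": [var]},
                                         outputs={"Out": [var]})
                # a dict mapping attribute name -> attribute value
                print(op.all_attrs())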
"""
attr_names = self.attr_names
attr_map = {}
for n in attr_names:
attr_type = self.desc.attr_type(n)
if attr_type == core.AttrType.BLOCK:
attr_map[n] = self._block_attr(n)
continue
if attr_type == core.AttrType.BLOCKS:
attr_map[n] = self._blocks_attr(n)
continue
attr_map[n] = self.attr(n)
return attr_map
def _is_optimize_op(self):
op_maker = core.op_proto_and_checker_maker
OPTIMIZE = core.op_proto_and_checker_maker.OpRole.Optimize
op_role = self.desc.attr(op_maker.kOpRoleAttrName())
if op_role & int(OPTIMIZE):
return True
else:
return False
class Block(object):
"""
    In Fluid, a Program consists of multiple Blocks, and a Block stores
    VarDescs and OpDescs. Within a specific Block, a VarDesc has a unique name.
    A block can have child blocks, and a child block's name scope
    inherits the parent's, so that an OpDesc in a child block can reference
    a VarDesc stored in the parent block.
Please reference the framework.proto for details.
Args:
program(Program): The Program that the Block belongs to.
idx(int): The block's id in the Program.
Notes:
The constructor of Block should not be invoked directly. Please
use `Program._create_block()` to create a block.
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
var = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
cur_block.append_op(type="abs",
inputs={"X": [var]},
outputs={"Out": [var]})
"""
def __init__(self, program, idx):
self.desc = program.desc.block(idx)
self.vars = collections.OrderedDict() # var_name --> var
self.ops = list() # operator list
self.program = program
self.removed_vars = collections.OrderedDict()
def __str__(self):
return self._to_readable_code()
def _to_readable_code(self, skip_op_callstack=True):
"""
Get readable debug string of Block.
.. note::
If you want to get the debug string in protobuf format,
please use :code:`to_string` method.
Args:
skip_op_callstack(bool): whether to skip parsing Operator's attribute
op_callstack, default value is True
Returns:
string: The formatted Block string.
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
new_var = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
new_op = cur_block.append_op(type="abs",
inputs={"X": [new_var]},
outputs={"Out": [new_var]})
print(cur_block._to_readable_code())
"""
        assert isinstance(
            skip_op_callstack, bool
        ), "skip_op_callstack parameter's type is wrong, expected bool, received {}".format(
            type(skip_op_callstack))
block_str = "{ // block "
block_str += "{}\n".format(self.idx)
for var in list(self.vars.values()):
block_str += " {}\n".format(var._to_readable_code())
block_str += "\n"
for op in self.ops:
block_str += " {}\n".format(
op._to_readable_code(skip_op_callstack))
block_str += "}"
return block_str
def to_string(self, throw_on_error, with_details=False):
"""
Get debug string.
Args:
throw_on_error(bool): raise exception when self is not initialized
when throw_on_error is True.
with_details(bool): more details about variables and parameters
(e.g. trainable, optimize_attr, ...) will be printed when
with_details is True. Default False.
Returns:
str: The debug string.
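        Examples:
            A minimal sketch, mirroring the ``Variable.to_string`` example
            earlier in this file.
            .. code-block:: python
                import paddle.fluid as fluid
                cur_program = fluid.Program()
                cur_block = cur_program.current_block()
                print(cur_block.to_string(True))
                print("=============with detail===============")
                print(cur_block.to_string(True, True))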
"""
assert isinstance(throw_on_error, bool) and isinstance(with_details,
bool)
if with_details:
re_add_indent = re.compile(r"\n(.)")
res_str = "blocks {\n idx: %d\n parent_idx: %d" % (
self.idx, self.parent_idx)
for var in list(self.vars.values()):
res_str += "\n vars {\n %s }" % re_add_indent.sub(
r"\n \1", var.to_string(throw_on_error, with_details))
for op in self.ops:
res_str += "\n ops {\n %s }" % re_add_indent.sub(
r"\n \1", op.to_string(throw_on_error))
res_str += "\n}"
else:
protostr = self.desc.serialize_to_string()
proto = framework_pb2.BlockDesc.FromString(
six.binary_type(protostr))
res_str = _debug_string_(proto, throw_on_error)
return res_str
__repr__ = __str__
@property
def parent_idx(self):
return self.desc.parent
@property
def forward_block_idx(self):
return self.desc.get_forward_block_idx()
def _set_forward_block_idx(self, idx):
"""
Set the forward block Idx.
Args:
idx(int): the block index.
Returns:
None
"""
self.desc._set_forward_block_idx(idx)
@property
def backward_block_idx(self):
cur_block_idx = self.idx
for block in self.program.blocks:
if block.forward_block_idx == cur_block_idx:
return block.idx
return -1
@property
def idx(self):
return self.desc.id
def var(self, name):
"""
Get a Variable by name from this block.
Args:
name(str): the Variable's name.
Raises:
            ValueError: If the input's type is not str, or this block
                doesn't have a Variable with the given name.
        Returns:
            Variable: the Variable with the given name.
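        Examples:
            A minimal sketch; the variable name "X" is illustrative only.
            .. code-block:: python
                import paddle.fluid as fluid
                cur_program = fluid.Program()
                cur_block = cur_program.current_block()
                new_variable = cur_block.create_var(name="X",
                                                    shape=[-1, 23, 48],
                                                    dtype='float32')
                assert cur_block.var("X") is new_variable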
"""
if not isinstance(name, six.string_types):
raise TypeError(
"var require string as parameter, but get %s instead." %
(type(name)))
v = self.vars.get(name, None)
if v is None:
raise ValueError("var %s not in this block" % name)
return v
def _find_var_recursive(self, name):
"""
Get a Variable by name from this block recursively.
Args:
name(str): the Variable's name.
Returns:
            Variable: the Variable with the given name, or None if not found.
"""
frontier = list()
visited = set()
frontier.append(self)
prog = self.program
while len(frontier) != 0: # BFS
cur = frontier[0]
frontier = frontier[1:]
if id(cur) in visited:
continue
if cur.has_var(name):
return cur.var(name)
if cur.parent_idx != -1:
frontier.append(prog.block(cur.parent_idx))
if cur.forward_block_idx != -1:
frontier.append(prog.block(cur.forward_block_idx))
visited.add(id(cur))
return None
def _var_recursive(self, name):
"""
Get a Variable by name from this block recursively.
Args:
name(str): the Variable's name.
Raises:
            ValueError: if neither this block nor its parent blocks
                have a Variable with the given name.
        Returns:
            Variable: the Variable with the given name.
"""
var = self._find_var_recursive(name)
if var:
return var
else:
raise ValueError("Var {0} is not found recursively".format(name))
def all_parameters(self):
return list(self.iter_parameters())
def iter_parameters(self):
return (item[1] for item in six.iteritems(self.vars)
if isinstance(item[1], Parameter))
def create_var(self, *args, **kwargs):
if in_dygraph_mode():
var = _varbase_creator(*args, **kwargs)
else:
var = Variable(block=self, *args, **kwargs)
if 'initializer' in kwargs:
kwargs['initializer'](var, self)
return var
def has_var(self, name):
return name in self.vars
def _rename_var(self, name, new_name):
"""
Rename variable in vars and ops' inputs and outputs
Args:
            name(str): the name of the variable to be renamed.
            new_name(str): the new name to rename to.
        Raises:
            ValueError: If this block doesn't have a variable with the given name,
                or the type of the var with the given name is not Parameter
                or Variable.
        Returns:
            Variable: the Variable with the given name.
"""
name = cpt.to_text(name)
new_name = cpt.to_text(new_name)
if not self.has_var(name):
raise ValueError("var %s is not in current block" % name)
v = self.var(name)
if type(v) == Parameter:
var_type = "Parameter"
stop_gradient = v.stop_gradient
trainable = v.trainable
optimize_attr = v.optimize_attr
regularizer = v.regularizer
error_clip = v.error_clip
elif type(v) == Variable:
var_type = "Variable"
error_clip = v.error_clip
stop_gradient = v.stop_gradient
else:
raise ValueError("unsupported var type: %s", type(v))
orig_var_type = v.type
self.desc._rename_var(cpt.to_bytes(name), cpt.to_bytes(new_name))
# NOTE: v is destroyed by C++ after calling _rename_var.
d = self.desc.find_var(cpt.to_bytes(new_name))
if var_type == "Parameter":
if in_dygraph_mode():
var = ParamBase(
d.shape(),
d.dtype(),
type=orig_var_type,
name=new_name,
stop_gradient=stop_gradient,
trainable=trainable,
optimize_attr=optimize_attr,
regularizer=regularizer,
error_clip=error_clip)
else:
var = Parameter(
self,
d.shape(),
d.dtype(),
type=orig_var_type,
name=new_name,
stop_gradient=stop_gradient,
trainable=trainable,
optimize_attr=optimize_attr,
regularizer=regularizer,
error_clip=error_clip)
elif var_type == "Variable":
var = Variable(
self,
type=orig_var_type,
name=new_name,
error_clip=error_clip,
stop_gradient=stop_gradient)
# rename the python side, _sync_with_cpp will only add
# new vars/ops to python side.
self.vars[new_name] = var
del self.vars[name]
self._sync_with_cpp()
return var
def _remove_var(self, name):
self._sync_with_cpp()
self.desc._remove_var(cpt.to_bytes(name))
del self.vars[name]
def create_parameter(self, *args, **kwargs):
global_block = self.program.global_block()
param = None
if in_dygraph_mode():
param = ParamBase(*args, **kwargs)
else:
param = Parameter(global_block, *args, **kwargs)
if 'initializer' in kwargs:
def _is_inited_by(block, var):
init_ops = []
for op in block.ops:
if var.name in op.output_arg_names:
                        # In startup_program, "c_broadcast" and "c_sync_comm_stream"
                        # would otherwise be treated as initialization ops and cause
                        # an error, so treat them as a special case here.
if op.type in ["c_broadcast", "c_sync_comm_stream"]:
continue
init_ops.append(op)
return init_ops
initializer = kwargs['initializer']
init_ops = _is_inited_by(global_block, param)
init_ops_len = len(init_ops)
if init_ops_len > 1:
raise RuntimeError("param " + param.name +
" is inited by multiple init ops " + str(
init_ops))
elif init_ops_len == 1:
# TODO already inited, do nothing, should log a warning
pass
else:
initializer(param, self)
param.stop_gradient = False
return param
def append_op(self, *args, **kwargs):
"""
        Appends a new Operator according to the given arguments.
        Returns:
            Operator: the appended Operator.
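        Examples:
            A minimal sketch, using the same ``abs`` pattern as the Block
            docstring above.
            .. code-block:: python
                import paddle.fluid as fluid
                cur_program = fluid.Program()
                cur_block = cur_program.current_block()
                var = cur_block.create_var(name="X",
                                           shape=[-1, 23, 48],
                                           dtype='float32')
                new_op = cur_block.append_op(type="abs",
                                             inputs={"X": [var]},
                                             outputs={"Out": [var]})
                print(new_op.type)  # 'abs'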
"""
if in_dygraph_mode():
attrs = kwargs.get("attrs", {})
type = kwargs.get("type", None)
op = Operator(
block=self,
desc=None,
type=type,
inputs=None,
outputs=None,
attrs=attrs)
# record ops in tracer rather than blocks
#
# TODO(minqiyang): add op stop_gradient support in static mode too.
# currently, we only support stop_gradient in dygraph mode.
_dygraph_tracer().trace_op(type,
kwargs.get("inputs", {}),
kwargs.get("outputs", {}), attrs
if attrs else {},
kwargs.get("stop_gradient", False))
else:
op_desc = self.desc.append_op()
op = Operator(
block=self,
desc=op_desc,
type=kwargs.get("type", None),
inputs=kwargs.get("inputs", None),
outputs=kwargs.get("outputs", None),
attrs=kwargs.get("attrs", None))
self.ops.append(op)
return op
def _insert_op(self, index, *args, **kwargs):
"""
        Insert an Operator according to the given arguments.
        Args:
            index(int): the position at which to insert the operator.
        Returns:
            Operator: the inserted Operator.
"""
self._sync_with_cpp()
op_desc = self.desc._insert_op(index)
op = Operator(block=self, desc=op_desc, *args, **kwargs)
self.ops.insert(index, op)
return op
def _remove_op(self, index):
"""
        Remove the operator at the specified position.
        Args:
            index(int): the position of the operator to remove.
Returns:
None
"""
self._sync_with_cpp()
self.desc._remove_op(index, index + 1)
del self.ops[index]
def _slice_ops(self, start, end):
"""
        Return the Operators between start and end.
Args:
start(int): the start position.
end(int): the end position.
Returns:
list: the Operators between start and end.
"""
return self.ops[start:end]
def _prepend_op(self, *args, **kwargs):
if in_dygraph_mode():
type = kwargs.get("type", None)
attrs = kwargs.get("attrs", {})
op = Operator(
self, None, type=type, inputs=None, outputs=None, attrs=attrs)
_dygraph_tracer().trace_op(type,
kwargs.get("inputs", {}),
kwargs.get("outputs", {}), attrs
if attrs else {},
kwargs.get("stop_gradient", False))
else:
op_desc = self.desc._prepend_op()
op = Operator(
self,
op_desc,
type=kwargs.get("type", None),
inputs=kwargs.get("inputs", None),
outputs=kwargs.get("outputs", None),
attrs=kwargs.get("attrs", None))
self.ops.insert(0, op)
return op
def _sync_with_cpp(self):
"""
Sync from the desc on the c++ end. This method is used to synchronize
the c++ desc instance generated by backward.
"""
# sync variables from cpp
for var in self.desc.all_vars():
if not self.has_var(var.name()):
self.create_var(name=var.name(), desc=var, type=var.type())
# sync variables removed from c++ end
for var in list(self.vars.keys()):
if not self.desc.find_var(cpt.to_bytes(var)):
self.vars.pop(var)
# sync operators from cpp
ops_in_cpp = []
for op_idx in range(0, self.desc.op_size()):
ops_in_cpp.append(self.desc.op(op_idx))
if len(self.ops) != 0:
first_op_in_python = self.ops[0].desc
last_op_in_python = self.ops[len(self.ops) - 1].desc
start_index = None
end_index = None
for index in range(len(ops_in_cpp)):
if first_op_in_python == ops_in_cpp[index]:
start_index = index
if last_op_in_python == ops_in_cpp[index]:
end_index = index
assert start_index is not None
assert end_index is not None
assert start_index <= end_index
else:
start_index = 0
end_index = -1
# sync ops append to the head of cpp_ops
for index in range((start_index - 1 - 1), -1, -1):
op_desc = ops_in_cpp[index]
op = Operator(self, op_desc)
self.ops.insert(0, op)
# sync ops append to the end of cpp_ops
for index in range((end_index + 1), len(ops_in_cpp)):
op_desc = ops_in_cpp[index]
op = Operator(self, op_desc)
self.ops.append(op)
# sync ops removed from c++ end
if end_index != -1 and end_index < len(self.ops):
ops_in_cpp_index = 0
ops_in_python_index = 0
while ops_in_python_index < len(
self.ops) and ops_in_cpp_index < len(ops_in_cpp):
if self.ops[ops_in_python_index].desc != ops_in_cpp[
ops_in_cpp_index]:
del self.ops[ops_in_python_index]
else:
ops_in_cpp_index += 1
ops_in_python_index += 1
assert len(self.ops) == len(ops_in_cpp)
for index in range(len(self.ops)):
assert self.ops[index].desc == ops_in_cpp[index]
def _copy_param_info_from(self, other):
"""
Copy the information of parameters from the other block.
Args:
other(Block): the other block.
Raises:
            TypeError: If the type of input is not Block, or `other` and this
                block are not in the same topology.
Returns:
None
"""
if not isinstance(other, Block):
raise TypeError(
"_copy_param_info_from should be invoked with Block")
for p in other.iter_parameters():
assert isinstance(p, Parameter)
v = self.vars.get(p.name, None)
if v is None:
# if the Parameter is pruned, v may be None
continue
assert isinstance(v, Variable)
new_p = None
if in_dygraph_mode():
new_p = ParamBase(
shape=v.shape,
dtype=v.dtype,
type=v.type,
lod_level=v.lod_level,
stop_gradient=p.stop_gradient,
trainable=p.trainable,
optimize_attr=p.optimize_attr,
regularizer=p.regularizer,
error_clip=p.error_clip,
name=v.name)
else:
new_p = Parameter(
block=self,
shape=v.shape,
dtype=v.dtype,
type=v.type,
lod_level=v.lod_level,
stop_gradient=p.stop_gradient,
trainable=p.trainable,
optimize_attr=p.optimize_attr,
regularizer=p.regularizer,
error_clip=p.error_clip,
name=v.name)
self.vars[new_p.name] = new_p
def _clone_variable(self, var, force_persistable=True):
"""
Clone a variable into current block.
Args:
var: the variable to be cloned.
            force_persistable(bool): True means the result variable is forced to be persistable.
                False means the result variable keeps the same persistable flag as the input var.
                Default: True.
Returns:
Variable: the new variable cloned from 'var' in current block.
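        Examples:
            A minimal sketch; it clones a variable from one program's global
            block into another's, and the name "X" is illustrative only.
            .. code-block:: python
                import paddle.fluid as fluid
                src_program = fluid.Program()
                dst_program = fluid.Program()
                src_var = src_program.global_block().create_var(
                    name="X", shape=[-1, 23, 48], dtype='float32')
                cloned = dst_program.global_block()._clone_variable(src_var)
                print(cloned.name, cloned.shape)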
"""
assert isinstance(var, Variable)
ret_var = None
# make STEP_SCOPES var can be safely cloned.
if var.type == core.VarDesc.VarType.STEP_SCOPES:
ret_var = self.create_var(
name=var.name, persistable=var.persistable, type=var.type)
elif var.type == core.VarDesc.VarType.RAW:
ret_var = self.create_var(
name=var.name, persistable=var.persistable, type=var.type)
elif var.type == core.VarDesc.VarType.SELECTED_ROWS:
ret_var = self.create_var(
name=var.name,
shape=var.shape,
dtype=var.dtype,
type=var.type,
persistable=True if force_persistable else var.persistable,
is_data=var.is_data,
need_check_feed=var.desc.need_check_feed())
else:
ret_var = self.create_var(
name=var.name,
shape=var.shape,
dtype=var.dtype,
type=var.type,
lod_level=var.lod_level,
persistable=True if force_persistable else var.persistable,
is_data=var.is_data,
need_check_feed=var.desc.need_check_feed())
return ret_var
class IrNode(object):
"""
Python IrNode. Beneath it is a core.Node, which is used for Ir Pass.
"""
def __init__(self, node):
"""
Construct an IrNode using core.Node.
Args:
node(core.Node): C++ Node.
"""
assert isinstance(node,
core.Node), 'node must be the instance of core.Node.'
self.node = node
def name(self):
"""
Return the node name.
Returns:
str: node name.
"""
return self.node.name()
def node_type(self):
"""
Return the node type.
Returns:
core.Node.Type: node type(core.Node.Type.Operation or core.Node.Type.Variable).
"""
return self.node.node_type()
def var(self):
"""
Return the node variable description.
Returns:
core.VarDesc: node variable description.
"""
return self.node.var()
def op(self):
"""
Return the node operator description.
Returns:
core.OpDesc: node operator description.
"""
return self.node.op()
def id(self):
"""
Return the node id.
Returns:
int: node id.
"""
return self.node.id()
def is_op(self):
"""
If the node is an operator, then return true.
Returns:
bool: indicate whether the node is an operator.
"""
return self.node.is_op()
def is_var(self):
"""
If the node is a variable, then return true.
Returns:
bool: indicate whether the node is a variable.
"""
return self.node.is_var()
def is_ctrl_var(self):
"""
If the node is a control dependence variable, then return true.
Returns:
bool: indicate whether the node is a control dependence variable.
"""
return self.node.is_ctrl_var()
def clear_inputs(self):
"""
Clear the node inputs. After executing the `clear_inputs` function,
the node inputs will be empty.
"""
self.node.clear_inputs()
def remove_input_by_id(self, node_id):
"""
Remove a node from inputs by the given node id.
Args:
node_id(int): the given node id.
"""
self.node.remove_input(node_id)
def remove_input(self, node):
"""
Remove a node from inputs.
Args:
node(IrNode): the node being removed.
"""
self.node.remove_input(node.node)
def append_input(self, node):
"""
Append a node in inputs.
Args:
node(IrNode): the node being appended.
"""
self.node.append_input(node.node)
def clear_outputs(self):
"""
Clear the node outputs. After executing the `clear_outputs` function,
the node outputs will be empty.
"""
self.node.clear_outputs()
def remove_output_by_id(self, node_id):
"""
Remove a node from outputs by the given node id.
Args:
node_id(int): the given node id.
"""
self.node.remove_output(node_id)
def remove_output(self, node):
"""
Remove a node from outputs.
Args:
node(IrNode): the node being removed.
"""
self.node.remove_output(node.node)
def append_output(self, node):
"""
Append a node in outputs.
Args:
node(IrNode): the node being appended.
"""
self.node.append_output(node.node)
@property
def inputs(self):
"""
Return the node inputs.
Returns:
list(IrNode): node inputs wrapped by IrNode.
"""
return [IrNode(n) for n in self.node.inputs]
@property
def outputs(self):
"""
Return the node outputs.
Returns:
list(IrNode): node outputs wrapped by IrNode.
"""
return [IrNode(n) for n in self.node.outputs]
class IrVarNode(IrNode):
"""
Python IrVarNode. Beneath it is a core.Node, it inherits from IrNode.
"""
def __init__(self, node):
"""
Construct an IrVarNode using core.Node.
Args:
node(core.Node): C++ Node.
"""
assert isinstance(node, core.Node) and node.is_var(), \
'node must be the instance of core.Node and it must be a variable node.'
super(IrVarNode, self).__init__(node)
self.node = node
def set_shape(self, shape):
"""
Set the node variable shape.
Args:
shape(list): shape to be set.
"""
assert self.node.var() is not None, \
"The node variable description can not be None."
self.node.var().set_shape(shape)
def persistable(self):
"""
If the variable node is a persistable variable, then return true.
Returns:
bool: indicate whether the variable is persistable.
"""
assert self.node.var() is not None, \
"The node variable description can not be None."
return self.node.var().persistable()
def type(self):
"""
Return the variable type.
Returns:
core.VarDesc.VarType: the variable type.
"""
assert self.node.var() is not None, \
"The node variable description can not be None."
return self.node.var().type()
def dtype(self):
"""
Return the variable data type.
Returns:
core.VarDesc.VarType: the variable data type.
"""
assert self.node.var() is not None, \
"The node variable description can not be None."
return self.node.var().dtype()
def shape(self):
"""
Return the variable shape.
Returns:
list: the variable shape.
"""
assert self.node.var() is not None, \
"The node variable description can not be None."
return self.node.var().shape()
@property
def inputs(self):
"""
Return the node inputs.
Returns:
list(IrOpNode): node inputs wrapped by IrOpNode.
"""
return [IrOpNode(n) for n in self.node.inputs]
@property
def outputs(self):
"""
Return the node outputs.
Returns:
list(IrOpNode): node outputs wrapped by IrOpNode.
"""
return [IrOpNode(n) for n in self.node.outputs]
class IrOpNode(IrNode):
"""
Python IrOpNode. Beneath it is a core.Node, it inherits from IrNode.
"""
def __init__(self, node):
"""
Construct an IrOpNode using core.Node.
Args:
node(core.Node): C++ Node.
"""
        assert isinstance(node, core.Node) and node.is_op(), \
            'node must be the instance of core.Node and it must be an operator node.'
super(IrOpNode, self).__init__(node)
self.node = node
def rename_input(self, old_input_name, new_input_name):
"""
Rename the input of this node.
Args:
old_input_name(str): the old input name.
new_input_name(str): the new input name.
"""
assert self.node.op() is not None, \
"The node operator description can not be None."
self.node.op()._rename_input(old_input_name, new_input_name)
def rename_output(self, old_output_name, new_output_name):
"""
Rename the output of this node.
Args:
old_output_name(str): the old output name.
new_output_name(str): the new output name.
"""
assert self.node.op() is not None, \
"The node operator description can not be None."
self.node.op()._rename_output(old_output_name, new_output_name)
def input(self, name):
"""
Get the argument name list by the parameter name for input.
Args:
name(str): the parameter name.
Returns:
list(str): the argument name list.
"""
assert self.node.op() is not None, \
"The node operator description can not be None."
return self.node.op().input(name)
def output(self, name):
"""
Get the argument name list by the parameter name for output.
Args:
name(str): the parameter name.
Returns:
list(str): the argument name list.
"""
assert self.node.op() is not None, \
"The node operator description can not be None."
return self.node.op().output(name)
def set_type(self, new_type):
"""
Change the operator type into new type.
Args:
new_type(str): new operator type to be set.
"""
assert self.node.op() is not None, \
"The node operator description can not be None."
return self.node.op().set_type(new_type)
def set_attr(self, name, val):
"""
Set the value of attribute by attribute's name.
Args:
name(str): the attribute name.
val(bool|int|str|float|list): the value of the attribute.
"""
self._update_desc_attr(name, val)
def _update_desc_attr(self, name, val):
"""
Update the value of the op desc's attribute by attribute's name.
"""
assert self.node.op() is not None, \
"The node operator description can not be None."
desc = self.node.op()
if isinstance(val, Block):
desc.set_block_attr(name, val.desc)
elif isinstance(val, list) and val and \
all(isinstance(v, Block) for v in val):
desc.set_blocks_attr(name, [v.desc for v in val])
elif isinstance(val, core.BlockDesc) or \
isinstance(val, core.ProgramDesc):
desc.set_serialized_attr(name, val.serialize_to_string())
else:
desc._set_attr(name, val)
def input_arg_names(self):
"""
Return input arguments' names of this op node.
Returns:
list(str): input arguments' names of this op node.
"""
assert self.node.op() is not None, \
"The node operator description can not be None."
return self.node.op().input_arg_names()
def output_arg_names(self):
"""
Return output arguments' names of this op node.
Returns:
list(str): output arguments' names of this op node.
"""
assert self.node.op() is not None, \
"The node operator description can not be None."
return self.node.op().output_arg_names()
@property
def inputs(self):
"""
Return the node inputs.
Returns:
list(IrVarNode): node inputs wrapped by IrVarNode.
"""
return [IrVarNode(n) for n in self.node.inputs]
@property
def outputs(self):
"""
Return the node outputs.
Returns:
list(IrVarNode): node outputs wrapped by IrVarNode.
"""
return [IrVarNode(n) for n in self.node.outputs]
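# Hedged usage sketch (not part of the original source): given an IrOpNode taken
# from an IrGraph (see the IrGraph class below), the accessors above can be used
# to inspect and edit an operator. The `graph` variable, the op type 'mul' and
# the attribute name 'use_mkldnn' are illustrative assumptions.
#
#   for op_node in graph.all_op_nodes():
#       if op_node.name() == 'mul':
#           print(op_node.input_arg_names(), op_node.output_arg_names())
#           # input('X') returns the argument names bound to parameter 'X'
#           print(op_node.input('X'))
#           # set_attr() dispatches through _update_desc_attr() above
#           op_node.set_attr('use_mkldnn', False)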
class IrGraph(object):
"""
Python IrGraph. Beneath it is a core.Graph, which is used for
creating a c++ Ir Pass Graph. An IrGraph is just a graph view of
a Program. In an IrGraph, both Variables and Operators are graph
nodes.
"""
def __init__(self, graph, for_test=False):
"""
Construct an IrGraph using core.Graph.
Args:
graph(core.Graph): C++ Graph.
for_test(bool): True for the test graph and false for the train graph.
"""
assert isinstance(
graph, core.Graph), 'graph must be the instance of core.Graph.'
self.graph = graph
self._for_test = for_test
def clone(self):
"""
Create a new and duplicated IrGraph.
Warns:
The method only clones the graph structure, not its attributes.
Returns:
IrGraph: A new and duplicated graph.
"""
g = self.graph.clone()
return IrGraph(g, self._for_test)
def is_test(self):
"""
If the graph is used for testing, the function returns true. Otherwise, returns false.
"""
return self._for_test
def all_nodes(self):
"""
Return all nodes included in the graph as a set.
"""
return {IrNode(node) for node in self.graph.nodes()}
def all_var_nodes(self):
"""
Return all variable nodes included in the graph as a set.
"""
return {IrVarNode(node) for node in self.graph.nodes() if node.is_var()}
def all_persistable_nodes(self):
"""
Return all persistable variable nodes included in the graph as a set.
"""
persistable_nodes = set()
for node in self.graph.nodes():
if node.is_var() and node.var() is not None and node.var(
).persistable():
persistable_nodes.add(node)
return {IrVarNode(p) for p in persistable_nodes}
def all_op_nodes(self):
"""
Return all operator nodes included in the graph as a set.
"""
return {IrOpNode(node) for node in self.graph.nodes() if node.is_op()}
def create_persistable_node(self, name, var_type, shape, var_dtype):
"""
        Create a persistable variable node in the graph. In IrGraph,
        persistable variables and parameters cannot be distinguished.
        Args:
            name(str): the name of the persistable variable node.
            var_type(core.VarDesc.VarType): the type of the persistable variable node.
            shape(list): the shape of the persistable variable node.
            var_dtype(core.VarDesc.VarType): the data type of the persistable variable node.
Returns:
IrVarNode: the created persistable variable node.
"""
var_desc = core.VarDesc(name)
var_desc.set_type(var_type)
var_desc.set_shape(shape)
var_desc.set_dtype(var_dtype)
var_desc.set_persistable(True)
return IrVarNode(self.graph.create_var_node(var_desc))
def create_var_node(self, name, var_type, shape, var_dtype):
"""
Create a variable node in the graph. The created variable node is
not persistable.
Args:
name(str): the name of the variable node.
            var_type(core.VarDesc.VarType): the type of the variable node.
shape(list): the shape of the variable node.
var_dtype(core.VarDesc.VarType): the data type of the variable node.
Returns:
IrVarNode: the created variable node.
"""
var_desc = core.VarDesc(name)
var_desc.set_type(var_type)
var_desc.set_shape(shape)
var_desc.set_dtype(var_dtype)
return IrVarNode(self.graph.create_var_node(var_desc))
def create_control_dep_var(self):
"""
        Create a control dependency variable node.
"""
return IrVarNode(self.graph.create_control_dep_var())
def create_var_node_from_desc(self, var_desc):
"""
        Create a variable node by using an existing VarDesc in the graph.
        Depending on the given VarDesc, the created variable node may be persistable.
        Args:
            var_desc(core.VarDesc): the given variable description.
Returns:
IrVarNode: the created variable node.
"""
return IrVarNode(self.graph.create_var_node(var_desc))
def create_op_node(self, op_type, attrs, inputs, outputs):
"""
        Create an operator node in the graph.
Args:
op_type(str): the type of the operator node.
attrs(dict): the attributes of the operator node.
inputs(dict): the inputs of the operator node.
outputs(dict): the outputs of the operator node.
Returns:
IrOpNode: the created operator node.
"""
op_desc = core.OpDesc()
op_desc.set_type(op_type)
for attr, value in six.iteritems(attrs):
self._update_desc_attr(op_desc, attr, value)
for input_name, var_nodes in six.iteritems(inputs):
if not isinstance(var_nodes, list):
var_nodes = [var_nodes]
op_desc.set_input(input_name,
[var_node.name() for var_node in var_nodes])
for output_name, var_nodes in six.iteritems(outputs):
if not isinstance(var_nodes, list):
var_nodes = [var_nodes]
op_desc.set_output(output_name,
[var_node.name() for var_node in var_nodes])
return IrOpNode(self.graph.create_op_node(op_desc))
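    # Hedged usage sketch (not part of the original source): creating a 'scale'
    # op node that consumes an existing variable node and writes a freshly
    # created output node. The variable `in_node`, the attribute values and the
    # variable names are illustrative assumptions.
    #
    #   out_node = graph.create_var_node(
    #       name='scaled_out',
    #       var_type=core.VarDesc.VarType.LOD_TENSOR,
    #       shape=[-1, 10],
    #       var_dtype=core.VarDesc.VarType.FP32)
    #   scale_op = graph.create_op_node(
    #       op_type='scale',
    #       attrs={'scale': 2.0, 'bias': 0.0, 'bias_after_scale': True},
    #       inputs={'X': in_node},
    #       outputs={'Out': out_node})
    #   # the new nodes still have to be wired together explicitly
    #   graph.link_to(in_node, scale_op)
    #   graph.link_to(scale_op, out_node)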
def create_op_node_from_desc(self, op_desc):
"""
        Create an operator node by using an existing OpDesc in the graph.
        Args:
            op_desc(core.OpDesc): the given operator description.
Returns:
IrOpNode: the created operator node.
"""
return IrOpNode(self.graph.create_op_node(op_desc))
def update_input_link(self, old_input_node, new_input_node, op_node):
"""
        Update the input link of an operator node.
        Args:
            old_input_node(IrNode): the old input node of the given op_node.
            new_input_node(IrNode): the new input node of the given op_node.
            op_node(IrOpNode): the operator node whose input link needs to be updated.
"""
assert old_input_node.node in self.graph.nodes() and new_input_node.node in \
self.graph.nodes() and op_node.node in self.graph.nodes(), \
'The three arguments(old_input_node&new_input_node&op_node) must be in the graph nodes.'
old_input_node.remove_output(op_node)
op_node.remove_input(old_input_node)
new_input_node.append_output(op_node)
op_node.append_input(new_input_node)
op_node.rename_input(old_input_node.name(), new_input_node.name())
def update_output_link(self, old_output_node, new_output_node, op_node):
"""
        Update the output link of an operator node.
        Args:
            old_output_node(IrNode): the old output node of the given op_node.
            new_output_node(IrNode): the new output node of the given op_node.
            op_node(IrOpNode): the operator node whose output link needs to be updated.
"""
assert old_output_node.node in self.graph.nodes() and new_output_node.node in \
self.graph.nodes() and op_node.node in self.graph.nodes(), \
'The three arguments(old_output_node &new_output_node &op_node) must be in the graph nodes.'
old_output_node.remove_input(op_node)
op_node.remove_output(old_output_node)
new_output_node.append_input(op_node)
op_node.append_output(new_output_node)
op_node.rename_output(old_output_node.name(), new_output_node.name())
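    # Hedged usage sketch (not part of the original source): rewiring an op to read
    # from a different variable node with update_input_link(). Both nodes and the op
    # must already be part of this graph; `some_op` and the variable names are
    # illustrative assumptions.
    #
    #   old_in = graph._find_node_by_name(some_op.inputs, 'conv2d_0.tmp_0')
    #   new_in = graph.create_var_node(
    #       name='conv2d_0.tmp_0.quantized',
    #       var_type=core.VarDesc.VarType.LOD_TENSOR,
    #       shape=old_in.shape(),
    #       var_dtype=old_in.dtype())
    #   graph.update_input_link(old_in, new_in, some_op)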
def link_to(self, node_in, node_out):
"""
Connect two nodes.
Args:
node_in(IrNode): the input node.
node_out(IrNode): the output node.
"""
assert node_in.node in self.graph.nodes() and node_out.node in self.graph.nodes(), \
'The two arguments(node_in&node_out) must be in the graph nodes.'
node_in.append_output(node_out)
node_out.append_input(node_in)
def safe_remove_nodes(self, remove_nodes):
"""
Remove nodes safely since links connected to these removed nodes are
also removed.
Args:
remove_nodes(set): the nodes prepared to be removed.
"""
if not isinstance(remove_nodes, set):
if isinstance(remove_nodes, Iterable):
remove_nodes = set(remove_nodes)
else:
remove_nodes = {remove_nodes}
original_nodes = {n.node for n in remove_nodes}
core.graph_safe_remove_nodes(self.graph, original_nodes)
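    # Hedged usage sketch (not part of the original source): dropping an op node and
    # its now-dangling output variables in one call. Passing any iterable works,
    # since the method normalizes it to a set; `op_node` is an illustrative assumption.
    #
    #   dead = {op_node}
    #   dead.update(op_node.outputs)
    #   graph.safe_remove_nodes(dead)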
def resolve_hazard(self):
ordered_nodes = core.topology_sort(self.graph)
var_nodes = dict()
for node in ordered_nodes:
if node.is_op() and node.op() is not None:
for each_var_name in node.op().input_arg_names():
if each_var_name not in var_nodes:
var_nodes[each_var_name] = [
self._find_node_by_name(node.inputs, each_var_name)
]
for each_var_name in node.op().output_arg_names():
if each_var_name not in var_nodes:
var_nodes[each_var_name] = [
self._find_node_by_name(node.outputs, each_var_name)
]
else:
var_nodes[each_var_name].append(
self._find_node_by_name(node.outputs,
each_var_name))
self.graph.resolve_hazard(var_nodes)
def has_circle(self):
"""
        Check if the graph contains a cycle.
        Returns:
            bool: True if the graph contains a cycle, otherwise False.
"""
return core.has_circle(self.graph)
def graph_num(self):
"""
        Count the number of disconnected subgraphs in this graph.
        Returns:
            int: the number of disconnected subgraphs.
"""
return core.graph_num(self.graph)
def topology_sort(self):
"""
Perform the topology sort operation on the graph.
        Notes: the `graph` must not contain a cycle.
Returns:
list(IrNode): nodes in topology order.
"""
ordered_nodes = core.topology_sort(self.graph)
return [IrNode(n) for n in ordered_nodes]
def build_adjacency_list(self):
"""
Build an adjacency list of operations for the `graph`.
Returns:
dict{IrNode: set(IrNode)}: the adjacency list.
"""
adj_list = core.build_adjacency_list(self.graph)
wrapped_adj_list = dict()
for k, v in six.iteritems(adj_list):
wrapped_adj_list[IrNode(k)] = {IrNode(n) for n in v}
return wrapped_adj_list
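    # Hedged usage sketch (not part of the original source): the structural queries
    # above are often used together to sanity-check a graph before running a pass.
    # The `graph` variable is an illustrative assumption.
    #
    #   assert not graph.has_circle()
    #   print('disconnected subgraphs:', graph.graph_num())
    #   for node in graph.topology_sort():
    #       print(node.name())
    #   adj = graph.build_adjacency_list()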
def draw(self, save_path, name, marked_nodes=None, remove_ctr_var=True):
"""
        Draw the graph. If the `dot` command is installed, the drawn graph
        is saved as a PDF file; otherwise a DOT file is saved.
Args:
save_path(str): the save path of drawn graph.
name(str): the name of drawn graph.
marked_nodes(set(IrNode)): nodes that are needed to be marked.
Default value is None.
remove_ctr_var(bool): If it is set True, all control variable nodes
in the graph will be removed. Default value is True.
"""
def _convert_to_pdf(dot_file_path):
pdf_save_path = os.path.splitext(dot_file_path)[0] + '.pdf'
exited_code = subprocess.call('dot -Tpdf ' + dot_file_path \
+ ' -o ' + pdf_save_path, shell=True)
if exited_code != 0:
print('The dot command is needed for creating pdf files.')
print('The {} is saved as the dot filetype.'.format(
dot_file_path))
remove_ctr_vars = set()
if remove_ctr_var:
for node in self.all_var_nodes():
if node.is_ctrl_var():
remove_ctr_vars.add(node)
self.safe_remove_nodes(remove_ctr_vars)
print('Total ops num = {}.'.format(len(self.all_op_nodes())))
if marked_nodes is not None:
if not isinstance(marked_nodes, set):
if isinstance(marked_nodes, Iterable):
marked_nodes = set(marked_nodes)
else:
marked_nodes = {marked_nodes}
marked_nodes = {n.node for n in marked_nodes}
remove_ctr_vars = {n.node for n in remove_ctr_vars}
marked_nodes = marked_nodes - remove_ctr_vars
if self.graph.has('__graphviz__marked_node__'):
self.graph.erase('__graphviz__marked_node__')
self.graph.set('__graphviz__marked_node__', marked_nodes)
if not os.path.exists(save_path):
os.makedirs(save_path)
viz_dot_path = os.path.join(save_path, name) + '.dot'
viz_pass = core.get_pass('graph_viz_pass')
viz_pass.set('graph_viz_path', viz_dot_path)
viz_pass.apply(self.graph)
_convert_to_pdf(viz_dot_path)
def to_program(self):
"""
Convert the graph into a Program.
        WARN: When the graph includes backward operator nodes, the
        conversion may fail. Usually, this function is only used to
        convert a test graph.
Returns:
Program: a program converted from the graph.
"""
convert_pass = core.get_pass('graph_to_program_pass')
desc = core.ProgramDesc()
convert_pass.set_not_owned('program', desc)
convert_pass.apply(self.graph)
program = Program._construct_from_desc(desc)
return program
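    # Hedged round-trip sketch (not part of the original source): build an IrGraph
    # from a test Program, apply graph edits, then convert back with to_program().
    # We assume the edits keep the graph forward-only so the conversion succeeds.
    #
    #   import paddle.fluid as fluid
    #   from paddle.fluid import core
    #   from paddle.fluid.framework import IrGraph
    #
    #   test_prog = fluid.default_main_program().clone(for_test=True)
    #   graph = IrGraph(core.Graph(test_prog.desc), for_test=True)
    #   # ... graph edits go here ...
    #   new_prog = graph.to_program()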
def _find_node_by_name(self, nodes, node_name):
"""
        Find a node in the given node set by name.
"""
target_node = None
for n in nodes:
if n.name() == node_name:
target_node = n
        assert target_node is not None, "Cannot find the target node in the given set."
return target_node
def _update_desc_attr(self, desc, name, val):
"""
Update the value of desc's attribute by attribute's name.
"""
if isinstance(val, Block):
desc.set_block_attr(name, val.desc)
elif isinstance(val, list) and val and all(
isinstance(v, Block) for v in val):
desc.set_blocks_attr(name, [v.desc for v in val])
elif isinstance(val, core.BlockDesc) or \
isinstance(val, core.ProgramDesc):
desc.set_serialized_attr(name, val.serialize_to_string())
else:
desc._set_attr(name, val)
class Program(object):
"""
    Create a Python Program. It has at least one :ref:`api_guide_Block_en`; when a
    control flow op such as conditional_block or :ref:`api_fluid_layers_While` is included,
    it will contain nested blocks.
    Please refer to the
    `framework.proto <https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/framework/framework.proto>`_
    for details.
    A set of Programs usually contains a startup program and a main program.
    The startup program performs some initialization work, e.g. initializing the ``Parameter``,
    while the main program holds the network structure and the variables used for training.
    A set of Programs can be used for training or for testing: the train program contains
    everything needed to build the training network, while the test program prunes content
    that is irrelevant to testing, e.g. backward ops and variables.
    **Notes**:
        **we have** :ref:`api_fluid_default_startup_program` **and** :ref:`api_fluid_default_main_program`
        **by default; the pair shares parameters. The** :ref:`api_fluid_default_startup_program` **only runs once to initialize parameters, while**
        :ref:`api_fluid_default_main_program` **runs in every mini batch and adjusts the weights.**
Returns:
Program: An empty Program.
Examples:
.. code-block:: python
import paddle.fluid as fluid
main_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(main_program=main_program, startup_program=startup_program):
x = fluid.layers.data(name="x", shape=[-1, 784], dtype='float32')
y = fluid.layers.data(name="y", shape=[-1, 1], dtype='int32')
z = fluid.layers.fc(name="fc", input=x, size=10, act="relu")
print("main program is: {}".format(main_program))
print("start up program is: {}".format(startup_program))
"""
def __init__(self):
self.desc = core.ProgramDesc()
self.blocks = [Block(self, 0)]
self.current_block_idx = 0
global global_prog_seed
self._seed = global_prog_seed
self._current_role = core.op_proto_and_checker_maker.OpRole.Forward
self.__op_role_var = []
# for distribute training
# _is_distributed = True if under distributed training
self._is_distributed = False
# _is_chief = True if the trainer is the first one, usually No.0
self._is_chief = False
# _parameters_on_pservers records all the parameters distributed on parameter servers.
self._parameters_on_pservers = None
# _endpoints is a list about parameter servers ip:port, such as ["ip:port","ip:port"]
self._endpoints = []
# if current role is parameter server, the _ps_endpoint is its "ip:port"
self._ps_endpoint = None
# trainers_endpoints, it is used for distribution.
self._trainers_endpoints = []
# the distributed lookup table names
self._distributed_lookup_table = None
        # use deep gradient compression or not
self._enable_dgc = False
self._use_lamb = False
self._nccl_comm_num = 1
self._use_hierarchical_allreduce = False
self._hierarchical_allreduce_inter_nranks = 0
# if this program has been optimized by distributed optimizer
# fleet_opt will be given a value
self._fleet_opt = None
self._program_config = None
# assigned if this program has been parsed by a pipeline optimizer
self._pipeline_opt = None
# appending gradients times
self._appending_grad_times = 0
def global_seed(self, seed=0):
"""
Set global seed for Program
Returns:
None.
Examples:
.. code-block:: python
import paddle.fluid as fluid
prog = fluid.default_main_program()
print(prog.random_seed)
## 0
## the default random seed is 0
prog.global_seed(102)
prog1 = fluid.default_main_program()
print(prog1.random_seed)
## 102
## the random seed is 102
"""
global global_prog_seed
global_prog_seed = seed
self._seed = global_prog_seed
@property
def _op_role(self):
"""
        The operator role. It is one of the enum values {Forward, Backward, Optimize}.
        Notes: this is a low-level API. It is used only for ParallelExecutor to
        duplicate or schedule operators to devices.
For example, the forward operator should be executed on every device.
The backward operator should be executed on every device and the
parameter gradient of backward (use :code:`_op_role_var` to get this
variable) operator should be merged to one device. The optimization
operators should be executed on only one device and broadcast the
optimization result, i.e., the new parameter, to every other device.
"""
return self._current_role
@_op_role.setter
def _op_role(self, role):
self._current_role = role
@property
def _op_role_var(self):
"""
The auxiliary variables for :code:`_op_role` property.
See Also: :code:`Program._op_role`'s documentation for details.
Notes: This is a very low-level API. Users should not use it directly.
"""
return self.__op_role_var
@signature_safe_contextmanager
def _backward_role_guard(self):
tmp_role = self._current_role
OpRole = core.op_proto_and_checker_maker.OpRole
self._current_role = OpRole.Backward
try:
yield
finally:
self._current_role = tmp_role
@signature_safe_contextmanager
def _optimized_guard(self, param_and_grads):
"""
A with guard to set :code:`Optimization` :code:`OpRole` and
:code:`OpRoleVar` automatically.
Notes: This is a very low level API. Users should not use it directly.
Args:
param_and_grads(list): The variables (names) to be optimized.
Examples:
>>> import paddle.fluid as fluid
>>> p, g = backward(...)
>>> with program._optimized_guard([p,g]):
>>> p = p - 0.001 * g
"""
tmp_role = self._current_role
tmp_var = self.__op_role_var
OpRole = core.op_proto_and_checker_maker.OpRole
self._current_role = OpRole.Optimize
self.__op_role_var = [
var.name if isinstance(var, Variable) else var
for var in param_and_grads
]
try:
yield
finally:
self.__op_role_var = tmp_var
self._current_role = tmp_role
@signature_safe_contextmanager
def _lr_schedule_guard(self, is_with_opt=False):
"""
A with guard to set :code:`LRSched` :code:`OpRole` and
:code:`OpRoleVar` automatically. The :code:`OpRoleVar` is
set to the target learning rate.
Notes: This is a very low level API. Users should not use it directly.
Args:
            is_with_opt: Only set to True if these ops are in the middle
                of a bunch of optimize ops so that they can be treated
                correctly. For example, sgd->lr_op->sgd->lr_op->sgd.
Examples:
>>> import paddle.fluid as fluid
>>> p, g = backward(...)
>>> with program.lr_schedule_guard():
>>> lr = lr * decay
"""
tmp_role = self._current_role
tmp_var = self.__op_role_var
OpRole = core.op_proto_and_checker_maker.OpRole
self._current_role = OpRole.LRSched
if is_with_opt:
self._current_role = int(OpRole.LRSched) | int(OpRole.Optimize)
# TODO(typhoonzero): how to set target learning rate var
self.__op_role_var = []
try:
yield
finally:
self.__op_role_var = tmp_var
self._current_role = tmp_role
def __str__(self):
"""
Get the protobuf debug string of this Program.
Returns:
(str): The protobuf debug string.
Raises:
ValueError: If any of required fields is not set.
"""
return self._to_readable_code()
def _to_readable_code(self, skip_op_callstack=True):
"""
Get readable debug string of Program.
.. note::
If you want to get the debug string in protobuf format,
please use :code:`to_string` method.
Args:
skip_op_callstack(bool): whether to skip parsing Operator's attribute
op_callstack, default value is True
Returns:
string: The formatted Program string.
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
new_var = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
new_op = cur_block.append_op(type="abs",
inputs={"X": [new_var]},
outputs={"Out": [new_var]})
print(cur_program._to_readable_code())
"""
        assert isinstance(
            skip_op_callstack, bool
        ), "The type of skip_op_callstack parameter is wrong, expected bool, but received {}.".format(
            type(skip_op_callstack))
program_str = ""
for block in self.blocks:
program_str += block._to_readable_code(skip_op_callstack)
program_str += '\n'
return program_str
def to_string(self, throw_on_error, with_details=False):
"""
To debug string.
Args:
throw_on_error (bool): raise Value error when any of required fields is not set.
with_details (bool): True if more details about variables and parameters, e.g., :code:`trainable`, :code:`optimize_attr`, need to print.
Returns:
str: The debug string describe current Program.
Raises:
ValueError: If any of required fields is not set and throw_on_error is True.
Examples:
.. code-block:: python
import paddle.fluid as fluid
prog = fluid.default_main_program()
x = fluid.layers.data(name="X", shape=[2,3], dtype="float32", append_batch_size=False)
pred = fluid.layers.fc(x, size=3)
prog_string = prog.to_string(throw_on_error=True, with_details=False)
prog_string_with_details = prog.to_string(throw_on_error=False, with_details=True)
print("program string without detail: {}".format(prog_string))
print("program string with detail: {}".format(prog_string_with_details))
"""
assert isinstance(
throw_on_error, bool
), "The type of throw_on_error parameter is wrong, expected bool, but received {}.".format(
type(throw_on_error))
assert isinstance(
with_details, bool
), "The type of with_details parameter is wrong, expected bool, but received {}.".format(
type(with_details))
if with_details:
res_str = ""
for block in self.blocks:
res_str += block.to_string(throw_on_error, with_details)
else:
protostr = self.desc.serialize_to_string()
proto = framework_pb2.ProgramDesc.FromString(
six.binary_type(protostr))
res_str = _debug_string_(proto, throw_on_error)
return res_str
def _get_desc(self):
"""
Get the C++ side of `ProgramDesc` object pointer. The C++ object is
exposed by :code:`pybind`.
Notes: This is a very low level API. Users should not use this API
directly.
"""
return self.desc
def _version(self):
return self.desc._version()
def clone(self, for_test=False):
"""
**Notes**:
**1.** :code:`Program.clone()` **method DOES NOT clone** :ref:`api_fluid_io_DataLoader` .
        **2. We recommend using** :code:`clone` **before** :code:`Optimizer.minimize`.
        **3. This API has no effect in Dygraph Mode**
        Create a new Program with the forward content of the original one when ``for_test=True``.
        Create a new Program that is the same as the original one when ``for_test=False``.
Some operators, e.g., :ref:`api_fluid_layers_batch_norm` , behave differently between
training and testing. They have an attribute, :code:`is_test`, to
control this behaviour. This method will change the :code:`is_test`
attribute of them to :code:`True` when :code:`for_test=True`.
* Set for_test to False when you want to clone the program for training.
* Set for_test to True when you want to clone the program for testing.
        We will prune the backward and optimize part of the program when you
        use :code:`clone` after :code:`Optimizer.minimize`, but we still
        recommend you to use :code:`clone` before using :code:`Optimizer.minimize`.
For Example:
::
import paddle.fluid as fluid
img = fluid.layers.data(name='image', shape=[784])
pred = fluid.layers.fc(input=img, size=10, act='relu')
loss = fluid.layers.mean(pred)
# Here we use clone before Momentum
test_program = fluid.default_main_program().clone(for_test=True)
optimizer = fluid.optimizer.Momentum(learning_rate=0.01, momentum=0.9)
optimizer.minimize(loss)
Args:
for_test (bool): True if change the :code:`is_test` attribute of operators to :code:`True`
and prune the backward and optimize part of the program. The default value is :code:`False` .
Returns:
            Program: A new Program with the forward content of the original one when ``for_test=True``. A new Program that is the same as the original one when ``for_test=False``.
Examples:
            **Notes: The Program's order may be different after** :code:`clone` **and
            this will not affect your training or testing progress. In the following
            example we give you a simple method** :code:`print_prog(program)` **to
            print the Program Descs in order to make sure you have the same print result
            after** :code:`clone`:
.. code-block:: python
import paddle.fluid as fluid
import six
def print_prog(prog):
for name, value in sorted(six.iteritems(prog.block(0).vars)):
print(value)
for op in prog.block(0).ops:
print("op type is {}".format(op.type))
print("op inputs are {}".format(op.input_arg_names))
print("op outputs are {}".format(op.output_arg_names))
for key, value in sorted(six.iteritems(op.all_attrs())):
if key not in ['op_callstack', 'op_role_var']:
print(" [ attrs: {}: {} ]".format(key, value))
1. To clone a test program, the sample code is:
.. code-block:: python
import paddle.fluid as fluid
import six
def print_prog(prog):
for name, value in sorted(six.iteritems(prog.block(0).vars)):
print(value)
for op in prog.block(0).ops:
print("op type is {}".format(op.type))
print("op inputs are {}".format(op.input_arg_names))
print("op outputs are {}".format(op.output_arg_names))
for key, value in sorted(six.iteritems(op.all_attrs())):
if key not in ['op_callstack', 'op_role_var']:
print(" [ attrs: {}: {} ]".format(key, value))
train_program = fluid.Program()
startup_program = fluid.Program()
# startup_program is used to do some parameter init work,
# and main program is used to hold the network
with fluid.program_guard(train_program, startup_program):
with fluid.unique_name.guard():
img = fluid.layers.data(name='image', shape=[784])
hidden = fluid.layers.fc(input=img, size=200, act='relu')
hidden = fluid.layers.dropout(hidden, dropout_prob=0.5)
loss = fluid.layers.cross_entropy(
input=fluid.layers.fc(hidden, size=10, act='softmax'),
label=fluid.layers.data(name='label', shape=[1], dtype='int64'))
avg_loss = fluid.layers.mean(loss)
test_program = train_program.clone(for_test=True)
print_prog(test_program)
                # Because parameters are shared between train and test, we need to use the startup
                # program of train instead of the test startup program; nothing is in test's startup program.
                # In Paddle Fluid we share weights by using the same Variable name. In the train and test
                # programs all parameters have the same names, which lets the two programs share parameters.
                # That's why we need to use the startup program of train. The startup program of test has
                # nothing in it, since it is a new program.
with fluid.program_guard(train_program, startup_program):
with fluid.unique_name.guard():
sgd = fluid.optimizer.SGD(learning_rate=1e-3)
sgd.minimize(avg_loss)
        2. The clone method can be avoided if you create the program for training and the program for testing individually.
.. code-block:: python
import paddle.fluid as fluid
import six
def print_prog(prog):
for name, value in sorted(six.iteritems(prog.block(0).vars)):
print(value)
for op in prog.block(0).ops:
print("op type is {}".format(op.type))
print("op inputs are {}".format(op.input_arg_names))
print("op outputs are {}".format(op.output_arg_names))
for key, value in sorted(six.iteritems(op.all_attrs())):
if key not in ['op_callstack', 'op_role_var']:
print(" [ attrs: {}: {} ]".format(key, value))
def network():
img = fluid.layers.data(name='image', shape=[784])
hidden = fluid.layers.fc(input=img, size=200, act='relu')
hidden = fluid.layers.dropout(hidden, dropout_prob=0.5)
loss = fluid.layers.cross_entropy(
input=fluid.layers.fc(hidden, size=10, act='softmax'),
label=fluid.layers.data(name='label', shape=[1], dtype='int64'))
avg_loss = fluid.layers.mean(loss)
return avg_loss
train_program_2 = fluid.Program()
startup_program_2 = fluid.Program()
test_program_2 = fluid.Program()
with fluid.program_guard(train_program_2, startup_program_2):
with fluid.unique_name.guard():
avg_loss = network()
sgd = fluid.optimizer.SGD(learning_rate=1e-3)
sgd.minimize(avg_loss)
# the test startup program is not used.
with fluid.program_guard(test_program_2, startup_program_2):
with fluid.unique_name.guard():
avg_loss = network()
print_prog(test_program_2)
        The two code snippets above will generate and print the same programs.
"""
#NOTE(zhiqiu): we sync the original program first, since its program may diff with
# its desc due to modifying desc in c++ space. E.g. save op will add kLookupTablePath in desc.
self._sync_with_cpp()
pruned_origin_block_id_map = None
if for_test:
forward_prog = Program()
forward_prog.desc, pruned_origin_block_id_map = core.prune_backward(
self.desc)
forward_prog.blocks = [
Block(forward_prog, i)
for i in six.moves.range(forward_prog.desc.num_blocks())
]
forward_prog._sync_with_cpp()
p = forward_prog._inference_optimize(prune_read_op=False)
else:
p = Program()
p.current_block_idx = self.current_block_idx
p._seed = self._seed
p.desc = core.ProgramDesc(self.desc)
p.blocks = [
Block(p, i) for i in six.moves.range(self.desc.num_blocks())
]
p._current_role = self._current_role
p.__op_role_var = self.__op_role_var
p._appending_grad_times = self._appending_grad_times
#NOTE(zhiqiu): we sync the cloned program, to update its program by
# its desc.
p._sync_with_cpp()
p._copy_param_info_from(self)
p._copy_data_info_from(self, pruned_origin_block_id_map)
p._copy_dist_param_info_from(self)
return p
def _prune(self, targets):
"""
Prune operators and variables which are not needed to generate
:code:`targets`.
Notes: This is a very low level API. Users should not use this API
directly. This API is in flux and not stable.
Args:
targets(list|Variable|Operator): A list of variables, operators, or variable names
need to be pruned
Returns:
Program: A new, pruned program.
"""
return self._prune_with_input([], targets)
def _prune_with_input(self, feeded_var_names, targets):
"""
Prune operators and variables which are not needed to generate
:code:`targets`. Prune operators and variables which are needed
to generate feeded_var
Notes: This is a very low level API. Users should not use this API
directly. This API is in flux and not stable.
Args:
feeded_var_names(list|str): A list of variable names from where
pruning start. If it is set as [], this API works just like _prune()
targets(list|Variable|Operator): A list of variables, operators, or variable names
need to be pruned
Returns:
Program: A new, pruned program.
"""
#NOTE(zhiqiu): we sync the original program first, since its program may diff with
# its desc due to modifying desc in c++ space. E.g. save op will add kLookupTablePath in desc.
self._sync_with_cpp()
if not isinstance(feeded_var_names, list):
feeded_var_names = [feeded_var_names]
if not isinstance(targets, list):
targets = [targets]
for var in feeded_var_names:
if not isinstance(var, six.string_types):
raise ValueError(
"All feeded_var_names of Program._prune_with_input() can only be "
"str, but received %s." % type(var))
targets_idx = []
for t in targets:
if not isinstance(t, Operator):
if isinstance(t, Variable):
name = t.name
elif isinstance(t, six.string_types):
name = str(t)
else:
raise ValueError(
"All targets of Program._prune_with_input() can only be "
"Variable or Operator, but received %s." % type(t))
                # NOTE(zhiqiu): For a variable to be fed in fetch_list, there are two cases:
                # (1) the variable is a leaf, and no op generates it;
                # (2) the variable is not a leaf, and we need to prune the op that generates it.
                # In both cases, we can just skip finding the target_op for it.
if name in feeded_var_names:
continue
                # After transpiler processing, the op that outputs this
                # variable may have been changed, so t.op is not reliable
                # and we need to find the current op that generates this
                # variable here.
target_op = None
global_block = self.global_block()
for idx, op in enumerate(global_block.ops):
if name in op.output_arg_names:
# NOTE(zhiqiu): Find op that generate target name.
# Skip optimize op except for optimize op in targets,
# since optimize op generates parameters.
if op._is_optimize_op() and op not in targets:
continue
else:
target_op = op
break
if target_op is None:
raise ValueError(
"The target variable used for pruning should have an "
"associated operator that generates it.")
else:
targets_idx.append([target_op.block.idx, target_op.idx])
else:
targets_idx.append([t.block.idx, t.idx])
res = Program()
res.desc, pruned_origin_block_id_map = core.prune(self.desc,
set(feeded_var_names),
targets_idx)
res.blocks = [
Block(res, i) for i in six.moves.range(res.desc.num_blocks())
]
res._sync_with_cpp()
res._copy_param_info_from(self)
res._copy_data_info_from(self, pruned_origin_block_id_map)
res._copy_dist_param_info_from(self)
return res
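    # Hedged usage sketch (not part of the original source): pruning a program down
    # to the ops needed to compute one fetch target. Both methods are low-level and
    # unstable, as their docstrings say; 'loss' and 'image' are illustrative names.
    #
    #   pruned = prog._prune(targets=[loss])
    #   # or, when some inputs will be fed directly:
    #   pruned = prog._prune_with_input(
    #       feeded_var_names=['image'], targets=[loss])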
def _inference_optimize(self, prune_read_op=True):
"""
        This method will create a new program and do the following adjustments on it:
        1. Remove all reader variables and their creator ops if they exist.
        2. Remove the :code:`read_op` if it exists.
        3. Change the :code:`is_test`
        attribute of operators to :code:`True`. All the :code:`Parameter`
        information will be lost.
Args:
prune_read_op(bool): remove the read ops that are added by py_reader
for cpp inference library
Notes: This API is a very low level API. Use
:code:`Program.clone(for_test=True)` instead.
Returns:
Program: The new program.
"""
res = Program()
res.desc = core.ProgramDesc(self.desc)
# remove all readers and the read_op if exist
read_op_idx = 0
root_block = res.desc.block(0)
if prune_read_op:
while True:
if read_op_idx >= root_block.op_size() or root_block.op(
read_op_idx).type() == 'read':
break
read_op_idx += 1
if read_op_idx < root_block.op_size():
root_block._remove_op(0, read_op_idx + 1)
for var in root_block.all_vars():
if var.type() == core.VarDesc.VarType.READER:
root_block._remove_var(cpt.to_bytes(var.name()))
# change all `is_test` attributes to True
for i in six.moves.range(res.desc.num_blocks()):
block = res.desc.block(i)
for j in six.moves.range(block.op_size()):
op = block.op(j)
if op.has_attr('is_test'):
op._set_attr('is_test', True)
res.blocks = [
Block(res, i) for i in six.moves.range(res.desc.num_blocks())
]
res._sync_with_cpp()
return res
@staticmethod
def parse_from_string(binary_str):
"""
**Notes**:
**1. All information about parameters will be lost after serialization**
**2. This API has no effect in Dygraph mode**
Deserialize a Program from `protobuf <https://en.wikipedia.org/wiki/Protocol_Buffers>`_ binary string.
        This method is typically used to save and load a model.
        Args:
            binary_str (str): the binary protobuf string.
Returns:
Program: A deserialized Program.
Examples:
.. code-block:: python
import paddle.fluid as fluid
startup_prog = fluid.Program()
main_prog = fluid.Program()
with fluid.program_guard(startup_prog, main_prog):
x = fluid.layers.data(
name='X', shape=[1000, 784], dtype='float32', append_batch_size=False)
y = fluid.layers.data(
name='Y', shape=[784, 100], dtype='float32', append_batch_size=False)
z = fluid.layers.mul(x=x, y=y)
binary_str = fluid.default_main_program().desc.serialize_to_string()
prog_restored = fluid.default_main_program().parse_from_string(binary_str)
print(fluid.default_main_program())
print(prog_restored)
"""
p = Program()
p.desc = core.ProgramDesc(binary_str)
p.blocks = [Block(p, i) for i in six.moves.range(p.desc.num_blocks())]
p._sync_with_cpp()
return p
@staticmethod
def _construct_from_desc(desc):
"""
Construct a program from program desc.
Args:
desc(core.ProgramDesc): The program desc for constructing.
Returns:
Program: A program.
"""
p = Program()
p.desc = desc
p.blocks = [Block(p, i) for i in six.moves.range(p.desc.num_blocks())]
p._sync_with_cpp()
return p
@property
def random_seed(self):
"""
The default random seed for random operators in Program. ``0`` means get
the random seed from random device.
**Notes: It must be set before the operators have been added.**
Returns:
int64: Random seed in current Program
Examples:
.. code-block:: python
import paddle.fluid as fluid
prog = fluid.default_main_program()
random_seed = prog.random_seed
x_var = fluid.layers.data(name="X", shape=[3,3], dtype="float32", append_batch_size=False)
print(random_seed)
## 0
## the default random seed is 0
# Here we need to set random seed before we use fluid.layers.dropout
prog.random_seed = 1
z_var = fluid.layers.dropout(x_var, 0.7)
print(prog.random_seed)
## 1
                ## the random seed is changed to 1
"""
return self._seed
@property
def num_blocks(self):
"""
The number of :ref:`api_guide_Block_en` in this Program.
**Notes: This API has no effect in Dygraph mode**
Returns:
int(Platform-dependent size): num of :ref:`api_guide_Block_en` in current Program
Examples:
.. code-block:: python
import paddle.fluid as fluid
prog = fluid.default_main_program()
num_blocks = prog.num_blocks
print(num_blocks)
"""
return self.desc.num_blocks()
@random_seed.setter
def random_seed(self, seed):
if not isinstance(seed, int):
raise ValueError(
"Program.random_seed's input seed must be an integer, but received %s."
% type(seed))
self._seed = seed
def __repr__(self):
return self.__str__()
def global_block(self):
"""
**Notes**:
**This API has no effect in Dygraph mode**
Get the first :ref:`api_guide_Block_en` of this Program.
Returns:
:ref:`api_guide_Block_en`: The first :ref:`api_guide_Block_en` of this Program.
Examples:
.. code-block:: python
import paddle.fluid as fluid
prog = fluid.default_main_program()
gb_block = prog.global_block()
print(gb_block)
"""
return self.blocks[0]
def block(self, index):
"""
**Notes**:
**This API has no effect in Dygraph mode**
Get the :code:`index` :ref:`api_guide_Block_en` of this Program
Args:
index (int) - The index of :ref:`api_guide_Block_en` to get
Returns:
:ref:`api_guide_Block_en`: The :code:`index` block
Examples:
.. code-block:: python
import paddle.fluid as fluid
prog = fluid.default_main_program()
block_0 = prog.block(0)
print(block_0)
"""
return self.blocks[index]
def current_block(self):
"""
**Notes**:
**This API has no effect in Dygraph mode**
        Get the current :ref:`api_guide_Block_en` . The :code:`current` :ref:`api_guide_Block_en`
        is the :ref:`api_guide_Block_en` to which operators are appended.
        Returns:
            :ref:`api_guide_Block_en`: The :code:`current` :ref:`api_guide_Block_en`
Examples:
.. code-block:: python
import paddle.fluid as fluid
prog = fluid.default_main_program()
current_blk = prog.current_block()
print(current_blk)
"""
return self.blocks[self.current_block_idx]
def _create_block(self, parent_idx=None):
"""
Create a new block with the :code:`parent_idx` and change the current block
to new block.
Args:
parent_idx(int): The parent block index.
Returns:
Block: The new block.
"""
new_block_idx = len(self.blocks)
parent = self.current_block() if parent_idx is None else self.block(
parent_idx)
self.desc.append_block(parent.desc)
self.current_block_idx = new_block_idx
self.blocks.append(Block(self, self.current_block_idx))
return self.current_block()
def _rollback(self):
"""
Exit a code block, i.e., roll back to the parent block.
Returns:
None
"""
self.current_block_idx = self.current_block().parent_idx
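    # Hedged sketch (not part of the original source): _create_block()/_rollback()
    # are used in pairs by control-flow layers to build a nested block and then
    # return to its parent. A minimal illustration of the pairing, with `prog`
    # standing in for a Program instance:
    #
    #   parent = prog.current_block()
    #   child = prog._create_block()          # current block is now the child
    #   # ... append ops into the child block here ...
    #   prog._rollback()                      # current block is the parent again
    #   assert prog.current_block().idx == parent.idx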
def _sync_with_cpp(self):
"""
Synchronize Python instance to its binding C++ object instance.
If the program is modified in C++ space, this method should be invoked.
Notes: This is a very low level API. Users should not invoke it
directly.
Returns:
None
"""
for block_idx in range(len(self.blocks), self.desc.num_blocks()):
self.blocks.append(Block(self, block_idx))
for block in self.blocks:
block._sync_with_cpp()
def _copy_param_info_from(self, other):
"""
Copy the information of parameters from other program.
Notes: This is a very low level API. Users should not invoke it
directly.
Args:
other(Program): Other program
Returns:
None
"""
if not isinstance(other, Program):
raise TypeError(
"Function Program._copy_param_info_from() needs to pass in a source Program, but received %s"
% type(other))
self.global_block()._copy_param_info_from(other.global_block())
def _copy_dist_param_info_from(self, other):
"""
Copy the information of distributed information from other program.
Args:
other(Program): Other program
Returns:
None
"""
if not isinstance(other, Program):
raise TypeError(
"Function Program._copy_param_info_from() needs to pass in a source Program, but received %s"
% type(other))
self._is_distributed = other._is_distributed
self._is_chief = other._is_chief
self._parameters_on_pservers = other._parameters_on_pservers
self._endpoints = other._endpoints
self._ps_endpoint = other._ps_endpoint
self._distributed_lookup_table = other._distributed_lookup_table
def _copy_data_info_from(self, other, pruned_origin_block_id_map=None):
"""
Copy the information of data variables from other program.
Notes: This is a very low level API. Users should not invoke it
directly.
Args:
other(Program): Other program
pruned_origin_block_id_map(dict{int:int}): A dict which maps the block id in program
self to the block id in program other. For example, {0:0, 1:1, 2:3} means block 0 in self is
cloned from block 0 in other, etc. Default is None, which means default mapped,
{0:0, 1:1,..., n:n}.
Returns:
None
"""
if not isinstance(other, Program):
raise TypeError(
"Function Program._copy_param_info_from() needs to pass in a source Program, but received %s"
% type(other))
if not pruned_origin_block_id_map:
pruned_origin_block_id_map = {
i: i
for i in six.moves.range(self.desc.num_blocks())
}
# NOTE(zhiqiu): All vars in cloned program exist in original program.
# The reverse is not true, due to backward pruning.
for i, block in enumerate(self.blocks):
other_block = other.blocks[pruned_origin_block_id_map[i]]
for var in list(block.vars.values()):
other_var = other_block.var(var.name)
if other_var.is_data:
var.is_data = True
if other_var.desc.need_check_feed():
var.desc.set_need_check_feed(True)
if other_var.stop_gradient:
var.stop_gradient = True
def list_vars(self):
"""
        Get all :ref:`api_guide_Variable_en` from this Program. An iterable object is returned.
        Returns:
            iterable :ref:`api_guide_Variable_en`: The generator will yield every variable in this program.
Examples:
.. code-block:: python
import paddle.fluid as fluid
prog = fluid.default_main_program()
img = fluid.layers.data(name='img', shape=[1,28,28], dtype='float32')
label = fluid.layers.data(name='label', shape=[128,1], dtype='int64')
for var in prog.list_vars():
print(var)
"""
for each_block in self.blocks:
for each_var in list(each_block.vars.values()):
yield each_var
def all_parameters(self):
"""
Get all :ref:`api_guide_parameter_en` from this Program. A list object is returned.
Returns:
            list[ :ref:`api_guide_parameter_en` ]: The list contains all parameters in this program.
Examples:
.. code-block:: python
import paddle.fluid as fluid
program = fluid.default_main_program()
data = fluid.data(name='x', shape=[None, 13], dtype='float32')
hidden = fluid.layers.fc(input=data, size=10)
loss = fluid.layers.mean(hidden)
fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)
for param in program.all_parameters():
print(param)
# Here will print all parameters in current program, in this example,
# the result is like:
#
# name: "fc_0.w_0"
# type {
# type: LOD_TENSOR
# lod_tensor {
# tensor {
# data_type: FP32
# dims: 13
# dims: 10
# }
# }
# }
# persistable: true
#
# name: "fc_0.b_0"
# type {
# type: LOD_TENSOR
# lod_tensor {
# tensor {
# data_type: FP32
# dims: 10
# }
# }
# }
# persistable: true
#
# Here print(param) will print out all the properties of a parameter,
# including name, type and persistable, you can access to specific
# property of a parameter, such as param.name, param.type
"""
parameters = []
for each_block in self.blocks:
parameters.extend(each_block.all_parameters())
return parameters
@six.add_metaclass(ParameterMetaClass)
class Parameter(Variable):
"""
Parameter is derived from Variable. A parameter is a persistable
Variable, and will be updated by optimizers after each iteration.
The training of a neural network is essentially the updating of
its parameters.
    Relative to a general Variable, a Parameter has several member
    variables of its own:
    Args:
        trainable(bool): True if the parameter needs to be updated after
            iterations.
optimize_attr(map): Parameter attributes related with optimizing.
Currently, it only contains 'learning_rate'.
Default: {'learning_rate': 1.0}
regularizer(WeightDecayRegularizer): The Regularizer which will
be applied on the parameter. Default: None
do_model_average(bool): True if the model average strategy will
be applied on this parameter.
"""
def __init__(self,
block,
shape,
dtype,
type=core.VarDesc.VarType.LOD_TENSOR,
**kwargs):
if shape is None:
raise ValueError("The shape of Parameter should not be None")
if dtype is None:
raise ValueError("The dtype of Parameter should not be None")
if len(shape) == 0:
raise ValueError(
"The dimensions of shape for Parameter must be greater than 0")
for each in shape:
if each < 0:
raise ValueError(
"Each dimension of shape for Parameter must be greater than 0, but received %s"
% list(shape))
Variable.__init__(
self,
block,
persistable=True,
shape=shape,
dtype=dtype,
type=type,
**kwargs)
self.trainable = kwargs.get('trainable', True)
self.optimize_attr = kwargs.get('optimize_attr', {'learning_rate': 1.0})
self.regularizer = kwargs.get('regularizer', None)
self.do_model_average = kwargs.get('do_model_average', None)
self.is_distributed = False
def __str__(self):
return self._to_readable_code()
def to_string(self, throw_on_error, with_details=False):
"""
To debug string.
Args:
throw_on_error(bool): raise exception when self is not initialized
when throw_on_error is True
with_details(bool): more details about variables and parameters
(e.g. trainable, optimize_attr, ...) will be printed when with_details is True
Returns(str): The debug string.
Examples:
.. code-block:: python
import paddle.fluid as fluid
prog = fluid.default_main_program()
rlt = fluid.layers.data("fake_data", shape=[1,1], dtype='float32')
debug_str = prog.to_string(throw_on_error=True, with_details=False)
print(debug_str)
"""
assert isinstance(throw_on_error, bool) and isinstance(with_details,
bool)
if with_details:
res_str = Variable.to_string(self, throw_on_error, True)
additional_attr = ("trainable", "optimize_attr", "regularizer",
"do_model_average")
for attr_name in additional_attr:
res_str += "%s: %s\n" % (attr_name,
cpt.to_text(getattr(self, attr_name)))
else:
res_str = Variable.to_string(self, throw_on_error, False)
return res_str
__repr__ = __str__
class ParamBase(core.VarBase):
"""
    ParamBase is derived from VarBase (which is the Variable in dygraph mode). A ParamBase is a persistable
VarBase, and will be updated by optimizers after each iteration.
The training of a neural network is essentially the updating of
its ParamBase.
    Relative to a general Variable, a ParamBase has several member
    variables of its own:
    Args:
        trainable(bool): True if the ParamBase needs to be updated after
            iterations.
optimize_attr(map): ParamBase attributes related with optimizing.
Currently, it only contains 'learning_rate'.
Default: {'learning_rate': 1.0}
regularizer(WeightDecayRegularizer): The Regularizer which will
be applied on the ParamBase. Default: None
do_model_average(bool): True if the model average strategy will
be applied on this ParamBase.
"""
@dygraph_only
def __init__(self, shape, dtype, **kwargs):
if shape is None:
raise ValueError("The shape of Parameter should not be None")
if dtype is None:
raise ValueError("The dtype of Parameter should not be None")
if len(shape) == 0:
raise ValueError(
"The dimensions of shape for Parameter must be greater than 0")
for each in shape:
if each < 0:
raise ValueError(
"Each dimension of shape for Parameter must be greater than 0, but received %s"
% list(shape))
if dtype is not None:
if not isinstance(dtype, core.VarDesc.VarType):
dtype = convert_np_dtype_to_dtype_(dtype)
name = kwargs.get('name', unique_name.generate('_param_base'))
super(ParamBase, self).__init__(dtype
if dtype else core.VarDesc.VarType.FP32,
list(shape) if shape else [], name,
core.VarDesc.VarType.LOD_TENSOR, True)
self.trainable = kwargs.get('trainable', True)
self.optimize_attr = kwargs.get('optimize_attr', {'learning_rate': 1.0})
self.regularizer = kwargs.get('regularizer', None)
self.do_model_average = kwargs.get('do_model_average', None)
self.is_distributed = False
# self.block = default_main_program().global_block()
def __str__(self):
return self.to_string(True)
def to_string(self, throw_on_error, with_details=False):
"""
To debug string.
Args:
throw_on_error(bool): raise exception when self is not initialized
when throw_on_error is True
with_details(bool): more details about variables and parameters
(e.g. trainable, optimize_attr, ...) will be printed when with_details is True
Returns(str): The debug string.
Examples:
.. code-block:: python
import paddle.fluid as fluid
prog = fluid.default_main_program()
rlt = fluid.layers.data("fake_data", shape=[1,1], dtype='float32')
debug_str = prog.to_string(throw_on_error=True, with_details=False)
print(debug_str)
"""
assert isinstance(throw_on_error, bool) and isinstance(with_details,
bool)
tensor = self.value().get_tensor()
if tensor._is_initialized():
return 'Parameter: %s\n%s' % (self.name, str(tensor))
else:
return 'Parameter: %s, not initialized' % (self.name)
__repr__ = __str__
# program is a global instance.
_main_program_ = Program()
_startup_program_ = Program()
def default_startup_program():
"""
Get default/global startup program.
    The layer functions in :ref:`api_fluid_layers` create parameters, :ref:`api_paddle_data_reader_reader` ,
    and `NCCL <https://developer.nvidia.com/nccl>`_ handles as global variables. The :code:`startup_program` will
    initialize them by the OPs in the startup :ref:`api_fluid_Program` . The :ref:`api_fluid_layers` functions
    append these initialization operators into the startup program.
This method will return the :code:`default` or the :code:`current` startup
program. Users can use :ref:`api_fluid_program_guard` to switch :ref:`api_fluid_Program` .
Returns: current default startup :ref:`api_fluid_Program`
Returns type: :ref:`api_fluid_Program`
Examples:
.. code-block:: python
import paddle.fluid as fluid
main_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(main_program=main_program, startup_program=startup_program):
x = fluid.layers.data(name="x", shape=[-1, 784], dtype='float32')
y = fluid.layers.data(name="y", shape=[-1, 1], dtype='int32')
z = fluid.layers.fc(name="fc", input=x, size=10, act="relu")
print("main program is: {}".format(fluid.default_main_program()))
print("start up program is: {}".format(fluid.default_startup_program()))
"""
return _startup_program_
def default_main_program():
"""
    This API can be used to get the ``default main program``, which stores the
    descriptions of ``op`` and ``variable``.
For example ``z = fluid.layers.elementwise_add(x, y)`` will create a new ``elementwise_add``
``op`` and a new ``z`` ``variable``, and they will be recorded in ``default main program``
The ``default_main_program`` is the default value for ``Program`` parameter in
a lot of ``fluid`` APIs. For example, the :code:`Executor.run()` will execute the
:code:`default_main_program` when the program is not specified.
If you want to replace the ``default main program``, you can use :ref:`api_fluid_program_guard`
Returns:
        :ref:`api_fluid_Program`: a ``Program`` which holds the descriptions of ops and variables in the network.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# Sample Network:
data = fluid.data(name='image', shape=[None, 3, 224, 224], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
conv1 = fluid.layers.conv2d(data, 4, 5, 1, act=None)
bn1 = fluid.layers.batch_norm(conv1, act='relu')
pool1 = fluid.layers.pool2d(bn1, 2, 'max', 2)
conv2 = fluid.layers.conv2d(pool1, 16, 5, 1, act=None)
bn2 = fluid.layers.batch_norm(conv2, act='relu')
pool2 = fluid.layers.pool2d(bn2, 2, 'max', 2)
fc1 = fluid.layers.fc(pool2, size=50, act='relu')
fc2 = fluid.layers.fc(fc1, size=102, act='softmax')
loss = fluid.layers.cross_entropy(input=fc2, label=label)
loss = fluid.layers.mean(loss)
opt = fluid.optimizer.Momentum(
learning_rate=0.1,
momentum=0.9,
regularization=fluid.regularizer.L2Decay(1e-4))
opt.minimize(loss)
#print the number of blocks in the program, 1 in this case
print(fluid.default_main_program().num_blocks)
#print the description of variable 'image'
print(fluid.default_main_program().blocks[0].var('image'))
"""
return _main_program_
def switch_main_program(program):
"""
Switch the main program to a new program.
Args:
program(Program): The new main program
Returns:
Program: The previous main program
"""
global _main_program_
prev_program = _main_program_
_main_program_ = program
return prev_program
def switch_startup_program(program):
"""
Switch the startup program to a new program
Args:
program(Program): The new startup program
Returns:
Program: The previous startup program
"""
global _startup_program_
prev_program = _startup_program_
_startup_program_ = program
return prev_program
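# Hedged usage sketch (not part of the original source): program_guard below is the
# recommended way to switch programs, but the two switch_* helpers can be paired
# manually as long as the previous programs are restored afterwards.
#
#   import paddle.fluid as fluid
#
#   new_main = fluid.Program()
#   prev_main = switch_main_program(new_main)
#   try:
#       pass  # build layers into new_main here
#   finally:
#       switch_main_program(prev_main)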
@signature_safe_contextmanager
def program_guard(main_program, startup_program=None):
"""
:api_attr: Static Graph
Change the global main program and startup program with `"with"` statement.
Layer functions in the Python `"with"` block will append operators and
variables to the new main programs.
Args:
main_program(Program): New main program inside `"with"` statement.
startup_program(Program, optional): New startup program inside `"with"`
statement. :code:`None` means not changing startup program,
default_startup_program is still used.
Default: None.
Examples:
.. code-block:: python
import paddle.fluid as fluid
main_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(main_program, startup_program):
data = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
hidden = fluid.layers.fc(input=data, size=10, act='relu')
    Notes: The temporary :code:`Program` can be used if the user does not need
    to construct either the startup program or the main program.
Examples:
.. code-block:: python
import paddle.fluid as fluid
main_program = fluid.Program()
# does not care about startup program. Just pass a temporary value.
with fluid.program_guard(main_program, fluid.Program()):
data = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
"""
from .data_feeder import check_type
check_type(main_program, 'main_program', Program, 'fluid.program_guard')
main_program = switch_main_program(main_program)
if startup_program is not None:
check_type(startup_program, 'startup_program', Program,
'fluid.program_guard')
startup_program = switch_startup_program(startup_program)
try:
yield
finally:
switch_main_program(main_program)
if startup_program is not None:
switch_startup_program(startup_program)
def _get_var(name, program=None):
"""
Get a variable by name from the global block of a program.
Args:
name(str): name of the variable
program(Program|None): program object.
If None, default_global_program() will be used.
Returns:
Variable
"""
if program is None:
program = default_main_program()
assert isinstance(name, str)
assert isinstance(program, Program)
return program.global_block().var(name)
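# Hedged usage sketch (not part of the original source): fetching a variable that a
# layer created in the default main program. The layer names are illustrative
# assumptions; real names depend on how the network was built.
#
#   import paddle.fluid as fluid
#
#   x = fluid.layers.data(name='x', shape=[13], dtype='float32')
#   y = fluid.layers.fc(input=x, size=10)
#   x_var = _get_var('x')          # defaults to default_main_program()
#   fc_out = _get_var(y.name)      # or look up by a Variable's name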
@signature_safe_contextmanager
def _dygraph_guard(tracer):
global _dygraph_tracer_
tmp_trace = _dygraph_tracer_
_dygraph_tracer_ = tracer
core._switch_tracer(tracer)
try:
yield
finally:
core._switch_tracer(tmp_trace)
_dygraph_tracer_ = tmp_trace
@signature_safe_contextmanager
def _dygraph_place_guard(place):
global _dygraph_current_expected_place_
tmp_place = _dygraph_current_expected_place_
_dygraph_current_expected_place_ = place
try:
yield
finally:
_dygraph_current_expected_place_ = tmp_place
def load_op_library(lib_filename):
"""
:api_attr: Static Graph
Load a dynamic library, including custom operators and kernels.
    When the library is loaded, the ops and kernels registered in the library
    become available in the PaddlePaddle main process.
    Please note, custom operators cannot have the same type
    as existing operators in the framework.
Args:
lib_filename (str): name of dynamic library.
Examples:
.. code-block:: python
import paddle.fluid as fluid
#fluid.load_op_library('custom_op.so')
"""
core.load_op_library(lib_filename)
OpProtoHolder.instance().update_op_proto()
def switch_device(device):
global _current_device
pre_device = _current_device
_current_device = device
return pre_device
@signature_safe_contextmanager
def device_guard(device=None):
"""
**Notes**:
**The API only supports static mode.**
A context manager that specifies the device on which the OP will be placed.
Args:
        device(str|None): Specify the device to use in the context. It should be 'cpu' or 'gpu'.
            When it is set to 'cpu' or 'gpu', all OPs created in the context will be
            placed on CPUPlace or CUDAPlace. When 'gpu' is set and the program runs on a
            single card, the device index will be the same as the device on which the
            executor runs. Default: None, meaning OPs in this context will be automatically
            assigned devices.
Examples:
.. code-block:: python
import paddle.fluid as fluid
support_gpu = fluid.is_compiled_with_cuda()
place = fluid.CPUPlace()
if support_gpu:
place = fluid.CUDAPlace(0)
# if GPU is supported, the three OPs below will be automatically assigned to CUDAPlace(0)
data1 = fluid.layers.fill_constant(shape=[1, 3, 8, 8], value=0.5, dtype='float32')
data2 = fluid.layers.fill_constant(shape=[1, 3, 5, 5], value=0.5, dtype='float32')
shape = fluid.layers.shape(data2)
with fluid.device_guard("cpu"):
# Ops created here will be placed on CPUPlace
shape = fluid.layers.slice(shape, axes=[0], starts=[0], ends=[4])
with fluid.device_guard('gpu'):
# if GPU is supported, OPs created here will be placed on CUDAPlace(0), otherwise on CPUPlace
out = fluid.layers.crop_tensor(data1, shape=shape)
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
result = exe.run(fetch_list=[out])
"""
index = None
if device and ':' in device:
device, index = device.split(':')
if device == 'cpu':
raise ValueError("Should not set device id for cpu.")
if device not in ['cpu', 'gpu', '', None]:
raise ValueError(
"The Attr(device) should be 'cpu' or 'gpu', and it can also be empty string or None "
"when there is no need to specify device. But received %s" % device)
if index:
device = ":".join([device, index])
pre_device = switch_device(device)
try:
yield
finally:
switch_device(pre_device)
def set_flags(flags):
"""
This function sets the GFlags value in Paddle.
Args:
flags (dict): A dict contains flags and its value.
Examples:
.. code-block:: python
import paddle.fluid as fluid
fluid.set_flags({'FLAGS_eager_delete_tensor_gb': 1.0})
"""
if not isinstance(flags, dict):
raise TypeError('flags in set_flags should be a dict')
for key, value in flags.items():
if core.globals().is_public(key):
core.globals()[key] = value
else:
raise ValueError(
"Flag %s cannot set its value through this function." % (key))
def get_flags(flags):
"""
This function gets the GFlags value in Paddle.
Args:
flags(list|tuple|str): A list/tuple of string or a string which is the flag's name.
Returns:
flag's value in Paddle.
Examples:
.. code-block:: python
import paddle.fluid as fluid
flags = ['FLAGS_eager_delete_tensor_gb', 'FLAGS_check_nan_inf']
res = fluid.get_flags(flags)
print(res)
# {'FLAGS_eager_delete_tensor_gb': 0.0, 'FLAGS_check_nan_inf': False}
"""
flags_value = {}
if isinstance(flags, (list, tuple)):
for key in flags:
if (core.globals().is_public(key)):
value = core.globals()[key]
temp = {key: value}
flags_value.update(temp)
else:
raise ValueError(
'Flag %s cannot get its value through this function.' %
(key))
elif isinstance(flags, str):
if (core.globals().is_public(flags)):
value = core.globals()[flags]
temp = {flags: value}
flags_value.update(temp)
else:
raise ValueError(
'Flag %s cannot get its value through this function.' % (flags))
else:
raise TypeError('Flags in get_flags should be a list, tuple or string.')
return flags_value
| 35.034445
| 337
| 0.562847
|
8df8574ab78d51967aead8675e625cfdfeb9df8a
| 389
|
py
|
Python
|
models/parameters.py
|
mourtadg7/goodwin-keen-model
|
911913fb0b7389b27a1efeb86ce4336551357690
|
[
"MIT"
] | null | null | null |
models/parameters.py
|
mourtadg7/goodwin-keen-model
|
911913fb0b7389b27a1efeb86ce4336551357690
|
[
"MIT"
] | 1
|
2021-09-12T18:10:53.000Z
|
2021-09-12T18:14:17.000Z
|
models/parameters.py
|
mourtadg7/goodwin-keen-model
|
911913fb0b7389b27a1efeb86ce4336551357690
|
[
"MIT"
] | 2
|
2021-09-12T15:56:01.000Z
|
2022-01-12T18:58:21.000Z
|
import math
alpha = 0.025 # Technological growth rate
beta = 0.02 # Population growth rate
delta = 0.01 # Depreciation rate
# Phillips Curve Parameters from Keen (1995)
phi0 = 0.04 / (1 - 0.04 ** 2)
phi1 = 0.04 ** 3 / (1 - 0.04 ** 2)
# Investment Rate Parameters from Grasselli (2012)
kappa0 = -0.0065
kappa1 = math.exp(-5)
kappa2 = 20
r = 0.03 # Real interest rate
v = 3 # Capital to output ratio
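# A minimal sketch (not part of the original file) of how these constants are
# typically combined in a Goodwin-Keen model: a Keen (1995) Phillips curve
# Phi(lambda) = phi1 / (1 - lambda)**2 - phi0 and a Grasselli (2012) exponential
# investment function kappa(pi) = kappa0 + kappa1 * exp(kappa2 * pi). The
# function names and the exact functional forms are assumptions inferred from
# the parameter comments above, not code taken from this repository.
def phillips_curve(employment_rate):
    """Wage growth as a function of the employment rate (assumed Keen 1995 form)."""
    return phi1 / (1 - employment_rate) ** 2 - phi0

def investment_rate(profit_share):
    """Investment share of output vs. the profit share (assumed Grasselli 2012 form)."""
    return kappa0 + kappa1 * math.exp(kappa2 * profit_share)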
| 32.416667
| 50
| 0.663239
|
8d5342561beb95e5aeb45b6586ad1f440fa28b9e
| 1,196
|
py
|
Python
|
bitwise/gate/IMPLY.py
|
jamesjiang52/Bitwise
|
c71f151d23034b3f9e2a939f637be0eaa16c45c3
|
[
"MIT"
] | null | null | null |
bitwise/gate/IMPLY.py
|
jamesjiang52/Bitwise
|
c71f151d23034b3f9e2a939f637be0eaa16c45c3
|
[
"MIT"
] | null | null | null |
bitwise/gate/IMPLY.py
|
jamesjiang52/Bitwise
|
c71f151d23034b3f9e2a939f637be0eaa16c45c3
|
[
"MIT"
] | null | null | null |
"""
The following classes are defined:
    IMPLYGate
"""
from .. import wire
from . import OR
from . import NOT
Wire = wire.Wire
class IMPLYGate:
"""Construct a new IMPLY gate.
Args:
input_1: An object of type Wire. The first input to the IMPLY gate.
input_2: An object of type Wire. The second input to the IMPLY gate.
output: An object of type Wire. The output of the IMPLY gate.
"""
def __init__(self, input_1, input_2, output):
wire_1 = Wire()
NOT.NOTGate(input_1, wire_1)
OR.ORGate2(wire_1, input_2, output)
self.input_1 = input_1
self.input_2 = input_2
self.output = output
def __str__(self):
str_ = ""
str_ += "input_1: " + str(self.input_1.value) + "\n"
str_ += "input_2: " + str(self.input_2.value) + "\n"
str_ += "output: " + str(self.output.value)
return str_
def __call__(self, *, input_1=None, input_2=None, output=None):
if input_1 is not None:
self.input_1.value = input_1
if input_2 is not None:
self.input_2.value = input_2
if output is not None:
self.output.value = output
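# A small usage sketch (not part of the original module). It uses only the Wire()
# constructor and the __call__/__str__ methods defined above, and assumes, as
# __call__ suggests, that assigning wire values propagates through the internal
# NOT and OR gates. Logically, IMPLY(a, b) == (NOT a) OR b.
if __name__ == "__main__":
    a, b, out = Wire(), Wire(), Wire()
    gate = IMPLYGate(a, b, out)
    gate(input_1=1, input_2=0)
    print(gate)  # 1 IMPLY 0 is false, so the output is expected to be 0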
| 27.181818
| 76
| 0.597826
|
b7a9d36441ef4f50311924cb9b25ddbfc8d0e477
| 12,899
|
py
|
Python
|
federatedml/nn/homo_nn/enter_point.py
|
yzjba/FATE
|
9a6d252da637b2583a0f8a51f6cb4c615850bab9
|
[
"Apache-2.0"
] | 1
|
2021-05-31T16:39:30.000Z
|
2021-05-31T16:39:30.000Z
|
federatedml/nn/homo_nn/enter_point.py
|
ErikSun2020/FATE
|
bdda535c7d8a974fc2c43102837964b7da199730
|
[
"Apache-2.0"
] | 9
|
2020-11-13T18:59:35.000Z
|
2022-02-10T02:13:58.000Z
|
federatedml/nn/homo_nn/enter_point.py
|
ErikSun2020/FATE
|
bdda535c7d8a974fc2c43102837964b7da199730
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from arch.api import session
from arch.api.utils.log_utils import LoggerFactory
from fate_flow.entity.metric import MetricType, MetricMeta, Metric
from federatedml.framework.homo.blocks import secure_mean_aggregator, loss_scatter, has_converged
from federatedml.framework.homo.blocks.base import HomoTransferBase
from federatedml.framework.homo.blocks.has_converged import HasConvergedTransVar
from federatedml.framework.homo.blocks.loss_scatter import LossScatterTransVar
from federatedml.framework.homo.blocks.secure_aggregator import SecureAggregatorTransVar
from federatedml.model_base import ModelBase
from federatedml.nn.homo_nn import nn_model
from federatedml.nn.homo_nn.nn_model import restore_nn_model
from federatedml.optim.convergence import converge_func_factory
from federatedml.param.homo_nn_param import HomoNNParam
from federatedml.util import consts
from federatedml.util.io_check import assert_io_num_rows_equal
Logger = LoggerFactory.get_logger()
MODEL_META_NAME = "HomoNNModelMeta"
MODEL_PARAM_NAME = "HomoNNModelParam"
def _build_model_dict(meta, param):
return {MODEL_META_NAME: meta, MODEL_PARAM_NAME: param}
def _extract_param(model_dict: dict):
return model_dict.get(MODEL_PARAM_NAME, None)
def _extract_meta(model_dict: dict):
return model_dict.get(MODEL_META_NAME, None)
class HomoNNBase(ModelBase):
def __init__(self, trans_var):
super().__init__()
self.model_param = HomoNNParam()
self.aggregate_iteration_num = 0
self.transfer_variable = trans_var
def _suffix(self):
return self.aggregate_iteration_num,
def _init_model(self, param: HomoNNParam):
self.param = param
self.enable_secure_aggregate = param.secure_aggregate
self.max_aggregate_iteration_num = param.max_iter
class HomoNNServer(HomoNNBase):
def __init__(self, trans_var):
super().__init__(trans_var=trans_var)
self.model = None
self.aggregator = secure_mean_aggregator.Server(self.transfer_variable.secure_aggregator_trans_var)
self.loss_scatter = loss_scatter.Server(self.transfer_variable.loss_scatter_trans_var)
self.has_converged = has_converged.Server(self.transfer_variable.has_converged_trans_var)
def _init_model(self, param: HomoNNParam):
super()._init_model(param=param)
early_stop = self.model_param.early_stop
self.converge_func = converge_func_factory(early_stop.converge_func, early_stop.eps).is_converge
self.loss_consumed = early_stop.converge_func != "weight_diff"
def callback_loss(self, iter_num, loss):
metric_meta = MetricMeta(name='train',
metric_type="LOSS",
extra_metas={
"unit_name": "iters",
})
self.callback_meta(metric_name='loss', metric_namespace='train', metric_meta=metric_meta)
self.callback_metric(metric_name='loss',
metric_namespace='train',
metric_data=[Metric(iter_num, loss)])
def _is_converged(self):
loss = self.loss_scatter.weighted_loss_mean(suffix=self._suffix())
Logger.info(f"loss at iter {self.aggregate_iteration_num}: {loss}")
self.callback_loss(self.aggregate_iteration_num, loss)
if self.loss_consumed:
is_converged = self.converge_func(loss)
else:
is_converged = self.converge_func(self.model)
self.has_converged.remote_converge_status(is_converge=is_converged, suffix=self._suffix())
return is_converged
def fit(self, data_inst):
while self.aggregate_iteration_num < self.max_aggregate_iteration_num:
self.model = self.aggregator.weighted_mean_model(suffix=self._suffix())
self.aggregator.send_aggregated_model(model=self.model, suffix=self._suffix())
if self._is_converged():
Logger.info(f"early stop at iter {self.aggregate_iteration_num}")
break
self.aggregate_iteration_num += 1
else:
Logger.warn(f"reach max iter: {self.aggregate_iteration_num}, not converged")
def save_model(self):
return self.model
class HomoNNClient(HomoNNBase):
def __init__(self, trans_var):
super().__init__(trans_var=trans_var)
self.aggregator = secure_mean_aggregator.Client(self.transfer_variable.secure_aggregator_trans_var)
self.loss_scatter = loss_scatter.Client(self.transfer_variable.loss_scatter_trans_var)
self.has_converged = has_converged.Client(self.transfer_variable.has_converged_trans_var)
self.nn_model = None
def _init_model(self, param: HomoNNParam):
super()._init_model(param=param)
self.batch_size = param.batch_size
self.aggregate_every_n_epoch = param.aggregate_every_n_epoch
self.nn_define = param.nn_define
self.config_type = param.config_type
self.optimizer = param.optimizer
self.loss = param.loss
self.metrics = param.metrics
self.encode_label = param.encode_label
self.data_converter = nn_model.get_data_converter(self.config_type)
self.model_builder = nn_model.get_nn_builder(config_type=self.config_type)
def _is_converged(self, data, epoch_degree):
metrics = self.nn_model.evaluate(data)
Logger.info(f"metrics at iter {self.aggregate_iteration_num}: {metrics}")
loss = metrics["loss"]
self.loss_scatter.send_loss(loss=(loss, epoch_degree), suffix=self._suffix())
is_converged = self.has_converged.get_converge_status(suffix=self._suffix())
return is_converged
def __build_nn_model(self, input_shape):
self.nn_model = self.model_builder(input_shape=input_shape,
nn_define=self.nn_define,
optimizer=self.optimizer,
loss=self.loss,
metrics=self.metrics)
def __build_pytorch_model(self, nn_define):
self.nn_model = self.model_builder(nn_define=nn_define,
optimizer=self.optimizer,
loss=self.loss,
metrics=self.metrics)
def fit(self, data_inst, *args):
data = self.data_converter.convert(data_inst, batch_size=self.batch_size, encode_label=self.encode_label)
if self.config_type == "pytorch":
self.__build_pytorch_model(self.nn_define)
else:
self.__build_nn_model(data.get_shape()[0])
epoch_degree = float(len(data)) * self.aggregate_every_n_epoch
while self.aggregate_iteration_num < self.max_aggregate_iteration_num:
Logger.info(f"start {self.aggregate_iteration_num}_th aggregation")
# train
self.nn_model.train(data, aggregate_every_n_epoch=self.aggregate_every_n_epoch)
            # send the local model for aggregation, then set the aggregated model locally
self.aggregator.send_weighted_model(weighted_model=self.nn_model.get_model_weights(),
weight=epoch_degree * self.aggregate_every_n_epoch,
suffix=self._suffix())
weights = self.aggregator.get_aggregated_model(suffix=self._suffix())
self.nn_model.set_model_weights(weights=weights)
# calc loss and check convergence
if self._is_converged(data, epoch_degree):
Logger.info(f"early stop at iter {self.aggregate_iteration_num}")
break
Logger.info(f"role {self.role} finish {self.aggregate_iteration_num}_th aggregation")
self.aggregate_iteration_num += 1
else:
Logger.warn(f"reach max iter: {self.aggregate_iteration_num}, not converged")
def export_model(self):
return _build_model_dict(meta=self._get_meta(), param=self._get_param())
def _get_meta(self):
from federatedml.protobuf.generated import nn_model_meta_pb2
meta_pb = nn_model_meta_pb2.NNModelMeta()
meta_pb.params.CopyFrom(self.model_param.generate_pb())
meta_pb.aggregate_iter = self.aggregate_iteration_num
return meta_pb
def _get_param(self):
from federatedml.protobuf.generated import nn_model_param_pb2
param_pb = nn_model_param_pb2.NNModelParam()
param_pb.saved_model_bytes = self.nn_model.export_model()
return param_pb
@assert_io_num_rows_equal
def predict(self, data_inst):
data = self.data_converter.convert(data_inst, batch_size=self.batch_size, encode_label=self.encode_label)
predict = self.nn_model.predict(data)
num_output_units = predict.shape[1]
threshold = self.param.predict_param.threshold
if num_output_units == 1:
kv = [(x[0], (0 if x[1][0] <= threshold else 1, x[1][0].item())) for x in zip(data.get_keys(), predict)]
pred_tbl = session.parallelize(kv, include_key=True, partition=data_inst.get_partitions())
return data_inst.join(pred_tbl,
lambda d, pred: [d.label, pred[0], pred[1], {"0": 1 - pred[1], "1": pred[1]}])
else:
kv = [(x[0], (x[1].argmax(), [float(e) for e in x[1]])) for x in zip(data.get_keys(), predict)]
pred_tbl = session.parallelize(kv, include_key=True, partition=data_inst.get_partitions())
return data_inst.join(pred_tbl,
lambda d, pred: [d.label, pred[0].item(),
pred[1][pred[0]],
{str(v): pred[1][v] for v in range(len(pred[1]))}])
def load_model(self, model_dict):
model_dict = list(model_dict["model"].values())[0]
model_obj = _extract_param(model_dict)
meta_obj = _extract_meta(model_dict)
self.model_param.restore_from_pb(meta_obj.params)
self._init_model(self.model_param)
self.aggregate_iteration_num = meta_obj.aggregate_iter
self.nn_model = restore_nn_model(self.config_type, model_obj.saved_model_bytes)
# server: Arbiter, clients: Guest and Hosts
class HomoNNDefaultTransVar(HomoTransferBase):
def __init__(self, server=(consts.ARBITER,), clients=(consts.GUEST, consts.HOST), prefix=None):
super().__init__(server=server, clients=clients, prefix=prefix)
self.secure_aggregator_trans_var = SecureAggregatorTransVar(server=server, clients=clients, prefix=self.prefix)
self.loss_scatter_trans_var = LossScatterTransVar(server=server, clients=clients, prefix=self.prefix)
self.has_converged_trans_var = HasConvergedTransVar(server=server, clients=clients, prefix=self.prefix)
class HomoNNDefaultClient(HomoNNClient):
def __init__(self):
super().__init__(trans_var=HomoNNDefaultTransVar())
class HomoNNDefaultServer(HomoNNServer):
def __init__(self):
super().__init__(trans_var=HomoNNDefaultTransVar())
# server: Arbiter, clients: Guest and Hosts
class HomoNNGuestServerTransVar(HomoNNDefaultTransVar):
def __init__(self, server=(consts.GUEST,), clients=(consts.HOST,), prefix=None):
super().__init__(server=server, clients=clients, prefix=prefix)
class HomoNNGuestServerClient(HomoNNClient):
def __init__(self):
super().__init__(trans_var=HomoNNGuestServerTransVar())
class HomoNNGuestServerServer(HomoNNServer):
def __init__(self):
super().__init__(trans_var=HomoNNGuestServerTransVar())
# server: Arbiter, clients: Hosts
class HomoNNArbiterSubmitTransVar(HomoNNDefaultTransVar):
def __init__(self, server=(consts.ARBITER,), clients=(consts.HOST,), prefix=None):
super().__init__(server=server, clients=clients, prefix=prefix)
class HomoNNArbiterSubmitClient(HomoNNClient):
def __init__(self):
super().__init__(trans_var=HomoNNArbiterSubmitTransVar())
class HomoNNArbiterSubmitServer(HomoNNServer):
def __init__(self):
super().__init__(trans_var=HomoNNArbiterSubmitTransVar())
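# A hedged sketch (not part of the original module) of how a further role layout
# could be declared by mirroring the three patterns above: choose which parties
# act as server and clients, then pair the transfer-variable class with a client
# and a server subclass. The class names below are illustrative only.
class HomoNNHostServerTransVar(HomoNNDefaultTransVar):
    def __init__(self, server=(consts.HOST,), clients=(consts.GUEST,), prefix=None):
        super().__init__(server=server, clients=clients, prefix=prefix)

class HomoNNHostServerClient(HomoNNClient):
    def __init__(self):
        super().__init__(trans_var=HomoNNHostServerTransVar())

class HomoNNHostServerServer(HomoNNServer):
    def __init__(self):
        super().__init__(trans_var=HomoNNHostServerTransVar())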
| 43.577703
| 119
| 0.685324
|
ad28fdea55418dbeba1fb767c070be73ace50e1f
| 1,675
|
py
|
Python
|
tests/tools.py
|
altvod/basic-notion
|
38da5cc6912d73fad530d37f1608b49d90c2034e
|
[
"MIT"
] | null | null | null |
tests/tools.py
|
altvod/basic-notion
|
38da5cc6912d73fad530d37f1608b49d90c2034e
|
[
"MIT"
] | null | null | null |
tests/tools.py
|
altvod/basic-notion
|
38da5cc6912d73fad530d37f1608b49d90c2034e
|
[
"MIT"
] | null | null | null |
import uuid
from typing import Type
from notion_client import Client
from basic_notion.database import NotionDatabase
from basic_notion.page import NotionPage, NotionPageList
from basic_notion.query import Query
def create_database_from_model(
sync_client: Client,
model: Type[NotionPage],
title: str,
parent_page_id: str,
) -> NotionDatabase:
database = NotionDatabase.make(
title=[title],
parent={'page_id': parent_page_id},
properties=model.schema,
)
data = sync_client.databases.create(**database.data)
created_database = NotionDatabase(data=data)
return created_database
_DB_REGISTRY: dict[Type[NotionPage], NotionDatabase] = {}
def get_database_from_model(
sync_client: Client,
model: Type[NotionPage],
parent_page_id: str,
) -> NotionDatabase:
if model in _DB_REGISTRY:
return _DB_REGISTRY[model]
title = f'Test Database {str(uuid.uuid4())}'
database = create_database_from_model(
sync_client=sync_client, model=model, title=title,
parent_page_id=parent_page_id,
)
return database
def get_id_list(sync_client: Client, list_model: Type[NotionPageList], database_id: str) -> list[str]:
# Get the model class of the item
model = list_model.item
# Get any property (to sort the query, which is required)
any_prop = next(iter(model.schema.properties.values()))
data = sync_client.databases.query(
**Query(database_id=database_id).sorts(
any_prop.sort.ascending
).serialize()
)
item_list = list_model(data=data)
return [item.id for item in item_list.items()]
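# A usage sketch (not part of the original helpers), restricted to the function
# signatures defined above. The concrete page/list models, the client and the
# parent page id are hypothetical stand-ins supplied by the surrounding test
# suite; the `.id` attribute on the created database is also an assumption.
def example_usage(
    sync_client: Client,
    page_model: Type[NotionPage],
    list_model: Type[NotionPageList],
    parent_page_id: str,
) -> list[str]:
    database = get_database_from_model(
        sync_client=sync_client, model=page_model, parent_page_id=parent_page_id,
    )
    return get_id_list(
        sync_client=sync_client, list_model=list_model,
        database_id=database.id,  # assumes NotionDatabase exposes its Notion id
    )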
| 28.87931
| 102
| 0.696716
|
a0869dc3cd73f48a8f261748811960cd6117f829
| 4,361
|
py
|
Python
|
sermons/views.py
|
mennonitengemeinde/church_site
|
ae9ef5f0f78811cecd734705339511dc0efb8340
|
[
"MIT"
] | null | null | null |
sermons/views.py
|
mennonitengemeinde/church_site
|
ae9ef5f0f78811cecd734705339511dc0efb8340
|
[
"MIT"
] | 44
|
2020-05-13T20:15:26.000Z
|
2022-03-04T02:58:58.000Z
|
sermons/views.py
|
mennonitengemeinde/church_site
|
ae9ef5f0f78811cecd734705339511dc0efb8340
|
[
"MIT"
] | 4
|
2020-06-05T17:59:52.000Z
|
2021-02-06T19:09:43.000Z
|
import logging
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.urls import reverse_lazy
from church_site.views import BaseListView, BaseDetailView, BaseCreateView, AdminListView, BaseUpdateView
from churches.models import Church
from sermons.forms import SermonCreateForm
from sermons.models import Sermon
from sermons.selectors import get_filtered_sermons, get_member_sermons
from speakers.models import Speaker
logger = logging.getLogger(__name__)
class SermonsListView(BaseListView):
page_title = 'Sermons - Mennoniten Gemeinde'
current_page = 'sermons'
model = Sermon
template_name = 'sermons/sermons-list.html'
context_object_name = 'sermons'
paginate_by = 18
def get_queryset(self):
return get_filtered_sermons(self.request.GET.get('church'), self.request.GET.get('speaker'))
def get_context_data(self, *, object_list=None, **kwargs):
context = super().get_context_data(**kwargs)
context['current_church'] = self.request.GET.get('church') if self.request.GET.get('church') else None
context['current_speaker'] = int(self.request.GET.get('speaker')) if self.request.GET.get('speaker') else None
context['churches'] = Church.objects.all()
context['speakers'] = Speaker.objects.all()
context['page_filter'] = self.get_page_filter()
return context
def get_page_filter(self):
"""keeps the filter in get request when paginating"""
if self.request.GET.get('church') and self.request.GET.get('speaker'):
return f"church={self.request.GET.get('church')}&speaker={self.request.GET.get('speaker')}"
elif self.request.GET.get('church') and not self.request.GET.get('speaker'):
return f"church={self.request.GET.get('church')}"
elif self.request.GET.get('speaker') and not self.request.GET.get('church'):
return f"speaker={self.request.GET.get('speaker')}"
class SermonsDetailView(BaseDetailView):
current_page = 'sermons'
btn_back_href = reverse_lazy('sermons:sermons-list')
model = Sermon
template_name = 'sermons/sermons-detail.html'
context_object_name = 'sermon'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['page_title'] = self.get_page_title()
# Track views
try:
context['sermon'].views += 1
context['sermon'].save()
except Exception as e:
logger.error(e)
return context
def get_page_title(self) -> str:
return f'{self.object.event.start.strftime("%b %d, %Y")} - {self.object.title}'
class SermonsAdminListView(PermissionRequiredMixin, AdminListView):
permission_required = 'sermons.view_sermon'
model = Sermon
ordering = ('-event',)
context_object_name = 'sermons'
template_name = 'sermons/sermons-admin-list.html'
page_title = 'Sermons - Admin'
current_page = 'manage'
btn_add_href = reverse_lazy('sermons:sermons-admin-create')
paginate_by = 25
def get_queryset(self):
return get_member_sermons(self.request.user, reverse_order=True)
class SermonsAdminCreateView(PermissionRequiredMixin, BaseCreateView):
permission_required = 'sermons.add_sermon'
model = Sermon
template_name = 'sermons/sermons-admin-form.html'
form_class = SermonCreateForm
success_url = reverse_lazy('sermons:sermons-admin-list')
page_title = 'New Sermon - Admin'
current_page = 'manage'
btn_back_href = reverse_lazy('sermons:sermons-admin-list')
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['user'] = self.request.user
return kwargs
class SermonAdminUpdateView(PermissionRequiredMixin, BaseUpdateView):
permission_required = 'sermons.change_sermon'
model = Sermon
template_name = 'sermons/sermons-admin-form.html'
form_class = SermonCreateForm
success_url = reverse_lazy('sermons:sermons-admin-list')
page_title = 'Update Sermon - Admin'
current_page = 'manage'
btn_back_href = reverse_lazy('sermons:sermons-admin-list')
def get_queryset(self):
return get_member_sermons(self.request.user)
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['user'] = self.request.user
return kwargs
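# A hedged sketch (not part of the original module) of the URL configuration these
# views appear to expect, based on the reverse_lazy() names used above. Only
# 'sermons-list', 'sermons-admin-list' and 'sermons-admin-create' are referenced in
# this file; the paths and the 'sermons-detail' / 'sermons-admin-update' names are
# assumptions.
#
# # sermons/urls.py
# from django.urls import path
# from . import views
#
# app_name = "sermons"
# urlpatterns = [
#     path("", views.SermonsListView.as_view(), name="sermons-list"),
#     path("<int:pk>/", views.SermonsDetailView.as_view(), name="sermons-detail"),
#     path("admin/", views.SermonsAdminListView.as_view(), name="sermons-admin-list"),
#     path("admin/new/", views.SermonsAdminCreateView.as_view(), name="sermons-admin-create"),
#     path("admin/<int:pk>/edit/", views.SermonAdminUpdateView.as_view(), name="sermons-admin-update"),
# ]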
| 37.921739
| 118
| 0.701903
|
fd4dfcca351d821e6e69588e41f9e0c5e34fc004
| 2,144
|
py
|
Python
|
tests/test_invoice.py
|
izumimatsuo/micro-billing
|
94d7acdf5515c6f0be26fe013ae9cd1936dc03af
|
[
"MIT"
] | null | null | null |
tests/test_invoice.py
|
izumimatsuo/micro-billing
|
94d7acdf5515c6f0be26fe013ae9cd1936dc03af
|
[
"MIT"
] | null | null | null |
tests/test_invoice.py
|
izumimatsuo/micro-billing
|
94d7acdf5515c6f0be26fe013ae9cd1936dc03af
|
[
"MIT"
] | null | null | null |
import pytest
def test_select_all(client):
res = client.get('/invoices')
assert res.status_code == 200
data = res.json()
assert 1 == len(data['invoices'])
def test_select_one(client):
res = client.get('/invoices/1')
assert res.status_code == 200
data = res.json()
assert data['period_start'].startswith('2021-01-01')
def test_not_found(client):
res = client.get('/invoices/5')
assert res.status_code == 404
def test_all_days(client):
res = client.get('/invoices/data')
assert res.status_code == 200
assert 4 == len(res.content.splitlines())
assert b'"name","amount","start_date"' in res.content
assert b'"Taro",980,"2021-01-01 00:00:00"' in res.content
assert b'"Jiro",1500,"2021-01-31 00:00:00"' in res.content
assert b'"Hanako",980,"2021-02-28 00:00:00"' in res.content
def test_first_day(client):
res = client.get('/invoices/data/20210401')
assert res.status_code == 200
assert 2 == len(res.content.splitlines())
assert b'"name","amount","start_date"' in res.content
assert b'"Taro",980,"2021-01-01 00:00:00"' in res.content
def test_last_day(client):
res = client.get('/invoices/data/20210430')
assert res.status_code == 200
assert 2 == len(res.content.splitlines())
assert b'"name","amount","start_date"' in res.content
assert b'"Jiro",1500,"2021-01-31 00:00:00"' in res.content
def test_leap_year(client):
res = client.get('/invoices/data/20240229')
assert res.status_code == 200
assert 2 == len(res.content.splitlines())
assert b'"name","amount","start_date"' in res.content
assert b'"Jiro",1500,"2021-01-31 00:00:00"' in res.content
def test_not_leap_year(client):
res = client.get('/invoices/data/20220228')
assert res.status_code == 200
assert 3 == len(res.content.splitlines())
assert b'"name","amount","start_date"' in res.content
assert b'"Jiro",1500,"2021-01-31 00:00:00"' in res.content
assert b'"Hanako",980,"2021-02-28 00:00:00"' in res.content
def test_bad_request(client):
res = client.get('/invoices/data/20210431')
assert res.status_code == 400
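# The tests above assume a pytest fixture named `client` whose responses expose
# requests-style attributes (.status_code, .json(), .content). A minimal
# conftest.py sketch under the assumption that the billing service is an ASGI
# app exercised through Starlette/FastAPI's TestClient; the import path of the
# app object and the fixture wiring are hypothetical, not taken from this
# repository.
#
# import pytest
# from fastapi.testclient import TestClient
# from app.main import app  # hypothetical import path
#
# @pytest.fixture
# def client():
#     return TestClient(app)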
| 31.072464
| 63
| 0.66931
|
de55a24a2f6b125910827059fd99969c3e9cfc48
| 29,096
|
py
|
Python
|
kornia/geometry/conversions.py
|
aardvarkkrill/kornia
|
e36ca3d15883a1dbbb0e7413719c0965a4b63cee
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
kornia/geometry/conversions.py
|
aardvarkkrill/kornia
|
e36ca3d15883a1dbbb0e7413719c0965a4b63cee
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
kornia/geometry/conversions.py
|
aardvarkkrill/kornia
|
e36ca3d15883a1dbbb0e7413719c0965a4b63cee
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-05-15T03:22:24.000Z
|
2021-05-15T03:22:24.000Z
|
from typing import Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from kornia.constants import pi
__all__ = [
# functional api
"rad2deg",
"deg2rad",
"pol2cart",
"cart2pol",
"convert_points_from_homogeneous",
"convert_points_to_homogeneous",
"convert_affinematrix_to_homography",
"convert_affinematrix_to_homography3d",
"angle_axis_to_rotation_matrix",
"angle_axis_to_quaternion",
"rotation_matrix_to_angle_axis",
"rotation_matrix_to_quaternion",
"quaternion_to_angle_axis",
"quaternion_to_rotation_matrix",
"quaternion_log_to_exp",
"quaternion_exp_to_log",
"denormalize_pixel_coordinates",
"normalize_pixel_coordinates",
"normalize_quaternion",
"denormalize_pixel_coordinates3d",
"normalize_pixel_coordinates3d",
]
def rad2deg(tensor: torch.Tensor) -> torch.Tensor:
r"""Function that converts angles from radians to degrees.
Args:
tensor (torch.Tensor): Tensor of arbitrary shape.
Returns:
torch.Tensor: Tensor with same shape as input.
Example:
>>> input = torch.tensor(3.1415926535) * torch.rand(1, 3, 3)
>>> output = rad2deg(input)
"""
if not isinstance(tensor, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(tensor)))
return 180. * tensor / pi.to(tensor.device).type(tensor.dtype)
def deg2rad(tensor: torch.Tensor) -> torch.Tensor:
r"""Function that converts angles from degrees to radians.
Args:
tensor (torch.Tensor): Tensor of arbitrary shape.
Returns:
torch.Tensor: tensor with same shape as input.
Examples::
>>> input = 360. * torch.rand(1, 3, 3)
>>> output = deg2rad(input)
"""
if not isinstance(tensor, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(tensor)))
return tensor * pi.to(tensor.device).type(tensor.dtype) / 180.
def pol2cart(rho: torch.Tensor, phi: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
r"""Function that converts polar coordinates to cartesian coordinates.
Args:
rho (torch.Tensor): Tensor of arbitrary shape.
phi (torch.Tensor): Tensor of same arbitrary shape.
Returns:
torch.Tensor, torch.Tensor: Tensor with same shape as input.
Example:
>>> rho = torch.rand(1, 3, 3)
>>> phi = torch.rand(1, 3, 3)
>>> x, y = pol2cart(rho, phi)
"""
if not (isinstance(rho, torch.Tensor) & isinstance(phi, torch.Tensor)):
raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format(
type(rho), type(phi)))
x = rho * torch.cos(phi)
y = rho * torch.sin(phi)
return x, y
def cart2pol(x: torch.Tensor, y: torch.Tensor, eps: float = 1e-8) -> Tuple[torch.Tensor, torch.Tensor]:
"""Function that converts cartesian coordinates to polar coordinates.
Args:
        x (torch.Tensor): Tensor of arbitrary shape.
        y (torch.Tensor): Tensor of same arbitrary shape.
        eps (float): To avoid division by zero. Default is 1e-8.
Returns:
torch.Tensor, torch.Tensor: Tensor with same shape as input.
Example:
>>> x = torch.rand(1, 3, 3)
>>> y = torch.rand(1, 3, 3)
>>> rho, phi = cart2pol(x, y)
"""
if not (isinstance(x, torch.Tensor) & isinstance(y, torch.Tensor)):
raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format(
type(x), type(y)))
rho = torch.sqrt(x**2 + y**2 + eps)
phi = torch.atan2(y, x)
return rho, phi
def convert_points_from_homogeneous(
points: torch.Tensor, eps: float = 1e-8) -> torch.Tensor:
r"""Function that converts points from homogeneous to Euclidean space.
Examples::
>>> input = torch.rand(2, 4, 3) # BxNx3
>>> output = convert_points_from_homogeneous(input) # BxNx2
"""
if not isinstance(points, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(points)))
if len(points.shape) < 2:
raise ValueError("Input must be at least a 2D tensor. Got {}".format(
points.shape))
# we check for points at infinity
z_vec: torch.Tensor = points[..., -1:]
    # set the results of division by zero/near-zero to 1.0
# follow the convention of opencv:
# https://github.com/opencv/opencv/pull/14411/files
mask: torch.Tensor = torch.abs(z_vec) > eps
scale = torch.where(mask, 1. / (z_vec + eps), torch.ones_like(z_vec))
return scale * points[..., :-1]
def convert_points_to_homogeneous(points: torch.Tensor) -> torch.Tensor:
r"""Function that converts points from Euclidean to homogeneous space.
Examples::
>>> input = torch.rand(2, 4, 3) # BxNx3
>>> output = convert_points_to_homogeneous(input) # BxNx4
"""
if not isinstance(points, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(points)))
if len(points.shape) < 2:
raise ValueError("Input must be at least a 2D tensor. Got {}".format(
points.shape))
return torch.nn.functional.pad(points, [0, 1], "constant", 1.0)
def _convert_affinematrix_to_homography_impl(A: torch.Tensor) -> torch.Tensor:
H: torch.Tensor = torch.nn.functional.pad(A, [0, 0, 0, 1], "constant", value=0.)
H[..., -1, -1] += 1.0
return H
def convert_affinematrix_to_homography(A: torch.Tensor) -> torch.Tensor:
r"""Function that converts batch of affine matrices from [Bx2x3] to [Bx3x3].
Examples::
>>> input = torch.rand(2, 2, 3) # Bx2x3
>>> output = convert_affinematrix_to_homography(input) # Bx3x3
"""
if not isinstance(A, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(A)))
if not (len(A.shape) == 3 and A.shape[-2:] == (2, 3)):
raise ValueError("Input matrix must be a Bx2x3 tensor. Got {}"
.format(A.shape))
return _convert_affinematrix_to_homography_impl(A)
def convert_affinematrix_to_homography3d(A: torch.Tensor) -> torch.Tensor:
r"""Function that converts batch of affine matrices from [Bx3x4] to [Bx4x4].
Examples::
>>> input = torch.rand(2, 3, 4) # Bx3x4
>>> output = convert_affinematrix_to_homography3d(input) # Bx4x4
"""
if not isinstance(A, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(A)))
if not (len(A.shape) == 3 and A.shape[-2:] == (3, 4)):
raise ValueError("Input matrix must be a Bx3x4 tensor. Got {}"
.format(A.shape))
return _convert_affinematrix_to_homography_impl(A)
def angle_axis_to_rotation_matrix(angle_axis: torch.Tensor) -> torch.Tensor:
r"""Convert 3d vector of axis-angle rotation to 3x3 rotation matrix
Args:
angle_axis (torch.Tensor): tensor of 3d vector of axis-angle rotations.
Returns:
torch.Tensor: tensor of 3x3 rotation matrices.
Shape:
- Input: :math:`(N, 3)`
- Output: :math:`(N, 3, 3)`
Example:
>>> input = torch.rand(1, 3) # Nx3
>>> output = angle_axis_to_rotation_matrix(input) # Nx3x3
"""
if not isinstance(angle_axis, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(angle_axis)))
if not angle_axis.shape[-1] == 3:
raise ValueError(
"Input size must be a (*, 3) tensor. Got {}".format(
angle_axis.shape))
def _compute_rotation_matrix(angle_axis, theta2, eps=1e-6):
# We want to be careful to only evaluate the square root if the
# norm of the angle_axis vector is greater than zero. Otherwise
# we get a division by zero.
k_one = 1.0
theta = torch.sqrt(theta2)
wxyz = angle_axis / (theta + eps)
wx, wy, wz = torch.chunk(wxyz, 3, dim=1)
cos_theta = torch.cos(theta)
sin_theta = torch.sin(theta)
r00 = cos_theta + wx * wx * (k_one - cos_theta)
r10 = wz * sin_theta + wx * wy * (k_one - cos_theta)
r20 = -wy * sin_theta + wx * wz * (k_one - cos_theta)
r01 = wx * wy * (k_one - cos_theta) - wz * sin_theta
r11 = cos_theta + wy * wy * (k_one - cos_theta)
r21 = wx * sin_theta + wy * wz * (k_one - cos_theta)
r02 = wy * sin_theta + wx * wz * (k_one - cos_theta)
r12 = -wx * sin_theta + wy * wz * (k_one - cos_theta)
r22 = cos_theta + wz * wz * (k_one - cos_theta)
rotation_matrix = torch.cat(
[r00, r01, r02, r10, r11, r12, r20, r21, r22], dim=1)
return rotation_matrix.view(-1, 3, 3)
def _compute_rotation_matrix_taylor(angle_axis):
rx, ry, rz = torch.chunk(angle_axis, 3, dim=1)
k_one = torch.ones_like(rx)
rotation_matrix = torch.cat(
[k_one, -rz, ry, rz, k_one, -rx, -ry, rx, k_one], dim=1)
return rotation_matrix.view(-1, 3, 3)
# stolen from ceres/rotation.h
_angle_axis = torch.unsqueeze(angle_axis, dim=1)
theta2 = torch.matmul(_angle_axis, _angle_axis.transpose(1, 2))
theta2 = torch.squeeze(theta2, dim=1)
# compute rotation matrices
rotation_matrix_normal = _compute_rotation_matrix(angle_axis, theta2)
rotation_matrix_taylor = _compute_rotation_matrix_taylor(angle_axis)
# create mask to handle both cases
eps = 1e-6
mask = (theta2 > eps).view(-1, 1, 1).to(theta2.device)
mask_pos = (mask).type_as(theta2)
mask_neg = (mask == False).type_as(theta2) # noqa
# create output pose matrix
batch_size = angle_axis.shape[0]
rotation_matrix = torch.eye(3).to(angle_axis.device).type_as(angle_axis)
rotation_matrix = rotation_matrix.view(1, 3, 3).repeat(batch_size, 1, 1)
# fill output matrix with masked values
rotation_matrix[..., :3, :3] = \
mask_pos * rotation_matrix_normal + mask_neg * rotation_matrix_taylor
return rotation_matrix # Nx3x3
def rotation_matrix_to_angle_axis(
rotation_matrix: torch.Tensor) -> torch.Tensor:
r"""Convert 3x3 rotation matrix to Rodrigues vector.
Args:
rotation_matrix (torch.Tensor): rotation matrix.
Returns:
torch.Tensor: Rodrigues vector transformation.
Shape:
- Input: :math:`(N, 3, 3)`
- Output: :math:`(N, 3)`
Example:
>>> input = torch.rand(2, 3, 3) # Nx3x3
>>> output = rotation_matrix_to_angle_axis(input) # Nx3
"""
if not isinstance(rotation_matrix, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(rotation_matrix)))
if not rotation_matrix.shape[-2:] == (3, 3):
raise ValueError(
"Input size must be a (*, 3, 3) tensor. Got {}".format(
rotation_matrix.shape))
quaternion: torch.Tensor = rotation_matrix_to_quaternion(rotation_matrix)
return quaternion_to_angle_axis(quaternion)
def rotation_matrix_to_quaternion(
rotation_matrix: torch.Tensor,
eps: float = 1e-8) -> torch.Tensor:
r"""Convert 3x3 rotation matrix to 4d quaternion vector.
The quaternion vector has components in (x, y, z, w) format.
Args:
rotation_matrix (torch.Tensor): the rotation matrix to convert.
eps (float): small value to avoid zero division. Default: 1e-8.
Return:
torch.Tensor: the rotation in quaternion.
Shape:
- Input: :math:`(*, 3, 3)`
- Output: :math:`(*, 4)`
Example:
>>> input = torch.rand(4, 3, 3) # Nx3x3
>>> output = rotation_matrix_to_quaternion(input) # Nx4
"""
if not isinstance(rotation_matrix, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(rotation_matrix)))
if not rotation_matrix.shape[-2:] == (3, 3):
raise ValueError(
"Input size must be a (*, 3, 3) tensor. Got {}".format(
rotation_matrix.shape))
def safe_zero_division(numerator: torch.Tensor,
denominator: torch.Tensor) -> torch.Tensor:
eps: float = torch.finfo(numerator.dtype).tiny # type: ignore
return numerator / torch.clamp(denominator, min=eps)
rotation_matrix_vec: torch.Tensor = rotation_matrix.view(
*rotation_matrix.shape[:-2], 9)
m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.chunk(
rotation_matrix_vec, chunks=9, dim=-1)
trace: torch.Tensor = m00 + m11 + m22
def trace_positive_cond():
sq = torch.sqrt(trace + 1.0) * 2. # sq = 4 * qw.
qw = 0.25 * sq
qx = safe_zero_division(m21 - m12, sq)
qy = safe_zero_division(m02 - m20, sq)
qz = safe_zero_division(m10 - m01, sq)
return torch.cat([qx, qy, qz, qw], dim=-1)
def cond_1():
sq = torch.sqrt(1.0 + m00 - m11 - m22 + eps) * 2. # sq = 4 * qx.
qw = safe_zero_division(m21 - m12, sq)
qx = 0.25 * sq
qy = safe_zero_division(m01 + m10, sq)
qz = safe_zero_division(m02 + m20, sq)
return torch.cat([qx, qy, qz, qw], dim=-1)
def cond_2():
sq = torch.sqrt(1.0 + m11 - m00 - m22 + eps) * 2. # sq = 4 * qy.
qw = safe_zero_division(m02 - m20, sq)
qx = safe_zero_division(m01 + m10, sq)
qy = 0.25 * sq
qz = safe_zero_division(m12 + m21, sq)
return torch.cat([qx, qy, qz, qw], dim=-1)
def cond_3():
sq = torch.sqrt(1.0 + m22 - m00 - m11 + eps) * 2. # sq = 4 * qz.
qw = safe_zero_division(m10 - m01, sq)
qx = safe_zero_division(m02 + m20, sq)
qy = safe_zero_division(m12 + m21, sq)
qz = 0.25 * sq
return torch.cat([qx, qy, qz, qw], dim=-1)
where_2 = torch.where(m11 > m22, cond_2(), cond_3())
where_1 = torch.where(
(m00 > m11) & (m00 > m22), cond_1(), where_2)
quaternion: torch.Tensor = torch.where(
trace > 0., trace_positive_cond(), where_1)
return quaternion
def normalize_quaternion(quaternion: torch.Tensor,
eps: float = 1e-12) -> torch.Tensor:
r"""Normalizes a quaternion.
The quaternion should be in (x, y, z, w) format.
Args:
quaternion (torch.Tensor): a tensor containing a quaternion to be
normalized. The tensor can be of shape :math:`(*, 4)`.
eps (Optional[bool]): small value to avoid division by zero.
Default: 1e-12.
Return:
torch.Tensor: the normalized quaternion of shape :math:`(*, 4)`.
Example:
>>> quaternion = torch.tensor([1., 0., 1., 0.])
>>> normalize_quaternion(quaternion)
tensor([0.7071, 0.0000, 0.7071, 0.0000])
"""
if not isinstance(quaternion, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(quaternion)))
if not quaternion.shape[-1] == 4:
raise ValueError(
"Input must be a tensor of shape (*, 4). Got {}".format(
quaternion.shape))
return F.normalize(quaternion, p=2, dim=-1, eps=eps)
# based on:
# https://github.com/matthew-brett/transforms3d/blob/8965c48401d9e8e66b6a8c37c65f2fc200a076fa/transforms3d/quaternions.py#L101
# https://github.com/tensorflow/graphics/blob/master/tensorflow_graphics/geometry/transformation/rotation_matrix_3d.py#L247
def quaternion_to_rotation_matrix(quaternion: torch.Tensor) -> torch.Tensor:
r"""Converts a quaternion to a rotation matrix.
The quaternion should be in (x, y, z, w) format.
Args:
quaternion (torch.Tensor): a tensor containing a quaternion to be
converted. The tensor can be of shape :math:`(*, 4)`.
Return:
torch.Tensor: the rotation matrix of shape :math:`(*, 3, 3)`.
Example:
>>> quaternion = torch.tensor([0., 0., 1., 0.])
>>> quaternion_to_rotation_matrix(quaternion)
tensor([[-1., 0., 0.],
[ 0., -1., 0.],
[ 0., 0., 1.]])
"""
if not isinstance(quaternion, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(quaternion)))
if not quaternion.shape[-1] == 4:
raise ValueError(
"Input must be a tensor of shape (*, 4). Got {}".format(
quaternion.shape))
# normalize the input quaternion
quaternion_norm: torch.Tensor = normalize_quaternion(quaternion)
# unpack the normalized quaternion components
x, y, z, w = torch.chunk(quaternion_norm, chunks=4, dim=-1)
# compute the actual conversion
tx: torch.Tensor = 2.0 * x
ty: torch.Tensor = 2.0 * y
tz: torch.Tensor = 2.0 * z
twx: torch.Tensor = tx * w
twy: torch.Tensor = ty * w
twz: torch.Tensor = tz * w
txx: torch.Tensor = tx * x
txy: torch.Tensor = ty * x
txz: torch.Tensor = tz * x
tyy: torch.Tensor = ty * y
tyz: torch.Tensor = tz * y
tzz: torch.Tensor = tz * z
one: torch.Tensor = torch.tensor(1.)
matrix: torch.Tensor = torch.stack([
one - (tyy + tzz), txy - twz, txz + twy,
txy + twz, one - (txx + tzz), tyz - twx,
txz - twy, tyz + twx, one - (txx + tyy)
], dim=-1).view(-1, 3, 3)
if len(quaternion.shape) == 1:
matrix = torch.squeeze(matrix, dim=0)
return matrix
def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor:
"""Convert quaternion vector to angle axis of rotation.
The quaternion should be in (x, y, z, w) format.
Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h
Args:
quaternion (torch.Tensor): tensor with quaternions.
Return:
torch.Tensor: tensor with angle axis of rotation.
Shape:
- Input: :math:`(*, 4)` where `*` means, any number of dimensions
- Output: :math:`(*, 3)`
Example:
>>> quaternion = torch.rand(2, 4) # Nx4
>>> angle_axis = quaternion_to_angle_axis(quaternion) # Nx3
"""
if not torch.is_tensor(quaternion):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(quaternion)))
if not quaternion.shape[-1] == 4:
raise ValueError(
"Input must be a tensor of shape Nx4 or 4. Got {}".format(
quaternion.shape))
# unpack input and compute conversion
q1: torch.Tensor = quaternion[..., 1]
q2: torch.Tensor = quaternion[..., 2]
q3: torch.Tensor = quaternion[..., 3]
sin_squared_theta: torch.Tensor = q1 * q1 + q2 * q2 + q3 * q3
sin_theta: torch.Tensor = torch.sqrt(sin_squared_theta)
cos_theta: torch.Tensor = quaternion[..., 0]
two_theta: torch.Tensor = 2.0 * torch.where(
cos_theta < 0.0, torch.atan2(-sin_theta, -cos_theta),
torch.atan2(sin_theta, cos_theta))
k_pos: torch.Tensor = two_theta / sin_theta
k_neg: torch.Tensor = 2.0 * torch.ones_like(sin_theta)
k: torch.Tensor = torch.where(sin_squared_theta > 0.0, k_pos, k_neg)
angle_axis: torch.Tensor = torch.zeros_like(quaternion)[..., :3]
angle_axis[..., 0] += q1 * k
angle_axis[..., 1] += q2 * k
angle_axis[..., 2] += q3 * k
return angle_axis
def quaternion_log_to_exp(quaternion: torch.Tensor,
eps: float = 1e-8) -> torch.Tensor:
r"""Applies exponential map to log quaternion.
The quaternion should be in (x, y, z, w) format.
Args:
quaternion (torch.Tensor): a tensor containing a quaternion to be
converted. The tensor can be of shape :math:`(*, 3)`.
Return:
torch.Tensor: the quaternion exponential map of shape :math:`(*, 4)`.
Example:
>>> quaternion = torch.tensor([0., 0., 0.])
>>> quaternion_log_to_exp(quaternion)
tensor([0., 0., 0., 1.])
"""
if not isinstance(quaternion, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(quaternion)))
if not quaternion.shape[-1] == 3:
raise ValueError(
"Input must be a tensor of shape (*, 3). Got {}".format(
quaternion.shape))
# compute quaternion norm
norm_q: torch.Tensor = torch.norm(
quaternion, p=2, dim=-1, keepdim=True).clamp(min=eps)
# compute scalar and vector
quaternion_vector: torch.Tensor = quaternion * torch.sin(norm_q) / norm_q
quaternion_scalar: torch.Tensor = torch.cos(norm_q)
# compose quaternion and return
quaternion_exp: torch.Tensor = torch.cat(
[quaternion_vector, quaternion_scalar], dim=-1)
return quaternion_exp
def quaternion_exp_to_log(quaternion: torch.Tensor,
eps: float = 1e-8) -> torch.Tensor:
r"""Applies the log map to a quaternion.
The quaternion should be in (x, y, z, w) format.
Args:
quaternion (torch.Tensor): a tensor containing a quaternion to be
converted. The tensor can be of shape :math:`(*, 4)`.
Return:
torch.Tensor: the quaternion log map of shape :math:`(*, 3)`.
Example:
>>> quaternion = torch.tensor([0., 0., 0., 1.])
>>> quaternion_exp_to_log(quaternion)
tensor([0., 0., 0.])
"""
if not isinstance(quaternion, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(quaternion)))
if not quaternion.shape[-1] == 4:
raise ValueError(
"Input must be a tensor of shape (*, 4). Got {}".format(
quaternion.shape))
# unpack quaternion vector and scalar
quaternion_vector: torch.Tensor = quaternion[..., 0:3]
quaternion_scalar: torch.Tensor = quaternion[..., 3:4]
# compute quaternion norm
norm_q: torch.Tensor = torch.norm(
quaternion_vector, p=2, dim=-1, keepdim=True).clamp(min=eps)
# apply log map
quaternion_log: torch.Tensor = quaternion_vector * torch.acos(
torch.clamp(quaternion_scalar, min=-1.0, max=1.0)) / norm_q
return quaternion_log
# based on:
# https://github.com/facebookresearch/QuaterNet/blob/master/common/quaternion.py#L138
def angle_axis_to_quaternion(angle_axis: torch.Tensor) -> torch.Tensor:
r"""Convert an angle axis to a quaternion.
The quaternion vector has components in (x, y, z, w) format.
Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h
Args:
angle_axis (torch.Tensor): tensor with angle axis.
Return:
torch.Tensor: tensor with quaternion.
Shape:
- Input: :math:`(*, 3)` where `*` means, any number of dimensions
- Output: :math:`(*, 4)`
Example:
>>> angle_axis = torch.rand(2, 3) # Nx3
>>> quaternion = angle_axis_to_quaternion(angle_axis) # Nx4
"""
if not torch.is_tensor(angle_axis):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(angle_axis)))
if not angle_axis.shape[-1] == 3:
raise ValueError(
"Input must be a tensor of shape Nx3 or 3. Got {}".format(
angle_axis.shape))
# unpack input and compute conversion
a0: torch.Tensor = angle_axis[..., 0:1]
a1: torch.Tensor = angle_axis[..., 1:2]
a2: torch.Tensor = angle_axis[..., 2:3]
theta_squared: torch.Tensor = a0 * a0 + a1 * a1 + a2 * a2
theta: torch.Tensor = torch.sqrt(theta_squared)
half_theta: torch.Tensor = theta * 0.5
mask: torch.Tensor = theta_squared > 0.0
ones: torch.Tensor = torch.ones_like(half_theta)
k_neg: torch.Tensor = 0.5 * ones
k_pos: torch.Tensor = torch.sin(half_theta) / theta
k: torch.Tensor = torch.where(mask, k_pos, k_neg)
w: torch.Tensor = torch.where(mask, torch.cos(half_theta), ones)
quaternion: torch.Tensor = torch.zeros_like(angle_axis)
quaternion[..., 0:1] += a0 * k
quaternion[..., 1:2] += a1 * k
quaternion[..., 2:3] += a2 * k
return torch.cat([w, quaternion], dim=-1)
# based on:
# https://github.com/ClementPinard/SfmLearner-Pytorch/blob/master/inverse_warp.py#L65-L71
def normalize_pixel_coordinates(
pixel_coordinates: torch.Tensor,
height: int,
width: int,
eps: float = 1e-8) -> torch.Tensor:
r"""Normalize pixel coordinates between -1 and 1.
Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1).
Args:
pixel_coordinates (torch.Tensor): the grid with pixel coordinates.
Shape can be :math:`(*, 2)`.
width (int): the maximum width in the x-axis.
height (int): the maximum height in the y-axis.
eps (float): safe division by zero. (default 1e-8).
Return:
torch.Tensor: the normalized pixel coordinates.
"""
if pixel_coordinates.shape[-1] != 2:
raise ValueError("Input pixel_coordinates must be of shape (*, 2). "
"Got {}".format(pixel_coordinates.shape))
# compute normalization factor
hw: torch.Tensor = torch.stack([
torch.tensor(width, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype),
torch.tensor(height, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype)
])
factor: torch.Tensor = torch.tensor(
2., device=pixel_coordinates.device, dtype=pixel_coordinates.dtype) / (hw - 1).clamp(eps)
return factor * pixel_coordinates - 1
def denormalize_pixel_coordinates(
pixel_coordinates: torch.Tensor,
height: int,
width: int,
eps: float = 1e-8) -> torch.Tensor:
r"""Denormalize pixel coordinates.
The input is assumed to be -1 if on extreme left, 1 if on
extreme right (x = w-1).
Args:
pixel_coordinates (torch.Tensor): the normalized grid coordinates.
Shape can be :math:`(*, 2)`.
width (int): the maximum width in the x-axis.
height (int): the maximum height in the y-axis.
eps (float): safe division by zero. (default 1e-8).
Return:
torch.Tensor: the denormalized pixel coordinates.
"""
if pixel_coordinates.shape[-1] != 2:
raise ValueError("Input pixel_coordinates must be of shape (*, 2). "
"Got {}".format(pixel_coordinates.shape))
# compute normalization factor
hw: torch.Tensor = torch.stack([
torch.tensor(width), torch.tensor(height)
]).to(pixel_coordinates.device).to(pixel_coordinates.dtype)
factor: torch.Tensor = torch.tensor(2.) / (hw - 1).clamp(eps)
return torch.tensor(1.) / factor * (pixel_coordinates + 1)
def normalize_pixel_coordinates3d(
pixel_coordinates: torch.Tensor,
depth: int,
height: int,
width: int,
eps: float = 1e-8) -> torch.Tensor:
r"""Normalize pixel coordinates between -1 and 1.
Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1).
Args:
pixel_coordinates (torch.Tensor): the grid with pixel coordinates.
Shape can be :math:`(*, 3)`.
depth (int): the maximum depth in the z-axis.
height (int): the maximum height in the y-axis.
width (int): the maximum width in the x-axis.
eps (float): safe division by zero. (default 1e-8).
Return:
torch.Tensor: the normalized pixel coordinates.
"""
if pixel_coordinates.shape[-1] != 3:
raise ValueError("Input pixel_coordinates must be of shape (*, 3). "
"Got {}".format(pixel_coordinates.shape))
# compute normalization factor
dhw: torch.Tensor = torch.stack([
torch.tensor(depth), torch.tensor(width), torch.tensor(height)
]).to(pixel_coordinates.device).to(pixel_coordinates.dtype)
factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps)
return factor * pixel_coordinates - 1
def denormalize_pixel_coordinates3d(
pixel_coordinates: torch.Tensor,
depth: int,
height: int,
width: int,
eps: float = 1e-8) -> torch.Tensor:
r"""Denormalize pixel coordinates.
The input is assumed to be -1 if on extreme left, 1 if on
extreme right (x = w-1).
Args:
pixel_coordinates (torch.Tensor): the normalized grid coordinates.
Shape can be :math:`(*, 3)`.
        depth (int): the maximum depth in the z-axis.
height (int): the maximum height in the y-axis.
width (int): the maximum width in the x-axis.
eps (float): safe division by zero. (default 1e-8).
Return:
torch.Tensor: the denormalized pixel coordinates.
"""
if pixel_coordinates.shape[-1] != 3:
raise ValueError("Input pixel_coordinates must be of shape (*, 3). "
"Got {}".format(pixel_coordinates.shape))
# compute normalization factor
dhw: torch.Tensor = torch.stack([
torch.tensor(depth), torch.tensor(width), torch.tensor(height)
]).to(pixel_coordinates.device).to(pixel_coordinates.dtype)
factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps)
return torch.tensor(1.) / factor * (pixel_coordinates + 1)
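# A quick consistency sketch (not part of the original module), exercising two of
# the inverse pairs defined above; the tolerance is an arbitrary choice.
def _round_trip_check() -> None:
    q = normalize_quaternion(torch.rand(4, 4))  # random unit quaternions, (x, y, z, w)
    assert torch.allclose(q, quaternion_log_to_exp(quaternion_exp_to_log(q)), atol=1e-4)
    angles = 360. * torch.rand(2, 3)
    assert torch.allclose(angles, rad2deg(deg2rad(angles)), atol=1e-4)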
| 35.353584
| 126
| 0.618676
|
c38be14607dbe25d1c1b7706a5871dc05ea34fff
| 1,536
|
py
|
Python
|
Qualification Round/cubic-ufo.py
|
enigma-pattern/GoogleCodeJam-2018
|
4b4f6b5bda68eccd92f0b31cde693462aba1282f
|
[
"MIT"
] | 42
|
2018-04-08T02:33:46.000Z
|
2021-05-29T10:19:33.000Z
|
Qualification Round/cubic-ufo.py
|
enigma-pattern/GoogleCodeJam-2018
|
4b4f6b5bda68eccd92f0b31cde693462aba1282f
|
[
"MIT"
] | 1
|
2021-05-05T10:32:33.000Z
|
2021-05-05T12:05:53.000Z
|
Qualification Round/cubic-ufo.py
|
enigma-pattern/GoogleCodeJam-2018
|
4b4f6b5bda68eccd92f0b31cde693462aba1282f
|
[
"MIT"
] | 16
|
2018-04-15T18:56:25.000Z
|
2021-07-22T22:38:03.000Z
|
# Copyright (c) 2018 kamyu. All rights reserved.
#
# Google Code Jam 2018 Qualification Round - Problem D. Cubic UFO
# https://codingcompetitions.withgoogle.com/codejam/round/00000000000000cb/00000000000079cc
#
# Time: O(1)
# Space: O(1)
#
import math
def matrix_multi(A, B):
result = [[0.0 for _ in xrange(len(B[0]))] for _ in xrange(len(A))]
for i in xrange(len(A)):
for k in xrange(len(A[0])):
if A[i][k] == 0.0:
continue
for j in xrange(len(B[0])):
result[i][j] += A[i][k] * B[k][j]
return result
def rotate_y(matrix, cosx):
sinx = math.sqrt(1.0-cosx**2)
Ry = [[cosx, 0.0, -sinx],
[ 0.0, 1.0, 0.0],
[sinx, 0.0, cosx]]
return matrix_multi(matrix, Ry)
def rotate_x(matrix, cosx):
sinx = math.sqrt(1.0-cosx**2)
Rx = [[1.0, 0.0, 0.0],
[0.0, cosx, sinx],
[0.0, -sinx, cosx]]
return matrix_multi(matrix, Rx)
def cubic_ufo():
A = float(input())
matrix = [[0.5, 0.0, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.0, 0.5]]
matrix = rotate_y(matrix, 1.0/math.sqrt(2)) # rotate y by 45 degrees
    # rotate about x so that the shadow on the xz plane becomes a rectangle plus two triangles
# => A = sqrt(2)*sinx + cosx
cosx = (A + math.sqrt(2*(3-A**2)))/3
matrix = rotate_x(matrix, cosx)
return matrix
for case in xrange(input()):
print 'Case #%d:' % (case+1)
for center in cubic_ufo():
print " ".join(map(str, center))
| 28.981132
| 101
| 0.549479
|
a6ed2451a6bb93221c0d7fd9d85d1193c2f2a2a6
| 656
|
py
|
Python
|
setup.py
|
ZephireNZ/PyFlick
|
3c4fae7380bcfcea9b1e0cb02444a07626261998
|
[
"MIT"
] | null | null | null |
setup.py
|
ZephireNZ/PyFlick
|
3c4fae7380bcfcea9b1e0cb02444a07626261998
|
[
"MIT"
] | null | null | null |
setup.py
|
ZephireNZ/PyFlick
|
3c4fae7380bcfcea9b1e0cb02444a07626261998
|
[
"MIT"
] | 2
|
2021-06-15T14:17:07.000Z
|
2021-11-18T07:16:04.000Z
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="PyFlick",
version="0.0.2",
author="ZephireNZ",
author_email="brynley+pypi@zephire.nz",
description="Python API For Flick Electric in New Zealand",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/ZephireNZ/PyFlick",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
| 29.818182
| 63
| 0.666159
|
0441301d54a2779936a0f432eaf25c8a2c0b8201
| 3,161
|
py
|
Python
|
modoboa/lib/tests/__init__.py
|
HarshCasper/modoboa
|
a00baa0593107992f545ee3e89cd4346b9615a96
|
[
"0BSD"
] | 1,602
|
2016-12-15T14:25:34.000Z
|
2022-03-31T16:49:25.000Z
|
modoboa/lib/tests/__init__.py
|
sebageek/modoboa
|
57f5d57ea60a57e8dcac970085dfc07082481fc6
|
[
"0BSD"
] | 1,290
|
2016-12-14T15:39:05.000Z
|
2022-03-31T13:49:09.000Z
|
modoboa/lib/tests/__init__.py
|
sebageek/modoboa
|
57f5d57ea60a57e8dcac970085dfc07082481fc6
|
[
"0BSD"
] | 272
|
2016-12-22T11:58:18.000Z
|
2022-03-17T15:57:24.000Z
|
"""Testing utilities."""
import socket
from django.core import management
from django.test import TestCase
from rest_framework.authtoken.models import Token
from rest_framework.test import APITestCase
from modoboa.core import models as core_models
from .. import sysutils
try:
s = socket.create_connection(("127.0.0.1", 25))
s.close()
NO_SMTP = False
except socket.error:
NO_SMTP = True
try:
import ldap # noqa
NO_LDAP = False
except ImportError:
NO_LDAP = True
class ParametersMixin(object):
"""Add tools to manage parameters."""
@classmethod
def setUpTestData(cls): # noqa
"""Set LocalConfig instance."""
super(ParametersMixin, cls).setUpTestData()
cls.localconfig = core_models.LocalConfig.objects.first()
def set_global_parameter(self, name, value, app=None):
"""Set global parameter for the given app."""
if app is None:
app = sysutils.guess_extension_name()
self.localconfig.parameters.set_value(name, value, app=app)
self.localconfig.save()
def set_global_parameters(self, parameters, app=None):
"""Set/update global parameters for the given app."""
if app is None:
app = sysutils.guess_extension_name()
self.localconfig.parameters.set_values(parameters, app=app)
self.localconfig.save()
class ModoTestCase(ParametersMixin, TestCase):
"""All test cases must inherit from this one."""
@classmethod
def setUpTestData(cls): # noqa
"""Create a default user."""
super(ModoTestCase, cls).setUpTestData()
management.call_command("load_initial_data")
def setUp(self, username="admin", password="password"):
"""Initiate test context."""
self.assertEqual(
self.client.login(username=username, password=password), True)
def ajax_request(self, method, url, params=None, status=200):
if params is None:
params = {}
response = getattr(self.client, method)(
url, params, HTTP_X_REQUESTED_WITH="XMLHttpRequest")
self.assertEqual(response.status_code, status)
return response.json()
def ajax_post(self, *args, **kwargs):
return self.ajax_request("post", *args, **kwargs)
def ajax_put(self, *args, **kwargs):
return self.ajax_request("put", *args, **kwargs)
def ajax_delete(self, *args, **kwargs):
return self.ajax_request("delete", *args, **kwargs)
def ajax_get(self, *args, **kwargs):
return self.ajax_request("get", *args, **kwargs)
class ModoAPITestCase(ParametersMixin, APITestCase):
"""All test cases must inherit from this one."""
@classmethod
def setUpTestData(cls): # noqa
"""Create a default user."""
super(ModoAPITestCase, cls).setUpTestData()
management.call_command("load_initial_data")
cls.token = Token.objects.create(
user=core_models.User.objects.get(username="admin"))
def setUp(self):
"""Setup."""
super(ModoAPITestCase, self).setUp()
self.client.credentials(HTTP_AUTHORIZATION="Token " + self.token.key)
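# --- Hedged usage sketch (added comment, not part of the original module) ---
# A minimal illustration of how a concrete test case might build on these
# helpers; the URL and parameter names below are hypothetical and only show
# the intent of set_global_parameter and the ajax_* wrappers:
#
#   class ExampleTestCase(ModoTestCase):
#       def test_example(self):
#           self.set_global_parameter("enable_feature", True, app="core")
#           data = self.ajax_get("/some/hypothetical/url/")
#           self.assertIn("status", data)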
| 30.990196
| 77
| 0.660234
|
af9e60303c9cf04eeba87f7d92b000ceca27b066
| 4,384
|
py
|
Python
|
Question_semaseg/answers/bin_loss_pytorch.py
|
skn047/DeepLearningMugenKnock
|
73d2b903816b380d56020c8336041883bc0d131c
|
[
"MIT"
] | 10
|
2021-12-17T06:07:25.000Z
|
2022-03-25T13:50:05.000Z
|
Question_semaseg/answers/bin_loss_pytorch.py
|
skn047/DeepLearningMugenKnock
|
73d2b903816b380d56020c8336041883bc0d131c
|
[
"MIT"
] | null | null | null |
Question_semaseg/answers/bin_loss_pytorch.py
|
skn047/DeepLearningMugenKnock
|
73d2b903816b380d56020c8336041883bc0d131c
|
[
"MIT"
] | 2
|
2022-03-15T02:42:09.000Z
|
2022-03-30T23:19:55.000Z
|
import torch
import torch.nn.functional as F
import argparse
import cv2
import numpy as np
from glob import glob
import matplotlib.pyplot as plt
num_classes = 2
img_height, img_width = 64, 64#572, 572
out_height, out_width = 64, 64#388, 388
GPU = False
torch.manual_seed(0)
class Mynet(torch.nn.Module):
def __init__(self):
super(Mynet, self).__init__()
enc1 = []
for i in range(6):
f = 3 if i == 0 else 32
enc1.append(torch.nn.Conv2d(f, 32, kernel_size=3, padding=1, stride=1))
enc1.append(torch.nn.BatchNorm2d(32))
enc1.append(torch.nn.ReLU())
self.enc1 = torch.nn.Sequential(*enc1)
self.out = torch.nn.Conv2d(32, 1, kernel_size=1, padding=0, stride=1)
def forward(self, x):
# block conv1
x = self.enc1(x)
x = self.out(x)
return x
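# --- Hedged shape check (added comment, not part of the original script) ---
# The 3x3 convolutions use padding=1 and stride=1, so the spatial size is kept,
# and the final 1x1 convolution reduces the channels to a single logit map:
#   x = torch.zeros(1, 3, img_height, img_width)
#   Mynet()(x).shape  # -> torch.Size([1, 1, 64, 64])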
CLS = {'akahara': [0,0,128],
'madara': [0,128,0]}
# get train data
def data_load(path, hf=False, vf=False):
xs = []
ts = []
paths = []
for dir_path in glob(path + '/*'):
for path in glob(dir_path + '/*'):
x = cv2.imread(path)
x = cv2.resize(x, (img_width, img_height)).astype(np.float32)
x /= 255.
x = x[...,::-1]
xs.append(x)
gt_path = path.replace("images", "seg_images").replace(".jpg", ".png")
gt = cv2.imread(gt_path)
gt = cv2.resize(gt, (out_width, out_height), interpolation=cv2.INTER_NEAREST)
            t = np.zeros((out_height, out_width, 1), dtype=int)
ind = (gt[...,0] > 0) + (gt[..., 1] > 0) + (gt[...,2] > 0)
t[ind] = 1
#print(gt_path)
#import matplotlib.pyplot as plt
#plt.imshow(t, cmap='gray')
#plt.show()
ts.append(t)
paths.append(path)
if hf:
xs.append(x[:, ::-1])
ts.append(t[:, ::-1])
paths.append(path)
if vf:
xs.append(x[::-1])
ts.append(t[::-1])
paths.append(path)
if hf and vf:
xs.append(x[::-1, ::-1])
ts.append(t[::-1, ::-1])
paths.append(path)
xs = np.array(xs)
ts = np.array(ts)
xs = xs.transpose(0,3,1,2)
return xs, ts, paths
# train
def train():
# GPU
device = torch.device("cuda" if GPU else "cpu")
# model
model = Mynet().to(device)
opt = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
model.train()
xs, ts, paths = data_load('../Dataset/train/images/', hf=True, vf=True)
# training
mb = 4
mbi = 0
train_ind = np.arange(len(xs))
np.random.seed(0)
np.random.shuffle(train_ind)
for i in range(500):
if mbi + mb > len(xs):
mb_ind = train_ind[mbi:]
np.random.shuffle(train_ind)
mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
mbi = mb - (len(xs) - mbi)
else:
mb_ind = train_ind[mbi: mbi+mb]
mbi += mb
x = torch.tensor(xs[mb_ind], dtype=torch.float).to(device)
t = torch.tensor(ts[mb_ind], dtype=torch.float).to(device)
opt.zero_grad()
y = model(x)
y = y.permute(0,2,3,1).contiguous()
y = torch.sigmoid(y)
loss = torch.nn.BCELoss()(y, t)
loss.backward()
opt.step()
#pred = y.argmax(dim=1, keepdim=True)
        acc = (y > 0.5).float().eq(t.view_as(y)).sum().item() / (mb * out_height * out_width)
print("iter >>", i+1, ',loss >>', loss.item(), ',accuracy >>', acc)
torch.save(model.state_dict(), 'cnn.pt')
def arg_parse():
parser = argparse.ArgumentParser(description='CNN implemented with Keras')
parser.add_argument('--train', dest='train', action='store_true')
parser.add_argument('--test', dest='test', action='store_true')
args = parser.parse_args()
return args
# main
if __name__ == '__main__':
args = arg_parse()
if args.train:
train()
#if args.test:
# test()
if not (args.train or args.test):
print("please select train or test flag")
print("train: python main.py --train")
print("test: python main.py --test")
print("both: python main.py --train --test")
| 26.251497
| 89
| 0.519617
|
28991c67c708b05679a196cc488333430e102b33
| 26,790
|
py
|
Python
|
observatory/logic/space_api.py
|
spookey/observatory
|
be5cc92f53f12e6341e7e3040f26360e54cfdf7d
|
[
"MIT"
] | null | null | null |
observatory/logic/space_api.py
|
spookey/observatory
|
be5cc92f53f12e6341e7e3040f26360e54cfdf7d
|
[
"MIT"
] | 1
|
2020-03-28T09:51:56.000Z
|
2020-03-28T09:51:56.000Z
|
observatory/logic/space_api.py
|
spookey/dz_stats_page
|
be5cc92f53f12e6341e7e3040f26360e54cfdf7d
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from logging import getLogger
from observatory.models.mapper import EnumConvert, EnumHorizon
from observatory.models.value import Value
from observatory.start.environment import SP_API_PREFIX, SP_API_REFRESH
# pylint: disable=too-many-arguments
# pylint: disable=too-many-public-methods
class SpaceApi:
def __init__(self):
self._log = getLogger(self.__class__.__name__)
self._content = None
self._last = None
@staticmethod
def _by_key(*, key):
return Value.by_key(key=f'{SP_API_PREFIX}.{key}')
@staticmethod
def _get(*, key, idx=0):
return Value.get(key=f'{SP_API_PREFIX}.{key}', idx=idx)
def _get_all(self, *, key):
return [
{
'_idx': elem.idx,
'value': elem.elem,
}
for elem in self._by_key(key=key)
if elem is not None
]
def latest_value(self, *, key, idx=0, convert):
sensor = self._get(key=key, idx=idx)
if sensor is None or sensor.latest is None:
return None
return sensor.latest.translate(
horizon=EnumHorizon.NORMAL, convert=convert, numeric=False
)
def _indices_any(self, *keys):
result = set()
for key in keys:
result = result.union(
elem.idx for elem in self._by_key(key=key) if elem is not None
)
return sorted(result)
def _indices_all(self, first, *keys):
result = set(
elem.idx for elem in self._by_key(key=first) if elem is not None
)
for key in keys:
result = result.intersection(
elem.idx for elem in self._by_key(key=key) if elem is not None
)
return sorted(result)
@staticmethod
def next_index(indices):
if not indices:
return 0
top = max(indices)
diff = set(range(top)).difference(indices)
if diff:
return min(diff)
return 1 + top
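    # Hedged worked example (added comment, not part of the original class):
    # next_index fills the smallest gap first, e.g. {0, 1, 3} -> 2; a dense
    # {0, 1, 2} -> 3; and an empty collection starts at 0.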
@property
def cam_indices(self):
return self._indices_all('cam')
@property
def contact_keymasters_indices(self):
return self._indices_any(
'contact.keymasters.irc_nick',
'contact.keymasters.phone',
'contact.keymasters.email',
'contact.keymasters.twitter',
)
@property
def sensors_temperature_indices(self):
return self._indices_all(
'sensors.temperature.value',
'sensors.temperature.unit',
'sensors.temperature.location',
)
@property
def sensors_door_locked_indices(self):
return self._indices_all(
'sensors.door_locked.value', 'sensors.door_locked.location'
)
@property
def sensors_barometer_indices(self):
return self._indices_all(
'sensors.barometer.value',
'sensors.barometer.unit',
'sensors.barometer.location',
)
def sensors_radiation_indices(self, sub):
return self._indices_all(
f'sensors.radiation.{sub}.value',
f'sensors.radiation.{sub}.unit',
)
@property
def sensors_humidity_indices(self):
return self._indices_all(
'sensors.humidity.value',
'sensors.humidity.unit',
'sensors.humidity.location',
)
@property
def sensors_beverage_supply_indices(self):
return self._indices_all(
'sensors.beverage_supply.value',
'sensors.beverage_supply.unit',
)
@property
def sensors_power_consumption_indices(self):
return self._indices_all(
'sensors.power_consumption.value',
'sensors.power_consumption.unit',
'sensors.power_consumption.location',
)
@property
def sensors_wind_indices(self):
return self._indices_all(
'sensors.wind.properties.speed.value',
'sensors.wind.properties.speed.unit',
'sensors.wind.properties.gust.value',
'sensors.wind.properties.gust.unit',
'sensors.wind.properties.direction.value',
'sensors.wind.properties.direction.unit',
'sensors.wind.properties.elevation.value',
'sensors.wind.properties.elevation.unit',
'sensors.wind.location',
)
@property
def sensors_account_balance_indices(self):
return self._indices_all(
'sensors.account_balance.value',
'sensors.account_balance.unit',
)
@property
def sensors_total_member_count_indices(self):
return self._indices_all('sensors.total_member_count.value')
@property
def sensors_network_traffic_indices(self):
return self._indices_any(
'sensors.network_traffic.properties.bits_per_second.value',
'sensors.network_traffic.properties.packets_per_second.value',
)
@property
def projects_indices(self):
return self._indices_all('projects')
@property
def links_indices(self):
return self._indices_all(
'links.name',
'links.url',
)
@property
def membership_plans_indices(self):
return self._indices_all(
'membership_plans.name',
'membership_plans.value',
'membership_plans.currency',
'membership_plans.billing_interval',
)
def get_state(self):
self._log.info('gathering state')
return {
'icon': {
'open': self._get(key='state.icon.open'),
'closed': self._get(key='state.icon.closed'),
},
}
def get_events(self):
self._log.info('gathering events')
return []
def build(self):
return {
'api_compatibility': ['14'],
'space': self._get(key='space'),
'logo': self._get(key='logo'),
'url': self._get(key='url'),
'location': {
'address': self._get(key='location.address'),
'lat': self._get(key='location.lat'),
'lon': self._get(key='location.lon'),
'timezone': self._get(key='location.timezone'),
},
'spacefed': {
'spacenet': self._get(key='spacefed.spacenet'),
'spacesaml': self._get(key='spacefed.spacesaml'),
},
'cam': self._get_all(key='cam'),
'state': self.get_state(),
'events': self.get_events(),
'contact': {
'phone': self._get(key='contact.phone'),
'sip': self._get(key='contact.sip'),
'keymasters': [
{
'_idx': idx,
'name': self._get(
key='contact.keymasters.name', idx=idx
),
'irc_nick': self._get(
key='contact.keymasters.irc_nick', idx=idx
),
'phone': self._get(
key='contact.keymasters.phone', idx=idx
),
'email': self._get(
key='contact.keymasters.email', idx=idx
),
'twitter': self._get(
key='contact.keymasters.twitter', idx=idx
),
'xmpp': self._get(
key='contact.keymasters.xmpp', idx=idx
),
'mastodon': self._get(
key='contact.keymasters.mastodon', idx=idx
),
'matrix': self._get(
key='contact.keymasters.matrix', idx=idx
),
}
for idx in self.contact_keymasters_indices
],
'irc': self._get(key='contact.irc'),
'twitter': self._get(key='contact.twitter'),
'mastodon': self._get(key='contact.mastodon'),
'facebook': self._get(key='contact.facebook'),
'identica': self._get(key='contact.identica'),
'foursquare': self._get(key='contact.foursquare'),
'email': self._get(key='contact.email'),
'ml': self._get(key='contact.ml'),
'xmpp': self._get(key='contact.xmpp'),
'issue_mail': self._get(key='contact.issue_mail'),
'gopher': self._get(key='contact.gopher'),
'matrix': self._get(key='contact.matrix'),
'mumble': self._get(key='contact.mumble'),
},
'sensors': {
'temperature': [
{
'_idx': idx,
'value': self.latest_value(
key='sensors.temperature.value',
idx=idx,
convert=EnumConvert.NATURAL,
),
'unit': self._get(
key='sensors.temperature.unit', idx=idx
),
'location': self._get(
key='sensors.temperature.location', idx=idx
),
'name': self._get(
key='sensors.temperature.name', idx=idx
),
'description': self._get(
key='sensors.temperature.description', idx=idx
),
}
for idx in self.sensors_temperature_indices
],
'door_locked': [
{
'_idx': idx,
'value': self.latest_value(
key='sensors.door_locked.value',
idx=idx,
convert=EnumConvert.BOOLEAN,
),
'location': self._get(
key='sensors.door_locked.location', idx=idx
),
'name': self._get(
key='sensors.door_locked.name', idx=idx
),
'description': self._get(
key='sensors.door_locked.description', idx=idx
),
}
for idx in self.sensors_door_locked_indices
],
'barometer': [
{
'_idx': idx,
'value': self.latest_value(
key='sensors.barometer.value',
idx=idx,
convert=EnumConvert.NATURAL,
),
'unit': self._get(
key='sensors.barometer.unit', idx=idx
),
'location': self._get(
key='sensors.barometer.location', idx=idx
),
'name': self._get(
key='sensors.barometer.name', idx=idx
),
'description': self._get(
key='sensors.barometer.description', idx=idx
),
}
for idx in self.sensors_barometer_indices
],
'radiation': {
sub: [
{
'_idx': idx,
'value': self.latest_value(
key=f'sensors.radiation.{sub}.value',
idx=idx,
convert=EnumConvert.NATURAL,
),
'unit': self._get(
key=f'sensors.radiation.{sub}.unit', idx=idx
),
'dead_time': self._get(
key=f'sensors.radiation.{sub}.dead_time',
idx=idx,
),
'conversion_factor': self._get(
key=(
f'sensors.radiation.{sub}.'
'conversion_factor'
),
idx=idx,
),
'location': self._get(
key=f'sensors.radiation.{sub}.location',
idx=idx,
),
'name': self._get(
key=f'sensors.radiation.{sub}.name', idx=idx
),
'description': self._get(
key=f'sensors.radiation.{sub}.description',
idx=idx,
),
}
for idx in self.sensors_radiation_indices(sub)
]
for sub in ['alpha', 'beta', 'gamma', 'beta_gamma']
},
'humidity': [
{
'_idx': idx,
'value': self.latest_value(
key='sensors.humidity.value',
idx=idx,
convert=EnumConvert.INTEGER,
),
'unit': self._get(
key='sensors.humidity.unit', idx=idx
),
'location': self._get(
key='sensors.humidity.location', idx=idx
),
'name': self._get(
key='sensors.humidity.name', idx=idx
),
'description': self._get(
key='sensors.humidity.description', idx=idx
),
}
for idx in self.sensors_humidity_indices
],
'beverage_supply': [
{
'_idx': idx,
'value': self.latest_value(
key='sensors.beverage_supply.value',
idx=idx,
convert=EnumConvert.INTEGER,
),
'unit': self._get(
key='sensors.beverage_supply.unit', idx=idx
),
'location': self._get(
key='sensors.beverage_supply.location', idx=idx
),
'name': self._get(
key='sensors.beverage_supply.name', idx=idx
),
'description': self._get(
key='sensors.beverage_supply.description', idx=idx
),
}
for idx in self.sensors_beverage_supply_indices
],
'power_consumption': [
{
'_idx': idx,
'value': self.latest_value(
key='sensors.power_consumption.value',
idx=idx,
convert=EnumConvert.INTEGER,
),
'unit': self._get(
key='sensors.power_consumption.unit', idx=idx
),
'location': self._get(
key='sensors.power_consumption.location', idx=idx
),
'name': self._get(
key='sensors.power_consumption.name', idx=idx
),
'description': self._get(
key='sensors.power_consumption.description',
idx=idx,
),
}
for idx in self.sensors_power_consumption_indices
],
'wind': [
{
'_idx': idx,
'properties': {
'speed': {
'value': self.latest_value(
key='sensors.wind.properties.speed.value',
idx=idx,
convert=EnumConvert.NATURAL,
),
'unit': self._get(
key='sensors.wind.properties.speed.unit',
idx=idx,
),
},
'gust': {
'value': self.latest_value(
key='sensors.wind.properties.gust.value',
idx=idx,
convert=EnumConvert.NATURAL,
),
'unit': self._get(
key='sensors.wind.properties.gust.unit',
idx=idx,
),
},
'direction': {
'value': self.latest_value(
key=(
'sensors.wind.properties.'
'direction.value'
),
idx=idx,
convert=EnumConvert.INTEGER,
),
'unit': self._get(
key=(
'sensors.wind.properties.'
'direction.unit'
),
idx=idx,
),
},
'elevation': {
'value': self._get(
key=(
'sensors.wind.properties.'
'elevation.value'
),
idx=idx,
),
'unit': self._get(
key=(
'sensors.wind.properties.'
'elevation.unit'
),
idx=idx,
),
},
},
'location': self._get(
key='sensors.wind.location', idx=idx
),
'name': self._get(key='sensors.wind.name', idx=idx),
'description': self._get(
key='sensors.wind.description', idx=idx
),
}
for idx in self.sensors_wind_indices
],
'network_connections': [],
'account_balance': [
{
'_idx': idx,
'value': self.latest_value(
key='sensors.account_balance.value',
idx=idx,
convert=EnumConvert.NATURAL,
),
'unit': self._get(
key='sensors.account_balance.unit', idx=idx
),
'location': self._get(
key='sensors.account_balance.location', idx=idx
),
'name': self._get(
key='sensors.account_balance.name', idx=idx
),
'description': self._get(
key='sensors.account_balance.description', idx=idx
),
}
for idx in self.sensors_account_balance_indices
],
'total_member_count': [
{
'_idx': idx,
'value': self.latest_value(
key='sensors.total_member_count.value',
idx=idx,
convert=EnumConvert.INTEGER,
),
'location': self._get(
key='sensors.total_member_count.location', idx=idx
),
'name': self._get(
key='sensors.total_member_count.name', idx=idx
),
'description': self._get(
key='sensors.total_member_count.description',
idx=idx,
),
}
for idx in self.sensors_total_member_count_indices
],
'people_now_present': [],
'network_traffic': [
{
'_idx': idx,
'properties': {
'bits_per_second': {
'value': self.latest_value(
key=(
'sensors.network_traffic.properties.'
'bits_per_second.value'
),
idx=idx,
convert=EnumConvert.NATURAL,
),
'maximum': self._get(
key=(
'sensors.network_traffic.properties.'
'bits_per_second.maximum'
),
idx=idx,
),
},
'packets_per_second': {
'value': self.latest_value(
key=(
'sensors.network_traffic.properties.'
'packets_per_second.value'
),
idx=idx,
convert=EnumConvert.NATURAL,
),
},
},
'location': self._get(
key='sensors.network_traffic.location', idx=idx
),
'name': self._get(
key='sensors.network_traffic.name', idx=idx
),
'description': self._get(
key='sensors.network_traffic.description', idx=idx
),
}
for idx in self.sensors_network_traffic_indices
],
},
'feeds': {
'blog': {
'type': self._get(key='feeds.blog.type'),
'url': self._get(key='feeds.blog.url'),
},
'wiki': {
'type': self._get(key='feeds.wiki.type'),
'url': self._get(key='feeds.wiki.url'),
},
'calendar': {
'type': self._get(key='feeds.calendar.type'),
'url': self._get(key='feeds.calendar.url'),
},
'flickr': {
                    'type': self._get(key='feeds.flickr.type'),
                    'url': self._get(key='feeds.flickr.url'),
},
},
'projects': self._get_all(key='projects'),
'links': [
{
'_idx': idx,
'name': self._get(key='links.name', idx=idx),
'description': self._get(key='links.description', idx=idx),
'url': self._get(key='links.url', idx=idx),
}
for idx in self.links_indices
],
'membership_plans': [
{
'_idx': idx,
'name': self._get(key='membership_plans.name', idx=idx),
'value': self._get(key='membership_plans.value', idx=idx),
'currency': self._get(
key='membership_plans.currency', idx=idx
),
'billing_interval': self._get(
key='membership_plans.billing_interval', idx=idx
),
'description': self._get(
key='membership_plans.description', idx=idx
),
}
for idx in self.membership_plans_indices
],
}
@property
def outdated(self):
if self._content is None:
return True
if self._last is None:
return True
if (datetime.utcnow() - self._last).total_seconds() > SP_API_REFRESH:
return True
return False
@property
def content(self):
if self.outdated:
self._log.info('rebuilding content')
self._content = self.build()
self._last = datetime.utcnow()
return self._content
def clear(self):
self._content = None
self._last = None
return all((self._content is None, self._last is None))
def reset(self):
self._log.info('resetting content')
self.clear()
return self.content
| 39.281525
| 79
| 0.387495
|
13bf14f8262c7aa7e66c52e5f45a31272f0379e4
| 10,917
|
py
|
Python
|
sympy/integrals/rationaltools.py
|
utkarshdeorah/sympy
|
dcdf59bbc6b13ddbc329431adf72fcee294b6389
|
[
"BSD-3-Clause"
] | 1
|
2020-09-09T20:40:17.000Z
|
2020-09-09T20:40:17.000Z
|
sympy/integrals/rationaltools.py
|
utkarshdeorah/sympy
|
dcdf59bbc6b13ddbc329431adf72fcee294b6389
|
[
"BSD-3-Clause"
] | 14
|
2018-02-08T10:11:03.000Z
|
2019-04-16T10:32:46.000Z
|
sympy/integrals/rationaltools.py
|
utkarshdeorah/sympy
|
dcdf59bbc6b13ddbc329431adf72fcee294b6389
|
[
"BSD-3-Clause"
] | 1
|
2022-02-04T13:50:29.000Z
|
2022-02-04T13:50:29.000Z
|
"""This module implements tools for integrating rational functions. """
from sympy.core.function import Lambda
from sympy.core.numbers import I
from sympy.core.singleton import S
from sympy.core.symbol import (Dummy, Symbol, symbols)
from sympy.functions.elementary.exponential import log
from sympy.functions.elementary.trigonometric import atan
from sympy.polys.polyroots import roots
from sympy.polys.polytools import cancel
from sympy.polys.rootoftools import RootSum
from sympy.polys import Poly, resultant, ZZ
from sympy.solvers.solvers import solve
def ratint(f, x, **flags):
"""
Performs indefinite integration of rational functions.
Explanation
===========
Given a field :math:`K` and a rational function :math:`f = p/q`,
where :math:`p` and :math:`q` are polynomials in :math:`K[x]`,
returns a function :math:`g` such that :math:`f = g'`.
Examples
========
>>> from sympy.integrals.rationaltools import ratint
>>> from sympy.abc import x
>>> ratint(36/(x**5 - 2*x**4 - 2*x**3 + 4*x**2 + x - 2), x)
(12*x + 6)/(x**2 - 1) + 4*log(x - 2) - 4*log(x + 1)
References
==========
.. [1] M. Bronstein, Symbolic Integration I: Transcendental
Functions, Second Edition, Springer-Verlag, 2005, pp. 35-70
See Also
========
sympy.integrals.integrals.Integral.doit
sympy.integrals.rationaltools.ratint_logpart
sympy.integrals.rationaltools.ratint_ratpart
"""
if isinstance(f, tuple):
p, q = f
else:
p, q = f.as_numer_denom()
p, q = Poly(p, x, composite=False, field=True), Poly(q, x, composite=False, field=True)
coeff, p, q = p.cancel(q)
poly, p = p.div(q)
result = poly.integrate(x).as_expr()
if p.is_zero:
return coeff*result
g, h = ratint_ratpart(p, q, x)
P, Q = h.as_numer_denom()
P = Poly(P, x)
Q = Poly(Q, x)
q, r = P.div(Q)
result += g + q.integrate(x).as_expr()
if not r.is_zero:
symbol = flags.get('symbol', 't')
if not isinstance(symbol, Symbol):
t = Dummy(symbol)
else:
t = symbol.as_dummy()
L = ratint_logpart(r, Q, x, t)
real = flags.get('real')
if real is None:
if isinstance(f, tuple):
p, q = f
atoms = p.atoms() | q.atoms()
else:
atoms = f.atoms()
for elt in atoms - {x}:
if not elt.is_extended_real:
real = False
break
else:
real = True
eps = S.Zero
if not real:
for h, q in L:
_, h = h.primitive()
eps += RootSum(
q, Lambda(t, t*log(h.as_expr())), quadratic=True)
else:
for h, q in L:
_, h = h.primitive()
R = log_to_real(h, q, x, t)
if R is not None:
eps += R
else:
eps += RootSum(
q, Lambda(t, t*log(h.as_expr())), quadratic=True)
result += eps
return coeff*result
def ratint_ratpart(f, g, x):
"""
Horowitz-Ostrogradsky algorithm.
Explanation
===========
Given a field K and polynomials f and g in K[x], such that f and g
are coprime and deg(f) < deg(g), returns fractions A and B in K(x),
such that f/g = A' + B and B has square-free denominator.
Examples
========
>>> from sympy.integrals.rationaltools import ratint_ratpart
>>> from sympy.abc import x, y
>>> from sympy import Poly
>>> ratint_ratpart(Poly(1, x, domain='ZZ'),
... Poly(x + 1, x, domain='ZZ'), x)
(0, 1/(x + 1))
>>> ratint_ratpart(Poly(1, x, domain='EX'),
... Poly(x**2 + y**2, x, domain='EX'), x)
(0, 1/(x**2 + y**2))
>>> ratint_ratpart(Poly(36, x, domain='ZZ'),
... Poly(x**5 - 2*x**4 - 2*x**3 + 4*x**2 + x - 2, x, domain='ZZ'), x)
((12*x + 6)/(x**2 - 1), 12/(x**2 - x - 2))
See Also
========
ratint, ratint_logpart
"""
f = Poly(f, x)
g = Poly(g, x)
u, v, _ = g.cofactors(g.diff())
n = u.degree()
m = v.degree()
A_coeffs = [ Dummy('a' + str(n - i)) for i in range(0, n) ]
B_coeffs = [ Dummy('b' + str(m - i)) for i in range(0, m) ]
C_coeffs = A_coeffs + B_coeffs
A = Poly(A_coeffs, x, domain=ZZ[C_coeffs])
B = Poly(B_coeffs, x, domain=ZZ[C_coeffs])
H = f - A.diff()*v + A*(u.diff()*v).quo(u) - B*u
result = solve(H.coeffs(), C_coeffs)
A = A.as_expr().subs(result)
B = B.as_expr().subs(result)
rat_part = cancel(A/u.as_expr(), x)
log_part = cancel(B/v.as_expr(), x)
return rat_part, log_part
def ratint_logpart(f, g, x, t=None):
r"""
Lazard-Rioboo-Trager algorithm.
Explanation
===========
Given a field K and polynomials f and g in K[x], such that f and g
are coprime, deg(f) < deg(g) and g is square-free, returns a list
of tuples (s_i, q_i) of polynomials, for i = 1..n, such that s_i
in K[t, x] and q_i in K[t], and::
___ ___
d f d \ ` \ `
-- - = -- ) ) a log(s_i(a, x))
dx g dx /__, /__,
i=1..n a | q_i(a) = 0
Examples
========
>>> from sympy.integrals.rationaltools import ratint_logpart
>>> from sympy.abc import x
>>> from sympy import Poly
>>> ratint_logpart(Poly(1, x, domain='ZZ'),
... Poly(x**2 + x + 1, x, domain='ZZ'), x)
[(Poly(x + 3*_t/2 + 1/2, x, domain='QQ[_t]'),
...Poly(3*_t**2 + 1, _t, domain='ZZ'))]
>>> ratint_logpart(Poly(12, x, domain='ZZ'),
... Poly(x**2 - x - 2, x, domain='ZZ'), x)
[(Poly(x - 3*_t/8 - 1/2, x, domain='QQ[_t]'),
...Poly(-_t**2 + 16, _t, domain='ZZ'))]
See Also
========
ratint, ratint_ratpart
"""
f, g = Poly(f, x), Poly(g, x)
t = t or Dummy('t')
a, b = g, f - g.diff()*Poly(t, x)
res, R = resultant(a, b, includePRS=True)
res = Poly(res, t, composite=False)
assert res, "BUG: resultant(%s, %s) cannot be zero" % (a, b)
R_map, H = {}, []
for r in R:
R_map[r.degree()] = r
def _include_sign(c, sqf):
if c.is_extended_real and (c < 0) == True:
h, k = sqf[0]
c_poly = c.as_poly(h.gens)
sqf[0] = h*c_poly, k
C, res_sqf = res.sqf_list()
_include_sign(C, res_sqf)
for q, i in res_sqf:
_, q = q.primitive()
if g.degree() == i:
H.append((g, q))
else:
h = R_map[i]
h_lc = Poly(h.LC(), t, field=True)
c, h_lc_sqf = h_lc.sqf_list(all=True)
_include_sign(c, h_lc_sqf)
for a, j in h_lc_sqf:
h = h.quo(Poly(a.gcd(q)**j, x))
inv, coeffs = h_lc.invert(q), [S.One]
for coeff in h.coeffs()[1:]:
coeff = coeff.as_poly(inv.gens)
T = (inv*coeff).rem(q)
coeffs.append(T.as_expr())
h = Poly(dict(list(zip(h.monoms(), coeffs))), x)
H.append((h, q))
return H
def log_to_atan(f, g):
"""
Convert complex logarithms to real arctangents.
Explanation
===========
Given a real field K and polynomials f and g in K[x], with g != 0,
returns a sum h of arctangents of polynomials in K[x], such that:
dh d f + I g
-- = -- I log( ------- )
dx dx f - I g
Examples
========
>>> from sympy.integrals.rationaltools import log_to_atan
>>> from sympy.abc import x
>>> from sympy import Poly, sqrt, S
>>> log_to_atan(Poly(x, x, domain='ZZ'), Poly(1, x, domain='ZZ'))
2*atan(x)
>>> log_to_atan(Poly(x + S(1)/2, x, domain='QQ'),
... Poly(sqrt(3)/2, x, domain='EX'))
2*atan(2*sqrt(3)*x/3 + sqrt(3)/3)
See Also
========
log_to_real
"""
if f.degree() < g.degree():
f, g = -g, f
f = f.to_field()
g = g.to_field()
p, q = f.div(g)
if q.is_zero:
return 2*atan(p.as_expr())
else:
s, t, h = g.gcdex(-f)
u = (f*s + g*t).quo(h)
A = 2*atan(u.as_expr())
return A + log_to_atan(s, t)
def log_to_real(h, q, x, t):
r"""
Convert complex logarithms to real functions.
Explanation
===========
Given real field K and polynomials h in K[t,x] and q in K[t],
returns real function f such that:
___
df d \ `
-- = -- ) a log(h(a, x))
dx dx /__,
a | q(a) = 0
Examples
========
>>> from sympy.integrals.rationaltools import log_to_real
>>> from sympy.abc import x, y
>>> from sympy import Poly, S
>>> log_to_real(Poly(x + 3*y/2 + S(1)/2, x, domain='QQ[y]'),
... Poly(3*y**2 + 1, y, domain='ZZ'), x, y)
2*sqrt(3)*atan(2*sqrt(3)*x/3 + sqrt(3)/3)/3
>>> log_to_real(Poly(x**2 - 1, x, domain='ZZ'),
... Poly(-2*y + 1, y, domain='ZZ'), x, y)
log(x**2 - 1)/2
See Also
========
log_to_atan
"""
from sympy.simplify.radsimp import collect
u, v = symbols('u,v', cls=Dummy)
H = h.as_expr().subs({t: u + I*v}).expand()
Q = q.as_expr().subs({t: u + I*v}).expand()
H_map = collect(H, I, evaluate=False)
Q_map = collect(Q, I, evaluate=False)
a, b = H_map.get(S.One, S.Zero), H_map.get(I, S.Zero)
c, d = Q_map.get(S.One, S.Zero), Q_map.get(I, S.Zero)
R = Poly(resultant(c, d, v), u)
R_u = roots(R, filter='R')
if len(R_u) != R.count_roots():
return None
result = S.Zero
for r_u in R_u.keys():
C = Poly(c.subs({u: r_u}), v)
R_v = roots(C, filter='R')
if len(R_v) != C.count_roots():
return None
R_v_paired = [] # take one from each pair of conjugate roots
for r_v in R_v:
if r_v not in R_v_paired and -r_v not in R_v_paired:
if r_v.is_negative or r_v.could_extract_minus_sign():
R_v_paired.append(-r_v)
elif not r_v.is_zero:
R_v_paired.append(r_v)
for r_v in R_v_paired:
D = d.subs({u: r_u, v: r_v})
if D.evalf(chop=True) != 0:
continue
A = Poly(a.subs({u: r_u, v: r_v}), x)
B = Poly(b.subs({u: r_u, v: r_v}), x)
AB = (A**2 + B**2).as_expr()
result += r_u*log(AB) + r_v*log_to_atan(A, B)
R_q = roots(q, filter='R')
if len(R_q) != q.count_roots():
return None
for r in R_q.keys():
result += r*log(h.as_expr().subs(t, r))
return result
| 26.117225
| 91
| 0.498947
|
aabdaa8aa3d4f5c27ad49b1d343958200d30c7e1
| 348
|
py
|
Python
|
donkeycar/parts/history.py
|
ilkka/donkey
|
f1454f720d2a6b3fc0db4f13a46a056f7a59fc19
|
[
"MIT"
] | null | null | null |
donkeycar/parts/history.py
|
ilkka/donkey
|
f1454f720d2a6b3fc0db4f13a46a056f7a59fc19
|
[
"MIT"
] | null | null | null |
donkeycar/parts/history.py
|
ilkka/donkey
|
f1454f720d2a6b3fc0db4f13a46a056f7a59fc19
|
[
"MIT"
] | 1
|
2019-01-28T13:33:40.000Z
|
2019-01-28T13:33:40.000Z
|
from collections import deque
class History:
def __init__(self, buffer_size):
self.on = True
self.history_buffer = deque(maxlen=buffer_size)
for i in range(buffer_size):
self.history_buffer.append(0)
def run(self, value):
        if value is not None:
self.history_buffer.append(value)
return list(self.history_buffer)
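# --- Hedged usage sketch (added example, not part of the original part) ---
# History keeps a fixed-length window of the most recent values, pre-filled
# with zeros; the expected outputs follow directly from deque(maxlen=...).
if __name__ == '__main__':
    h = History(buffer_size=3)
    print(h.run(1))     # [0, 0, 1]
    print(h.run(2))     # [0, 1, 2]
    print(h.run(None))  # None is ignored -> [0, 1, 2]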
| 23.2
| 51
| 0.701149
|
52440342b96b7db8121d26c1f8f0f741b1f70a7d
| 2,836
|
py
|
Python
|
srl_core/algo/a2c_acktr.py
|
chenzeyin9867/srl
|
309fdaa2703fa9688d993c24c217edb45a6e4332
|
[
"MIT"
] | 3
|
2021-11-15T06:57:46.000Z
|
2022-02-17T03:22:32.000Z
|
srl_core/algo/a2c_acktr.py
|
chenzeyin9867/srl
|
309fdaa2703fa9688d993c24c217edb45a6e4332
|
[
"MIT"
] | null | null | null |
srl_core/algo/a2c_acktr.py
|
chenzeyin9867/srl
|
309fdaa2703fa9688d993c24c217edb45a6e4332
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.optim as optim
from srl_core.algo.kfac import KFACOptimizer
class A2C_ACKTR():
def __init__(self,
actor_critic,
value_loss_coef,
entropy_coef,
lr=None,
eps=None,
alpha=None,
max_grad_norm=None,
acktr=False):
self.actor_critic = actor_critic
self.acktr = acktr
self.value_loss_coef = value_loss_coef
self.entropy_coef = entropy_coef
self.max_grad_norm = max_grad_norm
if acktr:
self.optimizer = KFACOptimizer(actor_critic)
else:
self.optimizer = optim.RMSprop(
actor_critic.parameters(), lr, eps=eps, alpha=alpha)
def update(self, rollouts):
obs_shape = rollouts.obs.size()[2:]
action_shape = rollouts.actions.size()[-1]
num_steps, num_processes, _ = rollouts.rewards.size()
values, action_log_probs, dist_entropy, _ = self.actor_critic.evaluate_actions(
rollouts.obs[:-1].view(-1, *obs_shape),
rollouts.recurrent_hidden_states[0].view(
-1, self.actor_critic.recurrent_hidden_state_size),
rollouts.masks[:-1].view(-1, 1),
rollouts.actions.view(-1, action_shape))
values = values.view(num_steps, num_processes, 1)
action_log_probs = action_log_probs.view(num_steps, num_processes, 1)
advantages = rollouts.returns[:-1] - values
value_loss = advantages.pow(2).mean()
action_loss = -(advantages.detach() * action_log_probs).mean()
if self.acktr and self.optimizer.steps % self.optimizer.Ts == 0:
# Compute fisher, see Martens 2014
self.actor_critic.zero_grad()
pg_fisher_loss = -action_log_probs.mean()
value_noise = torch.randn(values.size())
if values.is_cuda:
value_noise = value_noise.cuda()
sample_values = values + value_noise
vf_fisher_loss = -(values - sample_values.detach()).pow(2).mean()
fisher_loss = pg_fisher_loss + vf_fisher_loss
self.optimizer.acc_stats = True
fisher_loss.backward(retain_graph=True)
self.optimizer.acc_stats = False
self.optimizer.zero_grad()
(value_loss * self.value_loss_coef + action_loss -
dist_entropy * self.entropy_coef).backward()
        if not self.acktr:
nn.utils.clip_grad_norm_(self.actor_critic.parameters(),
self.max_grad_norm)
self.optimizer.step()
return value_loss.item(), action_loss.item(), dist_entropy.item()
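# --- Hedged usage sketch (added comment, not part of the original module) ---
# A2C_ACKTR only needs an actor-critic exposing evaluate_actions() and a
# rollout storage with obs/actions/returns/masks; the hyperparameter values
# below are illustrative only:
#
#   agent = A2C_ACKTR(actor_critic, value_loss_coef=0.5, entropy_coef=0.01,
#                     lr=7e-4, eps=1e-5, alpha=0.99, max_grad_norm=0.5)
#   value_loss, action_loss, dist_entropy = agent.update(rollouts)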
| 35.012346
| 88
| 0.586037
|
649850757a426b361d09cc13cd552c9fa0215268
| 715
|
py
|
Python
|
stats/services.py
|
dbgsprw/youtube_crawler
|
22b044b7c6f7122a0c6512dc4b7b98580eaedcd9
|
[
"MIT"
] | null | null | null |
stats/services.py
|
dbgsprw/youtube_crawler
|
22b044b7c6f7122a0c6512dc4b7b98580eaedcd9
|
[
"MIT"
] | 7
|
2019-12-04T23:27:13.000Z
|
2022-02-10T07:33:06.000Z
|
stats/services.py
|
dbgsprw/youtube_statistics_collector
|
22b044b7c6f7122a0c6512dc4b7b98580eaedcd9
|
[
"MIT"
] | null | null | null |
from stats.models import Stats
def calculate_view_diff(video, start_day, end_day):
older_stat = video.stats_set.filter(created_at__gte=start_day).order_by('created_at')[0]
newer_stat = video.stats_set.filter(created_at__lte=end_day).order_by('-created_at')[0]
if newer_stat == older_stat:
older_stat = Stats(view_count=0, created_at=video.published_at)
total_seconds = (end_day - start_day).total_seconds()
return _get_average_view_per_seconds(older_stat, newer_stat) * total_seconds
def _get_average_view_per_seconds(older_stat, newer_stat):
return (newer_stat.view_count - older_stat.view_count) / \
(newer_stat.created_at - older_stat.created_at).total_seconds()
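# --- Hedged worked example (added comment, not part of the original module) ---
# calculate_view_diff scales the average views-per-second between the closest
# stats to the requested window: two hypothetical stats of 100 and 400 views
# taken 24 hours apart give 300 / 86400 ≈ 0.0035 views per second, so a
# 12 hour window would be credited with roughly 150 views.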
| 39.722222
| 92
| 0.767832
|
40a8984e8ae71c36fee0b6af040ca6f0ea09beb8
| 18,300
|
py
|
Python
|
photo_illum.py
|
deapplegate/wtgpipeline
|
9693e8562022cc97bf5a96427e22965e1a5e8497
|
[
"MIT"
] | 1
|
2019-03-15T04:01:19.000Z
|
2019-03-15T04:01:19.000Z
|
photo_illum.py
|
deapplegate/wtgpipeline
|
9693e8562022cc97bf5a96427e22965e1a5e8497
|
[
"MIT"
] | 5
|
2017-12-11T00:11:39.000Z
|
2021-07-09T17:05:16.000Z
|
photo_illum.py
|
deapplegate/wtgpipeline
|
9693e8562022cc97bf5a96427e22965e1a5e8497
|
[
"MIT"
] | 2
|
2017-08-15T21:19:11.000Z
|
2017-10-12T00:36:35.000Z
|
#!/usr/bin/env python
# Python module for photometric calibration.
# It needs the Python modules ppgplot and
# mpfit to be installed.
# 03.03.2005 Fixed a serious bug in the rejection loop. Instead
# of using the remaining points we always used all points
# and rejected points until the original fit matched the data
# 15.02.2005 Fixed the range of the y-axes in the plots to more
# sensible values
# 14.02.2005 Fixed a bug when more parameters were fitted than
# data points were present
# We now rescale points to the airmass/color at which
# they are plotted (zero)
# Check that label is set
# 10.12.2004 Now takes a new argument "label" to be
# used as axis label in the color plot
import copy
import getopt
import string
import sys
import mpfit
import Numeric
from ppgplot import *
import BonnLogger
def illum_funct(p, fjac=None, X=None, Y=None, y=None, err=None):
[A,B,C,D,E,F] = p
model = A*X**2 + B*Y**2 + C*X*Y + D*X + E*Y + F
status = 0
return([status, (model-y)/err])
def phot_funct_2(p, fjac=None, y=None, err=None):
model = p[0]
status = 0
return([status, (model-y)/err])
def phot_funct_1(p, fjac=None, color=None, y=None, err=None):
model = p[0] + p[1]*color
status = 0
return([status, (model-y)/err])
def phot_funct_0(p, fjac=None, airmass=None, color=None, y=None, err=None):
    model = p[0] + p[1]*airmass + p[2]*color
status = 0
return([status, (model-y)/err])
def readInput(file):
f = open(file, "r")
instMagList = []
stdMagList = []
magErrList = []
colList = []
airmassList = []
for line in f.readlines():
instMag, stdMag, col, airmass, instMagErr, stdMagErr = string.split(line)
magErr = (float(instMagErr)**2. + float(stdMagErr)**2.)**0.5
magErrList.append(magErr)
instMagList.append(float(instMag))
stdMagList.append(float(stdMag))
colList.append(float(col))
airmassList.append(float(airmass))
f.close()
instMag = Numeric.array(instMagList)
stdMag = Numeric.array(stdMagList)
data = stdMag - instMag
airmass = Numeric.array(airmassList)
color = Numeric.array(colList)
magErr = Numeric.array(magErrList)
return data, airmass, color, magErr
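# --- Hedged input-format note (added comment, not part of the original) ---
# readInput above expects one standard star per line with six whitespace
# separated columns; a hypothetical line could look like:
#   instMag   stdMag   color  airmass  instMagErr  stdMagErr
#   -8.532    14.127   0.45   1.21     0.012       0.015
# The combined magnitude error is the quadrature sum of the two error columns.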
def photCalib(data_save, airmass_save, color_save, err_save, p, sigmareject, maxSigIter=50):
save_len = len(data_save)
parinfos = [[{"value": p[0], "fixed": 0},{"value": p[1], "fixed": 0, "limited": [0,1], "limits": [-99, 0]},{"value": p[2], "fixed": 0}],[{"value": p[0], "fixed": 0},{"value": p[1], "fixed": 0}],[{"value": p[0], "fixed": 0}]]
phot_functs = [phot_funct_0, phot_funct_1, phot_funct_2]
solutions = []
for fit_type in [0,1,2]:
airmass = copy.copy(airmass_save)
color = copy.copy(color_save)
data_tmp = copy.copy(data_save)
err = copy.copy(err_save)
#first apply coefficients we are holding fixed
data = copy.copy(data_tmp)
if fit_type == 1:
for i in range(len(data_tmp)):
data[i] = data_tmp[i] - p[1]*airmass[i]
if fit_type == 2:
for i in range(len(data_tmp)):
data[i] = data_tmp[i] - p[1]*airmass[i] - p[2]*color[i]
print data_tmp[0], data[0]
data_rec = copy.copy(data)
parinfo = parinfos[fit_type]
#for j in range(len(parinfo)):
#if j in fixedList:
# print "Element", j, "is fixed at", p[j]
# parinfo[j]["fixed"] = 1
#else:
# parinfo[j]["fixed"] = 0
for i in range(maxSigIter):
old_len = len(data)
fas = [{"airmass": airmass,"color": color, "y": data, "err": err},{"color": color,"y": data, "err": err}, {"y": data, "err": err}]
fa = fas[fit_type]
phot_funct = phot_functs[fit_type]
m = mpfit.mpfit(phot_funct, functkw=fa,
parinfo=parinfo,
maxiter=1000, quiet=1)
print m.covar, m.params, m.perror
if (m.status <= 0):
print 'error message = ', m.errmsg
condition = Numeric.zeros(len(data))
break
#airmass = copy.copy(airmass_save)
#color = copy.copy(color_save)
#data = copy.copy(data_save)
#err = copy.copy(err_save)
# Compute a 3 sigma rejection criterion
#condition = preFilter(m.params, data_save, data,
# airmass_save, airmass,
# color_save, color)
params = [0,0,0]
perror = [0,0,0]
print m.params,m.perror, m.covar
if fit_type == 0:
params = copy.copy(m.params)
perror = copy.copy(m.perror)
if fit_type == 1:
params[0] = m.params[0]
params[2] = m.params[1]
params[1] = p[1]
perror[0] = m.perror[0]
perror[2] = m.perror[1]
if fit_type == 2:
params[0] = m.params[0]
params[1] = p[1]
params[2] = p[2]
perror[0] = m.perror[0]
# Compute a 3 sigma rejection criterion
print params, data_rec[0], data[0]
condition, redchisq = SigmaCond(params, data_save, data,
airmass_save, airmass,
color_save, color, err_save, err, sigmareject)
print redchisq
# Keep everything (from the full data set!) that is within
# the 3 sigma criterion
#data_sig = Numeric.compress(condition, data_save)
data = Numeric.compress(condition, data_rec)
airmass = Numeric.compress(condition, airmass_save)
color = Numeric.compress(condition, color_save)
err = Numeric.compress(condition, err_save)
new_len = len(data)
if float(new_len)/float(save_len) < 0.5:
print "Rejected more than 50% of all measurements."
print "Aborting this fit."
break
# No change
if new_len == old_len:
print "Converged! (%d iterations)" % (i+1, )
print "Kept %d/%d stars." % (new_len, save_len)
break
print params, perror, condition
meanerr = Numeric.sum(err_save)/len(err_save)
solutions.append([params, perror, redchisq, meanerr, condition])
return solutions
def SigmaCond(p, data_save, data, airmass_save, airmass, color_save, color, err_save, err, sigmareject):
if len(data_save) > 1:
#airmass = airmass[int(0.1*len(airmass)):int(0.9*len(airmass))]
#color = color[int(0.1*len(color)):int(0.9*len(color))]
#data = data[int(0.1*len(data)):int(0.9*len(data))]
mo = p[0] + p[1]*airmass + p[2]*color
mo_save = p[0] + p[1]*airmass_save + p[2]*color_save
print len(data), len(mo), len(err)
reddm = (data-mo)/err
redchisq = Numeric.sqrt(Numeric.sum(Numeric.power(reddm, 2)) / (len(reddm) - 1))
dm = data-mo
dm_save = data_save - mo_save
mean = Numeric.sum(dm)/len(dm)
sigma = Numeric.sqrt(Numeric.sum(Numeric.power(mean-dm, 2)) / (len(dm) - 1))
#condition = Numeric.less(Numeric.fabs(dm_save), float(sigmareject) * sigma)
condition = Numeric.less(Numeric.fabs(dm_save), float(sigmareject) * err_save)
else:
condition = Numeric.zeros(len(data_save))
return condition, redchisq
def makePlots(data, airmass, color, outfile, solutions, label):
file = outfile+".ps"
pgbeg(file+"/cps", 2, 3)
pgiden()
for i in range(3):
result = solutions[i]
# Airmass plot
pgpanl(1, i+1)
airMin = 1
airMax = Numeric.sort(airmass)[-1]*1.1
print result
dataAirMax = result[0][0]+result[0][1]+1
dataAirMin = result[0][0]+result[0][1]-1
dataColMax = result[0][0]+1
dataColMin = result[0][0]-1
colMinVal = Numeric.sort(color)[0]
if colMinVal < 0:
colMin = colMinVal*1.1
else:
colMin = colMinVal*0.95
colMax = Numeric.sort(color)[-1]*1.1
if result[0] and result[1]:
eqStr = "%d parameter fit: Mag-Mag(Inst) = %.2f\\(2233)%.2f + (%.2f\\(2233)%.2f) airmass + "\
"(%.2f\\(2233)%.2f) color" % \
(3-i, result[0][0], result[1][0], result[0][1], result[1][1], result[0][2], result[1][2])
else:
eqStr = "%d parameter fit not possible" % (3-i, )
fixenv([1, airMax] ,
[dataAirMin, dataAirMax],
eqStr, label=["Airmass", "Mag - Mag(Inst)"])
condition = result[4]
goodAirmass = Numeric.compress(condition, airmass)
goodData = Numeric.compress(condition, data)
goodColor = Numeric.compress(condition, color)
badAirmass = Numeric.compress(Numeric.logical_not(condition), airmass)
badData = Numeric.compress(Numeric.logical_not(condition), data)
badColor = Numeric.compress(Numeric.logical_not(condition), color)
if len(goodData):
pgsci(3)
# Rescale to zero color and filter for data within
# our plotting range
plotData = goodData-result[0][2]*goodColor
plotCond1 = Numeric.less(plotData, dataAirMax)
plotCond2 = Numeric.greater(plotData, dataAirMin)
plotCond = Numeric.logical_and(plotCond1, plotCond2)
plotAirmass = Numeric.compress(plotCond, goodAirmass)
plotData = Numeric.compress(plotCond, plotData)
pgpt(plotAirmass, plotData, 5)
print type(plotAirmass), type(plotData)
if len(badData):
pgsci(2)
plotData = badData-result[0][2]*badColor
plotCond1 = Numeric.less(plotData, dataAirMax)
plotCond2 = Numeric.greater(plotData, dataAirMin)
plotCond = Numeric.logical_and(plotCond1, plotCond2)
plotAirmass = Numeric.compress(plotCond, badAirmass)
plotData = Numeric.compress(plotCond, plotData)
pgpt(plotAirmass, plotData, 5)
pgsci(1)
a = Numeric.arange(1, airMax, 0.01)
m = result[0][0] + result[0][1] * a
pgline(a, m)
# Color Plot
pgpanl(2, i+1)
fixenv([colMin, colMax] ,
[dataColMin, dataColMax],
eqStr, label=[label, "Mag - Mag(Inst)"])
if len(goodData):
pgsci(3)
# Rescale to zero airmass and filter for data within
# our plotting range
plotData = goodData-result[0][1]*goodAirmass
plotCond1 = Numeric.less(plotData, dataColMax)
plotCond2 = Numeric.greater(plotData, dataColMin)
plotCond = Numeric.logical_and(plotCond1, plotCond2)
plotColor = Numeric.compress(plotCond, goodColor)
plotData = Numeric.compress(plotCond, plotData)
pgpt(plotColor, plotData, 5)
if len(badData):
pgsci(2)
plotData = badData-result[0][1]*badAirmass
plotCond1 = Numeric.less(plotData, dataColMax)
plotCond2 = Numeric.greater(plotData, dataColMin)
plotCond = Numeric.logical_and(plotCond1, plotCond2)
plotColor = Numeric.compress(plotCond, badColor)
plotData = Numeric.compress(plotCond, plotData)
pgpt(plotColor, plotData, 5)
pgsci(1)
a = Numeric.arange(colMin, colMax, 0.01)
m = result[0][0] + result[0][2] * a
pgline(a, m)
return
def fixenv (xrange=[0,1], yrange=[0,1], fname="none", ci = 1, label=["x", "y"]):
# set axis ranges.
pgswin(xrange[0], xrange[1], yrange[0], yrange[1])
pgsci(ci) # set color index.
pgbox() # draw axes.
pgsci(1) # back to color index 1 (white)
pglab(label[0], label[1], fname) # label the plot
return
def saveResults(file, solutions, step, sigmareject, cluster, colorused):
f = open(file+".asc", "w")
which_solution = 0
import MySQLdb, sys, os, re
db2 = MySQLdb.connect(db='subaru', user='weaklensing', passwd='darkmatter', host='ki-rh8')
c = db2.cursor()
#c.execute("DROP TABLE IF EXISTS photometry_db")
for result in solutions:
which_solution += 1
if Numeric.sometrue(result[2]):
import os , time
user_name = os.environ['USER']
bonn_target = os.environ['BONN_TARGET']
bonn_filter = os.environ['BONN_FILTER']
time_now = time.asctime()
user = user_name #+ str(time.time())
standardstartype = os.environ['STANDARDSTARTYPE']
floatvars = {'ZP':result[0][0],'AIRMASS':result[0][1],'COLOR':result[0][2],'ZPERR':result[1][0],'AIRMASSERR':result[1][1],'COLORERR':result[1][2],'REDCHISQ':result[2],'MEANERR':result[3]}
stringvars = {'USER':user_name,'BONN_TARGET':bonn_target,'BONN_FILTER':bonn_filter,'TIME':time_now,'CHOICE':'', 'NUMBERVARS':4-which_solution,'STANDARDSTARTYPE':standardstartype,'USER': user, 'step': step, 'sigmareject':sigmareject, 'cluster':cluster,'colorused':colorused}
# make database if it doesn't exist
make_db = reduce(lambda x,y: x + ',' + y,[x + ' float(30)' for x in floatvars.keys()])
make_db += ',' + reduce(lambda x,y: x + ',' + y,[x + ' varchar(80)' for x in stringvars.keys()])
command = "CREATE TABLE IF NOT EXISTS photometry_db ( id MEDIUMINT NOT NULL AUTO_INCREMENT, PRIMARY KEY (id), " + make_db + ")"
print command
#c.execute(command)
# insert new observation
names = reduce(lambda x,y: x + ',' + y, [x for x in floatvars.keys()])
values = reduce(lambda x,y: str(x) + ',' + str(y), [floatvars[x] for x in floatvars.keys()])
names += ',' + reduce(lambda x,y: x + ',' + y, [x for x in stringvars.keys()])
values += ',' + reduce(lambda x,y: x + ',' + y, ["'" + str(stringvars[x]) + "'" for x in stringvars.keys()])
command = "INSERT INTO photometry_db (" + names + ") VALUES (" + values + ")"
print command
#c.execute(command)
f.write("%s %s %s\n" % (result[0][0], result[0][1], result[0][2]))
f.write("%s %s %s\n" % (result[1][0], result[1][1], result[1][2]))
f.write("%s#ReducedChiSq\n" % (result[2]))
f.write("%s#MeanError\n" % (result[3]))
f.write("%s\n" % (id))
else:
f.write("-1 -1 -1\n")
f.write("-1 -1 -1\n")
f.write("-1#ReducedChiSq\n")
f.write("-1#MeanError\n")
f.write("%s\n" % (id))
    f.close()
return id
def usage():
print "Usage:"
print "photo_abs.py -i input -f filter -n GABODSID - e ext. coeff. -c color coeff. -o output -l label"
print
print " -i, --input=STRING Input file, must have 4 columns: Instrumental Mag, Standard Mag, Color, Airmass"
print " -o, --output=STRING Output file basename"
print " -n, --night=INT GABODSID, unique numerical night identifier"
print " -e, --extinction=FLOAT Default value of extinction coefficient for one/two parameter fit"
print " -c, --color=FLOAT Default value of color term for one parameter fit"
print " -l, --label=STRING Label for color axis (e.g. B-V)"
print
print "Author:"
print " Joerg Dietrich <dietrich@astro.uni-bonn.de>"
print
return
if __name__ == "__main__":
__bonn_logger_id__ = BonnLogger.addCommand('maskBadOverscans.py',
sys.argv[1:])
try:
opts, args = getopt.getopt(sys.argv[1:],
"i:n:o:e:c:l:s:",
["input=", "night=", "extinction=",
"color=", "output=", "label=","sigmareject=","step=","cluster=","colorused="])
except getopt.GetoptError:
usage()
BonnLogger.updateStatus(__bonn_logger_id__, 1)
sys.exit(2)
print sys.argv[1:]
infile = night = extcoeff = colcoeff = outfile = label = sigmareject = step = cluster = colorused = None
for o, a in opts:
if o in ("-i", "--input"):
infile = a
elif o in ("-o", "--output"):
outfile = a
elif o in ("-n", "--night"):
night = int(a)
elif o in ("-e", "--extinction"):
extcoeff = float(a)
elif o in ("-c", "--color"):
colcoeff = float(a)
elif o in ("-l", "--label"):
label = a
elif o in ("-s", "--sigmareject"):
sigmareject = float(a)
elif o in ("-t", "--step"):
step = a
elif o in ("-c", "--cluster"):
cluster = a
elif o in ("-u", "--colorused"):
colorused = a
else:
print "option:", o
usage()
BonnLogger.updateStatus(__bonn_logger_id__, 1)
sys.exit(2)
print cluster
#raw_input()
if not infile or night==None or not outfile or \
extcoeff==None or colcoeff==None or label==None:
#print infile, night, outfile, coeff, color
usage()
BonnLogger.updateStatus(__bonn_logger_id__, 1)
sys.exit(2)
data, airmass, color, magErr = readInput(infile)
    solutions = photCalib(data, airmass, color, magErr, [24, extcoeff, colcoeff], sigmareject)
makePlots(data, airmass, color, outfile, solutions, label)
saveResults(outfile, solutions, step, sigmareject, cluster, colorused)
BonnLogger.updateStatus(__bonn_logger_id__, 0)
| 38.689218
| 279
| 0.564809
|
ef3a8cec2243c6a5b660ecc78e6d1ac39c3c8f38
| 2,946
|
py
|
Python
|
common/vec_env/dummy_vec_env.py
|
nju-fuzy/baselines
|
c97a379944632dcc769167b4b0381f6d61729a4f
|
[
"MIT"
] | 1
|
2021-05-21T11:57:44.000Z
|
2021-05-21T11:57:44.000Z
|
common/vec_env/dummy_vec_env.py
|
nju-fuzy/baselines
|
c97a379944632dcc769167b4b0381f6d61729a4f
|
[
"MIT"
] | null | null | null |
common/vec_env/dummy_vec_env.py
|
nju-fuzy/baselines
|
c97a379944632dcc769167b4b0381f6d61729a4f
|
[
"MIT"
] | null | null | null |
import numpy as np
from gym import spaces
from . import VecEnv
from .util import copy_obs_dict, dict_to_obs, obs_space_info
class DummyVecEnv(VecEnv):
"""
    VecEnv that runs multiple environments sequentially, that is,
    the step and reset commands are sent to one environment at a time.
Useful when debugging and when num_env == 1 (in the latter case,
avoids communication overhead)
"""
def __init__(self, env_fns):
"""
Arguments:
        env_fns: iterable of callable functions that build environments
"""
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
VecEnv.__init__(self, len(env_fns), env.observation_space, env.action_space)
obs_space = env.observation_space
self.keys, shapes, dtypes = obs_space_info(obs_space)
self.buf_obs = { k: np.zeros((self.num_envs,) + tuple(shapes[k]), dtype=dtypes[k]) for k in self.keys }
        self.buf_dones = np.zeros((self.num_envs,), dtype=bool)
self.buf_rews = np.zeros((self.num_envs,), dtype=np.float32)
self.buf_infos = [{} for _ in range(self.num_envs)]
self.actions = None
self.specs = [e.spec for e in self.envs]
def step_async(self, actions):
listify = True
try:
if len(actions) == self.num_envs:
listify = False
except TypeError:
pass
if not listify:
self.actions = actions
else:
assert self.num_envs == 1, "actions {} is either not a list or has a wrong size - cannot match to {} environments".format(actions, self.num_envs)
self.actions = [actions]
def step_wait(self):
for e in range(self.num_envs):
action = self.actions[e]
if isinstance(self.envs[e].action_space, spaces.Discrete):
action = int(action)
obs, self.buf_rews[e], self.buf_dones[e], self.buf_infos[e] = self.envs[e].step(action)
if self.buf_dones[e]:
obs = self.envs[e].reset()
self._save_obs(e, obs)
return (self._obs_from_buf(), np.copy(self.buf_rews), np.copy(self.buf_dones),
self.buf_infos.copy())
def reset(self):
for e in range(self.num_envs):
obs = self.envs[e].reset()
self._save_obs(e, obs)
return self._obs_from_buf()
def _save_obs(self, e, obs):
for k in self.keys:
if k is None:
self.buf_obs[k][e] = obs
else:
self.buf_obs[k][e] = obs[k]
def _obs_from_buf(self):
return dict_to_obs(copy_obs_dict(self.buf_obs))
def get_images(self):
return [env.render(mode='rgb_array') for env in self.envs]
def render(self, mode='human'):
if self.num_envs == 1:
return self.envs[0].render(mode=mode)
else:
return super().render(mode=mode)
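# --- Hedged usage sketch (added comment, not part of the original module) ---
# A minimal driver for DummyVecEnv, assuming `gym` and its 'CartPole-v1'
# environment are available (names are illustrative only):
#
#   import gym
#   venv = DummyVecEnv([lambda: gym.make('CartPole-v1')])
#   obs = venv.reset()                      # batched observation, shape (1, 4)
#   actions = [venv.action_space.sample()]  # one action per environment
#   obs, rews, dones, infos = venv.step(actions)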
| 35.926829
| 157
| 0.595044
|
113c657234e858926dd0cfbdaada188f63312a33
| 453
|
py
|
Python
|
cmdline.py
|
tonib/tokens-rnn-tensorflow
|
9da815d3f9f4d215dd4d7041e5638eeb3ac51a0a
|
[
"MIT"
] | null | null | null |
cmdline.py
|
tonib/tokens-rnn-tensorflow
|
9da815d3f9f4d215dd4d7041e5638eeb3ac51a0a
|
[
"MIT"
] | null | null | null |
cmdline.py
|
tonib/tokens-rnn-tensorflow
|
9da815d3f9f4d215dd4d7041e5638eeb3ac51a0a
|
[
"MIT"
] | null | null | null |
import argparse
import os
def parse_command_line() -> object:
    # Get command line arguments
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--data_dir', type=str, default=os.path.join( 'data' , 'quixote' ), help='Model directory')
parser.add_argument('--mode', type=str, default='character', help='Vocabulary mode: character or word')
return parser.parse_args()
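# --- Hedged usage note (added comment, not part of the original module) ---
# A caller would typically do:
#   args = parse_command_line()
#   print(args.data_dir, args.mode)  # e.g. "data/quixote character"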
| 41.181818
| 115
| 0.730684
|
c66dacdbc637fe57ea6bd0b6c4dc949eb794022f
| 131
|
py
|
Python
|
secrets.py
|
haugstve/Coursera_Capstone
|
89138706b52bad53d53c30c83b1ad324cfa0eae3
|
[
"MIT"
] | 1
|
2019-03-20T11:27:42.000Z
|
2019-03-20T11:27:42.000Z
|
secrets.py
|
haugstve/Coursera_Capstone
|
89138706b52bad53d53c30c83b1ad324cfa0eae3
|
[
"MIT"
] | null | null | null |
secrets.py
|
haugstve/Coursera_Capstone
|
89138706b52bad53d53c30c83b1ad324cfa0eae3
|
[
"MIT"
] | null | null | null |
CLIENT_ID = 'Your CLIENT_ID goes here' # your Foursquare ID
CLIENT_SECRET = 'Your CLIENT_SECRET goes here' # your Foursquare Secret
| 65.5
| 71
| 0.793893
|
b39f6463680e8368495685ba258f041646535434
| 1,950
|
py
|
Python
|
config/settings/local.py
|
mkim0818/my-concert
|
0cc71bb8da29c1d807cd828e219ebc4ad7a25ce9
|
[
"MIT"
] | null | null | null |
config/settings/local.py
|
mkim0818/my-concert
|
0cc71bb8da29c1d807cd828e219ebc4ad7a25ce9
|
[
"MIT"
] | null | null | null |
config/settings/local.py
|
mkim0818/my-concert
|
0cc71bb8da29c1d807cd828e219ebc4ad7a25ce9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
'''
from .common import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = env("DJANGO_SECRET_KEY", default='r1&=1f3*&1j)&&&1rug@phzjz95=n#=)o34yt7(e^pcr07(+hn')
# Mail settings
# ------------------------------------------------------------------------------
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND',
default='django.core.mail.backends.console.EmailBackend')
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar', )
INTERNAL_IPS = ('127.0.0.1', '10.0.2.2',)
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('django_extensions', )
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Your local stuff: Below this line define 3rd party library settings
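# Hedged usage note (added comment, not part of the original settings file):
# this module is typically selected by pointing Django at it, for example
#   DJANGO_SETTINGS_MODULE=config.settings.local python manage.py runserver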
| 30.952381
| 99
| 0.495897
|
d312eeee2c7810c33d39f0c8f11da9f2b73357e5
| 5,085
|
py
|
Python
|
akshare/index/index_cni.py
|
Joeklepko/akshare
|
b290fad80cd6fed992b2b18496582cd6c7ae0d90
|
[
"MIT"
] | 1
|
2021-08-21T14:50:39.000Z
|
2021-08-21T14:50:39.000Z
|
akshare/index/index_cni.py
|
Joeklepko/akshare
|
b290fad80cd6fed992b2b18496582cd6c7ae0d90
|
[
"MIT"
] | null | null | null |
akshare/index/index_cni.py
|
Joeklepko/akshare
|
b290fad80cd6fed992b2b18496582cd6c7ae0d90
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2021/6/16 15:18
Desc: CNI Index (国证指数)
http://www.cnindex.com.cn/index.html
"""
import zipfile
import pandas as pd
import requests
def index_cni_all() -> pd.DataFrame:
"""
    CNI Index (国证指数) - all indexes
    http://www.cnindex.com.cn/zh_indices/sese/index.html?act_menu=1&index_type=-1
    :return: all CNI indexes
:rtype: pandas.DataFrame
"""
url = "http://www.cnindex.com.cn/index/indexList"
params = {
"channelCode": "-1",
"rows": "2000",
"pageNum": "1",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["rows"])
temp_df.columns = [
"_",
"_",
"指数代码",
"_",
"_",
"_",
"_",
"_",
"指数简称",
"_",
"_",
"_",
"样本数",
"收盘点位",
"涨跌幅",
"_",
"PE滚动",
"_",
"成交量",
"成交额",
"总市值",
"自由流通市值",
"_",
"_",
]
temp_df = temp_df[
[
"指数代码",
"指数简称",
"样本数",
"收盘点位",
"涨跌幅",
"PE滚动",
"成交量",
"成交额",
"总市值",
"自由流通市值",
]
]
return temp_df
def index_cni_hist(index: str = "399001") -> pd.DataFrame:
"""
    Historical daily quotes of a given index
    http://www.cnindex.com.cn/module/index-detail.html?act_menu=1&indexCode=399001
    :param index: index code
    :type index: str
    :return: historical daily quotes of the index
:rtype: pandas.DataFrame
"""
url = "http://hq.cnindex.com.cn/market/market/getIndexDailyDataWithDataFormat"
params = {
"indexCode": index,
"startDate": "",
"endDate": "",
"frequency": "day",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["data"])
temp_df.columns = [
"日期",
"_",
"最高价",
"开盘价",
"最低价",
"收盘价",
"_",
"涨跌幅",
"成交额",
"成交量",
"_",
]
temp_df = temp_df[
[
"日期",
"开盘价",
"最高价",
"最低价",
"收盘价",
"涨跌幅",
"成交量",
"成交额",
]
]
temp_df["涨跌幅"] = temp_df["涨跌幅"].str.replace("%", "")
temp_df["涨跌幅"] = temp_df["涨跌幅"].astype("float")
temp_df["涨跌幅"] = temp_df["涨跌幅"] / 100
return temp_df
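# Illustrative sketch (not part of the original module): a minimal example of turning the
# history returned by index_cni_hist above into a datetime-indexed closing-price series.
# Column names follow the frame built in that function.
def _demo_close_series(index: str = "399005") -> pd.Series:
    df = index_cni_hist(index=index)
    df["日期"] = pd.to_datetime(df["日期"])
    return df.set_index("日期")["收盘价"]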
def index_cni_detail(index: str = '399005', date: str = '2020-11') -> pd.DataFrame:
"""
国证指数-样本详情-指定日期的样本成份
http://www.cnindex.com.cn/module/index-detail.html?act_menu=1&indexCode=399001
:param index: 指数代码
:type index: str
:param date: 指定月份
:type date: str
:return: 指定日期的样本成份
:rtype: pandas.DataFrame
"""
url = 'http://www.cnindex.com.cn/sample-detail/download'
params = {
'indexcode': index,
'dateStr': date
}
r = requests.get(url, params=params)
temp_df = pd.read_excel(r.content)
temp_df['样本代码'] = temp_df['样本代码'].astype(str).str.zfill(6)
temp_df.columns = [
'日期',
'样本代码',
'样本简称',
'所属行业',
'自由流通市值',
'总市值',
'权重',
]
return temp_df
def index_cni_detail_hist(index: str = '399005') -> pd.DataFrame:
"""
国证指数-样本详情-历史样本
http://www.cnindex.com.cn/module/index-detail.html?act_menu=1&indexCode=399001
:param index: 指数代码
:type index: str
:return: 历史样本
:rtype: pandas.DataFrame
"""
url = 'http://www.cnindex.com.cn/sample-detail/download-history'
params = {
'indexcode': index
}
r = requests.get(url, params=params)
temp_df = pd.read_excel(r.content)
temp_df['样本代码'] = temp_df['样本代码'].astype(str).str.zfill(6)
temp_df.columns = [
'日期',
'样本代码',
'样本简称',
'所属行业',
'自由流通市值',
'总市值',
'权重',
]
return temp_df
def index_cni_detail_hist_adjust(index: str = '399231') -> pd.DataFrame:
"""
国证指数-样本详情-历史调样
http://www.cnindex.com.cn/module/index-detail.html?act_menu=1&indexCode=399001
:param index: 指数代码
:type index: str
:return: 历史调样
:rtype: pandas.DataFrame
"""
url = 'http://www.cnindex.com.cn/sample-detail/download-adjustment'
params = {
'indexcode': index
}
r = requests.get(url, params=params)
try:
temp_df = pd.read_excel(r.content, engine="openpyxl")
    except zipfile.BadZipFile:
        # The endpoint did not return a valid workbook (e.g. no adjustment history available).
        return None
temp_df['样本代码'] = temp_df['样本代码'].astype(str).str.zfill(6)
return temp_df
if __name__ == "__main__":
index_cni_all_df = index_cni_all()
print(index_cni_all_df)
index_cni_hist_df = index_cni_hist(index="399005")
print(index_cni_hist_df)
index_cni_detail_df = index_cni_detail(index='399005', date='2020-11')
print(index_cni_detail_df)
index_cni_detail_hist_df = index_cni_detail_hist(index='399005')
print(index_cni_detail_hist_df)
index_cni_detail_hist_adjust_df = index_cni_detail_hist_adjust(index='399005')
print(index_cni_detail_hist_adjust_df)
| 23.219178
| 83
| 0.534513
|
3b0759af91584507699635fa3dbbca55b6057a1e
| 4,179
|
py
|
Python
|
mendeley/models/files.py
|
membranepotential/mendeley-python-sdk
|
0336f0164f4d409309e813cbd0140011b5b2ff8f
|
[
"Apache-2.0"
] | 103
|
2015-01-12T00:40:51.000Z
|
2022-03-29T07:02:06.000Z
|
mendeley/models/files.py
|
membranepotential/mendeley-python-sdk
|
0336f0164f4d409309e813cbd0140011b5b2ff8f
|
[
"Apache-2.0"
] | 26
|
2015-01-10T04:08:41.000Z
|
2021-02-05T16:31:37.000Z
|
mendeley/models/files.py
|
membranepotential/mendeley-python-sdk
|
0336f0164f4d409309e813cbd0140011b5b2ff8f
|
[
"Apache-2.0"
] | 43
|
2015-03-04T18:11:06.000Z
|
2022-03-13T02:33:34.000Z
|
import json
import os
import re
from mendeley.models.annotations import Annotation
from mendeley.response import SessionResponseObject
class File(SessionResponseObject):
"""
A file attached to a document.
.. attribute:: id
.. attribute:: size
.. attribute:: file_name
.. attribute:: mime_type
.. attribute:: filehash
.. attribute:: download_url
"""
content_type = 'application/vnd.mendeley-file.1+json'
    filename_regex = re.compile(r'filename="(\S+)"')
@property
def download_url(self):
"""
the URL at which the file can be downloaded. This is only valid for a short time, so should not be cached.
"""
file_url = '/files/%s' % self.id
rsp = self.session.get(file_url, allow_redirects=False)
return rsp.headers['location']
def document(self, view=None):
"""
:param view: document view to return.
:return: a :class:`UserDocument <mendeley.models.documents.UserDocument>` or
:class:`CatalogDocument <mendeley.models.catalog.CatalogDocument>`, depending on which the document is
attached to.
"""
if 'document_id' in self.json:
return self.session.documents.get_lazy(self.json['document_id'], view=view)
elif 'catalog_id' in self.json:
return self.session.catalog.get_lazy(self.json['catalog_id'], view=view)
else:
return None
def download(self, directory):
"""
Downloads the file.
:param directory: the directory to download the file to. This must exist.
:return: the path to the downloaded file.
"""
rsp = self.session.get('/files/%s' % self.id, stream=True)
filename = self.filename_regex.search(rsp.headers['content-disposition']).group(1)
path = os.path.join(directory, filename)
with open(path, 'wb') as f:
for block in rsp.iter_content(1024):
if not block:
break
f.write(block)
return path
def delete(self):
"""
Deletes the file.
"""
self.session.delete('/files/%s' % self.id)
def add_sticky_note(self, text, x_position, y_position, page_number):
"""
Adds a sticky note to this file.
:param text: the text of the sticky_note.
:param x_position: the x position on the file of the sticky_note.
        :param y_position: the y position on the file of the sticky_note.
:param page_number: the page_number on the file of the sticky_note.
:return: a :class:`Annotation <mendeley.models.annotations.Annotation>`.
"""
position = {'x': x_position, 'y': y_position}
bounding_box = {'top_left': position, 'bottom_right': position, 'page': page_number}
annotation = {
'document_id': self.document().id,
'text': text,
'filehash': self.filehash,
'positions': [bounding_box]
}
rsp = self.session.post('/annotations/', data=json.dumps(annotation), headers={
'Accept': Annotation.content_type,
'Content-Type': Annotation.content_type
})
return Annotation(self.session, rsp.json())
def add_highlight(self, bounding_boxes, color):
"""
Adds a highlight to this file.
:param bounding_boxes: the area the highlight covers on the file.
:param color: the color of the highlight.
:return: a :class:`Annotation <mendeley.models.annotations.Annotation>`.
"""
annotation = {
'document_id': self.document().id,
'filehash': self.filehash,
'positions': [box.json for box in bounding_boxes],
'color': color.json
}
rsp = self.session.post('/annotations/', data=json.dumps(annotation), headers={
'Accept': Annotation.content_type,
'Content-Type': Annotation.content_type
})
return Annotation(self.session, rsp.json())
@classmethod
def fields(cls):
return ['id', 'size', 'file_name', 'mime_type', 'filehash']
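# Illustrative usage sketch (not part of the original module). Assuming `f` is a File
# instance obtained from the SDK elsewhere, the methods defined above are used as:
#
#   path = f.download('/tmp')   # saved under the server-supplied filename
#   f.add_sticky_note('Check section 2', x_position=100, y_position=200, page_number=1)
#   f.delete()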
| 33.97561
| 119
| 0.603494
|
30b556ce98f818711859a552268bb9944b81c9f0
| 1,884
|
py
|
Python
|
randomizer/slot.py
|
jrobeson/ColdSteel3Tools
|
2db0f9b577f237ed1cffcde3ce7362eb76ebc094
|
[
"MIT"
] | 4
|
2021-05-12T16:48:06.000Z
|
2021-11-11T23:41:03.000Z
|
randomizer/slot.py
|
jrobeson/ColdSteel3Tools
|
2db0f9b577f237ed1cffcde3ce7362eb76ebc094
|
[
"MIT"
] | 6
|
2021-05-16T14:44:12.000Z
|
2021-09-18T00:15:04.000Z
|
randomizer/slot.py
|
jrobeson/ColdSteel3Tools
|
2db0f9b577f237ed1cffcde3ce7362eb76ebc094
|
[
"MIT"
] | 2
|
2021-05-16T13:17:43.000Z
|
2021-09-14T01:34:54.000Z
|
import random, csv, json
from randomizer.base import BaseRandomizer
class SlotRandomizer(BaseRandomizer):
def __init__(self, projectName=None, seed=None, programMode=True) -> None:
super().__init__(projectName=projectName, seed=seed, programMode=programMode)
random.seed(self.seed)
self.inputPath += 'slot'
def randomize(self, randomizeBase=True, randomizeGrowth=True, excludeGuest=False):
with open('result.txt', 'a', encoding='utf-8') as resultFile, open('ref/char.json') as charFile:
chars = json.load(charFile)
resultFile.write('\nEP Randomizer Results:\n')
inputPath = self.inputPath
with open(f'{inputPath}/SlotEp.csv', newline='', encoding='utf-8') as epFile:
statusReader = csv.DictReader(epFile)
headers = statusReader.fieldnames
slots = list(statusReader)
skipId = 16 if excludeGuest else 48
for slot in slots:
charId = slot['character_id']
if int(charId) >= skipId: continue
resultFile.write(f'\n{chars[charId]}:\n')
if randomizeBase:
baseEp = random.randrange(50, 251, 5)
slot['base_ep'] = baseEp
resultFile.write(f'Base EP: {baseEp}\n')
if randomizeGrowth:
growthEp = [random.randrange(0, 161, 5) for _ in range(7)]
growthEp.sort()
for i in range(7):
slot[f'increase_{i + 1}'] = growthEp[i]
resultFile.write(f'EP Growth: {growthEp}\n')
with open(f'{inputPath}/SlotEp.csv', 'w', newline='', encoding='utf-8') as epFile:
writer = csv.DictWriter(epFile, fieldnames=headers)
writer.writeheader()
writer.writerows(slots)
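# Illustrative usage sketch (not part of the original module): the constructor arguments
# follow SlotRandomizer's signature above, but the project layout it expects (an input
# path ending in 'slot' plus ref/char.json) is assumed to exist:
#
#   randomizer = SlotRandomizer(projectName='MyProject', seed=1234)
#   randomizer.randomize(randomizeBase=True, randomizeGrowth=True, excludeGuest=True)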
| 43.813953
| 104
| 0.571125
|
4a0f770ab9a598cbd754b2fd243fd0f7fb7fba9e
| 25,330
|
py
|
Python
|
xfel/command_line/cspad_cbf_metrology.py
|
indu-in/cctbx_project1
|
e09447ddc2ba3aa9d91b21008b0162ab290b0c30
|
[
"BSD-3-Clause-LBNL"
] | 2
|
2021-03-18T12:31:57.000Z
|
2022-03-14T06:27:06.000Z
|
xfel/command_line/cspad_cbf_metrology.py
|
indu-in/cctbx_project1
|
e09447ddc2ba3aa9d91b21008b0162ab290b0c30
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
xfel/command_line/cspad_cbf_metrology.py
|
indu-in/cctbx_project1
|
e09447ddc2ba3aa9d91b21008b0162ab290b0c30
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
# -*- mode: python; coding: utf-8; indent-tabs-mode: nil; python-indent: 2 -*-
#
# LIBTBX_SET_DISPATCHER_NAME cspad.cbf_metrology
#
from __future__ import absolute_import, division, print_function
from six.moves import range
import os, sys, random
from iotbx.phil import parse
from libtbx import easy_run
from libtbx.utils import Sorry
import six
from six.moves import zip
phil_scope = parse("""
method = *hierarchical expanding
.type = choice
reflections = reindexedstrong *indexed integrated
.type = choice
  .help = Which subset of reflections to use for refinement
tag = cspad
.type = str
.help = Name of this refinement run. Output filenames will use this tag.
start_at_hierarchy_level = 0
.type = int
.help = Start refinement at this hierarchy level
refine_to_hierarchy_level = 2
.type = int
.help = maximum level to refine cspad to
refine_distance = True
.type = bool
.help = If true, allow root hierarchy level to refine in Z. Otherwise fix this \
axis. Regardless, higher hierarchy levels will refine in Z.
refine_energy = False
.type = bool
.help = If true, when refining level 0, also refine beam energy. Subsequent hierarchy \
levels will fix the energy in place.
flat_refinement = False
.type = bool
.help = If True, do not refine tilt (Tau2 and Tau3) when refining panel positions. Further, \
don't refine distance at levels 1 or higher (respects refine_distance for level 0).
flat_refinement_with_distance = False
.type = bool
  .help = If True, and if using flat refinement, then use constraints to allow distance \
to refine at levels 1 and higher.
n_subset = None
.type = int
.help = Refine a random subset of the provided files
split_dataset = False
.type = bool
.help = After refining the full set of images, if split_dataset is True, the \
data will be split in two using odd and even file numbers and each half \
will be refined independently. For each half, _1 or _2 is appended to \
the tag. If used with n_subset, each half will have n_subset/2 images.
data_phil = None
.type = str
.help = Optional phil file with all experiments and reflections for use during \
refinement. If not provided, the program will use whatever directories \
were specified.
rmsd_filter {
enable = True
.type = bool
.help = If enabled, between each round of hierarchical refinement, filter \
the images by positional RMSD
iqr_multiplier = 1.5
.type = float
.help = Interquartile multiplier
}
n_subset_method = *random n_refl significance_filter
.type = choice
.help = Algorithm to be used for choosing the n_subset images/experiments for \
refinement. n_refl chooses the set with the largest numbers of reflections \
listed in the reflection table files, thus giving maximal coverage of the detector tiles \
with the fewest refineable parameters. Significance_filter chooses the subset of \
images with maximum reflections above an I/sigI cutoff
n_refl_panel_list = None
.type = ints
.help = If n_subset_method is n_refl, specify which panels to search on.
panel_filter = None
.type = ints
.help = Specify a list of panels to include during refinement. Default (None) is to use \
all panels.
output_lcls_geometry = True
.type = bool
.help = If True, convert final refined geometry to LCLS format
""", process_includes=True)
refine_defaults_scope = parse("""
output.include_unused_reflections=False
refinement {
refinery.engine = SparseLevMar
parameterisation {
beam.fix=all
auto_reduction {
action=remove
min_nref_per_parameter=3
}
}
reflections {
outlier {
algorithm=sauter_poon
separate_panels=True
separate_experiments=False
}
}
}
""")
def is_even(filename):
import re
return int(re.findall(r'\d+', filename)[-1][-1]) % 2 == 0
refine_scope = parse("""
include scope dials.command_line.refine.phil_scope
""", process_includes=True)
def run(args):
print("Parsing input...")
if "-c" in args or "-h" in args or "--help" in args:
phil_scope.show(attributes_level=2)
return
user_phil = []
paths = []
refine_phil_file = None
for arg in args:
if os.path.isfile(arg):
try:
if os.path.splitext(arg)[1] == ".phil":
refine_phil_file = arg
continue
except Exception as e:
raise Sorry("Unrecognized file %s"%arg)
if os.path.isdir(arg):
paths.append(arg)
else:
try:
user_phil.append(parse(arg))
except Exception as e:
raise Sorry("Unrecognized argument: %s"%arg)
params = phil_scope.fetch(sources=user_phil).extract()
merged_scope = refine_scope.fetch(refine_defaults_scope)
if refine_phil_file is not None:
merged_scope = merged_scope.fetch(parse(file_name = refine_phil_file))
print("Gathering file names...")
all_exp = []
all_ref = []
if params.data_phil is None:
for path in paths:
exp, ref = find_files(path, params.reflections)
all_exp.extend(exp)
all_ref.extend(ref)
if params.split_dataset:
even_exp = []
odd_exp = []
even_ref = []
odd_ref = []
for exp, ref in zip(all_exp, all_ref):
if is_even(exp):
even_exp.append(exp)
even_ref.append(ref)
else:
odd_exp.append(exp)
odd_ref.append(ref)
base_tag = params.tag
base_n_subset = params.n_subset
      params.n_subset = base_n_subset // 2 if base_n_subset is not None else None
params.tag = base_tag + "_1"
odd_combine_phil = write_combine_phil(params, odd_exp, odd_ref)
params.tag = base_tag + "_2"
even_combine_phil = write_combine_phil(params, even_exp, even_ref)
params.tag = base_tag
params.n_subset = base_n_subset
full_combine_phil = write_combine_phil(params, odd_exp+even_exp, odd_ref+even_ref)
print("Refining full dataset using tag", params.tag)
refine(params, merged_scope, full_combine_phil)
params.tag = base_tag + "_1"
print("Refining odd numbered data using tag", params.tag)
refine(params, merged_scope, odd_combine_phil)
params.tag = base_tag + "_2"
print("Refining even numbered data using tag", params.tag)
refine(params, merged_scope, even_combine_phil)
else:
combine_phil = write_combine_phil(params, all_exp, all_ref)
refine(params, merged_scope, combine_phil)
else:
assert len(paths) == 0
assert params.n_subset is None
print("Refining full dataset using tag", params.tag)
refine(params, merged_scope, params.data_phil)
if params.split_dataset:
input_scope = parse("""
input {
experiments = None
.type = str
.multiple = True
.help = "The experiment list file path"
reflections = None
.type = str
.multiple = True
.help = "The reflection table file path"
}
""")
input_params = input_scope.fetch(parse(file_name = params.data_phil)).extract()
even_exp = []
odd_exp = []
even_ref = []
odd_ref = []
for f in input_params.input.experiments:
if is_even(f):
even_exp.append(f)
else:
odd_exp.append(f)
for f in input_params.input.reflections:
if is_even(f):
even_ref.append(f)
else:
odd_ref.append(f)
base_tag = params.tag
params.tag = base_tag + "_1"
odd_combine_phil = write_combine_phil(params, odd_exp, odd_ref)
params.tag = base_tag + "_2"
even_combine_phil = write_combine_phil(params, even_exp, even_ref)
params.tag = base_tag + "_1"
print("Refining odd numbered data using tag", params.tag)
refine(params, merged_scope, odd_combine_phil)
params.tag = base_tag + "_2"
print("Refining even numbered data using tag", params.tag)
refine(params, merged_scope, even_combine_phil)
def find_files(path, reflections):
all_exp = []
all_ref = []
for filename in os.listdir(path):
if reflections in filename:
extension = os.path.splitext(filename)[1]
if extension not in ['.pickle', '.mpack', '.refl']: continue
if extension == ".pickle":
exp_path = os.path.join(path, filename.rstrip("_%s%s"%(reflections, extension)) + "_refined_experiments.json")
else:
exp_path = os.path.join(path, filename.rstrip("_%s%s"%(reflections, extension)) + "_refined.expt")
if not os.path.exists(exp_path):
if extension == ".pickle":
exp_path = os.path.join(path, filename.rstrip("_%s%s"%(reflections, extension)) + "_experiments.json")
else:
exp_path = os.path.join(path, filename.rstrip("_%s%s"%(reflections, extension)) + "_indexed.expt")
if not os.path.exists(exp_path): continue
all_exp.append(exp_path)
all_ref.append(os.path.join(path, filename))
return all_exp, all_ref
def write_combine_phil(params, all_exp, all_ref):
combine_phil = "%s_combine.phil"%params.tag
f = open(combine_phil, 'w')
for exp_path, ref_path in zip(all_exp, all_ref):
f.write("input {\n")
f.write(" experiments = %s\n"%exp_path)
f.write(" reflections = %s\n"%ref_path)
f.write("}\n")
f.close()
return combine_phil
def refine(params, merged_scope, combine_phil):
print("Combining experiments...")
command = "dials.combine_experiments reference_from_experiment.average_detector=True reference_from_experiment.average_hierarchy_level=0 output.experiments_filename=%s_combined.expt output.reflections_filename=%s_combined.refl %s"%(params.tag, params.tag, combine_phil)
if params.n_subset is not None:
command += " n_subset=%d n_subset_method=%s"%(params.n_subset, params.n_subset_method)
if params.n_refl_panel_list is not None:
command += " n_refl_panel_list=%s"%(",".join(["%d"%p for p in params.n_refl_panel_list]))
if params.refine_energy:
command += " reference_from_experiment.beam=0"
print(command)
result = easy_run.fully_buffered(command=command).raise_if_errors()
result.show_stdout()
if params.method == 'hierarchical':
refine_hierarchical(params, merged_scope, combine_phil)
elif params.method == 'expanding':
refine_expanding(params, merged_scope, combine_phil)
def refine_hierarchical(params, merged_scope, combine_phil):
if params.panel_filter is not None:
from libtbx import easy_pickle
print("Filtering out all reflections except those on panels %s"%(", ".join(["%d"%p for p in params.panel_filter])))
combined_path = "%s_combined.refl"%params.tag
data = easy_pickle.load(combined_path)
sel = None
for panel_id in params.panel_filter:
if sel is None:
sel = data['panel'] == panel_id
else:
sel |= data['panel'] == panel_id
print("Retaining", len(data.select(sel)), "out of", len(data), "reflections")
easy_pickle.dump(combined_path, data.select(sel))
for i in range(params.start_at_hierarchy_level, params.refine_to_hierarchy_level+1):
if params.rmsd_filter.enable:
input_name = "filtered"
else:
if i == params.start_at_hierarchy_level:
input_name = "combined"
else:
input_name = "refined"
if params.rmsd_filter.enable:
command = "cctbx.xfel.filter_experiments_by_rmsd %s %s output.filtered_experiments=%s output.filtered_reflections=%s"
if i == params.start_at_hierarchy_level:
command = command%("%s_combined.expt"%params.tag, "%s_combined.refl"%params.tag,
"%s_filtered.expt"%params.tag, "%s_filtered.refl"%params.tag)
else:
command = command%("%s_refined_level%d.expt"%(params.tag, i-1), "%s_refined_level%d.refl"%(params.tag, i-1),
"%s_filtered_level%d.expt"%(params.tag, i-1), "%s_filtered_level%d.refl"%(params.tag, i-1))
command += " iqr_multiplier=%f"%params.rmsd_filter.iqr_multiplier
print(command)
result = easy_run.fully_buffered(command=command).raise_if_errors()
result.show_stdout()
print("Refining at hierarchy level", i)
refine_phil_file = "%s_refine_level%d.phil"%(params.tag, i)
if i == 0:
fix_list = ['Tau1'] # fix detector rotz
if not params.refine_distance:
fix_list.append('Dist')
if params.flat_refinement:
fix_list.extend(['Tau2','Tau3'])
diff_phil = "refinement.parameterisation.detector.fix_list=%s\n"%",".join(fix_list)
if params.refine_energy:
diff_phil += " refinement.parameterisation.beam.fix=in_spindle_plane+out_spindle_plane\n" # allow energy to refine
else:
# Note, always need to fix something, so pick a panel group and fix its Tau1 (rotation around Z) always
if params.flat_refinement and params.flat_refinement_with_distance:
diff_phil = "refinement.parameterisation.detector.fix_list=Group1Tau1,Tau2,Tau3\n" # refine distance, rotz and xy translation
diff_phil += "refinement.parameterisation.detector.constraints.parameter=Dist\n" # constrain distance to be refined identically for all panels at this hierarchy level
elif params.flat_refinement:
diff_phil = "refinement.parameterisation.detector.fix_list=Dist,Group1Tau1,Tau2,Tau3\n" # refine only rotz and xy translation
else:
diff_phil = "refinement.parameterisation.detector.fix_list=Group1Tau1\n" # refine almost everything
if i == params.start_at_hierarchy_level:
command = "dials.refine %s %s_%s.expt %s_%s.refl"%(refine_phil_file, params.tag, input_name, params.tag, input_name)
else:
command = "dials.refine %s %s_%slevel%d.expt %s_%s_level%d.refl"%(refine_phil_file, params.tag, input_name, i-1, params.tag, input_name, i-1)
diff_phil += "refinement.parameterisation.detector.hierarchy_level=%d\n"%i
command += " output.experiments=%s_refined_level%d.expt output.reflections=%s_refined_level%d.refl"%( \
params.tag, i, params.tag, i)
scope = merged_scope.fetch(parse(diff_phil))
f = open(refine_phil_file, 'w')
f.write(refine_scope.fetch_diff(scope).as_str())
f.close()
print(command)
result = easy_run.fully_buffered(command=command).raise_if_errors()
result.show_stdout()
output_geometry(params)
def refine_expanding(params, merged_scope, combine_phil):
assert params.start_at_hierarchy_level == 0
if params.rmsd_filter.enable:
input_name = "filtered"
command = "cctbx.xfel.filter_experiments_by_rmsd %s %s output.filtered_experiments=%s output.filtered_reflections=%s"
command = command%("%s_combined.expt"%params.tag, "%s_combined.refl"%params.tag,
"%s_filtered.expt"%params.tag, "%s_filtered.refl"%params.tag)
command += " iqr_multiplier=%f"%params.rmsd_filter.iqr_multiplier
print(command)
result = easy_run.fully_buffered(command=command).raise_if_errors()
result.show_stdout()
else:
input_name = "combined"
# --------------------------
if params.panel_filter is not None:
from libtbx import easy_pickle
print("Filtering out all reflections except those on panels %s"%(", ".join(["%d"%p for p in params.panel_filter])))
combined_path = "%s_combined.refl"%params.tag
data = easy_pickle.load(combined_path)
sel = None
for panel_id in params.panel_filter:
if sel is None:
sel = data['panel'] == panel_id
else:
sel |= data['panel'] == panel_id
print("Retaining", len(data.select(sel)), "out of", len(data), "reflections")
easy_pickle.dump(combined_path, data.select(sel))
# ----------------------------------
# this is the order to refine the CSPAD in
steps = {}
steps[0] = [2, 3]
steps[1] = steps[0] + [0, 1]
steps[2] = steps[1] + [14, 15]
steps[3] = steps[2] + [6, 7]
steps[4] = steps[3] + [4, 5]
steps[5] = steps[4] + [12, 13]
steps[6] = steps[5] + [8, 9]
steps[7] = steps[6] + [10, 11]
for s, panels in six.iteritems(steps):
rest = []
for p in panels:
rest.append(p+16)
rest.append(p+32)
rest.append(p+48)
panels.extend(rest)
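  # e.g. steps[0] expands from [2, 3] to [2, 3, 18, 34, 50, 19, 35, 51]: the same two
  # sensors replicated into the other three quadrants via the +16, +32 and +48 offsets.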
levels = {0: (0,1)} # levels 0 and 1
for i in range(7):
levels[i+1] = (2,) # level 2
previous_step_and_level = None
for j in range(8):
from libtbx import easy_pickle
print("Filtering out all reflections except those on panels %s"%(", ".join(["%d"%p for p in steps[j]])))
combined_path = "%s_%s.refl"%(params.tag, input_name)
output_path = "%s_step%d.refl"%(params.tag, j)
data = easy_pickle.load(combined_path)
sel = None
for panel_id in steps[j]:
if sel is None:
sel = data['panel'] == panel_id
else:
sel |= data['panel'] == panel_id
print("Retaining", len(data.select(sel)), "out of", len(data), "reflections")
easy_pickle.dump(output_path, data.select(sel))
for i in levels[j]:
print("Step", j , "refining at hierarchy level", i)
refine_phil_file = "%s_refine_step%d_level%d.phil"%(params.tag, j, i)
if i == 0:
if params.refine_distance:
diff_phil = "refinement.parameterisation.detector.fix_list=Tau1" # fix detector rotz
else:
diff_phil = "refinement.parameterisation.detector.fix_list=Dist,Tau1" # fix detector rotz, distance
if params.flat_refinement:
diff_phil += ",Tau2,Tau3" # Also fix x and y rotations
diff_phil += "\n"
if params.refine_energy:
diff_phil += "refinement.parameterisation.beam.fix=in_spindle_plane+out_spindle_plane\n" # allow energy to refine
else:
# Note, always need to fix something, so pick a panel group and fix its Tau1 (rotation around Z) always
if params.flat_refinement and params.flat_refinement_with_distance:
diff_phil = "refinement.parameterisation.detector.fix_list=Group1Tau1,Tau2,Tau3\n" # refine distance, rotz and xy translation
diff_phil += "refinement.parameterisation.detector.constraints.parameter=Dist\n" # constrain distance to be refined identically for all panels at this hierarchy level
elif params.flat_refinement:
diff_phil = "refinement.parameterisation.detector.fix_list=Dist,Group1Tau1,Tau2,Tau3\n" # refine only rotz and xy translation
else:
diff_phil = "refinement.parameterisation.detector.fix_list=Group1Tau1\n" # refine almost everything
if previous_step_and_level is None:
command = "dials.refine %s %s_%s.expt %s_step%d.refl"%( \
refine_phil_file, params.tag, input_name, params.tag, j)
else:
p_step, p_level = previous_step_and_level
if p_step == j:
command = "dials.refine %s %s_refined_step%d_level%d.expt %s_refined_step%d_level%d.refl"%( \
refine_phil_file, params.tag, p_step, p_level, params.tag, p_step, p_level)
else:
command = "dials.refine %s %s_refined_step%d_level%d.expt %s_step%d.refl"%( \
refine_phil_file, params.tag, p_step, p_level, params.tag, j)
diff_phil += "refinement.parameterisation.detector.hierarchy_level=%d\n"%i
output_experiments = "%s_refined_step%d_level%d.expt"%(params.tag, j, i)
command += " output.experiments=%s output.reflections=%s_refined_step%d_level%d.refl"%( \
output_experiments, params.tag, j, i)
scope = merged_scope.fetch(parse(diff_phil))
f = open(refine_phil_file, 'w')
f.write(refine_scope.fetch_diff(scope).as_str())
f.close()
print(command)
result = easy_run.fully_buffered(command=command).raise_if_errors()
result.show_stdout()
# In expanding mode, if using flat refinement with distance, after having refined this step as a block, unrefined
# panels will have been left behind. Read back the new metrology, compute the shift applied to the panels refined
      # in this step, and apply that shift to the unrefined panels in this step
if params.flat_refinement and params.flat_refinement_with_distance and i > 0:
from dxtbx.model.experiment_list import ExperimentListFactory
from xfel.command_line.cspad_detector_congruence import iterate_detector_at_level, iterate_panels
from scitbx.array_family import flex
from scitbx.matrix import col
from libtbx.test_utils import approx_equal
experiments = ExperimentListFactory.from_json_file(output_experiments, check_format=False)
assert len(experiments.detectors()) == 1
detector = experiments.detectors()[0]
# Displacements: deltas along the vector normal to the detector
displacements = flex.double()
# Iterate through the panel groups at this level
for panel_group in iterate_detector_at_level(detector.hierarchy(), 0, i):
# Were there panels refined in this step in this panel group?
if params.panel_filter:
test = [list(detector).index(panel) in steps[j] for panel in iterate_panels(panel_group) if list(detector).index(panel) in params.panel_filter]
else:
test = [list(detector).index(panel) in steps[j] for panel in iterate_panels(panel_group)]
if not any(test): continue
# Compute the translation along the normal of this panel group. This is defined as distance in dials.refine
displacements.append(col(panel_group.get_local_fast_axis()).cross(col(panel_group.get_local_slow_axis())).dot(col(panel_group.get_local_origin())))
        # Even though the panels are constrained to move the same amount, there is a bit of variation.
stats = flex.mean_and_variance(displacements)
displacement = stats.mean()
print("Average displacement along normals: %f +/- %f"%(stats.mean(), stats.unweighted_sample_standard_deviation()))
# Verify the variation isn't significant
for k in range(1, len(displacements)):
assert approx_equal(displacements[0], displacements[k])
# If all of the panel groups in this level moved, no need to do anything.
if len(displacements) != len(list(iterate_detector_at_level(detector.hierarchy(), 0, i))):
for panel_group in iterate_detector_at_level(detector.hierarchy(), 0, i):
if params.panel_filter:
test = [list(detector).index(panel) in steps[j] and list(detector).index(panel) in params.panel_filter for panel in iterate_panels(panel_group)]
else:
test = [list(detector).index(panel) in steps[j] for panel in iterate_panels(panel_group)]
# If any of the panels in this panel group moved, no need to do anything
if any(test): continue
            # None of the panels in this panel group moved in this step, so apply the
            # displacement computed from the other panel groups at this level
fast = col(panel_group.get_local_fast_axis())
slow = col(panel_group.get_local_slow_axis())
ori = col(panel_group.get_local_origin())
normal = fast.cross(slow)
panel_group.set_local_frame(fast, slow, (ori.dot(fast)*fast) + (ori.dot(slow)*slow) + (normal*displacement))
# Check the new displacements. Should be the same across all panels.
displacements = []
for panel_group in iterate_detector_at_level(detector.hierarchy(), 0, i):
displacements.append(col(panel_group.get_local_fast_axis()).cross(col(panel_group.get_local_slow_axis())).dot(col(panel_group.get_local_origin())))
for k in range(1, len(displacements)):
assert approx_equal(displacements[0], displacements[k])
experiments.as_file(output_experiments)
previous_step_and_level = j,i
output_geometry(params)
def output_geometry(params):
print("Creating files to deploy to psana calibration directory...")
if params.refine_to_hierarchy_level > 2:
deploy_level = 2
else:
deploy_level = params.refine_to_hierarchy_level
if params.method == 'hierarchical':
command = "cxi.experiment_json_to_cbf_def %s_refined_level%d.expt output_def_file=%s_refined_detector_level%d.def"%(params.tag, deploy_level, params.tag, deploy_level)
elif params.method == 'expanding':
command = "cxi.experiment_json_to_cbf_def %s_refined_step7_level%d.expt output_def_file=%s_refined_detector_level%d.def"%(params.tag, deploy_level, params.tag, deploy_level)
print(command)
result = easy_run.fully_buffered(command=command).raise_if_errors()
result.show_stdout()
if params.output_lcls_geometry:
command = "cxi.cbfheader2slaccalib cbf_header=%s_refined_detector_level%d.def out_metrology_file=0-end.data.%s"%(params.tag, deploy_level, params.tag)
print(command)
result = easy_run.fully_buffered(command=command)
errmsg = "\n".join(result.stderr_lines)
if "ImportError" in errmsg and "PSCalib.GeometryAccess" in errmsg:
print("Not converting to LCLS geometry as PSDM is not available")
print("Done.")
else:
result.raise_if_errors()
result.show_stdout()
print("Done. Soft link 0-end.data.%s to 0-end.data in the geometry folder of your calibration folder for your experiment to deploy this metrology."%params.tag)
if __name__ == "__main__":
run(sys.argv[1:])
| 43.005093
| 271
| 0.680261
|
51bc60a8feccf33a991c03ff55e36d810ebbf8fa
| 597
|
py
|
Python
|
scripts/todo.py
|
Jhsmit/awesome-panel
|
53f7754f7c505a2666f6724df26c851ae942ec40
|
[
"Apache-2.0"
] | null | null | null |
scripts/todo.py
|
Jhsmit/awesome-panel
|
53f7754f7c505a2666f6724df26c851ae942ec40
|
[
"Apache-2.0"
] | null | null | null |
scripts/todo.py
|
Jhsmit/awesome-panel
|
53f7754f7c505a2666f6724df26c851ae942ec40
|
[
"Apache-2.0"
] | null | null | null |
# Imports needed to make this snippet runnable: hvplot registers the .hvplot accessor on
# DataFrames and `opts` comes from holoviews. `get_color_cycle` is assumed to be a helper
# provided elsewhere in the surrounding project and is left unresolved here.
import hvplot.pandas  # noqa: F401  (registers the DataFrame.hvplot accessor)
from holoviews import opts
def _to_plot(data):
gridstyle = {"xgrid_line_color": None}
curve_opts = opts.Curve( # pylint: disable=no-member
line_width=4, responsive=True, color=get_color_cycle(),
)
group_by = []
if len(data.ElementName.unique()) > 1:
group_by.append("Element")
if len(data.InstanceName.unique()) > 1:
group_by.append("Instance")
return (
data.rename(columns={"ElementName": "Element", "InstanceName": "Instance"})
.hvplot(x="Datetime", y="Value", by=group_by)
.opts(curve_opts,)
.opts(show_grid=True)
)
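# Note (not part of the original script): `data` is expected to be a DataFrame with
# Datetime, Value, ElementName and InstanceName columns; a grouping dimension is only
# added when a name column contains more than one distinct value.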
| 35.117647
| 84
| 0.599665
|
cba3e4831238649cdd6a95ba07257b484343a69b
| 2,277
|
py
|
Python
|
cactusbot/sepal.py
|
CactusBot/CactusBot
|
6d035bf74bdc8f7fb3ee1e79f8d443f5b17e7ea5
|
[
"MIT"
] | 23
|
2016-02-16T05:09:11.000Z
|
2016-09-20T14:22:51.000Z
|
cactusbot/sepal.py
|
Alkali-Metal/CactusBot
|
6d035bf74bdc8f7fb3ee1e79f8d443f5b17e7ea5
|
[
"MIT"
] | 190
|
2016-09-30T05:31:59.000Z
|
2018-12-22T08:46:49.000Z
|
cactusbot/sepal.py
|
Alkali-Metal/CactusBot
|
6d035bf74bdc8f7fb3ee1e79f8d443f5b17e7ea5
|
[
"MIT"
] | 16
|
2016-10-09T16:51:48.000Z
|
2017-10-25T05:29:10.000Z
|
"""Interact with Sepal."""
import json
import logging
from .packets import MessagePacket, Packet
from .services.websocket import WebSocket
class Sepal(WebSocket):
"""Interact with Sepal."""
def __init__(self, channel, service=None):
super().__init__("wss://cactus.exoz.one/sepal")
self.logger = logging.getLogger(__name__)
self.channel = channel
self.service = service
self.parser = SepalParser()
async def send(self, packet_type, **kwargs):
"""Send a packet to Sepal."""
packet = {
"type": packet_type,
"channel": self.channel
}
packet.update(kwargs)
await super().send(json.dumps(packet))
async def initialize(self):
"""Send a subscribe packet."""
await self.send("subscribe")
async def parse(self, packet):
"""Parse a Sepal packet."""
try:
packet = json.loads(packet)
except (TypeError, ValueError):
self.logger.exception("Invalid JSON: %s.", packet)
return None
else:
self.logger.debug(packet)
return packet
async def handle(self, packet):
"""Convert a JSON packet to a CactusBot packet."""
assert self.service is not None, "Must have a service to handle"
if "event" not in packet:
return
event = packet["event"]
if not hasattr(self.parser, "parse_" + event.lower()):
return
        data = await getattr(self.parser, "parse_" + event.lower())(packet)
if data is None:
return
if isinstance(data, (list, tuple)):
for packet in data:
await self.service.handle(event, packet)
else:
await self.service.handle(event, data)
class SepalParser:
"""Parse Sepal packets."""
async def parse_repeat(self, packet):
"""Parse the incoming repeat packets."""
if "response" in packet["data"]:
return MessagePacket.from_json(packet["data"]["response"])
async def parse_config(self, packet):
"""Parse the incoming config packets."""
return [Packet("config", key=key, values=values)
for key, values in packet["data"].items()]
| 25.58427
| 72
| 0.582784
|
d40e72305aafcb13d79fe80a23f9c3f28fa9c502
| 3,193
|
py
|
Python
|
indico/util/roles.py
|
jgrigera/indico
|
b5538f2755bc38a02313d079bac831ee3dfb44ab
|
[
"MIT"
] | 1
|
2018-11-12T21:29:26.000Z
|
2018-11-12T21:29:26.000Z
|
indico/util/roles.py
|
jgrigera/indico
|
b5538f2755bc38a02313d079bac831ee3dfb44ab
|
[
"MIT"
] | 9
|
2020-09-08T09:25:57.000Z
|
2022-01-13T02:59:05.000Z
|
indico/util/roles.py
|
jgrigera/indico
|
b5538f2755bc38a02313d079bac831ee3dfb44ab
|
[
"MIT"
] | 3
|
2020-07-20T09:09:44.000Z
|
2020-10-19T00:29:49.000Z
|
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
import csv
from flask import flash, session
from indico.core.errors import UserValueError
from indico.modules.events.roles.forms import ImportMembersCSVForm
from indico.modules.users import User
from indico.util.i18n import _, ngettext
from indico.util.string import to_unicode, validate_email
from indico.web.flask.templating import get_template_module
from indico.web.util import jsonify_data, jsonify_template
class ImportRoleMembersMixin(object):
"""Import members from a CSV file into a role."""
logger = None
def import_members_from_csv(self, f):
reader = csv.reader(f.read().splitlines())
emails = set()
for num_row, row in enumerate(reader, 1):
if len(row) != 1:
raise UserValueError(_('Row {}: malformed CSV data').format(num_row))
email = to_unicode(row[0]).strip().lower()
if email and not validate_email(email):
raise UserValueError(_('Row {row}: invalid email address: {email}').format(row=num_row, email=email))
if email in emails:
raise UserValueError(_('Row {}: email address is not unique').format(num_row))
emails.add(email)
users = set(User.query.filter(~User.is_deleted, User.all_emails.in_(emails)))
users_emails = {user.email for user in users}
unknown_emails = emails - users_emails
new_members = users - self.role.members
return new_members, users, unknown_emails
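    # Example input (not part of the original module): the CSV parsed above must contain a
    # single column with one email address per row, e.g.
    #
    #   alice@example.com
    #   bob@example.com
    #
    # Rows with extra fields, invalid addresses or duplicate emails raise UserValueError.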
def _process(self):
form = ImportMembersCSVForm()
if form.validate_on_submit():
new_members, users, unknown_emails = self.import_members_from_csv(form.source_file.data)
if form.remove_existing.data:
deleted_members = self.role.members - users
for member in deleted_members:
self.logger.info('User {} removed from role {} by {}'.format(member, self.role, session.user))
self.role.members = users
else:
self.role.members |= users
for user in new_members:
self.logger.info('User {} added to role {} by {}'.format(user, self.role, session.user))
flash(ngettext("{} member has been imported.",
"{} members have been imported.",
len(users)).format(len(users)), 'success')
if unknown_emails:
flash(ngettext("There is no user with this email address: {}",
"There are no users with these email addresses: {}",
len(unknown_emails)).format(', '.join(unknown_emails)), 'warning')
tpl = get_template_module('events/roles/_roles.html')
return jsonify_data(html=tpl.render_role(self.role, collapsed=False, email_button=False))
return jsonify_template('events/roles/import_members.html', form=form, role=self.role)
| 43.739726
| 117
| 0.639524
|
ab74115d134f7f85741b18c9cdffb66c814c52ba
| 639
|
py
|
Python
|
tests/unicamp/test_uni_2009p1f2_olimpiadas.py
|
hiroshisiq/AEDlogis2021
|
0e153bd52f2b6cff465d4ffab31bfc28d003b24f
|
[
"MIT"
] | 1
|
2021-06-03T22:46:10.000Z
|
2021-06-03T22:46:10.000Z
|
tests/unicamp/test_uni_2009p1f2_olimpiadas.py
|
hiroshisiq/AEDlogis2021
|
0e153bd52f2b6cff465d4ffab31bfc28d003b24f
|
[
"MIT"
] | 8
|
2021-05-02T00:47:32.000Z
|
2021-05-15T23:51:44.000Z
|
tests/unicamp/test_uni_2009p1f2_olimpiadas.py
|
hiroshisiq/problem_solving
|
0e153bd52f2b6cff465d4ffab31bfc28d003b24f
|
[
"MIT"
] | null | null | null |
import pytest
from tests.run_script import run_script, read_text
RESOURCES_PATH = './tests/unicamp/resources/2009p1f2'
EXECUTABLE_PATH = './problems/unicamp/uni_2009p1f2_olimpiadas.py'
@pytest.mark.parametrize(
'input_path, output_path',
[(f'{RESOURCES_PATH}/case_1.in', f'{RESOURCES_PATH}/case_1.out'),
(f'{RESOURCES_PATH}/case_2.in', f'{RESOURCES_PATH}/case_2.out'),
(f'{RESOURCES_PATH}/case_3.in', f'{RESOURCES_PATH}/case_3.out')]
)
def test_2009p1f2_olimpiadas(input_path: str, output_path: str):
got = run_script(EXECUTABLE_PATH, input_path)
expected = read_text(output_path)
assert got == expected
| 31.95
| 69
| 0.741784
|
5313989d1617583462f31456541a3b466a6dcb38
| 3,285
|
py
|
Python
|
WebDev/2021_05_14_WebClass-7/project1/project1/settings.py
|
Arc29/2021-22-Classes
|
03439d1e70a050758e9f698036a92110cf63cf71
|
[
"MIT"
] | 458
|
2021-04-20T10:24:39.000Z
|
2022-03-31T11:37:35.000Z
|
WebDev/2021_05_14_WebClass-7/project1/project1/settings.py
|
Shineri/2021-22-Classes
|
9dd13c9f896ec83eb14170500fedeb20d416b1ba
|
[
"MIT"
] | 5
|
2021-04-29T02:04:15.000Z
|
2021-11-26T01:40:35.000Z
|
WebDev/2021_05_14_WebClass-7/project1/project1/settings.py
|
Shineri/2021-22-Classes
|
9dd13c9f896ec83eb14170500fedeb20d416b1ba
|
[
"MIT"
] | 59
|
2021-04-19T19:37:27.000Z
|
2022-03-18T08:58:44.000Z
|
"""
Django settings for project1 project.
Generated by 'django-admin startproject' using Django 3.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-9n)j_emh+^tzs$vngo8*t^iq#2*b_gp-h5a0^5q3+9^_$%#7s6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'polls.apps.PollsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'project1.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'project1.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| 25.664063
| 91
| 0.701065
|
9eef9ccc8b8b776d3cb58f97fad65b975615b67a
| 1,318
|
py
|
Python
|
tests/cupyx_tests/scipy_tests/linalg_tests/test_matfuncs.py
|
ameyachawla99/cupy
|
43d78a7d6fbbe0d9ca35177a14da31a483a827ef
|
[
"MIT"
] | null | null | null |
tests/cupyx_tests/scipy_tests/linalg_tests/test_matfuncs.py
|
ameyachawla99/cupy
|
43d78a7d6fbbe0d9ca35177a14da31a483a827ef
|
[
"MIT"
] | null | null | null |
tests/cupyx_tests/scipy_tests/linalg_tests/test_matfuncs.py
|
ameyachawla99/cupy
|
43d78a7d6fbbe0d9ca35177a14da31a483a827ef
|
[
"MIT"
] | null | null | null |
from cupyx.scipy.linalg.matfuncs import sinhm
import cupy
import unittest
class TestMatFuncsNan(unittest.TestCase):
def test_sinhm_values(self):
        # Testing whether it produces the right output
z = cupy.zeros((2, 2))
z[0][0] = 1
z[0][1] = 2
z[1][0] = 3
z[1][1] = 1
r = cupy.zeros((2, 2))
r[0][0] = 1.1752011936438014
r[0][1] = 3.626860407847019
r[1][0] = 10.017874927409903
r[1][1] = 1.1752011936438014
z = sinhm(z)
for i in range(z.shape[0]):
for j in range(z.shape[1]):
assert r[i][j] == z[i][j]
def test_values_nan(self):
# Testing for nan values in matrix
r = cupy.zeros((3, 3))
r.fill(cupy.nan)
self.assertRaises(ValueError, sinhm, r)
def test_values_inf(self):
        # Testing for inf values in matrix
z = cupy.zeros((2, 2))
z[0][0] = cupy.inf
self.assertRaises(ValueError, sinhm, z)
def test_shape(self):
# Testing for matrix shape
k = cupy.zeros((3, 2))
self.assertRaises(ValueError, sinhm, k)
def test_dimension_count(self):
        # Testing whether it is a 2D array or not
g = cupy.zeros((3, 3, 3))
self.assertRaises(ValueError, sinhm, g)
| 25.843137
| 61
| 0.556904
|
ad11af8e674ca148b7c367c7d7b265deb76eccad
| 590
|
py
|
Python
|
solutions/basics2/cookie.py
|
mrparkonline/py_basics
|
821d388e23cebdb0ac6c741a67d2c7d336ec717e
|
[
"MIT"
] | null | null | null |
solutions/basics2/cookie.py
|
mrparkonline/py_basics
|
821d388e23cebdb0ac6c741a67d2c7d336ec717e
|
[
"MIT"
] | null | null | null |
solutions/basics2/cookie.py
|
mrparkonline/py_basics
|
821d388e23cebdb0ac6c741a67d2c7d336ec717e
|
[
"MIT"
] | null | null | null |
# Cookie Selling Program
# input
start = float(input('Enter your starting money: '))
num_cookies = int(input('Enter the number of cookies sold: '))
num_big = int(input('Enter the number of big cookies sold: '))
# processing
total_cookies = num_cookies + num_big
profit_cookies = num_cookies * 1.25 - num_cookies * 0.50
profit_big = num_big * 2 - num_big * 0.75
total_profit = profit_cookies + profit_big
total_money = start + total_profit
# output
print('We sold', total_cookies, 'cookies.')
print('Our profit was:', total_profit)
print('We now have:', total_money)
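# Worked example (not part of the original script): with starting money 10.0, 4 regular
# cookies and 2 big cookies sold:
#   profit_cookies = 4 * 1.25 - 4 * 0.50 = 3.00
#   profit_big     = 2 * 2.00 - 2 * 0.75 = 2.50
#   total_profit   = 5.50, so total_money = 15.50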
| 29.5
| 66
| 0.730508
|
2cd3e8f05c04f91070afdb27f8f87f1c2c7af6e3
| 1,261
|
py
|
Python
|
streamlit/GroMoPo_app.py
|
Gromopo/GroMoPo
|
dfe40ebb429762403dbd249593bf00b22255efc0
|
[
"CC0-1.0"
] | null | null | null |
streamlit/GroMoPo_app.py
|
Gromopo/GroMoPo
|
dfe40ebb429762403dbd249593bf00b22255efc0
|
[
"CC0-1.0"
] | 20
|
2021-11-12T15:23:33.000Z
|
2022-02-02T08:49:00.000Z
|
streamlit/GroMoPo_app.py
|
Gromopo/GroMoPo
|
dfe40ebb429762403dbd249593bf00b22255efc0
|
[
"CC0-1.0"
] | 1
|
2021-11-03T16:31:28.000Z
|
2021-11-03T16:31:28.000Z
|
import streamlit as st
# Our app libs - possibly move to own folder
# from utils import helpers
from utils.multipage import MultiPage
from pages import home, about, submit_model, model_finder
from pathlib import Path
import platform
#st.set_page_config(layout="wide")
st.sidebar.title('Navigation')
app = MultiPage()
app.add_page("Home", home.app) # Name, Function
app.add_page("Model Finder", model_finder.app)
app.add_page("Submit Model", submit_model.app)
app.add_page("About", about.app)
app.run()
st.sidebar.title("Contribute")
st.sidebar.info("This an open source project and you are very welcome to **contribute** your awesome comments,"
" questions, resources and groundwater models to the source code")
st.sidebar.title("About")
st.sidebar.info("This app is maintained and argued on by the GroMoPo mob")
# FIXME this should work independent of the system we are on. Make sure this works on all platforms including streamlit.io
if platform.system() == 'Windows':
main_path = Path(".")
else:
main_path = Path("streamlit")
img_path = main_path.joinpath('pages','img','GroMoPo_logo_V1.png')
st.sidebar.image(str(img_path), caption=None, width=None, use_column_width=None, clamp=False, channels='RGB', output_format='auto')
| 34.081081
| 131
| 0.750991
|
05ec71370706982a61f732cc1b88e363a77924ea
| 2,754
|
py
|
Python
|
tests/core/full_node/test_sync_store.py
|
Storch-Network/chialite
|
587fc53e8ef452e07c6f3f266f58962d065feb5c
|
[
"Apache-2.0"
] | 2
|
2021-06-29T14:05:41.000Z
|
2021-07-15T19:28:26.000Z
|
tests/core/full_node/test_sync_store.py
|
Storch-Network/chialite
|
587fc53e8ef452e07c6f3f266f58962d065feb5c
|
[
"Apache-2.0"
] | 31
|
2021-06-26T23:11:46.000Z
|
2022-03-29T00:12:30.000Z
|
tests/core/full_node/test_sync_store.py
|
Storch-Network/chialite
|
587fc53e8ef452e07c6f3f266f58962d065feb5c
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
import pytest
from chialite.full_node.sync_store import SyncStore
from chialite.util.hash import std_hash
@pytest.fixture(scope="module")
def event_loop():
loop = asyncio.get_event_loop()
yield loop
class TestStore:
@pytest.mark.asyncio
async def test_basic_store(self):
store = await SyncStore.create()
await SyncStore.create()
# Save/get sync
for sync_mode in (False, True):
store.set_sync_mode(sync_mode)
assert sync_mode == store.get_sync_mode()
# clear sync info
await store.clear_sync_info()
store.set_peak_target(std_hash(b"1"), 100)
assert store.get_sync_target_hash() == std_hash(b"1")
assert store.get_sync_target_height() == 100
peer_ids = [std_hash(bytes([a])) for a in range(3)]
assert store.get_peers_that_have_peak([]) == set()
assert store.get_peers_that_have_peak([std_hash(b"block1")]) == set()
assert store.get_heaviest_peak() is None
assert len(store.get_peak_of_each_peer()) == 0
store.peer_has_block(std_hash(b"block10"), peer_ids[0], 500, 10, True)
store.peer_has_block(std_hash(b"block1"), peer_ids[0], 300, 1, False)
store.peer_has_block(std_hash(b"block1"), peer_ids[1], 300, 1, True)
store.peer_has_block(std_hash(b"block10"), peer_ids[2], 500, 10, False)
store.peer_has_block(std_hash(b"block1"), peer_ids[2], 300, 1, False)
assert store.get_heaviest_peak()[0] == std_hash(b"block10")
assert store.get_heaviest_peak()[1] == 10
assert store.get_heaviest_peak()[2] == 500
assert len(store.get_peak_of_each_peer()) == 2
store.peer_has_block(std_hash(b"block1"), peer_ids[2], 500, 1, True)
assert len(store.get_peak_of_each_peer()) == 3
assert store.get_peak_of_each_peer()[peer_ids[0]][2] == 500
assert store.get_peak_of_each_peer()[peer_ids[1]][2] == 300
assert store.get_peak_of_each_peer()[peer_ids[2]][2] == 500
assert store.get_peers_that_have_peak([std_hash(b"block1")]) == set(peer_ids)
assert store.get_peers_that_have_peak([std_hash(b"block10")]) == {peer_ids[0], peer_ids[2]}
store.peer_disconnected(peer_ids[0])
assert store.get_heaviest_peak()[2] == 500
assert len(store.get_peak_of_each_peer()) == 2
assert store.get_peers_that_have_peak([std_hash(b"block10")]) == {peer_ids[2]}
store.peer_disconnected(peer_ids[2])
assert store.get_heaviest_peak()[2] == 300
store.peer_has_block(std_hash(b"block30"), peer_ids[0], 700, 30, True)
assert store.get_peak_of_each_peer()[peer_ids[0]][2] == 700
assert store.get_heaviest_peak()[2] == 700
| 39.913043
| 99
| 0.663399
|
a25042678bf8ba9cdf34ff3c081589d92238f6af
| 4,133
|
py
|
Python
|
src/github3/decorators.py
|
butsyk/github3.py
|
72fa5125fce75c916733839963554765c907e9e7
|
[
"BSD-3-Clause"
] | null | null | null |
src/github3/decorators.py
|
butsyk/github3.py
|
72fa5125fce75c916733839963554765c907e9e7
|
[
"BSD-3-Clause"
] | null | null | null |
src/github3/decorators.py
|
butsyk/github3.py
|
72fa5125fce75c916733839963554765c907e9e7
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""This module provides decorators to the rest of the library."""
from functools import wraps
from requests.models import Response
import os
from io import BytesIO as StringIO
class RequestsStringIO(StringIO):
"""Shim compatibility for string IO."""
def read(self, n=-1, *args, **kwargs):
"""Ignore extra args and kwargs."""
        # Delegate straight to BytesIO.read, ignoring the extra arguments.
return StringIO.read(self, n)
def requires_auth(func):
"""Decorator to note which object methods require authorization."""
@wraps(func)
def auth_wrapper(self, *args, **kwargs):
if hasattr(self, "session") and self.session.has_auth():
return func(self, *args, **kwargs)
else:
from .exceptions import error_for
# Mock a 401 response
r = generate_fake_error_response(
'{"message": "Requires authentication"}'
)
raise error_for(r)
return auth_wrapper
def requires_basic_auth(func):
"""Specific (basic) authentication decorator.
This is used to note which object methods require username/password
authorization and won't work with token based authorization.
"""
@wraps(func)
def auth_wrapper(self, *args, **kwargs):
if hasattr(self, "session") and self.session.auth:
return func(self, *args, **kwargs)
else:
from .exceptions import error_for
# Mock a 401 response
r = generate_fake_error_response(
'{"message": "Requires username/password authentication"}'
)
raise error_for(r)
return auth_wrapper
def requires_app_credentials(func):
"""Require client_id and client_secret to be associated.
This is used to note and enforce which methods require a client_id and
client_secret to be used.
"""
@wraps(func)
def auth_wrapper(self, *args, **kwargs):
client_id, client_secret = self.session.retrieve_client_credentials()
if client_id and client_secret:
return func(self, *args, **kwargs)
else:
from .exceptions import error_for
# Mock a 401 response
r = generate_fake_error_response(
'{"message": "Requires username/password authentication"}'
)
raise error_for(r)
return auth_wrapper
def requires_app_bearer_auth(func):
"""Require the use of application authentication.
.. versionadded:: 1.2.0
"""
@wraps(func)
def auth_wrapper(self, *args, **kwargs):
from . import session
if isinstance(self.session.auth, session.AppBearerTokenAuth):
return func(self, *args, **kwargs)
else:
from . import exceptions
raise exceptions.MissingAppBearerAuthentication(
"This method requires GitHub App authentication."
)
return auth_wrapper
def requires_app_installation_auth(func):
"""Require the use of App's installation authentication.
.. versionadded:: 1.2.0
"""
@wraps(func)
def auth_wrapper(self, *args, **kwargs):
from . import session
if isinstance(self.session.auth, session.AppInstallationTokenAuth):
return func(self, *args, **kwargs)
else:
from . import exceptions
raise exceptions.MissingAppInstallationAuthentication(
"This method requires GitHub App authentication."
)
return auth_wrapper
def generate_fake_error_response(msg, status_code=401, encoding="utf-8"):
"""Generate a fake Response from requests."""
r = Response()
r.status_code = status_code
r.encoding = encoding
r.raw = RequestsStringIO(msg.encode())
r._content_consumed = True
r._content = r.raw.read()
return r
# Use mock decorators when generating documentation, so all function signatures
# are displayed correctly
if os.getenv("GENERATING_DOCUMENTATION", None) == "github3":
requires_auth = requires_basic_auth = lambda x: x # noqa # (No coverage)
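# Illustrative usage sketch (not part of the original module; the class below is
# hypothetical): the decorators are applied to methods of objects exposing a `session`:
#
#   class Endpoint:
#       def __init__(self, session):
#           self.session = session
#
#       @requires_auth
#       def protected(self):
#           return self.session.get('...')
#
# Without credentials the wrapper raises the exception built from the fake 401 response.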
| 28.115646
| 79
| 0.638519
|
127b52e4cd8a53557a326b0657026cc90374cb43
| 102
|
py
|
Python
|
srs/process_data.py
|
kubekmonika/get-data-from-gios-api
|
5e27925527307ef72f189ecaa1422a8cc9e02c9e
|
[
"MIT"
] | null | null | null |
srs/process_data.py
|
kubekmonika/get-data-from-gios-api
|
5e27925527307ef72f189ecaa1422a8cc9e02c9e
|
[
"MIT"
] | null | null | null |
srs/process_data.py
|
kubekmonika/get-data-from-gios-api
|
5e27925527307ef72f189ecaa1422a8cc9e02c9e
|
[
"MIT"
] | null | null | null |
"""
This is a file which will contain functions to process the downloaded data
to desired format.
"""
| 20.4
| 74
| 0.754902
|
2cb4297e3effae797da7f649566bd71922e12914
| 4,244
|
py
|
Python
|
fedelemflowlist/subset_list.py
|
hottleta/Federal-LCA-Commons-Elementary-Flow-List
|
a5ba34acc2af66b802f07c1f59208af135027882
|
[
"CC0-1.0"
] | 1
|
2020-07-09T14:28:25.000Z
|
2020-07-09T14:28:25.000Z
|
fedelemflowlist/subset_list.py
|
hottleta/Federal-LCA-Commons-Elementary-Flow-List
|
a5ba34acc2af66b802f07c1f59208af135027882
|
[
"CC0-1.0"
] | 1
|
2021-03-18T14:27:28.000Z
|
2021-03-18T14:27:28.000Z
|
fedelemflowlist/subset_list.py
|
hottleta/Federal-LCA-Commons-Elementary-Flow-List
|
a5ba34acc2af66b802f07c1f59208af135027882
|
[
"CC0-1.0"
] | 1
|
2021-09-20T05:02:28.000Z
|
2021-09-20T05:02:28.000Z
|
# subset_list.py (fedelemflowlist)
# !/usr/bin/env python3
# coding=utf-8
"""
Functions to filter a flow list for subsets
Functions correspond with the subset names stored in the subsets dictionary
"""
import pandas as pd
from fedelemflowlist.globals import inputpath
subsets = {"freshwater_resources":"get_freshwater_resource_flows",
"water_resources":"get_water_resource_flows",
"land_use":"get_land_use_flows",
#"mineral_resources":"get_mineral_resource_flows",
#"energy":"get_energy_flows",
#"metal_emissions":"get_metal_emission_flows",
"HAP":"get_hazardous_air_pollutant_flows"}
inventory_unit = {"freshwater_resources":"kg",
"water_resources":"kg",
"land_use":"m2*a",
"mineral_resources":"kg",
"energy":"MJ",
"metal_emissions":"kg",
"HAP":"kg"}
def get_subsets() -> list:
    """
    Returns a list of all available inventory subsets
return: list of inventory subsets
"""
list_of_inventories = list(subsets)
return list_of_inventories
def get_inventory_unit(subset):
"""
Returns the inventory unit for the selected subset
:param subset: dictionary key
return: (str) unit for inventory method.
"""
unit = inventory_unit[subset]
return unit
def get_freshwater_resource_flows(fl):
"""
Subsets the flow list for all freshwater resource flows,
excluding resource/air
:param fl: df in standard flowlist format
:return: df in standard flowlist format
"""
flows = fl[fl["Flowable"]=="Water, fresh"]
flows = flows[flows["Context"].str.startswith("resource")]
flows = flows[~flows["Context"].str.startswith("resource/air")]
return flows
def get_water_resource_flows(fl):
"""
Subsets the flow list for all water resource flows,
excluding resource/air
:param fl: df in standard flowlist format
:return: df in standard flowlist format
"""
flows = fl[fl["Flowable"].str.startswith("Water")]
flows = flows[flows["Context"].str.startswith("resource")]
flows = flows[~flows["Context"].str.startswith("resource/air")]
return flows
def get_land_use_flows(fl):
"""
Subsets the flow list for all land use resource flows
:param fl: df in standard flowlist format
:return: df in standard flowlist format
"""
flows = fl[fl["Class"]=="Land"]
return flows
def get_mineral_resource_flows(fl):
"""
Subsets the flow list for all mineral resource flows
:param fl: df in standard flowlist format
:return: df in standard flowlist format
"""
flows = fl[fl["Class"]=="Geological"]
flows = flows[flows["Context"].str.startswith("resource")]
flows = flows[~flows["Flowable"].str.contains(" ore")]
flows = flows[flows["Unit"]=="kg"]
return flows
def get_energy_flows(fl):
"""
Subsets the flow list for all energy flows
:param fl: df in standard flowlist format
:return: df in standard flowlist format
"""
flows = fl[fl["Unit"]=="MJ"]
flows = flows[flows["Context"].str.startswith("resource")]
return flows
def get_metal_emission_flows(fl):
"""
Subsets the flow list for all emissions of metals
:param fl: df in standard flowlist format
:return: df in standard flowlist format
"""
flows = fl[fl["Context"].str.startswith("emission")]
#TODO Update with list of metals
metals = ['Aluminum',
'Antimony']
flows = flows[flows['Flowable'].isin(metals)]
return flows
def get_hazardous_air_pollutant_flows(fl):
"""
    Subsets the flow list for all HAP emissions, based on a list of flows from
    EPA.
:param fl: df in standard flowlist format
:return: df in standard flowlist format
"""
flows = fl[fl["Context"].str.startswith("emission/air")]
haps = pd.read_csv(inputpath+'HAP_flows.csv', usecols=['Flowable'])
# HAPs sourced from EPA via script write_HAP_flows.py
# https://www.epa.gov/haps/initial-list-hazardous-air-pollutants-modifications
flows = flows[flows['Flowable'].isin(haps.Flowable)]
return flows
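# Hedged illustration of dispatching a subset filter by name through the
# `subsets` mapping above; how the package itself performs this lookup may
# differ. `fl` is assumed to be a DataFrame in the standard flow-list format
# (with Flowable, Context, Class and Unit columns).
def _example_apply_subset(fl, subset_name):
    filter_func = globals()[subsets[subset_name]]
    return filter_func(fl)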
avg_line_length: 29.678322 | max_line_length: 82 | alphanum_fraction: 0.653629

hexsha: e2d0be367cd2dc8d71a655fb7a1df6038007fc3c | size: 1,018 | ext: py | lang: Python
max_stars_repo_path: intro/summary-exercises/examples/plot_cumulative_wind_speed_prediction.py | max_stars_repo_name: MarcvdSluys/scipy-lecture-notes | max_stars_repo_head_hexsha: 849762a741ba556a7765c9f2d90a10a7104dfccf | max_stars_repo_licenses: ["CC-BY-4.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: intro/summary-exercises/examples/plot_cumulative_wind_speed_prediction.py | max_issues_repo_name: MarcvdSluys/scipy-lecture-notes | max_issues_repo_head_hexsha: 849762a741ba556a7765c9f2d90a10a7104dfccf | max_issues_repo_licenses: ["CC-BY-4.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: intro/summary-exercises/examples/plot_cumulative_wind_speed_prediction.py | max_forks_repo_name: MarcvdSluys/scipy-lecture-notes | max_forks_repo_head_hexsha: 849762a741ba556a7765c9f2d90a10a7104dfccf | max_forks_repo_licenses: ["CC-BY-4.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
"""
Cumulative wind speed prediction
================================
Generate the image cumulative-wind-speed-prediction.png
for the interpolate section of scipy.rst.
"""
import numpy as np
from scipy.interpolate import UnivariateSpline
import matplotlib.pyplot as plt
max_speeds = np.load('max-speeds.npy')
years_nb = max_speeds.shape[0]
cprob = (np.arange(years_nb, dtype=np.float32) + 1)/(years_nb + 1)
sorted_max_speeds = np.sort(max_speeds)
speed_spline = UnivariateSpline(cprob, sorted_max_speeds)
nprob = np.linspace(0, 1, 100)
fitted_max_speeds = speed_spline(nprob)
fifty_prob = 1. - 0.02
fifty_wind = speed_spline(fifty_prob)
plt.figure()
plt.plot(sorted_max_speeds, cprob, 'o')
plt.plot(fitted_max_speeds, nprob, 'g--')
plt.plot([fifty_wind], [fifty_prob], 'o', ms=8., mfc='y', mec='y')
plt.text(30, 0.05, r'$V_{50} = %.2f \, m/s$' % fifty_wind)
plt.plot([fifty_wind, fifty_wind], [plt.axis()[2], fifty_prob], 'k--')
plt.xlabel('Annual wind speed maxima [$m/s$]')
plt.ylabel('Cumulative probability')
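# Hedged aside: fifty_prob above encodes the standard return-period relation
# P = 1 - 1/T with T = 50 years, i.e. a cumulative probability of 0.98. The
# sketch below reuses the fitted spline for an arbitrary return period; the
# function name is illustrative only.
def _example_return_level(period_years):
    return float(speed_spline(1.0 - 1.0 / period_years))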
avg_line_length: 30.848485 | max_line_length: 70 | alphanum_fraction: 0.708251

hexsha: 6d90f549ff22982469410e82293d0fafafd0de6b | size: 1,546 | ext: py | lang: Python
max_stars_repo_path: djangoecommerce/catalog/models.py | max_stars_repo_name: roneysousa/djangoecommerce | max_stars_repo_head_hexsha: 9eacd2231ef2c6a54f57e0e2a6f1ffe04f5bfb4c | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: djangoecommerce/catalog/models.py | max_issues_repo_name: roneysousa/djangoecommerce | max_issues_repo_head_hexsha: 9eacd2231ef2c6a54f57e0e2a6f1ffe04f5bfb4c | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: djangoecommerce/catalog/models.py | max_forks_repo_name: roneysousa/djangoecommerce | max_forks_repo_head_hexsha: 9eacd2231ef2c6a54f57e0e2a6f1ffe04f5bfb4c | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
# coding=utf-8
from django.db import models
from django.core.urlresolvers import reverse
# Create your models here.
class Category(models.Model):
"""docstring for """
name = models.CharField('Nome', max_length=100)
slug = models.SlugField('Identificador', max_length=100)
create = models.DateTimeField('Criado em', auto_now_add=True)
modified = models.DateTimeField('Modificado em', auto_now=True)
class Meta:
verbose_name = 'Categoria'
verbose_name_plural = 'Categorias'
ordering = ['name']
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('catalog:category', kwargs={'slug':self.slug})
class Product(models.Model):
"""docstring for """
name = models.CharField('Nome', max_length=100)
slug = models.SlugField('Identificador', max_length=100)
category = models.ForeignKey('catalog.Category', verbose_name='Categoria')
description = models.TextField('Descrição', blank=True)
price = models.DecimalField('Preço', decimal_places=4, max_digits=15)
create = models.DateTimeField('Criado em', auto_now_add=True)
modified = models.DateTimeField('Modificado em', auto_now=True)
class Meta:
verbose_name = 'Produto'
verbose_name_plural = 'Produtos'
ordering = ['name']
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('catalog:product', kwargs={'slug':self.slug})
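# Hedged usage sketch (hypothetical data): the reverse() calls above assume URL
# patterns named 'category' and 'product' in a 'catalog' namespace, each taking
# a slug keyword argument. Running this requires a configured database.
def _example_catalog_usage():
    from decimal import Decimal
    category = Category.objects.create(name='Livros', slug='livros')
    product = Product.objects.create(name='Django', slug='django',
                                     category=category, price=Decimal('29.90'))
    return category.get_absolute_url(), product.get_absolute_url()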
avg_line_length: 32.893617 | max_line_length: 82 | alphanum_fraction: 0.65718

hexsha: 4335ca804db98a79312141632b21c119bd8f09b2 | size: 10,587 | ext: py | lang: Python
max_stars_repo_path: intel-sds-proto/vsm_configure_guide/packages/vsmclient/python-vsmclient/build/lib.linux-x86_64-2.7/vsmclient/v1/vsms.py | max_stars_repo_name: opensds/proposals | max_stars_repo_head_hexsha: 03735f5e19203bdff698454f2633ca483c92129d | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 5 | max_stars_repo_stars_event_min_datetime: 2017-03-21T09:11:55.000Z | max_stars_repo_stars_event_max_datetime: 2018-11-19T14:44:36.000Z
max_issues_repo_path: intel-sds-proto/vsm_configure_guide/packages/vsmclient/python-vsmclient/vsmclient/v1/vsms.py | max_issues_repo_name: opensds/proposals | max_issues_repo_head_hexsha: 03735f5e19203bdff698454f2633ca483c92129d | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 3 | max_issues_repo_issues_event_min_datetime: 2018-02-06T06:17:10.000Z | max_issues_repo_issues_event_max_datetime: 2020-07-10T17:29:47.000Z
max_forks_repo_path: intel-sds-proto/vsm_configure_guide/packages/vsmclient/python-vsmclient/build/lib.linux-x86_64-2.7/vsmclient/v1/vsms.py | max_forks_repo_name: opensds/proposals | max_forks_repo_head_hexsha: 03735f5e19203bdff698454f2633ca483c92129d | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 7 | max_forks_repo_forks_event_min_datetime: 2018-02-06T03:54:13.000Z | max_forks_repo_forks_event_max_datetime: 2021-09-08T10:51:38.000Z
# Copyright 2011 Denali Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume interface (1.1 extension).
"""
import urllib
from vsmclient import base
class Volume(base.Resource):
"""A vsm is an extra block level storage to the OpenStack instances."""
def __repr__(self):
try:
return "<Volume: %s>" % self.id
except AttributeError:
return "<VSM: summary>"
def delete(self):
"""Delete this vsm."""
self.manager.delete(self)
def update(self, **kwargs):
"""Update the display_name or display_description for this vsm."""
self.manager.update(self, **kwargs)
def attach(self, instance_uuid, mountpoint):
"""Set attachment metadata.
:param instance_uuid: uuid of the attaching instance.
:param mountpoint: mountpoint on the attaching instance.
"""
return self.manager.attach(self, instance_uuid, mountpoint)
def detach(self):
"""Clear attachment metadata."""
return self.manager.detach(self)
def reserve(self, vsm):
"""Reserve this vsm."""
return self.manager.reserve(self)
def unreserve(self, vsm):
"""Unreserve this vsm."""
return self.manager.unreserve(self)
def begin_detaching(self, vsm):
"""Begin detaching vsm."""
return self.manager.begin_detaching(self)
def roll_detaching(self, vsm):
"""Roll detaching vsm."""
return self.manager.roll_detaching(self)
def initialize_connection(self, vsm, connector):
"""Initialize a vsm connection.
:param connector: connector dict from nova.
"""
return self.manager.initialize_connection(self, connector)
def terminate_connection(self, vsm, connector):
"""Terminate a vsm connection.
:param connector: connector dict from nova.
"""
return self.manager.terminate_connection(self, connector)
def set_metadata(self, vsm, metadata):
"""Set or Append metadata to a vsm.
        :param vsm: The :class:`Volume` to set metadata on
:param metadata: A dict of key/value pairs to set
"""
return self.manager.set_metadata(self, metadata)
def upload_to_image(self, force, image_name, container_format,
disk_format):
"""Upload a vsm to image service as an image."""
self.manager.upload_to_image(self, force, image_name, container_format,
disk_format)
def force_delete(self):
"""Delete the specified vsm ignoring its current state.
:param vsm: The UUID of the vsm to force-delete.
"""
self.manager.force_delete(self)
class VolumeManager(base.ManagerWithFind):
"""
Manage :class:`Volume` resources.
"""
resource_class = Volume
def create(self, size, snapshot_id=None, source_volid=None,
display_name=None, display_description=None,
vsm_type=None, user_id=None,
project_id=None, availability_zone=None,
metadata=None, imageRef=None):
"""
Create a vsm.
:param size: Size of vsm in GB
:param snapshot_id: ID of the snapshot
:param display_name: Name of the vsm
:param display_description: Description of the vsm
:param vsm_type: Type of vsm
:rtype: :class:`Volume`
:param user_id: User id derived from context
:param project_id: Project id derived from context
:param availability_zone: Availability Zone to use
:param metadata: Optional metadata to set on vsm creation
:param imageRef: reference to an image stored in glance
:param source_volid: ID of source vsm to clone from
"""
if metadata is None:
vsm_metadata = {}
else:
vsm_metadata = metadata
body = {'vsm': {'size': size,
'snapshot_id': snapshot_id,
'display_name': display_name,
'display_description': display_description,
'vsm_type': vsm_type,
'user_id': user_id,
'project_id': project_id,
'availability_zone': availability_zone,
'status': "creating",
'attach_status': "detached",
'metadata': vsm_metadata,
'imageRef': imageRef,
'source_volid': source_volid,
}}
return self._create('/vsms', body, 'vsm')
def get(self, vsm_id):
"""
Get a vsm.
        :param vsm_id: The ID of the vsm to get.
:rtype: :class:`Volume`
"""
return self._get("/vsms/%s" % vsm_id, "vsm")
def list(self, detailed=True, search_opts=None):
"""
Get a list of all vsms.
:rtype: list of :class:`Volume`
"""
print ' comes to list'
if search_opts is None:
search_opts = {}
qparams = {}
for opt, val in search_opts.iteritems():
if val:
qparams[opt] = val
query_string = "?%s" % urllib.urlencode(qparams) if qparams else ""
detail = ""
if detailed:
detail = "/detail"
ret = self._list("/conductor%s%s" % (detail, query_string),
"conductor")
return ret
def delete(self, vsm):
"""
Delete a vsm.
:param vsm: The :class:`Volume` to delete.
"""
self._delete("/vsms/%s" % base.getid(vsm))
def update(self, vsm, **kwargs):
"""
Update the display_name or display_description for a vsm.
        :param vsm: The :class:`Volume` to update.
"""
if not kwargs:
return
body = {"vsm": kwargs}
self._update("/vsms/%s" % base.getid(vsm), body)
def _action(self, action, vsm, info=None, **kwargs):
"""
Perform a vsm "action."
"""
body = {action: info}
self.run_hooks('modify_body_for_action', body, **kwargs)
url = '/vsms/%s/action' % base.getid(vsm)
return self.api.client.post(url, body=body)
def host_status(self, req=None):
"""
Perform a vsm "action."
"""
body = {'request': req}
url = '/conductor/host_status'
return self.api.client.post(url, body=body)
def create_storage_pool(self, body):
"""
create a storage pool
"""
url = '/storage_pool/create'
return self.api.client.post(url, body=body)
def get_storage_group_list(self):
url = '/storage_pool/get_storage_group_list'
return self.api.client.get(url)
def get_pool_size_list(self):
url = '/storage_pool/get_pool_size_list'
return self.api.client.get(url)
def list_storage_pool(self, req=None, search_opts=None):
"""
Perform a vsm "action."
"""
if search_opts is None:
search_opts = {}
qparams = {}
for opt, val in search_opts.iteritems():
if val:
qparams[opt] = val
query_string = "?%s" % urllib.urlencode(qparams) if qparams else ""
#body = {'request': req}
url = "/storage_pool/list_storage_pool%s" % (query_string)
return self.api.client.get(url)
    # server_api
def get_server_list(self, req=None):
"""
host list
"""
url = "/cluster/servers"
return self.api.client.get(url)
def add_servers(self, req=None, opts=None):
"""
add servers
"""
url = "/cluster/servers/add"
return self.api.client.post(url, body=opts)
    def remove_servers(self, req=None, opts=None):
"""
remove servers
"""
url = "/cluster/servers/del"
return self.api.client.post(url, body=opts)
#zone_api
def get_zone_list(self, req=None):
"""
get zone list
"""
url = "/cluster/zones"
return self.api.client.get(url)
def create_zone(self, req=None, opts=None):
"""
create a zone
"""
url = "/cluster/zones/add"
return self.api.client.post(url, body=opts)
#cluster list
def get_cluster_list(self, req=None):
"""
get cluster list
"""
url = "/clusters"
return self.api.client.get(url)
def create_cluster(self, req=None, opts=None):
"""
create cluster
"""
url = "/clusters"
return self.api.client.post(url, body=opts)
def resource_info(self, req=None):
"""
Perform a vsm "action."
"""
body = {'request': req}
url = '/conductor/resource_info'
return self.api.client.post(url, body=body)
def asm_settings(self, req=None):
"""
Perform a vsm "action."
"""
body = {'request': req}
url = '/conductor/asm_settings'
return self.api.client.post(url, body=body)
def initialize_connection(self, vsm, connector):
"""
Initialize a vsm connection.
:param vsm: The :class:`Volume` (or its ID).
:param connector: connector dict from nova.
"""
return self._action('os-initialize_connection', vsm,
{'connector': connector})[1]['connection_info']
def terminate_connection(self, vsm, connector):
"""
Terminate a vsm connection.
:param vsm: The :class:`Volume` (or its ID).
:param connector: connector dict from nova.
"""
self._action('os-terminate_connection', vsm,
{'connector': connector})
def summary(self):
"""
summary
"""
url = "/vsms/summary"
return self._get(url, 'vsm-summary')
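# Hedged usage sketch: typical calls against VolumeManager, assuming a client
# object that exposes it as `client.vsms` in the usual python-*client style.
# The size and names passed below are hypothetical.
def _example_vsm_workflow(client):
    vsm = client.vsms.create(10, display_name='test-vsm')
    client.vsms.update(vsm, display_description='example description')
    return client.vsms.get(base.getid(vsm))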
avg_line_length: 29.738764 | max_line_length: 79 | alphanum_fraction: 0.565788

hexsha: dc45636a5042cba997a059f022aaace695498a5f | size: 1,843 | ext: py | lang: Python
max_stars_repo_path: thefuck/rules/cd_correction.py | max_stars_repo_name: HiteshMah-Jan/thefuck | max_stars_repo_head_hexsha: 132c62262246824470934c2c6f46919ef6f00203 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 75,504 | max_stars_repo_stars_event_min_datetime: 2015-04-08T18:22:19.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-31T23:59:52.000Z
max_issues_repo_path: thefuck/rules/cd_correction.py | max_issues_repo_name: HiteshMah-Jan/thefuck | max_issues_repo_head_hexsha: 132c62262246824470934c2c6f46919ef6f00203 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 1,160 | max_issues_repo_issues_event_min_datetime: 2015-04-17T18:47:12.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-30T20:42:26.000Z
max_forks_repo_path: thefuck/rules/cd_correction.py | max_forks_repo_name: HiteshMah-Jan/thefuck | max_forks_repo_head_hexsha: 132c62262246824470934c2c6f46919ef6f00203 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 4,399 | max_forks_repo_forks_event_min_datetime: 2015-04-17T18:36:04.000Z | max_forks_repo_forks_event_max_datetime: 2022-03-31T07:01:03.000Z
"""Attempts to spellcheck and correct failed cd commands"""
import os
import six
from thefuck.specific.sudo import sudo_support
from thefuck.rules import cd_mkdir
from thefuck.utils import for_app, get_close_matches
__author__ = "mmussomele"
MAX_ALLOWED_DIFF = 0.6
def _get_sub_dirs(parent):
"""Returns a list of the child directories of the given parent directory"""
return [child for child in os.listdir(parent) if os.path.isdir(os.path.join(parent, child))]
@sudo_support
@for_app('cd')
def match(command):
"""Match function copied from cd_mkdir.py"""
return (
command.script.startswith('cd ') and any((
'no such file or directory' in command.output.lower(),
'cd: can\'t cd to' in command.output.lower(),
'does not exist' in command.output.lower()
)))
@sudo_support
def get_new_command(command):
"""
Attempt to rebuild the path string by spellchecking the directories.
If it fails (i.e. no directories are a close enough match), then it
defaults to the rules of cd_mkdir.
Change sensitivity by changing MAX_ALLOWED_DIFF. Default value is 0.6
"""
dest = command.script_parts[1].split(os.sep)
if dest[-1] == '':
dest = dest[:-1]
if dest[0] == '':
cwd = os.sep
dest = dest[1:]
elif six.PY2:
cwd = os.getcwdu()
else:
cwd = os.getcwd()
for directory in dest:
if directory == ".":
continue
elif directory == "..":
cwd = os.path.split(cwd)[0]
continue
best_matches = get_close_matches(directory, _get_sub_dirs(cwd), cutoff=MAX_ALLOWED_DIFF)
if best_matches:
cwd = os.path.join(cwd, best_matches[0])
else:
return cd_mkdir.get_new_command(command)
return u'cd "{0}"'.format(cwd)
avg_line_length: 29.725806 | max_line_length: 96 | alphanum_fraction: 0.637005