content stringlengths 5 1.05M |
|---|
def extract_kmers(sequence, k):
    """Return all length-k substrings (k-mers) of *sequence*, sorted.

    Args:
        sequence: the input string (e.g. a DNA sequence).
        k: substring length; if k > len(sequence) the result is empty.

    Returns:
        Sorted list of every k-length window of the sequence.
    """
    # range(len - k + 1) yields exactly the full-length windows, replacing
    # the original break-on-short-slice loop.
    kmers = [sequence[i:i + k] for i in range(len(sequence) - k + 1)]
    kmers.sort()
    return kmers


def main():
    """Read k and a sequence from a user-named file, write sorted k-mers."""
    file_name = input("Digite o nome do arquivo: ")
    with open(file_name, 'r') as file:
        # First line has the form "k=<int>"; the sequence is on the next line.
        k = int(file.readline().split('=')[1].replace("\n", ''))
        sequence = file.readlines()[0].replace("\n", '')
    kmers = extract_kmers(sequence, k)
    # Context manager guarantees the output file is closed even on error.
    with open("k" + str(k) + "mer.txt", "w") as out:
        for kmer in kmers:
            # Same comma-terminated output format as the original script.
            out.write(kmer + ",")


if __name__ == "__main__":
    main()
import tensorflow as tf
from .utils import anchor_generator_builder, box_list_ops, shape_utils, \
box_list, post_processing, target_assigner, post_processing_builder, ops
from ..processor import model_config as config
from .utils import standard_fields as fields
# Keys of the dict returned by _first_stage_box_predictor_predict.
BOX_ENCODINGS = 'box_encodings'
CLASS_PREDICTIONS_WITH_BACKGROUND = 'class_predictions_with_background'
# Graph node names the input tensors are expected to come from:
# rpn_features_to_crop - FirstStageFeatureExtractor/resnet_v1_50/resnet_v1_50/block3/unit_6/bottleneck_v1/Relu
# rpn_box_predictor_features Conv/Relu6
def crop_and_resize_to_input(rpn_box_predictor_features,
                             preprocessed_inputs, box_encodings,
                             class_predictions_with_background, rpn_features_to_crop):
    """Runs the first-stage (RPN) path and crops proposal regions.

    Reshapes the raw RPN predictor outputs, generates and clips grid anchors
    to the image window, post-processes proposals, and crops/resizes the
    proposal regions out of `rpn_features_to_crop`.

    Args:
      rpn_box_predictor_features: feature map the RPN box-predictor head ran
        on (see node-name comment above).
      preprocessed_inputs: preprocessed image batch; only its shape is used.
      box_encodings: raw RPN box-encoding tensor.
      class_predictions_with_background: raw RPN objectness tensor.
      rpn_features_to_crop: backbone feature map to crop proposals from.

    Returns:
      Cropped-and-resized proposal feature regions.
    """
    image_shape = tf.shape(preprocessed_inputs)
    # rpn_box_encodings, rpn_objectness_predictions_with_background = _predict_rpn_proposals(rpn_box_predictor_features, box_encodings,
    # class_predictions_with_background)
    first_stage_anchor_generator = anchor_generator_builder.build("grid_anchor_generator")
    num_anchors_per_location = (
        first_stage_anchor_generator.num_anchors_per_location())
    # The reshaping logic below only supports a single RPN feature map.
    if len(num_anchors_per_location) != 1:
        raise RuntimeError('anchor_generator is expected to generate anchors '
                           'corresponding to a single feature map.')
    box_predictions = _first_stage_box_predictor_predict([rpn_box_predictor_features], [box_encodings],
                                                         [class_predictions_with_background],
                                                         num_anchors_per_location)
    predictions_box_encodings = tf.concat(
        box_predictions[BOX_ENCODINGS], axis=1)
    print("squeeze predictions_box_encodings.shape:", predictions_box_encodings.shape)
    # Drop the singleton class dim: [batch, anchors, 1, 4] -> [batch, anchors, 4].
    rpn_box_encodings = tf.squeeze(predictions_box_encodings, axis=2)
    print("rpn_box_encodings.shape:", rpn_box_encodings.shape)
    rpn_objectness_predictions_with_background = tf.concat(
        box_predictions[CLASS_PREDICTIONS_WITH_BACKGROUND],
        axis=1)
    first_stage_anchor_generator = anchor_generator_builder.build("grid_anchor_generator")
    # The Faster R-CNN paper recommends pruning anchors that venture outside
    # the image window at training time and clipping at inference time.
    clip_window = tf.to_float(tf.stack([0, 0, image_shape[1], image_shape[2]]))
    feature_map_shape = tf.shape(rpn_features_to_crop)
    # One anchor grid per spatial location of the RPN feature map.
    anchors_boxlist = box_list_ops.concatenate(
        first_stage_anchor_generator.generate([(feature_map_shape[1],
                                                feature_map_shape[2])]))
    anchors_boxlist = box_list_ops.clip_to_window(
        anchors_boxlist, clip_window)
    _anchors = anchors_boxlist
    cropped_regions = _predict_second_stage_1(rpn_box_encodings,
                                              rpn_objectness_predictions_with_background,
                                              rpn_features_to_crop,
                                              _anchors.get(),
                                              image_shape)
    return cropped_regions
def _postprocess_rpn(
        rpn_box_encodings_batch,
        rpn_objectness_predictions_with_background_batch,
        anchors,
        image_shapes, first_stage_max_proposals):
    """Converts first stage prediction tensors from the RPN to proposals.

    This function decodes the raw RPN predictions, runs non-max suppression
    on the result.

    Args:
      rpn_box_encodings_batch: A 3-D float32 tensor of shape
        [batch_size, num_anchors, self._box_coder.code_size] containing
        predicted proposal box encodings.
      rpn_objectness_predictions_with_background_batch: A 3-D float tensor of
        shape [batch_size, num_anchors, 2] containing objectness predictions
        (logits) for each of the anchors with 0 corresponding to background
        and 1 corresponding to object.
      anchors: A 2-D tensor of shape [num_anchors, 4] representing anchors
        for the first stage RPN. Note that `num_anchors` can differ depending
        on whether the model is created in training or inference mode.
      image_shapes: A 2-D tensor of shape [batch, 3] containing the shapes of
        images in the batch.
      first_stage_max_proposals: int, maximum number of proposals retained
        after first-stage NMS.

    Returns:
      proposal_boxes: A float tensor with shape
        [batch_size, max_num_proposals, 4] representing the (potentially zero
        padded) proposal boxes for all images in the batch. These boxes are
        represented as normalized coordinates.
      proposal_scores: A float tensor with shape
        [batch_size, max_num_proposals] representing the (potentially zero
        padded) proposal objectness scores for all images in the batch.
      num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch]
        representing the number of proposals predicted for each image in
        the batch.
    """
    # Hard-coded first-stage NMS configuration.
    first_stage_nms_score_threshold = 0.0
    first_stage_nms_iou_threshold = 0.7
    first_stage_max_proposals = first_stage_max_proposals  # no-op self-assignment, kept as-is
    # Insert a singleton "class" dim so decoding sees [batch, anchors, 1, code_size].
    rpn_box_encodings_batch = tf.expand_dims(rpn_box_encodings_batch, axis=2)
    print("rpn_box_encodings_batch name:", rpn_box_encodings_batch.name)
    print("rpn_box_encodings_batch: shape", rpn_box_encodings_batch.shape)
    rpn_encodings_shape = shape_utils.combined_static_and_dynamic_shape(
        rpn_box_encodings_batch)
    # Replicate the shared anchor set once per batch element.
    tiled_anchor_boxes = tf.tile(
        tf.expand_dims(anchors, 0), [rpn_encodings_shape[0], 1, 1])
    print("_batch_decode_boxes 1")
    proposal_boxes = _batch_decode_boxes(rpn_box_encodings_batch,
                                         tiled_anchor_boxes)
    proposal_boxes = tf.squeeze(proposal_boxes, axis=2)
    # Keep only the "object" column (index 1) of the softmaxed logits.
    rpn_objectness_softmax_without_background = tf.nn.softmax(
        rpn_objectness_predictions_with_background_batch)[:, :, 1]
    clip_window = _compute_clip_window(image_shapes)
    (proposal_boxes, proposal_scores, _, _, _,
     num_proposals) = post_processing.batch_multiclass_non_max_suppression(
         tf.expand_dims(proposal_boxes, axis=2),
         tf.expand_dims(rpn_objectness_softmax_without_background,
                        axis=2),
         first_stage_nms_score_threshold,
         first_stage_nms_iou_threshold,
         first_stage_max_proposals,
         first_stage_max_proposals,
         clip_window=clip_window)

    # normalize proposal boxes
    def normalize_boxes(args):
        # args = (boxes for one image, that image's [h, w, c] shape).
        proposal_boxes_per_image = args[0]
        image_shape = args[1]
        normalized_boxes_per_image = box_list_ops.to_normalized_coordinates(
            box_list.BoxList(proposal_boxes_per_image), image_shape[0],
            image_shape[1], check_range=False).get()
        return normalized_boxes_per_image

    normalized_proposal_boxes = shape_utils.static_or_dynamic_map_fn(
        normalize_boxes, elems=[proposal_boxes, image_shapes], dtype=tf.float32)
    return normalized_proposal_boxes, proposal_scores, num_proposals
def _compute_clip_window(image_shapes):
    """Builds per-image clip windows for NMS, anchored at the origin.

    Args:
      image_shapes: [batch_size, 3] int32 tensor of per-image
        [height, width, channels].

    Returns:
      [batch_size, 4] float32 tensor of [ymin, xmin, ymax, xmax] windows,
      i.e. [0, 0, height, width] for each image.
    """
    heights = image_shapes[:, 0]
    widths = image_shapes[:, 1]
    origin = tf.zeros_like(heights)
    window = tf.stack([origin, tf.zeros_like(heights), heights, widths], axis=1)
    return tf.to_float(window)
def _batch_decode_boxes(box_encodings, anchor_boxes):
    """Decodes box encodings with respect to the anchor boxes.

    Args:
      box_encodings: a 4-D tensor with shape
        [batch_size, num_anchors, num_classes, self._box_coder.code_size]
        representing box encodings.
      anchor_boxes: [batch_size, num_anchors, self._box_coder.code_size]
        representing decoded bounding boxes. If using a shared box across
        classes the shape will instead be
        [total_num_proposals, 1, self._box_coder.code_size].

    Returns:
      decoded_boxes: a
        [batch_size, num_anchors, num_classes, self._box_coder.code_size]
        float tensor representing bounding box predictions (for each image in
        batch, proposal and class). If using a shared box across classes the
        shape will instead be
        [batch_size, num_anchors, 1, self._box_coder.code_size].
    """
    combined_shape = shape_utils.combined_static_and_dynamic_shape(
        box_encodings)
    num_classes = combined_shape[2]
    # Repeat the anchors along the class dim so each class decodes against
    # the same anchor, then flatten everything to [-1, 4] for the coder.
    tiled_anchor_boxes = tf.tile(
        tf.expand_dims(anchor_boxes, 2), [1, 1, num_classes, 1])
    print("tiled_anchor_boxes:", tiled_anchor_boxes.name)
    tiled_anchors_boxlist = box_list.BoxList(
        tf.reshape(tiled_anchor_boxes, [-1, 4]))
    # The proposal target assigner supplies the box coder the model trained with.
    _proposal_target_assigner = target_assigner.create_target_assigner(
        'FasterRCNN', 'proposal')
    _box_coder = _proposal_target_assigner.box_coder
    decoded_boxes = _box_coder.decode(
        tf.reshape(box_encodings, [-1, _box_coder.code_size]),
        tiled_anchors_boxlist)
    print("combined_shape[0]:", combined_shape[0])
    print("combined_shape[1]:", combined_shape[1])
    print("num_classes:", num_classes)
    print("decoded_boxes.get():", decoded_boxes.get())
    # NOTE(review): "reahpe" is a typo for "reshape"; local name kept unchanged here.
    decoded_boxes_reahpe = tf.reshape(decoded_boxes.get(),
                                      tf.stack([combined_shape[0], combined_shape[1],
                                                num_classes, 4]))
    return decoded_boxes_reahpe
def _image_batch_shape_2d(image_batch_shape_1d):
    """Expands a 1-D image batch shape tensor into a per-image 2-D tensor.

    For example, [2, 300, 300, 3] becomes [[300, 300, 3], [300, 300, 3]].

    Args:
      image_batch_shape_1d: 1-D tensor [batch_size, height, width, channels].

    Returns:
      [batch_size, 3] tensor whose every row is [height, width, channels].
    """
    batch_size = image_batch_shape_1d[0]
    per_image_shape = tf.expand_dims(image_batch_shape_1d[1:], 0)
    return tf.tile(per_image_shape, [batch_size, 1])
def _first_stage_box_predictor_predict(image_features, box_encodings, class_predictions_with_backgrounds,
                                       num_predictions_per_locations):
    """Reshapes raw RPN head outputs into per-anchor prediction tensors.

    Args:
      image_features: A list of float tensors of shape [batch_size, height_i,
        width_i, channels_i] containing features for a batch of images.
      box_encodings: list of raw box-encoding tensors, one per feature map.
      class_predictions_with_backgrounds: list of raw objectness tensors,
        one per feature map.
      num_predictions_per_locations: A list of integers representing the
        number of box predictions to be made per spatial location for each
        feature map.

    Returns:
      A dict with:
      box_encodings: A list of float tensors of shape
        [batch_size, num_anchors_i, q, code_size] representing the location of
        the objects, where q is 1 or the number of classes. Each entry in the
        list corresponds to a feature map in the input `image_features` list.
      class_predictions_with_background: A list of float tensors of shape
        [batch_size, num_anchors_i, num_classes + 1] representing the class
        predictions for the proposals. Each entry in the list corresponds to a
        feature map in the input `image_features` list.
    """
    box_encodings_list = []
    class_predictions_list = []
    # RPN is class-agnostic: one foreground class plus background.
    num_classes = 1
    num_class_slots = num_classes + 1
    # The target assigner's box coder fixes the per-box code size (4).
    _proposal_target_assigner = target_assigner.create_target_assigner(
        'FasterRCNN', 'proposal')
    _box_coder = _proposal_target_assigner.box_coder
    _box_code_size = _box_coder.code_size
    for (image_feature, box_encoding, class_predictions_with_background, num_predictions_per_location) in zip(
            image_features, box_encodings, class_predictions_with_backgrounds, num_predictions_per_locations):
        combined_feature_map_shape = (shape_utils.combined_static_and_dynamic_shape(image_feature))
        print("_box_code_size:", _box_code_size)
        print("num_predictions_per_location:", num_predictions_per_location)
        print("combined_feature_map_shape[1]:", combined_feature_map_shape[1])
        print("combined_feature_map_shape[2]:", combined_feature_map_shape[2])
        print("box_encodings:", box_encoding.shape)
        # Flatten H x W x anchors-per-location into a single anchors axis.
        shapes = tf.stack([combined_feature_map_shape[0],
                           combined_feature_map_shape[1] * combined_feature_map_shape[2] * num_predictions_per_location,
                           1,
                           _box_code_size])
        box_encoding_reshape = tf.reshape(box_encoding, shapes)
        print("box_encoding_reshape:", box_encoding_reshape.shape)
        box_encodings_list.append(box_encoding_reshape)
        class_predictions_with_background = tf.reshape(
            class_predictions_with_background,
            tf.stack([combined_feature_map_shape[0],
                      combined_feature_map_shape[1] *
                      combined_feature_map_shape[2] *
                      num_predictions_per_location,
                      num_class_slots]))
        class_predictions_list.append(class_predictions_with_background)
    return {
        BOX_ENCODINGS: box_encodings_list,
        CLASS_PREDICTIONS_WITH_BACKGROUND: class_predictions_list
    }
def _predict_second_stage_1(rpn_box_encodings,
                            rpn_objectness_predictions_with_background,
                            rpn_features_to_crop,
                            anchors,
                            image_shape):
    """Post-processes RPN outputs into proposals and crops their feature regions.

    Args:
      rpn_box_encodings: [batch, num_anchors, code_size] RPN box encodings.
      rpn_objectness_predictions_with_background: [batch, num_anchors, 2]
        objectness logits.
      rpn_features_to_crop: backbone feature map to crop proposals from.
      anchors: [num_anchors, 4] anchor boxes.
      image_shape: 1-D image batch shape tensor.

    Returns:
      Cropped-and-resized per-proposal feature regions.
    """
    image_shapes_2d = _image_batch_shape_2d(image_shape)
    normalized_proposals, _, _ = _postprocess_rpn(
        rpn_box_encodings,
        rpn_objectness_predictions_with_background,
        anchors,
        image_shapes_2d,
        first_stage_max_proposals=100)
    return _compute_second_stage_input_feature_maps(
        rpn_features_to_crop, normalized_proposals)
def _compute_second_stage_input_feature_maps(features_to_crop,
                                             proposal_boxes_normalized):
    """Crops every normalized proposal from the feature map, resized to 14x14.

    Args:
      features_to_crop: [batch, h, w, c] feature map.
      proposal_boxes_normalized: [batch, num_proposals, 4] boxes in
        normalized coordinates.

    Returns:
      [batch * num_proposals, 14, 14, c] cropped feature regions.
    """
    crop_size = 14

    def _box_batch_indices(proposals):
        # Row i of the flattened proposal list belongs to batch image i.
        static_shape = proposals.get_shape().as_list()
        if any(dim is None for dim in static_shape):
            shape = tf.shape(proposals)
        else:
            shape = static_shape
        ones = tf.ones(shape[:2], dtype=tf.int32)
        batch_index_column = tf.expand_dims(
            tf.range(start=0, limit=shape[0]), 1)
        return tf.reshape(ones * batch_index_column, [-1])

    return tf.image.crop_and_resize(
        features_to_crop,
        _flatten_first_two_dimensions(proposal_boxes_normalized),
        _box_batch_indices(proposal_boxes_normalized),
        (crop_size, crop_size))
def _flatten_first_two_dimensions(inputs):
    """Merges the two leading dimensions of a tensor into one.

    Converts a tensor of shape [A, B, ..., depth] into one of shape
    [A * B, ..., depth].

    Args:
      inputs: float tensor with shape [A, B, ..., depth].

    Returns:
      The same data reshaped to [A * B, ..., depth].
    """
    dims = shape_utils.combined_static_and_dynamic_shape(inputs)
    merged_leading = dims[0] * dims[1]
    target_shape = tf.stack([merged_leading] + dims[2:])
    return tf.reshape(inputs, target_shape)
# rpn_features_to_crop - FirstStageFeatureExtractor/resnet_v1_50/resnet_v1_50/block3/unit_6/bottleneck_v1/Relu
# rpn_box_predictor_features Conv/Relu6
def second_stage_box_predictor(preprocessed_inputs, box_encoding_reshape, class_prediction_reshape,
                               rpn_features_to_crop,
                               rpn_box_encodings,
                               rpn_objectness_predictions_with_background,
                               true_image_shapes,
                               rpn_box_predictor_features):
    """Re-runs the RPN post-processing and produces final detections.

    Regenerates anchors and proposals from the raw RPN tensors, combines the
    proposals with the externally supplied second-stage predictions
    (`box_encoding_reshape`, `class_prediction_reshape`) and post-processes
    everything into detection tensors.

    Args:
      preprocessed_inputs: preprocessed image batch (used for its shape).
      box_encoding_reshape: second-stage refined box encodings with a leading
        singleton dimension (squeezed away below).
      class_prediction_reshape: second-stage class logits, same layout.
      rpn_features_to_crop: backbone feature map the RPN ran over.
      rpn_box_encodings: raw first-stage box-encoding tensor.
      rpn_objectness_predictions_with_background: raw RPN objectness tensor.
      true_image_shapes: [batch, 3] true (unpadded) image shapes.
      rpn_box_predictor_features: feature map of the RPN box-predictor head.

    Returns:
      Dict of final detection tensors from `second_postprocess`.
    """
    image_shape = shape_utils.combined_static_and_dynamic_shape(
        preprocessed_inputs)
    first_stage_anchor_generator = anchor_generator_builder.build("grid_anchor_generator")
    # The Faster R-CNN paper recommends pruning anchors that venture outside
    # the image window at training time and clipping at inference time.
    clip_window = tf.to_float(tf.stack([0, 0, image_shape[1], image_shape[2]]))
    feature_map_shape = tf.shape(rpn_features_to_crop)
    anchors_boxlist = box_list_ops.concatenate(
        first_stage_anchor_generator.generate([(feature_map_shape[1],
                                                feature_map_shape[2])]))
    anchors_boxlist = box_list_ops.clip_to_window(
        anchors_boxlist, clip_window)
    _anchors = anchors_boxlist
    print("second_stage_box_predictor _postprocess_rpn")
    image_shape_2d = _image_batch_shape_2d(image_shape)
    num_anchors_per_location = (
        first_stage_anchor_generator.num_anchors_per_location())
    # The reshaping logic below only supports a single RPN feature map.
    if len(num_anchors_per_location) != 1:
        raise RuntimeError('anchor_generator is expected to generate anchors '
                           'corresponding to a single feature map.')
    box_predictions = _first_stage_box_predictor_predict([rpn_box_predictor_features], [rpn_box_encodings],
                                                         [rpn_objectness_predictions_with_background],
                                                         num_anchors_per_location)
    predictions_box_encodings = tf.concat(
        box_predictions[BOX_ENCODINGS], axis=1)
    print("squeeze predictions_box_encodings.shape:", predictions_box_encodings.shape)
    # Drop the singleton class dim: [batch, anchors, 1, 4] -> [batch, anchors, 4].
    rpn_box_encodings = tf.squeeze(predictions_box_encodings, axis=2)
    print("rpn_box_encodings.shape:", rpn_box_encodings.shape)
    rpn_objectness_predictions_with_background = tf.concat(
        box_predictions[CLASS_PREDICTIONS_WITH_BACKGROUND],
        axis=1)
    proposal_boxes_normalized, _, num_proposals = _postprocess_rpn(
        rpn_box_encodings, rpn_objectness_predictions_with_background,
        _anchors.get(), image_shape_2d, first_stage_max_proposals=100)
    prediction_dict = {
        'rpn_box_predictor_features': rpn_box_predictor_features,
        'rpn_features_to_crop': rpn_features_to_crop,
        'image_shape': image_shape,
        'rpn_box_encodings': rpn_box_encodings,
        'rpn_objectness_predictions_with_background':
            rpn_objectness_predictions_with_background,
    }
    # Squeeze the leading singleton dim off the second-stage predictions.
    refined_box_encodings = tf.squeeze(
        box_encoding_reshape,
        axis=1, name='all_refined_box_encodings')
    class_predictions_with_background = tf.squeeze(
        class_prediction_reshape,
        axis=1, name='all_class_predictions_with_background')
    _parallel_iterations = 16
    # Convert normalized proposal coordinates to absolute image coordinates.
    absolute_proposal_boxes = ops.normalized_to_image_coordinates(
        proposal_boxes_normalized, image_shape, _parallel_iterations)
    prediction_dict1 = {
        'refined_box_encodings': refined_box_encodings,
        'class_predictions_with_background':
            class_predictions_with_background,
        'num_proposals': num_proposals,
        'proposal_boxes': absolute_proposal_boxes,
    }
    prediction_dict.update(prediction_dict1)
    result_output = second_postprocess(prediction_dict, true_image_shapes)
    return result_output
# def _predict_second_stage(rpn_box_encodings,
# rpn_objectness_predictions_with_background,
# rpn_features_to_crop,
# anchors,
# image_shape,
# true_image_shapes):
#
#
# refined_box_encodings = tf.squeeze(
# box_predictions[box_predictor.BOX_ENCODINGS],
# axis=1, name='all_refined_box_encodings')
# class_predictions_with_background = tf.squeeze(
# box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
# axis=1, name='all_class_predictions_with_background')
#
# absolute_proposal_boxes = ops.normalized_to_image_coordinates(
# proposal_boxes_normalized, image_shape, self._parallel_iterations)
#
# prediction_dict = {
# 'refined_box_encodings': refined_box_encodings,
# 'class_predictions_with_background':
# class_predictions_with_background,
# 'num_proposals': num_proposals,
# 'proposal_boxes': absolute_proposal_boxes,
# 'box_classifier_features': box_classifier_features,
# 'proposal_boxes_normalized': proposal_boxes_normalized,
# }
#
# return prediction_dict
def second_postprocess(prediction_dict, true_image_shapes):
    """Converts second-stage prediction tensors into final detection outputs.

    Runs the box-classifier post-processing (decoding, score conversion,
    NMS) and then registers the resulting tensors as named output nodes.

    Args:
      prediction_dict: dict holding at least `refined_box_encodings`,
        `class_predictions_with_background`, `proposal_boxes` and
        `num_proposals`.
      true_image_shapes: [batch, 3] int32 tensor of [height, width, channels]
        for the true (unpadded) images.

    Returns:
      Dict of named detection output tensors (boxes, scores, classes,
      num_detections, and optionally masks/keypoints).
    """
    postprocessed = _postprocess_box_classifier(
        prediction_dict['refined_box_encodings'],
        prediction_dict['class_predictions_with_background'],
        prediction_dict['proposal_boxes'],
        prediction_dict['num_proposals'],
        true_image_shapes,
        mask_predictions=None)
    return _add_output_tensor_nodes(postprocessed)
def _add_output_tensor_nodes(postprocessed_tensors,
                             output_collection_name='inference_op'):
    """Adds named output nodes for detection boxes, scores, classes, etc.

    Adds the following nodes for output tensors -
    * num_detections: float32 tensor of shape [batch_size].
    * detection_boxes: float32 tensor of shape [batch_size, num_boxes, 4]
      containing detected boxes.
    * detection_scores: float32 tensor of shape [batch_size, num_boxes]
      containing scores for the detected boxes.
    * detection_classes: float32 tensor of shape [batch_size, num_boxes]
      containing class predictions for the detected boxes.
    * detection_keypoints: (Optional) float32 tensor of shape
      [batch_size, num_boxes, num_keypoints, 2] containing keypoints for each
      detection box.
    * detection_masks: (Optional) float32 tensor of shape
      [batch_size, num_boxes, mask_height, mask_width] containing masks for
      each detection box.

    Args:
      postprocessed_tensors: a dictionary containing the following fields
        'detection_boxes': [batch, max_detections, 4]
        'detection_scores': [batch, max_detections]
        'detection_classes': [batch, max_detections]
        'detection_masks': [batch, max_detections, mask_height, mask_width]
          (optional).
        'num_detections': [batch]
      output_collection_name: Name of collection to add output tensors to.

    Returns:
      A tensor dict containing the added output tensor nodes.
    """
    detection_fields = fields.DetectionResultFields
    # Internal classes are 0-based; exported classes are 1-based.
    label_id_offset = 1
    boxes = postprocessed_tensors.get(detection_fields.detection_boxes)
    scores = postprocessed_tensors.get(detection_fields.detection_scores)
    classes = postprocessed_tensors.get(
        detection_fields.detection_classes) + label_id_offset
    keypoints = postprocessed_tensors.get(detection_fields.detection_keypoints)
    masks = postprocessed_tensors.get(detection_fields.detection_masks)
    num_detections = postprocessed_tensors.get(detection_fields.num_detections)
    outputs = {}
    # tf.identity pins a stable, well-known name on each output tensor.
    outputs[detection_fields.detection_boxes] = tf.identity(
        boxes, name=detection_fields.detection_boxes)
    outputs[detection_fields.detection_scores] = tf.identity(
        scores, name=detection_fields.detection_scores)
    outputs[detection_fields.detection_classes] = tf.identity(
        classes, name=detection_fields.detection_classes)
    outputs[detection_fields.num_detections] = tf.identity(
        num_detections, name=detection_fields.num_detections)
    if keypoints is not None:
        outputs[detection_fields.detection_keypoints] = tf.identity(
            keypoints, name=detection_fields.detection_keypoints)
    if masks is not None:
        outputs[detection_fields.detection_masks] = tf.identity(
            masks, name=detection_fields.detection_masks)
    # Register each output exactly once. (The original code additionally
    # re-added the masks tensor after this loop, so detection_masks appeared
    # twice in the collection; that duplicate registration is removed.)
    for output_key in outputs:
        tf.add_to_collection(output_collection_name, outputs[output_key])
    return outputs
def _postprocess_box_classifier(
        refined_box_encodings,
        class_predictions_with_background,
        proposal_boxes,
        num_proposals,
        image_shapes,
        mask_predictions=None):
    """Converts predictions from the second stage box classifier to detections.

    Args:
      refined_box_encodings: a 3-D float tensor with shape
        [total_num_padded_proposals, num_classes, self._box_coder.code_size]
        representing predicted (final) refined box encodings. If using a shared
        box across classes the shape will instead be
        [total_num_padded_proposals, 1, 4]
      class_predictions_with_background: a 3-D tensor float with shape
        [total_num_padded_proposals, num_classes + 1] containing class
        predictions (logits) for each of the proposals. Note that this tensor
        *includes* background class predictions (at class index 0).
      proposal_boxes: a 3-D float tensor with shape
        [batch_size, self.max_num_proposals, 4] representing decoded proposal
        bounding boxes in absolute coordinates.
      num_proposals: a 1-D int32 tensor of shape [batch] representing the number
        of proposals predicted for each image in the batch.
      image_shapes: a 2-D int32 tensor containing shapes of input image in the
        batch.
      mask_predictions: (optional) a 4-D float tensor with shape
        [total_num_padded_proposals, num_classes, mask_height, mask_width]
        containing instance mask prediction logits.

    Returns:
      A dictionary containing:
        `detection_boxes`: [batch, max_detection, 4]
        `detection_scores`: [batch, max_detections]
        `detection_classes`: [batch, max_detections]
        `num_detections`: [batch]
        `detection_masks`:
          (optional) [batch, max_detections, mask_height, mask_width]. Note
          that a pixel-wise sigmoid score converter is applied to the detection
          masks.
    """
    # Hard-coded to match the first-stage proposal budget and the model's
    # 90-class label map.
    _first_stage_max_proposals = 100
    max_num_proposals = _first_stage_max_proposals
    num_classes = 90
    _proposal_target_assigner = target_assigner.create_target_assigner(
        'FasterRCNN', 'proposal')
    _box_coder = _proposal_target_assigner.box_coder
    # Builds the configured second-stage NMS and score-conversion functions.
    _second_stage_nms_fn, second_stage_score_conversion_fn = post_processing_builder.build(config.FASTER_RCNN)
    # Regroup flat per-proposal tensors into per-image batches.
    refined_box_encodings_batch = tf.reshape(
        refined_box_encodings,
        [-1,
         max_num_proposals,
         refined_box_encodings.shape[1],
         _box_coder.code_size])
    class_predictions_with_background_batch = tf.reshape(
        class_predictions_with_background,
        [-1, max_num_proposals, num_classes + 1]
    )
    print("_batch_decode_boxes 2")
    refined_decoded_boxes_batch = _batch_decode_boxes(
        refined_box_encodings_batch, proposal_boxes)
    # Convert class logits into scores (e.g. softmax), per model config.
    class_predictions_with_background_batch = (
        second_stage_score_conversion_fn(
            class_predictions_with_background_batch))
    # Strip the background column (class index 0).
    class_predictions_batch = tf.reshape(
        tf.slice(class_predictions_with_background_batch,
                 [0, 0, 1], [-1, -1, -1]),
        [-1, max_num_proposals, num_classes])
    clip_window = _compute_clip_window(image_shapes)
    mask_predictions_batch = None
    if mask_predictions is not None:
        mask_height = mask_predictions.shape[2].value
        mask_width = mask_predictions.shape[3].value
        # Pixel-wise sigmoid turns mask logits into probabilities.
        mask_predictions = tf.sigmoid(mask_predictions)
        mask_predictions_batch = tf.reshape(
            mask_predictions, [-1, max_num_proposals,
                               num_classes, mask_height, mask_width])
    (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, _,
     num_detections) = _second_stage_nms_fn(
         refined_decoded_boxes_batch,
         class_predictions_batch,
         clip_window=clip_window,
         change_coordinate_frame=True,
         num_valid_boxes=num_proposals,
         masks=mask_predictions_batch)
    detections = {
        fields.DetectionResultFields.detection_boxes: nmsed_boxes,
        fields.DetectionResultFields.detection_scores: nmsed_scores,
        fields.DetectionResultFields.detection_classes: nmsed_classes,
        fields.DetectionResultFields.num_detections: tf.to_float(num_detections)
    }
    if nmsed_masks is not None:
        detections[fields.DetectionResultFields.detection_masks] = nmsed_masks
    return detections
|
# -*- coding: utf-8 -*-
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
"""Tools for recording and reporting timeline of abstract events.
You can store any events provided that they can be stringified.
"""
__author__ = 'kbaclawski@google.com (Krystian Baclawski)'
import collections
import collections.abc
import datetime
import time
class _EventRecord(object):
"""Internal class. Attaches extra information to an event."""
def __init__(self, event, time_started=None, time_elapsed=None):
self._event = event
self._time_started = time_started or time.time()
self._time_elapsed = None
if time_elapsed:
self.time_elapsed = time_elapsed
@property
def event(self):
return self._event
@property
def time_started(self):
return self._time_started
def _TimeElapsedGet(self):
if self.has_finished:
time_elapsed = self._time_elapsed
else:
time_elapsed = time.time() - self._time_started
return datetime.timedelta(seconds=time_elapsed)
def _TimeElapsedSet(self, time_elapsed):
if isinstance(time_elapsed, datetime.timedelta):
self._time_elapsed = time_elapsed.seconds
else:
self._time_elapsed = time_elapsed
time_elapsed = property(_TimeElapsedGet, _TimeElapsedSet)
@property
def has_finished(self):
return self._time_elapsed is not None
def GetTimeStartedFormatted(self):
return time.strftime('%m/%d/%Y %H:%M:%S', time.gmtime(self._time_started))
def GetTimeElapsedRounded(self):
return datetime.timedelta(seconds=int(self.time_elapsed.seconds))
def Finish(self):
if not self.has_finished:
self._time_elapsed = time.time() - self._time_started
class _Transition(collections.namedtuple('_Transition', ('from_', 'to_'))):
"""Internal class. Represents transition point between events / states."""
def __str__(self):
return '%s => %s' % (self.from_, self.to_)
class EventHistory(collections.abc.Sequence):
    """Records events and provides human readable events timeline.

    Subclasses ``collections.abc.Sequence`` — the bare ``collections.Sequence``
    alias was removed in Python 3.10.
    """

    def __init__(self, records=None):
        # _EventRecord instances, in insertion order.
        self._records = records or []

    def __len__(self):
        return len(self._records)

    def __iter__(self):
        return iter(self._records)

    def __getitem__(self, index):
        return self._records[index]

    @property
    def last(self):
        """Most recently added record, or None when empty."""
        if self._records:
            return self._records[-1]

    def AddEvent(self, event):
        """Finishes the previous record (if any) and appends a new one."""
        if self.last:
            self.last.Finish()
        evrec = _EventRecord(event)
        self._records.append(evrec)
        return evrec

    def GetTotalTime(self):
        """Sum of elapsed time over all records, or None when empty."""
        if self._records:
            # total_seconds() keeps multi-day durations; `.seconds` would
            # silently drop the days component.
            total_time_elapsed = sum(evrec.time_elapsed.total_seconds()
                                     for evrec in self._records)
            return datetime.timedelta(seconds=int(total_time_elapsed))

    def GetTransitionEventHistory(self):
        """Derives a history of transitions between consecutive events."""
        records = []
        if self._records:
            for num, next_evrec in enumerate(self._records[1:], start=1):
                evrec = self._records[num - 1]
                records.append(_EventRecord(
                    _Transition(evrec.event, next_evrec.event), evrec.time_started,
                    evrec.time_elapsed))
            # An unfinished tail becomes a transition to the pseudo-event NOW.
            if not self.last.has_finished:
                records.append(_EventRecord(
                    _Transition(self.last.event,
                                'NOW'), self.last.time_started, self.last.time_elapsed))
        return EventHistory(records)

    @staticmethod
    def _GetReport(history, report_name):
        report = [report_name]
        for num, evrec in enumerate(history, start=1):
            time_elapsed = str(evrec.GetTimeElapsedRounded())
            if not evrec.has_finished:
                # Fixed: the original called .append() on a str, which raised
                # AttributeError for any unfinished record.
                time_elapsed += ' (not finished)'
            report.append('%d) %s: %s: %s' % (num, evrec.GetTimeStartedFormatted(),
                                              evrec.event, time_elapsed))
        report.append('Total Time: %s' % history.GetTotalTime())
        return '\n'.join(report)

    def GetEventReport(self):
        return EventHistory._GetReport(self, 'Timeline of events:')

    def GetTransitionEventReport(self):
        return EventHistory._GetReport(self.GetTransitionEventHistory(),
                                       'Timeline of transition events:')
|
"""
Add a "featured" field to objects so admins can better direct top content.
"""
from __future__ import absolute_import, unicode_literals
from django.db import models
from django.utils.translation import ugettext_lazy as _
from feincms import extensions
class Extension(extensions.Extension):
    """FeinCMS extension adding a boolean ``featured`` flag to model and admin."""

    def handle_model(self):
        # Admins can toggle this to promote top content.
        featured_field = models.BooleanField(
            _('featured'),
            default=False,
        )
        self.model.add_to_class('featured', featured_field)

    def handle_modeladmin(self, modeladmin):
        # Shown as a collapsible "Featured" fieldset in the change form.
        admin_options = {
            'fields': ('featured',),
            'classes': ('collapse',),
        }
        modeladmin.add_extension_options(_('Featured'), admin_options)
|
import numpy as np
import matplotlib.pyplot as plt
import datetime
import glob2
import xarray as xr
import pandas as pd
#plt.close("all")
# Show every column/row when inspecting DataFrames interactively.
pd.options.display.max_columns = None
pd.options.display.max_rows = None
# Input: semi-raw ENVISAT MIPAS files with age-of-air parameters.
dircInput1 = 'C:/Users/Chenxi/OneDrive/phd/age_and_fire/data/02_semi_raw/09_ENVISAT_MIPAS_with_AGEparams/'
# Output: cleaned files written by the __main__ loop below.
dircInput2 = 'C:/Users/Chenxi/OneDrive/phd/age_and_fire/data/03_cleaned/09_ENVISAT_MIPAS_with_AGEparams_cleaned/'
# Trace species handled by this cleaning script.
species = 'OCS'
def open_data(year):
    """Loads all monthly MIPAS NetCDF files for *year* into one DataFrame.

    Globs on the two-digit year in the filename and reads from the
    module-level `dircInput1` directory.
    """
    df = xr.open_mfdataset(dircInput1 + f'MIPAS_OCS_REN_HN2_ecmwf_3d_24_1.5_{str(year)[2:]}*.nc',
                           combine='by_coords').to_dataframe()
    # Promote the coordinate MultiIndex (time/lat/...) to ordinary columns.
    df = df.reset_index()
    return df#df.reset_index(inplace=True)
# def flag(df):
# df.dropna(subset=[species], inplace=True)
# return df
def name(df):
    """Renames the altitude column 'alt' to 'ALT' in place; returns the frame."""
    column_mapping = {'alt': 'ALT'}
    df.rename(columns=column_mapping, inplace=True)
    return df
def rescale(df):
    """Converts the AGE column from years to months in place; returns the frame."""
    df['AGE'] *= 12
    return df
def clean():
    """Load, rename and re-index the MIPAS data for the current year.

    NOTE(review): reads the module-level/global ``year`` set by the loop in
    ``__main__`` instead of taking it as a parameter — confirm before reusing
    this function outside this script.
    """
    df = open_data(year)
    # df = flag(df, )
    df = name(df)
    # NOTE(review): open_data() already called reset_index() on everything;
    # resetting the 'time' level again here presumably relies on how
    # to_dataframe() structures the index — verify against the input files.
    df.reset_index('time', inplace=True)
    return df
###############################################################################
if __name__ == "__main__":
    # Process each year from 2002 through 2012 inclusive.
    # (Previously the loop was `for year in year:`, shadowing the iterable
    # with the loop variable; renamed the array to `years` for clarity.)
    years = np.arange(2002, 2012 + 1, 1)
    for year in years:
        # clean() reads the global ``year`` bound by this loop.
        df = clean()
        df.to_xarray().to_netcdf(
            dircInput2 + f'MIPAS_OCS_REN_HN2_ecmwf_3d_24_1.5_{year}_cleaned.nc')
        print(year, 'finished')
    # Spot-check one of the cleaned files.
    df = xr.open_dataset(
        'C:/Users/Chenxi/OneDrive/phd/age_and_fire/data/03_cleaned/09_ENVISAT_MIPAS_with_AGEparams_cleaned/'
        + 'MIPAS_OCS_REN_HN2_ecmwf_3d_24_1.5_2004_cleaned.nc').to_dataframe()
    view = df.describe()
#
# Copyright 2020 EPAM Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import logging
import sys
from odahuflow.trainer.helpers.log import setup_logging
from odahuflow.trainer.helpers.mlflow_helper import parse_model_training_entity, train_models, save_models, \
get_or_create_experiment
def main():
    """CLI entry point: parse arguments, run MLflow training, save artifacts.

    Exits with status 2 on any training failure.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--verbose", action='store_true', help="more extensive logging")
    # Fixed help-text typo: "mode training" -> "model training".
    parser.add_argument("--mt-file", '--mt', type=str, required=True,
                        help="json/yaml file with a model training resource")
    parser.add_argument("--target", type=str, default='mlflow_output',
                        help="directory where result model will be saved")
    args = parser.parse_args()

    # Setup logging
    setup_logging(args)

    try:
        # Parse ModelTraining entity
        model_training = parse_model_training_entity(args.mt_file).model_training

        # Force local artifact location to copy local artifacts to GPPI archive
        experiment_id = get_or_create_experiment(model_training.spec.model.name,
                                                 artifact_location='/ml_experiment')

        # Start MLflow training process
        mlflow_run_id = train_models(model_training, experiment_id=experiment_id)

        # Save MLflow models as odahuflow artifact
        save_models(mlflow_run_id, model_training, args.target)
    except Exception as e:
        error_message = f'Exception occurs during model training. Message: {e}'
        # --verbose includes the traceback; otherwise just the message.
        if args.verbose:
            logging.exception(error_message)
        else:
            logging.error(error_message)
        sys.exit(2)


if __name__ == '__main__':
    main()
|
from setuptools import setup

# NOTE(review): the distribution name previously read 'Dis_Probability'
# (missing the 't') while the packaged directory is 'Dist_Probability';
# the name is aligned with the package here. Also fixed the "Gaussain" typo.
setup(name='Dist_Probability',
      version='0.2',
      description='This is a fundamental package that helps compute both '
                  'binomial and Gaussian probability',
      packages=['Dist_Probability'],
      author="Oluwaseun Adeyo",
      author_email="johnadeyo@hotmail.com",
      zip_safe=False)
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Exploration-related jobs."""
from __future__ import absolute_import
from __future__ import unicode_literals
import datetime
import logging
from core.domain import caching_services
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import exp_jobs_one_off
from core.domain import exp_services
from core.domain import rights_manager
from core.platform import models
from core.tests import test_utils
import feconf
(exp_models, classifier_models) = models.Registry.import_models(
[models.NAMES.exploration, models.NAMES.classifier])
class ExplorationMigrationJobTests(test_utils.GenericTestBase):
    """End-to-end tests for the one-off ExplorationMigrationJobManager job."""

    ALBERT_EMAIL = 'albert@example.com'
    ALBERT_NAME = 'albert'

    # Fixed exploration ids/titles shared by the tests below.
    VALID_EXP_ID = 'exp_id0'
    NEW_EXP_ID = 'exp_id1'
    EXP_TITLE = 'title'

    def setUp(self):
        super(ExplorationMigrationJobTests, self).setUp()

        # Setup user who will own the test explorations.
        self.signup(self.ALBERT_EMAIL, self.ALBERT_NAME)
        self.albert_id = self.get_user_id_from_email(self.ALBERT_EMAIL)
        self.process_and_flush_pending_mapreduce_tasks()

    def test_migration_job_does_not_convert_up_to_date_exp(self):
        """Tests that the exploration migration job does not convert an
        exploration that is already the latest states schema version.
        """
        # Create a new, default exploration that should not be affected by the
        # job.
        exploration = exp_domain.Exploration.create_default_exploration(
            self.VALID_EXP_ID, title='title', category='category')
        init_state = exploration.states[exploration.init_state_name]
        self.set_interaction_for_state(init_state, 'EndExploration')
        init_state.update_interaction_default_outcome(None)
        exp_services.save_new_exploration(self.albert_id, exploration)
        self.assertEqual(
            exploration.states_schema_version,
            feconf.CURRENT_STATE_SCHEMA_VERSION)
        yaml_before_migration = exploration.to_yaml()

        # Start migration job on sample exploration.
        job_id = exp_jobs_one_off.ExplorationMigrationJobManager.create_new()
        exp_jobs_one_off.ExplorationMigrationJobManager.enqueue(job_id)
        self.process_and_flush_pending_mapreduce_tasks()

        # Verify the exploration is exactly the same after migration.
        updated_exp = exp_fetchers.get_exploration_by_id(self.VALID_EXP_ID)
        self.assertEqual(
            updated_exp.states_schema_version,
            feconf.CURRENT_STATE_SCHEMA_VERSION)
        after_converted_yaml = updated_exp.to_yaml()
        self.assertEqual(after_converted_yaml, yaml_before_migration)

    def test_migration_job_does_not_have_validation_fail_on_default_exp(self):
        """Tests that the exploration migration job does not have a validation
        failure for a default exploration (of states schema version 0), due to
        the exploration having a null interaction ID in its initial state.
        """
        # Pin the schema versions so the created exploration is out of date
        # and therefore eligible for migration.
        swap_states_schema_41 = self.swap(
            feconf, 'CURRENT_STATE_SCHEMA_VERSION', 41)
        swap_exp_schema_46 = self.swap(
            exp_domain.Exploration, 'CURRENT_EXP_SCHEMA_VERSION', 46)
        with swap_states_schema_41, swap_exp_schema_46:
            exploration = exp_domain.Exploration.create_default_exploration(
                self.NEW_EXP_ID, title=self.EXP_TITLE)
            exp_services.save_new_exploration(self.albert_id, exploration)

        # Start migration job on sample exploration.
        job_id = exp_jobs_one_off.ExplorationMigrationJobManager.create_new()
        exp_jobs_one_off.ExplorationMigrationJobManager.enqueue(job_id)
        self.process_and_flush_pending_mapreduce_tasks()

        # Verify the new exploration has been migrated by the job.
        updated_exp = exp_fetchers.get_exploration_by_id(self.NEW_EXP_ID)
        self.assertEqual(
            updated_exp.states_schema_version,
            feconf.CURRENT_STATE_SCHEMA_VERSION)

        # Ensure the states structure within the exploration was changed.
        self.assertNotEqual(
            updated_exp.to_dict()['states'], self.VERSION_0_STATES_DICT)

    def test_migration_job_skips_deleted_explorations(self):
        """Tests that the exploration migration job skips deleted explorations
        and does not attempt to migrate.
        """
        swap_states_schema_41 = self.swap(
            feconf, 'CURRENT_STATE_SCHEMA_VERSION', 41)
        swap_exp_schema_46 = self.swap(
            exp_domain.Exploration, 'CURRENT_EXP_SCHEMA_VERSION', 46)
        with swap_states_schema_41, swap_exp_schema_46:
            exploration = exp_domain.Exploration.create_default_exploration(
                self.NEW_EXP_ID, title=self.EXP_TITLE)
            exp_services.save_new_exploration(self.albert_id, exploration)

        # Note: This creates a summary based on the upgraded model (which is
        # fine). A summary is needed to delete the exploration.
        exp_services.regenerate_exploration_and_contributors_summaries(
            self.NEW_EXP_ID)

        # Delete the exploration before migration occurs.
        exp_services.delete_exploration(self.albert_id, self.NEW_EXP_ID)

        # Ensure the exploration is deleted.
        with self.assertRaisesRegexp(Exception, 'Entity .* not found'):
            exp_fetchers.get_exploration_by_id(self.NEW_EXP_ID)

        # Start migration job on sample exploration.
        job_id = exp_jobs_one_off.ExplorationMigrationJobManager.create_new()
        exp_jobs_one_off.ExplorationMigrationJobManager.enqueue(job_id)

        # This running without errors indicates the deleted exploration is
        # being ignored, since otherwise exp_fetchers.get_exploration_by_id
        # (used within the job) will raise an error.
        self.process_and_flush_pending_mapreduce_tasks()

        # Ensure the exploration is still deleted.
        with self.assertRaisesRegexp(Exception, 'Entity .* not found'):
            exp_fetchers.get_exploration_by_id(self.NEW_EXP_ID)

    def test_exploration_migration_job_output(self):
        """Test that Exploration Migration job output is correct."""
        exploration = exp_domain.Exploration.create_default_exploration(
            self.VALID_EXP_ID, title='title', category='category')
        exp_services.save_new_exploration(self.albert_id, exploration)

        swap_states_schema_41 = self.swap(
            feconf, 'CURRENT_STATE_SCHEMA_VERSION', 41)
        swap_exp_schema_46 = self.swap(
            exp_domain.Exploration, 'CURRENT_EXP_SCHEMA_VERSION', 46)
        with swap_states_schema_41, swap_exp_schema_46:
            exploration = exp_domain.Exploration.create_default_exploration(
                self.NEW_EXP_ID, title=self.EXP_TITLE)
            exp_services.save_new_exploration(self.albert_id, exploration)

        # Start migration job on sample exploration.
        job_id = exp_jobs_one_off.ExplorationMigrationJobManager.create_new()
        exp_jobs_one_off.ExplorationMigrationJobManager.enqueue(job_id)
        self.process_and_flush_pending_mapreduce_tasks()

        actual_output = (
            exp_jobs_one_off.ExplorationMigrationJobManager.get_output(job_id))
        expected_output = ['[u\'SUCCESS\', 1]']
        self.assertEqual(actual_output, expected_output)

    def test_migration_job_creates_appropriate_classifier_models(self):
        """Tests that the exploration migration job creates appropriate
        classifier data models for explorations.
        """
        swap_states_schema_41 = self.swap(
            feconf, 'CURRENT_STATE_SCHEMA_VERSION', 41)
        swap_exp_schema_46 = self.swap(
            exp_domain.Exploration, 'CURRENT_EXP_SCHEMA_VERSION', 46)
        with swap_states_schema_41, swap_exp_schema_46:
            # Build a v41 exploration model directly (bypassing the domain
            # layer) so that migration has real work to do.
            exp_model = exp_models.ExplorationModel(
                id=self.NEW_EXP_ID, category='category', title=self.EXP_TITLE,
                objective='Old objective', language_code='en', tags=[],
                blurb='', author_notes='', states_schema_version=41,
                init_state_name=feconf.DEFAULT_INIT_STATE_NAME,
                states={
                    'END': {
                        'classifier_model_id': None,
                        'content': {
                            'content_id': 'content',
                            'html': 'Congratulations, you have finished!',
                        },
                        'interaction': {
                            'answer_groups': [],
                            'confirmed_unclassified_answers': [],
                            'customization_args': {
                                'recommendedExplorationIds': {'value': []},
                            },
                            'default_outcome': None,
                            'hints': [],
                            'id': 'EndExploration',
                            'solution': None,
                        },
                        'next_content_id_index': 0,
                        'param_changes': [],
                        'recorded_voiceovers': {
                            'voiceovers_mapping': {
                                'content': {},
                            }
                        },
                        'solicit_answer_details': False,
                        'written_translations': {
                            'translations_mapping': {
                                'content': {},
                            }
                        }
                    },
                    'Introduction': {
                        'classifier_model_id': None,
                        'content': {'content_id': 'content', 'html': ''},
                        'interaction': {
                            'answer_groups': [{
                                'outcome': {
                                    'dest': 'END',
                                    'feedback': {
                                        'content_id': 'feedback_1',
                                        'html': '<p>Correct!</p>',
                                    },
                                    'labelled_as_correct': False,
                                    'missing_prerequisite_skill_id': None,
                                    'param_changes': [],
                                    'refresher_exploration_id': None,
                                },
                                'rule_specs': [{
                                    'inputs': {
                                        'x': {
                                            'contentId': 'rule_input_3',
                                            'normalizedStrSet': ['InputString']
                                        }
                                    },
                                    'rule_type': 'Equals',
                                }],
                                'tagged_skill_misconception_id': None,
                                # Training data makes this state eligible for
                                # classifier model creation after migration.
                                'training_data': [
                                    'answer1', 'answer2', 'answer3'
                                ],
                            }],
                            'confirmed_unclassified_answers': [],
                            'customization_args': {
                                'placeholder': {
                                    'value': {
                                        'content_id': 'ca_placeholder_2',
                                        'unicode_str': '',
                                    },
                                },
                                'rows': {'value': 1},
                            },
                            'default_outcome': {
                                'dest': 'Introduction',
                                'feedback': {
                                    'content_id': 'default_outcome',
                                    'html': ''
                                },
                                'labelled_as_correct': False,
                                'missing_prerequisite_skill_id': None,
                                'param_changes': [],
                                'refresher_exploration_id': None,
                            },
                            'hints': [],
                            'id': 'TextInput',
                            'solution': None,
                        },
                        'next_content_id_index': 4,
                        'param_changes': [],
                        'recorded_voiceovers': {
                            'voiceovers_mapping': {
                                'ca_placeholder_2': {},
                                'content': {},
                                'default_outcome': {},
                                'feedback_1': {},
                                'rule_input_3': {},
                            }
                        },
                        'solicit_answer_details': False,
                        'written_translations': {
                            'translations_mapping': {
                                'ca_placeholder_2': {},
                                'content': {},
                                'default_outcome': {},
                                'feedback_1': {},
                                'rule_input_3': {},
                            }
                        },
                    },
                }, param_specs={}, param_changes=[])
            rights_manager.create_new_exploration_rights(
                self.NEW_EXP_ID, self.albert_id)
            commit_message = (
                'New exploration created with title \'%s\'.' % self.EXP_TITLE)
            exp_model.commit(self.albert_id, commit_message, [{
                'cmd': 'create_new',
                'title': 'title',
                'category': 'category',
            }])
            exp_rights = exp_models.ExplorationRightsModel.get_by_id(
                self.NEW_EXP_ID)
            exp_summary_model = exp_models.ExpSummaryModel(
                id=self.NEW_EXP_ID, title=self.EXP_TITLE, category='category',
                objective='Old objective', language_code='en', tags=[],
                ratings=feconf.get_empty_ratings(),
                scaled_average_rating=feconf.EMPTY_SCALED_AVERAGE_RATING,
                status=exp_rights.status,
                community_owned=exp_rights.community_owned,
                owner_ids=exp_rights.owner_ids, contributor_ids=[],
                contributors_summary={})
            exp_summary_model.update_timestamps()
            exp_summary_model.put()

        exploration = exp_fetchers.get_exploration_by_id(self.NEW_EXP_ID)
        initial_state_name = list(exploration.states.keys())[0]
        # Store classifier model for the new exploration.
        classifier_model_id = (
            classifier_models.ClassifierTrainingJobModel.create(
                'TextClassifier', 'TextInput', self.NEW_EXP_ID,
                exploration.version, datetime.datetime.utcnow(), {},
                initial_state_name, feconf.TRAINING_JOB_STATUS_COMPLETE, 1))
        # Store training job model for the classifier model.
        classifier_models.StateTrainingJobsMappingModel.create(
            self.NEW_EXP_ID, exploration.version, initial_state_name,
            {'TextClassifier': classifier_model_id})

        # Start migration job on sample exploration.
        job_id = exp_jobs_one_off.ExplorationMigrationJobManager.create_new()
        exp_jobs_one_off.ExplorationMigrationJobManager.enqueue(job_id)
        # Enable classifiers with thresholds low enough for the small
        # training_data set above to qualify.
        with self.swap(feconf, 'ENABLE_ML_CLASSIFIERS', True):
            with self.swap(feconf, 'MIN_TOTAL_TRAINING_EXAMPLES', 2):
                with self.swap(feconf, 'MIN_ASSIGNED_LABELS', 1):
                    self.process_and_flush_pending_mapreduce_tasks()
        actual_output = (
            exp_jobs_one_off.ExplorationMigrationJobManager.get_output(job_id))
        expected_output = ['[u\'SUCCESS\', 1]']
        self.assertEqual(actual_output, expected_output)

        new_exploration = exp_fetchers.get_exploration_by_id(self.NEW_EXP_ID)
        initial_state_name = list(new_exploration.states.keys())[0]
        self.assertLess(exploration.version, new_exploration.version)
        classifier_exp_mapping_model = (
            classifier_models.StateTrainingJobsMappingModel.get_models(
                self.NEW_EXP_ID, new_exploration.version,
                [initial_state_name]))[0]
        self.assertEqual(
            classifier_exp_mapping_model.algorithm_ids_to_job_ids[
                'TextClassifier'], classifier_model_id)

    def test_migration_job_fails_with_invalid_exploration(self):
        observed_log_messages = []

        def _mock_logging_function(msg, *args):
            """Mocks logging.error()."""
            observed_log_messages.append(msg % args)

        exploration = exp_domain.Exploration.create_default_exploration(
            self.VALID_EXP_ID, title='title', category='category')
        exp_services.save_new_exploration(self.albert_id, exploration)

        # Corrupt the stored model directly so non-strict validation fails.
        exploration_model = exp_models.ExplorationModel.get(self.VALID_EXP_ID)
        exploration_model.language_code = 'invalid_language_code'
        exploration_model.commit(
            self.albert_id, 'Changed language_code.', [])
        # Drop the cached (valid) copy so the job sees the corrupted model.
        caching_services.delete_multi(
            caching_services.CACHE_NAMESPACE_EXPLORATION, None,
            [self.VALID_EXP_ID])

        job_id = exp_jobs_one_off.ExplorationMigrationJobManager.create_new()
        exp_jobs_one_off.ExplorationMigrationJobManager.enqueue(job_id)
        with self.swap(logging, 'error', _mock_logging_function):
            self.process_and_flush_pending_mapreduce_tasks()

        self.assertEqual(
            observed_log_messages,
            ['Exploration %s failed non-strict validation: '
             'Invalid language_code: invalid_language_code'
             % (self.VALID_EXP_ID)])
class ExpSnapshotsMigrationJobTests(test_utils.GenericTestBase):
    """End-to-end tests for the one-off ExpSnapshotsMigrationJob job."""

    ALBERT_EMAIL = 'albert@example.com'
    ALBERT_NAME = 'albert'

    # Fixed exploration ids/titles shared by the tests below.
    VALID_EXP_ID = 'exp_id0'
    NEW_EXP_ID = 'exp_id1'
    EXP_TITLE = 'title'

    def setUp(self):
        super(ExpSnapshotsMigrationJobTests, self).setUp()

        # Setup user who will own the test explorations.
        self.signup(self.ALBERT_EMAIL, self.ALBERT_NAME)
        self.albert_id = self.get_user_id_from_email(self.ALBERT_EMAIL)
        self.process_and_flush_pending_mapreduce_tasks()

    def test_migration_job_does_not_convert_up_to_date_exp(self):
        """Tests that the exploration migration job does not convert a
        snapshot that is already the latest states schema version.
        """
        # Create a new, default exploration that should not be affected by the
        # job.
        exploration = exp_domain.Exploration.create_default_exploration(
            self.VALID_EXP_ID, title='title', category='category')
        init_state = exploration.states[exploration.init_state_name]
        self.set_interaction_for_state(init_state, 'EndExploration')
        init_state.update_interaction_default_outcome(None)
        exp_services.save_new_exploration(self.albert_id, exploration)
        self.assertEqual(
            exploration.states_schema_version,
            feconf.CURRENT_STATE_SCHEMA_VERSION)

        # Start migration job on sample exploration.
        job_id = exp_jobs_one_off.ExpSnapshotsMigrationJob.create_new()
        exp_jobs_one_off.ExpSnapshotsMigrationJob.enqueue(job_id)
        self.process_and_flush_pending_mapreduce_tasks()

        actual_output = (
            exp_jobs_one_off.ExpSnapshotsMigrationJob.get_output(job_id))
        expected_output = [
            '[u\'SUCCESS - Snapshot is already at latest schema version\', 1]']
        self.assertEqual(actual_output, expected_output)

    def test_migration_job_succeeds_on_default_exploration(self):
        swap_states_schema_41 = self.swap(
            feconf, 'CURRENT_STATE_SCHEMA_VERSION', 41)
        swap_exp_schema_46 = self.swap(
            exp_domain.Exploration, 'CURRENT_EXP_SCHEMA_VERSION', 46)
        with swap_states_schema_41, swap_exp_schema_46:
            exploration = exp_domain.Exploration.create_default_exploration(
                self.VALID_EXP_ID, title='title', category='category')
            exp_services.save_new_exploration(self.albert_id, exploration)

        # Bring the main exploration to schema version 42.
        caching_services.delete_multi(
            caching_services.CACHE_NAMESPACE_EXPLORATION, None,
            [self.VALID_EXP_ID])
        migration_change_list = [
            exp_domain.ExplorationChange({
                'cmd': (
                    exp_domain.CMD_MIGRATE_STATES_SCHEMA_TO_LATEST_VERSION),
                'from_version': '41',
                'to_version': '42'
            })
        ]
        swap_states_schema_42 = self.swap(
            feconf, 'CURRENT_STATE_SCHEMA_VERSION', 42)
        swap_exp_schema_47 = self.swap(
            exp_domain.Exploration, 'CURRENT_EXP_SCHEMA_VERSION', 47)
        with swap_states_schema_42, swap_exp_schema_47:
            exp_services.update_exploration(
                self.albert_id, self.VALID_EXP_ID, migration_change_list,
                'Ran Exploration Migration job.')

            job_id = exp_jobs_one_off.ExpSnapshotsMigrationJob.create_new()
            exp_jobs_one_off.ExpSnapshotsMigrationJob.enqueue(job_id)
            self.process_and_flush_pending_mapreduce_tasks()

        actual_output = (
            exp_jobs_one_off.ExpSnapshotsMigrationJob.get_output(job_id))
        expected_output = [
            '[u\'SUCCESS - Model saved\', 1]',
            '[u\'SUCCESS - Model upgraded\', 1]',
            '[u\'SUCCESS - Snapshot is already at latest schema version\', 1]']
        self.assertEqual(sorted(actual_output), sorted(expected_output))

    def test_migration_job_skips_deleted_explorations(self):
        """Tests that the exploration migration job skips deleted explorations
        and does not attempt to migrate.
        """
        swap_states_schema_41 = self.swap(
            feconf, 'CURRENT_STATE_SCHEMA_VERSION', 41)
        swap_exp_schema_46 = self.swap(
            exp_domain.Exploration, 'CURRENT_EXP_SCHEMA_VERSION', 46)
        with swap_states_schema_41, swap_exp_schema_46:
            exploration = exp_domain.Exploration.create_default_exploration(
                self.NEW_EXP_ID, title=self.EXP_TITLE)
            exp_services.save_new_exploration(self.albert_id, exploration)

        # Note: This creates a summary based on the upgraded model (which is
        # fine). A summary is needed to delete the exploration.
        exp_services.regenerate_exploration_and_contributors_summaries(
            self.NEW_EXP_ID)

        # Delete the exploration before migration occurs.
        exp_services.delete_exploration(self.albert_id, self.NEW_EXP_ID)

        # Ensure the exploration is deleted.
        with self.assertRaisesRegexp(Exception, 'Entity .* not found'):
            exp_fetchers.get_exploration_by_id(self.NEW_EXP_ID)

        # Start migration job on sample exploration.
        job_id = exp_jobs_one_off.ExpSnapshotsMigrationJob.create_new()
        exp_jobs_one_off.ExpSnapshotsMigrationJob.enqueue(job_id)

        # This running without errors indicates the deleted exploration is
        # being ignored, since otherwise exp_fetchers.get_exploration_by_id
        # (used within the job) will raise an error.
        self.process_and_flush_pending_mapreduce_tasks()

        actual_output = (
            exp_jobs_one_off.ExpSnapshotsMigrationJob.get_output(job_id))
        # Map output ordering is nondeterministic, so accept both orderings
        # of the two snapshot ids.
        expected_output_choices = [
            '[u\'INFO - Exploration does not exist\', [u\'%s-1\', u\'%s-2\']]' %
            (self.NEW_EXP_ID, self.NEW_EXP_ID),
            '[u\'INFO - Exploration does not exist\', [u\'%s-2\', u\'%s-1\']]' %
            (self.NEW_EXP_ID, self.NEW_EXP_ID)
        ]
        self.assertEqual(len(actual_output), 1)
        self.assertIn(actual_output[0], expected_output_choices)

        # Ensure the exploration is still deleted.
        with self.assertRaisesRegexp(Exception, 'Entity .* not found'):
            exp_fetchers.get_exploration_by_id(self.NEW_EXP_ID)

    def test_migration_job_detects_invalid_exploration(self):
        exploration = exp_domain.Exploration.create_default_exploration(
            self.VALID_EXP_ID, title='title', category='category')
        exp_services.save_new_exploration(self.albert_id, exploration)

        # Corrupt the stored model directly so non-strict validation fails.
        exploration_model = exp_models.ExplorationModel.get(self.VALID_EXP_ID)
        exploration_model.language_code = 'invalid_language_code'
        exploration_model.commit(
            self.albert_id, 'Changed language_code.', [])
        # Drop the cached (valid) copy so the job sees the corrupted model.
        caching_services.delete_multi(
            caching_services.CACHE_NAMESPACE_EXPLORATION, None,
            [self.VALID_EXP_ID])

        job_id = exp_jobs_one_off.ExpSnapshotsMigrationJob.create_new()
        exp_jobs_one_off.ExpSnapshotsMigrationJob.enqueue(job_id)
        self.process_and_flush_pending_mapreduce_tasks()

        actual_output = (
            exp_jobs_one_off.ExpSnapshotsMigrationJob.get_output(job_id))
        expected_output_message = (
            '[u\'INFO - Exploration %s-1 failed non-strict validation\', '
            '[u\'Invalid language_code: invalid_language_code\']]'
            % self.VALID_EXP_ID)
        self.assertIn(expected_output_message, actual_output)

    def test_migration_job_detects_exploration_that_is_not_up_to_date(self):
        swap_states_schema_41 = self.swap(
            feconf, 'CURRENT_STATE_SCHEMA_VERSION', 41)
        swap_exp_schema_46 = self.swap(
            exp_domain.Exploration, 'CURRENT_EXP_SCHEMA_VERSION', 46)
        with swap_states_schema_41, swap_exp_schema_46:
            exploration = exp_domain.Exploration.create_default_exploration(
                self.VALID_EXP_ID, title='title', category='category')
            exp_services.save_new_exploration(self.albert_id, exploration)
        self.assertLess(
            exploration.states_schema_version,
            feconf.CURRENT_STATE_SCHEMA_VERSION)

        swap_states_schema_42 = self.swap(
            feconf, 'CURRENT_STATE_SCHEMA_VERSION', 42)
        swap_exp_schema_47 = self.swap(
            exp_domain.Exploration, 'CURRENT_EXP_SCHEMA_VERSION', 47)
        with swap_states_schema_42, swap_exp_schema_47:
            job_id = exp_jobs_one_off.ExpSnapshotsMigrationJob.create_new()
            exp_jobs_one_off.ExpSnapshotsMigrationJob.enqueue(job_id)
            self.process_and_flush_pending_mapreduce_tasks()

            actual_output = (
                exp_jobs_one_off.ExpSnapshotsMigrationJob.get_output(
                    job_id))
            expected_output = [
                '[u\'FAILURE - Exploration is not at latest schema version\', '
                '[u\'%s\']]' % self.VALID_EXP_ID,
            ]
            self.assertEqual(sorted(actual_output), sorted(expected_output))
|
import random
# Canned response templates. Placeholders: {0} = author, {1} = target.
eat_reactions = ['''_{0}_, you try to eat _{1}_, but you can\'t do it.
So you leave, with the taste of failure hanging in your mouth.''',
                 '_{0}_, you try to gobble up _{1}_. They prove to be a tight fit, but you manage to eat them.',
                 '_{0}_, you advance toward _{1}_, but you turn back and run, because they want to eat you too.',
                 '_{0}_, you finish eating _{1}_, and have a long nap, the sign of a good meal.']
pet_reactions = ['_{0}_, you pet _{1}_, as they smile from your petting.',
                 '_{0}_, you try to pet _{1}_, but they hid somewhere.',
                 '_{0}_, you take your hand near _{1}_, but they bite you.',
                 '_{0}_, you try petting _{1}_ but then they hiss and run away.']
drink_reactions = ['_{0}_, you pierce {1} with a straw, as they cry out in pain.',
                   '_{0}_, you try to drink _{1}_, but you realize they aren\'t liquid.',
                   '_{0}_, you try to drink _{1}_, but they have a mirror. So now you\'re drinking yourself.']
hug_reactions = ['_{0}_, you try to hug _{1}_, but they run away because they don\'t understand your affection.',
                 '_{0}_, you hug _{1}_. and they smile, because they didn\'t know they needed it.',
                 '_{0}_, you hug _{1}_, and they hug you back, the sign of a good friendship.',
                 '_{0}_, you try to hug _{1}_, but they pull out a knife because they think you were gonna mug them.', ]
fart_reactions = ['*farting noises*', 'Toot',
                  '*Blerrrtttt*', '**no.**', '_ew_',
                  'https://tenor.com/view/dance-moves-dancing-singer-groovy-gif-17029825']
# BUG FIX: a missing comma between 'Perhaps' and 'Can\'t say for sure'
# implicitly concatenated them into one answer ("PerhapsCan't say for sure")
# and silently dropped an intended option.
ask_answers_list = ['No', 'Yes', 'Perhaps', 'Can\'t say for sure', 'For sure!',
                    'Why not?', 'hell naw', 'no, just no.', 'Definitely 👌']
toss_outcomes = ['Heads', 'Tails', 'Nothing']
uptime_footers = ["I'm tired. I need coffee.", "Man, being a bot is a tiring job.", "I wanna take a nap.",
                  "Sometimes I wonder why I exist. Am I an insignificant creature in a vast endless space?"]
countlines_responses = ["I am made of _{0}_ lines of python code. Pretty cool, huh?",
                        r"My owner has written _{0}_ lines of code in my brain. I a-a-am ... _glitches out_",
                        "I have _{0}_ lines of python code as my insides. That's pretty cool to me, you know...",
                        "Oh no! How did _{0}_ lines of python code get inside me?\n_I'm scared..._",
                        "I am made of _{0}_ lines of python code. What can I say except 😎",
                        "Some poor soul wrote _{0}_ lines of python code to give me a life. I hope he's okay"]
spongebob_text_responses = ["You didn't enter any text, genius", "Enter some text, smartass",
                            "Look who didn't specify the text!"]
good_games = ["GTA San Andreas", "Undertale", "Cyberpunk", "[Insert a good game's title here]",
              "[Insert AAA title here]"]
def ask_qn(msg):
    """Answer a yes/no style question with a random canned response."""
    words = msg.split()
    # The first word is the command itself; no question was asked.
    if len(words) == 1:
        return 'You didn\'t ask the question, smartass'
    chosen = random.choice(ask_answers_list)
    return f'_{chosen}_'
def coin_flip():
    """Simulate a coin toss (the coin may also land on nothing at all)."""
    outcome = random.choice(toss_outcomes)
    return f'You tossed a coin and got... **_{outcome}_**.'
def roll_dice():
    """Roll a standard six-sided die and describe the result."""
    pips = random.randint(1, 6)
    return f'You rolled a die and got a _**{pips}**_'
def eat_func(author, user, bot):
    """Describe *author* trying to eat *user*; the bot and self-eating are special-cased."""
    # Eating the bot itself gets a fixed complaint.
    if user.id == bot.user.id:
        return '''For the record, I **DO NOT** appreciate being eaten.
Even though I am digital and you would probably get electrocuted.'''
    # Trying to eat yourself never works.
    if author == user:
        return 'You try to eat yourself, but fail miserably'
    reaction = random.choice(eat_reactions)
    return reaction.format(author.display_name, user.display_name)
def pet_func(author, user, bot):
    """Describe *author* petting *user*; the bot and self-petting are special-cased."""
    # The bot enjoys being petted.
    if user.id == bot.user.id:
        return 'Well, what can I say? I do like people petting me :)'
    # Petting yourself gets sympathy instead of a random reaction.
    if author == user:
        return 'You pet yourself. I feel you, mate'
    reaction = random.choice(pet_reactions)
    return reaction.format(author.display_name, user.display_name)
def drink_func(author, user, bot):
    """Describe *author* trying to drink *user*; the bot and self-drinking are special-cased."""
    # The bot cannot be drunk.
    if user.id == bot.user.id:
        return 'You try to drink me, but you can\'t, because I\'m digital!'
    # Drinking yourself is painful.
    if author == user:
        return 'You pierce yourself with a straw. Not surprisingly, it hurts.'
    reaction = random.choice(drink_reactions)
    return reaction.format(author.display_name, user.display_name)
def fart_reaction():
    """Pick one of the canned fart responses at random."""
    options = fart_reactions
    return random.choice(options)
def hug_func(author, user, bot):
    """Describe *author* hugging *user*; the bot and self-hugging are special-cased."""
    # The bot appreciates hugs.
    if user.id == bot.user.id:
        return 'Even though I\'m digital, I do appreciate hugs :)'
    # Hugging yourself gets an offer instead of a random reaction.
    if author == user:
        return 'You try to hug yourself, I feel you. Mind if I give you a hug?'
    reaction = random.choice(hug_reactions)
    return reaction.format(author.display_name, user.display_name)
|
def outer():
    """Demonstrate that a closure resolves its free variable at call time."""
    def show_message():
        # 'greeting' is looked up when show_message() runs, not when it
        # is defined, so the assignment below is visible here.
        print(greeting)
    greeting = "Hello World!!"
    show_message()
def main():
    """Entry point for the closure demo: runs outer(), which prints a greeting."""
    outer()
if __name__ == '__main__':
    main()
import discord
from discord.app.commands import slash_command, Option
from discord.ext import commands, menus
from discord.member import VoiceState
from discord.utils import escape_mentions
from mysqldb import *
from external_cons import the_drive
from PIL import Image, ImageDraw, ImageFont
import os
import shutil
import asyncio
import glob
from itertools import cycle
from extra.menu import InventoryLoop
from typing import List, Dict, Tuple, Union, Optional
from extra.slothclasses.player import Player
from extra.useful_variables import level_badges, flag_badges, patreon_roles
from extra.gif_manager import GIF
from extra import utils
from extra.currency.useritems import UserItemsTable
from extra.currency.userserveractivity import UserServerActivityTable, UserVoiceSystem
from extra.currency.usercurrency import UserCurrencyTable
# Role/guild ids come from the environment; 123 is a placeholder fallback.
booster_role_id = int(os.getenv('BOOSTER_ROLE_ID', 123))
guild_ids = [int(os.getenv('SERVER_ID', 123))]
# Database-table mixin cogs composed into the SlothCurrency cog below.
currency_cogs: List[commands.Cog] = [
    UserItemsTable, UserServerActivityTable, UserCurrencyTable,
    UserVoiceSystem
]
class SlothCurrency(*currency_cogs):
""" Sloth Currency commands. """
def __init__(self, client) -> None:
    """ Class init method. """
    # Bot client reference; used later to reach other cogs (e.g. SlothClass).
    self.client = client
@commands.Cog.listener()
async def on_ready(self) -> None:
    """ Tells when the cog is ready to go. """
    print("SlothCurrency cog is online!")
@commands.Cog.listener()
async def on_message(self, message) -> None:
    """ Updates the user's message counter and gives them XP. """
    # Ignore DMs and messages from other bots.
    if not message.guild:
        return
    if message.author.bot:
        return
    if not await self.check_user_server_activity_table_exists():
        return
    user_info = await self.get_user_activity_info(message.author.id)
    if not user_info:
        # First tracked message: create the activity row and stop.
        return await self.insert_user_server_activity(message.author.id, 1)
    effects = await self.client.get_cog('SlothClass').get_user_effects(message.author)
    # 'sabotaged' users earn no message credit.
    if 'sabotaged' not in effects:
        await self.update_user_server_messages(message.author.id, 1)
# In-game commands
@commands.command()
@commands.has_permissions(administrator=True)
async def react(self, ctx, mid: discord.Message = None, reaction=None):
    """ (ADM) Makes the bot react onto a message.
    :param mid: The message ID.
    :param reaction: The reaction to add. """
    # Remove the invoking command message to keep the channel clean.
    await ctx.message.delete()
    if not reaction:
        return await ctx.send("**Inform a reaction!**", delete_after=3)
    if not mid:
        return await ctx.send("**Inform a message id!**", delete_after=3)
    await mid.add_reaction(reaction)
@commands.command(aliases=['inv'])
@Player.poisoned()
async def inventory(self, ctx, member: discord.Member = None) -> None:
    """ Shows the member's item inventory.
    :param member: The member to show. """
    await ctx.message.delete()
    if not member:
        # Default to the command author.
        member = discord.utils.get(ctx.guild.members, id=ctx.author.id)
    user_items = await self.get_user_items(member.id)
    if not user_items:
        return await ctx.send(f"**You don't have items to show, {ctx.author.mention}!**")
    # Paginated inventory view with reaction navigation.
    the_menu = menus.MenuPages(source=InventoryLoop(user_items, member), clear_reactions_after=True)
    await the_menu.start(ctx)
@commands.command()
@Player.poisoned()
async def equip(self, ctx, *, item_name: str = None) -> None:
    """ Equips an item.
    :param item_name: The item to equip. """
    await ctx.message.delete()
    if not item_name:
        return await ctx.send("**Inform an item to equip!**", delete_after=3)
    # BUG FIX: sanitize only after the None-guard; previously
    # escape_mentions(None) raised before the guard could reply.
    item_name = escape_mentions(item_name)
    if user_item := await self.get_user_item(ctx.author.id, item_name.title()):
        if await self.check_user_can_equip(ctx.author.id, item_name.title()):
            await self.update_user_item_info(ctx.author.id, item_name, 'equipped')
            return await ctx.send(f"**{ctx.author.mention} equipped __{item_name.title()}__!**", delete_after=3)
        else:
            # user_item[3] is the item type already occupying the slot.
            return await ctx.send(f"**You already have a __{user_item[3]}__ item equipped!**", delete_after=3)
    else:
        return await ctx.send(f"**You don't have an item named __{item_name.title()}__!**", delete_after=3)
@commands.command()
@Player.poisoned()
async def unequip(self, ctx, *, item_name: str = None) -> None:
    """ Unequips an item.
    :param item_name: The item to unequip """
    await ctx.message.delete()
    if not item_name:
        return await ctx.send("**Inform an item to unequip!**", delete_after=3)
    # BUG FIX: sanitize only after the None-guard; previously
    # escape_mentions(None) raised before the guard could reply.
    item_name = escape_mentions(item_name)
    user_items = await self.get_user_items(ctx.author.id)
    for item in user_items:
        if item[1] == item_name.title():
            if await self.check_user_can_unequip(ctx.author.id, item_name.lower()):
                await self.update_user_item_info(ctx.author.id, item_name.title(), 'unequipped')
                return await ctx.send(f"**{ctx.author.mention} unequipped __{item_name.title()}__!**",
                                      delete_after=3)
            else:
                return await ctx.send(f"**The item __{item_name}__ is already unequipped!**", delete_after=3)
    # BUG FIX: only report "not found" after scanning *every* item; the
    # previous version returned on the first non-matching item.
    return await ctx.send(f"**You don't have an item named __{item_name.title()}__!**", delete_after=3)
@commands.command()
@commands.has_permissions(administrator=True)
async def add_member(self, ctx, member: discord.Member = None, *, item_name: str = None) -> None:
    """ (ADM) Gives a member an item.
    :param member: The member to give the item.
    :param item_name: The name of the item. """
    if not member:
        return await ctx.send("**Inform a member!**", delete_after=3)
    if not item_name:
        return await ctx.send("**Inform an item to add!**", delete_after=3)
    # Escape only after the None-checks: escape_mentions(None) raises a TypeError,
    # which previously crashed the command when no item name was given.
    item_name = escape_mentions(item_name)
    user_has_item = await self.get_user_specific_item(member.id, item_name.title())
    if user_has_item:
        return await ctx.send(f"**{member.name} already has that item!**", delete_after=3)
    if (shop_item := await self.get_shop_item(item_name)):
        # NOTE(review): shop_item[5]/shop_item[3] look like the item type and
        # stored image path — confirm against the shop_shopitem schema.
        await self.insert_user_item(member.id, item_name, 'unequipped', shop_item[5], str(shop_item[3]).replace('registered_items/', ''))
        return await ctx.send(f"**{item_name.title()} given to {member.name}!**", delete_after=3)
    else:
        return await ctx.send(f"**This item doesn't exist, {ctx.author.mention}!**")
async def get_shop_item(self, item_name: str) -> List[Union[str, int]]:
    """ Fetches a single row from the shop by item name.
    :param item_name: The name of the item to get. """
    cursor, database = await the_django_database()
    await cursor.execute("SELECT * FROM shop_shopitem WHERE item_name = %s", (item_name,))
    row = await cursor.fetchone()
    await cursor.close()
    return row
async def get_shop_items(self) -> List[List[Union[str, int]]]:
    """ Fetches every row from the shop table. """
    cursor, database = await the_django_database()
    await cursor.execute("SELECT * FROM shop_shopitem")
    rows = await cursor.fetchall()
    await cursor.close()
    return rows
@commands.command()
@commands.has_permissions(administrator=True)
async def remove_member_item(self, ctx, member: discord.Member = None, *, item_name: str = None) -> None:
    """ (ADM) Removes an item from the member.
    :param member: The member to remove the item.
    :param item_name: The name of the item. """
    if not member:
        return await ctx.send("**Inform a member!**", delete_after=3)
    if not item_name:
        return await ctx.send("**Inform an item to remove!**", delete_after=3)
    # Escape only after the None-checks: escape_mentions(None) raises a TypeError,
    # which previously crashed the command when no item name was given.
    item_name = escape_mentions(item_name)
    user_has_item = await self.get_user_specific_item(member.id, item_name)
    # Truthiness instead of len(): the helper may return None/an empty result,
    # and len(None) would raise a TypeError (check_user_has_item treats the
    # same helper's result by truthiness).
    if user_has_item:
        await self.remove_user_item(member.id, item_name)
        return await ctx.send(f"**{item_name.title()} taken from {member.name}!**", delete_after=3)
    else:
        return await ctx.send(f"**{member.name} doesn't have that item!**", delete_after=3)
async def check_user_has_item(self, user_id: int, item_name: str) -> bool:
    """ Checks whether the user has an item.
    :param user_id: The ID of the user to check.
    :param item_name: The name of the item to check. """
    user_item = await self.get_user_specific_item(user_id, item_name)
    # bool() of the query result replaces the verbose if/else True/False chain.
    return bool(user_item)
async def send_hacked_image(self, answer: discord.PartialMessageable, author: discord.Member, member: discord.Member) -> None:
    """ Makes and sends a hacked image.
    :param answer: The answerable object (e.g. ctx.send or ctx.respond).
    :param author: The author of the action.
    :param member: The member who was hacked. """
    SlothClass = self.client.get_cog('SlothClass')
    try:
        # Gets original skill action and the attacker
        skill_action = await SlothClass.get_skill_action_by_target_id_and_skill_type(member.id, 'hack')
        # NOTE(review): index 0 looks like the attacker's user ID — confirm
        # against the skill-action row layout.
        skill_action = skill_action[0] if skill_action else '??'
        hacker = self.client.get_user(skill_action)
        # Makes the Hacked image and saves it
        big = ImageFont.truetype("built titling sb.ttf", 80)
        background = Image.open('sloth_custom_images/background/hacked.png').convert('RGBA')
        draw = ImageDraw.Draw(background)
        draw.text((350, 300), f"Hacked by {hacker}", font=big, fill=(0, 0, 0))
        file_path = f'media/temporary/hacked_{member.id}.png'
        background.save(file_path, 'png', quality=90)
    except Exception as e:
        # Any failure (missing font/background, DB error) is reported generically.
        print(e)
        return await answer(f"**{author.mention}, something went wrong with it!**")
    else:
        await answer(file=discord.File(file_path))
        # await asyncio.sleep(0.5)
        # Clean up the temporary image after it has been sent.
        return os.remove(file_path)
async def send_frogged_image(self, answer: discord.PartialMessageable, author: discord.Member, member: discord.Member, knocked_out: bool = False) -> None:
    """ Makes and sends a frogged image.
    :param answer: The answerable object (e.g. ctx.send or ctx.respond).
    :param author: The author of the action.
    :param member: The member who was frogged.
    :param knocked_out: Whether the user is knocked out"""
    SlothClass = self.client.get_cog('SlothClass')
    try:
        # Gets original skill action and the attacker
        skill_action = await SlothClass.get_skill_action_by_target_id_and_skill_type(member.id, 'frog')
        # NOTE(review): index 0 looks like the attacker's user ID — confirm
        # against the skill-action row layout.
        skill_action = skill_action[0] if skill_action else '??'
        metamorph = self.client.get_user(skill_action)
        # Makes the Frogged image and saves it
        big = ImageFont.truetype("built titling sb.ttf", 80)
        background = None
        # A knocked-out target gets a different background variant.
        if knocked_out:
            background = Image.open('sloth_custom_images/background/frogged_ko.png').convert('RGBA')
        else:
            background = Image.open('sloth_custom_images/background/frogged.png').convert('RGBA')
        draw = ImageDraw.Draw(background)
        draw.text((170, 170), f"{metamorph}", font=big, fill=(39, 126, 205))
        file_path = f'media/temporary/frogged_{member.id}.png'
        background.save(file_path, 'png', quality=90)
    except Exception as e:
        # Any failure (missing font/background, DB error) is reported generically.
        print(e)
        return await answer(f"**{author.mention}, something went wrong with it!**")
    else:
        await answer(file=discord.File(file_path))
        # await asyncio.sleep(0.5)
        # Clean up the temporary image after it has been sent.
        return os.remove(file_path)
@commands.command(name="profile")
@commands.cooldown(1, 5, commands.BucketType.user)
@Player.poisoned()
async def _profile_command(self, ctx, member: discord.Member = None):
""" Shows the member's profile with their custom sloth.
:param member: The member to see the profile. (Optional) """
await self._profile(ctx, member)
@slash_command(name="profile", guild_ids=guild_ids)
@commands.cooldown(1, 5, commands.BucketType.user)
@Player.poisoned()
async def _profile_slash(self, ctx, member: Option(discord.Member, description="The member to show the info; [Default=Yours]", required=False)) -> None:
""" Shows the member's profile with their custom sloth. """
await ctx.defer()
await self._profile(ctx, member)
async def _profile(self, ctx, member: discord.Member = None):
    """ Shows the member's profile with their custom sloth.
    Shared implementation behind both the prefixed and the slash command.
    :param member: The member to see the profile. (Optional) """
    # Pick the right responder: prefixed commands answer via ctx.send,
    # slash commands via ctx.respond.
    answer = None
    if isinstance(ctx, commands.Context):
        answer = ctx.send
    else:
        answer = ctx.respond
    author = ctx.author
    if not member:
        member = author
    user_info = await self.get_user_currency(member.id)
    sloth_profile = await self.client.get_cog('SlothClass').get_sloth_profile(member.id)
    # Button linking to the website where accounts/classes are managed.
    view = discord.ui.View()
    view.add_item(discord.ui.Button(style=5, label="Create Account", emoji="🦥", url="https://thelanguagesloth.com/profile/update"))
    # No account or no sloth profile yet: point the user at the sign-up page.
    if not user_info or not sloth_profile:
        if author.id == member.id:
            return await answer(
                embed=discord.Embed(description=f"**{member.mention}, you don't have an account yet. Click [here](https://thelanguagesloth.com/profile/update) to create one, or in the button below!**"),
                view=view)
        else:
            return await answer(f"**{member} doesn't have an account yet!**", delete_after=3)
    # sloth_profile[1] holds the sloth class name; 'default' means none chosen yet.
    if sloth_profile[1].lower() == 'default':
        if author.id == member.id:
            return await answer(
                embed=discord.Embed(description=f"**{member.mention}, you don't have a Sloth class yet. Click [here](https://thelanguagesloth.com/profile/slothclass) to choose one, or in the button below!**"),
                view=view)
        else:
            return await answer(f"**{member} has a default Sloth class, I cannot show their profile!**")
    SlothClass = self.client.get_cog('SlothClass')
    effects = await SlothClass.get_user_effects(member=member)
    # Checks whether user is frogged
    if 'frogged' in effects:
        ko = 'knocked_out' in effects
        return await self.send_frogged_image(answer, author, member, ko)
    # Checks whether user is hacked
    if 'hacked' in effects:
        await self.send_hacked_image(answer, author, member)
        # Looking at a hacked member's profile may spread the virus to the viewer.
        if author.id != member.id:
            await SlothClass.check_virus(ctx=ctx, target=member)
        return
    small = ImageFont.truetype("built titling sb.ttf", 45)
    background = Image.open(await self.get_user_specific_type_item(member.id, 'background'))
    # Checks whether user is transmutated
    sloth = None
    if await SlothClass.has_effect(effects, 'transmutated'):
        sloth = Image.open(f"./sloth_custom_images/sloth/transmutated_sloth.png")
    else:
        sloth = Image.open(f"./sloth_custom_images/sloth/{sloth_profile[1].title()}.png")
    # Gets an item image for each equippable slot
    body = Image.open(await self.get_user_specific_type_item(member.id, 'body'))
    hand = Image.open(await self.get_user_specific_type_item(member.id, 'hand'))
    foot = Image.open(await self.get_user_specific_type_item(member.id, 'foot'))
    head = Image.open(await self.get_user_specific_type_item(member.id, 'head'))
    hud = Image.open(await self.get_user_specific_type_item(member.id, 'hud'))
    # Pastes all item images; order matters (later pastes overlay earlier ones).
    pfp = await utils.get_user_pfp(member)
    background.paste(sloth, (0, 0), sloth)
    background.paste(body, (0, 0), body)
    background.paste(head, (0, 0), head)
    background.paste(foot, (0, 0), foot)
    background.paste(hand, (0, 0), hand)
    background.paste(hud, (0, 0), hud)
    # Checks if user is a booster; boosters get a dedicated badge.
    booster_role = discord.utils.get(ctx.guild.roles, id=booster_role_id)
    if booster_role in member.roles:
        if flag_badge := flag_badges.get('discord_server_booster'):
            # flag_badge is (file name, paste coordinates).
            file_path = f"./sloth_custom_images/badge/{flag_badge[0]}"
            if os.path.isfile(file_path):
                booster_badge = Image.open(file_path).resize((50, 50)).convert('RGBA')
                background.paste(booster_badge, flag_badge[1], booster_badge)
    # Pastes all flag badges that the user has
    flags = await utils.get_member_public_flags(member)
    for flag in flags:
        if flag_badge := flag_badges.get(flag):
            file_path = f"./sloth_custom_images/badge/{flag_badge[0]}"
            if os.path.isfile(file_path):
                flag_image = Image.open(file_path).resize((50, 50)).convert('RGBA')
                background.paste(flag_image, flag_badge[1], flag_image)
    # Checks whether user has level badges; iterates thresholds descending and
    # pastes only the highest badge the user qualifies for.
    user_level = await self.client.get_cog('SlothReputation').get_specific_user(member.id)
    for key, value in reversed(list(level_badges.items())):
        if user_level[0][2] >= key:
            file_path = f"sloth_custom_images/badge/{value[0]}.png"
            if os.path.isfile(file_path):
                level_badge = Image.open(file_path)
                background.paste(level_badge, value[1], level_badge)
            break
    # Tries to print the user's profile picture
    try:
        background.paste(pfp, (201, 2), pfp)
    except Exception:
        pass
    draw = ImageDraw.Draw(background)
    # Username (truncated to 10 chars) and balance (user_info[0][1]).
    draw.text((310, 5), f"{str(member)[:10]}", (255, 255, 255), font=small)
    draw.text((80, 525), f"{user_info[0][1]}", (255, 255, 255), font=small)
    file_path = f'media/temporary/profile_{member.id}.png'
    background.save(file_path, 'png', quality=90)
    # Effects that carry an animation get rendered as a GIF on top of the PNG.
    all_effects = {key: value for (key, value) in effects.items() if value.get('has_gif')}
    async with ctx.typing():
        if all_effects:
            try:
                gif_file_path = await self.make_gif_image(user_id=member.id, all_effects=all_effects)
                await answer(file=discord.File(gif_file_path))
            except Exception as e:
                print(e)
                pass
            finally:
                # NOTE(review): if make_gif_image itself raised, gif_file_path
                # would be unbound here and this cleanup would NameError — confirm.
                os.remove(file_path)
                os.remove(gif_file_path)
        else:
            try:
                await answer(file=discord.File(file_path))
            except:
                pass
            finally:
                os.remove(file_path)
async def make_gif_image(self, user_id: int, all_effects: Dict[str, Dict[str, Union[List[str], Tuple[int]]]]) -> str:
    """ Makes a gif image out of a profile image.
    :param user_id: The ID of the user for whom to make the GIF.
    :param all_effects: All effects that the user currently has.
    :return: The path of the generated GIF (returned even on failure). """
    gif_file_path = f'media/temporary/profile_{user_id}.gif'
    try:
        # The previously rendered static profile PNG is the GIF's base layer.
        profile = Image.open(f'media/temporary/profile_{user_id}.png').convert('RGBA')
        gif = GIF(image=profile, frame_duration=40)
        path = 'media/effects'
        # Gets all frames of each effect and resize them properly, respectively.
        for effect in all_effects:
            full_path = f"{path}/{effect}"
            # Checks whether the effect folder exists
            if os.path.isdir(full_path):
                # Gets all frame images from the folder; frames are named
                # <effect>_1.png, <effect>_2.png, ...
                for i in range(len(glob.glob(f"{full_path}/*.png"))):
                    frame = Image.open(f"{full_path}/{effect}_{i+1}.png") # convert('RGBA') # remove this convert later
                    # Checks whether frame has to be resized
                    if all_effects[effect]['resize']:
                        frame = frame.resize(all_effects[effect]['resize']).convert('RGBA')
                    # Appends to its respective frame list
                    all_effects[effect]['frames'].append(frame)
        # Loops through the frames based on the amount of frames of the longest effect.
        longest_gif = max([len(frames['frames']) for frames in all_effects.values()])
        # cycle() lets shorter effects repeat while the longest one finishes.
        for efx in all_effects.keys():
            all_effects[efx]['frames'] = cycle(all_effects[efx]['frames'])
        for i in range(longest_gif):
            # Gets a frame of each effect in each iteration of the loop
            base = gif.new_frame()
            # Yields control so the event loop stays responsive while rendering.
            await asyncio.sleep(0)
            for efx, value in all_effects.items():
                # print(all_effects[efx]['cords'])
                cords = all_effects[efx]['cords']
                frame = next(all_effects[efx]['frames'])
                # print(efx, frame)
                base.paste(frame, cords, frame)
            gif.add_frame(base)
            # Hard cap on frame count to bound file size/render time.
            if i >= 400:
                break
        else:
            # for/else: export only when the loop completed without hitting the cap.
            gif.export(gif_file_path)
    except Exception as e:
        print(e)
        pass
    finally:
        # NOTE(review): returning from finally swallows any in-flight exception;
        # the path is returned even if the GIF was never exported — confirm
        # callers tolerate a missing file.
        return gif_file_path
@commands.command()
@commands.has_permissions(administrator=True)
async def add_money(self, ctx, member: discord.Member = None, money: int = None) -> None:
    """ (ADM) Adds money to a member.
    :param member: The member to add money to.
    :param money: The amount of money to add. """
    # Both arguments are mandatory; bail out early with a short-lived hint.
    if not member:
        return await ctx.send("**Inform a member!**", delete_after=3)
    if not money:
        return await ctx.send("**Inform an amount of money!**", delete_after=3)
    await self.update_user_money(member.id, money)
    return await ctx.send(f"**{money} added to {member.name}'s bank account!**", delete_after=5)
# Google Drive commands
@commands.command()
@commands.has_permissions(administrator=True)
async def download_update(self, ctx=None, rall: str = 'no'):
    """ (ADM) Downloads all shop images from the Google Drive.
    :param ctx: The context; may be None when invoked internally.
    :param rall: If 'yes', wipes the local image cache and re-downloads everything. """
    if ctx:
        await ctx.message.delete()
    drive = await the_drive()
    # 'yes' forces a full refresh by removing the local cache first.
    if rall.lower() == 'yes':
        try:
            shutil.rmtree('./sloth_custom_images')
        except Exception:
            pass
    # Google Drive folder IDs, keyed by the local category folder name.
    all_folders = {"background": "1V8l391o3-vsF9H2Jv24lDmy8e2erlHyI",
        "sloth": "16DB_lNrnrmvxu2E7RGu01rQGQk7z-zRy",
        "body": "1jYvG3vhL32-A0qDYn6lEG6fk_GKYDXD7",
        "hand": "1ggW3SDVzTSY5b8ybPimCsRWGSCaOBM8d",
        "hud": "1-U6oOphdMNMPhPAjRJxJ2E6KIzIbewEh",
        "badge": "1k8NRfwwLzIY5ALK5bUObAcrKr_eUlfjd",
        "foot": "1Frfra1tQ49dKM6Dg4DIbrfYbtXadv9zj",
        "head": "1Y9kSOayw4NDehbqfmvPXKZLrXnIjeblP",
        "pet": "1BthM5C9Gs2OkCJLzYQNwg1gMQky5v-pn"
        }
    categories = ['background', 'sloth', 'body', 'hand', 'hud', 'badge', 'foot', 'head', 'pet']
    # Ensures every local category folder exists before downloading into it.
    for category in categories:
        try:
            os.makedirs(f'./sloth_custom_images/{category}')
            print(f"{category} folder made!")
        except FileExistsError:
            pass
    for folder, folder_id in all_folders.items():
        files = drive.ListFile({'q': "'%s' in parents and trashed=false" % folder_id}).GetList()
        download_path = f'./sloth_custom_images/{folder}'
        for file in files:
            # Skips files that are already cached locally.
            isFile = os.path.isfile(f"{download_path}/{file['title']}")
            if not isFile:
                # Best-effort download: a single failed file doesn't abort the run.
                try:
                    output_file = os.path.join(download_path, file['title'])
                    temp_file = drive.CreateFile({'id': file['id']})
                    temp_file.GetContentFile(output_file)
                except:
                    pass
    if ctx:
        return await ctx.send("**Download update is done!**", delete_after=5)
# Google Drive commands
@commands.command()
@commands.has_permissions(administrator=True)
async def text_download_update(self, ctx=None, rall: str = 'no'):
    """ (ADM) Downloads all texts from the GoogleDrive and stores in the bot's folder.
    :param ctx: The context; may be None when invoked internally.
    :param rall: If 'yes', wipes the local texts cache and re-downloads everything. """
    # 'yes' forces a full refresh by removing the local cache first.
    if rall.lower() == 'yes':
        try:
            shutil.rmtree('./texts')
        except Exception as e:
            pass
    drive = await the_drive()
    # Google Drive folder IDs, keyed by the local category folder name.
    all_text_folders = {"languages": "1_gBiliWPrCj5cLpChQfg9QRnj8skQVHM"}
    text_categories = ["languages"]
    # Ensures every local category folder exists before downloading into it.
    for t_category in text_categories:
        try:
            os.makedirs(f'./texts/{t_category}')
            print(f"{t_category} folder made!")
        except FileExistsError:
            pass
    for folder, folder_id in all_text_folders.items():
        files = drive.ListFile({'q': "'%s' in parents and trashed=false" % folder_id}).GetList()
        download_path = f'./texts/{folder}'
        for file in files:
            # Skips files that are already cached locally.
            isFile = os.path.isfile(f"{download_path}/{file['title']}")
            if not isFile:
                # Best-effort download: a single failed file doesn't abort the run.
                try:
                    output_file = os.path.join(download_path, file['title'])
                    temp_file = drive.CreateFile({'id': file['id']})
                    temp_file.GetContentFile(output_file)
                except:
                    pass
    if ctx:
        return await ctx.send("**Download update is done!**")
@commands.command()
@commands.has_permissions(administrator=True)
async def list_folder(self, ctx, image_suffix: str = None, item_name: str = None):
    """ (ADM) Lists a shop image folder from Google Drive.
    :param image_suffix: The image/folder category.
    :param item_name: Currently unused; kept for call compatibility. """
    await ctx.message.delete()
    # Google Drive folder IDs, keyed by category name.
    all_folders = {"background": "1V8l391o3-vsF9H2Jv24lDmy8e2erlHyI",
        "sloth": "16DB_lNrnrmvxu2E7RGu01rQGQk7z-zRy",
        "body": "1jYvG3vhL32-A0qDYn6lEG6fk_GKYDXD7",
        "hand": "1ggW3SDVzTSY5b8ybPimCsRWGSCaOBM8d",
        "hud": "1-U6oOphdMNMPhPAjRJxJ2E6KIzIbewEh",
        "badge": "1k8NRfwwLzIY5ALK5bUObAcrKr_eUlfjd",
        "foot": "1Frfra1tQ49dKM6Dg4DIbrfYbtXadv9zj",
        "head": "1Y9kSOayw4NDehbqfmvPXKZLrXnIjeblP"}
    drive = await the_drive()
    # Without a category, dump every folder's file list to the console only.
    if not image_suffix:
        for folder, folder_id in all_folders.items():
            files = drive.ListFile({'q': "'%s' in parents and trashed=false" % folder_id}).GetList()
            print(f"\033[35mCategory:\033[m {folder}")
            for file in files:
                print(
                    f"\033[34mItem name:\033[m \033[33m{file['title']:<35}\033[m | \033[34mID: \033[m\033[33m{file['id']}\033[m")
    else:
        # With a category, answer in-channel with an embed (and echo to console).
        for key, item in all_folders.items():
            if image_suffix == key:
                embed = discord.Embed(title=f"Category: {image_suffix}", color=discord.Color.dark_green(),
                    timestamp=ctx.message.created_at)
                files = drive.ListFile({'q': "'%s' in parents and trashed=false" % item}).GetList()
                print(f"\033[35mCategory:\033[m {image_suffix}")
                for file in files:
                    embed.add_field(name=f"Name: {file['title']}", value=f"ID: {file['id']}", inline=False)
                    print(
                        f"\033[34mItem name:\033[m \033[33m{file['title']:<35}\033[m | \033[34mID: \033[m\033[33m{file['id']}\033[m")
                return await ctx.send(embed=embed)
        else:
            # for/else: only reached when no category key matched.
            return await ctx.send("**Category not found!**", delete_after=3)
async def exchange(self, ctx, cmsg, message_times, ctime, time_times):
    """ Exchange your status into leaves (łł).
    :param ctx: The context of the command.
    :param cmsg: The amount of leaves gotten from messages.
    :param message_times: The amount of loops it took to get to the messages result.
    :param ctime: The amount of leaves gotten from time.
    :param time_times: The amount of loops it took to get to the time result. """
    embed = discord.Embed(title="Exchange", color=ctx.author.color, timestamp=ctx.message.created_at)
    embed.set_author(name=ctx.author, url=ctx.author.display_avatar)
    # Each exchange loop converted 50 messages / 1800 seconds into leaves;
    # only non-empty conversions are reported.
    fields = []
    if cmsg > 0:
        fields.append(("__**Messages:**__", f"Exchanged `{message_times * 50}` messages for `{cmsg}`łł;"))
    if ctime > 0:
        fields.append(("__**Time:**__", f"Exchanged `{(time_times * 1800) / 60}` minutes for `{ctime}`łł;"))
    for field_name, field_value in fields:
        embed.add_field(name=field_name, value=field_value, inline=False)
    return await ctx.send(embed=embed)
async def convert_messages(self, user_message: int) -> Tuple[int, int]:
    """ Converts the user's message counter to leaves (łł).
    Every full chunk of 50 messages is worth 2 leaves.
    :param user_message: The message counter the user has.
    :return: (exchanged_money, times) — the leaves earned and the number
    of 50-message chunks exchanged. """
    # Plain arithmetic replaces the original one-chunk-per-iteration loop
    # (which also needlessly awaited asyncio.sleep(0) on every chunk).
    # max(0, ...) preserves the old behavior of returning (0, 0) for
    # non-positive counters.
    times = max(0, user_message) // 50
    exchanged_money = times * 2
    return exchanged_money, times
async def convert_time(self, user_time: int) -> Tuple[int, int]:
    """ Converts the user's time counter to leaves (łł).
    Every full chunk of 1800 seconds (30 minutes) is worth 2 leaves.
    :param user_time: The amount of time in seconds the user has.
    :return: (exchanged_money, times) — the leaves earned and the number
    of 30-minute chunks exchanged. """
    # Plain arithmetic replaces the original one-chunk-per-iteration loop
    # (which also needlessly awaited asyncio.sleep(0) on every chunk).
    # max(0, ...) preserves the old behavior of returning (0, 0) for
    # non-positive counters.
    times = max(0, user_time) // 1800
    exchanged_money = times * 2
    return exchanged_money, times
@commands.command()
@commands.has_permissions(administrator=True)
async def add_message(self, ctx, member: discord.Member = None, add_message: int = None):
    """ (ADM) Adds messages to the member's status.
    :param member: The member to add the messages to.
    :param add_message: The amount of messages to add. """
    if not add_message:
        return await ctx.send(f"**Inform an amount of messages to add!**", delete_after=3)
    # Defaults to the command author when no member is specified.
    member = member or ctx.author
    await self.update_user_server_messages(member.id, add_message)
    return await ctx.send(f"Added {add_message} messages to {member}")
@commands.command()
@commands.has_permissions(administrator=True)
async def add_time(self, ctx, member: discord.Member = None, add_time: int = None):
    """ (ADM) Adds time to the member's status.
    :param member: The member to add time to.
    :param add_time: The amount of time to add. (in secs) """
    if not add_time:
        return await ctx.send(f"**Inform an amount of seconds to add!**", delete_after=3)
    # Defaults to the command author when no member is specified.
    member = member or ctx.author
    await self.update_user_server_time(member.id, add_time)
    return await ctx.send(f"Added {add_time} seconds to {member}")
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
@Player.poisoned()
async def transfer(self, ctx, member: discord.Member = None, money: int = None):
    """ Transfers money from one member to another member.
    :param member: The member to transfer the money to.
    :param money: The amount of money to transfer.
    * Cooldown: 10 secs. """
    # Input validation: target given, not self, positive amount.
    if not member:
        return await ctx.send('**Inform the member!**', delete_after=3)
    elif member.id == ctx.author.id:
        return await ctx.send("**You can't transfer money to yourself!**", delete_after=3)
    elif not money:
        return await ctx.send('**Inform the amount of money to transfer!**', delete_after=3)
    elif not int(money) > 0:
        return await ctx.send('**Inform value bigger than 0!**', delete_after=3)
    the_user = await self.get_user_currency(ctx.author.id)
    target_user = await self.get_user_currency(member.id)
    # Both sides need an existing account.
    if not the_user:
        view = discord.ui.View()
        view.add_item(discord.ui.Button(style=5, label="Create Account", emoji="🦥", url="https://thelanguagesloth.com/profile/update"))
        return await ctx.send(
            embed=discord.Embed(description=f"**{member.mention}, you don't have an account yet. Click [here](https://thelanguagesloth.com/profile/update) to create one, or in the button below!**"),
            view=view)
    elif not target_user:
        return await ctx.send(f"**{member} does not have a bank account yet!**", delete_after=5)
    # the_user[0][1] is the sender's current balance.
    if the_user[0][1] < int(money):
        return await ctx.send(f"You don't have {money}łł!")
    # A 'wire' skill action targeting the sender siphons part of the transfer
    # to the member who placed the wire.
    SlothClass = self.client.get_cog('SlothClass')
    wired_user = await SlothClass.get_skill_action_by_target_id_and_skill_type(
        target_id=ctx.author.id, skill_type='wire')
    if wired_user:
        siphon_percentage = 35
        cybersloth_money = round((money*siphon_percentage)/100)
        # Sender pays the full amount; the receiver gets the remainder and the
        # wirer (wired_user[0]) gets the siphoned cut.
        await self.update_user_money(member.id, money-cybersloth_money)
        await self.update_user_money(ctx.author.id, -money)
        await self.update_user_money(wired_user[0], cybersloth_money)
        description: str = f"{ctx.author.mention} tried to transfer `{money}łł` to {member.mention}, "\
            f"and <@{wired_user[0]}> siphoned off `{siphon_percentage}%` of it; `{cybersloth_money}łł`! "\
            f"So {member.mention} actually got `{money-cybersloth_money}łł`!"
        await ctx.send(
            content=f"{ctx.author.mention}, {member.mention}, <@{wired_user[0]}>",
            embed=discord.Embed(
                title="__Intercepted Transfer__",
                description=description,
                color=ctx.author.color,
                timestamp=ctx.message.created_at)
        )
    else:
        # Plain transfer: debit sender, credit receiver.
        await self.update_user_money(member.id, money)
        await self.update_user_money(ctx.author.id, -money)
        await ctx.send(f"**{ctx.author.mention} transferred {money}łł to {member.mention}!**")
@commands.command(aliases=["farming_status", "farmingstatus", "farm", "farmstatus", "farmstats", "farm_status", "farm_stats"])
@commands.cooldown(1, 5, commands.BucketType.user)
async def farming(self, ctx, member: discord.Member = None) -> None:
""" Checks the farming status of a specific member.
:param member: The member of the checking. """
allowed_roles = [int(os.getenv('OWNER_ROLE_ID', 123)), int(os.getenv('ADMIN_ROLE_ID', 123)), int(os.getenv('MOD_ROLE_ID', 123)), *patreon_roles.keys(), int(os.getenv('SLOTH_LOVERS_ROLE_ID', 123))]
allowed: bool = await utils.is_allowed(allowed_roles).predicate(ctx)
if member and not allowed:
return await ctx.send("**You can't do that!**")
author: discord.Member = ctx.author
if not member:
member = author
embed = discord.Embed(
title="__Farm Checking__",
color=author.color,
timestamp=ctx.message.created_at
)
embed.set_footer(text=f"Requested by: {author}", icon_url=author.display_avatar)
user_activity = await self.get_user_activity_info(member.id)
user_activity = user_activity[0][3] if user_activity else None
member_voice: VoiceState = member.voice
vc: discord.VoiceChannel = member_voice.channel if member_voice else None
smute, mute = member_voice.mute if member_voice else False, member_voice.self_mute if member_voice else False
sdeaf, deaf = member_voice.deaf if member_voice else False, member_voice.self_deaf if member_voice else False
embed.description = """
🍃 - Farming
<:vc:914947524178116649> - Joined VC Timestamp
"""
alts = await self.client.get_cog('Moderation').get_fake_accounts(member.id)
alts_list: List[int] = []
for alt in alts:
alts_list.append(alt[0])
alts_list.append(alt[1])
alts = list(set(alts_list))
try:
alts.remove(member.id)
except ValueError:
pass
vc_members: List[int] = len([m for m in vc.members if m.id not in alts]) if vc else 0
is_farming = True if vc and not mute and not deaf and not smute and not sdeaf and vc_members > 1 else False
embed.add_field(
name="__Checking__:",
value= f"**Member:** {member.mention}\n" \
f"<:server_muted:914943052156665919> `{smute}` | <:muted:914943036931326054> `{mute}`\n" \
f"<:server_deafened:914943091599880203> `{sdeaf}` | <:deafened:914943073119772683> `{deaf}`\n" \
f"🍃 `{is_farming}` ({vc.mention if vc else '`No VC`'})\n" \
f"<:vc:914947524178116649> `{user_activity}` ({f'<t:{user_activity}:R>' if user_activity else '`None`'})" \
, inline=False)
embed.set_thumbnail(url=member.display_avatar)
await ctx.send(embed=embed)
@commands.command(aliases=['balance', 'bal', 'fric'])
@commands.cooldown(1, 3, commands.BucketType.user)
async def money(self, ctx, member: Optional[Union[discord.Member, discord.User]] = None) -> None:
    """ Shows the user's money.
    :param member: The member from whom to show the money. [Optional][Default = You] """
    # Defaults to the command author when no member is specified.
    member = member or ctx.author
    user_currency = await self.get_user_currency(member.id)
    if not user_currency:
        return await ctx.send(f"**User doesn't have a Sloth Account, {ctx.author.mention}!**")
    # user_currency[0][1] holds the balance.
    balance_embed = discord.Embed(
        description=f"**{member.mention}'s money: {user_currency[0][1]} 🍃**",
        color=member.color
    )
    await ctx.send(embed=balance_embed)
def setup(client) -> None:
    """ Cog-loading entry point used by the bot's extension loader. """
    client.add_cog(SlothCurrency(client))
|
#!/usr/bin/env python
# -*- coding: iso-8859-2 -*-
# Python 2 setup script for the ``cassango`` package — note the py2-only
# ``except IOError, err`` syntax below; do not run under Python 3.
from ez_setup import use_setuptools
use_setuptools()
import codecs
import os
import sys
import cassango as cngo
from setuptools import setup, find_packages
# The long description comes from the README next to this file; a missing
# README aborts the build with a message on stderr.
readme_file = os.path.join(os.path.dirname(__file__), 'README.rst')
try:
    long_description = open(readme_file).read()
except IOError, err:
    sys.stderr.write("[ERROR] Cannot find file specified as ""``long_description`` (%s)\n" % readme_file)
    sys.exit(1)
setup(
    name = 'cassango',
    # __version__ is a sequence of ints, joined into a dotted version string.
    version='.'.join(map(str, cngo.__version__)),
    author = cngo.__author__,
    author_email = cngo.__contact__,
    url = 'http://github.com/jabadabadu/cassango',
    description= 'Django cassandra Engine, the cassandra backend for Django',
    long_description = long_description,
    # Packages are discovered under the 'cassango' source directory.
    packages = find_packages('cassango'),
    package_dir = {'':'cassango'},
    package_data = {'':['*.py']},
    include_package_data = True,
    scripts = [],
    requires = [],
    license = 'BSD License',
    install_requires = [
        'Django == 1.3',
        'pycassa == 1.9.0',
    ],
    classifiers = [
        'Development Status :: 2 - Pre-Alpha',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Topic :: Database',
        'Topic :: Database :: Database Engines/Servers',
        'Topic :: Internet',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Utilities',
    ],
    keywords = 'django, cassandra, orm, nosql, database, python',
)
|
from traitlets import Tuple, Dict
class DefaultDict(Dict):
    def __init__(self, traits, **kwargs):
        """Create a dict trait type from a Python dict.
        Parameters
        ----------
        traits : Dictionary of trait types [ optional ]
            A Python dictionary containing the types that are valid for
            restricting the content of the Dict Container for certain keys.
        """
        # Seed the default value from each per-key trait's own default.
        default_value = {key: trait.default_value for key, trait in traits.items()}
        super(DefaultDict, self).__init__(traits=traits, default_value=default_value, **kwargs)
        self.default_value = default_value
class DefaultTuple(Tuple):
    def __init__(self, *traits, **kwargs):
        """Create a tuple from a list, set, or tuple.
        Create a fixed-type tuple with Traits:
        ``t = Tuple(Int(), Str(), CStr())``
        would be length 3, with Int,Str,CStr for each element.
        Parameters
        ----------
        `*traits` : TraitTypes
            the types for restricting the contents of the DefaultTuple. Each
            positional argument corresponds to an element of the tuple.
            DefaultTuples are of fixed length.
        """
        # Seed the default value from each positional trait's own default.
        default_value = [trait.default_value for trait in traits]
        super(DefaultTuple, self).__init__(*traits, default_value=default_value, **kwargs)
        self.default_value = default_value
|
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: justice_py_sdk_codegen/__main__.py
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from typing import Any, Dict, List, Optional, Tuple, Union
from ....core import HeaderStr
from ....core import get_namespace as get_services_namespace
from ....core import run_request
from ....core import run_request_async
from ....core import same_doc_as
from ..models import AcceptAgreementRequest
from ..models import AcceptAgreementResponse
from ..models import ErrorEntity
from ..models import PagedRetrieveUserAcceptedAgreementResponse
from ..models import RetrieveAcceptedAgreementResponse
from ..operations.agreement import AcceptVersionedPolicy
from ..operations.agreement import BulkAcceptVersionedPolicy
from ..operations.agreement import ChangePreferenceConsent
from ..operations.agreement import ChangePreferenceConsent1
from ..operations.agreement import IndirectBulkAcceptVersionedPolicy1
from ..operations.agreement import IndirectBulkAcceptVersionedPolicyV2
from ..operations.agreement import RetrieveAcceptedAgreements
from ..operations.agreement import RetrieveAgreementsPublic
from ..operations.agreement import RetrieveAllUsersByPolicyVersion
@same_doc_as(AcceptVersionedPolicy)
def accept_versioned_policy(localized_policy_version_id: str, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Build the operation object and execute it synchronously.
    req = AcceptVersionedPolicy.create(localized_policy_version_id=localized_policy_version_id)
    return run_request(req, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(AcceptVersionedPolicy)
async def accept_versioned_policy_async(localized_policy_version_id: str, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Async variant: identical request construction, awaited execution.
    req = AcceptVersionedPolicy.create(localized_policy_version_id=localized_policy_version_id)
    return await run_request_async(req, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(BulkAcceptVersionedPolicy)
def bulk_accept_versioned_policy(body: Optional[List[AcceptAgreementRequest]] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Build the bulk-accept operation and run it synchronously.
    req = BulkAcceptVersionedPolicy.create(body=body)
    return run_request(req, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(BulkAcceptVersionedPolicy)
async def bulk_accept_versioned_policy_async(body: Optional[List[AcceptAgreementRequest]] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Async variant of bulk_accept_versioned_policy.
    req = BulkAcceptVersionedPolicy.create(body=body)
    return await run_request_async(req, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(ChangePreferenceConsent)
def change_preference_consent(user_id: str, body: Optional[List[AcceptAgreementRequest]] = None, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Fall back to the SDK-configured namespace when the caller omits it.
    if namespace is None:
        namespace, error = get_services_namespace()
        if error:
            return None, error
    req = ChangePreferenceConsent.create(user_id=user_id, body=body, namespace=namespace)
    return run_request(req, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(ChangePreferenceConsent)
async def change_preference_consent_async(user_id: str, body: Optional[List[AcceptAgreementRequest]] = None, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Fall back to the SDK-configured namespace when the caller omits it.
    if namespace is None:
        namespace, error = get_services_namespace()
        if error:
            return None, error
    req = ChangePreferenceConsent.create(user_id=user_id, body=body, namespace=namespace)
    return await run_request_async(req, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(ChangePreferenceConsent1)
def change_preference_consent_1(body: Optional[List[AcceptAgreementRequest]] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Variant of change_preference_consent without a user id / namespace.
    req = ChangePreferenceConsent1.create(body=body)
    return run_request(req, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(ChangePreferenceConsent1)
async def change_preference_consent_1_async(body: Optional[List[AcceptAgreementRequest]] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Async variant of change_preference_consent_1.
    req = ChangePreferenceConsent1.create(body=body)
    return await run_request_async(req, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(IndirectBulkAcceptVersionedPolicy1)
def indirect_bulk_accept_versioned_policy_1(user_id: str, body: Optional[List[AcceptAgreementRequest]] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Accept policies on behalf of another user.
    req = IndirectBulkAcceptVersionedPolicy1.create(user_id=user_id, body=body)
    return run_request(req, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(IndirectBulkAcceptVersionedPolicy1)
async def indirect_bulk_accept_versioned_policy_1_async(user_id: str, body: Optional[List[AcceptAgreementRequest]] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Async variant of indirect_bulk_accept_versioned_policy_1.
    req = IndirectBulkAcceptVersionedPolicy1.create(user_id=user_id, body=body)
    return await run_request_async(req, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(IndirectBulkAcceptVersionedPolicyV2)
def indirect_bulk_accept_versioned_policy_v2(client_id: str, country_code: str, user_id: str, body: Optional[List[AcceptAgreementRequest]] = None, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Fall back to the SDK-configured namespace when the caller omits it.
    if namespace is None:
        namespace, error = get_services_namespace()
        if error:
            return None, error
    req = IndirectBulkAcceptVersionedPolicyV2.create(client_id=client_id, country_code=country_code, user_id=user_id, body=body, namespace=namespace)
    return run_request(req, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(IndirectBulkAcceptVersionedPolicyV2)
async def indirect_bulk_accept_versioned_policy_v2_async(client_id: str, country_code: str, user_id: str, body: Optional[List[AcceptAgreementRequest]] = None, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Fall back to the SDK-configured namespace when the caller omits it.
    if namespace is None:
        namespace, error = get_services_namespace()
        if error:
            return None, error
    req = IndirectBulkAcceptVersionedPolicyV2.create(client_id=client_id, country_code=country_code, user_id=user_id, body=body, namespace=namespace)
    return await run_request_async(req, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(RetrieveAcceptedAgreements)
def retrieve_accepted_agreements(user_id: str, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Fetch the agreements a specific user has accepted.
    req = RetrieveAcceptedAgreements.create(user_id=user_id)
    return run_request(req, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(RetrieveAcceptedAgreements)
async def retrieve_accepted_agreements_async(user_id: str, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Async variant of retrieve_accepted_agreements.
    req = RetrieveAcceptedAgreements.create(user_id=user_id)
    return await run_request_async(req, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(RetrieveAgreementsPublic)
def retrieve_agreements_public(x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # This operation takes no parameters beyond headers.
    return run_request(RetrieveAgreementsPublic.create(), additional_headers=x_additional_headers, **kwargs)
@same_doc_as(RetrieveAgreementsPublic)
async def retrieve_agreements_public_async(x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Async variant of retrieve_agreements_public.
    return await run_request_async(RetrieveAgreementsPublic.create(), additional_headers=x_additional_headers, **kwargs)
@same_doc_as(RetrieveAllUsersByPolicyVersion)
def retrieve_all_users_by_policy_version(policy_version_id: str, keyword: Optional[str] = None, limit: Optional[int] = None, offset: Optional[int] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Paged query: keyword/limit/offset are optional filters.
    req = RetrieveAllUsersByPolicyVersion.create(policy_version_id=policy_version_id, keyword=keyword, limit=limit, offset=offset)
    return run_request(req, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(RetrieveAllUsersByPolicyVersion)
async def retrieve_all_users_by_policy_version_async(policy_version_id: str, keyword: Optional[str] = None, limit: Optional[int] = None, offset: Optional[int] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Async variant of retrieve_all_users_by_policy_version.
    req = RetrieveAllUsersByPolicyVersion.create(policy_version_id=policy_version_id, keyword=keyword, limit=limit, offset=offset)
    return await run_request_async(req, additional_headers=x_additional_headers, **kwargs)
|
# Read N (number of sticks) and N stick lengths; print "Yes" if the combined
# length of all the other sticks exceeds the longest one, else "No".
n = int(input())  # read (and consume) the count line, as in the original
lengths = list(map(int, input().split()))
longest = max(lengths)
# Use the builtin sum() -- the original shadowed it with a local accumulator.
others = sum(lengths) - longest
print("Yes" if others > longest else "No")
|
from __future__ import division, print_function, absolute_import
from warnings import warn
import celerite
from . import numbers as nb
import numpy as np
from . import ranges
from scipy.optimize import minimize
class QuiescenceModel(celerite.GP):
    """
    An extension of the `celerite` Gaussian Process model object for use in modeling the quiescence variations of a
    stellar lightcurve as correlated noise.
    """
    def __init__(self, t, f, e, tau_min=100., tau_logprior=None, params=None, mask=None, white_limit=2.0):
        """
        Create a QuiescenceModel object that models lightcurve fluctuations as correlated noise with a Gaussian Process
        (GP) where correlations decay as exp(-t/tau) where tau is a constant. Specifically, elements of the covariance
        matrix are parameterized as a*exp(-c*t), where ln(a), ln(c), and the mean data value, mu, are parameters of
        the model.

        An important deviation from a typical GP is that the QuiescenceModel enforces a penalty for "jagged" models
        to promote smoothness. Specifically, the penalty is based on the power of the model at frequencies of 0.1 Hz.

        Parameters
        ----------
        t, f, e : arrays
            Time, flux, and error of data points.
        tau_min : float
            Minimum correlation time to consider in GP fit. Default
            is 100 s. For no limit, simply set to 0.
        tau_logprior : function, optional
            Prior on the correlation time. Should accept a single
            value for tau as input and return the natural
            logarithm of the likelihood of that value.
        params : list
            Parameters of the GP covariance model, a * exp(-c*t).
            These are log(a), log(c), and the (constant) mean, mu.
        mask : bool array, optional
            mask of data points where a value of True indicates a
            point that should be excluded from fitting (e.g., because
            it is in a flare or suspect region). If None, assume
            all data should be included.
        white_limit : float
            Likelihood ratio below which a white noise model should
            be used instead of a correlated noise model. Default is
            2.0 to give preference for the simpler model even if a
            GP gives a slightly better fit.
        """
        # set up the celerite GP with an exponential (RealTerm) kernel;
        # initial variance comes from the data and log_c=-7 starts with a
        # long correlation time
        terms = celerite.terms
        kernel = terms.RealTerm(log_a=np.log(np.var(f)), log_c=-7.)
        super(QuiescenceModel, self).__init__(kernel, mean=np.median(f), fit_mean=True)
        # initialize parameters of GP if desired
        if params is not None:
            self.set_parameter_vector(params)
        # set data mask (True = point excluded from fits elsewhere; here an
        # all-True mask means "use everything")
        if mask is not None:
            self.mask = mask
        else:
            self.mask = np.ones(len(t), bool)
        # store other attributes
        self.tau_min = tau_min
        self.tau_logprior = tau_logprior
        self.t, self.f, self.e = t, f, e
        self.n = len(self.t)
        self.fit_params = None
        self.white_limit = white_limit
        # log_c == inf encodes "pure white noise" throughout this class
        self.white = (self.get_parameter('kernel:log_c') == np.inf)
        self.quick_compute()
    def tau_loglike(self, tau):
        """
        Log likelihood of tau values based on lower limit and any user-defined prior.
        """
        if tau < self.tau_min:
            return -np.inf
        if self.tau_logprior is None:
            return 0.0
        else:
            return self.tau_logprior(tau)
    def smoothness_penalty(self, params):
        """
        Cost function for power at 0.1 Hz frequencies. Exact form was set by trial and error.

        Parameters
        ----------
        params : list
            Parameters of the GP covariance model, a * exp(-c*t).
            These are log(a), log(c), and the (constant) mean, mu.
        """
        self.set_parameter_vector(params)
        # PSD evaluated at angular frequency 2*pi/10, i.e. 0.1 Hz
        hi_freq_power = self.kernel.get_psd(2*np.pi/10)
        if hi_freq_power == 0:
            return 0.0
        if hi_freq_power == np.inf or np.isnan(hi_freq_power):
            return -np.inf
        else:
            # penalty scales with the number of unmasked points and with the
            # high-frequency amplitude relative to the data scatter
            return -5*np.sum(self.mask)*np.sqrt(hi_freq_power)/np.std(self.f[self.mask])
    def quick_compute(self):
        """
        Quickly recompute GP intermediate values (i.e. following a change in the parameters).
        """
        super(QuiescenceModel, self).compute(self.t[self.mask], self.e[self.mask])
    def _get_set_mask(self, mask=None):
        # Helper: return the current mask, or store and return a new one.
        if mask is None:
            return self.mask
        else:
            self.mask = mask
            return mask
    def log_likelihood(self, params):
        """
        Log likelihood of the GP model based on the data likelihood and prior on tau. *Smoothness penalty is not applied
        here* so this can be used for sampling the posterior of parameters without that associated bias.

        Note: overrides celerite.GP.log_likelihood with a different signature
        (takes the parameter vector, not the data). Assumes `params` is an
        ndarray (fancy indexing with [[0, 2]] below).

        Parameters
        ----------
        params : list
            Parameters of the GP covariance model, a * exp(-c*t).
            These are log(a), log(c), and the (constant) mean, mu.

        Returns
        -------
        Natural log of the likelihood of the data given the model and priors on parameters.
        """
        self.set_parameter_vector(params)
        if self.white:
            # drop log_c: the white-noise model only uses log(a) and mu
            return self.log_likelihood_white_noise(params[[0,2]])
        else:
            data_loglike = super(QuiescenceModel, self).log_likelihood(self.f[self.mask])
            return data_loglike + self.tau_loglike(self.tau())
    def cost(self, params):
        """
        Log likelihood of the GP model including data likelihood, prior on tau, and smoothness penalty. This is
        what is actually used for the fitting.

        Parameters
        ----------
        params : list
            Parameters of the GP covariance model, a * exp(-c*t).
            These are log(a), log(c), and the (constant) mean, mu.

        Returns
        -------
        Natural log of the likelihood of the data given the model, priors on parameters, and smoothness penalty.
        """
        # negated so it can be passed directly to scipy.optimize.minimize
        return -(self.log_likelihood(params) + self.smoothness_penalty(params))
    def log_likelihood_white_noise(self, log_sig2_and_mu):
        """
        Log likelihood of a constant mean + white noise model.

        Parameters
        ----------
        log_sig2_and_mu : list
            Natural log of the "extra" variance and the constant mean.

        Returns
        -------
        Natural log of the likelihood of the data given the model.
        """
        # log_c = inf makes the kernel's correlation time zero (white noise)
        self.set_parameter_vector([log_sig2_and_mu[0], np.inf, log_sig2_and_mu[1]])
        return super(QuiescenceModel, self).log_likelihood(self.f[self.mask])
    def log_likelihood_no_noise(self, mu):
        """
        Log likelihood of a model with no noise and constant mean.

        Parameters
        ----------
        mu : float
            value of constant mean

        Returns
        -------
        Natural log of the likelihood of the data given the model.
        """
        # log_a = -inf zeroes the kernel amplitude; only measurement errors remain
        self.set_parameter_vector([-np.inf, np.inf, mu])
        return super(QuiescenceModel, self).log_likelihood(self.f[self.mask])
    def fit(self, mask=None, method='Nelder-Mead'):
        """
        Perform a max-likelihood fit of the QuiescenceModel to the data.

        Parameters
        ----------
        mask : bool array
            Mask identifying data to be used in the fit.
        method : str
            numerical method to use in minimizing the likelihood function
            (of those allowable by scipy.optimize.minimize)

        Returns
        -------
        None. Fit is performed in-place and parameters of the QuiescenceModel object set to the best-fit values.
        """
        self._get_set_mask(mask)
        self.quick_compute()
        guess = self.get_parameter_vector()
        if self.white:
            # replace the inf log_c from a previous white-noise fit with a
            # finite starting point so the GP fit can move
            guess[1] = -7
        soln = minimize(self.cost, guess, method=method)
        soln_white = minimize(lambda params: -self.log_likelihood_white_noise(params), guess[[0,2]], method=method)
        if not (soln.success and soln_white.success):
            raise ValueError('Gaussian process fit to quiescence did not converge. Perhaps try a different minimize '
                             'method or different initial parameters.')
        # soln_white.fun is the *negative* white-noise log likelihood, so this
        # is loglike_GP - loglike_white < log(white_limit): prefer the simpler
        # white-noise model unless the GP wins by at least the likelihood ratio
        if soln_white.fun + self.log_likelihood(soln.x) < np.log(self.white_limit):
            self.fit_params = np.array([soln_white.x[0], np.inf, soln_white.x[1]])
            self.white = True
        else:
            self.fit_params = soln.x
            self.white = False
        self.set_to_best_fit()
    def fit_with_frozen_tau(self, mask=None, method='Nelder-Mead'):
        """
        Perform a max-likelihood fit of the QuiescenceModel to the data holding the time constant, tau, constant.

        Parameters
        ----------
        mask : bool array
            Mask identifying data to be used in the fit.
        method : str
            numerical method to use in minimizing the likelihood function
            (of those allowable by scipy.optimize.minimize)

        Returns
        -------
        None. Fit is performed in-place and parameters of the QuiescenceModel object set to the best-fit values.
        """
        self._get_set_mask(mask)
        self.quick_compute()
        logc = self.get_parameter('kernel:log_c')
        if self.white:
            def cost(params):
                return -self.log_likelihood_white_noise([params[0], params[1]])
        else:
            def cost(params):
                # self.cost is already negated for minimization
                return self.cost([params[0], logc, params[1]])
        soln = minimize(cost, [np.log(np.var(self.f)), np.median(self.f)], method=method)
        assert soln.success
        self.fit_params = np.array([soln.x[0], logc, soln.x[1]])
        self.set_to_best_fit()
    def _get_params_boiler(self, params):
        """Boilerplate code for getting the current model parameters as an array."""
        if params is None:
            params = self.get_parameter_vector()
        # reshape so vectorized param sets (N x 3) and single sets both work
        return np.reshape(params, [-1,3])
    def tau(self, params=None):
        """
        Value of the decay constant tau in the covariance model sigma**2 * exp(-t/tau). If the model is set
        to white noise, this will be 0.

        Parameters
        ----------
        params : list
            Parameters of the GP covariance model, a * exp(-c*t).
            These are log(a), log(c), and the (constant) mean, mu.
            If None, the parameters currently set in the QuiescenceModel
            are used.
        """
        params = self._get_params_boiler(params)
        # tau = 1/c = exp(-log_c)
        return np.squeeze(np.exp(-params[:,1]))
    def sigma(self, params=None):
        """
        Value of the standard deviation in the covariance model sigma**2 * exp(-t/tau). If the model is set
        to white noise, then technically this is sigma in sigma**2 * delta(t) where delta is the Dirac delta function.

        Parameters
        ----------
        params : list
            Parameters of the GP covariance model, a * exp(-c*t).
            These are log(a), log(c), and the (constant) mean, mu.
            If None, the parameters currently set in the QuiescenceModel
            are used.
        """
        params = self._get_params_boiler(params)
        # sigma = sqrt(a) = exp(log_a / 2)
        return np.squeeze(np.exp(params[:,0]/2))
    def mu(self, params=None):
        """
        Value of model mean value.

        Parameters
        ----------
        params : list
            Parameters of the GP covariance model, a * exp(-c*t).
            These are log(a), log(c), and the (constant) mean, mu.
            If None, the parameters currently set in the QuiescenceModel
            are used.
        """
        params = self._get_params_boiler(params)
        return np.squeeze(params[:,2])
    def sigma_rel(self, params=None):
        """
        Value of the standard deviation in the covariance model sigma**2 * exp(-t/tau), normalized by the
        mean.

        Parameters
        ----------
        params : list
            Parameters of the GP covariance model, a * exp(-c*t).
            These are log(a), log(c), and the (constant) mean, mu.
            If None, the parameters currently set in the QuiescenceModel
            are used.
        """
        return np.squeeze(self.sigma(params)/self.mu(params))
    def sigma_relative_at_tbin(self, tbin, params=None):
        """
        Current value of the expected standard deviation of the model if values were binned to tbin.

        Parameters
        ----------
        tbin : float
            Width of time bins over which model values are assumed to be averaged.
        params : list
            Parameters of the GP covariance model, a * exp(-c*t).
            These are log(a), log(c), and the (constant) mean, mu.
            If None, the parameters currently set in the QuiescenceModel
            are used.
        """
        params = self._get_params_boiler(params)
        loga, logc, mu = params.T
        sig, c = np.exp(loga/2), np.exp(logc)
        # silence warnings from the x == inf (white-noise) entries handled below
        with np.errstate(invalid='ignore', over='ignore'):
            x = c*tbin
            sig_dt = np.sqrt(2*(sig/x)**2 * (x + np.exp(-x) - 1))
        # x == inf means the model is effectively uncorrelated; fall back to
        # white-noise scaling of sigma by sqrt(points per bin)
        uncorrelated = (x == np.inf)
        if np.any(uncorrelated):
            dt = np.median(np.diff(self.t))
            sig_dt[uncorrelated] = sig[uncorrelated]/np.sqrt(tbin/dt)
        return np.squeeze(sig_dt/mu)
    def set_to_best_fit(self):
        """Return the model parameters to their best-fit values, if they have been computed.
        Else you will get an error."""
        self.set_parameter_vector(self.fit_params)
        self.quick_compute()
    def curve(self, t):
        """Compute the lightcurve of the quiescence model at t.

        Returns a (mean, variance) pair of arrays evaluated at t."""
        if self.white:
            return self.mu()*np.ones_like(t), self.sigma()**2*np.ones_like(t)
        else:
            return self.predict(self.f[self.mask], t, return_var=True)
def lightcurve_fill(t, f, e, qmodel, fill_ranges):
    """
    Replace flare times with simulated data based on the qmodel with appropriately correlated noise.

    Parameters
    ----------
    t, f, e : arrays
        Lightcurve points -- time, flux, and error.
    qmodel : QuiescenceModel
        Gaussian Process model for quiescent variations in lightcurve.
    fill_ranges : Nx2 array
        Start and end time of each flare.

    Returns
    -------
    f_filled, e_filled : arrays
        Flux and error arrays where regions within flares have been filled with simulated data.
    """
    f_filled, e_filled = list(map(np.copy, [f, e]))
    if len(fill_ranges) > 0:
        # pull random draws to fill where flares were until no false positives occur
        flare = ranges.inranges(t, fill_ranges)
        if not np.any(flare):
            warn("Flare ranges were supplied, yet no points were within these ranges.")
            return f, e
        # flux-sorted order, used to interpolate error as a function of flux below
        isort = np.argsort(f)
        # estimate white noise error in flare range
        mean_flare, _ = qmodel.curve(t[flare])
        e_sim = np.interp(mean_flare, f[isort], e[isort])
        # draw random fluxes and estimate what the uncertainty estimate would have been for those points
        f_fill = nb.conditional_qmodel_draw(qmodel, t[~flare], f[~flare], t[flare])
        f_fill += np.random.randn(np.sum(flare))*e_sim
        e_fill = np.interp(f_fill, f[isort], e[isort])
        e_filled[flare] = e_fill
        f_filled[flare] = f_fill
        return f_filled, e_filled
    else:
        return f, e
from __future__ import absolute_import, print_function
import pytest
from moment_polytopes import *
# prior work
def test_borland_dennis(algorithm):
    """Borland-Dennis setting (weyl_module(6, [1,1,1])): the given inequality
    should be recognized as a Ressayre element.

    NOTE(review): `algorithm` is presumably a pytest fixture supplying the
    tester backend -- confirm in conftest.
    """
    R = weyl_module(6, [1, 1, 1])
    C = ressayre_tester(R, algorithm=algorithm)
    assert C.is_ressayre(((0, 0, 0, -1, 1, 1), 0))
def test_8_qubits(algorithm):
    """For 8 qubits, each of the 8 'one vs. rest' inequalities (offset 2-8)
    should be a Ressayre element."""
    R = external_tensor_product([2] * 8)
    C = ressayre_tester(R, algorithm=algorithm)
    for i in range(8):
        assert C.is_ressayre(([-1, 0] * i + [1, 0] + [-1, 0] * (8 - 1 - i), 2 - 8))
def test_bravyi(algorithm):
    """Bravyi setting (2 x 2 x 4): all seven listed inequalities should be
    Ressayre elements."""
    R = external_tensor_product([2, 2, 4])
    C = ressayre_tester(R, algorithm=algorithm)
    assert C.is_ressayre(((-1, 0, 0, 0, 1, 1, 0, 0), 0))
    assert C.is_ressayre(((0, 0, -1, 0, 1, 1, 0, 0), 0))
    assert C.is_ressayre(((-1, 0, -1, 0, 1, 0, 0, -1), -1))
    assert C.is_ressayre(((1, 0, -1, 0, 1, 0, -1, 0), 0))
    assert C.is_ressayre(((1, 0, -1, 0, 0, 1, 0, -1), 0))
    assert C.is_ressayre(((-1, 0, 1, 0, 1, 0, -1, 0), 0))
    assert C.is_ressayre(((-1, 0, 1, 0, 0, 1, 0, -1), 0))
# examples in my thesis
def test_qubits(algorithm):
    """Qubit examples: a 7-qubit inequality, and a 3-qubit inequality that is
    redundant but should still test as a Ressayre element."""
    # 7 qubits
    R = external_tensor_product([2] * 7)
    C = ressayre_tester(R, algorithm=algorithm)
    assert C.is_ressayre(([-1, 1] * 6 + [1, -1], 2 - 7))
    # 3 qubits -- redundant inequality!
    R = external_tensor_product([2, 2, 2])
    C = ressayre_tester(R, algorithm=algorithm)
    assert C.is_ressayre(((0, 0, -1, 1, 0, 0), -1))
def test_mixed_state_of_two_qubits(algorithm):
    """Two qubits with a rank-4 environment (2 x 2 x 4): both inequalities
    should be Ressayre elements."""
    R = external_tensor_product([2, 2, 4])
    C = ressayre_tester(R, algorithm=algorithm)
    assert C.is_ressayre(((-1, 1, 1, -1, 2, 0, -2, 0), 0))
    assert C.is_ressayre(((-1, 1, 1, -1, 0, 2, 0, -2), 0))
def test_three_qutrits(algorithm):
    """Three qutrits (3 x 3 x 3): the listed inequality should be a Ressayre
    element."""
    R = external_tensor_product([3, 3, 3])
    C = ressayre_tester(R, algorithm=algorithm)
    assert C.is_ressayre(((0, -1, 1, -1, 0, 1, 1, 0, -1), -1))
@pytest.mark.parametrize("d", [6, 7, 8])
def test_three_fermions_with_total_spin_one_half(algorithm, d):
    """Three fermions with total spin one half, for d = 6, 7, 8 orbital modes:
    the d-dependent inequality should be a Ressayre element."""
    R = external_tensor_product([weyl_module(d, [2, 1]), 2])
    C = ressayre_tester(R, algorithm=algorithm)
    assert C.is_ressayre(([-2, 2] + [0] * (d - 2) + [-1, 1], -3))
|
"""
****************************************************************************************************
:copyright (c) 2019-2021 URBANopt, Alliance for Sustainable Energy, LLC, and other contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted
provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of conditions
and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this list of conditions
and the following disclaimer in the documentation and/or other materials provided with the
distribution.
Neither the name of the copyright holder nor the names of its contributors may be used to endorse
or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
****************************************************************************************************
"""
import os
import re
import subprocess
from pathlib import Path
from tempfile import mkstemp
import click
# Template files that must not be auto-formatted.
SKIP_FILES = ['DistrictEnergySystem.mot']
# NOTE(review): Path.glob returns a one-shot generator, so TEMPLATE_FILES can
# only be iterated once per process -- fine for a single CLI invocation.
TEMPLATE_FILES = Path('geojson_modelica_translator/model_connectors').glob('**/templates/*')
@click.command()
@click.argument('mofile', required=False)
def fmt_modelica_files(mofile):
    """
    Format Modelica files: .mot templates are preprocessed around their Jinja
    markup, plain .mo files are formatted directly.

    :param mofile: str or None; a single file to format, or None to format
        every file in TEMPLATE_FILES.
    """
    if mofile is not None:
        # Wrap the CLI-supplied string in a Path: the .suffix checks below
        # would raise AttributeError on a plain str (original bug).
        files = [Path(mofile)]
    else:
        files = TEMPLATE_FILES
    for filepath in files:
        if os.path.basename(filepath) in SKIP_FILES:
            continue
        try:
            if filepath.suffix == ".mot":
                preprocess_and_format(str(filepath))
            elif filepath.suffix == ".mo":
                apply_formatter(str(filepath))
        except FormattingException as e:
            click.echo(f'Error processing file {filepath}:\n {e}', err=True)
class FormattingException(Exception):
    """Raised when a file cannot be formatted (formatter missing, formatter
    failure, or an inconsistency while substituting Jinja markup)."""
    pass
def apply_formatter(filepath):
    """
    Run modelicafmt on a file, rewriting it in place (-w).

    :param filepath: str, path to file
    :raises FormattingException: if modelicafmt is missing or fails
    """
    try:
        subprocess.run(["modelicafmt", "-w", filepath], stdout=subprocess.PIPE, check=True)
    except FileNotFoundError:
        raise FormattingException('Failed to run modelicafmt; ensure it can be found in $PATH')
    except subprocess.CalledProcessError as e:
        # Include the actual path (the original message printed the literal
        # word "filename").
        raise FormattingException(f'Failed to format {filepath}: {e.stdout}')
class SubMap:
    """
    Tracks substitutions made in Modelica template (Jinja) files so that the
    original text can be restored after formatting.
    """
    def __init__(self):
        # next numeric id to hand out (ids are 1-based, zero-padded to 3)
        self._cur_id = 1
        self._map = {}

    def add_sub(self, text):
        """
        Register a substitution and return the generated substitution name.

        :param text: str, text to substitute
        :returns: str, substitution name/id
        """
        name = 'JINJA_SUB_{:03}'.format(self._cur_id)
        self._cur_id += 1
        self._map[name] = text
        return name

    def get_text(self, sub):
        """
        Look up the original text for a substitution name.

        :param sub: str, substitution name
        :returns: str, text corresponding to that substitution name
        """
        if sub not in self._map:
            raise FormattingException(f'Key "{sub}" was not found in the substitution map, this should never happen... '
                                      f'Perhaps the substitution name was a false positive match?')
        return self._map[sub]
GENERIC_CONTROL_REGEX = re.compile('({%.*?%})')

def sub_generic(text, sub_map):
    """
    Substitutes all Jinja control statements, those that look like {% ... %},
    with commented-out substitution markers registered in sub_map.

    :param text: str, text to make substitutions in
    :param sub_map: SubMap
    :returns: str, text post substitutions
    """
    # Walk matches right-to-left so earlier spans stay valid while splicing.
    for match in reversed(list(GENERIC_CONTROL_REGEX.finditer(text))):
        start, end = match.span()
        marker = sub_map.add_sub(text[start:end])
        text = text[:start] + '/*' + marker + '*/' + text[end:]
    return text
EXPRESSION_REGEX = re.compile('({{.*?}})')

def sub_expression(text, sub_map):
    """
    Substitutes all Jinja expression statements, those that look like
    {{ ... }}, with bare substitution markers registered in sub_map.

    :param text: str, text to make substitutions in
    :param sub_map: SubMap
    :returns: str, text post substitutions
    """
    # Walk matches right-to-left so earlier spans stay valid while splicing.
    for match in reversed(list(EXPRESSION_REGEX.finditer(text))):
        start, end = match.span()
        marker = sub_map.add_sub(text[start:end])
        text = text[:start] + marker + text[end:]
    return text
COMMENTED_SUB = re.compile(r'/\*(JINJA_SUB_\d\d\d)\*/')
NORMAL_SUB = re.compile(r'JINJA_SUB_\d\d\d')

def reverse_sub(text, sub_map):
    """
    Reverses Jinja substitutions, ie replaces the JINJA_SUB_XXX markers with
    their original texts.

    :param text: str, text to reverse substitutions
    :param sub_map: SubMap, the submap used for making substitutions
    :returns: str, text with substitutions reversed
    """
    # strip the /* */ wrappers around commented substitutions first
    uncommented = COMMENTED_SUB.sub(r'\1', text)
    # then swap every marker back for its registered original text
    return NORMAL_SUB.sub(lambda m: sub_map.get_text(m.group(0)), uncommented)
def preprocess_and_format(filename, outfilename=None):
    """
    Formats modelica files that include Jinja templating.

    :param filename: str, template file to format
    :param outfilename: str, optional output path; defaults to overwriting
        filename in place
    :raises FormattingException: if the file is missing or formatting fails
    """
    try:
        with open(filename) as f:
            contents = f.read()
    except FileNotFoundError:
        # Name the missing file (the original f-string had no placeholder).
        raise FormattingException(f'File "{filename}" not found.')
    tmp_fd, tmp_filepath = mkstemp()
    try:
        # General strategy:
        #   1. replace all Jinja templating stuff with unique IDs, additionally
        #      commenting out any IDs that would result in invalid modelica
        #      syntax (those that are flow control, ie {% ... %}). After this
        #      step the file should be "valid" from the modelica lexer's perspective
        #   2. apply modelica formatter to format the file
        #   3. reverse the substitutions, replacing IDs with their original text
        sub_map = SubMap()
        previous_span = (0, 0)
        raw_regex = re.compile(r'{% raw %}[\s\S]*?{% endraw %}')
        raw_groups = [m.span() for m in raw_regex.finditer(contents)]
        with open(tmp_fd, 'w') as f:
            for span in raw_groups:
                # format from previous end to new start
                text = contents[previous_span[1]:span[0]]
                text = sub_generic(text, sub_map)
                text = sub_expression(text, sub_map)
                f.write(text)
                # format current span (should be raw; only control statements
                # are substituted so the raw content itself is untouched)
                text = contents[span[0]:span[1]]
                text = sub_generic(text, sub_map)
                f.write(text)
                previous_span = span
            # finish from end of last span to end of file
            text = contents[previous_span[1]:]
            text = sub_generic(text, sub_map)
            text = sub_expression(text, sub_map)
            f.write(text)
        apply_formatter(tmp_filepath)
        # substitute original values back in
        with open(tmp_filepath, 'r') as f:
            formatted_result = reverse_sub(f.read(), sub_map)
        if outfilename is None:
            outfilename = filename
        with open(outfilename, 'w') as f:
            f.write(formatted_result)
    finally:
        os.remove(tmp_filepath)
|
from django.utils.translation import ugettext_lazy
from openslides.config.api import config
from openslides.utils.personal_info import PersonalInfo
from .models import Motion
class MotionSubmitterPersonalInfo(PersonalInfo):
    """
    Class for personal info block for motion submitters.
    """
    headline = ugettext_lazy('I submitted the following motions')
    default_weight = 20
    def get_queryset(self):
        # Motions where the requesting user is registered as a submitter.
        return Motion.objects.filter(submitter__person=self.request.user)
class MotionSupporterPersonalInfo(PersonalInfo):
    """
    Class for personal info block for motion supporters.
    """
    headline = ugettext_lazy('I support the following motions')
    default_weight = 30
    def get_queryset(self):
        # Only meaningful when the supporter system is enabled
        # (motion_min_supporters > 0); otherwise return None -- presumably
        # the framework hides the block for a None queryset.
        if config['motion_min_supporters']:
            return_value = Motion.objects.filter(supporter__person=self.request.user)
        else:
            return_value = None
        return return_value
|
import os
import io
from contextlib import contextmanager
import boto3
import botocore
from lib.storage import Storage
from lib.package import Package
class S3Storage(Storage):
    """Storage backend that keeps packages in an S3 bucket.

    The bucket comes from `bucket_loc` or the DEFAULT_BUCKET environment
    variable.
    """
    def __init__(self, bucket_loc=None):
        s3 = boto3.resource('s3')
        self._bucket_loc = bucket_loc or os.getenv('DEFAULT_BUCKET')
        self.bucket = s3.Bucket(self._bucket_loc)

    def __iter__(self):
        # Yield the keys of all package objects under the contrib prefix.
        prefix = '/src/contrib'
        for obj in self.bucket.objects.filter(Prefix=prefix):
            yield obj.key

    def __len__(self):
        # Counts by listing; avoids materializing an intermediate list.
        return sum(1 for _ in self)

    def __getitem__(self, pkg_id):
        # Must be BytesIO: S3 objects are binary and download_fileobj writes
        # bytes (the original StringIO would fail).
        target = io.BytesIO()
        self.bucket.download_fileobj(pkg_id, target)
        # Rewind so callers can read from the start.
        target.seek(0)
        return target

    def __setitem__(self, pkg_id, fobj):
        self.bucket.upload_fileobj(fobj, pkg_id)

    def __delitem__(self, pkg_id):
        # TODO: deletion is intentionally a no-op for now.
        pass
if __name__ == '__main__':
    # Smoke test: list every package key in the default bucket.
    s = S3Storage()
    for p in s:
        print(p)
|
from util.Match import Tail,force
class unpack(object):
    """Context manager that exposes a list as a nested [head, tail] pair so it
    can be destructured as ``with unpack(lst) as (x, xs):``."""
    def __init__(self,v):
        self.v = v
    def __enter__(self):
        # convert the stored list to its cons-list form on entry
        return _f(self.v)
    def __exit__(self,*args):
        pass
@Tail
def f(lst,acc):
    # Tail-recursive fold that prepends each element, producing nested
    # [head, tail] pairs. @Tail presumably trampolines the recursion
    # (see util.Match) -- the result must be unwrapped with force().
    if lst == []:
        return acc
    return f(lst[1:],[lst[0],acc])
def _f(lst):
    # Reverse first so the fold in f yields [x0, [x1, [..., []]]] (a
    # cons-list) in the original element order; force() unwraps @Tail.
    lst = list(reversed(lst))
    return force(f(lst,[]))
# Demo: build the nested head/tail view of a flat list, then destructure it
# into its first element and remainder via the unpack context manager.
lst = list(range(6)) # 6 or more
print( _f(lst) )
with unpack(lst) as (x,xs):
    print( x )
    print( xs )
|
def countdown(number):
    """Print number, number-1, ..., 0, one value per line.

    Uses print() (valid in Python 2 and 3) instead of the Python-2-only
    print statement.
    """
    while number >= 0:
        print(number)
        number -= 1
def count(num):
    """Count from num toward (and including) zero, printing each value.

    Counts down for positive num, up for negative num, and prints 0 once
    when num is 0. Uses print() so the code runs on Python 2 and 3.
    """
    if num > 0:
        while num >= 0:
            print(num)
            num -= 1
    elif num < 0:
        while num < 1:
            print(num)
            num += 1
    else:
        print(num)
#count(5)
#count(-3)
def countfrom(num1, num2):
    """Print the integers from num1 to num2 inclusive, counting in whichever
    direction is needed; print a notice when the endpoints are equal.

    Uses print() so the code runs on Python 2 and 3.
    """
    if num1 > num2:
        while num1 >= num2:
            print(num1)
            num1 -= 1
    elif num1 < num2:
        while num1 <= num2:
            print(num1)
            num1 += 1
    else:
        print("Uhhh.. Its done already.")
#countfrom(-1, 1)
#countfrom(1, -1)
def sumofodds(num):
    """Return the sum of the odd integers between num and 0 (including num).

    Works for positive and negative num and returns 0 for num == 0. Note:
    Python's modulo keeps negative odds matching i % 2 == 1 (e.g. -9 % 2 == 1),
    which preserves the original implementation's behavior on the negative
    branch.
    """
    if num == 0:
        return num
    step = -1 if num > 0 else 1
    return sum(i for i in range(num, 0, step) if i % 2 == 1)
#print sumofodds(5)
#print sumofodds(-9)
def grid(w, h):
    """Return a w-by-h grid of dots: h lines of w dots each.

    Fixes the original bug where the column counter (w) was decremented in
    the inner loop but never reset, so only the first row received dots; the
    unused `count` variable is also removed.
    """
    return ("." * w + "\n") * h
print grid(10, 10)
|
"""
Predict flower name from an image with predict.py along with the probability of that name. That is, you'll pass in a single image /path/to/image and return the flower name and class probability.
Basic usage: python predict.py /path/to/image checkpoint
Options:
Return top K most likely classes: python predict.py input checkpoint --top_k 3
Use a mapping of categories to real names: python predict.py input checkpoint --category_names cat_to_name.json
Use GPU for inference: python predict.py input checkpoint --gpu
"""
import click
import torch
from train_pred_funcs import get_dataloaders, get_model, train_model, save_checkpoint, load_checkpoint, predict
@click.command()
@click.argument('filename')
@click.argument("checkpoint", default="checkpoint.pth")
@click.option("topk", "--topk", default=5)
@click.option("category_names", "--category_names", required=False)
@click.option("gpu", '--gpu', is_flag=True, default=True)
def do_predict(filename, checkpoint, topk, category_names, gpu):
    """Predict the top-k classes for an image using a saved checkpoint.

    NOTE(review): ``device`` is computed but never used, and
    ``category_names`` is accepted but never applied -- the category-to-name
    mapping promised in the module docstring is not implemented here.
    NOTE(review): ``--gpu`` is a flag with ``default=True``, so supplying the
    flag changes nothing; confirm whether the default should be False.
    """
    device = torch.device('cuda' if (torch.cuda.is_available() and gpu) else 'cpu')
    model = load_checkpoint(checkpoint_path=checkpoint)
    prediction = predict(filename, model, topk=topk)
    click.echo(prediction)
if __name__ == '__main__':
do_predict() |
# @Author: Narsi Reddy <narsi>
# @Date: 2019-12-18T20:15:32-06:00
# @Last modified by: cibitaw1
# @Last modified time: 2020-02-11T19:02:04-06:00
import os
from PIL import Image
import torch
torch.manual_seed(29)
from torch.utils.data import Dataset
from torch.utils.data.sampler import Sampler
from torchvision import transforms
import pandas as pd
import numpy as np
from glob import glob
np.random.seed(29)
from tqdm import tqdm
from .data_utils import RandomResize
# Augmentation pipeline: random flips, random rescale (project RandomResize),
# fixed 64x64 crop, converted to a tensor.
transform_v1=transforms.Compose([
    transforms.RandomVerticalFlip(),
    transforms.RandomHorizontalFlip(),
    RandomResize(0.25, 2.0, Image.BICUBIC),
    transforms.RandomCrop((64, 64)),
    transforms.ToTensor()
])
# Same augmentation but WITHOUT ToTensor, so the caller can still run
# PIL color-space conversions on the result (see dataset_2).
transform_o=transforms.Compose([
    transforms.RandomVerticalFlip(),
    transforms.RandomHorizontalFlip(),
    RandomResize(0.25, 2.0, Image.BICUBIC),
    transforms.RandomCrop((64, 64)),
])
# PIL image -> tensor, applied after any color-space conversion.
transform_x = transforms.ToTensor()
class dataset_1(Dataset):
    """Dataset over every ``*.png`` in one or more folders.

    Args:
        src_fldr: a folder path or a list of folder paths.
        transform: transform applied to each loaded image
            (defaults to the module-level ``transform_v1`` pipeline).
        two_out: when True, ``__getitem__`` returns the image twice --
            an (input, target) pair for autoencoder-style training.
    """
    def __init__(self, src_fldr, transform = transform_v1, two_out = False):
        # isinstance (not ``type(...) is list``) also accepts list subclasses.
        if not isinstance(src_fldr, list):
            src_fldr = [src_fldr]
        self.imgs = []
        for sf in src_fldr:
            # os.path.join handles folders given with or without a trailing
            # separator (``sf + os.sep`` doubled it in that case).
            self.imgs += glob(os.path.join(sf, '*.png'))
        self.num_samples = len(self.imgs)
        self.transform = transform
        self.two_out = two_out
    def __getitem__(self, i):
        # Force RGB so grayscale/palette PNGs yield a consistent 3 channels.
        I = Image.open(self.imgs[i]).convert('RGB')
        if self.transform:
            I = self.transform(I)
        return (I, I) if self.two_out else I
    def __len__(self):
        return self.num_samples
class dataset_2(Dataset):
    """Dataset that augments PNGs (``transform_o``) then converts color space.

    Args:
        src_fldr: a folder path or a list of folder paths.
        two_out: when True, ``__getitem__`` returns the image twice.
        convert: PIL mode passed to ``Image.convert`` after augmentation
            (default "LAB").
    """
    def __init__(self, src_fldr, two_out = False, convert = "LAB"):
        # isinstance (not ``type(...) is list``) also accepts list subclasses.
        if not isinstance(src_fldr, list):
            src_fldr = [src_fldr]
        self.imgs = []
        for sf in src_fldr:
            # os.path.join handles folders with or without a trailing separator.
            self.imgs += glob(os.path.join(sf, '*.png'))
        self.num_samples = len(self.imgs)
        self.two_out = two_out
        self.convert = convert
    def __getitem__(self, i):
        I = Image.open(self.imgs[i]).convert('RGB')
        I = transform_o(I)           # augment while still a PIL image
        I = I.convert(self.convert)  # color-space conversion (e.g. RGB -> LAB)
        I = transform_x(I)           # then to tensor
        return (I, I) if self.two_out else I
    def __len__(self):
        return self.num_samples
|
from django.test import TestCase
from django.contrib.auth import get_user_model
from .models import Blog, Category, Comment, Subscriber
from django.urls import reverse
class BlogTests(TestCase):
    """Model-level tests for Blog creation and field round-tripping."""
    @classmethod
    def setUpTestData(cls):
        # create_user() already persists the user; the original's extra
        # save() calls (here and after objects.create) were redundant
        # database writes and have been removed.
        testuser1 = get_user_model().objects.create_user(
            username='testuser1',
            email='test@gmail.com',
            password='123456789'
        )
        programming = Category.objects.create(name='Python')
        Blog.objects.create(
            user=testuser1,
            title='New Blog title',
            subtitle='simple title',
            category=programming,
            meta_description=' a web app blog testing haha',
            content='Content will goes here.',
        )
    def test_blog_content(self):
        post = Blog.objects.get(id=1)
        # f-strings coerce model field values to str before comparison,
        # matching the original assertions exactly.
        self.assertEqual(f'{post.title}', 'New Blog title')
        self.assertEqual(f'{post.subtitle}', 'simple title')
        self.assertEqual(f'{post.meta_description}', ' a web app blog testing haha')
        self.assertEqual(f'{post.content}', 'Content will goes here.')
class SubscriberTests(TestCase):
    """Tests for the Subscriber model."""
    def setUp(self):
        # objects.create() persists immediately; the unused local binding
        # from the original was dropped.
        Subscriber.objects.create(
            name='ram',
            email="hello@gmail.com"
        )
    def test_email_content(self):
        subscriber = Subscriber.objects.get(id=1)
        # assertEquals is a deprecated unittest alias; use assertEqual.
        self.assertEqual(f'{subscriber.email}', 'hello@gmail.com')
class CommentTests(TestCase):
    """Tests for the Comment model."""
    def setUp(self):
        Comment.objects.create(
            name='lol',
            email='lol@gmail.com',
            message='shai ho',
        )
    def test_comment(self):
        comment = Comment.objects.get(id=1)
        # The original had a stray trailing comma after the first assertion,
        # silently wrapping it in a 1-tuple; removed.
        self.assertEqual(f'{comment.name}', 'lol')
        self.assertEqual(f'{comment.email}', 'lol@gmail.com')
        self.assertEqual(f'{comment.message}', 'shai ho')
|
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-Dwm-Api
GUID : 292a52c4-fa27-4461-b526-54a46430bd54
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
# Auto-generated ETW event templates for the Microsoft-Windows-Dwm-Api
# provider: one Etw subclass per (event_id, version), each declaring the
# construct Struct layout of that event's payload. Most events carry only an
# HRESULT ("hr"); window-related events add HWND fields. Do not hand-edit
# beyond comments -- keep in sync with the generator.
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=2, version=0)
class Microsoft_Windows_Dwm_Api_2_0(Etw):
    pattern = Struct(
        "hr" / Int32ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=4, version=0)
class Microsoft_Windows_Dwm_Api_4_0(Etw):
    pattern = Struct(
        "hr" / Int32ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=6, version=0)
class Microsoft_Windows_Dwm_Api_6_0(Etw):
    pattern = Struct(
        "hr" / Int32ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=8, version=0)
class Microsoft_Windows_Dwm_Api_8_0(Etw):
    pattern = Struct(
        "hr" / Int32ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=9, version=0)
class Microsoft_Windows_Dwm_Api_9_0(Etw):
    pattern = Struct(
        "hwnd" / Int64ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=10, version=0)
class Microsoft_Windows_Dwm_Api_10_0(Etw):
    pattern = Struct(
        "hr" / Int32ul,
        "Hwnd" / Int64ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=11, version=0)
class Microsoft_Windows_Dwm_Api_11_0(Etw):
    pattern = Struct(
        "hwnd" / Int64ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=12, version=0)
class Microsoft_Windows_Dwm_Api_12_0(Etw):
    pattern = Struct(
        "hr" / Int32ul,
        "Hwnd" / Int64ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=14, version=0)
class Microsoft_Windows_Dwm_Api_14_0(Etw):
    pattern = Struct(
        "hr" / Int32ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=16, version=0)
class Microsoft_Windows_Dwm_Api_16_0(Etw):
    pattern = Struct(
        "hr" / Int32ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=18, version=0)
class Microsoft_Windows_Dwm_Api_18_0(Etw):
    pattern = Struct(
        "hr" / Int32ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=20, version=0)
class Microsoft_Windows_Dwm_Api_20_0(Etw):
    pattern = Struct(
        "hr" / Int32ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=24, version=0)
class Microsoft_Windows_Dwm_Api_24_0(Etw):
    pattern = Struct(
        "hr" / Int32ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=26, version=0)
class Microsoft_Windows_Dwm_Api_26_0(Etw):
    pattern = Struct(
        "hr" / Int32ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=27, version=0)
class Microsoft_Windows_Dwm_Api_27_0(Etw):
    pattern = Struct(
        "hwnd" / Int64ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=28, version=0)
class Microsoft_Windows_Dwm_Api_28_0(Etw):
    pattern = Struct(
        "hr" / Int32ul,
        "hwnd" / Int64ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=30, version=0)
class Microsoft_Windows_Dwm_Api_30_0(Etw):
    pattern = Struct(
        "hr" / Int32ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=32, version=0)
class Microsoft_Windows_Dwm_Api_32_0(Etw):
    pattern = Struct(
        "hr" / Int32ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=34, version=0)
class Microsoft_Windows_Dwm_Api_34_0(Etw):
    pattern = Struct(
        "hr" / Int32ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=36, version=0)
class Microsoft_Windows_Dwm_Api_36_0(Etw):
    pattern = Struct(
        "hr" / Int32ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=38, version=0)
class Microsoft_Windows_Dwm_Api_38_0(Etw):
    pattern = Struct(
        "hr" / Int32ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=40, version=0)
class Microsoft_Windows_Dwm_Api_40_0(Etw):
    pattern = Struct(
        "hr" / Int32ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=42, version=0)
class Microsoft_Windows_Dwm_Api_42_0(Etw):
    pattern = Struct(
        "hr" / Int32ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=44, version=0)
class Microsoft_Windows_Dwm_Api_44_0(Etw):
    pattern = Struct(
        "hr" / Int32ul
    )
# Event 45: richer payload -- window handle plus adapter/monitor association
# and a DXGI format value (field names from the manifest).
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=45, version=0)
class Microsoft_Windows_Dwm_Api_45_0(Etw):
    pattern = Struct(
        "Hwnd" / Int64ul,
        "luidAdapter" / Int64ul,
        "hmonAssociation" / Int64ul,
        "dwFlags" / Int32ul,
        "DxgiFormat" / Int32ul
    )
# Event 46: result event carrying the DirectX surface handle and retry count.
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=46, version=0)
class Microsoft_Windows_Dwm_Api_46_0(Etw):
    pattern = Struct(
        "Hwnd" / Int64ul,
        "hr" / Int32ul,
        "hDxSurface" / Int64ul,
        "uiUpdateId" / Int64ul,
        "DxgiFormat" / Int32ul,
        "cTries" / Int32ul
    )
# Events 48/80: result plus a window rectangle (left/top/right/bottom).
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=48, version=0)
class Microsoft_Windows_Dwm_Api_48_0(Etw):
    pattern = Struct(
        "hr" / Int32ul,
        "Hwnd" / Int64ul,
        "left" / Int32ul,
        "top" / Int32ul,
        "right" / Int32ul,
        "bottom" / Int32ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=50, version=0)
class Microsoft_Windows_Dwm_Api_50_0(Etw):
    pattern = Struct(
        "hr" / Int32ul,
        "fEnable" / Int32ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=52, version=0)
class Microsoft_Windows_Dwm_Api_52_0(Etw):
    pattern = Struct(
        "hr" / Int32ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=54, version=0)
class Microsoft_Windows_Dwm_Api_54_0(Etw):
    pattern = Struct(
        "hr" / Int32ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=56, version=0)
class Microsoft_Windows_Dwm_Api_56_0(Etw):
    pattern = Struct(
        "hr" / Int32ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=58, version=0)
class Microsoft_Windows_Dwm_Api_58_0(Etw):
    pattern = Struct(
        "hr" / Int32ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=66, version=0)
class Microsoft_Windows_Dwm_Api_66_0(Etw):
    pattern = Struct(
        "hr" / Int32ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=68, version=0)
class Microsoft_Windows_Dwm_Api_68_0(Etw):
    pattern = Struct(
        "hr" / Int32ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=70, version=0)
class Microsoft_Windows_Dwm_Api_70_0(Etw):
    pattern = Struct(
        "hr" / Int32ul
    )
# Event 71: source/destination window pair.
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=71, version=0)
class Microsoft_Windows_Dwm_Api_71_0(Etw):
    pattern = Struct(
        "hwndDst" / Int64ul,
        "hwndSrc" / Int64ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=72, version=0)
class Microsoft_Windows_Dwm_Api_72_0(Etw):
    pattern = Struct(
        "hr" / Int32ul,
        "hwnd" / Int64ul
    )
# Events 73/74 share the same activation payload (flag, excluded window, trigger).
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=73, version=0)
class Microsoft_Windows_Dwm_Api_73_0(Etw):
    pattern = Struct(
        "fActivate" / Int8ul,
        "hwndExclude" / Int64ul,
        "trigger" / Int32ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=74, version=0)
class Microsoft_Windows_Dwm_Api_74_0(Etw):
    pattern = Struct(
        "fActivate" / Int8ul,
        "hwndExclude" / Int64ul,
        "trigger" / Int32ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=75, version=0)
class Microsoft_Windows_Dwm_Api_75_0(Etw):
    pattern = Struct(
        "hwnd" / Int64ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=76, version=0)
class Microsoft_Windows_Dwm_Api_76_0(Etw):
    pattern = Struct(
        "hr" / Int32ul,
        "hwnd" / Int64ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=77, version=0)
class Microsoft_Windows_Dwm_Api_77_0(Etw):
    pattern = Struct(
        "hwnd" / Int64ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=78, version=0)
class Microsoft_Windows_Dwm_Api_78_0(Etw):
    pattern = Struct(
        "hr" / Int32ul,
        "hwnd" / Int64ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=80, version=0)
class Microsoft_Windows_Dwm_Api_80_0(Etw):
    pattern = Struct(
        "hr" / Int32ul,
        "Hwnd" / Int64ul,
        "left" / Int32ul,
        "top" / Int32ul,
        "right" / Int32ul,
        "bottom" / Int32ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=82, version=0)
class Microsoft_Windows_Dwm_Api_82_0(Etw):
    pattern = Struct(
        "hr" / Int32ul,
        "Hwnd" / Int64ul,
        "Flags" / Int32ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=84, version=0)
class Microsoft_Windows_Dwm_Api_84_0(Etw):
    pattern = Struct(
        "hr" / Int32ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=85, version=0)
class Microsoft_Windows_Dwm_Api_85_0(Etw):
    pattern = Struct(
        "Enum" / Int32ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=86, version=0)
class Microsoft_Windows_Dwm_Api_86_0(Etw):
    pattern = Struct(
        "hr" / Int32ul,
        "Enum" / Int32ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=88, version=0)
class Microsoft_Windows_Dwm_Api_88_0(Etw):
    pattern = Struct(
        "hr" / Int32ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=90, version=0)
class Microsoft_Windows_Dwm_Api_90_0(Etw):
    pattern = Struct(
        "hr" / Int32ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=92, version=0)
class Microsoft_Windows_Dwm_Api_92_0(Etw):
    pattern = Struct(
        "hr" / Int32ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=94, version=0)
class Microsoft_Windows_Dwm_Api_94_0(Etw):
    pattern = Struct(
        "hr" / Int32ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=96, version=0)
class Microsoft_Windows_Dwm_Api_96_0(Etw):
    pattern = Struct(
        "hr" / Int32ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=98, version=0)
class Microsoft_Windows_Dwm_Api_98_0(Etw):
    pattern = Struct(
        "hr" / Int32ul
    )
# Events 99/101: signed "storyid" payload (note Int32sl, not Int32ul).
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=99, version=0)
class Microsoft_Windows_Dwm_Api_99_0(Etw):
    pattern = Struct(
        "storyid" / Int32sl
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=100, version=0)
class Microsoft_Windows_Dwm_Api_100_0(Etw):
    pattern = Struct(
        "hr" / Int32ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=101, version=0)
class Microsoft_Windows_Dwm_Api_101_0(Etw):
    pattern = Struct(
        "storyid" / Int32sl
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=102, version=0)
class Microsoft_Windows_Dwm_Api_102_0(Etw):
    pattern = Struct(
        "hr" / Int32ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=103, version=0)
class Microsoft_Windows_Dwm_Api_103_0(Etw):
    pattern = Struct(
        "Hwnd" / Int64ul,
        "target" / Int32sl
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=104, version=0)
class Microsoft_Windows_Dwm_Api_104_0(Etw):
    pattern = Struct(
        "hr" / Int32ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=105, version=0)
class Microsoft_Windows_Dwm_Api_105_0(Etw):
    pattern = Struct(
        "Hwnd" / Int64ul,
        "target" / Int32sl
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=106, version=0)
class Microsoft_Windows_Dwm_Api_106_0(Etw):
    pattern = Struct(
        "hr" / Int32ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=107, version=0)
class Microsoft_Windows_Dwm_Api_107_0(Etw):
    pattern = Struct(
        "hwnd" / Int64ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=108, version=0)
class Microsoft_Windows_Dwm_Api_108_0(Etw):
    pattern = Struct(
        "hr" / Int32ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=110, version=0)
class Microsoft_Windows_Dwm_Api_110_0(Etw):
    pattern = Struct(
        "hr" / Int32ul
    )
@declare(guid=guid("292a52c4-fa27-4461-b526-54a46430bd54"), event_id=112, version=0)
class Microsoft_Windows_Dwm_Api_112_0(Etw):
    pattern = Struct(
        "hr" / Int32ul
    )
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import torch.optim.lr_scheduler as lr_scheduler
from CenterLoss import CenterLoss
import matplotlib.pyplot as plt
class Net(nn.Module):
    """LeNet-style CNN for MNIST with a 2-D embedding bottleneck.

    ``forward`` returns ``(embedding, log_probs)``: the 2-D feature used by
    the center loss, and log-softmax class scores over the 10 digits.
    """
    def __init__(self):
        super(Net, self).__init__()
        # Three conv stages of two 5x5 convs each; attribute names are kept
        # stable so existing checkpoints continue to load.
        self.conv1_1 = nn.Conv2d(1, 32, kernel_size=5, padding=2)
        self.prelu1_1 = nn.PReLU()
        self.conv1_2 = nn.Conv2d(32, 32, kernel_size=5, padding=2)
        self.prelu1_2 = nn.PReLU()
        self.conv2_1 = nn.Conv2d(32, 64, kernel_size=5, padding=2)
        self.prelu2_1 = nn.PReLU()
        self.conv2_2 = nn.Conv2d(64, 64, kernel_size=5, padding=2)
        self.prelu2_2 = nn.PReLU()
        self.conv3_1 = nn.Conv2d(64, 128, kernel_size=5, padding=2)
        self.prelu3_1 = nn.PReLU()
        self.conv3_2 = nn.Conv2d(128, 128, kernel_size=5, padding=2)
        self.prelu3_2 = nn.PReLU()
        self.preluip1 = nn.PReLU()
        self.ip1 = nn.Linear(128*3*3, 2)
        self.ip2 = nn.Linear(2, 10, bias=False)
    def forward(self, x):
        # Each stage: conv -> PReLU, conv -> PReLU, 2x2 max-pool.
        x = F.max_pool2d(self.prelu1_2(self.conv1_2(self.prelu1_1(self.conv1_1(x)))), 2)
        x = F.max_pool2d(self.prelu2_2(self.conv2_2(self.prelu2_1(self.conv2_1(x)))), 2)
        x = F.max_pool2d(self.prelu3_2(self.conv3_2(self.prelu3_1(self.conv3_1(x)))), 2)
        # A 28x28 input has pooled down to 3x3 spatial resolution here.
        flat = x.view(-1, 128*3*3)
        embedding = self.preluip1(self.ip1(flat))
        logits = self.ip2(embedding)
        return embedding, F.log_softmax(logits, dim=1)
def visualize(feat, labels, epoch):
    """Scatter-plot the 2-D embeddings, one color per digit, save and show."""
    plt.ion()
    palette = ['#ff0000', '#ffff00', '#00ff00', '#00ffff', '#0000ff',
               '#ff00ff', '#990000', '#999900', '#009900', '#009999']
    plt.clf()
    for digit in range(10):
        mask = labels == digit
        plt.plot(feat[mask, 0], feat[mask, 1], '.', c=palette[digit])
    plt.legend([str(d) for d in range(10)], loc='upper right')
    plt.xlim(xmin=-8, xmax=8)
    plt.ylim(ymin=-8, ymax=8)
    plt.text(-7.8, 7.3, "epoch=%d" % epoch)
    plt.savefig('./images/epoch=%d.jpg' % epoch)
    plt.draw()
    plt.pause(0.001)
def train(epoch):
    # One epoch of joint softmax + center-loss training (Python 2 script).
    print "Training... Epoch = %d" % epoch
    ip1_loader = []
    idx_loader = []
    for i,(data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        ip1, pred = model(data)
        # Total loss: NLL on the log-softmax outputs plus weighted center
        # loss on the 2-D embedding.
        loss = nllloss(pred, target) + loss_weight * centerloss(target, ip1)
        # Two optimizers: one for the network, one for the class centers.
        optimizer4nn.zero_grad()
        optimzer4center.zero_grad()
        loss.backward()
        optimizer4nn.step()
        optimzer4center.step()
        ip1_loader.append(ip1)
        idx_loader.append((target))
    # Collect the whole epoch's embeddings/labels for the scatter plot.
    feat = torch.cat(ip1_loader, 0)
    labels = torch.cat(idx_loader, 0)
    visualize(feat.data.cpu().numpy(),labels.data.cpu().numpy(),epoch)
use_cuda = torch.cuda.is_available() and True
device = torch.device("cuda" if use_cuda else "cpu")
# Dataset
trainset = datasets.MNIST('../MNIST', download=True,train=True, transform=transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))]))
train_loader = DataLoader(trainset, batch_size=128, shuffle=True, num_workers=4)
# Model
model = Net().to(device)
# NLLLoss
nllloss = nn.NLLLoss().to(device) #CrossEntropyLoss = log_softmax + NLLLoss
# CenterLoss
loss_weight = 1
centerloss = CenterLoss(10, 2).to(device)
# optimzer4nn
optimizer4nn = optim.SGD(model.parameters(),lr=0.001,momentum=0.9, weight_decay=0.0005)
sheduler = lr_scheduler.StepLR(optimizer4nn,20,gamma=0.8)
# optimzer4center
optimzer4center = optim.SGD(centerloss.parameters(), lr =0.5)
for epoch in range(100):
    # NOTE(review): calling scheduler.step() before the epoch's optimizer
    # steps is the pre-PyTorch-1.1 order; newer versions expect it after --
    # confirm against the torch version actually installed.
    sheduler.step()
    # print optimizer4nn.param_groups[0]['lr']
    train(epoch+1)
|
# Generated by Django 3.1.5 on 2021-01-09 06:50
import cloudinary.models
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Image table with a
    # Cloudinary-backed image_url field. Avoid hand-editing generated
    # migrations except to resolve conflicts.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Image',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('imageDescription', models.CharField(max_length=450)),
                ('image_url', cloudinary.models.CloudinaryField(blank=True, max_length=255, verbose_name='image')),
                ('date_uploaded', models.DateTimeField(auto_now_add=True)),
            ],
        ),
    ]
|
"""
@author: Gabriele Girelli
@contact: gigi.ga90@gmail.com
"""
import logging
from rich.logging import RichHandler # type: ignore
from kmermaid.scripts import arguments, kmer, kmer_batch, kmer_count, kmer_uniq
# Route all package logging through Rich for colorized output and tracebacks.
logging.basicConfig(
    level=logging.INFO,
    format="%(message)s",
    handlers=[RichHandler(markup=True, rich_tracebacks=True)],
)
# Public CLI submodules re-exported by this package.
__all__ = ["arguments", "kmer", "kmer_batch", "kmer_count", "kmer_uniq"]
|
from django.contrib.admin.widgets import ForeignKeyRawIdWidget, ManyToManyRawIdWidget
from django.forms import widgets
__all__ = ["AdminIPInput", "AdminUUIDInput", "AdminSwitchInput", "AdminTextarea",
"AdminForeignKeyRawIdWidget", "AdminManyToManyRawIdWidget", "AdminCheckboxInput",
"AdminRadioSelect", "AdminCheckboxSelectMultiple", "FilteredSelectMultiple"]
class AdminIPInput(widgets.TextInput):
    # Text input rendered with an IP-address-specific template.
    template_name = "django/forms/widgets/ip.html"
class AdminUUIDInput(widgets.TextInput):
    # Text input rendered with a UUID-specific template.
    template_name = "django/forms/widgets/uuid.html"
class AdminSwitchInput(widgets.CheckboxInput):
    # Checkbox rendered as a custom-control switch.
    template_name = "django/forms/widgets/switch.html"
    def __init__(self, attrs=None):
        attrs = attrs or {}
        # setdefault lets callers override the CSS class via attrs.
        attrs.setdefault("class", "custom-control-input")
        super().__init__(attrs=attrs)
class AdminTextarea(widgets.Textarea):
    """Textarea defaulting to three rows; caller-supplied attrs win."""
    def __init__(self, attrs=None):
        merged = dict(attrs) if attrs else {}
        # Same precedence as the original default_attrs/update dance:
        # an explicit "rows" in attrs overrides the default.
        merged.setdefault("rows", "3")
        super().__init__(merged)
class AdminForeignKeyRawIdWidget(ForeignKeyRawIdWidget):
    # Raw-id FK lookup rendered with the project's template.
    template_name = "django/forms/widgets/foreign_key_raw_id.html"
class AdminManyToManyRawIdWidget(ManyToManyRawIdWidget):
    # Raw-id M2M lookup rendered with the project's template.
    template_name = "django/forms/widgets/many_to_many_raw_id.html"
class AdminCheckboxInput(widgets.CheckboxInput):
    # Checkbox with the custom-control template and CSS class.
    template_name = "django/forms/widgets/checkbox_custom.html"
    def __init__(self, attrs=None, check_test=None):
        attrs = attrs or {}
        attrs.setdefault("class", "custom-control-input")
        super().__init__(attrs=attrs, check_test=check_test)
class AdminRadioSelect(widgets.RadioSelect):
    # Radio group using custom-control templates for group and options.
    template_name = "django/forms/widgets/radio_custom.html"
    option_template_name = "django/forms/widgets/radio_option_custom.html"
    def __init__(self, attrs=None, choices=()):
        attrs = attrs or {}
        attrs.setdefault("class", "custom-control-input")
        super().__init__(attrs=attrs, choices=choices)
class AdminCheckboxSelectMultiple(widgets.CheckboxSelectMultiple):
    # Multi-checkbox using custom-control templates for group and options.
    template_name = "django/forms/widgets/checkbox_select.html"
    option_template_name = "django/forms/widgets/checkbox_custom.html"
    def __init__(self, attrs=None, choices=()):
        attrs = attrs or {}
        attrs.setdefault("class", "custom-control-input")
        super().__init__(attrs=attrs, choices=choices)
class FilteredSelectMultiple(widgets.SelectMultiple):
    # Select-multiple rendered with the filterable template.
    template_name = "django/forms/widgets/select_multiple.html"
|
import copy
import logging
from .batch_provider import BatchProvider
from gunpowder.batch_request import BatchRequest
from gunpowder.profiling import Timing
logger = logging.getLogger(__name__)
class BatchFilterError(Exception):
    """Raised when a :class:`BatchFilter` is misconfigured or misused.

    Args:
        batch_filter: the filter the error originates from (must expose
            ``name()``).
        msg: human-readable description of the problem.
    """

    def __init__(self, batch_filter, msg):
        # Chain to Exception so ``args`` and pickling behave as expected.
        super().__init__(msg)
        self.batch_filter = batch_filter
        self.msg = msg

    def __str__(self):
        return f"Error in {self.batch_filter.name()}: {self.msg}"
class BatchFilter(BatchProvider):
    """Convenience wrapper for :class:`BatchProviders<BatchProvider>` with
    exactly one input provider.
    By default, a node of this class will expose the same :class:`ProviderSpec`
    as the upstream provider. You can modify the provider spec by calling
    :func:`provides` and :func:`updates` in :func:`setup`.
    Subclasses need to implement at least :func:`process` to modify a passed
    batch (downstream). Optionally, the following methods can be implemented:
    :func:`setup`
        Initialize this filter. Called after setup of the DAG. All upstream
        providers will be set up already.
    :func:`teardown`
        Destruct this filter, free resources, stop worker processes.
    :func:`prepare`
        Prepare for a batch request. Always called before each
        :func:`process`. Used to communicate dependencies.
    """
    @property
    def remove_placeholders(self):
        # Defaults to False until self._remove_placeholders is set explicitly.
        if not hasattr(self, '_remove_placeholders'):
            return False
        return self._remove_placeholders
    def get_upstream_provider(self):
        """Return the single upstream provider; raise if there is not exactly one."""
        if len(self.get_upstream_providers()) != 1:
            # NOTE(review): the message below opens "(" but never closes it.
            raise BatchFilterError(
                self,
                "BatchFilters need to have exactly one upstream provider, "
                f"this one has {len(self.get_upstream_providers())}: "
                f"({[b.name() for b in self.get_upstream_providers()]}")
        return self.get_upstream_providers()[0]
    def updates(self, key, spec):
        """Update an output provided by this :class:`BatchFilter`.
        Implementations should call this in their :func:`setup` method, which
        will be called when the pipeline is built.
        Args:
            key (:class:`ArrayKey` or :class:`GraphKey`):
                The array or point set key this filter updates.
            spec (:class:`ArraySpec` or :class:`GraphSpec`):
                The updated spec of the array or point set.
        """
        if key not in self.spec:
            raise BatchFilterError(
                self,
                f"BatchFilter {self} is trying to change the spec for {key}, "
                f"but {key} is not provided upstream. Upstream offers: "
                f"{self.get_upstream_provider().spec}")
        # Deep-copy so later mutations of the caller's spec don't leak in.
        self.spec[key] = copy.deepcopy(spec)
        self.updated_items.append(key)
        logger.debug("%s updates %s with %s" % (self.name(), key, spec))
    def enable_autoskip(self, skip=True):
        """Enable automatic skipping of this :class:`BatchFilter`, based on
        given :func:`updates` and :func:`provides` calls. Has to be called in
        :func:`setup`.
        By default, :class:`BatchFilters<BatchFilter>` are not skipped
        automatically, regardless of what they update or provide. If autoskip
        is enabled, :class:`BatchFilters<BatchFilter>` will only be run if the
        request contains at least one key reported earlier with
        :func:`updates` or :func:`provides`.
        """
        self._autoskip_enabled = skip
    def _init_spec(self):
        # default for BatchFilters is to provide the same as upstream
        if not hasattr(self, "_spec") or self._spec is None:
            if len(self.get_upstream_providers()) != 0:
                self._spec = copy.deepcopy(self.get_upstream_provider().spec)
            else:
                self._spec = None
    def internal_teardown(self):
        # Reset cached state so the node can be set up again, then defer to
        # the subclass's teardown().
        logger.debug("Resetting spec of %s", self.name())
        self._spec = None
        self._updated_items = []
        self.teardown()
    @property
    def updated_items(self):
        """Get a list of the keys that are updated by this `BatchFilter`.
        This list is only available after the pipeline has been built. Before
        that, it is empty.
        """
        if not hasattr(self, "_updated_items"):
            self._updated_items = []
        return self._updated_items
    @property
    def autoskip_enabled(self):
        # Lazily default to False; set via enable_autoskip().
        if not hasattr(self, "_autoskip_enabled"):
            self._autoskip_enabled = False
        return self._autoskip_enabled
    def provide(self, request):
        """Fulfil ``request``: ask the subclass for its dependencies
        (:func:`prepare`), fetch them upstream, run :func:`process`, and
        merge the result back -- unless autoskip decides to pass through.
        """
        skip = self.__can_skip(request)
        timing_prepare = Timing(self, "prepare")
        timing_prepare.start()
        downstream_request = request.copy()
        if not skip:
            # Ask the subclass which parts of the request it depends on.
            dependencies = self.prepare(request)
            if isinstance(dependencies, BatchRequest):
                upstream_request = request.update_with(dependencies)
            elif dependencies is None:
                # None means "depends on the full request".
                upstream_request = request.copy()
            else:
                raise BatchFilterError(
                    self,
                    f"This BatchFilter returned a {type(dependencies)}! "
                    "Supported return types are: `BatchRequest` containing your exact "
                    "dependencies or `None`, indicating a dependency on the full request.")
            self.remove_provided(upstream_request)
        else:
            upstream_request = request.copy()
            self.remove_provided(upstream_request)
        timing_prepare.stop()
        batch = self.get_upstream_provider().request_batch(upstream_request)
        timing_process = Timing(self, "process")
        timing_process.start()
        if not skip:
            if dependencies is not None:
                # Only hand the subclass the data it explicitly asked for.
                dependencies.remove_placeholders()
                node_batch = batch.crop(dependencies)
            else:
                node_batch = batch
            downstream_request.remove_placeholders()
            processed_batch = self.process(node_batch, downstream_request)
            if processed_batch is None:
                # Convention: None means the batch was modified in place.
                processed_batch = node_batch
            batch = batch.merge(processed_batch, merge_profiling_stats=False).crop(
                downstream_request
            )
        timing_process.stop()
        batch.profiling_stats.add(timing_prepare)
        batch.profiling_stats.add(timing_process)
        return batch
    def __can_skip(self, request):
        """Check if this filter needs to be run for the given request."""
        if not self.autoskip_enabled:
            return False
        # Runnable only if the request touches something we provide/update.
        for key, spec in request.items():
            if spec.placeholder:
                continue
            if key in self.provided_items:
                return False
            if key in self.updated_items:
                return False
        return True
    def setup(self):
        """To be implemented in subclasses.
        Called during initialization of the DAG. Callees can assume that all
        upstream providers are set up already.
        In setup, call :func:`provides` or :func:`updates` to announce the
        arrays and points provided or changed by this node.
        """
        pass
    def prepare(self, request):
        """To be implemented in subclasses.
        Prepare for a batch request. Should return a :class:`BatchRequest` of
        needed dependencies. If None is returned, it will be assumed that all
        of request is needed.
        """
        return None
    def process(self, batch, request):
        """To be implemented in subclasses.
        Filter a batch, will be called after :func:`prepare`. Should return a
        :class:`Batch` containing modified Arrays and Graphs. Keys in the returned
        batch will replace the associated data in the original batch. If None is
        returned it is assumed that the batch has been modified in place. ``request``
        is the same as passed to :func:`prepare`, provided for convenience.
        Args:
            batch (:class:`Batch`):
                The batch received from upstream to be modified by this node.
            request (:class:`BatchRequest`):
                The request this node received. The updated batch should meet
                this request.
        """
        raise BatchFilterError(
            self,
            "does not implement 'process'")
|
from myspiders.ruia import JsonField, AttrField, Item, Spider
from urllib.parse import urlencode, urlparse
from config import CONFIG, Target, Job
import random
import demjson
import re
import os
'''
AIResumeCount: 0
ActivityGID: null
ActivityTitle: ""
ApprovedBy: ""
ApprovedComment: "请明确数字的包含关系,重新提交审批"
ApprovedOn: "2020-04-02T16:42:51.593"
ApprovedOnfb: "0001-01-01T00:00:00"
BeginAge: 0
BranchCode: "108019"
BranchCodeName: ""
CollegeRequirement: ""
CollegeRequirementName: ""
Count: 0
DeleteFilterRules: null
DySynchronizationStatus: 0
EducationRequirement: ""
EducationRequirementGID: ""
EducationRequirementName: ""
EndAge: 0
ExpiredOn: "2020-12-31T00:00:00"
ExpiredOnStr: "2020-12-31"
FilledPositions: 0
FilterRules: null
FromType: "social"
GenderRequirement: ""
IsForever: null
IsHeadhunter: null
IsSocialView: false
JobCategory: ""
JobCategoryGID: ""
JobCategoryName: ""
JobCode: ""
JobDescription: ""
JobDisplay: "支行行长"
JobDisplayShowPaper: ""
JobGID: "01435e0d-60e6-4d9c-add9-1025fee76927"
JobName: "支行行长|行长室"
JobNameGID: ""
JobRequirement: "<p>1.年龄40岁(含)以下,全日制大学本科及以上学历;</p><p>2.5年及以上银行从业经验,3年及以上银行网点零售业务管理经验;</p><p>3.较强的业务开拓、风险把控、团队管理和综合协调能力;</p><p>4.具有良好的品质和职业操守,无不良、违纪和违法行为纪录;</p><p>5.特别优秀者,以上条件可适当放宽。</p>"
JobResponsibility: "<p>1.统筹支行零售经营管理目标,完成综合绩效考核指标;</p><p>2.负责支行零售各项营销工作,树立良好服务品牌,建立和维护与重要客户的关系,不断拓展市场份额;</p><p>3.负责支行内控合规、安全保卫、综合事务等工作。</p>"
JobType: ""
JobTypeName: ""
Location: "贵阳市"
LocationGID: ""
LocationName: ""
MajorRequirement: ""
MajorRequirementName: ""
OpenPositions: 0
OrgID: ""
OrgName: "贵阳分行"
Owner: ""
OwnerEmail: ""
OwnerPhone: ""
PositionsRate: 0
PresentRequirement: ""
PresentRequirementName: ""
PublishGID: "565c6374-5029-4bde-8fd2-61cfbcddc59f"
PublishIsDistributionChannels: false
PublishIsPersonnelExchangeCentre: false
PublishedOn: "2020-03-31T00:00:00"
PublishedOnStr: "2020-03-31"
RecruitJobTypeID: 1
RecruitJobTypeName: ""
RecruitmentTypeID: null
RecruitmentTypeName: ""
Remark: ""
RequireDept: ""
RequireDeptName: ""
RequireGroup: ""
RowCreated: "2020-03-30T18:15:59.45"
RowCreatedBy: ""
RowCreatedByName: ""
RowDeleted: null
RowDeletedBy: ""
RowIsDeleted: null
RowUpdated: "0001-01-01T00:00:00"
RowUpdatedBy: ""
SalaryRange: ""
SalaryRangeGID: ""
ShowTab: 0
Status: 3
StatusStr: "已审核"
Synchronization: ""
TalentRecruitmentStatus: 0
TolalPositions: 0
UpdateCallbackStatus: ""
WarnState: ""
WorkingYears: null
WorkingYearsName: ""
isAdd: false
isaudit: false
job: ""
job_id: ""
longtermsocial: 0
score: 0
'''
class CmbchinaItem(Item):
    """Item mapping one CMB (招商银行) job posting from the JSON API to fields.

    `json_select` keys refer to keys of each entry under the API's `Result`
    list (see the sample payload documented above this class).
    """
    # Detail-page URL template; %s is filled with the job's JobGID.
    url_detail = 'http://career.cmbchina.com/index.html#jobDetail?id=%s&returnUrl=#jobList?id=1'
    # Each item is extracted from an element of the top-level `Result` array.
    target_item = JsonField(json_select='Result')
    bank_name = JsonField(default='招商银行')
    type_main = JsonField(default='社会招聘')
    name = JsonField(json_select='JobDisplay')
    job_id = JsonField(json_select='JobGID')
    branch_name = JsonField(json_select='OrgName')
    department = JsonField(json_select='JobName')
    subject = JsonField(json_select='MajorRequirementName')
    education = JsonField(json_select='EducationRequirementName')
    requirement = JsonField(json_select='JobRequirement')
    content = JsonField(json_select='JobResponsibility')
    place = JsonField(json_select='Location')
    date_publish = JsonField(json_select='PublishedOnStr')
    date_close = JsonField(json_select='ExpiredOnStr')
    async def clean_date_publish(self, value):
        """Normalize the date-only publish string to a full datetime string."""
        return value + ' 00:00:00'
    async def clean_date_close(self, value):
        """Normalize the date-only close string to a full datetime string."""
        return value + ' 00:00:00'
def make_form_data(page_count: int, page_size: int = 6):
    """Build the POST payloads for the CMB job-list API.

    For each of the first *page_count* pages (1-based ``pageIndex``), two
    payloads are produced: one with ``recruitJobTypeID`` "1" and one with "0"
    (the API distinguishes two recruitment job types).

    :param page_count: number of pages to request.
    :param page_size: jobs per page; generalizes the previously hard-coded 6.
    :return: list of form dicts, length ``2 * page_count``.
    """
    formdata_list = []
    for page_index in range(1, page_count + 1):
        # Query both recruit job types for every page, "1" first (as before).
        for job_type_id in ("1", "0"):
            formdata_list.append({
                "branchCode": "",
                "pageIndex": page_index,
                "pageSize": page_size,
                "recruitJobTypeID": job_type_id,
                "searchWords": "",
                "location": "",
            })
    return formdata_list
# Only the first 10 pages are crawled on each run.
class CmbchinaWorker(Spider):
    """Spider for CMB (招商银行) social-recruitment job postings."""
    name = 'CmbchinaWorker'
    bank_name = '招商银行'
    page_size = 6
    start_urls = ['http://career.cmbchina.com/api/JobPublish/GetJobPublishList']
    # POST bodies for the first 10 pages (two recruit job types per page).
    form_data = make_form_data(10)
    form_data_type = 'json'
    async def parse(self, response):
        """Parse one API response: extract items and persist each as a Job."""
        jsondata = await response.json()
        async for item in CmbchinaItem.get_json(jsondata=jsondata):
            data = item.results
            job = Job.do_load(data)
            await self.save_job(job)
def start():
    """Module entry point; the worker launch is currently disabled."""
    # CmbchinaWorker.start()
    return None
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Test the Nuclear method in cclib"""
import sys
import os
import re
import logging
import unittest
import numpy as np
from cclib.method import Nuclear
from cclib.parser import ccData
from cclib.parser import DALTON
from cclib.parser import Gaussian
from cclib.parser import QChem
from cclib.parser import utils
sys.path.insert(1, "..")
from ..test_data import getdatafile
class NuclearTest(unittest.TestCase):
    """Unit tests for cclib's Nuclear method."""
    def test_stoichiometry(self):
        """Testing stoichiometry generation."""
        data = ccData()
        def check(atomnos, formula, charge=0):
            # Build a minimal ccData with dummy coordinates and compare the
            # generated stoichiometry string against the expected formula.
            data.natom = len(atomnos)
            data.atomnos = np.array(atomnos)
            data.atomcoords = np.zeros((data.natom, 3))
            data.charge = charge
            self.assertEqual(Nuclear(data).stoichiometry(), formula)
        # Basics and permutations.
        check([], "")
        check([6, 1, 6, 1, 1, 1], "C2H4")
        check([1, 1, 1, 6, 1, 6], "C2H4")
        # Charges.
        check([8], "O", charge=0)
        check([8], "O(+1)", charge=1)
        check([8], "O(-1)", charge=-1)
        check([8], "O(+2)", charge=2)
        check([8], "O(+9)", charge=9)
        # Element counts.
        check([6, 1], "CH")
        check([6] * 60, "C60")
        # Test the Hill system.
        check([8, 1, 1], "H2O")
        check([6, 8, 8, 1, 1], "CH2O2")
        check([16, 16, 8, 8], "O2S2")
    def test_repulsion_energy(self):
        """Testing nuclear repulsion energy for one logfile where it is printed."""
        data, logfile = getdatafile(QChem, "basicQChem4.2", ["water_mp4sdq.out"])
        nuclear = Nuclear(data)
        nuclear.logger.setLevel(logging.ERROR)
        # Pull the reference value straight out of the raw log text.
        with open(logfile.filename) as f:
            output = f.read()
        line = re.search('Nuclear Repulsion Energy = .* hartrees', output).group()
        nre = float(line.split()[4])
        # NOTE(review): unit conversion via 'Angstrom'->'bohr' on an energy is
        # presumably compensating for coordinate units in repulsion_energy();
        # confirm against the Nuclear.repulsion_energy implementation.
        nre = utils.convertor(nre, 'Angstrom', 'bohr')
        self.assertAlmostEqual(nuclear.repulsion_energy(), nre, places=7)
    @unittest.skipIf(sys.version_info < (3, 0), "The periodictable package doesn't work in Python2.")
    def test_principal_moments_of_inertia(self):
        """Testing principal moments of inertia and the principal axes for one
        logfile where it is printed.
        """
        data, logfile = getdatafile(DALTON, "basicDALTON-2015", ["dvb_sp_hf.out"])
        nuclear = Nuclear(data)
        nuclear.logger.setLevel(logging.ERROR)
        ref_pmoi = []
        ref_axes = []
        # Scan the log for the inertia table: two header lines, then three
        # rows of "label  moment  axis_x axis_y axis_z".
        with open(logfile.filename) as f:
            for line in f:
                if line.strip() == "Principal moments of inertia (u*A**2) and principal axes":
                    next(f)
                    next(f)
                    for _ in range(3):
                        tokens = [float(x) for x in next(f).split()[1:]]
                        ref_pmoi.append(tokens[0])
                        ref_axes.append(tokens[1:])
        pmoi, axes = nuclear.principal_moments_of_inertia("amu_angstrom_2")
        np.testing.assert_allclose(pmoi, ref_pmoi, rtol=0, atol=1.0e-4)
        # The phases of the eigenvectors may be different, but they
        # are still orthonormal within each set.
        np.testing.assert_allclose(np.abs(axes), np.abs(ref_axes), rtol=0, atol=1.0e-4)
    @unittest.skipIf(sys.version_info < (3, 0), "The periodictable package doesn't work in Python2.")
    def test_rotational_constants(self):
        """Testing rotational constants for two logfiles where they are
        printed.
        """
        data, logfile = getdatafile(DALTON, "basicDALTON-2015", ["dvb_sp_hf.out"])
        nuclear = Nuclear(data)
        nuclear.logger.setLevel(logging.ERROR)
        # DALTON prints the constants a few lines below an 'A B C' header,
        # first in MHz and then in cm^-1.
        with open(logfile.filename) as f:
            for line in f:
                if line.strip() == "Rotational constants":
                    while line.split() != ['A', 'B', 'C']:
                        line = next(f)
                    line = next(f)
                    ref_mhz = [float(x) for x in next(f).split()[:-1]]
                    ref_invcm = [float(x) for x in next(f).split()[:-1]]
        rotconsts_ghz = nuclear.rotational_constants('ghz')
        rotconsts_invcm = nuclear.rotational_constants('invcm')
        np.testing.assert_allclose(rotconsts_ghz * 1.0e3, ref_mhz, rtol=0, atol=1.0e-4)
        np.testing.assert_allclose(rotconsts_invcm, ref_invcm, rtol=0, atol=1.0e-4)
        data, logfile = getdatafile(Gaussian, "basicGaussian16", ["dvb_sp.out"])
        nuclear = Nuclear(data)
        nuclear.logger.setLevel(logging.ERROR)
        # Gaussian prints the GHz values inline on a single line.
        with open(logfile.filename) as f:
            for line in f:
                if "Rotational constants (GHZ):" in line:
                    ref_ghz = [float(x) for x in line.split()[3:]]
        rotconsts_ghz = nuclear.rotational_constants('ghz')
        np.testing.assert_allclose(rotconsts_ghz, ref_ghz, rtol=0, atol=1.0e-5)
if __name__ == "__main__":
    # Run this module's tests with verbose output when executed directly.
    unittest.TextTestRunner(verbosity=2).run(unittest.makeSuite(NuclearTest))
|
import logging
import os
import psutil
from qtoggleserver.conf import settings
from . import battery
from . import conf
from . import date
from . import dns
from . import fwupdate
from . import net
from . import storage
from . import temperature
logger = logging.getLogger(__name__)
def reboot() -> None:
    """Reboot the machine, unless the server runs in debug mode."""
    logger.debug('rebooting')
    if settings.debug:
        # Never reboot a developer machine.
        return
    os.system('reboot')
def uptime() -> int:
    """Return the system uptime in whole seconds, read from /proc/uptime.

    The first whitespace-separated field of /proc/uptime's single line is the
    uptime as a float; only that field is needed, so read one line instead of
    materializing the whole file with readlines().
    """
    with open('/proc/uptime', 'r') as f:
        first_line = f.readline()
    return int(float(first_line.split()[0]))
def get_cpu_usage() -> int:
    """Return the current CPU utilisation as an integer percentage."""
    percent = psutil.cpu_percent()
    return int(percent)
def get_mem_usage() -> int:
    """Return used-memory percentage, derived from available vs. total RAM."""
    vm = psutil.virtual_memory()
    used_fraction = 1 - vm.available / vm.total
    return int(100 * used_fraction)
async def init() -> None:
    """Initialize system subsystems; currently only DNS needs async setup."""
    logger.debug('initializing DNS')
    await dns.init()
async def cleanup() -> None:
    """Tear down system subsystems; mirror of init()."""
    logger.debug('cleaning up DNS')
    await dns.cleanup()
|
import numpy as np
class DigitUtils:
    """Digit-statistics helpers for big-number objects exposing `.baseCoef`."""

    @staticmethod
    def countPrimeDigits(heftyNum, digits) -> dict:
        """Count occurrences of each digit of *digits* in heftyNum.baseCoef.

        BUG FIX: the method had neither `self` nor @staticmethod, so calling it
        on an instance raised TypeError; it is now a proper staticmethod, which
        keeps class-level calls (DigitUtils.countPrimeDigits(...)) working.

        :param heftyNum: object with a numpy-iterable `baseCoef` attribute
                         (assumed to hold one digit per element — TODO confirm).
        :param digits: a digit or list of digits to count.
        :return: dict mapping each requested digit to its occurrence count.
        """
        if type(digits) is not list:
            digits = [digits]
        counts = dict.fromkeys(digits, 0)
        for coef in np.nditer(heftyNum.baseCoef):
            if coef in digits:
                counts[int(coef)] += 1
        return counts
|
from pylabnet.utils.decorators.gui_decorators import handle_gui_errors
class GUIHandler():
    """Generic GUI handler class providing an error-tolerant GUI client - script Interface.
    Member variables of this class keep track of connection parameters
    (is_running, is_connected, etc.), and global error handling is performed
    in the @handle_gui_errors decorator, which is applied to all calls
    from the GUIHandler to a GUI client.
    :gui_client: (object)
        GUI client to be called.
    :logger_client: (object)
        Logger client used for error logging in @handle_gui_errors
        decorator.
    """
    def __init__(self, gui_client=None, logger_client=None):
        self.is_running = False  # Flag which lets us know if WlmMonitor is running
        self.is_paused = False  # Flag which tells us we have simply paused WlmMonitor operation
        self.gui_connected = False  # Flag which alerts if a GUI client has been connected successfully
        self.gui_reconnect = False  # Flag which tells us to try reconnecting to the GUI client
        self.gui_client = gui_client
        self.logger_client = logger_client
    def assign_gui(self, gui_client):
        """Assigns a GUI client to the GUI handler
        :param gui_client:
            (obj) instance of GUI client
        """
        # BUG FIX: previously this assigned to the otherwise-unused attribute
        # `self.gui`, so every wrapped call below kept using the stale (or None)
        # `self.gui_client`. Update the attribute the wrappers actually use;
        # `self.gui` is still set for any external code reading the old name.
        self.gui_client = gui_client
        self.gui = gui_client
    def pause(self):
        """Pauses the wavemeter monitor"""
        self.is_running = False
        self.is_paused = True
    def resume(self):
        """Resumes the wavemeter monitor when paused"""
        self.is_paused = False
    def reconnect_gui(self):
        """ Reconnects to the GUI
        Should be called if the GUI connection has been lost, once a new GUI client with the same access parameters has
        been reinstantiated
        """
        self.gui_reconnect = True
    # Functions called on gui client with corresponding error_handling decorator
    @handle_gui_errors
    def assign_plot(self, plot_widget, plot_label, legend_widget):
        """Assign a plot widget on the GUI client."""
        return self.gui_client.assign_plot(
            plot_widget=plot_widget,
            plot_label=plot_label,
            legend_widget=legend_widget
        )
    @handle_gui_errors
    def clear_plot(self, plot_widget):
        """Clear a plot widget on the GUI client."""
        return self.gui_client.clear_plot(
            plot_widget=plot_widget
        )
    @handle_gui_errors
    def assign_curve(self, plot_label, curve_label, error=False):
        """Assign a curve to a plot.

        NOTE(review): `error` is accepted but not forwarded to the client;
        kept as-is since the client call signature is not visible here.
        """
        return self.gui_client.assign_curve(
            plot_label=plot_label,
            curve_label=curve_label
        )
    @handle_gui_errors
    def remove_curve(self, plot_label, curve_label):
        """Remove a curve from a plot."""
        return self.gui_client.remove_curve(
            plot_label=plot_label,
            curve_label=curve_label
        )
    @handle_gui_errors
    def assign_scalar(self, scalar_widget, scalar_label):
        """Assign a scalar widget (client return value intentionally dropped,
        matching the original behavior)."""
        self.gui_client.assign_scalar(
            scalar_widget=scalar_widget,
            scalar_label=scalar_label
        )
    @handle_gui_errors
    def assign_label(self, label_widget, label_label):
        """Assign a label widget."""
        return self.gui_client.assign_label(
            label_widget=label_widget,
            label_label=label_label
        )
    @handle_gui_errors
    def assign_event_button(self, event_widget, event_label):
        """Assign an event button widget."""
        return self.gui_client.assign_event_button(
            event_widget=event_widget,
            event_label=event_label,
        )
    @handle_gui_errors
    def assign_event_button_event(self, event_label, function):
        """Bind a callback function to an event button."""
        return self.gui_client.assign_event_button_event(
            event_label=event_label,
            function=function,
        )
    @handle_gui_errors
    def assign_container(self, container_widget, container_label):
        """Assign a container widget."""
        return self.gui_client.assign_container(
            container_widget, container_label
        )
    @handle_gui_errors
    def set_curve_data(self, data, plot_label, curve_label, error=None):
        """Push data (and optional error bars) to a curve."""
        return self.gui_client.set_curve_data(
            data=data,
            plot_label=plot_label,
            curve_label=curve_label,
            error=error
        )
    @handle_gui_errors
    def set_scalar(self, value, scalar_label):
        """Set a scalar widget's value."""
        return self.gui_client.set_scalar(
            value=value,
            scalar_label=scalar_label
        )
    @handle_gui_errors
    def get_scalar(self, scalar_label):
        """Read a scalar widget's value."""
        return self.gui_client.get_scalar(scalar_label)
    @handle_gui_errors
    def activate_scalar(self, scalar_label):
        """Activate a scalar widget."""
        return self.gui_client.activate_scalar(scalar_label)
    @handle_gui_errors
    def deactivate_scalar(self, scalar_label):
        """Deactivate a scalar widget."""
        return self.gui_client.deactivate_scalar(scalar_label)
    @handle_gui_errors
    def set_label(self, text, label_label):
        """Set a label widget's text."""
        return self.gui_client.set_label(
            text=text,
            label_label=label_label
        )
    @handle_gui_errors
    def get_text(self, label_label):
        """Read a label widget's text."""
        return self.gui_client.get_text(
            label_label
        )
    @handle_gui_errors
    def was_button_pressed(self, event_label):
        """Check whether an event button was pressed."""
        return self.gui_client.was_button_pressed(event_label)
    @handle_gui_errors
    def was_button_released(self, event_label):
        """Check whether an event button was released."""
        return self.gui_client.was_button_released(event_label)
    @handle_gui_errors
    def is_pressed(self, event_label):
        """Check whether an event button is currently pressed."""
        return self.gui_client.is_pressed(event_label)
    @handle_gui_errors
    def reset_button(self, event_label):
        """Reset an event button's state."""
        return self.gui_client.reset_button(event_label)
    @handle_gui_errors
    def change_button_background_color(self, event_label, color):
        """Change an event button's background color."""
        return self.gui_client.change_button_background_color(event_label, color)
    @handle_gui_errors
    def get_item_index(self, container_label):
        """Read the selected index of a container widget."""
        return self.gui_client.get_item_index(container_label)
    @handle_gui_errors
    def set_item_index(self, container_label, index):
        """Set the selected index of a container widget."""
        return self.gui_client.set_item_index(container_label, index)
    @handle_gui_errors
    def set_button_text(self, event_label, text):
        """Set an event button's text."""
        return self.gui_client.set_button_text(event_label, text)
    @handle_gui_errors
    def save_gui(self, config_filename, folder_root=None, logger=None, scalars=None, labels=None):
        """Save the GUI state to a config file.

        BUG FIX: `scalars=[]` / `labels=[]` were mutable default arguments
        shared across calls; replaced with None sentinels.
        """
        scalars = [] if scalars is None else scalars
        labels = [] if labels is None else labels
        return self.gui_client.save_gui(config_filename, folder_root, logger, scalars, labels)
    @handle_gui_errors
    def load_gui(self, config_filename, folder_root=None, logger=None):
        """Load the GUI state from a config file."""
        return self.gui_client.load_gui(config_filename, folder_root, logger)
# Play an MP3 with pygame's mixer; input() keeps the process alive so the
# music thread can be heard ("Escuta o som" = "Listen to the sound").
import pygame
pygame.mixer.init()
# NOTE(review): 'morena.mp3' must exist in the current working directory.
pygame.mixer.music.load('morena.mp3')
pygame.mixer.music.play()
input("Escuta o som")
from ..Base import Base
from . import u_logger as log
import random
import math
from scipy.special import erf, erfinv
# noinspection SpellCheckingInspection
class Gacha(Base):
    """Helpers for the card-gacha minigame (popularity and skill rolls)."""

    # Inclusive (low, high) skill-score ranges per card rarity.
    _SKILL_RANGES = {
        "common": (1, 20),
        "uncommon": (21, 40),
        "rare": (41, 60),
        "epic": (61, 80),
        "legendary": (81, 99),
    }

    def __init__(self, *args):
        super().__init__(*args)

    @staticmethod
    async def random_album_popularity():
        """Returns a truncated normal random popularity between 0 and 1 that follows the PDF
        f(y) = exp( -a * (y - c)^2 ) where a is the curvature and c is the bell center.
        This is a truncated normal distribution.
        The random variable transformation g(x) : x -> y needs to be used where x is a uniform distribution
        and y is the f(x) distribution. g(x) = Fy^-1( F(x) ) where Fx = x and Fy = erf( sqrt(a)*(y - c) )
        which are the corresponding CDFs of x and y. Solving we find that
        g(x) = erfinv(x) / sqrt(a) + c."""
        center_popularity = 0.8
        curvature = 40
        lower_bound = -1
        upper_bound = erf(math.sqrt(curvature) * (1 - center_popularity))
        x = random.uniform(lower_bound, upper_bound)
        y = erfinv(x) / math.sqrt(curvature) + center_popularity
        return y

    async def random_skill_score(self, card_rarity):
        """Return a random skill score for rap/dance/vocal for the gacha card between 1 and 99
        dependent on the rarity of the card.

        BUG FIX: the original if/elif chain computed random.randint(...) but
        never returned it, so every call produced None.
        """
        bounds = self._SKILL_RANGES.get(card_rarity)
        if bounds is None:
            raise self.ex.exceptions.ShouldNotBeHere(f"random_skill_score received the card rarity: {card_rarity} "
                                                     f"which is not a valid card_rarity.")
        return random.randint(*bounds)

    @staticmethod
    async def get_all_skill_scores(idol_skill_type, card_rarity):
        """Returns the rap, dance, and vocal scores of an idol"""
        skill_types = {"rap": 0, "dance": 1, "vocal": 2}
        all_skills = [0, 0, 0]
        # BUG FIX: the original called the unbound instance method
        # Gacha.random_skill_score(card_rarity), binding card_rarity to `self`
        # and always raising TypeError. Draw directly from the shared ranges.
        low, high = Gacha._SKILL_RANGES[card_rarity]
        all_skills[skill_types.get(idol_skill_type)] = random.randint(low, high)
        return all_skills
# self.ex.u_gacha = Gacha()
|
def lexNext(s, n):
    """Return the lexicographically next lowercase string after *s*.

    :param s: the string as a list of characters (modified in place).
    :param n: length of s.
    :return: the next string; for an all-'z' input every character wraps to
             'a' and one extra 'a' is prepended ("zz" -> "aaa").

    BUG FIX: the original fell off the loop for all-'z' input and silently
    returned None after mutating s; now the wrapped, lengthened string is
    returned instead.
    """
    # Scan from the last character, incrementing the first non-'z' found and
    # wrapping every trailing 'z' to 'a'.
    for i in range(n - 1, -1, -1):
        if s[i] != 'z':
            s[i] = chr(ord(s[i]) + 1)
            return ''.join(s)
        s[i] = 'a'
    # Every character was 'z': the next string is one character longer.
    return 'a' + ''.join(s)
# Demo: compute and print the successor of "abcdeg".
S = "abcdeg"
n = len(S)
S = list(S)
res = lexNext(S, n)
# BUG FIX: the original printed the undefined name `s` (NameError at runtime).
print(res)
|
from serif.theory.serif_syn_node_theory import SerifSynNodeTheory
from serif.theory.token import Token
from serif.xmlio import _SimpleAttribute, _ReferenceAttribute, _ChildTheoryElementList
class SynNode(SerifSynNodeTheory):
    """Syntactic parse-tree node: a tag spanning a start/end token range."""
    # Constituent label (e.g. a phrase or POS tag).
    tag = _SimpleAttribute(is_required=True)
    # First and last tokens covered by this node (inclusive references).
    start_token = _ReferenceAttribute('start_token', cls=Token,
                                      is_required=True)
    end_token = _ReferenceAttribute('end_token', cls=Token,
                                    is_required=True)
    # Whether this node is the head child of its parent.
    is_head = _SimpleAttribute(bool, default=False)
    # Nested child SynNode elements.
    _children = _ChildTheoryElementList('SynNode')
|
# Simple python program to display current date and time.
# Author: Adrian Sypos
# Date: 21/09/2017

import time

# struct_time for the current moment (all nine fields populated).
localtime = time.localtime(time.time())

# Show the raw, unformatted time-tuple.
print("Local current time :", localtime)

# Re-read the clock and format it as a human-readable string.
localtime = time.asctime(time.localtime(time.time()))

# Show the formatted time.
print("Local current time :", localtime)
import inspect
import pkg_resources
from datetime import timedelta
from moksha.api.hub import Consumer
from moksha.pastetemplate import MokshaConsumerTemplate
from base import QuickstartTester, setup_quickstart, teardown_quickstart
app = None
def setup():
    """Quickstart a throwaway 'mokshatest' consumer project for the tests."""
    global app
    template_vars = {
        'package': 'mokshatest',
        'project': 'mokshatest',
        'egg': 'mokshatest',
        'egg_plugins': ['Moksha'],
        'topic': 'moksha.topics.test',
    }
    quickstart_args = {
        'consumer': True,
        'consumer_name': 'MokshatestConsumer',
    }
    app = setup_quickstart(template=MokshaConsumerTemplate,
                           templates=['moksha.consumer'],
                           args=quickstart_args,
                           template_vars=template_vars)
def teardown():
    """Remove the quickstarted test project."""
    teardown_quickstart()
class TestConsumerQuickstart(QuickstartTester):
    # Python 2 test case validating the quickstarted consumer entry point.
    def setUp(self):
        # Reuse the module-level app built once in setup().
        self.app = app
    def get_consumer(self):
        # Resolve the consumer registered on the 'moksha.consumer' entry-point.
        return self.get_entry('moksha.consumer')
    def test_entry_point(self):
        assert self.get_consumer(), \
                "Cannot find mokshatest on `moksha.consumer` entry-point"
    def test_polling_dataconsumer(self):
        # The entry point may resolve to an instance or to the class itself.
        consumer = self.get_consumer()
        print consumer
        assert isinstance(consumer, Consumer) or \
                issubclass(consumer, Consumer)
    def test_consumer_topic(self):
        """ Ensure the Consumer has a topic """
        consumer = self.get_consumer()
        assert hasattr(consumer, 'topic')
    def test_consumer_consume(self):
        """ Ensure our Consumer has a `consume` method """
        consumer = self.get_consumer()
        assert hasattr(consumer, 'consume')
|
#!/usr/bin/env python
import os
import sys
import getpass
try:
import Environment
from Executor import *
except ImportError, e:
print "Couldn't find project-utils modules."
sys.exit(1)
# Default simulated-network setup: one TAP interface per 10G QSFP port,
# pairing the DFE-side address with the host-side TAP address.
network_config = [
	{ 'NAME' : 'QSFP_TOP_10G_PORT1', 'DFE': '172.16.50.1', 'TAP': '172.16.50.10', 'NETMASK' : '255.255.255.0' },
	{ 'NAME' : 'QSFP_BOT_10G_PORT1', 'DFE': '172.16.60.1', 'TAP': '172.16.60.10', 'NETMASK' : '255.255.255.0' }
]
class MaxCompilerSim(Executor):
    # Python 2 wrapper around the `maxcompilersim` CLI: starts/stops a
    # per-user DFE simulator and swaps the process environment so SLiC
    # applications talk to the simulation instead of real hardware.
    def __init__(self, dfeModel):
        super(MaxCompilerSim, self).__init__(logPrefix="[MaxCompilerSim] ")
        self.MAXCOMPILERDIR = Environment.require("MAXCOMPILERDIR")
        self.dfeModel = dfeModel
        # Remember original values so revertSimEnv() can restore them.
        self.ORIG_MAXELEROSDIR = Environment.optional("MAXELEROSDIR")
        self.ORIG_LD_PRELOAD = Environment.optional("LD_PRELOAD")
        self.ORIG_SLIC_CONF = Environment.optional("SLIC_CONF")
        self.envSet = False
    def getSimName(self):
        # Per-user simulator name avoids clashes on shared machines.
        return getpass.getuser() + 'Sim'
    def getSimDeviceName(self):
        # SLiC device string of the form "<sim>0:<sim>".
        return '%s0:%s' % (self.getSimName(), self.getSimName())
    def getSimNameParam(self):
        return ['-n', self.getSimName()]
    def getMaxCompilerSim(self):
        return ['%s/bin/maxcompilersim' % self.MAXCOMPILERDIR]
    def getDfeModelParam(self):
        return ['-c', self.dfeModel]
    def getNetSimParams(self, config):
        # Translate each network_config entry into -e (TAP) and -p (pcap
        # capture) arguments.
        params = []
        for p in config:
            params += ['-e', p['NAME'] + ':%s:%s' % (p['TAP'], p['NETMASK'])]
            params += ['-p', p['NAME'] + ':%s.pcap' % (p['NAME'])]
        return params
    def getSimBaseParams(self):
        return self.getMaxCompilerSim() + self.getSimNameParam()
    def getSimParams(self, netConfig):
        return self.getSimBaseParams() + self.getDfeModelParam() + self.getNetSimParams(netConfig)
    def start(self, netConfig=network_config):
        # "restart" both stops any stale instance and boots a fresh one.
        if self.isRunning():
            print "Cannot start another instance of the simulator. Please stop the previous one."
            return
        self.execCommand(self.getSimParams(netConfig) + ["restart"])
        self.wait()
        self.setSimEnv()
    def stop(self):
        self.execCommand(self.getSimBaseParams() + ["stop"] )
        self.wait()
        self.revertSimEnv()
    def setSimEnv(self):
        # Point MaxelerOS/SLiC at the simulator libraries; idempotent via envSet.
        if not self.envSet:
            print "Setting Simulation Environment..."
            maxelerosdir = self.MAXCOMPILERDIR + "/lib/maxeleros-sim"
            Environment.set("MAXELEROSDIR", maxelerosdir)
            Environment.set("LD_PRELOAD", maxelerosdir + "/lib/libmaxeleros.so:" + self.ORIG_LD_PRELOAD)
            Environment.set("SLIC_CONF", self.ORIG_SLIC_CONF + ";use_simulation=" + self.getSimName())
            self.envSet = True
    def revertSimEnv(self):
        # Restore the environment captured at construction time.
        if self.envSet:
            print "Reverting Simulation Environment..."
            Environment.set("MAXELEROSDIR", self.ORIG_MAXELEROSDIR)
            Environment.set("LD_PRELOAD", self.ORIG_LD_PRELOAD)
            Environment.set("SLIC_CONF", self.ORIG_SLIC_CONF)
            self.envSet = False
    def maxdebug(self, maxfiles):
        # Produce a debug graph for each .max file against the running sim.
        self.setSimEnv()
        for m in maxfiles:
            self.execCommand(['maxdebug', '-g', 'graph_%s' % self.getSimName(), '-d', self.getSimDeviceName(), m])
            self.wait()
|
# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
from django.conf.urls.defaults import patterns, url
from datawinners.alldata.views import index
from datawinners.dashboard.views import dashboard, get_submission_breakup, get_submissions_about_project, geo_json_for_project, render_map
from datawinners.dashboard.views import start
# Dashboard URL routes (legacy Django patterns() style).
urlpatterns = patterns('',
    # Landing page reuses the all-data index view.
    url(r'^dashboard/$', index, name='dashboard'),
    (r'^start/$', start),
    # Per-project submission statistics endpoints.
    (r'^submission/breakup/(?P<project_id>.+?)/$', get_submission_breakup),
    (r'^submission/details/(?P<project_id>.+?)/$', get_submissions_about_project),
    # GeoJSON feeds for the project map, optionally filtered by entity type.
    (r'^get_geojson/(?P<project_id>.+?)/$', geo_json_for_project),
    (r'^get_geojson/(?P<project_id>.+?)/(?P<entity_type>.+?)$', geo_json_for_project),
    (r'^maps/entity_type$', render_map)
)
|
import os
import sys
import time
from torch import save,load, device
import pickle
LIMIT_4G = 3.8 * 1024 ** 3
def timeFormated() -> str:
    """Current GMT time formatted as 'HH-MM_DD-Mon-YY' (filename-safe)."""
    now = time.gmtime()
    return time.strftime("%H-%M_%d-%b-%y", now)
def timeFormatedS() -> str:
    """Current GMT time formatted as 'HH-MM-SS_DD-Mon-YY' (second precision)."""
    now = time.gmtime()
    return time.strftime("%H-%M-%S_%d-%b-%y", now)
def timeDiffFormated(start):
    """Format the time elapsed since *start* (a time.time() stamp).

    Returns (label, now) where label is 'Xh Ym Zs' / 'Ym Zs' / 'Zs' / '< 0s'
    and now is the current timestamp, handy for chaining calls.
    """
    now = time.time()
    elapsed = now - start
    hours = int(elapsed // 3600)
    mins = int(elapsed % 3600 // 60)
    secs = int(elapsed % 60)
    if hours > 0:
        label = "{}h {}m {}s".format(hours, mins, secs)
    elif mins > 0:
        label = "{}m {}s".format(mins, secs)
    elif secs > 0:
        label = "{}s".format(secs)
    else:
        label = "< 0s"
    return label, now
def goToDir(path):
    """Change into *path* (relative to $HOME), creating it if necessary.

    :param path: directory relative to the user's home (absolute paths are
                 honored as-is by os.path.join).
    :return: the resulting absolute working directory.

    The original used a bare except around chdir and then an unconditional
    makedirs, which both swallowed unrelated errors (permissions, HOME unset)
    and raced if the directory appeared in between; makedirs(exist_ok=True)
    covers both cases atomically.
    """
    home = os.getenv('HOME')
    target = os.path.join(home, path)
    os.makedirs(target, exist_ok=True)
    os.chdir(target)
    return os.getcwd()
def createFolder(path:str, mod:str):
    """Create (and chdir into) '<mod>_<timestamp>' under *path*.

    Returns (folder_name, absolute_path).
    """
    folder_name = '{}_{}'.format(mod, timeFormated())
    absolute_path = goToDir(os.path.join(path, folder_name))
    return folder_name, absolute_path
class Tocker:
    """Small stopwatch built on time.time().

    `tick` (re)starts the clock, `tock` formats the elapsed time and restarts,
    `tocktock` reports elapsed minutes without restarting.
    """
    def __init__(self):
        # Reading the property starts the clock.
        self.tick

    @property
    def tick(self):
        """(Re)start the stopwatch; returns the new start timestamp."""
        self.start = time.time()
        return self.start

    @property
    def tock(self):
        """Human-readable elapsed time since the last tick; restarts the clock."""
        label, self.start = timeDiffFormated(self.start)
        return label

    @property
    def tocktock(self):
        """
        Returns the time elapsed since the last tick in minutes
        """
        return (time.time() - self.start) * 0.016666667

    def lockHz(self, Hz:int):
        """Sleep out the remainder of one 1/Hz period since the last tick."""
        period = 1 / Hz
        leftover = period - (time.time() - self.start)
        if leftover > 0:
            time.sleep(leftover)
        return True
class Stack:
    """
    Dict stack working in a FIFO manner
    """
    def __init__(self):
        self.stack = dict()
        self.min = 0      # key of the oldest stored item
        self.actual = 0   # key the next added item will get

    def add(self, obj):
        """Store obj at the newest slot."""
        self.stack[self.actual] = obj
        self.actual += 1

    def pop(self):
        """Remove and return the oldest stored item (FIFO order)."""
        oldest = self.stack.pop(self.min)
        self.min += 1
        return oldest

    def __len__(self):
        return len(self.stack)
class Reference:
    # Tracks one object to be periodically persisted (torch state_dict or
    # pickle), keeping at most `limit` versioned files on disk via a FIFO of
    # previous save paths.
    def __init__(self, obj,
                 name: str,
                 limit:int,
                 torchType:bool = False,
                 device = device("cpu")):
        # torchType selects state_dict save/load instead of pickling.
        self.torchType = torchType
        self.name = name
        self.ref = obj
        # FIFO of previously written file paths, oldest first.
        self.prevVersions = Stack()
        self.limit = limit
        self.device = device
        self._version = 0
    def save(self, path):
        # Persist the object into `path`, then prune old versions.
        if self.torchType:
            self.saveTorch(path)
        else:
            self.savePy(path)
        self.clean(path)
    def clean(self, path):
        # Drop the oldest saved file once the version cap is reached.
        # Stored entries are already full paths, hence no join needed.
        if len(self.prevVersions) >= self.limit:
            target = self.prevVersions.pop()
            #target = os.path.join(path, target)
            os.remove(target)
    @staticmethod
    def loaderAssist(path):
        # Interactively let the user pick a file from `path`.
        # NOTE(review): the bounds check runs after int(choice), so
        # non-numeric input raises ValueError instead of re-prompting.
        os.chdir(path)
        files = os.listdir()
        print("Files on direction:")
        for n, File in enumerate(files):
            print("{} : {}".format(n, File))
        while 1:
            choice = input("Enter the number for the file to load :")
            choice = int(choice)
            if choice > len(files) or not isinstance(choice, int) or choice < 0:
                print("Number not valid. Please try again.")
            else:
                break
        return os.path.join(path, files[choice])
    def load(self, path):
        # Interactive restore of this reference from a chosen file.
        print("Trying to load in object {}".format(self.name))
        target = self.loaderAssist(path)
        if self.torchType:
            self.loadTorch(target, self.device)
        else:
            self.loadObj(target)
    def loadTorch(self, path, device):
        # Load a state_dict onto the wrapped torch module (strict keys).
        model = load(path, map_location=device)
        self.ref.load_state_dict(model, strict = True)
        print("Model successfully loaded from ", path)
    def loadObj(self, path):
        # Unpickle and REPLACE the wrapped object.
        fileHandler = open(path, 'rb')
        self.ref = pickle.load(fileHandler)
        fileHandler.close()
        print("Object successfully loaded from ", path)
    def saveTorch(self, path):
        # Write the module's state_dict to a versioned .modelst file.
        # NOTE(review): the bare except deliberately makes saving best-effort,
        # but it also hides real errors — consider logging the exception.
        name = self._gen_name() + ".modelst"
        path = os.path.join(path, name)
        try:
            stateDict = self.ref.state_dict()
            save(stateDict, path)
            self.prevVersions.add(path)
        except:
            None
    def savePy(self, path):
        # Pickle the object to a versioned .pyobj file, skipping anything
        # whose (shallow) size exceeds the ~4 GB limit.
        name = self._gen_name() + ".pyobj"
        path = os.path.join(path, name)
        if sys.getsizeof(self.ref) < LIMIT_4G:
            fileHandler = open(path, "wb")
            pickle.dump(self.ref, fileHandler)
            fileHandler.close()
            self.prevVersions.add(path)
    def _gen_name(self):
        # Monotonically versioned, timestamped file stem.
        self._version += 1
        return self.name + "_v{}".format(self._version) + "_" + timeFormated()
class Saver():
    """
    Object that administrates objects to dump
    save files if possible.
    parameters
    ----------
    envName: str
    path: str
        Path relative to Home to dump the saved files
    """
    def __init__(self, envName:str,
                 path:str = "PG_results/",
                 limitTimes:int = 5,
                 saveFreq:int = 40):
        # Creates (and enters) the run folder immediately.
        self.startPath, self.dir = createFolder(path, envName)
        self._objRefs_ = []
        self.names = set()
        self.limit = limitTimes
        self.time = Tocker()
        self.freq = saveFreq

    def start(self):
        """Start the periodic-save timer."""
        self.time.tick

    def check(self):
        """Save everything once the configured interval has elapsed."""
        if self.time.tocktock < self.freq:
            return
        self.saveAll()
        self.time.tick

    def addObj(self, obj,
               objName:str,
               isTorch:bool = False,
               device = device("cpu")):
        """Register *obj* for saving under the unique name *objName*."""
        if objName in self.names:
            raise KeyError
        self.names.add(objName)
        self.objRefs = self._objRefs_  # alias kept local to this statement
        self._objRefs_.append(Reference(obj, objName, self.limit, isTorch, device))

    def saveAll(self):
        """Dump every registered object into the run folder."""
        for reference in self._objRefs_:
            reference.save(self.dir)

    def load(self, path):
        """Interactively restore every registered object from *path*."""
        for reference in self._objRefs_:
            reference.load(path)
# -*- coding: utf-8 -*-
# Copyright 2014 Fanficdownloader team, 2018 FanFicFare team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Adapted by GComyn on April 16, 2017
from __future__ import absolute_import
import cgi
import difflib
import json
import logging
import re
import time
# py2 vs py3 transition
from ..six import text_type as unicode
from ..six.moves.urllib.error import HTTPError
from .base_adapter import BaseSiteAdapter
from .. import exceptions as exceptions
from ..htmlcleanup import stripHTML
from ..dateutils import parse_relative_date_string
HTML_TAGS = (
'a', 'abbr', 'acronym', 'address', 'applet', 'area', 'article', 'aside', 'audio', 'b', 'base', 'basefont', 'bdi',
'bdo', 'big', 'blockquote', 'body', 'br', 'button', 'canvas', 'caption', 'center', 'cite', 'code', 'col',
'colgroup', 'datalist', 'dd', 'del', 'details', 'dfn', 'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'embed',
'fieldset', 'figcaption', 'figure', 'font', 'footer', 'form', 'frame', 'frameset', 'h1', 'h2', 'h3', 'h4', 'h5',
'h6', 'head', 'header', 'hr', 'html', 'i', 'iframe', 'img', 'input', 'ins', 'kbd', 'label', 'legend', 'li', 'link',
'main', 'map', 'mark', 'menu', 'menuitem', 'meta', 'meter', 'nav', 'noframes', 'noscript', 'object', 'ol',
'optgroup', 'option', 'output', 'p', 'param', 'picture', 'pre', 'progress', 'q', 'rp', 'rt', 'ruby', 's', 'samp',
'script', 'section', 'select', 'small', 'source', 'span', 'strike', 'strong', 'style', 'sub', 'summary', 'sup',
'svg', 'table', 'tbody', 'td', 'template', 'textarea', 'tfoot', 'th', 'thead', 'time', 'title', 'tr', 'track', 'tt',
'u', 'ul', 'var', 'video', 'wbr')
# TinyMCE-specific annotations, let's ignore these just like previously
TINY_MCE_TAGS = 'anno', 'annotations'
logger = logging.getLogger(__name__)
pseudo_html_regex_format = r'(<+(?!/?(%s)>).*?>+)'
real_html_regex = re.compile(r'</?(?:%s)(?:\s.*?)?\s*>' % '|'.join(HTML_TAGS), re.IGNORECASE)
def getClass():
    """Return the adapter class this module provides (fanficfare plugin hook)."""
    return WWWWebNovelComAdapter
def fix_pseudo_html(pseudo_html, whitelist_tags=()):
    """Escape angle-bracket runs that are NOT real (or whitelisted) HTML tags.

    Tags listed in HTML_TAGS or *whitelist_tags* are left intact; anything
    else matched by pseudo_html_regex_format is HTML-escaped in place.
    NOTE(review): cgi.escape was removed in Python 3.13;
    html.escape(..., quote=False) is the drop-in replacement when upgrading.
    """
    tags = set(HTML_TAGS).union(whitelist_tags)
    pseudo_html_regex = re.compile(pseudo_html_regex_format % '|'.join(tags), re.IGNORECASE)
    return pseudo_html_regex.sub(lambda match: cgi.escape(match.group(1)), pseudo_html)
class WWWWebNovelComAdapter(BaseSiteAdapter):
_GET_VIP_CONTENT_DELAY = 8
    def __init__(self, config, url):
        """Initialize the adapter: derive storyId from the URL and normalize it."""
        BaseSiteAdapter.__init__(self, config, url)
        # get storyId from url
        # https://www.webnovel.com/book/6831837102000205
        self.story.setMetadata('storyId', self.parsedUrl.path.split('/')[2])
        # normalized story URL.
        self._setURL('https://' + self.getSiteDomain() + '/book/' + self.story.getMetadata('storyId'))
        # Each adapter needs to have a unique site abbreviation.
        self.story.setMetadata('siteabbrev', 'wncom')
        # Filled in during metadata extraction from the _csrfToken cookie.
        self._csrf_token = None
    @staticmethod # must be @staticmethod, don't remove it.
    def getSiteDomain():
        """Return the site domain (with www, as the site uses it)."""
        return 'www.webnovel.com'
    @classmethod
    def getSiteExampleURLs(cls):
        """Return an example story URL shown to users in error messages."""
        return 'https://' + cls.getSiteDomain() + '/book/123456789012345'
    def getSiteURLPattern(self):
        """Regex accepting this site's story URLs; `id` captures the story id.

        NOTE(review): '/book/*' means 'book' plus zero-or-more slashes, so
        'bookNNN' also matches — presumably intentional leniency; confirm.
        """
        return r'https://' + re.escape(self.getSiteDomain()) + r'/book/*(?P<id>\d+)'
    def use_pagecache(self):
        '''
        adapters that will work with the page cache need to implement
        this and change it to True.
        '''
        return True
    # Getting the chapter list and the meta data, plus 'is adult' checking.
    def doExtractChapterUrlsAndMetadata(self, get_cover=True):
        """Scrape the story page and chapter-list API for metadata and chapters.

        Raises StoryDoesNotExist on 404 or the site's not-found page, and
        FailedToDownload when no _csrfToken cookie is available for the API.
        """
        url = self.url
        try:
            data = self._fetchUrl(url)
        except HTTPError as e:
            if e.code == 404:
                raise exceptions.StoryDoesNotExist('Error 404: {0}'.format(self.url))
            else:
                raise e
        if 'We might have some troubles to find out this page.' in data:
            raise exceptions.StoryDoesNotExist('{0} says: "" for url "{1}"'.format(self.getSiteDomain(), self.url))
        # use BeautifulSoup HTML parser to make everything easier to find.
        soup = self.make_soup(data)
        # removing all of the scripts
        for tag in soup.findAll('script') + soup.find_all('svg'):
            tag.extract()
        # Now go hunting for all the meta data and the chapter list.
        # This is the block that holds the metadata
        bookdetails = soup.find('div', {'class': 'g_col_8'})
        # Title
        title = bookdetails.find('h2')
        # done as a loop incase there isn't one, or more than one.
        for tag in title.find_all('small'):
            tag.extract()
        self.story.setMetadata('title', stripHTML(title))
        # Completion status is shown inside the 'detail' paragraph.
        detail_txt = stripHTML(bookdetails.find('p', {'class': re.compile('detail')}))
        if "Completed" in detail_txt:
            self.story.setMetadata('status', 'Completed')
        else:
            self.story.setMetadata('status', 'In-Progress')
        meta_tag = bookdetails.find('address').p
        meta_txt = stripHTML(meta_tag)
        # Author/Translator/Editor are run together in one line; split out the
        # value following `label` and stop at the next known label.
        def parse_meta(mt,label,setmd):
            if label in mt:
                data = mt.split(label,1)[1].split('Translator:', 1)[0].split('Editor:', 1)[0].strip()
                if data:
                    # print("setting %s to %s"%(setmd, data))
                    self.story.setMetadata(setmd, data)
        parse_meta(meta_txt,'Author:','author')
        self.story.setMetadata('authorId', self.story.getMetadata('author'))
        # There is no authorUrl for this site, so I'm setting it to the story url
        # otherwise it defaults to the file location
        self.story.setMetadata('authorUrl', url)
        parse_meta(meta_txt,'Translator:','translator')
        parse_meta(meta_txt,'Editor:','editor')
        cats = bookdetails.find_all('a',href=re.compile(r'/category/list'))
        self.story.extendList('category',[stripHTML(cat) for cat in cats])
        poptags = soup.find('p',{'class':'pop-tags'})
        if poptags:
            sitetags = poptags.find_all('a',href=re.compile(r'/tag/list'))
            self.story.extendList('sitetags',[sitetag.string for sitetag in sitetags])
        ## get _csrfToken cookie for chapter list fetch
        for cookie in self.get_configuration().get_cookiejar():
            if cookie.name == '_csrfToken':
                self._csrf_token = csrf_token = cookie.value
                break
        else:
            raise exceptions.FailedToDownload('csrf token could not be found')
        ## get chapters from a json API url.
        jsondata = json.loads(self._fetchUrl(
            "https://" + self.getSiteDomain() + "/apiajax/chapter/GetChapterList?_csrfToken=" + csrf_token + "&bookId=" + self.story.getMetadata(
                'storyId')))
        # print json.dumps(jsondata, sort_keys=True,
        # indent=2, separators=(',', ':'))
        for volume in jsondata["data"]["volumeItems"]:
            for chap in volume["chapterItems"]:
                # Only allow free and VIP type 1 chapters
                if chap['isAuth'] not in [1]: # Ad wall indicator
                                              # seems to have changed
                                              # --JM
                    continue
                chap_title = 'Chapter ' + unicode(chap['index']) + ' - ' + chap['name']
                chap_Url = url.rstrip('/') + '/' + chap['id']
                self.add_chapter(chap_title, chap_Url)
        if get_cover:
            # Cover image lives in the sibling column; src is protocol-relative.
            cover_meta = soup.find('div', {'class': 'g_col_4'}).find('img')
            cover_url = 'https:' + cover_meta['src']
            self.setCoverImage(url, cover_url)
        detabt = soup.find('div', {'class': 'det-abt'})
        synopsis = detabt.find('p')
        self.setDescription(url, synopsis)
        rating = detabt.find('span',{'class': 'vam'})
        if rating:
            self.story.setMetadata('rating',rating.string)
        # newChapterTime is a relative string like '2 days ago'.
        last_updated_string = jsondata['data']['bookInfo']['newChapterTime']
        last_updated = parse_relative_date_string(last_updated_string)
        # Published date is always unknown, so simply don't set it
        # self.story.setMetadata('datePublished', UNIX_EPOCHE)
        self.story.setMetadata('dateUpdated', last_updated)
# grab the text for an individual chapter.
def getChapterText(self, url):
    """Fetch and return the HTML body of a single chapter.

    url -- chapter URL; its last path component is the chapter id.
    Returns an HTML string: rich-format API content joined as-is, or
    plain-text content escaped and converted to <p> paragraphs.
    """
    logger.debug('Getting chapter text from: %s' % url)
    book_id = self.story.getMetadata('storyId')
    chapter_id = url.split('/')[-1]
    # The trailing '_' query parameter is a cache-buster (ms timestamp).
    content_url = 'https://%s/apiajax/chapter/GetContent?_csrfToken=%s&bookId=%s&chapterId=%s&_=%d' % (
        self.getSiteDomain(), self._csrf_token, book_id, chapter_id, time.time() * 1000)
    topdata = json.loads(self._fetchUrl(content_url))
    # logger.debug(json.dumps(topdata, sort_keys=True,
    #                         indent=2, separators=(',', ':')))
    chapter_info = topdata['data']['chapterInfo']
    # Check if chapter is marked as VIP type 1 (requires an ad to be watched)
    if chapter_info['isVip'] == 1:
        # VIP content takes two round-trips: request a token, then
        # exchange the token for the actual chapter contents.
        content_token_url = 'https://%s/apiajax/chapter/GetChapterContentToken?_csrfToken=%s&bookId=%s&chapterId=%s' % (
            self.getSiteDomain(), self._csrf_token, self.story.getMetadata('storyId'), chapter_id)
        content_token = json.loads(self._fetchUrl(content_token_url))['data']['token']
        content_by_token_url = 'https://%s/apiajax/chapter/GetChapterContentByToken?_csrfToken=%s&token=%s' % (
            self.getSiteDomain(), self._csrf_token, content_token)
        # This is actually required or the data/content field will be empty
        time.sleep(self._GET_VIP_CONTENT_DELAY)
        contents = json.loads(self._fetchUrl(content_by_token_url))['data']['contents']
    else:
        contents = chapter_info['contents']
    # Content is HTML, so return it directly
    if chapter_info['isRichFormat']:
        content = "\n".join([ x['content'] for x in contents])
        if self.getConfig('fix_pseudo_html', False):
            # Attempt to fix pseudo HTML
            fixed_content = fix_pseudo_html(content, TINY_MCE_TAGS)
            if content != fixed_content:
                # Log a unified diff (split on real HTML tags) of what
                # the pseudo-HTML fixer changed.
                diff = difflib.unified_diff(
                    real_html_regex.split(content),
                    real_html_regex.split(fixed_content),
                    n=0, lineterm='')
                logger.info('fix_pseudo_html() modified content:\n%s', '\n'.join(diff))
                content = fixed_content
    else: # text format.
        content = "".join([ x['content'] for x in contents])
        # Content is raw text, so convert paired newlines into paragraphs like the website
        content = content.replace('\r', '')
        content = cgi.escape(content)
        content = re.sub(r'\n(.+?)\n', r'<p>\1</p>', content)
    return content
|
#!/usr/bin/env python3
# Thin launcher script: delegate directly to instaloader's CLI entry point.
from instaloader.__main__ import main

if __name__ == '__main__':
    main()
|
#!/usr/bin/env python3
#https://codeforces.com/problemset/problem/1020/B
# Find the cycle: follow successor links from each node until a node repeats.
def who(al, i):
    """Walk successor links from node *i* until some node is seen twice.

    ``al`` maps each index to its successor.  Returns the first node
    revisited on the walk, i.e. the entry point of the cycle reachable
    from ``i``.
    """
    visited = [False] * len(al)
    while not visited[i]:
        visited[i] = True
        i = al[i]
    return i
# Read the number of nodes, then the 1-based successor list (index 0 is padding).
n = int(input()) #1000
al = [0] + list(map(int,input().split())) #al
# For every node 1..n, print the node at which its walk first revisits a vertex.
print(*[who(al,i) for i in range(1,n+1)])
|
"""
Interactions with other mF2C components
This is being developed for the MF2C Project: http://www.mf2c-project.eu/
Copyright: Roi Sucasas Font, Atos Research and Innovation, 2017.
This code is licensed under an Apache 2.0 license. Please, refer to the LICENSE.TXT file for more information
Created on 09 may. 2019
@author: Roi Sucasas - ATOS
"""
import requests, json
import config
from lifecycle.logs import LOG
from lifecycle.common import TRACE
# SET UM INFORMATION
# set_um_properties: call to lifecycle from other agent in order to update user management properties
def set_um_properties(apps=0):
    """Update the local User Management 'apps_running' counter via HTTP PUT.

    apps -- number of applications currently running on this agent.
    Returns the UM response parsed as JSON on HTTP 200, otherwise None.
    """
    LOG.log(TRACE, "[lifecycle.connectors.atos.user_manager] [set_um_properties] localhost - local UM: Updating UM properties ...")
    try:
        LOG.info("[lifecycle.connectors.atos.user_manager] [set_um_properties] HTTP PUT: " + str(config.dic['URL_AC_USER_MANAGEMENT']) + "/sharing-model")
        r = requests.put(str(config.dic['URL_AC_USER_MANAGEMENT']) + "/sharing-model",
                         json={"apps_running": apps},
                         verify=config.dic['VERIFY_SSL'])
        LOG.log(TRACE, "[lifecycle.connectors.atos.user_manager] [set_um_properties] response: " + str(r) + ", " + str(r.json()))
        if r.status_code == 200:
            json_data = json.loads(r.text)
            LOG.debug('[lifecycle.connectors.atos.user_manager] [set_um_properties] json_data=' + str(json_data))
            return json_data
        LOG.error("[lifecycle.connectors.atos.user_manager] [set_um_properties] Error: status_code=" + str(r.status_code) + "; Returning None ...")
    except Exception:
        # Catch Exception (not a bare except) so SystemExit/KeyboardInterrupt
        # still propagate; the traceback is recorded by LOG.exception.
        LOG.exception("[lifecycle.connectors.atos.user_manager] [set_um_properties] Exception; Returning None ...")
    return None
# CHECK AVAILABILITY
# check_avialability: call to the local UM to check whether it's possible to deploy a service
def check_avialability():
    """Ask the local User Management whether a service can be deployed.

    Returns the parsed JSON response when the call succeeds (HTTP 200 and
    a non-null 'result' field), otherwise None.
    """
    LOG.log(TRACE, "[lifecycle.connectors.atos.user_manager] [check_avialability] localhost - local UM: Checking avialability ...")
    try:
        LOG.log(TRACE, "[lifecycle.connectors.atos.user_manager] [check_avialability] HTTP GET: " + str(config.dic['URL_AC_USER_MANAGEMENT']) + "/check")
        r = requests.get(str(config.dic['URL_AC_USER_MANAGEMENT']) + "/check",
                         verify=config.dic['VERIFY_SSL'])
        LOG.log(TRACE, "[lifecycle.connectors.atos.user_manager] [check_avialability] response: " + str(r) + ", " + str(r.json()))
        json_data = json.loads(r.text)
        LOG.log(TRACE, "[lifecycle.connectors.atos.user_manager] [check_avialability] json_data=" + str(json_data))
        if r.status_code == 200 and json_data['result'] is not None:
            return json_data
        LOG.log(TRACE, "[lifecycle.connectors.atos.user_manager] [check_avialability] Error: status_code=" + str(r.status_code) + "; Returning None ...")
    except Exception:
        # Catch Exception (not a bare except) so SystemExit/KeyboardInterrupt
        # still propagate; the traceback is recorded by LOG.exception.
        LOG.exception("[lifecycle.connectors.atos.user_manager] [check_avialability] Exception; Returning None ...")
    return None
# GET CURRENT USER / DEVICE
# get_current: call to local UM to get current values (user, device)
def get_current(val):
    """Get a current value ('user' or 'device') from the local UM.

    val -- path suffix selecting which current value to fetch.
    Returns the parsed JSON response on HTTP 200, otherwise None.
    """
    LOG.log(TRACE, "[lifecycle.connectors.atos.user_manager] [get_current] Getting current " + val + " from localhost - UM: Checking avialability ...")
    try:
        LOG.info("[lifecycle.connectors.atos.user_manager] [get_current] HTTP GET: " + str(config.dic['URL_AC_USER_MANAGEMENT']) + "/current/" + val)
        r = requests.get(str(config.dic['URL_AC_USER_MANAGEMENT']) + "/current/" + val,
                         verify=config.dic['VERIFY_SSL'])
        LOG.log(TRACE, "[lifecycle.connectors.atos.user_manager] [get_current] response: " + str(r) + ", " + str(r.json()))
        json_data = json.loads(r.text)
        LOG.debug("[lifecycle.connectors.atos.user_manager] [get_current] json_data=" + str(json_data))
        if r.status_code == 200:
            return json_data
        LOG.error("[lifecycle.connectors.atos.user_manager] [get_current] Error: status_code=" + str(r.status_code) + "; Returning None ...")
    except Exception:
        # Catch Exception (not a bare except) so SystemExit/KeyboardInterrupt
        # still propagate; the traceback is recorded by LOG.exception.
        LOG.exception("[lifecycle.connectors.atos.user_manager] [get_current] Exception; Returning None ...")
    return None
# GET CURRENT USER-PROFILE
# get_user_profile: call to local UM to get current user-profile
def get_user_profile():
    """Get the current user-profile from the local User Management.

    Returns the parsed JSON response on HTTP 200, otherwise None.
    """
    LOG.log(TRACE, "[lifecycle.connectors.atos.user_manager] [get_user_profile] Getting user-profile from localhost ...")
    try:
        LOG.info("[lifecycle.connectors.atos.user_manager] [get_user_profile] HTTP GET: " + str(config.dic['URL_AC_USER_MANAGEMENT']) + "/user-profile")
        r = requests.get(str(config.dic['URL_AC_USER_MANAGEMENT']) + "/user-profile",
                         verify=config.dic['VERIFY_SSL'])
        LOG.log(TRACE, "[lifecycle.connectors.atos.user_manager] [get_user_profile] response: " + str(r) + ", " + str(r.json()))
        json_data = json.loads(r.text)
        LOG.debug("[lifecycle.connectors.atos.user_manager] [get_user_profile] json_data=" + str(json_data))
        if r.status_code == 200:
            return json_data
        LOG.error("[lifecycle.connectors.atos.user_manager] [get_user_profile] Error: status_code=" + str(r.status_code) + "; Returning None ...")
    except Exception:
        # Catch Exception (not a bare except) so SystemExit/KeyboardInterrupt
        # still propagate; the traceback is recorded by LOG.exception.
        LOG.exception("[lifecycle.connectors.atos.user_manager] [get_user_profile] Exception; Returning None ...")
    return None
# GET CURRENT SHARING-MODEL
# get_sharing_model: call to the local UM to get the current sharing-model
def get_sharing_model():
    """Get the current sharing-model from the local User Management.

    Returns the parsed JSON response on HTTP 200, otherwise None.
    """
    LOG.log(TRACE, "[lifecycle.connectors.atos.user_manager] [get_sharing_model] Getting sharing-model from localhost ...")
    try:
        LOG.info("[lifecycle.connectors.atos.user_manager] [get_sharing_model] HTTP GET: " + str(config.dic['URL_AC_USER_MANAGEMENT']) + "/sharing-model")
        r = requests.get(str(config.dic['URL_AC_USER_MANAGEMENT']) + "/sharing-model",
                         verify=config.dic['VERIFY_SSL'])
        LOG.log(TRACE, "[lifecycle.connectors.atos.user_manager] [get_sharing_model] response: " + str(r) + ", " + str(r.json()))
        json_data = json.loads(r.text)
        LOG.debug("[lifecycle.connectors.atos.user_manager] [get_sharing_model] json_data=" + str(json_data))
        if r.status_code == 200:
            return json_data
        LOG.error("[lifecycle.connectors.atos.user_manager] [get_sharing_model] Error: status_code=" + str(r.status_code) + "; Returning None ...")
    except Exception:
        # Catch Exception (not a bare except) so SystemExit/KeyboardInterrupt
        # still propagate; the traceback is recorded by LOG.exception.
        LOG.exception("[lifecycle.connectors.atos.user_manager] [get_sharing_model] Exception; Returning None ...")
    return None
import argparse
import csv
import datetime
import gpxpy
import gpxpy.gpx
import pyproj
from dateutil import parser
def jps2gpx(args, sigma_threshold=0.8, utc_date=datetime.date(2021, 9, 23), leap_seconds=17):
    """Convert a CSV of ECEF positions (derived from a JPS file) to a GPX track.

    args            -- namespace with .csv (input path) and .gpx (output path).
    sigma_threshold -- rows with 'sigma' >= this value are discarded
                       (previously a hard-coded 0.8).
    utc_date        -- calendar date stamped on every point; the CSV rows only
                       carry a time of day (previously hard-coded 2021-09-23).
    leap_seconds    -- GPS-to-UTC offset subtracted from each timestamp
                       (previously a hard-coded 17 s).
    """
    ecef = pyproj.Proj(proj='geocent', ellps='WGS84', datum='WGS84')
    lla = pyproj.Proj(proj='latlong', ellps='WGS84', datum='WGS84')
    gpx = gpxpy.gpx.GPX()
    # Create first track in our GPX:
    gpx_track = gpxpy.gpx.GPXTrack()
    gpx.tracks.append(gpx_track)
    # Create first segment in our GPX track:
    gpx_segment = gpxpy.gpx.GPXTrackSegment()
    gpx_track.segments.append(gpx_segment)
    with open(args.csv, newline='') as csvfile, open(args.gpx, 'w') as gpx_file:
        reader = csv.DictReader(csvfile, delimiter=',')
        for row in reader:
            sigma = float(row['sigma'])
            if sigma < sigma_threshold:
                # NOTE(review): pyproj.transform is deprecated in pyproj 2+;
                # pyproj.Transformer.from_proj is the modern replacement.
                lon, lat, alt = pyproj.transform(ecef, lla, float(row['x']), float(row['y']), float(row['z']), radians=False)
                t = parser.parse(row['t'])
                utc = datetime.datetime(utc_date.year, utc_date.month, utc_date.day,
                                        t.time().hour, t.time().minute, t.time().second) - \
                    datetime.timedelta(seconds=leap_seconds)
                gpx_segment.points.append(gpxpy.gpx.GPXTrackPoint(lat, lon, alt, utc))
        gpx_file.write(gpx.to_xml())
    print('Created GPX:', args.gpx)
if __name__ == '__main__':
    # fromfile_prefix_chars='@' lets arguments be read from an @file.
    arg_parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
    arg_parser.add_argument("--csv", help="CSV file derived form JPS file", required=True)
    arg_parser.add_argument("--gpx", help="GPX file", required=True)
    jps2gpx(arg_parser.parse_args())
|
#!/usr/bin/env python3
"""Default values and configuration."""

# Possible word delimiters for synonyms.
MULTIWORD_DELIMITERS = (' ', '-', '_', '/')
# Default output format if not explicitly stated.
DEFAULT_OUTPUT_FORMAT = 'yaml'
# Stemmer to be used by default (None disables stemming).
DEFAULT_STEMMER = None
# DEFAULT_STEMMER = Stemmer.get_stemmer('EnglishStemmer')
# Lemmatizer to be used by default (None disables lemmatization).
DEFAULT_LEMMATIZER = None
# DEFAULT_LEMMATIZER = Lemmatizer.get_lemmatizer()
# Filter keywords that have low occurrence (fewer than this many hits).
OCCURRENCE_COUNT_FILTER = 2
# Scoring mechanism used.
DEFAULT_SCORER = 'Count'
|
from .lr0itemset import LR0ItemSet
from .lr0node import LR0Node
from .lr0path import LR0PathItem
from .lr0dominancenode import LR0DominanceSet
from .merge_tree import MergeTree
from motor_typing import TYPE_CHECKING
import sys
class LALRTable(object):
    """Immutable container for a generated LALR parser table.

    Holds the per-state action table (lookahead -> actions) and the
    per-state goto table (nonterminal -> next state).
    """

    def __init__(self, action_table, goto_table):
        # type: (List[Dict[int, Tuple[int,...]]], List[Dict[int, int]]) -> None
        self._action_table, self._goto_table = action_table, goto_table
def _find_merge_points(conflict_list, name_map):
    # type: (List[Tuple[LR0Node, str]], List[str]) -> None
    """Merge-point analysis for split conflicts; currently disabled.

    The MergeTree-based implementation is kept below (commented out) for
    reference; the function is intentionally a no-op.
    """
    #dominance_set = MergeTree(conflict_list)
    #dominance_set.print_dot(name_map)
    #dominance_set.print_merge_tree(name_map)
    pass
def _log(title, conflict_paths, out, name_map):
# type: (Text, List[LR0Path], Logger, List[str]) -> None
seen = set([])
if conflict_paths:
count = len(set(conflict_paths))
out.info(u' %s', title)
out.info(u' \u256d\u2574')
for path in conflict_paths:
if path in seen:
continue
count -= 1
seen.add(path)
strings = path.to_string(name_map)
for s in strings:
out.info(u' \u2502 %s', s)
if count == 0:
out.info(u' \u2570\u2574')
else:
out.info(u' \u251c\u2574')
# Cache of dominance analyses, keyed by the frozen set of conflicting nodes.
_dominance_set_cache = {} # type: Dict[FrozenSet[LR0Node], LR0DominanceSet]


def _find_common_parent(node_list):
    # type: (List[LR0Node]) -> List[LR0Node]
    """Return the nodes of the best common dominator of *node_list*, or []."""
    key = frozenset(node_list)
    dominance_set = _dominance_set_cache.get(key)
    if dominance_set is None:
        dominance_set = LR0DominanceSet(node_list)
        #dominance_set.print_dot()
        _dominance_set_cache[key] = dominance_set
    best = dominance_set._best_dominator
    return best._nodes if best else []
def _find_counterexamples(conflict_list):
    # type: (List[Tuple[LR0Node, Optional[int]]]) -> List[Tuple[LR0Node, List[Tuple[LR0Node, LR0Path]]]]
    """For each conflicting node, collect example derivation paths.

    conflict_list pairs each conflicting LR0Node with the lookahead token
    of a reduce action (or None for a shift).  The search walks backwards
    through predecessor states (breadth-first on *queue*) until every
    branch reaches the start of an item with no pending lookahead, then
    backtracks all branches to a common dominator node.
    """
    class IntermediateResult(object):
        # Holds partially-backtracked paths shared by several queue
        # entries; _refcount tracks how many entries still reference it.
        def __init__(self, path_list):
            # type: (List[List[Tuple[LR0Node, LR0Path]]]) -> None
            self._paths = path_list
            self._refcount = 1

    # One result bucket per conflicting node, filled as paths complete.
    conflict_paths = [
        (node, []) for node, _ in conflict_list
    ] # type: List[Tuple[LR0Node, List[Tuple[LR0Node, LR0Path]]]]
    # 1 for reduce-type conflict entries (item at end of production), else 0.
    reduce_node = [1 if node._item == node._item._last else 0 for node, _ in conflict_list]
    lst = [
    ] # type: List[List[Tuple[LR0Node, LR0Path, Optional[int], Set[Union[Tuple[LR0Node, Optional[int]], LR0ItemSet]]]]]
    states = {
    } # type: Dict[Tuple[LR0ItemSet, int], List[List[Tuple[LR0Node, LR0Path, Optional[int], Set[Union[Tuple[LR0Node, Optional[int]], LR0ItemSet]]]]]]
    for s in conflict_list:
        lst.append([(s[0], LR0PathItem(s[0]._item), s[1], set())])
    intermediate_result = None # type: Optional[IntermediateResult]
    queue = [(lst, conflict_list[0][0]._item_set._index, intermediate_result, states)]
    while queue:
        path_list, state, intermediate_result, states = queue.pop(0)
        temp_result = [] # type: List[List[Tuple[LR0Node, LR0Path]]]
        recurse = False
        report = True
        all_nodes = [] # type: List[LR0Node]
        # Split each branch into "finished" paths (item start reached, no
        # lookahead pending) and branches that still need backtracking.
        for paths in path_list:
            temp_result.append([])
            for node, path, lookahead, _ in paths:
                if node._item._index == 0 and lookahead is None:
                    all_nodes.append(node)
                    temp_result[-1].append((node, path))
                else:
                    recurse = True
            # A branch with work but no finished path blocks reporting.
            if len(paths) > 0 and len(temp_result[-1]) == 0:
                report = False
        if report:
            common_parents = _find_common_parent(all_nodes)
            if common_parents:
                if recurse:
                    # Not all branches are done: park the backtracked
                    # paths in a shared IntermediateResult for later.
                    if intermediate_result is not None:
                        intermediate_result._refcount -= 1
                    intermediate_result = IntermediateResult([])
                    for index, tmp_paths in enumerate(temp_result):
                        intermediate_result._paths.append([])
                        for node, path in tmp_paths:
                            node, path = node.backtrack_to_any(path, common_parents)
                            intermediate_result._paths[index].append((node, path))
                else:
                    # All branches done: emit final counterexample paths.
                    for index, tmp_paths in enumerate(temp_result):
                        for node, path in tmp_paths:
                            node, path = node.backtrack_to_any(path, common_parents)
                            conflict_paths[index][1].append((node, path))
            else:
                recurse = True
        if recurse:
            check_current_state = False
            if len(states) == 0:
                # Walk each pending path one predecessor step backwards.
                current_state = []
                for index, paths in enumerate(path_list):
                    current_state_paths = []
                    for node, path, lookahead, seen in paths:
                        predecessor_list = node.backtrack_up(path, lookahead, seen)
                        for predecessor, predecessor_path, la, consumed_token in predecessor_list:
                            if consumed_token is not None:
                                # Crossing a token boundary: group by
                                # (item set, token) for later exploration.
                                try:
                                    predecessors = states[(predecessor._item_set, consumed_token)]
                                except KeyError:
                                    predecessors = [[] for _ in path_list]
                                    states[(predecessor._item_set, consumed_token)] = predecessors
                                predecessors[index].append((predecessor, predecessor_path, la, seen))
                            else:
                                current_state_paths.append((predecessor, predecessor_path, la, set(seen)))
                    if current_state_paths:
                        check_current_state = True
                        current_state.append(current_state_paths)
                    else:
                        current_state.append(paths)
            if check_current_state:
                # Keep exploring within the same state first (depth-first).
                queue.insert(0, (current_state, state, intermediate_result, states))
            else:
                # Enqueue grouped predecessor states that still exhibit a
                # conflict (more than one live branch incl. a reduce).
                for (item_set, _), nodes_list in states.items():
                    count = 0
                    reduce_count = 0
                    for nodes, reduce in zip(nodes_list, reduce_node):
                        if len(nodes) > 0:
                            count += 1
                            reduce_count += reduce
                    if count > 1 and reduce_count > 0:
                        if intermediate_result is not None:
                            intermediate_result._refcount += 1
                        queue.append((nodes_list, item_set._index, intermediate_result, {}))
        # Release this queue entry's hold on the shared result; flush its
        # paths once the last holder is gone.
        # NOTE(review): indentation reconstructed — presumed to run once per
        # processed queue entry; verify against upstream refcount semantics.
        if intermediate_result is not None:
            intermediate_result._refcount -= 1
            if intermediate_result._refcount == 0:
                for index, tmp_paths in enumerate(intermediate_result._paths):
                    conflict_paths[index][1].extend(tmp_paths)
    return conflict_paths
def create_parser_table(productions, start_id, name_map, terminal_count, sm_log, conflict_log, error_log):
    # type: (Dict[int, Grammar.Production], int, List[str], int, Logger, Logger, Logger) -> LALRTable
    """Build the LALR(1) action/goto tables for *productions*.

    productions    -- grammar productions indexed by nonterminal symbol id.
    start_id       -- id of the start symbol (must have a single production).
    name_map       -- symbol id -> printable name, for logging.
    terminal_count -- ids below this value are terminals.
    sm_log / conflict_log / error_log -- loggers for the state machine dump,
    conflict diagnostics and user-facing warnings respectively.
    Returns an LALRTable.  Follows the classic DeRemer/Pennello LALR
    construction (as in PLY): LR(0) item sets, then lookaheads via the
    reads/includes/lookback relations and a digraph traversal.
    """
    cidhash = {} # type: Dict[int, int]
    goto_cache = {} # type: Dict[Tuple[int, int], Optional[LR0ItemSet]]
    goto_cache_2 = {} # type: Dict[int, Any]

    def goto(item_set, index, lookahead):
        # type: (LR0ItemSet, int, int) -> Optional[LR0ItemSet]
        """Compute (and cache) the goto set of *item_set* on *lookahead*."""
        # First we look for a previously cached entry
        item_set_id = id(item_set)
        result = goto_cache.get((item_set_id, lookahead), None)
        if result is not None:
            return result
        s = goto_cache_2.get(lookahead)
        if not s:
            s = {}
            goto_cache_2[lookahead] = s
        gs = [] # type: List[Tuple[LR0Item, Optional[LR0Node], int]]
        for item in item_set:
            next = item._next
            if next and next._before == lookahead:
                gs.append((next, item_set[item], lookahead))
        gs = sorted(gs, key=lambda x: id(x[0]))
        # Walk a trie keyed by the item ids so identical cores share one set.
        for item, _, _ in gs:
            s1 = s.get(id(item))
            if not s1:
                s1 = {}
                s[id(item)] = s1
            s = s1
        result = s.get(0, None)
        if result is None:
            if gs:
                result = LR0ItemSet(index, gs)
                s[0] = result
            else:
                s[0] = None
        else:
            result.add_core(gs)
        goto_cache[(item_set_id, lookahead)] = result
        return result

    def create_item_sets():
        # type: () -> List[LR0ItemSet]
        """Build the canonical collection of LR(0) item sets."""
        assert len(productions[start_id]) == 1
        states = [LR0ItemSet(0, [(productions[start_id][0]._item, None, 2)])]
        cidhash[id(states[0])] = 0
        # Loop over the items in C and each grammar symbols
        i = 0
        while i < len(states):
            state = states[i]
            i += 1
            # Collect all of the symbols that could possibly be in the goto(I,X) sets
            asyms = set([])
            for item in state:
                asyms.update(item._symbols)
            for x in sorted(asyms):
                g = goto(state, len(states), x)
                if not g or id(g) in cidhash:
                    continue
                cidhash[id(g)] = len(states)
                states.append(g)
        return states

    def add_lalr_lookahead(states):
        # type: (List[LR0ItemSet]) -> None
        """Annotate reduce items in *states* with their LALR lookahead sets."""
        def traverse(x, N, stack, F, X, R, FP):
            # type: (Tuple[int, int], Dict[Tuple[int, int], int], List[Tuple[int, int]], Dict[Tuple[int, int], List[int]], List[Tuple[int, int]], Callable[[Tuple[int, int]], List[Tuple[int, int]]], Callable[[Tuple[int, int]], List[int]]) -> None
            # Tarjan-style SCC traversal used by the digraph algorithm.
            stack.append(x)
            d = len(stack)
            N[x] = d
            F[x] = FP(x) # F(X) <- F'(x)
            rel = R(x) # Get y's related to x
            for y in rel:
                if N[y] == 0:
                    traverse(y, N, stack, F, X, R, FP)
                N[x] = min(N[x], N[y])
                for a in F.get(y, []):
                    if a not in F[x]:
                        F[x].append(a)
            if N[x] == d:
                N[stack[-1]] = sys.maxsize
                F[stack[-1]] = F[x]
                element = stack.pop()
                while element != x:
                    N[stack[-1]] = sys.maxsize
                    F[stack[-1]] = F[x]
                    element = stack.pop()

        def digraph(X, R, FP):
            # type: (List[Tuple[int, int]], Callable[[Tuple[int, int]], List[Tuple[int, int]]], Callable[[Tuple[int, int]], List[int]]) -> Dict[Tuple[int, int], List[int]]
            """DeRemer/Pennello digraph: propagate FP over relation R."""
            N = {}
            for x in X:
                N[x] = 0
            stack = [] # type: List[Tuple[int, int]]
            F = {} # type: Dict[Tuple[int, int], List[int]]
            for x in X:
                if N[x] == 0:
                    traverse(x, N, stack, F, X, R, FP)
            return F

        def dr_relation(trans):
            # type: (Tuple[int, int]) -> List[int]
            """Direct-read terminals of nonterminal transition *trans*."""
            state, N = trans
            terms = []
            item_set = goto(states[state], len(states), N)
            assert item_set is not None
            for item in item_set:
                if item._index < item.len:
                    a = item.rule.production[item._index]
                    if a < terminal_count:
                        if a not in terms:
                            terms.append(a)
            # This extra bit is to handle the start state
            if state == start_id and N == productions[start_id][0].production[0]:
                terms.append(0)
            return terms

        def reads_relation(trans, empty):
            # type: (Tuple[int, int], Set[int]) -> List[Tuple[int, int]]
            # Look for empty transitions
            rel = []
            state, N = trans
            item_set = goto(states[state], len(states), N)
            assert item_set is not None
            j = cidhash[id(item_set)]
            for item in item_set:
                if item._index < item.len:
                    a = item.rule.production[item._index]
                    if a in empty:
                        rel.append((j, a))
            return rel

        def compute_read_sets(trans, nullable):
            # type: (List[Tuple[int, int]], Set[int]) -> Dict[Tuple[int, int], List[int]]
            """Read(p,A) = digraph over the *reads* relation seeded with DR."""
            FP = lambda x: dr_relation(x)
            R = lambda x: reads_relation(x, nullable)
            F = digraph(trans, R, FP)
            return F

        def compute_lookback_includes(trans, nullable):
            # type: (List[Tuple[int, int]], Set[int]) -> Tuple[Dict[Tuple[int, int], List[Tuple[int, LR0Item]]], Dict[Tuple[int, int], List[Tuple[int, int]]]]
            """Compute the *lookback* and *includes* relations for *trans*."""
            lookdict = {} # Dictionary of lookback relations
            includedict = {} # type: Dict[Tuple[int, int], List[Tuple[int, int]]]
            # Make a dictionary of non-terminal transitions
            dtrans = {}
            for t1 in trans:
                dtrans[t1] = 1
            # Loop over all transitions and compute lookbacks and includes
            for state, N in trans:
                lookb = []
                includes = []
                for p in states[state]:
                    if p.rule._prod_symbol != N:
                        continue
                    # Okay, we have a name match. We now follow the production all the way
                    # through the state machine until we get the . on the right hand side
                    lr_index = p._index
                    j = state
                    while lr_index < p.len:
                        t = p.rule.production[lr_index]
                        lr_index = lr_index + 1
                        # Check to see if this symbol and state are a non-terminal transition
                        if (j, t) in dtrans:
                            # Yes. Okay, there is some chance that this is an includes relation
                            # the only way to know for certain is whether the rest of the
                            # production derives empty
                            li = lr_index
                            while li < p.len:
                                if p.rule.production[li] < terminal_count:
                                    break # No forget it
                                if p.rule.production[li] not in nullable:
                                    break
                                li = li + 1
                            else:
                                # Appears to be a relation between (j,t) and (state,N)
                                includes.append((j, t))
                        g = goto(states[j], len(states), t) # Go to next set
                        j = cidhash[id(g)] # Go to next state
                    # When we get here, j is the final state, now we have to locate the production
                    for r in states[j]:
                        if r.rule._prod_symbol != p.rule._prod_symbol:
                            continue
                        if r.len != p.len:
                            continue
                        # i = 0
                        # This look is comparing a production ". A B C" with "A B C ."
                        # while i < r._index:
                        # if r._rule[i] != p._rule[i + 1]:
                        # break
                        # i = i + 1
                        #else:
                        # lookb.append((j, r))
                        if p._index == 0 and r.rule.production[:r._index] == p.rule.production[:r._index]:
                            lookb.append((j, r))
                for ii in includes:
                    if ii not in includedict:
                        includedict[ii] = []
                    includedict[ii].append((state, N))
                lookdict[(state, N)] = lookb
            return lookdict, includedict

        def compute_follow_sets(ntrans, readsets, inclsets):
            # type: (List[Tuple[int, int]], Dict[Tuple[int, int], List[int]], Dict[Tuple[int, int], List[Tuple[int, int]]]) -> Dict[Tuple[int, int], List[int]]
            """Follow = digraph over *includes* seeded with the read sets."""
            FP = lambda x: readsets[x]
            R = lambda x: inclsets.get(x, [])
            F = digraph(ntrans, R, FP)
            return F

        def add_lookaheads(lookbacks, followset):
            # type: (Dict[Tuple[int, int], List[Tuple[int, LR0Item]]], Dict[Tuple[int, int], List[int]]) -> None
            """Attach follow sets to reduce items through the lookback relation."""
            for trans, lb in lookbacks.items():
                # Loop over productions in lookback
                for state, p in lb:
                    if state not in p._lookaheads:
                        p._lookaheads[state] = []
                    l = p._lookaheads[state]
                    f = followset.get(trans, [])
                    for a in f:
                        if a not in l:
                            l.append(a)

        # Determine all of the nullable nonterminals
        nullable = set([])
        for prod_symbol, prod in productions.items():
            if prod._empty:
                nullable.add(prod_symbol)
        # Find all non-terminal transitions
        trans = []
        for stateno, state in enumerate(states):
            for item in state:
                if item._index < item.len:
                    t = (stateno, item.rule.production[item._index])
                    if t[1] >= terminal_count:
                        if t not in trans:
                            trans.append(t)
        # Compute read sets
        readsets = compute_read_sets(trans, nullable)
        # Compute lookback/includes relations
        lookd, included = compute_lookback_includes(trans, nullable)
        # Compute LALR FOLLOW sets
        followsets = compute_follow_sets(trans, readsets, included)
        # Add all of the lookaheads
        add_lookaheads(lookd, followsets)

    goto_table = [] # Goto array
    action = [] # Action array
    # Build the parser table, state by state
    states = create_item_sets()
    add_lalr_lookahead(states)
    st = 0
    priority_missing = {} # type: Dict[LR0Item, List[int]]
    split_missing = {} # type: Dict[LR0Item, List[int]]
    # NOTE(review): merge_missing / merge_conflict are reported below but never
    # filled in this chunk — presumably populated by code elsewhere or vestigial.
    merge_missing = {} # type: Dict[LR0Item, List[int]]
    priority_conflict = {} # type: Dict[FrozenSet[LR0Item], List[int]]
    merge_conflict = {} # type: Dict[FrozenSet[Grammar.Rule], List[int]]
    conflict_issues = {} # type: Dict[FrozenSet[LR0Item], Dict[LR0Item, List[Tuple[LR0Node, LR0Path]]]]
    split_seen = set() # type: Set[FrozenSet[Tuple[LR0Node, str]]]
    num_rr = 0
    num_sr = 0
    for st, item_group in enumerate(states):
        # Loop over each production
        action_map = {} # type: Dict[int, List[Tuple[int, LR0Item]]]
        st_action = {} # type: Dict[int, Tuple[int,...]]
        st_goto = {} # type: Dict[int, int]
        sm_log.info('')
        sm_log.info('')
        sm_log.info('state %d:', st)
        sm_log.info('')
        for item in item_group:
            sm_log.info(' (%d) %s', item.rule._id, item.to_string(name_map))
        sm_log.info('')
        # Collect candidate actions per lookahead symbol.
        for item in item_group:
            if item._last == item:
                if item.rule._prod_symbol == start_id:
                    # Start symbol. Accept!
                    action_map[0] = action_map.get(0, []) + [(0, item)]
                    item.rule._reduced += 1
                else:
                    # We are at the end of a production. Reduce!
                    for a in item._lookaheads[st]:
                        action_map[a] = action_map.get(a, []) + [(-item.rule._id, item)]
                        item.rule._reduced += 1
            else:
                i = item._index
                a = item.rule.production[i] # Get symbol right after the "."
                if a < terminal_count:
                    g = goto(item_group, len(states), a)
                    j = cidhash[id(g)]
                    if j >= 0:
                        action_map[a] = action_map.get(a, []) + [(j, item)]
        # Resolve each lookahead's candidate actions (precedence, splits).
        for a in sorted(action_map):
            actions = action_map[a]
            action_dest = {} # type: Dict[int, List[LR0Item]]
            for i, item in actions:
                try:
                    action_dest[i].append(item)
                except KeyError:
                    action_dest[i] = [item]
            accepted_actions = {} # type: Dict[int, List[LR0Item]]
            if len(action_dest) > 1:
                # looks like a potential conflict, look at precedence
                conflict_log.info('State %d:', st)
                conflict_log.info(' disambiguation for lookahead %s', name_map[a])
                precedence, associativity = (-1, 'nonassoc')
                shift_actions = False
                reduce_actions = False
                assoc_error = False
                precedence_set = False
                split = True
                all_items = []
                # First pass: find the highest precedence among candidates.
                for j, items in action_dest.items():
                    all_items += items
                    for item in items:
                        if item._precedence is not None:
                            if item._precedence[1] > precedence:
                                precedence_set = True
                                precedence = item._precedence[1]
                                associativity = item._precedence[0]
                                assoc_error = False
                                shift_actions = j >= 0
                                reduce_actions = j < 0
                                split = item._split is not None
                            elif item._precedence[1] == precedence:
                                if precedence_set:
                                    if item._precedence[0] != associativity:
                                        assoc_error = True
                                    shift_actions |= j >= 0
                                    reduce_actions |= j < 0
                                    split &= item._split is not None
                                else:
                                    associativity = item._precedence[0]
                                    precedence_set = True
                                    assoc_error = False
                                    shift_actions = j >= 0
                                    reduce_actions = j < 0
                                    split = item._split is not None
                        elif precedence == -1:
                            shift_actions |= j >= 0
                            reduce_actions |= j < 0
                            split &= item._split is not None
                all_items_set = frozenset(all_items)
                if assoc_error:
                    try:
                        priority_conflict[all_items_set].append(st)
                    except KeyError:
                        priority_conflict[all_items_set] = [st]
                # Second pass: keep/discard candidates using the winner's
                # precedence and associativity.
                for j, items in action_dest.items():
                    for item in items:
                        if item._precedence is None and precedence_set:
                            if j >= 0:
                                try:
                                    priority_missing[item].append(st)
                                except KeyError:
                                    priority_missing[item] = [st]
                                conflict_log.info(' [no precedence] %s', item.to_string(name_map))
                            else:
                                conflict_log.info(' [discarded] %s', item.to_string(name_map))
                                continue
                        elif item._precedence is not None:
                            if item._precedence[1] < precedence:
                                conflict_log.info(' [discarded] %s', item.to_string(name_map))
                                continue
                            if j < 0 and shift_actions and associativity == 'left':
                                conflict_log.info(' [discarded] %s', item.to_string(name_map))
                                continue
                            if j >= 0 and reduce_actions and associativity == 'right':
                                conflict_log.info(' [discarded] %s', item.to_string(name_map))
                                continue
                        if split and item._split is None:
                            try:
                                split_missing[item].append(st)
                            except KeyError:
                                split_missing[item] = [st]
                        try:
                            accepted_actions[j].append(item)
                        except KeyError:
                            accepted_actions[j] = [item]
                        conflict_log.info(' [accepted] %s', item.to_string(name_map))
                conflict_log.info('')
            else:
                accepted_actions = action_dest
            st_action[a] = tuple(sorted(accepted_actions))
            if len(accepted_actions) > 1 and not split:
                # handle conflicts
                conflicts = [] # type: List[Tuple[LR0Node, Optional[int]]]
                num_rr += 1
                sm_log.info(' %-30s conflict split', name_map[a])
                for j in st_action[a]:
                    items = accepted_actions[j]
                    if j >= 0:
                        sm_log.info(' shift and go to state %d', j)
                        # A shift among the candidates makes this s/r, not r/r.
                        num_rr -= 1
                        num_sr += 1
                    for item in items:
                        node = item_group[item]
                        if j >= 0:
                            conflicts.append((node, None))
                        else:
                            sm_log.info(' reduce using rule %s', item.to_string(name_map))
                            conflicts.append((node, a))
                counterexamples = _find_counterexamples(conflicts)
                result_count = 0
                for node, conflict_list in counterexamples:
                    if conflict_list:
                        result_count += 1
                if result_count == 0:
                    conflict_log.info(' unable to find counterexamples - lookahead: %s\n' % name_map[a])
                    error_log.warning('unable to find counterexamples - lookahead: %s' % name_map[a])
                conflict_items = frozenset(node._item for node, _ in counterexamples)
                try:
                    item_conflict_node = conflict_issues[conflict_items]
                except KeyError:
                    item_conflict_node = {}
                    conflict_issues[conflict_items] = item_conflict_node
                for node, paths in counterexamples:
                    try:
                        item_conflict_node[node._item] += paths
                    except KeyError:
                        item_conflict_node[node._item] = paths
            elif len(accepted_actions) > 1:
                # All surviving candidates carry a split annotation: this is a
                # sanctioned GLR-style split, not an error.
                for j, items in accepted_actions.items():
                    for item in items:
                        assert item._split is not None
                        item._split_use += 1
                conflict_log.info('')
                splits = [] # type: List[Tuple[LR0Node, str]]
                sm_log.info(' %-30s split', name_map[a])
                for j in st_action[a]:
                    items = accepted_actions[j]
                    if j >= 0:
                        sm_log.info(' shift and go to state %d', j)
                    for item in items:
                        assert item._split is not None
                        if j < 0:
                            sm_log.info(' reduce using rule %s', item.to_string(name_map))
                            splits.append((item_group[item], item._split))
                        else:
                            splits.append((item_group[item], item._split))
                key = frozenset(splits)
                if key not in split_seen:
                    _find_merge_points(splits, name_map)
                    split_seen.add(key)
                conflict_log.info('')
            else:
                # Single action: just dump it to the state-machine log.
                for j in st_action[a]:
                    items = accepted_actions[j]
                    if j >= 0:
                        sm_log.info(' %-30s shift and go to state %d', name_map[a], j)
                    for item in items:
                        if j < 0:
                            sm_log.info(' %-30s reduce using rule %s', name_map[a], item.to_string(name_map))
        # Fill the goto table for nonterminal transitions of this state.
        nkeys = set([])
        for item in item_group:
            for s in item._symbols:
                if s > terminal_count:
                    g = goto(item_group, len(states), s)
                    j = cidhash.get(id(g), -1)
                    if j >= 0:
                        if s not in nkeys:
                            st_goto[s] = j
                            nkeys.add(s)
                            sm_log.info(' %-30s shift and go to state %d', name_map[s], j)
                            assert item._next is not None
        action.append(st_action)
        goto_table.append(st_goto)
    # Report errors
    for _, production in productions.items():
        for rule in production:
            if rule._reduced == 0:
                error_log.warning('Rule (%s) is never reduced', rule.to_string(name_map))
    for missing, text in (
        (priority_missing, 'precedence'),
        (split_missing, 'split'),
        (merge_missing, 'merge'),
    ):
        if len(missing) == 1:
            error_log.warning('1 missing %s annotation', text)
        elif len(missing) > 1:
            error_log.warning('%d missing %s annotations', len(missing), text)
        for item, _ in sorted(missing.items(), key=lambda x: (x[0].rule._filename, x[0].rule._lineno)):
            error_log.diagnostic(item.rule._filename, item.rule._lineno, item.to_string(name_map))
    if len(priority_conflict) == 1:
        error_log.warning('1 conflicting precedence annotation')
    elif len(priority_conflict) > 1:
        error_log.warning('%d conflicting precedence annotations', len(priority_conflict))
    for item_set, state_numbers in priority_conflict.items():
        error_log.warning('conflicting precedence in states %s:', ', '.join([str(i) for i in state_numbers]))
        for item in sorted(item_set, key=lambda x: (x.rule._filename, x.rule._lineno)):
            error_log.diagnostic(item.rule._filename, item.rule._lineno, item.to_string(name_map))
    if len(merge_conflict) == 1:
        error_log.warning('1 conflicting merge annotation')
    elif len(merge_conflict) > 1:
        error_log.warning('%d conflicting merge annotations', len(merge_conflict))
    for rules, state_numbers in merge_conflict.items():
        error_log.warning('conflicting merge in states %s:', ', '.join([str(i) for i in state_numbers]))
        for rule in sorted(rules, key=lambda x: (x._filename, x._lineno)):
            error_log.diagnostic(rule._filename, rule._lineno, rule._item.to_string(name_map))
    # Warn about split annotations that never took effect.
    for _, production in sorted(productions.items()):
        for rule in production:
            item_iterator = rule._item # type: Optional[LR0Item]
            while item_iterator:
                if item_iterator._split is not None and item_iterator._split_use == 0:
                    error_log.warning('unused split annotation')
                    error_log.diagnostic(rule._filename, rule._lineno, item_iterator.to_string(name_map))
                item_iterator = item_iterator._next
            #if rule._item._last._merge and rule._item._last._merge_use == 0:
            # error_log.warning('unused merge annotation')
            # error_log.diagnostic(rule._filename, rule._lineno, rule._item._last.to_string(name_map))
    if num_sr == 1:
        error_log.warning('1 shift/reduce conflict')
    elif num_sr > 1:
        error_log.warning('%d shift/reduce conflicts', num_sr)
    if num_rr == 1:
        error_log.warning('1 reduce/reduce conflict')
    elif num_rr > 1:
        error_log.warning('%d reduce/reduce conflicts', num_rr)
    # Dump the collected counterexample paths for every conflict.
    for item_set, conflict_set in conflict_issues.items():
        conflict_log.info('conflict:')
        for item in sorted(item_set, key=lambda x: (x.rule._filename, x.rule._lineno)):
            conflict_log.info(' %s', item.to_string(name_map))
        conflict_log.info('')
        for (item, paths) in sorted(conflict_set.items(), key=lambda x: (x[0].rule._filename, x[0].rule._lineno)):
            _log(
                '%s using rule %s' % ('reduce' if item == item._last else 'shift', item.to_string(name_map)),
                sorted(
                    [p for _, p in paths],
                    key=lambda p: (
                        p._items[0][1].rule._prod_name,
                        p._items[0][1].rule._filename,
                        p._items[0][1].rule._lineno,
                    )
                ), conflict_log, name_map
            )
    return LALRTable(action, goto_table)
if TYPE_CHECKING:
from motor_typing import Any, Callable, Dict, FrozenSet, List, Optional, Set, Text, Tuple, Union
from .grammar import Grammar
from .lr0item import LR0Item
from .lr0path import LR0Path
from ..log import Logger
|
# Simple tkinter demo: draw a red half-disc and a white diagonal line on
# a blue canvas, then run the event loop.
# Fix: the original imported tkinter twice ("import tkinter" followed by a
# wildcard "from tkinter import *", which pollutes the namespace); use a
# single explicit import instead.
import tkinter

win = tkinter.Tk()
c = tkinter.Canvas(win, height=250, width=300, bg="blue")
# Bounding box of the arc: x0, y0, x1, y1.
coord = 20, 50, 250, 200
arc = c.create_arc(coord, start=0, extent=180, fill="red")
line = c.create_line(10, 10, 200, 200, fill="white")
c.pack()
win.mainloop()
|
from src.switchbox import *
from src.point import *
from src.netcrackerformat import *
# ============================== Analysis results ==============================
FILTER_GLOBAL_RES = "global pjs"      # Type: [PIPJunction]
FILTER_LOCAL_RES = "local pjs"        # Type: [PIPJunction]
FILTER_INTERNAL_RES = "internal pjs"  # Type: [PIPJunction]
# ==============================================================================


def posBasedFilter(sb):
    """Bucket the switchbox's PIP junctions by connection distance.

    A junction is 'internal' if it connects to a junction at distance 0,
    'local' at distance 1, and 'global' at distance > 1; a junction may
    land in several buckets.  Results are stored in sb.results under the
    FILTER_*_RES keys.
    """
    global_pjs = []
    local_pjs = []
    internal_pjs = []
    for pj in sb.PIPJunctions:
        # Collect distances to every forward and backward neighbour.
        distances = []
        for neighbour_list in (pj.forward_pjs, pj.backward_pjs):
            for neighbour in neighbour_list:
                distances.append((neighbour.pos - pj.pos).length)
        if any(d == 0 for d in distances):
            internal_pjs.append(pj)
        if any(d == 1 for d in distances):
            local_pjs.append(pj)
        if any(d > 1 for d in distances):
            global_pjs.append(pj)
    sb.results[FILTER_GLOBAL_RES] = global_pjs
    sb.results[FILTER_LOCAL_RES] = local_pjs
    sb.results[FILTER_INTERNAL_RES] = internal_pjs
|
# -*- coding: utf-8 -*-
import threading
import json
from typing import Optional
from dataclasses import dataclass, asdict
import inject
import template_logging
import template_exception
from flask import Request, Response
from werkzeug.exceptions import NotFound, BadRequest
from pymysql.constants import ER
from sqlalchemy.exc import DatabaseError
from template_rbac import AuthStore, Auth
from template_babel import get_text as _
from app.constants.code_map import CODE_MAP
from app.exceptions import ParamsInvalidException, ServerException, ClientException
from app.dependencies import MainDBSession
from app.utils.view import response
logger = template_logging.getLogger(__name__)
@dataclass
class HandlerStore:
    """Per-request snapshot, captured per-thread, used for error logging."""
    # Request URL
    url: str
    # HTTP method (GET/POST/...)
    method: str
    # Decoded query string
    args: str
    # JSON-serialized request body ("null" when the request carried no data)
    body: str
@dataclass
class RequestInfo(HandlerStore):
    """HandlerStore extended with authentication info for error logging."""
    # Auth token ("null" when unauthenticated)
    token: str
    # JSON-serialized user info ("null" when unauthenticated)
    user_info: str
class GlobalErrorHandler:
    """Application-wide exception handler.

    set_request() captures a per-thread snapshot of the incoming request;
    global_api_error_handler() maps any exception raised while serving
    that request to a JSON error response, logging the captured request
    context alongside the traceback.
    """

    def __init__(self):
        # Thread-local registry: each worker thread keeps its own
        # HandlerStore snapshot.
        self.registry = threading.local()

    def set_request(self, request: Request):
        """Capture the current request into thread-local storage."""
        # Serialize the body; "null" when the request carries no data.
        body: str = json.dumps(request.json or request.form) if request.data else "null"
        handler_store: HandlerStore = HandlerStore(
            request.url, request.method, request.query_string.decode(), body,
        )
        self.registry.handler_store = handler_store

    def get_handler_store(self) -> Optional[HandlerStore]:
        """Return this thread's request snapshot, or None if none was set."""
        return getattr(self.registry, 'handler_store', None)

    def get_request_info(self) -> str:
        """Build a JSON string describing the current request and user,
        suitable for inclusion in error logs."""
        auth: Auth = inject.instance(Auth)
        auth_store: AuthStore = auth.get_auth_store()
        token: str = auth_store.token if auth_store else 'null'
        user_info: str = json.dumps(auth_store.user_info) if auth_store else 'null'
        # Request snapshot captured by set_request()
        handler_store: Optional[HandlerStore] = self.get_handler_store()
        if handler_store is None:
            # BUGFIX: when the exception fired before set_request() ran,
            # handler_store is None and the attribute accesses below raised
            # AttributeError inside the error handler, masking the original
            # exception.  Fall back to "null" placeholders instead.
            handler_store = HandlerStore('null', 'null', 'null', 'null')
        # Assemble the loggable request info
        request_info: RequestInfo = RequestInfo(
            handler_store.url, handler_store.method, handler_store.args, handler_store.body,
            token, user_info
        )
        return json.dumps(asdict(request_info))

    def clear(self):
        """Drop this thread's request snapshot."""
        if hasattr(self.registry, 'handler_store'):
            del self.registry.handler_store

    @staticmethod
    def __handle_500(exception: Exception):
        """Map an exception to a 500 response.

        :param exception: exception being handled
        :return: JSON error response
        """
        code: int = 5001
        if hasattr(exception, 'code') and exception.code:
            code = exception.code
        # response 500
        return response.exception(
            code=code,
            status=500,
            message=str(exception)
        )

    @staticmethod
    def __handle_401(exception: Exception):
        """Map an exception to a 401 response.

        :param exception: exception being handled
        :return: JSON error response
        """
        code: int = 40104
        if hasattr(exception, 'code') and exception.code:
            code = exception.code
        # response 401
        return response.exception(
            code=code,
            status=401,
            message=str(exception)
        )

    @staticmethod
    def __handle_400(exception: Exception):
        """Map an exception to a 400 response.

        :param exception: exception being handled
        :return: JSON error response
        """
        code: int = 4001
        if hasattr(exception, 'code') and exception.code:
            code = exception.code
        # response 400
        return response.exception(
            code=code,
            status=400,
            message=str(exception)
        )

    @staticmethod
    def __handle_404(exception: Exception):
        """Map an exception to a 404 response.

        :param exception: exception being handled
        :return: JSON error response
        """
        # response 404
        return response.page_not_found(
            message=str(exception)
        )

    def __handler_database_error(self, exception: DatabaseError) -> Response:
        """Translate known MySQL error codes into user-facing messages;
        unrecognized codes fall through to a generic 500."""
        code, msg = exception.orig.args
        if code == ER.LOCK_DEADLOCK:
            return response.error(
                message=_('其它用户或系统正在操作,请稍后再试')
            )
        if code == ER.LOCK_WAIT_TIMEOUT:
            return response.error(
                message=_('获取数据库锁失败,其它用户或系统正在操作,请稍后再试')
            )
        if code == ER.DATA_TOO_LONG:
            return response.error(
                message=_('数据数据太长,超过了列的长度限制。具体报错:%s') % msg
            )
        if code == ER.TRUNCATED_WRONG_VALUE_FOR_FIELD:
            return response.error(
                message=_('提交了数据库不支持的字符。具体报错:%s') % msg
            )
        if code == ER.TABLE_EXISTS_ERROR:
            return response.error(
                message=_('该表已存在。具体报错:%s') % msg
            )
        if code == ER.DUP_ENTRY:
            return response.error(
                message=_('记录重复。具体报错:%s') % msg
            )
        if code == ER.INVALID_DEFAULT or code == ER.PARSE_ERROR:
            return response.error(
                message=_('建表时发生错误。具体报错:%s') % msg
            )
        if code == ER.BAD_FIELD_ERROR:
            return response.error(
                message=_('查询了不存在的列。具体报错:%s') % msg
            )
        if code == ER.NO_SUCH_TABLE:
            return response.error(
                message=_('表不存在。具体报错:%s') % msg
            )
        # 2006 mysql server has gone away, 2013 lost connection
        if code == 2006 or code == 2013:
            return response.error(
                message=_('数据库链接异常,请稍后再试。具体报错:%s') % msg
            )
        return self.__handle_500(exception)

    def __handler_sso_error(self, exception: template_exception.SSOException) -> Response:
        """Handle SSO/authentication exceptions."""
        code: int = exception.code or 40100
        # Default user-facing message
        message: str = str(CODE_MAP[40100])
        if isinstance(exception, template_exception.AuthorizedFailException):
            return response.error(code=code, message=message)
        if isinstance(exception, template_exception.UserResourceNotFoundException):
            return response.error(code=code, message=message)
        if isinstance(exception, template_exception.TokenInvalidException):
            return response.error(code=code, message=message)
        return self.__handle_401(exception)

    def global_api_error_handler(self, exception: Exception) -> Response:
        """Catch-all handler; dispatches on the exception's type."""
        # Ignore exceptions for static files
        if isinstance(exception, NotFound):
            return self.__handle_404(exception)
        logger.error(f"request info is {self.get_request_info()}", exc_info=True)
        # Authentication-related exceptions
        if isinstance(exception, template_exception.SSOException):
            return self.__handler_sso_error(exception)
        # Parameter validation failure
        if isinstance(exception, ParamsInvalidException):
            code: int = exception.code if hasattr(exception, 'code') else 4001
            return response.error(code=code, message=exception.message)
        # Roll back the main database session
        session = inject.instance(MainDBSession)
        session.rollback()
        # Client-side exceptions
        if isinstance(exception, ClientException):
            code: int = exception.code if hasattr(exception, 'code') else 4001
            return response.error(code=code, message=str(exception))
        # Database exceptions
        if isinstance(exception, DatabaseError):
            return self.__handler_database_error(exception)
        # Server-side exceptions
        if isinstance(exception, ServerException):
            return self.__handle_500(exception)
        # Handle BadRequest
        if isinstance(exception, BadRequest):
            return self.__handle_400(exception)
        return self.__handle_500(exception)
# Module-level singleton: import this instance and register its
# global_api_error_handler / set_request hooks with the Flask app.
global_error_handler: GlobalErrorHandler = GlobalErrorHandler()
|
from typing import List
from dataclasses import dataclass
from injector import inject
from domain.customer import Customer
from domain.repository.customer_repository import CustomerRepository
from domain.repository.menu_repository import MenuRepository
from domain.repository.store_repository import StoreRepository
from domain.store import Store
from service.store_service import StoreService
from domain.menu import Menu
@inject
@dataclass
class StoreServiceImpl(StoreService):
    """Store service backed by injected repository objects."""

    store_repository: StoreRepository
    customer_repository: CustomerRepository
    menu_repository: MenuRepository

    def get_menu(self, store_id: int) -> List['Menu']:
        """Return every menu offered by the given store."""
        return self.menu_repository.find_menus_by_store_id(store_id)

    def order(self, customer_id: int, menu_id: int) -> Menu:
        """Charge the customer for one menu item, credit the store, and
        persist both; returns the ordered menu."""
        # TODO Transaction
        menu: Menu = self.menu_repository.find_menu_by_menu_id(menu_id)
        store: Store = self.store_repository.find_store_by_store_id(menu.store_id)
        customer: Customer = self.customer_repository.find_customer_by_customer_id(customer_id)
        price = menu.menu_price
        # Move the money from the customer to the store.
        store.money = store.money + price
        customer.money = customer.money - price
        self.store_repository.save(store)
        self.customer_repository.save(customer)
        return menu
|
# pylint: disable=unused-wildcard-import
from typing import Callable
import functools
import traceback
# Local
from .common import *
from .executor import NoRemoteException
# External
from py_derive_cmd import Settings, CommandInfo, UsageException
class MySettings(Settings):

    def handle_exceptions(self, command: 'CommandInfo', function_that_might_fail: Callable) -> Callable:
        '''
        Wrap *function_that_might_fail* so that every exception it raises is
        caught and reported instead of propagating.  Used when creating
        do_command and complete_command functions.
        '''

        @functools.wraps(function_that_might_fail)
        def guarded(*args, **kwargs):
            try:
                return function_that_might_fail(*args, **kwargs)
            except UsageException as ex:
                # The user invoked the command with bad arguments.
                print(err(str(ex)))
                print('Expected parameters:', command.usage_params)
            except NoRemoteException:
                print(err('No remote is defined'))
                print('Hint: Start this program with --help to see how to specify a remote host')
            except Exception:
                # Anything else is a bug: show the full traceback in a box.
                self.print_error(self.make_box_message('Internal error', traceback.format_exc()))

        return guarded
|
# Plexiglass box generating marginal value maps
# Concept design by Ben Bryant
# code by Ginger Kowal
# 5.15.15
import os
import sys
if 'C:/Python27/Lib/site-packages' not in sys.path:
sys.path.append('C:/Python27/Lib/site-packages')
sys.path.append('C:/Users/Ginger/Documents/Python/csopt_Ginger_working/csopt')
from datetime import datetime as dtdatetime
import itertools
import re
import objgraph
import logging
import importlib
import numpy as np
from scipy import sparse
from scipy.optimize import curve_fit
import pandas
import matplotlib.pyplot as plt
from osgeo import gdal
import statsmodels.api as sm
import cvxpy as cvx
import cvxopt
import pygeoprocessing.geoprocessing
import IntegerSolver as isolve
import fitters as csopt_fit
import reporting as csopt_report
from memory_profiler import profile
gdal.UseExceptions()
# NOTE: the original 'global _debug' / 'global _run_InVEST' statements were
# removed: a 'global' declaration at module scope is a no-op, and the plain
# assignments below already create these module-level flags.
_run_InVEST = False
_debug = False
class InVEST_model:
    """Holds what is needed to run an InVEST model: its args and the output
    file from which marginal value is measured.

    eval: should the model be re-run if evaluate_solution is True?  I.e. do
    we expect different marginal values once the solution is implemented —
    is the model 'spatially interdependent'?
    """

    def __init__(self, name, args, module, output, eval):
        self.name = name
        self.args = args
        self.output = output
        # Resolve the model's python module from its dotted path; when
        # module is None, self.module is left unset.
        if module is not None:
            self.module = importlib.import_module(module)
        self.eval = eval

    def execute(self):
        """Run the model with the stored arguments."""
        self.module.execute(self.args)
class Objective:
    """An optimization objective, optionally linked to an InVEST model by
    name.  Maximize when the output returns are beneficial (e.g. crop
    yields); minimize when they are detrimental (e.g. sediment export)."""

    def __init__(self, name, weight, lower_level_target, upper_level_target,
                 target_type, maximize):
        self.name = name
        # Positive weight: maximize; negative weight: minimize.
        self.weight = weight
        self.l_target = lower_level_target
        self.u_target = upper_level_target
        self.target_type = target_type
        self.maximize = maximize
def get_objective_weight(objective_list, model_name):
    """Return the weight of the single objective whose name equals
    model_name; raise if the name is missing or duplicated."""
    matches = [obj.weight for obj in objective_list if obj.name == model_name]
    if len(matches) > 1:
        raise Exception("Model name found twice in objective list")
    if not matches:
        raise Exception("Model name not found in objective list")
    return matches[0]
def raster_to_np_array(raster):
    """Read band 1 of a GeoTIFF into a numpy array, mapping the band's
    nodata value to -9999 (the nodata convention used throughout this
    script)."""
    ds = gdal.Open(raster)
    band = ds.GetRasterBand(1)
    array = np.array(band.ReadAsArray())
    nodata = band.GetNoDataValue()
    if array.dtype == np.int8:
        # gdal converts signed 8 bit to unsigned, so refuse 8-bit rasters.
        er = "Exception: raster is of 8 bit type"
        raise Exception(er)
    # For the purposes of this script, -9999 is always NoData.
    array[array == nodata] = -9999
    ds = None  # release the gdal dataset handle
    return array
def array_to_raster(array, template_raster, new_name, datatype=None):
    """Write a numpy array to raster as a GeoTIFF of filename new_name, with
    spatial extent and projection of the template raster.

    array: 2-d numpy array (or scipy sparse matrix) of pixel values.
    template_raster: path to an existing raster supplying size, geotransform
        and projection.
    new_name: path of the GeoTIFF to create.
    datatype: optional GDAL data type code; defaults to the template
        band's type.
    Raises Exception if the template cannot be opened or the output cannot
    be created.  Returns nothing.
    """
    inDs = gdal.Open(template_raster)
    if inDs is None:
        er = "Could not open template raster dataset"
        raise Exception(er)
    rows = inDs.RasterYSize
    cols = inDs.RasterXSize
    if datatype is not None:
        data_type = datatype
    else:
        # Inherit the pixel type of the template's first band.
        data_type = (inDs.GetRasterBand(1)).DataType
    driver = gdal.GetDriverByName('GTiff')
    outDs = driver.Create(new_name, cols, rows, 1, data_type)
    if outDs is None:
        er = "Could not create new raster object"
        raise Exception(er)
    out_band = outDs.GetRasterBand(1)
    # Sparse matrices must be densified before writing.
    if sparse.issparse(array):
        array = array.toarray()
    out_band.WriteArray(array)
    del array
    out_band.FlushCache()
    # -9999 is the script-wide nodata convention.
    out_band.SetNoDataValue(-9999)
    outDs.SetGeoTransform(inDs.GetGeoTransform())
    outDs.SetProjection(inDs.GetProjection())
    # Dropping the references closes the gdal datasets (flushes to disk).
    inDs = None
    outDs = None
def merge_rasters(rasters_to_merge, save_as):
    """Mosaic the positive values of several same-shaped rasters into one.

    Where positive regions overlap, rasters later in the list overwrite
    earlier ones (the last raster is never covered).  The merged raster is
    written to save_as; returns nothing."""

    def merge_op(*rasters):
        # Start from an all-nodata canvas and paint positive pixels from
        # each raster in order, so later rasters win where they overlap.
        # Values <= 0 (including the 0 nodata) are never copied.
        merged = np.full((rasters[0].shape), -9999)
        for arr in rasters:
            np.copyto(merged, arr, where=arr > 0)
        return merged

    cell_size = pygeoprocessing.geoprocessing.get_cell_size_from_uri(
        rasters_to_merge[0])
    pygeoprocessing.geoprocessing.vectorize_datasets(
        rasters_to_merge, merge_op, save_as,
        gdal.GDT_Int32, -9999, cell_size, "union",
        dataset_to_align_index=0, vectorize_op=False)
def set_nodata_areas(pdict, value_raster, mask, out_uri):
    """Copy value_raster to out_uri, forcing pixels outside the mask to
    -9999 so they are correctly shown as NoData in ArcGIS.

    pdict is unused here but kept for interface compatibility."""

    def copy_data_areas(value_raster, mask):
        # Keep values only where the mask is non-zero.
        out = np.full((value_raster.shape), -9999)
        np.copyto(out, value_raster, where=mask != 0)
        return out

    # Preserve the source raster's pixel type in the output.
    ds = gdal.Open(value_raster)
    data_type = (ds.GetRasterBand(1)).DataType
    del ds
    cell_size = pygeoprocessing.geoprocessing.get_cell_size_from_uri(
        value_raster)
    pygeoprocessing.geoprocessing.vectorize_datasets(
        [value_raster, mask], copy_data_areas, out_uri, data_type,
        -9999, cell_size, "dataset",
        dataset_to_bound_index=0, vectorize_op=False)
def save_sparse_csr(filename, array):
    """Persist a scipy CSR matrix via np.savez, storing its data, indices,
    indptr and shape arrays."""
    parts = {'data': array.data, 'indices': array.indices,
             'indptr': array.indptr, 'shape': array.shape}
    np.savez(filename, **parts)
def load_sparse_csr(filename):
    """Rebuild a CSR matrix from an archive written by save_sparse_csr."""
    archive = np.load(filename)
    return sparse.csr_matrix(
        (archive['data'], archive['indices'], archive['indptr']),
        shape=archive['shape'])
def get_service_scores(raster, mask=None):
    """Load a results raster as a sparse CSR matrix of scores.

    Nodata pixels (-9999) become 0; when a (sparse) mask is supplied,
    values outside the mask are zeroed as well."""
    values = raster_to_np_array(raster)
    values[values == -9999] = 0
    if mask is not None:
        # Keep only pixels where the mask equals 1.
        values[mask.toarray() != 1] = 0
    return sparse.csr_matrix(values)
def calc_marginal_scores(path1, path2):
    """Per-pixel marginal scores: the raster at path2 subtracted from the
    raster at path1, as a sparse matrix."""
    return get_service_scores(path1) - get_service_scores(path2)
def calc_cost_limit(pdict, cost_list, objective_list, rau):
    """Calculate the range of costs that should be tested in order to
    generate a continuous response curve within one RAU.

    Fix: the local variable 'file' shadowed the builtin; renamed to
    rau_file.  Behavior is unchanged.
    """
    max_cost = max(cost_list)
    rau_dir = os.path.join(pdict[u'intermediate'], 'rau_' +
                           objective_list[0].name)
    rau_file = os.path.join(rau_dir, 'rau' + str(rau) + '.npy')
    if not os.path.isfile(rau_file):
        raise Exception("file %s not found" % rau_file)
    obj_data = np.load(rau_file)
    # Upper bound: every defined pixel gets the most expensive intervention.
    # This ignores any pixels where intervention is not defined.
    upper_limit = [obj_data.shape[0] * max_cost]
    return upper_limit
def get_mask_list_reg(raster_shape, reg_dimensions):
    """Generate a list of raster masks by regularly sampling the landscape.
    Takes the shape of the raster to be sampled and the dimensions of the
    "building block grid" as arguments. Returns a list of masks consisting of
    regularly sampled groups of cells; together these masks cover the raster
    entirely.

    NOTE(review): this function is unfinished -- the mask-construction loop
    below is commented out, so it currently always returns an empty list
    and raster_shape is never used.
    """
    # generate all possible "building blocks" from regular dimensions
    # e.g. for regular dimensions (2, 2), possible building blocks are
    # (0, 0), (0, 1), (1, 0), (1, 1)
    block_list = []
    # NOTE: xrange is Python 2 only.
    combinations = [list(xrange(reg_dimensions[0])), list(xrange(
        reg_dimensions[1]))]
    for element in itertools.product(*combinations):
        block_list.append(element)
    # for each possible building block pattern, piece it together to cover the
    # raster
    mask_list = []
    # for block in block_list:
    # mask = ummmm ?
    # mask_list.append(mask)
    # TODO finish this
    # each element of mask_list (each mask) should be of the same shape as
    # the raster, and mask values of 1 indicate pixels to apply interventions
    return mask_list
def get_mask_list_perc(raster_sort_order, percentile_list, unique=None):
    """Generate a list of raster masks where each mask identifies a group of
    pixels by position that we expect to have minimum spatial interaction.
    Pixels are sampled in percentiles by position from the raster sort order so
    that the raster sort order defines groups of pixels that are expected to
    have minimum spatial interaction; the percentile list defines how many
    groups of pixels should be selected and how large the groups should be
    relative to each other. The percentile list may be supplied as a list of
    integers less than or equal to 100, or as a list of floats less than or
    equal to 1.

    Fix: the original used a Python-2-only print statement; the
    parenthesized form below behaves identically on Python 2 and 3.
    """
    mask_list = []
    past = []
    # Any entry > 1 means the list holds integer percentiles (e.g. [10, 50])
    # rather than fractions (e.g. [0.1, 0.5]).
    integer_based = [t for t in percentile_list if t > 1]
    if integer_based:
        if np.max(percentile_list) > 100:
            er = "Error: percentile list contains elements greater than 100"
            raise Exception(er)
        print("Array is integer based, converting to float")
        percentile_list = np.multiply(percentile_list, 0.01)
    perc_list_sorted = np.sort(percentile_list)
    for percentile in perc_list_sorted:
        # Cumulative prefix of the sort order up to this percentile.
        full_subset = raster_sort_order[:(int(percentile * len(
            raster_sort_order)))]
        if unique:
            # Drop pixels already assigned to a previous (smaller) mask.
            mask = list(set(full_subset) - set(past))
        else:
            mask = full_subset
        mask_list.append(mask)
        past = full_subset
    # TODO cumulative or unique?
    # Ben's original design document showed this as cumulative. Then when
    # selecting the marginal value to apply to each pixel, would find the first
    # mask where that pixel appeared and take the marginal value according to
    # that mask and assign it to the pixel. Why not make these masks distinct?
    # so that each pixel appears in exactly one mask?
    return mask_list
def get_raster_sort_order(unsorted_array):
    """Return flat positions of all non-nodata pixels in random order.

    Pixels earlier in the returned order receive interventions first; the
    elements give positions in the flattened raster."""
    flat = np.ravel(unsorted_array)
    positions = np.arange(flat.shape[0])
    # Drop positions that point at NoData pixels.
    positions = positions[flat != -9999]
    # For now, the priority order is simply random.
    np.random.shuffle(positions)
    return positions
def extract_by_mask(values, mask, index=None):
    """Extract the values of a 2-d array that fall under a mask.

    Both values and mask may be dense arrays or scipy sparse matrices.
    When index is given (e.g. to identify an RAU), pixels whose mask value
    equals index are extracted; otherwise pixels where the mask equals 1."""
    value_arr = values.toarray() if sparse.issparse(values) else values
    mask_arr = mask.toarray() if sparse.issparse(mask) else mask
    match_val = 1 if index is None else index
    return np.extract(mask_arr == match_val, value_arr)
def get_mask_list_entire(pdict, index=None, rau_raster=None):
    """Return a one-element list holding a sparse mask.

    Without an index, the mask covers every data pixel of the lulc raster
    (useful for whole-landscape, non-spatially-interdependent models).
    With an index and rau_raster, the mask selects the pixels of
    rau_raster equal to index."""
    if index is not None and rau_raster is not None:
        arr = raster_to_np_array(rau_raster)
    else:
        arr = raster_to_np_array(pdict[u'lulc'])
    mask = np.zeros(arr.shape)
    if index is None:
        mask[arr != -9999] = 1
    else:
        mask[arr == index] = 1
    return [sparse.csr_matrix(mask)]
def delete_geotiff(raster_name):
    """Delete a GeoTIFF including all associated sidecar files (files whose
    names begin with raster_name's basename).  raster_name may be given
    with or without the '.tif' extension.  Returns nothing.

    Fixes: the extension test used the pattern ".tif$" with an unescaped
    dot, so e.g. a file named "xtif" was wrongly treated as already having
    the extension; also the loop variable 'file' shadowed a builtin.
    """
    folder = os.path.dirname(raster_name)
    file_name = os.path.basename(raster_name)
    if re.search(r"\.tif$", file_name):
        tif_name = file_name
    else:
        tif_name = file_name + '.tif'
    entries = [f for f in os.listdir(folder) if os.path.isfile(
        os.path.join(folder, f))]
    # Match the tiff itself plus sidecars like "<name>.tif.aux.xml".
    pattern = r"^" + re.escape(tif_name)
    for entry in [f for f in entries if re.search(pattern, f)]:
        os.remove(os.path.join(folder, entry))
def get_allowable_lulc(lulc_arr, intervention, biophys):
    """List the lulc codes for which the biophysical table defines the
    given intervention (i.e. it has a row whose description is
    '<description>-><intervention>').  Only those types can have the
    intervention applied and their marginal value computed."""
    by_code = biophys.set_index('lucode')
    by_desc = biophys.set_index('description')
    # Candidate lulc types present in the raster, excluding nodata and 0.
    candidates = [c for c in np.unique(lulc_arr) if c not in (-9999, 0)]
    allowed_lulc = []
    for lulc in candidates:
        # Look up the transition row for this intervention and lulc type.
        transition = by_code.loc[lulc]['description'] + "->" + intervention
        if transition in by_desc.index:
            allowed_lulc.append(lulc)
    return allowed_lulc
def identify_undefined_pixels(pdict, intervention_list):
    """For every intervention/RAU pair, save a .npy flag array marking the
    pixels whose lulc type has no biophysical parameters for that
    intervention (flag value 1 = undefined)."""
    rau_raster = get_rau_raster(pdict)
    rau_list = pygeoprocessing.geoprocessing.unique_raster_values_uri(
        rau_raster)
    orig_lulc = raster_to_np_array(pdict[u'lulc'])
    biophys = pandas.read_csv(pdict[u'biophys'])
    for indx, intervention in enumerate(intervention_list):
        # Flag every pixel whose lulc type does not allow this intervention.
        und_arr = np.zeros(orig_lulc.shape)
        allowed_lulc = get_allowable_lulc(orig_lulc, intervention, biophys)
        for lulc in np.unique(orig_lulc):
            if lulc not in allowed_lulc:
                und_arr[orig_lulc == lulc] = 1
        for rau in rau_list:
            # Extract the flags falling inside this RAU and persist them.
            mask = get_mask_list_entire(pdict, rau, rau_raster)[0]
            extracted = extract_by_mask(und_arr, mask)
            save_as = os.path.join(pdict[u'intermediate'],
                                   'undefined_pixels_i%d_rau%d.npy' %
                                   (indx, rau))
            np.save(save_as, extracted)
def discard_undefined_pixels(raw_model_data, undefined_pixels, weight):
    """(Deprecated; superseded by logic inside IntegerSolver.optimize.)

    Overwrite the values of pixels whose transition is undefined with a
    sentinel so the optimizer never selects that intervention there.
    Returns the data column-stacked (rows become columns)."""
    # Sentinel chosen so undefined pixels look maximally unattractive.
    if weight > 0:    # objective should be maximized
        new_val = -99999
    elif weight < 0:  # objective should be minimized
        new_val = 99999
    else:
        new_val = 0
    model_arr = np.asarray(raw_model_data)
    undef_arr = np.asarray(undefined_pixels)
    assert model_arr.shape == undef_arr.shape, """Model data and undefined pixel
    data must reflect same number of pixels and interventions"""
    model_arr[undef_arr == 1] = new_val
    del undef_arr
    return np.column_stack(model_arr)
def create_intervention_maps(pdict, model_list, intervention_list):
    """Create landuse maps where each intervention is applied across the
    entire landscape (the same maps mask type == 'entire' would produce).
    They are used to translate solution rasters to lulc rasters after
    optimization.

    model_list is accepted for interface compatibility but is not used.
    Fixes: removed an unused read of the full lulc raster (orig_lulc) and
    hoisted the output-directory creation out of the loop.
    """
    mask = get_mask_list_entire(pdict)[0]
    biophys = pandas.read_csv(pdict[u'biophys'])
    # TODO consider we are duplicating the mask_entire if num mask = 1
    # (both in processing time and storage space). Could write a clever way
    # to retrieve the mask if num mask = 1.
    lulcdir = os.path.join(pdict[u'outerdir'], 'modified_lulc')
    if not os.path.exists(lulcdir):
        os.makedirs(lulcdir)
    for i_index, intervention in enumerate(intervention_list):
        # Apply the intervention across the landscape completely.
        save_as = os.path.join(lulcdir, 'lulc_entire_i%d.tif' % i_index)
        apply_intervention(pdict, biophys, pdict[u'lulc'], mask, intervention,
                           save_as, delete_existing=True)
def apply_intervention(pdict, biophys, lulc_raster, mask, intervention,
                       save_as, delete_existing=False):
    """Generate a new lulc raster with the selected intervention applied to the
    raster cells that fall under the mask, saved as save_as.

    pdict: parameter dict; u'intermediate' is used for scratch rasters.
    biophys: biophysical table (pandas DataFrame) with 'lucode' and
        'description' columns; transition rows are named
        '<description>-><intervention>'.
    mask: dense or sparse 0/1 array selecting the cells to modify.
    delete_existing: if save_as already exists, delete it instead of
        raising.
    Raises Exception when save_as exists and delete_existing is False.
    Returns nothing.
    """
    lulc_arr = raster_to_np_array(lulc_raster)
    if sparse.issparse(mask):
        mask = mask.toarray()
    if os.path.isfile(save_as):
        if delete_existing:
            delete_geotiff(save_as)
        else:
            er = "Error: modified lulc raster already exists"
            raise Exception(er)
    gdal.AllRegister()
    # Index the table both ways: code -> description and description -> code.
    biophys_lu = biophys.set_index('lucode')
    biophys_desc = biophys.set_index('description')
    allowed_lulc = get_allowable_lulc(lulc_arr, intervention, biophys)
    arr_copy = lulc_arr.astype(np.int32, copy=True)
    # Only lulc types present under the mask AND allowed get reclassified.
    lulc_list = list(np.unique(lulc_arr[mask == 1]))
    del lulc_arr
    lulc_categories = set(lulc_list).intersection(allowed_lulc)
    # TODO add list of lulc categories that are not allowed to log file
    for lulc in lulc_categories:
        # look up new category according to intervention and lulc type
        desc = biophys_lu.loc[lulc]['description']
        new_desc = desc + "->" + intervention
        new_lulc = biophys_desc.loc[new_desc]['lucode']
        arr_copy[(arr_copy == lulc) & (mask == 1)] = new_lulc
    # Write the reclassified array and the mask to scratch rasters, then
    # produce the final raster with NoData set outside the mask.
    lulc_reclassified = os.path.join(pdict[u'intermediate'],
                                     'reclassified_lulc.tif')
    temp_mask = os.path.join(pdict[u'intermediate'], 'temporary_mask.tif')
    array_to_raster(arr_copy, lulc_raster, lulc_reclassified, datatype=5)
    array_to_raster(mask, lulc_raster, temp_mask, datatype=5)
    set_nodata_areas(pdict, lulc_reclassified, temp_mask, save_as)
    # Clean up the scratch rasters.
    delete_geotiff(lulc_reclassified)
    delete_geotiff(temp_mask)
def run_model(pdict, model, intervention_list, mask_list):
    """Run the model once on the baseline lulc, then once for every
    intervention/mask combination on a correspondingly modified lulc."""
    # Default (baseline) run of the model.
    model.execute()
    biophys = pandas.read_csv(pdict[u'biophys'])
    outdir = os.path.join(pdict[u'outerdir'], model.name, 'modified_lulc')
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    for i_index, intervention in enumerate(intervention_list):
        for m_index, mask in enumerate(mask_list):
            suffix = '_i%d_m%d' % (i_index, m_index)
            save_as = os.path.join(outdir, 'lulc' + suffix + '.tif')
            # Apply the intervention to the cells under this mask, then
            # point the model at the modified lulc and run it.
            apply_intervention(pdict, biophys, pdict[u'lulc'], mask,
                               intervention, save_as, delete_existing=True)
            model.args['lulc_uri'] = save_as
            model.args['results_suffix'] = suffix
            model.execute()
def align_results(pdict, model, num_intervention, num_mask):
    """Align all result rasters with the original lulc raster so the result
    arrays can later be reclassified by per-pixel lulc values.

    Returns the list of aligned raster paths: the lulc raster first, then
    the default model run, then every intervention/mask result."""
    workspace = model.args[u'workspace_dir']
    # Everything aligns to the original lulc; the default run comes second.
    results_list = [pdict[u'lulc'],
                    os.path.join(workspace, model.output)]
    # Collect all intervention/mask result rasters.
    for i_index in xrange(num_intervention):
        for m_index in xrange(num_mask):
            suffix = '_i%d_m%d' % (i_index, m_index)
            results_list.append(os.path.join(
                workspace, model.output[:-4] + suffix + '.tif'))
    intermediate_dir = os.path.join(workspace, 'aligned_data')
    if not os.path.exists(intermediate_dir):
        os.makedirs(intermediate_dir)
    aligned_list = [os.path.join(intermediate_dir, os.path.basename(f))
                    for f in results_list]
    out_pixel_size = pygeoprocessing.geoprocessing.get_cell_size_from_uri(
        results_list[0])
    pygeoprocessing.geoprocessing.align_dataset_list(
        results_list, aligned_list, ['nearest'] * len(aligned_list),
        out_pixel_size, 'dataset', 0, dataset_to_bound_index=0)
    return aligned_list
def calc_soln_agg_obj(pdict, model_list, objective_list, rau_list,
                      intervention_list, value_db, row=None, rsuf=None):
    """Calculate the actual aggregate objective achieved following
    implementation of the optimal solution by creating lulc maps that reflect
    the solution and running InVEST models on these maps. The aggregate
    objective is calculated from model results per RAU and can be compared to
    the aggregate objective predicted by the upper level solver.

    Parameters:
        pdict (dict): project paths; keys used here: u'lulc',
            u'sol_map_dir', u'outerdir'
        model_list (list): model wrappers with .name, .eval, .args,
            .output and .execute()
        objective_list (list): objectives with .name and .weight
        rau_list (list): numeric RAU identifiers
        intervention_list (list): interventions; in a solution raster,
            pixel value 0 means "do nothing" and value i selects
            intervention i-1
        value_db (list): per-RAU dicts of predicted values, read directly
            for models with eval == False
        row (int or None): row index baked into solution/merged file names
        rsuf (str or None): extra suffix baked into merged file names

    Returns:
        dict mapping 'realized_<name>' to a per-RAU list of realized
        values, plus 'realized_agg_obj' holding the weighted sum per RAU.
    """
    # create intervention maps to support translation of solution raster to
    # lulc
    create_intervention_maps(pdict, model_list, intervention_list)
    # translate solution rasters to modified lulc
    lulc_rasters = [pdict[u'lulc']]
    soln_rasters = []
    for rau in rau_list:
        # per-RAU solution raster previously written by the lower-level solver
        if row is None:
            soln = os.path.join(pdict[u'sol_map_dir'], 'solution_rau%d.tif' %
                                int(rau))
        else:
            soln = os.path.join(pdict[u'sol_map_dir'],
                                'solution_rau%d_row%d.tif' % (int(rau), row))
        lulc = pdict[u'lulc']
        input_list = [soln, lulc]
        # keep only solutions that selected at least one intervention
        if pygeoprocessing.geoprocessing.unique_raster_values_uri(soln) != [0]:
            soln_rasters.append(soln)
        for i_index in range(len(intervention_list)):
            input_list.append(os.path.join(pdict[u'outerdir'], 'modified_lulc',
                                           'lulc_entire_i%d.tif' % i_index))
        result_raster = os.path.join(pdict[u'outerdir'], 'modified_lulc',
                                     'soln_lulc_rau%d.tif' % int(rau))
        lulc_rasters.append(result_raster)
        out_pixel_size = pygeoprocessing.geoprocessing.get_cell_size_from_uri(
            pdict[u'lulc'])

        def solution_to_lulc(soln, lulc, *args):
            # a list with unknown length; must be sorted
            intervention_rasters = list(args)
            # intervention 0 ---> do nothing
            intervention_rasters.insert(0, lulc)
            result_raster = np.full((soln.shape), -9999)
            for intervention in xrange(len(intervention_rasters)):
                # copy lulc codes from the chosen intervention map wherever
                # the solution raster selected that intervention
                np.copyto(result_raster, intervention_rasters[intervention],
                          where=soln == intervention)
            return result_raster
        pygeoprocessing.geoprocessing.vectorize_datasets(
            input_list, solution_to_lulc, result_raster,
            gdal.GDT_Int32, -9999, out_pixel_size, "union",
            dataset_to_align_index=0, vectorize_op=False)
    # choose file names for the merged (all-RAU) rasters
    if row is None:
        if rsuf is None:
            merged_lulc = os.path.join(pdict[u'sol_map_dir'],
                                       'merged_soln_lulc.tif')
            merged_soln = os.path.join(pdict[u'sol_map_dir'],
                                       'merged_soln.tif')
        else:
            merged_lulc = os.path.join(pdict[u'sol_map_dir'],
                                       'merged_soln_lulc_%s.tif' % rsuf)
            merged_soln = os.path.join(pdict[u'sol_map_dir'],
                                       'merged_soln_%s.tif' % rsuf)
    else:
        if rsuf is None:
            merged_lulc = os.path.join(pdict[u'sol_map_dir'],
                                       'merged_soln_lulc_row%d.tif' % row)
            merged_soln = os.path.join(pdict[u'sol_map_dir'],
                                       'merged_soln_row%d.tif' % row)
        else:
            merged_lulc = os.path.join(pdict[u'sol_map_dir'],
                                       'merged_soln_lulc_row%d_%s.tif' %
                                       (row, rsuf))
            merged_soln = os.path.join(pdict[u'sol_map_dir'],
                                       'merged_soln_row%d_%s.tif' %
                                       (row, rsuf))
    merge_rasters(lulc_rasters, merged_lulc)
    if len(soln_rasters) > 0:
        merge_rasters(soln_rasters, merged_soln)
    soln_margv = {}
    for model in model_list:
        if not model.eval:
            # we don't expect that re-running the model would make a difference
            # (predicted == realized)
            # populate soln_margv directly from value_db
            soln_margv['realized_' + model.name] = []
            for idx in xrange(len(value_db)):
                soln_margv['realized_' + model.name].append(
                    value_db[idx][model.name][0])
            continue
        # run InVEST with solution rasters, collect actual marginal value
        model.args[u'lulc_uri'] = merged_lulc
        suffix = 'optim_soln'
        model.args['results_suffix'] = suffix
        model.execute()
        # baseline output vs output produced under the merged solution lulc
        baseline = os.path.join(model.args[u'workspace_dir'], model.output)
        soln = os.path.join(model.args[u'workspace_dir'],
                            model.output[:-4] + '_' + suffix +
                            model.output[-4:])
        marginal_scores = calc_marginal_scores(soln, baseline)  # soln-baseline
        rau_raster = get_rau_raster(pdict)
        rau_mask = sparse.csr_matrix(raster_to_np_array(rau_raster))
        model_margv = []
        for rau in rau_list:
            # sum realized marginal value over this RAU's pixels
            extr_vals = extract_by_mask(marginal_scores, rau_mask,
                                        index=int(rau))
            model_margv.append(np.sum(extr_vals))
        soln_margv["realized_" + model.name] = model_margv
    modeled_objs = [model.name for model in model_list]
    for objective in objective_list:
        if objective.name not in modeled_objs:
            # get rau value some other way
            soln_margv["realized_" + objective.name] = [0] * len(rau_list)
    # calc aggregate objective
    soln_margv['realized_agg_obj'] = []
    for ridx in xrange(len(rau_list)):  # for each rau
        rau_agg_obj = 0
        for objective in objective_list:
            rau_agg_obj += (objective.weight * soln_margv["realized_" +
                            objective.name][ridx])
        soln_margv['realized_agg_obj'].append(rau_agg_obj)
    return soln_margv
def get_intervention_scores(pdict, model, baseline_raster_path,
                            result_raster_path, orig_lulc, intervention,
                            mask_entire):
    """Get marginal values from one InVEST run.

    Computes per-pixel marginal scores (result minus baseline), then
    restricts them to the pixels selected by mask_entire. When the
    module-level _debug flag is set, intermediate rasters are written to a
    'debug' folder under the model workspace for inspection.
    """
    marginal_scores = calc_marginal_scores(result_raster_path,
                                           baseline_raster_path)
    if _debug:
        # check the final array
        debugdir = os.path.join(model.args[u'workspace_dir'], 'debug')
        if not os.path.exists(debugdir):
            os.makedirs(debugdir)
        suffix = os.path.basename(result_raster_path)[11:]
        dense_marginal = marginal_scores.toarray()
        array_to_raster(dense_marginal, result_raster_path,
                        os.path.join(debugdir, 'marginal' + suffix))
        del dense_marginal
    # remove all nodata cells outside raster
    extracted_scores = extract_by_mask(marginal_scores, mask_entire)
    if _debug:
        values_to_raster(pdict, extracted_scores,
                         os.path.join(debugdir, 'extracted' + suffix),
                         index=None, use_existing=False)
    return extracted_scores
def collect_results(pdict, model, aligned_results_list, intervention_list):
    """Collect results from InVEST runs and format for input to the
    optimizer.

    Returns a list of per-intervention marginal score arrays, one entry
    per item in intervention_list.
    """
    lulc_arr = raster_to_np_array(pdict[u'lulc'])
    lulc_arr[lulc_arr == -9999] = 0
    orig_lulc = sparse.csr_matrix(lulc_arr)
    del lulc_arr
    mask_entire = get_mask_list_entire(pdict)[0]
    intervention_scores_list = []
    for i_index, intervention in enumerate(intervention_list):
        default_raster_path = aligned_results_list[1]
        # rasters belonging to this intervention index
        pattern = r"_i" + str(i_index)
        result_rasters = [p for p in aligned_results_list
                          if re.search(pattern, p)]
        # TODO handle multiple masks that need to be mosaicked together (will
        # be the rasters in 'result_rasters')
        # for now
        result_raster_path = result_rasters[0]
        scores = get_intervention_scores(pdict, model, default_raster_path,
                                         result_raster_path, orig_lulc,
                                         intervention, mask_entire)
        if np.nanmax(scores) == 0 and np.nanmin(scores) == 0:
            print("Warning: intervention scores are all zero")
        intervention_scores_list.append(scores)
    return intervention_scores_list
def get_ll_data_from_rasters(pdict, model, intervention_list, mask_type):
    """Collect data from results rasters that were created independently of an
    InVEST model. The rasters, located in the folder specified by
    model.args[u'workspace_dir'], should have the name model.output followed by
    a suffix of the form 'i_X_m_0' where X is in the set of indices of the
    intervention_list.

    Saves the stacked marginal-value array to
    <outerdir>/intermediate/<model.name>_ll_data.npy and records the
    intermediate folder in pdict.
    """
    print("Generating marginal values from rasters.....")
    intermediate_dir = os.path.join(pdict[u'outerdir'], 'intermediate')
    if not os.path.exists(intermediate_dir):
        os.makedirs(intermediate_dir)
    pdict[u'intermediate'] = intermediate_dir
    raw_data = get_ll_data(pdict, model, intervention_list, mask_type)
    # one column per intervention
    stacked = np.column_stack(np.asarray(raw_data))
    out_file = os.path.join(pdict[u'intermediate'],
                            model.name + '_ll_data.npy')
    np.save(out_file, stacked)
    del stacked
    del raw_data
def get_ll_data(pdict, model, intervention_list, mask_type):
    """Generate lower level data for input to the optimizer by running InVEST
    models with different interventions applied.

    Model runs are skipped when every expected result raster already
    exists in the model workspace.
    """
    if mask_type == 'entire':
        num_mask = 1
    else:
        num_mask = 1  # placeholder: determine # masks here from mask method
    # every raster the runs are expected to produce
    expected_paths = []
    for mindx in xrange(num_mask):
        for indx in xrange(len(intervention_list)):
            rastername = model.output[:-4] + '_i%d_m%d' % (indx, mindx) + \
                '.tif'
            expected_paths.append(os.path.join(model.args[u'workspace_dir'],
                                               rastername))
    existing = [p for p in expected_paths if os.path.isfile(p)]
    if len(existing) < len(expected_paths):
        # fancy mask generation goes here
        mask_list = get_mask_list_entire(pdict)
        run_model(pdict, model, intervention_list, mask_list)
        del mask_list
    aligned = align_results(pdict, model, len(intervention_list), num_mask)
    return collect_results(pdict, model, aligned, intervention_list)
def values_to_raster(pdict, values, save_as, index=None, use_existing=False):
    """Create a raster with values from an array corresponding to the area
    identified by a mask. Useful for testing/visualizing intermediate results.
    This formulation assumes there is one value for each pixel in the mask.

    Parameters:
        pdict (dict): project paths; keys used: u'lulc', u'intermediate'
        values (numpy.ndarray): one value per nonzero mask pixel
        save_as (str): path of the raster to create
        index (int or None): if supplied, restrict the mask to that RAU id
        use_existing (bool): reuse a previously cached sparse value matrix
            (<intermediate>/<basename>.npz) instead of rebuilding it
    """
    if index is None:
        mask_uri = pdict[u'lulc']
    else:
        # build a temporary mask raster for the single RAU
        mask_uri = os.path.join(pdict[u'intermediate'], 'temp_mask.tif')
        template_raster = pdict[u'lulc']
        rau_raster = os.path.join(pdict[u'intermediate'], 'rau.tif')
        mask_arr = get_mask_list_entire(pdict, index, rau_raster)[0]
        array_to_raster(mask_arr, template_raster, mask_uri, datatype=5)
        del mask_arr
    filename = os.path.basename(save_as)[:-4]
    data_file = os.path.join(pdict[u'intermediate'], filename + '.npz')
    if os.path.isfile(data_file) and use_existing:
        val_raster = load_sparse_csr(data_file)
    else:
        if index is None:
            mask = get_mask_list_entire(pdict)[0]
        else:
            rau_raster = os.path.join(pdict[u'intermediate'], 'rau.tif')
            mask = get_mask_list_entire(pdict, index, rau_raster)[0]
        n_pixels = mask.nnz
        assert values.shape[0] == n_pixels, """There should be one value for
        each pixel"""
        val_raster = sparse.lil_matrix(mask.shape, dtype=values.dtype)
        # vectorized fancy-index assignment replaces the former per-pixel
        # Python loop (which was O(n) lil insertions and extremely slow)
        rows, cols = mask.nonzero()
        val_raster[rows, cols] = np.ravel(values)
        val_raster = val_raster.tocsr()
        save_sparse_csr(data_file, val_raster)
        del mask
    template_raster = pdict[u'lulc']
    temp_name = filename + '_temp.tif'
    array_to_raster(val_raster, template_raster, temp_name, datatype=6)
    set_nodata_areas(pdict, temp_name, mask_uri, save_as)
    delete_geotiff(mask_uri)
def extract_aoi(values, pdict, aoi, rau_i=None):
    """Extract values identified by a raster as from a small area of interest.
    If rau_i is supplied, only values in the aoi raster identified by that
    number are extracted.

    Parameters:
        values (numpy.ndarray): one value per nonzero pixel of the
            entire-area mask
        pdict (dict): project paths (passed to get_mask_list_entire)
        aoi (str): path to the raster identifying the area of interest
        rau_i (int or None): optional id within the aoi raster

    Returns:
        values extracted from the aoi (via extract_by_mask).
    """
    # TODO this is very inefficient - we build the *entire* value raster first
    # then extract the relevant values from it. Would be better to build the
    # entire raster once, extract values from each rau_i sequentially
    mask_entire = get_mask_list_entire(pdict)[0]
    n_pixels = mask_entire.nnz
    assert values.shape[0] == n_pixels, """There should be one value for each
    pixel"""
    val_raster = sparse.lil_matrix(mask_entire.shape, dtype=values.dtype)
    # vectorized fancy-index assignment replaces the former per-pixel
    # Python loop (flagged as "amazingly slow")
    rows, cols = mask_entire.nonzero()
    val_raster[rows, cols] = np.ravel(values)
    del mask_entire
    aoi_mask = sparse.csr_matrix(raster_to_np_array(aoi))
    extracted_vals = extract_by_mask(val_raster, aoi_mask, rau_i)
    return extracted_vals
def get_rau_list_from_files(pdict, model_list):
    """Find the RAUs for which model data has been generated and extracted.

    Scans <intermediate>/rau_<model.name> for files named like
    'rau<N>.npy' and returns {model.name: [N, ...]}; a model without a
    data folder maps to an empty list.
    """
    rau_list_by_model = {}
    for model in model_list:
        rau_list_by_model[model.name] = []
        # Ensure all models have data for each RAU
        rau_dir = os.path.join(pdict[u'intermediate'], 'rau_' + model.name)
        if not os.path.exists(rau_dir):
            continue
        candidates = [f for f in os.listdir(rau_dir)
                      if os.path.isfile(os.path.join(rau_dir, f)) and
                      re.search('rau', f)]
        for fname in candidates:  # for each RAU
            match = re.search('rau(.+?).npy', fname)
            if match is None:
                raise Exception("RAU index not identified from file %s" %
                                fname)
            rau_list_by_model[model.name].append(int(match.group(1)))
    return rau_list_by_model
def get_solution_db(pdict, objective_list, model_list, cost_list,
                    intervention_list, mask_type, use_existing,
                    tables_list=None):
    """Run integer optimization for each RAU, get the lower level curves that
    will be subjected to upper level optimization. If the curves have already
    been generated and saved as csv and use_existing == True, the curves are
    loaded instead of being generated.

    Returns:
        tuple (rau_list, ll_value_db) where ll_value_db holds one dict of
        lists per RAU, ordered like rau_list.
    """
    loaded_from_file = False
    # make sure per-RAU marginal value data exists (generates it if needed)
    rau_list = get_rau_data(pdict, objective_list, model_list, cost_list,
                            intervention_list, mask_type, use_existing)
    ll_value_db = []
    if tables_list is None:
        for rau in rau_list:
            filename = os.path.join(pdict[u'output'],
                                    'solution_curve_rau%d.csv' % rau)
            if os.path.isfile(filename) and use_existing:
                df = pandas.read_table(filename, sep=',')
                # NOTE(review): 'outtype' is a legacy pandas kwarg (modern
                # API uses orient='list') -- confirm the pinned pandas
                # version still accepts it
                value_db_RAU = df.to_dict(outtype='list')
                if 'Unnamed: 0' in value_db_RAU.keys():
                    # drop the unnamed index column produced by to_csv
                    del value_db_RAU['Unnamed: 0']
                ll_value_db.append(value_db_RAU)
                loaded_from_file = True
    if loaded_from_file:
        # partial loads are not supported: either every RAU curve was cached
        # or all curves must be regenerated
        assert len(ll_value_db) == len(rau_list), """Must load lower level value
        db for each RAU"""
        print "loading lower-level curves from file ..."
    else:
        for rau in rau_list:
            # probe the solver at the cost limit to find the feasible cost
            # ceiling, then sample 10 budgets from 5% up to that ceiling
            cost_limit = calc_cost_limit(pdict, cost_list, objective_list, rau)
            test_db = get_ll_solution(pdict, rau, objective_list, cost_limit)
            upper_feasible_limit = test_db['cost'][0]
            cost_range = np.linspace(upper_feasible_limit * 0.05,
                                     upper_feasible_limit, num=10)
            value_db_RAU = get_ll_solution(pdict, rau, objective_list,
                                           cost_range,
                                           tables_list=tables_list)
            ll_value_db.append(value_db_RAU)
    return rau_list, ll_value_db
def normalize_values(folder, maximize):
    """Normalize marginal values across RAUs within an objective. All values
    should range between 0 and 1. It is assumed that the marginal values
    within one objective across RAUs all reside in the folder which is given
    as input.

    Parameters:
        folder (str): directory of per-RAU .npy arrays; normalized copies
            are written under <folder>/norm with the same file names
        maximize (bool): if True larger raw values map to 1; if False the
            scale is inverted so smaller raw values map to 1

    All files are normalized jointly against the global min/max, so the
    per-RAU relative ordering is preserved.
    """
    savedir = os.path.join(folder, 'norm')
    if not os.path.exists(savedir):
        os.makedirs(savedir)
    filenames = []
    raw_arrays = []
    split_indices = []  # cumulative row counts, used to split the result
    rau_files = [f for f in os.listdir(folder) if os.path.isfile(
        os.path.join(folder, f))]
    for fname in rau_files:
        # right now, assume we just want to take all files in a folder
        filenames.append(fname)
        ar = np.load(os.path.join(folder, fname))
        raw_arrays.append(ar)
        if len(split_indices) == 0:
            split_indices.append(ar.shape[0])
        else:
            split_indices.append(split_indices[-1] + ar.shape[0])
    joined_ar = np.concatenate(raw_arrays)
    maxval = np.max(joined_ar)
    minval = np.min(joined_ar)
    # NOTE: raises ZeroDivisionError/returns nan if all values are equal
    # (maxval == minval), same as the original formulation
    if maximize:
        result_ar = (joined_ar - minval) / (maxval - minval)
    else:
        result_ar = (maxval - joined_ar) / (maxval - minval)
    # the last split index equals the total length, so the final piece is
    # empty and is dropped
    norm_arrays = np.split(result_ar, split_indices)[:-1]
    for ar_indx in range(len(norm_arrays)):
        assert norm_arrays[ar_indx].shape == raw_arrays[ar_indx].shape
        save_as = os.path.join(savedir, filenames[ar_indx])
        np.save(save_as, norm_arrays[ar_indx])
def get_ll_solution(pdict, rau, objective_list, cost_range, save_as=None,
                    tables_list=None, row=None, un_norm=False):
    """Get lower level solution for one RAU, under one or a range of cost
    constraints. If save_as is supplied, a raster of the solution is saved
    with that filename. Returns a dictionary composed of two lists, one giving
    the value of the aggregate objective and another giving the cost of the
    lower level solution. These lists correspond in their order, such that the
    first value corresponds to the first cost. If un_norm is True, the values
    returned correspond to non-normalized (untransformed) marginal values, in
    the units of the marginal value calculations. Otherwise, normalized values
    are assumed."""
    # NOTE(review): the 'row' parameter is not used anywhere in this body
    # problem definition consumed by the integer solver
    ll_problem = {'weights': {},
                  'targets': {},
                  'targettypes': {}
                  }
    for objective in objective_list:
        ll_problem['weights'][objective.name] = objective.weight
        if objective.l_target is not None:
            ll_problem['targets'][objective.name] = objective.l_target
        if objective.target_type is not None:
            ll_problem['targettypes'][objective.name] = objective.target_type
    value_db_RAU = {'budget': cost_range, 'agg_obj': [], 'num_IUs': [],
                    'num_IU_converted': []}
    ll_data = {'factornames': []}
    # load the (normalized) per-pixel marginal value table for each objective
    for objective in objective_list:
        ll_data['factornames'].append(objective.name)
        value_db_RAU[objective.name] = []
        if tables_list is None:
            if objective.name == 'cost' or objective.name == 'Cost':
                # cost tables are stored un-normalized
                rau_dir = os.path.join(pdict[u'intermediate'], 'rau_' +
                                       objective.name)
            else:
                rau_dir = os.path.join(pdict[u'intermediate'], 'rau_' +
                                       objective.name, 'norm')
            file = os.path.join(rau_dir, 'rau' + str(rau) + '.npy')
            if not os.path.isfile(file):
                raise Exception("file %s not found" % file)
        else:
            # user-supplied marginal value tables
            match_string = objective.name + "_rau%d" % rau
            pattern = r"." + re.escape(match_string)
            files = [f for f in tables_list if re.search(pattern, f)]
            assert len(files) == 1, "One table must match search query"
            file = files[0]
        ll_data[objective.name] = np.load(file)
    if un_norm:
        # also load the non-normalized tables so returns can be reported
        # in the original units
        ll_marg_data = ll_data.copy()
        for objective in objective_list:
            if tables_list is None:
                rau_dir = os.path.join(pdict[u'intermediate'], 'rau_' +
                                       objective.name)
                file = os.path.join(rau_dir, 'rau' + str(rau) + '.npy')
                if not os.path.isfile(file):
                    raise Exception("file %s not found" % file)
            else:
                # TODO consider normalization of supplied marg value tables
                # (this formulation makes no distinction between normalized
                # and non-normalized supplied tables)
                match_string = objective.name + "_rau%d" % rau
                pattern = r"." + re.escape(match_string)
                files = [f for f in tables_list if re.search(pattern, f)]
                assert len(files) == 1, "One table must match search query"
                file = files[0]
            ll_marg_data[objective.name] = np.load(file)
    else:
        ll_marg_data = None
    # load undefined pixels
    undefined_ar_list = []
    # intervention 0 (do nothing) is defined everywhere
    undefined_ar_list.append([0] * ll_data[objective_list[0].name].shape[0])
    num_interventions = ll_data[objective_list[0].name].shape[1] - 1
    for indx in xrange(num_interventions):
        intervention_ar = np.load(os.path.join(pdict[u'intermediate'],
                                  'undefined_pixels_i%d_rau%d.npy' % (indx,
                                                                      rau)))
        undefined_ar_list.append(intervention_ar)
    undefined_array = np.column_stack(undefined_ar_list)
    value_db_RAU['num_IUs'] = [ll_data[objective_list[0].name].shape[0]] * len(
        cost_range)
    # solve once per cost constraint, accumulating the achieved values
    for idx in xrange(len(cost_range)):
        cost_constraint = cost_range[idx]
        ll_problem['targets']['cost'] = cost_constraint
        print "entering ll optimization: RAU %d, cost constraint %f" % (int(
            rau), cost_constraint)
        solution, scores = integer_optimization(ll_data, ll_problem, rau,
                                                undefined_array, ll_marg_data)
        if solution is not None:
            value_db_RAU['agg_obj'].append(scores['objective'])
            value_db_RAU['num_IU_converted'].append(np.count_nonzero(solution))
            for factor in ll_data['factornames']:
                value_db_RAU[factor].append(scores[factor])
            if save_as is not None:
                values_to_raster(pdict, np.array(solution), save_as,
                                 index=int(rau), use_existing=False)
        else:
            # infeasible / failed solve: append empty placeholders so list
            # lengths stay aligned with cost_range
            value_db_RAU['agg_obj'].append([])
            value_db_RAU['num_IU_converted'].append([])
            for factor in ll_data['factornames']:
                value_db_RAU[factor].append([])
        del solution
        del scores
    return value_db_RAU
def check_curve(value_db_RAU):
    """Check the points of the lower level curve for convexity and
    monotonicity. If either of these is violated, return a warning.

    Parameters:
        value_db_RAU (dict): must contain parallel lists 'agg_obj' and
            'cost'; segment slopes are computed between consecutive points

    Returns:
        tuple (warn_convex, warn_monot) of booleans. warn_convex is True
        when any segment has negative slope; warn_monot is True when the
        segment slopes both increase and decrease somewhere along the
        curve (an inflection).

    NOTE(review): the two flag names look swapped relative to their
    apparent meaning (a negative slope is a monotonicity violation; an
    inflection is a convexity violation) -- verify against callers before
    renaming.
    """
    warn_monot = False
    warn_convex = False
    convex_u = False
    convex_d = False
    slope_list = []
    for idx in range(len(value_db_RAU['agg_obj']) - 1):
        slope = (value_db_RAU['agg_obj'][idx + 1] -
                 value_db_RAU['agg_obj'][idx]) / (value_db_RAU['cost']
                                                  [idx + 1] -
                                                  value_db_RAU['cost'][idx])
        if slope < 0:
            warn_convex = True
        slope_list.append(slope)
    for idx in range(len(slope_list) - 1):
        if slope_list[idx + 1] == slope_list[idx]:
            continue
        if slope_list[idx + 1] > slope_list[idx]:
            convex_u = True
            if convex_d:
                # slopes decreased earlier and increase now: inflection
                warn_monot = True
        else:
            convex_d = True
            if convex_u:
                warn_monot = True
    return warn_convex, warn_monot
def get_rau_data(pdict, objective_list, model_list, cost_list,
                 intervention_list, mask_type, use_existing):
    """Get data to supply lower-level solver for each RAU, for all modelled
    objectives and cost.

    Generates (or loads, when use_existing is True and a cached file
    exists) the stacked marginal-value array per model, extracts it per
    RAU, builds cost data, and normalizes every non-cost objective.

    Returns:
        the list of RAU ids for which data exists.
    """
    rau_raster = get_rau_raster(pdict)
    rau_list_from_raster = \
        pygeoprocessing.geoprocessing.unique_raster_values_uri(rau_raster)
    for value in rau_list_from_raster:
        assert value - int(value) == 0, "RAU ids must be integers"
    rau_list_from_raster = [int(value) for value in rau_list_from_raster]
    rau_list_by_model = get_rau_list_from_files(pdict, model_list)
    for model in model_list:
        if rau_list_by_model is None:
            rau_to_generate = rau_list_from_raster
        else:
            # only generate data for RAUs not already extracted on disk
            rau_list = rau_list_by_model[model.name]
            rau_to_generate = set(rau_list_from_raster) - set(rau_list)
        if len(rau_to_generate) > 0:
            # load or generate model results data
            model_data_file = os.path.join(pdict[u'intermediate'],
                                           model.name + '_ll_data.npy')
            if os.path.isfile(model_data_file) and use_existing:
                model_data = np.load(model_data_file)
            else:
                raw_model_data = get_ll_data(pdict, model, intervention_list,
                                             mask_type)
                model_arr = np.asarray(raw_model_data)
                model_data = np.column_stack(model_arr)
                np.save(model_data_file, model_data)
            if _debug:
                # check rasters (Careful! this uses a ton of memory and take
                # forever)
                debugdir = os.path.join(pdict[u'intermediate'], 'debug')
                if not os.path.exists(debugdir):
                    os.makedirs(debugdir)
                for i_index in xrange(len(intervention_list)):
                    intervention = intervention_list[i_index]
                    values = model_data[:, i_index]
                    raster_name = os.path.join(debugdir, intervention +
                                               '_vals.tif')
                    # BUGFIX: first argument must be pdict (the project
                    # path dict) -- previously debugdir (a string) was
                    # passed, which crashed values_to_raster in debug mode
                    values_to_raster(pdict, values, raster_name,
                                     use_existing=False)
            # Generate per-RAU marginal value data
            extract_rau_vals(pdict, model, model_data, rau_to_generate)
            del model_data
    rau_list_by_model = get_rau_list_from_files(pdict, model_list)
    for model in model_list:
        rau_list = rau_list_by_model[model.name]
        assert rau_list == rau_list_from_raster, """Extraction of RAU vals
        did not work correctly"""
    # cost arrays are shaped after the first model's per-RAU data
    copy_dir = os.path.join(pdict[u'intermediate'], 'rau_' +
                            model_list[0].name)
    make_cost_data(pdict, cost_list, copy_dir)
    for objective in objective_list:
        if objective.name == 'cost' or objective.name == 'Cost':
            continue
        folder = os.path.join(pdict[u'intermediate'], 'rau_' + objective.name)
        normalize_values(folder, objective.maximize)
    return rau_list
def integer_optimization(data, problem, rau, undefined_array=None,
                         marg_data=None, tiebreaker_intervention=None):
    """Lower level optimization solver. Adapted from work by Peter Hawthorne.

    The scores returned here are calculated from the data supplied to the
    solver by default; these are assumed to be normalized. If marg_data is
    supplied, the optimal solution is calculated from normalized data but the
    returns from the solution are calculated from marg_data. If a tiebreaker
    intervention is supplied, it is forcibly chosen in case of implementation
    units where multiple interventions have equal objective values.
    """
    # solver options consumed by isolve (glpk MIP gap and time limit, ms)
    data['glpkopts'] = {'mip_gap': 0.05, 'tm_lim': 60000}
    solution = isolve.optimize(data, problem, undefined_array,
                               tiebreaker_intervention)
    if solution is None:
        return solution, None
    if marg_data is None:
        return solution, isolve.values(solution, data, problem)
    # score the solution against the non-normalized marginal values;
    # isolve.values needs the problem dimensions copied into marg_data
    factors = data['factornames']
    nparcels, nopts = np.array(data[factors[0]]).shape
    marg_data['nparcels'] = nparcels
    marg_data['nopts'] = nopts
    return solution, isolve.values(solution, marg_data, problem)
def michaelis_menten_solver(params, max_cost, budget):
    """Upper-level solver assuming the params describe a Michaelis-Menten
    saturating function. Adapted from work by Peter Hawthorne.

    Parameters:
        params (numpy.ndarray): n x 2 array; column 0 is the 'a'
            (asymptote) and column 1 the 'b' (half-saturation) parameter
            of each RAU's fitted curve
        max_cost (sequence): per-RAU upper bound on the allocated budget
        budget (number): total budget to distribute across the n RAUs

    Returns:
        tuple (x, expobj): optimal per-RAU allocations and the expected
        objective mm(x) evaluated at those allocations.
    """
    va = cvxopt.matrix(params[:, 0])
    vb = cvxopt.matrix(params[:, 1])
    n = va.size[0]
    # inequality constraints G x <= h:
    #   rows 0..n-1:   -x_i <= 0        (non-negativity)
    #   rows n..2n-1:   x_i <= max_cost (per-RAU cap)
    #   row 2n:        sum(x) <= budget
    Gin = cvxopt.matrix(0.0, (2*n+1, n))
    hin = cvxopt.matrix(0.0, (2*n+1, 1))
    for i in range(n):
        Gin[i, i] = -1.0
        Gin[i+n, i] = 1.0
        hin[i+n] = max_cost[i]  # upper bound for each RAU
    Gin[2*n, :], hin[2*n] = 1.0, budget  # sum of x <= budget
    def Fmm(x=None, z=None):
        # convex-programming oracle for cvxopt.solvers.cp: returns the
        # negated objective (so maximization becomes minimization), its
        # gradient, and -- when z is supplied -- the Hessian
        if x is None:
            # initial feasible point: spread the budget evenly
            return 0, cvxopt.matrix(float(budget)/n, (n, 1))
        if min(x) < 0.0:
            # outside the objective's domain
            return None
        f = -1*sum(cvxopt.div(cvxopt.mul(va, x), vb+x))
        Df = -1*(cvxopt.div(va, (vb + x)) -
                 cvxopt.div(cvxopt.mul(va, x), (vb + x)**2)).T
        if z is None:
            return f, Df
        h = 2*cvxopt.div(cvxopt.mul(va, x), (vb + x)**3) - \
            cvxopt.div(2*va, (vb + x)**2)
        H = -1*cvxopt.spdiag(z[0] * h)
        return f, Df, H
    cvxopt.solvers.options['show_progress'] = False
    cvxopt.solvers.options['maxiters'] = 10000
    x = np.squeeze(np.array(cvxopt.solvers.cp(Fmm, G=Gin, h=hin)['x']))
    def mm(x, a, b):
        # Michaelis-Menten saturating curve
        return (a*x)/(b+x)
    expobj = mm(x, params[:, 0], params[:, 1])
    return x, expobj
def quad_optimization(params, budget):
    """Upper level quadratic optimization solver. Adapted from work by Peter
    Hawthorne.

    Parameters:
        params (numpy.ndarray): n x 3 array of per-RAU quadratic
            coefficients [intercept, linear, quadratic]
        budget (number): total budget; allocations are non-negative and
            sum to at most budget

    Returns:
        tuple (optx, ypred): the cvxpy solution (None if the solver
        failed) and the per-RAU predicted objective at that solution
        (empty list when optx is None).
    """
    # TODO Peter has a more complex model formulation outlined in QPSolver.py
    # including constraints etc
    n = params.shape[0]
    x = cvx.Variable(n)
    P0 = cvx.diag(params[:, 2])
    q0 = np.column_stack(params[:, 1])
    r0 = sum(params[:, 0])
    objective = cvx.Maximize(cvx.quad_form(x, P0) + q0*x + r0)
    constraints = [sum(x) <= budget, x >= 0]  # x <= budget
    prob = cvx.Problem(objective, constraints)
    try:
        prob.solve()
        optx = x.value
    # was a bare 'except:', which also swallowed SystemExit and
    # KeyboardInterrupt; narrowed to Exception
    except Exception:
        print("Warning: quadratic solver failed")
        optx = None
    # get predicted aggregate objective score for each RAU
    ypred = []
    if optx is not None:
        optx_ar = np.ravel(optx)
        for i in range(n):
            solx = optx_ar[i]
            ep = params[i, :]
            ypred.append(ep[0] + ep[1]*solx + ep[2]*(solx**2))
    return optx, ypred
def retrieve_implementation_solution(optx, pdict, objective_list, model_list,
                                     intervention_list, rau_list, mask_type,
                                     use_existing, save, tables_list=None,
                                     row=None):
    """Retrieve the specified implementation solution given a cost constraint
    and save the implementation solution as a raster.

    Re-solves the lower-level problem once per RAU at the allocated
    budget optx[r]; returns the list of per-RAU value dicts (un-normalized
    units).
    """
    value_db = []
    for r_indx, rau in enumerate(rau_list):
        save_as = None
        if save:
            if row is None:
                fname = 'solution_rau%d.tif' % int(rau)
            else:
                fname = 'solution_rau%d_row%d.tif' % (int(rau), row)
            save_as = os.path.join(pdict[u'sol_map_dir'], fname)
        value_db.append(get_ll_solution(pdict, rau, objective_list,
                                        [optx[r_indx]], save_as,
                                        tables_list=tables_list,
                                        un_norm=True))
    return value_db
def get_rau_raster(pdict):
    """Create a raster identifying RAUs that aligns with the lulc raster.

    pdict[u'rau_shp'] must be a shapefile containing an integer field
    'rau' that is a unique identifier for each RAU. Returns the path of
    the created raster (<intermediate>/rau.tif).
    """
    rau_raster = os.path.join(pdict[u'intermediate'], 'rau.tif')
    # blank raster on the lulc grid, then burn in the 'rau' attribute
    pygeoprocessing.geoprocessing.new_raster_from_base_uri(
        pdict[u'lulc'], rau_raster, 'GTiff', -9999, gdal.GDT_Int32,
        fill_value=-9999)
    attr_field = 'rau'
    pygeoprocessing.geoprocessing.rasterize_layer_uri(
        rau_raster, pdict[u'rau_shp'],
        option_list=["ATTRIBUTE=%s" % attr_field])
    return rau_raster
def extract_rau_vals(pdict, model, model_results, rau_to_generate):
    """Extract model results values from the area identified as an RAU
    (resource allocation unit).

    Writes one rau<N>.npy array per RAU into
    <intermediate>/rau_<model.name>, with a leading do-nothing column of
    zeros followed by one column per intervention.
    """
    rau_raster = get_rau_raster(pdict)
    rau_dir = os.path.join(pdict[u'intermediate'], 'rau_' + model.name)
    if not os.path.exists(rau_dir):
        os.makedirs(rau_dir)
    for rau in rau_to_generate:  # for each RAU
        print("extracting values for model %s, RAU %d" % (model.name, rau))
        per_intervention = []
        for col_index in xrange(model_results.shape[1]):
            column = model_results[:, [col_index]]
            extracted = extract_aoi(column, pdict, rau_raster, rau)
            if np.nanmax(extracted) == 0 and np.nanmin(extracted) == 0:
                print("Warning: extracted marginal values are all zero")
            per_intervention.append(extracted)
            del extracted
        # prepend an intervention which is to do nothing (marginal value = 0)
        per_intervention.insert(0, [0] * len(per_intervention[0]))
        model_data = np.column_stack(tuple(per_intervention))
        np.save(os.path.join(rau_dir, 'rau' + str(rau) + '.npy'), model_data)
        if _debug:
            # check rasters (Careful! this uses a ton of memory and takes
            # forever)
            debugdir = os.path.join(pdict[u'intermediate'], 'debug')
            if not os.path.exists(debugdir):
                os.makedirs(debugdir)
            for i_index in xrange(len(model_data[0])):
                raster_name = os.path.join(debugdir, 'rau_' + str(rau) +
                                           'i_' + str(i_index) + '_vals.tif')
                values_to_raster(pdict, model_data[:, i_index], raster_name,
                                 index=int(rau), use_existing=False)
def make_cost_data(pdict, cost_list, copy_dir):
    """Generate cost data for each RAU. Uses a copy directory where real model
    results data reside to generate data for the correct shape. Relies on
    symmetrical ordering of intervention_list and cost_list.

    Parameters:
        pdict (dict): must contain u'intermediate'; output goes to
            <intermediate>/rau_cost
        cost_list (list): one cost per intervention (excluding do-nothing)
        copy_dir (str): folder of per-RAU .npy arrays whose shape
            (pixels x interventions+1) the cost arrays must match

    Writes one rau<N>.npy per input file: column 0 (do nothing) is 0 and
    column i+1 holds cost_list[i] for every pixel.
    """
    cost_dir = os.path.join(pdict[u'intermediate'], 'rau_' + 'cost')
    if not os.path.exists(cost_dir):
        os.makedirs(cost_dir)
    rau_files = [f for f in os.listdir(copy_dir) if os.path.isfile(
        os.path.join(copy_dir, f)) and re.search('rau', f)]
    for fname in rau_files:  # for each RAU
        rau = re.search('rau(.+?).npy', fname).group(1)
        copy_data = np.load(os.path.join(copy_dir, fname))
        assert len(cost_list) == copy_data.shape[1] - 1, """Cost must be
        defined for each intervention"""
        # work on a copy so the loaded template array is not silently
        # mutated (the original code aliased and overwrote it)
        cost_data = copy_data.copy()
        cost_data[:, [0]] = 0  # do-nothing intervention
        for indx in range(len(cost_list)):
            cost_data[:, [indx + 1]] = cost_list[indx]
        filepath = os.path.join(cost_dir, 'rau' + str(rau) + '.npy')
        np.save(filepath, cost_data)
def michaelis_menten_fitter(pdict, row, rau_list, ll_value_db,
                            results_suffix=None):
    """Fit a Michaelis-Menten saturating function to data using nonlinear
    least squares.

    Returns an n_RAU x 2 array of fitted [a, b] parameters, one row per
    entry in ll_value_db. Note: the per-RAU 'cost' and 'agg_obj' lists
    are mutated in place (a 0 is inserted at the front to anchor the
    curve at the origin).
    """
    n_RAU = len(ll_value_db)  # number of RAUs
    estparams = np.zeros((n_RAU, 2))
    x_list = []
    y_list = []
    fit_list = []
    for i in xrange(n_RAU):
        val_dict = ll_value_db[i]
        # anchor both series at the origin (mutates the stored lists)
        val_dict['cost'].insert(0, 0)
        val_dict['agg_obj'].insert(0, 0)
        x = np.array(val_dict['cost'])
        y = np.array(val_dict['agg_obj'])
        result = csopt_fit.michaelis_menten(y, x)
        estparams[i, :] = [result.best_values['a'], result.best_values['b']]
        x_list.append(x)
        y_list.append(y)
        fit_list.append(result.best_fit)
    if results_suffix is None:
        save_as = os.path.join(pdict[u'intermediate'], 'll_curves_row%d.png'
                               % row)
    else:
        save_as = os.path.join(pdict[u'intermediate'], 'll_curves_row%d_%s.png'
                               % (row, results_suffix))
    # plot_ll_curve(rau_list, x_list, y_list, save_as,
    #               results_list=fit_list)
    return estparams
def quadratic_curve_fitter(pdict, ll_value_db, rau_list):
    """Fit a quadratic model to each RAU describing the aggregate objective
    score as a function of cost. This function was adapted from Peter's
    CurveFittingModels.quadraticCumulativeCurveFitter function.

    Returns an n_RAU x 3 array of OLS coefficients [intercept, linear,
    quadratic]; writes per-RAU goodness-of-fit diagnostics to
    <intermediate>/rau_regression_summary.csv. Note: the per-RAU 'cost'
    and 'agg_obj' lists are mutated in place (0 inserted at the front).
    """
    n_RAU = len(ll_value_db)  # number of RAUs
    estparams = np.zeros((n_RAU, 3))
    x_list = []
    y_list = []
    results_list = []
    # per-RAU goodness-of-fit diagnostics, written to csv below
    stats = {'R_squared': [], 'R_squared_adj': [], 'F_statistic': [],
             'F_statistic_prob': [], 'Log-likelihood': [], 'AIC': [],
             'BIC': []}
    for i in xrange(n_RAU):
        val_dict = ll_value_db[i]
        # anchor both series at the origin (mutates the stored lists)
        val_dict['cost'].insert(0, 0)
        val_dict['agg_obj'].insert(0, 0)
        x = np.array(val_dict['cost'])
        y = np.array(val_dict['agg_obj'])
        # OLS on the design matrix [1, x, x^2]
        design = sm.add_constant(np.column_stack((x, x**2)))
        results = sm.OLS(y, design).fit()
        estparams[i, :] = results.params
        x_list.append(x)
        y_list.append(y)
        results_list.append(results)
        stats['R_squared'].append(results.rsquared)
        stats['R_squared_adj'].append(results.rsquared_adj)
        stats['F_statistic'].append(results.fvalue)
        stats['F_statistic_prob'].append(results.f_pvalue)
        stats['Log-likelihood'].append(results.llf)
        stats['AIC'].append(results.aic)
        stats['BIC'].append(results.bic)
    stats['RAU'] = rau_list
    df = pandas.DataFrame(stats)
    df = df[['RAU', 'R_squared', 'R_squared_adj', 'F_statistic',
             'F_statistic_prob', 'Log-likelihood', 'AIC', 'BIC']]
    df.set_index(['RAU'], inplace=True)
    df.to_csv(os.path.join(pdict[u'intermediate'],
                           'rau_regression_summary.csv'))
    # save_as = os.path.join(pdict[u'intermediate'],
    #                        'll_regressions.png')
    # plot_ll_curve(rau_list, x_list, y_list, save_as, results_list)
    return estparams
def plot_ll_curve(rau_list, x_list, y_list, save_as, results_list=None):
    """Plot data and regression from lower level optimization.

    Saves the figure to save_as and closes it.
    """
    for idx, (xs, ys) in enumerate(zip(x_list, y_list)):
        plt.plot(xs, ys, 'o', label='RAU %d' % int(rau_list[idx]))
        if results_list is not None:
            plt.plot(xs, results_list[idx], "^")
    if results_list is not None:
        # re-plot the first fit with a label so the legend gets one entry
        plt.plot(x_list[0], results_list[0], "^", label='OLS')
    plt.xlabel('Cost')
    plt.ylabel('Aggregate objective')
    plt.legend(loc='best')
    plt.savefig(save_as, bbox_inches='tight')
    plt.close()
def bilevel_optimization(pdict, objective_list, model_list, intervention_list,
                         cost_list, mask_type, budget, row=0,
                         evaluate_solution=True, save_solution=True,
                         results_suffix=None, tables_list=None,
                         use_existing=False):
    """The most general 'outer-level' function to call most of the
    functionality of the marginal value script. This function runs the hybrid
    bi-level optimization for one set of factor weights.
    It produces as output a summary table of results containing predicted and
    realized objective returns for each RAU for each factor weight combination,
    and a table of diagnostics.

    Returns a tuple (summary_results, diagnostic_dict), both dicts of
    parallel lists keyed by column name."""
    # NOTE(review): this hard-coded override discards the caller-supplied
    # `use_existing` argument; it looks like a debugging leftover and should
    # probably be removed before production use.
    use_existing = True # speed up for testing!
    diagnostic_dict = {'table_row': [], 'RAU': [], 'budget_slack': [],
                       'num_IUs': [], 'num_IU_converted': []}
    # set up summary results dictionary
    summary_results = {
        'table_row': [],
        'RAU': [],
        'solution': [],
        'predicted_agg_obj_ul': [],
        'predicted_agg_obj_ll': [],
    }
    for objective in objective_list:
        summary_results['predicted_' + objective.name] = []
        summary_results[objective.name + '_weight'] = []
    if evaluate_solution:
        # evaluating implies saving: the realized objectives are computed
        # from the saved solution maps
        save_solution = True
        summary_results['realized_agg_obj'] = []
        for objective in objective_list:
            summary_results['realized_' + objective.name] = []
    # set up directory structure; point every model at the shared lulc and
    # biophysical table declared in pdict
    for idx in xrange(len(model_list)):
        model_list[idx].args[u'lulc_uri'] = pdict[u'lulc']
        model_list[idx].args[u'biophysical_table_uri'] = pdict[u'biophys']
    intermediate_dir = os.path.join(pdict[u'outerdir'], 'intermediate')
    if not os.path.exists(intermediate_dir):
        os.makedirs(intermediate_dir)
    pdict[u'intermediate'] = intermediate_dir
    output_dir = os.path.join(pdict[u'outerdir'], 'output')
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    pdict[u'output'] = output_dir
    sol_map_dir = os.path.join(output_dir, 'solution_maps')
    if not os.path.exists(sol_map_dir):
        os.makedirs(sol_map_dir)
    pdict[u'sol_map_dir'] = sol_map_dir
    identify_undefined_pixels(pdict, intervention_list)
    # solve optimization problem: build the lower-level value curves
    # (aggregate objective as a function of cost) for each RAU
    rau_list, ll_value_db = get_solution_db(pdict, objective_list,
                                            model_list, cost_list,
                                            intervention_list, mask_type,
                                            use_existing,
                                            tables_list=tables_list)
    # warn (but continue) if any lower-level curve violates the shape
    # assumptions of the upper-level fit
    for idx in xrange(len(ll_value_db)):
        value_db_RAU = ll_value_db[idx]
        warn_convex, warn_monot = check_curve(value_db_RAU)
        if warn_convex:
            # curve is not convex
            print 'lower level curve for RAU %d is not convex' \
                % rau_list[idx]
        if warn_monot:
            # curve is not monotonic
            print 'lower level curve for RAU %d is not monotonic' \
                % rau_list[idx]
    max_cost = [max(rau_dict['cost']) for rau_dict in ll_value_db]
    # save lower-level value databases to file
    for idx in xrange(len(rau_list)):
        rau = int(rau_list[idx])
        df = pandas.DataFrame(ll_value_db[idx])
        filename = 'solution_curve_rau%d_row%d.csv' % (rau, row)
        df.to_csv(os.path.join(pdict[u'output'], filename))
    print "..... fitting upper-level curve ..... "
    params = michaelis_menten_fitter(pdict, row, rau_list, ll_value_db,
                                     results_suffix)
    # allocate the total budget across RAUs using the fitted curves
    optx, ypred = michaelis_menten_solver(params, max_cost, budget)
    if optx is None:
        # solver failed: return the (empty) summary structures as-is
        soln_margv = {}
    else:
        value_db = retrieve_implementation_solution(
            optx, pdict, objective_list, model_list, intervention_list,
            rau_list, mask_type, use_existing, save=save_solution, row=row)
        for idx in xrange(len(value_db)):
            rau = rau_list[idx]
            diagnostic_dict['RAU'].append(rau)
            diagnostic_dict['table_row'].append(row)
            rau_dict = value_db[idx]
            diagnostic_dict['num_IUs'].append(
                rau_dict['num_IUs'][0])
            diagnostic_dict['num_IU_converted'].append(
                rau_dict['num_IU_converted'][0])
            summary_results['predicted_agg_obj_ll'].append(
                rau_dict['agg_obj'][0])
            # calculate budget slack (unspent budget)
            avail_budget = rau_dict['budget']
            cost = rau_dict['cost']
            budget_slack = [(avail_budget - cost) for avail_budget, cost in
                            zip(avail_budget, cost)]
            for item in budget_slack:
                diagnostic_dict['budget_slack'].append(item)
        if evaluate_solution:
            print "evaluating solution ......"
            # re-run the models on the chosen solution to get realized
            # (as opposed to predicted) objective values
            soln_margv = calc_soln_agg_obj(pdict, model_list,
                                           objective_list, rau_list,
                                           intervention_list, value_db,
                                           row=row, rsuf=results_suffix)
            soln_margv['realized_cost'] = []
            for idx in xrange(len(value_db)):
                rau_dict = value_db[idx]
                soln_margv['realized_cost'].append(rau_dict['cost'][0])
        else:
            soln_margv = {}
        # collect summary results for this weight combination
        soln_margv['RAU'] = rau_list
        soln_margv['table_row'] = []
        soln_margv['solution'] = []
        soln_margv['predicted_agg_obj_ul'] = []
        for objective in objective_list:
            soln_margv[objective.name + '_weight'] = []
        for rau in rau_list:
            soln_margv['table_row'].append(row)
            for objective in objective_list:
                soln_margv[objective.name + '_weight'].append(
                    objective.weight)
        soln_margv['solution'] = optx.tolist()
        soln_margv['predicted_agg_obj_ul'] = ypred.tolist()
        for objective in objective_list:
            soln_margv['predicted_' + objective.name] = []
            for idx in xrange(len(value_db)):
                soln_margv['predicted_' + objective.name].append(
                    value_db[idx][objective.name][0])
        # fold this weight combination's per-RAU rows into the summary table
        for key in soln_margv.keys():
            for item in soln_margv[key]:
                summary_results[key].append(item)
    return summary_results, diagnostic_dict
def margv_tables_from_csv(pdict, objective_list, folder, identifier):
    """Convert per-objective marginal value csv tables to .npy arrays.

    Folder: where the csv tables are located. Identifier: how to identify
    the folder where these should be stored (will be located in
    pdict[u'intermediate']).

    Returns the list of paths to the saved .npy files."""
    tables_list = []
    intermediate_dir = os.path.join(pdict[u'outerdir'], 'intermediate')
    if not os.path.exists(intermediate_dir):
        os.makedirs(intermediate_dir)
    pdict[u'intermediate'] = intermediate_dir
    tables_folder = os.path.join(pdict[u'intermediate'], identifier)
    if not os.path.exists(tables_folder):
        os.makedirs(tables_folder)
    rau_raster = get_rau_raster(pdict)
    rau_list = pygeoprocessing.geoprocessing.unique_raster_values_uri(
        rau_raster)
    csv_files = [f for f in os.listdir(folder) if f[-4:] == '.csv']
    assert len(csv_files) > 0, "No marginal value csv files identified"
    for objective in objective_list:
        objective_files = [f for f in csv_files if re.search(
            objective.name, f)]
        # NOTE(review): os.listdir order is arbitrary -- if this assert
        # fires spuriously, both sides may need sorting before comparison.
        rau_list_obj = [int(re.search(r'rau(.+?)\.csv', f).group(1)) for
                        f in objective_files]
        assert rau_list_obj == rau_list, """Supplied marginal value tables must
        match RAUs identified by rau_shp"""
        for filename in objective_files:
            # BUG FIX: the RAU id was previously parsed from `file`, a
            # variable leaked by the comprehension above (and immediately
            # overwritten below), so the wrong id could be used; parse it
            # from the current filename instead.  Also avoid shadowing the
            # builtin `file`.
            rau = int(re.search(r'rau(.+?)\.csv', filename).group(1))
            filepath = os.path.join(folder, filename)
            array = np.genfromtxt(filepath, delimiter=',')
            save_as = os.path.join(tables_folder, (objective.name +
                                                   "_rau%d.npy" % rau))
            np.save(save_as, array)
            tables_list.append(save_as)
    return tables_list
def npy_to_csv(folder):
    """Export every .npy array in *folder* to csv in a 'csv' subfolder.

    Each <name>.npy becomes <folder>/csv/<name>.csv."""
    csv_dir = os.path.join(folder, 'csv')
    if not os.path.exists(csv_dir):
        os.makedirs(csv_dir)
    # BUG FIX: re.search('.npy', f) treated '.' as a wildcard, so any file
    # merely containing "npy" anywhere in its name (e.g. 'old_npy_notes.txt')
    # was matched and fed to np.load; match the extension explicitly.
    npy_files = [f for f in os.listdir(folder) if f.endswith('.npy')]
    for npy_name in npy_files:
        arr = np.load(os.path.join(folder, npy_name))
        csv_name = os.path.join(csv_dir, npy_name[:-4] + '.csv')
        np.savetxt(csv_name, arr, delimiter=",")
def is_none(value):
    """Convert a string indicating 'None' or missing value to None."""
    return None if value in ('NA', 'None') else value
def run_from_exp_table(table):
    """Launch the routine with inputs from a csv file.

    Each row of the experimental table describes one factor-weight
    combination: the project paths, mask/budget settings, and numbered
    ``mod<N>_*`` / ``obj<N>_*`` column groups describing the InVEST models
    and objectives.  Results from all rows are accumulated and written to
    'summary_results.csv' and 'diagnostics.csv' in the output directory."""
    exp_df = pandas.read_table(table, sep=',')
    outerdir_list = exp_df['outerdir'].tolist()
    assert len(set(outerdir_list)) == 1, """All rows within one experimental
    table must share outer directory (outerdir)"""
    # discover the model/objective indices from the column names
    obj_num_list = [int(re.search('obj(.+?)_name', n).group(1)) for n in
                    exp_df.columns.values if re.search('obj(.+?)_name', n)]
    model_num_list = [int(re.search('mod(.+?)_name', n).group(1)) for n in
                      exp_df.columns.values if re.search('mod(.+?)_name', n)]
    for row in xrange(len(exp_df)):
        pdict = {
            u'outerdir': exp_df.iloc[row].outerdir,
            u'rau_shp': exp_df.iloc[row].rau_shp,
            u'lulc': exp_df.iloc[row].lulc,
            u'biophys': exp_df.iloc[row].biophysical_table,
        }
        mask_type = exp_df.iloc[row].mask_type
        intervention_list = exp_df.iloc[row].intervention_list.split(';')
        cost_str = exp_df.iloc[row].cost_list.split(';')
        # renamed loop variable: the original shadowed the builtin `str`
        cost_list = [float(cost) for cost in cost_str]
        budget = float(exp_df.iloc[row].budget)
        evaluate_solution = exp_df.iloc[row].evaluate_sol
        save_solution = exp_df.iloc[row].save_sol
        results_suffix = is_none(exp_df.iloc[row].results_suffix)
        model_list = []
        for midx in model_num_list:
            name = exp_df.iloc[row]['mod%d_name' % midx]
            args_table = exp_df.iloc[row]['mod%d_args_file' % midx]
            with open(args_table, 'r') as inf:
                # SECURITY NOTE: eval() executes arbitrary code from the
                # args file -- only run tables from trusted sources
                # (ast.literal_eval would be safer if the files are plain
                # Python literals).
                model_args = eval(inf.read())
            module = is_none(exp_df.iloc[row]['mod%d_module' % midx])
            output = exp_df.iloc[row]['mod%d_output' % midx]
            evaluate = exp_df.iloc[row]['mod%d_eval' % midx]
            # TODO this 'raster location' idea is awkward
            # but it should work
            if ('obj%d_raster_location' % midx) in exp_df.columns.values:
                if exp_df.iloc[row]['obj%d_raster_location' % midx] != 'None':
                    model_args[u'workspace_dir'] = exp_df.iloc[row][
                        'obj%d_raster_location' % midx]
            model = InVEST_model(name, model_args, module, output, evaluate)
            model_list.append(model)
            if ('obj%d_raster_location' % midx) in exp_df.columns.values:
                # BUG FIX: is_none() was previously called on the literal
                # column-name string (which is never 'NA'/'None'), so this
                # branch ALWAYS ran; test the cell value instead.
                if is_none(exp_df.iloc[row][
                        'obj%d_raster_location' % midx]) is not None:
                    get_ll_data_from_rasters(pdict, model, intervention_list,
                                             mask_type)
        objective_list = []
        for oidx in obj_num_list:
            name = exp_df.iloc[row]['obj%d_name' % oidx]
            weight = exp_df.iloc[row]['obj%d_weight' % oidx]
            l_target = is_none(exp_df.iloc[row]['obj%d_ll_target' % oidx])
            u_target = is_none(exp_df.iloc[row]['obj%d_ul_target' % oidx])
            target_type = is_none(exp_df.iloc[row]['obj%d_target_type' % oidx])
            maximize = exp_df.iloc[row]['obj%d_maximize' % oidx]
            objective_list.append(Objective(name, weight, l_target,
                                            u_target, target_type, maximize))
        sum_weight = 0
        for objective in objective_list:
            sum_weight += objective.weight
        assert sum_weight == 1, "Objective weights must sum to 1"
        model_names = [model.name for model in model_list]
        obj_names = [objective.name for objective in objective_list]
        assert len(set(model_names) - set(obj_names)) == 0, """Each model name
        must match an objective name"""
        sum_dict, diag_dict = bilevel_optimization(
            pdict, objective_list, model_list,
            intervention_list, cost_list, mask_type,
            budget, row, evaluate_solution, save_solution,
            results_suffix)
        # accumulate per-row results across the whole experiment
        if row == 0:
            summary_results = sum_dict
            diagnostic_dict = diag_dict
        else:
            for key in sum_dict:
                summary_results[key] += sum_dict[key]
            for key in diagnostic_dict:
                diagnostic_dict[key] += diag_dict[key]
    sum_df = pandas.DataFrame(summary_results)
    sum_df.set_index(['table_row'], inplace=True)
    diagnostic_df = pandas.DataFrame(diagnostic_dict)
    diagnostic_df.to_csv(os.path.join(pdict[u'output'], 'diagnostics.csv'))
    sum_df.to_csv(os.path.join(pdict[u'output'], 'summary_results.csv'))
|
# -*- coding: utf-8 -*-
from troposphere import Base64, Select, FindInMap, GetAtt, GetAZs, Join, Output
from troposphere import Parameter, Ref, Tags, Template
import troposphere.efs as efs
import troposphere.ec2 as ec2
from constants import *
from base import CloudformationAbstractBaseClass
class SecurityGroup(CloudformationAbstractBaseClass):
    """Sceptre/troposphere template for the Wordpress stack security groups
    (EFS mount targets, ELB, web instances and RDS)."""

    def __init__(self, sceptre_user_data):
        # BUG FIX: super(self.__class__, self) recurses infinitely as soon
        # as this class is subclassed; name the class explicitly instead.
        super(SecurityGroup, self).__init__()
        self.template.set_description("""Wordpress SG""")
        self.add_parameters()
        self.add_resources()
        self.add_outputs()

    def add_parameters(self):
        """Declare the VpcId template parameter."""
        self.VpcId = self.template.add_parameter(Parameter(
            "VpcId",
            Description="VpcId",
            Type="AWS::EC2::VPC::Id",
        ))

    def add_resources(self):
        """Create the four security groups: EFS, ELB, web, RDS."""
        t = self.template
        # NOTE(security): NFS (2049) is open to 0.0.0.0/0 -- consider
        # restricting the source to the web security group instead.
        self.MountTargetSecurityGroup = t.add_resource(ec2.SecurityGroup(
            "MountTargetSecurityGroup",
            SecurityGroupIngress=[
                {"ToPort": "2049", "IpProtocol": "tcp", "CidrIp": "0.0.0.0/0", "FromPort": "2049"}],
            VpcId=Ref(self.VpcId),
            GroupDescription=Join("-", [Ref(self.Project), "efs", "sg"]),
            Tags=Tags(
                Name=Join("-", [Ref(self.Project), "efs", "sg"]),
                Environment=Ref(self.Environment),
                Project=Ref(self.Project),
            )
        ))
        self.ElbSecurityGroup = self.template.add_resource(ec2.SecurityGroup(
            "ElbSecurityGroup",
            SecurityGroupIngress=[
                {"ToPort": "80", "IpProtocol": "tcp",
                 "CidrIp": "0.0.0.0/0", "FromPort": "80"},
                {"ToPort": "443", "IpProtocol": "tcp",
                 "CidrIp": "0.0.0.0/0", "FromPort": "443"}
            ],
            VpcId=Ref(self.VpcId),
            GroupDescription=Join("-", [Ref(self.Project), "elb", "sg"]),
            Tags=Tags(
                Name=Join("-", [Ref(self.Project), "elb", "sg"]),
                Environment=Ref(self.Environment),
                Project=Ref(self.Project),
            )
        ))
        # NOTE(security): SSH (22) is open to 0.0.0.0/0 -- restrict to a
        # bastion host or known CIDR range.
        self.WebSecurityGroup = self.template.add_resource(ec2.SecurityGroup(
            "WebSecurityGroup",
            SecurityGroupIngress=[
                {"ToPort": "80", "IpProtocol": "tcp",
                 "CidrIp": "0.0.0.0/0", "FromPort": "80"},
                {"ToPort": "22", "IpProtocol": "tcp",
                 "CidrIp": "0.0.0.0/0", "FromPort": "22"}
            ],
            VpcId=Ref(self.VpcId),
            GroupDescription=Join("-", [Ref(self.Project), "web", "sg"]),
            Tags=Tags(
                Name=Join("-", [Ref(self.Project), "web", "sg"]),
                Environment=Ref(self.Environment),
                Project=Ref(self.Project),
            )
        ))
        # NOTE(security): MySQL (3306) is open to 0.0.0.0/0 -- restrict the
        # source to the web security group.
        self.RDSSecurityGroup = self.template.add_resource(ec2.SecurityGroup(
            "RDSSecurityGroup",
            SecurityGroupIngress=[
                {"ToPort": "3306", "IpProtocol": "tcp", "CidrIp": "0.0.0.0/0", "FromPort": "3306"}],
            VpcId=Ref(self.VpcId),
            GroupDescription=Join("-", [Ref(self.Project), "rds", "sg"]),
            Tags=Tags(
                Name=Join("-", [Ref(self.Project), "rds", "sg"]),
                Environment=Ref(self.Environment),
                Project=Ref(self.Project),
            )
        ))

    def add_outputs(self):
        """Export the four security group ids as stack outputs."""
        self.template.add_output([
            Output("EFSsg", Value=Ref(self.MountTargetSecurityGroup)),
            Output("ELBsg", Value=Ref(self.ElbSecurityGroup)),
            Output("WEBsg", Value=Ref(self.WebSecurityGroup)),
            Output("RDSsg", Value=Ref(self.RDSSecurityGroup)),
        ])
def sceptre_handler(sceptre_user_data=None):
    """Sceptre entry point: build the SecurityGroup template as JSON.

    ``sceptre_user_data`` now defaults to None (backward compatible) so the
    module can also be run directly as a script; the previous
    ``print(sceptre_handler())`` call crashed with a TypeError because the
    argument was required."""
    return SecurityGroup(sceptre_user_data).template.to_json()


if __name__ == '__main__':
    print(sceptre_handler())
|
import graphene
from graphene_django.types import DjangoObjectType
from .models import User
class UserType(DjangoObjectType):
    """GraphQL object type mirroring the ``User`` model."""
    class Meta:
        # NOTE(review): no ``fields``/``exclude`` option is given, so every
        # model field is exposed; recent graphene-django versions require an
        # explicit ``fields`` declaration -- confirm against the installed
        # version.
        model = User
class Query(object):
    """Root query fields for users (intended to be mixed into the schema's
    top-level Query type)."""
    # list of all users
    users = graphene.List(UserType)
    # single user looked up by primary key; user_id is a required argument
    user = graphene.Field(UserType,
                          user_id=graphene.NonNull(graphene.Int))

    def resolve_users(self, info, **kwargs):
        """Return every User row."""
        return User.objects.all()

    def resolve_user(self, info, **kwargs):
        """Return the User with pk == user_id.

        NOTE(review): User.objects.get raises DoesNotExist for an unknown
        id, which surfaces as a GraphQL error -- confirm that is the
        intended behavior (vs. returning null)."""
        user_id = kwargs.get('user_id')
        return User.objects.get(pk=user_id)
import sys
import time
# Emit five numbered progress markers ("Python: 0" .. "Python: 4") on a
# single line, half a second apart, flushing after each so the output
# appears immediately.
for step in range(5):
    print("Python:", step, end=' ', flush=True)
    time.sleep(0.5)
# m = 0/0  # uncomment to test error handling
|
"""Binary (Protobuf) formatting utilities."""
import binascii
import numbers
import logging
import google.protobuf.message
from google.protobuf.internal.decoder import _DecodeVarint
from google.protobuf.internal import encoder
from openxc.formats.base import VehicleMessageStreamer
from openxc import openxc_pb2
LOG = logging.getLogger(__name__)
class UnrecognizedBinaryCommandError(Exception): pass
class ProtobufStreamer(VehicleMessageStreamer):
    """Streamer that frames VehicleMessages as varint-length-delimited
    protobufs, resynchronizing byte-by-byte on garbage input."""
    # Sanity bound on a decoded length prefix: anything larger is assumed to
    # be a mis-parse and the stream is advanced by one byte.
    MAX_PROTOBUF_MESSAGE_LENGTH = 200

    def parse_next_message(self):
        """Try to parse one VehicleMessage off self.message_buffer.

        Returns the parsed message as a dict, or None if no complete
        message is available yet.  Consumed (or skipped) bytes are removed
        from self.message_buffer."""
        message = None
        remainder = self.message_buffer
        message_data = ""
        # 1. decode a varint from the top of the stream
        # 2. using that as the length, if there's enough in the buffer, try
        #    and decode a VehicleMessage after the varint
        # 3. if it worked, great, we're oriented in the stream - continue
        # 4. if either couldn't be parsed, skip to the next byte and repeat
        while message is None and len(self.message_buffer) > 1:
            message_length, message_start = _DecodeVarint(self.message_buffer, 0)
            # sanity check to make sure we didn't parse some huge number that's
            # clearly not the length prefix
            if message_length > self.MAX_PROTOBUF_MESSAGE_LENGTH:
                self.message_buffer = self.message_buffer[1:]
                continue
            if message_start + message_length > len(self.message_buffer):
                # not enough bytes buffered yet for the advertised length
                break
            message_data = self.message_buffer[message_start:message_start +
                                               message_length]
            remainder = self.message_buffer[message_start + message_length:]
            message = ProtobufFormatter.deserialize(message_data)
            if message is None:
                # candidate bytes did not decode; skip one byte and resync
                self.message_buffer = self.message_buffer[1:]
        self.message_buffer = remainder
        return message

    def serialize_for_stream(self, message):
        """Serialize `message` and prefix it with its varint-encoded length."""
        protobuf_message = ProtobufFormatter.serialize(message)
        delimiter = encoder._VarintBytes(len(protobuf_message))
        return delimiter + protobuf_message
class ProtobufFormatter(object):
    """Converts between openxc VehicleMessage protobufs and the plain-dict
    representation used throughout the library."""

    @classmethod
    def deserialize(cls, data):
        """Parse serialized VehicleMessage bytes into a dict.

        Returns None (implicitly) when the payload does not decode as a
        protobuf."""
        message = openxc_pb2.VehicleMessage()
        try:
            message.ParseFromString(data)
        except google.protobuf.message.DecodeError as e:
            # not a valid protobuf -- deliberately fall through so the
            # caller receives None and can resynchronize the stream
            pass
        except UnicodeDecodeError as e:
            # NOTE: LOG.warn is a deprecated alias of LOG.warning
            LOG.warn("Unable to parse protobuf: %s", e)
        else:
            return cls._protobuf_to_dict(message)

    @classmethod
    def serialize(cls, data):
        """Serialize a message dict to VehicleMessage protobuf bytes."""
        return cls._dict_to_protobuf(data).SerializeToString()

    @classmethod
    def _command_string_to_protobuf(self, command_name):
        """Map a command-name string to its ControlCommand enum value.

        Raises UnrecognizedBinaryCommandError for unknown names.
        NOTE(review): declared @classmethod but the first parameter is
        named `self` rather than the conventional `cls`."""
        if command_name == "version":
            return openxc_pb2.ControlCommand.VERSION
        elif command_name == "device_id":
            return openxc_pb2.ControlCommand.DEVICE_ID
        elif command_name == "diagnostic_request":
            return openxc_pb2.ControlCommand.DIAGNOSTIC
        elif command_name == "passthrough":
            return openxc_pb2.ControlCommand.PASSTHROUGH
        elif command_name == "af_bypass":
            return openxc_pb2.ControlCommand.ACCEPTANCE_FILTER_BYPASS
        elif command_name == "payload_format":
            return openxc_pb2.ControlCommand.PAYLOAD_FORMAT
        elif command_name == "predefined_obd2":
            return openxc_pb2.ControlCommand.PREDEFINED_OBD2_REQUESTS
        else:
            raise UnrecognizedBinaryCommandError(command_name)

    @classmethod
    def _dict_to_protobuf(cls, data):
        """Build a VehicleMessage protobuf from a message dict.

        The message kind is inferred from which keys are present: 'command',
        'command_response', CAN ('id' + 'data'), diagnostic response
        ('id' + 'bus' + 'mode'), or simple message ('name' + 'value')."""
        message = openxc_pb2.VehicleMessage()
        if 'command' in data:
            command_name = data['command']
            message.type = openxc_pb2.VehicleMessage.CONTROL_COMMAND
            message.control_command.type = cls._command_string_to_protobuf(command_name)
            if message.control_command.type == openxc_pb2.ControlCommand.PASSTHROUGH:
                message.control_command.passthrough_mode_request.bus = data['bus']
                message.control_command.passthrough_mode_request.enabled = data['enabled']
            elif message.control_command.type == openxc_pb2.ControlCommand.ACCEPTANCE_FILTER_BYPASS:
                message.control_command.acceptance_filter_bypass_command.bus = data['bus']
                message.control_command.acceptance_filter_bypass_command.bypass = data['bypass']
            elif message.control_command.type == openxc_pb2.ControlCommand.PREDEFINED_OBD2_REQUESTS:
                message.control_command.predefined_obd2_requests_command.enabled = data['enabled']
            elif message.control_command.type == openxc_pb2.ControlCommand.PAYLOAD_FORMAT:
                if data['format'] == "json":
                    message.control_command.payload_format_command.format = openxc_pb2.PayloadFormatCommand.JSON
                elif data['format'] == "protobuf":
                    message.control_command.payload_format_command.format = openxc_pb2.PayloadFormatCommand.PROTOBUF
            elif message.control_command.type == openxc_pb2.ControlCommand.DIAGNOSTIC:
                request_command = message.control_command.diagnostic_request
                action = data['action']
                if action == "add":
                    request_command.action = openxc_pb2.DiagnosticControlCommand.ADD
                elif action == "cancel":
                    request_command.action = openxc_pb2.DiagnosticControlCommand.CANCEL
                request = request_command.request
                request_data = data['request']
                request.bus = request_data['bus']
                request.message_id = request_data['id']
                request.mode = request_data['mode']
                if 'frequency' in request_data:
                    request.frequency = request_data['frequency']
                if 'name' in request_data:
                    request.name = request_data['name']
                if 'multiple_responses' in request_data:
                    request.multiple_responses = request_data['multiple_responses']
                if 'pid' in request_data:
                    request.pid = request_data['pid']
                if 'payload' in request_data:
                    # payload is hex-encoded with an '0x' prefix
                    request.payload = binascii.unhexlify(request_data['payload'].split('0x')[1])
        elif 'command_response' in data:
            message.type = openxc_pb2.VehicleMessage.COMMAND_RESPONSE
            message.command_response.type = cls._command_string_to_protobuf(data['command_response'])
            if 'message' in data:
                message.command_response.message = data['message']
            message.command_response.status = data['status']
        elif 'id' in data and 'data' in data:
            message.type = openxc_pb2.VehicleMessage.CAN
            if 'bus' in data:
                message.can_message.bus = data['bus']
            if 'frame_format' in data:
                if data['frame_format'] == "standard":
                    message.can_message.frame_format = openxc_pb2.RawMessage.STANDARD
                elif data['frame_format'] == "extended":
                    message.can_message.frame_format = openxc_pb2.RawMessage.EXTENDED
            message.can_message.id = data['id']
            message.can_message.data = binascii.unhexlify(data['data'].split('0x')[1])
        elif 'id' in data and 'bus' in data and 'mode' in data:
            message.type = openxc_pb2.VehicleMessage.DIAGNOSTIC
            response = message.diagnostic_response
            response.bus = data['bus']
            response.message_id = data['id']
            response.mode = data['mode']
            if 'pid' in data:
                response.pid = data['pid']
            if 'success' in data:
                response.success = data['success']
            if 'negative_response_code' in data:
                response.negative_response_code = data['negative_response_code']
            if 'value' in data:
                response.value = data['value']
            if 'payload' in data:
                response.payload = binascii.unhexlify(data['payload'].split('0x')[1])
        elif 'name' in data and 'value' in data:
            message.type = openxc_pb2.VehicleMessage.SIMPLE
            message.simple_message.name = data['name']
            value = data['value']
            # bool must be tested before numbers.Number: bool is a Number
            if isinstance(value, bool):
                message.simple_message.value.type = openxc_pb2.DynamicField.BOOL
                message.simple_message.value.boolean_value = value
            elif isinstance(value, str):
                message.simple_message.value.type = openxc_pb2.DynamicField.STRING
                message.simple_message.value.string_value = value
            elif isinstance(value, numbers.Number):
                message.simple_message.value.type = openxc_pb2.DynamicField.NUM
                message.simple_message.value.numeric_value = value
            if 'event' in data:
                event = data['event']
                # TODO holy repeated code, batman. this will be easier to DRY
                # when https://github.com/openxc/openxc-message-format/issues/19
                # is resolved
                if isinstance(event, bool):
                    message.simple_message.event.type = openxc_pb2.DynamicField.BOOL
                    message.simple_message.event.boolean_value = event
                elif isinstance(event, str):
                    message.simple_message.event.type = openxc_pb2.DynamicField.STRING
                    message.simple_message.event.string_value = event
                elif isinstance(event, numbers.Number):
                    message.simple_message.event.type = openxc_pb2.DynamicField.NUM
                    message.simple_message.event.numeric_value = event
        return message

    @classmethod
    def _protobuf_to_dict(cls, message):
        """Convert a VehicleMessage protobuf to the dict representation.

        Returns None for unrecognized message types or a SIMPLE message
        with no usable value."""
        parsed_message = {}
        if message is not None:
            if message.type == message.CAN and message.HasField('can_message'):
                can_message = message.can_message
                if can_message.HasField('bus'):
                    parsed_message['bus'] = can_message.bus
                if can_message.HasField('id'):
                    parsed_message['id'] = can_message.id
                if can_message.HasField('data'):
                    parsed_message['data'] = "0x%s" % binascii.hexlify(can_message.data).decode("ascii")
                if can_message.HasField('frame_format'):
                    if can_message.frame_format == openxc_pb2.RawMessage.STANDARD:
                        parsed_message['frame_format'] = "standard"
                    elif can_message.frame_format == openxc_pb2.RawMessage.EXTENDED:
                        parsed_message['frame_format'] = "extended"
            elif message.type == message.DIAGNOSTIC:
                diagnostic_message = message.diagnostic_response
                if diagnostic_message.HasField('bus'):
                    parsed_message['bus'] = diagnostic_message.bus
                if diagnostic_message.HasField('message_id'):
                    parsed_message['id'] = diagnostic_message.message_id
                if diagnostic_message.HasField('mode'):
                    parsed_message['mode'] = diagnostic_message.mode
                if diagnostic_message.HasField('pid'):
                    parsed_message['pid'] = diagnostic_message.pid
                if diagnostic_message.HasField('success'):
                    parsed_message['success'] = diagnostic_message.success
                if diagnostic_message.HasField('value'):
                    parsed_message['value'] = diagnostic_message.value
                if diagnostic_message.HasField('negative_response_code'):
                    parsed_message['negative_response_code'] = diagnostic_message.negative_response_code
                if diagnostic_message.HasField('payload'):
                    parsed_message['payload'] = "0x%s" % binascii.hexlify(diagnostic_message.payload).decode("ascii")
            elif message.type == message.SIMPLE:
                simple_message = message.simple_message
                parsed_message['name'] = simple_message.name
                if simple_message.HasField('event'):
                    event = simple_message.event
                    if event.HasField('numeric_value'):
                        parsed_message['event'] = event.numeric_value
                    elif event.HasField('boolean_value'):
                        parsed_message['event'] = event.boolean_value
                    elif event.HasField('string_value'):
                        parsed_message['event'] = event.string_value
                if simple_message.HasField('value'):
                    value = simple_message.value
                    if value.HasField('numeric_value'):
                        parsed_message['value'] = value.numeric_value
                    elif value.HasField('boolean_value'):
                        parsed_message['value'] = value.boolean_value
                    elif value.HasField('string_value'):
                        parsed_message['value'] = value.string_value
                    else:
                        # value field present but of no known dynamic type
                        parsed_message = None
                else:
                    # a SIMPLE message without a value is not usable
                    parsed_message = None
            elif message.type == message.CONTROL_COMMAND:
                command = message.control_command
                if command.type == openxc_pb2.ControlCommand.VERSION:
                    parsed_message['command'] = "version"
                elif command.type == openxc_pb2.ControlCommand.DEVICE_ID:
                    parsed_message['command'] = "device_id"
                elif command.type == openxc_pb2.ControlCommand.DIAGNOSTIC:
                    parsed_message['command'] = "diagnostic_request"
                    parsed_message['request'] = {}
                    action = command.diagnostic_request.action
                    if action == openxc_pb2.DiagnosticControlCommand.ADD:
                        parsed_message['action'] = "add"
                    elif action == openxc_pb2.DiagnosticControlCommand.CANCEL:
                        parsed_message['action'] = "cancel"
                    request = command.diagnostic_request.request
                    parsed_message['request']['id'] = request.message_id
                    parsed_message['request']['bus'] = request.bus
                    parsed_message['request']['mode'] = request.mode
                    if request.HasField('frequency'):
                        parsed_message['request']['frequency'] = request.frequency
                    if request.HasField('name'):
                        parsed_message['request']['name'] = request.name
                    if request.HasField('multiple_responses'):
                        parsed_message['request']['multiple_responses'] = request.multiple_responses
                    if request.HasField('pid'):
                        parsed_message['request']['pid'] = request.pid
                    if request.HasField('payload'):
                        parsed_message['request']['payload'] = "0x%s" % binascii.hexlify(request.payload).decode("ascii")
                elif command.type == openxc_pb2.ControlCommand.PASSTHROUGH:
                    parsed_message['command'] = "passthrough"
                    parsed_message['bus'] = command.passthrough_mode_request.bus
                    parsed_message['enabled'] = command.passthrough_mode_request.enabled
                elif command.type == openxc_pb2.ControlCommand.PREDEFINED_OBD2_REQUESTS:
                    parsed_message['command'] = "predefined_obd2"
                    parsed_message['enabled'] = command.predefined_obd2_requests_command.enabled
                elif command.type == openxc_pb2.ControlCommand.ACCEPTANCE_FILTER_BYPASS:
                    parsed_message['command'] = "af_bypass"
                    parsed_message['bus'] = command.acceptance_filter_bypass_command.bus
                    parsed_message['bypass'] = command.acceptance_filter_bypass_command.bypass
                elif command.type == openxc_pb2.ControlCommand.PAYLOAD_FORMAT:
                    parsed_message['command'] = "payload_format"
                    if command.payload_format_command.format == openxc_pb2.PayloadFormatCommand.JSON:
                        parsed_message['format'] = "json"
                    elif command.payload_format_command.format == openxc_pb2.PayloadFormatCommand.PROTOBUF:
                        parsed_message['format'] = "protobuf"
            elif message.type == message.COMMAND_RESPONSE:
                response = message.command_response
                if response.type == openxc_pb2.ControlCommand.VERSION:
                    parsed_message['command_response'] = "version"
                elif response.type == openxc_pb2.ControlCommand.DEVICE_ID:
                    parsed_message['command_response'] = "device_id"
                elif response.type == openxc_pb2.ControlCommand.DIAGNOSTIC:
                    parsed_message['command_response'] = "diagnostic_request"
                elif response.type == openxc_pb2.ControlCommand.PASSTHROUGH:
                    parsed_message['command_response'] = "passthrough"
                elif response.type == openxc_pb2.ControlCommand.PAYLOAD_FORMAT:
                    parsed_message['command_response'] = "payload_format"
                elif response.type == openxc_pb2.ControlCommand.ACCEPTANCE_FILTER_BYPASS:
                    parsed_message['command_response'] = "af_bypass"
                elif response.type == openxc_pb2.ControlCommand.PREDEFINED_OBD2_REQUESTS:
                    parsed_message['command_response'] = "predefined_obd2"
                else:
                    raise UnrecognizedBinaryCommandError(response.type)
                parsed_message['status'] = response.status
                if response.HasField('message'):
                    parsed_message['message'] = response.message
            else:
                # unrecognized message type
                parsed_message = None
        return parsed_message
|
"""
This is an implementation of the Lex Code Hook Interface
in order to serve the ShaykhBot to get the ayah requested.
"""
import math
import dateutil.parser
import datetime
import time
import os
import logging
import requests
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
""" --- Helpers to build responses which match the structure of the necessary dialog actions --- """
def get_slots(intent_request):
    """Return the slot dictionary of the request's current intent."""
    current_intent = intent_request['currentIntent']
    return current_intent['slots']
def elicit_slot(session_attributes, intent_name, slots, slot_to_elicit, message):
    """Build an ElicitSlot dialog action asking the user to (re-)fill a slot."""
    dialog_action = {
        'type': 'ElicitSlot',
        'intentName': intent_name,
        'slots': slots,
        'slotToElicit': slot_to_elicit,
        'message': message,
    }
    return {'sessionAttributes': session_attributes,
            'dialogAction': dialog_action}
def close(session_attributes, fulfillment_state, message):
    """Build a Close dialog action ending the conversation with the given
    fulfillment state ('Fulfilled' or 'Failed')."""
    return {
        'sessionAttributes': session_attributes,
        'dialogAction': {
            'type': 'Close',
            'fulfillmentState': fulfillment_state,
            'message': message,
        },
    }
def delegate(session_attributes, slots):
    """Build a Delegate dialog action letting Lex decide the next step."""
    response = {
        'sessionAttributes': session_attributes,
        'dialogAction': {'type': 'Delegate', 'slots': slots},
    }
    return response
""" --- Helper Functions --- """
def parse_int(n):
    """Parse *n* as an int, returning NaN when it is not a valid integer."""
    try:
        result = int(n)
    except ValueError:
        return float('nan')
    return result
def build_validation_result(is_valid, violated_slot, message_content):
    """Build a slot-validation result dict; a 'message' entry is included
    only when message_content is provided."""
    result = {'isValid': is_valid, 'violatedSlot': violated_slot}
    if message_content is not None:
        result['message'] = {'contentType': 'PlainText',
                             'content': message_content}
    return result
""" --- TODO: Remove this function --- """
def isvalid_date(date):
    """Return True when *date* parses as a date string, False otherwise."""
    try:
        dateutil.parser.parse(date)
    except ValueError:
        return False
    return True
def validate_get_ayah(ayah_number):
    """Validate the requested ayah number.

    Currently a no-op: every value is accepted.
    TODO(review): reject non-numeric / out-of-range ayah references here."""
    return build_validation_result(True, None, None)
""" --- Functions that control the bot's behavior --- """
def get_ayah(intent_request):
    """
    Performs dialog management and fulfillment for getting ayah.
    Beyond fulfillment, the implementation of this intent demonstrates the use
    of the elicitSlot dialog action in slot validation and re-prompting.
    """
    ayah_number = get_slots(intent_request)["Ayah"]
    source = intent_request['invocationSource']
    if source == 'DialogCodeHook':
        # Perform basic validation on the supplied input slots.
        # Use the elicitSlot dialog action to re-prompt for the first violation detected.
        slots = get_slots(intent_request)
        validation_result = validate_get_ayah(ayah_number)
        if not validation_result['isValid']:
            slots[validation_result['violatedSlot']] = None
            return elicit_slot(intent_request['sessionAttributes'],
                               intent_request['currentIntent']['name'],
                               slots,
                               validation_result['violatedSlot'],
                               validation_result['message'])
    # NOTE(review): output_session_attributes is computed but never used --
    # close() below passes intent_request['sessionAttributes'] directly.
    output_session_attributes = intent_request['sessionAttributes'] if intent_request['sessionAttributes'] is not None else {}
    # Fetch the requested ayah (Sahih International edition) from the
    # alquran.cloud REST API.
    url = 'http://api.alquran.cloud/ayah/'+ayah_number+'/editions/en.sahih'
    logger.debug("request url..{}".format(url))
    response = requests.get(url)
    resp_data = response.json()
    logger.debug("Response... {}".format(resp_data))
    # NOTE(review): 'content' is handed the whole parsed JSON object; Lex
    # expects a plain string here -- confirm this renders as intended.
    return close(intent_request['sessionAttributes'],
                 'Fulfilled',
                 {'contentType': 'PlainText',
                  'content': resp_data})
""" --- Intents --- """
def dispatch(intent_request):
    """Route the incoming request to the handler for its intent."""
    intent_name = intent_request['currentIntent']['name']
    logger.debug('dispatch userId={}, intentName={}'.format(
        intent_request['userId'], intent_name))
    # Dispatch to your bot's intent handlers
    if intent_name == 'TestOne':
        return get_ayah(intent_request)
    raise Exception('Intent with name ' + intent_name + ' not supported')
""" --- Main handler --- """
def lambda_handler(event, context):
    """
    AWS Lambda entry point: route the incoming request based on intent.

    The JSON body of the request is provided in the event slot.
    """
    # By default, treat the user request as coming from the America/New_York
    # time zone.
    os.environ['TZ'] = 'America/New_York'
    time.tzset()
    bot_name = event['bot']['name']
    logger.debug('event.bot.name={}'.format(bot_name))
    return dispatch(event)
|
#!/usr/bin/env python
# Copyright 2020 Arm Limited.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import os
import sys
def check_expected_input(input_files, expected_files):
    """Verify that input_files matches expected_files, exiting(1) on mismatch.

    Each entry of expected_files must be the suffix of some entry of
    input_files, and the two lists must have the same length.
    """
    if len(input_files) != len(expected_files):
        print("Length mismatch! Input: {} Expected: {}".format(input_files, expected_files))
        sys.exit(1)
    for exp in expected_files:
        # A match is any input path ending with the expected file name.
        matched = any(path.endswith(exp) for path in input_files)
        if not matched:
            print("Missed expected file '{}' within input {}".format(exp, input_files))
            sys.exit(1)
def main():
    """Validate the --in file list against --expected and generate fun3.c.

    The generated file is written to --out and embeds the contents of
    funcs.txt (one of the inputs) into the FUNCS macro.
    """
    parser = argparse.ArgumentParser(description='''Check whether provided input files match the \
                                                 expected ones. Generate fun3.c using input \
                                                 from funcs.txt''')
    parser.add_argument('--in', dest='input', nargs='+', default=[], required=True,
                        help='Input file list')
    parser.add_argument('--expected', dest='expected', default=[], nargs='+',
                        required=True, help='Expected input file list')
    parser.add_argument('--out', dest='output', action='store', required=True, help='Output file',
                        type=argparse.FileType('wt'))
    args = parser.parse_args()
    # Template for the generated C source; %(funcs)s is filled from funcs.txt.
    s = '''
#define FUNCS "%(funcs)s"
int fun3(void)
{
    return 0;
}
'''.lstrip()
    # Exits with status 1 if the input list does not match the expected list.
    check_expected_input(args.input, args.expected)
    try:
        # Only funcs.txt contributes content; other inputs are just validated.
        for f in args.input:
            filename = os.path.basename(f)
            if filename == "funcs.txt":
                with open(f, 'r') as infile:
                    d = {'funcs': infile.read()}
                    args.output.write((s % d) + '\n')
    except IOError as e:
        print("Input file couldn't be opened: " + str(e))
        sys.exit(1)
# Run only when executed as a script (not when imported).
if __name__ == "__main__":
    main()
|
import tensorflow as tf
import tensorflow.keras.backend as K
import abc
from typing import Optional, Callable, List
from functools import reduce
from common import common
class WordsSubtokenMetricBase(tf.metrics.Metric):
    """Base class for subtoken-level precision / recall / F1 metrics.

    Splits the ground-truth target word and the predicted word into subtokens
    (separated by `subtokens_delimiter`) and accumulates true-positive,
    false-positive and false-negative subtoken counts across batches.
    Subclasses implement `result()` to combine the accumulated counts.
    """
    FilterType = Callable[[tf.Tensor, tf.Tensor], tf.Tensor]

    def __init__(self,
                 index_to_word_table: Optional[tf.lookup.StaticHashTable] = None,
                 topk_predicted_words=None,
                 predicted_words_filters: Optional[List[FilterType]] = None,
                 subtokens_delimiter: str = '|', name=None, dtype=None):
        super(WordsSubtokenMetricBase, self).__init__(name=name, dtype=dtype)
        # Running counts of subtoken-level statistics.
        self.tp = self.add_weight('true_positives', shape=(), initializer=tf.zeros_initializer)
        self.fp = self.add_weight('false_positives', shape=(), initializer=tf.zeros_initializer)
        self.fn = self.add_weight('false_negatives', shape=(), initializer=tf.zeros_initializer)
        self.index_to_word_table = index_to_word_table
        self.topk_predicted_words = topk_predicted_words
        self.predicted_words_filters = predicted_words_filters
        self.subtokens_delimiter = subtokens_delimiter

    def _get_true_target_word_string(self, true_target_word):
        # Without a lookup table the target is assumed to already be a string
        # tensor; otherwise map word indices to word strings.
        if self.index_to_word_table is None:
            return true_target_word
        true_target_word_index = tf.cast(true_target_word, dtype=self.index_to_word_table.key_dtype)
        return self.index_to_word_table.lookup(true_target_word_index)

    def update_state(self, true_target_word, predictions, sample_weight=None):
        """Accumulates true positive, false positive and false negative statistics."""
        if sample_weight is not None:
            # Fix: `NotImplemented` is a comparison sentinel, not an exception
            # class — raising it is itself a TypeError. Use NotImplementedError.
            raise NotImplementedError(
                "WordsSubtokenMetricBase with non-None `sample_weight` is not implemented.")
        # For each example in the batch we have:
        # (i) one ground true target word;
        # (ii) one predicted word (argmax y_hat)
        topk_predicted_words = predictions if self.topk_predicted_words is None else self.topk_predicted_words
        assert topk_predicted_words is not None
        predicted_word = self._get_prediction_from_topk(topk_predicted_words)
        true_target_word_string = self._get_true_target_word_string(true_target_word)
        true_target_word_string = tf.reshape(true_target_word_string, [-1])
        # We split each word into subtokens
        true_target_subwords = tf.compat.v1.string_split(true_target_word_string, sep=self.subtokens_delimiter)
        prediction_subwords = tf.compat.v1.string_split(predicted_word, sep=self.subtokens_delimiter)
        true_target_subwords = tf.sparse.to_dense(true_target_subwords, default_value='<PAD>')
        prediction_subwords = tf.sparse.to_dense(prediction_subwords, default_value='<PAD>')
        # Masks distinguishing real subtokens from '<PAD>' fillers.
        true_target_subwords_mask = tf.not_equal(true_target_subwords, '<PAD>')
        prediction_subwords_mask = tf.not_equal(prediction_subwords, '<PAD>')
        # Now shapes of true_target_subwords & prediction_subwords are
        # (batch, subtokens). We use broadcast to calculate 2 lists difference
        # with duplicates preserving.
        true_target_subwords = tf.expand_dims(true_target_subwords, -1)
        prediction_subwords = tf.expand_dims(prediction_subwords, -1)
        # Now shapes of true_target_subwords & prediction_subwords are
        # (batch, subtokens, 1).
        true_target_subwords__in__prediction_subwords = \
            tf.reduce_any(tf.equal(true_target_subwords, tf.transpose(prediction_subwords, perm=[0, 2, 1])), axis=2)
        prediction_subwords__in__true_target_subwords = \
            tf.reduce_any(tf.equal(prediction_subwords, tf.transpose(true_target_subwords, perm=[0, 2, 1])), axis=2)
        # Count predicted subwords that also appear in the ground-truth word.
        batch_true_positive = tf.reduce_sum(tf.cast(
            tf.logical_and(prediction_subwords__in__true_target_subwords, prediction_subwords_mask), tf.float32))
        # Count predicted subwords that are missing from the ground-truth word.
        batch_false_positive = tf.reduce_sum(tf.cast(
            tf.logical_and(~prediction_subwords__in__true_target_subwords, prediction_subwords_mask), tf.float32))
        # Count ground-truth subwords that are missing from the predicted word.
        batch_false_negative = tf.reduce_sum(tf.cast(
            tf.logical_and(~true_target_subwords__in__prediction_subwords, true_target_subwords_mask), tf.float32))
        self.tp.assign_add(batch_true_positive)
        self.fp.assign_add(batch_false_positive)
        self.fn.assign_add(batch_false_negative)

    def _get_prediction_from_topk(self, topk_predicted_words):
        """Returns the first top-k word that passes all prediction filters."""
        # apply given filter
        masks = []
        if self.predicted_words_filters is not None:
            masks = [fltr(topk_predicted_words) for fltr in self.predicted_words_filters]
        if masks:
            # assert all(mask.shape.assert_is_compatible_with(top_k_pred_indices) for mask in masks)
            legal_predicted_target_words_mask = reduce(tf.logical_and, masks)
        else:
            # No filters given: every candidate word is legal.
            legal_predicted_target_words_mask = tf.cast(tf.ones_like(topk_predicted_words), dtype=tf.bool)
        # the first legal predicted word is our prediction
        first_legal_predicted_target_word_mask = common.tf_get_first_true(legal_predicted_target_words_mask)
        first_legal_predicted_target_word_idx = tf.where(first_legal_predicted_target_word_mask)
        first_legal_predicted_word_string = tf.gather_nd(topk_predicted_words,
                                                         first_legal_predicted_target_word_idx)
        prediction = tf.reshape(first_legal_predicted_word_string, [-1])
        return prediction

    @abc.abstractmethod
    def result(self):
        ...

    def reset_states(self):
        # Zero all accumulated counts (tp, fp, fn) between evaluation runs.
        for v in self.variables:
            K.set_value(v, 0)
class WordsSubtokenPrecisionMetric(WordsSubtokenMetricBase):
    """Subtoken precision: TP / (TP + FP); 0 when the denominator is 0."""
    def result(self):
        denominator = self.tp + self.fp
        return tf.math.divide_no_nan(self.tp, denominator)
class WordsSubtokenRecallMetric(WordsSubtokenMetricBase):
    """Subtoken recall: TP / (TP + FN); 0 when the denominator is 0."""
    def result(self):
        denominator = self.tp + self.fn
        return tf.math.divide_no_nan(self.tp, denominator)
class WordsSubtokenF1Metric(WordsSubtokenMetricBase):
    """Subtoken F1: harmonic mean of subtoken precision and recall."""
    def result(self):
        precision = tf.math.divide_no_nan(self.tp, self.tp + self.fp)
        recall = tf.math.divide_no_nan(self.tp, self.tp + self.fn)
        numerator = 2 * precision * recall
        denominator = precision + recall + K.epsilon()
        return tf.math.divide_no_nan(numerator, denominator)
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# MIT License
#
# Copyright (c) 2018 Miha Purg <miha.purg@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
# Standalone CLI script for Qpyl.qanalysis.QAnalyseDyns
# Extracts dynamics information from the logfile and saves to json
#
from qscripts_config import __version__, QScriptsConfig as QScfg
import sys
import argparse
import os
from collections import OrderedDict as ODict
import logging
from Qpyl.qanalysis import QAnalyseDyns, QAnalyseDynsError
from Qpyl import plotdata
from Qpyl.common import backup_file, init_logger, get_version_full
logger = init_logger('Qpyl')
# Build the CLI. add_help=False so that -h can be registered manually below,
# placing it in the "Optional" argument group alongside the other flags.
parser = argparse.ArgumentParser(description="""
Script for extracting temperatures and energies from QDyn outputs.
Mostly used for debugging.
""", add_help=False)
reqarg = parser.add_argument_group("Required")
reqarg.add_argument("outputs", nargs="+", help="Qdyn output files")
optarg = parser.add_argument_group("Optional")
optarg.add_argument("--plots_out", dest="plots_out",
                    help="Output filename for plot data (default='{}')"
                         "".format(QScfg.get("files", "analysedyns_plots")),
                    default=QScfg.get("files", "analysedyns_plots"))
optarg.add_argument("--stepsize", dest="stepsize", default=None, type=float,
                    help="If the stepsize in your Qdyn output is 0.000, "
                         "define it with this flag.")
optarg.add_argument("--timeunit", dest="timeunit", default="ps",
                    help="Time unit. Options are 'fs', 'ps', 'ns'. "
                         "Default is ps.")
optarg.add_argument("--stride", dest="stride", type=int, default=1,
                    help="Read only every Nth point. Default=1")
optarg.add_argument("-v", "--version", action="version",
                    version=get_version_full())
optarg.add_argument("-h", "--help", action="help", help="show this help "
                    " message and exit")
# With no arguments at all, show usage instead of an argparse error.
if len(sys.argv) == 1:
    parser.print_help()
    sys.exit(1)
args = parser.parse_args()
# Fail fast if any of the given Qdyn output files is missing.
# (lexists: a broken symlink still counts as present.)
for qdynout in args.outputs:
    if not os.path.lexists(qdynout):
        print "FATAL! File '{}' doesn't exist".format(qdynout)
        sys.exit(1)
try:
    qads = QAnalyseDyns(args.outputs,
                        time_unit=args.timeunit,
                        step_size=args.stepsize)
except QAnalyseDynsError as e:
    print "Error: {}".format(e)
    sys.exit(1)
# Print temperature statistics to stdout and dump plot data as JSON.
print qads.get_temp_stats()
plots = qads.get_plotdata(stride=args.stride)
jsonenc = plotdata.PlotDataJSONEncoder(indent=2)
# Never overwrite an existing plots file; back it up first.
backup = backup_file(args.plots_out)
if backup:
    print "Backed up '{}' to '{}'".format(args.plots_out, backup)
open(args.plots_out, 'w').write(jsonenc.encode(plots))
print "\nWrote '{}'. Use q_plot.py to visualize the plots.".format(args.plots_out)
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Algorithm implementations required for Kronecker-Factored Lattice layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from . import utils
import numpy as np
import tensorflow as tf
def custom_reduce_prod(t, axis):
  """tf.reduce_prod(t, axis) with faster custom gradient.

  Shows comparable speed on CPU, up to 2x speed up on GPU, and 7x on TPU.

  Args:
    t: The tensor to reduce.
    axis: The dimension to reduce.

  Returns:
    prod(t) and grad(prod(t))
  """
  @tf.custom_gradient
  def fn(t):
    # Can safely use the built in forward op.
    fwd = tf.reduce_prod(t, axis=axis)
    def grad_fn(dy):
      """Computes the gradient function.

      Args:
        dy: The gradient flowing into the output of this function.

      Returns:
        The gradient flowing out through the input of this function.
      """
      # Fix: cast to t.dtype (previously hard-coded tf.float32), so the
      # gradient also works for float64/float16 inputs without dtype
      # mismatch errors.
      is_zero = tf.cast(tf.equal(t, 0), t.dtype)
      num_zeros = tf.reduce_sum(is_zero, axis=axis)
      # If the product contains no zero elements, then simply divide the
      # product by each element to determine the partial gradients.
      grad0 = tf.math.divide_no_nan(tf.expand_dims(fwd, axis=axis), t)
      # If the product contained one zero element, then compute the gradient
      # for that zero element as the product of all the other (non-zero)
      # elements. The gradients for other elements should be zero.
      prod = tf.reduce_prod(t + is_zero, axis=axis)
      grad1 = tf.cast(tf.equal(num_zeros, 1), t.dtype) * prod
      grad1 = tf.expand_dims(grad1, axis=axis) * is_zero
      # With two or more zeros every partial gradient is zero (both grad0 and
      # grad1 vanish), matching the true product rule.
      return tf.expand_dims(dy, axis=axis) * (grad0 + grad1)
    return fwd, grad_fn
  return fn(t)
def evaluate_with_hypercube_interpolation(inputs, scale, bias, kernel, units,
                                          num_terms, lattice_sizes,
                                          clip_inputs):
  """Evaluates a Kronecker-Factored Lattice using hypercube interpolation.

  Kronecker-Factored Lattice function is the product of the piece-wise linear
  interpolation weights for each dimension of the input.

  Args:
    inputs: Tensor representing points to apply lattice interpolation to. If
      units = 1, tensor should be of shape: `(batch_size, ..., dims)` or list
      of `dims` tensors of same shape `(batch_size, ..., 1)`. If units > 1,
      tensor should be of shape: `(batch_size, ..., units, dims)` or list of
      `dims` tensors of same shape `(batch_size, ..., units, 1)`. A typical
      shape is `(batch_size, dims)`.
    scale: Kronecker-Factored Lattice scale of shape `(units, num_terms)`.
    bias: Kronecker-Factored Lattice bias of shape `(units)`.
    kernel: Kronecker-Factored Lattice kernel of shape `(1, lattice_sizes, units
      * dims, num_terms)`.
    units: Output dimension of the Kronecker-Factored Lattice.
    num_terms: Number of independently trained submodels per unit, the outputs
      of which are averaged to get the final output.
    lattice_sizes: Number of vertices per dimension.
    clip_inputs: If inputs should be clipped to the input range of the
      Kronecker-Factored Lattice.

  Returns:
    Tensor of shape: `(batch_size, ..., units)`.
  """
  # Convert list of tensors to single tensor object.
  if isinstance(inputs, list):
    inputs = tf.concat(inputs, axis=-1)
  if clip_inputs:
    # Valid input range per dimension is [0, lattice_sizes - 1].
    inputs = tf.clip_by_value(inputs, 0.0, lattice_sizes - 1.0)
  inputs_shape = inputs.get_shape().as_list()
  dims = inputs_shape[-1]
  # Compute total dimension size before units excluding batch to squeeze into
  # one axis.
  idx = -1 if units == 1 else -2
  rows = int(np.prod(inputs_shape[1:idx]))
  inputs = tf.reshape(inputs, [-1, rows, units * dims])
  # interpolation_weights.shape: (batch, rows, lattice_sizes, units * dims).
  # interpolation_weights[m,n,i,j] should be the interpolation weight of the
  # (m,n,j) input in the i'th vertex, i.e. 0 if dist(input[m,n,j], i) >= 1,
  # otherwise 1 - dist(input[m,n,j], i), where `dist(...)` denotes the Euclidean
  # distance between scalars.
  if lattice_sizes == 2:
    # Fast path: with only two vertices the weights are (1 - x) and x.
    interpolation_weights = tf.stack([1 - inputs, inputs], axis=-2)
  else:
    vertices = tf.constant(
        list(range(lattice_sizes)),
        shape=(lattice_sizes, 1),
        dtype=inputs.dtype)
    interpolation_weights = vertices - tf.expand_dims(inputs, axis=-2)
    interpolation_weights = 1 - tf.minimum(tf.abs(interpolation_weights), 1)
  # Depthwise conv computes, per (unit, dim) channel, the dot product of the
  # interpolation weights with the kernel's keypoint values for each term.
  # dotprod.shape: (batch, rows, 1, units * dims * num_terms)
  dotprod = tf.nn.depthwise_conv2d(
      interpolation_weights, kernel, [1, 1, 1, 1], padding="VALID")
  dotprod = tf.reshape(dotprod, [-1, rows, units, dims, num_terms])
  # Kronecker factorization: multiply the per-dimension responses together.
  prod = custom_reduce_prod(dotprod, axis=-2)
  results = scale * prod
  # Average across terms for each unit.
  results = tf.reduce_mean(results, axis=-1)
  results = results + bias
  # results.shape: (batch, rows, units)
  # Restore the caller's original leading dimensions.
  results_shape = [-1] + inputs_shape[1:-1]
  if units == 1:
    results_shape.append(1)
  results = tf.reshape(results, results_shape)
  return results
def default_init_params(output_min, output_max):
  """Returns default initialization bounds depending on layer output bounds.

  Args:
    output_min: None or minimum layer output.
    output_max: None or maximum layer output.
  """
  # Unbounded output uses a wider (0.5, 1.5) init range; any bound narrows it.
  unbounded = output_min is None and output_max is None
  return (0.5, 1.5) if unbounded else (0.0, 1.0)
def kfl_random_monotonic_initializer(shape,
                                     scale,
                                     monotonicities,
                                     init_min=0.5,
                                     init_max=1.5,
                                     dtype=tf.float32,
                                     seed=None):
  """Returns a uniformly random sampled monotonic weight tensor.

  - The uniform random monotonic function will initilaize the lattice parameters
    uniformly at random and make it such that the parameters are monotonically
    increasing for each input.
  - The random parameters will be sampled from `[init_min, init_max]`

  Args:
    shape: Shape of weights to initialize. Must be: `(1, lattice_sizes, units *
      dims, num_terms)`.
    scale: Scale variable of shape: `(units, num_terms)`.
    monotonicities: None or list or tuple of length dims of elements of {0,1}
      which represents monotonicity constraints per dimension. 1 stands for
      increasing (non-decreasing in fact), 0 for no monotonicity constraints.
    init_min: The lower bound on the range of initialized weights.
    init_max: The upper bound on the range of initialized weights.
    dtype: dtype
    seed: A Python integer. Used to create a random seed for the distribution.

  Returns:
    Kronecker-Factored Lattice weights tensor of shape:
    `(1, lattice_sizes, units * dims, num_terms)`.
  """
  # Sample from the uniform distribution.
  weights = tf.random.uniform(
      shape, minval=init_min, maxval=init_max, dtype=dtype, seed=seed)
  if utils.count_non_zeros(monotonicities) > 0:
    # To sort, we must first reshape and unstack our weights.
    dims = len(monotonicities)
    _, lattice_sizes, units_times_dims, num_terms = shape
    if units_times_dims % dims != 0:
      raise ValueError(
          "len(monotonicities) is {}, which does not evenly divide shape[2]."
          "len(monotonicities) should be equal to `dims`, and shape[2] "
          "should be equal to units * dims.".format(dims))
    units = units_times_dims // dims
    weights = tf.reshape(weights, [-1, lattice_sizes, units, dims, num_terms])
    # Make all dimensions monotonically increasing with respect to the sign of
    # scale. Multiplying by sign(scale) lets us always sort ascending; the
    # final multiply restores the original sign.
    direction = tf.expand_dims(tf.sign(scale), axis=1)
    # Now we can unstack each dimension.
    weights = tf.unstack(direction * weights, axis=3)
    # Sort keypoints along the lattice axis only for monotonic dimensions.
    monotonic_weights = [
        tf.sort(weight, axis=1) if monotonicity else weight
        for monotonicity, weight in zip(monotonicities, weights)
    ]
    # Restack, reshape, and return weights
    weights = tf.stack(monotonic_weights, axis=3)
    weights = tf.reshape(direction * weights, shape)
  return weights
def scale_initializer(units, num_terms, output_min, output_max):
  """Initializes scale depending on output_min and output_max.

  If both output_min and output_max are set, scale is initialized to half their
  difference, alternating signs for each term. If only output_min is set, scale
  is initialized to 1 for each term. If only output_max is set, scale is
  initialized to -1 for each term. Otherwise scale is initialized to alternate
  between 1 and -1 for each term.

  Args:
    units: Output dimension of the layer. Each unit's scale will be initialized
      identically.
    num_terms: Number of independently trained submodels per unit, the outputs
      of which are averaged to get the final output.
    output_min: None or minimum layer output.
    output_max: None or maximum layer output.

  Returns:
    Kronecker-Factored Lattice scale of shape: `(units, num_terms)`.
  """
  # One-sided bounds fix the sign of every term.
  if output_min is not None and output_max is None:
    return np.ones([units, num_terms])
  if output_min is None and output_max is not None:
    return -np.ones([units, num_terms])
  # Both or neither bound set: alternate sign per term, starting positive.
  alternating = np.array([1 if term % 2 == 0 else -1 for term in range(num_terms)])
  scale = np.tile(alternating, [units, 1])
  if output_min is not None and output_max is not None:
    # Scale magnitude is half the output range.
    scale = scale * ((output_max - output_min) / 2.0)
  return scale
def bias_initializer(units, output_min, output_max, dtype=tf.float32):
  """Initializes bias depending on output_min and output_max.

  If both output_min and output_max are set, bias is initialized to their
  average. If only output_min is set, bias is initialized to output_min. If only
  output_max is set, bias is initialized to output_max. Otherwise bias is
  initialized to zeros.

  Args:
    units: Output dimension of the layer. Each of units bias will be initialized
      identically.
    output_min: None or minimum layer output.
    output_max: None or maximum layer output.
    dtype: dtype

  Returns:
    Kronecker-Factored Lattice bias of shape: `(units)`.
  """
  if output_min is None and output_max is None:
    return tf.zeros(shape=[units], dtype=dtype)
  if output_min is not None and output_max is not None:
    midpoint = (output_min + output_max) / 2.0
    return tf.constant(midpoint, shape=[units], dtype=dtype)
  if output_min is not None:
    return tf.constant(output_min, shape=[units], dtype=dtype)
  # Only output_max is set: weights will be nonnegative and scale will be
  # nonpositive, so we add output_max to interpolation output to achieve the
  # proper bound.
  return tf.constant(output_max, shape=[units], dtype=dtype)
def _approximately_project_monotonicity(weights, units, scale, monotonicities):
  """Approximately projects to strictly meet monotonicity constraints.

  For more details, see _approximately_project_monotonicity in lattice_lib.py.

  Args:
    weights: Tensor with weights of shape `(1, lattice_sizes, units * dims,
      num_terms)`.
    units: Number of units per input dimension.
    scale: Scale variable of shape: `(units, num_terms)`.
    monotonicities: List or tuple of length dims of elements of {0,1} which
      represents monotonicity constraints per dimension. 1 stands for increasing
      (non-decreasing in fact), 0 for no monotonicity constraints.

  Returns:
    Tensor with projected weights matching shape of input weights.
  """
  # Recall that w.shape is (1, lattice_sizes, units * dims, num_terms).
  weights_shape = weights.get_shape().as_list()
  _, lattice_sizes, units_times_dims, num_terms = weights_shape
  assert units_times_dims % units == 0
  dims = units_times_dims // units
  weights = tf.reshape(weights, [-1, lattice_sizes, units, dims, num_terms])
  # Extract the sign of scale to determine the projection direction.
  direction = tf.expand_dims(tf.sign(scale), axis=1)
  # TODO: optimize for case where all dims are monotonic and we won't
  # need to unstack.
  # Unstack our weights such that we have the weight for each dimension. We
  # multiply by direction such that we always project the weights to be
  # increasing.
  weights = tf.unstack(direction * weights, axis=3)
  projected = []
  for weight, monotonicity in zip(weights, monotonicities):
    if monotonicity:
      # First we go forward to find the maximum projection: a running max
      # over the keypoints makes the sequence non-decreasing from the left.
      max_projection = tf.unstack(weight, axis=1)
      for i in range(1, len(max_projection)):
        max_projection[i] = tf.maximum(max_projection[i], max_projection[i - 1])
      # Find the halfway projection to find the minimum projection. Averaging
      # with the original halves the projection distance of any one keypoint.
      half_projection = (weight + tf.stack(max_projection, axis=1)) / 2.0
      # Now we go backwards to find the minimum projection (running min from
      # the right), which restores monotonicity that averaging may have broken.
      min_projection = tf.unstack(half_projection, axis=1)
      for i in range(len(min_projection) - 2, -1, -1):
        min_projection[i] = tf.minimum(min_projection[i], min_projection[i + 1])
      # Restack our weight from the minimum projection.
      weight = tf.stack(min_projection, axis=1)
    # Add our projected weight to our running list.
    projected.append(weight)
  # Restack our final projected weights. We multiply by direction such that if
  # direction is negative we end up with decreasing weights.
  weights = direction * tf.stack(projected, axis=3)
  # Reshape projected weights into original shape and return them.
  weights = tf.reshape(weights, weights_shape)
  return weights
def _approximately_project_bounds(weights, units, output_min, output_max):
  """Approximately projects to strictly meet bound constraints.

  For more details, see _approximately_project_bounds in lattice_lib.py.

  Args:
    weights: Tensor with weights of shape `(1, lattice_sizes, units * dims,
      num_terms)`.
    units: Number of units per input dimension.
    output_min: None or minimum layer output.
    output_max: None or maximum layer output.

  Returns:
    Tensor with projected weights matching shape of input weights.
  """
  # No bounds at all: nothing to project.
  if output_min is None and output_max is None:
    return weights
  # We project by the dims'th root projection factor of the weights, ultimately
  # projecting each term into the range [-1,1], but only if both output_min and
  # output_max are specified. Otherwise, we restrict the weights to be
  # nonnegative and the interpolation will do a final shift to respect the
  # one-sided bound.
  if output_min is not None and output_max is not None:
    # Recall that w.shape is (1, lattice_sizes, units * dims, num_terms).
    weights_shape = weights.get_shape().as_list()
    _, lattice_sizes, units_times_dims, num_terms = weights_shape
    assert units_times_dims % units == 0
    dims = units_times_dims // units
    weights = tf.reshape(weights, [-1, lattice_sizes, units, dims, num_terms])
    # The largest-magnitude keypoint per dimension bounds that dimension's
    # contribution; their product bounds each term's output magnitude.
    max_keypoint_values = tf.reduce_max(tf.abs(weights), axis=1, keepdims=True)
    max_output_value = tf.reduce_prod(
        max_keypoint_values, axis=3, keepdims=True)
    # Only shrink (factor >= 1); never inflate weights already within bounds.
    full_projection_factor = tf.maximum(max_output_value, 1.0)
    # Distribute the shrink evenly across dims via the dims'th root, since the
    # per-dimension factors multiply together in the lattice product.
    individual_projection_factor = tf.pow(full_projection_factor, 1.0 / dims)
    weights = weights / individual_projection_factor
    # We must reshape to get our final projected weights.
    weights = tf.reshape(weights, weights_shape)
  else:
    # One-sided bound: nonnegative weights; the bias shift handles the rest.
    weights = tf.maximum(weights, 0)
  return weights
# Note: this function must not depend on the result of projecting scale.
# Currently this function depends on the sign of scale, but the scale projection
# will not flip the sign of scale (only make it 0 in the worse case), which will
# not cause any issues.
def finalize_weight_constraints(weights, units, scale, monotonicities,
                                output_min, output_max):
  """Approximately projects weights to strictly satisfy all constraints.

  This projection guarantees that constraints are strictly met, but it is not
  an exact projection w.r.t. the L2 norm. The computational cost is
  `O(num_monotonic_dims * num_lattice_weights)`.

  See helper functions `_approximately_project_*` for details of the individual
  projection algorithms for each set of constraints.

  Args:
    weights: Kronecker-Factored Lattice weights tensor of shape: `(1,
      lattice_sizes, units * dims, num_terms)`.
    units: Number of units per input dimension.
    scale: Scale variable of shape: `(units, num_terms)`.
    monotonicities: List or tuple of length dims of elements of {0,1} which
      represents monotonicity constraints per dimension. 1 stands for increasing
      (non-decreasing in fact), 0 for no monotonicity constraints.
    output_min: None or minimum layer output.
    output_max: None or maximum layer output.

  Returns:
    Projected weights tensor of same shape as `weights`.
  """
  has_monotonicity = utils.count_non_zeros(monotonicities) > 0
  if has_monotonicity:
    # TODO: in the case of only one monotonic dimension, we only have to
    # constrain the non-monotonic dimensions to be positive.
    # Monotonicity requires all weights to be nonnegative before projecting.
    weights = _approximately_project_monotonicity(
        weights=tf.maximum(weights, 0),
        units=units,
        scale=scale,
        monotonicities=monotonicities)
  has_bounds = output_min is not None or output_max is not None
  if has_bounds:
    weights = _approximately_project_bounds(
        weights=weights,
        units=units,
        output_min=output_min,
        output_max=output_max)
  return weights
# Note: we cannot rely on the weights projection occuring always before or
# always after the scale projection, so this function must not result in a
# projection that would ultimately change the results of the weights projection.
# Currently the weights projection depends on the sign of scale, so this
# function does not change the sign (only makes scale 0 in the worst case),
# which will not cause any issues.
def finalize_scale_constraints(scale, output_min, output_max):
  """Clips scale to strictly satisfy all constraints.

  Args:
    scale: Scale variable of shape: `(units, num_terms)`.
    output_min: None or minimum layer output.
    output_max: None or maximum layer output.

  Returns:
    Clipped scale tensor of same shape as `scale`.
  """
  if output_min is not None and output_max is not None:
    # Two-sided bounds: |scale| may not exceed half the output range.
    half_range = (output_max - output_min) / 2.0
    return tf.clip_by_value(
        scale, clip_value_min=-half_range, clip_value_max=half_range)
  if output_min is not None:
    # One-sided min bound: scale must be nonnegative to properly shift by
    # bias and satisfy the bound.
    return tf.maximum(scale, 0)
  if output_max is not None:
    # One-sided max bound: scale must be nonpositive to properly mirror and
    # shift by bias and satisfy the bound.
    return tf.minimum(scale, 0)
  return scale
def verify_hyperparameters(lattice_sizes=None,
                           units=None,
                           num_terms=None,
                           input_shape=None,
                           monotonicities=None,
                           output_min=None,
                           output_max=None):
  """Verifies that all given hyperparameters are consistent.

  This function does not inspect weights themselves. Only their shape. Use
  `assert_constraints()` to assert actual weights against constraints.

  See `tfl.layers.KroneckerFactoredLattice` class level comment for detailed
  description of arguments.

  Args:
    lattice_sizes: Lattice size to check against.
    units: Units hyperparameter of `KroneckerFactoredLattice` layer.
    num_terms: Number of independently trained submodels hyperparameter of
      `KroneckerFactoredLattice` layer.
    input_shape: Shape of layer input. Useful only if `units` and/or
      `monotonicities` is set.
    monotonicities: Monotonicities hyperparameter of `KroneckerFactoredLattice`
      layer. Useful only if `input_shape` is set.
    output_min: Minimum output of `KroneckerFactoredLattice` layer.
    output_max: Maximum output of `KroneckerFactoredLattice` layer.

  Raises:
    ValueError: If lattice_sizes < 2.
    ValueError: If units < 1.
    ValueError: If num_terms < 1.
    ValueError: If len(monotonicities) does not match number of inputs.
  """
  # Fix: use explicit `is not None` checks so that an invalid value of 0
  # (falsy) raises instead of silently passing the truthiness-based check.
  if lattice_sizes is not None and lattice_sizes < 2:
    raise ValueError("Lattice size must be at least 2. Given: %s" %
                     lattice_sizes)
  if units is not None and units < 1:
    raise ValueError("Units must be at least 1. Given: %s" % units)
  if num_terms is not None and num_terms < 1:
    raise ValueError("Number of terms must be at least 1. Given: %s" %
                     num_terms)
  # input_shape: (batch, ..., units, dims)
  if input_shape:
    # It also raises errors if monotonicities is specified incorrectly.
    monotonicities = utils.canonicalize_monotonicities(
        monotonicities, allow_decreasing=False)
    # Extract shape to check units and dims to check monotonicity
    if isinstance(input_shape, list):
      dims = len(input_shape)
      # Check monotonicity.
      if monotonicities and len(monotonicities) != dims:
        raise ValueError("If input is provided as list of tensors, their number"
                         " must match monotonicities. 'input_list': %s, "
                         "'monotonicities': %s" % (input_shape, monotonicities))
      shape = input_shape[0]
    else:
      dims = input_shape.as_list()[-1]
      # Check monotonicity.
      if monotonicities and len(monotonicities) != dims:
        raise ValueError("Last dimension of input shape must have same number "
                         "of elements as 'monotonicities'. 'input shape': %s, "
                         "'monotonicities': %s" % (input_shape, monotonicities))
      shape = input_shape
    if units and units > 1 and (len(shape) < 3 or shape[-2] != units):
      raise ValueError("If 'units' > 1 then input shape of "
                       "KroneckerFactoredLattice layer must have rank at least "
                       "3 where the second from the last dimension is equal to "
                       "'units'. 'units': %s, 'input_shape: %s" %
                       (units, input_shape))
  if output_min is not None and output_max is not None:
    if output_min >= output_max:
      raise ValueError("'output_min' must be strictly less than 'output_max'. "
                       "'output_min': %f, 'output_max': %f" %
                       (output_min, output_max))
def _assert_monotonicity_constraints(weights, units, scale, monotonicities,
                                     eps):
  """Builds assertion ops verifying that the weights are monotonic.

  Args:
    weights: `KroneckerFactoredLattice` weights tensor of shape: `(1,
      lattice_sizes, units * dims, num_terms)`.
    units: Number of units per input dimension.
    scale: Scale variable of shape: `(units, num_terms)`.
    monotonicities: Monotonicity constraints.
    eps: Allowed constraints violation.

  Returns:
    List of monotonicity assertion ops in graph mode, or a list of NoneType
    elements after directly executing the assertions in eager mode.
  """
  asserts = []
  # weights has shape (1, lattice_sizes, units * dims, num_terms); recover the
  # per-dimension view (1, lattice_sizes, units, dims, num_terms).
  _, lattice_sizes, units_times_dims, num_terms = weights.get_shape().as_list()
  assert units_times_dims % units == 0
  dims = units_times_dims // units
  reshaped = tf.reshape(weights, [-1, lattice_sizes, units, dims, num_terms])
  # The sign of scale determines the direction of the required inequality.
  sign = tf.expand_dims(tf.sign(scale), axis=1)
  per_dim_weights = tf.unstack(sign * reshaped, axis=3)
  for dim_index, (weight, monotonicity) in enumerate(
      zip(per_dim_weights, monotonicities)):
    if not monotonicity:
      continue
    keypoints = tf.unstack(weight, axis=1)
    for upper in range(1, len(keypoints)):
      min_diff = tf.reduce_min(keypoints[upper] - keypoints[upper - 1])
      asserts.append(
          tf.Assert(
              min_diff >= -eps,
              data=[
                  "Monotonicity violation", "Feature index:", dim_index,
                  "Min monotonicity diff:", min_diff, "Upper layer number:",
                  upper, "Epsilon:", eps, "Keypoints:", keypoints[upper],
                  keypoints[upper - 1]
              ]))
  return asserts
def _assert_bound_constraints(weights, units, scale, output_min, output_max,
                              eps):
  """Asserts that weights satisfy bound constraints.

  Args:
    weights: `KroneckerFactoredLattice` weights tensor of shape: `(1,
      lattice_sizes, units * dims, num_terms)`.
    units: Number of units per input dimension.
    scale: Scale variable of shape: `(units, num_terms)`.
    output_min: None or minimum layer output.
    output_max: None or maximum layer output.
    eps: Allowed constraints violation.

  Returns:
    List of bound assertion ops in graph mode or directly executes
    assertions in eager mode and returns a list of NoneType elements.
  """
  bound_asserts = []
  # Recall that w.shape is (1, lattice_sizes, units * dims, num_terms).
  weights_shape = weights.get_shape().as_list()
  _, lattice_sizes, units_times_dims, num_terms = weights_shape
  assert units_times_dims % units == 0
  dims = units_times_dims // units
  # Reshape to (batch, lattice_sizes, units, dims, num_terms) so that units
  # and input dimensions can be checked independently.
  weights = tf.reshape(weights, [-1, lattice_sizes, units, dims, num_terms])
  # If both bounds are specified, we must also have that the maximum output be
  # between -1 and 1.
  if output_min is not None and output_max is not None:
    for term, term_weights in enumerate(tf.unstack(weights, axis=4)):
      # Largest |keypoint| per (unit, dim); product over dims bounds the
      # magnitude of the term's output.
      max_keypoint_values = tf.reduce_max(
          tf.abs(term_weights), axis=1, keepdims=True)
      max_output_values = tf.reduce_prod(
          max_keypoint_values, axis=3, keepdims=True)
      for unit, unit_max_output_value in enumerate(
          tf.unstack(max_output_values, axis=2)):
        diff = tf.squeeze(1 - unit_max_output_value)
        bound_asserts.append(
            tf.Assert(
                diff >= -eps,
                data=[
                    "Bound violation (max output greater than 1)", "Diff", diff,
                    "Epsilon", eps, "Maximum output value",
                    unit_max_output_value, "Term index", term, "Unit", unit,
                    "Weights", weights
                ]))
  else:
    # If only one bound is specified, we must have that all of our weights are
    # nonnegative at this point. There can be no allowed epsilon error here
    # because of the effect of a negative value.
    total_negative_weights = tf.reduce_sum(tf.cast(weights < 0, tf.int32))
    bound_asserts.append(
        tf.Assert(
            total_negative_weights <= 0,
            data=[
                "Bound violation (negative weights)",
                "Number of negative weights", total_negative_weights, "Weights",
                weights
            ]))
  # If both bounds are specified, scale must be between
  # -(output_max-output_min)/2 and (output_max-output_min)/2. If only output_min
  # is specified, then scale must be nonnegative. If only output_max is
  # specified, then scale must be nonpositive.
  if output_min is not None and output_max is not None:
    bound = (output_max - output_min) / 2.0
    below_bound_scales = tf.reduce_sum(tf.cast(scale < -bound, tf.int32))
    above_bound_scale = tf.reduce_sum(tf.cast(scale > bound, tf.int32))
    bound_asserts.append(
        tf.Assert(
            below_bound_scales + above_bound_scale <= 0,
            data=[
                "Bound violation (scale out of bounds)", "Bound", bound,
                "Scale", scale
            ]))
  elif output_min is not None:
    negative_scales = tf.reduce_sum(tf.cast(scale < 0, tf.int32))
    bound_asserts.append(
        tf.Assert(
            negative_scales <= 0,
            data=[
                "Bound violation (only output_min specified with negative "
                "scale values)", "Scale", scale
            ]))
  elif output_max is not None:
    positive_scales = tf.reduce_sum(tf.cast(scale > 0, tf.int32))
    bound_asserts.append(
        tf.Assert(
            positive_scales <= 0,
            data=[
                "Bound violation (only output_max specified with positive "
                "scale values)", "Scale", scale
            ]))
  return bound_asserts
def assert_constraints(weights,
                       units,
                       scale,
                       monotonicities,
                       output_min,
                       output_max,
                       eps=1e-6):
  """Asserts that weights satisfy constraints.

  Delegates to the monotonicity and bound helpers and concatenates their
  assertion ops.

  Args:
    weights: `KroneckerFactoredLattice` weights tensor of shape: `(1,
      lattice_sizes, units * dims, num_terms)`.
    units: Number of units per input dimension.
    scale: Scale variable of shape: `(units, num_terms)`.
    monotonicities: Monotonicity constraints.
    output_min: None or minimum layer output.
    output_max: None or maximum layer output.
    eps: Allowed constraints violation.

  Returns:
    List of assertion ops in graph mode or directly executes assertions in
    eager mode.
  """
  all_asserts = []
  if monotonicities:
    all_asserts.extend(
        _assert_monotonicity_constraints(
            weights=weights,
            units=units,
            scale=scale,
            monotonicities=monotonicities,
            eps=eps))
  if output_min is not None or output_max is not None:
    all_asserts.extend(
        _assert_bound_constraints(
            weights=weights,
            units=units,
            scale=scale,
            output_min=output_min,
            output_max=output_max,
            eps=eps))
  return all_asserts
|
#!/usr/bin/env python3
import numpy as np
from calc11 import almacalc
# Demo driver: computes geometric/dry/wet delays for one antenna over 60
# epochs via calc11.almacalc.
nant = 1     # number of antennas
ntimes = 60  # number of time samples
# Array reference position and antenna offset — presumably meters in an
# Earth-fixed frame; TODO confirm against calc11 documentation.
refx = 1000.0
refy = 1299.0
refz = 700.0
antx = np.array([-3101.52])
anty = np.array([-11245.77])
antz = np.array([8916.26])
# Per-antenna weather data (units presumed C / hPa / fraction — verify).
temp = np.array([30.0])
pressure = np.array([1000.0])
humidity = np.array([0.7])
# Observation epochs as Modified Julian Dates, spaced one minute apart.
mjd = np.array([58701.45833333334, 58701.45902777778,
                58701.45972222222, 58701.46041666667,
                58701.46111111111, 58701.46180555555,
                58701.46250000000, 58701.46319444444,
                58701.46388888889, 58701.46458333333,
                58701.46527777778, 58701.46597222222,
                58701.46666666667, 58701.46736111111,
                58701.46805555555, 58701.46875000000,
                58701.46944444445, 58701.47013888889,
                58701.47083333333, 58701.47152777778,
                58701.47222222222, 58701.47291666667,
                58701.47361111111, 58701.47430555556,
                58701.47500000000, 58701.47569444445,
                58701.47638888889, 58701.47708333333,
                58701.47777777778, 58701.47847222222,
                58701.47916666666, 58701.47986111111,
                58701.48055555556, 58701.48125000000,
                58701.48194444444, 58701.48263888889,
                58701.48333333333, 58701.48402777778,
                58701.48472222222, 58701.48541666667,
                58701.48611111111, 58701.48680555556,
                58701.48750000000, 58701.48819444444,
                58701.48888888889, 58701.48958333334,
                58701.49027777778, 58701.49097222222,
                58701.49166666667, 58701.49236111111,
                58701.49305555555, 58701.49375000000,
                58701.49444444444, 58701.49513888889,
                58701.49583333333, 58701.49652777778,
                58701.49722222222, 58701.49791666667,
                58701.49861111111, 58701.49930555555])
# Fixed source direction for every epoch (presumably radians — confirm).
ra = np.array([2.0] * ntimes)
dec = np.array([1.0] * ntimes)
# No solar-system objects; Earth-orientation corrections all zero.
ssobj = np.zeros(ntimes, dtype=bool)
dx = np.zeros(ntimes)
dy = np.zeros(ntimes)
dut = np.zeros(ntimes)
leapsec = 37.0             # TAI-UTC leap seconds at this epoch
axisoff = np.zeros(nant)   # antenna axis offsets
sourcename = ['P'] * ntimes
jpx_de421 = 'data/DE421_little_Endian'  # JPL DE421 planetary ephemeris file
# Run the delay model and print the three delay components.
geodelay, drydelay, wetdelay = almacalc(refx, refy, refz, antx, anty,
                                        antz, temp, pressure,
                                        humidity, mjd, ra, dec, ssobj,
                                        dx, dy, dut, leapsec, axisoff,
                                        sourcename, jpx_de421)
print('geodelay:')
print(geodelay)
print('drydelay:')
print(drydelay)
print('wetdelay:')
print(wetdelay)
|
import torch
import torch.nn as nn
from collections import defaultdict
from tqdm import tqdm
def train_model(model, train_dl, valid_dl, lr, epochs, verbose=True):
    """Train *model* with Adam + OneCycleLR on an MSE objective.

    Args:
        model: torch.nn.Module mapping a batch x to a prediction.
        train_dl: iterable of (x, y) training batches.
        valid_dl: iterable of (x, y) validation batches.
        lr: peak learning rate for OneCycleLR.
        epochs: number of epochs to run.
        verbose: if True, print losses every 10th epoch.

    Returns:
        (model, history) where history maps 'train'/'valid' to per-epoch
        sample-weighted mean losses.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=lr,
                steps_per_epoch=len(train_dl), epochs=epochs)
    criterion = nn.MSELoss()
    history = defaultdict(list)
    for epoch in tqdm(range(1, epochs + 1)):
        model.train()
        train_loss = 0
        nsamples_train = 0
        for x, y in train_dl:
            optimizer.zero_grad()
            # Forward pass
            x_prime = model(x.to(device))
            loss = criterion(x_prime, y.to(device))
            # Backward pass
            loss.backward()
            optimizer.step()
            scheduler.step()  # OneCycleLR steps once per batch
            # log losses (weighted by batch size so the epoch mean is exact)
            batch_size = x.shape[0]
            nsamples_train += batch_size
            train_loss += batch_size*(loss.item())
        valid_loss = 0
        nsamples_valid = 0
        model.eval()
        with torch.no_grad():
            for x, y in valid_dl:
                x_prime = model(x.to(device))
                # BUG FIX: validation previously measured loss against the
                # input x; compare against the target y, as in training.
                loss = criterion(x_prime, y.to(device))
                # log losses
                batch_size = x.shape[0]
                nsamples_valid += batch_size
                valid_loss += batch_size*(loss.item())
        train_loss = train_loss / nsamples_train
        valid_loss = valid_loss / nsamples_valid
        history['train'].append(train_loss)
        history['valid'].append(valid_loss)
        if verbose and epoch%10==0:
            print(f'Epoch {epoch}: train loss {train_loss}; valid loss {valid_loss}')
    return model, history
def get_encodings(model, dl):
    """Run model.encoder over every batch of *dl* and stack the results.

    Targets in the dataloader are ignored; gradients are disabled.
    Returns a single tensor of encodings concatenated along dim 0.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.eval()
    batches = []
    with torch.no_grad():
        for x, _ in dl:
            batches.append(model.encoder(x.to(device)))
    return torch.cat(batches, dim=0)
# Interactive market parameters (Python 2: raw_input returns str, cast to int).
p = int(raw_input('Price of lemonade'))    # price charged per cup
c = int(raw_input('Cost of lemonade'))     # production cost per cup
w = int(raw_input('Wage of wokers'))       # hourly wage (prompt typo preserved)
a = int(raw_input('utility function of lemonades U = a*Q + b, a = '))  # utility slope
b = int(raw_input('utility function of lemonades U = a*Q + b, b = '))  # utility intercept
ul = int(raw_input('how much you hate work'))  # per-hour disutility of labor
fq = int(raw_input('how many person/hour it takes to make a lemonade'))  # labor per cup
class function:
    """A linear function f(x) = slope * x + shift that can be adjusted."""

    def __init__(self, slope, shift):
        self.slope = slope
        self.shift = shift

    def shift_function(self, shift_num):
        """Translate the function vertically by shift_num."""
        self.shift = self.shift + shift_num

    def rotate_function(self, rotate_num):
        """Increase the slope by rotate_num."""
        self.slope = self.slope + rotate_num

    def value(self, x):
        """Evaluate the function at x."""
        return self.shift + self.slope * x
uq = function(a, b)
q = 0
C = fq * q *(w-ul) - p*q + uq.value(q)
F = (p-c) *q -fq*q*w
TU = C+F
q_new = q+1
C_new=fq*q_new*(w-ul) - p*q_new + uq.value(q_new)
F_new = (p-c) *q_new -fq*q_new*w
TU_new = C_new+F_new
while TU_new > TU & C_new > 0 & F_new > 0:
q = q+1
C = fq*q*(w-ul) - p*q + uq.value(q)
F = (p-c) *q -fq*q*w
TU = C+F
q_new = q+1
C_new=fq*q_new*(w-ul) - p*q_new + uq.value(q_new)
F_new = (p-c) *q_new -fq*q_new*w
TU_new = C_new+F_new
print "there will be " + str(q) + " cups of lemonades in this world"
|
from django.test import TestCase
from bug_tracker_v2.users.models import User
# testing coverage: coverage run manage.py test --settings=config.settings.test
# coverage html --omit="admin.py"
class TrivialTest(TestCase):
    """Smoke tests: user creation plus the basic login success/failure paths."""

    def setUp(self):
        # One known account that the login tests below authenticate against.
        self.user = User.objects.create_user(username='name', password='password')

    def test_trivial(self):
        # Sanity check that the test harness itself runs.
        self.assertEqual(True, True)

    def test_bad_login(self):
        # Wrong password must be rejected.
        self.assertEqual(self.client.login(username='name', password='wrong_password'), False)

    def test_good_login(self):
        # Correct credentials must be accepted.
        self.assertEqual(self.client.login(username='name', password='password'), True)

    def test_nonexistent_user_login(self):
        # Unknown username must be rejected.
        self.assertEqual(self.client.login(username='not_exist', password='password'), False)
|
'''
Linking records of various aac affiliated museums against ULAN museum.
Record blocking using birth year and record linkage string comparison algorithms
'''
import sys, os, re, json, time
from unidecode import unidecode
from pprint import pprint
import pkg_resources
from optparse import OptionParser
class recordLinkage:
    """Links museum author records against a base (ULAN) database.

    Blocking is done on birth year (v1) and on the first `firstN` characters
    of name tokens (v2); candidate pairs are scored with hybrid Jaccard.
    Python 2 code (uses `unicode` and print statements).
    """
    basedatabase = "ulan"   # default base database name (file stem)
    basedir = 'dataset'     # folder holding the *.json line-delimited datasets
    #get topN matches
    topN = 3
    #block first N characters
    firstN = 2
    absdir = os.path.dirname(os.path.realpath(__file__))

    def __init__(self,base):
        self.basedatabase = base

    #Checks if firstN characters of name match; extract all space separated tokens from the name
    def check_name_match(self, museum_author_name, ulan_name):
        # Normalize both names to lowercase ASCII before comparing tokens.
        if isinstance(ulan_name, unicode):
            a = unidecode(ulan_name)
        else:
            a = unidecode(unicode(ulan_name.decode('unicode-escape').encode('utf-8'),'utf-8')).strip().lower()
        if isinstance(museum_author_name, unicode):
            b = unidecode(museum_author_name)
        else:
            b = unidecode(unicode(museum_author_name.decode('unicode-escape').encode('utf-8'),'utf-8')).strip().lower()
        #print(a,b)
        # Keep only alpha numerics
        a = re.sub('[^A-Za-z0-9 ]+', '', a)
        b = re.sub('[^A-Za-z0-9 ]+', '', b)
        #extract all space separated tokens from the names
        tk_a = a.split(' ')
        tk_b = b.split(' ')
        # True if any token pair shares the same firstN-character prefix.
        for t1 in tk_a:
            for t2 in tk_b:
                if t1[:self.firstN] == t2[:self.firstN]:
                    return True
        return False

    def v1Matching(self, ulanentity, entity):
        """Birth-year blocking, then name similarity; returns a match dict or None."""
        # Check if ulan entity birth year belong to any of the block keys.
        #print(ulanentity)
        linkage = {"match":False}
        if 'byear' in entity:
            if self.preprocessBirth(ulanentity['byear']['value']) == self.preprocessBirth(entity['byear']['value']):
                # do string similarity
                linkage = self.matchNames(ulanentity['name']['value'], entity['name']['value'],'hj', 0.8)
        # If threshold was met
        if linkage['match']:
            return {"id1":entity['uri']['value'],
                    "id2":ulanentity['uri']['value'],
                    "record linkage score":linkage["score"],
                    "human curated":False,
                    "linkage":{}}
        else:
            return None

    #default check first 2 characters of the last name before matching Names
    def v2Matching(self, ulanentity, entity, k=2):
        """Name-prefix blocking, then name similarity; returns a match dict or None."""
        ulan_author_name = ulanentity['name']['value']
        museum_author_name = entity['name']['value']
        linkage = {"match":False}
        name_blocking_match = self.check_name_match(museum_author_name, ulan_author_name)
        if name_blocking_match:
            # do string similarity (stricter threshold than v1's birth-year block)
            linkage = self.matchNames(ulanentity['name']['value'], entity['name']['value'],'hj', 0.9)
        if linkage['match']:
            return {"id1":entity['uri']['value'],
                    "id2":ulanentity['uri']['value'],
                    "record linkage score":linkage["score"],
                    "human curated":False,
                    "linkage":{}}
        else:
            return None

    # Run record linkage against base database with blocking on birth year
    def findPotentialMatches(self, d, output_folder):
        """For each dataset, writes the topN candidate matches per entity as JSON lines.

        d: whitespace-separated dataset names, or falsy to scan `basedir`.
        output_folder: folder (relative to this script) for result files.
        """
        if d:
            datasets = d.split()
        else:
            # Create list of all datasets available
            datasets = [dname[:dname.index('.')] for dname in os.listdir(self.basedir)]
        output_dir = os.path.join(self.absdir, output_folder)
        if not os.path.exists(output_dir):
            try:
                os.makedirs(output_dir)
            except OSError as exc: # Guard against race condition
                raise
        # Iterate over all datasets
        for dname in datasets:
            # Skip ulan
            if dname == self.basedatabase:
                continue
            print "Analyzing ",dname
            start_time = time.time()
            # Open output file
            out = open(os.path.join(self.absdir, output_folder, dname+".json"),'w')
            # Open dataset file and ulan file
            entities = open(os.path.join(self.absdir,self.basedir,dname+".json"))
            # Record blocking + Record Linkage
            for entity in entities:
                # convert line read into json
                entity = json.loads(entity)
                potential_matches = []
                # Re-open the base database for every entity (full scan per record).
                ulanentities = open(os.path.join(self.absdir,self.basedir,self.basedatabase+".json"))
                current_matches = set()
                for ulanentity in ulanentities:
                    # convert line read into json
                    ulanentity = json.loads(ulanentity)
                    '''
                    Get matches by both blocking by birth year and name blocking of first 2 characters
                    Add threshold to take the topN matches
                    v1 blocks on birthyear
                    v2 blocks on firstN characters
                    '''
                    match_v1 = self.v1Matching(ulanentity, entity)
                    if match_v1:
                        match_v1['linkage']['ulan_name'] = ulanentity['name']['value']
                        match_v1['linkage']['museum_name'] = entity['name']['value']
                        if match_v1['id2'] not in current_matches:
                            potential_matches.append(match_v1)
                            current_matches.add(match_v1['id2'])
                    match_v2 = self.v2Matching(ulanentity, entity)
                    if match_v2:
                        match_v2['linkage']['ulan_name'] = ulanentity['name']['value']
                        match_v2['linkage']['museum_name'] = entity['name']['value']
                        if match_v2['id2'] not in current_matches:
                            potential_matches.append(match_v2)
                            current_matches.add(match_v2['id2'])
                # Close ULAN entities file handle
                ulanentities.close()
                # Sort potential matches based on matching score and select top N
                potential_matches = sorted( potential_matches ,key=lambda x: x['record linkage score'],reverse=True )
                perfactMatch = False
                for i in range(0,self.topN):
                    # Break if no potential matches
                    if len(potential_matches) == 0:
                        #print "No matches were found for entity", entity
                        break
                    # Break if not enough potential matches
                    elif len(potential_matches)-1 < i:
                        #print "Enough matches were not found for entity", entity
                        break
                    elif perfactMatch and potential_matches[i]['record linkage score'] < 1:
                        #print "Found all perfect matched for entity ", entity
                        break
                    out.write(json.dumps(potential_matches[i]))
                    out.write('\n')
                    # Break if perfect match is found
                    if potential_matches[i]['record linkage score'] == 1:
                        #print "Found perfect match for entity ", entity
                        perfactMatch = True
            # Close output file handle
            out.close()
            print "Completed %s dataset in %s seconds " % (dname, (time.time()-start_time) )
            # Close entities file handle
            entities.close()

    # Extract birth year from birth date
    def preprocessBirth(self, s):
        """Returns the first 4-digit year found in s, or 0 when none matches."""
        m = re.search('.*(\\d\\d\\d\\d).*',s)
        if m:
            return m.group(1)
        else:
            return 0

    # Match names using specified technique
    def matchNames(self, s1, s2, technique, threshold):
        if technique == "hj": # Hybrid Jaccard
            return self.matchNames_hj(s1, s2, threshold)
        else:
            return {"match":False}

    # Match names using hybrid jaccard, default threshold = 0.67
    def matchNames_hj(self,s1,s2, threshold=0.67):
        # HybridJaccard lives in a sibling checkout; imported lazily per call.
        sys.path.append(os.path.join(self.absdir,'..','HybridJaccard'))
        from hybridJaccard import HybridJaccard
        match = {'match':False}
        sm = HybridJaccard(config_path=os.path.join('..',"hj_config.txt"))
        # Pre process strings
        s1 = unidecode(unicode(s1.encode('utf-8'),'utf-8')).strip().lower()
        s2 = unidecode(unicode(s2.encode('utf-8'),'utf-8')).strip().lower()
        # Keep only alpha numerics
        s1 = re.sub('[^A-Za-z0-9 ]+', ' ', s1)
        s2 = re.sub('[^A-Za-z0-9 ]+', ' ', s2)
        match['score'] = sm.sim_measure(s1.split(), s2.split())
        if match['score'] > threshold:
            match['match'] = True
        return match
def main():
    """Parse command-line options and run record linkage against ULAN."""
    parser = OptionParser()
    parser.add_option("-d", "--data_set", dest="data_set", type="string",
                      help="Data sets")
    parser.add_option("-o", "--output_folder", dest="output_folder", type="string",
                      help="Output folder containing result")
    (options, args) = parser.parse_args()
    data_set = options.data_set
    output_folder = options.output_folder
    # Default result folder when -o is not given.
    if output_folder is None:
        output_folder = 'questions'
    start_time = time.time()
    # Base database is ulan.json.
    linker = recordLinkage('ulan')
    linker.findPotentialMatches(data_set, output_folder)
    print("--- %s seconds ---" % (time.time() - start_time))


if __name__ == "__main__":
    main()
# Intel® Single Event API
#
# This file is provided under the BSD 3-Clause license.
# Copyright (c) 2021, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import print_function
import os
import imp
import sys
import sea
import copy
import time
import shutil
import struct
import signal
import fnmatch
import tempfile
import binascii
import platform
import traceback
import threading
import subprocess
from python_compat import *
from glob import glob
from datetime import datetime, timedelta
sys.path.append(os.path.realpath(os.path.join(os.path.dirname(__file__), 'decoders')))
# sys.setdefaultencoding only exists on Python 2; best effort, ignore elsewhere.
try:
    sys.setdefaultencoding("utf-8")
except:
    pass

ProgressConst = 20000        # progress reporting granularity — TODO confirm units
TIME_SHIFT_FOR_GT = 1000     # time shift applied for the 'gt' format — verify usage

# on OSX an Application launched from Launchpad has nothing in PATH
if sys.platform == 'darwin':
    if '/usr/bin' not in os.environ['PATH']:
        os.environ['PATH'] += os.pathsep + '/usr/bin'
    if '/usr/sbin' not in os.environ['PATH']:
        os.environ['PATH'] += os.pathsep + '/usr/sbin'
def global_storage(name, default={}):
    """Returns the named slot of a process-wide dict stashed in __builtins__.

    With a falsy *name* the whole storage dict is returned. The mutable
    default is deliberate here: it is deep-copied before being stored, so
    calls never share it.
    """
    # __builtins__ is a dict in the main module but a module object elsewhere
    # (and on pypy) — handle both representations.
    if isinstance(__builtins__, dict):
        seapi = __builtins__.setdefault('SEAPI', {})
    else:  # pypy
        if not hasattr(__builtins__, 'SEAPI'):
            setattr(__builtins__, 'SEAPI', {})
        seapi = getattr(__builtins__, 'SEAPI', None)
    return seapi.setdefault(name, copy.deepcopy(default)) if name else seapi  # FIXME put copy.deepcopy under condition
def reset_global(name, value):
    """Overwrite the process-wide storage entry *name* with *value*."""
    storage = global_storage(None)
    storage[name] = value
def format_bytes(num):
    """Returns *num* bytes as a human-readable string, e.g. '3.5 MB'.

    Negative values keep their sign; the magnitude drives the unit choice.
    """
    for unit in ['', 'K', 'M', 'G']:
        if abs(num) < 1024.0:
            return "%3.1f %sB" % (num, unit)
        num /= 1024.0
    # BUG FIX: values >= 1 TB previously fell through to "str(num) + 'B'",
    # producing e.g. "1.0B" — no unit and no space. Format consistently as TB.
    return "%3.1f TB" % num
class DummyWith():  # for conditional with statements
    """No-op context manager: enters to itself, never suppresses exceptions."""

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        # Returning False propagates any exception raised in the with-body.
        return False
class Profiler():
    """Context manager that profiles its body and prints stats sorted by time."""

    def __enter__(self):
        # Prefer the C implementation; fall back to the pure-Python profiler.
        try:
            import cProfile as profile
        except:
            import profile
        self.profiler = profile.Profile()
        self.profiler.enable()
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.profiler.disable()
        self.profiler.print_stats('time')
        return False
def get_extensions(name, multiple=False):
    """Loads '<name>s/*.py' plugin modules and returns {format: handler}.

    The result is cached as a module attribute named '<NAME>S', so the plugin
    directory is scanned only once per process. When *multiple* is True each
    format maps to a list of handlers instead of a single one.
    Uses the deprecated 'imp' module — presumably targets older Pythons.
    """
    big_name = (name + 's').upper()
    this_module = sys.modules[__name__]
    # Return the cached registry if a previous call already built it.
    if big_name in dir(this_module):
        return getattr(this_module, big_name)
    extensions = {}
    root = os.path.join(os.path.dirname(os.path.realpath(__file__)), name + 's')
    for extension in glob(os.path.join(root, '*.py')):
        module_name = name + '.' + os.path.splitext(os.path.basename(extension))[0]
        # Ensure a parent package object exists so dotted names resolve.
        if name not in sys.modules:
            sys.modules[name] = imp.new_module(name)
        if module_name in sys.modules:
            module = sys.modules[module_name]
        else:
            module = imp.load_source(module_name, extension)
        # Each plugin declares e.g. EXPORTER_DESCRIPTORS with availability info.
        for desc in getattr(module, name.upper() + '_DESCRIPTORS', []):
            if desc['available']:
                if multiple:
                    extensions.setdefault(desc['format'], []).append(desc[name])
                else:
                    extensions[desc['format']] = desc[name]
    setattr(this_module, big_name, extensions)
    return extensions
def get_exporters():
    """Available exporter plugins keyed by output format name."""
    return get_extensions('exporter')


def get_collectors():
    """Available collector plugins keyed by collector name."""
    return get_extensions('collector')


# Verbosity levels ordered from most to least severe; indices are compared.
verbose_choices = ['fatal', 'error', 'warning', 'info']
def parse_args(args):
    """Parses the tool's command line.

    Two modes: launch mode, when a separator ('!', '?' or '%') is present —
    everything after it is the victim command line — and transformation mode
    otherwise. Returns (parsed_args, victim) where victim is None in
    transformation mode.
    """
    import argparse
    parser = argparse.ArgumentParser(epilog="After this command line add ! followed by command line of your program")
    format_choices = list(get_exporters().keys())
    # Platform-specific pseudo-formats handled outside the exporter plugins.
    if sys.platform == 'win32':
        format_choices.append("etw")
    elif sys.platform == 'darwin':
        format_choices.append("xcode")
    elif sys.platform == 'linux':
        format_choices.append("kernelshark")
    parser.add_argument("-f", "--format", choices=format_choices, nargs='*', default=[], help='One or many output formats.')
    parser.add_argument("-o", "--output", help='Output folder pattern -<pid> will be added to it')
    parser.add_argument("-b", "--bindir", help='If you run script not from its location')
    parser.add_argument("-i", "--input", help='Provide input folder for transformation (<the one you passed to -o>-<pid>)')
    parser.add_argument("-t", "--trace", nargs='*', help='Additional trace file in one of supported formats')
    parser.add_argument("-d", "--dir", help='Working directory for target (your program)')
    parser.add_argument("-v", "--verbose", default="warning", choices=verbose_choices)
    parser.add_argument("-c", "--cuts", nargs='*', help='Set "all" to merge all cuts in one trace')
    # NOTE: argparse applies type=int to the string const, so a bare -r yields 5.
    parser.add_argument("-r", "--ring", type=int, const='5', default=None, action='store', nargs='?', help='Makes trace to cycle inside ring buffer of given length in seconds')
    parser.add_argument("--target", help='Pid of target')
    parser.add_argument("--stacks", action="store_true", help='Collect stacks')
    parser.add_argument("--profile", action="store_true", help='Internal: profile runtool execution')
    parser.add_argument("--collector", choices=list(get_collectors().keys()) + ['default'])

    # First separator found splits tool arguments from the victim command line.
    separators = ['!', '?', '%']
    separator = None
    for sep in separators:
        if sep in args:
            separator = args.index(sep)
            break
    # separator = args.index("!") if "!" in args else args.index("?") if "?" in args else None
    if separator is not None:
        parsed_args = parser.parse_args(args[:separator])
        if parsed_args.input:
            parser.print_help()
            print("Error: Input argument (-i) contradicts launch mode")
            sys.exit(-1)
        victim = args[separator + 1:]
        victim[-1] = victim[-1].strip()  # removal of trailing '\r' - when launched from .sh
        if not parsed_args.output:
            # Default collection folder on POSIX; Windows requires explicit -o.
            if sys.platform != 'win32':
                parsed_args.output = '/tmp/isea_collection'
                print('Collection will be written into:' , parsed_args.output)
            else:
                parser.print_help()
                print("Error: No output (-o) given in launch mode")
                sys.exit(-1)
        handle_args(parsed_args)
        return parsed_args, victim
    else:  # nothing to launch, transformation mode
        if args:
            args[-1] = args[-1].strip()  # removal of trailing '\r' - when launched from .sh
        parsed_args = parser.parse_args(args)
        handle_args(parsed_args)
        if not parsed_args.input:
            # Fall back to the default POSIX collection folder if it exists.
            if sys.platform != 'win32':
                parsed_args.input = '/tmp/isea_collection'
                if os.path.exists(parsed_args.input):
                    print('Collection will be read from:', parsed_args.input)
                else:
                    parser.print_help()
                    sys.exit(-1)
            else:
                print("--input argument is required for transformation mode.")
                parser.print_help()
                sys.exit(-1)
        if not parsed_args.format:
            parsed_args.format = ['gt']
        setattr(parsed_args, 'user_input', parsed_args.input)
        if not parsed_args.output:
            parsed_args.output = parsed_args.input
        return parsed_args, None
def handle_args(args):
    """Normalizes the path-like arguments in place.

    Expands user/env references in input, output and dir, and resolves
    bindir to an absolute path (defaulting to ../bin next to this script).
    """
    for attr in ('input', 'output', 'dir'):
        value = getattr(args, attr)
        if value:
            setattr(args, attr, subst_env_vars(value))
    if not args.bindir:
        args.bindir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../bin')
    args.bindir = os.path.abspath(args.bindir)
def get_args():
    """Returns the globally stored parsed command-line arguments."""
    return global_storage('arguments')


def get_original_env():
    """Returns the environment snapshot taken at startup (see main)."""
    return global_storage('environ')
def verbose_level(level=None, statics={}):
    """Maps *level* to its index in verbose_choices; caches the configured level.

    NOTE: 'statics' is an intentional mutable-default cache of the level
    parsed from the command line. If called before arguments are stored it
    returns the string 'warning' instead of an index — presumably never
    compared in that state; verify with callers.
    """
    if not statics:
        args = get_args()
        if not args:
            return verbose_choices.index(level) if level else 'warning'
        statics['level'] = verbose_choices.index(get_args().verbose)
    return verbose_choices.index(level) if level else statics['level']
def message(level, txt, statics={}):
    """Prints *txt* with its call site, rate-limited to 5 messages per site.

    Returns False when suppressed by verbosity, True otherwise. 'statics' is
    an intentional mutable-default counter keyed by call site.
    """
    assert isinstance(statics, dict)
    if level and verbose_level(level) > verbose_level():  # see default in "parse_args"
        return False
    # in python2 type(parent_frame) is tuple with length == 4
    # in python3 type(parent_frame) is FrameSummary
    parent_frame = traceback.extract_stack()[-2]
    # slice operation returns tuple
    history = statics.setdefault(parent_frame[:4], {'count': 0, 'heap': []})
    history['count'] += 1
    if history['count'] < 5 or not level:
        print('\n', (level.upper() + ':') if level else '', '%s' % txt)
        print('\tFile "%s", line %d, in %s' % parent_frame[:3])
        Collector.log("\n%s:\t%s\n" % (level.upper() if level else 'RUNTIME', txt), stack=(verbose_level(level) <= verbose_level('warning')))
    elif history['count'] == 5:
        # After 5 hits from the same call site, announce once and go quiet.
        print('\n', level.upper(), 'Stopping pollution from', parent_frame[:3])
    return True
def main():
    """Entry point: snapshot the environment, parse args, launch or transform.

    Launch mode runs the victim command under collectors; transformation mode
    converts an existing collection into the requested formats.
    """
    reset_global('environ', os.environ.copy())
    (args, victim) = parse_args(sys.argv[1:])  # skipping the script name
    reset_global('arguments', args)
    if victim:
        ensure_dir(args.output, clean=True)
        launch(args, victim)
    else:
        # CLEANUP: the original computed os.path.splitext(args.input) into an
        # unused local 'ext' here; removed.
        transform_all(args)
    Collector.log('Started with arguments: %s' % str(sys.argv))
    save_domains()
def os_lib_ext():
    """Returns the shared-library filename suffix for the current platform.

    Raises:
        RuntimeError: on an unrecognized platform.
    """
    if sys.platform == 'win32':
        return '.dll'
    elif sys.platform == 'darwin':
        return '.dylib'
    elif 'linux' in sys.platform:
        return '.so'
    # BUG FIX: the original used `assert (not "Unsupported platform")`, which
    # is stripped under `python -O` and would silently return None.
    raise RuntimeError("Unsupported platform: %s" % sys.platform)
def get_pids(victim, tracer):
    """Returns the set of pid strings whose command line matches victim[0].

    *victim* is a one-element list holding an fnmatch-style wildcard; the
    process list is obtained by running `ps` through the tracer. POSIX only.
    """
    assert len(victim) == 1  # one wildcard is supported yet
    assert sys.platform != 'win32'  # no Windows support yet
    out, err = tracer.execute('ps -o pid,ppid,command -ax', log=False)
    if err:
        tracer.log(err)
        return []
    parsed = {}
    for line in out.split('\n'):
        if not line:
            continue
        parts = line.split()
        if len(parts) < 3:
            continue
        # Everything after pid and ppid is the command line.
        cmd = ' '.join(parts[2:])
        # Exclude this script's own process from the matches.
        if fnmatch.fnmatch(cmd.lower(), victim[0].lower()) and __file__ not in cmd:  # get matching cmd
            parsed[parts[0]] = cmd
            print("Matching cmd:\t", parts[0], cmd)
    return set(parsed.keys())
def launch(args, victim):
    """Runs or attaches to the victim under the configured collector.

    Sets up the instrumentation environment (ITT notify libraries, Vulkan
    layers, SEA feature flags), starts the platform collector, waits for the
    target to finish (or CTRL+C in attach mode), then stops collection and
    optionally transforms the results.
    """
    sea.prepare_environ(args)
    sea_itf = sea.ITT('tools')
    global_storage('collection').setdefault('time', {'start': time.time(), 'itt_start': sea_itf.get_timestamp()})
    env = {}
    paths = []
    macosx = sys.platform == 'darwin'
    win32 = sys.platform == 'win32'
    bits_array = [''] if macosx else ['32', '64']
    # Locate the IntelSEAPI runtime libraries next to bindir.
    for bits in bits_array:
        search = os.path.sep.join([args.bindir, "*IntelSEAPI" + os_lib_ext()])
        files = glob(search)
        if not len(files):
            message('warning', "didn't find any files for: %s" % search)
            continue
        paths.append((bits, files[0]))
    if not len(paths):
        print("Error: didn't find any *IntelSEAPI%s files. Please check that you run from bin directory, or use --bindir." % os_lib_ext())
        sys.exit(-1)
    if macosx:
        env["DYLD_INSERT_LIBRARIES"] = paths[0][1]
    else:
        paths = dict(paths)
        if '32' in paths:
            env["INTEL_LIBITTNOTIFY32"] = paths['32']
            env["INTEL_JIT_PROFILER32"] = paths['32']
        if '64' in paths:
            env["INTEL_LIBITTNOTIFY64"] = paths['64']
            env["INTEL_JIT_PROFILER64"] = paths['64']
    env["INTEL_SEA_FEATURES"] = os.environ['INTEL_SEA_FEATURES'] if 'INTEL_SEA_FEATURES' in os.environ else ""
    env["INTEL_SEA_FEATURES"] += (" " + str(args.format)) if args.format else ""
    env["INTEL_SEA_FEATURES"] += " stacks" if args.stacks else ""
    if args.verbose == 'info':
        env['INTEL_SEA_VERBOSE'] = '1'
    if args.ring:
        env["INTEL_SEA_RING"] = str(args.ring)
    if args.output:
        env["INTEL_SEA_SAVE_TO"] = os.path.join(args.output, 'pid')
    # vulkan support
    os_name = 'WIN' if win32 else 'OSX' if macosx else 'LIN'
    var_name = os.pathsep.join(['VK_LAYER_INTEL_SEA_%s%s' % (os_name, bits) for bits in bits_array])
    env['VK_INSTANCE_LAYERS'] = (os.environ['VK_INSTANCE_LAYERS'] + os.pathsep + var_name) if 'VK_INSTANCE_LAYERS' in os.environ else var_name
    env['VK_LAYER_PATH'] = (os.environ['VK_LAYER_PATH'] + os.pathsep + args.bindir) if 'VK_LAYER_PATH' in os.environ else args.bindir
    message('info', "Running: " + str(victim))
    message('info', "Environment: " + str(env))
    # Merge the prepared variables into the shared launch environment.
    environ = global_storage('sea_env')
    for key, val in env.items():
        if key in environ and val != environ[key]:
            # Only the preload-style variables are expected to accumulate.
            assert key in ['LD_PRELOAD', 'DYLD_INSERT_LIBRARIES']
            environ[key] += ':' + val
        else:
            environ[key] = val
    if 'kernelshark' in args.format:
        victim = 'trace-cmd record -e IntelSEAPI/* ' + victim
    tracer = None
    if args.collector:
        tracer = get_collectors()[args.collector]
    elif not tracer:  # using default collector per system
        if 'linux' in sys.platform:
            tracer = get_collectors()['ftrace']
        elif 'win32' == sys.platform:
            tracer = get_collectors()['etw']
        elif 'darwin' in sys.platform:
            tracer = get_collectors()['dtrace']
    run_suspended = False
    if args.dir:
        full_victim = os.path.join(args.dir, victim[0])
        if os.path.exists(full_victim):
            victim[0] = full_victim
    setattr(args, 'victim', victim[0])
    tracer = tracer(args) if tracer else None  # turning class into instance
    if '!' in sys.argv[1:]:
        # Launch mode: start the victim ourselves.
        assert tracer
        if hasattr(tracer, 'launch_victim'):
            victim[0] = victim[0].replace(' ', r'\ ')
            proc = tracer.launch_victim(victim, env=environ)
        else:
            if run_suspended:  # might consider using preload of SEA lib and do the suspend there. Or allow tracers to run it.
                suspended = '(cd "%s"; kill -STOP $$; exec %s )' % (args.dir or '.', ' '.join(victim))
                proc = subprocess.Popen(suspended, env=environ, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            else:
                proc = subprocess.Popen(victim, env=environ, shell=False, cwd=args.dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if sys.platform != 'win32' and not run_suspended:  # FIXME: implement suspended start on Windows!
            if not args.ring:
                proc.send_signal(signal.SIGSTOP)
        args.target = proc.pid
        tracer.start()
        if sys.platform != 'win32':  # collector start may be long, so we freeze victim during this time
            print("PID:", proc.pid)
            if not args.ring:
                proc.send_signal(signal.SIGCONT)
        print("Waiting application to exit...")
        global_storage('collection')['time']['before'] = time.time()
        try:
            proc.wait()
        except KeyboardInterrupt:
            print("Stopping all...")
            proc.send_signal(signal.SIGABRT)
        out, err = proc.communicate()
        if out or err:
            print("\n\n -= Target output =- {\n")
            print(out.decode().strip())
            print("\n", "-" * 50, "\n")
            print(err.decode().strip())
            print("\n}\n\n")
    elif '?' in sys.argv[1:]:
        # Attach mode: find running processes matching the wildcard.
        print("Attach to:", victim)
        pids = get_pids(victim, tracer)
        if not pids:
            print("Error: nothing found...")
            return
        if tracer:
            args.target = list(pids)
            tracer.start()
        print("Waiting for CTRL+C...")
        global_storage('collection')['time']['before'] = time.time()

        def is_running(pid):
            # Signal 0 probes process existence without delivering a signal.
            try:
                os.kill(int(pid), 0)
                return True
            except OSError:
                return False
        try:
            while any(is_running(pid) for pid in pids):
                time.sleep(0.5)
        except KeyboardInterrupt:
            pass
    else:
        message('error', 'unsupported separator')
        return -1
    global_storage('collection')['time']['after'] = time.time()
    print("Stopping collectors...")
    if tracer:
        args.trace = tracer.stop()
    if not args.output:
        return []
    args.input = args.output
    times = global_storage('collection')['time']
    times['end'] = time.time()
    times['itt_end'] = sea_itf.get_timestamp()
    if args.target:
        if isinstance(args.target, list):
            allowed_pids = args.target
        else:
            allowed_pids = [args.target]
        global_storage('collection').setdefault('targets', allowed_pids)
    if args.format:
        transform_all(args)
def subst_env_vars(path):
    """Expand platform-specific variables in *path*: %VARS% on Windows, '~' elsewhere."""
    expander = os.path.expandvars if sys.platform == 'win32' else os.path.expanduser
    return expander(path)
# Home/profile directory of the current user, resolved per platform.
UserProfile = subst_env_vars('%USERPROFILE%' if sys.platform == 'win32' else '~')
# Persistent cache dictionary kept between tool runs.
PermanentCache = os.path.join(UserProfile, '.isea_cache.dict')
def ensure_dir(path, clean, statics={}):
    """Make sure directory *path* exists; wipe its contents first when *clean* is True.

    `statics` is an intentional mutable-default cache: each path is processed at
    most once per run, and asking to clean a path that was previously prepared
    without cleaning is considered a programming error.
    """
    if path in statics:
        assert statics[path] or not clean
        return
    statics[path] = clean
    exists = os.path.exists(path)
    if exists and clean:
        shutil.rmtree(path)
    elif exists:
        return
    os.makedirs(path)
def transform_all(args):
    """Run transform() over every per-process 'pid-*' folder under args.input.

    Temporarily rewrites args.input/args.output per folder, restores args.output
    afterwards, prints each produced file with its size, and returns the list of
    produced paths.
    """
    setattr(args, 'user_input', args.input)
    ensure_dir(os.path.join(args.user_input, 'transform'), True)
    results = []
    saved_output = args.output
    per_pid_folders = [item for item in glob(os.path.join(args.input, 'pid-*')) if os.path.isdir(item)]
    for folder in per_pid_folders:
        args.input = folder
        args.output = saved_output + '.' + os.path.basename(folder)
        results += transform(args)
    args.output = saved_output
    # normalize path separators for the current platform in the report
    replacement = ('/', '\\') if sys.platform == 'win32' else ('\\', '/')
    for produced in results:
        print('Result:', os.path.abspath(produced).replace(*replacement), format_bytes(os.path.getsize(produced)))
    return results
def split_filename(path):
    """Split a .sea data file path into its naming components.

    File names follow the pattern "<name>[-<ring>][!<cut>]<ext>" where 'ring'
    is the ring-buffer part and 'cut' the cut name.

    :param path: file path to decompose
    :return: dict with keys 'dir', 'name', 'cut', 'ring', 'ext'
             ('cut'/'ring' are None when absent)
    """
    (dir, name) = os.path.split(path)
    (name, ext) = os.path.splitext(name)
    ring = None
    cut = None
    # maxsplit=1 keeps names containing additional separators from raising
    # "too many values to unpack" (the old bare split crashed on them)
    if '-' in name:
        (name, ring) = name.split("-", 1)
    if '!' in name:
        (name, cut) = name.split("!", 1)
    return {'dir': dir, 'name': name, 'cut': cut, 'ring': ring, 'ext': ext}
def default_tree(args):
    """Return a fresh parse tree, pre-populated with module info from *.mdl files.

    When args.input is a directory, each '<id>.mdl' file contributes an entry
    modules[id] = [module path, last token]; otherwise the tree stays empty.
    """
    tree = {
        "strings": {}, "domains": {}, "threads": {}, "groups": {},
        "modules": {}, "ring_buffer": False, "cuts": set()
    }
    if not os.path.isdir(args.input):
        return tree
    for mdl_path in glob(os.path.join(args.input, '*.mdl')):
        with open(mdl_path, 'r') as mdl_file:
            tokens = mdl_file.readline().split()
        module_id = int(os.path.basename(mdl_path).replace(".mdl", ""))
        # the module path may contain spaces: everything but the last token is the path
        tree["modules"][module_id] = [' '.join(tokens[:-1]), tokens[-1]]
    return tree
def build_tid_map(args, path):
    """Build a {tid: pid} mapping from the '<name>-<pid>' folders under *path*.

    Each per-process folder contains per-domain subfolders with '<tid>[...].sea'
    files; every tid found is mapped to the folder's pid, and the pid maps to
    itself. *args* is unused but kept for interface compatibility.
    """
    mapping = {}
    for process_dir in glob(os.path.join(path, '*-*')):
        if not os.path.isdir(process_dir):
            continue
        suffix = process_dir.rsplit('-', 1)[1]
        if not suffix.isdigit():
            continue
        pid = int(suffix)
        for trace in glob(os.path.join(process_dir, '*', '*.sea')):
            base = os.path.basename(trace)
            tid = int(base.split('!')[0].split('-')[0].split('.')[0])
            mapping[tid] = pid
        mapping.setdefault(pid, pid)
    return mapping
def sea_reader(args):  # reads the structure of .sea format folder into dictionary
    """Read the '.sea' capture folder given by args.input into a tree dictionary.

    The folder name encodes the pid ('...-<pid>'); top-level files carry string
    handles, thread and group names; each subfolder is a domain holding per-thread
    .sea data files sorted by the timestamp of their first record.

    :return: the populated tree, or None when the folder does not exist
             (an error message is printed).
    """
    folder = args.input
    if not os.path.exists(folder):
        print("""Error: folder "%s" doesn't exist""" % folder)
        return None  # FIX: previously fell through and crashed in os.walk below
    tree = default_tree(args)
    pos = folder.rfind("-")  # pid of the process is encoded right in the name of the folder
    tree["pid"] = int(folder[pos + 1:])
    folder = folder.replace("\\", "/").rstrip("/")
    toplevel = next(os.walk(folder))
    for filename in toplevel[2]:
        with open("/".join([folder, filename]), "r") as file:
            if filename.endswith(".str"):  # each string_handle_create writes separate file, name is the handle, content is the value
                tree["strings"][int(filename.replace(".str", ""))] = file.readline()
            elif filename.endswith(".tid"):  # named thread makes record: name is the handle and content is the value
                tree["threads"][filename.replace(".tid", "")] = file.readline()
            elif filename.endswith(".pid"):  # named groups (pseudo pids) makes record: group is the handle and content is the value
                tree["groups"][filename.replace(".pid", "")] = file.readline()
    for domain in toplevel[1]:  # data from every domain gets recorded into separate folder which is named after the domain name
        tree["domains"][domain] = {"files": []}
        for file in next(os.walk("/".join([folder, domain])))[2]:  # each thread of this domain has separate file with data
            if not file.endswith(".sea"):
                print("Warning: weird file found:", file)
                continue
            filename = file[:-4]
            tree["ring_buffer"] = tree["ring_buffer"] or ('-' in filename)
            tid = int(filename.split("!")[0].split("-")[0])
            tree["cuts"].add(split_filename(filename)['cut'])
            tree["domains"][domain]["files"].append((tid, "/".join([folder, domain, file])))
        def time_sort(item):
            # order the data files by the timestamp of their first record
            with open(item[1], "rb") as file:
                tuple = read_chunk_header(file)
                return tuple[0]
        tree["domains"][domain]["files"].sort(key=time_sort)
    return tree
g_progress_interceptor = None  # optional callable(message, current, total) invoked on every tick
verbose_progress = True  # when True, progress is printed to stdout

# FIXME: doesn't belong this file, move to 'utils'
class Progress:
    """Console progress reporter, used as a context manager."""
    def __init__(self, total, steps, message=""):
        self.total = total
        self.steps = steps
        self.shown_steps = -1
        self.message = message
        self.last_tick = None

    def __enter__(self):
        return self

    def time_to_tick(self, interval=1):
        """True when more than *interval* seconds passed since the last tick (or never ticked)."""
        if self.last_tick is None:
            return True
        return (datetime.now() - self.last_tick).total_seconds() > interval

    def tick(self, current):
        """Report progress at *current* out of self.total."""
        self.last_tick = datetime.now()
        if g_progress_interceptor:
            g_progress_interceptor(self.message, current, self.total)
        if not self.total:
            return
        self.show_progress(int(self.steps * current / self.total), float(current) / self.total)

    def show_progress(self, show_steps, percentage):
        # only redraw when we advanced to a new step
        if self.shown_steps >= show_steps:
            return
        if verbose_progress:
            print('\r%s: %d%%' % (self.message, int(100 * percentage)), end='')
            sys.stdout.flush()
        self.shown_steps = show_steps

    def __exit__(self, type, value, traceback):
        # force the 100% line on exit
        if g_progress_interceptor:
            g_progress_interceptor(self.message, self.total, self.total)
        self.show_progress(self.steps, 1)
        if verbose_progress:
            print('\r%s: %d%%\n' % (self.message, 100))
        return False

    @staticmethod
    def set_interceptor(interceptor, verbose_mode=False):
        global g_progress_interceptor
        global verbose_progress
        g_progress_interceptor = interceptor
        verbose_progress = verbose_mode
class PseudoProgress(Progress):
    """Progress with unknown total: ticks itself about once a second via sys.setprofile."""
    def __init__(self, message=""):
        self.time = None
        Progress.__init__(self, 0, 0, message)
        self.old_profiler = sys.getprofile()

    def profiler(self, frame, event, arg):
        # sample only on function returns to keep the hook cheap
        if 'return' not in event:
            return
        now = time.time()
        if now - self.time <= 1:
            return
        self.time = now
        self.tick(now)

    def __enter__(self):
        self.time = time.time()
        sys.setprofile(self.profiler)
        return self

    def __exit__(self, type, value, traceback):
        sys.setprofile(self.old_profiler)  # restore whatever profiler was active before
        return Progress.__exit__(self, type, value, traceback)
def read_chunk_header(file):
    """Read one 10-byte record header (see STinyRecord in Recorder.cpp).

    Returns (timestamp, b1, b2) or (0, 0, 0) at end of file.
    """
    header = file.read(10)
    if header:
        return struct.unpack('Qbb', header)
    return 0, 0, 0
def transform(args):
    """Parse the .sea folder at args.input and export it, once per requested cut.

    Without cuts (or with 'all') everything is transformed in a single pass;
    otherwise each selected cut is exported to its own '<output>!<cut>' target.
    Returns the list of produced files.
    """
    message('info', "Transform: " + str(args))
    tree = sea_reader(args)  # parse the structure
    if not args.cuts or args.cuts == ['all']:
        return transform2(args, tree)
    result = []
    saved_output = args.output[:]  # the loop below rewrites args.output; keep the original
    for cut in tree['cuts']:
        if args.cuts and cut not in args.cuts:
            continue
        args.output = (saved_output + "!" + cut) if cut else saved_output
        print("Cut #", cut if cut else "<None>")

        def skip_fn(path, current_cut=cut):
            filename = os.path.split(path)[1]
            if current_cut:  # read only files having this cut name in filename
                return current_cut != split_filename(filename)['cut']
            return "!" in filename  # otherwise only files without any cut name
        result += transform2(args, tree, skip_fn)
    args.output = saved_output
    return result
# FIXME: doesn't belong this file, move to Combiners or something
# Ordinal positions in this list correspond to the numeric 'type' codes stored
# in the recorded events (e.g. 0/1 = task begin/end, 2/3 = overlapped task
# begin/end, 7/8 = frame begin/end) — see the type arithmetic in the combiners.
TaskTypes = [
    "task_begin", "task_end",
    "task_begin_overlapped", "task_end_overlapped",
    "metadata_add",
    "marker",
    "counter",
    "frame_begin", "frame_end",
    "object_new", "object_snapshot", "object_delete",
    "relation"
]
class TaskCombinerCommon:
    """Base machinery that matches raw begin/end events into complete tasks.

    Instances are called as functions with (event name, event data); matched
    pairs are forwarded to self.complete_task / self.relation / self.global_metadata,
    which subclasses (e.g. Callbacks) are expected to provide, together with
    self.task_postprocessor, self.handle_special and self.sea_decoders.
    """
    def __init__(self, args, tree):
        # args: parsed command-line options; tree: result of sea_reader()
        self.no_begin = []  # for the ring buffer case when we get task end but no task begin
        self.time_bounds = [2 ** 64, 0]  # left and right time bounds
        self.tree = tree
        self.args = args
        self.domains = {}  # per-domain task/counter state, built lazily in __call__
        self.prev_sample = 0
        self.total_memory = 0
        self.prev_memory = None
        self.memcounters = {}  # {pid: {tid: {counter name: cache}}} for compress_counter
    def finish(self):
        """Hook for subclasses; called when all events were consumed."""
        pass
    def __call__(self, fn, data):
        """Dispatch one raw event *data* of kind *fn* (one of TaskTypes)."""
        domain = self.domains.setdefault(data['domain'], {'tasks': {}, 'counters': {}})
        thread = domain['tasks'].setdefault(data['tid'], {'byid': {}, 'stack': [], 'args': {}})
        # helpers over the per-thread task bookkeeping:
        def get_tasks(id):
            # stack of plain tasks (id is falsy) or the list of tasks sharing this id
            if not id:
                return thread['stack']
            return thread['byid'].setdefault(id, [])
        def get_task(id):
            # top task for the given id, falling back to the plain stack top
            if id:
                tasks = get_tasks(id)
                if not tasks:  # they can be stacked
                    tasks = get_tasks(None)
                    if not tasks or ('id' not in tasks[-1]) or tasks[-1]['id'] != id:
                        return None
            else:
                tasks = get_tasks(None)
            if tasks:
                return tasks[-1]
            else:
                return None
        def find_task(id):
            # search the id across all threads of the domain
            for thread_stacks in domain['tasks'].values():  # look in all threads
                if (id in thread_stacks['byid']) and thread_stacks['byid'][id]:
                    return thread_stacks['byid'][id][-1]
                else:
                    for item in thread_stacks['stack']:
                        if ('id' in item) and item['id'] == id:
                            return item
        def get_stack(tid):
            # all open tasks of the thread across all domains, ordered by time
            stack = []
            for domain in self.domains.values():
                if tid not in domain['tasks']:
                    continue
                thread = domain['tasks'][tid]
                for byid in thread['byid'].values():
                    stack += byid
                if thread['stack']:
                    stack += thread['stack']
            stack.sort(key=lambda item: item['time'])
            return stack
        def get_last_index(tasks, type):
            # index of the most recent task of the given numeric type, or None
            if not len(tasks):
                return None
            index = len(tasks) - 1
            while index > -1 and tasks[index]['type'] != type:
                index -= 1
            if index > -1:
                return index
            return None
        if fn == "task_begin" or fn == "task_begin_overlapped":
            if not (('str' in data) or ('pointer' in data)):
                data['str'] = 'Unknown'
            self.time_bounds[0] = min(self.time_bounds[0], data['time'])
            if 'delta' in data and data['delta']:  # turbo mode, only begins are written
                end = data.copy()
                end['time'] = data['time'] + int(data['delta'])
                self.time_bounds[1] = max(self.time_bounds[1], end['time'])
                self.complete_task('task', data, end)  # for now arguments are not supported in turbo tasks. Once argument is passed, task gets converted to normal.
            else:
                get_tasks(None if fn == "task_begin" else data['id']).append(data)
        elif fn == "task_end" or fn == "task_end_overlapped":
            self.time_bounds[1] = max(self.time_bounds[1], data['time'])
            tasks = get_tasks(None if fn == "task_end" else data['id'])
            # a begin event has numeric type one less than its end event
            index = get_last_index(tasks, data['type'] - 1)
            if index is not None:
                item = tasks.pop(index)
                if self.task_postprocessor:
                    self.task_postprocessor.postprocess('task', item, data)
                if not self.handle_special('task', item, data):
                    if data['time'] > item['time']:
                        self.complete_task('task', item, data)
                    else:
                        message('warning', 'Negative length task: %s => %s' % (str(item), str(data)))
            else:
                # an end without begin is legal only when recording was cut/ring-buffered
                assert (self.tree["ring_buffer"] or self.tree['cuts'])
                if 'str' in data:  # nothing to show without name
                    self.no_begin.append(data)
        elif fn == "frame_begin":
            get_tasks(data['id'] if 'id' in data else None).append(data)
        elif fn == "frame_end":
            frames = get_tasks(data['id'] if 'id' in data else None)
            index = get_last_index(frames, 7)  # 7 == frame_begin type code
            if index is not None:
                item = frames.pop(index)
                self.complete_task("frame", item, data)
            else:
                assert (self.tree["ring_buffer"] or self.tree['cuts'])
        elif fn == "metadata_add":
            if 'id' in data:
                task = get_task(data['id'])
                if task:
                    args = task.setdefault('args', {})
                else:
                    # task not started yet: stash args until the id shows up
                    args = thread['args'].setdefault(data['id'], {})
                args[data['str']] = data['delta'] if 'delta' in data else '0x0'
            else:  # global metadata
                if not self.handle_special('meta', data, None):
                    self.global_metadata(data)
        elif fn == "object_snapshot":
            if 'args' in data:
                args = data['args'].copy()
            else:
                args = {'snapshot': {}}
            if 'data' in data:
                # 'data' is a "key=value,key=value" encoded snapshot state
                state = data['data']
                for pair in state.split(","):
                    (key, value) = tuple(pair.split("="))
                    args['snapshot'][key] = value
            data['args'] = args
            self.complete_task(fn, data, data)
        elif fn in ["marker", "counter", "object_new", "object_delete"]:
            if fn == "marker" and data['data'] == 'task':
                # task-scoped markers are converted into continuous frames
                markers = get_tasks("marker_" + (data['id'] if 'id' in data else ""))
                if markers:
                    item = markers.pop()
                    item['type'] = 7  # frame_begin
                    item['domain'] += ".continuous_markers"
                    item['time'] += 1
                    self.complete_task("frame", item, data)
                markers.append(data)
            else:
                if ('id' in data) and (data['id'] in thread['args']):
                    data['args'] = thread['args'][data['id']]
                    del thread['args'][data['id']]
                self.complete_task(fn, data, data)
        elif fn == "relation":
            self.relation(
                data,
                get_task(data['id'] if 'id' in data else None),
                get_task(data['parent']) or find_task(data['parent'])
            )
        else:
            assert (not "Unsupported type:" + fn)
    def compress_counter(self, cache, data):
        """Flush the averaged value of a buffered counter *cache* (when data is None)."""
        values = cache['values']
        if values and not data:
            length = len(values)
            avg_value = sum([value['delta'] for value in values]) / length
            if cache['last'] != avg_value:
                avg_time = int(sum([value['time'] for value in values]) / length)
                # NOTE(review): self.process(...) is provided by the Callbacks subclass
                self.process(values[0]['pid']).thread(values[0]['tid']).counter(values[0]['str']).set_value(avg_time, avg_value)
                cache['last'] = avg_value
            cache['values'] = []
    def handle_special(self, kind, begin, end):
        """Give registered sea decoders first chance to consume the event; True when consumed."""
        if self.sea_decoders:
            for decoder in self.sea_decoders:
                if decoder.handle_special(kind, begin, end):
                    return True
        return False
    def flush_counters(self, domain, data):
        """Emit one averaged 'counter' task per accumulated counter of *domain*."""
        for name, counter in domain['counters'].items():
            common_data = data.copy()
            common_data['time'] = counter['begin'] + (counter['end'] - counter['begin']) / 2
            common_data['str'] = name
            common_data['delta'] = sum(counter['values']) / len(counter['values'])
            self.complete_task('counter', common_data, common_data)
    def flush_compressed_counters(self):
        """Flush every buffered memory counter (see compress_counter)."""
        for pid, threads in self.memcounters.items():
            for tid, counters in threads.items():
                for name, counter in counters.items():
                    self.compress_counter(counter, None)
def default_event_filer(cls, type, begin, end):
    """Default Callbacks.event_filter: drop Metal 'FailureType' events, pass the rest.

    Returns the (possibly unchanged) (type, begin, end) triple, or
    (None, None, None) when the event must be discarded.
    """
    dropped = begin['domain'] == 'Metal' and 'FailureType' in begin['str']
    if dropped:
        return None, None, None
    return type, begin, end
class Callbacks(TaskCombinerCommon):
event_filter = default_event_filer
task_postprocessor = None
def __init__(self, args, tree):
TaskCombinerCommon.__init__(self, args, tree)
self.callbacks = [] # while parsing we might have one to many 'listeners' - output format writers
self.stack_sniffers = [] # only stack listeners
self.allowed_pids = set()
self.processes = {}
self.tasks_from_samples = {}
self.on_finalize_callbacks = []
collection = global_storage('collection')
if 'targets' in collection:
self.allowed_pids = set(collection['targets'])
else:
self.allowed_pids = set()
self.tid_map = self.get_globals()['tid_map']
if hasattr(self.args, 'user_input') and os.path.isdir(self.args.user_input):
tid_map = build_tid_map(self.args, self.args.user_input)
self.tid_map.update(tid_map)
self.allowed_pids |= set(tid_map.values())
for fmt in args.format:
self.callbacks.append(get_exporters()[fmt](args, tree))
if args.target:
if isinstance(args.target, list):
self.allowed_pids += args.target
else:
self.allowed_pids.add(int(args.target))
self.sea_decoders = []
self.globals = self.get_globals()
self.cpus = set()
self.all_cpus_started = os.path.isfile(self.args.user_input) or None
self.proc_names = {}
@classmethod
def get_globals(cls):
return global_storage('Callbacks', {
'starts': {}, 'ends': {}, 'dtrace': {'finished': False}, 'tid_map': {}
})
def add_stack_sniffer(self, sniffer):
self.stack_sniffers.append(sniffer)
@classmethod
def set_event_filter(cls, filter):
prev = cls.event_filter
cls.event_filter = filter
return prev
@classmethod
def set_task_postprocessor(cls, postprocessor):
cls.task_postprocessor = postprocessor
def on_finalize(self, function): # will be called with callbacks(self) as the only argument
self.on_finalize_callbacks.append(function)
def is_empty(self):
return 0 == len(self.callbacks)
def __enter__(self):
[callback.__enter__() for callback in self.callbacks]
return self
def __exit__(self, type, value, traceback):
self.finalize()
[callback.__exit__(type, value, traceback) for callback in self.callbacks] # emulating 'with' statement
return False
def finalize(self):
for decoder in self.sea_decoders:
decoder.finalize()
for kind, data in self.tasks_from_samples.items():
for pid, threads in data.items():
for tid, tasks in threads.items():
self.handle_stack(pid, tid, tasks.last_stack_time + TIME_SHIFT_FOR_GT * len(tasks) + 1, [], kind)
for function in self.on_finalize_callbacks:
function(self)
if self.allowed_pids:
global_storage('collection').setdefault('targets', self.allowed_pids)
self.finish()
def on_event(self, type, data):
if self.event_filter:
type, data, end = self.event_filter(type, data, None)
if not type:
return False
if not is_domain_enabled(data['domain']):
return False
if data.get('internal_name', None) and not is_domain_enabled('%s.%s' % (data['domain'], data['internal_name'])):
return False
self.__call__(type, data)
return True
def complete_task(self, type, begin, end):
if self.event_filter:
type, begin, end = self.event_filter(type, begin, end)
if not type:
return False
if self.handle_special(type, begin, end): # returns True if event is consumed and doesn't require processing
return True
if not is_domain_enabled(begin['domain']):
return False
if end:
# copy here as handler can change the data for own good - this shall not affect other handlers
[callback.complete_task(type, begin.copy(), end.copy() if end else end) for callback in self.callbacks]
return True
else:
return False
def global_metadata(self, data):
[callback.global_metadata(data.copy()) for callback in self.callbacks]
def relation(self, data, head, tail):
for callback in self.callbacks:
callback.relation(data, head, tail)
def get_result(self):
res = []
for callback in self.callbacks:
res += callback.get_targets()
return res
def check_time_in_cs_bounds(self, timestamp, statics={}):
if not statics:
globals = self.get_globals()
if not globals['dtrace']['finished'] or 'context_switch' not in self.globals['ends']:
return None
statics['start'] = globals['starts']['context_switch']
statics['end'] = globals['ends']['context_switch']
return statics['start'] <= timestamp <= statics['end']
def get_pid(self, tid):
if tid in self.tid_map:
return self.tid_map[tid]
return None
class Process:
def __init__(self, callbacks, pid, name):
self.callbacks = callbacks
self.pid = int(pid)
self.threads = {}
if name:
self.set_name(name)
def set_name(self, name):
self.callbacks.set_process_name(self.pid, name)
class Thread:
def __init__(self, process, tid, name):
self.process = process
self.tid = int(tid)
tid_map = self.process.callbacks.tid_map
if process.pid > 0 and self.tid > 0:
if self.tid not in tid_map:
tid_map[self.tid] = process.pid
elif tid_map[self.tid] != process.pid:
message('error', 'TID %d was part of PID %d and now PID %d... How come?' % (self.tid, tid_map[self.tid], process.pid))
self.overlapped = {}
self.to_overlap = {}
self.task_stack = []
self.task_pool = {}
self.snapshots = {}
self.lanes = {}
if name:
self.set_name(name)
self.process.callbacks.on_finalize(self.finalize)
def auto_break_overlapped(self, call_data, begin):
id = call_data['id']
if begin:
call_data['realtime'] = call_data['time'] # as we gonna change 'time'
call_data['lost'] = 0
self.overlapped[id] = call_data
else:
if id in self.overlapped:
real_time = self.overlapped[id]['realtime']
to_remove = []
del self.overlapped[id] # the task has ended, removing it from the pipeline
time_shift = 0
for begin_data in sorted(self.overlapped.values(), key=lambda data: data['realtime']): # finish all and start again to form melting task queue
time_shift += 1 # making sure the order of tasks on timeline, probably has to be done in Chrome code rather
end_data = begin_data.copy() # the end of previous part of task is also here
end_data['time'] = call_data['time'] - time_shift # new begin for every task is here
end_data['type'] = call_data['type']
self.process.callbacks.on_event('task_end_overlapped', end_data) # finish it
if begin_data['realtime'] < real_time:
begin_data['lost'] += 1
if begin_data['lost'] > 10: # we seem lost the end ETW call
to_remove.append(begin_data['id']) # main candidate is the event that started earlier but nor finished when finished the one started later
else:
begin_data['time'] = call_data['time'] + time_shift # new begin for every task is here
self.process.callbacks.on_event('task_begin_overlapped', begin_data) # and start again
for id in to_remove: # FIXME: but it's better somehow to detect never ending tasks and not show them at all or mark somehow
if id in self.overlapped:
del self.overlapped[id] # the task end was probably lost
else:
message('error', '???')
def process_overlapped(self, threshold=100):
if not threshold or 0 != (len(self.to_overlap) % threshold):
return
keys = sorted(self.to_overlap)[0:threshold//2]
to_del = set()
for key in keys:
task = self.to_overlap[key]
if task.overlap_begin:
self.auto_break_overlapped(task.data, True)
self.process.callbacks.on_event("task_begin_overlapped", task.data)
task.overlap_begin = False
else:
end_data = task.data.copy()
end_data['time'] = key
end_data['type'] += 1
self.auto_break_overlapped(end_data, False)
self.process.callbacks.on_event("task_end_overlapped", end_data)
to_del.add(key)
for key in to_del:
del self.to_overlap[key]
def finalize(self, _):
self.process_overlapped(0)
def set_name(self, name):
self.process.callbacks.set_thread_name(self.process.pid, self.tid, name)
class EventBase:
def __init__(self, thread, name, domain, internal_name=None):
self.thread = thread
self.name = name
self.domain = domain
self.internal_name = internal_name
class Counter(EventBase):
def __init__(self, *args):
Callbacks.Process.Thread.EventBase.__init__(self, *args)
def set_value(self, time_stamp, value):
data = {
'pid': self.thread.process.pid, 'tid': self.thread.tid,
'domain': self.domain, 'str': self.name,
'time': time_stamp, 'delta': value, 'type': 6,
'internal_name': self.internal_name
}
self.thread.process.callbacks.on_event('counter', data)
def set_multi_value(self, time_stamp, values_dict): # values_dict is name:value dictionary
data = {
'pid': self.thread.process.pid, 'tid': self.thread.tid,
'domain': self.domain, 'str': self.name,
'time': time_stamp, 'args': values_dict, 'type': 6
}
self.thread.process.callbacks.on_event('counter', data)
def counter(self, name, domain='sea', internal_name=None):
return Callbacks.Process.Thread.Counter(self, name, domain, internal_name)
class Marker(EventBase):
def __init__(self, thread, scope, name, domain):
Callbacks.Process.Thread.EventBase.__init__(self, thread, name, domain)
self.scope = scope
def set(self, time_stamp, args=None):
data = {
'pid': self.thread.process.pid, 'tid': self.thread.tid,
'domain': self.domain, 'str': self.name,
'time': time_stamp, 'type': 5, 'data': self.scope
}
if args is not None:
data.update({'args': args})
return self.thread.process.callbacks.on_event('marker', data)
def marker(self, scope, name, domain='sea'): # scope is one of 'task', 'global', 'process', 'thread'
scopes = {'task': 'task', 'global': 'global', 'process': 'track_group', 'thread': 'track'}
return Callbacks.Process.Thread.Marker(self, scopes[scope], name, domain)
class TaskBase(EventBase):
def __init__(self, type_id, type_name, thread, name, domain):
Callbacks.Process.Thread.EventBase.__init__(self, thread, name, domain)
self.data = None
self.args = {}
self.meta = {}
# These must be set in descendants!
self.event_type = type_id # first of types
self.event_name = type_name
self.overlap_begin = True
def __begin(self, time_stamp, task_id, args, meta):
data = {
'pid': self.thread.process.pid, 'tid': self.thread.tid,
'domain': self.domain, 'str': self.name,
'time': time_stamp, 'str': self.name, 'type': self.event_type
}
if task_id is not None:
data.update({'id': task_id})
if args:
data.update({'args': args})
if meta:
data.update(meta)
return data
def begin(self, time_stamp, task_id=None, args={}, meta={}):
self.data = self.__begin(time_stamp, task_id, args, meta)
if self.event_type == 2: # overlapped task
self.thread.auto_break_overlapped(self.data, True)
self.thread.process.callbacks.on_event("task_begin_overlapped", self.data)
return self
def add_args(self, args): # dictionary is expected
self.args.update(args)
return self
def add_meta(self, meta): # dictionary is expected
self.meta.update(meta)
return self
def get_data(self):
return self.data
def get_args(self):
args = self.data['args'].copy()
args.update(self.args)
return args
def end(self, time_stamp):
assert self.data # expected to be initialized in self.begin call
if time_stamp:
end_data = self.data.copy()
end_data.update({'time': time_stamp, 'type': self.event_type + 1})
if self.args:
if 'args' in end_data:
end_data['args'].update(self.args)
else:
end_data['args'] = self.args
if self.meta:
end_data.update(self.meta)
else:
end_data = None # special case when end is unknown and has to be calculated by viewer
if self.event_type == 2: # overlapped task
self.thread.auto_break_overlapped(end_data, False)
self.thread.process.callbacks.on_event("task_end_overlapped", end_data)
else:
self.thread.process.callbacks.complete_task(self.event_name, self.data, end_data)
self.data = None
self.args = {}
self.meta = {}
def complete(self, start_time, duration, task_id=None, args={}, meta={}):
begin_data = self.__begin(start_time, task_id, args, meta)
end_data = begin_data.copy()
end_data['time'] = start_time + duration
end_data['type'] = self.event_type + 1
self.thread.process.callbacks.complete_task(self.event_name, begin_data, end_data)
return begin_data
def end_overlap(self, time_stamp):
while self.data['time'] in self.thread.to_overlap:
self.data['time'] += 1
self.thread.to_overlap[self.data['time']] = self
while time_stamp in self.thread.to_overlap:
time_stamp -= 1
self.thread.to_overlap[time_stamp] = self
self.data['id'] = time_stamp
self.data['type'] = self.event_type = 2
self.thread.process_overlapped()
class Task(TaskBase):
def __init__(self, thread, name, domain, overlapped):
Callbacks.Process.Thread.TaskBase.__init__(
self,
2 if overlapped else 0,
'task',
thread,
name, domain
)
self.relation = None
self.related_begin = None
def end(self, time_stamp):
begin_data = self.data.copy() # expected to be initialized in self.begin call
Callbacks.Process.Thread.TaskBase.end(self, time_stamp)
self.__check_relation(begin_data)
def __check_relation(self, begin):
if not self.relation:
return
if self.related_begin: # it's the later task, let's emit the relation
self.__emit_relation(begin, self.related_begin)
self.related_begin = None
else: # we store our begin in the related task and it will emit the relation on its end
self.relation.related_begin = begin
self.relation = None
def __emit_relation(self, left, right):
relation = (left.copy(), right.copy(), left)
if 'realtime' in relation[1]:
relation[1]['time'] = relation[1]['realtime']
if 'realtime' in relation[2]:
relation[2]['time'] = relation[2]['realtime']
relation[0]['parent'] = left['id'] if 'id' in left else id(left)
self.thread.process.callbacks.relation(*relation)
def complete(self, start_time, duration, task_id=None, args={}, meta={}):
begin_data = Callbacks.Process.Thread.TaskBase.complete(self, start_time, duration, task_id, args, meta)
self.__check_relation(begin_data)
def relate(self, task): # relation is being written when last of two related tasks was fully emitted
if self.relation != task:
self.relation = task
task.relate(self)
def end_overlap(self, time_stamp):
Callbacks.Process.Thread.TaskBase.end_overlap(self, time_stamp)
if self.relation:
self.__emit_relation(self.data, self.relation.data)
def task(self, name, domain='sea', overlapped=False):
return Callbacks.Process.Thread.Task(self, name, domain, overlapped)
class Frame(TaskBase):
def __init__(self, thread, name, domain):
Callbacks.Process.Thread.TaskBase.__init__(self, 7, 'frame', thread, name, domain)
def frame(self, name, domain='sea'):
return Callbacks.Process.Thread.Frame(self, name, domain)
class Lane:
def __init__(self, thread, name, domain):
self.thread, self.domain = thread, domain
self.name = '%s (%d):' % (name, thread.tid)
self.first_frame = None
self.id = hex(hash(self))
self.thread.process.callbacks.on_finalize(self.finalize)
def finalize(self, _):
if self.first_frame:
Callbacks.Process.Thread\
.TaskBase(7, 'frame', self.thread, self.name, self.domain) \
.begin(self.first_frame - 1000, self.id).end(None) # the open-ended frame (automatically closed by viewer)
def frame_begin(self, time_stamp, name, args={}, meta={}):
if not self.first_frame or time_stamp < self.first_frame:
self.first_frame = time_stamp
return Callbacks.Process.Thread.TaskBase(7, 'frame', self.thread, name, self.domain).begin(time_stamp, self.id, args, meta)
def lane(self, name, domain='sea'):
if name not in self.lanes:
self.lanes[name] = Callbacks.Process.Thread.Lane(self, name, domain)
return self.lanes[name]
class Object(EventBase):
def __init__(self, thread, id, name, domain):
Callbacks.Process.Thread.EventBase.__init__(self, thread, name, domain)
self.id = id
if not self.thread.snapshots:
self.thread.snapshots = {'last_time': 0}
def create(self, time_stamp):
data = {
'pid': self.thread.process.pid, 'tid': self.thread.tid,
'domain': self.domain, 'str': self.name,
'time': time_stamp, 'type': 9, 'id': self.id
}
self.thread.process.callbacks.on_event("object_new", data)
return self
def snapshot(self, time_stamp, args):
if time_stamp is None or time_stamp <= self.thread.snapshots['last_time']:
time_stamp = self.thread.snapshots['last_time'] + 1
self.thread.snapshots['last_time'] = time_stamp
data = {
'pid': self.thread.process.pid, 'tid': self.thread.tid,
'domain': self.domain, 'str': self.name,
'time': time_stamp, 'type': 10, 'id': self.id,
'args': {'snapshot': args}
}
self.thread.process.callbacks.on_event("object_snapshot", data)
return self
@staticmethod # use to prepare argument for 'snapshot' call, only png in base64 string is supported by chrome
def create_screenshot_arg(png_base64):
return {'screenshot': png_base64}
def destroy(self, time_stamp):
data = {
'pid': self.thread.process.pid, 'tid': self.thread.tid,
'domain': self.domain, 'str': self.name,
'time': time_stamp, 'type': 11, 'id': self.id
}
self.thread.process.callbacks.on_event("object_delete", data)
def object(self, id, name, domain='sea'):
return Callbacks.Process.Thread.Object(self, id, name, domain)
def thread(self, tid, name=None):
if tid not in self.threads:
self.threads[tid] = Callbacks.Process.Thread(self, tid, name)
return self.threads[tid]
def process(self, pid, name=None):
if pid not in self.processes:
self.processes[pid] = Callbacks.Process(self, pid, name)
return self.processes[pid]
def vsync(self, time_stamp, args={}, statics={}):
if not statics:
statics['marker'] = self.process(-1).thread(-1, 'VSYNC').marker('thread', 'vblank', 'gpu')
args.update({'AbsTime': time_stamp})
statics['marker'].set(time_stamp, args)
def context_switch(self, time_stamp, cpu, prev_tid, next_tid, prev_name='', next_name='', prev_state='S', prev_prio=0, next_prio=0):
if cpu not in self.cpus:
self.cpus.add(cpu)
all_cpus_started = max(self.cpus) + 1 == len(self.cpus)
if self.all_cpus_started != all_cpus_started:
self.globals['starts']['context_switch'] = time_stamp
self.all_cpus_started = all_cpus_started
if not self.all_cpus_started:
return
self.globals['ends']['context_switch'] = time_stamp
for callback in self.callbacks:
callback.context_switch(
time_stamp, cpu,
{
'tid': prev_tid,
'name': prev_name.replace(' ', '_'),
'state': prev_state,
'prio': prev_prio,
},
{
'tid': next_tid,
'prio': next_prio,
'name': next_name.replace(' ', '_')
}
)
def wakeup(self, time_stamp, cpu, prev_pid, prev_tid, next_pid, next_tid, prev_name='', next_name='', sync_prim='', sync_prim_addr=None):
    """Record a thread wakeup: a short 'wakes' task on the waker related to a
    short 'woken' task on the woken thread, then notify callback sinks.

    Returns False (and does nothing) when neither pid is in allowed_pids.
    """
    if prev_pid not in self.allowed_pids and next_pid not in self.allowed_pids:
        return False
    if sync_prim_addr:
        args = {'target': next_tid, 'type': sync_prim, 'addr': sync_prim_addr}
    else:
        args = {}
    args.update({'target': next_tid, 'by': prev_tid})
    event_width = 2000  # fixed visual width of the wake/woken marker pair
    waker = self.process(prev_pid).thread(prev_tid).task('wakes').begin(time_stamp - event_width, args=args)
    woken = self.process(next_pid).thread(next_tid).task('woken').begin(time_stamp, args=args)
    waker.relate(woken)
    waker.end(time_stamp - event_width / 2)
    woken.end(time_stamp + event_width / 2)
    for sink in self.callbacks:
        sink.wakeup(
            time_stamp, cpu,
            {'pid': prev_pid, 'tid': prev_tid, 'name': prev_name.replace(' ', '_')},
            {'pid': next_pid, 'tid': next_tid, 'name': next_name.replace(' ', '_')},
        )
def get_process_name(self, pid):
    """Return the recorded name list for pid, or None when unknown."""
    return self.proc_names.get(pid)
def set_process_name(self, pid, name, labels=None):
    """Record (or extend) the display name of a process and emit metadata.

    Repeated distinct names are chained as 'old->new' so renames stay visible.
    Fix: the mutable default ``labels=[]`` is replaced with a None sentinel
    (behavior-compatible; avoids the shared-default pitfall).
    """
    labels = [] if labels is None else labels
    # allowed (tracked) pids sort first in the UI via delta == -1
    order = -1 if pid in self.allowed_pids else pid
    if pid not in self.proc_names:
        self.proc_names[pid] = [name]
        self.__call__("metadata_add", {'domain': 'IntelSEAPI', 'str': '__process__', 'pid': pid, 'tid': -1, 'delta': order, 'data': name, 'labels': labels})
    elif name not in self.proc_names[pid]:
        self.proc_names[pid].append(name)
        full_name = '->'.join(self.proc_names[pid])
        self.__call__("metadata_add", {'domain': 'IntelSEAPI', 'str': '__process__', 'pid': pid, 'tid': -1, 'delta': order, 'data': full_name, 'labels': labels})
        message('warning', 'Pid %d name changed: %s' % (pid, full_name))
def set_thread_name(self, pid, tid, name):
    """Emit thread-name metadata; display name is '<name> (<tid>)', ordered by tid."""
    self.__call__("metadata_add", {'domain': 'IntelSEAPI', 'str': '__thread__', 'pid': pid, 'tid': tid, 'data': '%s (%d)' % (name, tid), 'delta': tid})
def add_metadata(self, name, data):
    """Emit a free-form metadata record (no thread association: tid=None)."""
    self.__call__("metadata_add", {'domain': 'IntelSEAPI', 'data': data, 'str': name, 'tid': None})
class AttrDict(dict):
    # dict subclass that permits attribute assignment (used by handle_stack to
    # hang bookkeeping like last_stack_time on a per-thread task map)
    pass  # native dict() refuses setattr call
def handle_stack(self, pid, tid, time, stack, kind='sampling'):
    """Convert successive stack samples into begin/end task events.

    Keeps, per (kind, pid, tid), the set of frames seen in the previous
    sample. Frames that disappeared are flushed as finished tasks; frames
    above the deepest removed frame are restarted; new frames are opened.
    NOTE(review): parameter `time` shadows the time module within this body.
    """
    use_lanes = False
    tasks = self.tasks_from_samples.setdefault(kind, {}).setdefault(pid, {}).setdefault(tid, self.AttrDict())
    tasks.last_stack_time = time  # attribute, not a key — AttrDict allows this
    to_remove = []
    if not use_lanes:
        # negate ids to render sampled stacks on pseudo tracks, apart from real ones
        pid = -pid if pid > 100 else pid
        tid = -tid
    # Find currently present tasks:
    present = set()
    for frame in stack:
        ptr = frame['ptr']
        if not frame['str']:
            frame['str'] = '0x%x' % ptr
        else:
            frame['str'] = '%s(0x%x)' % (frame['str'], ptr)
        present.add(ptr)
    # Remove currently absent tasks (they are finished):
    for ptr in tasks:
        if ptr not in present:
            to_remove.append(ptr)
    to_add = []
    # Find affected tasks, those to the right of most recent of removed. These affected are to be 'restarted'
    if to_remove:
        leftmost_time = min(tasks[ptr]['begin'] for ptr in to_remove)
        for ptr, task in tasks.items():
            if task['begin'] > leftmost_time and ptr not in to_remove:
                to_remove.append(ptr)
                to_add.append(task.copy())
    # Actual removal of the tasks with flushing them to timeline:
    to_remove.sort(key=lambda ptr: tasks[ptr]['begin'])  # flush outermost (oldest) first
    shift = 1  # successive ends are staggered by TIME_SHIFT_FOR_GT to keep nesting valid
    if use_lanes:
        lane = self.process(pid).thread(tid).lane(kind)  # TODO: implement proper lane frames
    else:
        thread = self.process(pid).thread(tid)
    for ptr in to_remove:
        task = tasks[ptr]
        end_time = time - TIME_SHIFT_FOR_GT * shift
        if end_time <= task['begin']:  # this might happen on finalization and with very deep stack
            continue
        args = {'module': task['module'].replace('\\', '/')}
        if '__file__' in task and '__line__' in task:
            args.update({
                'pos': '%s(%d)' % (task['__file__'], int(task['__line__']))
            })
        if use_lanes:
            lane.frame_begin(
                task['begin'], task['str'], args=args, meta={'sampled': True}
            ).end(end_time)
        else:
            if kind in ['sampling', 'ustack'] or (pid == 0 and kind == 'kstack'):  # temporary workaround for OSX case where there are three stacks
                thread.task(task['str']).begin(task['begin'], args=args, meta={'sampled': True}).end(end_time)
        del tasks[ptr]
        shift += 1
    # pre-sort restarted tasks by their initial time to keep natural order
    to_add.sort(key=lambda task: task['begin'])
    # Add new tasks to the end of the list
    for frame in reversed(stack):  # Frames originally come in reverse order [bar, foo, main]
        if frame['ptr'] not in tasks:
            to_add.append(frame.copy())
    # Actual adding of tasks:
    shift = 1  # stagger begins the same way as ends above
    for task in to_add:
        task['begin'] = time + TIME_SHIFT_FOR_GT * shift
        tasks[task['ptr']] = task
        shift += 1
    for callback in self.callbacks + self.stack_sniffers:
        callback.handle_stack({'pid': pid, 'tid': tid, 'time': time}, stack, kind)
# example:
#
# the_thread = callbacks.process(-1).thread(-1)
# counter = the_thread.counter(domain='mydomain', name='countername')
# for i in range(5):
# counter.set_value(time_stamp=%timestamp%, value=i)
# task = the_thread.task('MY_TASK') # same with frames
# for i in range(7):
# task.begin(%timestamp%)
# task.add_args({'a':1, 'b':'2'})
# task.end(%timestamp%)
# FIXME: doesn't belong this file, move to 'SEA reader' or something
class FileWrapper:
    """Sequential reader of one per-thread SEA binary trace file.

    Exposes the current decoded record via get_record()/next() and can be
    chained to the chronologically-next file via set_next()/get_next().
    Fix: the local that held the chunk header no longer shadows the builtin
    ``tuple``.
    """
    def __init__(self, path, args, tree, domain, tid):
        self.args = args
        self.tree = tree
        self.domain = domain
        self.tid = tid
        self.next_wrapper = None
        self.file = open(path, "rb")
        self.record = self.read()  # pre-read first record so get_record() is valid immediately

    def __del__(self):
        self.file.close()

    def next(self):
        """Advance to the following record (None at end of file)."""
        self.record = self.read()

    def get_record(self):
        return self.record

    def get_pos(self):
        return self.file.tell()

    def get_size(self):
        return os.path.getsize(self.file.name)

    def get_path(self):
        return self.file.name

    def read(self):
        """Decode one record: fixed header then optional flag-driven fields."""
        call = {"tid": self.tid, "pid": self.tree["pid"], "domain": self.domain}
        header = read_chunk_header(self.file)
        if header == (0, 0, 0):  # mem mapping wasn't trimmed on close, zero padding goes further
            return None
        call["time"] = header[0]
        assert (header[1] < len(TaskTypes))  # sanity check
        call["type"] = header[1]
        flags = header[2]
        if flags & 0x1:  # has id
            chunk = self.file.read(2 * 8)
            call["id"] = struct.unpack('QQ', chunk)[0]
        if flags & 0x2:  # has parent
            chunk = self.file.read(2 * 8)
            call["parent"] = struct.unpack('QQ', chunk)[0]
        if flags & 0x4:  # has string
            chunk = self.file.read(8)
            str_id = struct.unpack('Q', chunk)[0]  # string handle
            call["str"] = self.tree["strings"][str_id]
        if flags & 0x8:  # has tid, that differs from the calling thread (virtual tracks)
            chunk = self.file.read(8)
            call["tid"] = int(struct.unpack('q', chunk)[0])
        if flags & 0x10:  # has data
            chunk = self.file.read(8)
            length = struct.unpack('Q', chunk)[0]
            call["data"] = self.file.read(length).decode()
        if flags & 0x20:  # has delta
            chunk = self.file.read(8)
            call["delta"] = struct.unpack('d', chunk)[0]
        if flags & 0x40:  # has pointer
            chunk = self.file.read(8)
            ptr = struct.unpack('Q', chunk)[0]
            if not resolve_pointer(self.args, self.tree, ptr, call):
                call["pointer"] = ptr  # keep raw address when symbolization fails
        if flags & 0x80:  # has pseudo pid
            chunk = self.file.read(8)
            call["pid"] = struct.unpack('q', chunk)[0]
        return call

    def set_next(self, wrapper):
        self.next_wrapper = wrapper

    def get_next(self):
        return self.next_wrapper
def transform2(args, tree, skip_fn=None):
    """Merge all per-thread trace files of a run into one ordered event stream.

    Files sharing a directory/name are chained by the timestamp of their first
    record; then records from all files are consumed in global time order and
    fed to callbacks.on_event. Returns callbacks.get_result().
    """
    with Callbacks(args, tree) as callbacks:
        if callbacks.is_empty():
            return callbacks.get_result()
        wrappers = {}
        for domain, content in tree["domains"].items():  # go thru domains
            for tid, path in content["files"]:  # go thru per thread files
                parts = split_filename(path)
                file_wrapper = FileWrapper(path, args, tree, domain, tid)
                if file_wrapper.get_record():  # record is None if something wrong with file reading
                    wrappers.setdefault(parts['dir'] + '/' + parts['name'], []).append(file_wrapper)
        for unordered in wrappers.values():  # chain wrappers by time
            ordered = sorted(unordered, key=lambda wrapper: wrapper.get_record()['time'])
            prev = None
            for wrapper in ordered:
                if prev:
                    prev.set_next(wrapper)
                prev = wrapper
        files = []
        for unordered in wrappers.values():
            for wrapper in unordered:
                next = wrapper.get_next()  # NOTE(review): unused, and shadows builtin next
                if skip_fn and skip_fn(wrapper.get_path()):  # for "cut" support
                    continue
                files.append(wrapper)
        if verbose_level() > verbose_level('warning'):
            progress = DummyWith()
        else:
            size = sum([file.get_size() for file in files])
            progress = Progress(size, 50, 'Converting: %s (%s)' % (os.path.basename(args.input), format_bytes(size)))
        with progress:
            count = 0
            while True:  # records iteration: repeatedly pick the earliest record across all files
                record = None
                earliest = None
                for file in files:
                    rec = file.get_record()
                    if not rec:  # finished
                        continue
                    if not record or rec['time'] < record['time']:
                        record = rec
                        earliest = file
                if not record:  # all finished
                    break
                earliest.next()
                if message('info', "%d\t%s\t%s" % (count, TaskTypes[record['type']], record)):
                    pass
                elif count % ProgressConst == 0:
                    progress.tick(sum([file.get_pos() for file in files]))
                callbacks.on_event(TaskTypes[record['type']], record)
                count += 1
        callbacks("metadata_add", {'domain': 'IntelSEAPI', 'str': '__process__', 'pid': tree["pid"], 'tid': -1, 'delta': -1})
        for pid, name in tree['groups'].items():
            # NOTE(review): passes tree["pid"] for every group entry while iterating
            # per-group pids — looks like it may have been meant to pass `pid`; confirm.
            callbacks.set_process_name(tree["pid"], name)
        return callbacks.get_result()
# FIXME: doesn't belong this file, move to 'utils'
def get_module_by_ptr(tree, ptr):
    """Map a code address to its loaded module.

    Args:
        tree: parsed trace tree; tree['modules'] maps load address ->
              (path, size) pairs (size stored as a string).
        ptr:  address to look up.
    Returns:
        (load_address, module_path) when ptr falls strictly inside a module's
        [base, base+size) span (base itself excluded, preserving original
        semantics), else (None, None).
    Fix: an empty tree['modules'] no longer raises IndexError.
    """
    keys = list(tree['modules'].keys())
    if not keys:  # no modules recorded — nothing to resolve against
        return None, None
    keys.sort()  # looking for first bigger than the address; previous is the module we search for
    item = keys[0]
    for key in keys[1:]:
        if key > ptr:
            break
        item = key
    module = tree['modules'][item]
    if item < ptr < item + int(module[1]):
        return item, module[0]
    else:
        return None, None
def win_parse_symbols(symbols):
    """Parse the Windows symbolizer output into an address-sorted symbol list.

    Each useful line is 'addr<TAB>size<TAB>name[<TAB>pos]'; zero-size entries
    and lines without tabs are skipped.
    """
    parsed = []
    for raw in symbols.split('\n'):
        raw = raw.strip()
        if not raw or '\t' not in raw:
            continue
        fields = raw.strip().split('\t')
        addr, size, name = fields[:3]
        if not int(size):
            continue
        entry = {'addr': int(addr), 'size': int(size), 'name': name}
        if len(fields) == 4:
            entry['pos'] = fields[3]
        parsed.append(entry)
    parsed.sort(key=lambda entry: entry['addr'])
    return parsed
def win_resolve(symbols, addr):
    """Resolve addr against a sorted symbol list from win_parse_symbols.

    Returns 'pos\\nname' when position info exists, the bare name otherwise,
    or '' when addr is not covered by any symbol.
    """
    idx = bisect_right(symbols, addr, lambda data: data['addr']) - 1
    if idx <= -1:
        return ''
    entry = symbols[idx]
    if not (entry['addr'] <= addr <= entry['addr'] + entry['size']):
        return ''
    if 'pos' in entry:
        return entry['pos'] + '\n' + entry['name']
    return entry['name']
def resolve_cmd(args, path, load_addr, ptr, cache={}):  # cache={} is an intentional cross-call memo of Windows symbol tables
    """Symbolize address ptr in module `path` using the platform's tool.

    Windows: runs TestIntelSEAPI<bits>.exe once per module and caches the
    parsed table; macOS: atos; Linux: addr2line. Returns the raw symbolizer
    output (Windows: the resolved string) or '' on failure.
    """
    if sys.platform == 'win32':
        if path.startswith('\\'):
            path = 'c:' + path  # bare device paths lack the drive letter
        if path.lower() in cache:
            return win_resolve(cache[path.lower()], ptr - load_addr)
        bitness = '32' if '32' in platform.architecture()[0] else '64'
        executable = os.path.sep.join([args.bindir, 'TestIntelSEAPI%s.exe' % bitness])
        cmd = '"%s" "%s"' % (executable, path)
    elif sys.platform == 'darwin':
        cmd = 'atos -o "%s" -l %s %s' % (path, to_hex(load_addr), to_hex(ptr))
    elif 'linux' in sys.platform:
        cmd = 'addr2line %s -e "%s" -i -p -f -C' % (to_hex(ptr), path)
    else:
        assert (not "Unsupported platform!")
    env = dict(os.environ)
    if "INTEL_SEA_VERBOSE" in env:
        # keep the child symbolizer quiet even if our own verbosity is on
        del env["INTEL_SEA_VERBOSE"]
    try:
        proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
        (symbol, err) = proc.communicate()
    except IOError:
        err = traceback.format_exc()
        # presumably an fd-exhaustion workaround: force-collect leaked Popen objects
        import gc
        gc.collect()
        print("gc.collect()")
    except:  # NOTE(review): bare except — catches everything, incl. KeyboardInterrupt; confirm intended
        err = traceback.format_exc()
    if err:
        print(cmd)
        print(err)
        return ''  # `symbol` may be unbound here, so we must return before touching it
    if sys.platform == 'win32':
        cache[path.lower()] = win_parse_symbols(symbol.decode())
        return win_resolve(cache[path.lower()], ptr - load_addr)
    return symbol
# finds first bigger
def bisect_right(array, value, key=lambda item: item):  # upper_bound via binary search
    """Return the index of the first element whose key is strictly greater
    than value (len(array) when no such element exists)."""
    low, high = 0, len(array)
    while low < high:
        middle = (low + high) // 2
        if key(array[middle]) <= value:
            low = middle + 1
        else:
            high = middle
    return low
def resolve_jit(tree, ptr, cache):
    """Resolve ptr against the JIT-compiled method table, filling cache[ptr].

    Returns True on success, False when ptr is outside the JIT range or past
    the matched method's size. NOTE: when ptr is in range but no entry
    matches (idx == -1) the function falls through and returns None —
    callers only test truthiness, so this is equivalent to False.
    """
    if 'jit' not in tree:
        return False
    jit = tree['jit']
    if jit['start'] <= ptr <= jit['end']:
        jit_data = jit['data']  # sorted by 'addr'; find last entry at or below ptr
        idx = bisect_right(jit_data, ptr, lambda item: item['addr']) - 1
        if idx > -1:
            offset = ptr - jit_data[idx]['addr']
            if offset > jit_data[idx]['size']:
                return False
            cache[ptr] = {'module': 'jit'}
            cache[ptr]['str'] = jit_data[idx]['name']
            if not cache[ptr]['str']:
                cache[ptr]['str'] = 'jit_method_%d' % jit_data[idx]['id']
            cache[ptr]['__file__'] = jit_data[idx]['file']
            lines = jit_data[idx]['lines']  # (offset, line) pairs sorted by offset
            idx = bisect_right(lines, offset, lambda item: item[0]) - 1
            if idx > -1:
                cache[ptr]['__line__'] = lines[idx][1]
            return True
    else:
        return False
def resolve_pointer(args, tree, ptr, call, cache={}):  # cache={} is an intentional cross-call memo of resolved addresses
    """Symbolize ptr (JIT table first, then native modules) and merge the
    result ('str', 'module', optionally '__file__'/'__line__') into `call`.

    Returns True when a symbol name was resolved, False otherwise.
    """
    if ptr not in cache:
        if not resolve_jit(tree, ptr, cache):
            (load_addr, path) = get_module_by_ptr(tree, ptr)
            if path is None or not os.path.exists(path):
                cache[ptr] = None  # negative-cache unresolvable addresses
            else:
                symbol = resolve_cmd(args, path, load_addr, ptr)
                cache[ptr] = {'module': path}
                lines = symbol.splitlines()
                if lines:
                    # per-platform parsing of the symbolizer's text output
                    if sys.platform == 'win32':
                        if len(lines) == 1:
                            cache[ptr]['str'] = lines[0]
                        elif len(lines) == 2:
                            cache[ptr]['str'] = lines[1]
                            (cache[ptr]['__file__'], cache[ptr]['__line__']) = lines[0].rstrip(")").rsplit("(", 1)
                    elif sys.platform == 'darwin':
                        # atos: "name (in module) (file:line)"
                        if '(in' in lines[0]:
                            parts = lines[0].split(" (in ")
                            cache[ptr]['str'] = parts[0]
                            if ') (' in parts[1]:
                                (cache[ptr]['__file__'], cache[ptr]['__line__']) = parts[1].split(') (')[1].split(':')
                                cache[ptr]['__line__'] = cache[ptr]['__line__'].strip(')')
                    else:
                        # addr2line -p: "name at file:line"
                        if ' at ' in lines[0]:
                            (cache[ptr]['str'], fileline) = lines[0].split(' at ')
                            (cache[ptr]['__file__'], cache[ptr]['__line__']) = fileline.strip().split(':')
    if not cache[ptr] or 'str' not in cache[ptr]:
        return False
    call.update(cache[ptr])
    return True
def resolve_stack(args, tree, data):
    """Decode a raw stack blob into a list of resolved frame dicts.

    `data` is a packed array of 8-byte (64-bit process) or 4-byte (32-bit)
    addresses; frames that cannot be symbolized are dropped.
    Fix: use integer division — under Python 3 ``len(data) / 8`` is a float
    and ``'Q' * float`` raises TypeError in struct.unpack.
    """
    if tree['process']['bits'] == 64:
        frames = struct.unpack('Q' * (len(data) // 8), data)
    else:
        frames = struct.unpack('I' * (len(data) // 4), data)
    stack = []
    for frame in frames:
        res = {'ptr': frame}
        if resolve_pointer(args, tree, frame, res):
            stack.append(res)
    return stack
def attachme():
    """Block until a debugger attaches (sys.gettrace() becomes non-None),
    then pause one second to let it settle."""
    print("Attach me!")
    while sys.gettrace() is None:
        pass
    import time
    time.sleep(1)
class TaskCombiner:
    """Abstract base for trace-output backends.

    Subclasses implement get_targets/complete_task/finish; the base provides
    context-manager support and nanosecond->microsecond time conversion.
    """

    not_implemented_err_string = 'You must implement this method in the TaskCombiner derived class!'

    def __init__(self, args, tree):
        self.tree = tree
        self.args = args
        self.event_map = {}
        self.events = []
        # conversion defaults: no offset, nanoseconds scaled to microseconds
        self.source_scale_start = 0
        self.target_scale_start = 0
        self.ratio = 1. / 1000

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.finish()
        return False  # never swallow exceptions

    def get_targets(self):
        """Returns list with the paths to output files."""
        raise NotImplementedError(TaskCombiner.not_implemented_err_string)

    def complete_task(self, type, begin, end):
        """Handle one finished task in the derived class output format.

        Args:
            type: Task type.
            begin: Dictionary with task begin data.
            end: Dictionary with task end data.
        """
        raise NotImplementedError(TaskCombiner.not_implemented_err_string)

    def finish(self):
        """Called to finalize a derived class."""
        raise NotImplementedError(TaskCombiner.not_implemented_err_string)

    def convert_time(self, time):
        """Rescale a source timestamp into the target timeline."""
        return (time - self.source_scale_start) * self.ratio + self.target_scale_start

    def global_metadata(self, data):
        pass  # optional hook

    def relation(self, data, head, tail):
        pass  # optional hook

    def handle_stack(self, task, stack, name='stack'):
        pass  # optional hook

    def context_switch(self, time, cpu, prev, next):
        """Process a context switch on a CPU.

        :param cpu: CPU number (int)
        :param prev: previous task description (dict. Example: {'tid': 2935135, 'state': 'S', 'name': u'vtplay', 'prio': 31})
        :param next: next task description (dict. see above)
        """
        pass

    def wakeup(self, time, cpu, prev, next):
        """Process a thread wakeup on a CPU.

        :param cpu: CPU on which the event occurred
        :param prev: currently running process description for CPU (dict. Example: {'tid': 123, 'name': 'kuku', 'pid': 12})
        :param next: thread being woken up (dict. see above)
        """
        pass
def to_hex(value):
    """Format an integer as an uppercase hex string with a lowercase 0x prefix."""
    return "0x%X" % value
def get_name(begin):
    """Best-effort display name for an event record: its string if present,
    else a func<0x...> form from its pointer, else '<unknown>'."""
    if 'str' in begin:
        return begin['str']
    if 'pointer' in begin:
        return 'func<{}>'.format(to_hex(begin['pointer']))
    return "<unknown>"
def get_filter_path():
    """Path of the domain filter file: $INTEL_SEA_FILTER (with env vars
    expanded) when set, otherwise ~/.isea_domains.fltr."""
    override = os.environ.get('INTEL_SEA_FILTER')
    if override:
        return subst_env_vars(override)
    return os.path.join(UserProfile, '.isea_domains.fltr')
def is_domain_enabled(domain, default=True):
    """Whether a trace domain is enabled per the filter file.

    Lines starting with '#' disable their domain. The parsed map is cached
    in global storage; unknown domains are recorded with `default`.
    """
    domains = global_storage('sea.is_domain_enabled', {})
    if not domains:
        path = get_filter_path()
        try:
            with open(path) as filter_file:
                for line in filter_file:
                    enabled = not line.startswith('#')
                    name = line.strip(' #\n\r')
                    domains[name] = enabled
                    if not enabled:
                        message('warning', 'The domain "%s" is disabled in %s' % (name, path))
        except IOError:
            pass  # no filter file yet — everything stays at its default
    return domains.setdefault(domain, default)
def save_domains():
    """Write the cached domain-enable map back to the filter file
    (disabled domains are prefixed with '#')."""
    domains = global_storage('sea.is_domain_enabled', {})
    path = get_filter_path()
    print("Saving domains:", path)
    with open(path, 'w') as out:
        for name, enabled in domains.items():
            prefix = '' if enabled else '#'
            out.write('%s%s\n' % (prefix, name))
class GraphCombiner(TaskCombiner):
    """TaskCombiner that aggregates events into per-domain statistics and a
    caller->callee / thread->task relation graph (for graph output formats)."""
    def __init__(self, args, tree):
        TaskCombiner.__init__(self, args, tree)
        self.args = args
        self.per_domain = {}   # domain -> {'tasks': {...}, 'counters': {...}, ...}
        self.relations = {}    # frozenset(relation.items()) -> relation dict (dedup)
        self.threads = set()
        self.per_thread = {}   # tid -> list of (begin, end) awaiting a parent
    @staticmethod
    def get_name_ex(begin):
        # strip a trailing ':<digits>' suffix (e.g. a source line) from the name
        name = get_name(begin)
        if ':' in name:
            parts = name.split(':')
            if parts[1].isdigit():
                return parts[0]
        return name
    def get_per_domain(self, domain):
        """Return (creating on demand) the stats buckets for a domain."""
        return self.per_domain.setdefault(domain, {
            'counters': {}, 'objects': {}, 'frames': {}, 'tasks': {}, 'markers': {}, 'threads': {}
        })
    def complete_task(self, type, begin, end):
        """Accumulate one finished event into the per-domain stats and graph."""
        if 'sampled' in begin and begin['sampled']:
            return  # sampled (stack-derived) tasks are handled via handle_stack
        tid = begin['tid'] if 'tid' in begin else None
        self.threads.add(tid)
        domain = self.get_per_domain(begin['domain'])
        if type == 'task':
            task = domain['tasks'].setdefault(self.get_name_ex(begin), {'time': []})
            task['time'].append(end['time'] - begin['time'])
            if '__file__' in begin:
                # NOTE(review): assumes begin['__line__'] is already a string here
                task['src'] = begin['__file__'] + ":" + begin['__line__']
            if begin['type'] == 0:  # non-overlapped only
                # We expect parents to be reported in the end order (when the end time becomes known)
                orphans = self.per_thread.setdefault(begin['tid'], [])
                left_index = bisect_right(orphans, begin['time'], lambda orphan: orphan[0]['time'])  # first possible child
                right_index = bisect_right(orphans, end['time'], lambda orphan: orphan[0]['time']) - 1  # last possible child
                for i in range(right_index, left_index - 1, -1):  # right to left to be able deleting from array
                    orphan = orphans[i]
                    if orphan[1]['time'] < end['time']:  # a parent is found!
                        self.add_relation({
                            'label': 'calls', 'from': self.make_id(begin['domain'], self.get_name_ex(begin)),
                            'to': self.make_id(orphan[0]['domain'], self.get_name_ex(orphan[0]))})
                        del orphans[i]
                orphans.insert(left_index, (begin, end))  # this task now awaits its own parent
            else:
                self.add_relation({'label': 'executes', 'from': self.make_id("threads", str(tid)),
                                   'to': self.make_id(begin['domain'], self.get_name_ex(begin)), 'color': 'gray'})
        elif type == 'marker':
            domain['markers'].setdefault(begin['str'], [])
        elif type == 'frame':
            pass
        elif type == 'counter':
            if 'delta' in begin:
                domain['counters'].setdefault(begin['str'], []).append(begin['delta'])
            else:
                return  # TODO: add multi-value support
        elif 'object' in type:
            if 'snapshot' in type:
                return
            objects = domain['objects'].setdefault(begin['str'], {})
            object = objects.setdefault(begin['id'], {})
            if 'new' in type:
                object['create'] = begin['time']
            elif 'delete' in type:
                object['destroy'] = begin['time']
        else:
            message('message', "Unhandled: " + type)
    def finish(self):
        """Attach still-parentless tasks to their threads with 'executes' edges."""
        for tid, orphans in self.per_thread.items():
            last_time = 0
            for orphan in orphans:
                if (orphan[1]['time'] < last_time):
                    print("FIXME: orphan[1]['time'] < last_time")
                last_time = orphan[1]['time']
                begin = orphan[0]
                self.add_relation({'label': 'executes', 'from': self.make_id("threads", str(tid)),
                                   'to': self.make_id(begin['domain'], self.get_name_ex(begin)), 'color': 'gray'})
    @staticmethod
    def make_id(domain, name):
        """Build a graph-safe node id: lowercase, non-alphanumerics -> '_'."""
        import re
        res = "%s_%s" % (domain, name)
        return re.sub("[^a-z0-9]", "_", res.lower())
    def relation(self, data, head, tail):
        """Record an explicit (red) relation between two events."""
        if head and tail:
            self.add_relation({'label': self.get_name_ex(data), 'from': self.make_id(head['domain'], self.get_name_ex(head)), 'to': self.make_id(tail['domain'], self.get_name_ex(tail)), 'color': 'red'})
    def add_relation(self, relation):
        # deduplicate by the full key/value set of the relation
        key = frozenset(relation.items())
        if key in self.relations:
            return
        self.relations[key] = relation
    def handle_stack(self, task, stack, name='stack'):
        """Fold one stack sample into call edges (frame -> caller frame)."""
        tid = abs(task['tid']) if 'tid' in task else None
        self.threads.add(tid)
        parent = None
        for frame in reversed(stack):  # outermost frame first
            domain = self.get_per_domain(frame['module'])
            name = frame['str'].split('+')[0]
            domain['tasks'].setdefault(name, {'time': [0]})
            if parent:
                self.add_relation({'label': 'calls', 'from': self.make_id(parent['module'], parent['name']), 'to': self.make_id(frame['module'], name)})
            else:
                self.add_relation({'label': 'executes', 'from': self.make_id("threads", str(tid)), 'to': self.make_id(frame['module'], name), 'color': 'gray'})
            parent = frame.copy()
            parent.update({'name': name})
class Collector:
    """Base class for external data collectors: shared logging, shell
    execution helpers and tool discovery."""
    def __init__(self, args):
        self.args = args
    @classmethod
    def set_output(cls, output):  # has to be object supporting 'write' method
        global_storage('log')['file'] = output
    @classmethod
    def get_output(cls, statics = {}):  # statics={} is an intentional cross-call cache for the temp log file
        """Return the shared log file object, creating it on first use.

        With parsed args the log goes next to args.output (migrating any
        earlier temp log); without args a temp-dir log is used and cached.
        """
        log = global_storage('log')
        if not log:
            args = get_args()
            log_name = datetime.now().strftime('sea_%Y_%m_%d__%H_%M_%S.log')
            if args:
                log_path = subst_env_vars(args.output)
                if os.path.isfile(log_path):
                    log_path = os.path.dirname(log_path)
                ensure_dir(log_path, False)
                if 'tempfile' in statics:
                    # migrate the pre-args temp log into the real output dir
                    statics['tempfile'].close()
                    if os.path.dirname(statics['tempfile'].name) != log_path:
                        shutil.copy(statics['tempfile'].name, log_path)
                    del statics['tempfile']
            else:
                if 'tempfile' in statics:
                    return statics['tempfile']
                log_path = (tempfile.gettempdir() if sys.platform == 'win32' else '/tmp')
            log_file = os.path.join(
                log_path,
                log_name
            )
            print("For execution details see:", log_file)
            if args:
                cls.set_output(open(log_file, 'a'))
            else:
                statics['tempfile'] = open(log_file, 'a')
                return statics['tempfile']
        return log['file']
    @classmethod
    def log(cls, msg, stack=False):
        """Append msg (optionally with the current call stack) to the shared log."""
        assert type(stack) is bool  # to avoid "log" function being misused as "print" where comma allows more args
        msg = msg.strip()
        cut = '\n' + '-' * 100 + '\n'
        msg = cut + msg + '\n\n' + (''.join(traceback.format_stack()[:-1]) if stack else '') + cut
        output = cls.get_output()
        output.write(msg + '\n')
        output.flush()
    @classmethod
    def execute(cls, cmd, log=True, **kwargs):
        """Run a shell command, capture (out, err), optionally log the result."""
        start_time = time.time()
        if 'stdout' not in kwargs:
            kwargs['stdout'] = subprocess.PIPE
        if 'stderr' not in kwargs:
            kwargs['stderr'] = subprocess.PIPE
        if 'env' not in kwargs:
            kwargs['env'] = get_original_env()
        if sys.version[0] == '3':
            kwargs['encoding'] = 'utf8'  # get str, not bytes, from the pipes
        (out, err) = subprocess.Popen(cmd, shell=True, **kwargs).communicate()
        if log:
            cls.log("\ncmd:\t%s:\nout:\t%s\nerr:\t%s\ntime: %s" % (cmd, str(out).strip(), str(err).strip(), str(timedelta(seconds=(time.time() - start_time)))), stack=True if err else False)
        if verbose_level() == verbose_level('info'):
            print("\n\n -= '%s' output =- {\n" % cmd)
            print(out.strip() if out else '')
            print("\n", "-" * 50, "\n")
            print(err.strip() if err else '')
            print("\n}\n\n")
        return out, err
    @classmethod
    def execute_detached(cls, cmd, **kwargs):
        """Launch a shell command fully detached from this process (no pipes)."""
        cls.log("\nDetached:\t%s" % cmd)
        if sys.platform == 'win32':
            DETACHED_PROCESS = 0x00000008
            CREATE_NEW_PROCESS_GROUP = 0x00000200
            CREATE_NO_WINDOW = 0x08000000
            info = subprocess.STARTUPINFO()
            info.dwFlags = subprocess.STARTF_USESHOWWINDOW
            info.wShowWindow = 0  # SW_HIDE
            subprocess.Popen(cmd, shell=True, startupinfo=info, stdin=None, stdout=None, stderr=None, creationflags=(CREATE_NO_WINDOW | CREATE_NEW_PROCESS_GROUP), **kwargs)
        else:
            subprocess.Popen(cmd, shell=True, stdin=None, stdout=None, stderr=None, **kwargs)
    def start(self):
        raise NotImplementedError('Collector.start is not implemented!')
    def stop(self, wait=True):
        raise NotImplementedError('Collector.stop is not implemented!')
    @classmethod
    def detect_instances(cls, what):
        """Locate all installations of an executable via where/which."""
        instances = []
        cmd = 'where' if sys.platform == 'win32' else 'which'
        (out, err) = cls.execute('%s %s' % (cmd, what))
        out = out.decode() if hasattr(out, 'decode') else out
        if err:
            return instances
        for line in out.split('\n'):
            line = line.strip()
            if line:
                instances.append(line)
        return instances
if __name__ == "__main__":
    # Script entry point: run main() and report total wall-clock run time
    # (microseconds stripped from the timedelta for readability).
    start_time = time.time()
    main()
    elapsed = time.time() - start_time
    print("Time Elapsed:", str(timedelta(seconds=elapsed)).split('.')[0])
|
from click import ClickException
from .ui import format_full_image_name
from .types import DockerImage
class PieroneException(ClickException):
    '''Base class for all Pier One client errors; thrown when something does not go as expected'''
class APIException(PieroneException):
    """
    Raised when an API call fails.

    ``action`` is a format string completed with ``kwargs``. If ``kwargs``
    contains an ``image`` key, its value is first rendered with
    ``format_full_image_name``.
    """

    def __init__(self, action: str, **kwargs):
        if "image" in kwargs:
            kwargs["image"] = format_full_image_name(kwargs["image"])
        self.message = "You can't {}.".format(action.format_map(kwargs))
class ArtifactNotFound(APIException):
    """
    Raised when the requested image was not found.
    """

    def __init__(self, image: DockerImage):
        self.image = image
        self.message = "{} doesn't exist.".format(format_full_image_name(image))
class Forbidden(APIException):
    """
    Raised when Pier One returns HTTP 403 (Forbidden).
    """
class Conflict(APIException):
    """
    Raised when Pier One returns HTTP 409 (Conflict).
    """
class UnprocessableEntity(APIException):
    """
    Raised when Pier One returns HTTP 422 (Unprocessable Entity).
    """
|
"""
Pipeline helper functions.
"""
import os
from pipeline_config import *
import traceback
import time
def vbPrint(s):
    """Print *s* only when verbose mode is enabled (config['verbose'] == 1).

    Args:
        s: object to print.
    """
    if config['verbose'] == 1:
        print(s)
def setConfig(argv):
    """
    * Takes the terminal arguments as input
    * Sets configuration variable for the pipeline or sets pipeline step depending on argument signature
    Argument Signatures:
        * Configuration Variable : --<KEY>=<VALUE>
        * Pipeline Step          : -<PIPELINE_STEP>
    """
    # This sets the types for each argument. Any new arguments that are not strings need to have their types defined.
    # The arguments will be parsed as these; if not parsable in the below format, the bad argument is reported.
    # The default argument type is str.
    argType = {
        'verbose': int,
        'pef': float,
        'score_threshold': float,
        'det_score_threshold': float,
        'rowsSplitPerTile': int,
        'colsSplitPerTile': int,
        'patchDimX': int,
        'patchDimY': int,
        'tvRatio': float
    }
    global config  # mutates the module-level config imported from pipeline_config
    argv = argv[1:]  # drop the program name
    if('--verbose=0' in argv):
        # process the quiet flag first so later parsing messages respect it
        argv = ['--verbose=0'] + argv
    # Processes each argument based on their argType and overwrites their value in the config variable.
    # If it is a pipeline step, it gets added into the pipeline.
    # If an argument cannot be parsed, an error is printed and parsing continues.
    # The pipeline only runs after all arguments have been parsed.
    for arg in argv:
        try:
            if(arg[:2] == '--'):
                x = arg[2:].split('=')
                if(x[0] in argType):
                    config[x[0]] = argType[x[0]](x[1])
                else:
                    config[x[0]] = str(x[1])
            else:
                assert arg[0] == '-'
                config['pipeline'].append(arg[1:])
        except:  # deliberate catch-all: report the bad argument and keep going
            traceback.print_exc()
            vbPrint('Bad Argument: %s' % (arg))
def __listdir(directory: str, extensions: list) -> list:
    """Return the file names in *directory* whose extension is in *extensions*
    (case-insensitive), or every entry when 'all' is requested.

    Fix: entries without any '.' no longer raise IndexError — they are
    skipped unless 'all' is in extensions.
    """
    matched = []
    for name in os.listdir(directory):
        parts = name.lower().rsplit('.', 1)  # [stem, ext] or [name] when no dot
        if 'all' in extensions or (len(parts) == 2 and parts[1] in extensions):
            matched.append(name)
    return matched
def __make_folders(rootfolder: str, subfolder: str) -> None:
    """Ensure *rootfolder* and *subfolder* directories exist, creating each
    (and logging via vbPrint) when missing.

    Args:
        rootfolder (str): root directory to check/create first.
        subfolder (str): nested directory to check/create second.
    """
    for folder in (rootfolder, subfolder):
        if os.path.isdir(folder):
            vbPrint('Found dir: %s' % (folder))
        else:
            vbPrint('Making dir: %s' % (folder))
            os.mkdir(folder)
def __getWindow(window_config: str):  # Called in genImgPatches()
    """Parse a 'width,height' string into a dict of integer dimensions.

    Args:
        window_config (str): e.g. '5000,5000'
    Returns:
        dict: {'width': int, 'height': int} (only 'width' when no comma is present)
    """
    pieces = window_config.split(',', 1)
    return {axis: int(value) for axis, value in zip(('width', 'height'), pieces)}
def __time_this(func):  # Currently used on loadFiles & genImgPatches
    """Decorator that logs the wall-clock duration of *func* via vbPrint.

    Args:
        func: callable to wrap.
    """
    def __wrapper(*args, **kwargs):
        begin = time.perf_counter()
        result = func(*args, **kwargs)
        elapsed = time.perf_counter() - begin
        millis = round((elapsed % 1) * 1000, 2)
        stamp = time.strftime('h:%H m:%M s:%S', time.gmtime(elapsed))
        vbPrint(f'{func.__name__} took: {stamp} ms:{millis}')
        if result is not None:
            return result
    return __wrapper
class NavigationHelper:
    """Page-navigation helpers bound to the application fixture."""

    def __init__(self, app):
        self.app = app

    def open_home_page(self):
        """Open base_url unless the home page is already displayed."""
        wd = self.app.wd
        already_home = wd.current_url.endswith("/") and len(wd.find_elements_by_name("Select all")) > 0
        if not already_home:
            wd.get(self.app.base_url)
|
###############################################################################
# url.py #
# Copyright Human
# Author ASC_8384
# Mail ASC_8384@foxmail.com
# Website http://ASC8384.top
# FileName url.py
# Description Encoding conversion: decode garbled percent-encoded text from some websites into human-readable text
# HomepageURL https://github.com/ASC8384/MyCodeSnippets
# License CC0
# Time 2018-07-15
###############################################################################
import urllib.parse
import os
def change(sourse, charset):
    """Percent-decode *sourse*: UTF-8 when charset is exactly True, GBK otherwise."""
    encoding = 'utf-8' if charset is True else 'gbk'
    return urllib.parse.unquote(sourse, encoding)
def init():
    """Read one command from stdin and map it to an action code.

    q/Q -> 0 (quit), g/G -> 8 (GBK mode), u/t/f -> 7 (UTF-8 mode),
    z/Z/b/B -> 6 (undo); any other input is returned unchanged as a name.
    """
    name = input()
    actions = {
        'q': 0, 'Q': 0,
        'g': 8, 'G': 8,
        'u': 7, 't': 7, 'f': 7,
        'z': 6, 'Z': 6, 'b': 6, 'B': 6,
    }
    return actions.get(name, name)
if __name__ == "__main__":
    # Interactive loop: read commands/filenames, decode percent-encoded names
    # and rename the files on disk accordingly. One level of undo is kept.
    active = True
    char = True            # True: decode as UTF-8, False: decode as GBK
    backupa = ''           # last new (decoded) name, for undo
    backupb = ''           # last original name, for undo
    while (active):
        putin = init()
        if putin == 0:     # quit
            active = False
        elif putin == 8:   # switch to GBK decoding
            char = False
        elif putin == 7:   # switch to UTF-8 decoding
            char = True
        elif putin == 6:   # undo the previous rename
            os.rename(backupa, backupb)
            print('撤消成功!')
        else:              # treat input as a filename: decode and rename
            backupb = putin[:]
            backupa = change(putin, char)
            os.rename(putin, backupa)
            print(backupa)
|
#Source code with the blog post at http://monik.in/a-noobs-guide-to-implementing-rnn-lstm-using-tensorflow/
import numpy as np
import random
from random import shuffle
import tensorflow as tf
# from tensorflow.models.rnn import rnn_cell
# from tensorflow.models.rnn import rnn
NUM_EXAMPLES = 10000
# All 2^20 twenty-bit strings, shuffled; each becomes a sequence of 1-element vectors.
train_input = ['{0:020b}'.format(i) for i in range(2**20)]
shuffle(train_input)
train_input = [map(int, i) for i in train_input]  # lazy maps; consumed exactly once below
ti = []
for i in train_input:
    temp_list = []
    for j in i:
        temp_list.append([j])  # shape per sequence: (20, 1)
    ti.append(np.array(temp_list))
train_input = ti
# Target: one-hot over 0..20 encoding how many 1-bits the sequence contains.
train_output = []
for i in train_input:
    count = 0
    for j in i:
        if j[0] == 1:
            count += 1
    temp_list = ([0] * 21)
    temp_list[count] = 1
    train_output.append(temp_list)
# First NUM_EXAMPLES sequences train; the rest (the bulk of 2^20) are the test set.
test_input = train_input[NUM_EXAMPLES:]
test_output = train_output[NUM_EXAMPLES:]
train_input = train_input[:NUM_EXAMPLES]
train_output = train_output[:NUM_EXAMPLES]
print("test and training data loaded")
# TensorFlow 1.x graph: single LSTM over the bit sequence, softmax over 21 counts.
data = tf.placeholder(tf.float32, [None, 20, 1])  # Number of examples, number of input, dimension of each input
target = tf.placeholder(tf.float32, [None, 21])
num_hidden = 24
cell = tf.nn.rnn_cell.LSTMCell(num_hidden, state_is_tuple=True)
val, _ = tf.nn.dynamic_rnn(cell, data, dtype=tf.float32)
# (batch, time, hidden) -> (time, batch, hidden), then take the last time step.
val = tf.transpose(val, [1, 0, 2])
last = tf.gather(val, int(val.get_shape()[0]) - 1)
weight = tf.Variable(tf.truncated_normal([num_hidden, int(target.get_shape()[1])]))
bias = tf.Variable(tf.constant(0.1, shape=[target.get_shape()[1]]))
prediction = tf.nn.softmax(tf.matmul(last, weight) + bias)
# Cross-entropy with clipping to avoid log(0).
cross_entropy = -tf.reduce_sum(target * tf.log(tf.clip_by_value(prediction, 1e-10, 1.0)))
optimizer = tf.train.AdamOptimizer()
minimize = optimizer.minimize(cross_entropy)
mistakes = tf.not_equal(tf.argmax(target, 1), tf.argmax(prediction, 1))
error = tf.reduce_mean(tf.cast(mistakes, tf.float32))
init_op = tf.initialize_all_variables()  # NOTE(review): deprecated TF1 API — presumably tf.global_variables_initializer() in later TF1; confirm target TF version
sess = tf.Session()
sess.run(init_op)
batch_size = 1000
no_of_batches = int(len(train_input) / batch_size)
epoch = 5000
# Minibatch training; error is evaluated on the (very large) held-out set each epoch.
for i in range(epoch):
    ptr = 0
    for j in range(no_of_batches):
        inp, out = train_input[ptr:ptr + batch_size], train_output[ptr:ptr + batch_size]
        ptr += batch_size
        sess.run(minimize, {data: inp, target: out})
    print("Epoch ", str(i))
incorrect = sess.run(error, {data: test_input, target: test_output})
# Sanity check on one hand-written 20-bit sequence.
print(sess.run(prediction, {data: [[[1], [0], [0], [1], [1], [0], [1], [1], [1], [0], [1], [0], [0], [1], [1], [0], [1], [1], [1], [0]]]}))
print('Epoch {:2d} error {:3.1f}%'.format(i + 1, 100 * incorrect))
sess.close()
from flask import Blueprint, request, jsonify
from models.models import (
RelUserFavProject,
RelUserInProject,
db,
UserModel,
RelUserLanguage,
RelUserTopic,
)
# Blueprint grouping the user-related API endpoints; registered by the main app.
user_api = Blueprint("user_api", __name__)
@user_api.route("/getUserData", methods=("GET",))
def getUserData():
    """Return a user's profile plus their languages and topics.

    Query string:
        username -- the user to look up.

    Returns 200 with {"userData": {...}} on success, 400 with an error
    payload when the username is missing or unknown.
    """
    username = request.args.get("username")
    error = None
    user = None
    if not username:
        # BUGFIX: previously the None-lookup below overwrote this message
        # with "Username does not exist" even when no username was sent.
        error = "Missing Data"
    else:
        # Fetch once and reuse; the original issued the same query twice.
        user = UserModel.query.filter_by(username=username).first()
        if user is None:
            error = "Username does not exist"
    if error is None:
        userDic = user.serialize()
        userData = {
            "username": userDic["username"],
            "name": userDic["name"],
            "email": userDic["email"],
            "github": userDic["github"],
        }
        userData["languages"] = [
            {"id": item.id, "name": item.langName}
            for item in RelUserLanguage.query.filter_by(username=username).all()
        ]
        userData["topics"] = [
            {"id": item.id, "name": item.topicName}
            for item in RelUserTopic.query.filter_by(username=username).all()
        ]
        return jsonify({"userData": userData}), 200
    else:
        return jsonify({"status": "bad", "error": error, "username": username}), 400
@user_api.route("/deleteUser", methods=("DELETE",))
def deleteUser():
    """Delete a user and all of their relationship rows.

    JSON body:
        username -- the user to delete.

    Returns 200 on success, 400 when the username is missing or unknown.
    """
    # BUGFIX: body["username"] raised an uncaught KeyError (HTTP 500) when
    # the field was absent; .get() lets the "Missing Data" branch handle it.
    body = request.get_json() or {}
    raw_username = body.get("username")
    username = str(raw_username) if raw_username is not None else None
    error = None
    if not username:
        error = "Missing Data"
    elif UserModel.query.filter_by(username=username).first() is None:
        error = f"User {username} does not exist"
    if error is None:
        # Remove relationship rows first, then the user, in one transaction.
        RelUserInProject.query.filter_by(username=username).delete()
        RelUserFavProject.query.filter_by(username=username).delete()
        RelUserLanguage.query.filter_by(username=username).delete()
        RelUserTopic.query.filter_by(username=username).delete()
        UserModel.query.filter_by(username=username).delete()
        db.session.commit()
        message = f"User {username} deleted"
        return jsonify({"status": "ok", "message": message}), 200
    else:
        return jsonify({"status": "bad", "error": error}), 400
|
# File: daterangepicker/forms.py
from django.forms import ModelForm
from daterangepicker import utils
from daterangepicker.utils import DATETIME_INPUT_FORMAT, time_range_generator
from daterangepicker.fields import DateTimeRangeField
__all__ = [
"TimeRangedModelForm",
"utils",
"DATETIME_INPUT_FORMAT",
"time_range_generator",
]
class TimeRangedModelForm(ModelForm):
    """ModelForm exposing a single ``time_range`` field in place of the
    model's ``time_start``/``time_end`` pair."""

    time_range = DateTimeRangeField()

    class Meta:
        # Since time_range isn't actually a field in the model, exclude it from
        # being saved into the new model instance.
        exclude = [
            "time_range",
        ]

    def __init__(self, *args, **kwargs):
        super(TimeRangedModelForm, self).__init__(*args, **kwargs)
        # Get rid of time_start and time_end fields, which should still have
        # been in the subclass's Meta.fields list.
        self.fields.pop("time_start", None)
        self.fields.pop("time_end", None)
        # Carry any initial start/end values over into the combined field.
        time_start = self.initial.pop("time_start", None)
        time_end = self.initial.pop("time_end", None)
        # Set a default time_range if it was not already provided.
        self.initial.setdefault("time_range", (time_start, time_end))

    def save(self, commit=True):
        """
        Extend saving such that time_start and time_end values are manually
        set in the model instance.
        """
        # Populate self.instance without hitting the database yet.
        super(TimeRangedModelForm, self).save(commit=False)
        time_start, time_end = self.cleaned_data["time_range"]
        self.instance.time_start = time_start
        self.instance.time_end = time_end
        if commit:
            self.instance.save()
            # Needed because the parent save() was called with commit=False.
            self.save_m2m()
        return self.instance
|
#! /usr/bin/env python
# Python 2 script: copies a local file to a remote host over ssh by base64
# encoding its contents into a heredoc that is decoded on the remote side.
import base64
import os
import sys
import subprocess
import StringIO

# Usage: copy-opencloud <srcfn> <desthost:destfn>
if len(sys.argv)<3:
    print >> sys.stderr, "syntax: copy-opencloud <srcfn> <desthost:destfn>"
    sys.exit(-1)

srcfn = sys.argv[1]
dest = sys.argv[2]

if not ":" in dest:
    print >> sys.stderr, "malformed desthost:destfn"
    sys.exit(-1)

(hostname,destfn) = dest.split(":",1)

# A trailing slash means "copy into this directory": keep the source basename.
if destfn.endswith("/"):
    destfn = destfn + os.path.basename(srcfn)

enctext = base64.b64encode(open(srcfn).read())

#script = 'sudo bash -C "base64 -d -i > %s <<EOF\n%s\nEOF\n"' % (destfn, enctext)
script = 'base64 -d -i > %s <<EOF\n%s\nEOF\n' % (destfn, enctext)

# Keep a local debug copy of the generated remote script.
file("/tmp/script","w").write(script)

# Pipe the decode-heredoc script to a shell on the remote host.
p = subprocess.Popen(["ssh", "-A", hostname], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
print p.communicate(input=script)[0]

# Shell equivalent of the above, kept for reference:
"""
SRCPATHNAME=$1
DESTHOSTNAME=$2
DESTPATHNAME=$3
echo "base64 -d -i > $DESTPATHNAME <<EOF" > /tmp/ssh-up
base64 $SRCPATHNAME >> /tmp/ssh-up
echo "EOF" >> /tmp/ssh-up
ssh -A $DESTHOSTNAME < /tmp/ssh-up
"""
|
from django.views.generic import TemplateView
from .calc_modules.minimum_blank_size import MBSCalculate
from .calc_modules.back_vertex_power import BVPCalculate
from .calc_modules.cylinder_transpose import CTCalculate
from .calc_modules.mean_ocular_perfusion_pressure import MOPPCalculate
class IndexView(TemplateView):
    """Landing page for the calculator app."""

    template_name = "calculator/index.html"
class MinimumBlankSizeView(TemplateView):
    """Minimum blank size calculator page."""

    template_name = "calculator/minimum_blank_size.html"
class BackVertexPowerView(TemplateView):
    """Back vertex power calculator page."""

    template_name = "calculator/back_vertex_power.html"
class CylinderTranposeView(TemplateView):
    """Cylinder transpose calculator page.

    NOTE(review): class name is missing the 's' in "Transpose"; renaming
    would require updating every reference (e.g. the URLconf), so it is
    left as-is.
    """

    template_name = "calculator/cylinder_transpose.html"
class MeanOcularPerfusionPressureView(TemplateView):
    """Mean ocular perfusion pressure calculator page."""

    template_name = "calculator/mean_ocular_perfusion_pressure.html"
|
# -*- coding: utf-8 -*-
__author__ = ["chrisholder"]
from typing import Any
import numpy as np
from numba import njit
from sktime.distances.base import DistanceCallable, NumbaDistance
class _SquaredDistance(NumbaDistance):
    """Squared distance between two time series."""

    def _distance_factory(
        self, x: np.ndarray, y: np.ndarray, **kwargs: Any
    ) -> DistanceCallable:
        """Create a no_python compiled Squared distance callable.

        Parameters
        ----------
        x: np.ndarray (1d or 2d array)
            First timeseries.
        y: np.ndarray (1d or 2d array)
            Second timeseries.
        kwargs: Any
            Extra kwargs. For squared there are none however, this is kept for
            consistency.

        Returns
        -------
        Callable[[np.ndarray, np.ndarray], float]
            No_python compiled Squared distance callable.
        """
        # x, y and kwargs are deliberately unused: the squared distance needs
        # no per-call specialisation, so one precompiled function is shared.
        return _numba_squared_distance
@njit(cache=True, fastmath=True)
def _numba_squared_distance(x: np.ndarray, y: np.ndarray) -> float:
    """Squared distance compiled to no_python.

    Sums the per-channel squared distances over the first axis.

    Parameters
    ----------
    x: np.ndarray (2d array)
        First time series.
    y: np.ndarray (2d array)
        Second time series.

    Returns
    -------
    distance: float
        Squared distance between the x and y.
    """
    total = 0.0
    n_channels = x.shape[0]
    for channel in range(n_channels):
        total += _local_squared_distance(x[channel], y[channel])
    return total
@njit(cache=True, fastmath=True)
def _local_squared_distance(x: np.ndarray, y: np.ndarray):
    """Compute the local squared distance.

    Accumulates the squared element-wise differences of two 1d series.

    Parameters
    ----------
    x: np.ndarray (1d array)
        First time series
    y: np.ndarray (1d array)
        Second time series

    Returns
    -------
    float
        Squared distance between the two time series
    """
    total = 0.0
    n = x.shape[0]
    idx = 0
    while idx < n:
        delta = x[idx] - y[idx]
        total += delta * delta
        idx += 1
    return total
|
#!/usr/bin/env python
import argparse
from flask import Flask, jsonify, request, abort
from dispatch import Dispatcher
app = Flask(__name__)
# Dispatcher for OpenC2 commands; assigned in main() before the app starts.
PROFILE = None
@app.route('/')
def ok():
    """
    Verify the system is running.
    """
    # Simple liveness probe; no body inspection.
    return jsonify({"response": "200 OK"}), 200
@app.errorhandler(400)
def bad_request(e):
    """
    Respond to a malformed OpenC2 command.
    """
    # `e` is the exception Flask passes to error handlers; unused here.
    return jsonify({"response": "400 Bad Request"}), 400
@app.errorhandler(500)
def internal_server_error(e):
    """
    Uncaught proxy error
    """
    # `e` is the exception Flask passes to error handlers; unused here.
    return jsonify({"response": "500 Internal Server Error"}), 500
@app.route('/', methods=['POST'])
def recieve():
    """
    Receive an OpenC2 command, process and return response.

    All OpenC2 commands should be application/json over HTTP POST.

    NOTE(review): the function name typo ("recieve") is kept because the
    endpoint name is part of the app's routing interface (url_for).
    """
    # silent=True returns None for a missing/malformed JSON body instead of
    # raising, so our own 400 handler answers; this also parses the body
    # once instead of twice (request.json followed by request.get_json()).
    command = request.get_json(silent=True)
    if not command:
        abort(400)
    response = PROFILE.dispatch(command)
    return jsonify(response), 200
def main():
    """
    Parse configuration and start flask app.

    WARNING: only a single profile file is supported at this time
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--port', type=int, default=9001,
                        help="port to listen on (default=9001)")
    parser.add_argument('profiles', nargs='+',
                        help="full path to OpenC2 profile")
    args = parser.parse_args()
    # Build the dispatcher before serving so requests never see PROFILE=None.
    global PROFILE
    PROFILE = Dispatcher(*args.profiles)
    app.run(port=args.port)


if __name__ == "__main__":
    main()
|
from inferelator import utils
from inferelator import workflow
from inferelator.distributed.inferelator_mp import MPControl
from inferelator.preprocessing import single_cell
from inferelator.regression.bbsr_multitask import BBSRByTaskRegressionWorkflow
# Cluster sizing and filesystem locations for the run.
N_CORES = 100
INPUT_DIR = '/mnt/ceph/users/cjackson/inferelator/data/yeast'
OUTPUT_DIR = '/mnt/ceph/users/cjackson/inferelator/v031/'
CONDA_ACTIVATE_PATH = '~/.local/anaconda3/bin/activate'

# Prior-network and transcription-factor name files (relative to INPUT_DIR).
YEASTRACT_PRIOR = "YEASTRACT_20190713_BOTH.tsv"
TF_NAMES = "tf_names_gold_standard.txt"
YEASTRACT_TF_NAMES = "tf_names_yeastract.txt"
def start_mpcontrol_dask(n_cores=N_CORES):
    """Configure and connect the dask-cluster multiprocessing engine.

    :param n_cores: number of cluster cores to request (min == max).
    """
    utils.Debug.set_verbose_level(1)
    MPControl.set_multiprocess_engine("dask-cluster")
    MPControl.client.minimum_cores = n_cores
    MPControl.client.maximum_cores = n_cores
    MPControl.client.walltime = '48:00:00'
    # Environment setup executed by every dask worker before starting.
    MPControl.client.add_worker_env_line('module load slurm')
    MPControl.client.add_worker_env_line('module load gcc/8.3.0')
    MPControl.client.add_worker_env_line('source ' + CONDA_ACTIVATE_PATH)
    # Submit the cluster controller to the "ccb" partition.
    MPControl.client.cluster_controller_options.append("-p ccb")
    MPControl.connect()
if __name__ == '__main__':
    # Use the module-level constant instead of repeating the literal 100.
    start_mpcontrol_dask(N_CORES)

    # One full multitask inference run per random seed (10 runs total).
    for seed in range(42, 52):
        worker = workflow.inferelator_workflow(regression=BBSRByTaskRegressionWorkflow, workflow="amusr")
        worker.set_file_paths(input_dir=INPUT_DIR, output_dir=OUTPUT_DIR, gold_standard_file="gold_standard.tsv",
                              gene_metadata_file="orfs.tsv", priors_file=YEASTRACT_PRIOR,
                              tf_names_file=YEASTRACT_TF_NAMES)
        worker.set_file_properties(gene_list_index="SystematicName")
        worker.set_task_filters(target_expression_filter="union", regulator_expression_filter="intersection")
        worker.set_run_parameters(num_bootstraps=5, random_seed=seed)
        worker.set_crossvalidation_parameters(split_gold_standard_for_crossvalidation=True, cv_split_ratio=0.2)
        # Each seed writes into its own output subdirectory.
        worker.append_to_path('output_dir', "hybrid_bbsr_mtl_seed_" + str(seed))

        # Jackson single cell task
        task1 = worker.create_task(task_name="Jackson_2019",
                                   expression_matrix_file="103118_SS_Data.tsv.gz",
                                   expression_matrix_columns_are_genes=True,
                                   extract_metadata_from_expression_matrix=True,
                                   expression_matrix_metadata=['Genotype', 'Genotype_Group', 'Replicate', 'Condition',
                                                               'tenXBarcode'],
                                   workflow_type="single-cell",
                                   count_minimum=0.05)
        task1.add_preprocess_step(single_cell.log2_data)

        # Calico data task
        task2 = worker.create_task(task_name="Calico_2019",
                                   expression_matrix_file="calico_expression_matrix_log2.tsv.gz",
                                   expression_matrix_columns_are_genes=True,
                                   extract_metadata_from_expression_matrix=True,
                                   expression_matrix_metadata=['TF', 'strain', 'date', 'restriction', 'mechanism',
                                                               'time'],
                                   workflow_type="tfa",
                                   metadata_handler="nonbranching")

        # Kostya data task
        task3 = worker.create_task(task_name="Kostya_2019",
                                   expression_matrix_file="kostya_microarray_yeast.tsv.gz",
                                   expression_matrix_columns_are_genes=True,
                                   extract_metadata_from_expression_matrix=True,
                                   expression_matrix_metadata=['isTs', 'is1stLast', 'prevCol', 'del.t', 'condName'],
                                   workflow_type="tfa",
                                   metadata_handler="branching")

        worker.run()
        # Free the workflow (and its loaded data) before the next seed.
        del worker
|
#! /usr/bin/env python
# run this scritp along with _06Briding_openCV_ROS.py from this package
import rospy, cv2, time
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image
# CONSTANTS
NODE_NAME = "image_publisher"
IMAGE_TOPIC = "image"

# GLOBALS
image_pub = None  # rospy.Publisher, created in main()
bridge = CvBridge()  # converts cv2 arrays to ROS Image messages
def main():
    """Publish mirrored webcam frames on IMAGE_TOPIC at ~30 Hz until shutdown."""
    rospy.init_node(NODE_NAME)
    global image_pub
    image_pub = rospy.Publisher(IMAGE_TOPIC, Image, queue_size=1)
    # attach the webcam's video feed to image_feed object
    image_feed = cv2.VideoCapture(0)
    frame_rate = rospy.Rate(30)
    while not rospy.is_shutdown():
        ret, frame = image_feed.read()
        # BUGFIX: the original called frame.any(), which raises
        # AttributeError when the grab fails (frame is None); it also
        # published even when conversion raised, sending the raw cv2
        # array instead of an Image message.
        if ret and frame is not None:
            try:
                # mirror the frame horizontally, then convert to a ROS Image
                msg = bridge.cv2_to_imgmsg(frame[:, -1::-1, :], encoding="bgr8")
            except CvBridgeError as e:
                print(e)
            else:
                image_pub.publish(msg)
        frame_rate.sleep()
    image_feed.release()


if __name__ == "__main__":
    main()
# Generated by Django 3.0.2 on 2020-02-13 14:44
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration; avoid editing operations by hand.

    dependencies = [
        ('shortcode', '0024_auto_20200213_0846'),
    ]

    operations = [
        migrations.RenameField(
            model_name='adminsetting',
            old_name='export_url',
            new_name='ftp_export_path',
        ),
        migrations.RemoveField(
            model_name='adminsetting',
            name='ftp_port',
        ),
        migrations.AddField(
            model_name='usersetting',
            name='active_column_profile',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='shortcode.ColumnProfile', verbose_name='Active Column Profile'),
        ),
        migrations.AlterField(
            model_name='adminsetting',
            name='export_method',
            field=models.IntegerField(choices=[(0, 'File'), (1, 'Ftp')], default=0, verbose_name='Export Method'),
        ),
        migrations.AlterField(
            model_name='adminsetting',
            name='ftp_host',
            field=models.CharField(blank=True, default='', max_length=127, verbose_name='FTP Host'),
        ),
    ]
|
import numpy as np
import tensorflow as tf
from ..common.utils import logger
"""
Simple time-series modeling with custom RNN
Some code motivated by:
Hands-On Machine Learning with Scikit-Learn and TensorFlow by Aurelien Geron
https://machinelearningmastery.com/time-train_series-forecasting-long-short-term-memory-network-python/
https://r2rt.com/recurrent-neural-networks-in-tensorflow-i.html
"""
class TsRNNCustom(object):
    """
    Does not use any RNN cells. Simply approximates what goes on inside the RNN cells.

    The loss is only based on the final output, and not the intermediate time outputs.

    NOTE(s):
        (1) Should consider this for only lag-1 time series although the API suports more than 1 lags.
    """

    def __init__(self, n_lags, state_size, n_epochs=1, batch_size=-1, learning_rate=0.01, l2_penalty=0.001):
        """
        :param n_lags: number of time lags fed to the RNN per example
        :param state_size: dimensionality of the hidden state
        :param n_epochs: number of training epochs
        :param batch_size: mini-batch size; < 0 means use the full data set
        :param learning_rate: Adam learning rate
        :param l2_penalty: L2 regularization weight (0 disables it)
        """
        self.state_size = state_size
        self.n_epochs = n_epochs
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.l2_penalty = l2_penalty

        self.n_lags = n_lags
        # The following are populated when fit() builds the graph.
        self.n_inputs = None
        self.init_state = None
        self.X = None
        self.Y = None
        self.err_loss = None
        self.training_op = None
        self.predict_op = None
        self.session = None

    def rnn_cell(self, rnn_input, hidden_state):
        """One RNN time step: tanh(hidden_state.W + rnn_input.U + b).

        Reuses the variables created under the 'rnn_cell' scope in fit(),
        so every time step shares the same weights.
        """
        with tf.variable_scope('rnn_cell', reuse=True):
            W = tf.get_variable('W', shape=[self.state_size, self.state_size], dtype=np.float32)
            b = tf.get_variable('b', shape=[1, self.state_size], dtype=np.float32,
                                initializer=tf.constant_initializer(0.0))
            U = tf.get_variable('U', shape=[self.n_inputs, self.state_size], dtype=np.float32)
            c = tf.get_variable('c', shape=[1, self.n_inputs], dtype=np.float32,
                                initializer=tf.constant_initializer(0.0))
            V = tf.get_variable('V', shape=[self.state_size, self.n_inputs], dtype=np.float32)
            new_state = tf.tanh(tf.matmul(hidden_state, W) + tf.matmul(rnn_input, U) + b)
            # output = tf.matmul(new_state, V) + c
            return new_state  # , output

    def fit(self, ts):
        """Build the computation graph for time series `ts`, then train.

        :param ts: time-series object exposing series_len, dim and
            get_batches() -- assumed project interface, confirm with caller.
        """
        n_data = ts.series_len
        self.n_inputs = ts.dim
        batch_size = n_data if self.batch_size < 0 else self.batch_size
        logger.debug("n_inputs: %d, state_size: %d, n_lag: %d; batch_size: %d" %
                     (self.n_inputs, self.state_size, self.n_lags, batch_size))

        tf.set_random_seed(42)

        self.init_state = tf.placeholder(tf.float32, shape=(None, self.state_size))
        self.X = tf.placeholder(tf.float32, shape=(None, self.n_lags, self.n_inputs))
        self.Y = tf.placeholder(tf.float32, shape=(None, self.n_inputs))

        # rnn_inputs is a list of n_lag tensors with shape [batch_size, n_inputs]
        rnn_inputs = tf.unstack(self.X, axis=1)

        # Create the shared cell variables once; rnn_cell() reuses this scope.
        with tf.variable_scope('rnn_cell'):
            W = tf.get_variable('W', shape=[self.state_size, self.state_size], dtype=np.float32)
            b = tf.get_variable('b', shape=[1, self.state_size], dtype=np.float32,
                                initializer=tf.constant_initializer(0.0))
            U = tf.get_variable('U', shape=[self.n_inputs, self.state_size], dtype=np.float32)
            c = tf.get_variable('c', shape=[1, self.n_inputs], dtype=np.float32,
                                initializer=tf.constant_initializer(0.0))
            V = tf.get_variable('V', shape=[self.state_size, self.n_inputs], dtype=np.float32)

        # Unroll the RNN over the lags; only the final state yields an output.
        state = self.init_state
        for rnn_input in rnn_inputs:
            state = self.rnn_cell(rnn_input, state)
        final_y = tf.matmul(state, V) + c
        self.predict_op = final_y

        with tf.name_scope("loss"):
            self.err_loss = tf.reduce_mean(tf.square(final_y - self.Y))
            if self.l2_penalty > 0:
                l2_loss = self.l2_penalty * (tf.nn.l2_loss(U) + tf.nn.l2_loss(V) + tf.nn.l2_loss(W))
                reg_loss = tf.add(self.err_loss, l2_loss, name="l2loss")
            else:
                reg_loss = self.err_loss
            # optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate)
            optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
            self.training_op = optimizer.minimize(reg_loss)

        return self.train(ts)

    def train(self, ts):
        """Run the training loop; fit() must have built the graph first."""
        # Iterate to the *last* batch, which is then reused as the
        # evaluation set for per-epoch MSE reporting.
        x_train = y_train = None
        for x_train, y_train in ts.get_batches(self.n_lags, self.batch_size, single_output_only=True):
            pass
        z_train = np.zeros(shape=(x_train.shape[0], self.state_size))
        # BUGFIX: with the default batch_size=-1 the original code built
        # np.zeros(shape=(-1, state_size)), which raises ValueError.  Fall
        # back to the evaluation-set size; the feed below slices this to
        # each batch's actual length anyway.
        eff_batch_size = self.batch_size if self.batch_size > 0 else x_train.shape[0]
        zero_state = np.zeros(shape=(eff_batch_size, self.state_size))
        self.session = tf.Session()
        self.session.run(tf.global_variables_initializer())
        for epoch in range(self.n_epochs):
            for i, batch in enumerate(ts.get_batches(self.n_lags, self.batch_size, single_output_only=True)):
                x, y = batch
                self.session.run([self.training_op],
                                 feed_dict={self.X: x, self.Y: y,
                                            self.init_state: zero_state[0:x.shape[0], :]})
            mse = self.session.run(self.err_loss,
                                   feed_dict={self.X: x_train, self.Y: y_train,
                                              self.init_state: z_train})
            logger.debug("epoch: %d, mse: %f" % (epoch, mse))

    def predict(self, start_ts, n_preds=1, true_preds=None):
        """
        Predict with fixed model.

        NOTE: Assumes that each time input is one-dimensional.
        """
        if self.n_inputs != 1:
            raise ValueError("Currently only supports univariate input per time-step")
        seq = list(np.reshape(start_ts, newshape=(-1,)))
        logger.debug("seq: %s" % str(seq))
        preds = list()
        init_state = np.zeros(shape=(1, self.state_size))
        for i in range(n_preds):
            # Feed the most recent n_lags values (teacher forcing when
            # true_preds is given, otherwise roll forward on predictions).
            ts = seq[-self.n_lags:]
            X_batch = np.array(ts).reshape(1, self.n_lags, self.n_inputs)
            yhat = self.session.run(self.predict_op,
                                    feed_dict={self.X: X_batch,
                                               self.init_state: init_state})
            logger.debug("pred: %d %s" % (i, str(yhat)))
            preds.append(yhat[0, 0])
            if true_preds is not None:
                seq.append(true_preds[i])
            else:
                seq.append(yhat[0, 0])
        return np.array(preds)
|
"""
..
Copyright (c) 2014-2017, Magni developers.
All rights reserved.
See LICENSE.rst for further information.
Module providing the public function of the magni.cs.phase_transition
subpackage.
"""
from __future__ import division
import os
import re
import types
from magni.cs.phase_transition import _analysis
from magni.cs.phase_transition import _simulation
from magni.utils.multiprocessing import File as _File
from magni.utils.validation import decorate_validation as _decorate_validation
from magni.utils.validation import validate_generic as _generic
from magni.utils.validation import validate_numeric as _numeric
def determine(algorithm, path, label='default', overwrite=False,
              pre_simulation_hook=None):
    """
    Determine the phase transition of a reconstruction algorithm.

    The phase transition is determined from a number of monte carlo simulations
    on a delta-rho grid for a given problem suite.

    Parameters
    ----------
    algorithm : function
        A function handle to the reconstruction algorithm.
    path : str
        The path of the HDF5 database where the results should be stored.
    label : str
        The label assigned to the phase transition (the default is 'default').
    overwrite : bool
        A flag indicating if an existing phase transition should be overwritten
        if it has the same path and label (the default is False).
    pre_simulation_hook : callable
        A handle to a callable which should be run *just* before the call to
        the reconstruction algorithm (the default is None, which implies that
        no pre hook is run).

    See Also
    --------
    magni.cs.phase_transition._simulation.run : The actual simulation.
    magni.cs.phase_transition._analysis.run : The actual phase determination.

    Notes
    -----
    The `pre_simulation_hook` may be used to setup the simulation to match the
    specfic simulation parameters, e.g. if an oracle estimator is used in the
    reconstruction algorithm. The `pre_simulation_hook` takes one argument
    which is the locals() dict.

    Examples
    --------
    An example of how to use this function is provided in the `examples` folder
    in the `cs-phase_transition.ipynb` ipython notebook file.
    """
    @_decorate_validation
    def validate_input():
        # Validates the enclosing function's arguments via magni's
        # validation framework (reads them from the closure).
        _generic('algorithm', 'function')
        _generic('path', 'string')
        _generic('label', 'string')

        # regular expression matching invalid characters
        match = re.search(r'[^a-zA-Z0-9 ,.\-_/]', label)
        if match is not None:
            msg = 'The value of >>label<<, {!r}, may not contain {!r}.'
            raise RuntimeError(msg.format(label, match.group()))

        # regular expression matching labels without empty path components
        match = re.search(r'^([^/]+/)*[^/]+$', label)
        if match is None:
            msg = "The value of >>label<<, {!r}, may not contain '' folders."
            raise RuntimeError(msg.format(label))

        _numeric('overwrite', 'boolean')

        if (pre_simulation_hook is not None and
                not callable(pre_simulation_hook)):
            raise RuntimeError('The >>pre_simulation_hook<< must be callable')

    validate_input()

    # Drop an existing phase transition stored under this label, or refuse
    # to clobber it when overwrite is not requested.
    if os.path.isfile(path):
        with _File(path, 'a') as f:
            if '/' + label in f:
                if overwrite:
                    f.remove_node('/' + label, recursive=True)
                else:
                    raise IOError("{!r} already uses the label {!r}."
                                  .format(path, label))

    _simulation.run(algorithm, path, label,
                    pre_simulation_hook=pre_simulation_hook)
    _analysis.run(path, label)
|
import weakref
from pyqtgraph import ROI, PolyLineROI, Point
from pyqtgraph.graphicsItems.ROI import Handle, RectROI, LineROI
from qtpy.QtCore import QRectF, QPointF, Qt, Signal, QSize
from qtpy.QtGui import QColor, QPainter, QPainterPath, QBrush, QPainterPathStroker, QCursor
from qtpy.QtWidgets import QAction, QVBoxLayout, QWidget, QMenu
import numpy as np
from itertools import count
from xicam.plugins import OperationPlugin
from pyqtgraph.parametertree import Parameter, parameterTypes, ParameterTree
import pyqtgraph as pg
from xicam.plugins.operationplugin import operation, output_names
class ROIOperation(OperationPlugin):
    """Single point of entry for one or more ROIs, generates a label array."""

    name = 'ROI'
    output_names = ('roi', 'labels')
    input_names = ('images', 'image_item', 'rois')

    def __init__(self, *rois: ROI):
        # NOTE(review): *rois is accepted but unused; ROIs are passed to
        # _func at execution time instead.
        super(ROIOperation, self).__init__()
        self._param = None  # type: Parameter
        self.name = "ROI"  # f"ROI #{self.ROI.index}"

    def _func(self, images, image_item=None, rois=None):
        """Merge the label arrays of all `rois` into a single label array.

        Returns
        -------
        roi_masks : list of bool arrays, one mask per ROI
        label_array : array labelling each ROI's region with a distinct integer
        """
        # Create zeros label array to insert new labels into (if multiple ROIs)
        label_array = np.zeros(images[0].shape)
        roi_masks = []
        for roi in rois:
            # TODO: Should label array be astype(np.int) (instead of float)?
            label = roi.getLabelArray(images, image_item)
            # Store the boolean mask of each label array.
            # BUGFIX: np.bool was removed in NumPy 1.24; use builtin bool.
            roi_mask = label.astype(bool)
            roi_masks.append(roi_mask)
            # Grab the current label array maximum value (so we can increment multiple labels accordingly)
            label_array_max = label_array.max()
            label = np.where(label > 0, label + label_array_max, label)
            # For single roi, our max will be 0 (since label_array is just np.zeros so far, hasn't been modified)
            if label_array_max == 0:
                label_array = label
            else:
                # FIXME right now, if labels overlap, the newer label wins
                # Fill any currently unlabelled pixels with the new label...
                label_array = np.where(label_array == 0, label, label_array)
                # ...and let the newer ROI take precedence where both overlap.
                label_array = np.where(label_array > 0,
                                       np.where(label > 0, label, label_array),
                                       label_array)
        return roi_masks, label_array
# TODO: might need this for adjusting roi's manually
# @property
# def parameter(self):
# if not self._param:
# self._param = self.roi.parameter()
# return self._param
class WorkflowableROI(ROI):
    """ROI that owns a ROIOperation so it can participate in a workflow."""
    # FIXME: do we still want this for our (e.g.) CorrelationStage process_actions???

    def __init__(self, *args, **kwargs):
        super(WorkflowableROI, self).__init__(*args, **kwargs)
        self.operation = ROIOperation(self)
        self._param = None

    def parameter(self) -> Parameter:
        # Subclasses must expose their editable state as a Parameter tree.
        raise NotImplementedError

    def getMenu(self):
        """Build (once) and return the right-click context menu."""
        if self.menu is None:
            self.menu = QMenu()
            self.menu.setTitle("ROI")
            if self.removable:  # FIXME: if the removable attr is changed, the menu will not react and remAct won't show
                remAct = QAction("Remove ROI", self.menu)
                remAct.triggered.connect(self.removeClicked)
                self.menu.addAction(remAct)
                self.menu.remAct = remAct
            editAct = QAction("Edit ROI", self.menu)
            editAct.triggered.connect(self.edit_parameters)
            self.menu.addAction(editAct)
            self.menu.editAct = editAct
            self.menu.setEnabled(True)
        return self.menu

    def contextMenuEnabled(self):
        # Always allow the custom context menu.
        return True

    def edit_parameters(self):
        """Pop up a frameless parameter tree at the cursor for editing."""
        class DefocusParameterTree(QWidget):
            # Thin wrapper giving the ParameterTree popup-window behavior.
            def __init__(self, *args, **kwargs):
                super(DefocusParameterTree, self).__init__(*args, **kwargs)
                self.setLayout(QVBoxLayout())
                self.parameter_tree = ParameterTree()
                self.layout().addWidget(self.parameter_tree)
                self.layout().setContentsMargins(0, 0, 0, 0)

            def setParameters(self, *args, **kwargs):
                self.parameter_tree.setParameters(*args, **kwargs)

        # self.parameter_tree = DefocusParameterTree()
        self.parameter_tree = DefocusParameterTree()
        self.parameter_tree.setParameters(self.parameter())
        self.parameter_tree.setWindowFlags(Qt.FramelessWindowHint | Qt.Popup)
        # self.parameter_tree = QLabel('blah')
        self.parameter_tree.show()
        self.parameter_tree.activateWindow()
        self.parameter_tree.raise_()
        self.parameter_tree.move(QCursor().pos())
        self.parameter_tree.setFocus(Qt.PopupFocusReason)
        self.parameter_tree.resize(QSize(300, 400))
# MIXIN!~
# Now with 100% more ROI!
class BetterROI(WorkflowableROI):
    """Mixin-style ROI with restyled handles, hover highlight, and removal."""

    roi_count = count(1)  # class-wide counter used to number ROI instances
    index = None

    def __init__(self, *args, **kwargs):
        super(BetterROI, self).__init__(*args, **kwargs)
        # # BetterROI removable by default
        # super(BetterROI, self).__init__(*args, removable=removable, **kwargs)
        self.index = next(self.roi_count)
        self._restyle()

        # Remove the roi from the view when requested to be removed
        self.sigRemoveRequested.connect(lambda roi: self._viewBox().removeItem(roi))
        self._name = "ROI"

    def __str__(self):
        return f"ROI #{self.index} ({self._name})"

    def _restyle(self):
        # Thicker pen and double-size handles for easier interaction.
        self.currentPen.setWidth(2)

        for handledict in self.handles:  # type: dict
            handle = handledict["item"]  # type: Handle
            handle.radius = handle.radius * 2
            handle.pen.setWidth(2)
            handle.buildPath()

    def hoverEvent(self, ev):
        """Highlight the ROI in yellow while the mouse can interact with it."""
        hover = False
        if not ev.isExit():
            if ev.acceptDrags(Qt.LeftButton):
                hover = True
            for btn in [Qt.LeftButton, Qt.RightButton, Qt.MidButton]:
                if int(self.acceptedMouseButtons() & btn) > 0 and ev.acceptClicks(btn):
                    hover = True

        if hover:
            self.currentPen = pg.mkPen(255, 255, 0, width=2)
        else:
            self.currentPen = self.pen
        self.update()

    def valueChanged(self, sender, changes):
        # Mirror parameter-tree edits onto same-named ROI attributes.
        for change in changes:
            setattr(self, change[0].name(), change[2])
        self.stateChanged()
class BetterPolyLineROI(BetterROI, PolyLineROI):
    """Polyline ROI with the BetterROI styling/behavior mixed in."""

    def __repr__(self):
        return f"ROI #{self.index}"
class QCircRectF(QRectF):
    """A square QRectF defined by a center point and a radius."""

    def __init__(self, center=(0.0, 0.0), radius=1.0, rect=None):
        self._scale = 1.0
        if rect is not None:
            # Adopt an existing rect; `radius` is ignored in this branch.
            self.center = rect.center()
            super(QCircRectF, self).__init__(rect)
        else:
            self.center = QPointF(*center)

            left = self.center.x() - radius
            top = self.center.y() - radius
            bottom = self.center.y() + radius
            right = self.center.x() + radius

            super(QCircRectF, self).__init__(QPointF(left, top), QPointF(right, bottom))

    @property
    def scale(self):
        return self._scale

    @scale.setter
    def scale(self, value):
        self._scale = value
        # Scaling the radius updates all four edges via the radius setter.
        # BUGFIX: the original followed this with set*() calls on a
        # nonexistent ``self._radius`` attribute, raising AttributeError
        # whenever scale was assigned.
        self.radius *= value

    @property
    def radius(self):
        # Half the side length; the rect is kept square by the setter.
        return (self.right() - self.left()) * 0.5

    @radius.setter
    def radius(self, radius):
        self.setLeft(self.center.x() - radius)
        self.setTop(self.center.y() - radius)
        self.setBottom(self.center.y() + radius)
        self.setRight(self.center.x() + radius)
class BetterCrosshairROI(BetterROI):
    """A crosshair ROI whose position is at the center of the crosshairs. By default, it is scalable, rotatable and translatable."""
    # (Moved to the docstring position; it previously sat after sigMoved as a
    # bare string expression, so it was not picked up as the class docstring.)

    sigMoved = Signal(object)

    def __init__(self, pos=None, size=None, parent=None, **kwargs):
        assert parent
        # Idiom fix: compare to None with `is` rather than `==`.
        if size is None:
            size = [0, 0]
        if pos is None:
            pos = [0, 0]
        self._shape = None
        linepen = pg.mkPen("#FFA500", width=2)
        # Infinite crosshair lines that track the ROI's position.
        self._vline = pg.InfiniteLine((0, 0), angle=90, movable=False, pen=linepen)
        self._hline = pg.InfiniteLine((0, 0), angle=0, movable=False, pen=linepen)
        super(BetterCrosshairROI, self).__init__(pos, size, parent=parent, **kwargs)
        parent.addItem(self)
        self.sigRegionChanged.connect(self.invalidate)
        self.addTranslateHandle(Point(0, 0))
        self.aspectLocked = True
        # NOTE(review): one line is added to `parent` and the other to its
        # view box — looks inconsistent; confirm which is intended.
        parent.addItem(self._vline)
        parent.getViewBox().addItem(self._hline)
        self._name = "Crosshair ROI"

    def translate(self, *args, **kwargs):
        super(BetterCrosshairROI, self).translate(*args, **kwargs)
        # Notify listeners of the new center position.
        self.sigMoved.emit(self.pos())

    def stateChanged(self, finish=True):
        super(BetterCrosshairROI, self).stateChanged()
        # Keep the crosshair lines glued to the ROI position.
        self._hline.setPos(self.pos().y())
        self._vline.setPos(self.pos().x())

    def invalidate(self):
        # Drop the cached shape; it is recomputed lazily in shape().
        self._shape = None
        self.prepareGeometryChange()

    def boundingRect(self):
        return self.shape().boundingRect()

    def shape(self):
        if self._shape is None:
            radius = self.getState()['size'][1]
            p = QPainterPath()
            p.moveTo(Point(0, -radius))
            p.lineTo(Point(0, radius))
            p.moveTo(Point(-radius, 0))
            p.lineTo(Point(radius, 0))
            # Stroke in device coordinates so the hit area keeps a constant
            # on-screen width regardless of zoom.
            p = self.mapToDevice(p)
            stroker = QPainterPathStroker()
            stroker.setWidth(10)
            outline = stroker.createStroke(p)
            self._shape = self.mapFromDevice(outline)
        return self._shape
class ArcROI(BetterROI):
    """
    A washer-wedge-shaped ROI for selecting q-ranges.

    The wedge is defined by an inner radius, an outer radius, an angular
    center (``thetacenter``) and an angular width (``thetawidth``), and is
    manipulated via three free handles (inner, outer, width).
    """
    def __init__(self, pos, radius, **kwargs):
        """Create the arc centered at *pos* with outer radius *radius* (external units)."""
        # QtGui.QGraphicsRectItem.__init__(self, 0, 0, size[0], size[1])
        r = QCircRectF(pos, radius)
        super(ArcROI, self).__init__(r.center, radius, **kwargs)
        # self.addRotateHandle([1.0, 0.5], [0.5, 0.5])
        # self.addScaleHandle([0.5*2.**-0.5 + 0.5, 0.5*2.**-0.5 + 0.5], [0.5, 0.5])
        self.startangle = 30
        self.arclength = 120
        # label/units used by the parameter tree (subclasses may override)
        self.radius_name = 'Radius'
        self.radius_units = 'px'
        self.aspectLocked = True
        # only these values are in external space, others are internal (-.5,.5)
        self.innerradius = 0.5 * radius
        self.outerradius = radius
        self.thetawidth = 120.0
        self.thetacenter = 90.0
        # three free handles: inner radius, outer radius, angular width
        self.innerhandle = self.addFreeHandle([0.0, self.innerradius / self.outerradius], [0, 0])
        self.outerhandle = self.addFreeHandle([0.0, 1], [0, 0])
        self.widthhandle = self.addFreeHandle(np.array([-0.433 * 2, 0.25 * 2]))
        # cached QPainterPath of the filled wedge, set during paint()
        self.path = None
        self._param = None  # type: Parameter
        self._restyle()
        self._name = "Arc ROI"
    def boundingRect(self):
        # Square bounding box large enough to contain the full outer circle.
        size = self.outerradius
        return QRectF(-size, -size, size * 2, size * 2).normalized()
    def movePoint(self, handle, pos, modifiers=Qt.KeyboardModifier(), finish=True, coords="parent"):
        # After the base class moves the handle, re-derive the arc parameters.
        super(ArcROI, self).movePoint(handle, pos, modifiers, finish, coords)
        self._update_internal_parameters(handle)
    def _update_internal_parameters(self, handle=None):
        """Update inner/outer radius and angular parameters from the moved *handle*."""
        # Set internal parameters
        if handle is self.innerhandle:
            # inner radius may never exceed the outer radius
            self.innerradius = min(self.innerhandle.pos().length(), self.outerhandle.pos().length())
        elif handle is self.outerhandle:
            self.innerradius = self.innerhandle.pos().length()
            self.outerradius = self.outerhandle.pos().length()
        if handle is self.outerhandle:
            # angle of the outer handle defines the wedge center direction
            self.thetacenter = self.outerhandle.pos().angle(Point(1, 0))
        elif handle is self.widthhandle:
            # width handle sits at half the angular width from the center line
            self.thetawidth = max(2 * self.widthhandle.pos().angle(self.innerhandle.pos()), 0)
        self.handleChanged()
    def paint(self, p, opt, widget):
        """Draw the wedge outline and translucent fill; also snaps handles to the constrained geometry."""
        # Enforce constraints on handles
        r2 = Point(np.cos(np.radians(self.thetacenter)),
                   np.sin(np.radians(self.thetacenter)))  # chi center direction vector
        # constrain innerhandle to be parallel to outerhandle, and shorter than outerhandle
        self.innerhandle.setPos(r2 * self.innerradius)
        if self.innerhandle.pos().length() > self.outerhandle.pos().length():
            self.innerhandle.setPos(r2 * self.outerradius)
        # constrain widthhandle to be counter-clockwise from innerhandle
        widthangle = np.radians(self.thetawidth / 2 + self.thetacenter)
        widthv = Point(np.cos(widthangle), np.sin(widthangle)) if self.thetawidth > 0 else r2
        # constrain widthhandle to half way between inner and outerhandles
        self.widthhandle.setPos(widthv * (self.innerradius + self.outerradius) / 2)
        # constrain handles to base values
        self.outerhandle.setPos(r2 * self.outerradius)
        pen = self.currentPen
        pen.setColor(QColor(0, 255, 255))
        pen.setStyle(Qt.SolidLine)
        p.setPen(pen)
        r = self.boundingRect()
        # p.drawRect(r)
        p.setRenderHint(QPainter.Antialiasing)
        # painter is scaled so subsequent drawing uses the internal (-.5,.5) space
        p.scale(r.width(), r.height())  # workaround for GL bug
        centerangle = self.innerhandle.pos().angle(Point(1, 0))
        startangle = centerangle - self.thetawidth / 2
        endangle = centerangle + self.thetawidth / 2
        # outer arc (internal radius 0.5 == outerradius after scaling)
        r = QCircRectF(radius=0.5)
        if self.innerradius < self.outerradius and self.thetawidth > 0:
            p.drawArc(r, -startangle * 16, -self.thetawidth * 16)
        # inner arc, scaled by the inner/outer radius ratio
        radius = self.innerradius / self.outerradius / 2
        r = QCircRectF()
        r.radius = radius
        if self.innerradius < self.outerradius and self.thetawidth > 0:
            p.drawArc(r, -startangle * 16, -self.thetawidth * 16)
        # dashed radial edges of the wedge
        pen.setStyle(Qt.DashLine)
        p.setPen(pen)
        p.drawLine(QPointF(0.0, 0.0), self.widthhandle.pos().norm() / 2)
        r1v = self.innerhandle.pos().norm()
        # mirror of the width handle across the center line (other wedge edge)
        p.drawLine(QPointF(0.0, 0.0), (-1.0 * self.widthhandle.pos() + 2 * self.widthhandle.pos().dot(r1v) * r1v).norm() / 2)
        pen.setStyle(Qt.SolidLine)
        if self.innerradius < self.outerradius and self.thetawidth > 0:
            # build and fill the closed wedge path (inner arc -> edge -> outer arc -> edge)
            path = QPainterPath()
            path.moveTo((-1.0 * self.widthhandle.pos() + 2 * self.widthhandle.pos().dot(r1v) * r1v).norm() / 2)
            path.arcTo(r, -startangle, -self.thetawidth)  # inside
            path.lineTo(self.widthhandle.pos().norm() / 2)  # ? side
            path.arcTo(QCircRectF(radius=0.5), -endangle, self.thetawidth)  # outside
            path.lineTo((-1.0 * self.widthhandle.pos() + 2 * self.widthhandle.pos().dot(r1v) * r1v).norm() / 2)
            self.path = path
            p.fillPath(path, QBrush(QColor(0, 255, 255, 20)))
    def getArrayRegion(self, arr, img=None):
        """
        Return the result of ROI.getArrayRegion() masked by the arc shape
        of the ROI. Regions outside the arc are set to 0.
        """
        w = arr.shape[-2]
        h = arr.shape[-1]
        centerangle = self.outerhandle.pos().angle(Point(1, 0))
        startangle = centerangle - self.thetawidth / 2
        # generate an ellipsoidal mask
        # NOTE(review): x is compared against pos().y() and y against pos().x()
        # (axes swapped) — presumably intentional for row-major images; confirm
        # against the imageAxisOrder used by callers.
        mask = np.fromfunction(
            lambda y, x: (
                self.innerhandle.pos().length() < (
                    (x - self.pos().y()) ** 2.0 + (y - self.pos().x()) ** 2.0) ** 0.5
            )
            & (((x - self.pos().y()) ** 2.0 + (
                y - self.pos().x()) ** 2.0) ** 0.5 < self.outerhandle.pos().length())
            & ((np.degrees(np.arctan2(y - self.pos().x(), x - self.pos().y())) - startangle) % 360 > 0)
            & ((np.degrees(
                np.arctan2(y - self.pos().x(), x - self.pos().y())) - startangle) % 360 < self.thetawidth),
            (w, h),
        )
        return arr * mask
    def getLabelArray(self, arr, img: pg.ImageItem = None):
        """Return a label array (ones and zeros) for the masked array region defined by the ROI."""
        # NOTE(review): pixels inside the ROI whose data value is exactly 0
        # also get label 0 here, since the mask is recovered from the product.
        masked_arr = self.getArrayRegion(arr, img)
        return (masked_arr != 0).astype(np.uint8)
    def shape(self):
        """Return the wedge outline in external coordinates (used for hitbox for menu)."""
        # (used for hitbox for menu)
        centerangle = self.innerhandle.pos().angle(Point(1, 0))
        startangle = centerangle - self.thetawidth / 2
        endangle = centerangle + self.thetawidth / 2
        r1v = self.innerhandle.pos().norm()
        # Draw out the path in external space
        path = QPainterPath()
        path.moveTo(-1.0 * self.widthhandle.pos() + 2 * self.widthhandle.pos().dot(r1v) * r1v)
        path.arcTo(QCircRectF(radius=self.innerradius), -startangle, -self.thetawidth)  # inside
        path.lineTo(self.widthhandle.pos())  # ? side
        path.arcTo(QCircRectF(radius=self.outerradius), -endangle, self.thetawidth)  # outside
        path.lineTo(-1.0 * self.widthhandle.pos() + 2 * self.widthhandle.pos().dot(r1v) * r1v)
        return path
    def parameter(self):
        """Lazily build and return the Parameter tree exposing the arc's editable values."""
        if not self._param:
            self._param = parameterTypes.GroupParameter(
                name="Arc ROI",
                children=[
                    parameterTypes.SimpleParameter(
                        title=f"{self.radius_name} Minimum", name="innerradius", value=self.innerradius, type="float",
                        units=self.radius_units, min=0
                        # "Å⁻¹"
                    ),
                    parameterTypes.SimpleParameter(
                        title=f"{self.radius_name} Maximum", name="outerradius", value=self.outerradius, type="float",
                        units=self.radius_units, min=0
                    ),
                    parameterTypes.SimpleParameter(
                        title="χ Width", name="thetawidth", value=self.thetawidth, type="float", units="°"
                    ),
                    parameterTypes.SimpleParameter(
                        title="χ Center", name="thetacenter", value=self.thetacenter, type="float", siSuffix="°"
                    ),
                ],
            )
            self._param.sigTreeStateChanged.connect(self.valueChanged)
        return self._param
    def handleChanged(self):
        # Push the recomputed geometry into the parameter tree.
        self.parameter().child("innerradius").setValue(self.innerradius)
        self.parameter().child("outerradius").setValue(self.outerradius)
        self.parameter().child("thetawidth").setValue(self.thetawidth)
        self.parameter().child("thetacenter").setValue(self.thetacenter)
class ArcQRoi(ArcROI):
    # Alias of ArcROI; presumably intended for q-space selections — no
    # behavioral differences are defined here.
    ...
class SegmentedArcROI(ArcROI):
    """
    A washer-wedge-shaped ROI for selecting q-ranges, subdivided into a grid
    of ``segments_radial`` x ``segments_angular`` labeled segments.
    """
    def __init__(self, pos, radius, **kwargs):
        """Create the segmented arc at *pos* with outer radius *radius*."""
        # QtGui.QGraphicsRectItem.__init__(self, 0, 0, size[0], size[1])
        # segment counts must be set before ArcROI.__init__ builds the parameter tree
        self.segments_radial = 3
        self.segments_angular = 3
        super(SegmentedArcROI, self).__init__(pos, radius, **kwargs)
        self._name = "Segmented Arc ROI"
    def paint(self, p, opt, widget):
        """Draw the base arc, then dashed internal segment boundaries."""
        super(SegmentedArcROI, self).paint(p, opt, widget)
        pen = self.currentPen
        # pen.setColor(QColor(255, 0, 255))
        pen.setStyle(Qt.DashLine)
        p.setPen(pen)
        centerangle = self.innerhandle.pos().angle(Point(1, 0))
        startangle = centerangle - self.thetawidth / 2
        endangle = centerangle + self.thetawidth / 2
        # interior boundaries only (endpoints excluded via endpoint=False + [1:])
        segment_angles = np.linspace(startangle, endangle, self.segments_angular, endpoint=False)[1:]
        segment_radii = np.linspace(self.innerradius, self.outerradius, self.segments_radial, endpoint=False)[1:]
        r = QCircRectF(radius=0.5)
        radius = self.innerradius / self.outerradius / 2
        r = QCircRectF()
        r.radius = radius
        # draw segments
        for segment_radius in segment_radii:
            # radii are scaled into the internal (-.5,.5) space used by paint()
            r.radius = segment_radius / self.outerradius / 2
            # p.drawRect(r)
            p.drawArc(r, -startangle * 16, -self.thetawidth * 16)
        if self.innerradius < self.outerradius:
            for segment_angle in segment_angles:
                segment_vector = QPointF(np.cos(np.radians(segment_angle)), np.sin(np.radians(segment_angle)))
                p.drawLine(segment_vector * self.innerradius / self.outerradius / 2, segment_vector / 2)
    def getLabelArray(self, arr, img: pg.ImageItem = None):
        """Return an integer label array: 0 outside the ROI, 1..R*A inside,
        one distinct label per (radial, angular) segment."""
        labels = np.zeros(arr.shape[-2:])
        centerangle = -self.outerhandle.pos().angle(Point(0, 1))
        startangle = centerangle - self.thetawidth / 2
        endangle = centerangle + self.thetawidth / 2
        radii = np.linspace(self.innerradius, self.outerradius, self.segments_radial + 1, endpoint=True)
        angles = np.linspace(startangle, endangle, self.segments_angular + 1, endpoint=True)
        start_radii = radii[:-1]
        end_radii = radii[1:]
        start_angles = angles[:-1]
        end_angles = angles[1:]
        for i, (start_radius, end_radius) in enumerate(zip(start_radii, end_radii)):
            for j, (start_angle, end_angle) in enumerate(zip(start_angles, end_angles)):
                # generate an ellipsoidal mask
                mask = np.fromfunction(
                    lambda x, y: (start_radius <= ((x - self.pos().y()) ** 2.0 + (y - self.pos().x()) ** 2.0) ** 0.5)
                                 & (((x - self.pos().y()) ** 2.0 + (y - self.pos().x()) ** 2.0) ** 0.5 <= end_radius)
                                 & ((np.degrees(
                        np.arctan2(y - self.pos().x(), x - self.pos().y())) - start_angle) % 360 >= 0)
                                 & ((np.degrees(np.arctan2(y - self.pos().x(),
                                                           x - self.pos().y())) - start_angle) % 360 <= end_angle - start_angle),
                    arr.shape[-2:], )
                # BUG FIX: j indexes the angular bins, so the row stride must be
                # segments_angular (was segments_radial, which produced duplicate
                # labels whenever segments_radial != segments_angular).
                labels[mask] = i * self.segments_angular + j + 1
        return labels
    def parameter(self):
        """Lazily build the Parameter tree, adding segment counts to the arc parameters."""
        if not self._param:
            self._param = parameterTypes.GroupParameter(
                name="Segmented Arc ROI",
                children=[
                    parameterTypes.SimpleParameter(
                        title="χ Segments", name="segments_angular", value=self.segments_angular, type="int", min=1
                    ),
                    parameterTypes.SimpleParameter(
                        title=f"{self.radius_name} segments", name="segments_radial", value=self.segments_radial,
                        type="int", min=1
                    ),
                    parameterTypes.SimpleParameter(
                        title=f"{self.radius_name} Minimum", name="innerradius", value=self.innerradius, type="float",
                        units=self.radius_units, min=0
                    ),
                    parameterTypes.SimpleParameter(
                        title=f"{self.radius_name} Maximum", name="outerradius", value=self.outerradius, type="float",
                        units=self.radius_units, min=0
                    ),
                    parameterTypes.SimpleParameter(
                        title="χ Width", name="thetawidth", value=self.thetawidth, type="float", units="°"
                    ),
                    parameterTypes.SimpleParameter(
                        title="χ Center", name="thetacenter", value=self.thetacenter, type="float", siSuffix="°"
                    ),
                ],
            )
            self._param.sigTreeStateChanged.connect(self.valueChanged)
        return self._param
class BetterRectROI(BetterROI, RectROI):
    """Rectangular ROI exposing width/height through a parameter tree."""
    def __init__(self, *args, pen=pg.mkPen(QColor(0, 255, 255)), **kwargs):
        super(BetterRectROI, self).__init__(*args, pen=pen, **kwargs)
        # the single scale handle created by RectROI
        self.handle = self.handles[0]
        self._name = "Rectangle ROI"
    @property
    def width(self):
        # NOTE(review): width is handle_x * size_x; this equals size_x only
        # when the handle sits at relative position (1, 1) — confirm.
        return self.handle["pos"].x() * self.size().x()
    @width.setter
    def width(self, value):
        # NOTE(review): multiplies by handle x again, so round-tripping
        # width = self.width is only identity when handle x == 1 — verify.
        width = self.handle["pos"].x() * value
        size = self.size()
        size.setX(width)
        self.state['size'] = size
        self.stateChanged()
    @property
    def height(self):
        return self.handle["pos"].y() * self.size().y()
    @height.setter
    def height(self, value):
        height = self.handle["pos"].y() * value
        size = self.size()
        size.setY(height)
        self.state['size'] = size
        self.stateChanged()
    def __reduce__(self):
        # FIXME: very simple reduce for allowing copy (to help with weakref management)
        return self.__class__, (self.pos(), self.size())
    def movePoint(self, handle, pos, modifiers=Qt.KeyboardModifier(), finish=True, coords="parent"):
        # Re-derive width/height after the base class has moved the handle.
        super(BetterRectROI, self).movePoint(handle, pos, modifiers, finish, coords)
        self.width = self.handle["pos"].x() * self.size().x()
        self.height = self.handle["pos"].y() * self.size().y()
        self.handleChanged()
    def parameter(self) -> Parameter:
        """Lazily build and return the width/height parameter tree."""
        if not self._param:
            self._param = parameterTypes.GroupParameter(
                name="Rectangular ROI",
                children=[
                    parameterTypes.SimpleParameter(title="Width", name="width", value=self.width, type="float", units="px"),
                    parameterTypes.SimpleParameter(
                        title="Height", name="height", value=self.height, type="float", units="px"
                    ),
                ],
            )
            self._param.sigTreeStateChanged.connect(self.valueChanged)
        return self._param
    def handleChanged(self):
        # Push the recomputed geometry into the parameter tree.
        self.parameter().child("width").setValue(self.width)
        self.parameter().child("height").setValue(self.height)
    def getLabelArray(self, arr, img: pg.ImageItem = None):
        """Return a 0/1 label array marking pixels whose centers fall inside the rectangle."""
        # TODO : make more generic for all rectangle ROIs, segmented (multi-labeled) and non-segmented (single-labeled)
        dim_0, dim_1 = arr.shape[-2:]
        min_x = self.pos().x()
        min_y = self.pos().y()
        max_x = self.size().x() + min_x
        max_y = self.size().y() + min_y
        mask = np.zeros(arr.shape[-2:])
        # +0.5 tests the pixel center rather than its corner
        label_mask = np.fromfunction(
            lambda y, x: (x + 0.5 > min_x) & (x + 0.5 < max_x) & (y + 0.5 > min_y) & (y + 0.5 < max_y), (dim_0, dim_1)
        )
        mask[label_mask] = 1
        # Invert y
        # FIXME -- use image transform above with passed image item
        return mask
class LineROI(BetterROI, LineROI):
    """Line ROI exposing position/rotation/length/width through a parameter tree.

    NOTE(review): this class shadows the imported pyqtgraph ``LineROI`` it
    inherits from; later references to the name resolve to this subclass.
    """
    def __init__(self, *args, pen=pg.mkPen(QColor(0, 255, 255)), **kwargs):
        super(LineROI, self).__init__(*args, pen=pen, **kwargs)
        self._update_state()
        self._name = "Line ROI"
    def _update_state(self):
        # Mirror the ROI geometry into plain attributes used by the parameter tree.
        self.width = self.size().y()
        self.length = self.size().x()
        self.rotation = self.angle()
        self.center_x = self.pos().x()
        self.center_y = self.pos().y()
    def movePoint(self, handle, pos, modifiers=Qt.KeyboardModifier(), finish=True, coords="parent"):
        super(LineROI, self).movePoint(handle, pos, modifiers, finish, coords)
        self._update_state()
        self.handleChanged()
    def mouseDragEvent(self, ev):
        # Keep attribute mirror in sync while the whole ROI is dragged.
        super(LineROI, self).mouseDragEvent(ev)
        self._update_state()
    def paint(self, p, opt, widget):
        # NOTE(review): paint() writes the mirrored attributes back into the
        # ROI (size/angle/pos) so that parameter-tree edits take effect on the
        # next repaint — a draw method with side effects; confirm intended.
        self.setSize(QPointF(self.length, self.width))
        self.setAngle(self.rotation)
        self.setPos(QPointF(self.center_x, self.center_y))
        super(LineROI, self).paint(p, opt, widget)
    def parameter(self) -> Parameter:
        """Lazily build and return the parameter tree for the line geometry."""
        if not self._param:
            self._param = parameterTypes.GroupParameter(
                name="Line ROI",
                children=[
                    parameterTypes.SimpleParameter(
                        title="Center X", name="center_x", value=self.center_x, type="float", units="px"
                    ),
                    parameterTypes.SimpleParameter(
                        title="Center Y", name="center_y", value=self.center_y, type="float", units="px"
                    ),
                    parameterTypes.SimpleParameter(
                        title="Rotation Angle", name="rotation", value=self.rotation, type="float", units="px"
                    ),
                    parameterTypes.SimpleParameter(
                        title="Length", name="length", value=self.length, type="float", units="px"
                    ),
                    parameterTypes.SimpleParameter(title="Width", name="width", value=self.width, type="float", units="px"),
                ],
            )
            self._param.sigTreeStateChanged.connect(self.valueChanged)
        return self._param
    def handleChanged(self):
        # Push the mirrored geometry into the parameter tree.
        self.parameter().child("center_x").setValue(self.center_x)
        self.parameter().child("center_y").setValue(self.center_y)
        self.parameter().child("rotation").setValue(self.rotation)
        self.parameter().child("width").setValue(self.width)
        self.parameter().child("length").setValue(self.length)
class SegmentedRectROI(BetterRectROI):
    """Rectangular ROI subdivided into ``segments_h`` x ``segments_v`` labeled cells."""
    def __init__(self, *args, **kwargs):
        # segment counts must exist before the base class builds the parameter tree
        self.segments_h = 2
        self.segments_v = 2
        super(SegmentedRectROI, self).__init__(*args, **kwargs)
        self._name = "Segmented Rectangle ROI"
    def parameter(self) -> Parameter:
        """Lazily build the parameter tree: width/height plus segment counts."""
        if not self._param:
            self._param = parameterTypes.GroupParameter(
                name="Rectangular ROI",
                children=[
                    parameterTypes.SimpleParameter(title="Width", name="width", value=self.width, type="float", units="px"),
                    parameterTypes.SimpleParameter(
                        title="Height", name="height", value=self.height, type="float", units="px"
                    ),
                    parameterTypes.SimpleParameter(
                        title="Horizontal Segments", name="segments_h", value=self.segments_h, type="int"
                    ),
                    parameterTypes.SimpleParameter(
                        title="Vertical Segments", name="segments_v", value=self.segments_v, type="int"
                    ),
                ],
            )
            self._param.sigTreeStateChanged.connect(self.valueChanged)
        return self._param
    def getLabelArray(self, arr, img=None):
        """
        Return an integer label array for the rectangle: 0 outside, and
        1..segments_h*segments_v inside, one label per grid cell.
        """
        # BUG FIX: use the trailing two dimensions so stacked (>2-D) arrays
        # work, consistent with BetterRectROI.getLabelArray.
        w, h = arr.shape[-2:]
        min_x = self.pos().x()
        min_y = self.pos().y()
        max_x = self.size().x() + min_x
        max_y = self.size().y() + min_y
        segment_bin_x = (max_x - min_x) / self.segments_h
        segment_bin_y = (max_y - min_y) / self.segments_v
        # BUG FIX: was np.zeros_like(arr), which produced a mask with the full
        # (possibly 3-D) input shape and broke the 2-D label assignment below.
        mask = np.zeros(arr.shape[-2:], dtype=arr.dtype)
        for i in range(self.segments_h):
            for j in range(self.segments_v):
                # generate a square mask for cell (i, j); +0.5 tests pixel centers
                label_mask = np.fromfunction(
                    lambda y, x: (x + 0.5 > min_x + i * segment_bin_x)
                                 & (x + 0.5 < min_x + (i + 1) * segment_bin_x)
                                 & (y + 0.5 > min_y + j * segment_bin_y)
                                 & (y + 0.5 < min_y + (j + 1) * segment_bin_y),
                    (w, h),
                )
                mask[label_mask] = 1 + i + j * self.segments_h
        return mask
    def paint(self, p, opt, widget):
        """Draw the base rectangle, then dashed internal segment boundaries."""
        super(SegmentedRectROI, self).paint(p, opt, widget)
        min_x = self.pos().x()
        min_y = self.pos().y()
        max_x = self.size().x() + min_x
        max_y = self.size().y() + min_y
        segment_bin_x = (max_x - min_x) / self.segments_h
        segment_bin_y = (max_y - min_y) / self.segments_v
        self.currentPen.setStyle(Qt.DashLine)
        p.setPen(self.currentPen)
        # boundaries are drawn in the ROI's normalized (0..1) local space
        for i in range(1, self.segments_h):
            p.drawLine(QPointF(1.0 / self.segments_h * i, 0), QPointF(1 / self.segments_h * i, 1))
        for j in range(1, self.segments_v):
            p.drawLine(QPointF(0, 1 / self.segments_v * j), QPointF(1, 1 / self.segments_v * j))
        self.currentPen.setStyle(Qt.SolidLine)
if __name__ == "__main__":
    # Interactive smoke test: show an image with a rectangle ROI and mirror
    # its label array into a second ImageView on every region change.
    from qtpy.QtWidgets import QApplication, QLabel, QVBoxLayout, QAbstractScrollArea
    qapp = QApplication([])
    import pyqtgraph as pg
    pg.setConfigOption('imageAxisOrder', 'row-major')
    imageview = pg.ImageView()
    data = np.random.random((100, 100))
    imageview.setImage(data)
    # roi = ArcROI(pos=(50, 50), radius=50)
    roi = BetterRectROI(pos=(0, 0), size=(10, 10))
    # roi = SegmentedArcROI(pos=(50,50), radius=50)
    # roi = BetterCrosshairROI((0, 0), parent=imageview.view)
    imageview.view.addItem(roi)
    imageview.show()
    iv2 = pg.ImageView()
    iv2.show()
    def show_labels():
        # refresh the label view whenever the ROI geometry changes
        iv2.setImage(roi.getLabelArray(data, imageview.imageItem))
    roi.sigRegionChanged.connect(show_labels)
    qapp.exec_()
|
import os

# scenario name for log file
scenario_name = "20mus"

# material parameters
# --------------------
Conductivity = 3.828  # [mS/cm] sigma, conductivity
Am = 500.0            # [cm^-1] surface area to volume ratio
Cm = 0.58             # [uF/cm^2] membrane capacitance, (1 = fast twitch, 0.58 = slow twitch)
# diffusion prefactor = Conductivity/(Am*Cm)

# timing and activation parameters
# -----------------
# motor units from paper Klotz2019 "Modelling the electrical activity of skeletal muscle tissue using a multi-domain approach"
import random
random.seed(0)  # ensure that random numbers are the same on every rank

import numpy as np

n_motor_units = 20  # number of motor units

# Geometric interpolation value(i) = c1 + c2*1.02**i between a min and max:
#   value(0)   = min  =>  c1 = min - c2
#   value(n-1) = max  =>  c2 = (max - min) / (1.02**(n-1) - 1)
# The coefficients are loop-invariant, so compute them once up front
# (previously recomputed on every iteration).
min_radius = 40   # [μm] smallest fiber radius
max_radius = 55   # [μm] largest fiber radius
radius_c2 = (max_radius - min_radius) / (1.02**(n_motor_units-1) - 1)
radius_c1 = min_radius - radius_c2

min_frequency = 7   # [Hz] lowest stimulation frequency (large MUs)
max_frequency = 24  # [Hz] highest stimulation frequency (small MUs)
frequency_c2 = (max_frequency - min_frequency) / (1.02**(n_motor_units-1) - 1)
frequency_c1 = min_frequency - frequency_c2

motor_units = []
for mu_no in range(n_motor_units):

    # capacitance of the membrane
    # NOTE(review): "<=" makes MUs 0..14 (75%) slow twitch; "<" would give
    # exactly 70% — confirm intended boundary.
    if mu_no <= 0.7*n_motor_units:
        cm = 0.58  # slow twitch (type I)
    else:
        cm = 1.0   # fast twitch (type II)

    # fiber radius between 40 and 55 [μm], increasing with MU number
    radius = radius_c1 + radius_c2*1.02**(mu_no)

    # stimulation frequency [Hz] between 24 and 7, decreasing with MU number
    stimulation_frequency = frequency_c1 + frequency_c2*1.02**(n_motor_units-1-mu_no)

    # exponential distribution: low number of fibers per MU, slow twitch (type I), activated first --> high number of fibers per MU, fast twitch (type II), activated last
    motor_units.append(
    {
      "radius":                radius,                 # [μm] parameter for motor unit: radius of the fiber, used to compute Am
      "cm":                    cm,                     # [uF/cm^2] parameter Cm
      "activation_start_time": 1*mu_no,                # [s] when to start activating this motor unit, here it is a ramp
      "stimulation_frequency": stimulation_frequency,  # [Hz] stimulation frequency for activation
      "jitter": [0.1*random.uniform(-1,1) for i in range(100)]  # [-] random jitter values that will be added to the intervals to simulate jitter
    })
# timing parameters
# -----------------
end_time = 40000.0                    # [ms] end time of the simulation
stimulation_frequency = 100*1e-3      # [ms^-1] sampling frequency of stimuli in firing_times_file, in stimulations per ms, number before 1e-3 factor is in Hertz.
stimulation_frequency_jitter = 0      # [-] jitter in percent of the frequency, added and subtracted to the stimulation_frequency after each stimulation
dt_0D = 2.5e-3                        # [ms] timestep width of ODEs (2e-3)
dt_1D = 6.25e-4                       # [ms] timestep width of diffusion (4e-3)
dt_splitting = 2.5e-3                 # [ms] overall timestep width of strang splitting (4e-3)
dt_3D = 5e-1                          # [ms] time step width of coupling, when 3D should be performed, also sampling time of monopolar EMG
# NOTE(review): these output timesteps (1e5-2e8 ms) exceed end_time (4e4 ms),
# effectively disabling the corresponding outputs — confirm this is intended.
output_timestep_fibers = 2e5          # [ms] timestep for fiber output, 0.5
output_timestep_3D_emg = 2e5          # [ms] timestep for output big files of 3D EMG, 100
output_timestep_surface = 1e5         # [ms] timestep for output surface EMG, 0.5
output_timestep_electrodes = 2e8      # [ms] timestep for python callback, which is electrode measurement output, has to be >= dt_3D

# input files
# NOTE: raises KeyError if the OPENDIHU_HOME environment variable is not set.
input_directory = os.path.join(os.environ["OPENDIHU_HOME"], "examples/electrophysiology/input")
fiber_file = input_directory+"/left_biceps_brachii_37x37fibers.bin"
fat_mesh_file = input_directory+"/left_biceps_brachii_37x37fibers_thin_fat.bin"
firing_times_file = input_directory+"/MU_firing_times_always.txt"  # use setSpecificStatesCallEnableBegin and setSpecificStatesCallFrequency
fiber_distribution_file = "MU_fibre_distribution_37x37_20a.txt"

# stride for sampling the 3D elements from the fiber data
# a higher number leads to less 3D elements
sampling_stride_x = 2
sampling_stride_y = 2
sampling_stride_z = 40      # good values: divisors of 1480: 1480 = 1*1480 = 2*740 = 4*370 = 5*296 = 8*185 = 10*148 = 20*74 = 37*40

# HD-EMG electrode parameters
fiber_file_for_hdemg_surface = fat_mesh_file    # use the fat mesh for placing electrodes, this option is the file of the 2D mesh on which electrode positions are set
hdemg_electrode_faces = ["1+"]                  # which faces of this 2D mesh should be considered for placing the HD-EMG electrodes (list of faces, a face is one of "0-" (left), "0+" (right), "1-" (front), "1+" (back))

# xy-direction = across muscle, z-direction = along muscle
hdemg_electrode_offset_xy = 2.0           # [cm] offset from boundary of 2D mesh where the electrode array begins
hdemg_inter_electrode_distance_z = 0.4    # [cm] distance between electrodes ("IED") in z direction (direction along muscle)
hdemg_inter_electrode_distance_xy = 0.4   # [cm] distance between electrodes ("IED") in transverse direction
hdemg_n_electrodes_z = 32                 # number of electrodes in z direction (direction along muscle)
hdemg_n_electrodes_xy = 12                # number of electrode across muscle

# other options
paraview_output = True
adios_output = False
exfile_output = False
python_output = False
disable_firing_output = False
fast_monodomain_solver_optimizations = True   # enable the optimizations in the fast multidomain solver
use_vc = True                                 # If the vc optimization type should be used for CellmlAdapter
# functions, here, Am, Cm and Conductivity are constant for all fibers and MU's
def get_am(fiber_no, mu_no):
# get radius in cm, 1 μm = 1e-6 m = 1e-4*1e-2 m = 1e-4 cm
r = motor_units[mu_no % len(motor_units)]["radius"]*1e-4
# cylinder surface: A = 2*π*r*l, V = cylinder volume: π*r^2*l, Am = A/V = 2*π*r*l / (π*r^2*l) = 2/r
return 2./r
def get_cm(fiber_no, mu_no):
    """Return the membrane capacitance Cm [uF/cm^2] of this fiber's motor unit."""
    motor_unit = motor_units[mu_no % len(motor_units)]
    return motor_unit["cm"]
def get_conductivity(fiber_no, mu_no):
    """Return the conductivity sigma [mS/cm]; constant for all fibers and MUs."""
    return Conductivity
def get_specific_states_call_frequency(fiber_no, mu_no):
    """Return this motor unit's stimulation frequency converted from [Hz] to [ms^-1]."""
    frequency_hz = motor_units[mu_no % len(motor_units)]["stimulation_frequency"]
    return frequency_hz*1e-3
def get_specific_states_frequency_jitter(fiber_no, mu_no):
    """Return the list of precomputed jitter factors for this fiber's motor unit."""
    return motor_units[mu_no % len(motor_units)]["jitter"]
def get_specific_states_call_enable_begin(fiber_no, mu_no):
    """Return this motor unit's activation start time converted from [s] to [ms]."""
    motor_unit = motor_units[mu_no % len(motor_units)]
    return motor_unit["activation_start_time"]*1e3
# output motor_unit config in a readable format
if True:
    import sys
    # only on rank 0
    # NOTE(review): assumes the MPI rank is passed as the second-to-last
    # command-line argument — verify against the opendihu settings loader.
    if (int)(sys.argv[-2]) == 0:
        # print every 4th motor unit, omitting the long jitter list
        for mu_no,item in enumerate(motor_units[0::4]):
            print("MU {}".format(mu_no*4))
            for (key,value) in item.items():
                if key != "jitter":
                    print("  {}: {}".format(key,value))
|
from .boilerplates import *
from .geometry import *
from .graphics import *
from .uitools import *
from .util import *
from .tile import *
from .sprite import Sprite
|
## Haralambi Todorov (harrytodorov@gmail.com)
## University of Freiburg, Germany
## November 2018
# The Expectation Maximization algorithm
# Input: Observed data X=(X_1, ..., X_N),
# number of iterations,
# initial values for theta_A, theta_B
# parameter m
# Output: theta_A, theta_B
def e_step(x, n, theta_a, theta_b, m):
    """Expectation step of the two-coin EM algorithm.

    For each of the n trials (x[i] heads out of m tosses), compute the
    posterior responsibility that coin A (resp. B) generated the trial,
    assuming binomial likelihoods with success probabilities theta_a/theta_b.
    Returns [responsibilities_a, responsibilities_b].
    """
    resp_a = [0.0] * n
    resp_b = [0.0] * n
    for i, heads in enumerate(x[:n]):
        # unnormalized binomial likelihoods (the shared binomial
        # coefficient cancels in the ratio below)
        likelihood_a = (theta_a ** heads) * ((1.0 - theta_a) ** (m - heads))
        likelihood_b = (theta_b ** heads) * ((1.0 - theta_b) ** (m - heads))
        resp_a[i] = likelihood_a / (likelihood_a + likelihood_b)
        resp_b[i] = 1.0 - resp_a[i]
    return [resp_a, resp_b]
def m_step(ti_a, ti_b, n, x, m=None):
    """Maximization step of the two-coin EM algorithm.

    Re-estimates theta_a/theta_b as (expected heads) / (expected tosses).

    BUG FIX / generalization: the textbook M-step divides by m * sum(ti),
    where m is the number of tosses per trial — the original code divided
    by n * sum(ti) (n = number of trials), which can push theta above 1
    (e.g. with this file's data theta_B exceeds 1 after one iteration,
    making (1 - theta) negative in the next E-step).  Pass ``m`` to get
    the correct estimate; omitting it keeps the historical behaviour
    (divide by n) for backward compatibility.
    """
    tosses = m if m is not None else n
    numerator_a = sum(ti_a[i] * x[i] for i in range(n))
    denominator_a = tosses * sum(ti_a[:n])
    theta_a = numerator_a / denominator_a
    numerator_b = sum(ti_b[i] * x[i] for i in range(n))
    denominator_b = tosses * sum(ti_b[:n])
    theta_b = numerator_b / denominator_b
    return [theta_a, theta_b]
if __name__ == '__main__':
    # Observed head counts: five trials of 10 coin tosses each.
    observations = (4, 9, 8, 3, 7)
    num_trials = len(observations)
    # initial guesses for the two coins' head probabilities
    theta_A = 0.3
    theta_B = 0.4
    tosses_per_trial = 10
    num_iterations = 4
    for _ in range(num_iterations):
        resp_a, resp_b = e_step(observations, num_trials, theta_A, theta_B, tosses_per_trial)
        theta_A, theta_B = m_step(resp_a, resp_b, num_trials, observations)
    print(f"theta_a: {theta_A}\ntheta_b: {theta_B}")
import logging
import matplotlib as mpl
import matplotlib.pyplot as plt
# module-level logger; silence matplotlib's very chatty DEBUG output
logger = logging.getLogger(__name__)
logging.getLogger('matplotlib').setLevel(logging.INFO)
def plotPxx(Pxx, freqs, out=None, title=None):
    """Plot a power spectral density on log-log axes.

    Saves the figure to *out* when given, otherwise shows it interactively;
    the figure is closed either way.
    """
    fig, axis = plt.subplots(1)
    axis.loglog(freqs, Pxx)
    axis.set_xlabel('Frequency [Hz]')
    axis.set_ylabel('PSD [strain / Hz]')
    # hide the unreliable low-frequency band below 10 Hz
    axis.set_xlim(10, None)
    if title is not None:
        axis.set_title(title)
    fig.tight_layout()
    if out is None:
        plt.show()
    else:
        fig.savefig(out, bbox_inches='tight', dpi=300)
    plt.close()
|
#!/bin/python3
import sys
def twinArrays(ar1, ar2):
    """Return the smallest possible ar1[i] + ar2[j] with i != j.

    If the two minima sit at different indices their sum is optimal;
    otherwise one of them must be replaced by the cheaper second-smallest
    value of its array.
    """
    min1 = min(ar1)
    min2 = min(ar2)
    if ar1.index(min1) != ar2.index(min2):
        return min1 + min2
    # minima collide at the same index: swap in the cheaper runner-up
    sorted1 = sorted(ar1)
    sorted2 = sorted(ar2)
    if sorted1[1] < sorted2[1]:
        return sorted1[1] + min2
    return min1 + sorted2[1]
# Read the HackerRank-style input: a count line followed by two arrays.
n = int(input().strip())  # declared length of the arrays (not used further)
ar1 = list(map(int, input().strip().split(' ')))
ar2 = list(map(int, input().strip().split(' ')))
result = twinArrays(ar1, ar2)
print(result)
|
""""""
# -*- coding: utf-8 -*-
# date: 2021
# author: AllChooseC
import numpy as np
import scipy as sc
from scipy import signal
from sklearn.preprocessing import normalize
import torch
class ToSpectrogram(object):
    """Convert a [channels, samples] signal into a (log-)spectrogram.

    Output dimensions are [channels, times, frequencies].
    """
    def __init__(self, nperseg=32, noverlap=16):
        # BUG FIX: the second assert previously re-checked nperseg,
        # leaving noverlap completely unvalidated.
        assert isinstance(nperseg, int)
        assert isinstance(noverlap, int)
        self.nperseg = nperseg
        self.noverlap = noverlap
    def __call__(self, data):
        log_spectrogram = True
        fs = 300  # [Hz] sampling frequency — assumed by this dataset; TODO confirm
        _, _, Sxx = signal.spectrogram(data, fs=fs, nperseg=self.nperseg, noverlap=self.noverlap)
        # reorder [channels, freq, time] -> [channels, time, freq]
        Sxx = np.transpose(Sxx, [0, 2, 1])
        if log_spectrogram:
            Sxx = abs(Sxx)
            mask = Sxx > 0  # avoid log(0); zero bins stay 0
            Sxx[mask] = np.log(Sxx[mask])
        # Data dimension [channels, times, frequency]
        return Sxx
class Rescale(object):
    """Pad or crop a [channels, samples] signal to a fixed length.

    Args:
        output_length (tuple or int): Desired output size. An int is the
            target sample count; a tuple is interpreted as (height, width)
            and only the width is applied.
    """
    def __init__(self, output_length):
        assert isinstance(output_length, (int, tuple))
        self.output_size = output_length
    def __call__(self, data):
        current_len = data.shape[1]
        if isinstance(self.output_size, int):
            target_len = self.output_size
        else:
            _, target_len = self.output_size
        if current_len < target_len:
            # too short: zero-pad on the right up to target_len
            pad_amount = target_len - current_len
            data = np.pad(data, ((0, 0), (0, pad_amount)), 'constant', constant_values=(0, 0))
        elif current_len > target_len:
            # too long: truncate to the first target_len samples
            data = data[:, :target_len]
        return data
class ToTensor(object):
    """Convert a numpy array into a float32 torch tensor (no axis changes)."""
    def __call__(self, data):
        return torch.from_numpy(data).float()
class DropoutBursts(object):
    """Dropout bursts are created by selecting time instants uniformly at random and setting the ECG signal values in
    a 50ms vicinity of those time instants to 0. Dropout burst hence model short periods of weak signal due to, e.g.,
    bad contact of ECG leads.
    """
    def __init__(self, threshold=2, depth=8):
        self.threshold = threshold
        self.depth = depth
    def __call__(self, data):
        original_shape = data.shape
        # draw depth extra samples so the mask still covers the full
        # signal after the shrinking neighbourhood growth below
        padded_shape = [original_shape[0], original_shape[1] + self.depth]
        noise = np.random.normal(0, 1, padded_shape)
        mask = np.greater(noise, self.threshold)
        # grow every True into a run of at least depth+1 samples
        for _ in range(self.depth):
            mask = np.logical_or(mask[:, :-1], mask[:, 1:])
        zeroed = np.where(mask, np.zeros(original_shape), data)
        return zeroed.reshape(1, -1)
def _stretch_squeeze(source, length):
target = np.zeros([1, length])
interpol_obj = sc.interpolate.interp1d(np.arange(source.size), source)
grid = np.linspace(0, source.size - 1, target.size)
result = interpol_obj(grid)
return result
def _fit_to_length(source, length):
target = np.zeros([length])
w_l = min(source.size, target.size)
target[0:w_l] = source[0, 0:w_l]
return target
class RandomResample(object):
    """Assuming a heart rate of 80bpm for all training ECG signals, random resampling emulates a broader range of heart
    rates by uniformly resampling the ECG signals such that the heart rate of the resampled signal is uniformly
    distributed on the interval [60, 120]bpm.
    """
    def __call__(self, data):
        original_shape = data.shape
        # pulse variation from 60 bpm to 120 bpm around the expected 80 bpm
        resampled_length = np.random.randint(
            low=int(original_shape[1] * 80 / 120),
            high=int(original_shape[1] * 80 / 60),
        )
        stretched = _stretch_squeeze(data, resampled_length)
        fitted = _fit_to_length(stretched, original_shape[1])
        return fitted.reshape(1, -1)
class Normalize(object):
    """L2-normalize each row (sklearn ``normalize``), scale by 100,
    then subtract the global mean so the result is zero-centered."""
    def __call__(self, data):
        scaled = normalize(data)*100
        return scaled - np.mean(scaled)
|
import logging
from django.db import connections
# project
from ...ext import sql as sqlx
from ...ext import AppTypes
from .conf import settings
log = logging.getLogger(__name__)
# attribute under which the original cursor factory is stashed on a patched connection
CURSOR_ATTR = '_datadog_original_cursor'
def patch_db(tracer):
    """Patch every configured Django DB connection so its cursors are traced."""
    for connection in connections.all():
        patch_conn(tracer, connection)
def unpatch_db():
    """Undo patch_db() on every configured Django DB connection."""
    for connection in connections.all():
        unpatch_conn(connection)
def patch_conn(tracer, conn):
    """Patch a single DB connection so every cursor it hands out is traced."""
    # Idempotency guard: the backup attribute marks an already-patched connection.
    if hasattr(conn, CURSOR_ATTR):
        log.debug("already patched")
        return
    # Stash the original cursor factory so unpatch_conn() can restore it.
    setattr(conn, CURSOR_ATTR, conn.cursor)
    def cursor():
        # Closure over tracer/conn: wrap each freshly created cursor.
        return TracedCursor(tracer, conn, conn._datadog_original_cursor())
    conn.cursor = cursor
def unpatch_conn(conn):
    """Restore the original cursor factory saved by patch_conn()."""
    original_cursor = getattr(conn, CURSOR_ATTR, None)
    if original_cursor is None:
        log.debug('nothing to do, the connection is not patched')
        return
    conn.cursor = original_cursor
    delattr(conn, CURSOR_ATTR)
class TracedCursor(object):
    """Proxy around a DB-API cursor that traces each query execution.

    Unknown attributes, iteration and context-manager use are all
    delegated to the wrapped cursor.
    """
    def __init__(self, tracer, conn, cursor):
        self.tracer = tracer
        self.conn = conn
        self.cursor = cursor
        self._vendor = getattr(conn, 'vendor', 'db')     # e.g sqlite, postgres
        self._alias = getattr(conn, 'alias', 'default')  # e.g. default, users
        prefix = sqlx.normalize_vendor(self._vendor)
        self._name = "%s.%s" % (prefix, "query")  # e.g sqlite.query
        # optional configured prefix for the service name
        database_prefix = (
            '{}-'.format(settings.DEFAULT_DATABASE_PREFIX)
            if settings.DEFAULT_DATABASE_PREFIX else ''
        )
        self._service = "%s%s%s" % (
            database_prefix,
            self._alias,
            "db"
        )  # e.g. service-defaultdb or service-postgresdb
        self.tracer.set_service_info(
            service=self._service,
            app=prefix,
            app_type=AppTypes.db,
        )
    def _trace(self, func, sql, params):
        """Run *func(sql, params)* inside a span tagged with query metadata."""
        span = self.tracer.trace(
            self._name,
            resource=sql,
            service=self._service,
            span_type=sqlx.TYPE
        )
        with span:
            span.set_tag(sqlx.QUERY, sql)
            span.set_tag("django.db.vendor", self._vendor)
            span.set_tag("django.db.alias", self._alias)
            try:
                return func(sql, params)
            finally:
                # record the row count even when the query raised
                rows = self.cursor.cursor.rowcount
                if rows and 0 <= rows:
                    span.set_tag(sqlx.ROWS, self.cursor.cursor.rowcount)
    def callproc(self, procname, params=None):
        return self._trace(self.cursor.callproc, procname, params)
    def execute(self, sql, params=None):
        return self._trace(self.cursor.execute, sql, params)
    def executemany(self, sql, param_list):
        return self._trace(self.cursor.executemany, sql, param_list)
    def close(self):
        return self.cursor.close()
    def __getattr__(self, attr):
        # delegate everything else to the wrapped cursor
        return getattr(self.cursor, attr)
    def __iter__(self):
        return iter(self.cursor)
    def __enter__(self):
        return self
    def __exit__(self, type, value, traceback):
        self.close()
|
import json
from os.path import abspath, dirname, isfile, join
CURRENT_DIR = dirname(abspath(__file__))
# test data lives three directory levels above this module
TEST_DATA_DIR = join(dirname(dirname(dirname(CURRENT_DIR))), 'test_data')
from opendp_apps.analysis.testing.base_stat_spec_test import StatSpecTestCase
from opendp_apps.analysis.tools.dp_count_spec import DPCountSpec
from opendp_apps.model_helpers.msg_util import msgt
from opendp_apps.analysis import static_vals as astatic
from opendp_apps.profiler import static_vals as pstatic
from opendp_apps.utils.extra_validators import *
class DPCountStatSpecTest(StatSpecTestCase):
    """End-to-end tests for DPCountSpec.

    Each test builds a spec_props dict (as the UI would send it),
    validates the DP chain, runs it against a bundled test-data file,
    and checks the released count/accuracy values.
    """
    fixtures = ['test_dataset_data_001.json', ]

    def _run_chain_on_file(self, dp_count, num_cols, file_name, sep_char):
        """Run `dp_count`'s chain over a file under TEST_DATA_DIR.

        Opens the file with a context manager so it is always closed,
        runs the chain over the first `num_cols` column indexes, and
        asserts the spec finished without error.
        """
        col_indexes = [idx for idx in range(0, num_cols)]
        filepath = join(TEST_DATA_DIR, file_name)
        self.assertTrue(isfile(filepath))
        with open(filepath, 'r') as file_obj:
            dp_count.run_chain(col_indexes, file_obj, sep_char=sep_char)
        self.assertFalse(dp_count.has_error())

    def _check_release_description(self, dp_count):
        """Assert the release dict carries text and html descriptions."""
        final_dict = dp_count.get_release_dict()
        self.assertIn('description', final_dict)
        self.assertIn('text', final_dict['description'])
        self.assertIn('html', final_dict['description'])

    def test_05_valid_noise_mechanism(self):
        """Check for the correct noise_mechanism"""
        dp_count = DPCountSpec({})
        self.assertEqual(dp_count.noise_mechanism, astatic.NOISE_GEOMETRIC_MECHANISM)

    def test_10_count_valid_spec(self):
        """(10) Run DP Count valid spec, float column"""
        msgt(self.test_10_count_valid_spec.__doc__)

        spec_props = {'variable': 'EyeHeight',
                      'col_index': 19,
                      'statistic': astatic.DP_COUNT,
                      'dataset_size': 183,
                      'epsilon': 1.0,
                      'delta': 0.0,
                      'cl': astatic.CL_99,
                      'missing_values_handling': astatic.MISSING_VAL_INSERT_FIXED,
                      'fixed_value': '182',
                      'variable_info': {'min': -8,
                                        'max': 5,
                                        'type': 'Float', },
                      }

        dp_count = DPCountSpec(spec_props)
        # Previously called without checking the result; assert it.
        self.assertTrue(dp_count.is_chain_valid())

        # Run the actual count; Fatigue_data.tab has 20 columns.
        self._run_chain_on_file(dp_count, 20, 'Fatigue_data.tab', "\t")

        # Actual count 184; should be well within range
        self.assertTrue(dp_count.value > 170)

    def test_20_count_valid_spec(self):
        """(20) Run DP Count valid spec, integer column"""
        msgt(self.test_20_count_valid_spec.__doc__)

        spec_props = {'variable': 'age',
                      'col_index': 1,
                      'statistic': astatic.DP_COUNT,
                      'dataset_size': 10_000,
                      'epsilon': 1.0,
                      'delta': 0.0,
                      'cl': astatic.CL_95,
                      'missing_values_handling': astatic.MISSING_VAL_INSERT_FIXED,
                      'fixed_value': '44',
                      'variable_info': {'min': 18,
                                        'max': 95,
                                        'type': pstatic.VAR_TYPE_INTEGER},
                      }

        dp_count = DPCountSpec(spec_props)
        self.assertTrue(dp_count.is_chain_valid())
        self.assertFalse(dp_count.has_error())

        # Run the actual count; PUMS5extract10000.csv has 11 columns.
        self._run_chain_on_file(dp_count, 11, 'PUMS5extract10000.csv', ",")

        self.show_release_result(dp_count.get_release_dict())

        # val from local machine: 2.9957322850627124
        self.assertTrue(dp_count.accuracy_val > 2.995)
        self.assertTrue(dp_count.accuracy_val < 2.996)

        # Actual count 10_000; should be well within range
        self.assertTrue(dp_count.value > 9_980)

        self._check_release_description(dp_count)

    def test_30_count_valid_another_spec(self):
        """(30) Run DP Count on another valid spec"""
        msgt(self.test_30_count_valid_another_spec.__doc__)

        spec_props = {'variable': 'TypingSpeed',
                      'col_index': 5,
                      'statistic': astatic.DP_COUNT,
                      'dataset_size': 183,
                      'epsilon': 1.0,
                      'delta': 0.0,
                      'cl': astatic.CL_99,
                      'missing_values_handling': astatic.MISSING_VAL_INSERT_FIXED,
                      'fixed_value': '62',
                      'variable_info': {'min': 1,
                                        'max': 61,
                                        'type': pstatic.VAR_TYPE_FLOAT},
                      }

        dp_count = DPCountSpec(spec_props)
        # Previously called without checking the result; assert it.
        self.assertTrue(dp_count.is_chain_valid())

        # Run the actual count; Fatigue_data.tab has 20 columns.
        self._run_chain_on_file(dp_count, 20, 'Fatigue_data.tab', "\t")

        self.show_release_result(dp_count.get_release_dict())

        # (test has wide accuracy latitude)
        self.assertTrue(dp_count.accuracy_val > 4.4)
        self.assertTrue(dp_count.accuracy_val < 4.8)

        # Actual count 184; should be well within range
        self.assertTrue(dp_count.value > 170)

        self._check_release_description(dp_count)

    def test_40_count_valid_str_spec(self):
        """(40) Run DP Count string"""
        msgt(self.test_40_count_valid_str_spec.__doc__)

        spec_props = {'variable': 'Subject',
                      'col_index': 0,
                      'statistic': astatic.DP_COUNT,
                      'dataset_size': 183,
                      'epsilon': 1.0,
                      'delta': 0.0,
                      'cl': astatic.CL_95,
                      'missing_values_handling': astatic.MISSING_VAL_INSERT_FIXED,
                      'fixed_value': 'ac',
                      'variable_info': {'type': pstatic.VAR_TYPE_CATEGORICAL},
                      }

        dp_count = DPCountSpec(spec_props)
        # (was previously called twice; once is enough)
        self.assertTrue(dp_count.is_chain_valid())
        self.assertFalse(dp_count.has_error())

        # Run the actual count; Fatigue_data.tab has 20 columns.
        self._run_chain_on_file(dp_count, 20, 'Fatigue_data.tab', "\t")

        # Actual count 184; should be well within range
        self.assertTrue(dp_count.value > 170)

        self.show_release_result(dp_count.get_release_dict())

        # (test has wide accuracy latitude)
        self.assertTrue(dp_count.accuracy_val > 2)
        self.assertTrue(dp_count.accuracy_val < 4)

        self._check_release_description(dp_count)

    def test_50_count_missing_vals_str(self):
        """(50) Run DP Count string"""
        msgt(self.test_50_count_missing_vals_str.__doc__)

        # Spec exactly as it arrives from the UI.
        spec_props = {'error': '', 'label': 'gender', 'locked': False,
                      'epsilon': 1.0, 'delta': 0.0, 'cl': 0.95,
                      'variable': 'gender', 'statistic': 'count',
                      'fixed_value': 'male', 'handle_as_fixed': True,
                      'missing_values_handling': 'insert_fixed', 'dataset_size': 1000,
                      'variable_info': {'name': 'gender', 'type': 'Categorical',
                                        'label': 'gender', 'selected': True,
                                        'categories': ['Genderfluid'], 'sort_order': 4},
                      'col_index': 4}

        dp_count = DPCountSpec(spec_props)
        self.assertTrue(dp_count.is_chain_valid())

        # Run the actual count; bonabo MOCK_DATA.csv has 28 columns.
        self._run_chain_on_file(dp_count, 28, 'bonabo MOCK_DATA.csv', ",")

        # Actual count 1_000; should be well within range
        self.assertTrue(dp_count.value > 970)

    def test_60_count_missing_vals_bool(self):
        """(60) Run DP Count bool"""
        msgt(self.test_60_count_missing_vals_bool.__doc__)

        spec_props = {'variable': 'Boolean2',
                      'col_index': 8,
                      'statistic': astatic.DP_COUNT,
                      'dataset_size': 1_000,
                      'epsilon': 1.0,
                      'delta': 0.0,
                      'cl': astatic.CL_95,
                      'missing_values_handling': astatic.MISSING_VAL_INSERT_FIXED,
                      'fixed_value': 'true',
                      'variable_info': {'type': pstatic.VAR_TYPE_BOOLEAN},
                      }

        dp_count = DPCountSpec(spec_props)
        self.assertTrue(dp_count.is_chain_valid())

        # Run the actual count; bonabo MOCK_DATA.csv has 28 columns.
        self._run_chain_on_file(dp_count, 28, 'bonabo MOCK_DATA.csv', ",")

        # Actual count 1_000; should be well within range
        self.assertTrue(dp_count.value > 970)

    def show_release_result(self, release_dict: dict):
        """Print the release dict to the screen as pretty-printed JSON."""
        print(json.dumps(release_dict, indent=4))
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
import torch.nn.functional as F
# Fixed RGB palette, indexed by segmentation class id in
# process_seg_label(); one (r, g, b) tuple per class.
label_colours = [(178, 45, 45), (153, 115, 115), (64, 36, 32), (255, 68, 0), (89, 24, 0), (191, 121, 96), (191, 102, 0),
                 (76, 41, 0), (153, 115, 38), (102, 94, 77), (242, 194, 0), (191, 188, 143), (226, 242, 0),
                 (119, 128, 0), (59, 64, 0), (105, 191, 48), (81, 128, 64), (0, 255, 0), (0, 51, 7), (191, 255, 208),
                 (96, 128, 113), (0, 204, 136), (13, 51, 43), (0, 191, 179), (0, 204, 255), (29, 98, 115), (0, 34, 51),
                 (163, 199, 217), (0, 136, 255), (41, 108, 166), (32, 57, 128), (0, 22, 166), (77, 80, 102),
                 (119, 54, 217), (41, 0, 77), (222, 182, 242), (103, 57, 115), (247, 128, 255), (191, 0, 153),
                 (128, 96, 117), (127, 0, 68), (229, 0, 92), (76, 0, 31), (255, 128, 179), (242, 182, 198)]
def process_image(image, image_mean):
    """Undo per-channel mean subtraction and return a uint8 numpy image.

    `image` is a CHW torch tensor; `image_mean` holds one value per
    channel and is broadcast over the spatial dimensions.
    """
    restored = image.cpu().numpy() + image_mean[:, None, None]
    return restored.astype(np.uint8)
def process_seg_label(pred, gt, num_classes=40):
    """Colour-code a predicted segmentation map and its ground truth.

    `pred` (logits) is upsampled to the ground-truth resolution and
    argmax-ed to class ids; the first item of the batch is painted with
    `label_colours`.  Ground-truth ids >= num_classes stay white
    (unlabeled).  Returns two CHW uint8 numpy arrays (pred, gt).
    """
    _, _, height, width = gt.size()
    upsampled = F.interpolate(pred, (height, width), mode='bilinear', align_corners=True)
    pred_ids = upsampled.argmax(dim=1)[0].detach().cpu().numpy()
    gt_ids = gt.squeeze(1)[0].cpu().numpy()

    rows, cols = gt_ids.shape
    # Unlabeled pixels remain white (255, 255, 255).
    pred_img = Image.new('RGB', (cols, rows), (255, 255, 255))
    gt_img = Image.new('RGB', (cols, rows), (255, 255, 255))
    pred_pixels = pred_img.load()
    gt_pixels = gt_img.load()
    for row_idx, row in enumerate(gt_ids):
        for col_idx, class_id in enumerate(row):
            if class_id < num_classes:
                gt_pixels[col_idx, row_idx] = label_colours[class_id]
                pred_pixels[col_idx, row_idx] = label_colours[pred_ids[row_idx, col_idx]]
    return np.array(pred_img).transpose([2, 0, 1]), np.array(gt_img).transpose([2, 0, 1])
def normalize(x):
    """L2-normalize each column of `x` (norm taken along axis 0)."""
    column_norms = np.linalg.norm(x, ord=2, axis=0, keepdims=True)
    return x / column_norms
def process_normal_label(pred, gt, ignore_label):
    """Prepare predicted and ground-truth surface normals for display.

    The prediction is upsampled to the ground-truth resolution; both
    maps (first batch item) are unit-normalized per pixel.  Pixels whose
    ground truth equals `ignore_label` are set to 1 (rendered white).
    Returns two (3, H, W) numpy arrays (pred, gt).
    """
    _, _, height, width = gt.size()
    upsampled = F.interpolate(pred, (height, width), mode='bilinear', align_corners=True)
    pred_np = upsampled[0].detach().cpu().numpy()
    gt_np = gt[0].cpu().numpy()

    mask = gt_np != ignore_label
    _, h, w = gt_np.shape
    pred_np = normalize(pred_np.reshape(3, -1)).reshape(3, h, w) * mask + (1 - mask)
    gt_np = normalize(gt_np.reshape(3, -1)).reshape(3, h, w) * mask + (1 - mask)
    return pred_np, gt_np
def save_heatmap(matrix, filename, vmin=0., vmax=1.):
    """Render `matrix` as a blue-white-red heatmap, save it to `filename`,
    and return the saved image as a channels-first numpy array.
    """
    # Re-use figure 0 so repeated calls don't accumulate open figures.
    fig = plt.figure(0)
    fig.clf()
    plt.matshow(matrix, fignum=0, cmap=plt.cm.bwr, vmin=vmin, vmax=vmax)
    plt.colorbar()
    plt.savefig(filename)
    # Read the file back so the caller also gets the rendered pixels.
    img = Image.open(filename)
    return np.array(img).transpose((2, 0, 1))
def _process_params(G, center, dim):
    """Validate and normalize layout arguments.

    A non-Graph `G` is treated as a node collection and wrapped in an
    empty Graph.  `center` defaults to the origin and must have exactly
    `dim` coordinates, otherwise ValueError is raised.
    """
    if not isinstance(G, nx.Graph):
        node_collection = G
        G = nx.Graph()
        G.add_nodes_from(node_collection)

    center = np.zeros(dim) if center is None else np.asarray(center)

    if len(center) != dim:
        raise ValueError(
            "length of center coordinates must match dimension of layout")

    return G, center
def rescale_layout(pos, scale=1):
    """Center each axis of `pos` at zero and scale uniformly so every
    coordinate lies in (-scale, scale); the aspect ratio is preserved.
    Modifies `pos` in place and returns it.
    """
    max_extent = 0  # largest |coordinate| over all axes after centering
    for axis in range(pos.shape[1]):
        pos[:, axis] -= pos[:, axis].mean()
        max_extent = max(abs(pos[:, axis]).max(), max_extent)
    if max_extent > 0:
        pos *= scale / max_extent
    return pos
def task_layout(G, nodes, align='vertical',
                scale=1, center=None, aspect_ratio=4./3):
    """Bipartite-style layout: place `nodes` along one edge of a
    rectangle and the remaining nodes of G along the opposite edge.

    Nodes are ordered by the integer suffix after the last '_' in their
    name.  Returns a dict mapping node -> (x, y) numpy position.
    Raises ValueError when `align` is neither 'vertical' nor 'horizontal'.
    """
    G, center = _process_params(G, center=center, dim=2)
    if len(G) == 0:
        return {}

    height = 1
    width = aspect_ratio * height
    offset = (width/2, height/2)

    # `nodes` forms the first side; every other node of G goes opposite.
    top = set(nodes)
    bottom = set(G) - top
    # Sort by numeric suffix (e.g. "1_3" -> 3); the order is reversed
    # for vertical layouts only.
    top = sorted(top, key=lambda x: int(x.split('_')[-1]), reverse=align == 'vertical')
    bottom = sorted(bottom, key=lambda x: int(x.split('_')[-1]), reverse=align == 'vertical')
    nodes = list(top) + list(bottom)

    if align == 'vertical':
        # `top` down the left edge, `bottom` down the right edge.
        left_xs = np.repeat(0, len(top))
        right_xs = np.repeat(width, len(bottom))
        left_ys = np.linspace(0, height, len(top))
        right_ys = np.linspace(0, height, len(bottom))
        top_pos = np.column_stack([left_xs, left_ys]) - offset
        bottom_pos = np.column_stack([right_xs, right_ys]) - offset
        pos = np.concatenate([top_pos, bottom_pos])
        pos = rescale_layout(pos, scale=scale) + center
        pos = dict(zip(nodes, pos))
        return pos

    if align == 'horizontal':
        # `top` along the upper edge, `bottom` along the lower edge.
        top_ys = np.repeat(height, len(top))
        bottom_ys = np.repeat(0, len(bottom))
        top_xs = np.linspace(0, width, len(top))
        bottom_xs = np.linspace(0, width, len(bottom))
        top_pos = np.column_stack([top_xs, top_ys]) - offset
        bottom_pos = np.column_stack([bottom_xs, bottom_ys]) - offset
        pos = np.concatenate([top_pos, bottom_pos])
        pos = rescale_layout(pos, scale=scale) + center
        pos = dict(zip(nodes, pos))
        return pos

    msg = 'align must be either vertical or horizontal.'
    raise ValueError(msg)
def save_connectivity(net1, net2, connectivity1, connectivity2, filename, align='horizontal', with_labels=False):
    """Draw cross-task connectivity between two networks as a graph,
    save the figure to `filename`, and return it as a channels-first
    numpy array.

    `net1`/`net2` are (num_stages x num_stages) weight matrices;
    `connectivity1`/`connectivity2` select which (target, source) pairs
    are candidate edges (their nonzero entries).
    """
    num_stages = net1.shape[0]
    G = nx.DiGraph()
    # One node per stage and task; task 0 drawn azure, task 1 tomato.
    G.add_nodes_from([("1_%d" % i, dict(label=i)) for i in range(num_stages)], task=0, color='xkcd:azure')
    G.add_nodes_from([("2_%d" % i, dict(label=i)) for i in range(num_stages)], task=1, color='xkcd:tomato')
    # Candidate (target, source) index pairs from each connectivity mask.
    paths1 = list(zip(*np.nonzero(connectivity1)))
    paths2 = list(zip(*np.nonzero(connectivity2)))
    # Edges with weight > 0.5 are "positive" (highlighted), the rest
    # are drawn faded and dotted.
    pos_edges_2_to_1 = [("2_%d" % s, "1_%d" % t, dict(value=net1[t, s])) for t, s in paths1 if net1[t, s] > 0.5]
    pos_edges_1_to_2 = [("1_%d" % s, "2_%d" % t, dict(value=net2[t, s])) for t, s in paths2 if net2[t, s] > 0.5]
    neg_edges_2_to_1 = [("2_%d" % s, "1_%d" % t, dict(value=net1[t, s])) for t, s in paths1 if net1[t, s] <= 0.5]
    neg_edges_1_to_2 = [("1_%d" % s, "2_%d" % t, dict(value=net2[t, s])) for t, s in paths2 if net2[t, s] <= 0.5]
    pos_edges = pos_edges_2_to_1 + pos_edges_1_to_2
    neg_edges = neg_edges_2_to_1 + neg_edges_1_to_2
    # Backbone edges chaining consecutive stages within each task.
    G.add_edges_from([("1_%d" % i, "1_%d" % (i + 1)) for i in range(num_stages - 1)], color='xkcd:black')
    G.add_edges_from([("2_%d" % i, "2_%d" % (i + 1)) for i in range(num_stages - 1)], color='xkcd:black')
    top = {n for n, d in G.nodes(data=True) if d['task'] == 0}
    pos = task_layout(G, top, align=align)
    figsize = (1.5, num_stages / 2.) if align == 'vertical' else (num_stages / 2., 1.5)
    # Re-use figure 0 so repeated calls don't accumulate open figures.
    fig = plt.figure(num=0, figsize=figsize)
    fig.clf()
    labels = {n: d['label'] for n, d in G.nodes(data=True)}
    node_color = [d['color'] for _, d in G.nodes(data=True)]
    edge_color = [d['color'] for _, _, d in G.edges(data=True)]
    nx.draw(G, pos=pos, labels=labels, node_color=node_color, edge_color=edge_color, with_labels=with_labels)
    nx.draw_networkx_edges(G, pos=pos, edgelist=pos_edges, edge_color='xkcd:violet')
    arcs = nx.draw_networkx_edges(G, pos=pos, edgelist=neg_edges, edge_color='xkcd:silver', alpha=0.3)
    for arc in arcs:
        arc.set_linestyle('dotted')
    plt.savefig(filename)
    # Read the file back so the caller also gets the rendered pixels.
    img = Image.open(filename)
    return np.array(img).transpose((2, 0, 1))
|
#!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Downloads cloud_sql_proxy for Linux and Mac, and packages it as cipd.
See https://cloud.google.com/sql/docs/mysql/sql-proxy#install
It's an unversioned binary. So we just tag it with the current date.
"""
import os
import shutil
import subprocess
import tempfile
import time
import urllib
def package(url, exe, pkg, tag):
  """Download `url` into a temp dir as `exe`, then publish that dir as a
  cipd package named `pkg`, tagged with `tag`.  (Python 2 script.)
  """
  tmp = tempfile.mkdtemp(prefix='cloud_sql_proxy_upload')
  try:
    print 'Fetching %s...' % url
    urllib.urlretrieve(url, os.path.join(tmp, exe))
    # The proxy must be executable once installed.
    os.chmod(os.path.join(tmp, exe), 0777)
    print 'Packaging it as %s and tagging with %s' % (pkg, tag)
    subprocess.check_call([
        'cipd', 'create',
        '-in', tmp,
        '-name', pkg,
        '-tag', tag,
    ])
  finally:
    # Always remove the temp dir, even if the fetch or upload fails.
    shutil.rmtree(tmp)
def main():
  """Package the Linux and Mac cloud_sql_proxy binaries.

  The upstream binary is unversioned, so both packages are tagged with
  today's download date.
  """
  tag = time.strftime('downloaded:%Y_%m_%d')
  package(
      url='https://dl.google.com/cloudsql/cloud_sql_proxy.linux.amd64',
      exe='cloud_sql_proxy',
      pkg='infra/tools/cloud_sql_proxy/linux-amd64',
      tag=tag)
  package(
      url='https://dl.google.com/cloudsql/cloud_sql_proxy.darwin.amd64',
      exe='cloud_sql_proxy',
      pkg='infra/tools/cloud_sql_proxy/mac-amd64',
      tag=tag)
# Script entry point.
if __name__ == '__main__':
  main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.