| repo_name (string, 5-92 chars) | path (string, 4-221 chars) | copies (string, 19 classes) | size (string, 4-6 chars) | content (string, 766-896k chars) | license (string, 15 classes) | hash (int64) | line_mean (float64, 6.51-99.9) | line_max (int64, 32-997) | alpha_frac (float64, 0.25-0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5-13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
grollins/foldkin
|
foldkin/zam_protein.py
|
1
|
1049
|
import os.path
from zam.protein import Protein
from zam.sequence import SeqToAA1
DEFAULT_PDB_DIR = os.path.expanduser("~/Dropbox/11_28_2011/pdb")
def create_zam_protein_from_path(file_path):
"""docstring for create_zam_protein"""
p = Protein(file_path)
new_zam_protein = ZamProtein(p)
return new_zam_protein
def create_zam_protein_from_pdb_id(pdb_id):
"""docstring for create_zam_protein"""
file_path = os.path.join(DEFAULT_PDB_DIR, pdb_id + ".pdb")
p = Protein(file_path)
new_zam_protein = ZamProtein(p)
return new_zam_protein
class ZamProtein(object):
"""docstring for ZamProtein"""
def __init__(self, protein):
super(ZamProtein, self).__init__()
self.protein = protein
def __len__(self):
return len(self.protein)
def get_contact_list(self, residue_inds=None):
return self.protein.ResContactList(residue_inds)
def get_sequence(self):
return SeqToAA1(self.protein.Seq)
def compute_aco(self):
return self.protein.MeanContactOrder()
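# --- Illustrative usage sketch (not part of the original module) ---
# Hypothetical example; "1abc" is a placeholder PDB id and assumes a matching
# file exists under DEFAULT_PDB_DIR:
# protein = create_zam_protein_from_pdb_id("1abc")
# sequence = protein.get_sequence()
# aco = protein.compute_aco()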
|
bsd-2-clause
| -148,482,932,432,346,660
| 27.351351
| 64
| 0.669209
| false
| 3.005731
| false
| false
| false
|
ProfessorX/Config
|
.PyCharm30/system/python_stubs/-1247971765/PyQt4/QtCore/QSemaphore.py
|
1
|
1205
|
# encoding: utf-8
# module PyQt4.QtCore
# from /usr/lib/python3/dist-packages/PyQt4/QtCore.cpython-34m-x86_64-linux-gnu.so
# by generator 1.135
# no doc
# imports
import sip as __sip
class QSemaphore(): # skipped bases: <class 'sip.simplewrapper'>
""" QSemaphore(int n=0) """
def acquire(self, int_n=1): # real signature unknown; restored from __doc__
""" QSemaphore.acquire(int n=1) """
pass
def available(self): # real signature unknown; restored from __doc__
""" QSemaphore.available() -> int """
return 0
def release(self, int_n=1): # real signature unknown; restored from __doc__
""" QSemaphore.release(int n=1) """
pass
def tryAcquire(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
"""
QSemaphore.tryAcquire(int n=1) -> bool
QSemaphore.tryAcquire(int, int) -> bool
"""
return False
def __init__(self, int_n=0): # real signature unknown; restored from __doc__
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
|
gpl-2.0
| -6,895,287,090,918,313,000
| 29.897436
| 106
| 0.619917
| false
| 3.662614
| false
| false
| false
|
google-research/google-research
|
depth_and_motion_learning/depth_motion_field_model.py
|
1
|
18887
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A model for training depth egomotion prediction."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from depth_and_motion_learning import depth_prediction_nets
from depth_and_motion_learning import intrinsics_utils
from depth_and_motion_learning import maybe_summary
from depth_and_motion_learning import object_motion_nets
from depth_and_motion_learning import parameter_container
from depth_and_motion_learning import transform_utils
from depth_and_motion_learning.dataset import data_processing
from depth_and_motion_learning.dataset import reader_cityscapes
from depth_and_motion_learning.losses import loss_aggregator
DEFAULT_PARAMS = {
'batch_size': None,
'input': {
'data_path': '',
# If the average L1 distance between two images is less than this
# threshold, they will be assumed to be near duplicates - a situation
# that happens often in robot footage, when the camera and the scene are
# static.
'duplicates_filter_threshold': 0.01,
# Size of shuffling queue. Larger - better shuffling. Smaller - faster
# and less host memory usage.
'shuffle_queue_size': 1024,
# Used in tf.data.Dataset.prefetch.
'prefetch_size': 32,
# Allows arbitrary parameters to be passed to the reader.
'reader': {},
},
'image_preprocessing': {
'data_augmentation': True,
# Size into which images will be resized, after random cropping.
'image_height': 128,
'image_width': 416,
},
'loss_weights': {
'rgb_consistency': 1.0,
'ssim': 3.0,
'depth_consistency': 0.0,
'depth_smoothing': 0.001,
'depth_supervision': 0.0,
'rotation_cycle_consistency': 1e-3,
'translation_cycle_consistency': 5e-2,
'depth_variance': 0.0,
'motion_smoothing': 1.0,
'motion_drift': 0.2,
},
'loss_params': {
# Stops gradient on the target depth when computing the depth
# consistency loss.
'target_depth_stop_gradient': True,
# Normalize the scale by the mean depth.
'scale_normalization': False,
},
'depth_predictor_params': {
'layer_norm_noise_rampup_steps': 10000,
'weight_decay': 0.0,
'learn_scale': False,
'reflect_padding': False,
},
'motion_prediction_params': {
'weight_reg': 0.0,
'align_corners': True,
'auto_mask': True,
},
'learn_intrinsics': {
'enabled': False,
# If True, the same set of intrinsic params will be assigned to all frames
# of a given video_id (works with the YouTube format in /dataset).
'per_video': False,
# If per_video is true, this is the maximal number of video ids tracked
# by the hash table that keeps the per-video intrinsics.
'max_number_of_videos': 1000,
},
# True to feed depth predictions into the motion field network.
'cascade': True,
# True to use a pretrained mask network to confine moving objects.
'use_mask': False,
'learn_egomotion': True,
# Number of pixels to dilate the foreground mask by (0 to not dilate).
'foreground_dilation': 8,
# If nonzero, motion fields will be unfrozen after motion_field_burnin_steps
# steps. Over the first half of the motion_field_burnin_steps steps, the
# motion fields will be zero. Then the ramp up is linear.
'motion_field_burnin_steps': 20000,
# TPUEstimator keys, to allow strict ParameterContainer usage.
'context': None,
'use_tpu': None,
}
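# --- Illustrative sketch (not part of the original file) ---
# The 'motion_field_burnin_steps' comment above describes a coefficient that is
# zero for the first half of the burn-in and then ramps up linearly to 1.0; the
# same expression appears later in loss_fn via tf.clip_by_value. A plain-Python
# version of that schedule, for clarity:
def _motion_field_burnin_coefficient_sketch(step, burnin_steps):
  """Hypothetical helper: 0.0 up to burnin_steps / 2, then a linear ramp that
  reaches 1.0 at burnin_steps and stays there."""
  return min(max(2.0 * step / burnin_steps - 1.0, 0.0), 1.0)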
def loss_fn(features, mode, params):
"""Computes the training loss for depth and egomotion training.
This function is written with TPU-friendliness in mind.
Args:
features: A dictionary mapping strings to tuples of (tf.Tensor, tf.Tensor),
representing pairs of frames. The loss will be calculated from these
tensors. The expected endpoints are 'rgb', 'depth', 'intrinsics_mat'
and 'intrinsics_mat_inv'.
mode: One of tf.estimator.ModeKeys: TRAIN, PREDICT or EVAL.
params: A dictionary with hyperparameters that optionally override
DEFAULT_PARAMS above.
Returns:
A dictionary mapping each loss name (see DEFAULT_PARAMS['loss_weights']'s
keys) to a scalar tf.Tensor representing the respective loss; together these
make up the total training loss.
Raises:
ValueError: If `features` endpoints don't conform to their expected
structure.
"""
params = parameter_container.ParameterContainer.from_defaults_and_overrides(
DEFAULT_PARAMS, params, is_strict=True, strictness_depth=2)
if len(features['rgb']) != 2 or 'depth' in features and len(
features['depth']) != 2:
raise ValueError('RGB and depth endpoints are expected to be a tuple of two'
' tensors. Rather, they are %s.' % str(features))
# On tpu we strive to stack tensors together and perform ops once on the
# entire stack, to save time and HBM memory. We thus stack the batch-of-first-
# frames and the batch-of-second frames, for both depth and RGB. The batch
# dimension of rgb_stack and gt_depth_stack are thus twice the original batch
# size.
rgb_stack = tf.concat(features['rgb'], axis=0)
depth_predictor = depth_prediction_nets.ResNet18DepthPredictor(
mode, params.depth_predictor_params.as_dict())
predicted_depth = depth_predictor.predict_depth(rgb_stack)
maybe_summary.histogram('PredictedDepth', predicted_depth)
endpoints = {}
endpoints['predicted_depth'] = tf.split(predicted_depth, 2, axis=0)
endpoints['rgb'] = features['rgb']
# We make the heuristic that depths that are less than 0.2 meters are not
# accurate. This is a rough placeholder for a confidence map that we're going
# to have in future.
if 'depth' in features:
endpoints['groundtruth_depth'] = features['depth']
if params.cascade:
motion_features = [
tf.concat([features['rgb'][0], endpoints['predicted_depth'][0]],
axis=-1),
tf.concat([features['rgb'][1], endpoints['predicted_depth'][1]],
axis=-1)
]
else:
motion_features = features['rgb']
motion_features_stack = tf.concat(motion_features, axis=0)
flipped_motion_features_stack = tf.concat(motion_features[::-1], axis=0)
# Unlike `rgb_stack`, here we stacked the frames in reverse order along the
# Batch dimension. By concatenating the two stacks below along the channel
# axis, we create the following tensor:
#
# Channel dimension (3)
# _ _
# | Frame1-s batch | Frame2-s batch |____Batch
# |_ Frame2-s batch | Frame1-s batch _| dimension (0)
#
# When we send this tensor to the motion prediction network, the first and
# second halves of the result represent the camera motion from Frame1 to
# Frame2 and from Frame2 to Frame1 respectively. Further below we impose a
# loss that drives these two to be the inverses of one another
# (cycle-consistency).
pairs = tf.concat([motion_features_stack, flipped_motion_features_stack],
axis=-1)
rot, trans, residual_translation, intrinsics_mat = (
object_motion_nets.motion_field_net(
images=pairs,
weight_reg=params.motion_prediction_params.weight_reg,
align_corners=params.motion_prediction_params.align_corners,
auto_mask=params.motion_prediction_params.auto_mask))
if params.motion_field_burnin_steps > 0.0:
step = tf.to_float(tf.train.get_or_create_global_step())
burnin_steps = tf.to_float(params.motion_field_burnin_steps)
residual_translation *= tf.clip_by_value(2 * step / burnin_steps - 1, 0.0,
1.0)
# If using ground truth egomotion:
if not params.learn_egomotion:
egomotion_mat = tf.concat(features['egomotion_mat'], axis=0)
rot = transform_utils.angles_from_matrix(egomotion_mat[:, :3, :3])
trans = egomotion_mat[:, :3, 3]
trans = tf.expand_dims(trans, 1)
trans = tf.expand_dims(trans, 1)
if params.use_mask:
mask = tf.to_float(tf.concat(features['mask'], axis=0) > 0)
if params.foreground_dilation > 0:
pool_size = params.foreground_dilation * 2 + 1
mask = tf.nn.max_pool(mask, [1, pool_size, pool_size, 1], [1] * 4, 'SAME')
residual_translation *= mask
maybe_summary.histogram('ResidualTranslation', residual_translation)
maybe_summary.histogram('BackgroundTranslation', trans)
maybe_summary.histogram('Rotation', rot)
endpoints['residual_translation'] = tf.split(residual_translation, 2, axis=0)
endpoints['background_translation'] = tf.split(trans, 2, axis=0)
endpoints['rotation'] = tf.split(rot, 2, axis=0)
if not params.learn_intrinsics.enabled:
endpoints['intrinsics_mat'] = features['intrinsics_mat']
endpoints['intrinsics_mat_inv'] = features['intrinsics_mat_inv']
elif params.learn_intrinsics.per_video:
int_mat = intrinsics_utils.create_and_fetch_intrinsics_per_video_index(
features['video_index'][0],
params.image_preprocessing.image_height,
params.image_preprocessing.image_width,
max_video_index=params.learn_intrinsics.max_number_of_videos)
endpoints['intrinsics_mat'] = tf.concat([int_mat] * 2, axis=0)
endpoints['intrinsics_mat_inv'] = intrinsics_utils.invert_intrinsics_matrix(
int_mat)
else:
# The intrinsic matrix should be the same, no matter the order of
# images (mat = inv_mat). It's probably a good idea to enforce this
# by a loss, but for now we just take their average as a prediction for the
# intrinsic matrix.
intrinsics_mat = 0.5 * sum(tf.split(intrinsics_mat, 2, axis=0))
endpoints['intrinsics_mat'] = [intrinsics_mat] * 2
endpoints['intrinsics_mat_inv'] = [
intrinsics_utils.invert_intrinsics_matrix(intrinsics_mat)] * 2
aggregator = loss_aggregator.DepthMotionFieldLossAggregator(
endpoints, params.loss_weights.as_dict(), params.loss_params.as_dict())
# Add some more summaries.
maybe_summary.image('rgb0', features['rgb'][0])
maybe_summary.image('rgb1', features['rgb'][1])
disp0, disp1 = tf.split(aggregator.output_endpoints['disparity'], 2, axis=0)
maybe_summary.image('disparity0/grayscale', disp0)
maybe_summary.image_with_colormap('disparity0/plasma',
tf.squeeze(disp0, axis=3), 'plasma', 0.0)
maybe_summary.image('disparity1/grayscale', disp1)
maybe_summary.image_with_colormap('disparity1/plasma',
tf.squeeze(disp1, axis=3), 'plasma', 0.0)
if maybe_summary.summaries_enabled():
if 'depth' in features:
gt_disp0 = 1.0 / tf.maximum(features['depth'][0], 0.5)
gt_disp1 = 1.0 / tf.maximum(features['depth'][1], 0.5)
maybe_summary.image('disparity_gt0', gt_disp0)
maybe_summary.image('disparity_gt1', gt_disp1)
depth_proximity_weight0, depth_proximity_weight1 = tf.split(
aggregator.output_endpoints['depth_proximity_weight'], 2, axis=0)
maybe_summary.image('consistency_weight0',
tf.expand_dims(depth_proximity_weight0, -1))
maybe_summary.image('consistency_weight1',
tf.expand_dims(depth_proximity_weight1, -1))
maybe_summary.image('trans', aggregator.output_endpoints['trans'])
maybe_summary.image('trans_inv', aggregator.output_endpoints['inv_trans'])
maybe_summary.image('trans_res', endpoints['residual_translation'][0])
maybe_summary.image('trans_res_inv', endpoints['residual_translation'][1])
return aggregator.losses
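# --- Illustrative sketch (not part of the original file) ---
# loss_fn above expects `features` to map endpoint names to pairs of tensors,
# one tensor per frame. A minimal hypothetical example of that structure (all
# shapes and values are placeholders; 'depth' is optional, and the intrinsics
# endpoints are only needed when learn_intrinsics is disabled):
def _example_features_sketch(batch_size=4, height=128, width=416):
  rgb0 = tf.zeros([batch_size, height, width, 3])
  rgb1 = tf.zeros([batch_size, height, width, 3])
  intrinsics = tf.eye(3, batch_shape=[batch_size])
  return {
      'rgb': (rgb0, rgb1),
      'intrinsics_mat': (intrinsics, intrinsics),
      'intrinsics_mat_inv': (intrinsics, intrinsics),
  }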
def input_fn(params):
"""An Estimator's input_fn for reading and preprocessing training data.
Reads pairs of RGBD frames from sstables, filters out near duplicates and
performs data augmentation.
Args:
params: A dictionary with hyperparameters.
Returns:
A tf.data.Dataset object.
"""
params = parameter_container.ParameterContainer.from_defaults_and_overrides(
DEFAULT_PARAMS, params, is_strict=True, strictness_depth=2)
dataset = reader_cityscapes.read_frame_pairs_from_data_path(
params.input.data_path, params.input.reader)
if params.learn_intrinsics.enabled and params.learn_intrinsics.per_video:
intrinsics_ht = intrinsics_utils.HashTableIndexer(
params.learn_intrinsics.max_number_of_videos)
def key_to_index(input_endpoints):
video_id = input_endpoints.pop('video_id', None)
if (video_id is not None and params.learn_intrinsics.enabled and
params.learn_intrinsics.per_video):
index = intrinsics_ht.get_or_create_index(video_id[0])
input_endpoints['video_index'] = index
input_endpoints['video_index'] = tf.stack([index] * 2)
return input_endpoints
dataset = dataset.map(key_to_index)
def is_duplicate(endpoints):
"""Implements a simple duplicate filter, based on L1 difference in RGB."""
return tf.greater(
tf.reduce_mean(tf.abs(endpoints['rgb'][1] - endpoints['rgb'][0])),
params.input.duplicates_filter_threshold)
if params.input.duplicates_filter_threshold > 0.0:
dataset = dataset.filter(is_duplicate)
# Add data augmentation
if params.image_preprocessing.data_augmentation:
if params.learn_intrinsics.per_video:
raise ValueError('Data augmentation together with learn_intrinsics.per_video '
'is not yet supported.')
def random_crop_and_resize_fn(endpoints):
return data_processing.random_crop_and_resize_pipeline(
endpoints, params.image_preprocessing.image_height,
params.image_preprocessing.image_width)
augmentation_fn = random_crop_and_resize_fn
else:
def resize_fn(endpoints):
return data_processing.resize_pipeline(
endpoints, params.image_preprocessing.image_height,
params.image_preprocessing.image_width)
augmentation_fn = resize_fn
dataset = dataset.map(augmentation_fn)
dataset = dataset.shuffle(params.input.shuffle_queue_size)
dataset = dataset.batch(params.batch_size, drop_remainder=True)
return dataset.prefetch(params.input.prefetch_size)
def get_vars_to_restore_fn(initialization):
"""Returns a vars_to_restore_fn for various types of `initialization`.
Args:
initialization: A string, the type of the initialization. Currently only
'imagenet' is supported.
Raises:
ValueError: `initialization` is not supported
"""
if initialization == 'imagenet':
def is_blacklisted(name):
for key in ['Adam', 'iconv', 'depth_scale', 'upconv', 'disp']:
if key in name:
return True
return False
def vars_to_restore_fn():
"""Returns a dictionary mapping checkpoint variable names to variables."""
vars_to_restore = {}
for v in tf.global_variables():
if is_blacklisted(v.op.name):
print(v.op.name, 'is blacklisted')
continue
if v.op.name.startswith('depth_prediction'):
name = v.op.name.replace('moving_mean', 'mu')
name = name.replace('moving_variance', 'sigma')
vars_to_restore[name[len('depth_prediction') + 1:]] = v
return vars_to_restore
return vars_to_restore_fn
else:
raise ValueError('Unknown initialization %s' % initialization)
def preprocess_masks(endpoints):
def create_mobile_mask(input_mask):
return tf.reduce_all(tf.not_equal(0, input_mask), axis=2, keepdims=True)
output = dict(endpoints)
output['mask'] = tuple([create_mobile_mask(m) for m in endpoints['mask']])
return output
def infer_depth(rgb_image, params):
"""Runs depth inference given an RGB frame.
Args:
rgb_image: A tf.Tensor of shape [B, H, W, 3] containing RGB images.
params: A dictionary of parameters containing overrides for
DEFAULT_PARAMS.
Returns:
A tf.Tensor of shape [B, H, W, 1] containing the inferred depths.
"""
if rgb_image.shape.rank != 4:
raise ValueError('rgb_image should have rank 4, not %d.' %
rgb_image.shape.rank)
params = parameter_container.ParameterContainer.from_defaults_and_overrides(
DEFAULT_PARAMS, params, is_strict=True, strictness_depth=2)
depth_predictor = depth_prediction_nets.ResNet18DepthPredictor(
tf.estimator.ModeKeys.PREDICT, params.depth_predictor_params.as_dict())
return depth_predictor.predict_depth(rgb_image)
def infer_egomotion(rgb_image1, rgb_image2, params):
"""Runs egomotion inference given two RGB frames.
Args:
rgb_image1: A tf.Tensor of shape [B, H, W, 3] containing RGB images, the
first frame.
rgb_image2: A tf.Tensor of shape [B, H, W, 3] containing RGB images, the
second frame.
params: A dictionary of parameters containing overrides for DEFAULT_PARAMS.
Returns:
A tuple of two tf.Tensors of shape [B, 3] containing the inferred rotation
angles and translation vector components.
"""
params = parameter_container.ParameterContainer.from_defaults_and_overrides(
DEFAULT_PARAMS, params, is_strict=True, strictness_depth=2)
if rgb_image1.shape.rank != 4 or rgb_image2.shape.rank != 4:
raise ValueError('rgb_image1 and rgb_image2 should have rank 4, not '
'%d and %d.' %
(rgb_image1.shape.rank, rgb_image2.shape.rank))
rgb_stack = tf.concat([rgb_image1, rgb_image2], axis=0)
flipped_rgb_stack = tf.concat([rgb_image2, rgb_image1], axis=0)
rot, trans, _ = object_motion_nets.motion_vector_net(tf.concat(
[rgb_stack, flipped_rgb_stack], axis=3), 0.0, False)
rot12, rot21 = tf.split(rot, 2, axis=0)
trans12, trans21 = tf.split(trans, 2, axis=0)
# rot12 and rot21 should be the inverses of one another, but in reality they
# are not exactly so. Averaging rot12 and inv(rot21) gives a better estimator
# for the rotation. Similarly, trans12 and rot12*trans21 should be the
# negatives of one another, so we average rot12*trans21 and trans12
# to get a better estimator. TODO(gariel): Check if there's an estimator
# with less variance.
avg_rot = 0.5 * (tf.linalg.inv(rot21) + rot12)
avg_trans = 0.5 * (-tf.squeeze(
tf.matmul(rot12, tf.expand_dims(trans21, -1)), axis=-1) + trans12)
return avg_rot, avg_trans
|
apache-2.0
| 2,114,861,337,026,813,700
| 38.762105
| 80
| 0.678721
| false
| 3.625144
| false
| false
| false
|
mullikine/ranger
|
ranger/container/settings.py
|
1
|
8531
|
# Copyright (C) 2009-2013 Roman Zimbelmann <hut@lepus.uberspace.de>
# This software is distributed under the terms of the GNU GPL version 3.
from inspect import isfunction
from ranger.ext.signals import SignalDispatcher, Signal
from ranger.core.shared import FileManagerAware
from ranger.gui.colorscheme import _colorscheme_name_to_class
import re
import os.path
ALLOWED_SETTINGS = {
'automatically_count_files': bool,
'autosave_bookmarks': bool,
'autoupdate_cumulative_size': bool,
'cd_bookmarks': bool,
'collapse_preview': bool,
'colorscheme': str,
'column_ratios': (tuple, list),
'confirm_on_delete': str,
'dirname_in_tabs': bool,
'display_size_in_main_column': bool,
'display_size_in_status_bar': bool,
'display_tags_in_all_columns': bool,
'draw_borders': bool,
'draw_progress_bar_in_status_bar': bool,
'flushinput': bool,
'hidden_filter': str,
'idle_delay': int,
'max_console_history_size': (int, type(None)),
'max_history_size': (int, type(None)),
'mouse_enabled': bool,
'open_all_images': bool,
'padding_right': bool,
'preview_directories': bool,
'preview_files': bool,
'preview_images': bool,
'preview_max_size': int,
'preview_script': (str, type(None)),
'save_console_history': bool,
'scroll_offset': int,
'shorten_title': int,
'show_cursor': bool, # TODO: not working?
'show_selection_in_titlebar': bool,
'show_hidden_bookmarks': bool,
'show_hidden': bool,
'sort_case_insensitive': bool,
'sort_directories_first': bool,
'sort_reverse': bool,
'sort': str,
'status_bar_on_top': bool,
'tilde_in_titlebar': bool,
'unicode_ellipsis': bool,
'update_title': bool,
'update_tmux_title': bool,
'use_preview_script': bool,
'vcs_aware': bool,
'vcs_backend_bzr': str,
'vcs_backend_git': str,
'vcs_backend_hg': str,
'xterm_alt_key': bool,
}
DEFAULT_VALUES = {
bool: False,
type(None): None,
str: "",
int: 0,
list: [],
tuple: tuple([]),
}
class Settings(SignalDispatcher, FileManagerAware):
def __init__(self):
SignalDispatcher.__init__(self)
self.__dict__['_localsettings'] = dict()
self.__dict__['_localregexes'] = dict()
self.__dict__['_tagsettings'] = dict()
self.__dict__['_settings'] = dict()
for name in ALLOWED_SETTINGS:
self.signal_bind('setopt.'+name,
self._sanitize, priority=1.0)
self.signal_bind('setopt.'+name,
self._raw_set_with_signal, priority=0.2)
def _sanitize(self, signal):
name, value = signal.setting, signal.value
if name == 'column_ratios':
# TODO: cover more cases here
if isinstance(value, tuple):
signal.value = list(value)
if not isinstance(value, list) or len(value) < 2:
signal.value = [1, 1]
else:
signal.value = [int(i) if str(i).isdigit() else 1 \
for i in value]
elif name == 'colorscheme':
_colorscheme_name_to_class(signal)
elif name == 'preview_script':
if isinstance(value, str):
result = os.path.expanduser(value)
if os.path.exists(result):
signal.value = result
else:
signal.value = None
elif name == 'use_preview_script':
if self._settings['preview_script'] is None and value \
and self.fm.ui.is_on:
self.fm.notify("Preview script undefined or not found!",
bad=True)
def set(self, name, value, path=None, tags=None):
assert name in ALLOWED_SETTINGS, "No such setting: {0}!".format(name)
if name not in self._settings:
previous = None
else:
previous=self._settings[name]
assert self._check_type(name, value)
assert not (tags and path), "Can't set a setting for path and tag " \
"at the same time!"
kws = dict(setting=name, value=value, previous=previous,
path=path, tags=tags, fm=self.fm)
self.signal_emit('setopt', **kws)
self.signal_emit('setopt.'+name, **kws)
def get(self, name, path=None):
assert name in ALLOWED_SETTINGS, "No such setting: {0}!".format(name)
if path:
localpath = path
else:
try:
localpath = self.fm.thisdir.path
except:
localpath = path
if localpath:
for pattern, regex in self._localregexes.items():
if name in self._localsettings[pattern] and\
regex.search(localpath):
return self._localsettings[pattern][name]
if self._tagsettings and path:
realpath = os.path.realpath(path)
if realpath in self.fm.tags:
tag = self.fm.tags.marker(realpath)
if tag in self._tagsettings and name in self._tagsettings[tag]:
return self._tagsettings[tag][name]
if name in self._settings:
return self._settings[name]
else:
type_ = self.types_of(name)[0]
value = DEFAULT_VALUES[type_]
self._raw_set(name, value)
self.__setattr__(name, value)
return self._settings[name]
def __setattr__(self, name, value):
if name.startswith('_'):
self.__dict__[name] = value
else:
self.set(name, value, None)
def __getattr__(self, name):
if name.startswith('_'):
return self.__dict__[name]
else:
return self.get(name, None)
def __iter__(self):
for x in self._settings:
yield x
def types_of(self, name):
try:
typ = ALLOWED_SETTINGS[name]
except KeyError:
return tuple()
else:
if isinstance(typ, tuple):
return typ
else:
return (typ, )
def _check_type(self, name, value):
typ = ALLOWED_SETTINGS[name]
if isfunction(typ):
assert typ(value), \
"Warning: The option `" + name + "' has an incorrect type!"
else:
assert isinstance(value, typ), \
"Warning: The option `" + name + "' has an incorrect type!"\
" Got " + str(type(value)) + ", expected " + str(typ) + "!" +\
" Please check if your commands.py is up to date." if not \
self.fm.ui.is_set_up else ""
return True
__getitem__ = __getattr__
__setitem__ = __setattr__
def _raw_set(self, name, value, path=None, tags=None):
if path:
if not path in self._localsettings:
try:
regex = re.compile(path)
except:
# Bad regular expression
return
self._localregexes[path] = regex
self._localsettings[path] = dict()
self._localsettings[path][name] = value
# make sure name is in _settings, so __iter__ runs through
# local settings too.
if not name in self._settings:
type_ = self.types_of(name)[0]
value = DEFAULT_VALUES[type_]
self._settings[name] = value
elif tags:
for tag in tags:
if tag not in self._tagsettings:
self._tagsettings[tag] = dict()
self._tagsettings[tag][name] = value
else:
self._settings[name] = value
def _raw_set_with_signal(self, signal):
self._raw_set(signal.setting, signal.value, signal.path, signal.tags)
class LocalSettings():
def __init__(self, path, parent):
self.__dict__['_parent'] = parent
self.__dict__['_path'] = path
def __setattr__(self, name, value):
if name.startswith('_'):
self.__dict__[name] = value
else:
self._parent.set(name, value, self._path)
def __getattr__(self, name):
if name.startswith('_'):
return self.__dict__[name]
else:
return self._parent.get(name, self._path)
def __iter__(self):
for x in self._parent._settings:
yield x
__getitem__ = __getattr__
__setitem__ = __setattr__
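# --- Illustrative sketch (not part of the original module) ---
# Hypothetical usage of the Settings object defined above (assumes a running
# ranger FileManager, since set() passes self.fm along with the signal):
# settings = Settings()
# settings.column_ratios = (1, 3, 4)   # sanitized to [1, 3, 4] by _sanitize
# ratios = settings.column_ratios      # falls back to DEFAULT_VALUES if unset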
|
gpl-3.0
| -4,823,316,337,248,086,000
| 32.586614
| 79
| 0.538155
| false
| 3.940416
| false
| false
| false
|
smendez-hi/SUMO-hib
|
tools/visualization/mpl_dump_onNet.py
|
1
|
17971
|
#!/usr/bin/env python
"""
@file mpl_dump_onNet.py
@author Daniel Krajzewicz
@author Michael Behrisch
@date 2007-10-25
@version $Id: mpl_dump_onNet.py 11671 2012-01-07 20:14:30Z behrisch $
This script reads a network and a dump file and
draws the network, coloring it by the values
found within the dump-file.
matplotlib has to be installed for this purpose
SUMO, Simulation of Urban MObility; see http://sumo.sourceforge.net/
Copyright (C) 2008-2012 DLR (http://www.dlr.de/) and contributors
All rights reserved
"""
from matplotlib import rcParams
from pylab import *
import os, string, sys, StringIO
import math
from optparse import OptionParser
from xml.sax import saxutils, make_parser, handler
def toHex(val):
"""Converts the given value (0-255) into its hexadecimal representation"""
hex = "0123456789abcdef"
return hex[int(val/16)] + hex[int(val - int(val/16)*16)]
def toFloat(val):
"""Converts the given value (0-255) into its hexadecimal representation"""
hex = "0123456789abcdef"
return float(hex.find(val[0])*16 + hex.find(val[1]))
def toColor(val, colormap):
"""Converts the given value (0-1) into a color definition parseable by matplotlib"""
for i in range(0, len(colormap)-1):
if colormap[i+1][0]>val:
scale = (val - colormap[i][0]) / (colormap[i+1][0] - colormap[i][0])
r = colormap[i][1][0] + (colormap[i+1][1][0] - colormap[i][1][0]) * scale
g = colormap[i][1][1] + (colormap[i+1][1][1] - colormap[i][1][1]) * scale
b = colormap[i][1][2] + (colormap[i+1][1][2] - colormap[i][1][2]) * scale
return "#" + toHex(r) + toHex(g) + toHex(b)
return "#" + toHex(colormap[-1][1][0]) + toHex(colormap[-1][1][1]) + toHex(colormap[-1][1][2])
def parseColorMap(mapDef):
ret = []
defs = mapDef.split(",")
for d in defs:
(value, color) = d.split(":")
r = color[1:3]
g = color[3:5]
b = color[5:7]
ret.append( (float(value), ( toFloat(r), toFloat(g), toFloat(b) ) ) )
return ret
class NetReader(handler.ContentHandler):
"""Reads a network, storing the edge geometries, lane numbers and max. speeds"""
def __init__(self):
self._id = ''
self._edge2lanes = {}
self._edge2speed = {}
self._edge2shape = {}
self._edge2from = {}
self._edge2to = {}
self._node2x = {}
self._node2y = {}
self._currentShapes = []
self._parseLane = False
def startElement(self, name, attrs):
self._parseLane = False
if name == 'edge':
if not attrs.has_key('function') or attrs['function'] != 'internal':
self._id = attrs['id']
self._edge2from[attrs['id']] = attrs['from']
self._edge2to[attrs['id']] = attrs['to']
self._edge2lanes[attrs['id']] = 0
self._currentShapes = []
else:
self._id = ""
if name == 'lane' and self._id!="":
self._edge2speed[self._id] = float(attrs['maxspeed'])
self._edge2lanes[self._id] = self._edge2lanes[self._id] + 1
self._parseLane = True
self._currentShapes.append(attrs["shape"])
if name == 'junction':
self._id = attrs['id']
if self._id[0]!=':':
self._node2x[attrs['id']] = attrs['x']
self._node2y[attrs['id']] = attrs['y']
else:
self._id = ""
def endElement(self, name):
if self._parseLane:
self._parseLane = False
if name == 'edge' and self._id!="":
noShapes = len(self._currentShapes)
if noShapes%2 == 1 and noShapes>0:
self._edge2shape[self._id] = self._currentShapes[int(noShapes/2)]
elif noShapes%2 == 0 and len(self._currentShapes[0])!=2:
cshapes = []
minLen = -1
for i in self._currentShapes:
cshape = []
es = i.split(" ")
for e in es:
p = e.split(",")
cshape.append((float(p[0]), float(p[1])))
cshapes.append(cshape)
if minLen==-1 or minLen>len(cshape):
minLen = len(cshape)
self._edge2shape[self._id] = ""
if minLen>2:
for i in range(0, minLen):
x = 0.
y = 0.
for j in range(0, noShapes):
x = x + cshapes[j][i][0]
y = y + cshapes[j][i][1]
x = x / float(noShapes)
y = y / float(noShapes)
if self._edge2shape[self._id] != "":
self._edge2shape[self._id] = self._edge2shape[self._id] + " "
self._edge2shape[self._id] = self._edge2shape[self._id] + str(x) + "," + str(y)
def plotData(self, weights, options, values1, values2, saveName, colorMap):
edge2plotLines = {}
edge2plotColors = {}
edge2plotWidth = {}
xmin = 10000000.
xmax = -10000000.
ymin = 10000000.
ymax = -10000000.
min_width = 0
if options.min_width:
min_width = options.min_width
for edge in self._edge2from:
# compute shape
xs = []
ys = []
if edge not in self._edge2shape or self._edge2shape[edge]=="":
xs.append(float(self._node2x[self._edge2from[edge]]))
xs.append(float(self._node2x[self._edge2to[edge]]))
ys.append(float(self._node2y[self._edge2from[edge]]))
ys.append(float(self._node2y[self._edge2to[edge]]))
else:
shape = self._edge2shape[edge].split(" ")
l = []
for s in shape:
p = s.split(",")
xs.append(float(p[0]))
ys.append(float(p[1]))
for x in xs:
if x<xmin:
xmin = x
if x>xmax:
xmax = x
for y in ys:
if y<ymin:
ymin = y
if y>ymax:
ymax = y
# save shape
edge2plotLines[edge] = (xs, ys)
# compute color
if edge in values2:
c = values2[edge]
else:
c = 0
edge2plotColors[edge] = toColor(c, colorMap)
# compute width
if edge in values1:
w = values1[edge]
if w>0:
w = 10. * math.log(1 + values1[edge]) + min_width
else:
w = min_width
if options.max_width and w>options.max_width:
w = options.max_width
if w<min_width:
w = min_width
edge2plotWidth[edge] = w
else:
edge2plotWidth[edge] = min_width
if options.verbose:
print "x-limits: " + str(xmin) + " - " + str(xmax)
print "y-limits: " + str(ymin) + " - " + str(ymax)
if not options.show:
rcParams['backend'] = 'Agg'
# set figure size
if options.size and not options.show:
f = figure(figsize=(options.size.split(",")))
else:
f = figure()
for edge in edge2plotLines:
plot(edge2plotLines[edge][0], edge2plotLines[edge][1], color=edge2plotColors[edge], linewidth=edge2plotWidth[edge])
# set axes
if options.xticks!="":
(xb, xe, xd, xs) = options.xticks.split(",")
xticks(arange(xb, xe, xd), size = xs)
if options.yticks!="":
(yb, ye, yd, ys) = options.yticks.split(",")
yticks(arange(yb, ye, yd), size = ys)
if options.xlim!="":
(xb, xe) = options.xlim.split(",")
xlim(int(xb), int(xe))
else:
xlim(xmin, xmax)
if options.ylim!="":
(yb, ye) = options.ylim.split(",")
ylim(int(yb), int(ye))
else:
ylim(ymin, ymax)
if saveName:
savefig(saveName);
if options.show:
show()
def plot(self, weights, options, colorMap):
self._minValue1 = weights._minValue1
self._minValue2 = weights._minValue2
self._maxValue1 = weights._maxValue1
self._maxValue2 = weights._maxValue2
if options.join:
self.plotData(weights, options, weights._edge2value1, weights._edge2value2, options.output, colorMap)
else:
for i in weights._intervalBegins:
if options.verbose:
print " Processing step %d..." % i
output = options.output
if output:
output = output.replace("HERE", "%")
output = output % i
self.plotData(weights, options, weights._unaggEdge2value1[i], weights._unaggEdge2value2[i], output, colorMap )
def knowsEdge(self, id):
return id in self._edge2from
class WeightsReader(handler.ContentHandler):
"""Reads the dump file"""
def __init__(self, net, value1, value2):
self._id = ''
self._edge2value2 = {}
self._edge2value1 = {}
self._edge2no1 = {}
self._edge2no2 = {}
self._net = net
self._intervalBegins = []
self._unaggEdge2value2 = {}
self._unaggEdge2value1 = {}
self._beginTime = -1
self._value1 = value1
self._value2 = value2
def startElement(self, name, attrs):
if name == 'interval':
self._beginTime = int(attrs['begin'])
self._intervalBegins.append(self._beginTime)
self._unaggEdge2value2[self._beginTime] = {}
self._unaggEdge2value1[self._beginTime] = {}
if name == 'edge':
if self._net.knowsEdge(attrs['id']):
self._id = attrs['id']
if self._id not in self._edge2value2:
self._edge2value2[self._id] = 0
self._edge2value1[self._id] = 0
self._edge2no1[self._id] = 0
self._edge2no2[self._id] = 0
value1 = self._value1
if attrs.has_key(value1):
value1 = float(attrs[value1])
self._edge2no1[self._id] = self._edge2no1[self._id] + 1
else:
value1 = float(value1)
self._edge2value1[self._id] = self._edge2value1[self._id] + value1
self._unaggEdge2value1[self._beginTime][self._id] = value1
value2 = self._value2
if attrs.has_key(value2):
value2 = float(attrs[value2])
self._edge2no2[self._id] = self._edge2no2[self._id] + 1
else:
value2 = float(value2)
self._edge2value2[self._id] = self._edge2value2[self._id] + value2
self._unaggEdge2value2[self._beginTime][self._id] = value2
def updateExtrema(self, values1ByEdge, values2ByEdge):
for edge in values1ByEdge:
if self._minValue1==-1 or self._minValue1>values1ByEdge[edge]:
self._minValue1 = values1ByEdge[edge]
if self._maxValue1==-1 or self._maxValue1<values1ByEdge[edge]:
self._maxValue1 = values1ByEdge[edge]
if self._minValue2==-1 or self._minValue2>values2ByEdge[edge]:
self._minValue2 = values2ByEdge[edge]
if self._maxValue2==-1 or self._maxValue2<values2ByEdge[edge]:
self._maxValue2 = values2ByEdge[edge]
def valueDependantNorm(self, values, minV, maxV, tendency, percSpeed):
if tendency:
for edge in self._edge2value2:
if values[edge]<0:
values[edge] = 0
else:
values[edge] = 1
elif percSpeed:
for edge in self._edge2value2:
values[edge] = (values[edge] / self._net._edge2speed[edge])
elif minV!=maxV:
for edge in self._edge2value2:
values[edge] = (values[edge] - minV) / (maxV - minV)
def norm(self, tendency, percSpeed):
self._minValue1 = -1
self._maxValue1 = -1
self._minValue2 = -1
self._maxValue2 = -1
# compute mean value if join is set
if options.join:
for edge in self._edge2value2:
if float(self._edge2no1[edge])!=0:
self._edge2value1[edge] = float(self._edge2value1[edge]) / float(self._edge2no1[edge])
else:
self._edge2value1[edge] = float(self._edge2value1[edge])
if float(self._edge2no2[edge])!=0:
self._edge2value2[edge] = float(self._edge2value2[edge]) / float(self._edge2no2[edge])
else:
self._edge2value2[edge] = float(self._edge2value2[edge])
# compute min/max
if options.join:
self.updateExtrema(self._edge2value1, self._edge2value2)
else:
for i in weights._intervalBegins:
self.updateExtrema(self._unaggEdge2value1[i], self._unaggEdge2value2[i])
# norm
if options.verbose:
print "w range: " + str(self._minValue1) + " - " + str(self._maxValue1)
print "c range: " + str(self._minValue2) + " - " + str(self._maxValue2)
if options.join:
self.valueDependantNorm(self._edge2value1, self._minValue1, self._maxValue1, False, percSpeed and self._value1=="speed")
self.valueDependantNorm(self._edge2value2, self._minValue2, self._maxValue2, tendency, percSpeed and self._value2=="speed")
else:
for i in weights._intervalBegins:
self.valueDependantNorm(self._unaggEdge2value1[i], self._minValue1, self._maxValue1, False, percSpeed and self._value1=="speed")
self.valueDependantNorm(self._unaggEdge2value2[i], self._minValue2, self._maxValue2, tendency, percSpeed and self._value2=="speed")
# initialise
optParser = OptionParser()
optParser.add_option("-v", "--verbose", action="store_true", dest="verbose",
default=False, help="tell me what you are doing")
# i/o
optParser.add_option("-n", "--net-file", dest="net",
help="SUMO network to use (mandatory)", metavar="FILE")
optParser.add_option("-d", "--dump", dest="dump",
help="dump file to use", metavar="FILE")
optParser.add_option("-o", "--output", dest="output",
help="(base) name for the output", metavar="FILE")
# data handling
optParser.add_option("-j", "--join", action="store_true", dest="join",
default=False, help="sums up values from all read intervals")
optParser.add_option("-w", "--min-width", dest="min_width",
type="float", help="sets minimum line width")
optParser.add_option("-W", "--max-width", dest="max_width",
type="float", help="sets maximum line width")
optParser.add_option("-c", "--min-color", dest="min_color",
type="float", help="sets minimum color (between 0 and 1)")
optParser.add_option("-C", "--max-color", dest="max_color",
type="float", help="sets maximum color (between 0 and 1)")
optParser.add_option("--tendency-coloring", action="store_true", dest="tendency_coloring",
default=False, help="show only 0/1 color for egative/positive values")
optParser.add_option("--percentage-speed", action="store_true", dest="percentage_speed",
default=False, help="speed is normed to maximum allowed speed on an edge")
optParser.add_option("--values", dest="values",
type="string", default="entered,speed", help="which values shall be parsed")
optParser.add_option("--color-map", dest="colormap",
type="string", default="0:#ff0000,.5:#ffff00,1:#00ff00", help="Defines the color map")
# axes/legend
optParser.add_option("--xticks", dest="xticks",type="string", default="",
help="defines ticks on x-axis")
optParser.add_option("--yticks", dest="yticks",type="string", default="",
help="defines ticks on y-axis")
optParser.add_option("--xlim", dest="xlim",type="string", default="",
help="defines x-axis range")
optParser.add_option("--ylim", dest="ylim",type="string", default="",
help="defines y-axis range")
# output
optParser.add_option("--size", dest="size",type="string", default="",
help="defines the output size")
# processing
optParser.add_option("-s", "--show", action="store_true", dest="show",
default=False, help="shows each plot after generating it")
# parse options
(options, args) = optParser.parse_args()
# check set options
if not options.show and not options.output:
print "Neither show (--show) not write (--output <FILE>)? Exiting..."
exit()
# init color map
colorMap = parseColorMap(options.colormap)
# read network
if options.verbose:
print "Reading net..."
parser = make_parser()
net = NetReader()
parser.setContentHandler(net)
parser.parse(options.net)
# read weights
if options.verbose:
print "Reading weights..."
mValues = options.values.split(",")
weights = WeightsReader(net, mValues[0], mValues[1])
parser.setContentHandler(weights)
parser.parse(options.dump)
# process
if options.verbose:
print "Norming weights..."
weights.norm(options.tendency_coloring, options.percentage_speed)
if options.verbose:
print "Plotting..."
net.plot(weights, options, colorMap)
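# --- Illustrative sketch (not part of the original script) ---
# Hypothetical invocation (file names are placeholders); --join aggregates all
# intervals into a single plot, otherwise one output per interval is written:
#   python mpl_dump_onNet.py -n net.net.xml -d dump.xml -o out.png --join -v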
|
gpl-3.0
| -7,227,673,384,518,771,000
| 38.935556
| 147
| 0.537032
| false
| 3.648934
| false
| false
| false
|
pyro-ppl/numpyro
|
test/test_pickle.py
|
1
|
1681
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import pickle
import pytest
from jax import random, test_util
import jax.numpy as jnp
import numpyro
import numpyro.distributions as dist
from numpyro.infer import (
HMC,
HMCECS,
MCMC,
NUTS,
SA,
BarkerMH,
DiscreteHMCGibbs,
MixedHMC,
)
def normal_model():
numpyro.sample("x", dist.Normal(0, 1))
def bernoulli_model():
numpyro.sample("x", dist.Bernoulli(0.5))
def logistic_regression():
data = jnp.arange(10)
x = numpyro.sample("x", dist.Normal(0, 1))
with numpyro.plate("N", 10, subsample_size=2):
batch = numpyro.subsample(data, 0)
numpyro.sample("obs", dist.Bernoulli(logits=x), obs=batch)
@pytest.mark.parametrize("kernel", [BarkerMH, HMC, NUTS, SA])
def test_pickle_hmc(kernel):
mcmc = MCMC(kernel(normal_model), num_warmup=10, num_samples=10)
mcmc.run(random.PRNGKey(0))
pickled_mcmc = pickle.loads(pickle.dumps(mcmc))
test_util.check_close(mcmc.get_samples(), pickled_mcmc.get_samples())
@pytest.mark.parametrize("kernel", [DiscreteHMCGibbs, MixedHMC])
def test_pickle_discrete_hmc(kernel):
mcmc = MCMC(kernel(HMC(bernoulli_model)), num_warmup=10, num_samples=10)
mcmc.run(random.PRNGKey(0))
pickled_mcmc = pickle.loads(pickle.dumps(mcmc))
test_util.check_close(mcmc.get_samples(), pickled_mcmc.get_samples())
def test_pickle_hmcecs():
mcmc = MCMC(HMCECS(NUTS(logistic_regression)), num_warmup=10, num_samples=10)
mcmc.run(random.PRNGKey(0))
pickled_mcmc = pickle.loads(pickle.dumps(mcmc))
test_util.check_close(mcmc.get_samples(), pickled_mcmc.get_samples())
|
apache-2.0
| 3,439,322,928,740,963,300
| 26.557377
| 81
| 0.693635
| false
| 2.755738
| true
| false
| false
|
fabianp/pytron
|
setup.py
|
1
|
1329
|
from Cython.Distutils import build_ext
import numpy as np
from glob import glob
from setuptools import setup, Extension
CLASSIFIERS = """\
Development Status :: 5 - Production/Stable
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved
Programming Language :: Python
Programming Language :: Python :: 2
Programming Language :: Python :: 2.6
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Programming Language :: Python :: 3.2
Programming Language :: Python :: 3.3
Topic :: Software Development
Operating System :: POSIX
Operating System :: Unix
"""
sources =['pytron/tron.pyx', 'pytron/src/tron.cpp', 'pytron/src/tron_helper.cpp'] + \
glob('pytron/src/blas/*.c')
setup(
name='pytron',
description='Python bindings for TRON optimizer',
long_description=open('README.rst').read(),
version='0.3',
author='Fabian Pedregosa',
author_email='f@bianp.net',
url='http://pypi.python.org/pypi/pytron',
packages=['pytron'],
classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f],
license='Simplified BSD',
requires=['numpy', 'scipy'],
cmdclass={'build_ext': build_ext},
ext_modules=[Extension('pytron.tron',
sources=sources,
language='c++', include_dirs=[np.get_include(), 'pytron/src/'])],
)
|
bsd-3-clause
| 1,267,563,133,996,209,700
| 28.533333
| 85
| 0.684725
| false
| 3.553476
| false
| false
| false
|
meskio/bitmask_client
|
src/leap/bitmask/platform_init/locks.py
|
1
|
12103
|
# -*- coding: utf-8 -*-
# locks.py
# Copyright (C) 2013 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Utilities for handling multi-platform file locking mechanisms
"""
import logging
import errno
import os
import platform
from leap.bitmask import platform_init
from leap.common.events import signal as signal_event
from leap.common.events import events_pb2 as proto
if platform_init.IS_UNIX:
from fcntl import flock, LOCK_EX, LOCK_NB
else: # WINDOWS
import datetime
import glob
import shutil
import time
from tempfile import gettempdir
from leap.bitmask.util import get_modification_ts, update_modification_ts
logger = logging.getLogger(__name__)
if platform_init.IS_UNIX:
class UnixLock(object):
"""
Uses flock to get an exclusive lock over a file.
See man 2 flock
"""
def __init__(self, path):
"""
Initializes the UnixLock with the path of the
desired lockfile
"""
self._fd = None
self.path = path
def get_lock(self):
"""
Tries to get a lock, and writes the running pid there if successful
"""
gotit, pid = self._get_lock_and_pid()
return gotit
def get_pid(self):
"""
Returns the pid of the locking process
"""
gotit, pid = self._get_lock_and_pid()
return pid
def _get_lock(self):
"""
Tries to get a lock, returning True if successful
:rtype: bool
"""
self._fd = os.open(self.path, os.O_CREAT | os.O_RDWR)
try:
flock(self._fd, LOCK_EX | LOCK_NB)
except IOError as exc:
# could not get the lock
if exc.args[0] in (errno.EDEADLK, errno.EAGAIN):
# errno 11 or 35
# Resource temporarily unavailable
return False
else:
raise
return True
@property
def locked_by_us(self):
"""
Returns True if the pid in the pidfile
is ours.
:rtype: bool
"""
gotit, pid = self._get_lock_and_pid()
return pid == os.getpid()
def _get_lock_and_pid(self):
"""
Tries to get a lock over the file.
Returns (locked, pid) tuple.
:rtype: tuple
"""
if self._get_lock():
self._write_to_pidfile()
return True, None
return False, self._read_from_pidfile()
def _read_from_pidfile(self):
"""
Tries to read pid from the pidfile,
returns False if no content found.
"""
pidfile = os.read(
self._fd, 16)
if not pidfile:
return False
try:
return int(pidfile.strip())
except Exception as exc:
exc.args += (pidfile, self.lock_file)
raise
def _write_to_pidfile(self):
"""
Writes the pid of the running process
to the pidfile
"""
fd = self._fd
os.ftruncate(fd, 0)
os.write(fd, '%d\n' % os.getpid())
os.fsync(fd)
if platform_init.IS_WIN:
# Time to wait (in secs) before assuming a raise window signal has not been
# ack-ed.
RAISE_WINDOW_TIMEOUT = 2
# How many steps to do while checking lockfile ts update.
RAISE_WINDOW_WAIT_STEPS = 10
def _release_lock(name):
"""
Tries to remove a folder path.
:param name: folder lock to remove
:type name: str
"""
try:
shutil.rmtree(name)
return True
except WindowsError as exc:
if exc.errno in (errno.EPIPE, errno.ENOENT,
errno.ESRCH, errno.EACCES):
logger.warning(
'exception while trying to remove the lockfile dir')
logger.warning('errno %s: %s' % (exc.errno, exc.args[1]))
# path does not exist
return False
else:
logger.debug('errno = %s' % (exc.errno,))
# we did not foresee this error, better add it explicitly
raise
class WindowsLock(object):
"""
Creates a lock based on the atomic nature of mkdir on Windows
system calls.
"""
LOCKBASE = os.path.join(gettempdir(), "bitmask-lock")
def __init__(self):
"""
Initializes the lock.
Sets the lock name to basename plus the process pid.
"""
self._fd = None
pid = os.getpid()
self.name = "%s-%s" % (self.LOCKBASE, pid)
self.pid = pid
def get_lock(self):
"""
Tries to get a lock, and writes the running pid there if successful
"""
gotit = self._get_lock()
return gotit
def _get_lock(self):
"""
Tries to write to a file with the current pid as part of the name
"""
try:
self._fd = os.makedirs(self.name)
except OSError as exc:
# could not create the dir
if exc.args[0] == 183:
logger.debug('cannot create dir')
# cannot create dir with existing name
return False
else:
raise
return self._is_one_pidfile()[0]
def _is_one_pidfile(self):
"""
Returns True, pid if there is only one pidfile with the expected
base path
:rtype: tuple
"""
pidfiles = glob.glob(self.LOCKBASE + '-*')
if len(pidfiles) == 1:
pid = pidfiles[0].split('-')[-1]
return True, int(pid)
else:
return False, None
def get_pid(self):
"""
Returns the pid of the locking process.
:rtype: int
"""
# XXX assert there is only one?
_, pid = self._is_one_pidfile()
return pid
def get_locking_path(self):
"""
Returns the pid path of the locking process.
:rtype: str
"""
pid = self.get_pid()
if pid:
return "%s-%s" % (self.LOCKBASE, pid)
def release_lock(self, name=None):
"""
Releases the pidfile dir for this process, by removing it.
"""
if not name:
name = self.name
_release_lock(name)
@classmethod
def release_all_locks(cls):
"""
Releases all locks. Used for clean shutdown.
"""
for lockdir in glob.glob("%s-%s" % (cls.LOCKBASE, '*')):
_release_lock(lockdir)
@property
def locked_by_us(self):
"""
Returns True if the pid in the pidfile
is ours.
:rtype: bool
"""
_, pid = self._is_one_pidfile()
return pid == self.pid
def update_ts(self):
"""
Updates the timestamp of the lock.
"""
if self.locked_by_us:
update_modification_ts(self.name)
def write_port(self, port):
"""
Writes the port for windows control to the pidfile folder
Returns True if successful.
:rtype: bool
"""
if not self.locked_by_us:
logger.warning("Tried to write control port to a "
"non-unique pidfile folder")
return False
port_file = os.path.join(self.name, "port")
with open(port_file, 'w') as f:
f.write("%s" % port)
return True
def get_control_port(self):
"""
Reads control port of the main instance from the port file
in the pidfile dir
:rtype: int
"""
pid = self.get_pid()
port_file = os.path.join(self.LOCKBASE + "-%s" % pid, "port")
port = None
try:
with open(port_file) as f:
port_str = f.read()
port = int(port_str.strip())
except IOError as exc:
if exc.errno == errno.ENOENT:
logger.error("Tried to read port from non-existent file")
else:
# we did not know explicitly about this error
raise
return port
def raise_window_ack():
"""
This function is called from the windows callback that is registered
with the raise_window event. It just updates the modification time
of the lock file so we can signal an ack to the instance that tried
to raise the window.
"""
lock = WindowsLock()
lock.update_ts()
def we_are_the_one_and_only():
"""
Returns True if we are the only instance running, False otherwise.
If we came later, send a raise signal to the main instance of the
application.
Under windows we are not using flock magic, so we wait during
RAISE_WINDOW_TIMEOUT time; if no ack is
received, we assume it was a stalled lock, so we remove it and continue
with initialization.
:rtype: bool
"""
_sys = platform.system()
if _sys in ("Linux", "Darwin"):
locker = UnixLock('/tmp/bitmask.lock')
locker.get_lock()
we_are_the_one = locker.locked_by_us
if not we_are_the_one:
signal_event(proto.RAISE_WINDOW)
return we_are_the_one
elif _sys == "Windows":
locker = WindowsLock()
locker.get_lock()
we_are_the_one = locker.locked_by_us
if not we_are_the_one:
locker.release_lock()
lock_path = locker.get_locking_path()
ts = get_modification_ts(lock_path)
nowfun = datetime.datetime.now
t0 = nowfun()
pause = RAISE_WINDOW_TIMEOUT / float(RAISE_WINDOW_WAIT_STEPS)
timeout_delta = datetime.timedelta(0, RAISE_WINDOW_TIMEOUT)
check_interval = lambda: nowfun() - t0 < timeout_delta
# let's assume it's a stalled lock
we_are_the_one = True
signal_event(proto.RAISE_WINDOW)
while check_interval():
if get_modification_ts(lock_path) > ts:
# yay! someone claimed their control over the lock.
# so the lock is alive
logger.debug('Raise window ACK-ed')
we_are_the_one = False
break
else:
time.sleep(pause)
if we_are_the_one:
# ok, it really was a stalled lock. let's remove all
# that is left, and put only ours there.
WindowsLock.release_all_locks()
WindowsLock().get_lock()
return we_are_the_one
else:
logger.warning("Multi-instance checker "
"not implemented for %s" % (_sys))
# lies, lies, lies...
return True
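# --- Illustrative sketch (not part of the original module) ---
# Hypothetical startup check using the helper above; a second instance signals
# the running one to raise its window and then exits:
# if not we_are_the_one_and_only():
#     sys.exit(0)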
|
gpl-3.0
| -3,435,159,310,197,118,000
| 29.032258
| 79
| 0.512683
| false
| 4.397892
| false
| false
| false
|
RPGOne/Skynet
|
pytorch-master/torch/nn/parallel/data_parallel.py
|
1
|
4062
|
import torch
from ..modules import Module
from .scatter_gather import scatter_kwargs, gather
from .replicate import replicate
from .parallel_apply import parallel_apply
class DataParallel(Module):
"""Implements data parallelism at the module level.
This container parallelizes the application of the given module by
splitting the input across the specified devices by chunking in the batch
dimension. In the forward pass, the module is replicated on each device,
and each replica handles a portion of the input. During the backwards
pass, gradients from each replica are summed into the original module.
The batch size should be larger than the number of GPUs used. It should
also be an integer multiple of the number of GPUs so that each chunk is the
same size (so that each GPU processes the same number of samples).
See also: :ref:`cuda-nn-dataparallel-instead`
Arbitrary positional and keyword inputs are allowed to be passed into
DataParallel EXCEPT Tensors. All variables will be scattered on dim
specified (default 0). Primitive types will be broadcasted, but all
other types will be a shallow copy and can be corrupted if written to in
the model's forward pass.
Args:
module: module to be parallelized
device_ids: CUDA devices (default: all devices)
output_device: device location of output (default: device_ids[0])
Example::
>>> net = torch.nn.DataParallel(model, device_ids=[0, 1, 2])
>>> output = net(input_var)
"""
# TODO: update notes/cuda.rst when this class handles 8+ GPUs well
def __init__(self, module, device_ids=None, output_device=None, dim=0):
super(DataParallel, self).__init__()
if device_ids is None:
device_ids = list(range(torch.cuda.device_count()))
if output_device is None:
output_device = device_ids[0]
self.dim = dim
self.module = module
self.device_ids = device_ids
self.output_device = output_device
if len(self.device_ids) == 1:
self.module.cuda(device_ids[0])
def forward(self, *inputs, **kwargs):
inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
if len(self.device_ids) == 1:
return self.module(*inputs[0], **kwargs[0])
replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
outputs = self.parallel_apply(replicas, inputs, kwargs)
return self.gather(outputs, self.output_device)
def replicate(self, module, device_ids):
return replicate(module, device_ids)
def scatter(self, inputs, kwargs, device_ids):
return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
def parallel_apply(self, replicas, inputs, kwargs):
return parallel_apply(replicas, inputs, kwargs)
def gather(self, outputs, output_device):
return gather(outputs, output_device, dim=self.dim)
def data_parallel(module, inputs, device_ids, output_device=None, dim=0, module_kwargs=None):
"""Evaluates module(input) in parallel across the GPUs given in device_ids.
This is the functional version of the DataParallel module.
Args:
module: the module to evaluate in parallel
inputs: inputs to the module
device_ids: GPU ids on which to replicate module
output_device: GPU location of the output. Use -1 to indicate the CPU.
(default: device_ids[0])
Returns:
a Variable containing the result of module(input) located on
output_device
"""
if not isinstance(inputs, tuple):
inputs = (inputs,)
if output_device is None:
output_device = device_ids[0]
inputs, module_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim)
if len(device_ids) == 1:
return module(*inputs[0], **module_kwargs[0])
replicas = replicate(module, device_ids[:len(inputs)])
outputs = parallel_apply(replicas, inputs, module_kwargs)
return gather(outputs, output_device, dim)
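# --- Illustrative sketch (not part of the original module) ---
# Hypothetical functional-style call mirroring the DataParallel example in the
# class docstring above; `model` and `input_var` are placeholders:
# output = data_parallel(model, input_var, device_ids=[0, 1, 2])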
|
bsd-3-clause
| -7,975,203,935,464,098,000
| 38.436893
| 93
| 0.679468
| false
| 4.074223
| false
| false
| false
|
dkgroot/remotecontrol_skinny_phone
|
tests/conftest.py
|
1
|
1313
|
import pytest
import re
from SccpLogger.SccpLogger import SccpLogger, EOF, TIMEOUT
def handle_tone(ssh, index, child_result_list):
tone = int(ssh.match.group(2))
dir = int(ssh.match.group(3))
return 'Tone', {'State':ssh.match.group(1).decode('utf-8'),'Type':sccp.lookup_tone(tone),'Direction':sccp.lookup_tonedirection(dir)}
@pytest.yield_fixture(scope="session", autouse=True, params=["10.15.15.205"])
def sccplogger(request):
options={'username':'cisco','password':'cisco','shelluser':'default','shellpasswd':'user','logfilename':'output.log'}
#hostname = '10.15.15.205'
hostname = request.param
_logger = SccpLogger(hostname, options)
try:
print("\nconnecting to %s..." %hostname)
_logger.connect()
print("connected")
_logger.login()
print('logged in to shell. setting up debug environment...')
_logger.setup_debug()
print('starting strace...')
_logger.start_strace()
print('ready to process events...\n')
yield _logger
#_logger.disconnect()
except TIMEOUT:
print("Connection timed out")
except EOF:
print("Disconnect from phone")
except KeyboardInterrupt:
print("Interrupted by User")
except Exception as e:
print("Exception occured: %s" %e)
|
gpl-3.0
| -8,339,181,251,816,196,000
| 36.514286
| 135
| 0.642803
| false
| 3.688202
| false
| false
| false
|
alt-core/sheetbot
|
models.py
|
1
|
5113
|
# coding: utf-8
import json
import random
import string
from google.appengine.ext import ndb
class GlobalBotVariables(ndb.Model):
scenario_uri = ndb.StringProperty()
class GroupMembers(ndb.Model):
members = ndb.StringProperty(repeated=True)
class PlayerStatus(ndb.Model):
scene = ndb.StringProperty()
scene_history = ndb.StringProperty(repeated=True)
action_token = ndb.StringProperty()
value = ndb.TextProperty()
class PlayerStatusDB(object):
    MAX_HISTORY = 5 # keep at most 5 entries of scene history
def __init__(self, user_id):
self.id = user_id
self.entry = PlayerStatus.get_by_id(user_id)
if self.entry:
self.db = json.loads(self.entry.value) or {}
else:
self.entry = PlayerStatus(id=user_id, scene="*start", value="{}")
self.db = {}
self.is_dirty = False
self.is_values_dirty = False
if self.action_token is None:
self.renew_action_token()
def __getitem__(self, item):
value = self.db[item]
return value
def __setitem__(self, item, value):
if isinstance(value, list) or isinstance(value, dict):
is_ref = True
else:
is_ref = False
if item not in self.db or (self.db[item] != value or is_ref):
            # Reference types can be mutated in place, which would defeat the dirty
            # check, so err on the side of always treating them as changed
self.db[item] = value
self.is_dirty = True
self.is_values_dirty = True
def __delitem__(self, item):
del self.db[item]
self.is_dirty = True
self.is_values_dirty = True
def __contains__(self, item):
return item in self.db
def keys(self):
return self.db.keys()
def get(self, item, default=None):
if item in self:
return self[item]
else:
return default
def reset(self):
self.db = {}
self.entry.scene = None
self.entry.scene_history = []
self.is_dirty = True
self.is_values_dirty = True
self.renew_action_token()
@property
def scene(self):
return self.entry.scene
@scene.setter
def scene(self, value):
self.entry.scene = value
self.is_dirty = True
@property
def scene_history(self):
return self.entry.scene_history
@scene_history.setter
def scene_history(self, value):
self.entry.scene_history = value
self.is_dirty = True
def push_scene_history(self, scene_title):
if scene_title is not None:
scene_history = self.scene_history
scene_history.append(scene_title)
self.scene_history = scene_history[-PlayerStatusDB.MAX_HISTORY:]
def pop_scene_history(self):
if len(self.scene_history) > 0:
return self.scene_history.pop()
return None
@property
def action_token(self):
return self.entry.action_token
@action_token.setter
def action_token(self, value):
self.entry.action_token = value
self.is_dirty = True
def renew_action_token(self):
self.action_token = \
u''.join([random.choice(string.ascii_letters) for _ in range(8)])
def __str__(self):
return str(self.db)
def save(self):
if self.is_dirty:
if self.is_values_dirty:
self.entry.value = json.dumps(self.db)
self.entry.put()
class GroupDB(object):
def __init__(self, group_id):
self.entry = GroupMembers.get_by_id(id=group_id)
if self.entry is None:
self.entry = GroupMembers(id=group_id, members=[])
def append_member(self, member):
if member not in self.entry.members:
self.entry.members.append(member)
self.entry.put()
def remove_member(self, member):
if member in self.entry.members:
self.entry.members.remove(member)
self.entry.put()
def clear(self):
if self.entry.members:
del self.entry.members[:]
self.entry.put()
class ImageFileStatDB(ndb.Model):
file_digest = ndb.StringProperty()
width = ndb.IntegerProperty()
height = ndb.IntegerProperty()
@classmethod
def get_cached_image_file_stat(cls, kind, image_url):
key = u'{}|{}'.format(kind, image_url)
stat = cls.get_by_id(id=key)
if stat is None:
return None
size = (stat.width, stat.height)
return stat.file_digest, size
@classmethod
def put_cached_image_file_stat(cls, kind, image_url, file_digest, size):
key = u'{}|{}'.format(kind, image_url)
entry = cls.get_by_id(id=key)
if entry is None:
entry = cls(id=key, file_digest=file_digest, width=size[0], height=size[1])
else:
if entry.file_digest == file_digest:
                # already up to date; do not rewrite the entry
return
entry.file_digest = file_digest
entry.width, entry.height = size
entry.put()
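# Usage sketch (illustrative only; needs an App Engine NDB context such as the
# testbed). The user id, keys and values below are examples, not data used
# elsewhere in this module.
def _example_player_flow():
    status = PlayerStatusDB('user-123')
    status['visits'] = status.get('visits', 0) + 1   # marks the JSON blob dirty
    status.scene = 'town'                            # stored outside the blob
    status.push_scene_history('*start')
    status.save()                                    # writes only because something changed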
|
mit
| 7,030,397,467,821,336,000
| 26.478022
| 87
| 0.581684
| false
| 3.579814
| false
| false
| false
|
gem/oq-svir-qgis
|
svir/dialogs/load_hcurves_as_layer_dialog.py
|
1
|
6001
|
# -*- coding: utf-8 -*-
# /***************************************************************************
# Irmt
# A QGIS plugin
# OpenQuake Integrated Risk Modelling Toolkit
# -------------------
# begin : 2013-10-24
# copyright : (C) 2014 by GEM Foundation
# email : devops@openquake.org
# ***************************************************************************/
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
from qgis.core import (
QgsFeature, QgsGeometry, QgsPointXY, edit, QgsTask, QgsApplication)
from svir.dialogs.load_output_as_layer_dialog import LoadOutputAsLayerDialog
from svir.utilities.utils import log_msg, WaitCursorManager
from svir.tasks.extract_npz_task import ExtractNpzTask
class LoadHazardCurvesAsLayerDialog(LoadOutputAsLayerDialog):
"""
Dialog to load hazard curves from an oq-engine output, as layer
"""
def __init__(self, drive_engine_dlg, iface, viewer_dock, session, hostname,
calc_id, output_type='hcurves', path=None, mode=None,
engine_version=None, calculation_mode=None):
assert output_type == 'hcurves'
LoadOutputAsLayerDialog.__init__(
self, drive_engine_dlg, iface, viewer_dock, session, hostname,
calc_id, output_type=output_type, path=path, mode=mode,
engine_version=engine_version, calculation_mode=calculation_mode)
self.setWindowTitle(
'Load hazard curves as layer')
self.create_num_sites_indicator()
self.create_rlz_or_stat_selector(all_ckb=True)
self.create_imt_selector(all_ckb=True)
self.load_all_rlzs_or_stats_chk.setChecked(True)
self.load_all_imts_chk.setChecked(True)
log_msg('Extracting hazard curves.'
' Watch progress in QGIS task bar',
level='I', message_bar=self.iface.messageBar())
self.extract_npz_task = ExtractNpzTask(
'Extract hazard curves', QgsTask.CanCancel, self.session,
self.hostname, self.calc_id, self.output_type, self.finalize_init,
self.on_extract_error)
QgsApplication.taskManager().addTask(self.extract_npz_task)
def set_ok_button(self):
self.ok_button.setEnabled(True)
def populate_dataset(self):
self.dataset = self.npz_file['all']
def populate_rlz_or_stat_cbx(self):
self.rlzs_or_stats = self.npz_file['all'].dtype.names[2:]
for rlz_or_stat in self.rlzs_or_stats:
self.rlz_or_stat_cbx.addItem(rlz_or_stat)
def on_rlz_or_stat_changed(self):
rlz_or_stat = self.rlz_or_stat_cbx.currentText()
dataset = self.npz_file['all'][rlz_or_stat]
self.imts = [imt for imt in dataset.dtype.names]
self.imt_cbx.clear()
for imt in self.imts:
self.imt_cbx.addItem(imt)
def on_imt_changed(self):
self.set_ok_button()
def show_num_sites(self):
self.num_sites_lbl.setText(
self.num_sites_msg % self.dataset.shape)
def populate_out_dep_widgets(self):
self.populate_rlz_or_stat_cbx()
self.populate_dataset()
self.show_num_sites()
def build_layer_name(self, **kwargs):
investigation_time = self.get_investigation_time()
layer_name = "hcurves_%sy" % investigation_time
return layer_name
def get_field_types(self, **kwargs):
field_types = {}
for rlz_or_stat in self.rlzs_or_stats:
if (not self.load_all_rlzs_or_stats_chk.isChecked()
and rlz_or_stat != self.rlz_or_stat_cbx.currentText()):
continue
for imt in self.dataset[rlz_or_stat].dtype.names:
if (not self.load_all_imts_chk.isChecked()
and imt != self.imt_cbx.currentText()):
continue
for iml in self.dataset[rlz_or_stat][imt].dtype.names:
field_name = "%s_%s_%s" % (rlz_or_stat, imt, iml)
# NOTE: assuming that all fields are numeric
field_types[field_name] = 'F'
return field_types
def on_iml_changed(self):
self.set_ok_button()
def read_npz_into_layer(self, field_names, **kwargs):
with edit(self.layer):
lons = self.npz_file['all']['lon']
lats = self.npz_file['all']['lat']
feats = []
for row_idx, row in enumerate(self.dataset):
feat = QgsFeature(self.layer.fields())
for field_name_idx, field_name in enumerate(field_names):
rlz_or_stat, imt, iml = field_name.split('_')
poe = row[rlz_or_stat][imt][iml].item()
feat.setAttribute(field_name, poe)
feat.setGeometry(QgsGeometry.fromPointXY(
QgsPointXY(lons[row_idx], lats[row_idx])))
feats.append(feat)
added_ok = self.layer.addFeatures(feats)
if not added_ok:
msg = 'There was a problem adding features to the layer.'
log_msg(msg, level='C', message_bar=self.iface.messageBar())
def load_from_npz(self):
with WaitCursorManager('Creating layer...', self.iface.messageBar()):
self.build_layer()
self.style_curves()
|
agpl-3.0
| 54,562,013,012,367,380
| 41.864286
| 79
| 0.588402
| false
| 3.656917
| false
| false
| false
|
cliali/py2
|
test.py
|
1
|
11812
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import telebot
from telebot import types
from telebot import util
import re
import time
from time import sleep
import sys
import json
import os
import logging
import subprocess
import requests
import requests as req
import random
from random import randint
import base64
import urllib
from urllib import urlretrieve as dw
import urllib2
import redis
reload(sys)
sys.setdefaultencoding("utf-8")
#########################################################################################################################################################################
TOKEN = '277679081:AAGk3IXlId9PKUn3n_5wrfrUIR_mgsUVCeE'
bot = telebot.TeleBot(TOKEN)
is_sudo = '242361127'
redis = redis.StrictRedis(host='localhost', port=6379, db=0)
f = "\n \033[01;30m Bot Firstname: {} \033[0m".format(bot.get_me().first_name)
u = "\n \033[01;34m Bot Username: {} \033[0m".format(bot.get_me().username)
i = "\n \033[01;32m Bot ID: {} \033[0m".format(bot.get_me().id)
c = "\n \033[01;31m Bot Is Online Now! \033[0m"
print(f + u + i + c)
bn = "\n Bot Firstname: {} ".format(bot.get_me().first_name)
bu = "\n Bot Username: {} ".format(bot.get_me().username)
bi = "\n Bot ID: {} ".format(bot.get_me().id)
bc = "\n Bot Is Online Now!"
bot.send_message(is_sudo, '👋\n{} {} {} {}'.format(bn,bu,bi,bc))
#########################################################################################################################################################################
markupstart = types.InlineKeyboardMarkup()
markupstart.add(types.InlineKeyboardButton('🇮🇷فارسی🇮🇷', callback_data='farsi'))
markupstart.add(types.InlineKeyboardButton('🇺🇸English🇺🇸', callback_data='english'))
markupstartfa = types.InlineKeyboardMarkup()
#markupstartfa.add(types.InlineKeyboardButton()
#markupstartfa.add(types.InlineKeyboardButton()
#markupstartfa.add(types.InlineKeyboardButton()
markupstartfa.add(types.InlineKeyboardButton('زمان', callback_data='timefa'))
markupstartfa.add(types.InlineKeyboardButton('رفتن به حالت اینلاین', switch_inline_query=''))
markupstarten = types.InlineKeyboardMarkup()
#markupstarten.add(types.InlineKeyboardButton()
#markupstarten.add(types.InlineKeyboardButton()
#markupstarten.add(types.InlineKeyboardButton()
markupstarten.add(types.InlineKeyboardButton('date', callback_data='timeen'))
markupstarten.add(types.InlineKeyboardButton('Inline mode', switch_inline_query=''))
markupback = types.InlineKeyboardMarkup()
markupback.add(types.InlineKeyboardButton('🔙برگشت', callback_data='backfa'))
markupbacken = types.InlineKeyboardMarkup()
markupbacken.add(types.InlineKeyboardButton('🔙Back', callback_data='backen'))
markupreload = types.InlineKeyboardMarkup()
markupreload.add(types.InlineKeyboardButton('🔃reload', callback_data='reload'))
markupredatefa = types.InlineKeyboardMarkup()
markupredatefa.add(types.InlineKeyboardButton('بروز کردن', callback_data='refa'))
markupredateen = types.InlineKeyboardMarkup()
markupredateen.add(types.InlineKeyboardButton('refersh', callback_data='reen'))
@bot.message_handler(commands=['start'])
def start(message):
id = message.chat.id
redis.sadd('startmebot',id)
if redis.hget("lang:{}".format(message.chat.id),"farsi"):
        bot.send_message(message.chat.id, '*Hi*_\nWelcome To TestBot_*\nPlease Select Your Language*\n`\nسلام\nبه روبات تست خوش آمدید\nلطفا زبان خود را انتخاب کنید`', parse_mode='markdown', reply_markup=markupstart)
elif redis.hget("lang:{}".format(message.chat.id),"english"):
        bot.send_message(message.chat.id, '*Hi*_\nWelcome To TestBot_*\nPlease Select Your Language*\n`\nسلام\nبه روبات تست خوش آمدید\nلطفا زبان خود را انتخاب کنید`', parse_mode='markdown', reply_markup=markupstart)
else:
        bot.send_message(message.chat.id, '*Hi*_\nWelcome To TestBot_*\nPlease Select Your Language*\n`\nسلام\nبه روبات تست خوش آمدید\nلطفا زبان خود را انتخاب کنید`', parse_mode='markdown', reply_markup=markupstart)
@bot.message_handler(commands=['reload'])
def reload(m):
cid = m.chat.id
bot.send_message(cid, 'reload command:', reply_markup=markupreload)
@bot.callback_query_handler(func=lambda call: True)
def callback_inline(call):
if call.message:
if call.data == "farsi":
redis.hset("lang:{}".format(call.message.chat.id),"farsi",True)
redis.hdel("lang:{}".format(call.message.chat.id),"english")
            bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="زبان شما با موفقیت به فارسی انتخاب شد\n\nلطفا یکدام از دکمه های زیر را انتخاب کنید👇", reply_markup=markupstartfa)
            bot.answer_callback_query(callback_query_id=call.id,text="خوش آمدید😊")
if call.message:
if call.data == "english":
redis.hset("lang:{}".format(call.message.chat.id),"english",True)
redis.hdel("lang:{}".format(call.message.chat.id),"farsi")
            bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="Your language selected to english🇺🇸\nPlease select one of the button👇", reply_markup=markupstarten)
            bot.answer_callback_query(callback_query_id=call.id,text="Wellcome😊")
if call.message:
if call.data == "backfa":
            bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="به عقب برگشتید🔙\n\nلطفا یکدام از دکمه های زیر را انتخاب کنید👇", reply_markup=markupstartfa)
            bot.answer_callback_query(callback_query_id=call.id, text="به عقب برگشتید🔙")
if call.message:
if call.data == "backen":
            bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="Come backed🔙\nPlease select one of the button👇", reply_markup=markupstarten)
            bot.answer_callback_query(callback_query_id=call.id, text="Come backed🔙")
if call.message:
if call.data == "reload":
            bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="reload: [▆____________]")
bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="reload: [▆▆___________]")
bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="reload: [▆▆▆__________]")
bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="reload: [▆▆▆▆_________]")
bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="reload: [▆▆▆▆▆________]")
bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="reload: [▆▆▆▆▆▆_______]")
bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="reload: [▆▆▆▆▆▆▆______]")
bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="reload: [▆▆▆▆▆▆▆▆_____]")
bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="reload: [▆▆▆▆▆▆▆▆▆____]")
bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="reload: [▆▆▆▆▆▆▆▆▆▆___]")
bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="reload: [▆▆▆▆▆▆▆▆▆▆▆__]")
bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="reload: [▆▆▆▆▆▆▆▆▆▆▆▆_]")
bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="reload: [▆▆▆▆▆▆▆▆▆▆▆▆▆]")
bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="reloaded!", reply_markup=markupreload)
if call.message:
if call.data == "timefa":
reqa = urllib2.Request('http://api.gpmod.ir/time/')
openera = urllib2.build_opener()
fa = openera.open(reqa)
parsed_jsona = json.loads(fa.read())
FAtime = parsed_jsona['FAtime']
FAdate = parsed_jsona['FAdate']
            bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="تاریخ: {} \nساعت: {}".format(FAdate,FAtime), reply_markup=markupredatefa)
if call.message:
if call.data == "timeen":
reqa = urllib2.Request('http://api.gpmod.ir/time/')
openera = urllib2.build_opener()
fa = openera.open(reqa)
parsed_jsona = json.loads(fa.read())
ENtime = parsed_jsona['ENtime']
ENdate = parsed_jsona['ENdate']
bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="date: {} \ntime: {}".format(ENdate,ENtime), reply_markup=markupredateen)
if call.message:
if call.data == "refa":
reqa = urllib2.Request('http://api.gpmod.ir/time/')
openera = urllib2.build_opener()
fa = openera.open(reqa)
parsed_jsona = json.loads(fa.read())
FAtime = parsed_jsona['FAtime']
FAdate = parsed_jsona['FAdate']
            bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="تاریخ: {} \nساعت: {}".format(FAdate,FAtime), reply_markup=markupredatefa)
if call.message:
if call.data == "reen":
reqa = urllib2.Request('http://api.gpmod.ir/time/')
openera = urllib2.build_opener()
fa = openera.open(reqa)
parsed_jsona = json.loads(fa.read())
ENtime = parsed_jsona['ENtime']
ENdate = parsed_jsona['ENdate']
bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="date: {} \ntime: {}".format(ENdate,ENtime), reply_markup=markupredateen)
bot.polling(none_stop=True, timeout=20)
|
apache-2.0
| 4,858,250,203,884,125,000
| 62.588957
| 283
| 0.629084
| false
| 2.142886
| false
| false
| false
|
th0ma5w/pyPartOfSpeech
|
POSTagger.py
|
1
|
2826
|
"""
* pyPOS
*
* Python Version Copyright 2011 Thomas Winningham
* Javascript Version and Comments Copyright 2010, Percy Wegmann
* Licensed under the LGPLv3 license
* http://www.opensource.org/licenses/lgpl-3.0.html
Parts of Speech Tagger
"""
from Lexicon import POSTAGGER_LEXICON
def is_number(s):
'''Simple test of string for number'''
try:
float(s)
return True
except ValueError:
return False
class POSTagger:
def __init__(self):
global POSTAGGER_LEXICON
self.lexicon = POSTAGGER_LEXICON
def wordInLexicon(self,word):
'''Test if the word exists in the lexicon'''
if self.lexicon.has_key(word):
return True
# 1/22/2002 mod (from Lisp code): if not in hash, try lower case:
else:
if self.lexicon.has_key(word.lower()):
return True
return False
def tag(self,words):
'''Tag a list of words'''
ret=[None for x in range(len(words))]
for x in range(len(words)):
ss = False
word = words[x]
if self.lexicon.has_key(word):
ss = self.lexicon[word]
# 1/22/2002 mod (from Lisp code): if not in hash, try lower case:
if not ss:
word = word.lower()
if self.lexicon.has_key(word):
ss = self.lexicon[word]
if (not ss and len(word) == 1):
ret[x] = words[x] + "^"
if not ss:
ret[x] = "NN"
else:
ret[x] = ss[0]
#Apply transformational rules
for x in range(len(words)):
word=ret[x]
# rule 1: DT, {VBD | VBP} --> DT, NN
if x > 0 and ret[x-1] == "DT":
if word == "VBD" or word == "VBP" or word == "VB":
ret[x] = "NN"
# rule 2: convert a noun to a number (CD) if "." appears in the word
if word[0] == "N":
if words[x].__contains__('.'):
ret[x] = "CD"
if is_number(words[x]):
ret[x] = "CD"
# rule 3: convert a noun to a past participle if words[i] ends with "ed"
if ret[x][0] == "N" and words[x][-2:] == "ed":
ret[x] = "VBN"
# rule 4: convert any type to adverb if it ends in "ly";
if ret[x][-2:] == 'ly':
ret[x] = "RB"
# rule 5: convert a common noun (NN or NNS) to a adjective if it ends with "al"
if ret[x][:2] == "NN" and ret[x][-2:] == "al":
ret[x] = ' '.join(str(x),"JJ")
# rule 6: convert a noun to a verb if the preceding work is "would"
if x > 0 and ret[x][:2] == "NN" and words[x-1].lower() == "would":
ret[x] = "VB"
# rule 7: if a word has been categorized as a common noun and it ends with "s",
# then set its type to plural common noun (NNS)
if ret[x] == "NN" and words[x][-1:] == "s":
ret[x] = "NNS"
# rule 8: convert a common noun to a present participle verb (i.e., a gerund)
if ret[x] == "NN" and words[x][-3:] == "ing":
ret[x] = "VBG"
result = zip(words,ret)
return result
if __name__ == "__main__":
print POSTagger().tag(["i", "went", "to", "the", "store", "to", "buy", "5.2", "gallons", "of", "milk"])
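# For reference, tag() returns a list of (word, tag) pairs; the exact tags
# depend on the bundled lexicon, so the values below are illustrative only:
# [('i', 'NN'), ('went', 'VBD'), ('to', 'TO'), ('the', 'DT'), ('store', 'NN'), ...]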
|
lgpl-3.0
| 7,371,887,207,909,501,000
| 29.387097
| 104
| 0.591295
| false
| 2.626394
| false
| false
| false
|
facebookexperimental/eden
|
eden/hg-server/edenscm/hgext/infinitepush/__init__.py
|
1
|
13116
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
# Infinite push
""" store draft commits in the cloud
Configs::
[infinitepush]
# Server-side and client-side option. Pattern of the infinitepush bookmark
branchpattern = PATTERN
# Server or client
server = False
# Server-side option. Possible values: 'disk' or 'sql'. Fails if not set
indextype = disk
# Server-side option. Used only if indextype=sql.
# Format: 'IP:PORT:DB_NAME:USER:PASSWORD'
sqlhost = IP:PORT:DB_NAME:USER:PASSWORD
# Server-side option. Used only if indextype=disk.
# Filesystem path to the index store
indexpath = PATH
# Server-side option. Possible values: 'disk' or 'external'
# Fails if not set
storetype = disk
# Server-side option.
# Path to the binary that will save bundle to the bundlestore
# Formatted cmd line will be passed to it (see `put_args`)
put_binary = put
# Server-side option. Used only if storetype=external.
# Format cmd-line string for put binary. Placeholder: {filename}
put_args = {filename}
# Server-side option.
# Path to the binary that get bundle from the bundlestore.
# Formatted cmd line will be passed to it (see `get_args`)
get_binary = get
# Server-side option. Used only if storetype=external.
# Format cmd-line string for get binary. Placeholders: {filename} {handle}
get_args = {filename} {handle}
# Server-side option
    logfile = FILE
# Server-side option
loglevel = DEBUG
# Server-side option. Used only if indextype=sql.
# Sets mysql wait_timeout option.
waittimeout = 300
# Server-side option. Used only if indextype=sql.
# Sets mysql innodb_lock_wait_timeout option.
locktimeout = 120
# Server-side option. Used only if indextype=sql.
# limit number of days to generate warning on trying to
# fetch too old commit for hg up / hg pull with short hash rev
shorthasholdrevthreshold = 31
# Server-side option. Used only if indextype=sql.
# Name of the repository
reponame = ''
# Server-side option. Used only if storetype=sql
# Whether or not to record new bundles into the forwardfillqueue table.
forwardfill = False
# Server-side option. Used only if storetype=sql
# Whether or not to record new scratch bookmarks into the
# replaybookmarksqueue table.
replaybookmarks = False
# Client-side option. Used by --list-remote option. List of remote scratch
# patterns to list if no patterns are specified.
defaultremotepatterns = ['*']
# Server-side option. If bookmark that was pushed matches
# `fillmetadatabranchpattern` then background
# `hg debugfillinfinitepushmetadata` process will save metadata
# in infinitepush index for nodes that are ancestor of the bookmark.
fillmetadatabranchpattern = ''
# Instructs infinitepush to forward all received bundle2 parts to the
# bundle for storage. Defaults to False.
storeallparts = True
# Server-side option. Maximum acceptable bundle size in megabytes.
maxbundlesize = 500
# Which compression algorithm to use for infinitepush bundles.
bundlecompression = ZS
[remotenames]
# Client-side option
# This option should be set only if remotenames extension is enabled.
# Whether remote bookmarks are tracked by remotenames extension.
bookmarks = True
"""
from __future__ import absolute_import
from edenscm.mercurial import (
bundle2,
changegroup,
discovery,
error,
extensions,
node as nodemod,
pycompat,
util,
)
from edenscm.mercurial.i18n import _
from . import bundleparts, bundlestore, client, common, infinitepushcommands, server
cmdtable = infinitepushcommands.cmdtable
colortable = {
"commitcloud.changeset": "green",
"commitcloud.meta": "bold",
"commitcloud.commitcloud": "yellow",
}
def reposetup(ui, repo):
common.reposetup(ui, repo)
if common.isserver(ui) and repo.local():
repo.bundlestore = bundlestore.bundlestore(repo)
def uisetup(ui):
# remotenames circumvents the default push implementation entirely, so make
# sure we load after it so that we wrap it.
order = extensions._order
order.remove("infinitepush")
order.append("infinitepush")
extensions._order = order
# Register bundleparts capabilities and handlers.
bundleparts.uisetup(ui)
def extsetup(ui):
common.extsetup(ui)
if common.isserver(ui):
server.extsetup(ui)
else:
client.extsetup(ui)
def _deltaparent(orig, self, revlog, rev, p1, p2, prev):
# This version of deltaparent prefers p1 over prev to use less space
dp = revlog.deltaparent(rev)
if dp == nodemod.nullrev and not revlog.storedeltachains:
# send full snapshot only if revlog configured to do so
return nodemod.nullrev
return p1
def _createbundler(ui, repo, other):
bundler = bundle2.bundle20(ui, bundle2.bundle2caps(other))
compress = ui.config("infinitepush", "bundlecompression", "UN")
bundler.setcompression(compress)
# Disallow pushback because we want to avoid taking repo locks.
# And we don't need pushback anyway
capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, allowpushback=False))
bundler.newpart("replycaps", data=pycompat.encodeutf8(capsblob))
return bundler
def _sendbundle(bundler, other):
stream = util.chunkbuffer(bundler.getchunks())
try:
reply = other.unbundle(stream, [b"force"], other.url())
# Look for an error part in the response. Note that we don't apply
# the reply bundle, as we're not expecting any response, except maybe
# an error. If we receive any extra parts, that is an error.
for part in reply.iterparts():
if part.type == "error:abort":
raise bundle2.AbortFromPart(
part.params["message"], hint=part.params.get("hint")
)
elif part.type == "reply:changegroup":
pass
else:
raise error.Abort(_("unexpected part in reply: %s") % part.type)
except error.BundleValueError as exc:
raise error.Abort(_("missing support for %s") % exc)
def pushbackupbundle(ui, repo, other, outgoing, bookmarks):
"""
push a backup bundle to the server
Pushes an infinitepush bundle containing the commits described in `outgoing`
and the bookmarks described in `bookmarks` to the `other` server.
"""
# Wrap deltaparent function to make sure that bundle takes less space
# See _deltaparent comments for details
extensions.wrapfunction(changegroup.cg2packer, "deltaparent", _deltaparent)
try:
bundler = _createbundler(ui, repo, other)
bundler.addparam("infinitepush", "True")
pushvarspart = bundler.newpart("pushvars")
pushvarspart.addparam("BYPASS_READONLY", "True", mandatory=False)
backup = False
if outgoing and not outgoing.missing and not bookmarks:
ui.status(_("nothing to back up\n"))
return True
if outgoing and outgoing.missing:
backup = True
parts = bundleparts.getscratchbranchparts(
repo,
other,
outgoing,
confignonforwardmove=False,
ui=ui,
bookmark=None,
create=False,
bookmarknode=None,
)
for part in parts:
bundler.addpart(part)
if bookmarks:
backup = True
bundler.addpart(bundleparts.getscratchbookmarkspart(other, bookmarks))
if backup:
_sendbundle(bundler, other)
return backup
finally:
extensions.unwrapfunction(changegroup.cg2packer, "deltaparent", _deltaparent)
def pushbackupbundlewithdiscovery(ui, repo, other, heads, bookmarks):
if heads:
with ui.configoverride({("remotenames", "fastheaddiscovery"): False}):
outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=heads)
else:
outgoing = None
return pushbackupbundle(ui, repo, other, outgoing, bookmarks)
def isbackedupnodes(getconnection, nodes):
"""
check on the server side if the nodes are backed up using 'known' or 'knownnodes' commands
"""
with getconnection() as conn:
if "knownnodes" in conn.peer.capabilities():
return conn.peer.knownnodes([nodemod.bin(n) for n in nodes])
else:
return conn.peer.known([nodemod.bin(n) for n in nodes])
def pushbackupbundledraftheads(ui, repo, getconnection, heads):
"""
push a backup bundle containing non-public heads to the server
Pushes an infinitepush bundle containing the commits that are non-public
ancestors of `heads`, to the `other` server.
"""
if heads:
# Calculate the commits to back-up. The bundle needs to cleanly
# apply to the server, so we need to include the whole draft stack.
commitstobackup = [
ctx.node() for ctx in repo.set("not public() & ::%ln", heads)
]
# Calculate the parent commits of the commits we are backing up.
# These are the public commits that should be on the server.
parentcommits = [
ctx.node() for ctx in repo.set("parents(roots(%ln))", commitstobackup)
]
# Build a discovery object encapsulating the commits to backup.
# Skip the actual discovery process, as we know exactly which
# commits are missing. For the common commits, include all the
# parents of the commits we are sending. In the unlikely event that
# the server is missing public commits, we will try again with
# discovery enabled.
og = discovery.outgoing(repo, commonheads=parentcommits, missingheads=heads)
og._missing = commitstobackup
og._common = parentcommits
else:
og = None
try:
with getconnection() as conn:
return pushbackupbundle(ui, repo, conn.peer, og, None)
except Exception as e:
ui.warn(_("push failed: %s\n") % e)
ui.warn(_("retrying push with discovery\n"))
with getconnection() as conn:
return pushbackupbundlewithdiscovery(ui, repo, conn.peer, heads, None)
def pushbackupbundlestacks(ui, repo, getconnection, heads):
# Push bundles containing the commits. Initially attempt to push one
# bundle for each stack (commits that share a single root). If a stack is
# too large, or if the push fails, and the stack has multiple heads, push
# head-by-head.
roots = repo.set("roots(not public() & ::%ls)", heads)
newheads = set()
failedheads = set()
for root in roots:
ui.status(_("backing up stack rooted at %s\n") % root)
stack = [ctx.hex() for ctx in repo.set("(%n::%ls)", root.node(), heads)]
if len(stack) == 0:
continue
stackheads = [ctx.hex() for ctx in repo.set("heads(%ls)", stack)]
if len(stack) > 1000:
# This stack is too large, something must have gone wrong
ui.warn(
_("not backing up excessively large stack rooted at %s (%d commits)")
% (root, len(stack))
)
failedheads |= set(stackheads)
continue
if len(stack) < 20 and len(stackheads) > 1:
# Attempt to push the whole stack. This makes it easier on the
# server when accessing one of the head commits, as the ancestors
# will always be in the same bundle.
try:
if pushbackupbundledraftheads(
ui, repo, getconnection, [nodemod.bin(h) for h in stackheads]
):
newheads |= set(stackheads)
continue
else:
ui.warn(_("failed to push stack bundle rooted at %s\n") % root)
except Exception as e:
ui.warn(_("push of stack %s failed: %s\n") % (root, e))
ui.warn(_("retrying each head individually\n"))
# The stack only has one head, is large, or pushing the whole stack
# failed, push each head in turn.
for head in stackheads:
try:
if pushbackupbundledraftheads(
ui, repo, getconnection, [nodemod.bin(head)]
):
newheads.add(head)
continue
else:
ui.warn(
_("failed to push stack bundle with head %s\n")
% nodemod.short(nodemod.bin(head))
)
except Exception as e:
ui.warn(
_("push of head %s failed: %s\n")
% (nodemod.short(nodemod.bin(head)), e)
)
failedheads.add(head)
return newheads, failedheads
|
gpl-2.0
| 4,742,549,794,455,033,000
| 34.069519
| 94
| 0.634568
| false
| 4.114178
| false
| false
| false
|
zstackio/zstack-woodpecker
|
integrationtest/vm/multihosts/volumes/paths/ceph_san_path8.py
|
1
|
5935
|
import zstackwoodpecker.test_state as ts_header
import zstackwoodpecker.operations.resource_operations as res_ops
import random
import os
TestAction = ts_header.TestAction
class VM(object):
def __init__(self, name=None):
self.name = name
self.cloned_name_list = [self.name + '_cloned_vm%s' % i for i in range(5)]
@property
def start(self):
return [TestAction.start_vm, self.name]
@property
def stop(self):
return [TestAction.stop_vm, self.name]
@property
def migrate(self):
return [TestAction.migrate_vm, self.name]
@property
def reinit(self):
return [TestAction.reinit_vm, self.name]
@property
def change_image(self):
return [TestAction.change_vm_image, self.name, os.getenv('imageName_s')]
def clone(self, clone_num=1, full=False):
if full:
return [TestAction.clone_vm, self.name, ','.join(self.cloned_name_list[:clone_num]), "=full"]
else:
return [TestAction.clone_vm, self.name, ','.join(self.cloned_name_list[:clone_num])]
def path():
cond = res_ops.gen_query_conditions('state', '=', "Enabled")
cond = res_ops.gen_query_conditions('status', '=', "Connected", cond)
ps_inv = res_ops.query_resource(res_ops.PRIMARY_STORAGE, cond)
cond_imagestore = res_ops.gen_query_conditions('type', '=', "ImageStoreBackupStorage", cond)
cond_ceph = res_ops.gen_query_conditions('type', '=', "Ceph", cond)
imagestore = res_ops.query_resource(res_ops.BACKUP_STORAGE, cond_imagestore)
ceph_bs = res_ops.query_resource(res_ops.BACKUP_STORAGE, cond_ceph)
san_ps = [ps.uuid for ps in ps_inv if ps.type == 'SharedBlock']
ceph_ps = [ps.uuid for ps in ps_inv if ps.type == 'Ceph']
san_vms = [VM('utility_vm_for_robot_test' + '-' + ps.name) for ps in ps_inv if ps.type == 'SharedBlock']
ceph_vms = [VM('utility_vm_for_robot_test' + '-' + ps.name) for ps in ps_inv if ps.type == 'Ceph']
vm2 = VM('vm2')
if san_ps and ceph_ps:
return dict(initial_formation="template3",
path_list=[[TestAction.create_volume, "ceph_volume1", "=ps_uuid::%s" % ceph_ps[0]],
[TestAction.attach_volume, san_vms[-1].name, "ceph_volume1"],
[TestAction.create_volume_snapshot, "ceph_volume1", "ceph_volume1_snapshot1"],
[TestAction.create_volume_snapshot, san_vms[-1].name + '-root', san_vms[-1].name + '-sp1'],
[TestAction.resize_volume, san_vms[-1].name, 5*1024*1024],
[TestAction.create_volume_snapshot, san_vms[-1].name + '-root', san_vms[-1].name + '-sp2'],
san_vms[-1].stop,
[TestAction.reinit_vm, san_vms[-1].name],
san_vms[-1].start,
san_vms[-1].migrate,
[TestAction.create_volume_snapshot, "ceph_volume1", "ceph_volume1_snapshot2"],
san_vms[-1].clone(4),
[TestAction.detach_volume, "ceph_volume1"],
[TestAction.attach_volume, san_vms[-1].cloned_name_list[0], "ceph_volume1"],
[TestAction.detach_volume, "ceph_volume1"],
[TestAction.attach_volume, san_vms[-1].cloned_name_list[1], "ceph_volume1"],
[TestAction.detach_volume, "ceph_volume1"],
[TestAction.attach_volume, san_vms[-1].cloned_name_list[2], "ceph_volume1"],
[TestAction.detach_volume, "ceph_volume1"],
[TestAction.attach_volume, san_vms[-1].cloned_name_list[3], "ceph_volume1"],
[TestAction.create_volume_snapshot, san_vms[-1].name + '-root', san_vms[-1].name + '-sp3'],
[TestAction.create_volume_snapshot, "ceph_volume1", "ceph_volume1_snapshot1"],
[TestAction.create_volume, "san_shared_volume1", "=ps_uuid::%s,scsi,shareable" % random.choice(san_ps)],
ceph_vms[0].migrate,
ceph_vms[0].clone(4),
[TestAction.attach_volume, san_vms[-1].cloned_name_list[0], "san_shared_volume1"],
[TestAction.attach_volume, san_vms[-1].cloned_name_list[1], "san_shared_volume1"],
[TestAction.attach_volume, san_vms[-1].cloned_name_list[2], "san_shared_volume1"],
[TestAction.attach_volume, san_vms[-1].cloned_name_list[3], "san_shared_volume1"],
[TestAction.create_volume_snapshot, san_vms[-1].cloned_name_list[0] + '-root', san_vms[-1].cloned_name_list[0] + '-sp1'],
[TestAction.create_volume_snapshot, san_vms[-1].cloned_name_list[1] + '-root', san_vms[-1].cloned_name_list[1] + '-sp1'],
[TestAction.create_volume_snapshot, san_vms[-1].cloned_name_list[2] + '-root', san_vms[-1].cloned_name_list[2] + '-sp1'],
[TestAction.create_volume_snapshot, san_vms[-1].cloned_name_list[3] + '-root', san_vms[-1].cloned_name_list[3] + '-sp1'],
[TestAction.delete_volume, "ceph_volume1"],
[TestAction.delete_vm, san_vms[-1].cloned_name_list[0]],
[TestAction.delete_vm, san_vms[-1].cloned_name_list[1]],
[TestAction.delete_vm, san_vms[-1].cloned_name_list[2]],
[TestAction.delete_vm, san_vms[-1].cloned_name_list[3]],
])
else:
return dict(initial_formation="template3", path_list=[])
|
apache-2.0
| -4,641,030,024,721,269,000
| 60.185567
| 152
| 0.533446
| false
| 3.616697
| true
| false
| false
|
vivainio/androguard
|
mercury/client/merc/lib/debuggable.py
|
1
|
1148
|
#!/usr/bin/python
#
# License: Refer to the README in the root directory
#
import argparse, shlex
from basecmd import BaseCmd
class Debuggable(BaseCmd):
def __init__(self, session):
BaseCmd.__init__(self, session)
self.prompt = "*mercury#debuggable> "
def do_back(self, _args):
"""
Return to menu
"""
return -1
def do_info(self, args):
"""
List debuggable apps on the device with optional filter
usage: info [--filter <filter>]
"""
# Define command-line arguments using argparse
parser = argparse.ArgumentParser(prog = 'info', add_help = False)
parser.add_argument('--filter', '-f', metavar = '<filter>')
try:
# Split arguments using shlex - this means that parameters with spaces can be used - escape " characters inside with \
splitargs = parser.parse_args(shlex.split(args))
print self.session.executeCommand("debuggable", "info", {'filter':splitargs.filter} if splitargs.filter else None).getPaddedErrorOrData()
# FIXME: Choose specific exceptions to catch
except:
pass
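# Standalone sketch of the shlex + argparse pattern used in do_info above;
# the command name and the filter value are arbitrary examples.
def _example_parse():
    parser = argparse.ArgumentParser(prog='info', add_help=False)
    parser.add_argument('--filter', '-f', metavar='<filter>')
    splitargs = parser.parse_args(shlex.split('--filter "com.example.app"'))
    return splitargs.filter    # 'com.example.app' - the quoted value stays one token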
|
gpl-3.0
| -5,308,201,073,243,280,000
| 27.7
| 149
| 0.626307
| false
| 4.205128
| false
| false
| false
|
smclt30p/PCS
|
core/plugins/PluginMenu.py
|
1
|
1947
|
from PyQt5.QtCore import QObject, pyqtSlot
from PyQt5.QtCore import QSignalMapper
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtWidgets import QAction
from core.plugins.PluginLoader import PluginLoader
class PluginMenu(QObject):
currRoot = None
def constructMenu(self, root):
self.currRoot = root
root.clear()
plugins = PluginLoader.getLoadedPlugins()
if len(plugins) == 0:
item = root.addAction("No plugins found")
item.setEnabled(False)
return
for plugin in plugins:
item = root.addMenu(plugin.getPluginName())
actionToggle = item.addAction("UNDEFINED")
if plugin.isActive():
actionToggle.setText("Disable")
else:
actionToggle.setText("Enable")
actionSettings = item.addAction("Settings")
item.addSeparator()
actionAbout = item.addAction("About")
if not plugin.hasAbout or not plugin.isActive():
actionAbout.setEnabled(False)
if not plugin.hasSettings or not plugin.isActive():
actionSettings.setEnabled(False)
actionAbout.triggered.connect(self.handleAbout)
#actionSettings.triggered.connect(self.handleToggle)
actionToggle.triggered.connect(self.handleToggle)
actionAbout.plugin = plugin
actionSettings.plugin = plugin
actionToggle.plugin = plugin
return root
def handleToggle(self):
action = self.sender()
if action.plugin.isActive():
action.plugin.setActive(False)
else:
action.plugin.setActive(True)
PluginLoader.reloadPlugins()
if self.currRoot != None:
self.constructMenu(self.currRoot)
def handleAbout(self):
action = self.sender()
action.plugin.getAboutInterface().show()
|
bsd-2-clause
| 1,721,882,309,426,148,900
| 25.324324
| 64
| 0.616846
| false
| 4.714286
| false
| false
| false
|
lym/allura-git
|
ForgeSVN/forgesvn/svn_main.py
|
1
|
8582
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#-*- python -*-
import logging
from pylons import tmpl_context as c
# Non-stdlib imports
from ming.utils import LazyProperty
from ming.orm.ormsession import ThreadLocalORMSession
from tg import expose, redirect, validate, flash
from tg.decorators import with_trailing_slash, without_trailing_slash
from timermiddleware import Timer
from paste.deploy.converters import asint
# Pyforge-specific imports
import allura.tasks.repo_tasks
from allura.controllers import BaseController
from allura.controllers.repository import RepoRootController
from allura.lib.decorators import require_post
from allura.lib.repository import RepositoryApp, RepoAdminController
from allura.app import SitemapEntry, ConfigOption, AdminControllerMixin
from allura.lib import helpers as h
from allura.lib import validators as v
from allura import model as M
# Local imports
from . import model as SM
from . import version
from . import widgets
from .controllers import BranchBrowser
from .model.svn import svn_path_exists
log = logging.getLogger(__name__)
class ForgeSVNApp(RepositoryApp):
'''This is the SVN app for PyForge'''
__version__ = version.__version__
config_options = RepositoryApp.config_options + [
ConfigOption('checkout_url', str, '')
]
permissions_desc = dict(RepositoryApp.permissions_desc, **{
'write': 'Repo commit access.',
'admin': 'Set permissions, checkout url, and viewable files. Import a remote repo.',
})
tool_label = 'SVN'
tool_description = """
Enterprise-class centralized version control for the masses.
"""
ordinal = 4
forkable = False
default_branch_name = 'HEAD'
def __init__(self, project, config):
super(ForgeSVNApp, self).__init__(project, config)
self.root = BranchBrowser()
default_root = RepoRootController()
self.root.refresh = default_root.refresh
self.root.commit_browser = default_root.commit_browser
self.root.commit_browser_data = SVNCommitBrowserController().commit_browser_data
self.root.status = default_root.status
self.admin = SVNRepoAdminController(self)
@LazyProperty
def repo(self):
return SM.Repository.query.get(app_config_id=self.config._id)
def install(self, project):
'''Create repo object for this tool'''
super(ForgeSVNApp, self).install(project)
SM.Repository(
name=self.config.options.mount_point,
tool='svn',
status='initializing',
fs_path=self.config.options.get('fs_path'))
ThreadLocalORMSession.flush_all()
init_from_url = self.config.options.get('init_from_url')
init_from_path = self.config.options.get('init_from_path')
if init_from_url or init_from_path:
allura.tasks.repo_tasks.clone.post(
cloned_from_path=init_from_path,
cloned_from_name=None,
cloned_from_url=init_from_url)
else:
allura.tasks.repo_tasks.init.post()
def admin_menu(self):
links = super(ForgeSVNApp, self).admin_menu()
links.insert(1, SitemapEntry(
'Import Repo',
c.project.url() + 'admin/' + self.config.options.mount_point + '/' + 'importer/'))
return links
class SVNRepoAdminController(RepoAdminController):
def __init__(self, app):
super(SVNRepoAdminController, self).__init__(app)
self.importer = SVNImportController(self.app)
@without_trailing_slash
@expose('jinja:forgesvn:templates/svn/checkout_url.html')
def checkout_url(self, **kw):
return dict(app=self.app, allow_config=True)
@without_trailing_slash
@expose()
@require_post()
@validate({'external_checkout_url': v.NonHttpUrl})
def set_checkout_url(self, **post_data):
checkout_url = (post_data.get('checkout_url') or '').strip()
external_checkout_url = (post_data.get('external_checkout_url') or '').strip()
if not checkout_url or svn_path_exists("file://%s%s/%s" %
(self.app.repo.fs_path,
self.app.repo.name,
checkout_url)):
if (self.app.config.options.get('checkout_url') or '') != checkout_url:
self.app.config.options.checkout_url = checkout_url
flash("Checkout URL successfully changed")
else:
flash("%s is not a valid path for this repository" %
checkout_url, "error")
if 'external_checkout_url' not in c.form_errors:
if (self.app.config.options.get('external_checkout_url') or '') != external_checkout_url:
self.app.config.options.external_checkout_url = external_checkout_url
flash("External checkout URL successfully changed")
else:
flash("Invalid external checkout URL: %s" % c.form_errors['external_checkout_url'], "error")
class SVNImportController(BaseController, AdminControllerMixin):
import_form = widgets.ImportForm()
def __init__(self, app):
self.app = app
@with_trailing_slash
@expose('jinja:forgesvn:templates/svn/import.html')
def index(self, **kw):
c.is_empty = self.app.repo.is_empty()
c.form = self.import_form
return dict()
@without_trailing_slash
@expose()
@require_post()
@validate(import_form, error_handler=index)
def do_import(self, checkout_url=None, **kwargs):
if self.app.repo.is_empty():
with h.push_context(
self.app.config.project_id,
app_config_id=self.app.config._id):
allura.tasks.repo_tasks.reclone.post(
cloned_from_path=None,
cloned_from_name=None,
cloned_from_url=checkout_url)
M.Notification.post_user(
c.user, self.app.repo, 'importing',
text='''Repository import scheduled,
an email notification will be sent when complete.''')
else:
M.Notification.post_user(
c.user, self.app.repo, 'error',
text="Can't import into non empty repository.")
redirect(c.project.url() + 'admin/tools')
class SVNCommitBrowserController(BaseController):
@without_trailing_slash
@expose('json:')
def commit_browser_data(self, start=None, limit=None, **kw):
data = {
'commits': [],
'next_column': 1,
'max_row': 0,
'built_tree': {},
'next_commit': None,
}
limit, _ = h.paging_sanitizer(limit or 100, 0, 0)
for i, commit in enumerate(c.app.repo.log(revs=start, id_only=False, page_size=limit+1)):
if i >= limit:
data['next_commit'] = str(commit['id'])
break
data['commits'].append(str(commit['id']))
data['built_tree'][commit['id']] = {
'column': 0,
'parents': map(str, commit['parents']),
'short_id': '[r%s]' % commit['id'],
'message': commit['message'],
'oid': str(commit['id']),
'row': i,
'url': c.app.repo.url_for_commit(commit['id']),
}
data['max_row'] = len(data['commits']) - 1
return data
def svn_timers():
return Timer(
'svn_lib.{method_name}', SM.svn.SVNLibWrapper, 'checkout', 'add',
'checkin', 'info2', 'log', 'cat', 'list')
def forgesvn_timers():
return Timer('svn_tool.{method_name}', SM.svn.SVNImplementation, '*')
|
apache-2.0
| 8,798,467,815,965,985,000
| 37.657658
| 104
| 0.612561
| false
| 3.965804
| true
| false
| false
|
XueqingLin/tensorflow
|
tensorflow/tensorboard/backend/server_test.py
|
1
|
18758
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration tests for TensorBoard.
These tests start up a full-fledged TensorBoard server.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import gzip
import json
import numbers
import os
import shutil
import threading
import zlib
from six import BytesIO
from six.moves import http_client
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.contrib.tensorboard.plugins.projector.projector_config_pb2 import ProjectorConfig
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.platform import resource_loader
from tensorflow.python.summary import event_multiplexer
from tensorflow.tensorboard.backend import server
from tensorflow.tensorboard.plugins import REGISTERED_PLUGINS
class TensorboardServerTest(tf.test.TestCase):
_only_use_meta_graph = False # Server data contains only a GraphDef
# Number of scalar-containing events to make.
_SCALAR_COUNT = 99
def setUp(self):
self._GenerateTestData()
self._multiplexer = event_multiplexer.EventMultiplexer(
size_guidance=server.TENSORBOARD_SIZE_GUIDANCE)
server.ReloadMultiplexer(self._multiplexer, {self.get_temp_dir(): None})
# 0 to pick an unused port.
self._server = server.BuildServer(
self._multiplexer, 'localhost', 0, '/foo/logdir/argument')
self._server_thread = threading.Thread(target=self._server.serve_forever)
self._server_thread.daemon = True
self._server_thread.start()
self._connection = http_client.HTTPConnection(
'localhost', self._server.server_address[1])
def tearDown(self):
self._connection.close()
self._server.shutdown()
self._server.server_close()
def _get(self, path, headers={}):
"""Perform a GET request for the given path."""
self._connection.request('GET', path, None, headers)
return self._connection.getresponse()
def _getJson(self, path):
"""Perform a GET request and decode the result as JSON."""
self._connection.request('GET', path)
response = self._connection.getresponse()
self.assertEqual(response.status, 200)
data = response.read()
if response.getheader('Content-Encoding') == 'gzip':
data = gzip.GzipFile('', 'rb', 9, BytesIO(data)).read()
return json.loads(data.decode('utf-8'))
def testBasicStartup(self):
"""Start the server up and then shut it down immediately."""
pass
def testRequestMainPage(self):
"""Navigate to the main page and verify that it returns a 200."""
response = self._get('/')
self.assertEqual(response.status, 200)
def testRequestNonexistentPage(self):
"""Request a page that doesn't exist; it should 404."""
response = self._get('/asdf')
self.assertEqual(response.status, 404)
def testDirectoryTraversal(self):
"""Attempt a directory traversal attack."""
response = self._get('/..' * 30 + '/etc/passwd')
self.assertEqual(response.status, 400)
def testLogdir(self):
"""Test the format of the data/logdir endpoint."""
parsed_object = self._getJson('/data/logdir')
self.assertEqual(parsed_object, {'logdir': '/foo/logdir/argument'})
def testRuns(self):
"""Test the format of the /data/runs endpoint."""
run_json = self._getJson('/data/runs')
# Don't check the actual timestamp since it's time-dependent.
self.assertTrue(isinstance(run_json['run1']['firstEventTimestamp'],
numbers.Number))
del run_json['run1']['firstEventTimestamp']
self.assertEqual(run_json, {'run1': {
'compressedHistograms': ['histogram'],
'scalars': ['simple_values'],
'histograms': ['histogram'],
'images': ['image'],
'audio': ['audio'],
# if only_use_meta_graph, the graph is extracted from the metagraph
'graph': True,
'meta_graph': self._only_use_meta_graph,
'run_metadata': ['test run']}})
def testApplicationPaths_getCached(self):
"""Test the format of the /data/runs endpoint."""
for path in ('/',): # TODO(jart): '/app.js' in open source
connection = http_client.HTTPConnection(
'localhost', self._server.server_address[1])
connection.request('GET', path)
response = connection.getresponse()
self.assertEqual(response.status, 200, msg=path)
self.assertEqual(response.getheader('Cache-Control'),
'private, max-age=3600', msg=path)
connection.close()
def testDataPaths_disableAllCaching(self):
"""Test the format of the /data/runs endpoint."""
for path in ('/data/runs',
'/data/logdir',
'/data/scalars?run=run1&tag=simple_values',
'/data/scalars?run=run1&tag=simple_values&format=csv',
'/data/images?run=run1&tag=image',
'/data/individualImage?run=run1&tag=image&index=0',
'/data/audio?run=run1&tag=audio',
'/data/run_metadata?run=run1&tag=test%20run'):
connection = http_client.HTTPConnection(
'localhost', self._server.server_address[1])
connection.request('GET', path)
response = connection.getresponse()
self.assertEqual(response.status, 200, msg=path)
self.assertEqual(response.getheader('Expires'), '0', msg=path)
response.read()
connection.close()
def testHistograms(self):
"""Test the format of /data/histograms."""
self.assertEqual(
self._getJson('/data/histograms?tag=histogram&run=run1'),
[[0, 0, [0, 2.0, 3.0, 6.0, 5.0, [0.0, 1.0, 2.0], [1.0, 1.0, 1.0]]]])
def testSampleScalars(self):
"""Test the sample_count parameter of /data/scalars."""
for i in xrange(10, self._SCALAR_COUNT, 10):
samples = self._getJson('/data/scalars?sample_count=%d' % i)
values = samples['run1']['simple_values']
# Verify that we got the right amount of values and that we got the
# endpoints.
self.assertEqual(len(values), i)
self.assertEqual(values[0], [100, 10, 1])
self.assertEqual(values[-1], [9900, 990, 99])
def testSampleScalarsWithLargeSampleCount(self):
"""Test using a large sample_count."""
samples = self._getJson('/data/scalars?sample_count=999999')
values = samples['run1']['simple_values']
self.assertEqual(len(values), self._SCALAR_COUNT)
def testImages(self):
"""Test listing images and retrieving an individual image."""
image_json = self._getJson('/data/images?tag=image&run=run1')
image_query = image_json[0]['query']
# We don't care about the format of the image query.
del image_json[0]['query']
self.assertEqual(image_json, [{
'wall_time': 0,
'step': 0,
'height': 1,
'width': 1
}])
response = self._get('/data/individualImage?%s' % image_query)
self.assertEqual(response.status, 200)
def testAudio(self):
"""Test listing audio and retrieving an individual audio clip."""
audio_json = self._getJson('/data/audio?tag=audio&run=run1')
audio_query = audio_json[0]['query']
# We don't care about the format of the audio query.
del audio_json[0]['query']
self.assertEqual(audio_json, [{
'wall_time': 0,
'step': 0,
'content_type': 'audio/wav'
}])
response = self._get('/data/individualAudio?%s' % audio_query)
self.assertEqual(response.status, 200)
def testGraph(self):
"""Test retrieving the graph definition."""
response = self._get('/data/graph?run=run1&limit_attr_size=1024'
'&large_attrs_key=_very_large_attrs')
self.assertEqual(response.status, 200)
graph_pbtxt = response.read()
# Parse the graph from pbtxt into a graph message.
graph = tf.GraphDef()
graph = text_format.Parse(graph_pbtxt, graph)
self.assertEqual(len(graph.node), 2)
self.assertEqual(graph.node[0].name, 'a')
self.assertEqual(graph.node[1].name, 'b')
# Make sure the second node has an attribute that was filtered out because
# it was too large and was added to the "too large" attributes list.
self.assertEqual(list(graph.node[1].attr.keys()), ['_very_large_attrs'])
self.assertEqual(graph.node[1].attr['_very_large_attrs'].list.s,
[b'very_large_attr'])
def testProjectorRunsWithEmbeddings(self):
"""Test the format of /runs endpoint in projector."""
if 'projector' not in REGISTERED_PLUGINS:
return
run_json = self._getJson('/data/plugin/projector/runs')
self.assertEqual(run_json, ['run1'])
def testProjectorInfo(self):
"""Test the format of /info endpoint in projector."""
if 'projector' not in REGISTERED_PLUGINS:
return
info_json = self._getJson('/data/plugin/projector/info?run=run1')
self.assertEqual(info_json['tensors'], {
'var1': {
'shape': [1, 2],
'name': 'var1',
'metadataFile': None,
'bookmarksFile': None,
},
'var2': {
'shape': [10, 10],
'name': 'var2',
'metadataFile': None,
'bookmarksFile': None,
},
'var3': {
'shape': [100, 100],
'name': 'var3',
'metadataFile': None,
'bookmarksFile': None,
}
})
def testProjectorTensor(self):
"""Test the format of /tensor endpoint in projector."""
if 'projector' not in REGISTERED_PLUGINS:
return
tensor_tsv = (self._get('/data/plugin/projector/tensor?run=run1&name=var1')
.read())
self.assertEqual(tensor_tsv, b'6.0\t6.0')
def testAcceptGzip_compressesResponse(self):
response = self._get('/data/graph?run=run1&limit_attr_size=1024'
'&large_attrs_key=_very_large_attrs',
{'Accept-Encoding': 'gzip'})
self.assertEqual(response.status, 200)
self.assertEqual(response.getheader('Content-Encoding'), 'gzip')
pbtxt = gzip.GzipFile('', 'rb', 9, BytesIO(response.read())).read()
graph = text_format.Parse(pbtxt, tf.GraphDef())
self.assertEqual(len(graph.node), 2)
def testAcceptAnyEncoding_compressesResponse(self):
response = self._get('/data/graph?run=run1&limit_attr_size=1024'
'&large_attrs_key=_very_large_attrs',
{'Accept-Encoding': '*'})
self.assertEqual(response.status, 200)
self.assertEqual(response.getheader('Content-Encoding'), 'gzip')
pbtxt = gzip.GzipFile('', 'rb', 9, BytesIO(response.read())).read()
graph = text_format.Parse(pbtxt, tf.GraphDef())
self.assertEqual(len(graph.node), 2)
def testAcceptDoodleEncoding_doesNotCompressResponse(self):
response = self._get('/data/graph?run=run1&limit_attr_size=1024'
'&large_attrs_key=_very_large_attrs',
{'Accept-Encoding': 'doodle'})
self.assertEqual(response.status, 200)
self.assertIsNone(response.getheader('Content-Encoding'))
graph = text_format.Parse(response.read(), tf.GraphDef())
self.assertEqual(len(graph.node), 2)
def testAcceptGzip_doesNotCompressImage(self):
response = self._get('/data/individualImage?run=run1&tag=image&index=0',
{'Accept-Encoding': 'gzip'})
self.assertEqual(response.status, 200)
self.assertEqual(response.getheader('Content-Encoding'), None)
def testRunMetadata(self):
"""Test retrieving the run metadata information."""
response = self._get('/data/run_metadata?run=run1&tag=test%20run')
self.assertEqual(response.status, 200)
run_metadata_pbtxt = response.read()
# Parse from pbtxt into a message.
run_metadata = tf.RunMetadata()
text_format.Parse(run_metadata_pbtxt, run_metadata)
self.assertEqual(len(run_metadata.step_stats.dev_stats), 1)
self.assertEqual(run_metadata.step_stats.dev_stats[0].device, 'test device')
def _GenerateTestData(self):
"""Generates the test data directory.
    The test data has a single run named run1 which contains:
     - a histogram
     - an image at timestamp and step 0
     - an audio clip at timestamp and step 0
     - scalar events containing the value i at step 10 * i and wall time
         100 * i, for i in [1, _SCALAR_COUNT]
     - a graph definition
     - a run metadata event for the tag 'test run'
     - projector config and checkpoint data, if the projector plugin is
         registered
    """
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
run1_path = os.path.join(temp_dir, 'run1')
os.makedirs(run1_path)
writer = tf.train.SummaryWriter(run1_path)
histogram_value = tf.HistogramProto(min=0,
max=2,
num=3,
sum=6,
sum_squares=5,
bucket_limit=[0, 1, 2],
bucket=[1, 1, 1])
# Add a simple graph event.
graph_def = tf.GraphDef()
node1 = graph_def.node.add()
node1.name = 'a'
node2 = graph_def.node.add()
node2.name = 'b'
node2.attr['very_large_attr'].s = b'a' * 2048 # 2 KB attribute
meta_graph_def = meta_graph_pb2.MetaGraphDef(graph_def=graph_def)
if self._only_use_meta_graph:
writer.add_meta_graph(meta_graph_def)
else:
writer.add_graph(graph_def)
# Add a simple run metadata event.
run_metadata = tf.RunMetadata()
device_stats = run_metadata.step_stats.dev_stats.add()
device_stats.device = 'test device'
writer.add_run_metadata(run_metadata, 'test run')
# 1x1 transparent GIF.
encoded_image = base64.b64decode(
'R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7')
image_value = tf.Summary.Image(height=1,
width=1,
colorspace=1,
encoded_image_string=encoded_image)
audio_value = tf.Summary.Audio(sample_rate=44100,
length_frames=22050,
num_channels=2,
encoded_audio_string=b'',
content_type='audio/wav')
writer.add_event(tf.Event(wall_time=0,
step=0,
summary=tf.Summary(value=[
tf.Summary.Value(tag='histogram',
histo=histogram_value),
tf.Summary.Value(tag='image',
image=image_value),
tf.Summary.Value(tag='audio',
audio=audio_value)
])))
    # Write _SCALAR_COUNT simple values.
for i in xrange(1, self._SCALAR_COUNT + 1):
writer.add_event(tf.Event(
# We use different values for wall time, step, and the value so we can
# tell them apart.
wall_time=100 * i,
step=10 * i,
summary=tf.Summary(value=[tf.Summary.Value(tag='simple_values',
simple_value=i)])))
writer.flush()
writer.close()
if 'projector' in REGISTERED_PLUGINS:
self._GenerateProjectorTestData(run1_path)
def _GenerateProjectorTestData(self, run_path):
# Write a projector config file in run1.
config_path = os.path.join(run_path, 'projector_config.pbtxt')
config = ProjectorConfig()
config_pbtxt = text_format.MessageToString(config)
with tf.gfile.GFile(config_path, 'w') as f:
f.write(config_pbtxt)
# Write a checkpoint with some dummy variables.
with tf.Graph().as_default():
sess = tf.Session()
checkpoint_path = os.path.join(run_path, 'model')
tf.get_variable(
'var1', [1, 2], initializer=tf.constant_initializer(6.0))
tf.get_variable('var2', [10, 10])
tf.get_variable('var3', [100, 100])
sess.run(tf.initialize_all_variables())
saver = tf.train.Saver()
saver.save(sess, checkpoint_path)
class TensorboardServerUsingMetagraphOnlyTest(TensorboardServerTest):
# Tests new ability to use only the MetaGraphDef
_only_use_meta_graph = True # Server data contains only a MetaGraphDef
class ParseEventFilesSpecTest(tf.test.TestCase):
def testRunName(self):
logdir_string = 'lol:/cat'
expected = {'/cat': 'lol'}
self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected)
def testPathWithColonThatComesAfterASlash_isNotConsideredARunName(self):
logdir_string = '/lol:/cat'
expected = {'/lol:/cat': None}
self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected)
def testMultipleDirectories(self):
logdir_string = '/a,/b'
expected = {'/a': None, '/b': None}
self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected)
def testNormalizesPaths(self):
logdir_string = '/lol/.//cat/../cat'
expected = {'/lol/cat': None}
self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected)
def testAbsolutifies(self):
logdir_string = 'lol/cat'
expected = {os.path.realpath('lol/cat'): None}
self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected)
def testRespectsGCSPath(self):
logdir_string = 'gs://foo/path'
expected = {'gs://foo/path': None}
self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected)
def testDoesNotExpandUserInGCSPath(self):
logdir_string = 'gs://~/foo/path'
expected = {'gs://~/foo/path': None}
self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected)
def testDoesNotNormalizeGCSPath(self):
logdir_string = 'gs://foo/./path//..'
expected = {'gs://foo/./path//..': None}
self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected)
class TensorBoardAssetsTest(tf.test.TestCase):
def testTagFound(self):
tag = resource_loader.load_resource('tensorboard/TAG')
self.assertTrue(tag)
if __name__ == '__main__':
tf.test.main()
|
apache-2.0
| -5,388,877,923,216,814,000
| 37.917012
| 97
| 0.625973
| false
| 3.784144
| true
| false
| false
|
radinformatics/whatisit
|
whatisit/apps/api/serializers.py
|
1
|
1125
|
from django.contrib.auth.models import User
from whatisit.apps.wordfish.models import (
Annotation,
AllowedAnnotation,
Report,
ReportCollection,
ReportSet
)
from rest_framework import serializers
class ReportSerializer(serializers.ModelSerializer):
class Meta:
model = Report
fields = ('report_id', 'report_text')
#class SingleReportSerializer(serializers.ModelSerializer):
# class Meta:
# model = Report
# fields = ('id','report_text')
class ReportCollectionSerializer(serializers.ModelSerializer):
class Meta:
model = ReportCollection
fields = ('name',)
class ReportSetSerializer(serializers.ModelSerializer):
reports = serializers.PrimaryKeyRelatedField(many=True, queryset=Report.objects.all())
class Meta:
model = ReportSet
fields = ('name','reports')
#class UserSerializer(serializers.ModelSerializer):
# collections = serializers.PrimaryKeyRelatedField(many=True, queryset=ReportCollection.objects.all())#
# class Meta:
# model = User
# fields = ('id', 'username', 'collections')
|
mit
| 860,719,285,074,145,900
| 25.162791
| 106
| 0.695111
| false
| 4.293893
| false
| false
| false
|
akvo/akvo-rsr
|
akvo/rsr/management/commands/a4a_optimy_import.py
|
1
|
11690
|
#!/usr/bin/env python3
# Akvo Reporting is covered by the GNU Affero General Public License.
# See more details in the license.txt file located at the root folder of the Akvo RSR module.
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
""" Import projects from Optimy for Aqua for All
Usage:
python manage.py a4a_optimy_import [--project-id <optimy_project_id>]
"""
from itertools import groupby
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Q
import requests
import textwrap
from akvo.rsr.iso3166 import ISO_3166_COUNTRIES
from akvo.rsr.models import (
BudgetItem,
BudgetItemLabel,
Organisation,
Partnership,
Project,
ProjectCustomField,
ProjectLocation,
)
from akvo.utils import custom_get_or_create_country
BASE_URL = "https://api.optimytool.com/v1.3"
USERNAME = settings.OPTIMY_USERNAME
PASSWORD = settings.OPTIMY_PASSWORD
COUNTRY_NAME_TO_ISO_MAP = {name: code for code, name in ISO_3166_COUNTRIES}
MASTER_PROGRAM_ID = 9062
PROGRAM_IDS = {"VIA Water": 9222, "SCALE": 9224, "Response Facility": 9469}
OPTIMY_FORM_IDS = {
"making-water-count": "68d4a00a-416d-5ce1-9c12-2d6d1dc1a047",
"response-facility": "6e962295-06c9-5de1-a39e-9cd2272b1837",
}
FORM_QUESTION_MAPPING = {
# Making Water Count
"68d4a00a-416d-5ce1-9c12-2d6d1dc1a047": {
"title": "9900586f-3c4b-5e3e-a9e6-a209eb8cb8e3",
# FIXME: subtitle?
"cofinancing-budget": "6c05de7b-4031-5809-a692-a45beadf7cec",
"a4a-budget": "b0268b0c-d7e9-513a-bb27-1de7c0ec593a",
"total-budget": "322932f0-e294-5621-a37b-fd57fec9937a",
"start-date": "b785b97e-64f7-5149-a07b-7216497aa39f",
"end-date": "d3c4132c-1e55-5177-943e-3afa25b092ab",
"project-number": "683c31bc-d1d3-57f2-bf57-2e4c54894181",
"country": "913bec17-7f11-540a-8cb5-c5803e32a98b",
"summary": "02f1316c-4d5c-5989-8183-e392a634d23e",
"program": "09c477bb-d887-5862-9b12-ea5ab566b363",
"grantee": "51550c5f-a019-561d-80ca-50ed38a2bfce"
},
# Response Facility
"6e962295-06c9-5de1-a39e-9cd2272b1837": {
"title": "ed814396-7e42-5a72-a1fb-c478947c499b",
# FIXME: subtitle?
"cofinancing-budget": "ad2b9e11-6ac7-57b2-a20d-d13259f72484",
"a4a-budget": "fac61f74-8d27-5128-9afb-a34283c39e75",
"total-budget": "0b99fc04-bf13-55c2-805a-fec273774a26",
"start-date": "e13cf4d6-d4be-56a3-9228-9c12263ead07",
"end-date": "d6b82834-24e7-5a1b-ab7e-369c745c302a",
"project-number": "fa543aa4-6cf7-53f8-a071-f775d8f89711",
"country": "cdc40519-f33c-5b29-b668-84ff60823ad7",
"summary": "4cff3960-6f4c-5a7f-a681-1dd8382d15e3",
"grantee": "60dfcace-9344-5ddf-89ef-2076f96ec07f"
},
}
CONTRACT_STATUSES = {
"68d4a00a-416d-5ce1-9c12-2d6d1dc1a047": "d30a945f-e524-53fe-8b2f-0c65b27be1ea",
"6e962295-06c9-5de1-a39e-9cd2272b1837": "2df6666f-d73b-5b57-9f66-51150dc9d6c9",
}
A4A = Organisation.objects.get(name="Aqua for All")
DEFAULT_PROJECT_INFO = {
"default_aid_type": "B01",
"default_flow_type": "10",
"default_tied_status": "3",
"default_finance_type": "110",
}
def programs_exist():
program = Project.objects.filter(id=MASTER_PROGRAM_ID).first()
if program is not None:
sub_programs = set(program.descendants(depth=1).values_list("pk", flat=True))
program_ids = set(PROGRAM_IDS.values())
return (sub_programs & program_ids) == program_ids
return False
def get_projects(contracts_only=True):
response = requests.get(f"{BASE_URL}/projects", auth=(USERNAME, PASSWORD))
content = response.json()
projects = content["data"]
if contracts_only:
projects = [
project
for project in projects
if project["status_id"] == CONTRACT_STATUSES[project["form_id"]]
]
return projects
def get_submission_versions(project_id):
response = requests.get(
f"{BASE_URL}/projects/{project_id}/versions", auth=(USERNAME, PASSWORD)
)
data = response.json()["data"]
versions = [
list(versions)[-1]["version_id"]
for form_part_id, versions in groupby(data, key=lambda x: x["form_part_id"])
]
return versions
def get_project_answers(project_id):
version_ids = get_submission_versions(project_id)
answers = []
for version_id in version_ids:
print(f"Fetching answers for {project_id} - {version_id}...")
response = requests.get(
f"{BASE_URL}/projects/{project_id}/versions/{version_id}/answers",
auth=(USERNAME, PASSWORD),
)
data = response.json()["data"]
answers.extend(data)
return {ans["question_id"]: ans for ans in answers}
def get_answer(form_id, answers, key, ans_key="value"):
answer = answers.get(FORM_QUESTION_MAPPING[form_id][key], {}).get(ans_key)
if not answer:
print(f"Could not find answer for {key}")
return answer
def create_project(project, answers):
project_id = project["id"]
form_id = project["form_id"]
if form_id == OPTIMY_FORM_IDS["response-facility"]:
lead_project_id = PROGRAM_IDS["Response Facility"]
else:
program_name = get_answer(form_id, answers, "program", ans_key="answer_name")
lead_project_id = PROGRAM_IDS.get(program_name)
if lead_project_id is None:
print(f"Skipping {project_id} since it has no associated program")
return None
optimy_project_id_field = "Optimy Project ID"
custom_field = ProjectCustomField.objects.filter(
name=optimy_project_id_field, value=project_id
).first()
title = get_answer(form_id, answers, "title")[:200]
project_created = False
if custom_field is not None:
project = custom_field.project
else:
project = Project.objects.create(title=title)
project_created = True
ProjectCustomField.objects.get_or_create(
project=project,
name="Optimy Project ID",
defaults=dict(value=project_id, section="1", order="1"),
)
program = Project.objects.get(pk=lead_project_id)
project.add_to_program(program)
# Add Aqua for All as financing partner
Partnership.objects.get_or_create(
project=project,
organisation=A4A,
iati_organisation_role=Partnership.IATI_FUNDING_PARTNER,
)
# Add implementing partner
grantee = get_answer(form_id, answers, "grantee")
if grantee and project_created:
grantee_org = Organisation.objects.filter(Q(name=grantee) | Q(long_name=grantee)).first()
if not grantee_org:
grantee_org = Organisation.objects.create(
name=textwrap.wrap(grantee, 40)[0],
long_name=grantee
)
Partnership.objects.get_or_create(
project=project,
organisation=grantee_org,
iati_organisation_role=Partnership.IATI_IMPLEMENTING_PARTNER,
)
# Add Aqua for All project Number
project_number_question = get_answer(
form_id, answers, "project-number", "question_name"
)
project_number_value = get_answer(form_id, answers, "project-number")
if project_number_value:
ProjectCustomField.objects.get_or_create(
project=project,
name=project_number_question,
defaults=dict(value=project_number_value, section="1", order="1"),
)
start_date = get_answer(form_id, answers, "start-date")
end_date = get_answer(form_id, answers, "end-date")
iati_id = f"{A4A.iati_org_id}-{project.pk}"
# Update project attributes
data = dict(
title=title,
date_start_planned=start_date,
date_end_planned=end_date,
is_public=False,
project_plan_summary=get_answer(form_id, answers, "summary"),
iati_status="2", # Implementation status
iati_activity_id=iati_id,
)
# NOTE: Don't update Title, description and is_public for existing projects
if not project_created:
data.pop('title')
data.pop('project_plan_summary')
data.pop('is_public')
data.update(DEFAULT_PROJECT_INFO)
for key, value in data.items():
if value is not None:
setattr(project, key, value)
project.save(update_fields=data.keys())
# Create budget objects
BudgetItem.objects.filter(project=project).delete()
# Co-financing budget
other = BudgetItemLabel.objects.get(label="Other")
budget = get_answer(form_id, answers, "cofinancing-budget")
extra = get_answer(form_id, answers, "cofinancing-budget", "answer_name")
if budget:
if extra:
extra = " ".join(extra.split()[1:-1]).title()
BudgetItem.objects.create(
project=project,
label=other,
amount=budget,
other_extra=extra,
value_date=start_date,
period_start=start_date,
period_end=end_date,
)
# A4A budget
budget = get_answer(form_id, answers, "a4a-budget")
extra = get_answer(form_id, answers, "a4a-budget", "answer_name")
if budget:
if extra:
extra = " ".join(extra.split()[1:-1]).title()
BudgetItem.objects.create(
project=project,
label=other,
amount=budget,
other_extra=extra,
value_date=start_date,
period_start=start_date,
period_end=end_date,
)
# Create location objects
if project_created:
project.primary_location = None
if form_id == OPTIMY_FORM_IDS["response-facility"]:
iso_code = get_answer(form_id, answers, "country").lower()
else:
name = get_answer(form_id, answers, "country", ans_key="answer_name")
iso_code = COUNTRY_NAME_TO_ISO_MAP.get(name)
if iso_code:
country = custom_get_or_create_country(iso_code)
ProjectLocation.objects.create(location_target=project, country=country)
else:
print(f"Could not find iso code for {name}")
# Publish the project
project.publish()
return project
def set_program_iati_ids():
for program_id in (MASTER_PROGRAM_ID,) + tuple(PROGRAM_IDS.values()):
program = Project.objects.get(id=program_id)
data = dict(iati_activity_id=f"{A4A.iati_org_id}-{program_id}")
data.update(DEFAULT_PROJECT_INFO)
for key, value in data.items():
setattr(program, key, value)
program.save(update_fields=data.keys())
class Command(BaseCommand):
help = "Import projects from Optimy for Aqua for All"
def add_arguments(self, parser):
parser.add_argument(
"--project-id", type=str, help="ID of the project to import"
)
def handle(self, *args, **options):
if not programs_exist():
raise CommandError("Not all programs are present in the DB")
project_id = options["project_id"]
if not project_id:
print("Fetching projects from Optimy")
projects = get_projects()
else:
projects = [dict(id=project_id)]
# Set program IDs
set_program_iati_ids()
print(f"Importing {len(projects)} Projects ...")
for project in projects:
project_id = project["id"]
answers = get_project_answers(project_id)
project = create_project(project, answers)
if project is not None:
print(f"Imported {project_id} as {project.id} - {project.title}")
|
agpl-3.0
| 3,057,250,417,693,562,000
| 34.317221
| 97
| 0.635073
| false
| 3.184418
| false
| false
| false
|
vsemionov/boomerang
|
api/token.py
|
1
|
1630
|
from collections import OrderedDict
from rest_framework import viewsets, mixins, permissions, response
from rest_framework.authtoken.models import Token
from rest_framework.settings import api_settings as rest_settings
from rest_framework_jwt.settings import api_settings as jwt_settings
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
class AuthViewSet(mixins.ListModelMixin,
viewsets.GenericViewSet):
view_name = None
permission_classes = (permissions.IsAuthenticated,)
def get_view_name(self):
return self.view_name
def create_token(self, user):
raise NotImplementedError()
def list(self, request, *args, **kwargs):
user = request.user
token = self.create_token(user)
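        # Illustrative response body (the username is hypothetical; the token
        # is an opaque DRF key or an encoded JWT, depending on the subclass):
        #   {"username": "alice", "token": "<token string>"}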
token = OrderedDict((('username', user.username),
('token', token)))
return response.Response(token)
class TokenViewSet(AuthViewSet):
view_name = 'Token'
def create_token(self, user):
token, created = Token.objects.get_or_create(user=user)
return token.key
class JWTViewSet(AuthViewSet):
view_name = 'JWT'
authentication_classes = tuple(cls for cls in rest_settings.DEFAULT_AUTHENTICATION_CLASSES
if cls is not JSONWebTokenAuthentication)
jwt_payload_handler = staticmethod(jwt_settings.JWT_PAYLOAD_HANDLER)
jwt_encode_handler = staticmethod(jwt_settings.JWT_ENCODE_HANDLER)
def create_token(self, user):
payload = self.jwt_payload_handler(user)
token = self.jwt_encode_handler(payload)
return token
|
mit
| -8,011,112,225,322,380,000
| 30.960784
| 94
| 0.690184
| false
| 4.233766
| false
| false
| false
|
shootstar/novatest
|
nova/cells/state.py
|
1
|
14713
|
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
CellState Manager
"""
import copy
import datetime
import functools
from oslo.config import cfg
from nova.cells import rpc_driver
from nova import context
from nova.db import base
from nova import exception
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import utils
cell_state_manager_opts = [
cfg.IntOpt('db_check_interval',
default=60,
help='Seconds between getting fresh cell info from db.'),
]
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('name', 'nova.cells.opts', group='cells')
CONF.import_opt('reserve_percent', 'nova.cells.opts', group='cells')
CONF.import_opt('mute_child_interval', 'nova.cells.opts', group='cells')
#CONF.import_opt('capabilities', 'nova.cells.opts', group='cells')
CONF.register_opts(cell_state_manager_opts, group='cells')
class CellState(object):
"""Holds information for a particular cell."""
def __init__(self, cell_name, is_me=False):
self.name = cell_name
self.is_me = is_me
self.last_seen = datetime.datetime.min
self.capabilities = {}
self.capacities = {}
self.db_info = {}
# TODO(comstud): The DB will specify the driver to use to talk
# to this cell, but there's no column for this yet. The only
# available driver is the rpc driver.
self.driver = rpc_driver.CellsRPCDriver()
def update_db_info(self, cell_db_info):
"""Update cell credentials from db."""
self.db_info = dict(
[(k, v) for k, v in cell_db_info.iteritems()
if k != 'name'])
def update_capabilities(self, cell_metadata):
"""Update cell capabilities for a cell."""
self.last_seen = timeutils.utcnow()
self.capabilities = cell_metadata
def update_capacities(self, capacities):
"""Update capacity information for a cell."""
self.last_seen = timeutils.utcnow()
self.capacities = capacities
def get_cell_info(self):
"""Return subset of cell information for OS API use."""
db_fields_to_return = ['is_parent', 'weight_scale', 'weight_offset']
url_fields_to_return = {
'username': 'username',
'hostname': 'rpc_host',
'port': 'rpc_port',
}
cell_info = dict(name=self.name, capabilities=self.capabilities)
if self.db_info:
for field in db_fields_to_return:
cell_info[field] = self.db_info[field]
url_info = rpc_driver.parse_transport_url(
self.db_info['transport_url'])
for field, canonical in url_fields_to_return.items():
cell_info[canonical] = url_info[field]
return cell_info
def send_message(self, message):
"""Send a message to a cell. Just forward this to the driver,
passing ourselves and the message as arguments.
"""
self.driver.send_message_to_cell(self, message)
def __repr__(self):
me = "me" if self.is_me else "not_me"
return "Cell '%s' (%s)" % (self.name, me)
def sync_from_db(f):
"""Use as a decorator to wrap methods that use cell information to
make sure they sync the latest information from the DB periodically.
"""
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
if self._time_to_sync():
self._cell_db_sync()
return f(self, *args, **kwargs)
return wrapper
class CellStateManager(base.Base):
def __init__(self, cell_state_cls=None):
super(CellStateManager, self).__init__()
if not cell_state_cls:
cell_state_cls = CellState
self.cell_state_cls = cell_state_cls
self.my_cell_state = cell_state_cls(CONF.cells.name, is_me=True)
self.parent_cells = {}
self.child_cells = {}
self.last_cell_db_check = datetime.datetime.min
self._cell_db_sync()
my_cell_capabs = {}
for cap in CONF.cells.capabilities:
name, value = cap.split('=', 1)
if ';' in value:
values = set(value.split(';'))
else:
values = set([value])
my_cell_capabs[name] = values
self.my_cell_state.update_capabilities(my_cell_capabs)
def _refresh_cells_from_db(self, ctxt):
"""Make our cell info map match the db."""
# Add/update existing cells ...
db_cells = self.db.cell_get_all(ctxt)
db_cells_dict = dict([(cell['name'], cell) for cell in db_cells])
# Update current cells. Delete ones that disappeared
for cells_dict in (self.parent_cells, self.child_cells):
for cell_name, cell_info in cells_dict.items():
is_parent = cell_info.db_info['is_parent']
db_dict = db_cells_dict.get(cell_name)
if db_dict and is_parent == db_dict['is_parent']:
cell_info.update_db_info(db_dict)
else:
del cells_dict[cell_name]
# Add new cells
for cell_name, db_info in db_cells_dict.items():
if db_info['is_parent']:
cells_dict = self.parent_cells
else:
cells_dict = self.child_cells
if cell_name not in cells_dict:
cells_dict[cell_name] = self.cell_state_cls(cell_name)
cells_dict[cell_name].update_db_info(db_info)
def _time_to_sync(self):
"""Is it time to sync the DB against our memory cache?"""
diff = timeutils.utcnow() - self.last_cell_db_check
return diff.seconds >= CONF.cells.db_check_interval
def _update_our_capacity(self, context):
"""Update our capacity in the self.my_cell_state CellState.
This will add/update 2 entries in our CellState.capacities,
'ram_free' and 'disk_free'.
The values of these are both dictionaries with the following
format:
{'total_mb': <total_memory_free_in_the_cell>,
'units_by_mb: <units_dictionary>}
<units_dictionary> contains the number of units that we can
build for every instance_type that we have. This number is
computed by looking at room available on every compute_node.
Take the following instance_types as an example:
[{'memory_mb': 1024, 'root_gb': 10, 'ephemeral_gb': 100},
{'memory_mb': 2048, 'root_gb': 20, 'ephemeral_gb': 200}]
capacities['ram_free']['units_by_mb'] would contain the following:
{'1024': <number_of_instances_that_will_fit>,
'2048': <number_of_instances_that_will_fit>}
capacities['disk_free']['units_by_mb'] would contain the following:
{'122880': <number_of_instances_that_will_fit>,
'225280': <number_of_instances_that_will_fit>}
Units are in MB, so 122880 = (10 + 100) * 1024.
NOTE(comstud): Perhaps we should only report a single number
available per instance_type.
"""
reserve_level = CONF.cells.reserve_percent / 100.0
compute_hosts = {}
def _get_compute_hosts():
compute_nodes = self.db.compute_node_get_all(context)
for compute in compute_nodes:
service = compute['service']
if not service or service['disabled']:
continue
host = service['host']
compute_hosts[host] = {
'free_ram_mb': compute['free_ram_mb'],
'free_disk_mb': compute['free_disk_gb'] * 1024,
'total_ram_mb': compute['memory_mb'],
'total_disk_mb': compute['local_gb'] * 1024}
_get_compute_hosts()
if not compute_hosts:
self.my_cell_state.update_capacities({})
return
ram_mb_free_units = {}
disk_mb_free_units = {}
total_ram_mb_free = 0
total_disk_mb_free = 0
def _free_units(total, free, per_inst):
if per_inst:
min_free = total * reserve_level
free = max(0, free - min_free)
return int(free / per_inst)
else:
return 0
def _update_from_values(values, instance_type):
memory_mb = instance_type['memory_mb']
disk_mb = (instance_type['root_gb'] +
instance_type['ephemeral_gb']) * 1024
ram_mb_free_units.setdefault(str(memory_mb), 0)
disk_mb_free_units.setdefault(str(disk_mb), 0)
ram_free_units = _free_units(compute_values['total_ram_mb'],
compute_values['free_ram_mb'], memory_mb)
disk_free_units = _free_units(compute_values['total_disk_mb'],
compute_values['free_disk_mb'], disk_mb)
ram_mb_free_units[str(memory_mb)] += ram_free_units
disk_mb_free_units[str(disk_mb)] += disk_free_units
instance_types = self.db.instance_type_get_all(context)
for compute_values in compute_hosts.values():
total_ram_mb_free += compute_values['free_ram_mb']
total_disk_mb_free += compute_values['free_disk_mb']
for instance_type in instance_types:
_update_from_values(compute_values, instance_type)
capacities = {'ram_free': {'total_mb': total_ram_mb_free,
'units_by_mb': ram_mb_free_units},
'disk_free': {'total_mb': total_disk_mb_free,
'units_by_mb': disk_mb_free_units}}
self.my_cell_state.update_capacities(capacities)
@utils.synchronized('cell-db-sync')
def _cell_db_sync(self):
"""Update status for all cells if it's time. Most calls to
        this are from the sync_from_db() decorator that checks
the time, but it checks outside of a lock. The duplicate
check here is to prevent multiple threads from pulling the
information simultaneously.
"""
if self._time_to_sync():
LOG.debug(_("Updating cell cache from db."))
self.last_cell_db_check = timeutils.utcnow()
ctxt = context.get_admin_context()
self._refresh_cells_from_db(ctxt)
self._update_our_capacity(ctxt)
@sync_from_db
def get_cell_info_for_neighbors(self):
"""Return cell information for all neighbor cells."""
cell_list = [cell.get_cell_info()
for cell in self.child_cells.itervalues()]
cell_list.extend([cell.get_cell_info()
for cell in self.parent_cells.itervalues()])
return cell_list
@sync_from_db
def get_my_state(self):
"""Return information for my (this) cell."""
return self.my_cell_state
@sync_from_db
def get_child_cells(self):
"""Return list of child cell_infos."""
return self.child_cells.values()
@sync_from_db
def get_parent_cells(self):
"""Return list of parent cell_infos."""
return self.parent_cells.values()
@sync_from_db
def get_parent_cell(self, cell_name):
return self.parent_cells.get(cell_name)
@sync_from_db
def get_child_cell(self, cell_name):
return self.child_cells.get(cell_name)
@sync_from_db
def update_cell_capabilities(self, cell_name, capabilities):
"""Update capabilities for a cell."""
cell = self.child_cells.get(cell_name)
if not cell:
cell = self.parent_cells.get(cell_name)
if not cell:
LOG.error(_("Unknown cell '%(cell_name)s' when trying to "
"update capabilities"),
{'cell_name': cell_name})
return
# Make sure capabilities are sets.
for capab_name, values in capabilities.items():
capabilities[capab_name] = set(values)
cell.update_capabilities(capabilities)
@sync_from_db
def update_cell_capacities(self, cell_name, capacities):
"""Update capacities for a cell."""
cell = self.child_cells.get(cell_name)
if not cell:
cell = self.parent_cells.get(cell_name)
if not cell:
LOG.error(_("Unknown cell '%(cell_name)s' when trying to "
"update capacities"),
{'cell_name': cell_name})
return
cell.update_capacities(capacities)
@sync_from_db
def get_our_capabilities(self, include_children=True):
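        # Illustrative merge (hypothetical values): if this cell reports
        # {'hypervisor': set(['kvm'])} and a non-muted child cell reports
        # {'hypervisor': set(['xen'])}, the combined result is
        # {'hypervisor': set(['kvm', 'xen'])}.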
capabs = copy.deepcopy(self.my_cell_state.capabilities)
if include_children:
for cell in self.child_cells.values():
if timeutils.is_older_than(cell.last_seen,
CONF.cells.mute_child_interval):
continue
for capab_name, values in cell.capabilities.items():
if capab_name not in capabs:
capabs[capab_name] = set([])
capabs[capab_name] |= values
return capabs
def _add_to_dict(self, target, src):
for key, value in src.items():
if isinstance(value, dict):
target.setdefault(key, {})
self._add_to_dict(target[key], value)
continue
target.setdefault(key, 0)
target[key] += value
@sync_from_db
def get_our_capacities(self, include_children=True):
capacities = copy.deepcopy(self.my_cell_state.capacities)
if include_children:
for cell in self.child_cells.values():
self._add_to_dict(capacities, cell.capacities)
return capacities
@sync_from_db
def get_capacities(self, cell_name=None):
if not cell_name or cell_name == self.my_cell_state.name:
return self.get_our_capacities()
if cell_name in self.child_cells:
return self.child_cells[cell_name].capacities
raise exception.CellNotFound(cell_name=cell_name)
|
apache-2.0
| -7,506,351,170,441,942,000
| 37.215584
| 78
| 0.583498
| false
| 3.878988
| false
| false
| false
|
bonyuta0204/NetDissec
|
src/netprobe_pytorch.py
|
1
|
10545
|
#!/usr/bin/env python
# Bolei added
import pdb
import torch
import torchvision
from torch.autograd import Variable as V
from torchvision import transforms as trn
import os
import numpy
import glob
import shutil
import codecs
import time
import sys
import subprocess
os.environ['GLOG_minloglevel'] = '2'
import caffe
from caffe.proto import caffe_pb2
from google.protobuf import text_format
from scipy.misc import imresize, imread
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage.interpolation import zoom
from tempfile import NamedTemporaryFile
from contextlib import contextmanager
from collections import namedtuple
import upsample
import rotate
import expdir
caffe.set_mode_gpu()
caffe.set_device(0)
def create_probe(
directory, dataset, definition, weights, mean, blobs,
colordepth=3,
rotation_seed=None, rotation_power=1,
limit=None, split=None,
batch_size=16, ahead=4,
cl_args=None, verbose=True):
# If we're already done, skip it!
ed = expdir.ExperimentDirectory(directory)
if all(ed.has_mmap(blob=b) for b in blobs):
return
'''
directory: where to place the probe_conv5.mmap files.
data: the AbstractSegmentation data source to draw upon
    definition: the torchvision model architecture name (e.g. 'resnet18')
    weights: the filename of a torch checkpoint to load, or None to use the
        torchvision ImageNet-pretrained weights
    mean: mean values used to normalize rgb values for the network
blobs: ['conv3', 'conv4', 'conv5'] to probe
'''
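    # Example invocation (a sketch only; every path, layer name and value
    # below is hypothetical):
    #   python netprobe_pytorch.py --directory probe_out --dataset dataset/broden \
    #       --definition resnet18 --blobs layer4 --mean 109.5 118.7 124.7 \
    #       --split train --batch_size 64
    # Note that this function also reads the module-level `args` parsed in
    # __main__ (e.g. args.input_size, args.num_classes), so it is meant to be
    # driven through the command line above rather than imported directly.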
if verbose:
print 'Opening dataset', dataset
data = loadseg.SegmentationData(args.dataset)
# the network to dissect
if args.weights == None:
# load the imagenet pretrained model
net = torchvision.models.__dict__[args.definition](pretrained=True)
else:
# load your own model
net = torchvision.models.__dict__[
args.definition](num_classes=args.num_classes)
checkpoint = torch.load(args.weights)
# the data parallel layer will add 'module' before each layer name
state_dict = {str.replace(k, 'module.', ''): v for k,
v in checkpoint['state_dict'].iteritems()}
net.load_state_dict(state_dict)
net.eval()
# hook up to get the information for each selected layer
layers = net._modules.keys()
size_blobs_output = []
def hook_size(module, input, output):
size_blobs_output.append(output.data.size())
input_sample = V(torch.randn(1, 3, args.input_size, args.input_size))
for blob in blobs:
net._modules.get(blob).register_forward_hook(hook_size)
output_sample = net(input_sample)
input_dim = [args.input_size, args.input_size]
data_size = data.size(split) # the image size
if limit is not None:
data_size = min(data_size, limit)
# Make sure we have a directory to work in
ed.ensure_dir()
# Step 0: write a README file with generated information.
ed.save_info(dict(
dataset=dataset,
split=split,
definition=definition,
weights=weights,
mean=mean,
blobs=blobs,
input_dim=input_dim,
rotation_seed=rotation_seed,
rotation_power=rotation_power))
# Clear old probe data
ed.remove_all('*.mmap*')
# Create new (empty) mmaps
if verbose:
print 'Creating new mmaps.'
out = {}
rot = None
if rotation_seed is not None:
rot = {}
for idx, blob in enumerate(blobs):
#shape = (data_size, ) + net.blobs[blob].data.shape[1:]
shape = (data_size, int(size_blobs_output[idx][1]), int(
size_blobs_output[idx][2]), int(size_blobs_output[idx][3]))
out[blob] = ed.open_mmap(blob=blob, mode='w+', shape=shape)
# Rather than use the exact RF, here we use some heuristics to compute the approximate RF
size_RF = (args.input_size /
size_blobs_output[idx][2], args.input_size / size_blobs_output[idx][3])
fieldmap = ((0, 0), size_RF, size_RF)
ed.save_info(blob=blob, data=dict(
name=blob, shape=shape, fieldmap=fieldmap))
# The main loop
if verbose:
print 'Beginning work.'
pf = loadseg.SegmentationPrefetcher(data, categories=['image'],
split=split, once=True, batch_size=batch_size, ahead=ahead)
index = 0
start_time = time.time()
last_batch_time = start_time
batch_size = 0
net.cuda()
# hook the feature extractor
features_blobs = []
def hook_feature(module, input, output):
features_blobs.append(output.data.cpu().numpy())
for blob in blobs:
net._modules.get(blob).register_forward_hook(hook_feature)
for batch in pf.tensor_batches(bgr_mean=mean):
del features_blobs[:] # clear up the feature basket
batch_time = time.time()
rate = index / (batch_time - start_time + 1e-15)
batch_rate = batch_size / (batch_time - last_batch_time + 1e-15)
last_batch_time = batch_time
if verbose:
print 'netprobe index', index, 'items per sec', batch_rate, rate
sys.stdout.flush()
inp = batch[0]
batch_size = len(inp)
if limit is not None and index + batch_size > limit:
# Truncate last if limited
batch_size = limit - index
inp = inp[:batch_size]
if colordepth == 1:
inp = numpy.mean(inp, axis=1, keepdims=True)
# previous feedforward case
inp = inp[:, ::-1, :, :]
inp_tensor = V(torch.from_numpy(inp.copy()))
        # approximately normalize the input so that image values are scaled to around 1.
inp_tensor.div_(255.0 * 0.224)
inp_tensor = inp_tensor.cuda()
result = net.forward(inp_tensor)
# output the hooked feature
for i, key in enumerate(blobs):
out[key][index:index +
batch_size] = numpy.copy(features_blobs[i][:batch_size])
# print 'Recording data in mmap done'
index += batch_size
if index >= data_size:
break
assert index == data_size, (
"Data source should return evey item once %d %d." %
(index, data_size))
if verbose:
print 'Renaming mmaps.'
for blob in blobs:
ed.finish_mmap(out[blob])
# Final step: write the README file
write_readme_file([
('cl_args', cl_args),
('data', data),
('definition', definition),
('weight', weights),
('mean', mean),
('blobs', blobs)], ed, verbose=verbose)
def ensure_dir(targetdir):
if not os.path.isdir(targetdir):
try:
os.makedirs(targetdir)
except:
print 'Could not create', targetdir
pass
def write_readme_file(args, ed, verbose):
'''
    Writes a README.txt that describes the settings used to generate the dataset.
'''
with codecs.open(ed.filename('README.txt'), 'w', 'utf-8') as f:
def report(txt):
f.write('%s\n' % txt)
if verbose:
print txt
title = '%s network probe' % ed.basename()
report('%s\n%s' % (title, '=' * len(title)))
for key, val in args:
if key == 'cl_args':
if val is not None:
report('Command-line args:')
for ck, cv in vars(val).items():
report(' %s: %r' % (ck, cv))
report('%s: %r' % (key, val))
report('\ngenerated at: %s' % time.strftime("%Y-%m-%d %H:%M"))
try:
label = subprocess.check_output(['git', 'rev-parse', 'HEAD'])
report('git label: %s' % label)
except:
pass
if __name__ == '__main__':
import sys
import traceback
import argparse
try:
import loadseg
parser = argparse.ArgumentParser(
description='Probe a caffe network and save results in a directory.')
parser.add_argument(
'--directory',
default='.',
help='output directory for the net probe')
parser.add_argument(
'--blobs',
nargs='*',
help='network blob names to collect')
parser.add_argument(
'--definition',
help='the deploy prototext defining the net')
parser.add_argument(
'--weights',
default=None,
help='the pretrained weight')
parser.add_argument(
'--mean',
nargs='*', type=float,
help='mean values to subtract from input')
parser.add_argument(
'--dataset',
help='the directory containing the dataset to use')
parser.add_argument(
'--split',
help='the split of the dataset to use')
parser.add_argument(
'--limit',
type=int, default=None,
help='limit dataset to this size')
parser.add_argument(
'--batch_size',
type=int, default=64,
help='the batch size to use')
parser.add_argument(
'--input_size',
type=int, default=224,
help='the image size input to the network(usually it is 224x224, but alexnet uses 227x227)')
parser.add_argument(
'--ahead',
type=int, default=4,
help='number of batches to prefetch')
parser.add_argument(
'--rotation_seed',
type=int, default=None,
help='the seed for the random rotation to apply')
parser.add_argument(
'--rotation_power',
type=float, default=1.0,
help='the power of hte random rotation')
parser.add_argument(
'--colordepth',
type=int, default=3,
help='set to 1 for grayscale')
parser.add_argument(
'--num_classes',
type=int, default=365,
help='the number of classes for the network output(default is 365)')
args = parser.parse_args()
create_probe(
args.directory, args.dataset, args.definition, args.weights,
numpy.array(args.mean, dtype=numpy.float32), args.blobs,
batch_size=args.batch_size, ahead=args.ahead, limit=args.limit,
colordepth=args.colordepth,
rotation_seed=args.rotation_seed, rotation_power=args.rotation_power,
split=args.split, cl_args=args, verbose=True)
except:
traceback.print_exc(file=sys.stdout)
sys.exit(1)
|
mit
| -4,024,479,165,878,248,000
| 32.906752
| 104
| 0.583215
| false
| 3.922991
| false
| false
| false
|
jonathanslenders/pyvim
|
pyvim/completion.py
|
1
|
4439
|
from __future__ import unicode_literals
from prompt_toolkit.completion import Completer, Completion
import re
import weakref
__all__ = (
'DocumentCompleter',
)
class DocumentWordsCompleter(Completer):
"""
Completer that completes on words that appear already in the open document.
"""
def get_completions(self, document, complete_event):
word_before_cursor = document.get_word_before_cursor()
# Create a set of words that could be a possible completion.
words = set()
for w in re.split(r'\W', document.text):
if len(w) > 1:
if w.startswith(word_before_cursor) and w != word_before_cursor:
words.add(w)
# Yield Completion instances.
for w in sorted(words):
yield Completion(w, start_position=-len(word_before_cursor))
class DocumentCompleter(Completer):
"""
This is the general completer for EditorBuffer completions.
Depending on the file type and settings, it selects another completer to
call.
"""
def __init__(self, editor, editor_buffer):
# (Weakrefs, they are already pointing to us.)
self._editor_ref = weakref.ref(editor)
self._editor_buffer_ref = weakref.ref(editor_buffer)
def get_completions(self, document, complete_event):
editor = self._editor_ref()
location = self._editor_buffer_ref().location or '.txt'
# Select completer.
if location.endswith('.py') and editor.enable_jedi:
completer = _PythonCompleter(location)
else:
completer = DocumentWordsCompleter()
# Call completer.
return completer.get_completions(document, complete_event)
class _PythonCompleter(Completer):
"""
Wrapper around the Jedi completion engine.
"""
def __init__(self, location):
self.location = location
def get_completions(self, document, complete_event):
script = self._get_jedi_script_from_document(document)
if script:
try:
completions = script.completions()
except TypeError:
# Issue #9: bad syntax causes completions() to fail in jedi.
# https://github.com/jonathanslenders/python-prompt-toolkit/issues/9
pass
except UnicodeDecodeError:
# Issue #43: UnicodeDecodeError on OpenBSD
# https://github.com/jonathanslenders/python-prompt-toolkit/issues/43
pass
except AttributeError:
# Jedi issue #513: https://github.com/davidhalter/jedi/issues/513
pass
except ValueError:
# Jedi issue: "ValueError: invalid \x escape"
pass
except KeyError:
# Jedi issue: "KeyError: u'a_lambda'."
# https://github.com/jonathanslenders/ptpython/issues/89
pass
except IOError:
# Jedi issue: "IOError: No such file or directory."
# https://github.com/jonathanslenders/ptpython/issues/71
pass
else:
for c in completions:
yield Completion(c.name_with_symbols, len(c.complete) - len(c.name_with_symbols),
display=c.name_with_symbols)
def _get_jedi_script_from_document(self, document):
import jedi # We keep this import in-line, to improve start-up time.
# Importing Jedi is 'slow'.
try:
return jedi.Script(
document.text,
column=document.cursor_position_col,
line=document.cursor_position_row + 1,
path=self.location)
except ValueError:
# Invalid cursor position.
# ValueError('`column` parameter is not in a valid range.')
return None
except AttributeError:
# Workaround for #65: https://github.com/jonathanslenders/python-prompt-toolkit/issues/65
# See also: https://github.com/davidhalter/jedi/issues/508
return None
except IndexError:
# Workaround Jedi issue #514: for https://github.com/davidhalter/jedi/issues/514
return None
except KeyError:
            # Workaround for a crash when the input is "u'", the start of a unicode string.
return None
|
bsd-3-clause
| -7,874,857,262,923,508,000
| 35.991667
| 101
| 0.590674
| false
| 4.347698
| false
| false
| false
|
anderfosca/contextbroker
|
code/virtualenv/broker/modules/update.py
|
1
|
6420
|
__author__ = 'anderson'
import xml.etree.ElementTree as ET
import sys
import re
import generic_response
import pymongo
from pymongo import MongoClient
from dateutil.parser import parse
import logging
def send_to_consumer(url, xml_string):
print "sending to: " + url + '\n' + xml_string
#r = requests.post(url, xml_string)
#print r.json(), r.status_code
# context_update
# expected data: XML with the Provider's update information
# description: registers the data supplied by the Provider in the registry table (registryTable)
# returns: success or error message
# TODO check the consistency of the supplied data
# TODO handle possible error cases
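# Illustrative request body (a sketch inferred from the parsing code below;
# the root tag, provider, entity, scope and all values are hypothetical):
#   <contextML>
#     <ctxEls>
#       <ctxEl>
#         <contextProvider id="DeviceProvider" v="1.0"/>
#         <entity id="user1" type="username"/>
#         <scope>device</scope>
#         <timestamp>2015-05-01T10:00:00</timestamp>
#         <expires>2015-05-01T11:00:00</expires>
#         <dataPart><par n="battery">80</par></dataPart>
#       </ctxEl>
#     </ctxEls>
#   </contextML>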
def context_update(xml_string_original):
"""
:rtype : str
"""
logger = logging.getLogger('broker')
logger.info('update - Initiate')
xml_string = re.sub(' xmlns="[^"]+"', '', xml_string_original, count=1)
xml_string = re.sub(' xmlns:xsi="[^"]+"', '', xml_string, count=1)
xml_string = re.sub(' xsi:schemaLocation="[^"]+"', '', xml_string, count=1)
root = ET.fromstring(xml_string)
for ctxEl in root.find('ctxEls').findall('ctxEl'):
nameProv = ctxEl.find('contextProvider').get('id')
version = ctxEl.find('contextProvider').get('v')
entityType = ctxEl.find('entity').get('type')
entityId = ctxEl.find('entity').get('id')
scope = ctxEl.find('scope').text
timestamp = ctxEl.find('timestamp').text
expires = ctxEl.find('expires').text
if parse(timestamp) > parse(expires):
logger.warn('update - Timestamp after Expires')
return generic_response.generate_response('ERROR','400','Bad Parameters: Timestamp after Expires',
'update',nameProv,version,entityId,entityType,scope)
parList=[]
for par in list(ctxEl.find('dataPart')):
parList.append(ET.tostring(par))
dataPart = "".join(parList)
try:
###################################MONGODB
client = MongoClient()
db = client.broker
provider_el = db.providers.find_one({'name': nameProv})
scope_el = db.scopes.find_one({'name': scope, 'provider_id': provider_el['_id']})
            if provider_el is None or scope_el is None:  # discard right away if the provider or scope does not exist
return generic_response.generate_response('ERROR','400','Bad Paramenters',
'update',nameProv,version,entityId,entityType,scope)
##################################MONGODB
#########################MONGODB
logger.info('update - Inserting entity: %s', entityType+'|'+entityId)
entity_element = {'name': entityId, 'type': entityType}
db.entities.update_one(entity_element, {'$setOnInsert': entity_element}, upsert=True)
entity_el = db.entities.find_one(entity_element)
#########################MONGODB
#################################MONGODB
logger.info('update - Inserting Registry for Provider: %s; Scope: %s; Entity: %s',
provider_el['name'], scope_el['name'], entity_el['type']+'|'+entity_el['name'])
on_insert = {'provider': provider_el, 'scope': scope_el, 'entity': entity_el}
on_update = {'timestamp': timestamp, 'expires': expires, 'data_part': dataPart}
if db.registries.update_one(on_insert, {'$setOnInsert': on_insert, '$set': on_update}, upsert=True).upserted_id:
logger.info('update - Inserted Registry for Provider: %s; Scope: %s; Entity: %s',
provider_el['name'], scope_el['name'], entity_el['type']+'|'+entity_el['name'])
else:
logger.info('update - Updated Registry for Provider: %s; Scope: %s; Entity: %s',
provider_el['name'], scope_el['name'], entity_el['type']+'|'+entity_el['name'])
################################MONGODB
            # now check the subscriptions
logger.info('update - Checking Subscriptions for Scope: %s; Entity: %s', scope, entityType+'|'+entityId)
results = check_subscriptions(entityId, entityType, scope)
if results.count() > 0:
logger.info('update - Found Subscriptions for Scope: %s; Entity: %s', scope, entityType+'|'+entityId)
for result in results:
logger.info('update - Sending to Subscripted: %s', result['callback_url'])
send_to_consumer(result['callback_url'], xml_string_original)
return generic_response.generate_response('OK','200','Update and Subscription Success','update',
nameProv,version,entityId,entityType,scope)
else:
logger.info('update - No Subscriptions found for Scope: %s; Entity: %s', scope, entityType+'|'+entityId)
return generic_response.generate_response('OK','200','Update Success','update',
nameProv,version,entityId,entityType,scope)
except Exception as e:
logger.error('update - Internal Error: %s', sys.exc_info()[0])
error_message = "Internal Error"
return generic_response.generate_response('ERROR','500',error_message,'update',
nameProv,version,entityId,entityType,scope)
# check_subscriptions
# expected data: entity, scope
# description: looks up the Consumers that subscribed to updates for the given
# entity and scope (each subscription stores the Consumer's callback URL)
# returns: the matching subscriptions
def check_subscriptions(entity_name, entity_type, scope):
"""
:rtype : str
:returns :
"""
#################################MONGODB
client = MongoClient()
db = client.broker
entity_el_id = db.entities.find_one({'name': entity_name, 'type': entity_type}, {'_id': 1})["_id"]
scope_el_id = db.scopes.find_one({'name': scope}, {'_id': 1})["_id"]
results = db.subscriptions.find({'entity_id': entity_el_id,
'scopes': {'$in': [scope_el_id]}}, {'callback_url': 1})
################################MONGODB
for r in results:
print r['callback_url']
return results
|
gpl-2.0
| 1,476,318,394,329,219,600
| 50.36
| 124
| 0.568536
| false
| 3.93865
| false
| false
| false
|
arne-cl/discoursegraphs
|
src/discoursegraphs/readwrite/exmaralda.py
|
1
|
19867
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Arne Neumann
"""
The ``exmaralda`` module converts a ``DiscourseDocumentGraph`` (possibly
containing multiple annotation layers) into an Exmaralda ``*.exb`` file
and vice versa.
"""
import os
import sys
from collections import defaultdict
from lxml import etree
from lxml.builder import ElementMaker
from discoursegraphs import (DiscourseDocumentGraph, EdgeTypes,
get_annotation_layers,
get_pointing_chains, get_span,
select_nodes_by_layer)
from discoursegraphs.util import create_dir
class ExmaraldaFile(object):
"""
This class converts a DiscourseDocumentGraph into an Exmaralda file.
Attributes
----------
toknode2id : dict
maps from a token node ID to its Exmaralda ID (ID in the common
timeline)
"""
def __init__(self, docgraph, remove_redundant_layers=True):
"""
Parameters
----------
docgraph : DiscourseDocumentGraph
the document graph to be converted
"""
self.toknode2id = {node_id: i
for i, node_id in enumerate(docgraph.tokens)}
self.E = ElementMaker()
self.tier_count = 0
self.tree = self.__add_document_structure(docgraph,
remove_redundant_layers)
def __str__(self):
"""
returns the generated Exmaralda ``*.exb`` file as a string.
"""
return etree.tostring(self.tree, pretty_print=True,
xml_declaration=True, encoding='UTF-8')
def write(self, output_filepath):
"""
serialize the ExmaraldaFile instance and write it to a file.
Parameters
----------
output_filepath : str
relative or absolute path to the Exmaralda file to be created
"""
with open(output_filepath, 'w') as out_file:
out_file.write(self.__str__())
def __create_document_header(self):
"""
Look, mum! XML generation without string concatenation!1!!
This creates an empty, but functional header for an Exmaralda *.exb
file.
"""
E = self.E
root = E('basic-transcription')
head = E('head')
meta = E('meta-information')
project = E('project-name')
tname = E('transcription-name')
ref_file = E('referenced-file', url="")
ud = E('ud-meta-information')
comment = E('comment')
tconvention = E('transcription-convention')
meta.append(project)
meta.append(tname)
meta.append(ref_file)
meta.append(ud)
meta.append(comment)
meta.append(tconvention)
speakers = E('speakertable')
head.append(meta)
head.append(speakers)
root.append(head)
return root
def __add_document_structure(self, docgraph,
remove_redundant_layers=True):
"""return an Exmaralda XML etree representation a docgraph"""
E = self.E
root = self.__create_document_header()
body = E('basic-body')
timeline = E('common-timeline')
# for n tokens we need to create n+1 timeline indices
for i in xrange(len(docgraph.tokens)+1):
idx = str(i)
# example: <tli id="T0" time="0"/>
timeline.append(E('tli', {'id': 'T'+idx, 'time': idx}))
body.append(timeline)
body = self.__add_token_tiers(docgraph, body)
annotation_layers = get_annotation_layers(docgraph)
for layer in annotation_layers:
if not remove_redundant_layers: # add all layers
self.__add_annotation_tier(docgraph, body, layer)
elif is_informative(layer): # only add informative layers
self.__add_annotation_tier(docgraph, body, layer)
self.__add_coreference_chain_tiers(docgraph, body)
root.append(body)
return root
def __add_annotation_tier(self, docgraph, body, annotation_layer):
"""
adds a span-based annotation layer as a <tier> to the Exmaralda <body>.
Parameter
---------
docgraph : DiscourseDocumentGraph
the document graph from which the chains will be extracted
body : etree._Element
an etree representation of the <basic_body> element (and all its
descendants) of the Exmaralda file
annotation_layer : str
the name of a layer, e.g. 'tiger', 'tiger:token' or 'mmax:sentence'
"""
layer_cat = annotation_layer.split(':')[-1]
temp_tier = self.E('tier',
{'id': "TIE{}".format(self.tier_count),
'category': layer_cat, 'type': "t",
'display-name': "[{}]".format(annotation_layer)})
self.tier_count += 1
for node_id in select_nodes_by_layer(docgraph, annotation_layer):
span_node_ids = get_span(docgraph, node_id)
if span_node_ids:
start_id, end_id = self.__span2event(span_node_ids)
event_label = docgraph.node[node_id].get('label', '')
event = self.E('event',
{'start': "T{}".format(start_id),
'end': "T{}".format(end_id)},
event_label)
temp_tier.append(event)
body.append(temp_tier)
def __add_coreference_chain_tiers(self, docgraph, body,
min_chain_length=3):
"""
Parameters
----------
docgraph : DiscourseDocumentGraph
the document graph from which the chains will be extracted
body : etree._Element
an etree representation of the <basic_body> element (and all its
descendants) of the Exmaralda file
min_chain_length : int
don't add tiers for chains with less than N elements (default: 3)
        TODO: this method assumes that each pointing relation chain signifies
a coreference chain.
"""
E = self.E
for i, chain in enumerate(get_pointing_chains(docgraph)):
chain_tier = E('tier',
{'id': "TIE{}".format(self.tier_count),
'category': "chain", 'type': "t",
'display-name': "[coref-chain-{}]".format(i)})
self.tier_count += 1
chain_length = len(chain)
if chain_length < min_chain_length:
continue # ignore short chains
for j, node_id in enumerate(chain):
span_node_ids = get_span(docgraph, node_id)
if span_node_ids:
start_id, end_id = self.__span2event(span_node_ids)
element_str = "chain_{0}: {1}/{2}".format(
i, chain_length-j, chain_length)
chain_tier.append(
E('event', {'start': "T{}".format(start_id),
'end': "T{}".format(end_id)}, element_str))
body.append(chain_tier)
def __add_token_tiers(self, docgraph, body):
"""
adds all tiers that annotate single tokens (e.g. token string, lemma,
POS tag) to the etree representation of the Exmaralda XML file.
Parameters
----------
docgraph : DiscourseDocumentGraph
the document graph to be converted
body : etree._Element
an etree representation of the <basic_body> element (and all its
descendants) of the Exmaralda file
"""
E = self.E
token_tier = E('tier',
{'id': "TIE{}".format(self.tier_count),
'category': "tok", 'type': "t",
'display-name': "[tok]"})
self.tier_count += 1
token_attribs = defaultdict(lambda: defaultdict(str))
for token_node_id in docgraph.tokens:
for attrib in docgraph.node[token_node_id]:
is_boring_attrib = attrib in ('layers', 'label')
is_boring_cat = attrib.split(':')[-1] in ('token',
'id', 'word',
'morph', 'lemma')
if not is_boring_attrib and not is_boring_cat:
token_attribs[attrib][token_node_id] = \
docgraph.node[token_node_id][attrib]
for i, (_tok_id, token_str) in enumerate(docgraph.get_tokens()):
# example: <event start="T0" end="T1">Zum</event>
token_tier.append(
E('event', {'start': "T{}".format(i),
'end': "T{}".format(i+1)}, token_str))
body.append(token_tier)
for anno_tier in token_attribs:
category = anno_tier.split(':')[-1]
temp_tier = E(
'tier', {'id': "TIE{}".format(self.tier_count),
'category': category, 'type': "t",
'display-name': "[{}]".format(anno_tier)})
self.tier_count += 1
for token_node_id in token_attribs[anno_tier]:
token_tier_id = self.toknode2id[token_node_id]
token_attrib = token_attribs[anno_tier][token_node_id]
temp_tier.append(
E('event', {'start': "T{}".format(token_tier_id),
'end': "T{}".format(token_tier_id+1)},
token_attrib))
body.append(temp_tier)
return body
def __span2event(self, span_node_ids):
"""
converts a span of tokens (list of token node IDs) into an Exmaralda
event (start and end ID).
Parameters
----------
span_node_ids : list of str
sorted list of node IDs representing a span of tokens
Returns
-------
event : tuple of (str, str)
event start ID and event end ID
"""
return (self.toknode2id[span_node_ids[0]],
self.toknode2id[span_node_ids[-1]]+1)
class ExmaraldaDocumentGraph(DiscourseDocumentGraph):
"""graph representation of an Exmaralda-annotated document"""
def __init__(self, exmaralda_file, name=None, namespace='exmaralda',
token_tier='tok', ignored_tier_categories=None):
"""
generates a document graph from an Exmaralda *.exb file
Parameters
----------
exmaralda_file : str
path to an *.exb file
name : str or None
name of the document graph. If None, will be set to the input
file's basename
namespace : str
namespace of the graph, default: exmaralda
token_tier: str
the category attribute of the <tier> that contains the tokens.
default: tok
ignored_tier_categories : None or list of str
a list of tier categories which will not be added to the document
graph
"""
# super calls __init__() of base class DiscourseDocumentGraph
super(ExmaraldaDocumentGraph, self).__init__()
self.name = name if name else os.path.basename(exmaralda_file)
self.ns = namespace
self.root = self.ns+':root_node'
tree = etree.parse(exmaralda_file)
self.tokens = []
self.__add_tokenization(tree)
if ignored_tier_categories:
for tier in tree.iter('tier'):
if tier.attrib['category'] not in ignored_tier_categories:
self.__add_tier(tier, token_tier_name=token_tier)
else:
for tier in tree.iter('tier'):
self.__add_tier(tier, token_tier_name=token_tier)
def __add_tokenization(self, tree):
"""adds a node for each token ID in the document"""
for token_id in self.get_token_ids(tree):
self.add_node(token_id, layers={self.ns})
self.tokens.append(token_id)
def __add_tier(self, tier, token_tier_name):
"""
adds a tier to the document graph (either as additional attributes
to the token nodes or as a span node with outgoing edges to the token
nodes it represents)
"""
if tier.attrib['category'] == token_tier_name:
self.__add_tokens(tier)
else:
if self.is_token_annotation_tier(tier):
self.__add_token_annotation_tier(tier)
else:
self.__add_span_tier(tier)
def __add_tokens(self, token_tier):
"""
adds all tokens to the document graph. Exmaralda considers them to
        be annotations as well, which is why we could only extract the token
node IDs from the timeline (using ``__add_tokenization()``), but not
the tokens themselves.
Parameters
----------
token_tier : etree._Element
an etree element representing the <tier> which contains the tokens
"""
for event in token_tier.iter('event'):
assert len(self.gen_token_range(event.attrib['start'],
event.attrib['end'])) == 1, \
"Events in the token tier must not span more than one token."
token_id = event.attrib['start']
self.node[token_id][self.ns+':token'] = event.text
def is_token_annotation_tier(self, tier):
"""
returns True, iff all events in the given tier annotate exactly one
token.
"""
for i, event in enumerate(tier.iter('event')):
if self.indexdelta(event.attrib['end'], event.attrib['start']) != 1:
return False
return True
def __add_token_annotation_tier(self, tier):
"""
adds a tier to the document graph, in which each event annotates
exactly one token.
"""
for i, event in enumerate(tier.iter('event')):
anno_key = '{0}:{1}'.format(self.ns, tier.attrib['category'])
anno_val = event.text if event.text else ''
self.node[event.attrib['start']][anno_key] = anno_val
def __add_span_tier(self, tier):
"""
adds a tier to the document graph in which each event annotates a span
of one or more tokens.
"""
tier_id = tier.attrib['id']
# add the tier's root node with an inbound edge from the document root
self.add_node(
tier_id, layers={self.ns, self.ns+':tier'},
attr_dict={self.ns+':category': tier.attrib['category'],
self.ns+':type': tier.attrib['type'],
self.ns+':display-name': tier.attrib['display-name']})
self.add_edge(self.root, tier_id, edge_type=EdgeTypes.dominance_relation)
# add a node for each span, containing an annotation.
# add an edge from the tier root to each span and an edge from each
# span to the tokens it represents
for i, event in enumerate(tier.iter('event')):
span_id = '{}_{}'.format(tier_id, i)
span_tokens = self.gen_token_range(event.attrib['start'], event.attrib['end'])
annotation = event.text if event.text else ''
self.add_node(
span_id, layers={self.ns, self.ns+':span'},
attr_dict={self.ns+':annotation': annotation,
'label': annotation})
self.add_edge(tier_id, span_id, edge_type=EdgeTypes.dominance_relation)
for token_id in span_tokens:
self.add_edge(span_id, token_id,
edge_type=EdgeTypes.spanning_relation)
@staticmethod
def get_token_ids(tree):
"""
        returns a list of all token IDs occurring in the given Exmaralda file,
sorted by their time stamp in ascending order.
"""
def tok2time(token_element):
'''
extracts the time (float) of a <tli> element
(i.e. the absolute position of a token in the document)
'''
return float(token_element.attrib['time'])
timeline = tree.find('//common-timeline')
return (tok.attrib['id']
for tok in sorted((tli for tli in timeline.iterchildren()),
key=tok2time))
@staticmethod
def tokenid2index(token_id):
"""converts a token ID (e.g. 'T0') to its index (i.e. 0)"""
return int(token_id[1:])
    def indexdelta(self, stop_id, start_id):
        """returns the distance (int) between two indices.
Two consecutive tokens must have a delta of 1.
"""
return self.tokenid2index(stop_id) - self.tokenid2index(start_id)
def gen_token_range(self, start_id, stop_id):
"""
returns a list of all token IDs in the given, left-closed,
right-open interval (i.e. includes start_id, but excludes stop_id)
>>> gen_token_range('T0', 'T1')
['T0']
>>> gen_token_range('T1', 'T5')
['T1', 'T2', 'T3', 'T4']
"""
index_range = range(self.tokenid2index(start_id), self.tokenid2index(stop_id))
return ["T{}".format(index) for index in index_range]
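# Illustrative usage sketch, not part of the original module: building a
# document graph from an Exmaralda file while skipping some tier categories
# (the file name and the category names below are hypothetical):
#
#     docgraph = ExmaraldaDocumentGraph('dialogue.exb', token_tier='tok',
#                                       ignored_tier_categories=['pause'])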
def is_informative(layer):
"""
    returns True, iff the annotation layer contains information that 'makes
    sense' in Exmaralda (i.e. it is not one of the layers we don't need, which
    would only clutter the Exmaralda Partitur editor).
Parameters
----------
layer : str
the name of a layer, e.g. 'tiger', 'tiger:token' or 'mmax:sentence'
Returns
-------
is_informative : bool
Returns True, iff the layer is likely to contain information that
should be exported to Exmaralda. Usually, we don't want to include
information about sentence or token boundaries, since they are already
obvious from the token layer.
"""
# very dirty hack
    # TODO: fix Issue #36 (efficient self.layers / get_hierarchical_layers())
return layer not in ('tiger', 'tiger:token', 'tiger:sentence:root',
'tiger:sentence:vroot', 'tiger:edge', 'tiger:secedge',
'exmaralda', 'exmaralda:tier',
'discoursegraph')
# pseudo-function to create a document graph from an Exmaralda file
read_exb = read_exmaralda = ExmaraldaDocumentGraph
def write_exb(docgraph, output_file):
"""
converts a DiscourseDocumentGraph into an Exmaralda ``*.exb`` file and
writes it to the given file (or file path).
"""
exmaralda_file = ExmaraldaFile(docgraph)
assert isinstance(output_file, (str, file))
if isinstance(output_file, str):
path_to_file = os.path.dirname(output_file)
if not os.path.isdir(path_to_file):
create_dir(path_to_file)
exmaralda_file.write(output_file)
else: # output_file is a file object
output_file.write(exmaralda_file.__str__())
# alias for write_exb(): convert docgraph into Exmaralda file
write_exmaralda = write_exb
if __name__ == "__main__":
import argparse
import cPickle as pickle
parser = argparse.ArgumentParser()
parser.add_argument('input_file',
help='pickle file of a document graph to be converted')
parser.add_argument('output_file', nargs='?', default=sys.stdout)
args = parser.parse_args(sys.argv[1:])
assert os.path.isfile(args.input_file), \
"'{}' isn't a file".format(args.input_file)
with open(args.input_file, 'rb') as docgraph_file:
docgraph = pickle.load(docgraph_file)
write_exb(docgraph, args.output_file)
|
bsd-3-clause
| 1,024,340,223,439,078,100
| 37.353282
| 90
| 0.554236
| false
| 4.077791
| false
| false
| false
|
novapost/workalendar
|
workalendar/europe/russia.py
|
1
|
3547
|
from datetime import date
from ..core import OrthodoxCalendar, MON, daterange, cleaned_date
from ..registry_tools import iso_register
@iso_register('RU')
class Russia(OrthodoxCalendar):
'Russia'
# Civil holidays
include_labour_day = True
FIXED_HOLIDAYS = OrthodoxCalendar.FIXED_HOLIDAYS + (
(1, 2, "Day After New Year"),
(2, 23, "Defendence of the Fatherland"),
(3, 8, "International Women's Day"),
(5, 9, "Victory Day"),
(6, 12, "National Day"),
(11, 4, "Day of Unity"),
)
# Christian holidays
include_christmas = False
covid19_2020_start = date(2020, 3, 28)
covid19_2020_end = date(2020, 4, 30)
def get_fixed_holidays(self, year):
if year >= 1992:
self.labour_day_label = "The Day of Spring and Labour"
else:
self.labour_day_label = "International Workers' Day"
days = super().get_fixed_holidays(year)
if year >= 2005:
days.extend([
(date(year, 1, 3), "Third Day after New Year"),
(date(year, 1, 4), "Fourth Day after New Year"),
(date(year, 1, 5), "Fifth Day after New Year"),
(date(year, 1, 6), "Sixth Day after New Year"),
(date(year, 1, 8), "Eighth Day after New Year"),
])
if year == 2020:
index = 1
for day in daterange(self.covid19_2020_start,
self.covid19_2020_end):
days.append(
(day, f"Non-Working Day (COVID-19) #{index}")
)
index += 1
# Adding May extra days
days.extend([
(date(year, 5, 4), "May 4th, 2020 holiday"),
(date(year, 5, 5), "Day of Spring and Labor"),
])
# Extra COVID-19 in May
days.extend([
(date(year, 5, 6), "Covid-19 May #1"),
(date(year, 5, 7), "Covid-19 May #2"),
(date(year, 5, 8), "Covid-19 May #3"),
])
# Constitution Vote Public Holiday
days.append((date(year, 7, 1), "Constitution Vote Public Holiday"))
elif year == 2021:
days.extend([
(date(year, 2, 22), "Day Before Defendence of the Fatherland"),
(date(year, 11, 5), 'Day After Day of Unity'),
(date(year, 12, 31), "New Year's Eve"),
])
return days
def get_calendar_holidays(self, year):
holidays = super().get_calendar_holidays(year)
shifts = []
for day, label in holidays:
if day.month == 1 and day.day in range(1, 9):
continue
# Add an exception for 2020 non-working days due to COVID-19
if self.covid19_2020_start <= day <= self.covid19_2020_end:
continue # pragma: no cover
if day.weekday() in self.get_weekend_days():
shifts.append((
self.get_first_weekday_after(day, MON),
label + " shift"
))
holidays.extend(shifts)
return holidays
def is_working_day(self, day,
extra_working_days=None, extra_holidays=None):
day = cleaned_date(day)
if day == date(2021, 2, 20):
return True
return super().is_working_day(
day,
extra_working_days=extra_working_days,
extra_holidays=extra_holidays
)
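# Illustrative usage sketch, not part of the original module, assuming the
# standard workalendar API (``holidays()`` and ``is_working_day()``).
if __name__ == "__main__":
    cal = Russia()
    # List everything the calendar computes for 2021, including bridge days.
    for holiday in cal.holidays(2021):
        print(holiday)
    # 2021-02-20 was a working Saturday, handled by is_working_day() above.
    print(cal.is_working_day(date(2021, 2, 20)))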
|
mit
| 5,417,020,881,726,888,000
| 33.105769
| 79
| 0.50578
| false
| 3.698644
| false
| false
| false
|
dnguyen0304/clare
|
clare/clare/application/room_list_watcher/tests/test_scrapers.py
|
1
|
1168
|
# -*- coding: utf-8 -*-
import mock
from nose.tools import assert_equal
from .. import scrapers
class TestBufferingSourceAdapter(object):
def __init__(self):
self.elements = None
self.scraper = None
self.source = None
self.n = None
def setup(self):
self.elements = xrange(2)
self.scraper = scrapers.Nop()
self.scraper.scrape = mock.Mock(return_value=self.elements)
self.source = scrapers.BufferingSourceAdapter(scraper=self.scraper,
url=None)
self.n = len(self.elements)
def test_scrape_is_not_called_while_buffer_has_elements(self):
for i in xrange(self.n):
self.source.emit()
assert_equal(self.scraper.scrape.call_count, 1)
def test_scrape_is_called_when_buffer_is_empty(self):
for i in xrange(self.n + 1):
self.source.emit()
assert_equal(self.scraper.scrape.call_count, 2)
def test_records_are_ordered_and_reversed(self):
records = [self.source.emit() for i in xrange(self.n)]
assert_equal(*map(list, (reversed(records), self.elements)))
|
mit
| -6,742,735,069,976,354,000
| 30.567568
| 75
| 0.607021
| false
| 3.560976
| false
| false
| false
|
adammck/rapidsms-community-apps
|
tags/app.py
|
1
|
1323
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import rapidsms
from models import *
class App(rapidsms.App):
def parse(self, msg):
text = msg.text
msg.tags = []
# check the contents of this message for EVERY SINGLE
# TAG that we know of. TODO: cache this for a little
# while to avoid kicking the crap out of the database
for tag in Tag.objects.all():
if tag.match(text):
# log and add this tag object to the message
self.info("Tagged message with: %r" % (tag))
msg.tags.append(tag)
# remove this tag from the message string,
# so other apps don't have to deal with it.
# this allows the tag syntax to play nice
# with other prefix-based apps
text = tag.crop(text)
# if we found and stripped tags out of the
# message, update the object and log it
if text != msg.text:
self.info("Message is now: %s" % (text))
msg.text = text
# not so important, but make a note if
# the message didn't contain tags. just
# in case it _should_ have, we can at
# least see that the app is working
else:
self.debug("No tags were found")
|
bsd-3-clause
| -7,288,373,953,676,032,000
| 31.268293
| 61
| 0.555556
| false
| 4.160377
| false
| false
| false
|
mistercrunch/panoramix
|
superset/dashboards/commands/importers/v0.py
|
1
|
12454
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import logging
import time
from copy import copy
from datetime import datetime
from typing import Any, Dict, Optional
from flask_babel import lazy_gettext as _
from sqlalchemy.orm import make_transient, Session
from superset import ConnectorRegistry, db
from superset.commands.base import BaseCommand
from superset.connectors.sqla.models import SqlaTable, SqlMetric, TableColumn
from superset.datasets.commands.importers.v0 import import_dataset
from superset.exceptions import DashboardImportException
from superset.models.dashboard import Dashboard
from superset.models.slice import Slice
from superset.utils.dashboard_filter_scopes_converter import (
convert_filter_scopes,
copy_filter_scopes,
)
logger = logging.getLogger(__name__)
def import_chart(
slc_to_import: Slice,
slc_to_override: Optional[Slice],
import_time: Optional[int] = None,
) -> int:
"""Inserts or overrides slc in the database.
remote_id and import_time fields in params_dict are set to track the
slice origin and ensure correct overrides for multiple imports.
Slice.perm is used to find the datasources and connect them.
:param Slice slc_to_import: Slice object to import
:param Slice slc_to_override: Slice to replace, id matches remote_id
:returns: The resulting id for the imported slice
:rtype: int
"""
session = db.session
make_transient(slc_to_import)
slc_to_import.dashboards = []
slc_to_import.alter_params(remote_id=slc_to_import.id, import_time=import_time)
slc_to_import = slc_to_import.copy()
slc_to_import.reset_ownership()
params = slc_to_import.params_dict
datasource = ConnectorRegistry.get_datasource_by_name(
session,
slc_to_import.datasource_type,
params["datasource_name"],
params["schema"],
params["database_name"],
)
slc_to_import.datasource_id = datasource.id # type: ignore
if slc_to_override:
slc_to_override.override(slc_to_import)
session.flush()
return slc_to_override.id
session.add(slc_to_import)
logger.info("Final slice: %s", str(slc_to_import.to_json()))
session.flush()
return slc_to_import.id
def import_dashboard(
# pylint: disable=too-many-locals,too-many-branches,too-many-statements
dashboard_to_import: Dashboard,
import_time: Optional[int] = None,
) -> int:
"""Imports the dashboard from the object to the database.
    Once the dashboard is imported, the json_metadata field is extended to store
    remote_id and import_time. These help to decide whether the dashboard has to
    be overridden or just copied over. Slices that belong to this
dashboard will be wired to existing tables. This function can be used
to import/export dashboards between multiple superset instances.
Audit metadata isn't copied over.
"""
def alter_positions(
dashboard: Dashboard, old_to_new_slc_id_dict: Dict[int, int]
) -> None:
"""Updates slice_ids in the position json.
Sample position_json data:
{
"DASHBOARD_VERSION_KEY": "v2",
"DASHBOARD_ROOT_ID": {
"type": "DASHBOARD_ROOT_TYPE",
"id": "DASHBOARD_ROOT_ID",
"children": ["DASHBOARD_GRID_ID"]
},
"DASHBOARD_GRID_ID": {
"type": "DASHBOARD_GRID_TYPE",
"id": "DASHBOARD_GRID_ID",
"children": ["DASHBOARD_CHART_TYPE-2"]
},
"DASHBOARD_CHART_TYPE-2": {
"type": "CHART",
"id": "DASHBOARD_CHART_TYPE-2",
"children": [],
"meta": {
"width": 4,
"height": 50,
"chartId": 118
}
},
}
"""
position_data = json.loads(dashboard.position_json)
position_json = position_data.values()
for value in position_json:
if (
isinstance(value, dict)
and value.get("meta")
and value.get("meta", {}).get("chartId")
):
old_slice_id = value["meta"]["chartId"]
if old_slice_id in old_to_new_slc_id_dict:
value["meta"]["chartId"] = old_to_new_slc_id_dict[old_slice_id]
dashboard.position_json = json.dumps(position_data)
logger.info("Started import of the dashboard: %s", dashboard_to_import.to_json())
session = db.session
logger.info("Dashboard has %d slices", len(dashboard_to_import.slices))
# copy slices object as Slice.import_slice will mutate the slice
# and will remove the existing dashboard - slice association
slices = copy(dashboard_to_import.slices)
# Clearing the slug to avoid conflicts
dashboard_to_import.slug = None
old_json_metadata = json.loads(dashboard_to_import.json_metadata or "{}")
old_to_new_slc_id_dict: Dict[int, int] = {}
new_timed_refresh_immune_slices = []
new_expanded_slices = {}
new_filter_scopes = {}
i_params_dict = dashboard_to_import.params_dict
remote_id_slice_map = {
slc.params_dict["remote_id"]: slc
for slc in session.query(Slice).all()
if "remote_id" in slc.params_dict
}
for slc in slices:
logger.info(
"Importing slice %s from the dashboard: %s",
slc.to_json(),
dashboard_to_import.dashboard_title,
)
remote_slc = remote_id_slice_map.get(slc.id)
new_slc_id = import_chart(slc, remote_slc, import_time=import_time)
old_to_new_slc_id_dict[slc.id] = new_slc_id
# update json metadata that deals with slice ids
new_slc_id_str = str(new_slc_id)
old_slc_id_str = str(slc.id)
if (
"timed_refresh_immune_slices" in i_params_dict
and old_slc_id_str in i_params_dict["timed_refresh_immune_slices"]
):
new_timed_refresh_immune_slices.append(new_slc_id_str)
if (
"expanded_slices" in i_params_dict
and old_slc_id_str in i_params_dict["expanded_slices"]
):
new_expanded_slices[new_slc_id_str] = i_params_dict["expanded_slices"][
old_slc_id_str
]
# since PR #9109, filter_immune_slices and filter_immune_slice_fields
# are converted to filter_scopes
# but dashboard create from import may still have old dashboard filter metadata
# here we convert them to new filter_scopes metadata first
filter_scopes = {}
if (
"filter_immune_slices" in i_params_dict
or "filter_immune_slice_fields" in i_params_dict
):
filter_scopes = convert_filter_scopes(old_json_metadata, slices)
if "filter_scopes" in i_params_dict:
filter_scopes = old_json_metadata.get("filter_scopes")
# then replace old slice id to new slice id:
if filter_scopes:
new_filter_scopes = copy_filter_scopes(
old_to_new_slc_id_dict=old_to_new_slc_id_dict,
old_filter_scopes=filter_scopes,
)
# override the dashboard
existing_dashboard = None
for dash in session.query(Dashboard).all():
if (
"remote_id" in dash.params_dict
and dash.params_dict["remote_id"] == dashboard_to_import.id
):
existing_dashboard = dash
dashboard_to_import = dashboard_to_import.copy()
dashboard_to_import.id = None
dashboard_to_import.reset_ownership()
# position_json can be empty for dashboards
# with charts added from chart-edit page and without re-arranging
if dashboard_to_import.position_json:
alter_positions(dashboard_to_import, old_to_new_slc_id_dict)
dashboard_to_import.alter_params(import_time=import_time)
dashboard_to_import.remove_params(param_to_remove="filter_immune_slices")
dashboard_to_import.remove_params(param_to_remove="filter_immune_slice_fields")
if new_filter_scopes:
dashboard_to_import.alter_params(filter_scopes=new_filter_scopes)
if new_expanded_slices:
dashboard_to_import.alter_params(expanded_slices=new_expanded_slices)
if new_timed_refresh_immune_slices:
dashboard_to_import.alter_params(
timed_refresh_immune_slices=new_timed_refresh_immune_slices
)
new_slices = (
session.query(Slice).filter(Slice.id.in_(old_to_new_slc_id_dict.values())).all()
)
if existing_dashboard:
existing_dashboard.override(dashboard_to_import)
existing_dashboard.slices = new_slices
session.flush()
return existing_dashboard.id
dashboard_to_import.slices = new_slices
session.add(dashboard_to_import)
session.flush()
return dashboard_to_import.id # type: ignore
def decode_dashboards( # pylint: disable=too-many-return-statements
o: Dict[str, Any]
) -> Any:
"""
    Function to be passed into the json.loads object_hook parameter.
Recreates the dashboard object from a json representation.
"""
from superset.connectors.druid.models import (
DruidCluster,
DruidColumn,
DruidDatasource,
DruidMetric,
)
if "__Dashboard__" in o:
return Dashboard(**o["__Dashboard__"])
if "__Slice__" in o:
return Slice(**o["__Slice__"])
if "__TableColumn__" in o:
return TableColumn(**o["__TableColumn__"])
if "__SqlaTable__" in o:
return SqlaTable(**o["__SqlaTable__"])
if "__SqlMetric__" in o:
return SqlMetric(**o["__SqlMetric__"])
if "__DruidCluster__" in o:
return DruidCluster(**o["__DruidCluster__"])
if "__DruidColumn__" in o:
return DruidColumn(**o["__DruidColumn__"])
if "__DruidDatasource__" in o:
return DruidDatasource(**o["__DruidDatasource__"])
if "__DruidMetric__" in o:
return DruidMetric(**o["__DruidMetric__"])
if "__datetime__" in o:
return datetime.strptime(o["__datetime__"], "%Y-%m-%dT%H:%M:%S")
return o
def import_dashboards(
session: Session,
content: str,
database_id: Optional[int] = None,
import_time: Optional[int] = None,
) -> None:
"""Imports dashboards from a stream to databases"""
current_tt = int(time.time())
import_time = current_tt if import_time is None else import_time
data = json.loads(content, object_hook=decode_dashboards)
if not data:
raise DashboardImportException(_("No data in file"))
for table in data["datasources"]:
import_dataset(table, database_id, import_time=import_time)
session.commit()
for dashboard in data["dashboards"]:
import_dashboard(dashboard, import_time=import_time)
session.commit()
class ImportDashboardsCommand(BaseCommand):
"""
Import dashboard in JSON format.
This is the original unversioned format used to export and import dashboards
in Superset.
"""
# pylint: disable=unused-argument
def __init__(
self, contents: Dict[str, str], database_id: Optional[int] = None, **kwargs: Any
):
self.contents = contents
self.database_id = database_id
def run(self) -> None:
self.validate()
for file_name, content in self.contents.items():
logger.info("Importing dashboard from file %s", file_name)
import_dashboards(db.session, content, self.database_id)
def validate(self) -> None:
# ensure all files are JSON
for content in self.contents.values():
try:
json.loads(content)
except ValueError:
logger.exception("Invalid JSON file")
raise
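# Illustrative usage sketch, not part of the original module: the command takes
# a mapping of file names to their JSON contents and must be run inside an
# active Flask app context. The file name below is hypothetical.
#
#     with open("dashboard_export.json") as fp:
#         ImportDashboardsCommand({"dashboard_export.json": fp.read()}).run()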
|
apache-2.0
| 3,782,625,302,160,400,400
| 35.521994
| 88
| 0.638028
| false
| 3.748946
| false
| false
| false
|
lino-framework/lino
|
lino/modlib/checkdata/choicelists.py
|
1
|
5000
|
# Copyright 2015-2020 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
from django.utils import translation
from lino.core.gfks import gfk2lookup
from lino.core.roles import SiteStaff
from django.utils.text import format_lazy
from lino.api import dd, rt, _
if False:
class Feedbacks(dd.ChoiceList):
verbose_name = _("Checkdata feedback")
verbose_name_plural = _("Checkdata feedback")
add = Feedbacks.add_item()
add("10", _("Ignorable"), 'ignorable')
add("20", _("Serious"), 'serious')
class Severities(dd.ChoiceList):
verbose_name = _("Severity")
verbose_name_plural = _("Data problem severities")
add = Severities.add_item()
add("10", _("Note"), 'note')
add("20", _("Warning"), 'warning')
add("30", _("Error"), 'error')
class Checker(dd.Choice):
verbose_name = None
severity = None
self = None
model = None
help_text = None
def __init__(self):
# value = self.__module__ + '.' + self.__class__.__name__
value = self.__module__.split('.')[-2] + '.' + self.__class__.__name__
# if isinstance(self.model, six.string_types):
# value = self.model + '.' + self.__class__.__name__
# else:
# value = self.model.__name__ + '.' + self.__class__.__name__
if self.verbose_name is None:
text = value
else:
text = self.verbose_name
super(Checker, self).__init__(value, text, None)
@classmethod
def activate(cls):
if cls.self is not None:
raise Exception("Duplicate call to {0}.activate()".format(cls))
cls.self = cls()
Checkers.add_item_instance(cls.self)
@classmethod
def update_unbound_problems(cls, **kwargs):
assert cls.self.model is None
        todo, done = cls.self.update_problems(**kwargs)
msg = "Found {0} and fixed {1} data problems for {2}."
dd.logger.info(msg.format(len(todo), len(done), cls.self))
@classmethod
def check_instance(cls, *args, **kwargs):
return cls.self.get_checkdata_problems(*args, **kwargs)
def get_checkable_models(self):
if self.model is None:
return [None]
return rt.models_by_base(self.model, toplevel_only=True)
def resolve_model(self, site):
if isinstance(self.model, str):
self.model = dd.resolve_model(self.model, strict=True)
def update_problems(self, obj=None, delete=True, fix=False):
Problem = rt.models.checkdata.Problem
if delete:
# if obj is None:
# flt = {
# Problem.owner.ct_field.name + "__isnull": True,
# Problem.owner.fk_field.name + "__isnull": True
# }
# else:
# flt = gfk2lookup(Problem.owner, obj, checker=self)
flt = gfk2lookup(Problem.owner, obj, checker=self)
qs = Problem.objects.filter(**flt)
qs.delete()
done = []
todo = []
for fixable, msg in self.get_checkdata_problems(obj, fix):
if fixable:
# attn: do not yet translate
# msg = string_concat(u"(\u2605) ", msg)
msg = format_lazy("(\u2605) {}", msg)
if fixable and fix:
done.append(msg)
else:
todo.append(msg)
if len(todo):
# dd.logger.info("%s : %s", obj, todo)
user = self.get_responsible_user(obj)
if user is None:
lang = dd.get_default_language()
else:
lang = user.language
with translation.override(lang):
if obj is None:
for msg in todo:
prb = Problem(message=str(msg), checker=self, user=user)
prb.full_clean()
prb.save()
else:
msg = '\n'.join([str(s) for s in todo])
prb = Problem(owner=obj, message=msg, checker=self, user=user)
prb.full_clean()
prb.save()
return (todo, done)
def get_checkdata_problems(self, obj, fix=False):
return []
def get_responsible_user(self, obj):
return dd.plugins.checkdata.get_responsible_user(self, obj)
class Checkers(dd.ChoiceList):
required_roles = dd.login_required(SiteStaff)
verbose_name = _("Data checker")
verbose_name_plural = _("Data checkers")
item_class = Checker
max_length = 250
# e.g. "lino_welfare.modlib.pcsw.models.ClientCoachingsChecker"
column_names = "value text"
show_values = False
detail_layout = """
value text
checkdata.ProblemsByChecker
"""
@dd.receiver(dd.pre_analyze)
def resolve_checkers(sender, **kw):
for chk in Checkers.get_list_items():
chk.resolve_model(sender)
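# Illustrative sketch, not part of the original module: a minimal concrete
# checker following the pattern above. The model name and the messages are
# hypothetical, and the block is disabled (like the examples at the top of
# this file); a real checker would run ``activate()`` at import time so it
# gets registered in the Checkers choicelist.
if False:
    class EmptyNameChecker(Checker):
        verbose_name = _("Check for persons without a name")
        model = 'contacts.Person'
        def get_checkdata_problems(self, obj, fix=False):
            if not obj.name:
                yield (False, _("This person has no name"))
    EmptyNameChecker.activate()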
|
bsd-2-clause
| -260,389,720,889,201,000
| 32.112583
| 82
| 0.5528
| false
| 3.76506
| false
| false
| false
|
CHBMB/LazyLibrarian
|
lib/fuzzywuzzy/fuzz.py
|
1
|
8419
|
#!/usr/bin/env python
# encoding: utf-8
"""
fuzz.py
Copyright (c) 2011 Adam Cohen
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import unicode_literals
import platform
import warnings
try:
from .StringMatcher import StringMatcher as SequenceMatcher
except ImportError:
#if platform.python_implementation() != "PyPy":
# warnings.warn('Using slow pure-python SequenceMatcher. Install python-Levenshtein to remove this warning')
from difflib import SequenceMatcher
from . import utils
###########################
# Basic Scoring Functions #
###########################
@utils.check_for_none
@utils.check_empty_string
def ratio(s1, s2):
s1, s2 = utils.make_type_consistent(s1, s2)
m = SequenceMatcher(None, s1, s2)
return utils.intr(100 * m.ratio())
@utils.check_for_none
@utils.check_empty_string
def partial_ratio(s1, s2):
    """Return the ratio of the most similar substring
as a number between 0 and 100."""
s1, s2 = utils.make_type_consistent(s1, s2)
if len(s1) <= len(s2):
shorter = s1
longer = s2
else:
shorter = s2
longer = s1
m = SequenceMatcher(None, shorter, longer)
blocks = m.get_matching_blocks()
# each block represents a sequence of matching characters in a string
# of the form (idx_1, idx_2, len)
# the best partial match will block align with at least one of those blocks
# e.g. shorter = "abcd", longer = XXXbcdeEEE
# block = (1,3,3)
# best score === ratio("abcd", "Xbcd")
scores = []
for block in blocks:
long_start = block[1] - block[0] if (block[1] - block[0]) > 0 else 0
long_end = long_start + len(shorter)
long_substr = longer[long_start:long_end]
m2 = SequenceMatcher(None, shorter, long_substr)
r = m2.ratio()
if r > .995:
return 100
else:
scores.append(r)
return utils.intr(100 * max(scores))
##############################
# Advanced Scoring Functions #
##############################
def _process_and_sort(s, force_ascii, full_process=True):
"""Return a cleaned string with token sorted."""
# pull tokens
ts = utils.full_process(s, force_ascii=force_ascii) if full_process else s
tokens = ts.split()
# sort tokens and join
sorted_string = u" ".join(sorted(tokens))
return sorted_string.strip()
# Sorted Token
# find all alphanumeric tokens in the string
# sort those tokens and take ratio of resulting joined strings
# controls for unordered string elements
@utils.check_for_none
def _token_sort(s1, s2, partial=True, force_ascii=True, full_process=True):
sorted1 = _process_and_sort(s1, force_ascii, full_process=full_process)
sorted2 = _process_and_sort(s2, force_ascii, full_process=full_process)
if partial:
return partial_ratio(sorted1, sorted2)
else:
return ratio(sorted1, sorted2)
def token_sort_ratio(s1, s2, force_ascii=True, full_process=True):
"""Return a measure of the sequences' similarity between 0 and 100
    but sorting the tokens before comparing.
"""
return _token_sort(s1, s2, partial=False, force_ascii=force_ascii, full_process=full_process)
def partial_token_sort_ratio(s1, s2, force_ascii=True, full_process=True):
"""Return the ratio of the most similar substring as a number between
    0 and 100 but sorting the tokens before comparing.
"""
return _token_sort(s1, s2, partial=True, force_ascii=force_ascii, full_process=full_process)
@utils.check_for_none
def _token_set(s1, s2, partial=True, force_ascii=True, full_process=True):
"""Find all alphanumeric tokens in each string...
- treat them as a set
- construct two strings of the form:
<sorted_intersection><sorted_remainder>
- take ratios of those two strings
- controls for unordered partial matches"""
p1 = utils.full_process(s1, force_ascii=force_ascii) if full_process else s1
p2 = utils.full_process(s2, force_ascii=force_ascii) if full_process else s2
if not utils.validate_string(p1):
return 0
if not utils.validate_string(p2):
return 0
# pull tokens
tokens1 = set(p1.split())
tokens2 = set(p2.split())
intersection = tokens1.intersection(tokens2)
diff1to2 = tokens1.difference(tokens2)
diff2to1 = tokens2.difference(tokens1)
sorted_sect = " ".join(sorted(intersection))
sorted_1to2 = " ".join(sorted(diff1to2))
sorted_2to1 = " ".join(sorted(diff2to1))
combined_1to2 = sorted_sect + " " + sorted_1to2
combined_2to1 = sorted_sect + " " + sorted_2to1
# strip
sorted_sect = sorted_sect.strip()
combined_1to2 = combined_1to2.strip()
combined_2to1 = combined_2to1.strip()
if partial:
ratio_func = partial_ratio
else:
ratio_func = ratio
pairwise = [
ratio_func(sorted_sect, combined_1to2),
ratio_func(sorted_sect, combined_2to1),
ratio_func(combined_1to2, combined_2to1)
]
return max(pairwise)
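# Worked example for the construction above (illustrative, not part of the
# original module): for s1 = "mariners vs angels" and
# s2 = "los angeles angels vs seattle mariners" the token intersection is
# {"angels", "mariners", "vs"}, so
#     sorted_sect   = "angels mariners vs"
#     combined_1to2 = "angels mariners vs"  (s1 has no remaining tokens)
#     combined_2to1 = "angels mariners vs angeles los seattle"
# Because sorted_sect equals combined_1to2, their ratio is 100 and that is the
# score returned for this pair by both set-based scorers.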
def token_set_ratio(s1, s2, force_ascii=True, full_process=True):
return _token_set(s1, s2, partial=False, force_ascii=force_ascii, full_process=full_process)
def partial_token_set_ratio(s1, s2, force_ascii=True, full_process=True):
return _token_set(s1, s2, partial=True, force_ascii=force_ascii, full_process=full_process)
###################
# Combination API #
###################
# q is for quick
def QRatio(s1, s2, force_ascii=True):
p1 = utils.full_process(s1, force_ascii=force_ascii)
p2 = utils.full_process(s2, force_ascii=force_ascii)
if not utils.validate_string(p1):
return 0
if not utils.validate_string(p2):
return 0
return ratio(p1, p2)
def UQRatio(s1, s2):
return QRatio(s1, s2, force_ascii=False)
# w is for weighted
def WRatio(s1, s2, force_ascii=True):
"""Return a measure of the sequences' similarity between 0 and 100,
using different algorithms.
"""
p1 = utils.full_process(s1, force_ascii=force_ascii)
p2 = utils.full_process(s2, force_ascii=force_ascii)
if not utils.validate_string(p1):
return 0
if not utils.validate_string(p2):
return 0
# should we look at partials?
try_partial = True
unbase_scale = .95
partial_scale = .90
base = ratio(p1, p2)
len_ratio = float(max(len(p1), len(p2))) / min(len(p1), len(p2))
# if strings are similar length, don't use partials
if len_ratio < 1.5:
try_partial = False
# if one string is much much shorter than the other
if len_ratio > 8:
partial_scale = .6
if try_partial:
partial = partial_ratio(p1, p2) * partial_scale
ptsor = partial_token_sort_ratio(p1, p2, full_process=False) \
* unbase_scale * partial_scale
ptser = partial_token_set_ratio(p1, p2, full_process=False) \
* unbase_scale * partial_scale
return utils.intr(max(base, partial, ptsor, ptser))
else:
tsor = token_sort_ratio(p1, p2, full_process=False) * unbase_scale
tser = token_set_ratio(p1, p2, full_process=False) * unbase_scale
return utils.intr(max(base, tsor, tser))
def UWRatio(s1, s2):
"""Return a measure of the sequences' similarity between 0 and 100,
using different algorithms. Same as WRatio but preserving unicode.
"""
return WRatio(s1, s2, force_ascii=False)
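# Illustrative usage sketch, not part of the original module (run it via
# ``python -m`` because of the relative imports above). Exact scores depend on
# the underlying SequenceMatcher, so only rough expectations are noted.
if __name__ == "__main__":
    a = "new york mets vs atlanta braves"
    b = "atlanta braves vs new york mets"
    print(ratio(a, b))             # character-based, penalises the word order
    print(token_sort_ratio(a, b))  # 100: same tokens once sorted
    print(token_set_ratio(a, b))   # 100: identical token sets
    print(WRatio(a, b))            # weighted combination of the scorers above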
|
gpl-3.0
| -7,268,800,526,245,139,000
| 30.650376
| 115
| 0.663618
| false
| 3.466035
| false
| false
| false
|
tuhuayuan/chatmind
|
main.py
|
1
|
1130
|
# -*- coding: UTF-8 -*-
import tornado.web
import tornado.httpserver
import tornado.ioloop
import wechat
from tornado.options import define, options
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r"/wechat_index", wechat.IndexHandler)
]
settings = {
'debug': options.debug
}
tornado.web.Application.__init__(self, handlers, **settings)
class BaseHandler(tornado.web.RequestHandler):
@property
def dbsession(self):
return None
def main():
    define("port", default=2080, help="run on the given port", type=int)
define("debug", default=False, help="run in debug mode", type=bool)
    define("config", default="", help="load the given config file")
tornado.options.parse_command_line()
try:
tornado.options.parse_config_file(options.config)
except IOError:
options.print_help()
return
http_server = tornado.httpserver.HTTPServer(Application())
http_server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
|
mit
| -8,163,191,410,808,373,000
| 26.560976
| 73
| 0.646018
| false
| 3.951049
| false
| false
| false
|
zwffff2015/stock
|
api/tushareApi.py
|
1
|
1084
|
import tushare as ts
def getOneSpecifiedPriceHistoryData(code, start, end, priceType='close'):
data = getSpecifiedPriceHistoryData(code, start, end, priceType)
# print data
for item in data:
return data[item]
def getSpecifiedPriceHistoryData(code, start, end, priceType='close'):
# print code, start, end
data = ts.get_k_data(code, ktype='D', autype='qfq', start=start, end=end)
if len(data) <= 0:
return dict({'default': 0})
closeList = dict(data[priceType])
return closeList
def getRealTimeData(code, priceType='price'):
data = ts.get_realtime_quotes(code)
# print data
if data is None:
return 0
priceList = data[priceType].values[0]
return priceList
def getSimpleHistoryData(code, start, end):
data = ts.get_k_data(code, ktype='D', autype='qfq', start=start, end=end)
if len(data) <= 0:
return None
return data
def getHistoryData(code, start, end):
data = ts.get_hist_data(code, ktype='D', start=start, end=end)
if len(data) <= 0:
return None
return data
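# Illustrative usage sketch, not part of the original module: the stock code
# and the date range below are hypothetical, and the calls need network access
# to the tushare data service.
if __name__ == "__main__":
    closes = getSpecifiedPriceHistoryData('600848', '2017-01-01', '2017-02-01')
    print(closes)
    print(getRealTimeData('600848'))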
|
mit
| 638,128,801,533,022,700
| 26.1
| 77
| 0.654059
| false
| 3.274924
| false
| false
| false
|
AutorestCI/azure-sdk-for-python
|
azure-mgmt-compute/azure/mgmt/compute/v2017_03_30/models/disk_instance_view.py
|
1
|
1509
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class DiskInstanceView(Model):
"""The instance view of the disk.
:param name: The disk name.
:type name: str
:param encryption_settings: Specifies the encryption settings for the OS
Disk. <br><br> Minimum api-version: 2015-06-15
:type encryption_settings:
list[~azure.mgmt.compute.v2017_03_30.models.DiskEncryptionSettings]
:param statuses: The resource status information.
:type statuses:
list[~azure.mgmt.compute.v2017_03_30.models.InstanceViewStatus]
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'encryption_settings': {'key': 'encryptionSettings', 'type': '[DiskEncryptionSettings]'},
'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
}
def __init__(self, name=None, encryption_settings=None, statuses=None):
super(DiskInstanceView, self).__init__()
self.name = name
self.encryption_settings = encryption_settings
self.statuses = statuses
|
mit
| -3,492,721,895,791,761,000
| 37.692308
| 97
| 0.616302
| false
| 4.399417
| false
| false
| false
|
IBM-Security/ibmsecurity
|
ibmsecurity/isam/base/lmi.py
|
1
|
2757
|
import logging
import time
from ibmsecurity.appliance.ibmappliance import IBMError
logger = logging.getLogger(__name__)
def restart(isamAppliance, check_mode=False, force=False):
"""
Restart LMI
"""
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
return isamAppliance.invoke_post("Restarting LMI", "/restarts/restart_server", {})
def get(isamAppliance, check_mode=False, force=False):
"""
Get LMI Status
"""
# Be sure to ignore server error
return isamAppliance.invoke_get("Get LMI Status", "/lmi", ignore_error=True)
def await_startup(isamAppliance, wait_time=300, check_freq=5, start_time=None, check_mode=False, force=False):
"""
    Wait for the appliance to boot up or for the LMI to restart.
    Checking whether the LMI responds is the best option from a REST API perspective.
# Frequency (in seconds) when routine will check if server is up
# check_freq (seconds)
# Ideally start_time should be taken before restart request is send to LMI
# start_time (REST API standard)
# Time to wait for appliance/lmi to respond and have a different start time
# wait_time (seconds)
# Note: This function will not work unless first steps are completed.
"""
# Get the current start_time if not provided
if start_time is None:
ret_obj = get(isamAppliance)
start_time = ret_obj['data'][0]['start_time']
sec = 0
warnings = []
# Now check if it is up and running
while 1:
ret_obj = get(isamAppliance)
if ret_obj['rc'] == 0 and isinstance(ret_obj['data'], list) and len(ret_obj['data']) > 0 and 'start_time' in \
ret_obj['data'][0] and ret_obj['data'][0]['start_time'] != start_time:
logger.info("Server is responding and has a different start time!")
return isamAppliance.create_return_object(warnings=warnings)
else:
time.sleep(check_freq)
sec += check_freq
logger.debug(
"Server is not responding yet. Waited for {0} secs, next check in {1} secs.".format(sec, check_freq))
if sec >= wait_time:
                warnings.append("The LMI restart was not detected or did not complete, exiting after {0} seconds".format(sec))
break
return isamAppliance.create_return_object(warnings=warnings)
def restart_and_wait(isamAppliance, wait_time=300, check_freq=5, check_mode=False, force=False):
ret_obj = get(isamAppliance)
_start_time = ret_obj['data'][0]['start_time']
restart(isamAppliance, check_mode, force)
return await_startup(isamAppliance, wait_time=wait_time, check_freq=check_freq, start_time=_start_time,
check_mode=False, force=False)
|
apache-2.0
| -7,524,142,561,415,226,000
| 34.346154
| 118
| 0.654697
| false
| 3.642008
| false
| false
| false
|
avelino/pycorreios
|
setup.py
|
1
|
1456
|
from setuptools import setup
import os
DESCRIPTION = "API for Brazillian Correios in Python"
LONG_DESCRIPTION = None
try:
LONG_DESCRIPTION = open('README.md').read()
except:
pass
def get_version(version_tuple):
version = '%s.%s' % (version_tuple[0], version_tuple[1])
if version_tuple[2]:
version = '%s.%s' % (version, version_tuple[2])
return version
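# Illustrative examples, not part of the original file: get_version((1, 0, 3))
# returns '1.0.3', while get_version((1, 0, 0)) returns just '1.0' because the
# third component is falsy and therefore dropped.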
# Dirty hack to get the version number from pycorreios/__init__.py - we can't
# import the package at setup time, so the file is read and parsed directly
init = os.path.join(os.path.dirname(__file__), 'pycorreios', '__init__.py')
version_line = list(filter(lambda l: l.startswith('VERSION'), open(init)))[0]
VERSION = get_version(eval(version_line.split('=')[-1]))
print(VERSION)
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
]
setup(name='pycorreios',
version=VERSION,
packages=[
'test',
'pycorreios',
],
author='Thiago Avelino',
author_email='thiagoavelinoster@gmail.com',
url='https://github.com/avelino/pycorreios/',
license='MIT',
include_package_data=True,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
platforms=['any'],
classifiers=CLASSIFIERS,
test_suite='test',
)
|
mit
| 2,467,762,093,427,377,000
| 27
| 77
| 0.64011
| false
| 3.568627
| false
| false
| false
|
kstateome/django-cas
|
cas/backends.py
|
1
|
7139
|
import logging
from xml.dom import minidom
import time
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
from django.conf import settings
from django.contrib.auth import get_user_model
from cas.exceptions import CasTicketException
from cas.models import Tgt, PgtIOU
from cas.utils import cas_response_callbacks
__all__ = ['CASBackend']
logger = logging.getLogger(__name__)
def _verify_cas1(ticket, service):
"""
Verifies CAS 1.0 authentication ticket.
:param: ticket
:param: service
Returns username on success and None on failure.
"""
params = {'ticket': ticket, 'service': service}
url = (urljoin(settings.CAS_SERVER_URL, 'validate') + '?' +
urlencode(params))
page = urlopen(url)
try:
verified = page.readline().strip()
if verified == 'yes':
return page.readline().strip()
else:
return None
finally:
page.close()
def _verify_cas2(ticket, service):
"""
Verifies CAS 2.0+ XML-based authentication ticket.
:param: ticket
:param: service
"""
return _internal_verify_cas(ticket, service, 'proxyValidate')
def _verify_cas3(ticket, service):
return _internal_verify_cas(ticket, service, 'p3/proxyValidate')
def _internal_verify_cas(ticket, service, suffix):
"""Verifies CAS 2.0 and 3.0 XML-based authentication ticket.
Returns username on success and None on failure.
"""
params = {'ticket': ticket, 'service': service}
if settings.CAS_PROXY_CALLBACK:
params['pgtUrl'] = settings.CAS_PROXY_CALLBACK
url = (urljoin(settings.CAS_SERVER_URL, suffix) + '?' +
urlencode(params))
page = urlopen(url)
username = None
try:
response = page.read()
tree = ElementTree.fromstring(response)
document = minidom.parseString(response)
if tree[0].tag.endswith('authenticationSuccess'):
if settings.CAS_RESPONSE_CALLBACKS:
cas_response_callbacks(tree)
username = tree[0][0].text
# The CAS Response includes the PGT_IOU, which we use to lookup the PGT/TGT.
pgt_element = document.getElementsByTagName('cas:proxyGrantingTicket')
if pgt_element:
pgt_iou_token = pgt_element[0].firstChild.nodeValue
try:
pgt_iou_mapping = _get_pgt_iou_mapping(pgt_iou_token)
except Exception as e:
logger.warning('Failed to do proxy authentication. %s' % e)
else:
try:
tgt = Tgt.objects.get(username=username)
except Tgt.DoesNotExist:
Tgt.objects.create(username=username, tgt=pgt_iou_mapping.tgt)
logger.info('Creating TGT ticket for {user}'.format(user=username))
else:
tgt.tgt = pgt_iou_mapping.tgt
tgt.save()
pgt_iou_mapping.delete()
else:
failure = document.getElementsByTagName('cas:authenticationFailure')
if failure:
logger.warn('Authentication failed from CAS server: %s',
failure[0].firstChild.nodeValue)
except Exception as e:
logger.error('Failed to verify CAS authentication: {message}'.format(
message=e
))
finally:
page.close()
return username
def verify_proxy_ticket(ticket, service):
"""
Verifies CAS 2.0+ XML-based proxy ticket.
:param: ticket
:param: service
Returns username on success and None on failure.
"""
params = {'ticket': ticket, 'service': service}
url = (urljoin(settings.CAS_SERVER_URL, 'proxyValidate') + '?' +
urlencode(params))
page = urlopen(url)
try:
response = page.read()
tree = ElementTree.fromstring(response)
if tree[0].tag.endswith('authenticationSuccess'):
username = tree[0][0].text
proxies = []
if len(tree[0]) > 1:
for element in tree[0][1]:
proxies.append(element.text)
return {"username": username, "proxies": proxies}
else:
return None
finally:
page.close()
_PROTOCOLS = {'1': _verify_cas1, '2': _verify_cas2, '3': _verify_cas3}
if settings.CAS_VERSION not in _PROTOCOLS:
raise ValueError('Unsupported CAS_VERSION %r' % settings.CAS_VERSION)
_verify = _PROTOCOLS[settings.CAS_VERSION]
def _get_pgt_iou_mapping(pgt_iou):
"""
Returns the instance of PgtIou -> Pgt mapping which is associated with the provided pgt_iou token.
Because this mapping is created in a different request which the CAS server makes to the proxy callback url, the
PGTIOU->PGT mapping might not be found yet in the database by this calling thread, hence the attempt to get
the ticket is retried for up to 5 seconds.
This should be handled some better way.
Users can opt out of this waiting period by setting CAS_PGT_FETCH_WAIT = False
:param: pgt_iou
"""
retries_left = 5
if not settings.CAS_PGT_FETCH_WAIT:
retries_left = 1
while retries_left:
try:
return PgtIOU.objects.get(pgtIou=pgt_iou)
except PgtIOU.DoesNotExist:
if settings.CAS_PGT_FETCH_WAIT:
time.sleep(1)
retries_left -= 1
logger.info('Did not fetch ticket, trying again. {tries} tries left.'.format(
tries=retries_left
))
raise CasTicketException("Could not find pgt for pgtIou %s" % pgt_iou)
class CASBackend(object):
"""
CAS authentication backend
"""
supports_object_permissions = False
supports_inactive_user = False
def authenticate(self, request, ticket, service):
"""
Verifies CAS ticket and gets or creates User object
NB: Use of PT to identify proxy
"""
User = get_user_model()
username = _verify(ticket, service)
if not username:
return None
try:
user = User.objects.get(username__iexact=username)
except User.DoesNotExist:
# user will have an "unusable" password
if settings.CAS_AUTO_CREATE_USER:
user = User.objects.create_user(username, '')
user.save()
else:
user = None
return user
def get_user(self, user_id):
"""
Retrieve the user's entry in the User model if it exists
"""
User = get_user_model()
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
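# Illustrative configuration sketch, not part of the original module: the
# backend is wired up through standard Django settings; the values below are
# hypothetical examples of the settings referenced in this file.
#
#     AUTHENTICATION_BACKENDS = ['cas.backends.CASBackend']
#     CAS_SERVER_URL = 'https://cas.example.edu/cas/'
#     CAS_VERSION = '2'
#     CAS_AUTO_CREATE_USER = True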
|
mit
| -9,150,461,524,364,492,000
| 27.217391
| 118
| 0.600364
| false
| 4.204358
| false
| false
| false
|
macosforge/ccs-calendarserver
|
txweb2/filter/range.py
|
1
|
4686
|
# -*- test-case-name: txweb2.test.test_stream -*-
import os
import time
from txweb2 import http, http_headers, responsecode, stream
# Some starts at writing a response filter to handle request ranges.
class UnsatisfiableRangeRequest(Exception):
pass
def canonicalizeRange((start, end), size):
"""Return canonicalized (start, end) or raises UnsatisfiableRangeRequest
exception.
NOTE: end is the last byte *inclusive*, which is not the usual convention
in python! Be very careful! A range of 0,1 should return 2 bytes."""
# handle "-500" ranges
if start is None:
start = max(0, size - end)
end = None
if end is None or end >= size:
end = size - 1
if start >= size:
raise UnsatisfiableRangeRequest
return start, end
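# Worked examples for the note above (illustrative, not part of the original
# file), all with size=10:
#     canonicalizeRange((0, 1), 10)    -> (0, 1)  # two bytes, end inclusive
#     canonicalizeRange((None, 3), 10) -> (7, 9)  # "-3": the last three bytes
#     canonicalizeRange((4, None), 10) -> (4, 9)  # everything from offset 4 on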
def makeUnsatisfiable(request, oldresponse):
if request.headers.hasHeader('if-range'):
return oldresponse # Return resource instead of error
response = http.Response(responsecode.REQUESTED_RANGE_NOT_SATISFIABLE)
response.headers.setHeader("content-range", ('bytes', None, None, oldresponse.stream.length))
return response
def makeSegment(inputStream, lastOffset, start, end):
offset = start - lastOffset
length = end + 1 - start
if offset != 0:
before, inputStream = inputStream.split(offset)
before.close()
return inputStream.split(length)
def rangefilter(request, oldresponse):
if oldresponse.stream is None:
return oldresponse
size = oldresponse.stream.length
if size is None:
# Does not deal with indeterminate length outputs
return oldresponse
oldresponse.headers.setHeader('accept-ranges', ('bytes',))
rangespec = request.headers.getHeader('range')
# If we've got a range header and the If-Range header check passes, and
# the range type is bytes, do a partial response.
if (
rangespec is not None and http.checkIfRange(request, oldresponse) and
rangespec[0] == 'bytes'
):
# If it's a single range, return a simple response
if len(rangespec[1]) == 1:
try:
start, end = canonicalizeRange(rangespec[1][0], size)
except UnsatisfiableRangeRequest:
return makeUnsatisfiable(request, oldresponse)
response = http.Response(responsecode.PARTIAL_CONTENT, oldresponse.headers)
response.headers.setHeader('content-range', ('bytes', start, end, size))
content, after = makeSegment(oldresponse.stream, 0, start, end)
after.close()
response.stream = content
return response
else:
# Return a multipart/byteranges response
lastOffset = -1
offsetList = []
for arange in rangespec[1]:
try:
start, end = canonicalizeRange(arange, size)
except UnsatisfiableRangeRequest:
continue
if start <= lastOffset:
# Stupid client asking for out-of-order or overlapping ranges, PUNT!
return oldresponse
offsetList.append((start, end))
lastOffset = end
if not offsetList:
return makeUnsatisfiable(request, oldresponse)
content_type = oldresponse.headers.getRawHeaders('content-type')
boundary = "%x%x" % (int(time.time() * 1000000), os.getpid())
response = http.Response(responsecode.PARTIAL_CONTENT, oldresponse.headers)
response.headers.setHeader(
'content-type',
http_headers.MimeType('multipart', 'byteranges',
[('boundary', boundary)])
)
response.stream = out = stream.CompoundStream()
lastOffset = 0
origStream = oldresponse.stream
headerString = "\r\n--%s" % boundary
if len(content_type) == 1:
headerString += '\r\nContent-Type: %s' % content_type[0]
headerString += "\r\nContent-Range: %s\r\n\r\n"
for start, end in offsetList:
out.addStream(
headerString %
http_headers.generateContentRange(('bytes', start, end, size))
)
content, origStream = makeSegment(origStream, lastOffset, start, end)
lastOffset = end + 1
out.addStream(content)
origStream.close()
out.addStream("\r\n--%s--\r\n" % boundary)
return response
else:
return oldresponse
__all__ = ['rangefilter']
|
apache-2.0
| 1,989,880,107,851,133,000
| 32.71223
| 97
| 0.597525
| false
| 4.334875
| false
| false
| false
|
Brainiq7/Ananse
|
ananse_dl/extractor/gdcvault.py
|
1
|
6407
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
compat_urllib_request,
)
class GDCVaultIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?gdcvault\.com/play/(?P<id>\d+)/(?P<name>(\w|-)+)'
_TESTS = [
{
'url': 'http://www.gdcvault.com/play/1019721/Doki-Doki-Universe-Sweet-Simple',
'md5': '7ce8388f544c88b7ac11c7ab1b593704',
'info_dict': {
'id': '1019721',
'ext': 'mp4',
'title': 'Doki-Doki Universe: Sweet, Simple and Genuine (GDC Next 10)'
}
},
{
'url': 'http://www.gdcvault.com/play/1015683/Embracing-the-Dark-Art-of',
'info_dict': {
'id': '1015683',
'ext': 'flv',
'title': 'Embracing the Dark Art of Mathematical Modeling in AI'
},
'params': {
'skip_download': True, # Requires rtmpdump
}
},
# {
# 'url': 'http://www.gdcvault.com/play/1015301/Thexder-Meets-Windows-95-or',
# 'md5': 'a5eb77996ef82118afbbe8e48731b98e',
# 'info_dict': {
# 'id': '1015301',
# 'ext': 'flv',
# 'title': 'Thexder Meets Windows 95, or Writing Great Games in the Windows 95 Environment',
# }
# }
]
def _parse_mp4(self, xml_description):
video_formats = []
mp4_video = xml_description.find('./metadata/mp4video')
if mp4_video is None:
return None
mobj = re.match(r'(?P<root>https?://.*?/).*', mp4_video.text)
video_root = mobj.group('root')
formats = xml_description.findall('./metadata/MBRVideos/MBRVideo')
for format in formats:
mobj = re.match(r'mp4\:(?P<path>.*)', format.find('streamName').text)
url = video_root + mobj.group('path')
vbr = format.find('bitrate').text
video_formats.append({
'url': url,
'vbr': int(vbr),
})
return video_formats
def _parse_flv(self, xml_description):
video_formats = []
akami_url = xml_description.find('./metadata/akamaiHost').text
slide_video_path = xml_description.find('./metadata/slideVideo').text
video_formats.append({
'url': 'rtmp://' + akami_url + '/' + slide_video_path,
'format_note': 'slide deck video',
'quality': -2,
'preference': -2,
'format_id': 'slides',
})
speaker_video_path = xml_description.find('./metadata/speakerVideo').text
video_formats.append({
'url': 'rtmp://' + akami_url + '/' + speaker_video_path,
'format_note': 'speaker video',
'quality': -1,
'preference': -1,
'format_id': 'speaker',
})
return video_formats
def _login(self, webpage_url, video_id):
(username, password) = self._get_login_info()
if username is None or password is None:
self.report_warning('It looks like ' + webpage_url + ' requires a login. Try specifying a username and password and try again.')
return None
mobj = re.match(r'(?P<root_url>https?://.*?/).*', webpage_url)
login_url = mobj.group('root_url') + 'api/login.php'
logout_url = mobj.group('root_url') + 'logout'
login_form = {
'email': username,
'password': password,
}
request = compat_urllib_request.Request(login_url, compat_urllib_parse.urlencode(login_form))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
self._download_webpage(request, video_id, 'Logging in')
start_page = self._download_webpage(webpage_url, video_id, 'Getting authenticated video page')
self._download_webpage(logout_url, video_id, 'Logging out')
return start_page
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage_url = 'http://www.gdcvault.com/play/' + video_id
start_page = self._download_webpage(webpage_url, video_id)
direct_url = self._search_regex(
r's1\.addVariable\("file",\s*encodeURIComponent\("(/[^"]+)"\)\);',
start_page, 'url', default=None)
if direct_url:
video_url = 'http://www.gdcvault.com/' + direct_url
title = self._html_search_regex(
r'<td><strong>Session Name</strong></td>\s*<td>(.*?)</td>',
start_page, 'title')
return {
'id': video_id,
'url': video_url,
'ext': 'flv',
'title': title,
}
xml_root = self._html_search_regex(
r'<iframe src="(?P<xml_root>.*?)player.html.*?".*?</iframe>',
start_page, 'xml root', default=None)
if xml_root is None:
# Probably need to authenticate
login_res = self._login(webpage_url, video_id)
if login_res is None:
self.report_warning('Could not login.')
else:
start_page = login_res
# Grab the url from the authenticated page
xml_root = self._html_search_regex(
r'<iframe src="(.*?)player.html.*?".*?</iframe>',
start_page, 'xml root')
xml_name = self._html_search_regex(
r'<iframe src=".*?\?xml=(.+?\.xml).*?".*?</iframe>',
start_page, 'xml filename', default=None)
if xml_name is None:
# Fallback to the older format
xml_name = self._html_search_regex(r'<iframe src=".*?\?xmlURL=xml/(?P<xml_file>.+?\.xml).*?".*?</iframe>', start_page, 'xml filename')
        xml_description_url = xml_root + 'xml/' + xml_name
        xml_description = self._download_xml(xml_description_url, video_id)
video_title = xml_description.find('./metadata/title').text
video_formats = self._parse_mp4(xml_description)
if video_formats is None:
video_formats = self._parse_flv(xml_description)
return {
'id': video_id,
'title': video_title,
'formats': video_formats,
}
|
unlicense
| 8,123,906,485,022,277,000
| 37.596386
| 146
| 0.524739
| false
| 3.638274
| false
| false
| false
|
Mandrilux/GOC_2017
|
api/polydevs/parking/views.py
|
1
|
1100
|
from django.shortcuts import get_object_or_404
from rest_framework import viewsets, filters
from parking.models import Parking
from parking.serializers import ParkingSerializer, ParkingDetailSerializer
from parking.filters import FilterParking
class MultipleFieldLookupMixin(object):
def get_object(self):
queryset = self.get_queryset()
queryset = self.filter_queryset(queryset)
filter = {}
for field in self.lookup_fields:
if self.kwargs[field]:
filter[field] = self.kwargs[field]
return get_object_or_404(queryset, **filter)
class ParkingViewSet(MultipleFieldLookupMixin,
viewsets.ModelViewSet):
queryset = Parking.objects.all()
serializer_class = ParkingSerializer
filter_backends = (filters.DjangoFilterBackend,)
lookup_fields = ('lon', 'lat',)
filter_class = FilterParking
def get_serializer_class(self):
        if self.action in ('retrieve', 'update', 'delete'):
return ParkingDetailSerializer
return ParkingSerializer
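# --- Illustrative sketch only (not part of the original file) ---
# MultipleFieldLookupMixin.get_object() above builds its queryset filter from
# the URL kwargs named in lookup_fields ('lon' and 'lat'), so a matching route
# has to capture both values. The regex and pattern name below are assumptions
# made purely for illustration.
from django.conf.urls import url # noqa: E402 -- example-only import
example_urlpatterns = [
    url(r'^parkings/(?P<lon>-?[\d.]+)/(?P<lat>-?[\d.]+)/$',
        ParkingViewSet.as_view({'get': 'retrieve'}),
        name='parking-by-coordinates'),
]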
|
agpl-3.0
| 6,739,548,767,564,175,000
| 35.666667
| 91
| 0.7
| false
| 4.089219
| false
| false
| false
|
meltwater/proxymatic
|
src/proxymatic/discovery/registrator.py
|
1
|
2745
|
import logging
import json
import socket
import traceback
import urllib2
from urlparse import urlparse
from proxymatic.services import Server, Service
from proxymatic import util
class RegistratorEtcdDiscovery(object):
def __init__(self, backend, url):
self._backend = backend
self._url = urlparse(url)
self._healthy = False
self.priority = 5
def isHealthy(self):
return self._healthy
def start(self):
def action():
# Fetch all registered service instances
geturl = 'http://%s/v2/keys%s?recursive=true' % (self._url.netloc, self._url.path)
logging.debug("GET registrator services from %s", geturl)
response = urllib2.urlopen(geturl)
waitIndex = int(response.info().getheader('X-Etcd-Index')) + 1
services = self._parse(response.read())
self._backend.update(self, services)
logging.info("Refreshed services from registrator store %s", self._url.geturl())
# Signal that we're up and running
self._healthy = True
# Long poll for updates
pollurl = 'http://%s/v2/keys%s?wait=true&recursive=true&waitIndex=%s' % (self._url.netloc, self._url.path, waitIndex)
urllib2.urlopen(pollurl).read()
# Run action() in thread with retry on error
util.run(action, "etcd error from '" + self._url.geturl() + "': %s")
def _parse(self, content):
services = {}
state = json.loads(content)
for node in util.rget(state, 'node', 'nodes') or []:
for backend in util.rget(node, 'nodes') or []:
try:
parts = backend['key'].split(':')
port = int(parts[2])
protocol = parts[3] if len(parts) > 3 else 'tcp'
key = '%s/%s' % (port, protocol.lower())
# Resolve hostnames since HAproxy wants IP addresses
endpoint = backend['value'].split(':')
ipaddr = socket.gethostbyname(endpoint[0])
server = Server(ipaddr, endpoint[1], endpoint[0])
# Append backend to service
if key not in services:
name = node['key'].split('/')[-1]
services[key] = Service(name, 'registrator:%s' % self._url.geturl(), port, protocol)
services[key] = services[key].addServer(server)
except Exception as e:
logging.warn("Failed to parse service %s backend %s/%s: %s", node['key'], backend['key'], backend['value'], str(e))
logging.debug(traceback.format_exc())
return services
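# --- Illustrative sketch only (not part of the original module) ---
# A minimal etcd response body of the shape _parse() above expects: one
# directory node per service, each holding backend nodes whose key ends in
# "<host>:<container>:<port>[:<protocol>]" and whose value is "host:port".
# The concrete key and value strings are made up for illustration.
_EXAMPLE_ETCD_BODY = json.dumps({
    'node': {
        'nodes': [
            {'key': '/services/web',
             'nodes': [
                 {'key': '/services/web/host1:web-1:8080',
                  'value': 'host1:31001'},
             ]},
        ],
    },
})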
|
mit
| -343,665,641,712,587,600
| 39.970149
| 135
| 0.55592
| false
| 4.302508
| false
| false
| false
|
CumulusNetworks/pyjeet
|
pyjeet/logcontainer.py
|
1
|
5027
|
#
# Copyright 2014 Cumulus Networks, Inc. All rights reserved.
# Author: Alexandre Renard <arenardvv@gmail.com>
#
# pyjeet --
# the distributed log analysis tool for networking troubleshooting.
#
from abc import ABCMeta, abstractmethod
from file import *
from network_obj import *
class LogContainer:
__metaclass__ = ABCMeta
def __init__(self):
self.files = []
self.logs = []
self.interfaces = []
self.selected_interfaces = []
self.bridges = []
self.selected_bridges = []
@abstractmethod
def get_interfaces_files(self, standalone):
'''
Return the config files needed to configure interfaces
'''
pass
def set_selected_interfaces(self, selected_interfaces, normalizer=None):
#select user interfaces with user input in the loaded interfaces
if not self.interfaces and normalizer:
self.load_interfaces(normalizer)
#if no particular interface is chosen get them all on all keyword
if len(selected_interfaces) and "all" in selected_interfaces:
self.selected_interfaces = self.interfaces
else:
self.selected_interfaces = self.get_interfaces_from_names(selected_interfaces)
def set_selected_bridges(self, selected_bridges, normalizer=None):
#select bridges with user input in the loaded bridges
if not self.bridges and normalizer:
self.load_bridges(normalizer)
#if no particular bridge is chosen get them all on all keyword
if len(selected_bridges) and "all" in selected_bridges:
self.selected_bridges = self.bridges
else:
self.selected_bridges = self.get_bridges_from_names(selected_bridges)
def clear_selected_interfaces(self):
self.selected_interfaces = []
def clear_selected_bridges(self):
self.selected_bridges = []
def load_interfaces(self, normalizer, standalone=False):
#loads all interfaces from interface conf files
files_info = self.get_interfaces_files(standalone)
for info in files_info:
for data in File(info['name'], info['content']).normalize(normalizer, is_log=False,debug_context=True).data:
if not self.find_interface(data):
if not data.has_key('linux_interface'):
continue
self.interfaces.append(Interface(data))
return self
def load_bridges(self, standalone=False):
#loads all bridges from brctl conf files
brctl_data = self.get_bridges_files(standalone)
for line in brctl_data:
line = line.split()
if len(line) == 1:
# if vlan interface given take the whole interface
inf = self.get_if_object_from_name(line[0].split('.')[-1])
if inf is not None:
inf.set_bridge(self.bridges[-1])
                    self.bridges[-1].add_if(inf)
elif len(line) == 4:
self.bridges.append(Bridge(line[0]))
inf = self.get_if_object_from_name(line[-1].split('.')[-1])
if inf is not None:
inf.set_bridge(self.bridges[-1])
self.bridges[-1].add_if(inf)
else:
logging.debug("Weird number of parameters in line from brctl show")
continue
return self
def get_if_object_from_name(self, linux_name):
for interface in self.interfaces:
if interface.linux == linux_name:
return interface
def find_interface(self, data):
for interface in self.interfaces:
linux = data.get('linux_interface')
if linux and interface.linux == linux:
interface.update(data)
return True
sdk = data.get('sdk_interface')
if sdk and interface.sdk == sdk:
interface.update(data)
return True
id = data.get('id_interface')
if id and interface.id == id:
interface.update(data)
return True
return False
def get_interfaces_from_names(self, interfaces_name):
return [interface for interface in self.interfaces if
(interface.linux and interfaces_name.count(interface.linux)) or (
interface.sdk and interfaces_name.count(interface.sdk))]
def get_bridges_from_names(self, bridges_name):
return [bridge for bridge in self.bridges if
(bridge.name and bridges_name.count(bridge.name))]
def normalize_files(self, normalizer, timestamp, interval, normalized_logs=None):
for f in self.files:
f.normalize(normalizer, timestamp, interval, True, True, normalized_logs)
return self
def sort_logs(self):
for f in self.files:
self.logs.extend(f.data)
self.logs.sort(lambda l1, l2: int(l1.date - l2.date))
return self
|
gpl-2.0
| 6,239,281,146,889,183,000
| 38.273438
| 120
| 0.600955
| false
| 4.256562
| false
| false
| false
|
yarray/md2pdf
|
compile.py
|
1
|
2492
|
#! /usr/bin/env python
'''
compiler from markdown file to pdf using wkhtmltopdf
Usage:
compile.py [--script FILE] [--style FILE]
[--pdf-options STRING] [--toc] <input> [<output>]
compile.py (-h | --help)
Options:
-h --help Show help screen
--script FILE Script reference to be used in html
--style FILE The css stylesheet
--pdf-options STRING Options passed to wkhtmltopdf
--toc Generate table of content
'''
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), 'lib'))
from docopt import docopt
def guess_convertor():
def aux(name):
return os.path.join(os.path.dirname(__file__), name)
possible_vers = ['wkhtmltopdf-amd64', 'wkhtmltopdf-i386']
return [cand for cand in
[os.path.join(os.path.dirname(__file__), name)
for name in possible_vers]
if os.path.isfile(cand)][0]
def compile_to_html(source, toc=False):
if toc:
try:
import markdown
with open(source) as f:
return markdown.markdown(f.read(), extensions=['toc'])
except:
return os.popen('markdown {0}'.format(source)).read()
return os.popen('markdown {0}'.format(source)).read()
def build_sample(html, style):
text = ''
if style:
text += '<head><link href="{0}" rel="stylesheet"></head>\n'\
.format(style)
return text + html
def write_html(html, name, script_name):
text = '<html lang="en-us">'
text += html
if script_name:
with open(script_name) as sc:
text += sc.read()
text += '</html>'
with open(name, 'w') as f:
f.write(text)
def generate_pdf(for_print, output, options):
cmd = guess_convertor() + ' --encoding utf-8 -s A4 ' + \
(options + ' ' if options else '') + \
'./{0} --javascript-delay 1000 {1}'.format(for_print, output)
print cmd
os.system(cmd)
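# --- Illustrative sketch only (not part of the original script) ---
# Programmatic equivalent of "./compile.py --toc --style style.css notes.md",
# mirroring the steps in the __main__ block below; the file names are made up
# for illustration.
def example_run():
    html = compile_to_html('notes.md', toc=True)
    write_html(build_sample(html, 'style.css'), 'notes.sample.html', None)
    generate_pdf('notes.sample.html', 'notes.pdf', None)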
if __name__ == '__main__':
args = docopt(__doc__)
source = args['<input>']
name, ext = os.path.splitext(source)
out_name = args['<output>'] if args['<output>'] else (name + '.pdf')
sample = name + '.sample.html'
style = args['--style']
script = args['--script']
pdf_options = args['--pdf-options']
html = compile_to_html(source, args['--toc'])
write_html(build_sample(html, style), sample, script)
generate_pdf(sample, out_name, pdf_options)
|
mit
| 2,322,206,154,730,118,000
| 27
| 72
| 0.570225
| false
| 3.534752
| false
| false
| false
|
scorpionis/docklet
|
src/slot_scheduler.py
|
1
|
10313
|
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
import math
import random
import numpy as np
from mdkp import Colony
from machine import AllocationOfMachine
import heapq
from connection import *
import time
import _thread
import logging
import json
import jsonpickle
from log import slogger
#import log
machine_queue = []
# only used for test
task_requests = {}
tasks = {}
machines = {}
restricted_index = 0
node_manager = None
etcdclient = None
def generate_test_data(cpu,mem, count,type,id_base):
task_requests = {}
cpu_arr = np.random.binomial(cpu, 1/16, count)
mem_arr = np.random.binomial(mem, 1/16, count)
# cpu_arr = np.random.uniform(1,cpu,cpu*machines)
# mem_arr = np.random.uniform(1,mem,cpu*machines)
bids = np.random.uniform(1,100,count)
for i in range(0+id_base,count):
if cpu_arr[i]==0 or mem_arr[i] ==0:
continue
task = {
'id': str(i),
'cpus': str(int(math.ceil(cpu_arr[i]))),
'mems': str(int(math.ceil(mem_arr[i]))),
'bid': str(int(bids[i]))
}
key = str(i)
task_requests[key] = task
# write to a file
# with open('uniform_tasks.txt','w') as f:
# for key, task in tasks.items():
# f.write(str(task['cpus'])+' '+str(task['mems'])+' '+str(task['bid'])+'\n')
return task_requests
def parse_test_data(filename,cpus,mems, count):
global tasks
with open(filename,'r') as f:
i =0
for line in f.readlines()[0:count]:
arr = line.split()
task = {
'id': str(i),
'cpus': float(arr[0]),
'mems': float(arr[1]),
'bid': int(arr[2])
}
key = str(i)
task_requests[key] = task
i+=1
print(task)
def add_machine(id, cpus=24, mems=240000):
global machines
global machine_queue
machine = AllocationOfMachine(id, cpus, mems)
machines[id] = machine
heapq.heappush(machine_queue,machine)
    # to-do: switch to multi-threading and run each colony thread directly
# machine.colony.run()
send_colony("create",machine.machineid, str(machine.reliable_cpus), str(machine.reliable_mems))
sync_colony()
# save_machine in etcd
# save_machine(machine)
return machine
def pre_allocate(task):
global restricted_index
if 'bid' in task and task['bid']!='0':
machine = heapq.heappop(machine_queue)
task['machineid'] = machine.machineid
task['allocation_type'] = 'none'
task['allocation_cpus'] = str(int(task['cpus'])*1000)
task['allocation_mems'] = task['mems']
task['allocation_mems_sw'] = str( 2 * int(task['mems']) )
task['allocation_mems_soft'] = str( 2 * int(task['mems']) )
tasks[task['id']] = task
machine.total_value += int(task['bid'])
heapq.heappush(machine_queue,machine)
# save machine and task
# save_machine(machine)
# save_task(task)
else:
if(restricted_index >= len(machines)):
restricted_index = 0
slogger.debug("restricted_index: ", restricted_index)
values = list(machines.values())
task['machineid'] = values[restricted_index].machineid
restricted_index += 1
task['allocation_type'] = 'none'
task['allocation_cpus'] = str(int(task['cpus'])*1000)
task['allocation_mems'] = task['mems']
task['allocation_mems_sw'] = str( 2 * int(task['mems']) )
task['allocation_memsp_soft'] = str( 2 * int(task['mems']) )
tasks[task['id']] = task
# save task
# save_task(task)
return task
def allocate(id):
task = tasks[id]
machineid = task['machineid']
machine = machines[machineid]
if 'bid' in task and task['bid']!='0':
# slogger.debug("dispatch reliable")
task = machine.add_reliable_task(task)
# save task and machine
# save_task(task)
# save_machine(machine)
# slogger.debug("pop machine: id = %s", machine.machineid)
send_task(machine,task,"add")
else:
# slogger.debug("dispatch restricted")
task = machine.add_restricted_task(task)
# save task and machine
# save_task(task)
# save_machine(machine)
return task
def release(id):
task = tasks[id]
machineid = tasks[id]['machineid']
machine = machines[machineid]
if 'bid' in task and task['bid']!='0':
slogger.debug("release reliable")
machine.release_reliable_task(id)
send_task(machine,task,'delete')
else:
slogger.debug("release restricted")
machine.release_restricted_task(id)
def after_release(id):
task = tasks[id]
for index,machine in enumerate(machine_queue):
if task['machineid'] == machine.machineid:
del machine_queue[index]
break
machine.total_value -= int(task['bid'])
heapq.heappush(machine_queue,machine)
del tasks[id]
def init_scheduler():
    # start the external C program (aco-mmdkp) and run it in the background
import os
os.system("/home/augustin/docklet/src/aco-mmdkp/acommdkp >/home/augustin/docklet/src/aco-mmdkp.log 2>&1 &")
slogger.setLevel(logging.INFO)
slogger.info("init scheduler!")
init_sync_socket()
init_colony_socket()
init_task_socket()
init_result_socket()
_thread.start_new_thread(recv_result,(machines,))
def recover_scheduler():
global machines
global tasks
global machine_queue
    # start the external C program (aco-mmdkp) and run it in the background
import os
os.system("/home/augustin/docklet/src/aco-mmdkp/acommdkp >/home/augustin/docklet/src/aco-mmdkp.log 2>&1 &")
slogger.setLevel(logging.INFO)
slogger.info("recover scheduler!")
init_sync_socket()
init_colony_socket()
init_task_socket()
init_result_socket()
# recover alll the machines
[status, runlist] = etcdclient.listdir("machines/runnodes")
for node in runlist:
nodeip = node['key'].rsplit('/',1)[1]
if node['value'] == 'ok':
slogger.info ("running node %s" % nodeip)
# inform dscheduler the recovered running nodes
import dscheduler
slogger.info("recover machine %s to scheduler",nodeip)
machine = load_machine(nodeip)
# recover machine_queue
heapq.heappush(machine_queue,machine)
# send machine to C process
send_colony("create",machine.machineid, str(machine.reliable_cpus), str(machine.reliable_mems))
sync_colony()
# recover recv_result thread
_thread.start_new_thread(recv_result,(machines,))
# recover all the tasks
load_tasks()
# send tasks to colony
for id,task in tasks.items():
machineid = task['machineid']
machine = machines[machineid]
send_task(machine,task,"add")
def save_machine(machine):
machine_str = jsonpickle.encode(machine)
etcdclient.setkey("/scheduler/machines/"+machine.machineid, machine_str)
def load_machine(ip):
global machines
[string,machine_str] = etcdclient.getkey("/scheduler/machines/"+ip)
machine = jsonpickle.decode(machine_str)
machines[machine.machineid]=machine
return machine
def load_machines():
global machines
[status,kvs] = etcdclient.listdir("/scheduler/machines/")
for kv in kvs:
machine_str = kv['value']
machine = jsonpickle.decode(machine_str)
machines[machine.id]=machine
def save_task(task):
task_str = json.dumps(task)
etcdclient.setkey("/scheduler/tasks/"+task['id'], task_str)
def load_tasks():
global tasks
[status,kvs] = etcdclient.listdir("/scheduler/tasks/")
for kv in kvs:
task_str = kv['value']
task = jsonpickle.decode(task_str)
if task['machineid'] in machines.keys():
tasks[kv['key']]=task
def test_all(requests):
init_scheduler()
for i in range(0,100):
add_machine("m"+str(i),64,256)
slogger.info("add colonies done!")
if not requests:
requests = generate_test_data(64,256,16*2*100,"reliable",0)
# generate_test_data(64,256,1,"restricted",192)
for index,request in requests.items():
pre_allocate(request)
slogger.info("pre allocate tasks done")
for index,request in requests.items():
allocate(request['id'])
slogger.info("allocate tasks done")
time.sleep(10)
# for index,request in requests.items():
# release(request['id'])
# slogger.info("release tasks done")
# for index,request in requests.items():
# after_release(request['id'])
# slogger.info("after release tasks done")
social_welfare = 0
for index,machine in machines.items():
total_value = 0
for taskid in machine.reliable_allocations:
total_value += int(tasks[taskid]['bid'])
print("machine %s has total_value %d" % (machine.machineid, total_value))
social_welfare += total_value
print("social welfare:", social_welfare)
def test_slot_allocate(requests):
if not requests:
requests = generate_test_data(64,256,16*2*10,'reliable',0)
slot_cpu = 4.0
slot_mem = 16.0
for index, request in requests.items():
slots_c = math.ceil(float(request['cpus']) / slot_cpu)
slots_m = math.ceil(float(request['mems']) / slot_mem)
slots = slots_c if slots_c > slots_m else slots_m
# print("slots: ", slots)
request['slots'] = slots
request['bid_per_slot']= float(request['bid'])/slots
sorted_requests = sorted(requests.values(), key=lambda k:k['bid_per_slot'], reverse = True)
slots_total = 1600
slots_tmp = 0
bids_tmp = 0
for sr in sorted_requests:
slots_tmp += sr['slots']
if slots_tmp <= slots_total:
bids_tmp += int(sr['bid'])
else:
break
print("total social welfare: ", bids_tmp)
def compare_with_slot():
requests = generate_test_data(64,256,16*4*100,'reliable',0)
test_all(requests)
test_slot_allocate(requests)
if __name__ == '__main__':
# test_pub_socket();
# test_colony_socket();
# test_all();
# test_slot_allocate(None)
compare_with_slot()
|
bsd-3-clause
| -4,433,797,019,322,345,000
| 27.312155
| 111
| 0.602205
| false
| 3.417472
| true
| false
| false
|
nicostephan/pypuf
|
pypuf/simulation/arbiter_based/ltfarray.py
|
1
|
15968
|
from numpy import prod, shape, sign, dot, array, tile, transpose, concatenate, dstack, swapaxes, sqrt, amax, vectorize
from numpy.random import RandomState
from pypuf import tools
from pypuf.simulation.base import Simulation
class LTFArray(Simulation):
"""
Class that simulates k LTFs with n bits and a constant term each
and constant bias added.
"""
@staticmethod
def combiner_xor(r):
"""
combines output responses with the XOR operation
:param r: a list with a number of vectors of single LTF results
:return: a list of full results, one for each
"""
return prod(r, 1)
@staticmethod
def combiner_ip_mod2(r):
"""
combines output responses with the inner product mod 2 operation
:param r: a list with a number of vectors of single LTF results
:return: a list of full results, one for each
"""
n = len(r[0])
assert n % 2 == 0, 'IP mod 2 is only defined for even n. Sorry!'
return prod(
transpose(
[
amax((r[:,i], r[:,i+1]), 0)
for i in range(0, n, 2)
])
, 1)
@staticmethod
def transform_id(cs, k):
"""
Input transformation that does nothing.
:return:
"""
return array([
tile(c, (k, 1)) # same unmodified challenge for all k LTFs
for c in cs
])
@staticmethod
def transform_atf(cs, k):
"""
Input transformation that simulates an Arbiter PUF
:return:
"""
# Transform with ATF monomials
cs = transpose(
array([
prod(cs[:,i:], 1)
for i in range(len(cs[0]))
])
)
# Same challenge for all k Arbiters
return __class__.transform_id(cs, k)
@staticmethod
def transform_mm(cs, k):
N = len(cs)
n = len(cs[0])
assert k == 2, 'MM transform currently only implemented for k=2. Sorry!'
assert n % 2 == 0, 'MM transform only defined for even n. Sorry!'
cs_1 = cs
cs_2 = transpose(
concatenate(
(
[ cs[:,0] ],
[ cs[:,i] * cs[:,i+1] for i in range(0, n, 2) ],
[ cs[:,i] * cs[:,i+1] * cs[:,i+2] for i in range(0, n-2, 2) ]
)
)
)
result = swapaxes(dstack((cs_1, cs_2)), 1, 2)
assert result.shape == (N, 2, n)
return result
@staticmethod
def transform_lightweight_secure(cs, k):
N = len(cs)
n = len(cs[0])
assert n % 2 == 0, 'Secure Lightweight Input Transformation only defined for even n. Sorry!'
cs = transpose(
concatenate(
(
[ cs[:,i] * cs[:,i+1] for i in range(0, n, 2) ], # ( x1x2, x3x4, ... xn-1xn )
[ cs[:,0] ], # ( x1 )
[ cs[:,i] * cs[:,i+1] for i in range(1, n-2, 2) ], # ( x2x3, x4x5, ... xn-2xn-1 )
)
)
)
assert cs.shape == (N, n)
return __class__.transform_shift(cs, k)
@staticmethod
def transform_shift_lightweight_secure(cs, k):
"""
Input transform as defined by Majzoobi et al. 2008, but with the shift
operation executed first.
"""
N = len(cs)
n = len(cs[0])
assert n % 2 == 0, 'Secure Lightweight Input Transformation only defined for even n. Sorry!'
shifted = __class__.transform_shift(cs, k)
cs = transpose(
concatenate(
(
[ shifted[:,:,i] * shifted[:,:,i+1] for i in range(0, n, 2) ],
[ shifted[:,:,0] ],
[ shifted[:,:,i] * shifted[:,:,i+1] for i in range(1, n-2, 2) ],
)
),
(1, 2, 0)
)
assert cs.shape == (N, k, n)
return cs
@staticmethod
def transform_soelter_lightweight_secure(cs, k):
"""
Input transformation like defined by Majzoobi et al. (cf. transform_lightweight_secure),
but differs in one bit. Introduced by Sölter.
"""
N = len(cs)
n = len(cs[0])
assert n % 2 == 0, 'Sölter\'s Secure Lightweight Input Transformation only defined for even n. Sorry!'
n_half = int(n/2)
cs = transpose(
concatenate(
(
[cs[:, i] * cs[:, i + 1] for i in range(0, n, 2)], # ( x1x2, x3x4, ... xn-1xn )
[cs[:, n_half]], # ( x_(n/2+1) )
[cs[:, i] * cs[:, i + 1] for i in range(1, n - 2, 2)], # ( x2x3, x4x5, ... xn-2xn-1 )
)
)
)
assert cs.shape == (N, n)
return __class__.transform_shift(cs, k)
@staticmethod
def transform_shift(cs, k):
N = len(cs)
n = len(cs[0])
result = swapaxes(array([
concatenate((cs[:,l:], cs[:,:l]), axis=1)
for l in range(k)
]), 0, 1)
assert result.shape == (N, k, n)
return result
@staticmethod
def transform_1_n_bent(cs, k):
"""
For one LTF, we compute the input as follows: the i-th input bit will be the result
of the challenge shifted by i bits to the left, then input into inner product mod 2
function.
The other LTF get the original input.
"""
N = len(cs)
n = len(cs[0])
assert n % 2 == 0, '1-n bent transform only defined for even n. Sorry!'
shift_challenges = __class__.transform_shift(cs, n)
assert shift_challenges.shape == (N, n, n)
bent_challenges = transpose(
array(
[
__class__.combiner_ip_mod2(shift_challenges[:,i,:])
for i in range(n)
]
)
)
assert bent_challenges.shape == (N, n)
return array([
concatenate(
(
[bent_challenges[j]], # 'bent' challenge as generated above
tile(cs[j], (k - 1, 1)) # unmodified challenge for k-1 LTFs
),
axis=0
)
for j in range(N)
])
@staticmethod
def transform_1_1_bent(cs, k):
"""
For one LTF, we compute the input as follows: the first input bit will be
the result of IPmod2 of the original challenge, all other input bits will
remain the same.
The other LTF get the original input.
"""
N = len(cs)
n = len(cs[0])
assert k >= 2, '1-n bent transform currently only implemented for k>=2. Sorry!'
assert n % 2 == 0, '1-n bent transform only defined for even n. Sorry!'
bent_challenge_bits = __class__.combiner_ip_mod2(cs)
assert bent_challenge_bits.shape == (N, )
return array([
concatenate(
(
[concatenate(([[bent_challenge_bits[j]], cs[j][1:]]))], # 'bent' challenge bit plus remainder unchanged
tile(cs[j], (k - 1, 1)) # unmodified challenge for k-1 LTFs
),
axis=0
)
for j in range(N)
])
@staticmethod
def transform_polynomial(cs, k):
"""
This input transformation interprets a challenge c as a
polynomial over the finite field GF(2^n)=F2/f*F2, where f is a
irreducible polynomial of degree n.
The irreducible polynomial f is hard coded and
of degree 8, 16, 24, 32, 48, or 64.
Each Arbiter Chain i receives as input the polynomial c^i
as element of GF(2^n).
"""
N = len(cs)
n = len(cs[0])
assert n in [8, 16, 24, 32, 48, 64], 'Polynomial transformation is only implemented for challenges with n in {8, 16, 24, 32, 48, 64}. ' \
'Sorry!'
if n == 64:
f = [1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 1]
elif n == 48:
f = [1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1,
0, 0, 1]
elif n == 32:
f = [1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
elif n == 24:
f = [1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
elif n == 16:
f = [1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1]
elif n == 8:
f = [1, 0, 1, 0, 0, 1, 1, 0, 1]
""" Transform challenge to 0,1 array to compute transformation with numpy. """
vtransform_to_01 = vectorize(tools.transform_challenge_11_to_01)
cs_01 = array([vtransform_to_01(c) for c in cs])
""" Compute c^i for each challenge for i from 1 to k. """
cs = concatenate([
[tools.poly_mult_div(c, f, k) for c in cs_01]
])
""" Transform challenges back to -1,1 notation. """
vtransform_to_11 = vectorize(tools.transform_challenge_01_to_11)
result = array([vtransform_to_11(c) for c in cs])
assert result.shape == (N, k, n), 'The resulting challenges have not the desired shape. Sorry!'
return result
@staticmethod
def transform_permutation_atf(cs, k):
"""
This transformation performs first a pseudorandom permutation of the challenge k times before applying the
ATF transformation to each challenge.
:param cs:
:param k:
:return:
"""
N = len(cs)
n = len(cs[0])
seed = 0x1234
""" Perform random permutations """
cs_permuted = array(
[
[RandomState(seed + i).permutation(c)
for i in range(k)]
for c in cs
]
)
""" Perform atf transform """
result = transpose(
array([
prod(cs_permuted[:, :, i:], 2)
for i in range(n)
]),
(1, 2, 0)
)
assert result.shape == (N, k, n), 'The resulting challenges have not the desired shape. Sorry!'
return result
@staticmethod
def transform_random(cs, k):
"""
This input transformation chooses for each Arbiter Chain an random challenge based on the initial challenge.
"""
N = len(cs)
n = len(cs[0])
vtransform_to_01 = vectorize(tools.transform_challenge_11_to_01)
cs_01 = array([vtransform_to_01(c) for c in cs])
result = array([RandomState(c).choice((-1, 1), (k, n)) for c in cs_01])
assert result.shape == (N, k, n), 'The resulting challenges have not the desired shape. Sorry!'
return result
@staticmethod
def transform_concat(transform_1, nn, transform_2):
"""
This input transformation will transform the first nn bit of each challenge using transform_1,
the remaining bits using transform_2.
:return: A function that can perform the desired transformation
"""
def transform(cs, k):
(N,n) = cs.shape
cs1 = cs[:,:nn]
cs2 = cs[:,nn:]
transformed_1 = transform_1(cs1, k)
transformed_2 = transform_2(cs2, k)
assert transformed_1.shape == (N, k, nn)
assert transformed_2.shape == (N, k, n - nn)
return concatenate(
(
transformed_1,
transformed_2
),
axis=2
)
transform.__name__ = 'transform_concat_%s_nn%i_%s' % \
(
transform_1.__name__.replace('transform_', ''),
nn,
transform_2.__name__.replace('transform_', '')
)
return transform
@staticmethod
def normal_weights(n, k, mu=0, sigma=1, random_instance=RandomState()):
"""
Returns weights for an array of k LTFs of size n each.
The weights are drawn from a normal distribution with given
mean and std. deviation, if parameters are omitted, the
standard normal distribution is used.
The `normal` method of the optionally provided PRNG instance
is used to obtain the weights. If no PRNG instance provided,
a fresh `numpy.random.RandomState` instance is used.
"""
return random_instance.normal(loc=mu, scale=sigma, size=(k, n))
def __init__(self, weight_array, transform, combiner, bias=False):
"""
Initializes an LTFArray based on given weight_array and
combiner function with appropriate transformation of challenges.
The bias is committed through the (n+1)th value in weight_array.
So the parameter bias only states if the given weight_array has
n+1 values (or not) while the challenges still has length n.
"""
(self.k, self.n) = shape(weight_array)
self.weight_array = weight_array
self.transform = transform
self.combiner = combiner
self.bias = bias
def eval(self, inputs):
"""
evaluates a given list of challenges regarding bias
        :param inputs: list of challenges
:return: list of responses
"""
if self.bias:
inputs = tools.iter_append_last(inputs, 1)
return sign(self.val(inputs))
def val(self, inputs):
return self.combiner(self.ltf_eval(self.transform(inputs, self.k)))
def ltf_eval(self, inputs):
"""
:return: array
"""
return transpose(
array([
dot(
inputs[:,l],
self.weight_array[l]
)
for l in range(self.k)
])
)
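# --- Illustrative usage sketch only (not part of the original module) ---
# Simulates a small 2-XOR arbiter PUF on 8-bit challenges using only the
# public pieces defined above; the seeds and sizes are arbitrary demo values.
def _ltfarray_demo():
    prng = RandomState(0xbeef)
    weights = LTFArray.normal_weights(n=8, k=2, random_instance=prng)
    puf = LTFArray(weights, LTFArray.transform_atf, LTFArray.combiner_xor)
    challenges = prng.choice((-1, 1), (10, 8))
    return puf.eval(challenges)  # one +/-1 response per challenge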
class NoisyLTFArray(LTFArray):
"""
Class that simulates k LTFs with n bits and a constant term each
with noise effect and constant bias added.
"""
@staticmethod
def sigma_noise_from_random_weights(n, sigma_weight, noisiness=0.1):
"""
returns sd of noise (sigma_noise) out of n stages with
sd of weight differences (sigma_weight) and noisiness factor
"""
return sqrt(n) * sigma_weight * noisiness
def __init__(self, weight_array, transform, combiner, sigma_noise,
random_instance=RandomState(), bias=False):
"""
Initializes LTF array like in LTFArray and uses the provided
PRNG instance for drawing noise values. If no PRNG provided, a
fresh `numpy.random.RandomState` instance is used.
"""
super().__init__(weight_array, transform, combiner, bias)
self.sigma_noise = sigma_noise
self.random = random_instance
def ltf_eval(self, inputs):
"""
Calculates weight_array with given set of challenges including noise.
The noise effect is a normal distributed random variable with mu=0,
sigma=sigma_noise.
Random numbers are drawn from the PRNG instance generated when
initializing the NoisyLTFArray.
"""
noise = self.random.normal(loc=0, scale=self.sigma_noise, size=(1, self.k))
return super().ltf_eval(inputs) + noise
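# --- Illustrative usage sketch only (not part of the original module) ---
# Builds a noisy single-chain simulation with the helper above; the noisiness
# value and the seeds are arbitrary demo choices.
def _noisy_ltfarray_demo():
    prng = RandomState(0xabc)
    weights = NoisyLTFArray.normal_weights(n=16, k=1, random_instance=prng)
    sigma_noise = NoisyLTFArray.sigma_noise_from_random_weights(16, 1, noisiness=0.05)
    puf = NoisyLTFArray(weights, NoisyLTFArray.transform_id,
                        NoisyLTFArray.combiner_xor, sigma_noise,
                        random_instance=RandomState(0xdef))
    challenges = prng.choice((-1, 1), (5, 16))
    return puf.eval(challenges)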
|
gpl-3.0
| -530,663,471,708,576,300
| 33.633406
| 145
| 0.500626
| false
| 3.812321
| false
| false
| false
|
arowser/wireshark-xcoin
|
tools/wireshark_gen.py
|
1
|
95941
|
# -*- python -*-
#
# wireshark_gen.py (part of idl2wrs)
#
# Author : Frank Singleton (frank.singleton@ericsson.com)
#
# Copyright (C) 2001 Frank Singleton, Ericsson Inc.
#
# This file is a backend to "omniidl", used to generate "Wireshark"
# dissectors from CORBA IDL descriptions. The output language generated
# is "C". It will generate code to use the GIOP/IIOP get_CDR_XXX API.
#
# Please see packet-giop.h in Wireshark distro for API description.
# Wireshark is available at http://www.wireshark.org/
#
# Omniidl is part of the OmniOrb distribution, and is available at
# http://omniorb.sourceforge.net
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
# 02111-1307, USA.
#
# Description:
#
# Omniidl Back-end which parses an IDL list of "Operation" nodes
# passed from wireshark_be2.py and generates "C" code for compiling
# as a plugin for the Wireshark IP Protocol Analyser.
#
#
# Strategy (sneaky but ...)
#
# problem: I don't know what variables to declare until AFTER the helper functions
# have been built, so ...
#
# There are 2 passes through genHelpers, the first one is there just to
# make sure the fn_hash data struct is populated properly.
# The second pass is the real thing, generating code and declaring
# variables (from the 1st pass) properly.
#
"""Wireshark IDL compiler back-end."""
from omniidl import idlast, idltype, idlutil, output
import sys, string
import tempfile
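# --- Illustrative sketch only (not part of the generator) ---
# A toy, self-contained version of the two-pass strategy described in the
# header comment above: pass 1 only records which "C" variable declarations
# each helper will need (the fn_hash idea), pass 2 emits those declarations
# ahead of the body. The operation names and declarations are made up.
def _two_pass_demo(operation_names):
    fn_hash = {}
    def gen_helper(name, building):
        if building:                        # pass 1: only record needed declarations
            fn_hash.setdefault(name, []).append("guint32 u_octet4;")
            return []
        out = ["/* decode %s */" % name]    # pass 2: declarations first ...
        out.extend(fn_hash[name])           # ... then the body would follow
        return out
    for n in operation_names:               # first pass populates fn_hash
        gen_helper(n, building=True)
    lines = []
    for n in operation_names:               # second pass emits the real code
        lines.extend(gen_helper(n, building=False))
    return lines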
#
# Output class, generates "C" src code for the sub-dissector
#
# in:
#
#
# self - me
# st - output stream
# node - a reference to an Operations object.
# name - scoped name (Module::Module::Interface:: .. ::Operation
#
#
# TODO -- FS
#
# 1. generate hf[] data for searchable fields (but what is searchable?) [done, could be improved]
# 2. add item instead of add_text() [done]
# 3. sequence handling [done]
# 4. User Exceptions [done]
# 5. Fix arrays, and structs containing arrays [done]
# 6. Handle pragmas.
# 7. Exception can be common to many operations, so handle them outside the
# operation helper functions [done]
# 8. Automatic variable declaration [done, improve, still get some collisions.add variable delegator function ]
#     For example, multidimensional arrays.
# 9. wchar and wstring handling [giop API needs improving]
# 10. Support Fixed [done]
# 11. Support attributes (get/set) [started, needs language mapping option, perhaps wireshark GUI option
# to set the attribute function prefix or suffix ? ] For now the prefix is "_get" and "_set"
# eg: attribute string apple => _get_apple and _set_apple
#
# 12. Implement IDL "union" code [done]
# 13. Implement support for plugins [done]
# 14. Don't generate code for empty operations (cf: exceptions without members)
# 15. Generate code to display Enums numerically and symbolically [done]
# 16. Place structs/unions in subtrees
# 17. Recursive struct and union handling [done]
# 18. Improve variable naming for display (eg: structs, unions etc) [done]
#
# Also test, Test, TEST
#
#
# Strategy:
# For every operation and attribute do
# For return val and all parameters do
# find basic IDL type for each parameter
# output get_CDR_xxx
# output exception handling code
# output attribute handling code
#
#
class wireshark_gen_C:
#
# Turn DEBUG stuff on/off
#
DEBUG = 0
#
# Some string constants for our templates
#
c_u_octet8 = "guint64 u_octet8;"
c_s_octet8 = "gint64 s_octet8;"
c_u_octet4 = "guint32 u_octet4;"
c_s_octet4 = "gint32 s_octet4;"
c_u_octet2 = "guint16 u_octet2;"
c_s_octet2 = "gint16 s_octet2;"
c_u_octet1 = "guint8 u_octet1;"
c_s_octet1 = "gint8 s_octet1;"
c_float = "gfloat my_float;"
c_double = "gdouble my_double;"
c_seq = "const gchar *seq = NULL;" # pointer to buffer of gchars
c_i = "guint32 i_"; # loop index
c_i_lim = "guint32 u_octet4_loop_"; # loop limit
c_u_disc = "guint32 disc_u_"; # unsigned int union discriminant variable name (enum)
c_s_disc = "gint32 disc_s_"; # signed int union discriminant variable name (other cases, except Enum)
#
# Constructor
#
def __init__(self, st, protocol_name, dissector_name ,description):
self.st = output.Stream(tempfile.TemporaryFile(),4) # for first pass only
self.st_save = st # where 2nd pass should go
self.protoname = protocol_name # Protocol Name (eg: ECHO)
self.dissname = dissector_name # Dissector name (eg: echo)
self.description = description # Detailed Protocol description (eg: Echo IDL Example)
self.exlist = [] # list of exceptions used in operations.
#self.curr_sname # scoped name of current opnode or exnode I am visiting, used for generating "C" var declares
self.fn_hash = {} # top level hash to contain key = function/exception and val = list of variable declarations
# ie a hash of lists
self.fn_hash_built = 0 # flag to indicate the 1st pass is complete, and the fn_hash is correctly
# populated with operations/vars and exceptions/vars
#
# genCode()
#
# Main entry point, controls sequence of
# generated code.
#
#
def genCode(self,oplist, atlist, enlist, stlist, unlist): # operation,attribute,enums,struct and union lists
self.genHelpers(oplist,stlist,unlist) # sneaky .. call it now, to populate the fn_hash
# so when I come to that operation later, I have the variables to
# declare already.
self.genExceptionHelpers(oplist) # sneaky .. call it now, to populate the fn_hash
# so when I come to that exception later, I have the variables to
# declare already.
self.genAttributeHelpers(atlist) # sneaky .. call it now, to populate the fn_hash
# so when I come to that exception later, I have the variables to
# declare already.
self.fn_hash_built = 1 # DONE, so now I know , see genOperation()
self.st = self.st_save
self.genHeader() # initial dissector comments
self.genEthCopyright() # Wireshark Copyright comments.
self.genGPL() # GPL license
self.genIncludes()
self.genPrototype()
self.genProtocol()
self.genDeclares(oplist,atlist,enlist,stlist,unlist)
if (len(atlist) > 0):
self.genAtList(atlist) # string constant declares for Attributes
if (len(enlist) > 0):
self.genEnList(enlist) # string constant declares for Enums
self.genExceptionHelpers(oplist) # helper function to decode user exceptions that have members
self.genExceptionDelegator(oplist) # finds the helper function to decode a user exception
if (len(atlist) > 0):
self.genAttributeHelpers(atlist) # helper function to decode "attributes"
self.genHelpers(oplist,stlist,unlist) # operation, struct and union decode helper functions
self.genMainEntryStart(oplist)
self.genOpDelegator(oplist)
self.genAtDelegator(atlist)
self.genMainEntryEnd()
self.gen_proto_register(oplist, atlist, stlist, unlist)
self.gen_proto_reg_handoff(oplist)
# All the dissectors are now built-in
#self.gen_plugin_register()
#self.dumpvars() # debug
self.genModelines();
#
# genHeader
#
# Generate Standard Wireshark Header Comments
#
#
def genHeader(self):
self.st.out(self.template_Header,dissector_name=self.dissname)
if self.DEBUG:
print "XXX genHeader"
#
# genEthCopyright
#
# Wireshark Copyright Info
#
#
def genEthCopyright(self):
if self.DEBUG:
print "XXX genEthCopyright"
self.st.out(self.template_wireshark_copyright)
#
# genModelines
#
# Modelines info
#
#
def genModelines(self):
if self.DEBUG:
print "XXX genModelines"
self.st.out(self.template_Modelines)
#
# genGPL
#
# GPL license
#
#
def genGPL(self):
if self.DEBUG:
print "XXX genGPL"
self.st.out(self.template_GPL)
#
# genIncludes
#
    # Include files
#
#
def genIncludes(self):
if self.DEBUG:
print "XXX genIncludes"
self.st.out(self.template_Includes)
#
# genOpDeclares()
#
# Generate hf variables for operation filters
#
# in: opnode ( an operation node)
#
def genOpDeclares(self, op):
if self.DEBUG:
print "XXX genOpDeclares"
print "XXX return type = " , op.returnType().kind()
sname = self.namespace(op, "_")
rt = op.returnType()
if (rt.kind() != idltype.tk_void):
if (rt.kind() == idltype.tk_alias): # a typdef return val possibly ?
#self.get_CDR_alias(rt, rt.name() )
self.st.out(self.template_hf, name=sname + "_return")
else:
self.st.out(self.template_hf, name=sname + "_return")
for p in op.parameters():
self.st.out(self.template_hf, name=sname + "_" + p.identifier())
#
# genAtDeclares()
#
# Generate hf variables for attributes
#
# in: at ( an attribute)
#
def genAtDeclares(self, at):
if self.DEBUG:
print "XXX genAtDeclares"
for decl in at.declarators():
sname = self.namespace(decl, "_")
self.st.out(self.template_hf, name="get" + "_" + sname + "_" + decl.identifier())
if not at.readonly():
self.st.out(self.template_hf, name="set" + "_" + sname + "_" + decl.identifier())
#
# genStDeclares()
#
# Generate hf variables for structs
#
# in: st ( a struct)
#
def genStDeclares(self, st):
if self.DEBUG:
print "XXX genStDeclares"
sname = self.namespace(st, "_")
for m in st.members():
for decl in m.declarators():
self.st.out(self.template_hf, name=sname + "_" + decl.identifier())
#
# genExDeclares()
#
# Generate hf variables for user exception filters
#
# in: exnode ( an exception node)
#
def genExDeclares(self,ex):
if self.DEBUG:
print "XXX genExDeclares"
sname = self.namespace(ex, "_")
for m in ex.members():
for decl in m.declarators():
self.st.out(self.template_hf, name=sname + "_" + decl.identifier())
#
# genUnionDeclares()
#
# Generate hf variables for union filters
#
# in: un ( an union)
#
def genUnionDeclares(self,un):
if self.DEBUG:
print "XXX genUnionDeclares"
sname = self.namespace(un, "_")
self.st.out(self.template_hf, name=sname + "_" + un.identifier())
for uc in un.cases(): # for all UnionCase objects in this union
for cl in uc.labels(): # for all Caselabel objects in this UnionCase
self.st.out(self.template_hf, name=sname + "_" + uc.declarator().identifier())
#
# genExpertInfoDeclares()
#
# Generate ei variables for expert info filters
#
def genExpertInfoDeclares(self):
if self.DEBUG:
print "XXX genExpertInfoDeclares"
self.st.out(self.template_proto_register_ei_filters, dissector_name=self.dissname)
#
# genDeclares
#
# generate function prototypes if required
#
# Currently this is used for struct and union helper function declarations.
#
def genDeclares(self,oplist,atlist,enlist,stlist,unlist):
if self.DEBUG:
print "XXX genDeclares"
# prototype for operation filters
self.st.out(self.template_hf_operations)
#operation specific filters
if (len(oplist) > 0):
self.st.out(self.template_proto_register_op_filter_comment)
for op in oplist:
self.genOpDeclares(op)
#attribute filters
if (len(atlist) > 0):
self.st.out(self.template_proto_register_at_filter_comment)
for at in atlist:
self.genAtDeclares(at)
#struct filters
if (len(stlist) > 0):
self.st.out(self.template_proto_register_st_filter_comment)
for st in stlist:
self.genStDeclares(st)
# exception List filters
exlist = self.get_exceptionList(oplist) # grab list of exception nodes
if (len(exlist) > 0):
self.st.out(self.template_proto_register_ex_filter_comment)
for ex in exlist:
if (ex.members()): # only if has members
self.genExDeclares(ex)
#union filters
if (len(unlist) > 0):
self.st.out(self.template_proto_register_un_filter_comment)
for un in unlist:
self.genUnionDeclares(un)
#expert info filters
self.genExpertInfoDeclares()
# prototype for start_dissecting()
self.st.out(self.template_prototype_start_dissecting)
# struct prototypes
if len(stlist):
self.st.out(self.template_prototype_struct_start)
for st in stlist:
#print st.repoId()
sname = self.namespace(st, "_")
self.st.out(self.template_prototype_struct_body, stname=st.repoId(),name=sname)
self.st.out(self.template_prototype_struct_end)
# union prototypes
if len(unlist):
self.st.out(self.template_prototype_union_start)
for un in unlist:
sname = self.namespace(un, "_")
self.st.out(self.template_prototype_union_body, unname=un.repoId(),name=sname)
self.st.out(self.template_prototype_union_end)
#
# genPrototype
#
#
def genPrototype(self):
self.st.out(self.template_prototype, dissector_name=self.dissname)
#
# genProtocol
#
#
def genProtocol(self):
self.st.out(self.template_protocol, dissector_name=self.dissname)
self.st.out(self.template_init_boundary)
#
# genMainEntryStart
#
def genMainEntryStart(self,oplist):
self.st.out(self.template_main_dissector_start, dissname=self.dissname, disprot=self.protoname)
self.st.inc_indent()
self.st.out(self.template_main_dissector_switch_msgtype_start)
self.st.out(self.template_main_dissector_switch_msgtype_start_request_reply)
self.st.inc_indent()
#
# genMainEntryEnd
#
def genMainEntryEnd(self):
self.st.out(self.template_main_dissector_switch_msgtype_end_request_reply)
self.st.dec_indent()
self.st.out(self.template_main_dissector_switch_msgtype_all_other_msgtype)
self.st.dec_indent()
self.st.out(self.template_main_dissector_end)
#
# genAtList
#
# in: atlist
#
    # out: C code for IDL attribute declarations.
#
# NOTE: Mapping of attributes to operation(function) names is tricky.
#
# The actual accessor function names are language-mapping specific. The attribute name
# is subject to OMG IDL's name scoping rules; the accessor function names are
# guaranteed not to collide with any legal operation names specifiable in OMG IDL.
#
# eg:
#
# static const char get_Penguin_Echo_get_width_at[] = "get_width" ;
# static const char set_Penguin_Echo_set_width_at[] = "set_width" ;
#
# or:
#
# static const char get_Penguin_Echo_get_width_at[] = "_get_width" ;
# static const char set_Penguin_Echo_set_width_at[] = "_set_width" ;
#
    # TODO: Implement some language-dependent templates to handle naming conventions
# language <=> attribute. for C, C++. Java etc
#
# OR, just add a runtime GUI option to select language binding for attributes -- FS
#
#
#
# ie: def genAtlist(self,atlist,language)
#
def genAtList(self,atlist):
self.st.out(self.template_comment_attributes_start)
for n in atlist:
for i in n.declarators(): #
sname = self.namespace(i, "_")
atname = i.identifier()
self.st.out(self.template_attributes_declare_Java_get, sname=sname, atname=atname)
if not n.readonly():
self.st.out(self.template_attributes_declare_Java_set, sname=sname, atname=atname)
self.st.out(self.template_comment_attributes_end)
#
# genEnList
#
# in: enlist
#
    # out: C code for IDL Enum declarations using "static const value_string" template
#
def genEnList(self,enlist):
self.st.out(self.template_comment_enums_start)
for enum in enlist:
sname = self.namespace(enum, "_")
self.st.out(self.template_comment_enum_comment, ename=enum.repoId())
self.st.out(self.template_value_string_start, valstringname=sname)
for enumerator in enum.enumerators():
self.st.out(self.template_value_string_entry, intval=str(self.valFromEnum(enum,enumerator)), description=enumerator.identifier())
#atname = n.identifier()
self.st.out(self.template_value_string_end, valstringname=sname)
self.st.out(self.template_comment_enums_end)
#
# genExceptionDelegator
#
# in: oplist
#
# out: C code for User exception delegator
#
# eg:
#
#
def genExceptionDelegator(self,oplist):
self.st.out(self.template_main_exception_delegator_start)
self.st.inc_indent()
exlist = self.get_exceptionList(oplist) # grab list of ALL UNIQUE exception nodes
for ex in exlist:
if self.DEBUG:
print "XXX Exception " , ex.repoId()
print "XXX Exception Identifier" , ex.identifier()
print "XXX Exception Scoped Name" , ex.scopedName()
if (ex.members()): # only if has members
sname = self.namespace(ex, "_")
exname = ex.repoId()
self.st.out(self.template_ex_delegate_code, sname=sname, exname=ex.repoId())
self.st.dec_indent()
self.st.out(self.template_main_exception_delegator_end)
#
    # genAttributeHelpers()
#
# Generate private helper functions to decode Attributes.
#
# in: atlist
#
# For readonly attribute - generate get_xxx()
# If NOT readonly attribute - also generate set_xxx()
#
def genAttributeHelpers(self,atlist):
if self.DEBUG:
print "XXX genAttributeHelpers: atlist = ", atlist
self.st.out(self.template_attribute_helpers_start)
for attrib in atlist:
for decl in attrib.declarators():
self.genAtHelper(attrib,decl,"get") # get accessor
if not attrib.readonly():
self.genAtHelper(attrib,decl,"set") # set accessor
self.st.out(self.template_attribute_helpers_end)
#
# genAtHelper()
#
# Generate private helper functions to decode an attribute
#
# in: at - attribute node
# in: decl - declarator belonging to this attribute
# in: order - to generate a "get" or "set" helper
def genAtHelper(self,attrib,decl,order):
if self.DEBUG:
print "XXX genAtHelper"
sname = order + "_" + self.namespace(decl, "_") # must use set or get prefix to avoid collision
self.curr_sname = sname # update current opnode/exnode scoped name
if not self.fn_hash_built:
self.fn_hash[sname] = [] # init empty list as val for this sname key
# but only if the fn_hash is not already built
self.st.out(self.template_attribute_helper_function_start, sname=sname, atname=decl.repoId())
self.st.inc_indent()
if (len(self.fn_hash[sname]) > 0):
self.st.out(self.template_helper_function_vars_start)
self.dumpCvars(sname)
self.st.out(self.template_helper_function_vars_end )
self.getCDR(attrib.attrType(), sname + "_" + decl.identifier() )
self.st.dec_indent()
self.st.out(self.template_attribute_helper_function_end)
#
# genExceptionHelpers()
#
# Generate private helper functions to decode Exceptions used
# within operations
#
# in: oplist
#
def genExceptionHelpers(self,oplist):
exlist = self.get_exceptionList(oplist) # grab list of exception nodes
if self.DEBUG:
print "XXX genExceptionHelpers: exlist = ", exlist
self.st.out(self.template_exception_helpers_start)
for ex in exlist:
if (ex.members()): # only if has members
#print "XXX Exception = " + ex.identifier()
self.genExHelper(ex)
self.st.out(self.template_exception_helpers_end)
#
    # genExHelper()
#
# Generate private helper functions to decode User Exceptions
#
# in: exnode ( an exception node)
#
def genExHelper(self,ex):
if self.DEBUG:
print "XXX genExHelper"
sname = self.namespace(ex, "_")
self.curr_sname = sname # update current opnode/exnode scoped name
if not self.fn_hash_built:
self.fn_hash[sname] = [] # init empty list as val for this sname key
# but only if the fn_hash is not already built
self.st.out(self.template_exception_helper_function_start, sname=sname, exname=ex.repoId())
self.st.inc_indent()
if (len(self.fn_hash[sname]) > 0):
self.st.out(self.template_helper_function_vars_start)
self.dumpCvars(sname)
self.st.out(self.template_helper_function_vars_end )
for m in ex.members():
if self.DEBUG:
print "XXX genExhelper, member = ", m, "member type = ", m.memberType()
for decl in m.declarators():
if self.DEBUG:
print "XXX genExhelper, d = ", decl
if decl.sizes(): # an array
indices = self.get_indices_from_sizes(decl.sizes())
string_indices = '%i ' % indices # convert int to string
self.st.out(self.template_get_CDR_array_comment, aname=decl.identifier(), asize=string_indices)
self.st.out(self.template_get_CDR_array_start, aname=decl.identifier(), aval=string_indices)
self.addvar(self.c_i + decl.identifier() + ";")
self.st.inc_indent()
self.getCDR(m.memberType(), sname + "_" + decl.identifier() )
self.st.dec_indent()
self.st.out(self.template_get_CDR_array_end)
else:
self.getCDR(m.memberType(), sname + "_" + decl.identifier() )
self.st.dec_indent()
self.st.out(self.template_exception_helper_function_end)
#
# genHelpers()
#
# Generate private helper functions for each IDL operation.
# Generate private helper functions for each IDL struct.
# Generate private helper functions for each IDL union.
#
#
# in: oplist, stlist, unlist
#
def genHelpers(self,oplist,stlist,unlist):
for op in oplist:
self.genOperation(op)
for st in stlist:
self.genStructHelper(st)
for un in unlist:
self.genUnionHelper(un)
#
# genOperation()
#
    # Generate private helper functions for a specific IDL operation.
#
# in: opnode
#
def genOperation(self,opnode):
if self.DEBUG:
print "XXX genOperation called"
sname = self.namespace(opnode, "_")
if not self.fn_hash_built:
self.fn_hash[sname] = [] # init empty list as val for this sname key
# but only if the fn_hash is not already built
self.curr_sname = sname # update current opnode's scoped name
opname = opnode.identifier()
self.st.out(self.template_helper_function_comment, repoid=opnode.repoId() )
self.st.out(self.template_helper_function_start, sname=sname)
self.st.inc_indent()
if (len(self.fn_hash[sname]) > 0):
self.st.out(self.template_helper_function_vars_start)
self.dumpCvars(sname)
self.st.out(self.template_helper_function_vars_end )
self.st.out(self.template_helper_switch_msgtype_start)
self.st.out(self.template_helper_switch_msgtype_request_start)
self.st.inc_indent()
self.genOperationRequest(opnode)
self.st.out(self.template_helper_switch_msgtype_request_end)
self.st.dec_indent()
self.st.out(self.template_helper_switch_msgtype_reply_start)
self.st.inc_indent()
self.st.out(self.template_helper_switch_rep_status_start)
self.st.out(self.template_helper_switch_msgtype_reply_no_exception_start)
self.st.inc_indent()
self.genOperationReply(opnode)
self.st.out(self.template_helper_switch_msgtype_reply_no_exception_end)
self.st.dec_indent()
self.st.out(self.template_helper_switch_msgtype_reply_user_exception_start)
self.st.inc_indent()
self.genOpExceptions(opnode)
self.st.out(self.template_helper_switch_msgtype_reply_user_exception_end)
self.st.dec_indent()
self.st.out(self.template_helper_switch_msgtype_reply_default_start, dissector_name=self.dissname)
self.st.out(self.template_helper_switch_msgtype_reply_default_end)
self.st.out(self.template_helper_switch_rep_status_end)
self.st.dec_indent()
self.st.out(self.template_helper_switch_msgtype_default_start, dissector_name=self.dissname)
self.st.out(self.template_helper_switch_msgtype_default_end)
self.st.out(self.template_helper_switch_msgtype_end)
self.st.dec_indent()
self.st.out(self.template_helper_function_end, sname=sname)
#
# Decode function parameters for a GIOP request message
#
#
def genOperationRequest(self,opnode):
for p in opnode.parameters():
if p.is_in():
if self.DEBUG:
print "XXX parameter = " ,p
print "XXX parameter type = " ,p.paramType()
print "XXX parameter type kind = " ,p.paramType().kind()
self.getCDR(p.paramType(), self.curr_sname + "_" + p.identifier())
#
# Decode function parameters for a GIOP reply message
#
def genOperationReply(self,opnode):
rt = opnode.returnType() # get return type
if self.DEBUG:
print "XXX genOperationReply"
print "XXX opnode = " , opnode
print "XXX return type = " , rt
print "XXX return type.unalias = " , rt.unalias()
print "XXX return type.kind() = " , rt.kind();
sname = self.namespace(opnode, "_")
if (rt.kind() == idltype.tk_alias): # a typdef return val possibly ?
#self.getCDR(rt.decl().alias().aliasType(),"dummy") # return value maybe a typedef
self.get_CDR_alias(rt, sname + "_return" )
#self.get_CDR_alias(rt, rt.name() )
else:
self.getCDR(rt, sname + "_return") # return value is NOT an alias
for p in opnode.parameters():
if p.is_out(): # out or inout
self.getCDR(p.paramType(), self.curr_sname + "_" + p.identifier())
#self.st.dec_indent()
def genOpExceptions(self,opnode):
for ex in opnode.raises():
if ex.members():
#print ex.members()
for m in ex.members():
t=0
#print m.memberType(), m.memberType().kind()
#
# Delegator for Operations
#
def genOpDelegator(self,oplist):
for op in oplist:
iname = "/".join(op.scopedName()[:-1])
opname = op.identifier()
sname = self.namespace(op, "_")
self.st.out(self.template_op_delegate_code, interface=iname, sname=sname, opname=opname)
#
# Delegator for Attributes
#
def genAtDelegator(self,atlist):
for a in atlist:
for i in a.declarators():
atname = i.identifier()
sname = self.namespace(i, "_")
self.st.out(self.template_at_delegate_code_get, sname=sname)
if not a.readonly():
self.st.out(self.template_at_delegate_code_set, sname=sname)
#
# Add a variable declaration to the hash of list
#
def addvar(self, var):
if not ( var in self.fn_hash[self.curr_sname] ):
self.fn_hash[self.curr_sname].append(var)
#
# Print the variable declaration from the hash of list
#
def dumpvars(self):
for fn in self.fn_hash.keys():
print "FN = " + fn
for v in self.fn_hash[fn]:
print "-> " + v
#
# Print the "C" variable declaration from the hash of list
# for a given scoped operation name (eg: tux_penguin_eat)
#
def dumpCvars(self, sname):
for v in self.fn_hash[sname]:
self.st.out(v)
#
# Given an enum node, and a enumerator node, return
# the enumerator's numerical value.
#
# eg: enum Color {red,green,blue} should return
# val = 1 for green
#
def valFromEnum(self,enumNode, enumeratorNode):
if self.DEBUG:
print "XXX valFromEnum, enumNode = ", enumNode, " from ", enumNode.repoId()
print "XXX valFromEnum, enumeratorNode = ", enumeratorNode, " from ", enumeratorNode.repoId()
if isinstance(enumeratorNode,idlast.Enumerator):
value = enumNode.enumerators().index(enumeratorNode)
return value
## tk_null = 0
## tk_void = 1
## tk_short = 2
## tk_long = 3
## tk_ushort = 4
## tk_ulong = 5
## tk_float = 6
## tk_double = 7
## tk_boolean = 8
## tk_char = 9
## tk_octet = 10
## tk_any = 11
## tk_TypeCode = 12
## tk_Principal = 13
## tk_objref = 14
## tk_struct = 15
## tk_union = 16
## tk_enum = 17
## tk_string = 18
## tk_sequence = 19
## tk_array = 20
## tk_alias = 21
## tk_except = 22
## tk_longlong = 23
## tk_ulonglong = 24
## tk_longdouble = 25
## tk_wchar = 26
## tk_wstring = 27
## tk_fixed = 28
## tk_value = 29
## tk_value_box = 30
## tk_native = 31
## tk_abstract_interface = 32
#
# getCDR()
#
# This is the main "iterator" function. It takes a node, and tries to output
# a get_CDR_XXX accessor method(s). It can call itself multiple times
# if I find nested structures etc.
#
def getCDR(self,type,name="fred"):
pt = type.unalias().kind() # param CDR type
pn = name # param name
if self.DEBUG:
print "XXX getCDR: kind = " , pt
print "XXX getCDR: name = " , pn
if pt == idltype.tk_ulong:
self.get_CDR_ulong(pn)
elif pt == idltype.tk_longlong:
self.get_CDR_longlong(pn)
elif pt == idltype.tk_ulonglong:
self.get_CDR_ulonglong(pn)
elif pt == idltype.tk_void:
self.get_CDR_void(pn)
elif pt == idltype.tk_short:
self.get_CDR_short(pn)
elif pt == idltype.tk_long:
self.get_CDR_long(pn)
elif pt == idltype.tk_ushort:
self.get_CDR_ushort(pn)
elif pt == idltype.tk_float:
self.get_CDR_float(pn)
elif pt == idltype.tk_double:
self.get_CDR_double(pn)
elif pt == idltype.tk_fixed:
self.get_CDR_fixed(type.unalias(),pn)
elif pt == idltype.tk_boolean:
self.get_CDR_boolean(pn)
elif pt == idltype.tk_char:
self.get_CDR_char(pn)
elif pt == idltype.tk_octet:
self.get_CDR_octet(pn)
elif pt == idltype.tk_any:
self.get_CDR_any(pn)
elif pt == idltype.tk_string:
self.get_CDR_string(pn)
elif pt == idltype.tk_wstring:
self.get_CDR_wstring(pn)
elif pt == idltype.tk_wchar:
self.get_CDR_wchar(pn)
elif pt == idltype.tk_enum:
#print type.decl()
self.get_CDR_enum(pn,type)
#self.get_CDR_enum(pn)
elif pt == idltype.tk_struct:
self.get_CDR_struct(type,pn)
elif pt == idltype.tk_TypeCode: # will I ever get here ?
self.get_CDR_TypeCode(pn)
elif pt == idltype.tk_sequence:
if type.unalias().seqType().kind() == idltype.tk_octet:
self.get_CDR_sequence_octet(type,pn)
else:
self.get_CDR_sequence(type,pn)
elif pt == idltype.tk_objref:
self.get_CDR_objref(type,pn)
elif pt == idltype.tk_array:
pn = pn # Supported elsewhere
elif pt == idltype.tk_union:
self.get_CDR_union(type,pn)
elif pt == idltype.tk_alias:
if self.DEBUG:
print "XXXXX Alias type XXXXX " , type
self.get_CDR_alias(type,pn)
else:
self.genWARNING("Unknown typecode = " + '%i ' % pt) # put comment in source code
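    #
    # Illustrative sketch (added commentary, not part of the generated dissector;
    # the operation and parameter names below are hypothetical): for an IDL
    # parameter "in unsigned long seqnum" of an operation echoULong inside
    # interface Penguin::Echo, getCDR() is called with pt == idltype.tk_ulong and
    # pn == "Penguin_Echo_echoULong_seqnum", so get_CDR_ulong(pn) expands
    # template_get_CDR_ulong into C code along the lines of:
    #
    #   proto_tree_add_uint(tree, hf_Penguin_Echo_echoULong_seqnum, tvb, *offset-4, 4,
    #       get_CDR_ulong(tvb,offset,stream_is_big_endian, boundary));
    #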
#
# get_CDR_XXX methods are here ..
#
#
def get_CDR_ulong(self,pn):
self.st.out(self.template_get_CDR_ulong, hfname=pn)
def get_CDR_short(self,pn):
self.st.out(self.template_get_CDR_short, hfname=pn)
def get_CDR_void(self,pn):
self.st.out(self.template_get_CDR_void, hfname=pn)
def get_CDR_long(self,pn):
self.st.out(self.template_get_CDR_long, hfname=pn)
def get_CDR_ushort(self,pn):
self.st.out(self.template_get_CDR_ushort, hfname=pn)
def get_CDR_float(self,pn):
self.st.out(self.template_get_CDR_float, hfname=pn)
def get_CDR_double(self,pn):
self.st.out(self.template_get_CDR_double, hfname=pn)
def get_CDR_longlong(self,pn):
self.st.out(self.template_get_CDR_longlong, hfname=pn)
def get_CDR_ulonglong(self,pn):
self.st.out(self.template_get_CDR_ulonglong, hfname=pn)
def get_CDR_boolean(self,pn):
self.st.out(self.template_get_CDR_boolean, hfname=pn)
def get_CDR_fixed(self,type,pn):
if self.DEBUG:
print "XXXX calling get_CDR_fixed, type = ", type
print "XXXX calling get_CDR_fixed, type.digits() = ", type.digits()
print "XXXX calling get_CDR_fixed, type.scale() = ", type.scale()
string_digits = '%i ' % type.digits() # convert int to string
string_scale = '%i ' % type.scale() # convert int to string
string_length = '%i ' % self.dig_to_len(type.digits()) # how many octets to hilight for a number of digits
self.st.out(self.template_get_CDR_fixed, varname=pn, digits=string_digits, scale=string_scale, length=string_length )
self.addvar(self.c_seq)
def get_CDR_char(self,pn):
self.st.out(self.template_get_CDR_char, hfname=pn)
def get_CDR_octet(self,pn):
self.st.out(self.template_get_CDR_octet, hfname=pn)
def get_CDR_any(self,pn):
self.st.out(self.template_get_CDR_any, varname=pn)
def get_CDR_enum(self,pn,type):
#self.st.out(self.template_get_CDR_enum, hfname=pn)
sname = self.namespace(type.unalias(), "_")
self.st.out(self.template_get_CDR_enum_symbolic, valstringarray=sname,hfname=pn)
self.addvar(self.c_u_octet4)
def get_CDR_string(self,pn):
self.st.out(self.template_get_CDR_string, hfname=pn)
def get_CDR_wstring(self,pn):
self.st.out(self.template_get_CDR_wstring, varname=pn)
self.addvar(self.c_u_octet4)
self.addvar(self.c_seq)
def get_CDR_wchar(self,pn):
self.st.out(self.template_get_CDR_wchar, varname=pn)
self.addvar(self.c_s_octet1)
self.addvar(self.c_seq)
def get_CDR_TypeCode(self,pn):
self.st.out(self.template_get_CDR_TypeCode, varname=pn)
self.addvar(self.c_u_octet4)
def get_CDR_objref(self,type,pn):
self.st.out(self.template_get_CDR_object)
def get_CDR_sequence_len(self,pn):
self.st.out(self.template_get_CDR_sequence_length, seqname=pn)
def get_CDR_union(self,type,pn):
if self.DEBUG:
print "XXX Union type =" , type, " pn = ",pn
print "XXX Union type.decl()" , type.decl()
print "XXX Union Scoped Name" , type.scopedName()
# If I am a typedef union {..}; node then find the union node
if isinstance(type.decl(), idlast.Declarator):
ntype = type.decl().alias().aliasType().decl()
else:
ntype = type.decl() # I am a union node
if self.DEBUG:
print "XXX Union ntype =" , ntype
sname = self.namespace(ntype, "_")
self.st.out(self.template_union_start, name=sname )
# Output a call to the union helper function so I can handle recursive union also.
self.st.out(self.template_decode_union,name=sname)
self.st.out(self.template_union_end, name=sname )
#
# getCDR_hf()
#
# This takes a node, and tries to output the appropriate item for the
# hf array.
#
def getCDR_hf(self,type,desc,filter,hf_name="fred"):
pt = type.unalias().kind() # param CDR type
pn = hf_name # param name
if self.DEBUG:
print "XXX getCDR_hf: kind = " , pt
print "XXX getCDR_hf: name = " , pn
if pt == idltype.tk_ulong:
self.get_CDR_ulong_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_longlong:
self.get_CDR_longlong_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_ulonglong:
self.get_CDR_ulonglong_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_void:
pt = pt # no hf_ variables needed
elif pt == idltype.tk_short:
self.get_CDR_short_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_long:
self.get_CDR_long_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_ushort:
self.get_CDR_ushort_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_float:
self.get_CDR_float_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_double:
self.get_CDR_double_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_fixed:
pt = pt # no hf_ variables needed
elif pt == idltype.tk_boolean:
self.get_CDR_boolean_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_char:
self.get_CDR_char_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_octet:
self.get_CDR_octet_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_any:
pt = pt # no hf_ variables needed
elif pt == idltype.tk_string:
self.get_CDR_string_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_wstring:
self.get_CDR_wstring_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_wchar:
self.get_CDR_wchar_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_enum:
self.get_CDR_enum_hf(pn, type, desc, filter, self.dissname)
elif pt == idltype.tk_struct:
pt = pt # no hf_ variables needed (should be already contained in struct members)
elif pt == idltype.tk_TypeCode: # will I ever get here ?
self.get_CDR_TypeCode_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_sequence:
if type.unalias().seqType().kind() == idltype.tk_octet:
self.get_CDR_sequence_octet_hf(type, pn, desc, filter, self.dissname)
else:
self.get_CDR_sequence_hf(type, pn, desc, filter, self.dissname)
elif pt == idltype.tk_objref:
pt = pt # no object specific hf_ variables used, use generic ones from giop dissector
elif pt == idltype.tk_array:
pt = pt # Supported elsewhere
elif pt == idltype.tk_union:
pt = pt # no hf_ variables needed (should be already contained in union members)
elif pt == idltype.tk_alias:
if self.DEBUG:
print "XXXXX Alias type hf XXXXX " , type
self.get_CDR_alias_hf(type,pn)
else:
self.genWARNING("Unknown typecode = " + '%i ' % pt) # put comment in source code
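    #
    # Illustrative sketch (added commentary; names in angle brackets are
    # placeholders and the parameter is the same hypothetical "seqnum" as above):
    # for an unsigned long parameter, getCDR_hf() calls get_CDR_ulong_hf(), and
    # template_get_CDR_ulong_hf emits an hf[] entry roughly like:
    #
    #   {&hf_Penguin_Echo_echoULong_seqnum,
    #    {"seqnum","giop-<dissector_name>.<filtername>",FT_UINT32,BASE_DEC,NULL,0x0,NULL,HFILL}},
    #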
#
# get_CDR_XXX_hf methods are here ..
#
#
def get_CDR_ulong_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_ulong_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_short_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_short_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_long_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_long_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_ushort_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_ushort_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_float_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_float_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_double_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_double_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_longlong_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_longlong_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_ulonglong_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_ulonglong_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_boolean_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_boolean_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_char_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_char_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_octet_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_octet_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_enum_hf(self,pn,type,desc,filter,diss):
sname = self.namespace(type.unalias(), "_")
self.st.out(self.template_get_CDR_enum_symbolic_hf, valstringarray=sname,hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_string_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_string_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_wstring_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_wstring_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
# self.addvar(self.c_u_octet4)
# self.addvar(self.c_seq)
def get_CDR_wchar_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_wchar_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
# self.addvar(self.c_s_octet1)
# self.addvar(self.c_seq)
def get_CDR_TypeCode_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_TypeCode_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_sequence_octet_hf(self,type,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_sequence_octet_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_sequence_hf(self,type,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_sequence_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_alias_hf(self,type,pn):
if self.DEBUG:
print "XXX get_CDR_alias_hf, type = " ,type , " pn = " , pn
print "XXX get_CDR_alias_hf, type.decl() = " ,type.decl()
print "XXX get_CDR_alias_hf, type.decl().alias() = " ,type.decl().alias()
decl = type.decl() # get declarator object
if (decl.sizes()): # a typedef array
#indices = self.get_indices_from_sizes(decl.sizes())
#string_indices = '%i ' % indices # convert int to string
#self.st.out(self.template_get_CDR_array_comment, aname=pn, asize=string_indices)
#self.st.out(self.template_get_CDR_array_start, aname=pn, aval=string_indices)
#self.addvar(self.c_i + pn + ";")
#self.st.inc_indent()
self.getCDR_hf(type.decl().alias().aliasType(), pn )
#self.st.dec_indent()
#self.st.out(self.template_get_CDR_array_end)
        else: # a simple typedef
if self.DEBUG:
print "XXX get_CDR_alias_hf, type = " ,type , " pn = " , pn
print "XXX get_CDR_alias_hf, type.decl() = " ,type.decl()
self.getCDR_hf(type, decl.identifier() )
#
# Code to generate Union Helper functions
#
# in: un - a union node
#
#
def genUnionHelper(self,un):
if self.DEBUG:
print "XXX genUnionHelper called"
print "XXX Union type =" , un
print "XXX Union type.switchType()" , un.switchType()
print "XXX Union Scoped Name" , un.scopedName()
sname = self.namespace(un, "_")
self.curr_sname = sname # update current opnode/exnode/stnode/unnode scoped name
if not self.fn_hash_built:
self.fn_hash[sname] = [] # init empty list as val for this sname key
# but only if the fn_hash is not already built
self.st.out(self.template_union_helper_function_start, sname=sname, unname=un.repoId())
self.st.inc_indent()
if (len(self.fn_hash[sname]) > 0):
self.st.out(self.template_helper_function_vars_start)
self.dumpCvars(sname)
self.st.out(self.template_helper_function_vars_end )
st = un.switchType().unalias() # may be typedef switch type, so find real type
self.st.out(self.template_comment_union_code_start, uname=un.repoId() )
self.getCDR(st, sname + "_" + un.identifier());
        # Depending on what kind of discriminant I come across (enum,integer,char,
# short, boolean), make sure I cast the return value of the get_XXX accessor
# to an appropriate value. Omniidl idlast.CaseLabel.value() accessor will
# return an integer, or an Enumerator object that is then converted to its
# integer equivalent.
#
#
# NOTE - May be able to skip some of this stuff, but leave it in for now -- FS
#
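        # Example (added commentary, hypothetical IDL):
        #   union Bar switch (short sel) { case 1: long a; default: octet b; };
        # Here st.kind() is tk_short, so the chain below emits
        # template_union_code_save_discriminant_short, i.e.
        #   disc_s_Bar = (gint32) s_octet2;
        # and each non-default case later compares against it, e.g.
        #   if (disc_s_Bar == 1 ) {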
if (st.kind() == idltype.tk_enum):
std = st.decl()
self.st.out(self.template_comment_union_code_discriminant, uname=std.repoId() )
self.st.out(self.template_union_code_save_discriminant_enum, discname=un.identifier() )
self.addvar(self.c_s_disc + un.identifier() + ";")
elif (st.kind() == idltype.tk_long):
self.st.out(self.template_union_code_save_discriminant_long, discname=un.identifier() )
self.addvar(self.c_s_disc + un.identifier() + ";")
elif (st.kind() == idltype.tk_ulong):
self.st.out(self.template_union_code_save_discriminant_ulong, discname=un.identifier() )
self.addvar(self.c_s_disc + un.identifier() + ";")
elif (st.kind() == idltype.tk_short):
self.st.out(self.template_union_code_save_discriminant_short, discname=un.identifier() )
self.addvar(self.c_s_disc + un.identifier() + ";")
elif (st.kind() == idltype.tk_ushort):
self.st.out(self.template_union_code_save_discriminant_ushort, discname=un.identifier() )
self.addvar(self.c_s_disc + un.identifier() + ";")
elif (st.kind() == idltype.tk_boolean):
self.st.out(self.template_union_code_save_discriminant_boolean, discname=un.identifier() )
self.addvar(self.c_s_disc + un.identifier() + ";")
elif (st.kind() == idltype.tk_char):
self.st.out(self.template_union_code_save_discriminant_char, discname=un.identifier() )
self.addvar(self.c_s_disc + un.identifier() + ";")
else:
print "XXX Unknown st.kind() = ", st.kind()
#
# Loop over all cases in this union
#
for uc in un.cases(): # for all UnionCase objects in this union
for cl in uc.labels(): # for all Caselabel objects in this UnionCase
# get integer value, even if discriminant is
# an Enumerator node
if isinstance(cl.value(),idlast.Enumerator):
if self.DEBUG:
print "XXX clv.identifier()", cl.value().identifier()
print "XXX clv.repoId()", cl.value().repoId()
print "XXX clv.scopedName()", cl.value().scopedName()
# find index of enumerator in enum declaration
# eg: RED is index 0 in enum Colors { RED, BLUE, GREEN }
clv = self.valFromEnum(std,cl.value())
else:
clv = cl.value()
#print "XXX clv = ",clv
#
                # if char, don't convert to int, but put inside single quotes so that it is understood by C.
# eg: if (disc == 'b')..
#
# TODO : handle \xxx chars generically from a function or table lookup rather than
# a whole bunch of "if" statements. -- FS
if (st.kind() == idltype.tk_char):
if (clv == '\n'): # newline
string_clv = "'\\n'"
elif (clv == '\t'): # tab
string_clv = "'\\t'"
else:
string_clv = "'" + clv + "'"
else:
string_clv = '%i ' % clv
#
                # If default case, then skip comparison with discriminant
#
if not cl.default():
self.st.out(self.template_comment_union_code_label_compare_start, discname=un.identifier(),labelval=string_clv )
self.st.inc_indent()
else:
self.st.out(self.template_comment_union_code_label_default_start )
self.getCDR(uc.caseType(),sname + "_" + uc.declarator().identifier())
if not cl.default():
self.st.dec_indent()
self.st.out(self.template_comment_union_code_label_compare_end )
else:
self.st.out(self.template_comment_union_code_label_default_end )
self.st.dec_indent()
self.st.out(self.template_union_helper_function_end)
#
    # Currently, get_CDR_alias is geared to finding typedefs
#
def get_CDR_alias(self,type,pn):
if self.DEBUG:
print "XXX get_CDR_alias, type = " ,type , " pn = " , pn
print "XXX get_CDR_alias, type.decl() = " ,type.decl()
print "XXX get_CDR_alias, type.decl().alias() = " ,type.decl().alias()
decl = type.decl() # get declarator object
if (decl.sizes()): # a typedef array
indices = self.get_indices_from_sizes(decl.sizes())
string_indices = '%i ' % indices # convert int to string
self.st.out(self.template_get_CDR_array_comment, aname=pn, asize=string_indices)
self.st.out(self.template_get_CDR_array_start, aname=pn, aval=string_indices)
self.addvar(self.c_i + pn + ";")
self.st.inc_indent()
self.getCDR(type.decl().alias().aliasType(), pn )
self.st.dec_indent()
self.st.out(self.template_get_CDR_array_end)
        else: # a simple typedef
if self.DEBUG:
print "XXX get_CDR_alias, type = " ,type , " pn = " , pn
print "XXX get_CDR_alias, type.decl() = " ,type.decl()
self.getCDR(type, pn )
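    #
    # Example (added commentary, hypothetical IDL): for
    #   typedef long Matrix[4][3];
    # decl.sizes() is [4, 3], get_indices_from_sizes() yields 12, and the code
    # above emits a for loop of 12 iterations (template_get_CDR_array_start /
    # template_get_CDR_array_end) around a single get_CDR_long() access.
    #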
#
# Handle structs, including recursive
#
def get_CDR_struct(self,type,pn):
# If I am a typedef struct {..}; node then find the struct node
if isinstance(type.decl(), idlast.Declarator):
ntype = type.decl().alias().aliasType().decl()
else:
ntype = type.decl() # I am a struct node
sname = self.namespace(ntype, "_")
self.st.out(self.template_structure_start, name=sname )
# Output a call to the struct helper function so I can handle recursive structs also.
self.st.out(self.template_decode_struct,name=sname)
self.st.out(self.template_structure_end, name=sname )
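    #
    # Illustrative expansion (added commentary; the struct name is hypothetical):
    # for a struct scoped as Penguin::Foo, sname is "Penguin_Foo" and the three
    # st.out() calls above emit, in the calling helper, roughly:
    #
    #   /* Begin struct "Penguin_Foo" */
    #   decode_Penguin_Foo_st(tvb, pinfo, tree, item, offset, header, operation, stream_is_big_endian);
    #   /* End struct "Penguin_Foo" */
    #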
#
    # genStructHelper()
#
# Generate private helper functions to decode a struct
#
# in: stnode ( a struct node)
#
def genStructHelper(self,st):
if self.DEBUG:
print "XXX genStructHelper"
sname = self.namespace(st, "_")
self.curr_sname = sname # update current opnode/exnode/stnode scoped name
if not self.fn_hash_built:
self.fn_hash[sname] = [] # init empty list as val for this sname key
# but only if the fn_hash is not already built
self.st.out(self.template_struct_helper_function_start, sname=sname, stname=st.repoId())
self.st.inc_indent()
if (len(self.fn_hash[sname]) > 0):
self.st.out(self.template_helper_function_vars_start)
self.dumpCvars(sname)
self.st.out(self.template_helper_function_vars_end )
for m in st.members():
for decl in m.declarators():
if decl.sizes(): # an array
indices = self.get_indices_from_sizes(decl.sizes())
string_indices = '%i ' % indices # convert int to string
self.st.out(self.template_get_CDR_array_comment, aname=decl.identifier(), asize=string_indices)
self.st.out(self.template_get_CDR_array_start, aname=decl.identifier(), aval=string_indices)
self.addvar(self.c_i + decl.identifier() + ";")
self.st.inc_indent()
self.getCDR(m.memberType(), sname + "_" + decl.identifier() )
self.st.dec_indent()
self.st.out(self.template_get_CDR_array_end)
else:
self.getCDR(m.memberType(), sname + "_" + decl.identifier() )
self.st.dec_indent()
self.st.out(self.template_struct_helper_function_end)
#
# Generate code to access a sequence of a type
#
def get_CDR_sequence(self,type, pn):
self.st.out(self.template_get_CDR_sequence_length, seqname=pn )
self.st.out(self.template_get_CDR_sequence_loop_start, seqname=pn )
self.addvar(self.c_i_lim + pn + ";" )
self.addvar(self.c_i + pn + ";")
self.st.inc_indent()
self.getCDR(type.unalias().seqType(), pn ) # and start all over with the type
self.st.dec_indent()
self.st.out(self.template_get_CDR_sequence_loop_end)
#
# Generate code to access a sequence of octet
#
def get_CDR_sequence_octet(self,type, pn):
self.st.out(self.template_get_CDR_sequence_length, seqname=pn)
self.st.out(self.template_get_CDR_sequence_octet, seqname=pn)
self.addvar(self.c_i_lim + pn + ";")
self.addvar("gchar * binary_seq_" + pn + ";")
self.addvar("gchar * text_seq_" + pn + ";")
#
# namespace()
#
# in - op node
#
# out - scoped operation name, using sep character instead of "::"
#
# eg: Penguin::Echo::echoWString => Penguin_Echo_echoWString if sep = "_"
#
#
def namespace(self,node,sep):
sname = string.replace(idlutil.ccolonName(node.scopedName()), '::', sep)
#print "XXX namespace: sname = " + sname
return sname
#
# generate code for plugin initialisation
#
def gen_plugin_register(self):
self.st.out(self.template_plugin_register, description=self.description, protocol_name=self.protoname, dissector_name=self.dissname)
#
# generate register_giop_user_module code, and register only
# unique interfaces that contain operations. Also output
# a heuristic register in case we want to use that.
#
# TODO - make this a command line option
#
# -e explicit
# -h heuristic
#
def gen_proto_reg_handoff(self, oplist):
self.st.out(self.template_proto_reg_handoff_start, dissector_name=self.dissname)
self.st.inc_indent()
for iname in self.get_intlist(oplist):
self.st.out(self.template_proto_reg_handoff_body, dissector_name=self.dissname, protocol_name=self.protoname, interface=iname )
self.st.out(self.template_proto_reg_handoff_heuristic, dissector_name=self.dissname, protocol_name=self.protoname)
self.st.dec_indent()
self.st.out(self.template_proto_reg_handoff_end)
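    #
    # Illustrative expansion (added commentary; names in angle brackets are
    # placeholders): with a "Penguin/Echo" interface in the IDL,
    # template_proto_reg_handoff_body produces a line such as
    #
    #   register_giop_user_module(dissect_<dissector_name>, "<protocol_name>", "Penguin/Echo", proto_<dissector_name> ); /* explicit dissector */
    #
    # followed by the single heuristic register_giop_user() call.
    #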
#
# generate hf_ array element for operation, attribute, enums, struct and union lists
#
def genOp_hf(self,op):
sname = self.namespace(op, "_")
opname = sname[string.find(sname, "_")+1:]
opname = opname[:string.find(opname, "_")]
rt = op.returnType()
if (rt.kind() != idltype.tk_void):
            if (rt.kind() == idltype.tk_alias): # a typedef return val possibly ?
self.getCDR_hf(rt, rt.name(),\
opname + "." + op.identifier() + ".return", sname + "_return")
else:
self.getCDR_hf(rt, "Return value",\
opname + "." + op.identifier() + ".return", sname + "_return")
for p in op.parameters():
self.getCDR_hf(p.paramType(), p.identifier(),\
opname + "." + op.identifier() + "." + p.identifier(), sname + "_" + p.identifier())
def genAt_hf(self,at):
for decl in at.declarators():
sname = self.namespace(decl, "_")
atname = sname[string.find(sname, "_")+1:]
atname = atname[:string.find(atname, "_")]
self.getCDR_hf(at.attrType(), decl.identifier(),\
atname + "." + decl.identifier() + ".get", "get" + "_" + sname + "_" + decl.identifier())
if not at.readonly():
self.getCDR_hf(at.attrType(), decl.identifier(),\
atname + "." + decl.identifier() + ".set", "set" + "_" + sname + "_" + decl.identifier())
def genSt_hf(self,st):
sname = self.namespace(st, "_")
stname = sname[string.find(sname, "_")+1:]
stname = stname[:string.find(stname, "_")]
for m in st.members():
for decl in m.declarators():
self.getCDR_hf(m.memberType(), st.identifier() + "_" + decl.identifier(),\
st.identifier() + "." + decl.identifier(), sname + "_" + decl.identifier())
def genEx_hf(self,ex):
sname = self.namespace(ex, "_")
exname = sname[string.find(sname, "_")+1:]
exname = exname[:string.find(exname, "_")]
for m in ex.members():
for decl in m.declarators():
self.getCDR_hf(m.memberType(), ex.identifier() + "_" + decl.identifier(),\
exname + "." + ex.identifier() + "_" + decl.identifier(), sname + "_" + decl.identifier())
def genUnion_hf(self,un):
sname = self.namespace(un, "_")
unname = sname[:string.rfind(sname, "_")]
unname = string.replace(unname, "_", ".")
self.getCDR_hf(un.switchType().unalias(), un.identifier(),\
unname + "." + un.identifier(), sname + "_" + un.identifier())
for uc in un.cases(): # for all UnionCase objects in this union
for cl in uc.labels(): # for all Caselabel objects in this UnionCase
self.getCDR_hf(uc.caseType(), un.identifier() + "_" + uc.declarator().identifier(),\
unname + "." + un.identifier() + "." + uc.declarator().identifier(),\
sname + "_" + uc.declarator().identifier())
#
# generate proto_register_<protoname> code,
#
    # in - oplist[], atlist[], stlist[], unlist[]
#
def gen_proto_register(self, oplist, atlist, stlist, unlist):
self.st.out(self.template_proto_register_start, dissector_name=self.dissname)
#operation specific filters
self.st.out(self.template_proto_register_op_filter_comment)
for op in oplist:
self.genOp_hf(op)
#attribute filters
self.st.out(self.template_proto_register_at_filter_comment)
for at in atlist:
self.genAt_hf(at)
#struct filters
self.st.out(self.template_proto_register_st_filter_comment)
for st in stlist:
if (st.members()): # only if has members
self.genSt_hf(st)
# exception List filters
exlist = self.get_exceptionList(oplist) # grab list of exception nodes
self.st.out(self.template_proto_register_ex_filter_comment)
for ex in exlist:
if (ex.members()): # only if has members
self.genEx_hf(ex)
# Union filters
self.st.out(self.template_proto_register_un_filter_comment)
for un in unlist:
self.genUnion_hf(un)
self.st.out(self.template_proto_register_end, description=self.description, protocol_name=self.protoname, dissector_name=self.dissname)
#
# in - oplist[]
#
# out - a list of unique interface names. This will be used in
# register_giop_user_module(dissect_giop_auto, "TEST IDL", "Penguin/Echo" ); so the operation
# name must be removed from the scope. And we also only want unique interfaces.
#
def get_intlist(self,oplist):
int_hash = {} # holds a hash of unique interfaces
for op in oplist:
sc = op.scopedName() # eg: penguin,tux,bite
sc1 = sc[:-1] # drop last entry
sn = idlutil.slashName(sc1) # penguin/tux
if not int_hash.has_key(sn):
int_hash[sn] = 0; # dummy val, but at least key is unique
ret = int_hash.keys()
ret.sort()
return ret
#
# in - oplist[]
#
    # out - a list of exception nodes (unique). This will be used
# to generate dissect_exception_XXX functions.
#
def get_exceptionList(self,oplist):
ex_hash = {} # holds a hash of unique exceptions.
for op in oplist:
for ex in op.raises():
if not ex_hash.has_key(ex):
ex_hash[ex] = 0; # dummy val, but at least key is unique
if self.DEBUG:
print "XXX Exception = " + ex.identifier()
ret = ex_hash.keys()
ret.sort()
return ret
#
# Simple function to take a list of array sizes and find the
# total number of elements
#
#
# eg: temp[4][3] = 12 elements
#
def get_indices_from_sizes(self,sizelist):
val = 1;
for i in sizelist:
val = val * i
return val
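    #
    # Worked example (added commentary): get_indices_from_sizes([4, 3]) returns
    # 4 * 3 = 12, matching the temp[4][3] case described above.
    #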
#
# Determine how many octets contain requested number
    # of digits for a "fixed" IDL type "on the wire"
#
def dig_to_len(self,dignum):
return (dignum/2) + 1
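    #
    # Worked example (added commentary): a hypothetical fixed<5,2> IDL value has
    # 5 digits, so dig_to_len(5) returns (5/2) + 1 = 3 octets to highlight on the
    # wire (integer division under Python 2).
    #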
#
# Output some TODO comment
#
def genTODO(self,message):
self.st.out(self.template_debug_TODO, message=message)
#
# Output some WARNING comment
#
def genWARNING(self,message):
self.st.out(self.template_debug_WARNING, message=message)
#
# Templates for C code
#
template_helper_function_comment = """\
/*
* @repoid@
*/"""
template_helper_function_vars_start = """\
/* Operation specific Variable declarations Begin */"""
template_helper_function_vars_end = """\
/* Operation specific Variable declarations End */
(void)item; /* Avoid coverity param_set_but_unused parse warning */
"""
template_helper_function_start = """\
static void
decode_@sname@(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, proto_item *item _U_, int *offset _U_, MessageHeader *header, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
{"""
template_helper_function_end = """\
}
"""
#
# proto_reg_handoff() templates
#
template_proto_reg_handoff_start = """\
/* register me as handler for these interfaces */
void proto_reg_handoff_giop_@dissector_name@(void)
{"""
template_proto_reg_handoff_body = """\
/* Register for Explicit Dissection */
register_giop_user_module(dissect_@dissector_name@, \"@protocol_name@\", \"@interface@\", proto_@dissector_name@ ); /* explicit dissector */
"""
template_proto_reg_handoff_heuristic = """\
/* Register for Heuristic Dissection */
register_giop_user(dissect_@dissector_name@, \"@protocol_name@\" ,proto_@dissector_name@); /* heuristic dissector */
"""
template_proto_reg_handoff_end = """\
}
"""
#
# Prototype
#
template_prototype = """
void proto_register_giop_@dissector_name@(void);
void proto_reg_handoff_giop_@dissector_name@(void);"""
#
# Initialize the protocol
#
template_protocol = """
/* Initialise the protocol and subtree pointers */
static int proto_@dissector_name@ = -1;
static gint ett_@dissector_name@ = -1;
"""
#
# Initialize the boundary Alignment
#
template_init_boundary = """
/* Initialise the initial Alignment */
static guint32 boundary = GIOP_HEADER_SIZE; /* initial value */"""
#
# plugin_register and plugin_reg_handoff templates
#
template_plugin_register = """
#if 0
WS_DLL_PUBLIC_DEF void
plugin_register(void)
{
if (proto_@dissector_name@ == -1) {
proto_register_giop_@dissector_name@();
}
}
WS_DLL_PUBLIC_DEF void
plugin_reg_handoff(void){
proto_register_handoff_giop_@dissector_name@();
}
#endif
"""
#
# proto_register_<dissector name>(void) templates
#
template_proto_register_start = """
/* Register the protocol with Wireshark */
void proto_register_giop_@dissector_name@(void)
{
/* setup list of header fields */
static hf_register_info hf[] = {
/* field that indicates the currently ongoing request/reply exchange */
{&hf_operationrequest, {"Request_Operation","giop-@dissector_name@.Request_Operation",FT_STRING,BASE_NONE,NULL,0x0,NULL,HFILL}},"""
template_proto_register_end = """
};
static ei_register_info ei[] = {
{ &ei_@dissector_name@_unknown_giop_msg, { "giop-@dissector_name@.unknown_giop_msg", PI_PROTOCOL, PI_WARN, "Unknown GIOP message", EXPFILL }},
{ &ei_@dissector_name@_unknown_exception, { "giop-@dissector_name@.unknown_exception", PI_PROTOCOL, PI_WARN, "Unknown exception", EXPFILL }},
{ &ei_@dissector_name@_unknown_reply_status, { "giop-@dissector_name@.unknown_reply_status", PI_PROTOCOL, PI_WARN, "Unknown reply status", EXPFILL }},
};
/* setup protocol subtree array */
static gint *ett[] = {
&ett_@dissector_name@,
};
expert_module_t* expert_@dissector_name@;
/* Register the protocol name and description */
proto_@dissector_name@ = proto_register_protocol(\"@description@\" , \"@protocol_name@\", \"giop-@dissector_name@\" );
proto_register_field_array(proto_@dissector_name@, hf, array_length(hf));
proto_register_subtree_array(ett, array_length(ett));
expert_@dissector_name@ = expert_register_protocol(proto_@dissector_name@);
expert_register_field_array(expert_@dissector_name@, ei, array_length(ei));
}
"""
template_proto_register_op_filter_comment = """\
/* Operation filters */"""
template_proto_register_at_filter_comment = """\
/* Attribute filters */"""
template_proto_register_st_filter_comment = """\
/* Struct filters */"""
template_proto_register_ex_filter_comment = """\
/* User exception filters */"""
template_proto_register_un_filter_comment = """\
/* Union filters */"""
template_proto_register_ei_filters = """\
/* Expert info filters */
static expert_field ei_@dissector_name@_unknown_giop_msg = EI_INIT;
static expert_field ei_@dissector_name@_unknown_exception = EI_INIT;
static expert_field ei_@dissector_name@_unknown_reply_status = EI_INIT;
"""
#
# template for delegation code
#
template_op_delegate_code = """\
if (strcmp(operation, "@opname@") == 0
&& (!idlname || strcmp(idlname, \"@interface@\") == 0)) {
item = process_RequestOperation(tvb, pinfo, ptree, header, operation); /* fill-up Request_Operation field & info column */
tree = start_dissecting(tvb, pinfo, ptree, offset);
decode_@sname@(tvb, pinfo, tree, item, offset, header, operation, stream_is_big_endian);
return TRUE;
}
"""
#
# Templates for the helper functions
#
#
#
template_helper_switch_msgtype_start = """\
switch(header->message_type) {"""
template_helper_switch_msgtype_default_start = """\
default:
/* Unknown GIOP Message */
expert_add_info_format(pinfo, item, &ei_@dissector_name@_unknown_giop_msg, "Unknown GIOP message %d", header->message_type);"""
template_helper_switch_msgtype_default_end = """\
break;"""
template_helper_switch_msgtype_end = """\
} /* switch(header->message_type) */"""
template_helper_switch_msgtype_request_start = """\
case Request:"""
template_helper_switch_msgtype_request_end = """\
break;"""
template_helper_switch_msgtype_reply_start = """\
case Reply:"""
template_helper_switch_msgtype_reply_no_exception_start = """\
case NO_EXCEPTION:"""
template_helper_switch_msgtype_reply_no_exception_end = """\
break;"""
template_helper_switch_msgtype_reply_user_exception_start = """\
case USER_EXCEPTION:"""
template_helper_switch_msgtype_reply_user_exception_end = """\
break;"""
template_helper_switch_msgtype_reply_default_start = """\
default:
/* Unknown Exception */
expert_add_info_format(pinfo, item, &ei_@dissector_name@_unknown_exception, "Unknown exception %d", header->rep_status);"""
template_helper_switch_msgtype_reply_default_end = """\
break;"""
template_helper_switch_msgtype_reply_end = """\
break;"""
template_helper_switch_msgtype_default_start = """\
default:
/* Unknown GIOP Message */
expert_add_info_format(pinfo, item, &ei_@dissector_name@_unknown_giop_msg, "Unknown GIOP message %d", header->message_type);"""
template_helper_switch_msgtype_default_end = """\
break;"""
template_helper_switch_rep_status_start = """\
switch(header->rep_status) {"""
template_helper_switch_rep_status_default_start = """\
default:
/* Unknown Reply Status */
expert_add_info_format(pinfo, item, &ei_@dissector_name@_unknown_reply_status, "Unknown reply status %d", header->rep_status);"""
template_helper_switch_rep_status_default_end = """\
break;"""
template_helper_switch_rep_status_end = """\
} /* switch(header->rep_status) */
break;"""
#
# Templates for get_CDR_xxx accessors
#
template_get_CDR_ulong = """\
proto_tree_add_uint(tree, hf_@hfname@, tvb, *offset-4, 4, get_CDR_ulong(tvb,offset,stream_is_big_endian, boundary));
"""
template_get_CDR_short = """\
proto_tree_add_int(tree, hf_@hfname@, tvb, *offset-2, 2, get_CDR_short(tvb,offset,stream_is_big_endian, boundary));
"""
template_get_CDR_void = """\
/* Function returns void */
"""
template_get_CDR_long = """\
proto_tree_add_int(tree, hf_@hfname@, tvb, *offset-4, 4, get_CDR_long(tvb,offset,stream_is_big_endian, boundary));
"""
template_get_CDR_ushort = """\
proto_tree_add_uint(tree, hf_@hfname@, tvb, *offset-2, 2, get_CDR_ushort(tvb,offset,stream_is_big_endian, boundary));
"""
template_get_CDR_float = """\
proto_tree_add_float(tree, hf_@hfname@, tvb, *offset-4, 4, get_CDR_float(tvb,offset,stream_is_big_endian, boundary));
"""
template_get_CDR_double = """\
proto_tree_add_double(tree, hf_@hfname@, tvb, *offset-8, 8, get_CDR_double(tvb,offset,stream_is_big_endian, boundary));
"""
template_get_CDR_longlong = """\
proto_tree_add_int64(tree, hf_@hfname@, tvb, *offset-8, 8, get_CDR_long_long(tvb,offset,stream_is_big_endian, boundary));
"""
template_get_CDR_ulonglong = """\
proto_tree_add_uint64(tree, hf_@hfname@, tvb, *offset-8, 8, get_CDR_ulong_long(tvb,offset,stream_is_big_endian, boundary));
"""
template_get_CDR_boolean = """\
proto_tree_add_boolean(tree, hf_@hfname@, tvb, *offset-1, 1, get_CDR_boolean(tvb,offset));
"""
template_get_CDR_char = """\
proto_tree_add_uint(tree, hf_@hfname@, tvb, *offset-1, 1, get_CDR_char(tvb,offset));
"""
template_get_CDR_octet = """\
proto_tree_add_uint(tree, hf_@hfname@, tvb, *offset-1, 1, get_CDR_octet(tvb,offset));
"""
template_get_CDR_any = """\
get_CDR_any(tvb, pinfo, tree, item, offset, stream_is_big_endian, boundary, header);
"""
template_get_CDR_fixed = """\
get_CDR_fixed(tvb, pinfo, item, &seq, offset, @digits@, @scale@);
proto_tree_add_text(tree,tvb,*offset-@length@, @length@, "@varname@ < @digits@, @scale@> = %s",seq);
"""
template_get_CDR_enum_symbolic = """\
u_octet4 = get_CDR_enum(tvb,offset,stream_is_big_endian, boundary);
/* coverity[returned_pointer] */
item = proto_tree_add_uint(tree, hf_@hfname@, tvb, *offset-4, 4, u_octet4);
"""
template_get_CDR_string = """\
giop_add_CDR_string(tree, tvb, offset, stream_is_big_endian, boundary, hf_@hfname@);
"""
template_get_CDR_wstring = """\
u_octet4 = get_CDR_wstring(tvb, &seq, offset, stream_is_big_endian, boundary, header);
proto_tree_add_text(tree,tvb,*offset-u_octet4,u_octet4,"@varname@ (%u) = %s",
u_octet4, (u_octet4 > 0) ? seq : \"\");
"""
template_get_CDR_wchar = """\
s_octet1 = get_CDR_wchar(tvb, &seq, offset, header);
if (tree) {
if (s_octet1 > 0)
proto_tree_add_text(tree,tvb,*offset-1-s_octet1,1,"length = %u",s_octet1);
if (s_octet1 < 0)
s_octet1 = -s_octet1;
if (s_octet1 > 0)
proto_tree_add_text(tree,tvb,*offset-s_octet1,s_octet1,"@varname@ = %s",seq);
}
"""
template_get_CDR_TypeCode = """\
u_octet4 = get_CDR_typeCode(tvb, pinfo, tree, offset, stream_is_big_endian, boundary, header);
"""
template_get_CDR_object = """\
get_CDR_object(tvb, pinfo, tree, offset, stream_is_big_endian, boundary);
"""
template_get_CDR_sequence_length = """\
u_octet4_loop_@seqname@ = get_CDR_ulong(tvb, offset, stream_is_big_endian, boundary);
/* coverity[returned_pointer] */
item = proto_tree_add_uint(tree, hf_@seqname@, tvb,*offset-4, 4, u_octet4_loop_@seqname@);
"""
template_get_CDR_sequence_loop_start = """\
for (i_@seqname@=0; i_@seqname@ < u_octet4_loop_@seqname@; i_@seqname@++) {
"""
template_get_CDR_sequence_loop_end = """\
}
"""
template_get_CDR_sequence_octet = """\
if (u_octet4_loop_@seqname@ > 0 && tree) {
get_CDR_octet_seq(tvb, &binary_seq_@seqname@, offset,
u_octet4_loop_@seqname@);
text_seq_@seqname@ = make_printable_string(binary_seq_@seqname@,
u_octet4_loop_@seqname@);
proto_tree_add_text(tree, tvb, *offset - u_octet4_loop_@seqname@,
u_octet4_loop_@seqname@, \"@seqname@: %s\", text_seq_@seqname@);
}
"""
template_get_CDR_array_start = """\
for (i_@aname@=0; i_@aname@ < @aval@; i_@aname@++) {
"""
template_get_CDR_array_end = """\
}
"""
template_get_CDR_array_comment = """\
/* Array: @aname@[ @asize@] */
"""
template_structure_start = """\
/* Begin struct \"@name@\" */"""
template_structure_end = """\
/* End struct \"@name@\" */"""
template_union_start = """\
/* Begin union \"@name@\" */"""
template_union_end = """\
/* End union \"@name@\" */"""
#
# Templates for get_CDR_xxx_hf accessors
#
template_get_CDR_ulong_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT32,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_short_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_INT16,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_long_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_INT32,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_ushort_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT16,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_float_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_FLOAT,BASE_NONE,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_double_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_DOUBLE,BASE_NONE,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_longlong_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_INT64,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_ulonglong_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT64,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_boolean_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_BOOLEAN,8,NULL,0x01,NULL,HFILL}},"""
template_get_CDR_char_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT8,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_octet_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT8,BASE_HEX,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_enum_symbolic_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT32,BASE_DEC,VALS(@valstringarray@),0x0,NULL,HFILL}},"""
template_get_CDR_string_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_STRING,BASE_NONE,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_wstring_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_STRING,BASE_NONE,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_wchar_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT16,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_TypeCode_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT32,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_sequence_hf = """\
{&hf_@hfname@, {"Seq length of @descname@","giop-@dissector_name@.@filtername@",FT_UINT32,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_sequence_octet_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT8,BASE_HEX,NULL,0x0,NULL,HFILL}},"""
#
# Program Header Template
#
template_Header = """\
/* packet-@dissector_name@.c
*
* Routines for IDL dissection
*
* Autogenerated from idl2wrs
* Copyright 2001 Frank Singleton <frank.singleton@@ericsson.com>
*/
"""
template_wireshark_copyright = """\
/*
* Wireshark - Network traffic analyzer
* By Gerald Combs
* Copyright 1999 - 2012 Gerald Combs
*/
"""
#
# GPL Template
#
template_GPL = """\
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
"""
#
# Modelines Template
#
template_Modelines = """\
/*
* Editor modelines
*
* Local Variables:
* c-basic-offset: 4
* tab-width: 8
* indent-tabs-mode: nil
* End:
*
* ex: set shiftwidth=4 tabstop=8 expandtab:
* :indentSize=4:tabSize=8:noTabs=true:
*/"""
#
# Includes template
#
template_Includes = """\
#include "config.h"
#include <gmodule.h>
#include <string.h>
#include <glib.h>
#include <epan/packet.h>
#include <epan/proto.h>
#include <epan/dissectors/packet-giop.h>
#include <epan/expert.h>
#ifdef _MSC_VER
/* disable warning: "unreference local variable" */
#pragma warning(disable:4101)
#endif
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wunused-function"
#pragma GCC diagnostic ignored "-Wunused-variable"
#endif"""
#
# Main dissector entry templates
#
template_main_dissector_start = """\
/*
* Called once we accept the packet as being for us; it sets the
* Protocol and Info columns and creates the top-level protocol
* tree item.
*/
static proto_tree *
start_dissecting(tvbuff_t *tvb, packet_info *pinfo, proto_tree *ptree, int *offset)
{
proto_item *ti = NULL;
proto_tree *tree = NULL; /* init later, inside if(tree) */
col_set_str(pinfo->cinfo, COL_PROTOCOL, \"@disprot@\");
/*
* Do not clear COL_INFO, as nothing is being written there by
* this dissector yet. So leave it as is from the GIOP dissector.
* TODO: add something useful to COL_INFO
* col_clear(pinfo->cinfo, COL_INFO);
*/
if (ptree) {
ti = proto_tree_add_item(ptree, proto_@dissname@, tvb, *offset, -1, ENC_NA);
tree = proto_item_add_subtree(ti, ett_@dissname@);
}
return tree;
}
static proto_item*
process_RequestOperation(tvbuff_t *tvb, packet_info *pinfo, proto_tree *ptree, MessageHeader *header, const gchar *operation)
{
proto_item *pi;
if(header->message_type == Reply) {
/* fill-up info column */
col_append_fstr(pinfo->cinfo, COL_INFO, " op = %s",operation);
}
/* fill-up the field */
pi=proto_tree_add_string(ptree, hf_operationrequest, tvb, 0, 0, operation);
PROTO_ITEM_SET_GENERATED(pi);
return pi;
}
static gboolean
dissect_@dissname@(tvbuff_t *tvb, packet_info *pinfo, proto_tree *ptree, int *offset, MessageHeader *header, const gchar *operation, gchar *idlname)
{
proto_item *item _U_;
proto_tree *tree _U_;
gboolean stream_is_big_endian = is_big_endian(header); /* get endianess */
/* If we have a USER Exception, then decode it and return */
if ((header->message_type == Reply) && (header->rep_status == USER_EXCEPTION)) {
return decode_user_exception(tvb, pinfo, ptree, offset, header, operation, stream_is_big_endian);
}
"""
template_main_dissector_switch_msgtype_start = """\
switch(header->message_type) {
"""
template_main_dissector_switch_msgtype_start_request_reply = """\
case Request:
case Reply:
"""
template_main_dissector_switch_msgtype_end_request_reply = """\
break;
"""
template_main_dissector_switch_msgtype_all_other_msgtype = """\
case CancelRequest:
case LocateRequest:
case LocateReply:
case CloseConnection:
case MessageError:
case Fragment:
return FALSE; /* not handled yet */
default:
return FALSE; /* not handled yet */
} /* switch */
"""
template_main_dissector_end = """\
return FALSE;
} /* End of main dissector */
"""
#-------------------------------------------------------------#
# Exception handling templates #
#-------------------------------------------------------------#
template_exception_helpers_start = """\
/* Begin Exception Helper Functions */
"""
template_exception_helpers_end = """\
/* End Exception Helper Functions */
"""
#
# template for Main delegator for exception handling
#
template_main_exception_delegator_start = """\
/*
* Main delegator for exception handling
*
*/
static gboolean
decode_user_exception(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *ptree _U_, int *offset _U_, MessageHeader *header, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
{
proto_tree *tree _U_;
if (!header->exception_id)
return FALSE;
"""
#
# template for exception delegation code body
#
template_ex_delegate_code = """\
if (strcmp(header->exception_id, "@exname@") == 0) {
tree = start_dissecting(tvb, pinfo, ptree, offset);
decode_ex_@sname@(tvb, pinfo, tree, offset, header, operation, stream_is_big_endian); /* @exname@ */
return TRUE;
}
"""
#
# End of Main delegator for exception handling
#
template_main_exception_delegator_end = """
return FALSE; /* user exception not found */
}
"""
#
# template for exception helper code
#
template_exception_helper_function_start = """\
/* Exception = @exname@ */
static void
decode_ex_@sname@(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, int *offset _U_, MessageHeader *header _U_, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
{
proto_item *item _U_;
"""
template_exception_helper_function_end = """\
}
"""
#
# template for struct helper code
#
template_struct_helper_function_start = """\
/* Struct = @stname@ */
static void
decode_@sname@_st(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, proto_item *item _U_, int *offset _U_, MessageHeader *header _U_, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
{
"""
template_struct_helper_function_end = """\
}
"""
#
# template for union helper code
#
template_union_helper_function_start = """\
/* Union = @unname@ */
static void
decode_@sname@_un(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, int *offset _U_, MessageHeader *header _U_, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
{
proto_item* item _U_;
"""
template_union_helper_function_end = """\
}
"""
#-------------------------------------------------------------#
# Value string templates #
#-------------------------------------------------------------#
template_value_string_start = """\
static const value_string @valstringname@[] = {
"""
template_value_string_entry = """\
{ @intval@, \"@description@\" },"""
template_value_string_end = """\
{ 0, NULL },
};
"""
#-------------------------------------------------------------#
# Enum handling templates #
#-------------------------------------------------------------#
template_comment_enums_start = """\
/*
* IDL Enums Start
*/
"""
template_comment_enums_end = """\
/*
* IDL Enums End
*/
"""
template_comment_enum_comment = """\
/*
* Enum = @ename@
*/"""
#-------------------------------------------------------------#
# Attribute handling templates #
#-------------------------------------------------------------#
template_comment_attributes_start = """\
/*
* IDL Attributes Start
*/
"""
#
    # get/set accessor method names are language mapping dependent.
#
template_attributes_declare_Java_get = """static const char get_@sname@_at[] = \"_get_@atname@\" ;"""
template_attributes_declare_Java_set = """static const char set_@sname@_at[] = \"_set_@atname@\" ;"""
template_comment_attributes_end = """
/*
* IDL Attributes End
*/
"""
#
# template for Attribute delegation code
#
# Note: _get_xxx() should only be called for Reply with NO_EXCEPTION
# Note: _set_xxx() should only be called for Request
#
#
template_at_delegate_code_get = """\
if (strcmp(operation, get_@sname@_at) == 0 && (header->message_type == Reply) && (header->rep_status == NO_EXCEPTION) ) {
tree = start_dissecting(tvb, pinfo, ptree, offset);
decode_get_@sname@_at(tvb, pinfo, tree, offset, header, operation, stream_is_big_endian);
return TRUE;
}
"""
template_at_delegate_code_set = """\
if (strcmp(operation, set_@sname@_at) == 0 && (header->message_type == Request) ) {
tree = start_dissecting(tvb, pinfo, ptree, offset);
decode_set_@sname@_at(tvb, pinfo, tree, offset, header, operation, stream_is_big_endian);
return TRUE;
}
"""
template_attribute_helpers_start = """\
/* Begin Attribute Helper Functions */
"""
template_attribute_helpers_end = """\
/* End Attribute Helper Functions */
"""
#
# template for attribute helper code
#
template_attribute_helper_function_start = """\
/* Attribute = @atname@ */
static void
decode_@sname@_at(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, int *offset _U_, MessageHeader *header _U_, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
{
proto_item* item _U_;
"""
template_attribute_helper_function_end = """\
}
"""
#-------------------------------------------------------------#
# Debugging templates #
#-------------------------------------------------------------#
#
# Template for outputting TODO "C" comments
    # so the user knows I need to improve something.
#
template_debug_TODO = """\
/* TODO - @message@ */
"""
#
# Template for outputting WARNING "C" comments
    # so the user knows if I have found a problem.
#
template_debug_WARNING = """\
/* WARNING - @message@ */
"""
#-------------------------------------------------------------#
# IDL Union templates #
#-------------------------------------------------------------#
template_comment_union_code_start = """\
/*
* IDL Union Start - @uname@
*/
"""
template_comment_union_code_end = """
/*
* IDL union End - @uname@
*/
"""
template_comment_union_code_discriminant = """\
/*
* IDL Union - Discriminant - @uname@
*/
"""
#
    # Cast Union types to something appropriate
# Enum value cast to guint32, all others cast to gint32
# as omniidl accessor returns integer or Enum.
#
template_union_code_save_discriminant_enum = """\
disc_s_@discname@ = (gint32) u_octet4; /* save Enum Value discriminant and cast to gint32 */
"""
template_union_code_save_discriminant_long = """\
disc_s_@discname@ = (gint32) s_octet4; /* save gint32 discriminant and cast to gint32 */
"""
template_union_code_save_discriminant_ulong = """\
disc_s_@discname@ = (gint32) u_octet4; /* save guint32 discriminant and cast to gint32 */
"""
template_union_code_save_discriminant_short = """\
disc_s_@discname@ = (gint32) s_octet2; /* save gint16 discriminant and cast to gint32 */
"""
template_union_code_save_discriminant_ushort = """\
disc_s_@discname@ = (gint32) u_octet2; /* save guint16 discriminant and cast to gint32 */
"""
template_union_code_save_discriminant_char = """\
disc_s_@discname@ = (gint32) u_octet1; /* save guint1 discriminant and cast to gint32 */
"""
template_union_code_save_discriminant_boolean = """\
disc_s_@discname@ = (gint32) u_octet1; /* save guint1 discriminant and cast to gint32 */
"""
template_comment_union_code_label_compare_start = """\
if (disc_s_@discname@ == @labelval@) {
"""
template_comment_union_code_label_compare_end = """\
return; /* End Compare for this discriminant type */
}
"""
template_comment_union_code_label_default_start = """
/* Default Union Case Start */
"""
template_comment_union_code_label_default_end = """\
/* Default Union Case End */
"""
#
# Templates for function prototypes.
# This is used in genDeclares() for declaring function prototypes
# for structs and union helper functions.
#
template_hf_operations = """
static int hf_operationrequest = -1;/* Request_Operation field */
"""
template_hf = """\
static int hf_@name@ = -1;"""
template_prototype_start_dissecting = """
static proto_tree *start_dissecting(tvbuff_t *tvb, packet_info *pinfo, proto_tree *ptree, int *offset);
"""
template_prototype_struct_start = """\
/* Struct prototype declaration Start */
"""
template_prototype_struct_end = """\
/* Struct prototype declaration End */
"""
template_prototype_struct_body = """\
/* Struct = @stname@ */
static void decode_@name@_st(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, proto_item *item _U_, int *offset _U_, MessageHeader *header _U_, const gchar *operation _U_, gboolean stream_is_big_endian _U_);
"""
template_decode_struct = """\
decode_@name@_st(tvb, pinfo, tree, item, offset, header, operation, stream_is_big_endian);"""
template_prototype_union_start = """\
/* Union prototype declaration Start */"""
template_prototype_union_end = """\
/* Union prototype declaration End */"""
template_prototype_union_body = """
/* Union = @unname@ */
static void decode_@name@_un(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, int *offset _U_, MessageHeader *header _U_, const gchar *operation _U_, gboolean stream_is_big_endian _U_);
"""
template_decode_union = """
decode_@name@_un(tvb, pinfo, tree, offset, header, operation, stream_is_big_endian);
"""
#
# Editor modelines - http://www.wireshark.org/tools/modelines.html
#
# Local variables:
# c-basic-offset: 4
# indent-tabs-mode: nil
# End:
#
# vi: set shiftwidth=4 expandtab:
# :indentSize=4:noTabs=true:
#
|
gpl-2.0
| 8,077,415,331,128,886,000
| 31.867763
| 223
| 0.592677
| false
| 3.365524
| false
| false
| false
|
ngageoint/scale
|
scale/storage/migrations/0008_auto_20170609_1443.py
|
1
|
1859
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-09 14:43
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('batch', '0002_auto_20170412_1225'),
('recipe', '0018_recipefile_recipe_input'),
('storage', '0007_auto_20170412_1225'),
]
operations = [
migrations.AddField(
model_name='scalefile',
name='batch',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='batch.Batch'),
),
migrations.AddField(
model_name='scalefile',
name='job_output',
field=models.CharField(blank=True, max_length=250, null=True),
),
migrations.AddField(
model_name='scalefile',
name='recipe',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='recipe.Recipe'),
),
migrations.AddField(
model_name='scalefile',
name='recipe_job',
field=models.CharField(blank=True, max_length=250, null=True),
),
migrations.AddField(
model_name='scalefile',
name='recipe_type',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='recipe.RecipeType'),
),
migrations.AddField(
model_name='scalefile',
name='source_ended',
field=models.DateTimeField(blank=True, db_index=True, null=True),
),
migrations.AddField(
model_name='scalefile',
name='source_started',
field=models.DateTimeField(blank=True, db_index=True, null=True),
),
]
|
apache-2.0
| -3,557,771,446,890,161,700
| 34.075472
| 128
| 0.586337
| false
| 4.032538
| false
| false
| false
|
dganbold/reinforcement_learning
|
NeuralQLearning/cartpole_learn.py
|
1
|
3105
|
#
#
import gym
from NeuralQLearner import *
#
#
if __name__ == "__main__":
# ----------------------------------------
# Define parameters for greedy policy
epsilon = 0.5 # exploration
epsilon_floor = 0.1
exploration_decay = 0.998
# Define parameters for Q-learning
alpha = 0.2
gamma = 0.98
epoch = 1000
max_steps = 200
max_memory = max_steps*10
batch_size = int(32)
# ----------------------------------------
# Actions
# Type: Discrete(2)
# Num | Observation
# 0 | Push cart to the left
# 1 | Push cart to the right
n_action = 2
actions = np.array([0, 1])
# ----------------------------------------
# Observation
# Type: Box(4)
# Num | Observation | Min | Max
# 0 | Cart Position | -2.4 | 2.4
# 1 | Cart Velocity | -Inf | Inf
# 2 | Pole Angle | -41.8 | 41.8
# 3 | Pole Velocity | -Inf | Inf
n_input = 4
observation = []
# ----------------------------------------
# Define environment/game
env_name = 'CartPole-v0'
env = gym.make(env_name)
# ----------------------------------------
# Initialize Neural Q-Learn object
AI = NeuralQLearner(n_input, actions, batch_size, epsilon, alpha, gamma)
#AI.plotQ()
# Initialize experience replay object
exp = Experience(max_memory)
# ----------------------------------------
# Train
for e in range(epoch):
# Get initial input
observation = env.reset()
observation_init = observation
# Training for single episode
step = 0
total_reward = 0
game_over = False
while (not game_over):
observation_capture = observation
#env.render()
# Epsilon-Greedy policy
action = AI.eGreedy(observation)
# Apply action, get rewards and new state
observation, reward, game_over, info = env.step(action)
# Store experience
# input[i] = [[state_t, action_t, reward_t, state_t+1], game_over?]
exp.memorize([observation_capture, action, reward, observation], game_over)
# Recall and replay experience
miniBatch = exp.recall(batch_size)
# Refinement of model
if len(miniBatch) == batch_size:
AI.train_Q_network(miniBatch)
#
step += 1
total_reward += reward
# End of the single episode training
print('#TRAIN Episode:%3i, Reward:%7.3f, Steps:%3i, Exploration:%1.4f'%(e, total_reward, step, AI.epsilon))
# Update exploration
AI.epsilon *= exploration_decay
AI.epsilon = max(epsilon_floor, AI.epsilon)
# Plot
#AI.plotQupdate()
#
# ----------------------------------------
# Export trained Neural-Net
AI.exportNetwork('models/%s_Q_network_epoch_%d' % (env_name, epoch))
# ----------------------------------------
print("Done!.")
# Some delay
raw_input('Press enter to terminate:')
# Close environment
env.close()
# EOF
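# Illustrative sketch (not part of the NeuralQLearner module): AI.eGreedy above is assumed to
# implement the standard epsilon-greedy rule, i.e. explore with probability epsilon and act
# greedily on the current Q estimates otherwise. A standalone example of that rule:
import numpy as np
def epsilon_greedy_example(q_values, epsilon):
    """Return a random action index with probability epsilon, else the greedy (argmax) action."""
    if np.random.rand() < epsilon:
        return np.random.randint(len(q_values))
    return int(np.argmax(q_values))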
|
mit
| -7,300,266,675,279,254,000
| 30.683673
| 115
| 0.498229
| false
| 3.900754
| false
| false
| false
|
ondrejch/FSM
|
scripts/mk1/writecore.py
|
1
|
1343
|
#!/usr/bin/env python3
#
# Write the FastDrum Serpent deck
# Ondrej Chvala, ochvala@utk.edu
# 2016-08-02
import drumdeck
import os
import argparse
# Serpent deck file name
filename = "ffrr.inp"
dirname = "./"
# Command line argument
parser = argparse.ArgumentParser(description='Writes Serpent2 input deck of the Fast Cube Reactor.')
parser.add_argument('--latsize', metavar='N', type=int, nargs='?', default=27,
help='lattice size, default = 27') #, required=False)
parser.add_argument('--fuelradius', metavar='r', type=float, nargs='?', default=1.17,
help='fuel rod radius [cm], default = 1.17 cm')
parser.add_argument('--reflector', metavar='refl', type=float, nargs='?', default=50,
                    help='reflector thickness [cm], default = 50 cm')
# Parse command line arguments
args = vars(parser.parse_args())
N = args['latsize']
r_fuel = args['fuelradius']
d_refl = args['reflector']
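# Example (hypothetical values, for illustration only):
#   ./writecore.py --latsize 31 --fuelradius 1.2 --reflector 60
# would build a 31x31 lattice of 1.2 cm fuel rods with a 60 cm reflector and write the
# resulting Serpent deck to ./ffrr.inp.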
# Make the deck
s2_deck = drumdeck.write_deck(N, r_fuel, d_refl)
fname = dirname + filename
print("Writing deck for lattice size ",N,", fuel radius ",r_fuel," cm, reflector thickness ",d_refl, " cm.")
# Write the deck
try:
f = open(fname, 'w')
f.write(s2_deck)
f.close()
print("Deck written,")
except IOError as e:
print("Unable to write to file", fname)
print(e)
|
gpl-3.0
| -5,168,715,324,540,304,000
| 27.574468
| 108
| 0.647803
| false
| 3.045351
| false
| false
| false
|
pitunti/alfaPitunti
|
mediaserver/platformcode/controllers/html.py
|
1
|
33340
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Controller for HTML
# ------------------------------------------------------------
import json
import os
import re
import threading
import time
import channelselector
from controller import Controller
from controller import Platformtools
from platformcode import config
from core.item import Item
from core.tmdb import Tmdb
from platformcode import launcher, logger
from core import filetools
# <addon id="plugin.video.alfa" name="Alfa" version="2.3.0" provider-name="Alfa Addon">
data = filetools.read(filetools.join(config.get_runtime_path(), "addon.xml"))
aux = re.findall('<addon id="plugin.video.alfa" name="Alfa" version="([^"]+)"', data, re.MULTILINE | re.DOTALL)
version = "???"
if len(aux) > 0:
version = aux[0]
class html(Controller):
pattern = re.compile("##")
name = "HTML"
def __init__(self, handler=None, ID=None):
super(html, self).__init__(handler, ID)
self.platformtools = platform(self)
self.data = {}
if self.handler:
self.client_ip = handler.client.getpeername()[0]
self.send_message({"action": "connect",
"data": {"version": "Alfa %s" % version,
"date": "--/--/----"}})
t = threading.Thread(target=launcher.start, name=ID)
t.setDaemon(True)
t.start()
def run(self, path):
if path:
item = Item().fromurl(path)
else:
item = Item(channel="channelselector", action="mainlist", viewmode="banner")
launcher.run(item)
def get_data(self, id):
while not "id" in self.data or not self.data["id"] == id:
time.sleep(0.1)
data = self.data["result"]
self.data = {}
return data
def send_message(self, data):
import random
ID = "%032x" % (random.getrandbits(128))
data["id"] = ID
self.handler.sendMessage(json.dumps(data))
return ID
class platform(Platformtools):
def __init__(self, controller):
self.controller = controller
self.handler = controller.handler
self.get_data = controller.get_data
self.send_message = controller.send_message
def render_items(self, itemlist, parent_item):
"""
        Function in charge of displaying the itemlist; the itemlist and the item it comes from are passed as parameters
        @type itemlist: list
        @param itemlist: list of elements to display
        @type parent_item: item
        @param parent_item: parent element
"""
        # If the itemlist is not a list, exit
if not type(itemlist) == list:
JsonData = {}
JsonData["action"] = "HideLoading"
JsonData["data"] = {}
self.send_message(JsonData)
return
        # If there are no items, show a notice
if not len(itemlist):
itemlist.append(Item(title="No hay elementos que mostrar"))
if parent_item.channel == "channelselector" and not parent_item.action == "filterchannels":
parent_item.viewmode = "banner"
elif parent_item.channel == "channelselector" and parent_item.action == "filterchannels":
parent_item.viewmode = "channel"
if not parent_item.viewmode:
parent_item.viewmode = "list"
        # "Back" item
if not (parent_item.channel == "channelselector" and parent_item.action == "mainlist") and not \
itemlist[0].action == "go_back":
if parent_item.viewmode in ["banner", "channel"]:
itemlist.insert(0, Item(title="Atrás", action="go_back",
thumbnail=channelselector.get_thumb("back.png", "banner_")))
else:
itemlist.insert(0, Item(title="Atrás", action="go_back",
thumbnail=channelselector.get_thumb("back.png")))
JsonData = {}
JsonData["action"] = "EndItems"
JsonData["data"] = {}
JsonData["data"]["itemlist"] = []
JsonData["data"]["viewmode"] = parent_item.viewmode
JsonData["data"]["category"] = parent_item.category.capitalize()
JsonData["data"]["host"] = self.controller.host
if parent_item.url: JsonData["data"]["url"] = parent_item.url
        # Iterate over the itemlist
for item in itemlist:
if not item.thumbnail and item.action == "search": item.thumbnail = channelselector.get_thumb("search.png")
if not item.thumbnail and item.folder == True: item.thumbnail = channelselector.get_thumb("folder.png", "banner")
if not item.thumbnail and item.folder == False: item.thumbnail = channelselector.get_thumb("nofolder.png")
if "http://media.xxxxx/" in item.thumbnail and not item.thumbnail.startswith(
"http://media.xxxxxxxx/thumb_"):
if parent_item.viewmode in ["banner", "channel"]:
item.thumbnail = channelselector.get_thumbnail_path("banner") + os.path.basename(item.thumbnail)
else:
item.thumbnail = channelselector.get_thumbnail_path() + os.path.basename(item.thumbnail)
            # These images have no banner version, so if banner mode is wanted we remove them so they do not look bad
elif parent_item.viewmode in ["banner", "channel"] and item.thumbnail.startswith(
"http://media.xxxxx/thumb_"):
item.thumbnail = ""
            # If the item has no category, use the parent item's
if item.category == "":
item.category = parent_item.category
            # If the item has no fanart, use the parent item's
if item.fanart == "":
item.fanart = parent_item.fanart
title = item.title.replace(item.title.lstrip(), "").replace(" ", " ") + item.title.lstrip()
            # Format the title
if item.text_color:
title = '[COLOR %s]%s[/COLOR]' % (item.text_color, title)
if item.text_bold:
title = '[B]%s[/B]' % title
if item.text_italic:
title = '[I]%s[/I]' % title
title = self.kodi_labels_to_html(title)
            # Add headers to the images if they are hosted on a server behind Cloudflare
from core import httptools
item.thumbnail = httptools.get_url_headers(item.thumbnail)
item.fanart = httptools.get_url_headers(item.fanart)
JsonItem = {}
JsonItem["title"] = title
JsonItem["thumbnail"] = item.thumbnail
JsonItem["fanart"] = item.fanart
JsonItem["plot"] = item.plot
JsonItem["action"] = item.action
JsonItem["url"] = item.tourl()
JsonItem["context"] = []
if not item.action == "go_back":
for Comando in self.set_context_commands(item, parent_item):
JsonItem["context"].append({"title": Comando[0], "url": Comando[1]})
JsonData["data"]["itemlist"].append(JsonItem)
ID = self.send_message(JsonData)
self.get_data(ID)
def set_context_commands(self, item, parent_item):
"""
        Function to generate the context menus.
        1. Starting from the data in item.context
            a. Legacy method: item.context is a str with the options separated by "|" (example: item.context = "1|2|3")
                (predefined entries only)
            b. List method: item.context is a list with the different menu options:
                - Predefined: a predefined option is loaded by name.
                    item.context = ["1","2","3"]
                - dict(): the current item is loaded, modifying the fields included in the dict(); if the
                    channel and action fields are modified, the originals are stored in from_channel and from_action.
                    item.context = [{"title": "Menu name", "action": "menu action", "channel":
                        "menu channel"}, {...}]
        2. Adding options based on criteria
            Options can be added to the context menu of items that meet certain conditions
        3. Adding options for all items
            Options can be added to the context menu of all items
        @param item: element that contains the context menus
        @type item: item
        @param parent_item:
        @type parent_item: item
"""
context_commands = []
        # Build a list with the different options included in item.context
if type(item.context) == str:
context = item.context.split("|")
elif type(item.context) == list:
context = item.context
else:
context = []
        # Options according to item.context
for command in context:
            # Predefined
if type(command) == str:
if command == "buscar_trailer":
context_commands.append(("Buscar Trailer",
item.clone(channel="trailertools", action="buscartrailer",
contextual=True).tourl()))
            # dict format
if type(command) == dict:
                # The dict parameters overwrite those of the new context_item; if "action" and
                # "channel" are overwritten, the original values are kept in "from_action" and "from_channel"
if "action" in command:
command["from_action"] = item.action
if "channel" in command:
command["from_channel"] = item.channel
context_commands.append(
(command["title"], item.clone(**command).tourl()))
        # Options according to criteria
        # Go to the Main Menu (channel.mainlist)
if parent_item.channel not in ["news",
"channelselector"] and item.action != "mainlist" and parent_item.action != "mainlist":
context_commands.append(("Ir al Menu Principal", Item(channel=item.channel, action="mainlist").tourl()))
        # Add to Favorites
if item.channel not in ["favorites", "videolibrary", "help", "setting",
""] and not parent_item.channel == "favorites":
context_commands.append((config.get_localized_string(30155),
item.clone(channel="favorites", action="addFavourite", from_channel=item.channel,
from_action=item.action).tourl()))
        # Add a context option to add the complete series to the video library
if item.channel != "videolibrary" and item.action in ["episodios", "get_episodios"] \
and (item.contentSerieName or item.show):
context_commands.append(("Añadir Serie a Videoteca",
item.clone(action="add_serie_to_library", from_action=item.action).tourl()))
        # Add movie to the video library
if item.channel != "videolibrary" and item.action in ["detail", "findvideos"] \
and item.contentType == 'movie':
context_commands.append(("Añadir Pelicula a Videoteca",
item.clone(action="add_pelicula_to_library", from_action=item.action).tourl()))
        # Download movie
if item.contentType == "movie" and not item.channel == "downloads":
context_commands.append(("Descargar Pelicula",
item.clone(channel="downloads", action="save_download", from_channel=item.channel,
from_action=item.action).tourl()))
        # Download series
if item.contentType == "tvshow" and not item.channel == "downloads":
context_commands.append(("Descargar Serie",
item.clone(channel="downloads", action="save_download", from_channel=item.channel,
from_action=item.action).tourl()))
        # Download episode
if item.contentType == "episode" and not item.channel == "downloads":
context_commands.append(("Descargar Episodio",
item.clone(channel="downloads", action="save_download", from_channel=item.channel,
from_action=item.action).tourl()))
        # Download season
if item.contentType == "season" and not item.channel == "downloads":
context_commands.append(("Descargar Temporada",
item.clone(channel="downloads", action="save_download", from_channel=item.channel,
from_action=item.action).tourl()))
        # Open settings
if parent_item.channel not in ["setting", "news", "search"]:
context_commands.append(("Abrir Configuración", Item(channel="setting", action="mainlist").tourl()))
return sorted(context_commands, key=lambda comand: comand[0])
def dialog_ok(self, heading, line1, line2="", line3=""):
text = line1
if line2: text += "\n" + line2
if line3: text += "\n" + line3
text = self.kodi_labels_to_html(text)
JsonData = {}
JsonData["action"] = "Alert"
JsonData["data"] = {}
JsonData["data"]["title"] = heading
JsonData["data"]["text"] = unicode(text, "utf8", "ignore").encode("utf8")
ID = self.send_message(JsonData)
self.get_data(ID)
def dialog_notification(self, heading, message, icon=0, time=5000, sound=True):
JsonData = {}
JsonData["action"] = "notification"
JsonData["data"] = {}
JsonData["data"]["title"] = self.kodi_labels_to_html(heading)
JsonData["data"]["text"] = self.kodi_labels_to_html(message)
JsonData["data"]["icon"] = icon
JsonData["data"]["sound"] = sound
JsonData["data"]["time"] = time
self.send_message(JsonData)
return
def dialog_yesno(self, heading, line1, line2="", line3="", nolabel="No", yeslabel="Si", autoclose=""):
text = line1
if line2: text += "\n" + line2
if line3: text += "\n" + line3
text = self.kodi_labels_to_html(text)
heading = self.kodi_labels_to_html(heading)
JsonData = {}
JsonData["action"] = "AlertYesNo"
JsonData["data"] = {}
JsonData["data"]["title"] = heading
JsonData["data"]["text"] = text
ID = self.send_message(JsonData)
response = self.get_data(ID)
return response
def dialog_select(self, heading, list):
JsonData = {}
heading = self.kodi_labels_to_html(heading)
JsonData["action"] = "List"
JsonData["data"] = {}
JsonData["data"]["title"] = heading
JsonData["data"]["list"] = []
for Elemento in list:
JsonData["data"]["list"].append(self.kodi_labels_to_html(Elemento))
ID = self.send_message(JsonData)
response = self.get_data(ID)
return response
def dialog_progress(self, heading, line1, line2="", line3=""):
class Dialog(object):
def __init__(self, heading, line1, line2, line3, platformtools):
self.platformtools = platformtools
self.closed = False
self.heading = self.platformtools.kodi_labels_to_html(heading)
text = line1
if line2: text += "\n" + line2
if line3: text += "\n" + line3
text = self.platformtools.kodi_labels_to_html(text)
JsonData = {}
JsonData["action"] = "Progress"
JsonData["data"] = {}
JsonData["data"]["title"] = heading
JsonData["data"]["text"] = text
JsonData["data"]["percent"] = 0
ID = self.platformtools.send_message(JsonData)
self.platformtools.get_data(ID)
def iscanceled(self):
JsonData = {}
JsonData["action"] = "ProgressIsCanceled"
JsonData["data"] = {}
ID = self.platformtools.send_message(JsonData)
response = self.platformtools.get_data(ID)
return response
def update(self, percent, line1, line2="", line3=""):
text = line1
if line2: text += "\n" + line2
if line3: text += "\n" + line3
text = self.platformtools.kodi_labels_to_html(text)
JsonData = {}
JsonData["action"] = "ProgressUpdate"
JsonData["data"] = {}
JsonData["data"]["title"] = self.heading
JsonData["data"]["text"] = text
JsonData["data"]["percent"] = percent
self.platformtools.send_message(JsonData)
def close(self):
JsonData = {}
JsonData["action"] = "ProgressClose"
JsonData["data"] = {}
ID = self.platformtools.send_message(JsonData)
self.platformtools.get_data(ID)
self.closed = True
return Dialog(heading, line1, line2, line3, self)
def dialog_progress_bg(self, heading, message=""):
class Dialog(object):
def __init__(self, heading, message, platformtools):
self.platformtools = platformtools
self.closed = False
self.heading = self.platformtools.kodi_labels_to_html(heading)
message = self.platformtools.kodi_labels_to_html(message)
JsonData = {}
JsonData["action"] = "ProgressBG"
JsonData["data"] = {}
JsonData["data"]["title"] = heading
JsonData["data"]["text"] = message
JsonData["data"]["percent"] = 0
ID = self.platformtools.send_message(JsonData)
self.platformtools.get_data(ID)
def isFinished(self):
return not self.closed
def update(self, percent=0, heading="", message=""):
JsonData = {}
JsonData["action"] = "ProgressBGUpdate"
JsonData["data"] = {}
JsonData["data"]["title"] = self.platformtools.kodi_labels_to_html(heading)
JsonData["data"]["text"] = self.platformtools.kodi_labels_to_html(message)
JsonData["data"]["percent"] = percent
self.platformtools.send_message(JsonData)
def close(self):
JsonData = {}
JsonData["action"] = "ProgressBGClose"
JsonData["data"] = {}
ID = self.platformtools.send_message(JsonData)
self.platformtools.get_data(ID)
self.closed = True
return Dialog(heading, message, self)
def dialog_input(self, default="", heading="", hidden=False):
JsonData = {}
JsonData["action"] = "Keyboard"
JsonData["data"] = {}
JsonData["data"]["title"] = self.kodi_labels_to_html(heading)
JsonData["data"]["text"] = default
JsonData["data"]["password"] = hidden
ID = self.send_message(JsonData)
response = self.get_data(ID)
return response
def dialog_numeric(self, type, heading, default=""):
return self.dialog_input("", heading, False)
def itemlist_refresh(self):
JsonData = {}
JsonData["action"] = "Refresh"
JsonData["data"] = {}
ID = self.send_message(JsonData)
self.get_data(ID)
def itemlist_update(self, item):
JsonData = {}
JsonData["action"] = "Update"
JsonData["data"] = {}
JsonData["data"]["url"] = item.tourl()
ID = self.send_message(JsonData)
self.get_data(ID)
def is_playing(self):
JsonData = {}
JsonData["action"] = "isPlaying"
JsonData["data"] = {}
ID = self.send_message(JsonData)
response = self.get_data(ID)
return response
def play_video(self, item):
if item.contentTitle:
title = item.contentTitle
elif item.fulltitle:
title = item.fulltitle
else:
title = item.title
if item.contentPlot:
plot = item.contentPlot
else:
plot = item.plot
if item.server == "torrent":
self.play_torrent(item)
else:
JsonData = {}
JsonData["action"] = "Play"
JsonData["data"] = {}
JsonData["data"]["title"] = title
JsonData["data"]["plot"] = plot
JsonData["data"]["video_url"] = item.video_url
JsonData["data"]["url"] = item.url
JsonData["data"]["host"] = self.controller.host
ID = self.send_message(JsonData)
self.get_data(ID)
def play_torrent(self, item):
import time
import os
played = False
        # Import the client
from btserver import Client
        # Start the client:
c = Client(url=item.url, is_playing_fnc=self.is_playing, wait_time=None, timeout=5,
temp_path=os.path.join(config.get_data_path(), "torrent"))
        # Show the progress dialog
progreso = self.dialog_progress("Alfa - Torrent", "Iniciando...")
        # While the progress dialog has not been cancelled and the client is not closed
while not progreso.iscanceled() and not c.closed:
try:
                # Get the torrent status
s = c.status
                # Build the three lines with the torrent info
txt = '%.2f%% de %.1fMB %s | %.1f kB/s' % \
(s.progress_file, s.file_size, s.str_state, s._download_rate)
txt2 = 'S: %d(%d) P: %d(%d) | DHT:%s (%d) | Trakers: %d' % \
(
s.num_seeds, s.num_complete, s.num_peers, s.num_incomplete, s.dht_state, s.dht_nodes,
s.trackers)
txt3 = 'Origen Peers TRK: %d DHT: %d PEX: %d LSD %d ' % \
(s.trk_peers, s.dht_peers, s.pex_peers, s.lsd_peers)
progreso.update(s.buffer, txt, txt2, txt3)
time.sleep(1)
                # If the buffer is full and playback has not been started yet, start it
if s.buffer == 100 and not played:
                    # Close the progress dialog
progreso.close()
                    # Get the torrent playlist
item.video_url = c.get_play_list()
item.server = "directo"
self.play_video(item)
                    # Mark as played so it does not start again
played = True
                    # And wait for the player to close
while self.is_playing():
time.sleep(1)
                    # Once it is closed, show the dialog again
progreso = self.dialog_progress("Alfa - Torrent", "Iniciando...")
except:
import traceback
logger.info(traceback.format_exc())
break
progreso.update(100, "Terminando y eliminando datos", " ", " ")
        # Stop the client
if not c.closed:
c.stop()
        # And close the progress dialog
progreso.close()
return
def open_settings(self, items):
from platformcode import config
JsonData = {}
JsonData["action"] = "OpenConfig"
JsonData["data"] = {}
JsonData["data"]["title"] = "Opciones"
JsonData["data"]["items"] = []
for item in items:
if item.get('option') == 'hidden':
item['hidden'] = True
for key in item:
if key in ["lvalues", "label", "category"]:
try:
ops = item[key].split("|")
for x, op in enumerate(ops):
ops[x] = config.get_localized_string(int(ops[x]))
item[key] = "|".join(ops)
except:
pass
JsonData["data"]["items"].append(item)
ID = self.send_message(JsonData)
response = self.get_data(ID)
if response:
from platformcode import config
config.set_settings(response)
JsonData = {}
JsonData["action"] = "HideLoading"
JsonData["data"] = {}
self.send_message(JsonData)
def show_channel_settings(self, list_controls=None, dict_values=None, caption="", callback=None, item=None,
custom_button=None, channelpath=None):
from platformcode import config
from core import channeltools
from core import servertools
import inspect
if not os.path.isdir(os.path.join(config.get_data_path(), "settings_channels")):
os.mkdir(os.path.join(config.get_data_path(), "settings_channels"))
title = caption
if type(custom_button) == dict:
custom_button = {"label": custom_button.get("label", ""),
"function": custom_button.get("function", ""),
"visible": bool(custom_button.get("visible", True)),
"close": bool(custom_button.get("close", False))}
else:
custom_button = None
        # Get the channel the call was made from and load the settings available for that channel
if not channelpath:
channelpath = inspect.currentframe().f_back.f_back.f_code.co_filename
channelname = os.path.basename(channelpath).replace(".py", "")
ch_type = os.path.basename(os.path.dirname(channelpath))
        # If we do not have list_controls, they have to be taken from the channel's json
if not list_controls:
# Si la ruta del canal esta en la carpeta "channels", obtenemos los controles y valores mediante chaneltools
if os.path.join(config.get_runtime_path(), "channels") in channelpath:
                # The call is made from a channel
list_controls, default_values = channeltools.get_channel_controls_settings(channelname)
kwargs = {"channel": channelname}
# Si la ruta del canal esta en la carpeta "servers", obtenemos los controles y valores mediante servertools
elif os.path.join(config.get_runtime_path(), "servers") in channelpath:
                # The call is made from a server
list_controls, default_values = servertools.get_server_controls_settings(channelname)
kwargs = {"server": channelname}
            # Otherwise exit
else:
return None
        # If no dict_values are passed, create an empty dict
if dict_values == None:
dict_values = {}
        # Set the title
if caption == "":
caption = str(config.get_localized_string(30100)) + " -- " + channelname.capitalize()
elif caption.startswith('@') and unicode(caption[1:]).isnumeric():
caption = config.get_localized_string(int(caption[1:]))
JsonData = {}
JsonData["action"] = "OpenConfig"
JsonData["data"] = {}
JsonData["data"]["title"] = self.kodi_labels_to_html(caption)
JsonData["data"]["custom_button"] = custom_button
JsonData["data"]["items"] = []
        # Add controls
for c in list_controls:
if not "default" in c: c["default"] = ""
if not "color" in c: c["color"] = "auto"
if not "label" in c: continue
            # Get the value
if "id" in c:
if not c["id"] in dict_values:
if not callback:
c["value"] = config.get_setting(c["id"], **kwargs)
else:
c["value"] = c["default"]
dict_values[c["id"]] = c["value"]
else:
c["value"] = dict_values[c["id"]]
# Translation
if c['label'].startswith('@') and unicode(c['label'][1:]).isnumeric():
c['label'] = str(config.get_localized_string(c['label'][1:]))
if c["label"].endswith(":"): c["label"] = c["label"][:-1]
if c['type'] == 'list':
lvalues = []
for li in c['lvalues']:
if li.startswith('@') and unicode(li[1:]).isnumeric():
lvalues.append(str(config.get_localized_string(li[1:])))
else:
lvalues.append(li)
c['lvalues'] = lvalues
c["label"] = self.kodi_labels_to_html(c["label"])
JsonData["data"]["items"].append(c)
ID = self.send_message(JsonData)
close = False
while True:
data = self.get_data(ID)
if type(data) == dict:
JsonData["action"] = "HideLoading"
JsonData["data"] = {}
self.send_message(JsonData)
for v in data:
if data[v] == "true": data[v] = True
if data[v] == "false": data[v] = False
if unicode(data[v]).isnumeric(): data[v] = int(data[v])
if callback and '.' in callback:
package, callback = callback.rsplit('.', 1)
else:
package = '%s.%s' % (ch_type, channelname)
cb_channel = None
try:
cb_channel = __import__(package, None, None, [package])
except ImportError:
logger.error('Imposible importar %s' % package)
if callback:
                    # If a callback function exists, invoke it ...
return getattr(cb_channel, callback)(item, data)
else:
                    # if not, check whether the channel has a 'cb_validate_config' function ...
try:
return getattr(cb_channel, 'cb_validate_config')(item, data)
except AttributeError:
                        # ... if 'cb_validate_config' does not exist either ...
for v in data:
config.set_setting(v, data[v], **kwargs)
elif data == "custom_button":
if '.' in callback:
package, callback = callback.rsplit('.', 1)
else:
package = '%s.%s' % (ch_type, channelname)
try:
cb_channel = __import__(package, None, None, [package])
except ImportError:
logger.error('Imposible importar %s' % package)
else:
return_value = getattr(cb_channel, custom_button['function'])(item, dict_values)
if custom_button["close"] == True:
return return_value
else:
JsonData["action"] = "custom_button"
JsonData["data"] = {}
JsonData["data"]["values"] = dict_values
JsonData["data"]["return_value"] = return_value
ID = self.send_message(JsonData)
elif data == False:
return None
def show_video_info(self, data, caption="", item=None, scraper=Tmdb):
from platformcode import html_info_window
return html_info_window.InfoWindow().start(self, data, caption, item, scraper)
def show_recaptcha(self, key, url):
from platformcode import html_recaptcha
return html_recaptcha.recaptcha().start(self, key, url)
def kodi_labels_to_html(self, text):
text = re.sub(r"(?:\[I\])(.*?)(?:\[/I\])", r"<i>\1</i>", text)
text = re.sub(r"(?:\[B\])(.*?)(?:\[/B\])", r"<b>\1</b>", text)
text = re.sub(r"(?:\[COLOR (?:0x)?([0-f]{2})([0-f]{2})([0-f]{2})([0-f]{2})\])(.*?)(?:\[/COLOR\])",
lambda m: "<span style='color: rgba(%s,%s,%s,%s)'>%s</span>" % (
int(m.group(2), 16), int(m.group(3), 16), int(m.group(4), 16), int(m.group(1), 16) / 255.0,
m.group(5)), text)
text = re.sub(r"(?:\[COLOR (?:0x)?([0-f]{2})([0-f]{2})([0-f]{2})\])(.*?)(?:\[/COLOR\])",
r"<span style='color: #\1\2\3'>\4</span>", text)
text = re.sub(r"(?:\[COLOR (?:0x)?([a-z|A-Z]+)\])(.*?)(?:\[/COLOR\])", r"<span style='color: \1'>\2</span>",
text)
return text
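    # Example (illustrative only) of the Kodi-markup-to-HTML conversion performed by
    # kodi_labels_to_html above:
    #   kodi_labels_to_html("[B][COLOR red]Alfa[/COLOR][/B]")
    #   returns "<b><span style='color: red'>Alfa</span></b>"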
|
gpl-3.0
| -5,294,933,708,679,458,000
| 40.85804
| 125
| 0.526156
| false
| 4.03622
| true
| false
| false
|
canihavesomecoffee/sample-platform
|
tests/test_regression/TestControllers.py
|
1
|
26178
|
from unittest import mock
from flask import g
from sqlalchemy import and_
from werkzeug.exceptions import NotFound
from mod_auth.models import Role
from mod_customized.models import CustomizedTest
from mod_regression.models import (Category, InputType, OutputType,
RegressionTest, RegressionTestOutput,
RegressionTestOutputFiles)
from mod_sample.models import Sample
from mod_test.models import Test, TestResultFile
from tests.base import BaseTestCase
class TestControllers(BaseTestCase):
def test_root(self):
response = self.app.test_client().get('/regression/')
self.assertEqual(response.status_code, 200)
self.assert_template_used('regression/index.html')
def test_specific_regression_test_loads(self):
response = self.app.test_client().get('/regression/test/1/view')
self.assertEqual(response.status_code, 200)
self.assert_template_used('regression/test_view.html')
regression_test = RegressionTest.query.filter(RegressionTest.id == 1).first()
self.assertIn(regression_test.command, str(response.data))
def test_regression_test_status_toggle(self):
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
regression_test = RegressionTest.query.filter(RegressionTest.id == 1).first()
response = c.get('/regression/test/1/toggle')
self.assertEqual(response.status_code, 200)
self.assertEqual('success', response.json['status'])
if regression_test.active == 1:
self.assertEqual('False', response.json['active'])
else:
self.assertEqual('True', response.json['active'])
@mock.patch('mod_regression.controllers.RegressionTestOutput')
def test_download_result_file_not_found(self, mock_regression_output):
"""
Test that non-existent result file gives 404.
"""
from mod_regression.controllers import test_result_file
mock_regression_output.query.filter.return_value.first.return_value = None
with self.assertRaises(NotFound):
test_result_file(1)
mock_regression_output.query.filter.assert_called_once_with(mock_regression_output.id == 1)
@mock.patch('mod_regression.controllers.RegressionTestOutputFiles')
def test_download_result_file_not_found_variant(self, mock_regression_output_file):
"""
Test that non-existent result file gives 404.
"""
from mod_regression.controllers import multiple_test_result_file
mock_regression_output_file.query.filter.return_value.first.return_value = None
with self.assertRaises(NotFound):
multiple_test_result_file(1)
mock_regression_output_file.query.filter.assert_called_once_with(mock_regression_output_file.id == 1)
@mock.patch('mod_regression.controllers.serve_file_download')
@mock.patch('mod_regression.controllers.RegressionTestOutput')
def test_download_result_file(self, mock_regression_output, mock_serve):
"""
Test that correct result file triggers serve download.
"""
from mod_regression.controllers import test_result_file
response = test_result_file(1)
mock_regression_output.query.filter.assert_called_once_with(mock_regression_output.id == 1)
mock_serve.assert_called_once()
@mock.patch('mod_regression.controllers.serve_file_download')
@mock.patch('mod_regression.controllers.RegressionTestOutputFiles')
def test_download_result_file_variant(self, mock_regression_output_file, mock_serve):
"""
Test that correct result file triggers serve download for variants.
"""
from mod_regression.controllers import multiple_test_result_file
response = multiple_test_result_file(1)
mock_regression_output_file.query.filter.assert_called_once_with(mock_regression_output_file.id == 1)
mock_serve.assert_called_once()
def test_regression_test_deletion_Without_login(self):
response = self.app.test_client().get('/regression/test/9432/delete')
self.assertEqual(response.status_code, 302)
self.assertIn(b'/account/login?next=regression.test_delete', response.data)
def test_delete_if_will_throw_404(self):
"""
Check if it will throw an error 404
:return:
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
response = c.get('/regression/test/9432/delete')
self.assertEqual(response.status_code, 404)
def test_delete(self):
"""
Check it will delete RegressionTest as well as the Customized test
linked with it
"""
customized_test = CustomizedTest(test_id=1, regression_id=1)
g.db.add(customized_test)
g.db.commit()
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
response = c.get('/regression/test/1/delete')
self.assertEqual(response.status_code, 200)
response = c.post(
'/regression/test/1/delete', data=dict(
hidden='yes',
submit=True
)
)
self.assertEqual(response.status_code, 302)
self.assertEqual(RegressionTest.query.filter(RegressionTest.id == 1).first(), None)
self.assertEqual(CustomizedTest.query.filter(CustomizedTest.regression_id == 1).first(), None)
def test_add_category(self):
"""
Check it will add a category
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
c.post('/regression/category_add',
data=dict(category_name="Lost", category_description="And found", submit=True))
self.assertNotEqual(Category.query.filter(Category.name == "Lost").first(), None)
def test_add_category_empty(self):
"""
Check it won't add a category with an empty name
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
response = c.post(
'/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
response = c.post(
'/regression/category_add', data=dict(category_name="", category_description="And Lost", submit=True))
self.assertEqual(Category.query.filter(Category.name == "").first(), None)
self.assertEqual(Category.query.filter(Category.description == "And Lost").first(), None)
def test_edit_category(self):
"""
Check it will edit a category
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
new_category = Category(name="C-137", description="Wubba lubba dub dub")
g.db.add(new_category)
g.db.commit()
c.post('/regression/category/1/edit',
data=dict(category_name="Sheldon", category_description="That's my spot", submit=True))
self.assertNotEqual(Category.query.filter(Category.name == "Sheldon").first(), None)
def test_edit_category_empty(self):
"""
Check it won't edit a category with an empty name
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
new_category = Category(name="C-137", description="Wubba lubba dub dub")
g.db.add(new_category)
g.db.commit()
c.post('/regression/category/1/edit', data=dict(category_name="", category_description="GG", submit=True))
self.assertEqual(Category.query.filter(Category.name == "").first(), None)
self.assertEqual(Category.query.filter(Category.description == "GG").first(), None)
self.assertNotEqual(Category.query.filter(Category.name == "C-137").first(), None)
def test_edit_wrong_category(self):
"""
Check it will throw 404 if trying to edit a category which doesn't exist
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
new_category = Category(name="C-137", description="Wubba lubba dub dub")
g.db.add(new_category)
g.db.commit()
response = c.post(
'regression/category/1729/edit',
data=dict(category_name="Sheldon", category_description="That's my spot", submit=True)
)
self.assertEqual(response.status_code, 404)
def test_add_test(self):
"""
Check it will add a regression test
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
c.post('/regression/test/new', data=dict(
sample_id=1,
command="-autoprogram -out=ttxt -latin1 -2",
input_type="file",
output_type="file",
category_id=1,
expected_rc=25,
submit=True,
))
self.assertNotEqual(RegressionTest.query.filter(RegressionTest.id == 3).first(), None)
def test_add_test_empty_erc(self):
"""
Check it will not add a regression test with empty Expected Runtime Code
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
c.post('/regression/test/new', data=dict(
sample_id=1,
command="-autoprogram -out=ttxt -latin1 -2",
input_type=InputType.file,
output_type=OutputType.file,
category_id=1,
submit=True,
))
self.assertEqual(RegressionTest.query.filter(RegressionTest.id == 3).first(), None)
def test_category_deletion_without_login(self):
response = self.app.test_client().get('/regression/category/9432/delete')
self.assertEqual(response.status_code, 302)
self.assertIn(b'/account/login?next=regression.category_delete', response.data)
def test_category_delete_if_will_throw_404(self):
"""
Check if it will throw an error 404
:return:
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
response_regression = c.get('/regression/category/9432/delete')
self.assertEqual(response_regression.status_code, 404)
def test_category_delete(self):
"""
Check it will delete the Category
:return:
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
response = c.get('/regression/category/1/delete')
self.assertEqual(response.status_code, 200)
response = c.post('/regression/category/1/delete', data=dict(
hidden='yes',
submit=True
))
self.assertEqual(response.status_code, 302)
def test_edit_test(self):
"""
Check it will edit a regression test
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
c.post('/regression/test/2/edit', data=dict(
sample_id=1,
command="-demogorgans",
input_type="file",
output_type="file",
category_id=2,
expected_rc=25,
submit=True,
))
self.assertNotEqual(RegressionTest.query.filter(RegressionTest.command == "-demogorgans").first(), None)
category = Category.query.filter(Category.id == 1).first()
for i in category.regression_tests:
self.assertNotEqual(i.id, 2)
category = Category.query.filter(Category.id == 2).first()
for i in category.regression_tests:
if i.id == 2:
break
else:
self.fail("No tests in category")
def test_edit_test_empty_erc(self):
"""
Check it will not edit a regression test with empty Expected Runtime Code
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
c.post('/regression/test/1/edit', data=dict(
sample_id=1,
command="-demogorgans",
input_type="file",
output_type="file",
category_id=2,
submit=True,
))
self.assertEqual(RegressionTest.query.filter(RegressionTest.command == "-demogorgans").first(), None)
category = Category.query.filter(Category.id == 1).first()
for i in category.regression_tests:
if i.id == 1:
break
else:
self.fail("No tests in category")
category = Category.query.filter(Category.id == 2).first()
for i in category.regression_tests:
self.assertNotEqual(i.id, 1)
def test_edit_wrong_test(self):
"""
Check it will throw 404 if trying to edit a regression test which doesn't exist
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
response_regression = c.post('/regression/test/42/edit', data=dict(
sample_id=1,
command="-demogorgans",
input_type="file",
output_type="file",
expected_rc=25,
category_id=2,
submit=True,
))
self.assertEqual(response_regression.status_code, 404)
def test_edit_test_same_category(self):
"""
        Check it won't create problems when editing a regression test without changing its category
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
c.post('/regression/test/2/edit', data=dict(
sample_id=1,
command="-demogorgans",
input_type="file",
output_type="file",
category_id=1,
expected_rc=25,
submit=True,
))
self.assertNotEqual(RegressionTest.query.filter(RegressionTest.command == "-demogorgans").first(), None)
category = Category.query.filter(Category.id == 1).first()
for i in category.regression_tests:
if i.id == 2:
break
else:
self.fail("No tests in category")
def test_if_test_regression_view_throws_a_not_found_error(self):
"""
        Check that a non-existent test will throw a 404 error
"""
response = self.app.test_client().get('regression/test/1337/view')
self.assertEqual(response.status_code, 404)
def test_if_test_toggle_view_throws_a_not_found_error(self):
"""
        Check that toggling a non-existent test will throw a 404 error
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
response = c.get('regression/test/1337/toggle')
self.assertEqual(response.status_code, 404)
def test_sample_view(self):
"""
Test if it'll return a valid sample
"""
response = self.app.test_client().get('/regression/sample/1')
sample = Sample.query.filter(Sample.id == 1).first()
self.assertEqual(response.status_code, 200)
self.assert_context('sample', sample)
def test_sample_view_nonexistent(self):
"""
        Test that a non-existent sample returns a 404 error
"""
response = self.app.test_client().get('/regression/sample/13423423')
self.assertEqual(response.status_code, 404)
def test_add_output(self):
"""
        Check if it will add an output
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
c.post('/regression/test/2/output/new',
data=dict(output_file=2, test_id="Test id 2 with output out2", submit=True))
self.assertNotEqual(
RegressionTestOutputFiles.query.filter(
and_(
RegressionTestOutputFiles.regression_test_output_id == 2,
RegressionTestOutputFiles.file_hashes == "out2"
)
).first(),
None
)
def test_add_output_wrong_regression_test(self):
"""
        Check it will throw 404 for a regression_test which doesn't exist
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
response = c.post(
'/regression/test/69420/output/new',
data=dict(output_file=2, test_id="Test id 2 with output out2", submit=True)
)
self.assertEqual(response.status_code, 404)
def test_add_output_without_login(self):
response = self.app.test_client().get('/regression/test/69420/output/new')
self.assertEqual(response.status_code, 302)
self.assertIn(b'/account/login?next=regression.output_add', response.data)
def test_remove_output(self):
"""
        Check if it will remove an output
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
rtof = RegressionTestOutputFiles.query.filter(
and_(
RegressionTestOutputFiles.regression_test_output_id == 2,
RegressionTestOutputFiles.file_hashes == "bluedabadee"
)
).first()
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
response = c.post(
'/regression/test/2/output/remove',
data=dict(output_file=rtof.id, submit=True)
)
self.assertEqual(response.status_code, 302)
self.assertEqual(
RegressionTestOutputFiles.query.filter(
and_(
RegressionTestOutputFiles.id == rtof.id
)
).first(),
None
)
def test_remove_output_wrong_regression_test(self):
"""
Check it will throw 404 for a regression_test which doesn't exist
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
response = c.post(
'/regression/test/69420/output/remove',
data=dict(output_file=2, submit=True)
)
self.assertEqual(response.status_code, 404)
def test_remove_output_without_login(self):
"""
        Check that removing an output without login redirects to the login page
"""
response = self.app.test_client().get('/regression/test/69420/output/remove')
self.assertEqual(response.status_code, 302)
self.assertIn(b'/account/login?next=regression.output_remove', response.data)
def test_add_output_empty_got(self):
"""
        Check if it will add an output with empty got
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
c.post('/regression/test/2/output/new',
data=dict(output_file=1, submit=True))
self.assertEqual(
RegressionTestOutputFiles.query.filter(
and_(
RegressionTestOutputFiles.regression_test_output_id == 1,
)
).count(),
0
)
def test_add_output_empty_output_file(self):
"""
        Check if it will add an output with empty rto
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
c.post('/regression/test/2/output/new',
data=dict(test_id="Test id 2 with output demogorgans", submit=True))
self.assertEqual(
RegressionTestOutputFiles.query.filter(
and_(
RegressionTestOutputFiles.file_hashes == "demogorgans",
)
).count(),
0
)
def test_add_output_wrong_rto_id(self):
"""
        Check if it will add an output with wrong regression_test_output_id
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
c.post('/regression/test/2/output/new',
data=dict(output_file=69420, test_id="Test id 2 with output out2", submit=True))
self.assertEqual(
RegressionTestOutputFiles.query.filter(
and_(
RegressionTestOutputFiles.regression_test_output_id == 69420,
RegressionTestOutputFiles.file_hashes == "out2"
)
).first(),
None
)
def test_add_test_output_and_check_double_hashes(self):
"""
Check if the add output method checks for double hashes
"""
self.create_user_with_role(self.user.name, self.user.email, self.user.password, Role.admin)
add_rt_rto_trf = [
RegressionTest(1, "-autoprogram -out=ttxt -latin1 -2", InputType.file, OutputType.file, 3, 0),
RegressionTestOutput(3, "sample_out3", ".srt", ""),
RegressionTestOutput(3, "sample_out4", ".srt", ""),
TestResultFile(2, 3, 3, "sample_out3", "out3"),
TestResultFile(2, 3, 4, "sample_out4", "out3")
]
g.db.add_all(add_rt_rto_trf)
g.db.commit()
self.assertEqual(
TestResultFile.query.filter(
TestResultFile.got == "out3"
).count(),
2
)
with self.app.test_client() as c:
c.post('/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
c.post('/regression/test/3/output/new',
data=dict(output_file=3, test_id="Test id 2 with output out3", submit=True))
self.assertEqual(
RegressionTestOutputFiles.query.filter(
RegressionTestOutputFiles.file_hashes == "out3"
).count(),
1
)
|
isc
| 886,495,612,370,523,300
| 42.557404
| 118
| 0.596226
| false
| 3.937133
| true
| false
| false
|
bmars/sisko
|
sisko/utils.py
|
1
|
2204
|
# Copyright (C) 2014 Brian Marshall
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import GLib, Gio
# This mapping of icon names to descriptions was borrowed from Nautilus.
_BASIC_CONTENT_TYPES = {
'application-x-executable': _("Program"),
'audio-x-generic': _("Audio"),
'font-x-generic': _("Font"),
'image-x-generic': _("Image"),
'package-x-generic': _("Archive"),
'text-html': _("Markup"),
'text-x-generic': _("Text"),
'text-x-script': _("Program"),
'video-x-generic': _("Video"),
'x-office-document': _("Document"),
'x-office-presentation': _("Document"),
'x-office-spreadsheet': _("Document")
}
def format_time(time: GLib.TimeVal):
"""
Return a short string representing the time. The format will differ
depending on how close to the current date it is.
"""
dt = GLib.DateTime.new_from_timeval_local(time)
now = GLib.DateTime.new_now_local()
if dt.get_ymd() == now.get_ymd():
return dt.format(_("%-I:%M %P"))
elif dt.get_year() == now.get_year():
return dt.format(_("%b %-d"))
else:
return dt.format(_("%b %-d %Y"))
def get_basic_content_type(content_type: str):
"""
Return a short string describing the content type.
"""
if content_type == 'inode/directory':
return _("Folder")
elif content_type == 'inode/symlink':
return _("Link")
elif Gio.content_type_is_unknown(content_type):
return _("Binary")
else:
return _BASIC_CONTENT_TYPES.get(
Gio.content_type_get_generic_icon_name(content_type), _("Unknown"))
|
gpl-3.0
| -8,794,476,572,636,623,000
| 35.733333
| 79
| 0.648367
| false
| 3.630972
| false
| false
| false
|
mtbarta/neural-tagger
|
src/models/dconv3Experiments/skipconn.py
|
1
|
21980
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
from src.util.tf import tensorToSeq, seqToTensor, revlut
import math
from google.protobuf import text_format
from tensorflow.python.platform import gfile
import json
import os
from collections import defaultdict
import src.models.tf_utils as tf_utils
from src.models.initializers import identity_initializer, orthogonal_initializer
def _xform(arr, words_vocab, chars_vocab, mxlen, maxw):
"""
    transforms a list of tokenised sentences into a padded batch dict for the model.
"""
batch = defaultdict(list)
for i in arr:
xs_ch = np.zeros((mxlen, maxw), dtype=np.int)
xs = np.zeros((mxlen), dtype=np.int)
ys = np.zeros((mxlen), dtype=np.int)
v = i
length = mxlen
for j in range(mxlen):
if j == len(v):
length = j
break
w = v[j]
nch = min(len(w), maxw)
xs[j] = words_vocab.get(w, 0)
for k in range(nch):
xs_ch[j,k] = chars_vocab.get(w[k], 0)
batch['x'].append(xs)
batch['y'].append(ys)
batch['xch'].append(xs_ch)
batch['id'].append(i)
batch['length'].append(length)
return batch
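# Illustrative sketch (hypothetical sentence and sizes): _xform pads each tokenised sentence
# into fixed-size id arrays keyed by 'x' (word ids), 'xch' (char ids) and 'length':
#   batch = _xform([["John", "lives", "in", "Paris"]], words_vocab, chars_vocab, mxlen=100, maxw=30)
#   # batch['x'][0].shape   == (100,)      word ids, zero-padded
#   # batch['xch'][0].shape == (100, 30)   char ids per word, zero-padded
#   # batch['length'][0]    == 4           true token count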
class DConv():
def __init__(self, sess, name, version='1'):
self.sess = sess
self.name = name
self.version = version
def predict(self, batch, xform=True, training_phase=False, word_keep=1.0):
if not isinstance(batch, dict):
batch = _xform(batch, self.word_vocab, self.char_vocab, self.maxlen, self.maxw)
lengths = batch["length"]
feed_dict = {self.x: batch["x"],
self.xch: batch["xch"],
self.pkeep: 1.0,
self.word_keep: 1.0,
self.phase: training_phase}
# We can probably conditionally add the loss here
preds = []
with tf.variable_scope(self.name):
if self.crf is True:
probv, tranv = self.sess.run([self.probs, self.A], feed_dict=feed_dict)
for pij, sl in zip(probv, lengths):
unary = pij[:sl]
viterbi, _ = tf.contrib.crf.viterbi_decode(unary, tranv)
preds.append(viterbi)
else:
# Get batch (B, T)
bestv = self.sess.run(self.best, feed_dict=feed_dict)
# Each sentence, probv
for pij, sl in zip(bestv, lengths):
unary = pij[:sl]
preds.append(unary)
if xform:
# print(preds)
return [[self.y_lut[i] for i in sent] for sent in preds]
else:
return preds
@classmethod
def restore(cls, sess, indir, base, checkpoint_name=None):
"""
        This method NEEDS to know the base name used in training for the model.
        While a variable scope is declared, variables are still fetched by name,
        so the base name is duplicated when getting the variables out. It would
        be good to clean this up at some point.
"""
klass = cls(sess, base)
basename = indir + '/' + base
checkpoint_name = checkpoint_name or basename
with open(basename + '.saver') as fsv:
saver_def = tf.train.SaverDef()
text_format.Merge(fsv.read(), saver_def)
print('Loaded saver def')
with gfile.FastGFile(basename + '.graph', 'r') as f:
gd = tf.GraphDef()
gd.ParseFromString(f.read())
sess.graph.as_default()
tf.import_graph_def(gd, name='')
print('Imported graph def')
with tf.variable_scope(base):
sess.run(saver_def.restore_op_name,
{saver_def.filename_tensor_name: checkpoint_name})
klass.x = tf.get_default_graph().get_tensor_by_name(base + '/'+ 'x:0')
klass.xch = tf.get_default_graph().get_tensor_by_name(base + '/'+ 'xch:0')
klass.y = tf.get_default_graph().get_tensor_by_name(base + '/'+ 'y:0')
klass.pkeep = tf.get_default_graph().get_tensor_by_name(base + '/'+ 'pkeep:0')
klass.word_keep = tf.get_default_graph().get_tensor_by_name(base + '/'+ 'word_keep:0')
klass.phase = tf.get_default_graph().get_tensor_by_name(base + '/'+ 'phase:0')
klass.best = tf.get_default_graph().get_tensor_by_name('output/ArgMax:0') # X
klass.probs = tf.get_default_graph().get_tensor_by_name('output/transpose:0') # X
try:
klass.A = tf.get_default_graph().get_tensor_by_name(base + '/'+ 'Loss/block/transitions:0')
print('Found transition matrix in graph, setting crf=True')
klass.crf = True
except:
print('Failed to get transition matrix, setting crf=False')
klass.A = None
klass.crf = False
with open(basename + '.labels', 'r') as f:
klass.labels = json.load(f)
klass.word_vocab = {}
if os.path.exists(basename + '-word.vocab'):
with open(basename + '-word.vocab', 'r') as f:
klass.word_vocab = json.load(f)
with open(basename + '-char.vocab', 'r') as f:
klass.char_vocab = json.load(f)
with open(basename + '-params', 'r') as f:
params = json.load(f)
klass.maxlen = params['maxlen']
klass.maxw = params['maxw']
# self.name = params['model_name']
klass.saver = tf.train.Saver(saver_def=saver_def)
klass.y_lut = revlut(klass.labels)
return klass
def ex2dict(self, batch, pkeep, phase, word_keep):
return {
self.x: batch["x"],
self.xch: batch["xch"],
self.y: batch["y"],
self.pkeep: pkeep,
self.word_keep: word_keep,
self.phase: phase
}
def createLoss(self):
with tf.name_scope("Loss"):
loss = tf.constant(0.0)
gold = tf.cast(self.y, tf.float32)
mask = tf.sign(gold)
lengths = tf.reduce_sum(mask, name="lengths",
reduction_indices=1)
all_total = tf.reduce_sum(lengths, name="total")
#block_scores = tf.unstack(self.intermediate_probs, axis=-1)
block_scores = self.intermediate_probs
print("block_sore length", len(block_scores))
block_no_dropout_scores, _ = self.forward(1.0, 1.0, 1.0, reuse=True)
print("block_score_no_dropout length", len(block_no_dropout_scores))
print("block_score length after anothe fwd", len(block_scores))
all_loss = []
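# Note (added comment): for every intermediate block below we take either a CRF
# sentence-level loss or a masked word-level cross-entropy (averaged over blocks
# at the end), and additionally add an L2 "drop penalty" between each block's
# dropped and dropout-free scores.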
for block, block_no_drop in zip(block_scores, block_no_dropout_scores):
print(block.get_shape())
# reuse = i != 0
# with tf.variable_scope('block', reuse=reuse):
if self.crf is True:
print('crf=True, creating SLL')
viterbi_loss = self._computeSentenceLevelLoss(self.y, mask, lengths, None, block)
all_loss.append(viterbi_loss)
else:
print('crf=False, creating WLL')
all_loss.append(self._computeWordLevelLoss(gold, mask, None, block))
l2_loss = tf.nn.l2_loss(tf.subtract(block, block_no_drop))
loss += self.drop_penalty * l2_loss
loss += tf.reduce_mean(all_loss)
return loss
def _computeSentenceLevelLoss(self, gold, mask, lengths, model, probs):
#zero_elements = tf.equal(lengths, tf.zeros_like(lengths))
#count_zeros_per_row = tf.reduce_sum(tf.to_int32(zero_elements), axis=1)
#flat_sequence_lengths = tf.add(tf.reduce_sum(lengths, 1),
# tf.scalar_mul(2, count_zeros_per_row))
print(probs.get_shape())
print(lengths.get_shape())
print(gold.get_shape())
ll, A = tf.contrib.crf.crf_log_likelihood(probs, gold, lengths, transition_params=self.A)
# print(model.probs)
#all_total = tf.reduce_sum(lengths, name="total")
return tf.reduce_mean(-ll)
def _computeWordLevelLoss(self, gold, mask, model, probs):
nc = len(self.labels)
# Cross entropy loss
cross_entropy = tf.one_hot(self.y, nc, axis=-1) * tf.log(
tf.clip_by_value(tf.nn.softmax(probs), 1e-10, 5.0))
cross_entropy = -tf.reduce_sum(cross_entropy, reduction_indices=2)
cross_entropy *= mask
cross_entropy = tf.reduce_sum(cross_entropy, reduction_indices=1)
all_loss = tf.reduce_mean(cross_entropy, name="loss")
return all_loss
def block(self, wembed, kernel_sz, num_filt, num_layers, reuse=False):
dilation_rate = 2
initialization = 'identity'
nonlinearity = 'relu'
input_tensor = wembed
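# Note (added comment): each of the num_layers convolutions below is an atrous
# (dilated) convolution with rate dilation_rate**i, except the final layer,
# which falls back to rate 1; every layer adds a residual connection onto its
# input before feeding the next layer.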
with tf.variable_scope('iterated-block', reuse=reuse):
for i in range(0, num_layers):
if i == num_layers-1:
dilation_rate = 1
filter_shape = [1, kernel_sz, num_filt, num_filt]
w = tf_utils.initialize_weights(filter_shape, 'conv-'+ str(i) + "_w", init_type=initialization, gain=nonlinearity, divisor=self.num_classes)
b = tf.get_variable('conv-'+ str(i) + "_b", initializer=tf.constant(0.0 if initialization == "identity" or initialization == "varscale" else 0.001, shape=[num_filt]))
conv = tf.nn.atrous_conv2d(input_tensor,
w,
rate=dilation_rate**i,
padding="SAME",
name='conv-'+ str(i))
conv_b = tf.nn.bias_add(conv, b)
nonlinearity = tf_utils.apply_nonlinearity(conv_b, "relu")
input_tensor = nonlinearity + input_tensor
tf.summary.histogram('conv-'+str(i), input_tensor)
# input_tensor = tf.nn.relu(input_tensor, name="relu-"+str(i))
return input_tensor
def params(self, labels, word_vec, char_vec, mxlen,
maxw, rnntype, wsz, hsz, filtsz, num_filt=64,
kernel_size=3, num_layers=4, num_iterations=3,
crf=False):
self.num_iterations = num_iterations
self.num_layers = num_layers
self.kernel_size = kernel_size
self.num_filt = num_filt
self.crf = crf
char_dsz = char_vec.dsz
nc = len(labels)
self.num_classes=nc
self.x = tf.placeholder(tf.int32, [None, mxlen], name="x")
self.xch = tf.placeholder(tf.int32, [None, mxlen, maxw], name="xch")
self.y = tf.placeholder(tf.int32, [None, mxlen], name="y")
self.intermediate_probs = tf.placeholder(tf.int32, [None, mxlen, nc, num_iterations+2], name="intermediate_probs")
self.pkeep = tf.placeholder(tf.float32, name="pkeep")
self.word_keep = tf.placeholder(tf.float32, name="word_keep")
self.labels = labels
self.y_lut = revlut(labels)
self.phase = tf.placeholder(tf.bool, name="phase")
self.l2_loss = tf.constant(0.0)
self.word_vocab = {}
if word_vec is not None:
self.word_vocab = word_vec.vocab
self.char_vocab = char_vec.vocab
self.char_dsz = char_dsz
self.wsz = wsz
self.mxlen = mxlen
self.drop_penalty = 0.001
self.A = tf.get_variable("transitions", [self.num_classes, self.num_classes])
# if num_filt != nc:
# raise RuntimeError('number of filters needs to be equal to number of classes!')
self.filtsz = [int(filt) for filt in filtsz.split(',') ]
with tf.variable_scope('output/'):
W = tf.Variable(tf.truncated_normal([self.num_filt, nc],
stddev = 0.1), name="W")
# W = tf.get_variable('W', initializer=tf.contrib.layers.xavier_initializer(), shape=[num_filt, nc])
b = tf.Variable(tf.constant(0.0, shape=[1,nc]), name="b")
intermediates = []
if word_vec is not None:
with tf.name_scope("WordLUT"):
self.Ww = tf.Variable(tf.constant(word_vec.weights, dtype=tf.float32), name = "W")
self.we0 = tf.scatter_update(self.Ww, tf.constant(0, dtype=tf.int32, shape=[1]), tf.zeros(shape=[1, word_vec.dsz]))
with tf.name_scope("CharLUT"):
self.Wc = tf.Variable(tf.constant(char_vec.weights, dtype=tf.float32), name = "W")
self.ce0 = tf.scatter_update(self.Wc, tf.constant(0, dtype=tf.int32, shape=[1]), tf.zeros(shape=[1, self.char_dsz]))
self.input_dropout_keep_prob = self.word_keep
self.middle_dropout_keep_prob = 1.00
self.hidden_dropout_keep_prob = self.pkeep
self.intermediate_probs, self.probs = self.forward(self.hidden_dropout_keep_prob,
self.input_dropout_keep_prob,
self.middle_dropout_keep_prob,
reuse=False)
self.loss = self.createLoss()
def forward(self, hidden_keep, input_keep, middle_keep, reuse=True):
"""
Used to build the actual graph.
Returns (intermediate_probs, probs); technically probs is just the last layer
of the intermediate probs.
"""
block_unflat_scores = []
with tf.variable_scope("forward", reuse=reuse):
with tf.control_dependencies([self.we0]):
wembed = tf.nn.embedding_lookup(self.Ww, self.x, name="embeddings")
with tf.control_dependencies([self.ce0]):
xch_seq = tensorToSeq(self.xch)
cembed_seq = []
for i, xch_i in enumerate(xch_seq):
cembed_seq.append(shared_char_word(self.Wc, xch_i, self.filtsz, self.char_dsz, self.wsz, None if (i == 0 and not reuse) else True))
word_char = seqToTensor(cembed_seq)
input_feats = tf.concat([wembed, word_char], 2)
input_feats_expanded = tf.expand_dims(input_feats, 1)
input_feats_expanded_drop = tf.nn.dropout(input_feats_expanded, self.input_dropout_keep_prob)
# first projection of embeddings
filter_shape = [1, self.kernel_size, input_feats.get_shape()[2], self.num_filt]
w = tf_utils.initialize_weights(filter_shape, "conv_start" + "_w", init_type='xavier', gain='relu')
b = tf.get_variable("conv_start" + "_b", initializer=tf.constant(0.01, shape=[self.num_filt]))
conv0 = tf.nn.conv2d(input_feats_expanded_drop, w, strides=[1, 1, 1, 1], padding="SAME", name="conv_start")
h0 = tf_utils.apply_nonlinearity(tf.nn.bias_add(conv0, b), 'relu')
initial_inputs = [h0]
last_dims = self.num_filt
self.share_repeats = True
self.projection = False
# Stacked atrous convolutions
last_output = tf.concat(axis=3, values=initial_inputs)
for iteration in range(self.num_iterations):
hidden_outputs = []
total_output_width = self.num_filt
reuse_block = (iteration != 0)
block_name_suff = "" if self.share_repeats else str(iteration)
inner_last_dims = last_dims
inner_last_output = last_output
with tf.variable_scope("block" + block_name_suff, reuse=reuse_block):
block_output = self.block(inner_last_output, self.kernel_size, self.num_filt, self.num_layers, reuse=reuse_block)
# Legacy Strubell logic: we always grab only the last layer of the block here.
h_concat = tf.concat(axis=3, values=[block_output])
last_output = tf.nn.dropout(h_concat, self.middle_dropout_keep_prob)
last_dims = total_output_width
h_concat_squeeze = tf.squeeze(h_concat, [1])
h_concat_flat = tf.reshape(h_concat_squeeze, [-1, total_output_width])
# Add dropout
with tf.name_scope("hidden_dropout"):
h_drop = tf.nn.dropout(h_concat_flat, self.hidden_dropout_keep_prob)
def do_projection():
# Project raw outputs down
with tf.name_scope("projection"):
projection_width = int(total_output_width/(2*len(hidden_outputs)))
w_p = tf_utils.initialize_weights([total_output_width, projection_width], "w_p", init_type="xavier")
b_p = tf.get_variable("b_p", initializer=tf.constant(0.01, shape=[projection_width]))
projected = tf.nn.xw_plus_b(h_drop, w_p, b_p, name="projected")
projected_nonlinearity = tf_utils.apply_nonlinearity(projected, self.nonlinearity)
return projected_nonlinearity, projection_width
# only use projection if we wanted to, and only apply middle dropout here if projection
input_to_pred, proj_width = do_projection() if self.projection else (h_drop, total_output_width)
input_to_pred_drop = tf.nn.dropout(input_to_pred, self.middle_dropout_keep_prob) if self.projection else input_to_pred
# Final (unnormalized) scores and predictions
with tf.name_scope("output"+block_name_suff):
w_o = tf_utils.initialize_weights([proj_width, self.num_classes], "w_o", init_type="xavier")
b_o = tf.get_variable("b_o", initializer=tf.constant(0.01, shape=[self.num_classes]))
self.l2_loss += tf.nn.l2_loss(w_o)
self.l2_loss += tf.nn.l2_loss(b_o)
scores = tf.nn.xw_plus_b(input_to_pred_drop, w_o, b_o, name="scores")
unflat_scores = tf.reshape(scores, tf.stack([-1, self.mxlen, self.num_classes]))
block_unflat_scores.append(unflat_scores)
# probs = unflat_scores
# best = tf.argmax(self.probs, 2)
# intermediate_probs = tf.stack(block_unflat_scores, -1)
return block_unflat_scores, unflat_scores
def log(tensor):
print(tensor)
def highway_conns(inputs, wsz_all, n, reuse):
for i in range(n):
with tf.variable_scope("highway-%d" % i,reuse=reuse):
W_p = tf.get_variable("W_p", [wsz_all, wsz_all])
b_p = tf.get_variable("B_p", [1, wsz_all], initializer=tf.constant_initializer(0.0))
proj = tf.nn.relu(tf.matmul(inputs, W_p) + b_p, "relu-proj")
W_t = tf.get_variable("W_t", [wsz_all, wsz_all])
b_t = tf.get_variable("B_t", [1, wsz_all], initializer=tf.constant_initializer(-2.0))
transform = tf.nn.sigmoid(tf.matmul(inputs, W_t) + b_t, "sigmoid-transform")
inputs = tf.multiply(transform, proj) + tf.multiply(inputs, 1 - transform)
return inputs
def skip_conns(inputs, wsz_all, n, reuse):
for i in range(n):
with tf.variable_scope("skip-%d" % i, reuse=reuse):
W_p = tf.get_variable("W_p", [wsz_all, wsz_all])
b_p = tf.get_variable("B_p", [1, wsz_all], initializer=tf.constant_initializer(0.0))
proj = tf.nn.relu(tf.matmul(inputs, W_p) + b_p, "relu")
inputs = inputs + proj
return inputs
def char_word_conv_embeddings(char_vec, filtsz, char_dsz, wsz, reuse):
"""
char_vec: character embedding tensor for the word being composed
filtsz: list of integer filter widths (parsed earlier from a comma-separated
string such as "1,2,3")
"""
expanded = tf.expand_dims(char_vec, -1)
mots = []
for i, fsz in enumerate(filtsz):
with tf.variable_scope('cmot-%s' % fsz, reuse=reuse):
kernel_shape = [fsz, char_dsz, 1, wsz]
# Weight tying
W = tf.get_variable("W", kernel_shape)
b = tf.get_variable("b", [wsz], initializer=tf.constant_initializer(0.0))
conv = tf.nn.conv2d(expanded,
W, strides=[1,1,1,1],
padding="VALID", name="conv")
activation = tf.nn.relu(tf.nn.bias_add(conv, b), "activation")
mot = tf.reduce_max(activation, [1], keep_dims=True)
# Add back in the dropout
mots.append(mot)
wsz_all = wsz * len(mots)
combine = tf.reshape(tf.concat(values=mots, axis=3), [-1, wsz_all])
joined = highway_conns(combine, wsz_all, 1, reuse)
# joined = skip_conns(combine, wsz_all, 1, reuse)
return joined
def shared_char_word(Wch, xch_i, filtsz, char_dsz, wsz, reuse):
with tf.variable_scope("SharedCharWord", reuse=reuse):
# Zeropad the letters out to half the max filter size, to account for
# wide convolution. This way we don't have to explicitly pad the
# data upfront, which means our Y sequences can be assumed not to
# start with zeros
mxfiltsz = np.max(filtsz)
halffiltsz = int(math.floor(mxfiltsz / 2))
zeropad = tf.pad(xch_i, [[0,0], [halffiltsz, halffiltsz]], "CONSTANT")
cembed = tf.nn.embedding_lookup(Wch, zeropad)
if len(filtsz) == 0 or filtsz[0] == 0:
return tf.reduce_sum(cembed, [1])
return char_word_conv_embeddings(cembed, filtsz, char_dsz, wsz, reuse)
def tensor2seq(tensor):
return tf.unstack(tf.transpose(tensor, perm=[1, 0, 2]))
def seq2tensor(sequence):
return tf.transpose(tf.stack(sequence), perm=[1, 0, 2])
|
gpl-3.0
| 1,162,317,117,842,288,600
| 41.514507
| 182
| 0.553321
| false
| 3.554334
| false
| false
| false
|
nlevitt/brozzler
|
vagrant/vagrant-brozzler-new-site.py
|
1
|
3061
|
#!/usr/bin/env python
'''
vagrant-brozzler-new-site.py - runs brozzler-new-site inside the vagrant vm to
queue a site for your vagrant brozzler deployment.
Fills in the --proxy option automatically. Some other options are passed
through.
This is a standalone script with no dependencies other than python, and should
work with python 2.7 or python 3.2+. The only reason it's not a bash script is
so we can use the argparse library.
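
Example (illustrative; the seed URL and time limit are made up):

    python vagrant-brozzler-new-site.py --time-limit=3600 https://example.com/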
Copyright (C) 2016 Internet Archive
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import sys
import os
import argparse
import subprocess
try:
from shlex import quote
except:
from pipes import quote
def main(argv=[]):
arg_parser = argparse.ArgumentParser(prog=os.path.basename(argv[0]))
arg_parser.add_argument('seed', metavar='SEED', help='seed url')
arg_parser.add_argument(
'--time-limit', dest='time_limit', default=None,
help='time limit in seconds for this site')
arg_parser.add_argument(
'--ignore-robots', dest='ignore_robots', action='store_true',
help='ignore robots.txt for this site')
arg_parser.add_argument(
'--warcprox-meta', dest='warcprox_meta',
help=(
'Warcprox-Meta http request header to send with each request; '
'must be a json blob, ignored unless warcprox features are '
'enabled'))
arg_parser.add_argument(
'-q', '--quiet', dest='quiet', action='store_true')
arg_parser.add_argument(
'-v', '--verbose', dest='verbose', action='store_true')
args = arg_parser.parse_args(args=argv[1:])
options = []
if args.time_limit:
options.append('--time-limit=%s' % args.time_limit)
if args.ignore_robots:
options.append('--ignore-robots')
if args.warcprox_meta:
# I think this shell escaping is correct?
options.append(
'--warcprox-meta=%s' % quote(args.warcprox_meta))
if args.quiet:
options.append('--quiet')
if args.verbose:
options.append('--verbose')
# cd to path with Vagrantfile so "vagrant ssh" knows what to do
os.chdir(os.path.dirname(__file__))
cmd = (
'PYTHONPATH=/home/vagrant/brozzler-ve34/lib/python3.4/site-packages '
'/home/vagrant/brozzler-ve34/bin/python '
'/home/vagrant/brozzler-ve34/bin/brozzler-new-site '
'--proxy=localhost:8000 %s %s') % (
' '.join(options), args.seed)
subprocess.call(['vagrant', 'ssh', '--', cmd])
if __name__ == '__main__':
main(sys.argv)
|
apache-2.0
| -5,619,217,285,461,164,000
| 34.593023
| 79
| 0.656648
| false
| 3.705811
| false
| false
| false
|
jacor-/TuentiChallenge5
|
problem16/snippet.py
|
1
|
3091
|
"""
To solve this problem we have used the Full Whiskas Model example from the
PuLP package for Python. I have no clue about linear optimization... so this
package has been infinitely helpful. The code does not require much
explanation and there is not much time remaining in the contest... so I won't
comment on anything else!
"""
# Import PuLP modeler functions
from pulp import *
# Creates a list of the Ingredients
import numpy
import fileinput
inp = fileinput.input()
num_cases = int(inp.next());
for case in range(num_cases):
arboles, prediccio, lenadores = map(int,inp.next()[:-1].split(" ")[:3])
Treball_maxim = []
Work_required = []
for jj in range(lenadores):
work_list = [int(i) for i in inp.next()[:-1].split(" ") if len(i) > 0]
Treball_maxim.append(work_list[0])
Work_required.append(work_list[1:])
Dedicacio = []
for arbolito in range(arboles):
for lenador in range(lenadores):
Dedicacio.append("%d:%d"%(arbolito, lenador))
ArbolAssolible = []
for lenador in range(lenadores):
ArbolAssolible.append([])
for arbol in range(arboles):
ArbolAssolible[-1].append(float(Treball_maxim[lenador])/Work_required[lenador][arbol])
prob = LpProblem("My paranoia problem", LpMinimize)
ingredient_vars = LpVariable.dicts("Dedicacio ",Dedicacio,lowBound=0.,upBound=1.)#,0)
main_cost = []
### The total cost looks good...
for lenador in range(lenadores):
main_cost.append(lpSum([ingredient_vars["%d:%d"%(arbolito, lenador)] for arbolito in range(arboles)]) *Treball_maxim[lenador])
prob += lpSum(main_cost)#, "Total Cost of Ingredients per can"
for arbolito in range(arboles):
for lenador in range(lenadores):
prob += lpSum([ingredient_vars["%d:%d"%(arbolito, lenador)] * ArbolAssolible[lenador][arbolito] ]) <= 1, ' garantizando que no curro por encima de mis posibilidades %d %d menor que uno' % (arbolito, lenador)
for lenador in range(lenadores):
prob += lpSum([ingredient_vars["%d:%d"%(arbolito, lenador)] for arbolito in range(arboles)]) <= 1
for arbol in range(arboles):
prob += lpSum([ingredient_vars["%d:%d"%(arbol, lenador)]*ArbolAssolible[lenador][arbol] for lenador in range(lenadores)]) == 1, ' totalidad arbol %d cortado' % arbol
for arbolito in range(arboles):
for lenador in range(lenadores):
prob += lpSum([ingredient_vars["%d:%d"%(arbolito, lenador)]]) >= 0, ' garantizando dedicacion %d %d positivo' % (arbolito, lenador)
# The problem data is written to an .lp file
prob.writeLP("WhiskasModel2.lp")
# The problem is solved using PuLP's choice of Solver
prob.solve()
if LpStatus[prob.status] == "Infeasible":
print "Test case #%d: IMPOSSIBLE" % (case+1)
elif numpy.around(prediccio,2) < numpy.around(value(prob.objective),2):
print "Test case #%d: %0.2f" % (case+1, value(prob.objective)-prediccio)
else:
print "Test case #%d: RIGHT" % (case+1)
|
mit
| 1,703,887,847,516,692,000
| 38.126582
| 219
| 0.650922
| false
| 2.99806
| false
| false
| false
|
kdrone/crazyflie-python-client
|
build/lib.linux-i686-2.7/cfclient/ui/main.py
|
1
|
21200
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2011-2013 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
The main file for the Crazyflie control application.
"""
__author__ = 'Bitcraze AB'
__all__ = ['MainUI']
import sys
import logging
logger = logging.getLogger(__name__)
from PyQt4 import QtGui, uic
from PyQt4.QtCore import pyqtSignal, Qt, pyqtSlot, QDir, QUrl
from PyQt4.QtGui import QLabel, QActionGroup, QMessageBox, QAction, QDesktopServices
from dialogs.connectiondialogue import ConnectDialogue
from dialogs.inputconfigdialogue import InputConfigDialogue
from dialogs.cf2config import Cf2ConfigDialog
from dialogs.cf1config import Cf1ConfigDialog
from cflib.crazyflie import Crazyflie
from dialogs.logconfigdialogue import LogConfigDialogue
from cfclient.utils.input import JoystickReader
from cfclient.utils.guiconfig import GuiConfig
from cfclient.utils.logconfigreader import LogConfigReader
from cfclient.utils.config_manager import ConfigManager
import cfclient.ui.toolboxes
import cfclient.ui.tabs
import cflib.crtp
from cflib.crazyflie.log import Log, LogVariable, LogConfig
from cfclient.ui.dialogs.bootloader import BootloaderDialog
from cfclient.ui.dialogs.about import AboutDialog
(main_window_class,
main_windows_base_class) = (uic.loadUiType(sys.path[0] +
'/cfclient/ui/main.ui'))
class MyDockWidget(QtGui.QDockWidget):
closed = pyqtSignal()
def closeEvent(self, event):
super(MyDockWidget, self).closeEvent(event)
self.closed.emit()
class UIState:
DISCONNECTED = 0
CONNECTING = 1
CONNECTED = 2
class MainUI(QtGui.QMainWindow, main_window_class):
connectionLostSignal = pyqtSignal(str, str)
connectionInitiatedSignal = pyqtSignal(str)
batteryUpdatedSignal = pyqtSignal(int, object, object)
connectionDoneSignal = pyqtSignal(str)
connectionFailedSignal = pyqtSignal(str, str)
disconnectedSignal = pyqtSignal(str)
linkQualitySignal = pyqtSignal(int)
_input_device_error_signal = pyqtSignal(str)
_input_discovery_signal = pyqtSignal(object)
_log_error_signal = pyqtSignal(object, str)
def __init__(self, *args):
super(MainUI, self).__init__(*args)
self.setupUi(self)
self.cf = Crazyflie(ro_cache=sys.path[0] + "/cflib/cache",
rw_cache=sys.path[1] + "/cache")
cflib.crtp.init_drivers(enable_debug_driver=GuiConfig()
.get("enable_debug_driver"))
# Create the connection dialogue
self.connectDialogue = ConnectDialogue()
# Create and start the Input Reader
self._statusbar_label = QLabel("Loading device and configuration.")
self.statusBar().addWidget(self._statusbar_label)
self.joystickReader = JoystickReader()
self._active_device = ""
self.configGroup = QActionGroup(self._menu_mappings, exclusive=True)
self._load_input_data()
self._update_input()
ConfigManager().conf_needs_reload.add_callback(self._reload_configs)
# Connections for the Connect Dialogue
self.connectDialogue.requestConnectionSignal.connect(self.cf.open_link)
self.connectionDoneSignal.connect(self.connectionDone)
self.cf.connection_failed.add_callback(self.connectionFailedSignal.emit)
self.connectionFailedSignal.connect(self.connectionFailed)
self._input_device_error_signal.connect(self.inputDeviceError)
self.joystickReader.device_error.add_callback(
self._input_device_error_signal.emit)
self._input_discovery_signal.connect(self.device_discovery)
self.joystickReader.device_discovery.add_callback(
self._input_discovery_signal.emit)
# Connect UI signals
self.menuItemConnect.triggered.connect(self.connectButtonClicked)
self.logConfigAction.triggered.connect(self.doLogConfigDialogue)
self.connectButton.clicked.connect(self.connectButtonClicked)
self.quickConnectButton.clicked.connect(self.quickConnect)
self.menuItemQuickConnect.triggered.connect(self.quickConnect)
self.menuItemConfInputDevice.triggered.connect(self.configInputDevice)
self.menuItemExit.triggered.connect(self.closeAppRequest)
self.batteryUpdatedSignal.connect(self.updateBatteryVoltage)
self._menuitem_rescandevices.triggered.connect(self._rescan_devices)
self._menuItem_openconfigfolder.triggered.connect(self._open_config_folder)
self._auto_reconnect_enabled = GuiConfig().get("auto_reconnect")
self.autoReconnectCheckBox.toggled.connect(
self._auto_reconnect_changed)
self.autoReconnectCheckBox.setChecked(GuiConfig().get("auto_reconnect"))
# Do not queue data from the controller output to the Crazyflie wrapper
# to avoid latency
#self.joystickReader.sendControlSetpointSignal.connect(
# self.cf.commander.send_setpoint,
# Qt.DirectConnection)
self.joystickReader.input_updated.add_callback(
self.cf.commander.send_setpoint)
# Connection callbacks and signal wrappers for UI protection
self.cf.connected.add_callback(
self.connectionDoneSignal.emit)
self.connectionDoneSignal.connect(self.connectionDone)
self.cf.disconnected.add_callback(self.disconnectedSignal.emit)
self.disconnectedSignal.connect(
lambda linkURI: self.setUIState(UIState.DISCONNECTED,
linkURI))
self.cf.connection_lost.add_callback(self.connectionLostSignal.emit)
self.connectionLostSignal.connect(self.connectionLost)
self.cf.connection_requested.add_callback(
self.connectionInitiatedSignal.emit)
self.connectionInitiatedSignal.connect(
lambda linkURI: self.setUIState(UIState.CONNECTING,
linkURI))
self._log_error_signal.connect(self._logging_error)
# Connect link quality feedback
self.cf.link_quality_updated.add_callback(self.linkQualitySignal.emit)
self.linkQualitySignal.connect(
lambda percentage: self.linkQualityBar.setValue(percentage))
# Set UI state to disconnected by default
self.setUIState(UIState.DISCONNECTED)
# Parse the log configuration files
self.logConfigReader = LogConfigReader(self.cf)
# Add things to helper so tabs can access it
cfclient.ui.pluginhelper.cf = self.cf
cfclient.ui.pluginhelper.inputDeviceReader = self.joystickReader
cfclient.ui.pluginhelper.logConfigReader = self.logConfigReader
self.logConfigDialogue = LogConfigDialogue(cfclient.ui.pluginhelper)
self._bootloader_dialog = BootloaderDialog(cfclient.ui.pluginhelper)
self._cf2config_dialog = Cf2ConfigDialog(cfclient.ui.pluginhelper)
self._cf1config_dialog = Cf1ConfigDialog(cfclient.ui.pluginhelper)
self.menuItemBootloader.triggered.connect(self._bootloader_dialog.show)
self._about_dialog = AboutDialog(cfclient.ui.pluginhelper)
self.menuItemAbout.triggered.connect(self._about_dialog.show)
self._menu_cf2_config.triggered.connect(self._cf2config_dialog.show)
self._menu_cf1_config.triggered.connect(self._cf1config_dialog.show)
# Loading toolboxes (a bit of magic for a lot of automation)
self.toolboxes = []
self.toolboxesMenuItem.setMenu(QtGui.QMenu())
for t_class in cfclient.ui.toolboxes.toolboxes:
toolbox = t_class(cfclient.ui.pluginhelper)
dockToolbox = MyDockWidget(toolbox.getName())
dockToolbox.setWidget(toolbox)
self.toolboxes += [dockToolbox, ]
# Add menu item for the toolbox
item = QtGui.QAction(toolbox.getName(), self)
item.setCheckable(True)
item.triggered.connect(self.toggleToolbox)
self.toolboxesMenuItem.menu().addAction(item)
dockToolbox.closed.connect(lambda: self.toggleToolbox(False))
# Setup some introspection
item.dockToolbox = dockToolbox
item.menuItem = item
dockToolbox.dockToolbox = dockToolbox
dockToolbox.menuItem = item
# Load and connect tabs
self.tabsMenuItem.setMenu(QtGui.QMenu())
tabItems = {}
self.loadedTabs = []
for tabClass in cfclient.ui.tabs.available:
tab = tabClass(self.tabs, cfclient.ui.pluginhelper)
item = QtGui.QAction(tab.getMenuName(), self)
item.setCheckable(True)
item.toggled.connect(tab.toggleVisibility)
self.tabsMenuItem.menu().addAction(item)
tabItems[tab.getTabName()] = item
self.loadedTabs.append(tab)
if not tab.enabled:
item.setEnabled(False)
# First instantiate all tabs and then open them in the correct order
try:
for tName in GuiConfig().get("open_tabs").split(","):
t = tabItems[tName]
if (t != None and t.isEnabled()):
# Toggle through menu so it's also marked as open there
t.toggle()
except Exception as e:
logger.warning("Exception while opening tabs [%s]", e)
def setUIState(self, newState, linkURI=""):
self.uiState = newState
if (newState == UIState.DISCONNECTED):
self.setWindowTitle("HANSEI Not connected")
self.menuItemConnect.setText("Connect to Crazyflie")
self.connectButton.setText("Connect")
self.menuItemQuickConnect.setEnabled(True)
self.batteryBar.setValue(3000)
self._menu_cf2_config.setEnabled(False)
self.linkQualityBar.setValue(0)
self.menuItemBootloader.setEnabled(True)
self.logConfigAction.setEnabled(False)
if (len(GuiConfig().get("link_uri")) > 0):
self.quickConnectButton.setEnabled(True)
if (newState == UIState.CONNECTED):
s = "Connected on %s" % linkURI
self.setWindowTitle(s)
self.menuItemConnect.setText("Disconnect")
self.connectButton.setText("Disconnect")
self.logConfigAction.setEnabled(True)
self._menu_cf2_config.setEnabled(True)
if (newState == UIState.CONNECTING):
s = "Connecting to %s ..." % linkURI
self.setWindowTitle(s)
self.menuItemConnect.setText("Cancel")
self.connectButton.setText("Cancel")
self.quickConnectButton.setEnabled(False)
self.menuItemBootloader.setEnabled(False)
self.menuItemQuickConnect.setEnabled(False)
@pyqtSlot(bool)
def toggleToolbox(self, display):
menuItem = self.sender().menuItem
dockToolbox = self.sender().dockToolbox
if display and not dockToolbox.isVisible():
dockToolbox.widget().enable()
self.addDockWidget(dockToolbox.widget().preferedDockArea(),
dockToolbox)
dockToolbox.show()
elif not display:
dockToolbox.widget().disable()
self.removeDockWidget(dockToolbox)
dockToolbox.hide()
menuItem.setChecked(False)
def _rescan_devices(self):
self._statusbar_label.setText("No inputdevice connected!")
self._menu_devices.clear()
self._active_device = ""
self.joystickReader.stop_input()
for c in self._menu_mappings.actions():
c.setEnabled(False)
devs = self.joystickReader.getAvailableDevices()
if (len(devs) > 0):
self.device_discovery(devs)
def configInputDevice(self):
self.inputConfig = InputConfigDialogue(self.joystickReader)
self.inputConfig.show()
def _auto_reconnect_changed(self, checked):
self._auto_reconnect_enabled = checked
GuiConfig().set("auto_reconnect", checked)
logger.info("Auto reconnect enabled: %s", checked)
def doLogConfigDialogue(self):
self.logConfigDialogue.show()
def updateBatteryVoltage(self, timestamp, data, logconf):
self.batteryBar.setValue(int(data["pm.vbat"] * 1000))
def connectionDone(self, linkURI):
self.setUIState(UIState.CONNECTED, linkURI)
GuiConfig().set("link_uri", linkURI)
lg = LogConfig("Battery", 1000)
lg.add_variable("pm.vbat", "float")
self.cf.log.add_config(lg)
if lg.valid:
lg.data_received_cb.add_callback(self.batteryUpdatedSignal.emit)
lg.error_cb.add_callback(self._log_error_signal.emit)
lg.start()
else:
logger.warning("Could not setup loggingblock!")
def _logging_error(self, log_conf, msg):
QMessageBox.about(self, "Log error", "Error when starting log config"
" [%s]: %s" % (log_conf.name, msg))
def connectionLost(self, linkURI, msg):
if not self._auto_reconnect_enabled:
if (self.isActiveWindow()):
warningCaption = "Communication failure"
error = "Connection lost to %s: %s" % (linkURI, msg)
QMessageBox.critical(self, warningCaption, error)
self.setUIState(UIState.DISCONNECTED, linkURI)
else:
self.quickConnect()
def connectionFailed(self, linkURI, error):
if not self._auto_reconnect_enabled:
msg = "Failed to connect on %s: %s" % (linkURI, error)
warningCaption = "Communication failure"
QMessageBox.critical(self, warningCaption, msg)
self.setUIState(UIState.DISCONNECTED, linkURI)
else:
self.quickConnect()
def closeEvent(self, event):
self.hide()
self.cf.close_link()
GuiConfig().save_file()
def connectButtonClicked(self):
if (self.uiState == UIState.CONNECTED):
self.cf.close_link()
elif (self.uiState == UIState.CONNECTING):
self.cf.close_link()
self.setUIState(UIState.DISCONNECTED)
else:
self.connectDialogue.show()
def inputDeviceError(self, error):
self.cf.close_link()
QMessageBox.critical(self, "Input device error", error)
def _load_input_data(self):
self.joystickReader.stop_input()
# Populate combo box with available input device configurations
for c in ConfigManager().get_list_of_configs():
node = QAction(c,
self._menu_mappings,
checkable=True,
enabled=False)
node.toggled.connect(self._inputconfig_selected)
self.configGroup.addAction(node)
self._menu_mappings.addAction(node)
def _reload_configs(self, newConfigName):
# remove the old actions from the group and the menu
for action in self._menu_mappings.actions():
self.configGroup.removeAction(action)
self._menu_mappings.clear()
# reload the conf files, and populate the menu
self._load_input_data()
self._update_input(self._active_device, newConfigName)
def _update_input(self, device="", config=""):
self.joystickReader.stop_input()
self._active_config = str(config)
self._active_device = str(device)
GuiConfig().set("input_device", self._active_device)
GuiConfig().get(
"device_config_mapping"
)[self._active_device] = self._active_config
self.joystickReader.start_input(self._active_device,
self._active_config)
# update the checked state of the menu items
for c in self._menu_mappings.actions():
c.setEnabled(True)
if c.text() == self._active_config:
c.setChecked(True)
for c in self._menu_devices.actions():
c.setEnabled(True)
if c.text() == self._active_device:
c.setChecked(True)
# update label
if device == "" and config == "":
self._statusbar_label.setText("No input device selected")
elif config == "":
self._statusbar_label.setText("Using [%s] - "
"No input config selected" %
(self._active_device))
else:
self._statusbar_label.setText("Using [%s] with config [%s]" %
(self._active_device,
self._active_config))
def _inputdevice_selected(self, checked):
if (not checked):
return
self.joystickReader.stop_input()
sender = self.sender()
self._active_device = sender.text()
device_config_mapping = GuiConfig().get("device_config_mapping")
if (self._active_device in device_config_mapping.keys()):
self._current_input_config = device_config_mapping[
str(self._active_device)]
else:
self._current_input_config = self._menu_mappings.actions()[0].text()
GuiConfig().set("input_device", str(self._active_device))
for c in self._menu_mappings.actions():
if (c.text() == self._current_input_config):
c.setChecked(True)
self.joystickReader.start_input(str(sender.text()),
self._current_input_config)
self._statusbar_label.setText("Using [%s] with config [%s]" % (
self._active_device,
self._current_input_config))
def _inputconfig_selected(self, checked):
if (not checked):
return
self._update_input(self._active_device, self.sender().text())
def device_discovery(self, devs):
group = QActionGroup(self._menu_devices, exclusive=True)
for d in devs:
node = QAction(d["name"], self._menu_devices, checkable=True)
node.toggled.connect(self._inputdevice_selected)
group.addAction(node)
self._menu_devices.addAction(node)
if (d["name"] == GuiConfig().get("input_device")):
self._active_device = d["name"]
if (len(self._active_device) == 0):
self._active_device = self._menu_devices.actions()[0].text()
device_config_mapping = GuiConfig().get("device_config_mapping")
if (device_config_mapping):
if (self._active_device in device_config_mapping.keys()):
self._current_input_config = device_config_mapping[
str(self._active_device)]
else:
self._current_input_config = self._menu_mappings.actions()[0].text()
else:
self._current_input_config = self._menu_mappings.actions()[0].text()
# Now we know what device to use and what mapping, trigger the events
# to change the menus and start the input
for c in self._menu_mappings.actions():
c.setEnabled(True)
if (c.text() == self._current_input_config):
c.setChecked(True)
for c in self._menu_devices.actions():
if (c.text() == self._active_device):
c.setChecked(True)
def quickConnect(self):
try:
self.cf.open_link(GuiConfig().get("link_uri"))
except KeyError:
self.cf.open_link("")
def _open_config_folder(self):
QDesktopServices.openUrl(QUrl("file:///" + QDir.toNativeSeparators(sys.path[1])))
def closeAppRequest(self):
self.close()
sys.exit(0)
|
gpl-2.0
| 9,170,019,072,597,132,000
| 40.48728
| 89
| 0.614717
| false
| 4.073007
| true
| false
| false
|
Joev-/HoNCore
|
honcore/lib/construct/adapters.py
|
1
|
15909
|
from core import Adapter, AdaptationError, Pass
from lib import int_to_bin, bin_to_int, swap_bytes, StringIO
from lib import FlagsContainer, HexString
#===============================================================================
# exceptions
#===============================================================================
class BitIntegerError(AdaptationError):
__slots__ = []
class MappingError(AdaptationError):
__slots__ = []
class ConstError(AdaptationError):
__slots__ = []
class ValidationError(AdaptationError):
__slots__ = []
class PaddingError(AdaptationError):
__slots__ = []
#===============================================================================
# adapters
#===============================================================================
class BitIntegerAdapter(Adapter):
"""
Adapter for bit-integers (converts bitstrings to integers, and vice versa).
See BitField.
Parameters:
* subcon - the subcon to adapt
* width - the size of the subcon, in bits
* swapped - whether to swap byte order (little endian/big endian).
default is False (big endian)
* signed - whether the value is signed (two's complement). the default
is False (unsigned)
* bytesize - number of bits per byte, used for byte-swapping (if swapped).
default is 8.
"""
__slots__ = ["width", "swapped", "signed", "bytesize"]
def __init__(self, subcon, width, swapped = False, signed = False,
bytesize = 8):
Adapter.__init__(self, subcon)
self.width = width
self.swapped = swapped
self.signed = signed
self.bytesize = bytesize
def _encode(self, obj, context):
if obj < 0 and not self.signed:
raise BitIntegerError("object is negative, but field is not signed",
obj)
obj2 = int_to_bin(obj, width = self.width)
if self.swapped:
obj2 = swap_bytes(obj2, bytesize = self.bytesize)
return obj2
def _decode(self, obj, context):
if self.swapped:
obj = swap_bytes(obj, bytesize = self.bytesize)
return bin_to_int(obj, signed = self.signed)
class MappingAdapter(Adapter):
"""
Adapter that maps objects to other objects.
See SymmetricMapping and Enum.
Parameters:
* subcon - the subcon to map
* decoding - the decoding (parsing) mapping (a dict)
* encoding - the encoding (building) mapping (a dict)
* decdefault - the default return value when the object is not found
in the decoding mapping. if no object is given, an exception is raised.
if `Pass` is used, the unmapped object will be passed as-is
* encdefault - the default return value when the object is not found
in the encoding mapping. if no object is given, an exception is raised.
if `Pass` is used, the unmapped object will be passed as-is
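
Example (illustrative sketch; UBInt8 is assumed to be available from this
library's macros):
MappingAdapter(UBInt8("state"),
decoding = {0 : "off", 1 : "on"},
encoding = {"off" : 0, "on" : 1},
)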
"""
__slots__ = ["encoding", "decoding", "encdefault", "decdefault"]
def __init__(self, subcon, decoding, encoding,
decdefault = NotImplemented, encdefault = NotImplemented):
Adapter.__init__(self, subcon)
self.decoding = decoding
self.encoding = encoding
self.decdefault = decdefault
self.encdefault = encdefault
def _encode(self, obj, context):
try:
return self.encoding[obj]
except (KeyError, TypeError):
if self.encdefault is NotImplemented:
raise MappingError("no encoding mapping for %r" % (obj,))
if self.encdefault is Pass:
return obj
return self.encdefault
def _decode(self, obj, context):
try:
return self.decoding[obj]
except (KeyError, TypeError):
if self.decdefault is NotImplemented:
raise MappingError("no decoding mapping for %r" % (obj,))
if self.decdefault is Pass:
return obj
return self.decdefault
class FlagsAdapter(Adapter):
"""
Adapter for flag fields. Each flag is extracted from the number, resulting
in a FlagsContainer object. Not intended for direct usage.
See FlagsEnum.
Parameters
* subcon - the subcon to extract
* flags - a dictionary mapping flag-names to their value
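
Example (illustrative sketch; UBInt8 is assumed from this library, and in
practice FlagsEnum builds this adapter for you):
FlagsAdapter(UBInt8("perm"),
flags = {"read" : 4, "write" : 2, "execute" : 1}
)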
"""
__slots__ = ["flags"]
def __init__(self, subcon, flags):
Adapter.__init__(self, subcon)
self.flags = flags
def _encode(self, obj, context):
flags = 0
for name, value in self.flags.iteritems():
if getattr(obj, name, False):
flags |= value
return flags
def _decode(self, obj, context):
obj2 = FlagsContainer()
for name, value in self.flags.iteritems():
setattr(obj2, name, bool(obj & value))
return obj2
class StringAdapter(Adapter):
"""
Adapter for strings. Converts a sequence of characters into a python
string, and optionally handles character encoding.
See String.
Parameters:
* subcon - the subcon to convert
* encoding - the character encoding name (e.g., "utf8"), or None to
return raw bytes (usually 8-bit ASCII).
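
Example (illustrative sketch; Field is assumed from this library):
StringAdapter(Field("name", 5), encoding = "utf8")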
"""
__slots__ = ["encoding"]
def __init__(self, subcon, encoding = None):
Adapter.__init__(self, subcon)
self.encoding = encoding
def _encode(self, obj, context):
if self.encoding:
obj = obj.encode(self.encoding)
return obj
def _decode(self, obj, context):
obj = "".join(obj)
if self.encoding:
obj = obj.decode(self.encoding)
return obj
class PaddedStringAdapter(Adapter):
r"""
Adapter for padded strings.
See String.
Parameters:
* subcon - the subcon to adapt
* padchar - the padding character. default is "\x00".
* paddir - the direction where padding is placed ("right", "left", or
"center"). the default is "right".
* trimdir - the direction where trimming will take place ("right" or
"left"). the default is "right". trimming is only meaningful for
building, when the given string is too long.
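
Example (illustrative sketch; Field is assumed from this library):
PaddedStringAdapter(Field("name", 8), padchar = "\x00", paddir = "right")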
"""
__slots__ = ["padchar", "paddir", "trimdir"]
def __init__(self, subcon, padchar = "\x00", paddir = "right",
trimdir = "right"):
if paddir not in ("right", "left", "center"):
raise ValueError("paddir must be 'right', 'left' or 'center'",
paddir)
if trimdir not in ("right", "left"):
raise ValueError("trimdir must be 'right' or 'left'", trimdir)
Adapter.__init__(self, subcon)
self.padchar = padchar
self.paddir = paddir
self.trimdir = trimdir
def _decode(self, obj, context):
if self.paddir == "right":
obj = obj.rstrip(self.padchar)
elif self.paddir == "left":
obj = obj.lstrip(self.padchar)
else:
obj = obj.strip(self.padchar)
return obj
def _encode(self, obj, context):
size = self._sizeof(context)
if self.paddir == "right":
obj = obj.ljust(size, self.padchar)
elif self.paddir == "left":
obj = obj.rjust(size, self.padchar)
else:
obj = obj.center(size, self.padchar)
if len(obj) > size:
if self.trimdir == "right":
obj = obj[:size]
else:
obj = obj[-size:]
return obj
class LengthValueAdapter(Adapter):
"""
Adapter for length-value pairs. It extracts only the value from the
pair, and calculates the length based on the value.
See PrefixedArray and PascalString.
Parameters:
* subcon - the subcon returning a length-value pair
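
Example (illustrative sketch of a Pascal-style string; Sequence, UBInt8 and
Field are assumed from this library):
LengthValueAdapter(
Sequence("s",
UBInt8("length"),
Field("data", lambda ctx: ctx.length),
)
)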
"""
__slots__ = []
def _encode(self, obj, context):
return (len(obj), obj)
def _decode(self, obj, context):
return obj[1]
class CStringAdapter(StringAdapter):
r"""
Adapter for C-style strings (strings terminated by a terminator char).
Parameters:
* subcon - the subcon to convert
* terminators - a sequence of terminator chars. default is "\x00".
* encoding - the character encoding to use (e.g., "utf8"), or None to
return raw-bytes. the terminator characters are not affected by the
encoding.
"""
__slots__ = ["terminators"]
def __init__(self, subcon, terminators = "\x00", encoding = None):
StringAdapter.__init__(self, subcon, encoding = encoding)
self.terminators = terminators
def _encode(self, obj, context):
return StringAdapter._encode(self, obj, context) + self.terminators[0]
def _decode(self, obj, context):
return StringAdapter._decode(self, obj[:-1], context)
class TunnelAdapter(Adapter):
"""
Adapter for tunneling (as in protocol tunneling). A tunnel is construct
nested upon another (layering). For parsing, the lower layer first parses
the data (note: it must return a string!), then the upper layer is called
to parse that data (bottom-up). For building it works in a top-down manner;
first the upper layer builds the data, then the lower layer takes it and
writes it to the stream.
Parameters:
* subcon - the lower layer subcon
* inner_subcon - the upper layer (tunneled/nested) subcon
Example:
# a pascal string containing compressed data (zlib encoding), so first
# the string is read, decompressed, and finally re-parsed as an array
# of UBInt16
TunnelAdapter(
PascalString("data", encoding = "zlib"),
GreedyRange(UBInt16("elements"))
)
"""
__slots__ = ["inner_subcon"]
def __init__(self, subcon, inner_subcon):
Adapter.__init__(self, subcon)
self.inner_subcon = inner_subcon
def _decode(self, obj, context):
return self.inner_subcon._parse(StringIO(obj), context)
def _encode(self, obj, context):
stream = StringIO()
self.inner_subcon._build(obj, stream, context)
return stream.getvalue()
class ExprAdapter(Adapter):
"""
A generic adapter that accepts 'encoder' and 'decoder' as parameters. You
can use ExprAdapter instead of writing a full-blown class when only a
simple expression is needed.
Parameters:
* subcon - the subcon to adapt
* encoder - a function that takes (obj, context) and returns an encoded
version of obj
* decoder - a function that takes (obj, context) and returns a decoded
version of obj
Example:
ExprAdapter(UBInt8("foo"),
encoder = lambda obj, ctx: obj / 4,
decoder = lambda obj, ctx: obj * 4,
)
"""
__slots__ = ["_encode", "_decode"]
def __init__(self, subcon, encoder, decoder):
Adapter.__init__(self, subcon)
self._encode = encoder
self._decode = decoder
class HexDumpAdapter(Adapter):
"""
Adapter for hex-dumping strings. It returns a HexString, which is a string
that renders as a multi-line hex dump when printed.
Parameters:
* subcon - the subcon to adapt
* linesize - number of bytes per line. default is 16.
"""
__slots__ = ["linesize"]
def __init__(self, subcon, linesize = 16):
Adapter.__init__(self, subcon)
self.linesize = linesize
def _encode(self, obj, context):
return obj
def _decode(self, obj, context):
return HexString(obj, linesize = self.linesize)
class ConstAdapter(Adapter):
"""
Adapter for enforcing a constant value ("magic numbers"). When decoding,
the return value is checked; when building, the value is substituted in.
Parameters:
* subcon - the subcon to validate
* value - the expected value
Example:
Const(Field("signature", 2), "MZ")
"""
__slots__ = ["value"]
def __init__(self, subcon, value):
Adapter.__init__(self, subcon)
self.value = value
def _encode(self, obj, context):
if obj is None or obj == self.value:
return self.value
else:
raise ConstError("expected %r, found %r" % (self.value, obj))
def _decode(self, obj, context):
if obj != self.value:
raise ConstError("expected %r, found %r" % (self.value, obj))
return obj
class SlicingAdapter(Adapter):
"""
Adapter for slicing a list (getting a slice from that list)
Parameters:
* subcon - the subcon to slice
* start - start index
* stop - stop index (or None for up-to-end)
* step - step (or None for every element)
"""
__slots__ = ["start", "stop", "step"]
def __init__(self, subcon, start, stop = None):
Adapter.__init__(self, subcon)
self.start = start
self.stop = stop
def _encode(self, obj, context):
if self.start is None:
return obj
return [None] * self.start + obj
def _decode(self, obj, context):
return obj[self.start:self.stop]
class IndexingAdapter(Adapter):
"""
Adapter for indexing a list (getting a single item from that list)
Parameters:
* subcon - the subcon to index
* index - the index of the list to get
"""
__slots__ = ["index"]
def __init__(self, subcon, index):
Adapter.__init__(self, subcon)
if type(index) is not int:
raise TypeError("index must be an integer", type(index))
self.index = index
def _encode(self, obj, context):
return [None] * self.index + [obj]
def _decode(self, obj, context):
return obj[self.index]
class PaddingAdapter(Adapter):
r"""
Adapter for padding.
Parameters:
* subcon - the subcon to pad
* pattern - the padding pattern (character). default is "\x00"
* strict - whether or not to verify, during parsing, that the given
padding matches the padding pattern. default is False (unstrict)
"""
__slots__ = ["pattern", "strict"]
def __init__(self, subcon, pattern = "\x00", strict = False):
Adapter.__init__(self, subcon)
self.pattern = pattern
self.strict = strict
def _encode(self, obj, context):
return self._sizeof(context) * self.pattern
def _decode(self, obj, context):
if self.strict:
expected = self._sizeof(context) * self.pattern
if obj != expected:
raise PaddingError("expected %r, found %r" % (expected, obj))
return obj
#===============================================================================
# validators
#===============================================================================
class Validator(Adapter):
"""
Abstract class: validates a condition on the encoded/decoded object.
Override _validate(obj, context) in deriving classes.
Parameters:
* subcon - the subcon to validate
"""
__slots__ = []
def _decode(self, obj, context):
if not self._validate(obj, context):
raise ValidationError("invalid object", obj)
return obj
def _encode(self, obj, context):
return self._decode(obj, context)
def _validate(self, obj, context):
raise NotImplementedError()
class OneOf(Validator):
"""
Validates that the value is one of the listed values
Parameters:
* subcon - the subcon to validate
* valids - a set of valid values
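
Example (illustrative sketch; UBInt8 is assumed from this library):
OneOf(UBInt8("ip_version"), [4, 6])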
"""
__slots__ = ["valids"]
def __init__(self, subcon, valids):
Validator.__init__(self, subcon)
self.valids = valids
def _validate(self, obj, context):
return obj in self.valids
class NoneOf(Validator):
"""
Validates that the value is none of the listed values
Parameters:
* subcon - the subcon to validate
* invalids - a set of invalid values
"""
__slots__ = ["invalids"]
def __init__(self, subcon, invalids):
Validator.__init__(self, subcon)
self.invalids = invalids
def _validate(self, obj, context):
return obj not in self.invalids
|
unlicense
| -5,460,892,987,634,537,000
| 32.006224
| 80
| 0.587466
| false
| 4.125778
| false
| false
| false
|
gamda/checkers
|
checkers/model.py
|
1
|
14118
|
# Copyright (c) 2015 Gamda Software, LLC
#
# See the file LICENSE.txt for copying permission.
from enum import Enum
from gameboard.gameboard import Gameboard
from gameboard.gameboard import Direction
from gameboard.coordinate import Coordinate
class Chip:
class Color(Enum):
white = True
black = False
class Type(Enum):
soldier = 0
queen = 1
def __init__(self, color):
if not isinstance(color, self.Color):
raise ValueError("Use Chip.Color values")
self.color = color
self.type = self.Type.soldier
def promote(self):
self.type = self.Type.queen
class Model:
class Gamestate(Enum):
invalidMove = -1
inProgress = 0
whiteWon = 1
blackWon = 2
tie = 3
def new_game(self):
self.__init__()
def __init__(self):
self.board = Gameboard()
self.chips = {Coordinate.a1: Chip(Chip.Color.white),
Coordinate.c1: Chip(Chip.Color.white),
Coordinate.e1: Chip(Chip.Color.white),
Coordinate.g1: Chip(Chip.Color.white),
Coordinate.b2: Chip(Chip.Color.white),
Coordinate.d2: Chip(Chip.Color.white),
Coordinate.f2: Chip(Chip.Color.white),
Coordinate.h2: Chip(Chip.Color.white),
Coordinate.a3: Chip(Chip.Color.white),
Coordinate.c3: Chip(Chip.Color.white),
Coordinate.e3: Chip(Chip.Color.white),
Coordinate.g3: Chip(Chip.Color.white),
Coordinate.b6: Chip(Chip.Color.black),
Coordinate.d6: Chip(Chip.Color.black),
Coordinate.f6: Chip(Chip.Color.black),
Coordinate.h6: Chip(Chip.Color.black),
Coordinate.a7: Chip(Chip.Color.black),
Coordinate.c7: Chip(Chip.Color.black),
Coordinate.e7: Chip(Chip.Color.black),
Coordinate.g7: Chip(Chip.Color.black),
Coordinate.b8: Chip(Chip.Color.black),
Coordinate.d8: Chip(Chip.Color.black),
Coordinate.f8: Chip(Chip.Color.black),
Coordinate.h8: Chip(Chip.Color.black)}
for k in self.chips.keys():
self.board.set_content(k,self.chips[k])
self.turn = Chip.Color.white
self._current_chip = None
def _neighbor_in_direction(self, square, direction):
neighborSquare = self.board.neighbor_in_direction(square, direction)
return neighborSquare
def _next_neighbor_in_direction(self, square, direction):
neighbor_square = self.board.neighbor_in_direction(square, direction)
if neighbor_square is not None: # check the next
new_neighbor = \
self.board.neighbor_in_direction(neighbor_square, direction)
if new_neighbor is not None:
return new_neighbor
return None
def _enemy_in_neighbor(self, square, direction):
neighbor = self._neighbor_in_direction(square, direction)
return neighbor is not None and \
self.board.get_content(neighbor) is not None and \
self.board.get_content(neighbor).color != self.turn
def _directions_for_soldier(self):
white_directions = [Direction.top_left, Direction.top_right]
black_directions = [Direction.btm_left, Direction.btm_right]
return white_directions \
if self.turn == Chip.Color.white \
else black_directions
def _soldier_available_jumps(self, square):
jumps = set()
for direction in self._directions_for_soldier():
if self._enemy_in_neighbor(square, direction):
next_neighbor = \
self._next_neighbor_in_direction(square, direction)
if next_neighbor is not None and \
self.board.get_content(next_neighbor) is None:
jumps.add((square, next_neighbor))
return jumps
def _soldier_available_regular_moves(self, square):
moves = set()
for direction in self._directions_for_soldier():
neighbor = self._neighbor_in_direction(square, direction)
if neighbor is not None and \
self.board.get_content(neighbor) is None:
# empty square, valid move
moves.add((square, neighbor))
return moves
def _soldier_can_jump(self, square):
return bool(self._soldier_available_jumps(square))
def _soldier_chip_available_moves(self, square):
moves = self._soldier_available_jumps(square)
if len(moves) > 0:
return moves, True
return self._soldier_available_regular_moves(square), False
def _queen_rival_found_moves(self,
origin,
square,
direction,
moves,
can_jump):
my_moves = moves
neighbor = self._neighbor_in_direction(square, direction)
if neighbor is not None:
content = self.board.get_content(neighbor)
if content is None and can_jump:
# another empty square after a jump
my_moves.add((origin, neighbor))
return my_moves, True
elif content is None and not can_jump:
# just found out queen can jump
my_moves = set([(origin, neighbor)])
return my_moves, True
return moves, can_jump # two chips in a row or out of bounds
def _queen_moves_in_direction(self, square, direction):
moves, can_jump = set(), False
neighbor = self._neighbor_in_direction(square, direction)
while neighbor is not None:
content = self.board.get_content(neighbor)
if content is None: # empty
moves.add((square, neighbor))
elif content.color != self.turn: # rival
# rival chip found
old_moves = moves
moves, can_jump = self._queen_rival_found_moves(square,
neighbor,
direction,
moves,
can_jump)
neighbor = self._neighbor_in_direction(neighbor, direction)
if moves == old_moves:
break # two chips in a row or out of bounds
else:
break # ally chip found
neighbor = self._neighbor_in_direction(neighbor, direction)
return moves, can_jump
def _queen_can_jump(self, square):
moves, can_jump = self._queen_chip_available_moves(square)
return can_jump
def _queen_chip_available_moves(self, square):
directions = [Direction.top_left, Direction.top_right,
Direction.btm_left, Direction.btm_right]
moves, can_jump = set(), False
for d in directions:
new_moves, new_can_jump = self._queen_moves_in_direction(square, d)
if can_jump == new_can_jump:
moves = moves | new_moves
elif not can_jump and new_can_jump:
moves = new_moves
can_jump = True
return moves, can_jump
def _chip_can_jump(self, square):
if square in self.chips:
if self.chips[square].type == Chip.Type.soldier:
return self._soldier_can_jump(square)
else:
return self._queen_can_jump(square)
return False
def chip_available_moves(self, square):
"""Return a tuple (set[available_moves], bool can_jump)
Args:
square (Coordinate): the square where the chip is/should be
Returns:
set: tuple of Coordinate values of valid moves for the chip. They
have the form (Coordinate.origin, Coordinate.destination)
bool: True if the chip can jump, False otherwise
"""
if not isinstance(square, Coordinate):
raise TypeError("square variable must be from Coordinate enum")
if square not in self.chips.keys() or \
self.board.get_content(square) is None:
# chip is not in the game anymore
return set(), False
chip = self.chips[square]
if chip.color != self.turn:
return set(), False
if chip.type == Chip.Type.soldier:
return self._soldier_chip_available_moves(square)
return self._queen_chip_available_moves(square)
def available_moves(self):
"""Return a set with tuples of Coordinate values of all available moves
Returns:
set: tuple of Coordinate values of valid moves for the chip. They
have the form (Coordinate.origin, Coordinate.destination)
"""
moves = set()
if self._current_chip is not None:
moves, can_jump = self.chip_available_moves(self._current_chip)
return moves
can_jump = False
for coord, chip in self.chips.items():
newMoves, newcan_jump = self.chip_available_moves(coord)
if can_jump == newcan_jump:
moves = moves | newMoves
elif not can_jump and newcan_jump: # found a jump, delete old moves
moves = newMoves
can_jump = True
# else found regular move, but jump found previously
return moves
def _promote(self, square):
startIndex = 0 if self.turn == Chip.Color.white else 7
promo_squares = []
for i in range(startIndex, 64, 8):
promo_squares.append(Coordinate(i))
if square in promo_squares:
self.chips[square].promote()
def _next_turn(self):
self.turn = Chip.Color.black \
if self.turn == Chip.Color.white \
else Chip.Color.white
def _gamestate(self):
if len(self.available_moves()) == 0:
return self.Gamestate.whiteWon \
if self.turn == Chip.Color.black \
else self.Gamestate.blackWon
return self.Gamestate.inProgress
def _remove_chips(self, origin, destination):
removed = []
direction = self._direction_of_move(origin, destination)
squares_jumped = self.board.path_in_direction(origin,
destination,
direction)
for s in squares_jumped:
if self.board.get_content(s) != None:
self.board.clear_square(s)
del self.chips[s]
removed.append(s)
return removed
def _direction_of_move(self, origin, destination):
distance = destination - origin
direction = None
if distance < 0: # moved left
if distance % 7 == 0: # moved top
direction = Direction.top_left
else: # distance % 9 == 0, moved btm
direction = Direction.btm_left
else: # moved right
if distance % 9 == 0:
direction = Direction.top_right
else:
direction = Direction.btm_right
return direction
def move(self, origin, destination):
"""Perform the requested move and returns a tuple (Gamestate, list)
Args:
origin (Coordinate): the square where the chip is currently
            destination (Coordinate): the square where the chip will end
Returns:
Gamestate: value from enum
list: Coordinate values indicating the chip(s) removed
Raises:
TypeError: if origin or destination is not Coordinate
"""
if not isinstance(origin, Coordinate):
raise TypeError("origin variable must be from Coordinate enum")
if not isinstance(destination, Coordinate):
raise TypeError("destination must be from Coordinate enum")
if not (origin, destination) in self.available_moves():
return self.Gamestate.invalidMove, []
turnFinished = True
_, jumped = self.chip_available_moves(origin)
# move chip
self.board.move(origin, destination)
self.chips[destination] = self.chips[origin]
del self.chips[origin]
self._promote(destination)
        # remove chips if a jump occurred
distance = destination - origin
removed = []
if jumped:
removed = self._remove_chips(origin, destination)
if self._chip_can_jump(destination):
turnFinished = False
self._current_chip = destination
if turnFinished:
self._next_turn()
self._current_chip = None
self._promote(destination)
return (self._gamestate(), removed)
def square_contains_teammate(self, square):
"""Returns True if the chip belongs to the team whose turn it is
Args:
square (Coordinate): the square to check for an ally chip
Returns:
bool: True if the chip belongs to the team whose turn it is
Raises:
TypeError: if square is not Coordinate
"""
if not isinstance(square, Coordinate):
raise TypeError("square variable must be from Coordinate enum")
        # Python's short-circuit evaluation makes sure this expression will never
        # throw KeyError: if the key is not in the dictionary, the second
        # expression is not evaluated
return square in self.chips.keys() and \
self.chips[square].color == self.turn
|
mit
| -4,265,342,469,476,370,000
| 39.107955
| 79
| 0.555461
| false
| 4.326693
| false
| false
| false
|
chiffa/PolyPharma
|
bioflow/molecular_network/interactome_analysis.py
|
1
|
22116
|
"""
New analytical routines for the interactome
"""
import pickle
from collections import namedtuple
from csv import reader
from csv import writer as csv_writer
from multiprocessing import Pool
from collections import defaultdict
import traceback
from pprint import pprint
import os
import psutil
from typing import Any, Union, TypeVar, NewType, Tuple, List
import numpy as np
from matplotlib import pyplot as plt
from scipy.stats import gumbel_r
from tabulate import tabulate
from bioflow.configs.main_configs import Dumps, estimated_comp_ops, NewOutputs, \
sparse_analysis_threshold, implicitely_threaded, p_val_cutoff, min_nodes_for_p_val
from bioflow.sample_storage.mongodb import find_interactome_rand_samp, count_interactome_rand_samp
from bioflow.configs.main_configs import output_location
from bioflow.molecular_network.InteractomeInterface import InteractomeInterface
from bioflow.utils.dataviz import kde_compute
from bioflow.utils.log_behavior import get_logger
from bioflow.utils.io_routines import get_source_bulbs_ids, get_background_bulbs_ids
from bioflow.utils.general_utils.high_level_os_io import mkdir_recursive
from bioflow.algorithms_bank.flow_significance_evaluation import get_neighboring_degrees, get_p_val_by_gumbel
log = get_logger(__name__)
def get_interactome_interface(background_up_ids=()) -> InteractomeInterface:
"""
Retrieves an "InteractomeInterface" object
:return:
"""
interactome_interface_instance = InteractomeInterface(background_up_ids=background_up_ids)
interactome_interface_instance.fast_load()
log.debug("get_interactome state e_p_u_b_i length: %s",
len(interactome_interface_instance.active_up_sample))
log.info("interactome interface loaded in %s" % interactome_interface_instance.pretty_time())
# is the case now
return interactome_interface_instance
def spawn_sampler(args_puck):
"""
    Spawns a sampler initialized from the default interactome interface.
:param args_puck: combined list of sample sizes, iterations, background sets, and sparse
sampling argument
"""
# log.info('Pool process %d started' % args_puck[-1])
background_set_arg = args_puck[3]
interactome_interface_instance = get_interactome_interface(background_set_arg)
sample_size_list = args_puck[0]
iteration_list = args_puck[1]
sparse_rounds = args_puck[2]
pool_no = args_puck[-1] # TODO: switch over to PID here
interactome_interface_instance.reset_thread_hex()
interactome_interface_instance.randomly_sample(
sample_size_list,
iteration_list,
sparse_rounds,
pool_no=pool_no
)
def spawn_sampler_pool(
pool_size,
sample_size_list,
interaction_list_per_pool,
background_set,
sparse_rounds=False):
"""
    Spawns a pool of samplers of the information flow within the interactome
:param pool_size: number of processes that are performing the sample pooling and analyzing
:param sample_size_list: size of the sample list
:param interaction_list_per_pool: number of iterations performing the pooling of the samples
in each list
:param sparse_rounds: number of sparse rounds to run (or False if sampling is dense)
:param background_set: set of node ids that are to be sampled from
"""
payload = [
(sample_size_list,
interaction_list_per_pool,
sparse_rounds,
background_set)]
payload_list = payload * pool_size
payload_list = [list(item)+[i] for i, item in enumerate(payload_list)] # prepare the payload
global implicitely_threaded
if not implicitely_threaded:
with Pool(processes=pool_size) as pool: # This is the object we are using to spawn a thread pool
try:
log.debug('spawning the sampler with payload %s', payload)
pool.map(spawn_sampler, payload_list) # This what we spawn as a sampler
# KNOWNBUG: hangs with no message upon a second start attempt in Interactome
# analysis due to cholmod
except Exception as e:
msg = "{}\n\nOriginal {}".format(e, traceback.format_exc())
raise type(e)(msg)
# log.info('Last in-pool flag exiting')
pool.terminate()
# log.info('Pool terminated')
else:
log.debug('spawning single-thread sampler with payload %s', payload)
for _payload in payload_list:
spawn_sampler(_payload)
def local_indexed_select(bi_array, array_column, selection_span):
"""
    Convenient small function to select from bi_array all the elements where the column
    number array_column is within the selection span
:param bi_array: the matrix on which we will be performing the selection
:param array_column: column number on which the selection span will be applied
:param selection_span: span for which we are going to keep the column.
"""
selector = np.logical_and(
selection_span[0] < bi_array[array_column, :],
bi_array[array_column, :] < selection_span[1])
if not any(selector):
return np.array([[0.0, 0.0, 0.0]])
filtered_bi_array = bi_array[:, selector]
return filtered_bi_array
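# Illustrative sketch (added as an example, not part of the original module):
# given bi_array = np.array([[1., 2., 3.], [10., 20., 30.]]),
# local_indexed_select(bi_array, 1, (15, 35)) keeps only the columns whose
# value in row 1 lies strictly between 15 and 35, returning
# np.array([[2., 3.], [20., 30.]]).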
def samples_scatter_and_hist(background_curr_deg_conf, true_sample_bi_corr_array,
save_path: NewOutputs = None, p_values: np.array = None):
"""
    A general function that plots an example of random samples of the same size as our
    sample together with our sample, and conducts the statistical tests on whether any
    of the nodes or functional groups in our sample are non-random
:param background_curr_deg_conf: [[current, informativity, confusion_potential], ...] -
characteristics of the random samples
:param true_sample_bi_corr_array: [[current, informativity, confusion_potential], ...] -
characteristics of the true sample. If none, nothing happens
:param save_path: where the thing will be saved
:param p_values: p-value map that will be used to save things after the analysis
:return: None
"""
fig = plt.figure()
fig.set_size_inches(30, 20)
# bivect: [0, :] - current; [1, :] - informativity
plt.subplot(211)
plt.title('current through nodes')
bins = np.linspace(
background_curr_deg_conf[0, :].min(),
background_curr_deg_conf[0, :].max(), 100)
if true_sample_bi_corr_array is not None:
bins = np.linspace(min(background_curr_deg_conf[0, :].min(),
true_sample_bi_corr_array[0, :].min()),
max(background_curr_deg_conf[0, :].max(),
true_sample_bi_corr_array[0, :].max()),
100)
plt.hist(background_curr_deg_conf[0, :],
bins=bins, histtype='step', log=True, color='b')
if true_sample_bi_corr_array is not None:
plt.hist(true_sample_bi_corr_array[0, :],
bins=bins, histtype='step', log=True, color='r')
plt.subplot(212)
plt.scatter(background_curr_deg_conf[1, :],
background_curr_deg_conf[0, :], color='b', alpha=0.1)
if true_sample_bi_corr_array is not None:
if p_values is not None:
_filter = p_values < p_val_cutoff
anti_filter = np.logical_not(_filter)
plt.scatter(true_sample_bi_corr_array[1, anti_filter],
true_sample_bi_corr_array[0, anti_filter],
color='gray', alpha=0.25)
plt.scatter(true_sample_bi_corr_array[1, _filter],
true_sample_bi_corr_array[0, _filter],
color='r', alpha=0.7)
else:
plt.scatter(true_sample_bi_corr_array[1, :],
true_sample_bi_corr_array[0, :],
color='r', alpha=0.5)
# plt.show()
plt.savefig(save_path.interactome_network_scatterplot)
plt.clf()
def compare_to_blank(blank_model_size: int,
interactome_interface_instance: InteractomeInterface,
p_val: float = 0.05,
sparse_rounds: bool = False,
output_destination: NewOutputs = None) -> Tuple[list, dict]:
"""
Recovers the statistics on the circulation nodes and shows the visual of a circulation system.
There is no issue with using the same interactome interface instance, because they are forked when
threads are generated and will not interfere.
:param blank_model_size: the number of uniprots in the blank model
:param p_val: desired p_value for the returned terms
:param sparse_rounds: if set to a number, sparse computation technique would be used
with the number of rounds equal the integer value of that argument
:param interactome_interface_instance:
:return: None if no significant nodes, the node and group characteristic
dictionaries otherwise
"""
def get_max_for_each_degree(sample_sub_arrray):
# print('debug max_array_shape:', str(sample_sub_arrray.shape))
degrees = np.unique(sample_sub_arrray[1, :])
max_array = []
for degree in degrees:
filter = sample_sub_arrray[1, :] == degree
max_array.append([sample_sub_arrray[0, filter].max(), degree])
m_arr = np.array(max_array)
return m_arr.T
if interactome_interface_instance is None or interactome_interface_instance.node_current == {}:
        raise Exception("tried to compare an empty interface instance to blank")
md5_hash = interactome_interface_instance.md5_hash()
background_sub_array_list = []
max_sub_array_list = []
count = 0
log.info("looking to test against:"
"\t size: %s \t sys_hash: %s \t sparse_rounds: %s" %
(blank_model_size, md5_hash, sparse_rounds))
log.info("samples found to test against:\t %s" %
count_interactome_rand_samp({'size': blank_model_size,
'sys_hash': md5_hash,
'sparse_rounds': sparse_rounds}))
background_sample = find_interactome_rand_samp({'size': blank_model_size,
'sys_hash': md5_hash,
'sparse_rounds': sparse_rounds})
for i, sample in enumerate(background_sample):
_, node_currents = pickle.loads(sample['currents'])
dict_system = interactome_interface_instance.format_node_props(node_currents, limit=0)
background_sub_array = list(dict_system.values())
if np.array(background_sub_array).T.shape[0] < 2:
log.info(background_sub_array)
continue
background_sub_array_list.append(np.array(background_sub_array).T)
# print(np.array(background_sub_array).T.shape)
# pprint(background_sub_array)
max_arr = get_max_for_each_degree(np.array(background_sub_array).T)
max_sub_array_list.append(max_arr)
count = i
# This part declares the pre-operators required for the verification of a
# real sample
background_array = np.concatenate(tuple(background_sub_array_list), axis=1)
max_array = np.concatenate(tuple(max_sub_array_list), axis=1)
node_currents = interactome_interface_instance.node_current
dict_system = interactome_interface_instance.format_node_props(node_currents)
curr_inf_conf_tot = np.array([[int(key)] + list(val) for key, val in list(dict_system.items())]).T
node_ids, query_array = (curr_inf_conf_tot[0, :], curr_inf_conf_tot[(1, 2), :])
log.info("stats on %s samples" % count)
background_density = kde_compute(background_array[(1, 0), :], 50, count)
base_bi_corr = background_array[(0, 1), :]
r_rels = []
r_std_nodes = []
degrees = np.unique(query_array[1, :])
combined_p_vals = np.ones_like(query_array[1, :])
for degree in degrees.tolist():
_filter = query_array[1, :] == degree
entry = query_array[:, _filter]
background_set = background_array[:, background_array[1, :] == degree]
max_current_per_run = get_neighboring_degrees(degree,
max_array,
min_nodes=min_nodes_for_p_val)
p_vals = get_p_val_by_gumbel(entry, max_current_per_run)
combined_p_vals[_filter] = p_vals
samples_scatter_and_hist(background_array, query_array,
save_path=output_destination,
p_values=combined_p_vals)
r_nodes = background_density(query_array[(1, 0), :]) # legacy - unused now
r_nodes = combined_p_vals
for point in query_array.T:
selector = np.logical_and(base_bi_corr[1, :] > point[1]*0.9, base_bi_corr[1, :] < point[1]*1.1)
r_rels.append(point[0] / np.mean(base_bi_corr[0, selector]))
r_std_nodes.append((point[0] - np.mean(base_bi_corr[0, selector])) / np.std(base_bi_corr[0,
selector]))
r_rels = np.array(r_rels)
r_std_nodes = np.array(r_std_nodes)
not_random_nodes = [node_id for node_id in node_ids[r_nodes < p_val].tolist()]
# basically the second element below are the nodes that contribute to the
# information flow through the node that is considered as non-random
log.debug('debug, not random nodes: %s', not_random_nodes)
log.debug('debug bulbs_id_disp_name: %s',
list(interactome_interface_instance.neo4j_id_2_display_name.items())[:10])
node_char_list = [
[int(nr_node_id), interactome_interface_instance.neo4j_id_2_display_name[nr_node_id]] +
dict_system[nr_node_id] + r_nodes[node_ids == float(nr_node_id)].tolist()
for nr_node_id in not_random_nodes]
nodes_dict = np.hstack((node_ids[:, np.newaxis],
r_nodes[:, np.newaxis],
r_rels[:, np.newaxis],
r_std_nodes[:, np.newaxis]))
nodes_dict = dict((node[0], (node[1], node[2], node[3])) for node in nodes_dict.tolist())
nodes_dict = defaultdict(lambda: (1., 0., 0.), nodes_dict) # corresponds to the cases of super low flow - never significant
# TODO: pull the groups corresponding to non-random associations.
# => Will not implement, it's already done by Gephi
return sorted(node_char_list, key=lambda x: x[4]), nodes_dict
# TODO: [weighted inputs] add support for a dict as source_list, not only list
def auto_analyze(source_list: List[List[int]],
output_destinations_list: Union[List[str], None] = None,
desired_depth: int = 24,
processors: int = 0,
background_list: Union[List[int], None] = None,
skip_sampling: bool = False,
p_value_cutoff: float = -1,
) -> None:
"""
    Automatically analyzes the interactome synergetic action of the RNA_seq results
:param source_list: python list of hits for each condition
:param output_destinations_list: list of names for each condition
:param desired_depth: total samples we would like to compare each set of hits with
:param processors: number of processes that will be loaded. as a rule of thumb,
for max performance, use N-1 processors, where N is the number of physical cores on the
machine, which is the default
    :param background_list: list of physical entities that an experimental method can retrieve
:param skip_sampling: if true, will skip background sampling step
"""
    # Multiple re-spawns of threaded processing are incompatible with scikits.sparse.cholmod
if len(source_list) > 1:
global implicitely_threaded
implicitely_threaded = True
    if output_destinations_list is not None and len(output_destinations_list) != len(source_list):
        log.warning('Output destination list has %d elements, whereas %d sources were supplied. '
                    'Falling back to default output structure',
                    len(output_destinations_list), len(source_list))
        output_destinations_list = None
if output_destinations_list is None:
output_destinations_list = list(range(len(source_list)))
if processors == 0:
processors = psutil.cpu_count() - 1
log.info("Setting processor count to default: %s" % processors)
# TODO: [Better Sampling]
# check MongoDb to see if we have enough samples of the needed type, adjust the sampling
# noinspection PyTypeChecker
if desired_depth % processors != 0:
desired_depth = desired_depth // processors + 1
else:
desired_depth = desired_depth // processors
if p_value_cutoff < 0:
p_value_cutoff = p_val_cutoff
for hits_list, output_destination in zip(source_list, output_destinations_list):
log.info('Auto analyzing list of interest: %s', len(hits_list))
outputs_subdirs = NewOutputs(output_destination)
interactome_interface = get_interactome_interface(background_up_ids=background_list)
interactome_interface.set_uniprot_source(list(hits_list))
log.debug(" e_p_u_b_i length after UP_source was set: %s",
len(interactome_interface.active_up_sample))
if not skip_sampling:
log.info("spawning a sampler for %s proteins @ %s compops/sec",
len(interactome_interface.active_up_sample), estimated_comp_ops)
# dense analysis
if len(interactome_interface.active_up_sample) < sparse_analysis_threshold:
if not skip_sampling:
log.info('length: %s \t sampling depth: %s \t, estimated round time: %s min',
len(interactome_interface.active_up_sample),
'full',
len(interactome_interface.active_up_sample) ** 2 /
estimated_comp_ops / 60)
spawn_sampler_pool(
processors,
[len(interactome_interface.active_up_sample)],
[desired_depth],
background_set=background_list)
interactome_interface.compute_current_and_potentials()
nr_nodes, p_val_dict = compare_to_blank(
len(interactome_interface.active_up_sample),
interactome_interface,
p_val=p_value_cutoff,
output_destination=outputs_subdirs
)
# sparse analysis
else:
ceiling = min(205, len(interactome_interface.active_up_sample))
sampling_depth = max((ceiling - 5) ** 2 //
len(interactome_interface.active_up_sample),
5)
if not skip_sampling:
log.info('length: %s \t sampling depth: %s \t, estimated round time: %s min',
len(interactome_interface.active_up_sample),
sampling_depth,
len(interactome_interface.active_up_sample) *
sampling_depth / 2 / 60 / estimated_comp_ops)
spawn_sampler_pool(processors,
[len(interactome_interface.active_up_sample)],
[desired_depth],
sparse_rounds=sampling_depth,
background_set=background_list)
log.info('real run characteristics: sys_hash: %s, size: %s, sparse_rounds: %s' %
(interactome_interface.md5_hash(),
len(interactome_interface.active_up_sample), sampling_depth))
interactome_interface.compute_current_and_potentials(sparse_samples=sampling_depth)
nr_nodes, p_val_dict = compare_to_blank(
len(interactome_interface.active_up_sample),
interactome_interface,
p_val=p_value_cutoff,
sparse_rounds=sampling_depth,
output_destination=outputs_subdirs
)
interactome_interface.export_conduction_system(p_val_dict,
output_location=outputs_subdirs.Interactome_GDF_output)
# # old results print-out
# log.info('\t %s \t %s \t %s \t %s \t %s', 'node id',
# 'display name', 'info flow', 'degree', 'p value')
#
# for node in nr_nodes:
# log.info('\t %s \t %s \t %.3g \t %d \t %.3g', *node)
with open(outputs_subdirs.interactome_network_output, 'wt') as output:
writer = csv_writer(output, delimiter='\t')
writer.writerow(['node id', 'display name', 'info flow', 'degree', 'p value'])
for node in nr_nodes:
writer.writerow(node)
# using tabulate
headers = ['node id', 'display name', 'info flow', 'degree', 'p value']
print(tabulate(nr_nodes, headers, tablefmt='simple', floatfmt=".3g"))
if __name__ == "__main__":
# pprinter = PrettyPrinter(indent=4)
# background_set = MatrixGetter(True, False)
# background_set.fast_load()
# dumplist = undump_object(Dumps.RNA_seq_counts_compare)
# MG1.randomly_sample([150], [1], chromosome_specific=15, No_add=True)
# nr_nodes, nr_groups = compare_to_blanc(150, [0.5, 0.6], MG1, p_val=0.9)
# MG1.export_conduction_system()
# for group in nr_groups:
# print group
# for node in nr_nodes:
# print node
# source = get_source_bulbs_ids()
# background_list = get_background_bulbs_ids()
# auto_analyze([source], desired_depth=5, processors=6,
# background_list=background_list, skip_sampling=True)
local_matrix = InteractomeInterface()
local_matrix.fast_load()
# spawn_sampler_pool(3, [50], [3], background_set=None)
spawn_sampler(([50], [3], False, None, 0))
# local_matrix.randomly_sample([195], [10], sparse_rounds=195)
|
bsd-3-clause
| 3,543,690,738,254,189,000
| 40.261194
| 128
| 0.614532
| false
| 3.810476
| false
| false
| false
|
mbylstra/django-wham
|
wham/fields.py
|
1
|
3400
|
from django.db import models
# the following will be required if we want to support south
# ----------------------------------------------------------------
# from south.modelsinspector import add_introspection_rules
#
# add_introspection_rules([], [
# "^wham\.models\.WhamCharField",
# "^wham\.models\.WhamTextField",
# "^wham\.models\.WhamIntegerField",
# "^wham\.models\.WhamFloatField",
# "^wham\.models\.WhamManyToManyField",
# "^wham\.models\.WhamDateField",
# "^wham\.models\.WhamDateTimeField",
# "^wham\.models\.WhamImageUrlField",
# ])
class WhamFieldMixin(object):
def __init__(self, *args, **kwargs):
self.wham_result_path = kwargs.pop('wham_result_path', None)
self.wham_can_lookup = kwargs.pop('wham_can_lookup', False)
self.wham_url_param = kwargs.pop('wham_url_param', None)
self.wham_detailed = kwargs.pop('wham_detailed', False)
return super(WhamFieldMixin, self).__init__(*args, **kwargs)
def get_result_path(self):
result_path = self.wham_result_path
if not result_path:
return (self.attname,)
else:
return result_path
def get_url_param(self):
return self.wham_url_param if self.wham_url_param else self.name
class WhamCharField(WhamFieldMixin, models.TextField):
@property
def type_repr(self):
return 'char'
class WhamTextField(WhamFieldMixin, models.TextField):
@property
def type_repr(self):
return 'text'
class WhamIntegerField(WhamFieldMixin, models.IntegerField):
@property
def type_repr(self):
return 'integer'
class WhamFloatField(WhamFieldMixin, models.FloatField):
@property
def type_repr(self):
return 'float'
class WhamDateField(WhamFieldMixin, models.DateField):
pass
class WhamDateTimeField(WhamFieldMixin, models.DateTimeField):
def __init__(self, *args, **kwargs):
self.wham_format = kwargs.pop('wham_format', None)
return super(WhamDateTimeField, self).__init__(*args, **kwargs)
class WhamManyToManyField(models.ManyToManyField):
def __init__(self, *args, **kwargs):
self.wham_result_path = kwargs.pop('wham_result_path', None)
self.wham_endpoint = kwargs.pop('wham_endpoint', None)
self.wham_results_path = kwargs.pop('wham_results_path', ())
self.wham_pk_param = kwargs.pop('wham_pk_param', None)
self.wham_params = kwargs.pop('wham_params', {})
return super(WhamManyToManyField, self).__init__(*args, **kwargs)
@property
def type_repr(self):
return 'many to many'
class WhamForeignKey(models.ForeignKey):
def __init__(self, *args, **kwargs):
self.wham_result_path = kwargs.pop('wham_result_path', None)
self.wham_endpoint = kwargs.pop('wham_endpoint', None)
self.wham_results_path = kwargs.pop('wham_results_path', ())
self.wham_pk_param = kwargs.pop('wham_pk_param', None)
self.wham_params = kwargs.pop('wham_params', {})
return super(WhamForeignKey, self).__init__(*args, **kwargs)
def get_result_path(self):
result_path = self.wham_result_path
if not result_path:
return (self.name,)
else:
return result_path
@property
def type_repr(self):
return 'foreign key'
class WhamImageUrlField(WhamTextField):
pass
|
mit
| 8,305,047,103,216,442,000
| 29.088496
| 73
| 0.631765
| false
| 3.530633
| false
| false
| false
|
wakiyamap/electrum-mona
|
electrum_mona/gui/qt/password_dialog.py
|
1
|
11128
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2013 ecdsa@github
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import math
from functools import partial
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import QLineEdit, QLabel, QGridLayout, QVBoxLayout, QCheckBox
from electrum_mona.i18n import _
from electrum_mona.plugin import run_hook
from .util import (icon_path, WindowModalDialog, OkButton, CancelButton, Buttons,
PasswordLineEdit)
def check_password_strength(password):
'''
    Check the strength of the password entered by the user and return its rating
    :param password: password entered by user in New Password
    :return: password strength: Weak, Medium, Strong or Very Strong
'''
password = password
n = math.log(len(set(password)))
num = re.search("[0-9]", password) is not None and re.match("^[0-9]*$", password) is None
caps = password != password.upper() and password != password.lower()
extra = re.match("^[a-zA-Z0-9]*$", password) is None
score = len(password)*(n + caps + num + extra)/20
password_strength = {0:"Weak",1:"Medium",2:"Strong",3:"Very Strong"}
return password_strength[min(3, int(score))]
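# Illustrative example (added, not part of the original module): with the scoring
# above, a short all-lowercase password rates as weak, e.g.
# check_password_strength("abc") == "Weak", while longer mixed-case passwords
# with digits and symbols move towards "Strong" / "Very Strong".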
PW_NEW, PW_CHANGE, PW_PASSPHRASE = range(0, 3)
class PasswordLayout(object):
titles = [_("Enter Password"), _("Change Password"), _("Enter Passphrase")]
def __init__(self, msg, kind, OK_button, wallet=None, force_disable_encrypt_cb=False):
self.wallet = wallet
self.pw = PasswordLineEdit()
self.new_pw = PasswordLineEdit()
self.conf_pw = PasswordLineEdit()
self.kind = kind
self.OK_button = OK_button
vbox = QVBoxLayout()
label = QLabel(msg + "\n")
label.setWordWrap(True)
grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnMinimumWidth(0, 150)
grid.setColumnMinimumWidth(1, 100)
grid.setColumnStretch(1,1)
if kind == PW_PASSPHRASE:
vbox.addWidget(label)
msgs = [_('Passphrase:'), _('Confirm Passphrase:')]
else:
logo_grid = QGridLayout()
logo_grid.setSpacing(8)
logo_grid.setColumnMinimumWidth(0, 70)
logo_grid.setColumnStretch(1,1)
logo = QLabel()
logo.setAlignment(Qt.AlignCenter)
logo_grid.addWidget(logo, 0, 0)
logo_grid.addWidget(label, 0, 1, 1, 2)
vbox.addLayout(logo_grid)
m1 = _('New Password:') if kind == PW_CHANGE else _('Password:')
msgs = [m1, _('Confirm Password:')]
if wallet and wallet.has_password():
grid.addWidget(QLabel(_('Current Password:')), 0, 0)
grid.addWidget(self.pw, 0, 1)
lockfile = "lock.png"
else:
lockfile = "unlock.png"
logo.setPixmap(QPixmap(icon_path(lockfile))
.scaledToWidth(36, mode=Qt.SmoothTransformation))
grid.addWidget(QLabel(msgs[0]), 1, 0)
grid.addWidget(self.new_pw, 1, 1)
grid.addWidget(QLabel(msgs[1]), 2, 0)
grid.addWidget(self.conf_pw, 2, 1)
vbox.addLayout(grid)
# Password Strength Label
if kind != PW_PASSPHRASE:
self.pw_strength = QLabel()
grid.addWidget(self.pw_strength, 3, 0, 1, 2)
self.new_pw.textChanged.connect(self.pw_changed)
self.encrypt_cb = QCheckBox(_('Encrypt wallet file'))
self.encrypt_cb.setEnabled(False)
grid.addWidget(self.encrypt_cb, 4, 0, 1, 2)
if kind == PW_PASSPHRASE:
self.encrypt_cb.setVisible(False)
def enable_OK():
ok = self.new_pw.text() == self.conf_pw.text()
OK_button.setEnabled(ok)
self.encrypt_cb.setEnabled(ok and bool(self.new_pw.text())
and not force_disable_encrypt_cb)
self.new_pw.textChanged.connect(enable_OK)
self.conf_pw.textChanged.connect(enable_OK)
self.vbox = vbox
def title(self):
return self.titles[self.kind]
def layout(self):
return self.vbox
def pw_changed(self):
password = self.new_pw.text()
if password:
colors = {"Weak":"Red", "Medium":"Blue", "Strong":"Green",
"Very Strong":"Green"}
strength = check_password_strength(password)
label = (_("Password Strength") + ": " + "<font color="
+ colors[strength] + ">" + strength + "</font>")
else:
label = ""
self.pw_strength.setText(label)
def old_password(self):
if self.kind == PW_CHANGE:
return self.pw.text() or None
return None
def new_password(self):
pw = self.new_pw.text()
# Empty passphrases are fine and returned empty.
if pw == "" and self.kind != PW_PASSPHRASE:
pw = None
return pw
def clear_password_fields(self):
for field in [self.pw, self.new_pw, self.conf_pw]:
field.clear()
class PasswordLayoutForHW(object):
def __init__(self, msg, wallet=None):
self.wallet = wallet
vbox = QVBoxLayout()
label = QLabel(msg + "\n")
label.setWordWrap(True)
grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnMinimumWidth(0, 150)
grid.setColumnMinimumWidth(1, 100)
grid.setColumnStretch(1,1)
logo_grid = QGridLayout()
logo_grid.setSpacing(8)
logo_grid.setColumnMinimumWidth(0, 70)
logo_grid.setColumnStretch(1,1)
logo = QLabel()
logo.setAlignment(Qt.AlignCenter)
logo_grid.addWidget(logo, 0, 0)
logo_grid.addWidget(label, 0, 1, 1, 2)
vbox.addLayout(logo_grid)
if wallet and wallet.has_storage_encryption():
lockfile = "lock.png"
else:
lockfile = "unlock.png"
logo.setPixmap(QPixmap(icon_path(lockfile))
.scaledToWidth(36, mode=Qt.SmoothTransformation))
vbox.addLayout(grid)
self.encrypt_cb = QCheckBox(_('Encrypt wallet file'))
grid.addWidget(self.encrypt_cb, 1, 0, 1, 2)
self.vbox = vbox
def title(self):
return _("Toggle Encryption")
def layout(self):
return self.vbox
class ChangePasswordDialogBase(WindowModalDialog):
def __init__(self, parent, wallet):
WindowModalDialog.__init__(self, parent)
is_encrypted = wallet.has_storage_encryption()
OK_button = OkButton(self)
self.create_password_layout(wallet, is_encrypted, OK_button)
self.setWindowTitle(self.playout.title())
vbox = QVBoxLayout(self)
vbox.addLayout(self.playout.layout())
vbox.addStretch(1)
vbox.addLayout(Buttons(CancelButton(self), OK_button))
self.playout.encrypt_cb.setChecked(is_encrypted)
def create_password_layout(self, wallet, is_encrypted, OK_button):
raise NotImplementedError()
class ChangePasswordDialogForSW(ChangePasswordDialogBase):
def __init__(self, parent, wallet):
ChangePasswordDialogBase.__init__(self, parent, wallet)
if not wallet.has_password():
self.playout.encrypt_cb.setChecked(True)
def create_password_layout(self, wallet, is_encrypted, OK_button):
if not wallet.has_password():
msg = _('Your wallet is not protected.')
msg += ' ' + _('Use this dialog to add a password to your wallet.')
else:
if not is_encrypted:
msg = _('Your bitcoins are password protected. However, your wallet file is not encrypted.')
else:
msg = _('Your wallet is password protected and encrypted.')
msg += ' ' + _('Use this dialog to change your password.')
self.playout = PasswordLayout(msg=msg,
kind=PW_CHANGE,
OK_button=OK_button,
wallet=wallet,
force_disable_encrypt_cb=not wallet.can_have_keystore_encryption())
def run(self):
try:
if not self.exec_():
return False, None, None, None
return True, self.playout.old_password(), self.playout.new_password(), self.playout.encrypt_cb.isChecked()
finally:
self.playout.clear_password_fields()
class ChangePasswordDialogForHW(ChangePasswordDialogBase):
def __init__(self, parent, wallet):
ChangePasswordDialogBase.__init__(self, parent, wallet)
def create_password_layout(self, wallet, is_encrypted, OK_button):
if not is_encrypted:
msg = _('Your wallet file is NOT encrypted.')
else:
msg = _('Your wallet file is encrypted.')
msg += '\n' + _('Note: If you enable this setting, you will need your hardware device to open your wallet.')
msg += '\n' + _('Use this dialog to toggle encryption.')
self.playout = PasswordLayoutForHW(msg)
def run(self):
if not self.exec_():
return False, None
return True, self.playout.encrypt_cb.isChecked()
class PasswordDialog(WindowModalDialog):
def __init__(self, parent=None, msg=None):
msg = msg or _('Please enter your password')
WindowModalDialog.__init__(self, parent, _("Enter Password"))
self.pw = pw = PasswordLineEdit()
vbox = QVBoxLayout()
vbox.addWidget(QLabel(msg))
grid = QGridLayout()
grid.setSpacing(8)
grid.addWidget(QLabel(_('Password')), 1, 0)
grid.addWidget(pw, 1, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(self), OkButton(self)))
self.setLayout(vbox)
run_hook('password_dialog', pw, grid, 1)
def run(self):
try:
if not self.exec_():
return
return self.pw.text()
finally:
self.pw.clear()
|
mit
| 7,213,216,465,206,087,000
| 33.993711
| 118
| 0.604421
| false
| 3.961552
| false
| false
| false
|
ntthuy11/CodeFights
|
Arcade/04_Python/01_MeetPython/mexFunction.py
|
1
|
1362
|
# You've just started to study impartial games, and came across an interesting theory. The theory is quite complicated, but
# it can be narrowed down to the following statements: solutions to all such games can be found with the mex function.
# Mex is an abbreviation of minimum excludant: for the given set s it finds the minimum non-negative integer that is not
# present in s.
# You don't yet know how to implement such a function efficiently, so would like to create a simplified version. For the
# given set s and given an upperBound, implement a function that will find its mex if it's smaller than upperBound or
# return upperBound instead.
#
# Example
# For s = [0, 4, 2, 3, 1, 7] and upperBound = 10,
# the output should be
# mexFunction(s, upperBound) = 5.
# 5 is the smallest non-negative integer that is not present in s, and it is smaller than upperBound.
#
# For s = [0, 4, 2, 3, 1, 7] and upperBound = 3,
# the output should be
# mexFunction(s, upperBound) = 3.
# The minimum excludant for the given set is 5, but it's greater than upperBound, so the output should be 3.
def mexFunction(s, upperBound):
found = -1
for i in range(upperBound):
if not i in s:
found = i
break
else:
found = upperBound # this line is what CodeFights asks for
return found
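# Quick self-check (added as an illustrative example; mirrors the examples above):
if __name__ == '__main__':
    assert mexFunction([0, 4, 2, 3, 1, 7], 10) == 5
    assert mexFunction([0, 4, 2, 3, 1, 7], 3) == 3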
|
mit
| -4,172,678,401,009,438,700
| 45.965517
| 124
| 0.690896
| false
| 3.556136
| false
| false
| false
|
flower-pot/xf-indicator
|
xf_indicator/build_status.py
|
1
|
2244
|
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# Copyright (C) 2014 Frederic Branczyk fbranczyk@gmail.com
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
### END LICENSE
from gi.repository import Gtk, GObject
from enum import Enum
class BuildStatus(Enum):
active = (1)
failing = (2)
not_existing = (3)
unknown = (4)
passing = (5)
def __init__(self, number):
self._value_ = number
def __ge__(self, other):
if self.__class__ is other.__class__:
return self.value >= other.value
return NotImplemented
def __gt__(self, other):
if self.__class__ is other.__class__:
return self.value > other.value
return NotImplemented
def __le__(self, other):
if self.__class__ is other.__class__:
return self.value <= other.value
return NotImplemented
def __lt__(self, other):
if self.__class__ is other.__class__:
return self.value < other.value
return NotImplemented
def __eq__(self, other):
if self.__class__ is other.__class__:
return self.value == other.value
return NotImplemented
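# Illustrative note (added, not part of the original module): the rich comparison
# methods above order members by their numeric value, e.g.
# BuildStatus.failing < BuildStatus.passing is True and
# max(BuildStatus.unknown, BuildStatus.passing) is BuildStatus.passing.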
|
mit
| 7,255,914,806,074,165,000
| 37.033898
| 79
| 0.666667
| false
| 4.290631
| false
| false
| false
|
rasmusprentow/mvln
|
mvln/test/test_converter.py
|
1
|
1427
|
#converter_test.py
import sys
sys.path.insert(0,"..")
sys.path.insert(0,"mvln")
from mvln import *
import unittest, os, shutil
testfolder = os.getcwd()+ "/__tmptest__/src/testfolder/"
testfolder_dest = os.getcwd()+"/__tmptest__/dst/testfolder/"
testfolder2 = os.getcwd()+ "/__tmptest__/src/testfolder2/"
testfolder2_dest = os.getcwd()+"/__tmptest__/dst/testfolder2/"
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
try:
shutil.rmtree("__tmptest__")
except OSError:
pass
os.mkdir("__tmptest__")
os.mkdir("__tmptest__/dst")
os.mkdir("__tmptest__/src")
os.mkdir(testfolder)
os.mkdir(testfolder2)
f = open(testfolder+"testfile",'w')
f.write("testestest")
f.close()
f = open(testfolder2+"testfile",'w')
f.write("testestest")
f.close()
self.converter = Converter( testfolder + " " + testfolder_dest + "\n" +
testfolder2 + " " + testfolder2_dest + "\n")
def test_getlines(self):
result = self.converter.getLines()
self.assertEqual(result[1], testfolder2 + " " + testfolder2_dest)
def test_convert(self):
result = self.converter.getFiles()
self.assertIsInstance(result[1], MvLnFile)
self.assertEqual(result[1].dst, testfolder2_dest)
self.assertEqual(result[1].src, testfolder2)
self.assertEqual(result[0].dst, testfolder_dest)
self.assertEqual(result[0].src, testfolder)
if __name__ == '__main__':
unittest.main()
|
gpl-2.0
| 1,918,018,458,159,021,300
| 22.8
| 75
| 0.662228
| false
| 2.854
| true
| false
| false
|
ddico/odoo
|
addons/project_timesheet_holidays/models/hr_holidays.py
|
2
|
5160
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
from odoo.exceptions import ValidationError
class HolidaysType(models.Model):
_inherit = "hr.leave.type"
def _default_project_id(self):
company = self.company_id if self.company_id else self.env.company
return company.leave_timesheet_project_id.id
def _default_task_id(self):
company = self.company_id if self.company_id else self.env.company
return company.leave_timesheet_task_id.id
timesheet_generate = fields.Boolean('Generate Timesheet', default=True, help="If checked, when validating a time off, timesheet will be generated in the Vacation Project of the company.")
timesheet_project_id = fields.Many2one('project.project', string="Project", default=_default_project_id, domain="[('company_id', '=', company_id)]", help="The project will contain the timesheet generated when a time off is validated.")
timesheet_task_id = fields.Many2one('project.task', string="Task for timesheet", default=_default_task_id, domain="[('project_id', '=', timesheet_project_id), ('company_id', '=', company_id)]")
@api.onchange('timesheet_task_id')
def _onchange_timesheet_generate(self):
if self.timesheet_task_id or self.timesheet_project_id:
self.timesheet_generate = True
else:
self.timesheet_generate = False
@api.onchange('timesheet_project_id')
def _onchange_timesheet_project(self):
company = self.company_id if self.company_id else self.env.company
default_task_id = company.leave_timesheet_task_id
if default_task_id and default_task_id.project_id == self.timesheet_project_id:
self.timesheet_task_id = default_task_id
else:
self.timesheet_task_id = False
if self.timesheet_project_id:
self.timesheet_generate = True
else:
self.timesheet_generate = False
@api.constrains('timesheet_generate', 'timesheet_project_id', 'timesheet_task_id')
def _check_timesheet_generate(self):
for holiday_status in self:
if holiday_status.timesheet_generate:
if not holiday_status.timesheet_project_id or not holiday_status.timesheet_task_id:
raise ValidationError(_("Both the internal project and task are required to "
"generate a timesheet for the time off. If you don't want a timesheet, you should "
"leave the internal project and task empty."))
class Holidays(models.Model):
_inherit = "hr.leave"
timesheet_ids = fields.One2many('account.analytic.line', 'holiday_id', string="Analytic Lines")
def _validate_leave_request(self):
""" Timesheet will be generated on leave validation only if a timesheet_project_id and a
timesheet_task_id are set on the corresponding leave type. The generated timesheet will
be attached to this project/task.
"""
# create the timesheet on the vacation project
for holiday in self.filtered(
lambda request: request.holiday_type == 'employee' and
request.holiday_status_id.timesheet_project_id and
request.holiday_status_id.timesheet_task_id):
holiday._timesheet_create_lines()
return super(Holidays, self)._validate_leave_request()
def _timesheet_create_lines(self):
self.ensure_one()
vals_list = []
work_hours_data = self.employee_id.list_work_time_per_day(
self.date_from,
self.date_to,
)
for index, (day_date, work_hours_count) in enumerate(work_hours_data):
vals_list.append(self._timesheet_prepare_line_values(index, work_hours_data, day_date, work_hours_count))
timesheets = self.env['account.analytic.line'].sudo().create(vals_list)
return timesheets
def _timesheet_prepare_line_values(self, index, work_hours_data, day_date, work_hours_count):
self.ensure_one()
return {
'name': "%s (%s/%s)" % (self.holiday_status_id.name or '', index + 1, len(work_hours_data)),
'project_id': self.holiday_status_id.timesheet_project_id.id,
'task_id': self.holiday_status_id.timesheet_task_id.id,
'account_id': self.holiday_status_id.timesheet_project_id.analytic_account_id.id,
'unit_amount': work_hours_count,
'user_id': self.employee_id.user_id.id,
'date': day_date,
'holiday_id': self.id,
'employee_id': self.employee_id.id,
'company_id': self.holiday_status_id.timesheet_task_id.company_id.id or self.holiday_status_id.timesheet_project_id.company_id.id,
}
def action_refuse(self):
""" Remove the timesheets linked to the refused holidays """
result = super(Holidays, self).action_refuse()
timesheets = self.sudo().mapped('timesheet_ids')
timesheets.write({'holiday_id': False})
timesheets.unlink()
return result
|
agpl-3.0
| 7,238,839,353,363,624,000
| 48.142857
| 239
| 0.645736
| false
| 3.836431
| false
| false
| false
|
MycroftAI/adapt
|
adapt/expander.py
|
1
|
10830
|
# Copyright 2018 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from six.moves import xrange
__author__ = 'seanfitz'
class SimpleGraph(object):
"""This class is to graph connected nodes
Note:
        hash is a type that is hashable so independent values and tuples
but not objects, classes or lists.
"""
def __init__(self):
"""init an empty set"""
self.adjacency_lists = {}
def add_edge(self, a, b):
"""Used to add edges to the graph. 'a' and 'b' are vertexes and
if 'a' or 'b' doesn't exisit then the vertex is created
Args:
a (hash): is one vertex of the edge
            b (hash): is another vertex of the edge
"""
neighbors_of_a = self.adjacency_lists.get(a)
if not neighbors_of_a:
neighbors_of_a = set()
self.adjacency_lists[a] = neighbors_of_a
neighbors_of_a.add(b)
neighbors_of_b = self.adjacency_lists.get(b)
if not neighbors_of_b:
neighbors_of_b = set()
self.adjacency_lists[b] = neighbors_of_b
neighbors_of_b.add(a)
def get_neighbors_of(self, a):
"""This will return the neighbors of the vertex
Args:
a (hash): is the vertex to get the neighbors for
Returns:
[] : a list of neighbors_of 'a'
Will return an empty set if 'a' doesn't exist or has no
            neighbors.
"""
return self.adjacency_lists.get(a, set())
def vertex_set(self):
"""This returns a list of vertexes included in graph
Returns:
[] : a list of vertexes include in graph
"""
return list(self.adjacency_lists)
def bronk(r, p, x, graph):
"""This is used to fine cliques and remove them from graph
Args:
        graph (graph): this is the graph of vertices to search for
            cliques
        p (list): this is a list of the vertices to search
r (list): used by bronk for the search
x (list): used by bronk for the search
Yields:
        list : found clique of the given graph and vertices
"""
if len(p) == 0 and len(x) == 0:
yield r
return
for vertex in p[:]:
r_new = r[::]
r_new.append(vertex)
p_new = [val for val in p if val in graph.get_neighbors_of(vertex)] # p intersects N(vertex)
x_new = [val for val in x if val in graph.get_neighbors_of(vertex)] # x intersects N(vertex)
for result in bronk(r_new, p_new, x_new, graph):
yield result
p.remove(vertex)
x.append(vertex)
def get_cliques(vertices, graph):
"""get cliques
Args:
        vertices (list) : list of the vertices to search for cliques
        graph (graph) : a graph used to find the cliques using vertices
Yields:
list: a clique from the graph
"""
for clique in bronk([], vertices, [], graph):
yield clique
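# Illustrative sketch (added, not part of the original module): on a graph with
# the edges a-b and c-d, the maximal cliques are {a, b} and {c, d}:
# g = SimpleGraph()
# g.add_edge('a', 'b')
# g.add_edge('c', 'd')
# list(get_cliques(['a', 'b', 'c', 'd'], g))  # -> [['a', 'b'], ['c', 'd']] (clique order may vary)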
def graph_key_from_tag(tag, entity_index):
"""Returns a key from a tag entity
Args:
tag (tag) : this is the tag selected to get the key from
entity_index (int) : this is the index of the tagged entity
Returns:
str : String representing the key for the given tagged entity.
"""
start_token = tag.get('start_token')
entity = tag.get('entities', [])[entity_index]
return str(start_token) + '-' + entity.get('key') + '-' + str(entity.get('confidence'))
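# Illustrative sketch (added, not part of the original module): for a tag such as
# {'start_token': 0, 'entities': [{'key': 'pizza', 'confidence': 1.0}]},
# graph_key_from_tag(tag, 0) returns '0-pizza-1.0'.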
class Lattice(object):
"""This manages a list of items or lists
Attributes:
nodes (list) : is a list of items or lists.
This is used to track items and lists that are a part of the
Lattice
"""
def __init__(self):
"""Creates the Lattice with an empty list"""
self.nodes = []
def append(self, data):
"""Appends items or lists to the Lattice
Args:
data (item,list) : The Item or List to be added to the Lattice
"""
if isinstance(data, list) and len(data) > 0:
self.nodes.append(data)
else:
self.nodes.append([data])
def traverse(self, index=0):
""" This is used to produce a list of lists where each each item
in that list is a diffrent combination of items from the lists
within with every combination of such values.
Args:
index (int) : the index at witch to start the list.
Note this is used only in the function as a processing
Returns:
list : is every combination.
"""
if index < len(self.nodes):
for entity in self.nodes[index]:
for next_result in self.traverse(index=index+1):
if isinstance(entity, list):
yield entity + next_result
else:
yield [entity] + next_result
else:
yield []
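# Illustrative sketch (added, not part of the original module): a Lattice holding
# the node lists [1, 2] and [3] yields every combination on traversal:
# lattice = Lattice()
# lattice.append([1, 2])
# lattice.append(3)
# list(lattice.traverse())  # -> [[1, 3], [2, 3]]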
class BronKerboschExpander(object):
"""
BronKerboschExpander
Given a list of tagged entities (from the existing entity tagger implementation or another), expand out
valid parse results.
A parse result is considered valid if it contains no overlapping spans.
Since total confidence of a parse result is based on the sum of confidences of the entities, there is no sense
in yielding any potential parse results that are a subset/sequence of a larger valid parse result. By comparing
this concept to that of maximal cliques (https://en.wikipedia.org/wiki/Clique_problem), we can use well known
solutions to the maximal clique problem like the Bron/Kerbosch algorithm (https://en.wikipedia.org/wiki/Bron%E2%80%93Kerbosch_algorithm).
By considering tagged entities that do not overlap to be "neighbors", BronKerbosch will yield a set of maximal
cliques that are also valid parse results.
"""
def __init__(self, tokenizer):
self.tokenizer = tokenizer
def _build_graph(self, tags):
"""Builds a graph from the entities included in the tags.
Note this is used internally.
Args:
tags (list): A list of the tags to include in graph
Returns:
graph : this is the resulting graph of the tagged entities.
"""
graph = SimpleGraph()
for tag_index in xrange(len(tags)):
for entity_index in xrange(len(tags[tag_index].get('entities'))):
a_entity_name = graph_key_from_tag(tags[tag_index], entity_index)
tokens = self.tokenizer.tokenize(tags[tag_index].get('entities', [])[entity_index].get('match'))
for tag in tags[tag_index + 1:]:
start_token = tag.get('start_token')
if start_token >= tags[tag_index].get('start_token') + len(tokens):
for b_entity_index in xrange(len(tag.get('entities'))):
b_entity_name = graph_key_from_tag(tag, b_entity_index)
graph.add_edge(a_entity_name, b_entity_name)
return graph
def _sub_expand(self, tags):
"""This called by expand to find cliques
Args:
tags (list): a list of the tags used to get cliques
Yields:
            list : list of tags sorted by start_token; this is a clique
"""
entities = {}
graph = self._build_graph(tags)
# name entities
for tag in tags:
for entity_index in xrange(len(tag.get('entities'))):
node_name = graph_key_from_tag(tag, entity_index)
if not node_name in entities:
entities[node_name] = []
entities[node_name] += [
tag.get('entities', [])[entity_index],
tag.get('entities', [])[entity_index].get('confidence'),
tag
]
for clique in get_cliques(list(entities), graph):
result = []
for entity_name in clique:
start_token = int(entity_name.split("-")[0])
old_tag = entities[entity_name][2]
tag = {
'start_token': start_token,
'entities': [entities.get(entity_name)[0]],
'confidence': entities.get(entity_name)[1] * old_tag.get('confidence', 1.0),
'end_token': old_tag.get('end_token'),
'match': old_tag.get('entities')[0].get('match'),
'key': old_tag.get('entities')[0].get('key'),
'from_context': old_tag.get('from_context', False)
}
result.append(tag)
result = sorted(result, key=lambda e: e.get('start_token'))
yield result
def expand(self, tags, clique_scoring_func=None):
"""This is the main function to expand tags into cliques
Args:
tags (list): a list of tags to find the cliques.
clique_scoring_func (func): a function that returns a float
value for the clique
Returns:
            generator : valid parse results (lists of non-overlapping tags)
"""
lattice = Lattice()
overlapping_spans = []
def end_token_index():
return max([t.get('end_token') for t in overlapping_spans])
for i in xrange(len(tags)):
tag = tags[i]
if len(overlapping_spans) > 0 and end_token_index() >= tag.get('start_token'):
overlapping_spans.append(tag)
elif len(overlapping_spans) > 1:
cliques = list(self._sub_expand(overlapping_spans))
if clique_scoring_func:
cliques = sorted(cliques, key=lambda e: -1 * clique_scoring_func(e))
lattice.append(cliques)
overlapping_spans = [tag]
else:
lattice.append(overlapping_spans)
overlapping_spans = [tag]
if len(overlapping_spans) > 1:
cliques = list(self._sub_expand(overlapping_spans))
if clique_scoring_func:
cliques = sorted(cliques, key=lambda e: -1 * clique_scoring_func(e))
lattice.append(cliques)
else:
lattice.append(overlapping_spans)
return lattice.traverse()
|
apache-2.0
| 3,364,695,214,877,201,400
| 34.048544
| 141
| 0.574515
| false
| 4.0577
| false
| false
| false
|
LegionXI/pydarkstar
|
scrub.py
|
1
|
5707
|
"""
Create item database.
"""
import logging
import os
import re
import pydarkstar.logutils
import pydarkstar.scrubbing.ffxiah
import pydarkstar.itemlist
import pydarkstar.options
import pydarkstar.common
class Options(pydarkstar.options.Options):
"""
Reads options from config file, then from command line.
"""
def __init__(self):
super(Options, self).__init__(config='scrub.yaml', description=__doc__)
self.verbose = False # error, info, and debug
self.silent = False # error only
self.stub = 'items' # output file stub
self.overwrite = False # overwrite output
self.backup = False # backup output
self.save = False # save config
self.force = False # redownload
self.pkl = False # save pkl files
self.threads = -1 # cpu threads during download
self.stock01 = 5 # default stock for singles
self.stock12 = 5 # default stock for stacks
self.itemids = [] # a list of item ids
self.urls = [] # a list of category urls
# logging
self.add_argument('--verbose', action='store_true',
help='report debug, info, and error')
self.add_argument('--silent', action='store_true',
help='report error only')
# output
self.add_argument(dest='stub', nargs='?', type=str, default=self.stub,
help='output file stub')
self.add_argument('--overwrite', action='store_true',
help='overwrite output file')
self.add_argument('--backup', action='store_true',
help='backup output file')
self.add_argument('--save', action='store_true',
help='save config file (and exit)')
# scrub parameters
self.add_argument('--force', action='store_true',
help='start from scratch')
self.add_argument('--pkl', action='store_true',
help='save pkl files')
self.add_argument('--threads', type=int, default=self.threads, metavar=self.threads,
help='number of cpu threads to use')
self.add_argument('--urls', type=str, nargs='*', action='append', default=self.urls, metavar='url',
help='a list of category urls')
self.add_argument('--itemids', type=int, nargs='*', action='append', default=self.itemids, metavar='itemids',
help='a list of item ids')
# defaults
self.add_argument('--stock01', type=int, default=self.stock01, metavar=self.stock01,
help='default stock for singles')
self.add_argument('--stock12', type=int, default=self.stock12, metavar=self.stock12,
help='default stock for stacks')
self.exclude('itemids')
self.exclude('urls')
def parse_args(self, args=None):
super(Options, self).parse_args(args)
urls = []
for obj in self.urls:
if isinstance(obj, list):
urls.extend(obj)
else:
urls.append(obj)
self.urls = urls
if not self.urls:
self.urls = None
itemids = []
for obj in self.itemids:
if isinstance(obj, list):
itemids.extend(obj)
else:
itemids.append(obj)
self.itemids = itemids
if not self.itemids:
self.itemids = None
def main():
"""
Main function.
"""
# get options
opts = Options()
opts.parse_args()
pydarkstar.logutils.basicConfig(
verbose=opts.verbose, silent=opts.silent, fname='scrub.log')
logging.info('start')
# log options
opts.log_values(level=logging.INFO)
# save options
if opts.save:
opts.save = False
opts.dump()
return
# check output file name validity
oname = os.path.abspath('{}.csv'.format(re.sub(r'\.csv$', '', opts.stub)))
if not opts.overwrite and not opts.backup:
if os.path.exists(oname):
logging.error('output file already exists!\n\t%s', oname)
logging.error('please use --overwrite or --backup')
exit(-1)
    # scrub data
scrubber = pydarkstar.scrubbing.ffxiah.FFXIAHScrubber()
scrubber.save = opts.pkl
data = scrubber.scrub(force=opts.force, threads=opts.threads, urls=opts.urls, ids=opts.itemids)
# create item list from data
ilist = pydarkstar.itemlist.ItemList()
for itemid in data:
# singles
try:
price01, sell01 = data[itemid]['median'], True
# do not sell items without a price
if price01 <= 0:
price01, sell01 = None, False
except KeyError:
price01, sell01 = None, False
# stacks
try:
price12, sell12 = data[itemid]['stack price'], True
# do not sell items without a price
if price12 <= 0:
price12, sell12 = None, False
except KeyError:
price12, sell12 = None, False
# the name doesn't really matter
try:
name = data[itemid]['name']
except KeyError:
name=None
ilist.add(itemid, name=name,
price01=price01, stock01=opts.stock01, sell01=sell01, buy01=True,
price12=price12, stock12=opts.stock12, sell12=sell12, buy12=True)
# backup file
if opts.backup:
pydarkstar.common.backup(oname)
# overwrites if exists, but we checked already
ilist.savecsv(oname)
def cleanup():
logging.info('exit\n')
if __name__ == '__main__':
with pydarkstar.logutils.capture():
main()
cleanup()
|
mit
| -6,963,018,258,926,190,000
| 31.061798
| 117
| 0.574908
| false
| 3.914266
| false
| false
| false
|
RickyCook/AliasKeepr
|
aliaskeepr.py
|
1
|
2237
|
#!/usr/bin/env python
import argparse
import os
import re
import shutil
import sys
from ConfigParser import SafeConfigParser
PARSER = argparse.ArgumentParser(description="Import some aliases")
PARSER.add_argument('profile', help="Profile to output config for")
PARSER.add_argument('-c', '--config',
default='~/.akrc',
help="Directory where profiles are stored")
PARSER.add_argument('--init-alias',
default='ak',
help="When using the 'init' profile, the alias "
"name to insert")
ALIAS_RE = re.compile('^[A-Za-z0-9 _-]+$')
def profile_filename(config_dir, profile_name):
return os.path.expanduser('%s/%s.ini' % (config_dir, profile_name))
def main():
args = PARSER.parse_args()
if args.profile == 'init':
write_init_profile(args.config, args.init_alias)
profile_fn = profile_filename(args.config, args.profile)
profile_commands_dir = os.path.expanduser('%s/.%s' % (args.config, args.profile))
sys.stderr.write("Using profile in '%s'\n" % profile_fn)
config = SafeConfigParser()
config.read(profile_fn)
try:
shutil.rmtree(profile_commands_dir, ignore_errors=True)
except OSError:
pass
os.mkdir(profile_commands_dir)
for alias, command in config.items('aliases'):
if not ALIAS_RE.match(alias):
sys.stderr.write("Alias '%s' not allowed; skipped\n" % alias)
continue
if '$@' not in command:
command = '%s "$@"' % command
command_fn = '%s/%s' % (profile_commands_dir, alias)
with open(command_fn, 'w') as handle:
handle.write(command)
print "function '%s' { eval \"$(cat '%s')\" }" % (alias, command_fn)
print
print '# USAGE: eval "$("%s" "%s")"' % (__file__, args.profile)
def write_init_profile(config_dir, init_alias):
try:
os.mkdir(os.path.expanduser(config_dir))
except OSError:
pass
my_abs_path = os.path.abspath(os.path.expanduser(__file__))
with open(profile_filename(config_dir, 'init'), 'w') as handle:
handle.write('[aliases]\n')
handle.write('{init_alias} = eval "$("{my_path}" "$@")"'.format(
init_alias=init_alias,
my_path=my_abs_path,
))
if __name__ == '__main__':
main()
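# Illustrative sketch (not part of the original script): a hypothetical profile
# file at ~/.akrc/work.ini that this script turns into shell functions.
#
#     [aliases]
#     gs = git status
#     co = git checkout
#
# It would then be loaded into the current shell with:
#
#     eval "$(./aliaskeepr.py work)"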
|
mit
| -4,410,401,499,480,091,600
| 25.630952
| 83
| 0.621815
| false
| 3.415267
| true
| false
| false
|
michaelimfeld/notipy-server
|
notipyserver/backends/telegram/userregistration.py
|
1
|
1411
|
"""
`notipyserver` - User-Notification-Framework server
Provides a telegram handler function
for user registration.
:copyright: (c) by Michael Imfeld
:license: MIT, see LICENSE for details
"""
import telegram
from .usermanager import add_user, add_group
def register(bot, update):
"""
Saves the telegram username and the chat_id from the given
update object to a file.
Args:
bot (telegram.Bot): The bot instance.
update (telegram.Update): The message update.
"""
if update.message.chat.type == "group":
recipient_name = update.message.chat.title
register_function = add_group
name = update.message.chat.title
else:
if not update.message.chat.username:
message = "Please setup a telegram username to use this bot."
bot.sendMessage(chat_id=update.message.chat_id, text=message)
return
recipient_name = update.message.chat.username
register_function = add_user
name = update.message.chat.first_name
is_new = register_function(recipient_name, update.message.chat_id)
if is_new:
message = """
Hi {}!
Your registration was *successful* 🎉.
""".format(name).strip()
else:
message = "Already registered!"
bot.sendMessage(
chat_id=update.message.chat_id,
text=message,
parse_mode=telegram.ParseMode.MARKDOWN)
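# Illustrative sketch (not part of the original module): this handler is meant
# to be wired into a python-telegram-bot dispatcher; a hypothetical setup could
# look like the following (token and command name are placeholders).
#
#     from telegram.ext import Updater, CommandHandler
#     updater = Updater(token="<bot token>")
#     updater.dispatcher.add_handler(CommandHandler("register", register))
#     updater.start_polling()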
|
mit
| -8,052,298,776,417,543,000
| 27.16
| 73
| 0.653409
| false
| 3.846995
| false
| false
| false
|
tracon/dragontail
|
site_specific/tracon2016/management/commands/setup_tracon2016.py
|
1
|
6630
|
# encoding: utf-8
from __future__ import print_function, unicode_literals
from datetime import datetime, timedelta, date
from django.core.files import File
from django.core.management import call_command
from django.core.management.base import BaseCommand
from django.utils.timezone import now
from wagtail.wagtailcore.models import Site
from dragontail.content.models import BasicPage, TemplateSettings
class Command(BaseCommand):
args = ''
help = 'Setup example content'
def add_arguments(self, parser):
parser.add_argument('hostname', type=str)
def handle(self, *args, **options):
Setup(hostname=options['hostname']).setup()
class Setup(object):
def __init__(self, hostname):
self.hostname = hostname
def setup(self):
print('NOTE: Setting up Tracon (2016) site at {hostname}'.format(hostname=self.hostname))
self.setup_content()
# self.setup_ads()
# self.setup_blog()
def setup_content(self):
t = now()
is_default_site = not Site.objects.exists()
self.root_page, unused = BasicPage.objects.get_or_create(
slug='index',
defaults=dict(
title='Tracon Tampere-talossa 3.–4. syyskuuta 2016',
depth=0
)
)
self.site, unused = Site.objects.get_or_create(hostname=self.hostname, defaults=dict(
is_default_site=is_default_site,
root_page=self.root_page,
))
self.template_settings, unused = TemplateSettings.objects.get_or_create(
site=self.site,
defaults=dict(
base_template='tracon11_base.jade',
basic_page_template='tracon11_page.jade',
blog_index_template='tracon11_blog_index.jade',
blog_post_template='tracon11_blog_post.jade',
)
)
return
ordering = 0
for page_slug, page_title, child_pages in [
('front-page', 'Tracon Tampere-talossa 3.–4. syyskuuta 2016', []),
('blog', 'Ajankohtaista', []), # pseudo page for menu, actually taken over by blog
('tapahtuma', 'Tapahtuma', [
('tyovoima', 'Vänkäriksi'),
('jarjestyssaannot', 'Järjestyssäännöt'),
('tapahtumapaikka', 'Tapahtumapaikka'),
]),
('ohjelma', 'Ohjelma', [
('ohjelmanjarjestajaksi', 'Ohjelmanjärjestäjäksi'),
]),
('liput', 'Liput', []),
('yhteys', 'Ota yhteyttä!', [
('conitea', 'Järjestäjät'),
('media', 'Tiedotusvälineille'),
('sponsorit', 'Yhteistyökumppaneille'),
])
]:
ordering += 10
parent_page, unused = Page.objects.get_or_create(
site=self.site,
parent=None,
slug=page_slug,
defaults=dict(
title=page_title,
body='Placeholder for {slug}'.format(slug=page_slug),
public_from=t,
visible_from=t,
order=ordering,
)
)
# v2
child_ordering = 0
for child_slug, child_title in child_pages:
child_ordering += 10
child_page, unused = Page.objects.get_or_create(
site=self.site,
parent=parent_page,
slug=child_slug,
defaults=dict(
title=child_title,
body='Placeholder for {slug}'.format(slug=child_slug),
public_from=t,
visible_from=t,
order=child_ordering,
)
)
# v2
if child_page.order == 0:
child_page.order = child_ordering
child_page.save()
front_page = Page.objects.get(site=self.site, slug='front-page')
if not front_page.override_menu_text:
front_page.override_menu_text = 'Etusivu'
# v11
if not front_page.override_page_template:
front_page.override_page_template = 'tracon11_front_page.jade'
if not front_page.page_controller_code or front_page.page_controller_code == 'events.tracommon.views:front_page_controller':
front_page.page_controller_code = 'site_specific.tracommon.views:front_page_controller'
front_page.save()
for path, target in [
('admin', '/admin/'),
]:
redirect, unused = Redirect.objects.get_or_create(
site=self.site,
path=path,
defaults=dict(
target=target
),
)
def setup_ads(self):
for banner_title, banner_url, banner_path in [
('Säätöyhteisö B2 ry', 'http://b2.fi', 'site_specific/tracon11/static/tracon11/img/b2-saatoa2008-wh-200.png'),
]:
try:
Banner.objects.get(sites=self.site, url=banner_url)
except Banner.DoesNotExist:
with open(banner_path, 'rb') as banner_file:
banner = Banner(
title=banner_title,
url=banner_url,
image_file=File(banner_file),
)
banner.save()
banner.sites = [self.site,]
banner.save()
def setup_blog(self):
"""
Set up a stub of the blog.tracon.fi site required by the front page blog box.
"""
blog_site, unused = Site.objects.get_or_create(hostname='blog.tracon.fi', defaults=dict(
name='Traconin blogi'
))
blog_site_settings, unused = SiteSettings.objects.get_or_create(site=blog_site, defaults=dict(
base_template='tracon11_base.jade',
page_template='tracon11_page.jade',
blog_index_template='tracon11_blog_index.jade',
blog_post_template='tracon11_blog_post.jade',
))
for category_slug, category_title in [
('conzine', 'Conzine'),
('palaute', 'Palaute'),
('jarjestaminen', 'Traconin järjestäminen'),
]:
BlogCategory.objects.get_or_create(
site=blog_site,
slug=category_slug,
defaults=dict(
title=category_title,
)
)
|
mit
| 6,052,349,529,121,051,000
| 33.581152
| 132
| 0.521575
| false
| 3.931548
| false
| false
| false
|
mdoucet/reflectivity_ui
|
test/notebooks/event_reduction.py
|
1
|
18262
|
import sys
import time
import multiprocessing
import mantid.simpleapi as api
import numpy as np
from reflectivity_ui.interfaces.data_handling import instrument
def load_data(run="REF_M_30769"):
if run.startswith("/SNS"):
filepath = run
else:
filepath = '/SNS/REF_M/IPTS-21391/nexus/' + run + '.nxs.h5'
_instrument = instrument.Instrument()
ws_list = _instrument.load_data(filepath)
_n_counts = 0
_high_count_ws = None
for _ws in ws_list:
_i_counts = _ws.getNumberEvents()
if _n_counts < _i_counts:
_n_counts = _i_counts
_high_count_ws = _ws
return _high_count_ws
def get_peak(center, width, max_pixel=None):
peak_min = int(round(float(center) - float(width)/2.0))
peak_max = int(round(float(center) + float(width)/2.0+1.0))
if max_pixel is not None:
if peak_min < 0: peak_min = 0
if peak_max >= max_pixel: peak_max = max_pixel-1
return peak_min, peak_max
def get_wl_range(ws):
"""
Determine TOF range from the data
:param workspace ws: workspace to work with
"""
run_object = ws.getRun()
wl = run_object.getProperty('LambdaRequest').value[0]
chopper_speed = run_object.getProperty('SpeedRequest1').value[0]
# Cut the edges by using a width of 2.6 A
wl_min = (wl - 1.3 * 60.0 / chopper_speed)
wl_max = (wl + 1.3 * 60.0 / chopper_speed)
return [wl_min, wl_max]
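# Illustrative note (not part of the original module): with a hypothetical
# LambdaRequest of 5.0 A and a chopper SpeedRequest1 of 60 Hz, the band above
# is 5.0 +/- 1.3 * 60 / 60, i.e. wavelengths from 3.7 A to 6.3 A.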
def get_q_binning(q_min=0.001, q_max=0.15, q_step=-0.02):
if q_step > 0:
n_steps = np.int((q_max-q_min)/q_step)
return q_min + np.asarray([q_step * i for i in range(n_steps)])
else:
_step = 1.0+np.abs(q_step)
n_steps = np.int(np.log(q_max/q_min)/np.log(_step))
return q_min * np.asarray([_step**i for i in range(n_steps)])
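# Illustrative note (not part of the original module): a negative q_step gives
# geometric (log-scale) bin edges, e.g. get_q_binning(0.001, 0.15, -0.02)
# yields 0.001, 0.00102, 0.0010404, ... with each edge 2% larger than the
# previous one, while a positive q_step gives linear edges q_min + i * q_step.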
def quicknxs_scale(theta, peak, low_res, norm_peak, norm_low_res):
"""
Scaling factor to multiply by to be compatible with QuickNXS 1.0.
"""
quicknxs_scale = (float(norm_peak[1])-float(norm_peak[0])) * (float(norm_low_res[1])-float(norm_low_res[0]))
quicknxs_scale /= (float(peak[1])-float(peak[0])) * (float(low_res[1])-float(low_res[0]))
_scale = 0.005 / np.sin(theta) if theta > 0.0002 else 1.0
quicknxs_scale *= _scale
return quicknxs_scale
class EventReflectivity(object):
"""
    Event-based reflectivity calculation.
List of items to be taken care of outside this class:
- Edge points cropping
- Calculate theta using SANGLE or not
- Angle offset
- Direct pixel overwrite
- DANGLE0 overwrite
Options that are left out:
- rounding up pixel to assign the proper Qx
"""
QX_VS_QZ = 0
KZI_VS_KZF = 1
DELTA_KZ_VS_QZ = 3
INSTRUMENT_4A = 0
INSTRUMENT_4B = 1
def __init__(self, scattering_workspace, direct_workspace,
signal_peak, signal_bck, norm_peak, norm_bck,
specular_pixel, signal_low_res, norm_low_res,
q_min=None, q_step=-0.02, q_max=None,
tof_range=None, theta=1.0, sample_length=10,
instrument=None):
"""
Pixel ranges include the min and max pixels.
:param scattering_workspace: Mantid workspace containing the reflected data
:param direct_workspace: Mantid workspace containing the direct beam data [if None, normalization won't be applied]
:param signal_peak: pixel min and max for the specular peak
:param signal_bck: pixel range of the background [if None, the background won't be subtracted]
:param norm_peak: pixel range of the direct beam peak
:param norm_bck: pixel range of the direct beam background [if None, the background won't be subtracted]
:param specular_pixel: pixel of the specular peak
:param signal_low_res: pixel range of the specular peak out of the scattering plane
:param norm_low_res: pixel range of the direct beam out of the scattering plane
:param q_min: value of lowest q point
:param q_step: step size in Q. Enter a negative value to get a log scale
        :param q_max: value of largest q point
        :param tof_range: TOF range, or None
:param theta: theta scattering angle in radians
:param sample_length: sample size, for resolution calculation
"""
if instrument in [self.INSTRUMENT_4A, self.INSTRUMENT_4B]:
self.instrument = instrument
else:
self.instrument = self.INSTRUMENT_4A
self.signal_peak = signal_peak
self.signal_bck = signal_bck
self.norm_peak = norm_peak
self.norm_bck = norm_bck
self.signal_low_res = signal_low_res
self.norm_low_res = norm_low_res
self.specular_pixel = specular_pixel
self.q_min = q_min
self.q_max = q_max
self.q_step = q_step
self.tof_range = tof_range
self.theta = theta
self.sample_length = sample_length
self._offspec_x_bins = None
self._offspec_z_bins = None
# Process workspaces
if self.tof_range is not None:
self._ws_sc = api.CropWorkspace(InputWorkspace=scattering_workspace,
XMin=tof_range[0], XMax=tof_range[1],
OutputWorkspace='_'+str(scattering_workspace))
self._ws_db = api.CropWorkspace(InputWorkspace=direct_workspace,
XMin=tof_range[0], XMax=tof_range[1],
OutputWorkspace='_'+str(direct_workspace))
else:
self._ws_sc = scattering_workspace
self._ws_db = direct_workspace
# Extract meta data
self.extract_meta_data()
def extract_meta_data(self):
# Set up basic data
self.n_x = int(self._ws_sc.getInstrument().getNumberParameter("number-of-x-pixels")[0])
self.n_y = int(self._ws_sc.getInstrument().getNumberParameter("number-of-y-pixels")[0])
self.pixel_width = float(self._ws_sc.getInstrument().getNumberParameter("pixel-width")[0]) / 1000.0
if self.instrument == self.INSTRUMENT_4B:
self.extract_meta_data_4B()
else:
self.extract_meta_data_4A()
h = 6.626e-34 # m^2 kg s^-1
m = 1.675e-27 # kg
self.constant = 1e-4 * m * self.source_detector_distance / h
if self.tof_range is None:
self.wl_range = get_wl_range(self._ws_sc)
else:
self.wl_range = [self.tof_range[0] / self.constant, self.tof_range[1] / self.constant]
if self.q_min is None:
self.q_min = 4.0*np.pi/self.wl_range[1] * np.sin(self.theta)
if self.q_max is None:
self.q_max = 4.0*np.pi/self.wl_range[0] * np.sin(self.theta)
# Q binning to use
self.q_bins = get_q_binning(self.q_min, self.q_max, self.q_step)
def extract_meta_data_4A(self):
run_object = self._ws_sc.getRun()
self.det_distance = run_object['SampleDetDis'].getStatistics().mean
source_sample_distance = run_object['ModeratorSamDis'].getStatistics().mean
if not run_object['SampleDetDis'].units in ['m', 'meter']:
self.det_distance /= 1000.0
if not run_object['ModeratorSamDis'].units in ['m', 'meter']:
source_sample_distance /= 1000.0
self.source_detector_distance = source_sample_distance + self.det_distance
def extract_meta_data_4B(self):
self.det_distance = 1.83
source_sample_distance = 13.63
self.source_detector_distance = source_sample_distance + self.det_distance
def __repr__(self):
output = "sample-det: %s\n" % self.det_distance
output += "pixel: %s\n" % self.pixel_width
output += "WL: %s %s\n" % (self.wl_range[0], self.wl_range[1])
output += "Q: %s %s\n" % (self.q_min, self.q_max)
output += "Theta = %s" % self.theta
return output
def specular(self, q_summing=True):
# Scattering data
refl, d_refl = self._reflectivity(self._ws_sc, peak_position=self.specular_pixel,
peak=self.signal_peak, low_res=self.signal_low_res,
theta=self.theta, q_summing=q_summing)
norm, d_norm = self._reflectivity(self._ws_db, peak_position=0,
peak=self.norm_peak, low_res=self.norm_low_res,
theta=self.theta, q_summing=False)
if False and self.norm_bck is not None:
norm_bck, d_norm_bck = self._norm_bck_in_pixel()
norm -= norm_bck
d_norm = np.sqrt(d_norm**2 + d_norm_bck**2)
db_bins = norm>0
if False and self.signal_bck is not None:
refl_bck, d_refl_bck = self._signal_bck_in_pixel()
refl -= refl_bck
d_refl = np.sqrt(d_refl**2 + d_refl_bck**2)
self.refl_bck = refl_bck[db_bins]/norm[db_bins]
self.d_refl_bck = np.sqrt(d_refl_bck[db_bins]**2 / norm[db_bins]**2 + refl_bck[db_bins]**2 * d_norm[db_bins]**2 / norm[db_bins]**4)
refl[db_bins] = refl[db_bins]/norm[db_bins]
d_refl[db_bins] = np.sqrt(d_refl[db_bins]**2 / norm[db_bins]**2 + refl[db_bins]**2 * d_norm[db_bins]**2 / norm[db_bins]**4)
self.refl = refl
self.d_refl = d_refl
return self.q_bins, refl, d_refl
def _signal_bck_in_pixel(self, normalize_to_single_pixel=False, q_bins=None):
q_bins = self.q_bins if q_bins is None else q_bins
refl_bck, d_refl_bck = self._reflectivity(self._ws_sc, peak_position=0, q_bins=q_bins,
peak=self.signal_bck, low_res=self.signal_low_res,
theta=self.theta, q_summing=False)
_pixel_area = (self.signal_bck[1]-self.signal_bck[0]+1.0)
if not normalize_to_single_pixel:
_pixel_area /= (self.signal_peak[1]-self.signal_peak[0]+1.0)
refl_bck /= _pixel_area
d_refl_bck /= _pixel_area
return refl_bck, d_refl_bck
def _norm_bck_in_pixel(self, q_bins=None):
if q_bins is None:
q_bins = self.q_bins
norm_bck, d_norm_bck = self._reflectivity(self._ws_db, peak_position=0, q_bins=q_bins,
peak=self.norm_bck, low_res=self.norm_low_res,
theta=self.theta, q_summing=False)
_pixel_area = (self.norm_bck[1]-self.norm_bck[0]+1.0) / (self.norm_peak[1]-self.norm_peak[0]+1.0)
norm_bck /= _pixel_area
d_norm_bck /= _pixel_area
return norm_bck, d_norm_bck
def slice(self, x_min=0.002, x_max=0.004, x_bins=None, z_bins=None,
refl=None, d_refl=None, normalize=False):
x_bins = self._offspec_x_bins if x_bins is None else x_bins
z_bins = self._offspec_z_bins if z_bins is None else z_bins
refl = self._offspec_refl if refl is None else refl
d_refl = self._offspec_d_refl if d_refl is None else d_refl
i_min = len(x_bins[x_bins<x_min])
i_max = len(x_bins[x_bins<x_max])
_spec = np.sum(refl[i_min:i_max], axis=0)
_d_spec = np.sum( (d_refl[i_min:i_max])**2, axis=0)
_d_spec = np.sqrt(_d_spec)
if normalize:
_spec /= (i_max-i_min)
_d_spec /= (i_max-i_min)
return z_bins, _spec, _d_spec
def _reflectivity(self, ws, peak_position, peak, low_res, theta, q_bins=None, q_summing=False):
"""
Assumes that the input workspace is normalized by proton charge.
"""
charge = ws.getRun()['gd_prtn_chrg'].value
_q_bins = self.q_bins if q_bins is None else q_bins
refl = np.zeros(len(_q_bins)-1)
_pixel_width = self.pixel_width if q_summing else 0.0
for i in range(low_res[0], int(low_res[1]+1)):
for j in range(peak[0], int(peak[1]+1)):
if self.instrument == self.INSTRUMENT_4A:
pixel = j * self.n_y + i
else:
pixel = i * self.n_y + j
evt_list = ws.getSpectrum(pixel)
if evt_list.getNumberEvents() == 0:
continue
wl_list = evt_list.getTofs() / self.constant
x_distance = _pixel_width * (peak_position - j)
delta_theta_f = np.arctan(x_distance / self.det_distance) / 2.0
qz=4.0*np.pi/wl_list * np.sin(theta + delta_theta_f) * np.cos(delta_theta_f)
_counts, _ = np.histogram(qz, bins=_q_bins)
refl += _counts
d_refl_sq = np.sqrt(refl) / charge
refl /= charge
return refl, d_refl_sq
def _get_events(self, ws, peak, low_res):
"""
Return an array of wavelengths for a given workspace.
"""
wl_events = np.asarray([])
for i in range(low_res[0], int(low_res[1]+1)):
for j in range(peak[0], int(peak[1]+1)):
if self.instrument == self.INSTRUMENT_4A:
pixel = j * self.n_y + i
else:
pixel = i * self.n_y + j
evt_list = ws.getSpectrum(pixel)
wl_list = evt_list.getTofs() / self.constant
wl_events = np.concatenate((wl_events, wl_list))
return wl_events
def off_specular(self, x_axis=None, x_min=-0.015, x_max=0.015, x_npts=50,
z_min=None, z_max=None, z_npts=-120, bck_in_q=None):
"""
Compute off-specular
:param x_axis: Axis selection
:param x_min: Min value on x-axis
:param x_max: Max value on x-axis
:param x_npts: Number of points in x (negative will produce a log scale)
:param z_min: Min value on z-axis (if none, default Qz will be used)
:param z_max: Max value on z-axis (if none, default Qz will be used)
:param z_npts: Number of points in z (negative will produce a log scale)
"""
# Z axis binning
qz_bins = self.q_bins
if z_min is not None and z_max is not None:
if z_npts < 0:
qz_bins = np.logspace(np.log10(z_min), np.log10(z_max), num=np.abs(z_npts))
else:
qz_bins = np.linspace(z_min, z_max, num=z_npts)
# X axis binning
if x_npts > 0:
qx_bins = np.linspace(x_min, x_max, num=x_npts)
else:
qx_bins = np.logspace(np.log10(x_min), np.log10(x_max), num=np.abs(x_npts))
wl_events = self._get_events(self._ws_db, self.norm_peak, self.norm_low_res)
wl_dist, wl_bins = np.histogram(wl_events, bins=60)
wl_middle = [(wl_bins[i+1]+wl_bins[i])/2.0 for i in range(len(wl_bins)-1)]
_refl, _d_refl = self._off_specular(self._ws_sc, wl_dist, wl_middle, qx_bins, qz_bins,
self.specular_pixel, self.theta, x_axis=x_axis)
db_charge = self._ws_db.getRun()['gd_prtn_chrg'].value
_refl *= db_charge * (wl_bins[1]-wl_bins[0])
_d_refl *= db_charge * (wl_bins[1]-wl_bins[0])
# Background
if self.signal_bck:
if bck_in_q is None:
print("Not implemented")
#refl_bck, d_refl_bck = self._signal_bck_in_pixel(normalize_to_single_pixel=True, q_bins=qz_bins)
else:
_, refl_bck, d_refl_bck = self.slice(bck_in_q[0], bck_in_q[1],
x_bins=qx_bins, z_bins=qz_bins,
refl=_refl, d_refl=_d_refl,
normalize=True)
_refl -= refl_bck
_d_refl = np.sqrt(_d_refl**2 + d_refl_bck**2)
self._offspec_x_bins = qx_bins
self._offspec_z_bins = qz_bins
self._offspec_refl = _refl
self._offspec_d_refl = _d_refl
return qx_bins, qz_bins, _refl, _d_refl
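    # Illustrative sketch (not part of the original class): typical calls,
    # assuming `er` is an already configured EventReflectivity instance.
    #
    #     qx, qz, r, dr = er.off_specular(x_axis=EventReflectivity.QX_VS_QZ)
    #     kzi_kzf = er.off_specular(x_axis=EventReflectivity.KZI_VS_KZF)
    #     dkz_qz = er.off_specular(x_axis=EventReflectivity.DELTA_KZ_VS_QZ)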
def _off_specular(self, ws, wl_dist, wl_bins, x_bins, z_bins, peak_position, theta, x_axis=None):
charge = ws.getRun()['gd_prtn_chrg'].value
refl = np.zeros([len(x_bins)-1, len(z_bins)-1])
counts = np.zeros([len(x_bins)-1, len(z_bins)-1])
for j in range(0, self.n_x):
wl_list = np.asarray([])
for i in range(self.signal_low_res[0], int(self.signal_low_res[1]+1)):
if self.instrument == self.INSTRUMENT_4A:
pixel = j * self.n_y + i
else:
pixel = i * self.n_y + j
evt_list = ws.getSpectrum(pixel)
wl_events = evt_list.getTofs() / self.constant
wl_list = np.concatenate((wl_events, wl_list))
k = 2.0 * np.pi / wl_list
wl_weights = 1.0/np.interp(wl_list, wl_bins, wl_dist, np.inf, np.inf)
x_distance = float(peak_position-j) * self.pixel_width
delta_theta_f = np.arctan(x_distance / self.det_distance)
theta_f = theta + delta_theta_f
qz = k * (np.sin(theta_f) + np.sin(theta))
qx = k * (np.cos(theta_f) - np.cos(theta))
ki_z = k * np.sin(theta)
kf_z = k * np.sin(theta_f)
_x = qx
_z = qz
if x_axis == EventReflectivity.DELTA_KZ_VS_QZ:
_x = (ki_z - kf_z)
elif x_axis == EventReflectivity.KZI_VS_KZF:
_x = ki_z
_z = kf_z
histo_weigths = wl_weights * _z / wl_list
_counts, _, _ = np.histogram2d(_x, _z, bins=[x_bins, z_bins], weights=histo_weigths)
refl += _counts
_counts, _, _ = np.histogram2d(_x, _z, bins=[x_bins, z_bins])
counts += _counts
bin_size = z_bins[1] - z_bins[0]
d_refl_sq = refl / np.sqrt(counts) / charge / bin_size
refl /= charge * bin_size
return refl, d_refl_sq
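# Illustrative sketch (not part of the original module): a minimal specular
# reduction; the run numbers, pixel ranges and theta below are placeholders.
def _example_specular_reduction():
    ws_sc = load_data("REF_M_30769")   # reflected-beam run (placeholder)
    ws_db = load_data("REF_M_30760")   # direct-beam run (placeholder)
    er = EventReflectivity(ws_sc, ws_db,
                           signal_peak=[150, 160], signal_bck=[30, 70],
                           norm_peak=[150, 160], norm_bck=[30, 70],
                           specular_pixel=155.0,
                           signal_low_res=[75, 165], norm_low_res=[75, 165],
                           q_step=-0.02, theta=0.01,
                           instrument=EventReflectivity.INSTRUMENT_4B)
    q_bins, refl, d_refl = er.specular(q_summing=False)
    return q_bins, refl, d_refl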
|
apache-2.0
| -5,111,205,279,837,215,000
| 41.078341
| 143
| 0.544464
| false
| 3.166638
| false
| false
| false
|
hajicj/safire
|
scripts/profile-training.py
|
1
|
3750
|
#!/usr/bin/env python
"""Testing script for the loader-setup-learner scenario. Runs a miniature
experiment."""
import argparse
import cProfile
import logging
import os
import pstats
import StringIO
from gensim import corpora
from safire.data.loaders import MultimodalDatasetLoader
from safire.learning.models.logistic_regression import LogisticRegression
from safire.learning.learners.base_sgd_learner import BaseSGDLearner
def profile_run(learner, model_handle, dataset):
pr = cProfile.Profile()
pr.enable()
learner.run(model_handle, dataset)
pr.disable()
s = StringIO.StringIO()
    sortby = 'tottime'
    ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats(.1)
return s.getvalue()
def main(args):
serializer = corpora.MmCorpus
if args.serializer:
if args.serializer == 'SvmLight':
serializer = corpora.SvmLightCorpus
elif args.serializer == 'Blei':
serializer = corpora.BleiCorpus
elif args.serializer == 'Low':
serializer = corpora.LowCorpus
        elif args.serializer == 'Mm':
serializer = corpora.MmCorpus
logging.info('Initializing loader...')
loader = MultimodalDatasetLoader(args.root, args.name,
text_serializer=serializer)
logging.info('Loading dataset...')
dataset = loader.load(text_infix=args.text_label, img_infix=args.img_label)
dataset.set_mode(1)
logging.info('Setting up model...')
model_handle = LogisticRegression.setup(dataset, batch_size=args.batch_size)
logging.info('Setting up learner...')
learner = BaseSGDLearner(n_epochs=args.n_epochs, b_size=args.batch_size,
validation_frequency=args.validation_frequency)
logging.info('Running learner with profiling...')
profiler_results = profile_run(learner, model_handle, dataset)
print profiler_results
def build_argument_parser():
parser = argparse.ArgumentParser(description = __doc__, add_help=True)
parser.add_argument('-r', '--root', required=True,
help='The root dataset directory, passed to Loader.')
parser.add_argument('-n', '--name', required=True,
help='The name passed to Loader.')
parser.add_argument('--text_label', default=None, help='Text corpus label.')
parser.add_argument('--img_label', default=None, help='Image corpus label.')
parser.add_argument('-b', '--batch_size', type=int, default=1,
help='SGD batch size')
parser.add_argument('-e', '--n_epochs', type=int, default=5,
help='Number of SGD epochs.')
parser.add_argument('-f', '--validation_frequency', type=int, default=3,
help='Validation will be run once every -v batches.')
parser.add_argument('--serializer', help='Use this gensim.corpora class'+
' to load the serialized text corpora. Accepts: Mm,'+
' Blei, SVMlight, Low; defaults to MmCorpus')
parser.add_argument('-v', '--verbose', action='store_true',
help='Will output INFO messages.')
parser.add_argument('--debug', action='store_true',
help='Will output DEBUG messages.')
return parser
def _set_logging(args):
level = logging.WARN
if args.debug:
level = logging.DEBUG
elif args.verbose:
level = logging.INFO
logging.basicConfig(format='%(levelname)s : %(message)s', level=level)
####################################################
if __name__ == '__main__':
parser = build_argument_parser()
args = parser.parse_args()
_set_logging(args)
main(args)
|
gpl-3.0
| 4,754,184,727,170,040,000
| 30.25
| 80
| 0.623467
| false
| 4.067245
| false
| false
| false
|
peter17/pijnu
|
test/timer.py
|
1
|
1850
|
# -*- coding: utf8 -*-
from __future__ import print_function
'''
Copyright 2009 Denis Derman <denis.spir@gmail.com> (former developer)
Copyright 2011-2012 Peter Potrowl <peter017@gmail.com> (current developer)
This file is part of Pijnu.
Pijnu is free software: you can redistribute it and/or modify it
under the terms of the GNU Lesser General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Pijnu is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with Pijnu. If not, see <http://www.gnu.org/licenses/>.
'''
'''
Overall timer func -- just in case needed
'''
from time import time
__all__ = ['timer']
def timer(n, f, *args, **kw_args):
t0 = time()
for i in range(n):
f(*args, **kw_args)
t = time() - t0
arg_str = ','.join(repr(arg) for arg in args)
kw_arg_str = (',' + str(kw_args)[1:-1]) if kw_args else ''
print("%s(%s%s) %s time(s) <--> %0.3f s" \
% (f.__name__, arg_str, kw_arg_str, n, t))
########## test ##########
if __name__ == "__main__":
def sum2(x, y):
return x + y
timer(100000, sum2, 2, 3)
from math import sqrt
def mean(seq, geom=False):
def sum(seq):
sum = 0
for x in seq:
sum += x
return sum
if geom:
squares = (x * x for x in seq)
return sqrt(sum(squares))
else:
return sum(seq) / len(seq)
seq = [1, 2, 3, 4, 5, 6, 7, 8, 9]
timer(100000, mean, seq)
timer(100000, mean, seq, geom=True)
|
gpl-3.0
| 4,233,844,960,566,852,600
| 26.61194
| 74
| 0.603243
| false
| 3.357532
| false
| false
| false
|
AlexanderSk/fail2ban
|
fail2ban/server/jail.py
|
1
|
6789
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
# vi: set ft=python sts=4 ts=4 sw=4 noet :
# This file is part of Fail2Ban.
#
# Fail2Ban is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Fail2Ban is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Fail2Ban; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# Author: Cyril Jaquier
__author__ = "Cyril Jaquier, Lee Clemens, Yaroslav Halchenko"
__copyright__ = "Copyright (c) 2004 Cyril Jaquier, 2011-2012 Lee Clemens, 2012 Yaroslav Halchenko"
__license__ = "GPL"
import Queue, logging
from .actions import Actions
# Gets the instance of the logger.
logSys = logging.getLogger(__name__)
class Jail:
"""Fail2Ban jail, which manages a filter and associated actions.
	The class handles the initialisation of a filter, and actions. Its
role is then to act as an interface between the filter and actions,
passing bans detected by the filter, for the actions to then act upon.
Parameters
----------
name : str
Name assigned to the jail.
backend : str
Backend to be used for filter. "auto" will attempt to pick
the most preferred backend method. Default: "auto"
db : Fail2BanDb
Fail2Ban persistent database instance. Default: `None`
Attributes
----------
name
database
filter
actions
idle
status
"""
#Known backends. Each backend should have corresponding __initBackend method
# yoh: stored in a list instead of a tuple since only
# list had .index until 2.6
_BACKENDS = ['pyinotify', 'gamin', 'polling', 'systemd']
def __init__(self, name, backend = "auto", db=None):
self.__db = db
# 26 based on iptable chain name limit of 30 less len('f2b-')
if len(name) >= 26:
logSys.warning("Jail name %r might be too long and some commands "
"might not function correctly. Please shorten"
% name)
self.__name = name
self.__queue = Queue.Queue()
self.__filter = None
logSys.info("Creating new jail '%s'" % self.name)
self._setBackend(backend)
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self.name)
def _setBackend(self, backend):
backend = backend.lower() # to assure consistent matching
backends = self._BACKENDS
if backend != 'auto':
# we have got strict specification of the backend to use
if not (backend in self._BACKENDS):
logSys.error("Unknown backend %s. Must be among %s or 'auto'"
% (backend, backends))
raise ValueError("Unknown backend %s. Must be among %s or 'auto'"
% (backend, backends))
# so explore starting from it till the 'end'
backends = backends[backends.index(backend):]
for b in backends:
initmethod = getattr(self, '_init%s' % b.capitalize())
try:
initmethod()
if backend != 'auto' and b != backend:
logSys.warning("Could only initiated %r backend whenever "
"%r was requested" % (b, backend))
else:
logSys.info("Initiated %r backend" % b)
self.__actions = Actions(self)
return # we are done
except ImportError, e:
# Log debug if auto, but error if specific
logSys.log(
logging.DEBUG if backend == "auto" else logging.ERROR,
"Backend %r failed to initialize due to %s" % (b, e))
# log error since runtime error message isn't printed, INVALID COMMAND
logSys.error(
"Failed to initialize any backend for Jail %r" % self.name)
raise RuntimeError(
"Failed to initialize any backend for Jail %r" % self.name)
def _initPolling(self):
from filterpoll import FilterPoll
logSys.info("Jail '%s' uses poller" % self.name)
self.__filter = FilterPoll(self)
def _initGamin(self):
# Try to import gamin
from filtergamin import FilterGamin
logSys.info("Jail '%s' uses Gamin" % self.name)
self.__filter = FilterGamin(self)
def _initPyinotify(self):
# Try to import pyinotify
from filterpyinotify import FilterPyinotify
logSys.info("Jail '%s' uses pyinotify" % self.name)
self.__filter = FilterPyinotify(self)
def _initSystemd(self): # pragma: systemd no cover
# Try to import systemd
from filtersystemd import FilterSystemd
logSys.info("Jail '%s' uses systemd" % self.name)
self.__filter = FilterSystemd(self)
@property
def name(self):
"""Name of jail.
"""
return self.__name
@property
def database(self):
"""The database used to store persistent data for the jail.
"""
return self.__db
@property
def filter(self):
"""The filter which the jail is using to monitor log files.
"""
return self.__filter
@property
def actions(self):
"""Actions object used to manage actions for jail.
"""
return self.__actions
@property
def idle(self):
"""A boolean indicating whether jail is idle.
"""
return self.filter.idle or self.actions.idle
@idle.setter
def idle(self, value):
self.filter.idle = value
self.actions.idle = value
@property
def status(self):
"""The status of the jail.
"""
return [
("Filter", self.filter.status),
("Actions", self.actions.status),
]
def putFailTicket(self, ticket):
"""Add a fail ticket to the jail.
Used by filter to add a failure for banning.
"""
self.__queue.put(ticket)
if self.database is not None:
self.database.addBan(self, ticket)
def getFailTicket(self):
"""Get a fail ticket from the jail.
Used by actions to get a failure for banning.
"""
try:
return self.__queue.get(False)
except Queue.Empty:
return False
def start(self):
"""Start the jail, by starting filter and actions threads.
		Once started, also queries the persistent database to reinstate
any valid bans.
"""
self.filter.start()
self.actions.start()
# Restore any previous valid bans from the database
if self.database is not None:
for ticket in self.database.getBansMerged(
jail=self, bantime=self.actions.getBanTime()):
if not self.filter.inIgnoreIPList(ticket.getIP()):
self.__queue.put(ticket)
logSys.info("Jail '%s' started" % self.name)
def stop(self):
"""Stop the jail, by stopping filter and actions threads.
"""
self.filter.stop()
self.actions.stop()
self.filter.join()
self.actions.join()
logSys.info("Jail '%s' stopped" % self.name)
def is_alive(self):
"""Check jail "is_alive" by checking filter and actions threads.
"""
return self.filter.is_alive() or self.actions.is_alive()
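# Illustrative sketch (not part of the original module): the lifecycle usually
# driven by the fail2ban server, shown here with hypothetical calls.
#
#     jail = Jail("sshd", backend="auto", db=None)
#     jail.start()                # starts the filter and actions threads
#     jail.putFailTicket(ticket)  # normally called by the filter on a match
#     jail.stop()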
|
gpl-2.0
| 600,484,904,292,077,600
| 28.262931
| 98
| 0.689645
| false
| 3.263942
| false
| false
| false
|
cprov/snapcraft
|
snapcraft/internal/elf.py
|
2
|
26365
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016-2018 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import contextlib
import glob
import logging
import os
import re
import shutil
import subprocess
import tempfile
from typing import Dict, FrozenSet, List, Set, Sequence, Tuple, Union # noqa
import elftools.elf.elffile
from pkg_resources import parse_version
from snapcraft import file_utils
from snapcraft.internal import common, errors, os_release, repo
logger = logging.getLogger(__name__)
class NeededLibrary:
"""Represents an ELF library version."""
def __init__(self, *, name: str) -> None:
self.name = name
self.versions = set() # type: Set[str]
def add_version(self, version: str) -> None:
self.versions.add(version)
ElfArchitectureTuple = Tuple[str, str, str]
ElfDataTuple = Tuple[
ElfArchitectureTuple, str, str, Dict[str, NeededLibrary], bool
] # noqa: E501
SonameCacheDict = Dict[Tuple[ElfArchitectureTuple, str], str]
# Old pyelftools uses byte strings for section names. Some data is
# also returned as bytes, which is handled below.
if parse_version(elftools.__version__) >= parse_version("0.24"):
_DYNAMIC = ".dynamic" # type: Union[str, bytes]
_GNU_VERSION_R = ".gnu.version_r" # type: Union[str, bytes]
_INTERP = ".interp" # type: Union[str, bytes]
else:
_DYNAMIC = b".dynamic"
_GNU_VERSION_R = b".gnu.version_r"
_INTERP = b".interp"
class SonameCache:
"""A cache for sonames."""
def __getitem__(self, key):
return self._soname_paths[key]
def __setitem__(self, key, item):
# Initial API error checks
if not isinstance(key, tuple):
raise EnvironmentError(
"The key for SonameCache has to be a (arch, soname) tuple."
)
if not isinstance(key[0], tuple) or len(key[0]) != 3:
raise EnvironmentError(
"The first element of the key needs to of type ElfArchitectureTuple."
)
if not isinstance(key[1], str):
raise EnvironmentError(
"The second element of the key needs to be "
"of type str representing the soname."
)
self._soname_paths[key] = item
def __contains__(self, key):
return key in self._soname_paths
def __init__(self):
"""Initialize a cache for sonames"""
self._soname_paths = dict() # type: SonameCacheDict
def reset_except_root(self, root):
"""Reset the cache values that aren't contained within root."""
new_soname_paths = dict() # type: SonameCacheDict
for key, value in self._soname_paths.items():
if value is not None and value.startswith(root):
new_soname_paths[key] = value
self._soname_paths = new_soname_paths
class Library:
"""Represents the SONAME and path to the library."""
def __init__(
self,
*,
soname: str,
path: str,
root_path: str,
core_base_path: str,
arch: ElfArchitectureTuple,
soname_cache: SonameCache
) -> None:
self.soname = soname
# We need to always look for the soname inside root first,
# and after exhausting all options look in core_base_path.
if path.startswith(root_path):
self.path = path
else:
self.path = _crawl_for_path(
soname=soname,
root_path=root_path,
core_base_path=core_base_path,
arch=arch,
soname_cache=soname_cache,
)
if not self.path and path.startswith(core_base_path):
self.path = path
# Required for libraries on the host and the fetching mechanism
if not self.path:
self.path = path
system_libs = _get_system_libs()
if soname in system_libs:
self.system_lib = True
else:
self.system_lib = False
# self.path has the correct resulting path.
if self.path.startswith(core_base_path):
self.in_base_snap = True
else:
self.in_base_snap = False
def _crawl_for_path(
*,
soname: str,
root_path: str,
core_base_path: str,
arch: ElfArchitectureTuple,
soname_cache: SonameCache
) -> str:
# Speed things up and return what was already found once.
if (arch, soname) in soname_cache:
return soname_cache[arch, soname]
logger.debug("Crawling to find soname {!r}".format(soname))
for path in (root_path, core_base_path):
if not os.path.exists(path):
continue
for root, directories, files in os.walk(path):
for file_name in files:
if file_name == soname:
file_path = os.path.join(root, file_name)
if ElfFile.is_elf(file_path):
# We found a match by name, anyway. Let's verify that
# the architecture is the one we want.
elf_file = ElfFile(path=file_path)
if elf_file.arch == arch:
soname_cache[arch, soname] = file_path
return file_path
# If not found we cache it too
soname_cache[arch, soname] = None
return None
# Old versions of pyelftools return bytes rather than strings for
# certain APIs. So we pass those values through this function to get
# a consistent result.
def _ensure_str(s):
if isinstance(s, bytes):
return s.decode("ascii")
assert isinstance(s, str)
return s
class ElfFile:
"""ElfFile represents and elf file on a path and its attributes."""
@classmethod
def is_elf(cls, path: str) -> bool:
if not os.path.isfile(path):
# ELF binaries are regular files
return False
with open(path, "rb") as bin_file:
return bin_file.read(4) == b"\x7fELF"
def __init__(self, *, path: str) -> None:
"""Initialize an ElfFile instance.
:param str path: path to an elf_file within a snapcraft project.
"""
self.path = path
self.dependencies = set() # type: Set[Library]
elf_data = self._extract(path)
self.arch = elf_data[0]
self.interp = elf_data[1]
self.soname = elf_data[2]
self.needed = elf_data[3]
self.execstack_set = elf_data[4]
def _extract(self, path: str) -> ElfDataTuple: # noqa: C901
arch = None # type: ElfArchitectureTuple
interp = str()
soname = str()
libs = dict()
execstack_set = False
with open(path, "rb") as fp:
elf = elftools.elf.elffile.ELFFile(fp)
# A set of fields to identify the architecture of the ELF file:
# EI_CLASS: 32/64 bit (e.g. amd64 vs. x32)
            # EI_DATA: byte order (e.g. ppc64 vs. ppc64le)
# e_machine: instruction set (e.g. x86-64 vs. arm64)
#
# For amd64 binaries, this will evaluate to:
# ('ELFCLASS64', 'ELFDATA2LSB', 'EM_X86_64')
arch = (
elf.header.e_ident.EI_CLASS,
elf.header.e_ident.EI_DATA,
elf.header.e_machine,
)
# If we are processing a detached debug info file, these
# sections will be present but empty.
interp_section = elf.get_section_by_name(_INTERP)
if (
interp_section is not None
and interp_section.header.sh_type != "SHT_NOBITS"
):
interp = interp_section.data().rstrip(b"\x00").decode("ascii")
dynamic_section = elf.get_section_by_name(_DYNAMIC)
if (
dynamic_section is not None
and dynamic_section.header.sh_type != "SHT_NOBITS"
):
for tag in dynamic_section.iter_tags("DT_NEEDED"):
needed = _ensure_str(tag.needed)
libs[needed] = NeededLibrary(name=needed)
for tag in dynamic_section.iter_tags("DT_SONAME"):
soname = _ensure_str(tag.soname)
verneed_section = elf.get_section_by_name(_GNU_VERSION_R)
if (
verneed_section is not None
and verneed_section.header.sh_type != "SHT_NOBITS"
):
for library, versions in verneed_section.iter_versions():
library_name = _ensure_str(library.name)
# If the ELF file only references weak symbols
# from a library, it may be absent from DT_NEEDED
# but still have an entry in .gnu.version_r for
# symbol versions.
if library_name not in libs:
continue
lib = libs[library_name]
for version in versions:
lib.add_version(_ensure_str(version.name))
for segment in elf.iter_segments():
if segment["p_type"] == "PT_GNU_STACK":
# p_flags holds the bit mask for this segment.
# See `man 5 elf`.
mode = segment["p_flags"]
if mode & elftools.elf.constants.P_FLAGS.PF_X:
execstack_set = True
return arch, interp, soname, libs, execstack_set
def is_linker_compatible(self, *, linker_version: str) -> bool:
"""Determines if linker will work given the required glibc version."""
version_required = self.get_required_glibc()
r = parse_version(version_required) <= parse_version(linker_version)
logger.debug(
"Checking if linker {!r} will work with "
"GLIBC_{} required by {!r}: {!r}".format(
linker_version, version_required, self.path, r
)
)
return r
def get_required_glibc(self) -> str:
"""Returns the required glibc version for this ELF file."""
with contextlib.suppress(AttributeError):
return self._required_glibc # type: ignore
version_required = ""
for lib in self.needed.values():
for version in lib.versions:
if not version.startswith("GLIBC_"):
continue
version = version[6:]
if parse_version(version) > parse_version(version_required):
version_required = version
self._required_glibc = version_required
return version_required
def load_dependencies(
self, root_path: str, core_base_path: str, soname_cache: SonameCache = None
) -> Set[str]:
"""Load the set of libraries that are needed to satisfy elf's runtime.
This may include libraries contained within the project.
The object's .dependencies attribute is set after loading.
:param str root_path: the root path to search for missing dependencies.
:param str core_base_path: the core base path to search for missing
dependencies.
:param SonameCache soname_cache: a cache of previously search
dependencies.
:returns: a set of string with paths to the library dependencies of
elf.
"""
if soname_cache is None:
soname_cache = SonameCache()
logger.debug("Getting dependencies for {!r}".format(self.path))
ldd_out = [] # type: List[str]
try:
# ldd output sample:
# /lib64/ld-linux-x86-64.so.2 (0x00007fb3c5298000)
# libm.so.6 => /lib/x86_64-linux-gnu/libm.so.6 (0x00007fb3bef03000)
ldd_out = common.run_output(["ldd", self.path]).split("\n")
except subprocess.CalledProcessError:
logger.warning(
"Unable to determine library dependencies for {!r}".format(self.path)
)
return set()
ldd_out_split = [l.split() for l in ldd_out]
libs = set()
for ldd_line in ldd_out_split:
if len(ldd_line) > 2:
libs.add(
Library(
soname=ldd_line[0],
path=ldd_line[2],
root_path=root_path,
core_base_path=core_base_path,
arch=self.arch,
soname_cache=soname_cache,
)
)
self.dependencies = libs
# Return a set useful only for fetching libraries from the host
library_paths = set() # type: Set[str]
for l in libs:
if os.path.exists(l.path) and not l.in_base_snap and not l.system_lib:
library_paths.add(l.path)
return library_paths
class Patcher:
"""Patcher holds the necessary logic to patch elf files."""
def __init__(
self, *, dynamic_linker: str, root_path: str, preferred_patchelf_path=None
) -> None:
"""Create a Patcher instance.
:param str dynamic_linker: the path to the dynamic linker to set the
elf file to.
:param str root_path: the base path for the snap to determine
if use of $ORIGIN is possible.
:param str preferred_patchelf_path: patch the necessary elf_files with
this patchelf.
"""
self._dynamic_linker = dynamic_linker
self._root_path = root_path
if preferred_patchelf_path:
self._patchelf_cmd = preferred_patchelf_path
else:
self._patchelf_cmd = file_utils.get_tool_path("patchelf")
self._strip_cmd = file_utils.get_tool_path("strip")
def patch(self, *, elf_file: ElfFile) -> None:
"""Patch elf_file with the Patcher instance configuration.
If the ELF is executable, patch it to use the configured linker.
If the ELF has dependencies (DT_NEEDED), set an rpath to them.
:param ElfFile elf: a data object representing an elf file and its
relevant attributes.
:raises snapcraft.internal.errors.PatcherError:
raised when the elf_file cannot be patched.
"""
patchelf_args = []
if elf_file.interp:
patchelf_args.extend(["--set-interpreter", self._dynamic_linker])
if elf_file.dependencies:
rpath = self._get_rpath(elf_file)
# Due to https://github.com/NixOS/patchelf/issues/94 we need
# to first clear the current rpath
self._run_patchelf(
patchelf_args=["--remove-rpath"], elf_file_path=elf_file.path
)
# Parameters:
# --force-rpath: use RPATH instead of RUNPATH.
# --shrink-rpath: will remove unneeded entries, with the
# side effect of preferring host libraries
# so we simply do not use it.
# --set-rpath: set the RPATH to the colon separated argument.
patchelf_args.extend(["--force-rpath", "--set-rpath", rpath])
# no patchelf_args means there is nothing to do.
if not patchelf_args:
return
self._run_patchelf(patchelf_args=patchelf_args, elf_file_path=elf_file.path)
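    # Illustrative note (not part of the original class): for an executable with
    # dependencies the patch() call above is roughly equivalent to running
    #
    #     patchelf --remove-rpath <elf>
    #     patchelf --set-interpreter <dynamic_linker> \
    #              --force-rpath --set-rpath '$ORIGIN/../lib:<base snap dirs>' <elf>
    #
    # where the rpath shown is a hypothetical value produced by _get_rpath().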
def _run_patchelf(self, *, patchelf_args: List[str], elf_file_path: str) -> None:
try:
return self._do_run_patchelf(
patchelf_args=patchelf_args, elf_file_path=elf_file_path
)
except errors.PatcherError as patch_error:
# This is needed for patchelf to properly work with
# go binaries (LP: #1736861).
# We do this here instead of the go plugin for two reasons, the
# first being that we do not want to blindly remove the section,
# only doing it when necessary, and the second, this logic
# should eventually be removed once patchelf catches up.
try:
logger.warning(
"Failed to update {!r}. Retrying after stripping "
"the .note.go.buildid from the elf file.".format(elf_file_path)
)
subprocess.check_call(
[
self._strip_cmd,
"--remove-section",
".note.go.buildid",
elf_file_path,
]
)
except subprocess.CalledProcessError:
logger.warning(
"Could not properly strip .note.go.buildid "
"from {!r}.".format(elf_file_path)
)
raise patch_error
return self._do_run_patchelf(
patchelf_args=patchelf_args, elf_file_path=elf_file_path
)
def _do_run_patchelf(self, *, patchelf_args: List[str], elf_file_path: str) -> None:
# Run patchelf on a copy of the primed file and replace it
# after it is successful. This allows us to break the potential
# hard link created when migrating the file across the steps of
# the part.
with tempfile.NamedTemporaryFile() as temp_file:
shutil.copy2(elf_file_path, temp_file.name)
cmd = [self._patchelf_cmd] + patchelf_args + [temp_file.name]
try:
subprocess.check_call(cmd)
# There is no need to catch FileNotFoundError as patchelf should be
# bundled with snapcraft which means its lack of existence is a
# "packager" error.
except subprocess.CalledProcessError as call_error:
patchelf_version = (
subprocess.check_output([self._patchelf_cmd, "--version"])
.decode()
.strip()
)
# 0.10 is the version where patching certain binaries will
# work (currently known affected packages are mostly built
# with go).
if parse_version(patchelf_version) < parse_version("0.10"):
raise errors.PatcherNewerPatchelfError(
elf_file=elf_file_path,
process_exception=call_error,
patchelf_version=patchelf_version,
)
else:
raise errors.PatcherGenericError(
elf_file=elf_file_path, process_exception=call_error
)
# We unlink to break the potential hard link
os.unlink(elf_file_path)
shutil.copy2(temp_file.name, elf_file_path)
def _get_existing_rpath(self, elf_file_path):
output = subprocess.check_output(
[self._patchelf_cmd, "--print-rpath", elf_file_path]
)
return output.decode().strip().split(":")
def _get_rpath(self, elf_file) -> str:
origin_rpaths = list() # type: List[str]
base_rpaths = set() # type: Set[str]
existing_rpaths = self._get_existing_rpath(elf_file.path)
for dependency in elf_file.dependencies:
if dependency.path:
if dependency.in_base_snap:
base_rpaths.add(os.path.dirname(dependency.path))
elif dependency.path.startswith(self._root_path):
rel_library_path = os.path.relpath(dependency.path, elf_file.path)
rel_library_path_dir = os.path.dirname(rel_library_path)
                    # return the dirname, with the first .. replaced
# with $ORIGIN
origin_rpath = rel_library_path_dir.replace("..", "$ORIGIN", 1)
if origin_rpath not in origin_rpaths:
origin_rpaths.append(origin_rpath)
if existing_rpaths:
# Only keep those that mention origin and are not already in our
# bundle.
existing_rpaths = [
r for r in existing_rpaths if "$ORIGIN" in r and r not in origin_rpaths
]
origin_rpaths = existing_rpaths + origin_rpaths
origin_paths = ":".join((r for r in origin_rpaths if r))
core_base_rpaths = ":".join(base_rpaths)
if origin_paths and core_base_rpaths:
return "{}:{}".format(origin_paths, core_base_rpaths)
elif origin_paths and not core_base_rpaths:
return origin_paths
else:
return core_base_rpaths
def determine_ld_library_path(root: str) -> List[str]:
"""Determine additional library paths needed for the linker loader.
This is a workaround until full library searching is implemented which
works by searching for ld.so.conf in specific hard coded locations
within root.
:param root str: the root directory to search for specific ld.so.conf
entries.
:returns: a list of strings of library paths where relevant libraries
can be found within root.
"""
# If more ld.so.conf files need to be supported, add them here.
ld_config_globs = {"{}/usr/lib/*/mesa*/ld.so.conf".format(root)}
ld_library_paths = []
for this_glob in ld_config_globs:
for ld_conf_file in glob.glob(this_glob):
ld_library_paths.extend(_extract_ld_library_paths(ld_conf_file))
return [root + path for path in ld_library_paths]
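# Illustrative note (not part of the original module): if, hypothetically,
# <root>/usr/lib/x86_64-linux-gnu/mesa-egl/ld.so.conf contains the line
#
#     /usr/lib/x86_64-linux-gnu/mesa-egl
#
# then determine_ld_library_path(root) returns
# ['<root>/usr/lib/x86_64-linux-gnu/mesa-egl'].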
def _extract_ld_library_paths(ld_conf_file: str) -> List[str]:
# From the ldconfig manpage, paths can be colon-, space-, tab-, newline-,
# or comma-separated.
path_delimiters = re.compile(r"[:\s,]")
comments = re.compile(r"#.*$")
paths = []
with open(ld_conf_file, "r") as f:
for line in f:
# Remove comments from line
line = comments.sub("", line).strip()
if line:
paths.extend(path_delimiters.split(line))
return paths
_libraries = None
def _get_system_libs() -> FrozenSet[str]:
global _libraries
if _libraries: # type: ignore
return _libraries # type: ignore
lib_path = None
release = os_release.OsRelease()
with contextlib.suppress(errors.OsReleaseVersionIdError):
lib_path = os.path.join(common.get_librariesdir(), release.version_id())
if not lib_path or not os.path.exists(lib_path):
logger.debug("Only excluding libc libraries from the release")
libc6_libs = [
os.path.basename(l) for l in repo.Repo.get_package_libraries("libc6")
]
_libraries = frozenset(libc6_libs)
else:
with open(lib_path) as fn:
_libraries = frozenset(fn.read().split())
return _libraries
def get_elf_files(root: str, file_list: Sequence[str]) -> FrozenSet[ElfFile]:
"""Return a frozenset of elf files from file_list prepended with root.
:param str root: the root directory from where the file_list is generated.
:param file_list: a list of file in root.
    :returns: a frozenset of ElfFile objects.
"""
elf_files = set() # type: Set[ElfFile]
for part_file in file_list:
# Filter out object (*.o) files-- we only care about binaries.
if part_file.endswith(".o"):
continue
# No need to crawl links-- the original should be here, too.
path = os.path.join(root, part_file) # type: str
if os.path.islink(path):
logger.debug("Skipped link {!r} while finding dependencies".format(path))
continue
# Finally, make sure this is actually an ELF file
if ElfFile.is_elf(path):
elf_file = ElfFile(path=path)
# if we have dyn symbols we are dynamic
if elf_file.needed:
elf_files.add(elf_file)
return frozenset(elf_files)
def _get_dynamic_linker(library_list: List[str]) -> str:
"""Return the dynamic linker from library_list."""
regex = re.compile(r"(?P<dynamic_linker>ld-[\d.]+.so)$")
for library in library_list:
m = regex.search(os.path.basename(library))
if m:
return library
raise RuntimeError(
"The format for the linker should be of the form "
"<root>/ld-<X>.<Y>.so. There are no matches for the "
"current libc6 package"
)
def find_linker(*, root_path: str, snap_base_path: str) -> str:
"""Find and return the dynamic linker that would be seen at runtime.
:param str root_path: the root path of a snap tree.
:param str snap_base_path: absolute path to the snap once installed to
setup proper rpaths.
:returns: the path to the dynamic linker to use
"""
# We assume the current system will satisfy the GLIBC requirement,
# get the current libc6 libraries (which includes the linker)
libc6_libraries_list = repo.Repo.get_package_libraries("libc6")
# For security reasons, we do not want to automatically pull in
# libraries but expect them to be consciously brought in by stage-packages
# instead.
libc6_libraries_paths = [
os.path.join(root_path, l[1:]) for l in libc6_libraries_list
]
dynamic_linker = _get_dynamic_linker(libc6_libraries_paths)
# Get the path to the "would be" dynamic linker when this snap is
# installed. Strip the root_path from the retrieved dynamic_linker
# variables + the leading `/` so that os.path.join can perform the
# proper join with snap_base_path.
dynamic_linker_path = os.path.join(
snap_base_path, dynamic_linker[len(root_path) + 1 :]
)
return dynamic_linker_path
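# Illustrative note (not part of the original module): with hypothetical paths
# root_path='/tmp/prime' and snap_base_path='/snap/example/current', a linker
# staged at '/tmp/prime/lib/x86_64-linux-gnu/ld-2.27.so' makes find_linker()
# return '/snap/example/current/lib/x86_64-linux-gnu/ld-2.27.so'.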
|
gpl-3.0
| 5,716,842,983,401,830,000
| 36.935252
| 88
| 0.573943
| false
| 3.975422
| false
| false
| false
|
smoitra87/gerbil
|
deepnet/impute.py
|
1
|
24315
|
"""Computes partition function for RBM-like models using Annealed Importance Sampling."""
import numpy as np
from deepnet import dbm
from deepnet import util
from deepnet import trainer as tr
from choose_matrix_library import *
import sys
import numpy as np
import pdb
import time
import itertools
import matplotlib.pyplot as plt
from deepnet import visualize
import deepnet
import scipy.io as sio
def LogMeanExp(x):
offset = x.max()
return offset + np.log(np.exp(x-offset).mean())
def LogSumExp(x):
offset = x.max()
return offset + np.log(np.exp(x-offset).sum())
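# Illustrative note (not part of the original module): both helpers above use
# the max-offset trick for numerical stability, e.g. for x = [1000., 1001.]
# LogSumExp(x) = 1001 + log(exp(-1) + exp(0)), avoiding the overflow that a
# direct np.log(np.exp(x).sum()) would hit.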
def Display(w, hid_state, input_state, w_var=None, x_axis=None):
w = w.asarray().flatten()
plt.figure(1)
plt.clf()
plt.hist(w, 100)
visualize.display_hidden(hid_state.asarray(), 2, 'activations', prob=True)
# plt.figure(3)
# plt.clf()
# plt.imshow(hid_state.asarray().T, cmap=plt.cm.gray, interpolation='nearest')
# plt.figure(4)
# plt.clf()
# plt.imshow(input_state.asarray().T, cmap=plt.cm.gray, interpolation='nearest')
#, state.shape[0], state.shape[1], state.shape[0], 3, title='Markov chains')
# plt.tight_layout(pad=0, w_pad=0, h_pad=0)
# plt.figure(5)
# plt.clf()
# plt.suptitle('Variance')
# plt.plot(np.array(x_axis), np.array(w_var))
# plt.draw()
def impute_dbm_ais(model):
"""Run approximate pll using AIS on a DBM """
def impute_rbm_gaussian_exact(model):
""" run exact exact pll and imputation error on an rbm """
batchsize = model.batchsize
input_layer = model.GetLayerByName('input_layer')
hidden_layer = model.GetLayerByName('bernoulli_hidden1')
bern2_hidden_layer = model.GetLayerByName('bernoulli2_hidden1')
gaussian_layer = model.GetLayerByName('gaussian_hidden1')
# Get input layer features
dimensions = input_layer.dimensions
numlabels = input_layer.numlabels
data = input_layer.data
# set up temp data structures
for layer in model.layer:
layer.foo = layer.statesize
layer.bar = layer.deriv
zeroslice = cm.CUDAMatrix(np.zeros([input_layer.numlabels,\
batchsize]))
onesrow = cm.CUDAMatrix(np.ones([1,\
batchsize]))
batchslice = cm.CUDAMatrix(np.zeros([1, batchsize]))
batchzeroslice = cm.CUDAMatrix(np.zeros([1, batchsize]))
batchslice2 = cm.CUDAMatrix(np.zeros([1, batchsize]))
datasize_squared = cm.CUDAMatrix(np.zeros([batchsize, batchsize]))
datasize_eye = cm.CUDAMatrix(np.eye(batchsize))
datasize_eye2 = cm.CUDAMatrix(np.eye(batchsize))
if hidden_layer:
hidden_bias = hidden_layer.params['bias']
bedge = next(e for e in model.edge if e.node1.name == 'input_layer' \
and e.node2.name == 'bernoulli_hidden1')
w = bedge.params['weight']
if bern2_hidden_layer:
bern2_hidden_bias = bern2_hidden_layer.params['bias']
bedge2 = next(e for e in model.edge if e.node1.name == 'input_layer' \
and e.node2.name == 'bernoulli2_hidden1')
w2 = bedge2.params['weight']
if 'bias' in input_layer.params:
input_bias = input_layer.params['bias']
if gaussian_layer:
gedge = next(e for e in model.edge if e.node1.name == 'input_layer' \
and e.node2.name == 'gaussian_hidden1')
gw = gedge.params['weight']
input_diag = input_layer.params['diag']
diag_val = input_diag.sum() / (input_layer.dimensions * input_layer.numlabels)
# RUN Imputation Error
for dim_idx in range(dimensions):
#-------------------------------------------
# Set state of input variables
input_layer.GetData()
dim_offset = dim_idx * numlabels
for label_idx in range(numlabels):
batchslice.assign(batchzeroslice)
#Assign state value
label_offset = dim_idx * numlabels + label_idx
input_layer.state.set_row_slice(dim_offset, dim_offset + numlabels, \
zeroslice)
input_layer.state.set_row_slice(label_offset, label_offset+1, onesrow)
if hidden_layer:
# Add the contributions from bernoulli hidden layer
cm.dot(w.T, input_layer.state, target=hidden_layer.state)
hidden_layer.state.add_col_vec(hidden_bias)
cm.log_1_plus_exp(hidden_layer.state)
hidden_layer.state.sum(axis=0, target=batchslice)
if bern2_hidden_layer:
# Add the contributions from bernoulli hidden layer
cm.dot(w2.T, input_layer.state, target=bern2_hidden_layer.state)
bern2_hidden_layer.state.add_col_vec(bern2_hidden_bias)
cm.log_1_plus_exp(bern2_hidden_layer.state)
batchslice.add_sums(bern2_hidden_layer.state, axis=0)
if 'bias' in input_layer.params:
cm.dot(input_bias.T, input_layer.state, target=batchslice2)
batchslice.add_row_vec(batchslice2)
if gaussian_layer:
# Add contributions from gaussian hidden layer
cm.dot(gw.T, input_layer.state, target=gaussian_layer.state)
cm.dot(gaussian_layer.state.T, gaussian_layer.state, target= datasize_squared)
datasize_squared.mult(datasize_eye, target=datasize_eye2)
datasize_eye2.sum(axis=0, target=batchslice2)
# Add constants from gaussian hidden layer
integration_constant = gaussian_layer.dimensions * np.log(2*np.pi)
integration_constant += input_layer.dimensions * diag_val
batchslice2.add(integration_constant)
batchslice2.mult(0.5)
batchslice.add_row_vec(batchslice2)
input_layer.foo.set_row_slice(label_offset, label_offset+1, batchslice)
# Apply softmax on log Z_v as energies
input_layer.foo.reshape((numlabels, dimensions * batchsize))
input_layer.foo.apply_softmax()
data.reshape((1, dimensions * batchsize))
# Calculate Imputation Error
input_layer.batchsize_temp.reshape((1, dimensions * batchsize))
input_layer.foo.get_softmax_correct(data, target=input_layer.batchsize_temp)
input_layer.batchsize_temp.reshape((dimensions, batchsize))
imperr_cpu = (dimensions - input_layer.batchsize_temp.sum(axis=0).asarray() )/ (0. + dimensions)
# Calculate Pseudo ll
input_layer.batchsize_temp.reshape((1, dimensions * batchsize))
input_layer.foo.get_softmax_cross_entropy(data, target=input_layer.batchsize_temp, \
tiny=input_layer.tiny)
input_layer.batchsize_temp.reshape((dimensions, batchsize))
pll_cpu = - input_layer.batchsize_temp.sum(axis=0).asarray()
# Undo reshapes
input_layer.foo.reshape((numlabels * dimensions, batchsize))
data.reshape((dimensions, batchsize))
zeroslice.free_device_memory()
onesrow.free_device_memory()
batchslice.free_device_memory()
return pll_cpu, imperr_cpu
def impute_rbm_exact(model):
""" run exact exact pll and imputation error on an rbm """
batchsize = model.batchsize
input_layer = model.GetLayerByName('input_layer')
hidden_layer = model.GetLayerByName('hidden1')
# Get input layer features
dimensions = input_layer.dimensions
numlabels = input_layer.numlabels
data = input_layer.data
# set up temp data structures
for layer in model.layer:
layer.foo = layer.statesize
layer.bar = layer.deriv
zeroslice = cm.CUDAMatrix(np.zeros([input_layer.numlabels,\
batchsize]))
onesrow = cm.CUDAMatrix(np.ones([1,\
batchsize]))
batchslice = cm.CUDAMatrix(np.zeros([1, batchsize]))
batchslice2 = cm.CUDAMatrix(np.zeros([1, batchsize]))
hidden_bias = hidden_layer.params['bias']
input_bias = input_layer.params['bias']
edge = model.edge[0]
w = edge.params['weight']
# RUN Imputation Error
for dim_idx in range(dimensions):
#-------------------------------------------
# Set state of input variables
input_layer.GetData()
dim_offset = dim_idx * numlabels
for label_idx in range(numlabels):
#Assign state value
label_offset = dim_idx * numlabels + label_idx
input_layer.state.set_row_slice(dim_offset, dim_offset + numlabels, \
zeroslice)
input_layer.state.set_row_slice(label_offset, label_offset+1, onesrow)
cm.dot(w.T, input_layer.state, target=hidden_layer.state)
hidden_layer.state.add_col_vec(hidden_bias)
cm.log_1_plus_exp(hidden_layer.state)
hidden_layer.state.sum(axis=0, target=batchslice)
cm.dot(input_bias.T, input_layer.state, target=batchslice2)
batchslice.add_row_vec(batchslice2)
input_layer.foo.set_row_slice(label_offset, label_offset+1, batchslice)
# Apply softmax on log Z_v as energies
input_layer.foo.reshape((numlabels, dimensions * batchsize))
input_layer.foo.apply_softmax()
data.reshape((1, dimensions * batchsize))
# Calculate Imputation Error
input_layer.batchsize_temp.reshape((1, dimensions * batchsize))
input_layer.foo.get_softmax_correct(data, target=input_layer.batchsize_temp)
input_layer.batchsize_temp.reshape((dimensions, batchsize))
imperr_cpu = (dimensions - input_layer.batchsize_temp.sum(axis=0).asarray() )/ (0. + dimensions)
# Calculate Pseudo ll
input_layer.batchsize_temp.reshape((1, dimensions * batchsize))
input_layer.foo.get_softmax_cross_entropy(data, target=input_layer.batchsize_temp, \
tiny=input_layer.tiny)
input_layer.batchsize_temp.reshape((dimensions, batchsize))
pll_cpu = - input_layer.batchsize_temp.sum(axis=0).asarray()
# Undo reshapes
input_layer.foo.reshape((numlabels * dimensions, batchsize))
data.reshape((dimensions, batchsize))
zeroslice.free_device_memory()
onesrow.free_device_memory()
batchslice.free_device_memory()
return pll_cpu, imperr_cpu
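# Summary of the two exact routines above: for every visible dimension, the
# softmax unit is clamped to each of its `numlabels` values in turn, the hidden
# units are summed out analytically via log(1 + exp(.)), and the resulting
# per-label free energies are softmax-normalised to give the exact conditional
# over that dimension. Its cross-entropy against the data yields the
# pseudo-log-likelihood term, and the fraction of dimensions reconstructed
# incorrectly (as measured by get_softmax_correct) yields the imputation error.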
def impute_mf(model, mf_steps, hidden_mf_steps, **opts):
# Initialize stuff
batchsize = model.batchsize
input_layer = model.GetLayerByName('input_layer')
hidden_layers = []
for layer in model.layer:
if not layer.is_input:
hidden_layers.append(layer)
dimensions = input_layer.dimensions
numlabels = input_layer.numlabels
data = input_layer.data
# set up temp data structures
for layer in model.layer:
layer.foo = layer.statesize
input_layer.fooslice = cm.CUDAMatrix(np.zeros([input_layer.numlabels,\
batchsize]))
input_layer.barslice = cm.CUDAMatrix(np.zeros([1, batchsize]))
pll = cm.CUDAMatrix(np.zeros([1, batchsize]))
imputation_err = cm.CUDAMatrix(np.zeros([1, batchsize]))
input_layer.biasslice = cm.CUDAMatrix(np.zeros([input_layer.numlabels,\
batchsize]))
input_layer.biasslice.apply_softmax()
# INITIALIZE TO UNIFORM RANDOM for all layers except clamped layers
for layer in model.layer:
layer.state.assign(0)
layer.ApplyActivation()
def reshape_softmax(enter=True):
if enter:
input_layer.state.reshape((numlabels, dimensions * batchsize))
input_layer.foo.reshape((numlabels, dimensions * batchsize))
data.reshape((1, dimensions * batchsize))
input_layer.batchsize_temp.reshape((1, dimensions * batchsize))
else:
input_layer.state.reshape((numlabels * dimensions, batchsize))
input_layer.foo.reshape((numlabels * dimensions, batchsize))
data.reshape((dimensions, batchsize))
input_layer.batchsize_temp.reshape((dimensions, batchsize))
# RUN Imputation Error
for dim_idx in range(dimensions):
#-------------------------------------------
# Set state of input variables
input_layer.GetData()
offset = dim_idx * numlabels
input_layer.state.set_row_slice(offset, offset + numlabels, \
input_layer.biasslice)
for layer in model.layer:
if not layer.is_input:
layer.state.assign(0)
# Run MF steps
for mf_idx in range(mf_steps):
for hid_mf_idx in range(hidden_mf_steps):
for layer in hidden_layers:
model.ComputeUp(layer, train=False, compute_input=False, step=0,
maxsteps=0, use_samples=False, neg_phase=False)
model.ComputeUp(input_layer, train=False, compute_input=True, step=0,
maxsteps=0, use_samples=False, neg_phase=False)
input_layer.state.get_row_slice(offset, offset + numlabels , \
target=input_layer.fooslice)
input_layer.GetData()
input_layer.state.set_row_slice(offset, offset + numlabels , \
input_layer.fooslice)
# Calculate pll
reshape_softmax(enter=True)
input_layer.state.get_softmax_cross_entropy(data,\
target=input_layer.batchsize_temp, tiny=input_layer.tiny)
reshape_softmax(enter=False)
input_layer.batchsize_temp.get_row_slice(dim_idx, dim_idx + 1 , \
target=input_layer.barslice)
pll.add_sums(input_layer.barslice, axis=0)
# Calculate imputation error
if 'blosum90' in opts:
reshape_softmax(enter=True)
input_layer.state.get_softmax_blosum90(data, target=input_layer.batchsize_temp)
reshape_softmax(enter=False)
input_layer.batchsize_temp.get_row_slice(dim_idx, dim_idx + 1 , \
target=input_layer.barslice)
imputation_err.add_sums(input_layer.barslice, axis=0)
else:
reshape_softmax(enter=True)
input_layer.state.get_softmax_correct(data, target=input_layer.batchsize_temp)
reshape_softmax(enter=False)
input_layer.batchsize_temp.get_row_slice(dim_idx, dim_idx + 1 , \
target=input_layer.barslice)
imputation_err.add_sums(input_layer.barslice, axis=0, mult=-1.)
imputation_err.add(1.)
#--------------------------------------
# free device memory for newly created arrays
pll_cpu = -pll.asarray()
imperr_cpu = imputation_err.asarray()
imperr_cpu /= (dimensions+0.)
input_layer.fooslice.free_device_memory()
input_layer.biasslice.free_device_memory()
input_layer.barslice.free_device_memory()
pll.free_device_memory()
imputation_err.free_device_memory()
return pll_cpu, imperr_cpu
def multicol_mf(model, multicols, **opts):
# Initialize stuff
batchsize = model.batchsize
input_layer = model.GetLayerByName('input_layer')
hidden_layers = []
for layer in model.layer:
if not layer.is_input:
hidden_layers.append(layer)
dimensions = input_layer.dimensions
numlabels = input_layer.numlabels
data = input_layer.data
# set up temp data structures
for layer in model.layer:
layer.foo = layer.statesize
input_layer.fooslice = cm.CUDAMatrix(np.zeros([input_layer.numlabels,\
batchsize]))
input_layer.barslice = cm.CUDAMatrix(np.zeros([1, batchsize]))
pll = cm.CUDAMatrix(np.zeros([1, batchsize]))
imputation_err = cm.CUDAMatrix(np.zeros([1, batchsize]))
input_layer.biasslice = cm.CUDAMatrix(np.zeros([input_layer.numlabels,\
batchsize]))
input_layer.biasslice.apply_softmax()
# Get the multicol dimensions
nBlocks, nCols = multicols.shape
# INITIALIZE TO UNIFORM RANDOM for all layers except clamped layers
for layer in model.layer:
layer.state.assign(0)
layer.ApplyActivation()
def reshape_softmax(enter=True):
if enter:
input_layer.state.reshape((numlabels, dimensions * batchsize))
input_layer.foo.reshape((numlabels, dimensions * batchsize))
data.reshape((1, dimensions * batchsize))
input_layer.batchsize_temp.reshape((1, dimensions * batchsize))
else:
input_layer.state.reshape((numlabels * dimensions, batchsize))
input_layer.foo.reshape((numlabels * dimensions, batchsize))
data.reshape((dimensions, batchsize))
input_layer.batchsize_temp.reshape((dimensions, batchsize))
# RUN Imputation Error
for mult_idx in range(nBlocks):
#-------------------------------------------
# Set state of input variables
input_layer.GetData()
for col_idx in range(nCols):
dim_idx = multicols[mult_idx, col_idx]
offset = dim_idx * numlabels
input_layer.state.set_row_slice(offset, offset + numlabels, \
input_layer.biasslice)
for layer in model.layer:
if not layer.is_input:
layer.state.assign(0)
for layer in hidden_layers:
model.ComputeUp(layer, train=False, compute_input=False, step=0,
maxsteps=0, use_samples=False, neg_phase=False)
model.ComputeUp(input_layer, train=False, compute_input=True, step=0,
maxsteps=0, use_samples=False, neg_phase=False)
# Calculate pll
reshape_softmax(enter=True)
input_layer.state.get_softmax_cross_entropy(data,\
target=input_layer.batchsize_temp, tiny=input_layer.tiny)
reshape_softmax(enter=False)
for col_idx in range(nCols):
dim_idx = multicols[mult_idx, col_idx]
input_layer.batchsize_temp.get_row_slice(dim_idx, dim_idx + 1 , \
target=input_layer.barslice)
pll.add_sums(input_layer.barslice, axis=0)
# Calculate imputation error
if 'blosum90' in opts:
reshape_softmax(enter=True)
input_layer.state.get_softmax_blosum90(data, target=input_layer.batchsize_temp)
reshape_softmax(enter=False)
for col_idx in range(nCols):
dim_idx = multicols[mult_idx, col_idx]
input_layer.batchsize_temp.get_row_slice(dim_idx, dim_idx + 1 , \
target=input_layer.barslice)
imputation_err.add_sums(input_layer.barslice, axis=0)
else:
reshape_softmax(enter=True)
input_layer.state.get_softmax_correct(data, target=input_layer.batchsize_temp)
reshape_softmax(enter=False)
for col_idx in range(nCols):
dim_idx = multicols[mult_idx, col_idx]
input_layer.batchsize_temp.get_row_slice(dim_idx, dim_idx + 1 , \
target=input_layer.barslice)
imputation_err.add_sums(input_layer.barslice, axis=0, mult=-1.)
imputation_err.add(1.)
#--------------------------------------
# free device memory for newly created arrays
pll_cpu = -pll.asarray()
imperr_cpu = imputation_err.asarray()
imperr_cpu /= (nBlocks * nCols +0.)
input_layer.fooslice.free_device_memory()
input_layer.biasslice.free_device_memory()
input_layer.barslice.free_device_memory()
pll.free_device_memory()
imputation_err.free_device_memory()
return pll_cpu, imperr_cpu
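# Note on multicol_mf: `multicols` is an (nBlocks x nCols) matrix of visible
# dimension indices; each row of columns is blanked out jointly and
# reconstructed in a single mean-field pass, unlike impute_mf which hides one
# dimension at a time.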
def Usage():
print '%s <model file> <number of Markov chains to run> [number of words (for Replicated Softmax models)]' % sys.argv[0]
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser(description="Run AIS")
parser.add_argument("--model_file", type=str)
parser.add_argument("--train_file", type=str)
parser.add_argument("--infer-method", type=str, default='exact', \
                    help='mf/multicol/exact/gaussian_exact')
parser.add_argument("--mf-steps", type=int, default=1)
parser.add_argument("--hidden-mf-steps", type=int, default=1)
parser.add_argument("--outf", type=str, help='Output File')
parser.add_argument("--valid_only", action='store_true', help="only run the validation set")
parser.add_argument("--blosum90", action='store_true', help="Calculate blosum90 scores")
parser.add_argument("--ncols", type=int, help="Number of multiple columns")
parser.add_argument("--multmode", type=str, help="Multicol mode",default='rand')
args = parser.parse_args()
if not args.outf :
raise ValueError('Output file not defined')
if not args.train_file or not args.model_file :
raise ValueError('Models and data missing')
board = tr.LockGPU()
model_file = args.model_file
train_file = args.train_file
model = dbm.DBM(model_file, train_file)
trainer_pb = util.ReadOperation(train_file)
dataset = os.path.basename(trainer_pb.data_proto_prefix)
# Fix paths
dirname = os.path.split(model.t_op.data_proto_prefix)[1]
model.t_op.data_proto_prefix = os.path.join('datasets/',\
dirname)
model.t_op.skip_last_piece = False
model.t_op.get_last_piece = True
model.t_op.randomize = False
model.LoadModelOnGPU()
model.SetUpData()
if args.valid_only:
data_types = ['valid']
else:
data_types = ['train', 'valid', 'test']
datagetters = {
'train' : model.GetTrainBatch,
'valid' : model.GetValidationBatch,
'test' : model.GetTestBatch
}
batchsizes = {
'train' : model.train_data_handler.num_batches,
'valid' : model.validation_data_handler.num_batches,
'test' : model.test_data_handler.num_batches
}
opts = {}
cm.CUDAMatrix.init_random(seed=int(time.time()))
if len(model.layer) > 2 and args.infer_method=='exact':
raise ValueError('Cannot use exact inference for DBMs')
from collections import defaultdict
pll_data = defaultdict(list)
imperr_data = defaultdict(list)
for data_type in data_types:
num_batches = batchsizes[data_type]
datagetter = datagetters[data_type]
for batch_idx in range(num_batches):
print("Evalutating batch {}".format(batch_idx+1))
datagetter()
if args.infer_method == 'mf':
if args.blosum90:
pll, imperr = impute_mf(model, args.mf_steps, args.hidden_mf_steps, blosum90=True)
else:
pll, imperr = impute_mf(model, args.mf_steps, args.hidden_mf_steps)
elif args.infer_method == 'multicol':
ncols = args.ncols
multicol_file = 'datasets/{0}/multicol/{1}_{2}.mat'.format(dataset,args.multmode, ncols)
multicols = sio.loadmat(multicol_file)['multicols']
multicols = np.asarray(multicols, dtype=np.int)
multicols = multicols - 1  # convert from matlab indexing
if args.blosum90:
pll, imperr = multicol_mf(model, multicols, blosum90=True)
else:
pll, imperr = multicol_mf(model, multicols)
elif args.infer_method == 'exact':
pll, imperr = impute_rbm_exact(model)
elif args.infer_method == 'gaussian_exact':
pll, imperr = impute_rbm_gaussian_exact(model)
else:
raise ValueError("Unknown infer method")
pll, imperr = pll.flatten(), imperr.flatten()
pll_data[data_type].append(pll)
imperr_data[data_type].append(imperr)
pll_data[data_type] = np.concatenate(pll_data[data_type])
imperr_data[data_type] = np.concatenate(imperr_data[data_type])
#-------------------------------------------------------------------
# Print and save the results
for dtype in pll_data:
pll = pll_data[dtype]
imperr = imperr_data[dtype]
print '%s : Pseudo-LogLikelihood %.5f, std %.5f' % (dtype, pll.mean(), pll.std())
print '%s : Imputation Error %.5f, std %.5f' % (dtype, imperr.mean(), imperr.std())
tr.FreeGPU(board)
import pickle
with open(args.outf,'wb') as fout:
pkldata = { 'pll' : pll_data, 'imperr' : imperr_data }
pickle.dump(pkldata, fout)
|
bsd-3-clause
| 1,073,403,383,934,571,500
| 37.904
| 110
| 0.618096
| false
| 3.616151
| false
| false
| false
|
rcbops/opencenter
|
opencenter/db/migrate_repo/versions/001_initial_data.py
|
1
|
6716
|
#!/usr/bin/env python
# OpenCenter(TM) is Copyright 2013 by Rackspace US, Inc.
##############################################################################
#
# OpenCenter is licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. This
# version of OpenCenter includes Rackspace trademarks and logos, and in
# accordance with Section 6 of the License, the provision of commercial
# support services in conjunction with a version of OpenCenter which includes
# Rackspace trademarks and logos is prohibited. OpenCenter source code and
# details are available at: # https://github.com/rcbops/opencenter or upon
# written request.
#
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 and a copy, including this
# notice, is available in the LICENSE file accompanying this software.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the # specific language governing permissions and limitations
# under the License.
#
##############################################################################
import json
import os
from sqlalchemy import *
from migrate import *
from opencenter.db.api import api_from_models
adventures = [
{'name': 'Run Chef',
'dsl': 'run_chef.json',
'criteria': 'run_chef.criteria'},
{'name': 'Install Chef Server',
'dsl': 'install_chef_server.json',
'criteria': 'install_chef_server.criteria'},
{'name': 'Create Nova Cluster',
'dsl': 'create_nova_cluster.json',
'criteria': 'create_nova_cluster.criteria'},
{'name': 'Enable HA Infrastructure',
'dsl': 'enable_ha_infrastructure.json',
'criteria': 'enable_ha_infrastructure.criteria'},
{'name': 'Download Chef Cookbooks',
'dsl': 'download_cookbooks.json',
'criteria': 'download_cookbooks.criteria'},
{'name': 'Subscribe Cookbook Channel',
'dsl': 'subscribe_cookbook_channel.json',
'criteria': 'subscribe_cookbook_channel.criteria'},
{'name': 'Sleep',
'dsl': 'sleep.json',
'criteria': 'sleep.criteria'},
{'name': 'Update Server',
'dsl': 'update_server.json',
'criteria': 'update_server.criteria'},
{'name': 'Update Agent',
'dsl': 'update_agent.json',
'criteria': 'update_agent.criteria'},
{'name': 'Create Availability Zone',
'dsl': 'create_az.json',
'criteria': 'create_az.criteria'},
{'name': 'Disable Scheduling on this Host',
'dsl': 'openstack_disable_host.json',
'criteria': 'openstack_disable_host.criteria'},
{'name': 'Enable Scheduling on this Host',
'dsl': 'openstack_enable_host.json',
'criteria': 'openstack_enable_host.criteria'},
{'name': 'Evacuate Host',
'dsl': 'openstack_evacuate_host.json',
'criteria': 'openstack_evacuate_host.criteria'},
{'name': 'Upload Initial Glance Images',
'dsl': 'openstack_upload_images.json',
'criteria': 'openstack_upload_images.criteria'},
{'name': 'Install Chef Client',
'dsl': 'install_chef.json',
'criteria': 'install_chef.criteria'},
{'name': 'Uninstall Chef Client',
'dsl': 'uninstall_chef.json',
'criteria': 'uninstall_chef.criteria'},
{'name': 'Uninstall Chef Server',
'dsl': 'uninstall_chef_server.json',
'criteria': 'uninstall_chef_server.criteria'}]
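# Each adventure above pairs a display name with a DSL (JSON) file and a
# criteria file; upgrade() below loads both from this migration's directory and
# registers them through the OpenCenter API.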
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
api = api_from_models()
for adventure in adventures:
new_adventure = {'name': adventure['name']}
json_path = os.path.join(
os.path.dirname(__file__), adventure['dsl'])
criteria_path = os.path.join(
os.path.dirname(__file__), adventure['criteria'])
new_adventure['dsl'] = json.loads(open(json_path).read())
new_adventure['criteria'] = open(criteria_path).read()
api.adventure_create(new_adventure)
canned_filters = [{'name': 'unprovisioned nodes',
'filter_type': 'node',
'expr': 'backend=\'unprovisioned\''},
{'name': 'chef client nodes',
'filter_type': 'node',
'expr': 'backend=\'chef-client\''},
{'name': 'chef-server',
'filter_type': 'interface',
'expr': 'facts.chef_server_uri != None and '
'facts.chef_server_pem != None'}]
for new_filter in canned_filters:
api._model_create('filters', new_filter)
workspace = api.node_create({'name': 'workspace'})
api._model_create('attrs', {'node_id': workspace['id'],
'key': 'json_schema_version',
'value': 1})
unprov = api.node_create({'name': 'unprovisioned'})
api._model_create('facts', {'node_id': unprov['id'],
'key': 'parent_id',
'value': workspace['id']})
support = api.node_create({'name': 'support'})
api._model_create('facts', {'node_id': support['id'],
'key': 'parent_id',
'value': workspace['id']})
# Add default fact to the default nodes
node_list = [(workspace, "Workspace"),
(unprov, "Available Nodes"),
(support, "Service Nodes")]
for node, display in node_list:
api.fact_create({'node_id': node['id'],
'key': 'backends',
'value': ["container", "node"]})
api.attr_create({'node_id': node['id'],
'key': 'display_name',
'value': display})
api.attr_create({'node_id': node['id'],
'key': 'locked',
'value': True})
def downgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
api = api_from_models()
adventure_names = [x['name'] for x in adventures]
for name in adventure_names:
adventure_list = api._model_query('adventures', 'name="%s"' % name)
for adv in adventure_list:
api._model_delete_by_id('adventures', adv['id'])
node_list = ['"support"', '"unprovisioned"', '"workspace"']
for node in node_list:
tmp = api.nodes_query('name = %s' % node)
fact_list = api.facts_query('node_id = %s' % tmp[0]['id'])
for fact in fact_list:
api.fact_delete_by_id(fact['id'])
api.node_delete_by_id(tmp[0]['id'])
|
apache-2.0
| 595,253,243,819,142,300
| 39.457831
| 79
| 0.570876
| false
| 3.811578
| false
| false
| false
|
smn/blinky
|
blinky/slack/models.py
|
1
|
1402
|
from __future__ import unicode_literals
import requests
from django.db import models
class SlackWebhook(models.Model):
url = models.URLField()
username = models.CharField(max_length=255, null=True, blank=True)
icon_emoji = models.CharField(max_length=255, null=True, blank=True)
channel = models.CharField(max_length=255, null=True, blank=True)
apply_global = models.BooleanField(default=True)
limit_worker_types = models.ManyToManyField('core.WorkerType', blank=True)
is_active = models.BooleanField(default=True)
@classmethod
def for_worker_type(cls, worker_type):
return cls.objects.filter(
models.Q(apply_global=True) |
models.Q(limit_worker_types=worker_type)
).filter(is_active=True).distinct()
def slack_payload(self, text):
payload = {
'text': text
}
if self.username:
payload['username'] = self.username
if self.icon_emoji:
payload['icon_emoji'] = self.icon_emoji
if self.channel:
payload['channel'] = self.channel
return payload
def fire(self, text):
response = requests.post(self.url, headers={
'Content-Type': 'application/json',
}, json=self.slack_payload(text))
response.raise_for_status()
return response
def __unicode__(self):
return self.url
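# Illustrative usage sketch (hypothetical values, not part of this app):
#   hook = SlackWebhook.objects.create(url='https://hooks.slack.com/services/XXX')
#   SlackWebhook.for_worker_type(worker_type)  # webhooks applicable to a worker type
#   hook.fire('build passed')  # POSTs the payload built by slack_payload()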
|
bsd-3-clause
| 4,312,822,045,934,085,600
| 31.604651
| 78
| 0.631954
| false
| 3.927171
| false
| false
| false
|
yavalvas/yav_com
|
build/matplotlib/lib/matplotlib/mathtext.py
|
1
|
111273
|
r"""
:mod:`~matplotlib.mathtext` is a module for parsing a subset of the
TeX math syntax and drawing them to a matplotlib backend.
For a tutorial of its usage see :ref:`mathtext-tutorial`. This
document is primarily concerned with implementation details.
The module uses pyparsing_ to parse the TeX expression.
.. _pyparsing: http://pyparsing.wikispaces.com/
The Bakoma distribution of the TeX Computer Modern fonts, and STIX
fonts are supported. There is experimental support for using
arbitrary fonts, but results may vary without proper tweaking and
metrics for those fonts.
If you find TeX expressions that don't parse or render properly,
please email mdroe@stsci.edu, but please check KNOWN ISSUES below first.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os, sys
from six import unichr
from math import ceil
try:
set
except NameError:
from sets import Set as set
import unicodedata
from warnings import warn
from numpy import inf, isinf
import numpy as np
import pyparsing
from pyparsing import Combine, Group, Optional, Forward, \
Literal, OneOrMore, ZeroOrMore, ParseException, Empty, \
ParseResults, Suppress, oneOf, StringEnd, ParseFatalException, \
FollowedBy, Regex, ParserElement, QuotedString, ParseBaseException
# Enable packrat parsing
if (six.PY3 and
[int(x) for x in pyparsing.__version__.split('.')] < [2, 0, 0]):
warn("Due to a bug in pyparsing <= 2.0.0 on Python 3.x, packrat parsing "
"has been disabled. Mathtext rendering will be much slower as a "
"result. Install pyparsing 2.0.0 or later to improve performance.")
else:
ParserElement.enablePackrat()
from matplotlib.afm import AFM
from matplotlib.cbook import Bunch, get_realpath_and_stat, \
is_string_like, maxdict
from matplotlib.ft2font import FT2Font, FT2Image, KERNING_DEFAULT, LOAD_FORCE_AUTOHINT, LOAD_NO_HINTING
from matplotlib.font_manager import findfont, FontProperties
from matplotlib._mathtext_data import latex_to_bakoma, \
latex_to_standard, tex2uni, latex_to_cmex, stix_virtual_fonts
from matplotlib import get_data_path, rcParams
import matplotlib.colors as mcolors
import matplotlib._png as _png
####################
##############################################################################
# FONTS
def get_unicode_index(symbol):
"""get_unicode_index(symbol) -> integer
Return the integer index (from the Unicode table) of symbol. *symbol*
can be a single unicode character, a TeX command (i.e. r'\pi'), or a
Type1 symbol name (i.e. 'phi').
"""
# From UTF #25: U+2212 minus sign is the preferred
# representation of the unary and binary minus sign rather than
# the ASCII-derived U+002D hyphen-minus, because minus sign is
# unambiguous and because it is rendered with a more desirable
# length, usually longer than a hyphen.
if symbol == '-':
return 0x2212
try:# This will succeed if symbol is a single unicode char
return ord(symbol)
except TypeError:
pass
try:# Is symbol a TeX symbol (i.e. \alpha)
return tex2uni[symbol.strip("\\")]
except KeyError:
message = """'%(symbol)s' is not a valid Unicode character or
TeX/Type1 symbol"""%locals()
raise ValueError(message)
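# Illustrative examples (based on the mapping tables imported above):
#   get_unicode_index(r'\pi')  -> 0x3c0 via tex2uni
#   get_unicode_index('x')     -> ord('x')
#   get_unicode_index('-')     -> 0x2212, the preferred minus sign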
def unichr_safe(index):
"""Return the Unicode character corresponding to the index,
or the replacement character if this is a narrow build of Python
and the requested character is outside the BMP."""
try:
return unichr(index)
except ValueError:
return unichr(0xFFFD)
class MathtextBackend(object):
"""
The base class for the mathtext backend-specific code. The
purpose of :class:`MathtextBackend` subclasses is to interface
between mathtext and a specific matplotlib graphics backend.
Subclasses need to override the following:
- :meth:`render_glyph`
- :meth:`render_rect_filled`
- :meth:`get_results`
And optionally, if you need to use a Freetype hinting style:
- :meth:`get_hinting_type`
"""
def __init__(self):
self.width = 0
self.height = 0
self.depth = 0
def set_canvas_size(self, w, h, d):
'Dimension the drawing canvas'
self.width = w
self.height = h
self.depth = d
def render_glyph(self, ox, oy, info):
"""
Draw a glyph described by *info* to the reference point (*ox*,
*oy*).
"""
raise NotImplementedError()
def render_rect_filled(self, x1, y1, x2, y2):
"""
Draw a filled black rectangle from (*x1*, *y1*) to (*x2*, *y2*).
"""
raise NotImplementedError()
def get_results(self, box):
"""
Return a backend-specific tuple to return to the backend after
all processing is done.
"""
raise NotImplementedError()
def get_hinting_type(self):
"""
Get the Freetype hinting type to use with this particular
backend.
"""
return LOAD_NO_HINTING
class MathtextBackendAgg(MathtextBackend):
"""
Render glyphs and rectangles to an FTImage buffer, which is later
transferred to the Agg image by the Agg backend.
"""
def __init__(self):
self.ox = 0
self.oy = 0
self.image = None
self.mode = 'bbox'
self.bbox = [0, 0, 0, 0]
MathtextBackend.__init__(self)
def _update_bbox(self, x1, y1, x2, y2):
self.bbox = [min(self.bbox[0], x1),
min(self.bbox[1], y1),
max(self.bbox[2], x2),
max(self.bbox[3], y2)]
def set_canvas_size(self, w, h, d):
MathtextBackend.set_canvas_size(self, w, h, d)
if self.mode != 'bbox':
self.image = FT2Image(ceil(w), ceil(h + d))
def render_glyph(self, ox, oy, info):
if self.mode == 'bbox':
self._update_bbox(ox + info.metrics.xmin,
oy - info.metrics.ymax,
ox + info.metrics.xmax,
oy - info.metrics.ymin)
else:
info.font.draw_glyph_to_bitmap(
self.image, ox, oy - info.metrics.iceberg, info.glyph,
antialiased=rcParams['text.antialiased'])
def render_rect_filled(self, x1, y1, x2, y2):
if self.mode == 'bbox':
self._update_bbox(x1, y1, x2, y2)
else:
height = max(int(y2 - y1) - 1, 0)
if height == 0:
center = (y2 + y1) / 2.0
y = int(center - (height + 1) / 2.0)
else:
y = int(y1)
self.image.draw_rect_filled(int(x1), y, ceil(x2), y + height)
def get_results(self, box, used_characters):
self.mode = 'bbox'
orig_height = box.height
orig_depth = box.depth
ship(0, 0, box)
bbox = self.bbox
bbox = [bbox[0] - 1, bbox[1] - 1, bbox[2] + 1, bbox[3] + 1]
self.mode = 'render'
self.set_canvas_size(
bbox[2] - bbox[0],
(bbox[3] - bbox[1]) - orig_depth,
(bbox[3] - bbox[1]) - orig_height)
ship(-bbox[0], -bbox[1], box)
result = (self.ox,
self.oy,
self.width,
self.height + self.depth,
self.depth,
self.image,
used_characters)
self.image = None
return result
def get_hinting_type(self):
from matplotlib.backends import backend_agg
return backend_agg.get_hinting_flag()
class MathtextBackendBitmap(MathtextBackendAgg):
def get_results(self, box, used_characters):
ox, oy, width, height, depth, image, characters = \
MathtextBackendAgg.get_results(self, box, used_characters)
return image, depth
class MathtextBackendPs(MathtextBackend):
"""
Store information to write a mathtext rendering to the PostScript
backend.
"""
def __init__(self):
self.pswriter = six.moves.cStringIO()
self.lastfont = None
def render_glyph(self, ox, oy, info):
oy = self.height - oy + info.offset
postscript_name = info.postscript_name
fontsize = info.fontsize
symbol_name = info.symbol_name
if (postscript_name, fontsize) != self.lastfont:
ps = """/%(postscript_name)s findfont
%(fontsize)s scalefont
setfont
""" % locals()
self.lastfont = postscript_name, fontsize
self.pswriter.write(ps)
ps = """%(ox)f %(oy)f moveto
/%(symbol_name)s glyphshow\n
""" % locals()
self.pswriter.write(ps)
def render_rect_filled(self, x1, y1, x2, y2):
ps = "%f %f %f %f rectfill\n" % (x1, self.height - y2, x2 - x1, y2 - y1)
self.pswriter.write(ps)
def get_results(self, box, used_characters):
ship(0, 0, box)
return (self.width,
self.height + self.depth,
self.depth,
self.pswriter,
used_characters)
class MathtextBackendPdf(MathtextBackend):
"""
Store information to write a mathtext rendering to the PDF
backend.
"""
def __init__(self):
self.glyphs = []
self.rects = []
def render_glyph(self, ox, oy, info):
filename = info.font.fname
oy = self.height - oy + info.offset
self.glyphs.append(
(ox, oy, filename, info.fontsize,
info.num, info.symbol_name))
def render_rect_filled(self, x1, y1, x2, y2):
self.rects.append((x1, self.height - y2, x2 - x1, y2 - y1))
def get_results(self, box, used_characters):
ship(0, 0, box)
return (self.width,
self.height + self.depth,
self.depth,
self.glyphs,
self.rects,
used_characters)
class MathtextBackendSvg(MathtextBackend):
"""
Store information to write a mathtext rendering to the SVG
backend.
"""
def __init__(self):
self.svg_glyphs = []
self.svg_rects = []
def render_glyph(self, ox, oy, info):
oy = self.height - oy + info.offset
self.svg_glyphs.append(
(info.font, info.fontsize, info.num, ox, oy, info.metrics))
def render_rect_filled(self, x1, y1, x2, y2):
self.svg_rects.append(
(x1, self.height - y1 + 1, x2 - x1, y2 - y1))
def get_results(self, box, used_characters):
ship(0, 0, box)
svg_elements = Bunch(svg_glyphs = self.svg_glyphs,
svg_rects = self.svg_rects)
return (self.width,
self.height + self.depth,
self.depth,
svg_elements,
used_characters)
class MathtextBackendPath(MathtextBackend):
"""
Store information to write a mathtext rendering to the text path
machinery.
"""
def __init__(self):
self.glyphs = []
self.rects = []
def render_glyph(self, ox, oy, info):
oy = self.height - oy + info.offset
thetext = info.num
self.glyphs.append(
(info.font, info.fontsize, thetext, ox, oy))
def render_rect_filled(self, x1, y1, x2, y2):
self.rects.append(
(x1, self.height-y2 , x2 - x1, y2 - y1))
def get_results(self, box, used_characters):
ship(0, 0, box)
return (self.width,
self.height + self.depth,
self.depth,
self.glyphs,
self.rects)
class MathtextBackendCairo(MathtextBackend):
"""
Store information to write a mathtext rendering to the Cairo
backend.
"""
def __init__(self):
self.glyphs = []
self.rects = []
def render_glyph(self, ox, oy, info):
oy = oy - info.offset - self.height
thetext = unichr_safe(info.num)
self.glyphs.append(
(info.font, info.fontsize, thetext, ox, oy))
def render_rect_filled(self, x1, y1, x2, y2):
self.rects.append(
(x1, y1 - self.height, x2 - x1, y2 - y1))
def get_results(self, box, used_characters):
ship(0, 0, box)
return (self.width,
self.height + self.depth,
self.depth,
self.glyphs,
self.rects)
class Fonts(object):
"""
An abstract base class for a system of fonts to use for mathtext.
The class must be able to take symbol keys and font file names and
return the character metrics. It also delegates to a backend class
to do the actual drawing.
"""
def __init__(self, default_font_prop, mathtext_backend):
"""
*default_font_prop*: A
:class:`~matplotlib.font_manager.FontProperties` object to use
for the default non-math font, or the base font for Unicode
(generic) font rendering.
*mathtext_backend*: A subclass of :class:`MathTextBackend`
used to delegate the actual rendering.
"""
self.default_font_prop = default_font_prop
self.mathtext_backend = mathtext_backend
self.used_characters = {}
def destroy(self):
"""
Fix any cyclical references before the object is about
to be destroyed.
"""
self.used_characters = None
def get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi):
"""
Get the kerning distance for font between *sym1* and *sym2*.
*fontX*: one of the TeX font names::
tt, it, rm, cal, sf, bf or default/regular (non-math)
*fontclassX*: TODO
*symX*: a symbol in raw TeX form. e.g., '1', 'x' or '\sigma'
*fontsizeX*: the fontsize in points
*dpi*: the current dots-per-inch
"""
return 0.
def get_metrics(self, font, font_class, sym, fontsize, dpi):
"""
*font*: one of the TeX font names::
tt, it, rm, cal, sf, bf or default/regular (non-math)
*font_class*: TODO
*sym*: a symbol in raw TeX form. e.g., '1', 'x' or '\sigma'
*fontsize*: font size in points
*dpi*: current dots-per-inch
Returns an object with the following attributes:
- *advance*: The advance distance (in points) of the glyph.
- *height*: The height of the glyph in points.
- *width*: The width of the glyph in points.
- *xmin*, *xmax*, *ymin*, *ymax* - the ink rectangle of the glyph
- *iceberg* - the distance from the baseline to the top of
the glyph. This corresponds to TeX's definition of
"height".
"""
info = self._get_info(font, font_class, sym, fontsize, dpi)
return info.metrics
def set_canvas_size(self, w, h, d):
"""
Set the size of the buffer used to render the math expression.
Only really necessary for the bitmap backends.
"""
self.width, self.height, self.depth = ceil(w), ceil(h), ceil(d)
self.mathtext_backend.set_canvas_size(self.width, self.height, self.depth)
def render_glyph(self, ox, oy, facename, font_class, sym, fontsize, dpi):
"""
Draw a glyph at
- *ox*, *oy*: position
- *facename*: One of the TeX face names
- *font_class*:
- *sym*: TeX symbol name or single character
- *fontsize*: fontsize in points
- *dpi*: The dpi to draw at.
"""
info = self._get_info(facename, font_class, sym, fontsize, dpi)
realpath, stat_key = get_realpath_and_stat(info.font.fname)
used_characters = self.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].add(info.num)
self.mathtext_backend.render_glyph(ox, oy, info)
def render_rect_filled(self, x1, y1, x2, y2):
"""
Draw a filled rectangle from (*x1*, *y1*) to (*x2*, *y2*).
"""
self.mathtext_backend.render_rect_filled(x1, y1, x2, y2)
def get_xheight(self, font, fontsize, dpi):
"""
Get the xheight for the given *font* and *fontsize*.
"""
raise NotImplementedError()
def get_underline_thickness(self, font, fontsize, dpi):
"""
Get the line thickness that matches the given font. Used as a
base unit for drawing lines such as in a fraction or radical.
"""
raise NotImplementedError()
def get_used_characters(self):
"""
Get the set of characters that were used in the math
expression. Used by backends that need to subset fonts so
they know which glyphs to include.
"""
return self.used_characters
def get_results(self, box):
"""
Get the data needed by the backend to render the math
expression. The return value is backend-specific.
"""
result = self.mathtext_backend.get_results(box, self.get_used_characters())
self.destroy()
return result
def get_sized_alternatives_for_symbol(self, fontname, sym):
"""
Override if your font provides multiple sizes of the same
symbol. Should return a list of symbols matching *sym* in
various sizes. The expression renderer will select the most
appropriate size for a given situation from this list.
"""
return [(fontname, sym)]
class TruetypeFonts(Fonts):
"""
A generic base class for all font setups that use Truetype fonts
(through FT2Font).
"""
class CachedFont:
def __init__(self, font):
self.font = font
self.charmap = font.get_charmap()
self.glyphmap = dict(
[(glyphind, ccode) for ccode, glyphind in six.iteritems(self.charmap)])
def __repr__(self):
return repr(self.font)
def __init__(self, default_font_prop, mathtext_backend):
Fonts.__init__(self, default_font_prop, mathtext_backend)
self.glyphd = {}
self._fonts = {}
filename = findfont(default_font_prop)
default_font = self.CachedFont(FT2Font(filename))
self._fonts['default'] = default_font
self._fonts['regular'] = default_font
def destroy(self):
self.glyphd = None
Fonts.destroy(self)
def _get_font(self, font):
if font in self.fontmap:
basename = self.fontmap[font]
else:
basename = font
cached_font = self._fonts.get(basename)
if cached_font is None and os.path.exists(basename):
font = FT2Font(basename)
cached_font = self.CachedFont(font)
self._fonts[basename] = cached_font
self._fonts[font.postscript_name] = cached_font
self._fonts[font.postscript_name.lower()] = cached_font
return cached_font
def _get_offset(self, cached_font, glyph, fontsize, dpi):
if cached_font.font.postscript_name == 'Cmex10':
return ((glyph.height/64.0/2.0) + (fontsize/3.0 * dpi/72.0))
return 0.
def _get_info(self, fontname, font_class, sym, fontsize, dpi):
key = fontname, font_class, sym, fontsize, dpi
bunch = self.glyphd.get(key)
if bunch is not None:
return bunch
cached_font, num, symbol_name, fontsize, slanted = \
self._get_glyph(fontname, font_class, sym, fontsize)
font = cached_font.font
font.set_size(fontsize, dpi)
glyph = font.load_char(
num,
flags=self.mathtext_backend.get_hinting_type())
xmin, ymin, xmax, ymax = [val/64.0 for val in glyph.bbox]
offset = self._get_offset(cached_font, glyph, fontsize, dpi)
metrics = Bunch(
advance = glyph.linearHoriAdvance/65536.0,
height = glyph.height/64.0,
width = glyph.width/64.0,
xmin = xmin,
xmax = xmax,
ymin = ymin+offset,
ymax = ymax+offset,
# iceberg is the equivalent of TeX's "height"
iceberg = glyph.horiBearingY/64.0 + offset,
slanted = slanted
)
result = self.glyphd[key] = Bunch(
font = font,
fontsize = fontsize,
postscript_name = font.postscript_name,
metrics = metrics,
symbol_name = symbol_name,
num = num,
glyph = glyph,
offset = offset
)
return result
def get_xheight(self, font, fontsize, dpi):
cached_font = self._get_font(font)
cached_font.font.set_size(fontsize, dpi)
pclt = cached_font.font.get_sfnt_table('pclt')
if pclt is None:
# Some fonts don't store the xHeight, so we do a poor man's xHeight
metrics = self.get_metrics(font, rcParams['mathtext.default'], 'x', fontsize, dpi)
return metrics.iceberg
xHeight = (pclt['xHeight'] / 64.0) * (fontsize / 12.0) * (dpi / 100.0)
return xHeight
def get_underline_thickness(self, font, fontsize, dpi):
# This function used to grab underline thickness from the font
# metrics, but that information is just too un-reliable, so it
# is now hardcoded.
return ((0.75 / 12.0) * fontsize * dpi) / 72.0
def get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi):
if font1 == font2 and fontsize1 == fontsize2:
info1 = self._get_info(font1, fontclass1, sym1, fontsize1, dpi)
info2 = self._get_info(font2, fontclass2, sym2, fontsize2, dpi)
font = info1.font
return font.get_kerning(info1.num, info2.num, KERNING_DEFAULT) / 64.0
return Fonts.get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi)
class BakomaFonts(TruetypeFonts):
"""
Use the Bakoma TrueType fonts for rendering.
Symbols are strewn about a number of font files, each of which has
its own proprietary 8-bit encoding.
"""
_fontmap = { 'cal' : 'cmsy10',
'rm' : 'cmr10',
'tt' : 'cmtt10',
'it' : 'cmmi10',
'bf' : 'cmb10',
'sf' : 'cmss10',
'ex' : 'cmex10'
}
def __init__(self, *args, **kwargs):
self._stix_fallback = StixFonts(*args, **kwargs)
TruetypeFonts.__init__(self, *args, **kwargs)
self.fontmap = {}
for key, val in six.iteritems(self._fontmap):
fullpath = findfont(val)
self.fontmap[key] = fullpath
self.fontmap[val] = fullpath
_slanted_symbols = set(r"\int \oint".split())
def _get_glyph(self, fontname, font_class, sym, fontsize):
symbol_name = None
if fontname in self.fontmap and sym in latex_to_bakoma:
basename, num = latex_to_bakoma[sym]
slanted = (basename == "cmmi10") or sym in self._slanted_symbols
cached_font = self._get_font(basename)
if cached_font is not None:
symbol_name = cached_font.font.get_glyph_name(num)
num = cached_font.glyphmap[num]
elif len(sym) == 1:
slanted = (fontname == "it")
cached_font = self._get_font(fontname)
if cached_font is not None:
num = ord(sym)
gid = cached_font.charmap.get(num)
if gid is not None:
symbol_name = cached_font.font.get_glyph_name(
cached_font.charmap[num])
if symbol_name is None:
return self._stix_fallback._get_glyph(
fontname, font_class, sym, fontsize)
return cached_font, num, symbol_name, fontsize, slanted
# The Bakoma fonts contain many pre-sized alternatives for the
# delimiters. The AutoSizedChar class will use these alternatives
# and select the best (closest sized) glyph.
_size_alternatives = {
'(' : [('rm', '('), ('ex', '\xa1'), ('ex', '\xb3'),
('ex', '\xb5'), ('ex', '\xc3')],
')' : [('rm', ')'), ('ex', '\xa2'), ('ex', '\xb4'),
('ex', '\xb6'), ('ex', '\x21')],
'{' : [('cal', '{'), ('ex', '\xa9'), ('ex', '\x6e'),
('ex', '\xbd'), ('ex', '\x28')],
'}' : [('cal', '}'), ('ex', '\xaa'), ('ex', '\x6f'),
('ex', '\xbe'), ('ex', '\x29')],
# The fourth size of '[' is mysteriously missing from the BaKoMa
# font, so I've omitted it for both '[' and ']'
'[' : [('rm', '['), ('ex', '\xa3'), ('ex', '\x68'),
('ex', '\x22')],
']' : [('rm', ']'), ('ex', '\xa4'), ('ex', '\x69'),
('ex', '\x23')],
r'\lfloor' : [('ex', '\xa5'), ('ex', '\x6a'),
('ex', '\xb9'), ('ex', '\x24')],
r'\rfloor' : [('ex', '\xa6'), ('ex', '\x6b'),
('ex', '\xba'), ('ex', '\x25')],
r'\lceil' : [('ex', '\xa7'), ('ex', '\x6c'),
('ex', '\xbb'), ('ex', '\x26')],
r'\rceil' : [('ex', '\xa8'), ('ex', '\x6d'),
('ex', '\xbc'), ('ex', '\x27')],
r'\langle' : [('ex', '\xad'), ('ex', '\x44'),
('ex', '\xbf'), ('ex', '\x2a')],
r'\rangle' : [('ex', '\xae'), ('ex', '\x45'),
('ex', '\xc0'), ('ex', '\x2b')],
r'\__sqrt__' : [('ex', '\x70'), ('ex', '\x71'),
('ex', '\x72'), ('ex', '\x73')],
r'\backslash': [('ex', '\xb2'), ('ex', '\x2f'),
('ex', '\xc2'), ('ex', '\x2d')],
r'/' : [('rm', '/'), ('ex', '\xb1'), ('ex', '\x2e'),
('ex', '\xcb'), ('ex', '\x2c')],
r'\widehat' : [('rm', '\x5e'), ('ex', '\x62'), ('ex', '\x63'),
('ex', '\x64')],
r'\widetilde': [('rm', '\x7e'), ('ex', '\x65'), ('ex', '\x66'),
('ex', '\x67')],
r'<' : [('cal', 'h'), ('ex', 'D')],
r'>' : [('cal', 'i'), ('ex', 'E')]
}
for alias, target in [(r'\leftparen', '('),
                      (r'\rightparen', ')'),
                      (r'\leftbrace', '{'),
                      (r'\rightbrace', '}'),
                      (r'\leftbracket', '['),
                      (r'\rightbracket', ']'),
(r'\{', '{'),
(r'\}', '}'),
(r'\[', '['),
(r'\]', ']')]:
_size_alternatives[alias] = _size_alternatives[target]
def get_sized_alternatives_for_symbol(self, fontname, sym):
return self._size_alternatives.get(sym, [(fontname, sym)])
class UnicodeFonts(TruetypeFonts):
"""
An abstract base class for handling Unicode fonts.
While some reasonably complete Unicode fonts (such as DejaVu) may
work in some situations, the only Unicode font I'm aware of with a
complete set of math symbols is STIX.
This class will "fallback" on the Bakoma fonts when a required
symbol can not be found in the font.
"""
use_cmex = True
def __init__(self, *args, **kwargs):
# This must come first so the backend's owner is set correctly
if rcParams['mathtext.fallback_to_cm']:
self.cm_fallback = BakomaFonts(*args, **kwargs)
else:
self.cm_fallback = None
TruetypeFonts.__init__(self, *args, **kwargs)
self.fontmap = {}
for texfont in "cal rm tt it bf sf".split():
prop = rcParams['mathtext.' + texfont]
font = findfont(prop)
self.fontmap[texfont] = font
prop = FontProperties('cmex10')
font = findfont(prop)
self.fontmap['ex'] = font
_slanted_symbols = set(r"\int \oint".split())
def _map_virtual_font(self, fontname, font_class, uniindex):
return fontname, uniindex
def _get_glyph(self, fontname, font_class, sym, fontsize):
found_symbol = False
if self.use_cmex:
uniindex = latex_to_cmex.get(sym)
if uniindex is not None:
fontname = 'ex'
found_symbol = True
if not found_symbol:
try:
uniindex = get_unicode_index(sym)
found_symbol = True
except ValueError:
uniindex = ord('?')
warn("No TeX to unicode mapping for '%s'" %
sym.encode('ascii', 'backslashreplace'),
MathTextWarning)
fontname, uniindex = self._map_virtual_font(
fontname, font_class, uniindex)
new_fontname = fontname
# Only characters in the "Letter" class should be italicized in 'it'
# mode. Greek capital letters should be Roman.
if found_symbol:
if fontname == 'it':
if uniindex < 0x10000:
unistring = unichr(uniindex)
if (not unicodedata.category(unistring)[0] == "L"
or unicodedata.name(unistring).startswith("GREEK CAPITAL")):
new_fontname = 'rm'
slanted = (new_fontname == 'it') or sym in self._slanted_symbols
found_symbol = False
cached_font = self._get_font(new_fontname)
if cached_font is not None:
try:
glyphindex = cached_font.charmap[uniindex]
found_symbol = True
except KeyError:
pass
if not found_symbol:
if self.cm_fallback:
warn("Substituting with a symbol from Computer Modern.",
MathTextWarning)
return self.cm_fallback._get_glyph(
fontname, 'it', sym, fontsize)
else:
if fontname in ('it', 'regular') and isinstance(self, StixFonts):
return self._get_glyph('rm', font_class, sym, fontsize)
warn("Font '%s' does not have a glyph for '%s' [U%x]" %
(new_fontname, sym.encode('ascii', 'backslashreplace'), uniindex),
MathTextWarning)
warn("Substituting with a dummy symbol.", MathTextWarning)
fontname = 'rm'
new_fontname = fontname
cached_font = self._get_font(fontname)
uniindex = 0xA4 # currency character, for lack of anything better
glyphindex = cached_font.charmap[uniindex]
slanted = False
symbol_name = cached_font.font.get_glyph_name(glyphindex)
return cached_font, uniindex, symbol_name, fontsize, slanted
def get_sized_alternatives_for_symbol(self, fontname, sym):
if self.cm_fallback:
return self.cm_fallback.get_sized_alternatives_for_symbol(
fontname, sym)
return [(fontname, sym)]
class StixFonts(UnicodeFonts):
"""
A font handling class for the STIX fonts.
In addition to what UnicodeFonts provides, this class:
- supports "virtual fonts" which are complete alpha numeric
character sets with different font styles at special Unicode
code points, such as "Blackboard".
- handles sized alternative characters for the STIXSizeX fonts.
"""
_fontmap = { 'rm' : 'STIXGeneral',
'it' : 'STIXGeneral:italic',
'bf' : 'STIXGeneral:weight=bold',
'nonunirm' : 'STIXNonUnicode',
'nonuniit' : 'STIXNonUnicode:italic',
'nonunibf' : 'STIXNonUnicode:weight=bold',
0 : 'STIXGeneral',
1 : 'STIXSizeOneSym',
2 : 'STIXSizeTwoSym',
3 : 'STIXSizeThreeSym',
4 : 'STIXSizeFourSym',
5 : 'STIXSizeFiveSym'
}
use_cmex = False
cm_fallback = False
_sans = False
def __init__(self, *args, **kwargs):
TruetypeFonts.__init__(self, *args, **kwargs)
self.fontmap = {}
for key, name in six.iteritems(self._fontmap):
fullpath = findfont(name)
self.fontmap[key] = fullpath
self.fontmap[name] = fullpath
def _map_virtual_font(self, fontname, font_class, uniindex):
# Handle these "fonts" that are actually embedded in
# other fonts.
mapping = stix_virtual_fonts.get(fontname)
if (self._sans and mapping is None and
fontname not in ('regular', 'default')):
mapping = stix_virtual_fonts['sf']
doing_sans_conversion = True
else:
doing_sans_conversion = False
if mapping is not None:
if isinstance(mapping, dict):
mapping = mapping.get(font_class, 'rm')
# Binary search for the source glyph
lo = 0
hi = len(mapping)
while lo < hi:
mid = (lo+hi)//2
range = mapping[mid]
if uniindex < range[0]:
hi = mid
elif uniindex <= range[1]:
break
else:
lo = mid + 1
if uniindex >= range[0] and uniindex <= range[1]:
uniindex = uniindex - range[0] + range[3]
fontname = range[2]
elif not doing_sans_conversion:
# This will generate a dummy character
uniindex = 0x1
fontname = rcParams['mathtext.default']
# Handle private use area glyphs
if (fontname in ('it', 'rm', 'bf') and
uniindex >= 0xe000 and uniindex <= 0xf8ff):
fontname = 'nonuni' + fontname
return fontname, uniindex
_size_alternatives = {}
def get_sized_alternatives_for_symbol(self, fontname, sym):
fixes = {'\{': '{', '\}': '}', '\[': '[', '\]': ']'}
sym = fixes.get(sym, sym)
alternatives = self._size_alternatives.get(sym)
if alternatives:
return alternatives
alternatives = []
try:
uniindex = get_unicode_index(sym)
except ValueError:
return [(fontname, sym)]
fix_ups = {
ord('<'): 0x27e8,
ord('>'): 0x27e9 }
uniindex = fix_ups.get(uniindex, uniindex)
for i in range(6):
cached_font = self._get_font(i)
glyphindex = cached_font.charmap.get(uniindex)
if glyphindex is not None:
alternatives.append((i, unichr_safe(uniindex)))
# The largest size of the radical symbol in STIX has incorrect
# metrics that cause it to be disconnected from the stem.
if sym == r'\__sqrt__':
alternatives = alternatives[:-1]
self._size_alternatives[sym] = alternatives
return alternatives
class StixSansFonts(StixFonts):
"""
A font handling class for the STIX fonts (that uses sans-serif
characters by default).
"""
_sans = True
class StandardPsFonts(Fonts):
"""
Use the standard postscript fonts for rendering to backend_ps
Unlike the other font classes, BakomaFont and UnicodeFont, this
one requires the Ps backend.
"""
basepath = os.path.join( get_data_path(), 'fonts', 'afm' )
fontmap = { 'cal' : 'pzcmi8a', # Zapf Chancery
'rm' : 'pncr8a', # New Century Schoolbook
'tt' : 'pcrr8a', # Courier
'it' : 'pncri8a', # New Century Schoolbook Italic
'sf' : 'phvr8a', # Helvetica
'bf' : 'pncb8a', # New Century Schoolbook Bold
None : 'psyr' # Symbol
}
def __init__(self, default_font_prop):
Fonts.__init__(self, default_font_prop, MathtextBackendPs())
self.glyphd = {}
self.fonts = {}
filename = findfont(default_font_prop, fontext='afm',
directory=self.basepath)
if filename is None:
filename = findfont('Helvetica', fontext='afm',
directory=self.basepath)
with open(filename, 'r') as fd:
default_font = AFM(fd)
default_font.fname = filename
self.fonts['default'] = default_font
self.fonts['regular'] = default_font
self.pswriter = six.moves.cStringIO()
def _get_font(self, font):
if font in self.fontmap:
basename = self.fontmap[font]
else:
basename = font
cached_font = self.fonts.get(basename)
if cached_font is None:
fname = os.path.join(self.basepath, basename + ".afm")
with open(fname, 'r') as fd:
cached_font = AFM(fd)
cached_font.fname = fname
self.fonts[basename] = cached_font
self.fonts[cached_font.get_fontname()] = cached_font
return cached_font
def _get_info (self, fontname, font_class, sym, fontsize, dpi):
'load the cmfont, metrics and glyph with caching'
key = fontname, sym, fontsize, dpi
tup = self.glyphd.get(key)
if tup is not None:
return tup
# Only characters in the "Letter" class should really be italicized.
# This class includes greek letters, so we're ok
if (fontname == 'it' and
(len(sym) > 1 or
not unicodedata.category(six.text_type(sym)).startswith("L"))):
fontname = 'rm'
found_symbol = False
if sym in latex_to_standard:
fontname, num = latex_to_standard[sym]
glyph = chr(num)
found_symbol = True
elif len(sym) == 1:
glyph = sym
num = ord(glyph)
found_symbol = True
else:
warn("No TeX to built-in Postscript mapping for '%s'" % sym,
MathTextWarning)
slanted = (fontname == 'it')
font = self._get_font(fontname)
if found_symbol:
try:
symbol_name = font.get_name_char(glyph)
except KeyError:
warn("No glyph in standard Postscript font '%s' for '%s'" %
(font.postscript_name, sym),
MathTextWarning)
found_symbol = False
if not found_symbol:
glyph = sym = '?'
num = ord(glyph)
symbol_name = font.get_name_char(glyph)
offset = 0
scale = 0.001 * fontsize
xmin, ymin, xmax, ymax = [val * scale
for val in font.get_bbox_char(glyph)]
metrics = Bunch(
advance = font.get_width_char(glyph) * scale,
width = font.get_width_char(glyph) * scale,
height = font.get_height_char(glyph) * scale,
xmin = xmin,
xmax = xmax,
ymin = ymin+offset,
ymax = ymax+offset,
# iceberg is the equivalent of TeX's "height"
iceberg = ymax + offset,
slanted = slanted
)
self.glyphd[key] = Bunch(
font = font,
fontsize = fontsize,
postscript_name = font.get_fontname(),
metrics = metrics,
symbol_name = symbol_name,
num = num,
glyph = glyph,
offset = offset
)
return self.glyphd[key]
def get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi):
if font1 == font2 and fontsize1 == fontsize2:
info1 = self._get_info(font1, fontclass1, sym1, fontsize1, dpi)
info2 = self._get_info(font2, fontclass2, sym2, fontsize2, dpi)
font = info1.font
return (font.get_kern_dist(info1.glyph, info2.glyph)
* 0.001 * fontsize1)
return Fonts.get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi)
def get_xheight(self, font, fontsize, dpi):
cached_font = self._get_font(font)
return cached_font.get_xheight() * 0.001 * fontsize
def get_underline_thickness(self, font, fontsize, dpi):
cached_font = self._get_font(font)
return cached_font.get_underline_thickness() * 0.001 * fontsize
##############################################################################
# TeX-LIKE BOX MODEL
# The following is based directly on the document 'woven' from the
# TeX82 source code. This information is also available in printed
# form:
#
# Knuth, Donald E.. 1986. Computers and Typesetting, Volume B:
# TeX: The Program. Addison-Wesley Professional.
#
# The most relevant "chapters" are:
# Data structures for boxes and their friends
# Shipping pages out (Ship class)
# Packaging (hpack and vpack)
# Data structures for math mode
# Subroutines for math mode
# Typesetting math formulas
#
# Many of the docstrings below refer to a numbered "node" in that
# book, e.g., node123
#
# Note that (as TeX) y increases downward, unlike many other parts of
# matplotlib.
# How much text shrinks when going to the next-smallest level. GROW_FACTOR
# must be the inverse of SHRINK_FACTOR.
SHRINK_FACTOR = 0.7
GROW_FACTOR = 1.0 / SHRINK_FACTOR
# The number of different sizes of chars to use, beyond which they will not
# get any smaller
NUM_SIZE_LEVELS = 6
# Percentage of x-height of additional horiz. space after sub/superscripts
SCRIPT_SPACE = 0.2
# Percentage of x-height that sub/superscripts drop below the baseline
SUBDROP = 0.3
# Percentage of x-height that superscripts drop below the baseline
SUP1 = 0.5
# Percentage of x-height that subscripts drop below the baseline
SUB1 = 0.0
# Percentage of x-height that superscripts are offset relative to the subscript
DELTA = 0.18
class MathTextWarning(Warning):
pass
class Node(object):
"""
A node in the TeX box model
"""
def __init__(self):
self.size = 0
def __repr__(self):
return self.__internal_repr__()
def __internal_repr__(self):
return self.__class__.__name__
def get_kerning(self, next):
return 0.0
def shrink(self):
"""
Shrinks one level smaller. There are only three levels of
sizes, after which things will no longer get smaller.
"""
self.size += 1
def grow(self):
"""
Grows one level larger. There is no limit to how big
something can get.
"""
self.size -= 1
def render(self, x, y):
pass
class Box(Node):
"""
Represents any node with a physical location.
"""
def __init__(self, width, height, depth):
Node.__init__(self)
self.width = width
self.height = height
self.depth = depth
def shrink(self):
Node.shrink(self)
if self.size < NUM_SIZE_LEVELS:
self.width *= SHRINK_FACTOR
self.height *= SHRINK_FACTOR
self.depth *= SHRINK_FACTOR
def grow(self):
Node.grow(self)
self.width *= GROW_FACTOR
self.height *= GROW_FACTOR
self.depth *= GROW_FACTOR
def render(self, x1, y1, x2, y2):
pass
class Vbox(Box):
"""
A box with only height (zero width).
"""
def __init__(self, height, depth):
Box.__init__(self, 0., height, depth)
class Hbox(Box):
"""
A box with only width (zero height and depth).
"""
def __init__(self, width):
Box.__init__(self, width, 0., 0.)
class Char(Node):
"""
Represents a single character. Unlike TeX, the font information
and metrics are stored with each :class:`Char` to make it easier
to lookup the font metrics when needed. Note that TeX boxes have
a width, height, and depth, unlike Type1 and Truetype which use a
full bounding box and an advance in the x-direction. The metrics
must be converted to the TeX way, and the advance (if different
from width) must be converted into a :class:`Kern` node when the
:class:`Char` is added to its parent :class:`Hlist`.
"""
def __init__(self, c, state):
Node.__init__(self)
self.c = c
self.font_output = state.font_output
assert isinstance(state.font, (six.string_types, int))
self.font = state.font
self.font_class = state.font_class
self.fontsize = state.fontsize
self.dpi = state.dpi
# The real width, height and depth will be set during the
# pack phase, after we know the real fontsize
self._update_metrics()
def __internal_repr__(self):
return '`%s`' % self.c
def _update_metrics(self):
metrics = self._metrics = self.font_output.get_metrics(
self.font, self.font_class, self.c, self.fontsize, self.dpi)
if self.c == ' ':
self.width = metrics.advance
else:
self.width = metrics.width
self.height = metrics.iceberg
self.depth = -(metrics.iceberg - metrics.height)
def is_slanted(self):
return self._metrics.slanted
def get_kerning(self, next):
"""
Return the amount of kerning between this and the given
character. Called when characters are strung together into
:class:`Hlist` to create :class:`Kern` nodes.
"""
advance = self._metrics.advance - self.width
kern = 0.
if isinstance(next, Char):
kern = self.font_output.get_kern(
self.font, self.font_class, self.c, self.fontsize,
next.font, next.font_class, next.c, next.fontsize,
self.dpi)
return advance + kern
def render(self, x, y):
"""
Render the character to the canvas
"""
self.font_output.render_glyph(
x, y,
self.font, self.font_class, self.c, self.fontsize, self.dpi)
def shrink(self):
Node.shrink(self)
if self.size < NUM_SIZE_LEVELS:
self.fontsize *= SHRINK_FACTOR
self.width *= SHRINK_FACTOR
self.height *= SHRINK_FACTOR
self.depth *= SHRINK_FACTOR
def grow(self):
Node.grow(self)
self.fontsize *= GROW_FACTOR
self.width *= GROW_FACTOR
self.height *= GROW_FACTOR
self.depth *= GROW_FACTOR
class Accent(Char):
"""
The font metrics need to be dealt with differently for accents,
since they are already offset correctly from the baseline in
TrueType fonts.
"""
def _update_metrics(self):
metrics = self._metrics = self.font_output.get_metrics(
self.font, self.font_class, self.c, self.fontsize, self.dpi)
self.width = metrics.xmax - metrics.xmin
self.height = metrics.ymax - metrics.ymin
self.depth = 0
def shrink(self):
Char.shrink(self)
self._update_metrics()
def grow(self):
Char.grow(self)
self._update_metrics()
def render(self, x, y):
"""
Render the character to the canvas.
"""
self.font_output.render_glyph(
x - self._metrics.xmin, y + self._metrics.ymin,
self.font, self.font_class, self.c, self.fontsize, self.dpi)
class List(Box):
"""
A list of nodes (either horizontal or vertical).
"""
def __init__(self, elements):
Box.__init__(self, 0., 0., 0.)
self.shift_amount = 0. # An arbitrary offset
self.children = elements # The child nodes of this list
# The following parameters are set in the vpack and hpack functions
self.glue_set = 0. # The glue setting of this list
self.glue_sign = 0 # 0: normal, -1: shrinking, 1: stretching
self.glue_order = 0 # The order of infinity (0 - 3) for the glue
def __repr__(self):
return '[%s <%.02f %.02f %.02f %.02f> %s]' % (
self.__internal_repr__(),
self.width, self.height,
self.depth, self.shift_amount,
' '.join([repr(x) for x in self.children]))
def _determine_order(self, totals):
"""
A helper function to determine the highest order of glue
used by the members of this list. Used by vpack and hpack.
"""
o = 0
for i in range(len(totals) - 1, 0, -1):
if totals[i] != 0.0:
o = i
break
return o
def _set_glue(self, x, sign, totals, error_type):
o = self._determine_order(totals)
self.glue_order = o
self.glue_sign = sign
if totals[o] != 0.:
self.glue_set = x / totals[o]
else:
self.glue_sign = 0
self.glue_ratio = 0.
if o == 0:
if len(self.children):
warn("%s %s: %r" % (error_type, self.__class__.__name__, self),
MathTextWarning)
def shrink(self):
for child in self.children:
child.shrink()
Box.shrink(self)
if self.size < NUM_SIZE_LEVELS:
self.shift_amount *= SHRINK_FACTOR
self.glue_set *= SHRINK_FACTOR
def grow(self):
for child in self.children:
child.grow()
Box.grow(self)
self.shift_amount *= GROW_FACTOR
self.glue_set *= GROW_FACTOR
class Hlist(List):
"""
A horizontal list of boxes.
"""
def __init__(self, elements, w=0., m='additional', do_kern=True):
List.__init__(self, elements)
if do_kern:
self.kern()
self.hpack()
def kern(self):
"""
Insert :class:`Kern` nodes between :class:`Char` nodes to set
kerning. The :class:`Char` nodes themselves determine the
amount of kerning they need (in :meth:`~Char.get_kerning`),
and this function just creates the linked list in the correct
way.
"""
new_children = []
num_children = len(self.children)
if num_children:
for i in range(num_children):
elem = self.children[i]
if i < num_children - 1:
next = self.children[i + 1]
else:
next = None
new_children.append(elem)
kerning_distance = elem.get_kerning(next)
if kerning_distance != 0.:
kern = Kern(kerning_distance)
new_children.append(kern)
self.children = new_children
# This is a failed experiment to fake cross-font kerning.
# def get_kerning(self, next):
# if len(self.children) >= 2 and isinstance(self.children[-2], Char):
# if isinstance(next, Char):
# print "CASE A"
# return self.children[-2].get_kerning(next)
# elif isinstance(next, Hlist) and len(next.children) and isinstance(next.children[0], Char):
# print "CASE B"
# result = self.children[-2].get_kerning(next.children[0])
# print result
# return result
# return 0.0
def hpack(self, w=0., m='additional'):
"""
The main duty of :meth:`hpack` is to compute the dimensions of
the resulting boxes, and to adjust the glue if one of those
dimensions is pre-specified. The computed sizes normally
enclose all of the material inside the new box; but some items
may stick out if negative glue is used, if the box is
overfull, or if a ``\\vbox`` includes other boxes that have
been shifted left.
- *w*: specifies a width
- *m*: is either 'exactly' or 'additional'.
Thus, ``hpack(w, 'exactly')`` produces a box whose width is
exactly *w*, while ``hpack(w, 'additional')`` yields a box
whose width is the natural width plus *w*. The default values
produce a box with the natural width.
"""
# I don't know why these get reset in TeX. Shift_amount is pretty
# much useless if we do.
#self.shift_amount = 0.
h = 0.
d = 0.
x = 0.
total_stretch = [0.] * 4
total_shrink = [0.] * 4
for p in self.children:
if isinstance(p, Char):
x += p.width
h = max(h, p.height)
d = max(d, p.depth)
elif isinstance(p, Box):
x += p.width
if not isinf(p.height) and not isinf(p.depth):
s = getattr(p, 'shift_amount', 0.)
h = max(h, p.height - s)
d = max(d, p.depth + s)
elif isinstance(p, Glue):
glue_spec = p.glue_spec
x += glue_spec.width
total_stretch[glue_spec.stretch_order] += glue_spec.stretch
total_shrink[glue_spec.shrink_order] += glue_spec.shrink
elif isinstance(p, Kern):
x += p.width
self.height = h
self.depth = d
if m == 'additional':
w += x
self.width = w
x = w - x
if x == 0.:
self.glue_sign = 0
self.glue_order = 0
self.glue_ratio = 0.
return
if x > 0.:
self._set_glue(x, 1, total_stretch, "Overfull")
else:
self._set_glue(x, -1, total_shrink, "Underfull")
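# A minimal usage sketch (illustrative only): an Hlist computes its natural
# size during hpack(), so a list built from plain boxes simply adds up their
# widths, e.g.
#
#     hl = Hlist([Hbox(1.0), Hbox(2.0)])
#     hl.width   # == 3.0, the natural width (hpack's default 'additional' mode)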
class Vlist(List):
"""
A vertical list of boxes.
"""
def __init__(self, elements, h=0., m='additional'):
List.__init__(self, elements)
self.vpack()
def vpack(self, h=0., m='additional', l=float(inf)):
"""
The main duty of :meth:`vpack` is to compute the dimensions of
the resulting boxes, and to adjust the glue if one of those
dimensions is pre-specified.
- *h*: specifies a height
- *m*: is either 'exactly' or 'additional'.
- *l*: a maximum height
Thus, ``vpack(h, 'exactly')`` produces a box whose height is
exactly *h*, while ``vpack(h, 'additional')`` yields a box
whose height is the natural height plus *h*. The default
        values produce a box with the natural height.
"""
# I don't know why these get reset in TeX. Shift_amount is pretty
# much useless if we do.
# self.shift_amount = 0.
w = 0.
d = 0.
x = 0.
total_stretch = [0.] * 4
total_shrink = [0.] * 4
for p in self.children:
if isinstance(p, Box):
x += d + p.height
d = p.depth
if not isinf(p.width):
s = getattr(p, 'shift_amount', 0.)
w = max(w, p.width + s)
elif isinstance(p, Glue):
x += d
d = 0.
glue_spec = p.glue_spec
x += glue_spec.width
total_stretch[glue_spec.stretch_order] += glue_spec.stretch
total_shrink[glue_spec.shrink_order] += glue_spec.shrink
elif isinstance(p, Kern):
x += d + p.width
d = 0.
elif isinstance(p, Char):
raise RuntimeError("Internal mathtext error: Char node found in Vlist.")
self.width = w
if d > l:
x += d - l
self.depth = l
else:
self.depth = d
if m == 'additional':
h += x
self.height = h
x = h - x
if x == 0:
self.glue_sign = 0
self.glue_order = 0
self.glue_ratio = 0.
return
if x > 0.:
self._set_glue(x, 1, total_stretch, "Overfull")
else:
self._set_glue(x, -1, total_shrink, "Underfull")
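# Analogous sketch for vertical packing (illustrative only): vpack() stacks the
# children's heights and depths, keeping only the last child's depth as the
# depth of the whole list, e.g.
#
#     vl = Vlist([Vbox(1.0, 0.5), Vbox(2.0, 0.0)])
#     vl.height  # == 3.5 (1.0 + 0.5 + 2.0); vl.depth == 0.0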
class Rule(Box):
"""
A :class:`Rule` node stands for a solid black rectangle; it has
*width*, *depth*, and *height* fields just as in an
:class:`Hlist`. However, if any of these dimensions is inf, the
actual value will be determined by running the rule up to the
boundary of the innermost enclosing box. This is called a "running
dimension." The width is never running in an :class:`Hlist`; the
height and depth are never running in a :class:`Vlist`.
"""
def __init__(self, width, height, depth, state):
Box.__init__(self, width, height, depth)
self.font_output = state.font_output
def render(self, x, y, w, h):
self.font_output.render_rect_filled(x, y, x + w, y + h)
class Hrule(Rule):
"""
Convenience class to create a horizontal rule.
"""
def __init__(self, state, thickness=None):
if thickness is None:
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
height = depth = thickness * 0.5
Rule.__init__(self, inf, height, depth, state)
class Vrule(Rule):
"""
Convenience class to create a vertical rule.
"""
def __init__(self, state):
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
Rule.__init__(self, thickness, inf, inf, state)
class Glue(Node):
"""
Most of the information in this object is stored in the underlying
:class:`GlueSpec` class, which is shared between multiple glue objects. (This
is a memory optimization which probably doesn't matter anymore, but it's
easier to stick to what TeX does.)
"""
def __init__(self, glue_type, copy=False):
Node.__init__(self)
self.glue_subtype = 'normal'
if is_string_like(glue_type):
glue_spec = GlueSpec.factory(glue_type)
elif isinstance(glue_type, GlueSpec):
glue_spec = glue_type
else:
            raise ValueError("glue_type must be a glue spec name or instance.")
if copy:
glue_spec = glue_spec.copy()
self.glue_spec = glue_spec
def shrink(self):
Node.shrink(self)
if self.size < NUM_SIZE_LEVELS:
if self.glue_spec.width != 0.:
self.glue_spec = self.glue_spec.copy()
self.glue_spec.width *= SHRINK_FACTOR
def grow(self):
Node.grow(self)
if self.glue_spec.width != 0.:
self.glue_spec = self.glue_spec.copy()
self.glue_spec.width *= GROW_FACTOR
class GlueSpec(object):
"""
See :class:`Glue`.
"""
def __init__(self, width=0., stretch=0., stretch_order=0, shrink=0., shrink_order=0):
self.width = width
self.stretch = stretch
self.stretch_order = stretch_order
self.shrink = shrink
self.shrink_order = shrink_order
def copy(self):
return GlueSpec(
self.width,
self.stretch,
self.stretch_order,
self.shrink,
self.shrink_order)
def factory(cls, glue_type):
return cls._types[glue_type]
factory = classmethod(factory)
GlueSpec._types = {
'fil': GlueSpec(0., 1., 1, 0., 0),
'fill': GlueSpec(0., 1., 2, 0., 0),
'filll': GlueSpec(0., 1., 3, 0., 0),
'neg_fil': GlueSpec(0., 0., 0, 1., 1),
'neg_fill': GlueSpec(0., 0., 0, 1., 2),
'neg_filll': GlueSpec(0., 0., 0, 1., 3),
'empty': GlueSpec(0., 0., 0, 0., 0),
'ss': GlueSpec(0., 1., 1, -1., 1)
}
# Some convenient ways to get common kinds of glue
class Fil(Glue):
def __init__(self):
Glue.__init__(self, 'fil')
class Fill(Glue):
def __init__(self):
Glue.__init__(self, 'fill')
class Filll(Glue):
def __init__(self):
Glue.__init__(self, 'filll')
class NegFil(Glue):
def __init__(self):
Glue.__init__(self, 'neg_fil')
class NegFill(Glue):
def __init__(self):
Glue.__init__(self, 'neg_fill')
class NegFilll(Glue):
def __init__(self):
Glue.__init__(self, 'neg_filll')
class SsGlue(Glue):
def __init__(self):
Glue.__init__(self, 'ss')
class HCentered(Hlist):
"""
A convenience class to create an :class:`Hlist` whose contents are
centered within its enclosing box.
"""
def __init__(self, elements):
Hlist.__init__(self, [SsGlue()] + elements + [SsGlue()],
do_kern=False)
class VCentered(Vlist):
"""
A convenience class to create a :class:`Vlist` whose contents are
centered within its enclosing box.
"""
def __init__(self, elements):
Vlist.__init__(self, [SsGlue()] + elements + [SsGlue()])
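# Illustrative note: the two centering helpers above work purely through the
# 'ss' glue they add on each side. Re-packing the list to a fixed size
# stretches (or shrinks) those two glue nodes by equal amounts, which is what
# centers the content, e.g.
#
#     centered = HCentered([Hbox(1.0)])
#     centered.hpack(3.0, 'exactly')   # each SsGlue absorbs 1.0 of the slack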
class Kern(Node):
"""
A :class:`Kern` node has a width field to specify a (normally
negative) amount of spacing. This spacing correction appears in
horizontal lists between letters like A and V when the font
designer said that it looks better to move them closer together or
further apart. A kern node can also appear in a vertical list,
when its *width* denotes additional spacing in the vertical
direction.
"""
height = 0
depth = 0
def __init__(self, width):
Node.__init__(self)
self.width = width
def __repr__(self):
return "k%.02f" % self.width
def shrink(self):
Node.shrink(self)
if self.size < NUM_SIZE_LEVELS:
self.width *= SHRINK_FACTOR
def grow(self):
Node.grow(self)
self.width *= GROW_FACTOR
class SubSuperCluster(Hlist):
"""
    :class:`SubSuperCluster` is a sort of hack to get around the fact
    that this code does not do a two-pass parse like TeX. This lets us store
enough information in the hlist itself, namely the nucleus, sub-
and super-script, such that if another script follows that needs
to be attached, it can be reconfigured on the fly.
"""
def __init__(self):
self.nucleus = None
self.sub = None
self.super = None
Hlist.__init__(self, [])
class AutoHeightChar(Hlist):
"""
:class:`AutoHeightChar` will create a character as close to the
given height and depth as possible. When using a font with
multiple height versions of some characters (such as the BaKoMa
fonts), the correct glyph will be selected, otherwise this will
always just return a scaled version of the glyph.
"""
def __init__(self, c, height, depth, state, always=False, factor=None):
alternatives = state.font_output.get_sized_alternatives_for_symbol(
state.font, c)
state = state.copy()
target_total = height + depth
for fontname, sym in alternatives:
state.font = fontname
char = Char(sym, state)
if char.height + char.depth >= target_total:
break
if factor is None:
factor = target_total / (char.height + char.depth)
state.fontsize *= factor
char = Char(sym, state)
shift = (depth - char.depth)
Hlist.__init__(self, [char])
self.shift_amount = shift
class AutoWidthChar(Hlist):
"""
:class:`AutoWidthChar` will create a character as close to the
given width as possible. When using a font with multiple width
versions of some characters (such as the BaKoMa fonts), the
correct glyph will be selected, otherwise this will always just
return a scaled version of the glyph.
"""
def __init__(self, c, width, state, always=False, char_class=Char):
alternatives = state.font_output.get_sized_alternatives_for_symbol(
state.font, c)
state = state.copy()
for fontname, sym in alternatives:
state.font = fontname
char = char_class(sym, state)
if char.width >= width:
break
factor = width / char.width
state.fontsize *= factor
char = char_class(sym, state)
Hlist.__init__(self, [char])
self.width = char.width
class Ship(object):
"""
Once the boxes have been set up, this sends them to output. Since
boxes can be inside of boxes inside of boxes, the main work of
:class:`Ship` is done by two mutually recursive routines,
:meth:`hlist_out` and :meth:`vlist_out`, which traverse the
:class:`Hlist` nodes and :class:`Vlist` nodes inside of horizontal
and vertical boxes. The global variables used in TeX to store
state as it processes have become member variables here.
"""
def __call__(self, ox, oy, box):
self.max_push = 0 # Deepest nesting of push commands so far
self.cur_s = 0
self.cur_v = 0.
self.cur_h = 0.
self.off_h = ox
self.off_v = oy + box.height
self.hlist_out(box)
def clamp(value):
if value < -1000000000.:
return -1000000000.
if value > 1000000000.:
return 1000000000.
return value
clamp = staticmethod(clamp)
def hlist_out(self, box):
cur_g = 0
cur_glue = 0.
glue_order = box.glue_order
glue_sign = box.glue_sign
base_line = self.cur_v
left_edge = self.cur_h
self.cur_s += 1
self.max_push = max(self.cur_s, self.max_push)
clamp = self.clamp
for p in box.children:
if isinstance(p, Char):
p.render(self.cur_h + self.off_h, self.cur_v + self.off_v)
self.cur_h += p.width
elif isinstance(p, Kern):
self.cur_h += p.width
elif isinstance(p, List):
# node623
if len(p.children) == 0:
self.cur_h += p.width
else:
edge = self.cur_h
self.cur_v = base_line + p.shift_amount
if isinstance(p, Hlist):
self.hlist_out(p)
else:
# p.vpack(box.height + box.depth, 'exactly')
self.vlist_out(p)
self.cur_h = edge + p.width
self.cur_v = base_line
elif isinstance(p, Box):
# node624
rule_height = p.height
rule_depth = p.depth
rule_width = p.width
if isinf(rule_height):
rule_height = box.height
if isinf(rule_depth):
rule_depth = box.depth
if rule_height > 0 and rule_width > 0:
                    self.cur_v = base_line + rule_depth
p.render(self.cur_h + self.off_h,
self.cur_v + self.off_v,
rule_width, rule_height)
                    self.cur_v = base_line
self.cur_h += rule_width
elif isinstance(p, Glue):
# node625
glue_spec = p.glue_spec
rule_width = glue_spec.width - cur_g
if glue_sign != 0: # normal
if glue_sign == 1: # stretching
if glue_spec.stretch_order == glue_order:
cur_glue += glue_spec.stretch
cur_g = round(clamp(float(box.glue_set) * cur_glue))
elif glue_spec.shrink_order == glue_order:
cur_glue += glue_spec.shrink
cur_g = round(clamp(float(box.glue_set) * cur_glue))
rule_width += cur_g
self.cur_h += rule_width
self.cur_s -= 1
def vlist_out(self, box):
cur_g = 0
cur_glue = 0.
glue_order = box.glue_order
glue_sign = box.glue_sign
self.cur_s += 1
self.max_push = max(self.max_push, self.cur_s)
left_edge = self.cur_h
self.cur_v -= box.height
top_edge = self.cur_v
clamp = self.clamp
for p in box.children:
if isinstance(p, Kern):
self.cur_v += p.width
elif isinstance(p, List):
if len(p.children) == 0:
self.cur_v += p.height + p.depth
else:
self.cur_v += p.height
self.cur_h = left_edge + p.shift_amount
save_v = self.cur_v
p.width = box.width
if isinstance(p, Hlist):
self.hlist_out(p)
else:
self.vlist_out(p)
self.cur_v = save_v + p.depth
self.cur_h = left_edge
elif isinstance(p, Box):
rule_height = p.height
rule_depth = p.depth
rule_width = p.width
if isinf(rule_width):
rule_width = box.width
rule_height += rule_depth
if rule_height > 0 and rule_depth > 0:
self.cur_v += rule_height
p.render(self.cur_h + self.off_h,
self.cur_v + self.off_v,
rule_width, rule_height)
elif isinstance(p, Glue):
glue_spec = p.glue_spec
rule_height = glue_spec.width - cur_g
if glue_sign != 0: # normal
if glue_sign == 1: # stretching
if glue_spec.stretch_order == glue_order:
cur_glue += glue_spec.stretch
cur_g = round(clamp(float(box.glue_set) * cur_glue))
elif glue_spec.shrink_order == glue_order: # shrinking
cur_glue += glue_spec.shrink
cur_g = round(clamp(float(box.glue_set) * cur_glue))
rule_height += cur_g
self.cur_v += rule_height
elif isinstance(p, Char):
raise RuntimeError("Internal mathtext error: Char node found in vlist")
self.cur_s -= 1
ship = Ship()
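# Usage sketch (illustrative): once the parser below has built and packed a box
# tree, this module-level `ship` instance walks it and forwards every glyph and
# rule to the active Fonts backend via render_glyph / render_rect_filled, e.g.
#
#     ship(0, 0, box)   # box is the root Hlist; (0, 0) is the output origin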
##############################################################################
# PARSER
def Error(msg):
"""
    Helper function to raise parser errors.
"""
def raise_error(s, loc, toks):
raise ParseFatalException(s, loc, msg)
empty = Empty()
empty.setParseAction(raise_error)
return empty
class Parser(object):
"""
This is the pyparsing-based parser for math expressions. It
actually parses full strings *containing* math expressions, in
that raw text may also appear outside of pairs of ``$``.
The grammar is based directly on that in TeX, though it cuts a few
corners.
"""
_binary_operators = set('''
+ *
\\pm \\sqcap \\rhd
\\mp \\sqcup \\unlhd
\\times \\vee \\unrhd
\\div \\wedge \\oplus
\\ast \\setminus \\ominus
\\star \\wr \\otimes
\\circ \\diamond \\oslash
\\bullet \\bigtriangleup \\odot
\\cdot \\bigtriangledown \\bigcirc
\\cap \\triangleleft \\dagger
\\cup \\triangleright \\ddagger
\\uplus \\lhd \\amalg'''.split())
_relation_symbols = set('''
= < > :
\\leq \\geq \\equiv \\models
\\prec \\succ \\sim \\perp
\\preceq \\succeq \\simeq \\mid
\\ll \\gg \\asymp \\parallel
\\subset \\supset \\approx \\bowtie
\\subseteq \\supseteq \\cong \\Join
\\sqsubset \\sqsupset \\neq \\smile
\\sqsubseteq \\sqsupseteq \\doteq \\frown
\\in \\ni \\propto
\\vdash \\dashv \\dots'''.split())
_arrow_symbols = set('''
\\leftarrow \\longleftarrow \\uparrow
\\Leftarrow \\Longleftarrow \\Uparrow
\\rightarrow \\longrightarrow \\downarrow
\\Rightarrow \\Longrightarrow \\Downarrow
\\leftrightarrow \\longleftrightarrow \\updownarrow
\\Leftrightarrow \\Longleftrightarrow \\Updownarrow
\\mapsto \\longmapsto \\nearrow
\\hookleftarrow \\hookrightarrow \\searrow
\\leftharpoonup \\rightharpoonup \\swarrow
\\leftharpoondown \\rightharpoondown \\nwarrow
\\rightleftharpoons \\leadsto'''.split())
_spaced_symbols = _binary_operators | _relation_symbols | _arrow_symbols
_punctuation_symbols = set(r', ; . ! \ldotp \cdotp'.split())
_overunder_symbols = set(r'''
\sum \prod \coprod \bigcap \bigcup \bigsqcup \bigvee
\bigwedge \bigodot \bigotimes \bigoplus \biguplus
'''.split())
_overunder_functions = set(
r"lim liminf limsup sup max min".split())
_dropsub_symbols = set(r'''\int \oint'''.split())
_fontnames = set("rm cal it tt sf bf default bb frak circled scr regular".split())
_function_names = set("""
arccos csc ker min arcsin deg lg Pr arctan det lim sec arg dim
liminf sin cos exp limsup sinh cosh gcd ln sup cot hom log tan
coth inf max tanh""".split())
_ambi_delim = set("""
| \\| / \\backslash \\uparrow \\downarrow \\updownarrow \\Uparrow
\\Downarrow \\Updownarrow .""".split())
_left_delim = set(r"( [ \{ < \lfloor \langle \lceil".split())
_right_delim = set(r") ] \} > \rfloor \rangle \rceil".split())
def __init__(self):
p = Bunch()
# All forward declarations are here
p.accent = Forward()
p.ambi_delim = Forward()
p.apostrophe = Forward()
p.auto_delim = Forward()
p.binom = Forward()
p.bslash = Forward()
p.c_over_c = Forward()
p.customspace = Forward()
p.end_group = Forward()
p.float_literal = Forward()
p.font = Forward()
p.frac = Forward()
p.function = Forward()
p.genfrac = Forward()
p.group = Forward()
p.int_literal = Forward()
p.latexfont = Forward()
p.lbracket = Forward()
p.left_delim = Forward()
p.lbrace = Forward()
p.main = Forward()
p.math = Forward()
p.math_string = Forward()
p.non_math = Forward()
p.operatorname = Forward()
p.overline = Forward()
p.placeable = Forward()
p.rbrace = Forward()
p.rbracket = Forward()
p.required_group = Forward()
p.right_delim = Forward()
p.right_delim_safe = Forward()
p.simple = Forward()
p.simple_group = Forward()
p.single_symbol = Forward()
p.space = Forward()
p.sqrt = Forward()
p.stackrel = Forward()
p.start_group = Forward()
p.subsuper = Forward()
p.subsuperop = Forward()
p.symbol = Forward()
p.symbol_name = Forward()
p.token = Forward()
p.unknown_symbol = Forward()
# Set names on everything -- very useful for debugging
for key, val in vars(p).items():
if not key.startswith('_'):
val.setName(key)
p.float_literal <<= Regex(r"[-+]?([0-9]+\.?[0-9]*|\.[0-9]+)")
p.int_literal <<= Regex("[-+]?[0-9]+")
p.lbrace <<= Literal('{').suppress()
p.rbrace <<= Literal('}').suppress()
p.lbracket <<= Literal('[').suppress()
p.rbracket <<= Literal(']').suppress()
p.bslash <<= Literal('\\')
p.space <<= oneOf(list(six.iterkeys(self._space_widths)))
p.customspace <<= (Suppress(Literal(r'\hspace'))
- ((p.lbrace + p.float_literal + p.rbrace)
| Error(r"Expected \hspace{n}")))
unicode_range = "\U00000080-\U0001ffff"
p.single_symbol <<= Regex(r"([a-zA-Z0-9 +\-*/<>=:,.;!\?&'@()\[\]|%s])|(\\[%%${}\[\]_|])" %
unicode_range)
p.symbol_name <<= (Combine(p.bslash + oneOf(list(six.iterkeys(tex2uni)))) +
FollowedBy(Regex("[^A-Za-z]").leaveWhitespace() | StringEnd()))
p.symbol <<= (p.single_symbol | p.symbol_name).leaveWhitespace()
p.apostrophe <<= Regex("'+")
p.c_over_c <<= Suppress(p.bslash) + oneOf(list(six.iterkeys(self._char_over_chars)))
p.accent <<= Group(
Suppress(p.bslash)
+ oneOf(list(six.iterkeys(self._accent_map)) + list(self._wide_accents))
- p.placeable
)
p.function <<= Suppress(p.bslash) + oneOf(list(self._function_names))
p.start_group <<= Optional(p.latexfont) + p.lbrace
p.end_group <<= p.rbrace.copy()
p.simple_group <<= Group(p.lbrace + ZeroOrMore(p.token) + p.rbrace)
p.required_group<<= Group(p.lbrace + OneOrMore(p.token) + p.rbrace)
p.group <<= Group(p.start_group + ZeroOrMore(p.token) + p.end_group)
p.font <<= Suppress(p.bslash) + oneOf(list(self._fontnames))
p.latexfont <<= Suppress(p.bslash) + oneOf(['math' + x for x in self._fontnames])
p.frac <<= Group(
Suppress(Literal(r"\frac"))
- ((p.required_group + p.required_group) | Error(r"Expected \frac{num}{den}"))
)
p.stackrel <<= Group(
Suppress(Literal(r"\stackrel"))
- ((p.required_group + p.required_group) | Error(r"Expected \stackrel{num}{den}"))
)
p.binom <<= Group(
Suppress(Literal(r"\binom"))
- ((p.required_group + p.required_group) | Error(r"Expected \binom{num}{den}"))
)
p.ambi_delim <<= oneOf(list(self._ambi_delim))
p.left_delim <<= oneOf(list(self._left_delim))
p.right_delim <<= oneOf(list(self._right_delim))
p.right_delim_safe <<= oneOf(list(self._right_delim - set(['}'])) + [r'\}'])
p.genfrac <<= Group(
Suppress(Literal(r"\genfrac"))
- (((p.lbrace + Optional(p.ambi_delim | p.left_delim, default='') + p.rbrace)
+ (p.lbrace + Optional(p.ambi_delim | p.right_delim_safe, default='') + p.rbrace)
+ (p.lbrace + p.float_literal + p.rbrace)
+ p.simple_group + p.required_group + p.required_group)
| Error(r"Expected \genfrac{ldelim}{rdelim}{rulesize}{style}{num}{den}"))
)
p.sqrt <<= Group(
Suppress(Literal(r"\sqrt"))
- ((Optional(p.lbracket + p.int_literal + p.rbracket, default=None)
+ p.required_group)
| Error("Expected \sqrt{value}"))
)
p.overline <<= Group(
Suppress(Literal(r"\overline"))
            - (p.required_group | Error(r"Expected \overline{value}"))
)
p.unknown_symbol<<= Combine(p.bslash + Regex("[A-Za-z]*"))
p.operatorname <<= Group(
Suppress(Literal(r"\operatorname"))
- ((p.lbrace + ZeroOrMore(p.simple | p.unknown_symbol) + p.rbrace)
| Error("Expected \operatorname{value}"))
)
p.placeable <<= ( p.accent # Must be first
| p.symbol # Must be second
| p.c_over_c
| p.function
| p.group
| p.frac
| p.stackrel
| p.binom
| p.genfrac
| p.sqrt
| p.overline
| p.operatorname
)
p.simple <<= ( p.space
| p.customspace
| p.font
| p.subsuper
)
p.subsuperop <<= oneOf(["_", "^"])
p.subsuper <<= Group(
(Optional(p.placeable) + OneOrMore(p.subsuperop - p.placeable) + Optional(p.apostrophe))
| (p.placeable + Optional(p.apostrophe))
| p.apostrophe
)
p.token <<= ( p.simple
| p.auto_delim
| p.unknown_symbol # Must be last
)
p.auto_delim <<= (Suppress(Literal(r"\left"))
- ((p.left_delim | p.ambi_delim) | Error("Expected a delimiter"))
+ Group(ZeroOrMore(p.simple | p.auto_delim))
+ Suppress(Literal(r"\right"))
- ((p.right_delim | p.ambi_delim) | Error("Expected a delimiter"))
)
p.math <<= OneOrMore(p.token)
p.math_string <<= QuotedString('$', '\\', unquoteResults=False)
p.non_math <<= Regex(r"(?:(?:\\[$])|[^$])*").leaveWhitespace()
p.main <<= (p.non_math + ZeroOrMore(p.math_string + p.non_math)) + StringEnd()
# Set actions
for key, val in vars(p).items():
if not key.startswith('_'):
if hasattr(self, key):
val.setParseAction(getattr(self, key))
self._expression = p.main
self._math_expression = p.math
def parse(self, s, fonts_object, fontsize, dpi):
"""
Parse expression *s* using the given *fonts_object* for
output, at the given *fontsize* and *dpi*.
Returns the parse tree of :class:`Node` instances.
"""
self._state_stack = [self.State(fonts_object, 'default', 'rm', fontsize, dpi)]
self._em_width_cache = {}
try:
result = self._expression.parseString(s)
except ParseBaseException as err:
raise ValueError("\n".join([
"",
err.line,
" " * (err.column - 1) + "^",
six.text_type(err)]))
self._state_stack = None
self._em_width_cache = {}
self._expression.resetCache()
return result[0]
# The state of the parser is maintained in a stack. Upon
# entering and leaving a group { } or math/non-math, the stack
# is pushed and popped accordingly. The current state always
# exists in the top element of the stack.
class State(object):
"""
Stores the state of the parser.
States are pushed and popped from a stack as necessary, and
the "current" state is always at the top of the stack.
"""
def __init__(self, font_output, font, font_class, fontsize, dpi):
self.font_output = font_output
self._font = font
self.font_class = font_class
self.fontsize = fontsize
self.dpi = dpi
def copy(self):
return Parser.State(
self.font_output,
self.font,
self.font_class,
self.fontsize,
self.dpi)
def _get_font(self):
return self._font
def _set_font(self, name):
if name in ('rm', 'it', 'bf'):
self.font_class = name
self._font = name
font = property(_get_font, _set_font)
def get_state(self):
"""
Get the current :class:`State` of the parser.
"""
return self._state_stack[-1]
def pop_state(self):
"""
Pop a :class:`State` off of the stack.
"""
self._state_stack.pop()
def push_state(self):
"""
Push a new :class:`State` onto the stack which is just a copy
of the current state.
"""
self._state_stack.append(self.get_state().copy())
def main(self, s, loc, toks):
#~ print "finish", toks
return [Hlist(toks)]
def math_string(self, s, loc, toks):
# print "math_string", toks[0][1:-1]
return self._math_expression.parseString(toks[0][1:-1])
def math(self, s, loc, toks):
#~ print "math", toks
hlist = Hlist(toks)
self.pop_state()
return [hlist]
def non_math(self, s, loc, toks):
#~ print "non_math", toks
s = toks[0].replace(r'\$', '$')
symbols = [Char(c, self.get_state()) for c in s]
hlist = Hlist(symbols)
# We're going into math now, so set font to 'it'
self.push_state()
self.get_state().font = rcParams['mathtext.default']
return [hlist]
def _make_space(self, percentage):
# All spaces are relative to em width
state = self.get_state()
key = (state.font, state.fontsize, state.dpi)
width = self._em_width_cache.get(key)
if width is None:
metrics = state.font_output.get_metrics(
state.font, rcParams['mathtext.default'], 'm', state.fontsize, state.dpi)
width = metrics.advance
self._em_width_cache[key] = width
return Kern(width * percentage)
_space_widths = { r'\ ' : 0.3,
r'\,' : 0.4,
r'\;' : 0.8,
r'\quad' : 1.6,
r'\qquad' : 3.2,
r'\!' : -0.4,
r'\/' : 0.4 }
def space(self, s, loc, toks):
assert(len(toks)==1)
num = self._space_widths[toks[0]]
box = self._make_space(num)
return [box]
def customspace(self, s, loc, toks):
return [self._make_space(float(toks[0]))]
def symbol(self, s, loc, toks):
# print "symbol", toks
c = toks[0]
try:
char = Char(c, self.get_state())
except ValueError:
raise ParseFatalException(s, loc, "Unknown symbol: %s" % c)
if c in self._spaced_symbols:
return [Hlist( [self._make_space(0.2),
char,
self._make_space(0.2)] ,
do_kern = False)]
elif c in self._punctuation_symbols:
return [Hlist( [char,
self._make_space(0.2)] ,
do_kern = False)]
return [char]
def unknown_symbol(self, s, loc, toks):
# print "symbol", toks
c = toks[0]
raise ParseFatalException(s, loc, "Unknown symbol: %s" % c)
_char_over_chars = {
        # The first 2 entries in the tuple are (font, char, sizescale) for
        # the two symbols under and over. The third element is the space
        # between them (in multiples of underline height)
        r'AA' : ( ('rm', 'A', 1.0), (None, r'\circ', 0.5), 0.0),
}
def c_over_c(self, s, loc, toks):
sym = toks[0]
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
under_desc, over_desc, space = \
self._char_over_chars.get(sym, (None, None, 0.0))
if under_desc is None:
raise ParseFatalException("Error parsing symbol")
over_state = state.copy()
if over_desc[0] is not None:
over_state.font = over_desc[0]
over_state.fontsize *= over_desc[2]
over = Accent(over_desc[1], over_state)
under_state = state.copy()
if under_desc[0] is not None:
under_state.font = under_desc[0]
under_state.fontsize *= under_desc[2]
under = Char(under_desc[1], under_state)
width = max(over.width, under.width)
over_centered = HCentered([over])
over_centered.hpack(width, 'exactly')
under_centered = HCentered([under])
under_centered.hpack(width, 'exactly')
return Vlist([
over_centered,
Vbox(0., thickness * space),
under_centered
])
_accent_map = {
r'hat' : r'\circumflexaccent',
r'breve' : r'\combiningbreve',
r'bar' : r'\combiningoverline',
r'grave' : r'\combininggraveaccent',
r'acute' : r'\combiningacuteaccent',
r'ddot' : r'\combiningdiaeresis',
r'tilde' : r'\combiningtilde',
r'dot' : r'\combiningdotabove',
r'vec' : r'\combiningrightarrowabove',
r'"' : r'\combiningdiaeresis',
r"`" : r'\combininggraveaccent',
r"'" : r'\combiningacuteaccent',
r'~' : r'\combiningtilde',
r'.' : r'\combiningdotabove',
r'^' : r'\circumflexaccent',
r'overrightarrow' : r'\rightarrow',
r'overleftarrow' : r'\leftarrow'
}
_wide_accents = set(r"widehat widetilde widebar".split())
def accent(self, s, loc, toks):
assert(len(toks)==1)
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
if len(toks[0]) != 2:
raise ParseFatalException("Error parsing accent")
accent, sym = toks[0]
if accent in self._wide_accents:
accent = AutoWidthChar(
'\\' + accent, sym.width, state, char_class=Accent)
else:
accent = Accent(self._accent_map[accent], state)
centered = HCentered([accent])
centered.hpack(sym.width, 'exactly')
return Vlist([
centered,
Vbox(0., thickness * 2.0),
Hlist([sym])
])
def function(self, s, loc, toks):
#~ print "function", toks
self.push_state()
state = self.get_state()
state.font = 'rm'
hlist = Hlist([Char(c, state) for c in toks[0]])
self.pop_state()
hlist.function_name = toks[0]
return hlist
def operatorname(self, s, loc, toks):
self.push_state()
state = self.get_state()
state.font = 'rm'
# Change the font of Chars, but leave Kerns alone
for c in toks[0]:
if isinstance(c, Char):
c.font = 'rm'
c._update_metrics()
self.pop_state()
return Hlist(toks[0])
def start_group(self, s, loc, toks):
self.push_state()
# Deal with LaTeX-style font tokens
if len(toks):
self.get_state().font = toks[0][4:]
return []
def group(self, s, loc, toks):
grp = Hlist(toks[0])
return [grp]
required_group = simple_group = group
def end_group(self, s, loc, toks):
self.pop_state()
return []
def font(self, s, loc, toks):
assert(len(toks)==1)
name = toks[0]
self.get_state().font = name
return []
def is_overunder(self, nucleus):
if isinstance(nucleus, Char):
return nucleus.c in self._overunder_symbols
elif isinstance(nucleus, Hlist) and hasattr(nucleus, 'function_name'):
return nucleus.function_name in self._overunder_functions
return False
def is_dropsub(self, nucleus):
if isinstance(nucleus, Char):
return nucleus.c in self._dropsub_symbols
return False
def is_slanted(self, nucleus):
if isinstance(nucleus, Char):
return nucleus.is_slanted()
return False
def subsuper(self, s, loc, toks):
assert(len(toks)==1)
# print 'subsuper', toks
nucleus = None
sub = None
super = None
        # Pick all of the apostrophes out
napostrophes = 0
new_toks = []
for tok in toks[0]:
if isinstance(tok, six.string_types) and tok not in ('^', '_'):
napostrophes += len(tok)
else:
new_toks.append(tok)
toks = new_toks
if len(toks) == 0:
assert napostrophes
nucleus = Hbox(0.0)
elif len(toks) == 1:
if not napostrophes:
return toks[0] # .asList()
else:
nucleus = toks[0]
elif len(toks) == 2:
op, next = toks
nucleus = Hbox(0.0)
if op == '_':
sub = next
else:
super = next
elif len(toks) == 3:
nucleus, op, next = toks
if op == '_':
sub = next
else:
super = next
elif len(toks) == 5:
nucleus, op1, next1, op2, next2 = toks
if op1 == op2:
if op1 == '_':
raise ParseFatalException("Double subscript")
else:
raise ParseFatalException("Double superscript")
if op1 == '_':
sub = next1
super = next2
else:
super = next1
sub = next2
else:
raise ParseFatalException(
"Subscript/superscript sequence is too long. "
"Use braces { } to remove ambiguity.")
state = self.get_state()
rule_thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
xHeight = state.font_output.get_xheight(
state.font, state.fontsize, state.dpi)
if napostrophes:
if super is None:
super = Hlist([])
for i in range(napostrophes):
                super.children.extend(self.symbol(s, loc, [r'\prime']))
# Handle over/under symbols, such as sum or integral
if self.is_overunder(nucleus):
vlist = []
shift = 0.
width = nucleus.width
if super is not None:
super.shrink()
width = max(width, super.width)
if sub is not None:
sub.shrink()
width = max(width, sub.width)
if super is not None:
hlist = HCentered([super])
hlist.hpack(width, 'exactly')
vlist.extend([hlist, Kern(rule_thickness * 3.0)])
hlist = HCentered([nucleus])
hlist.hpack(width, 'exactly')
vlist.append(hlist)
if sub is not None:
hlist = HCentered([sub])
hlist.hpack(width, 'exactly')
vlist.extend([Kern(rule_thickness * 3.0), hlist])
shift = hlist.height
vlist = Vlist(vlist)
vlist.shift_amount = shift + nucleus.depth
result = Hlist([vlist])
return [result]
# Handle regular sub/superscripts
shift_up = nucleus.height - SUBDROP * xHeight
if self.is_dropsub(nucleus):
shift_down = nucleus.depth + SUBDROP * xHeight
else:
shift_down = SUBDROP * xHeight
if super is None:
# node757
sub.shrink()
x = Hlist([sub])
# x.width += SCRIPT_SPACE * xHeight
shift_down = max(shift_down, SUB1)
clr = x.height - (abs(xHeight * 4.0) / 5.0)
shift_down = max(shift_down, clr)
x.shift_amount = shift_down
else:
super.shrink()
x = Hlist([super, Kern(SCRIPT_SPACE * xHeight)])
# x.width += SCRIPT_SPACE * xHeight
clr = SUP1 * xHeight
shift_up = max(shift_up, clr)
clr = x.depth + (abs(xHeight) / 4.0)
shift_up = max(shift_up, clr)
if sub is None:
x.shift_amount = -shift_up
else: # Both sub and superscript
sub.shrink()
y = Hlist([sub])
# y.width += SCRIPT_SPACE * xHeight
shift_down = max(shift_down, SUB1 * xHeight)
clr = (2.0 * rule_thickness -
((shift_up - x.depth) - (y.height - shift_down)))
if clr > 0.:
shift_up += clr
shift_down += clr
if self.is_slanted(nucleus):
x.shift_amount = DELTA * (shift_up + shift_down)
x = Vlist([x,
Kern((shift_up - x.depth) - (y.height - shift_down)),
y])
x.shift_amount = shift_down
result = Hlist([nucleus, x])
return [result]
def _genfrac(self, ldelim, rdelim, rule, style, num, den):
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
rule = float(rule)
num.shrink()
den.shrink()
cnum = HCentered([num])
cden = HCentered([den])
width = max(num.width, den.width)
cnum.hpack(width, 'exactly')
cden.hpack(width, 'exactly')
vlist = Vlist([cnum, # numerator
Vbox(0, thickness * 2.0), # space
Hrule(state, rule), # rule
Vbox(0, thickness * 2.0), # space
cden # denominator
])
# Shift so the fraction line sits in the middle of the
# equals sign
metrics = state.font_output.get_metrics(
state.font, rcParams['mathtext.default'],
'=', state.fontsize, state.dpi)
shift = (cden.height -
((metrics.ymax + metrics.ymin) / 2 -
thickness * 3.0))
vlist.shift_amount = shift
result = [Hlist([vlist, Hbox(thickness * 2.)])]
if ldelim or rdelim:
if ldelim == '':
ldelim = '.'
if rdelim == '':
rdelim = '.'
return self._auto_sized_delimiter(ldelim, result, rdelim)
return result
def genfrac(self, s, loc, toks):
assert(len(toks)==1)
assert(len(toks[0])==6)
return self._genfrac(*tuple(toks[0]))
def frac(self, s, loc, toks):
assert(len(toks)==1)
assert(len(toks[0])==2)
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
num, den = toks[0]
return self._genfrac('', '', thickness, '', num, den)
def stackrel(self, s, loc, toks):
assert(len(toks)==1)
assert(len(toks[0])==2)
num, den = toks[0]
return self._genfrac('', '', 0.0, '', num, den)
def binom(self, s, loc, toks):
assert(len(toks)==1)
assert(len(toks[0])==2)
num, den = toks[0]
return self._genfrac('(', ')', 0.0, '', num, den)
def sqrt(self, s, loc, toks):
#~ print "sqrt", toks
root, body = toks[0]
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
# Determine the height of the body, and add a little extra to
# the height so it doesn't seem cramped
height = body.height - body.shift_amount + thickness * 5.0
depth = body.depth + body.shift_amount
check = AutoHeightChar(r'\__sqrt__', height, depth, state, always=True)
height = check.height - check.shift_amount
depth = check.depth + check.shift_amount
# Put a little extra space to the left and right of the body
padded_body = Hlist([Hbox(thickness * 2.0),
body,
Hbox(thickness * 2.0)])
rightside = Vlist([Hrule(state),
Fill(),
padded_body])
# Stretch the glue between the hrule and the body
rightside.vpack(height + (state.fontsize * state.dpi) / (100.0 * 12.0),
'exactly', depth)
# Add the root and shift it upward so it is above the tick.
# The value of 0.6 is a hard-coded hack ;)
if root is None:
root = Box(check.width * 0.5, 0., 0.)
else:
root = Hlist([Char(x, state) for x in root])
root.shrink()
root.shrink()
root_vlist = Vlist([Hlist([root])])
root_vlist.shift_amount = -height * 0.6
hlist = Hlist([root_vlist, # Root
# Negative kerning to put root over tick
Kern(-check.width * 0.5),
check, # Check
rightside]) # Body
return [hlist]
def overline(self, s, loc, toks):
assert(len(toks)==1)
assert(len(toks[0])==1)
body = toks[0][0]
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
height = body.height - body.shift_amount + thickness * 3.0
depth = body.depth + body.shift_amount
# Place overline above body
rightside = Vlist([Hrule(state),
Fill(),
Hlist([body])])
# Stretch the glue between the hrule and the body
rightside.vpack(height + (state.fontsize * state.dpi) / (100.0 * 12.0),
'exactly', depth)
hlist = Hlist([rightside])
return [hlist]
def _auto_sized_delimiter(self, front, middle, back):
state = self.get_state()
if len(middle):
height = max([x.height for x in middle])
depth = max([x.depth for x in middle])
factor = None
else:
height = 0
depth = 0
factor = 1.0
parts = []
# \left. and \right. aren't supposed to produce any symbols
if front != '.':
parts.append(AutoHeightChar(front, height, depth, state, factor=factor))
parts.extend(middle)
if back != '.':
parts.append(AutoHeightChar(back, height, depth, state, factor=factor))
hlist = Hlist(parts)
return hlist
def auto_delim(self, s, loc, toks):
#~ print "auto_delim", toks
front, middle, back = toks
return self._auto_sized_delimiter(front, middle.asList(), back)
###
##############################################################################
# MAIN
class MathTextParser(object):
_parser = None
_backend_mapping = {
'bitmap': MathtextBackendBitmap,
'agg' : MathtextBackendAgg,
'ps' : MathtextBackendPs,
'pdf' : MathtextBackendPdf,
'svg' : MathtextBackendSvg,
'path' : MathtextBackendPath,
'cairo' : MathtextBackendCairo,
'macosx': MathtextBackendAgg,
}
_font_type_mapping = {
'cm' : BakomaFonts,
'stix' : StixFonts,
'stixsans' : StixSansFonts,
'custom' : UnicodeFonts
}
def __init__(self, output):
"""
Create a MathTextParser for the given backend *output*.
"""
self._output = output.lower()
self._cache = maxdict(50)
def parse(self, s, dpi = 72, prop = None):
"""
Parse the given math expression *s* at the given *dpi*. If
*prop* is provided, it is a
:class:`~matplotlib.font_manager.FontProperties` object
specifying the "default" font to use in the math expression,
used for all non-math text.
The results are cached, so multiple calls to :meth:`parse`
with the same expression should be fast.
"""
# There is a bug in Python 3.x where it leaks frame references,
# and therefore can't handle this caching
if prop is None:
prop = FontProperties()
cacheKey = (s, dpi, hash(prop))
result = self._cache.get(cacheKey)
if result is not None:
return result
if self._output == 'ps' and rcParams['ps.useafm']:
font_output = StandardPsFonts(prop)
else:
backend = self._backend_mapping[self._output]()
fontset = rcParams['mathtext.fontset']
fontset_class = self._font_type_mapping.get(fontset.lower())
if fontset_class is not None:
font_output = fontset_class(prop, backend)
else:
raise ValueError(
"mathtext.fontset must be either 'cm', 'stix', "
"'stixsans', or 'custom'")
fontsize = prop.get_size_in_points()
# This is a class variable so we don't rebuild the parser
# with each request.
if self._parser is None:
self.__class__._parser = Parser()
box = self._parser.parse(s, font_output, fontsize, dpi)
font_output.set_canvas_size(box.width, box.height, box.depth)
result = font_output.get_results(box)
self._cache[cacheKey] = result
return result
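    # Example (sketch): with the 'path' backend the cached result unpacks the
    # same way math_to_image() below uses it:
    #
    #     parser = MathTextParser('path')
    #     width, height, depth, _, _ = parser.parse(r'$x^2 + \alpha$', dpi=72)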
def to_mask(self, texstr, dpi=120, fontsize=14):
"""
*texstr*
A valid mathtext string, eg r'IQ: $\sigma_i=15$'
*dpi*
The dots-per-inch to render the text
*fontsize*
The font size in points
Returns a tuple (*array*, *depth*)
- *array* is an NxM uint8 alpha ubyte mask array of
rasterized tex.
- depth is the offset of the baseline from the bottom of the
image in pixels.
"""
assert(self._output=="bitmap")
prop = FontProperties(size=fontsize)
ftimage, depth = self.parse(texstr, dpi=dpi, prop=prop)
x = ftimage.as_array()
return x, depth
def to_rgba(self, texstr, color='black', dpi=120, fontsize=14):
"""
*texstr*
A valid mathtext string, eg r'IQ: $\sigma_i=15$'
*color*
Any matplotlib color argument
*dpi*
The dots-per-inch to render the text
*fontsize*
The font size in points
Returns a tuple (*array*, *depth*)
- *array* is an NxM uint8 alpha ubyte mask array of
rasterized tex.
- depth is the offset of the baseline from the bottom of the
image in pixels.
"""
x, depth = self.to_mask(texstr, dpi=dpi, fontsize=fontsize)
r, g, b = mcolors.colorConverter.to_rgb(color)
RGBA = np.zeros((x.shape[0], x.shape[1], 4), dtype=np.uint8)
RGBA[:,:,0] = int(255*r)
RGBA[:,:,1] = int(255*g)
RGBA[:,:,2] = int(255*b)
RGBA[:,:,3] = x
return RGBA, depth
def to_png(self, filename, texstr, color='black', dpi=120, fontsize=14):
"""
Writes a tex expression to a PNG file.
Returns the offset of the baseline from the bottom of the
image in pixels.
*filename*
A writable filename or fileobject
*texstr*
A valid mathtext string, eg r'IQ: $\sigma_i=15$'
*color*
A valid matplotlib color argument
*dpi*
The dots-per-inch to render the text
*fontsize*
The font size in points
Returns the offset of the baseline from the bottom of the
image in pixels.
"""
rgba, depth = self.to_rgba(texstr, color=color, dpi=dpi, fontsize=fontsize)
numrows, numcols, tmp = rgba.shape
_png.write_png(rgba.tostring(), numcols, numrows, filename)
return depth
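    # Usage sketch (hypothetical output filename): render a formula straight to
    # a PNG file with the bitmap backend:
    #
    #     MathTextParser('bitmap').to_png('formula.png', r'$\frac{a}{b}$', dpi=200)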
def get_depth(self, texstr, dpi=120, fontsize=14):
"""
Returns the offset of the baseline from the bottom of the
image in pixels.
*texstr*
A valid mathtext string, eg r'IQ: $\sigma_i=15$'
*dpi*
The dots-per-inch to render the text
*fontsize*
The font size in points
"""
assert(self._output=="bitmap")
prop = FontProperties(size=fontsize)
ftimage, depth = self.parse(texstr, dpi=dpi, prop=prop)
return depth
def math_to_image(s, filename_or_obj, prop=None, dpi=None, format=None):
"""
Given a math expression, renders it in a closely-clipped bounding
box to an image file.
*s*
A math expression. The math portion should be enclosed in
dollar signs.
*filename_or_obj*
A filepath or writable file-like object to write the image data
to.
*prop*
If provided, a FontProperties() object describing the size and
style of the text.
*dpi*
Override the output dpi, otherwise use the default associated
with the output format.
*format*
The output format, e.g., 'svg', 'pdf', 'ps' or 'png'. If not
provided, will be deduced from the filename.
"""
from matplotlib import figure
# backend_agg supports all of the core output formats
from matplotlib.backends import backend_agg
if prop is None:
prop = FontProperties()
parser = MathTextParser('path')
width, height, depth, _, _ = parser.parse(s, dpi=72, prop=prop)
fig = figure.Figure(figsize=(width / 72.0, height / 72.0))
fig.text(0, depth/height, s, fontproperties=prop)
backend_agg.FigureCanvasAgg(fig)
fig.savefig(filename_or_obj, dpi=dpi, format=format)
return depth
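# Usage sketch (hypothetical output path):
#
#     math_to_image(r'Area: $\pi r^2$', 'area.png', dpi=200, format='png')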
|
mit
| 6,743,337,852,527,449,000
| 34.235275
| 117
| 0.521474
| false
| 3.82342
| false
| false
| false
|
arnavd96/Cinemiezer
|
myvenv/lib/python3.4/site-packages/music21/demos/trecento/largestAmbitus.py
|
1
|
2001
|
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Name: demos/trecento/largestAmbitus.py
# Purpose: find Trecento/ars nova pieces with large ambitus
#
# Authors: Michael Scott Cuthbert
#
# Copyright: Copyright © 2012 Michael Scott Cuthbert and the music21 Project
# License: LGPL or BSD, see license.txt
#-------------------------------------------------------------------------------
'''
On September 11, 2012, Camilla Cavicchi reported to me the finding of
a new fragment in the Ferrara archives. One unknown piece has an extraordinarily
large range in the top voice: a 15th within a few notes. The clefs can't
be read and the piece is an adaptation into
Stroke notation, so it's unlikely to have an exact match in the database
(also the piece is probably from the 1430s [MSC, guess, not CC], so it's
not likely to be in the Trecento database anyhow).
This demo uses the .analyze('ambitus') function of music21 to try
to find a match for the ambitus (or at least narrow down the search for others)
by finding all parts within pieces where the range is at least a 15th.
'''
from music21 import corpus, converter
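# Single-piece sketch of the same check (illustrative; assumes the named corpus
# entry is available in this music21 installation):
#
#     piece = converter.parse(corpus.getWork('bach/bwv66.6'))
#     ambitus = piece.parts[0].analyze('ambitus')   # an Interval
#     print(ambitus.diatonic.generic.undirected)    # diatonic size, e.g. 9 = a ninth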
def main():
trecentoFiles = corpus.getWork('trecento')
for t in trecentoFiles:
print (t)
tparsed = converter.parse(t)
for p in tparsed.parts:
ambi = p.analyze('ambitus')
distance = ambi.diatonic.generic.undirected
if distance >= 15:
print ("************ GOT ONE!: {0} ************".format(ambi))
elif distance >= 9:
print (ambi)
else:
pass
#-------------------------------------------------------------------------------
# define presented order in documentation
_DOC_ORDER = []
if __name__ == "__main__":
main()
#------------------------------------------------------------------------------
# eof
|
mit
| 5,355,574,308,488,821,000
| 37.215686
| 80
| 0.5285
| false
| 4.081633
| false
| false
| false
|
zhengjue/mytornado
|
study/2/filecmp/simple2.py
|
1
|
1828
|
#!/usr/bin/env python
import os, sys
import filecmp
import re
import shutil
holderlist=[]
def compareme(dir1, dir2):
dircomp=filecmp.dircmp(dir1,dir2)
only_in_one=dircomp.left_only
diff_in_one=dircomp.diff_files
dirpath=os.path.abspath(dir1)
    holderlist.extend(os.path.abspath(os.path.join(dir1, x)) for x in only_in_one)
    holderlist.extend(os.path.abspath(os.path.join(dir1, x)) for x in diff_in_one)
if len(dircomp.common_dirs) > 0:
for item in dircomp.common_dirs:
compareme(os.path.abspath(os.path.join(dir1,item)), \
os.path.abspath(os.path.join(dir2,item)))
return holderlist
def main():
if len(sys.argv) > 2:
dir1=sys.argv[1]
dir2=sys.argv[2]
else:
print "Usage: ", sys.argv[0], "datadir backupdir"
sys.exit()
source_files=compareme(dir1,dir2)
dir1=os.path.abspath(dir1)
if not dir2.endswith('/'): dir2=dir2+'/'
dir2=os.path.abspath(dir2)
destination_files=[]
createdir_bool=False
for item in source_files:
destination_dir=re.sub(dir1, dir2, item)
destination_files.append(destination_dir)
if os.path.isdir(item):
if not os.path.exists(destination_dir):
os.makedirs(destination_dir)
createdir_bool=True
if createdir_bool:
destination_files=[]
        holderlist[:] = []  # clear the module-level accumulator so the rescan does not duplicate entries
        source_files=[]
source_files=compareme(dir1,dir2)
for item in source_files:
destination_dir=re.sub(dir1, dir2, item)
destination_files.append(destination_dir)
print "update item:"
print source_files
copy_pair=zip(source_files,destination_files)
for item in copy_pair:
if os.path.isfile(item[0]):
shutil.copyfile(item[0], item[1])
if __name__ == '__main__':
main()
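# Example invocation (illustrative paths): copy anything new or changed in
# datadir into backupdir, creating missing directories first:
#
#     python simple2.py /path/to/datadir /path/to/backupdir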
|
gpl-3.0
| 5,692,140,856,921,574,000
| 28.015873
| 85
| 0.622538
| false
| 3.190227
| false
| false
| false
|
cjlee112/socraticqs2
|
mysite/pages/migrations/0008_listplugin.py
|
1
|
1139
|
from django.db import models, migrations
import djangocms_text_ckeditor.fields
class Migration(migrations.Migration):
dependencies = [
('cms', '0012_auto_20150607_2207'),
('pages', '0007_activelearningratesplugin'),
]
operations = [
migrations.CreateModel(
name='ListPlugin',
fields=[
('cmsplugin_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='cms.CMSPlugin', on_delete=models.CASCADE)),
('title', models.CharField(max_length=70, blank=True)),
('description_header', djangocms_text_ckeditor.fields.HTMLField(blank=True)),
('list_type', models.CharField(default='list-questions', max_length=20, choices=[('list-questions', 'list-questions')])),
('list_text', djangocms_text_ckeditor.fields.HTMLField()),
('description_footer', djangocms_text_ckeditor.fields.HTMLField(blank=True)),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
]
|
apache-2.0
| 4,625,556,113,305,008,000
| 39.678571
| 174
| 0.592625
| false
| 4.082437
| false
| false
| false
|
Macainian/BaseDjangoProject
|
website/apps/search_filter_sort/views/class_based/BaseBrowseView.py
|
1
|
10170
|
import operator
import logging
from functools import reduce
from django.db.models import Q
from django.views.generic import ListView
from django.conf import settings
from website.apps.search_filter_sort.utils.misc import class_strings_to_class, convert_age_to_date
from website.mixins import LoginRequiredMixin
logger = logging.getLogger(__name__)
USER_SEARCH_LIST_DEFAULT = ["username", "first_name", "last_name", "email"]
if hasattr(settings, "USER_SEARCH_LIST"):
USER_SEARCH_LIST = settings.USER_SEARCH_LIST
else:
USER_SEARCH_LIST = USER_SEARCH_LIST_DEFAULT
class BaseBrowseView(LoginRequiredMixin, ListView):
template_name = None
model = None
should_override_pagination = False
searches = []
filters = []
filter_names = []
sorts = []
default_sort_by = []
default_pagination = 25
search_by = None
using_filters = None
def get_context_data(self, **kwargs):
context = super(BaseBrowseView, self).get_context_data(**kwargs)
# check_search_fields()
context["paginate_by"] = self.paginate_by
context["search_by"] = self.search_by
context["filters"] = self.filters
context["filter_names"] = self.filter_names
context["using_filters"] = self.using_filters
context["default_pagination"] = self.default_pagination
return context
def get_queryset(self):
self.searches = self.search_fields(self.model, [])
if not self.should_override_pagination:
try:
self.paginate_by = int(self.request.GET.get("paginate_by", self.default_pagination))
except:
self.paginate_by = self.default_pagination
should_return_empty = self.request.GET.get("__RETURN_EMPTY__", None)
if should_return_empty:
return self.model.objects.none()
search_bys = self.request.GET.get("search_by", None)
filter_names = self.request.GET.getlist("filter_name", None)
filter_values = self.request.GET.getlist("filter_value", None)
sort_bys = self.request.GET.getlist("sort_by", self.default_sort_by)
search_list = self.get_search_list(search_bys)
filter_list = self.get_filter_list(filter_names, filter_values)
sort_list = self.get_sort_list(sort_bys)
# Search, filter, sort
if search_list:
list_of_search_bys_Q = [Q(**{key: value}) for key, value in search_list.items()]
search_reduce = reduce(operator.or_, list_of_search_bys_Q)
else:
search_reduce = None
if filter_list:
list_of_filter_bys_Q = [[Q(**{key: value}) for value in array] for key, array in filter_list.items()]
reduced_filters = []
for array in list_of_filter_bys_Q:
reduced_filters.append(reduce(operator.or_, array))
filter_reduce = reduce(operator.and_, reduced_filters)
self.using_filters = True
else:
filter_reduce = None
self.using_filters = False
if search_reduce and filter_reduce:
queryset = self.model.objects.filter(search_reduce).filter(filter_reduce).distinct().order_by(*sort_list)
elif search_reduce:
queryset = self.model.objects.filter(search_reduce).distinct().order_by(*sort_list)
elif filter_reduce:
queryset = self.model.objects.filter(filter_reduce).distinct().order_by(*sort_list)
else:
queryset = self.model.objects.order_by(*sort_list)
return queryset
def get_search_list(self, search_bys):
# Determine search_list
search_list = {}
if search_bys:
self.search_by = search_bys
search_terms = []
for term in search_bys.split():
search_terms.append(term)
for field in self.searches:
field += "__icontains"
for term in search_terms:
search_list[field] = term
else:
self.search_by = ""
return search_list
def get_filter_list(self, filter_names, filter_values):
# Determine filter_list
filter_list = {}
self.define_filters()
for i in range(len(filter_names)):
filter_name = filter_names[i]
# This is only false if there are more filter_names than filter_values. Should be equal.
if i < len(filter_values):
values = filter_values[i].split(",")
if "__lte_age" in filter_name or "__lt_age" in filter_name:
values = [convert_age_to_date(int(filter_values[i]))]
filter_name = filter_name.replace("__lte_age", "__lte")
filter_name = filter_name.replace("__lt_age", "__lt")
elif "__lte_number" in filter_name or "__lt_number" in filter_name:
filter_name = filter_name.replace("__lte_number", "__lte")
filter_name = filter_name.replace("__lt_number", "__lt")
if "__gte_age" in filter_name or "__gt_age" in filter_name:
values = [convert_age_to_date(int(filter_values[i]) + 1)]
filter_name = filter_name.replace("__gte_age", "__gte")
filter_name = filter_name.replace("__gt_age", "__gt")
elif "__gte_number" in filter_name or "__gt_number" in filter_name:
filter_name = filter_name.replace("__gte_number", "__gte")
filter_name = filter_name.replace("__gt_number", "__gt")
new_values = []
for value in values:
if value == "__NONE_OR_BLANK__":
new_values.append("")
value = None
elif value == "__NONE__":
value = None
elif value == "__BLANK__":
value = ""
elif value == "__TRUE__":
value = True
elif value == "__FALSE__":
value = False
new_values.append(value)
values = new_values
filter_list[filter_name] = values
else:
break
return filter_list
def get_sort_list(self, sort_bys):
# Determine sort_list
sort_list = list(sort_bys)
count = 0
for i in range(len(sort_bys)):
if "-" in sort_bys[i]:
base_sort = sort_bys[i].split("-")[1]
else:
base_sort = sort_bys[i]
if base_sort not in self.sorts:
sort_list.remove(sort_bys[i])
logger.debug("Sort of " + base_sort + " is not in the sorts.")
count -= 1
elif "last_name" in sort_bys[i]: # Special clause for last_names/first_names
sort_list.insert(count, sort_bys[i].replace("last_name", "first_name"))
count += 1
elif base_sort == "birthday": # Special clause for birthday/age. Need to reverse order because it is backwards for some reason.
if sort_bys[i] == "birthday":
sort_list[count] = "-birthday"
else:
sort_list[count] = "birthday"
count += 1
return sort_list
def define_filters(self):
self.filters = []
self.filter_names = []
def add_select_filter(self, html_name, filter_name, html_options_code):
html_code = '<select class="multi-select form-control" id="' + filter_name + '-filter" name="' + filter_name + '_filter" autocomplete="off" multiple>'
html_code += html_options_code + '</select>'
self.filters.append(
{
"filter_name": filter_name,
"html_name": html_name,
"html_code": html_code
})
self.filter_names.append(filter_name)
def add_number_range_filter(self, html_name, lower_filter_name, upper_filter_name, max_width="50px", step_size="1"):
html_code = \
'<input type="number" class="range-filter form-control" id="' + lower_filter_name + '-filter" ' + \
'name="' + lower_filter_name + '" step="' + step_size + '" style="max-width: ' + max_width + '" />' + \
'<b> - </b>' + \
'<input type="number" class="range-filter form-control" id="' + upper_filter_name + '-filter" ' + \
'name="' + upper_filter_name + '" step="' + step_size + '" style="max-width: ' + max_width + '" />'
self.filters.append(
{
"html_name": html_name,
"html_code": html_code
})
self.filter_names.append(lower_filter_name)
self.filter_names.append(upper_filter_name)
def search_fields(self, class_object, list_of_used_classes):
object_search_list = []
if class_object in list_of_used_classes:
return []
else:
list_of_used_classes.append(class_object)
if class_object.__name__ == "User":
search_list = [search_item for search_item in USER_SEARCH_LIST]
else:
object_dependencies = class_object.object_dependencies()
for object_dependency in object_dependencies:
if object_dependency[2] == "User":
object_search_list += [
str(object_dependency[0] + "__{0}").format(search_item) for search_item in USER_SEARCH_LIST
]
else:
other_class_object = class_strings_to_class(object_dependency[1], object_dependency[2])
other_object_search_list = self.search_fields(other_class_object, list_of_used_classes)
object_search_list += [str(object_dependency[0] + "__{0}").format(search_item) for search_item in
other_object_search_list]
search_list = class_object.basic_search_list() + class_object.special_search_list() + object_search_list
return search_list
|
mit
| -8,668,220,080,105,243,000
| 37.669202
| 158
| 0.550147
| false
| 4.010252
| false
| false
| false
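A minimal usage sketch for BaseBrowseView above, assuming a hypothetical Patient model (with last_name, birthday, status and visit_count fields, plus the object_dependencies/basic_search_list/special_search_list hooks that search_fields() expects) and an assumed template path; none of these names come from the original app:

from website.apps.search_filter_sort.views.class_based.BaseBrowseView import BaseBrowseView
from myapp.models import Patient  # hypothetical model


class PatientBrowseView(BaseBrowseView):
    template_name = "myapp/patient_browse.html"   # assumed template
    model = Patient
    sorts = ["last_name", "birthday"]              # whitelist of sortable fields
    default_sort_by = ["last_name"]
    default_pagination = 50

    def define_filters(self):
        # Reset the filter lists, then register one select filter and one numeric range filter.
        super(PatientBrowseView, self).define_filters()
        self.add_select_filter(
            "Status", "status",
            '<option value="active">Active</option><option value="inactive">Inactive</option>')
        self.add_number_range_filter("Visits", "visit_count__gte_number", "visit_count__lte_number")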
|
luzhijun/Optimization
|
cma-es/batchcompute_python_sdk/vp/testWork.py
|
1
|
1368
|
#!usr/bin/env python
#encoding: utf-8
import os
import sys
import config as cfg
from math import sqrt
from simple_oss import SimpleOss
import json
TASK_ID = os.environ.get('ALI_DIKU_TASK_ID')
INSTANCE_COUNT = int(os.environ.get('INSTANCE_COUNT'))
INSTANCE_ID = int(os.environ.get('ALI_DIKU_INSTANCE_ID'))
OSS_HOST = os.environ.get('ALI_DIKU_OSS_HOST')
oss_clnt = SimpleOss(cfg.OSS_HOST, cfg.ID, cfg.KEY)
def get_json(filePath,instance_count, instance_id):
json_cfg=oss_clnt.download_str(cfg.BUCKET,cfg.DATA_PATH)
return json.loads(json_cfg)
def find_task():
is_prime = lambda x: 0 not in [ x%d for d in range(2, int(sqrt(x))+1)]
s, e = get_range(cfg.DATA_START, cfg.DATA_END, INSTANCE_COUNT, INSTANCE_ID)
f = open('result.txt', 'w')
for num in xrange(s, e):
if is_prime(num):
f.write(str(num) + '\n')
f.close()
oss_clnt.upload(cfg.OSS_BUCKET, 'result.txt', cfg.FIND_OUTPUT_PATH%INSTANCE_ID)
return 0
def count_task():
s = ""
for instance_id in range(INSTANCE_COUNT):
        instance_result = oss_clnt.download_str(cfg.BUCKET, cfg.FIND_OUTPUT_PATH%instance_id)
s+=instance_result
oss_clnt.upload_str(cfg.OSS_BUCKET, s, cfg.COUNT_OUTPUT_PATH)
def main():
if TASK_ID == 'Find':
find_task()
else:
count_task()
return 0
if __name__ == '__main__':
sys.exit(main())
|
apache-2.0
| -6,602,918,301,452,156,000
| 26.938776
| 85
| 0.647661
| false
| 2.774848
| false
| false
| false
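find_task() above calls get_range(), which is neither defined nor imported in this file; a plausible sketch of that helper, assuming it splits the [start, end) interval evenly across instances:

def get_range(start, end, instance_count, instance_id):
    # Split [start, end) into instance_count contiguous chunks and return the
    # (s, e) bounds for this instance; the last instance takes any remainder.
    chunk = (end - start) // instance_count
    s = start + instance_id * chunk
    e = end if instance_id == instance_count - 1 else s + chunk
    return s, e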
|
boreq/archive_chan
|
archive_chan/management/commands/archive_chan_update.py
|
1
|
2562
|
import datetime, sys
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.utils.timezone import utc
from tendo import singleton
from archive_chan.models import Board, Update
from archive_chan.lib.scraper import BoardScraper
from archive_chan.settings import AppSettings
class Command(BaseCommand):
args = ''
    help = 'Scrapes threads from all active boards. This command should be run periodically to download new threads, posts and images.'
option_list = BaseCommand.option_list + (
make_option(
'--progress',
action="store_true",
dest='progress',
help='Display progress.',
),
)
def handle(self, *args, **options):
# Prevent multiple instances. Apparently fcntl.lockf is very useful and does completely nothing.
me = singleton.SingleInstance()
boards = Board.objects.filter(active=True)
# Show progress?
if options['progress']:
progress = True
else:
progress = False
# Get new data for each board.
for board in boards:
# Info.
processing_start = datetime.datetime.utcnow().replace(tzinfo=utc)
update = Update.objects.create(board=board, start=processing_start, used_threads = AppSettings.get('SCRAPER_THREADS_NUMBER'))
try:
# Actual update.
scraper = BoardScraper(board, progress=progress)
scraper.update()
# Info.
update.status = Update.COMPLETED
except Exception as e:
sys.stderr.write('%s\n' % (e))
finally:
# Info.
try:
if update.status != Update.COMPLETED:
update.status = Update.FAILED
processing_end = datetime.datetime.utcnow().replace(tzinfo=utc)
processing_time = processing_end - processing_start
update.end = processing_end
update = scraper.stats.add_to_record(update, processing_time)
except Exception as e:
sys.stderr.write('%s\n' % (e))
finally:
update.save()
# Everything below is just info.
print('%s Board: %s %s' % (
datetime.datetime.now(),
board,
scraper.stats.get_text(processing_time),
))
|
gpl-2.0
| -655,112,818,715,229,200
| 32.272727
| 137
| 0.559329
| false
| 4.726937
| false
| false
| false
|
maxogden/Locker
|
Connectors/XMPP/webservice.py
|
1
|
2723
|
import sys
import json
import logging
from flask import Flask, render_template, request, redirect, url_for
sys.path.append("../../Common/python")
import lockerfs
import client
import util
app = Flask(__name__)
@app.route("/setupAuth")
def setupAuth():
return render_template("setupAuth.html")
@app.route("/save", methods=['POST'])
def saveAuth():
logging.info("Saving auth")
secrets = lockerfs.loadJsonFile("secrets.json")
secrets["jid"] = request.form["jid"]
secrets["password"] = request.form["password"]
lockerfs.saveJsonFile("secrets.json", secrets)
start()
return json.dumps("started")
def start():
logging.info("Starting")
secrets = lockerfs.loadJsonFile("secrets.json")
app.client = client.Client(app.info, jid=secrets["jid"], password=secrets["password"])
if app.client.connect():
app.client.process(threaded=True)
app.started = True
else:
util.die("XMPP connection failed")
@app.route("/")
def index():
if app.started:
return json.dumps({
"/messages" : "All messages received. Filter by: body, from, mucnick, mucroom, to, type, id, subject",
"/statuses" : "All status updates received. Filter by: status, from, show, priority, type, id",
"/roster" : "Current roster (at time of login)"
})
else:
return redirect(url_for("setupAuth"))
def matches_arg(value, arg):
# either a literal match or a range [lo,hi]
    if type(arg) is list and len(arg) == 2:
(lo, hi) = arg
return (lo <= value) and (value < hi)
else:
return (value == arg)
@app.route("/messages")
def messages():
messages = app.client.messages
for key, value in request.args.items():
messages = [msg for msg in messages if matches_arg(msg[key], json.loads(value))]
return json.dumps(messages)
@app.route("/statuses")
def statuses():
statuses = app.client.statuses
for key, value in request.args.items():
statuses = [sts for sts in statuses if matches_arg(sts[key], json.loads(value))]
return json.dumps(statuses)
@app.route("/roster")
def roster():
return json.dumps(app.client.fetch_roster())
def runService(info):
app.info = info
app.client = None
app.started = False
secrets = lockerfs.loadJsonFile("secrets.json")
if "jid" in secrets and "password" in secrets:
start()
else:
logging.info("No auth details available")
app.debug = True
app.run(port=app.info["port"], use_reloader=False)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO,
format='%(levelname)-8s %(message)s')
runService({"port": 7474})
|
bsd-3-clause
| -8,044,905,973,466,973,000
| 28.597826
| 118
| 0.632758
| false
| 3.621011
| false
| false
| false
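Once the connector is running (port 7474, as in the __main__ block above), its endpoints can be queried over HTTP; a small Python 2 sketch, with the filter value as an illustrative assumption:

import json
import urllib
import urllib2

# Roster as captured at login time.
roster = json.load(urllib2.urlopen('http://localhost:7474/roster'))

# Messages filtered by type; filter values are JSON-encoded, as matches_arg() expects.
query = urllib.quote(json.dumps('chat'))
messages = json.load(urllib2.urlopen('http://localhost:7474/messages?type=' + query))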
|
doingmathwithpython/code
|
chapter3/solutions/stats.py
|
1
|
1385
|
'''
stats.py
Python module with functions for calculating common statistical measures
'''
from collections import Counter
def mean(numbers):
s = sum(numbers)
N = len(numbers)
mean = s/N
return mean
def median(numbers):
    # find the number of items
N = len(numbers)
# sort the list in ascending order
numbers = sorted(numbers)
# find the median
if N % 2 == 0:
# if N is even
m1 = N/2
m2 = (N/2) + 1
# convert to integer, match position
m1 = int(m1) - 1
m2 = int(m2) - 1
median = (numbers[m1] + numbers[m2])/2
else:
m = (N+1)/2
# convert to integer, match position
m = int(m) - 1
median = numbers[m]
return median
def mode(numbers):
c = Counter(numbers)
mode = c.most_common(1)
return mode[0][0]
def find_differences(numbers):
m = mean(numbers)
# find the differences from the mean
diff = []
for num in numbers:
diff.append(num-m)
return diff
def variance_sd(numbers):
# find the list of differences
diff = find_differences(numbers)
# find the squared differences
squared_diff = []
for d in diff:
squared_diff.append(d**2)
# find the variance
sum_squared_diff = sum(squared_diff)
variance = sum_squared_diff/len(numbers)
return variance, variance**0.5
|
mit
| 209,777,365,508,709,760
| 20.640625
| 72
| 0.59278
| false
| 3.524173
| false
| false
| false
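A quick usage sketch for the module above, assuming it is importable as stats.py:

from stats import mean, median, mode, variance_sd

numbers = [4, 8, 15, 15, 16, 23, 42]
print(mean(numbers))         # arithmetic mean
print(median(numbers))       # middle value of the sorted list -> 15
print(mode(numbers))         # most frequent value -> 15
print(variance_sd(numbers))  # (variance, standard deviation) tuple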
|
jejimenez/invetronic
|
templated_docs_adecuated/templatetags/templated_docs_tags.py
|
1
|
2274
|
# --coding: utf8--
import os.path
from django.db.models.fields.files import ImageFieldFile
from django.utils.safestring import mark_safe
from django.utils.html import escape
from django import template
register = template.Library()
PIXEL_TO_CM = 0.00846666
class ImageNode(template.Node):
def __init__(self, value):
self.value = template.Variable(value)
def render(self, context):
try:
self.value = self.value.resolve(context)
if not isinstance(self.value, ImageFieldFile):
raise template.VariableDoesNotExist(
'Image argument should be an ImageField')
images = context.dicts[0].setdefault('ootemplate_imgs', {})
id = len(images)
z_index = id + 3 # Magic
width = self.value.width * PIXEL_TO_CM
height = self.value.height * PIXEL_TO_CM
filename = os.path.basename(self.value.name)
basename = os.path.splitext(filename)[0]
images[self.value.path] = self.value
img_frame = '<draw:frame draw:style-name="gr%(z_index)s" ' \
'draw:name="%(basename)s" ' \
'draw:id="id%(id)s" ' \
'text:anchor-type="paragraph" svg:width="%(width)fcm" ' \
'svg:height="%(height)fcm" draw:z-index="%(z_index)s">' \
'<draw:image xlink:href="Pictures/%(filename)s" ' \
'xlink:type="simple" xlink:show="embed" ' \
'xlink:actuate="onLoad"/></draw:frame>'
return (img_frame) % locals()
except template.VariableDoesNotExist:
return ''
@register.tag
def image(parser, token):
"""
Insert an image from a ImageField into a document.
"""
try:
tag_name, value = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError(
'%r tag requires a file as an argument' % tag_name)
return ImageNode(value)
@register.filter
def lolinebreaks(value):
"""
LibreOffice-flavored ``linebreaks`` filter.
"""
if not value:
return ''
paragraphs = [line for line in escape(value).splitlines()]
return mark_safe('<text:line-break/>'.join(paragraphs))
|
mit
| 3,747,650,145,073,412,600
| 31.956522
| 77
| 0.583553
| false
| 3.880546
| false
| false
| false
|
haxandsnax/AnkhUtils
|
AnkhUtils/__init__.py
|
1
|
4831
|
import json
import os
import codecs
import math
#---------------------------------------
# inject decorator inserts the Utils object so it can be
# used directly in your Execute, Init functions etc.
#---------------------------------------
def inject(util):
def injectfn(fn):
def wrapped(*args, **kwargs):
util.SetData(fn.__globals__.get('Parent'), args, kwargs)
fn.__globals__['Utils'] = util
return fn(*args, **kwargs)
return wrapped
return injectfn
#---------------------------------------
# Call this to create the initial Utils object
#---------------------------------------
def setup(script, command):
return UtilClass(script, command)
#---------------------------------------
# Add functions to this class to expand functionality
#---------------------------------------
class UtilClass:
def __init__(self, scriptname, commandnames):
self.ScriptName = scriptname
if isinstance(commandnames, basestring):
self.CommandNames = [commandnames.lower()]
else:
self.CommandNames = map(lambda x: x.lower(), commandnames)
self.Settings = dict()
self.Data = None
# Called when injected into Execute, Init etc
# Extracts Data object from parameter if it exists, such as in Execute
def SetData(self, Parent, args, kwargs):
self.Parent = Parent
for arg in args:
try:
if 'User' in dir(arg):
self.Data = arg
except Exception as e:
self.Log('[AnkhUtils] Unable to set data object. Error: {0}'.format(str(e)))
def ProcessCommand(self):
# No data, so it's not a command
if self.Data is None:
return
if not self.Data.IsChatMessage() or self.Data.GetParamCount() == 0:
return
match = None
command = self.Data.GetParam(0).lower()
for name in self.CommandNames:
if command == name:
match = command
break
if not match:
return
params = [self.Data.GetParam(i) for i in range(1, self.Data.GetParamCount())]
return CommandMatch(self.Data.User, match, self.CommandNames, params)
# Logging with string formatting. Also keeps you from having to add
# ScriptName parameter every time
# Usage: Utils.Log('{0} + {0} = {1}', 2, 4)
def Log(self, str, *args):
if len(args) > 0:
try:
self.Parent.Log(self.ScriptName, str.format(*args))
except Exception as e:
self.Parent.Log(self.ScriptName, '[AnkhUtils] Invalid format string or parameters for Utils.Log')
else:
self.Parent.Log(self.ScriptName, str)
# Allows you to set the settings object directly.
def SetSettings(self, data):
self.Settings = json.loads(data)
# Loads settings from a file. Pass __file__ from your script
# to load relative to your script. Optionally override the filename
def ReloadSettings(self, base, filename='settings.json'):
try:
with codecs.open(os.path.join(os.path.dirname(base), filename), encoding='utf-8-sig') as jsonData:
self.SetSettings(jsonData.read())
return self.Settings
except Exception as e:
self.Log('[AnkhUtils] Error loading {0}: {1}'.format(filename, str(e)))
return
# Helper to get pretty formatted cooldown text from Seconds remaining
def CooldownText(self, cd, seconds=True, minutes=True, hours=True):
h = int(math.floor(cd/3600))
m = int(math.floor((cd%3600)/60))
s = cd % 60
hourtext = '{0} hour{1}'.format(h, '' if h == 1 else 's') if hours and h > 0 else ''
minutetext = '{0} minute{1}'.format(m, '' if m == 1 else 's') if minutes and m > 0 else ''
secondtext = '{0} second{1}'.format(s, '' if s == 1 else 's') if seconds and s > 0 else ''
if hours and h > 0 and minutes and m > 0:
minutetext = ' '+minutetext
if seconds and s > 0 and ((minutes and m > 0) or (hours and h > 0)):
secondtext = ' '+secondtext
return '{0}{1}{2}'.format(hourtext, minutetext, secondtext)
# Sends a Twitch or Discord chat message or a whisper/DM depending on where the
# initiating message came from
def ChatOrWhisper(self, msg, discord=True, whisper=True):
if self.Data is None:
self.Parent.SendTwitchMessage(msg)
return
whisper = whisper and self.Data.IsWhisper()
if self.Data.IsFromTwitch():
self.Parent.SendTwitchWhisper(self.Data.User, msg) if whisper else self.Parent.SendTwitchMessage(msg)
elif discord and self.Data.IsFromDiscord():
self.Parent.SendDiscordDM(self.Data.User, msg) if whisper else self.Parent.SendDiscordMessage(msg)
# Parsed commands object for use in ProcessCommand method
class CommandMatch:
def __init__(self, user, matched, commandnames, params):
self.CommandNames = commandnames
self.MatchedCommand = matched
self.Params = params
self.User = user
        self.Target = params[0] if len(params) > 0 else None
|
mit
| 618,498,824,165,726,100
| 35.598485
| 107
| 0.634651
| false
| 3.707598
| false
| false
| false
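A hedged sketch of how a chatbot script might use the module above; the script name, command words and reply text are illustrative assumptions, and the Parent object is supplied by the hosting bot in the script's globals at runtime:

import AnkhUtils

Utils = AnkhUtils.setup("ExampleScript", ["!hello", "!hi"])

@AnkhUtils.inject(Utils)
def Init():
    Utils.Log("{0} loaded", Utils.ScriptName)

@AnkhUtils.inject(Utils)
def Execute(data):
    # Only replies when the message matches one of the configured commands.
    match = Utils.ProcessCommand()
    if match:
        Utils.ChatOrWhisper("Hello, {0}!".format(match.User))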
|
hjweide/cifar-10-uncertainty
|
iter_funcs.py
|
1
|
2875
|
import lasagne
import theano
import theano.tensor as T
from lasagne import layers
from lasagne.regularization import regularize_network_params, l2
def create_iter_funcs_train(l_out, lr, mntm, wd):
X = T.tensor4('X')
y = T.ivector('y')
X_batch = T.tensor4('X_batch')
y_batch = T.ivector('y_batch')
y_hat = layers.get_output(l_out, X, deterministic=False)
# softmax loss
train_loss = T.mean(
T.nnet.categorical_crossentropy(y_hat, y))
# L2 regularization
train_loss += wd * regularize_network_params(l_out, l2)
train_acc = T.mean(
T.eq(y_hat.argmax(axis=1), y))
all_params = layers.get_all_params(l_out, trainable=True)
updates = lasagne.updates.nesterov_momentum(
train_loss, all_params, lr, mntm)
train_iter = theano.function(
inputs=[theano.Param(X_batch), theano.Param(y_batch)],
outputs=[train_loss, train_acc],
updates=updates,
givens={
X: X_batch,
y: y_batch,
},
)
return train_iter
def create_iter_funcs_valid(l_out, bs=None, N=50, mc_dropout=False):
X = T.tensor4('X')
y = T.ivector('y')
X_batch = T.tensor4('X_batch')
y_batch = T.ivector('y_batch')
if not mc_dropout:
y_hat = layers.get_output(l_out, X, deterministic=True)
else:
if bs is None:
raise ValueError('a fixed batch size is required for mc dropout')
X_repeat = T.extra_ops.repeat(X, N, axis=0)
y_sample = layers.get_output(
l_out, X_repeat, deterministic=False)
sizes = [X_repeat.shape[0] / X.shape[0]] * bs
y_sample_split = T.as_tensor_variable(
T.split(y_sample, sizes, bs, axis=0))
y_hat = T.mean(y_sample_split, axis=1)
valid_loss = T.mean(
T.nnet.categorical_crossentropy(y_hat, y))
valid_acc = T.mean(
T.eq(y_hat.argmax(axis=1), y))
valid_iter = theano.function(
inputs=[theano.Param(X_batch), theano.Param(y_batch)],
outputs=[valid_loss, valid_acc],
givens={
X: X_batch,
y: y_batch,
},
)
return valid_iter
def create_iter_funcs_test(l_out, bs, N=50):
X = T.tensor4('X')
X_batch = T.tensor4('X_batch')
X_repeat = T.extra_ops.repeat(X, N, axis=0)
y_sample = layers.get_output(
l_out, X_repeat, deterministic=False)
# the number of splits needs to be pre-defined
sizes = [X_repeat.shape[0] / X.shape[0]] * bs
y_sample_split = T.as_tensor_variable(
T.split(y_sample, sizes, bs, axis=0))
y_hat = T.mean(y_sample_split, axis=1)
#y_var = T.var(y_sample_split, axis=1)
test_iter = theano.function(
inputs=[theano.Param(X_batch)],
outputs=y_hat,
#outputs=[y_hat, y_var],
givens={
X: X_batch,
},
)
return test_iter
|
mit
| 9,025,105,275,118,694,000
| 26.644231
| 77
| 0.582609
| false
| 3.029505
| false
| false
| false
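A sketch of wiring the functions above to a small Lasagne network; the architecture and hyperparameters are illustrative assumptions, not taken from the original training script:

import lasagne
from lasagne import layers
from iter_funcs import create_iter_funcs_train, create_iter_funcs_valid

l_in = layers.InputLayer(shape=(None, 3, 32, 32))
l_hid = layers.DenseLayer(layers.DropoutLayer(l_in, p=0.5), num_units=256)
l_out = layers.DenseLayer(l_hid, num_units=10,
                          nonlinearity=lasagne.nonlinearities.softmax)

train_iter = create_iter_funcs_train(l_out, lr=0.01, mntm=0.9, wd=1e-4)
# A fixed batch size is required for MC dropout: each batch is repeated N times
# and the sampled predictions are averaged per example.
valid_iter = create_iter_funcs_valid(l_out, bs=128, N=50, mc_dropout=True)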
|
EDUlib/edx-ora2
|
openassessment/assessment/migrations/0023_assign_criteria_and_option_labels.py
|
1
|
14308
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"""
Default the criterion and option labels to the same value as "name".
"""
for criterion in orm['assessment.criterion'].objects.filter(label=""):
criterion.label = criterion.name
criterion.save()
for option in orm['assessment.criterionoption'].objects.filter(label=""):
option.label = option.name
option.save()
def backwards(self, orm):
""" The backwards migration does nothing. """
pass
models = {
'assessment.aiclassifier': {
'Meta': {'object_name': 'AIClassifier'},
'classifier_data': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'classifier_set': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'classifiers'", 'to': "orm['assessment.AIClassifierSet']"}),
'criterion': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['assessment.Criterion']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'assessment.aiclassifierset': {
'Meta': {'ordering': "['-created_at', '-id']", 'object_name': 'AIClassifierSet'},
'algorithm_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'rubric': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['assessment.Rubric']"})
},
'assessment.aigradingworkflow': {
'Meta': {'object_name': 'AIGradingWorkflow'},
'algorithm_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'assessment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'+'", 'null': 'True', 'to': "orm['assessment.Assessment']"}),
'classifier_set': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'+'", 'null': 'True', 'to': "orm['assessment.AIClassifierSet']"}),
'completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'essay_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'rubric': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['assessment.Rubric']"}),
'scheduled_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'student_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '36', 'blank': 'True'})
},
'assessment.aitrainingworkflow': {
'Meta': {'object_name': 'AITrainingWorkflow'},
'algorithm_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'classifier_set': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'+'", 'null': 'True', 'to': "orm['assessment.AIClassifierSet']"}),
'completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'scheduled_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'training_examples': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'+'", 'symmetrical': 'False', 'to': "orm['assessment.TrainingExample']"}),
'uuid': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '36', 'blank': 'True'})
},
'assessment.assessment': {
'Meta': {'ordering': "['-scored_at', '-id']", 'object_name': 'Assessment'},
'feedback': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '10000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rubric': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.Rubric']"}),
'score_type': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'scored_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'scorer_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
},
'assessment.assessmentfeedback': {
'Meta': {'object_name': 'AssessmentFeedback'},
'assessments': ('django.db.models.fields.related.ManyToManyField', [], {'default': 'None', 'related_name': "'assessment_feedback'", 'symmetrical': 'False', 'to': "orm['assessment.Assessment']"}),
'feedback_text': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '10000'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'default': 'None', 'related_name': "'assessment_feedback'", 'symmetrical': 'False', 'to': "orm['assessment.AssessmentFeedbackOption']"}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
},
'assessment.assessmentfeedbackoption': {
'Meta': {'object_name': 'AssessmentFeedbackOption'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'assessment.assessmentpart': {
'Meta': {'object_name': 'AssessmentPart'},
'assessment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parts'", 'to': "orm['assessment.Assessment']"}),
'criterion': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['assessment.Criterion']"}),
'feedback': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'option': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': "orm['assessment.CriterionOption']"})
},
'assessment.criterion': {
'Meta': {'ordering': "['rubric', 'order_num']", 'object_name': 'Criterion'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
'prompt': ('django.db.models.fields.TextField', [], {'max_length': '10000'}),
'rubric': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'criteria'", 'to': "orm['assessment.Rubric']"})
},
'assessment.criterionoption': {
'Meta': {'ordering': "['criterion', 'order_num']", 'object_name': 'CriterionOption'},
'criterion': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['assessment.Criterion']"}),
'explanation': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
'points': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'assessment.peerworkflow': {
'Meta': {'ordering': "['created_at', 'id']", 'object_name': 'PeerWorkflow'},
'completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'grading_completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'student_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
},
'assessment.peerworkflowitem': {
'Meta': {'ordering': "['started_at', 'id']", 'object_name': 'PeerWorkflowItem'},
'assessment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.Assessment']", 'null': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'graded_by'", 'to': "orm['assessment.PeerWorkflow']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'scored': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'scorer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'graded'", 'to': "orm['assessment.PeerWorkflow']"}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
},
'assessment.rubric': {
'Meta': {'object_name': 'Rubric'},
'content_hash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'structure_hash': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'})
},
'assessment.studenttrainingworkflow': {
'Meta': {'object_name': 'StudentTrainingWorkflow'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'student_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
},
'assessment.studenttrainingworkflowitem': {
'Meta': {'ordering': "['workflow', 'order_num']", 'unique_together': "(('workflow', 'order_num'),)", 'object_name': 'StudentTrainingWorkflowItem'},
'completed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order_num': ('django.db.models.fields.PositiveIntegerField', [], {}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'training_example': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.TrainingExample']"}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['assessment.StudentTrainingWorkflow']"})
},
'assessment.trainingexample': {
'Meta': {'object_name': 'TrainingExample'},
'content_hash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'options_selected': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['assessment.CriterionOption']", 'symmetrical': 'False'}),
'raw_answer': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'rubric': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessment.Rubric']"})
}
}
complete_apps = ['assessment']
symmetrical = True
|
agpl-3.0
| 5,719,073,179,449,297,000
| 79.836158
| 217
| 0.563042
| false
| 3.695248
| false
| false
| false
|
npapier/sbf
|
pak/mkdb/cityhash.py
|
1
|
3317
|
# SConsBuildFramework - Copyright (C) 2013, Nicolas Papier.
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation.
# Author Guillaume Brocker
#
# http://code.google.com/p/cityhash/
import os
import re
import shutil
import subprocess
# Version components definition
versionMajor = 1
versionMinor = 1
versionMaint = 0
# Package name and version definition
packageName = 'cityhash'
packageVersion = '{0}-{1}-{2}'.format(versionMajor, versionMinor, versionMaint)
# Defines the path to the source files
sourcePath = '{0}-{1}.{2}.{3}'.format(packageName, versionMajor, versionMinor, versionMaint )
# Defines the content of the SBF project file.
sconsDefaultOptions = """productName = '{name}'
type = 'static'
version = '{version}'""".format( name=packageName, version=packageVersion)
def patcher():
def _patcher():
global io
global os
global packageName
global re
global sconsDefaultOptions
global shutil
global sourcePath
# Creates a new directory tree for the compilation.
os.makedirs(packageName+'/include')
os.makedirs(packageName+'/src')
# Creates the SBF project options file
sconsDefaultoptionsFile = open( packageName+'/default.options', 'w' )
sconsDefaultoptionsFile.write( sconsDefaultOptions )
sconsDefaultoptionsFile.close()
# Copy the sconstruct file into the project
shutil.copy( os.getenv('SCONS_BUILD_FRAMEWORK')+'/template/projectTemplate/sconstruct', packageName )
# Moves include and source files to the right place.
shutil.move( sourcePath+'/src/city.h', packageName+'/include' )
###Deactivated###
#shutil.move( srcPath+'/citycrc.h', includePath )
shutil.move( sourcePath+'/src/city.cc', packageName+'/src/city.cpp' )
# Patches the 'city.h' file
with open( packageName+'/include/city.h', 'r+' ) as city_h:
city_h_lines = city_h.readlines()
for (i, line) in enumerate(city_h_lines):
city_h_lines[i] = re.sub('^(uint\d+)', 'extern "C" \\1', line)
city_h.seek(0)
city_h.writelines(city_h_lines)
# Patches the city.cpp file
with open( packageName+'/src/city.cpp', 'r+' ) as city_cpp:
city_cpp_lines = city_cpp.readlines()
for (i, line) in enumerate(city_cpp_lines):
if( re.match('^#include "config.h"', line) ):
city_cpp_lines[i] = '//' + line
city_cpp.seek(0)
city_cpp.writelines(city_cpp_lines)
return lambda : _patcher()
def builder():
def _builder():
global os
global packageName
global subprocess
owd = os.getcwd()
nwd = owd + '/' + packageName
os.chdir(nwd)
installPaths = 'installPaths={0}/{1}/local'.format(owd, packageName)
subprocess.call(['scons',installPaths,'release'], shell=True)
subprocess.call(['scons',installPaths,'debug'], shell=True)
os.chdir(owd)
return lambda : _builder()
descriptor = {
'name' : packageName,
'version' : packageVersion,
'urls' : [ 'http://cityhash.googlecode.com/files/{0}-{1}.{2}.{3}.tar.gz'.format(packageName, versionMajor, versionMinor, versionMaint) ],
'include' : [ packageName+'/local/include/*.h' ],
'license' : [ sourcePath+'/COPYING' ],
'lib' : [ packageName+'/local/bin/*.lib' ],
'builds' : [ patcher(), builder() ]
}
|
gpl-3.0
| 2,374,909,519,164,992,500
| 29.292453
| 139
| 0.669279
| false
| 3.180249
| false
| false
| false
|
desihub/qlf
|
backend/framework/qlf/dashboard/bokeh/qaskypeak/main.py
|
1
|
6438
|
from bokeh.layouts import row, column
from bokeh.models import HoverTool, ColumnDataSource, Span
from bokeh.models import LinearColorMapper
from bokeh.models import TapTool, OpenURL, Range1d
from bokeh.models.widgets import Div
from qlf_models import QLFModels
from dashboard.bokeh.helper import sort_obj
from dashboard.bokeh.plots.descriptors.table import Table
from dashboard.bokeh.plots.descriptors.title import Title
from dashboard.bokeh.plots.plot2d.main import Plot2d
from dashboard.bokeh.helper import get_palette
import numpy as np
from bokeh.resources import CDN
from bokeh.embed import file_html
class Skypeak:
def __init__(self, process_id, arm, spectrograph):
self.selected_process_id = process_id
self.selected_arm = arm
self.selected_spectrograph = spectrograph
def load_qa(self):
cam = self.selected_arm+str(self.selected_spectrograph)
mergedqa = QLFModels().get_output(self.selected_process_id, cam)
check_spectra = mergedqa['TASKS']['CHECK_SPECTRA']
gen_info = mergedqa['GENERAL_INFO']
ra = gen_info['RA']
dec = gen_info['DEC']
nrg = check_spectra['PARAMS']['PEAKCOUNT_NORMAL_RANGE']
wrg = check_spectra['PARAMS']['PEAKCOUNT_WARN_RANGE']
current_exposures = [check_spectra['METRICS']['PEAKCOUNT']]
program = gen_info['PROGRAM'].upper()
reference_exposures = check_spectra['PARAMS']['PEAKCOUNT_' +
program + '_REF']
obj_type = sort_obj(gen_info)
my_palette = get_palette("RdYlBu_r")
peak_tooltip = """
<div>
<div>
<span style="font-size: 1vw; font-weight: bold; color: #303030;">PEAKCOUNT: </span>
<span style="font-size: 1vw; color: #515151">@peakcount_fib</span>
</div>
<div>
<span style="font-size: 1vw; font-weight: bold; color: #303030;">RA: </span>
<span style="font-size: 1vw; color: #515151;">@x1</span>
</div>
<div>
<span style="font-size: 1vw; font-weight: bold; color: #303030;">DEC: </span>
<span style="font-size: 1vw; color: #515151;">@y1</span>
</div>
<div>
<span style="font-size: 1vw; font-weight: bold; color: #303030;">Obj Type: </span>
<span style="font-size: 1vw; color: #515151;">@OBJ_TYPE</span>
</div>
</div>
"""
url = "http://legacysurvey.org/viewer?ra=@ra&dec=@dec&zoom=16&layer=decals-dr5"
qlf_fiberid = np.arange(0, 500)
peak_hover = HoverTool(tooltips=peak_tooltip)
peakcount_fib = check_spectra['METRICS']['PEAKCOUNT_FIB']
source = ColumnDataSource(data={
'x1': ra,
'y1': dec,
'peakcount_fib': peakcount_fib,
'delta_peakcount_fib': np.array(peakcount_fib)-reference_exposures,
'QLF_FIBERID': qlf_fiberid,
'OBJ_TYPE': obj_type,
})
low, high = wrg
mapper = LinearColorMapper(palette=my_palette,
low=low, #0.98*np.min(peakcount_fib),
high=high, #1.02*np.max(peakcount_fib))
nan_color='darkgrey')
radius = 0.0165
radius_hover = 0.02
# centralize wedges in plots:
ra_center=0.5*(max(ra)+min(ra))
dec_center=0.5*(max(dec)+min(dec))
xrange_wedge = Range1d(start=ra_center + .95, end=ra_center-.95)
yrange_wedge = Range1d(start=dec_center+.82, end=dec_center-.82)
# axes limit
xmin, xmax = [min(gen_info['RA'][:]), max(gen_info['RA'][:])]
ymin, ymax = [min(gen_info['DEC'][:]), max(gen_info['DEC'][:])]
xfac, yfac = [(xmax-xmin)*0.06, (ymax-ymin)*0.06]
left, right = xmin - xfac, xmax+xfac
bottom, top = ymin-yfac, ymax+yfac
wedge_plot = Plot2d(
x_range=xrange_wedge,
y_range=yrange_wedge,
x_label="RA",
y_label="DEC",
tooltip=peak_tooltip,
title="PEAKCOUNT",
width=500,
height=380,
).wedge(
source,
x='x1',
y='y1',
field='delta_peakcount_fib',
mapper=mapper,
).plot
info_col = Title().write_description('skypeak')
# ================================
# histogram
hist_tooltip = """
<div>
<div>
<span style="font-size: 1vw; font-weight: bold; color: #303030;">Frequency: </span>
<span style="font-size: 1vw; color: #515151">@hist</span>
</div>
<div>
<span style="font-size: 1vw; font-weight: bold; color: #303030;">Peakcount: </span>
<span style="font-size: 1vw; color: #515151;">[@left, @right]</span>
</div>
</div>
"""
hist, edges = np.histogram(peakcount_fib, bins="sqrt")
source_hist = ColumnDataSource(data={
'hist': hist,
'histplusone': hist+1,
'bottom': [0] * len(hist),
'bottomplusone': [1]*len(hist),
'left': edges[:-1],
'right': edges[1:]
})
p_hist = Plot2d(
y_range=(1, 11**(int(np.log10(max(hist)))+1)),
x_label='PEAKCOUNT',
y_label='Frequency + 1',
tooltip=hist_tooltip,
title="",
width=550,
height=300,
yscale="log",
hover_mode="vline",
).quad(
source_hist,
top='histplusone',
bottom='bottomplusone',
line_width=1,
)
# Prepare tables
keynames = ["PEAKCOUNT" for i in range(len(current_exposures))]
table = Table().single_table(keynames, current_exposures, reference_exposures, nrg, wrg)
layout = column(info_col, Div(),
table, Div(),
column(wedge_plot, sizing_mode='scale_both'),
column(p_hist, sizing_mode='scale_both'),
css_classes=["display-grid"])
return file_html(layout, CDN, "SKYPEAK")
|
bsd-3-clause
| -3,194,120,361,751,438,300
| 35.168539
| 103
| 0.517552
| false
| 3.592634
| false
| false
| false
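A short sketch of rendering this QA page outside the dashboard; the process id, arm and spectrograph values are illustrative assumptions:

from dashboard.bokeh.qaskypeak.main import Skypeak

html = Skypeak(process_id=69, arm='b', spectrograph=0).load_qa()
with open('skypeak.html', 'w') as output:
    output.write(html)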
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2015_06_15/models/bgp_settings.py
|
1
|
1394
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class BgpSettings(Model):
"""BgpSettings.
:param asn: Gets or sets this BGP speaker's ASN
:type asn: long
:param bgp_peering_address: Gets or sets the BGP peering address and BGP
identifier of this BGP speaker
:type bgp_peering_address: str
:param peer_weight: Gets or sets the weight added to routes learned from
this BGP speaker
:type peer_weight: int
"""
_attribute_map = {
'asn': {'key': 'asn', 'type': 'long'},
'bgp_peering_address': {'key': 'bgpPeeringAddress', 'type': 'str'},
'peer_weight': {'key': 'peerWeight', 'type': 'int'},
}
def __init__(self, **kwargs):
super(BgpSettings, self).__init__(**kwargs)
self.asn = kwargs.get('asn', None)
self.bgp_peering_address = kwargs.get('bgp_peering_address', None)
self.peer_weight = kwargs.get('peer_weight', None)
|
mit
| -7,371,447,792,608,689,000
| 35.684211
| 76
| 0.586083
| false
| 4.076023
| false
| false
| false
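A minimal usage sketch for the generated model above; the ASN and peering address are illustrative values, and the import path assumes the usual models-package re-export of this SDK version:

from azure.mgmt.network.v2015_06_15.models import BgpSettings

bgp = BgpSettings(asn=65010, bgp_peering_address='10.0.0.1', peer_weight=0)
print(bgp.asn, bgp.bgp_peering_address, bgp.peer_weight)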
|
Spoken-tutorial/spoken-website
|
creation/migrations/0001_initial.py
|
1
|
29727
|
# -*- coding: utf-8 -*-
# Third Party Stuff
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='AdminReviewerNotification',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=255)),
('message', models.TextField()),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='AdminReviewLog',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('status', models.PositiveSmallIntegerField()),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='ArchivedVideo',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('version', models.PositiveSmallIntegerField(default=0)),
('video', models.CharField(max_length=255)),
('atype', models.PositiveSmallIntegerField(default=0)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Collaborate',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('contact_number', models.CharField(max_length=20, null=True)),
('institution_name', models.CharField(max_length=255)),
('foss_name', models.CharField(max_length=255)),
('are_you_one', models.CharField(max_length=255)),
('howmuch_time', models.PositiveIntegerField()),
('availability_constraints', models.TextField(null=True, blank=True)),
('is_reviewer', models.BooleanField()),
('contribs_foss', models.TextField(null=True, blank=True)),
('educational_qualifications', models.TextField(null=True, blank=True)),
('prof_experience', models.CharField(max_length=255, null=True, blank=True)),
('lang_contributor', models.BooleanField()),
('lead_st', models.BooleanField()),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='ContributeTowards',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='ContributorLog',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('component', models.CharField(max_length=255)),
('status', models.PositiveSmallIntegerField()),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='ContributorNotification',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=255)),
('message', models.TextField()),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='ContributorRole',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('status', models.BooleanField()),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': 'Contributor Role',
},
),
migrations.CreateModel(
name='DomainReviewerNotification',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=255)),
('message', models.TextField()),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='DomainReviewerRole',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('status', models.BooleanField()),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': 'Domain Reviewer Role',
},
),
migrations.CreateModel(
name='DomainReviewLog',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('component', models.CharField(max_length=255)),
('status', models.PositiveSmallIntegerField()),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='FossAvailableForTest',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('status', models.BooleanField(default=0)),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='FossAvailableForWorkshop',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('status', models.BooleanField(default=0)),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='FossCategory',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('foss', models.CharField(unique=True, max_length=255)),
('description', models.TextField()),
('status', models.BooleanField(max_length=2)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('foss',),
'verbose_name': 'FOSS Categorie',
},
),
migrations.CreateModel(
name='Language',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=255)),
('code', models.CharField(default=b'en', max_length=10)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('name',),
},
),
migrations.CreateModel(
name='Level',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('level', models.CharField(max_length=255)),
('code', models.CharField(max_length=10)),
],
options={
'verbose_name': 'Tutorial Level',
},
),
migrations.CreateModel(
name='NeedImprovementLog',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('review_state', models.PositiveSmallIntegerField()),
('component', models.CharField(max_length=50)),
('comment', models.TextField()),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='OperatingSystem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='PlaylistInfo',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('playlist_id', models.CharField(max_length=255)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('foss', models.ForeignKey(to='creation.FossCategory')),
('language', models.ForeignKey(to='creation.Language')),
],
options={
'verbose_name': 'Playlist Info',
},
),
migrations.CreateModel(
name='PlaylistItem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('item_id', models.CharField(max_length=255)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('playlist', models.ForeignKey(to='creation.PlaylistInfo')),
],
options={
'verbose_name': 'Playlist Item',
},
),
migrations.CreateModel(
name='PublicReviewLog',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='PublishTutorialLog',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='QualityReviewerNotification',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=255)),
('message', models.TextField()),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='QualityReviewerRole',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('status', models.BooleanField()),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('foss_category', models.ForeignKey(to='creation.FossCategory')),
('language', models.ForeignKey(to='creation.Language')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Quality Reviewer Role',
},
),
migrations.CreateModel(
name='QualityReviewLog',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('component', models.CharField(max_length=255)),
('status', models.PositiveSmallIntegerField()),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='RoleRequest',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('role_type', models.IntegerField(default=0)),
('status', models.PositiveSmallIntegerField(default=0)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('approved_user', models.ForeignKey(related_name='approved_user', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
('user', models.ForeignKey(related_name='user', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='SuggestExample',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('topic_title', models.CharField(max_length=255)),
('example_description', models.TextField()),
('script_writer', models.BooleanField()),
('is_reviewer', models.BooleanField()),
('created', models.DateTimeField(auto_now_add=True)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='SuggestTopic',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('topic_title', models.CharField(max_length=255)),
('brief_description', models.TextField()),
('example_suggestion', models.BooleanField()),
('created', models.DateTimeField(auto_now_add=True)),
('difficulty_level', models.ForeignKey(to='creation.Level')),
('operating_system', models.ManyToManyField(to='creation.OperatingSystem')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='TutorialCommonContent',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('slide', models.CharField(max_length=255)),
('slide_status', models.PositiveSmallIntegerField(default=0)),
('code', models.CharField(max_length=255)),
('code_status', models.PositiveSmallIntegerField(default=0)),
('assignment', models.CharField(max_length=255)),
('assignment_status', models.PositiveSmallIntegerField(default=0)),
('prerequisite_status', models.PositiveSmallIntegerField(default=0)),
('keyword', models.TextField()),
('keyword_status', models.PositiveSmallIntegerField(default=0)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('assignment_user', models.ForeignKey(related_name='assignments', to=settings.AUTH_USER_MODEL)),
('code_user', models.ForeignKey(related_name='codes', to=settings.AUTH_USER_MODEL)),
('keyword_user', models.ForeignKey(related_name='keywords', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Tutorial Common Content',
},
),
migrations.CreateModel(
name='TutorialDetail',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('tutorial', models.CharField(max_length=255)),
('order', models.IntegerField()),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('foss', models.ForeignKey(to='creation.FossCategory')),
('level', models.ForeignKey(to='creation.Level')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Tutorial Detail',
},
),
migrations.CreateModel(
name='TutorialMissingComponent',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('component', models.PositiveSmallIntegerField()),
('report_type', models.BooleanField(default=0)),
('remarks', models.TextField(null=True, blank=True)),
('inform_me', models.BooleanField(default=0)),
('email', models.CharField(max_length=255, null=True, blank=True)),
('reply_status', models.BooleanField(default=0)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='TutorialMissingComponentReply',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('reply_message', models.TextField()),
('created', models.DateTimeField(auto_now_add=True)),
('missing_component', models.ForeignKey(to='creation.TutorialMissingComponent')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='TutorialResource',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('outline', models.TextField()),
('outline_status', models.PositiveSmallIntegerField(default=0)),
('script', models.URLField(max_length=255)),
('script_status', models.PositiveSmallIntegerField(default=0)),
('timed_script', models.URLField(max_length=255)),
('video', models.CharField(max_length=255)),
('video_id', models.CharField(default=None, max_length=255, null=True, blank=True)),
('playlist_item_id', models.CharField(default=None, max_length=255, null=True, blank=True)),
('video_thumbnail_time', models.TimeField(default='00:00:00')),
('video_status', models.PositiveSmallIntegerField(default=0)),
('status', models.PositiveSmallIntegerField(default=0)),
('version', models.PositiveSmallIntegerField(default=0)),
('hit_count', models.PositiveIntegerField(default=0)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('common_content', models.ForeignKey(to='creation.TutorialCommonContent')),
('language', models.ForeignKey(to='creation.Language')),
('outline_user', models.ForeignKey(related_name='outlines', to=settings.AUTH_USER_MODEL)),
('script_user', models.ForeignKey(related_name='scripts', to=settings.AUTH_USER_MODEL)),
('tutorial_detail', models.ForeignKey(to='creation.TutorialDetail')),
('video_user', models.ForeignKey(related_name='videos', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='tutorialmissingcomponent',
name='tutorial_resource',
field=models.ForeignKey(to='creation.TutorialResource'),
),
migrations.AddField(
model_name='tutorialmissingcomponent',
name='user',
field=models.ForeignKey(related_name='raised_user', blank=True, to=settings.AUTH_USER_MODEL, null=True),
),
migrations.AddField(
model_name='tutorialcommoncontent',
name='prerequisite',
field=models.ForeignKey(related_name='prerequisite', blank=True, to='creation.TutorialDetail', null=True),
),
migrations.AddField(
model_name='tutorialcommoncontent',
name='prerequisite_user',
field=models.ForeignKey(related_name='prerequisite', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='tutorialcommoncontent',
name='slide_user',
field=models.ForeignKey(related_name='slides', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='tutorialcommoncontent',
name='tutorial_detail',
field=models.OneToOneField(related_name='tutorial_detail', to='creation.TutorialDetail'),
),
migrations.AddField(
model_name='qualityreviewlog',
name='tutorial_resource',
field=models.ForeignKey(to='creation.TutorialResource'),
),
migrations.AddField(
model_name='qualityreviewlog',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='qualityreviewernotification',
name='tutorial_resource',
field=models.ForeignKey(to='creation.TutorialResource'),
),
migrations.AddField(
model_name='qualityreviewernotification',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='publishtutoriallog',
name='tutorial_resource',
field=models.ForeignKey(to='creation.TutorialResource'),
),
migrations.AddField(
model_name='publishtutoriallog',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='publicreviewlog',
name='tutorial_resource',
field=models.ForeignKey(to='creation.TutorialResource'),
),
migrations.AddField(
model_name='publicreviewlog',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='needimprovementlog',
name='tutorial_resource',
field=models.ForeignKey(to='creation.TutorialResource'),
),
migrations.AddField(
model_name='needimprovementlog',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='fossavailableforworkshop',
name='foss',
field=models.ForeignKey(to='creation.FossCategory'),
),
migrations.AddField(
model_name='fossavailableforworkshop',
name='language',
field=models.ForeignKey(to='creation.Language'),
),
migrations.AddField(
model_name='fossavailablefortest',
name='foss',
field=models.ForeignKey(to='creation.FossCategory'),
),
migrations.AddField(
model_name='fossavailablefortest',
name='language',
field=models.ForeignKey(to='creation.Language'),
),
migrations.AddField(
model_name='domainreviewlog',
name='tutorial_resource',
field=models.ForeignKey(to='creation.TutorialResource'),
),
migrations.AddField(
model_name='domainreviewlog',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='domainreviewerrole',
name='foss_category',
field=models.ForeignKey(to='creation.FossCategory'),
),
migrations.AddField(
model_name='domainreviewerrole',
name='language',
field=models.ForeignKey(to='creation.Language'),
),
migrations.AddField(
model_name='domainreviewerrole',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='domainreviewernotification',
name='tutorial_resource',
field=models.ForeignKey(to='creation.TutorialResource'),
),
migrations.AddField(
model_name='domainreviewernotification',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='contributorrole',
name='foss_category',
field=models.ForeignKey(to='creation.FossCategory'),
),
migrations.AddField(
model_name='contributorrole',
name='language',
field=models.ForeignKey(to='creation.Language'),
),
migrations.AddField(
model_name='contributorrole',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='contributornotification',
name='tutorial_resource',
field=models.ForeignKey(to='creation.TutorialResource'),
),
migrations.AddField(
model_name='contributornotification',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='contributorlog',
name='tutorial_resource',
field=models.ForeignKey(to='creation.TutorialResource'),
),
migrations.AddField(
model_name='contributorlog',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='collaborate',
name='contribute_towards',
field=models.ManyToManyField(to='creation.ContributeTowards'),
),
migrations.AddField(
model_name='collaborate',
name='language',
field=models.ForeignKey(to='creation.Language'),
),
migrations.AddField(
model_name='collaborate',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='archivedvideo',
name='tutorial_resource',
field=models.ForeignKey(to='creation.TutorialResource'),
),
migrations.AddField(
model_name='archivedvideo',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='adminreviewlog',
name='tutorial_resource',
field=models.ForeignKey(to='creation.TutorialResource'),
),
migrations.AddField(
model_name='adminreviewlog',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='adminreviewernotification',
name='tutorial_resource',
field=models.ForeignKey(to='creation.TutorialResource'),
),
migrations.AddField(
model_name='adminreviewernotification',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
migrations.AlterUniqueTogether(
name='tutorialresource',
unique_together=set([('tutorial_detail', 'language')]),
),
migrations.AlterUniqueTogether(
name='tutorialdetail',
unique_together=set([('foss', 'tutorial', 'level')]),
),
migrations.AlterUniqueTogether(
name='rolerequest',
unique_together=set([('user', 'role_type')]),
),
migrations.AlterUniqueTogether(
name='qualityreviewerrole',
unique_together=set([('user', 'foss_category', 'language')]),
),
migrations.AlterUniqueTogether(
name='playlistitem',
unique_together=set([('playlist', 'item_id')]),
),
migrations.AlterUniqueTogether(
name='playlistinfo',
unique_together=set([('foss', 'language')]),
),
migrations.AlterUniqueTogether(
name='fossavailableforworkshop',
unique_together=set([('foss', 'language')]),
),
migrations.AlterUniqueTogether(
name='fossavailablefortest',
unique_together=set([('foss', 'language')]),
),
migrations.AlterUniqueTogether(
name='domainreviewerrole',
unique_together=set([('user', 'foss_category', 'language')]),
),
migrations.AlterUniqueTogether(
name='contributorrole',
unique_together=set([('user', 'foss_category', 'language')]),
),
]
|
gpl-3.0
| 8,895,247,861,478,283,000
| 44.040909
| 135
| 0.552192
| false
| 4.713334
| false
| false
| false
|
sh01/taf
|
setup.py
|
1
|
1102
|
#!/usr/bin/env python3
# Copyright 2015 Sebastian Hagen
# This file is part of taf.
# taf is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2
# as published by the Free Software Foundation
#
# taf is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
from distutils.core import setup
if (sys.version_info[0] <= 2):
raise Exception('This program needs a python >= 3.0')
setup(name='taf',
version='0.1',
description='TAF: Tunneled attention flags.',
author='Sebastian Hagen',
author_email='sebastian_hagen@memespace.net',
#url='http://git.memespace.net/git/??',
packages=('taf',),
scripts=(
'src/bin/logs2stdout.py',
'src/bin/taf_ui.py'
),
package_dir={'taf':'src/taf'}
)
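# Usage note (not part of the original file): with a checkout that contains the
# src/ layout referenced above, the package would typically be installed via
#   python3 setup.py install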
|
gpl-2.0
| -1,341,954,545,255,813,000
| 29.611111
| 71
| 0.715064
| false
| 3.339394
| false
| false
| false
|
jeremiedecock/snippets
|
python/pyqt/pyqt5/widget_QTableView_delegate_on_edit_using_dateedit_widget.py
|
1
|
4596
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Ref:
# - http://doc.qt.io/qt-5/modelview.html#3-4-delegates
# - http://doc.qt.io/qt-5/model-view-programming.html#delegate-classes
# - http://doc.qt.io/qt-5/qabstractitemdelegate.html#details
# - http://doc.qt.io/qt-5/qitemdelegate.html#details
# - http://doc.qt.io/qt-5/qstyleditemdelegate.html#details
# - http://doc.qt.io/qt-5/qtwidgets-itemviews-spinboxdelegate-example.html
import sys
import datetime
from PyQt5.QtCore import Qt, QAbstractTableModel, QVariant
from PyQt5.QtWidgets import QApplication, QTableView, QStyledItemDelegate, QDateEdit
DATETIME_FORMAT = '%Y-%m-%d'
class MyData:
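    # Plain in-memory backing store: a 3x2 grid of date strings (one per cell)
    # that the model below reads from and writes to.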
def __init__(self):
self._num_rows = 3
self._num_columns = 2
self._data = [[datetime.datetime.now().strftime(DATETIME_FORMAT) for j in range(self._num_columns)] for i in range(self._num_rows)]
def get_num_rows(self):
return self._num_rows
def get_num_columns(self):
return self._num_columns
def get_data(self, row_index, column_index):
value = self._data[row_index][column_index]
print("read ({},{}): {}".format(row_index, column_index, value))
return value
def set_data(self, row_index, column_index, value):
print("write ({},{}): {}".format(row_index, column_index, value))
self._data[row_index][column_index] = value
###############################################################################
class MyModel(QAbstractTableModel):
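    # Editable table model that forwards every cell read/write to the MyData
    # instance passed to the constructor.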
def __init__(self, data, parent=None):
super().__init__(parent)
        self._data = data # DON'T CALL THIS ATTRIBUTE "data"; A METHOD ALREADY HAS THIS NAME (model.data(index, role)) !!!
def rowCount(self, parent):
return self._data.get_num_rows()
def columnCount(self, parent):
return self._data.get_num_columns()
def data(self, index, role):
if role == Qt.DisplayRole or role == Qt.EditRole:
# See https://stackoverflow.com/a/8480223
return self._data.get_data(index.row(), index.column())
return QVariant()
def setData(self, index, value, role):
if role == Qt.EditRole:
try:
self._data.set_data(index.row(), index.column(), value)
                # The following line is necessary e.g. to dynamically update a QSortFilterProxyModel
self.dataChanged.emit(index, index, [Qt.EditRole])
except Exception as e:
print(e)
return False
return True
def flags(self, index):
return Qt.ItemIsSelectable | Qt.ItemIsEditable | Qt.ItemIsEnabled
###############################################################################
class MyDelegate(QStyledItemDelegate):
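    # Delegate that replaces the default line-edit editor with a QDateEdit
    # (calendar popup, dates restricted to 2017-09-01 .. 2020-09-01).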
def createEditor(self, parent, option, index):
editor = QDateEdit(parent=parent)
editor.setMinimumDate(datetime.datetime(year=2017, month=9, day=1))
editor.setMaximumDate(datetime.datetime(year=2020, month=9, day=1))
editor.setDisplayFormat("yyyy-MM-dd")
editor.setCalendarPopup(True)
        # setFrame(): tells whether the line edit draws itself with a frame.
        # If enabled (the default), the line edit draws itself inside a frame; otherwise it draws itself without any frame.
editor.setFrame(False)
return editor
def setEditorData(self, editor, index):
str_value = index.data(Qt.EditRole) # equivalent of value = index.model().data(index, Qt.EditRole)
value = datetime.datetime.strptime(str_value, DATETIME_FORMAT)
        editor.setDate(value.date()) # value cannot be a string, it has to be a datetime...
def setModelData(self, editor, model, index):
editor.interpretText()
value = editor.text()
model.setData(index, value, Qt.EditRole)
def updateEditorGeometry(self, editor, option, index):
editor.setGeometry(option.rect)
if __name__ == '__main__':
app = QApplication(sys.argv)
data = MyData()
table_view = QTableView()
my_model = MyModel(data)
table_view.setModel(my_model)
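    # Optional variation (not in the original example): the model could also be wrapped
    # in a QSortFilterProxyModel to get sortable columns; the dataChanged.emit() call in
    # setData() is what keeps such a proxy in sync. A minimal sketch:
    #   from PyQt5.QtCore import QSortFilterProxyModel
    #   proxy = QSortFilterProxyModel()
    #   proxy.setSourceModel(my_model)
    #   table_view.setModel(proxy)
    #   table_view.setSortingEnabled(True)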
delegate = MyDelegate()
table_view.setItemDelegate(delegate)
table_view.show()
    # The main loop of the application; event handling starts from this point.
    # The exec_() method has a trailing underscore because exec is a Python keyword.
exit_code = app.exec_()
    # The sys.exit() call ensures a clean exit and informs the environment how the application ended.
sys.exit(exit_code)
|
mit
| 1,544,970,414,205,038,800
| 34.353846
| 139
| 0.625762
| false
| 3.718447
| false
| false
| false
|