repo_name (string, 5-92 chars) | path (string, 4-221 chars) | copies (string, 19 classes) | size (string, 4-6 chars) | content (string, 766-896k chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 32-997) | alpha_frac (float64, 0.25-0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5-13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
tclim/your | urscript/urscript/comm.py | 1 | 3567 |
""" comm.py module manages Robot communication using sockets.
It contains functions for sending and listening to the robot
"""
import socket
from struct import unpack
PORT_DASH = 29999
PORT = 30002
PORT_RT = 30003
def send_script(ur_program, robot_ip, port=PORT):
    """Send a script to robot via a socket
    Args:
        ur_program: Formatted UR Script program to send (string)
        robot_ip: IP address of robot (string)
        port: Robot port to connect to (int), defaults to PORT (30002)
    """
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(2)
try:
# add an extra new line
ur_program += '\n'
        s.connect((robot_ip, port))
s.send(ur_program)
except socket.timeout:
print "Time out connecting to {0} Port:{1}".format(robot_ip,PORT)
except socket.error, e:
print e
s.close()
def stop_program(robot_ip):
""" Pauses a running program by sending a command to the Dashboard
Args:
robot_ip: IP address of robot (string)
"""
    send_script('pause', robot_ip, PORT_DASH)
def listen(robot_ip):
"""Returns robot data received through a socket in dictionary format.
Args:
robot_ip: IP address of robot (string)
Returns:
dict_data: A dictionary containing robot data in readable format
"""
data = _receive_data(robot_ip)
dict_data = _format_data(data)
return dict_data
def _receive_data(robot_ip):
"""Receives unformatted data from robot using the realtime interface (Port 30003)
Args:
robot_ip: ip address of robot (string)
Returns:
data: Robot data (byte[])
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(.1)
try:
s.connect((robot_ip, PORT_RT))
except socket.timeout:
print "Time out connecting to {0} Port:{1}".format(robot_ip,PORT_RT)
except socket.error, e:
print e
data = s.recv(1024)
s.close()
return data
def _format_data(data):
"""Formats robot data into dictionary
    The received byte array is unpacked into a dictionary. For additional
    info on the data layout, see the UR real-time interface documentation.
Args:
data: Raw data from robot (byte[])
Returns:
dict_data: A dictionary containing data in readable format
"""
dict_data = {}
fmt_int = "!i"
#fmt_uInt = "!Q"
fmt_double1 = "!d"
fmt_double3 = "!ddd"
fmt_double6 = "!dddddd"
dict_data["message_length"] = unpack(fmt_int, data[0:4])
dict_data["time"] = unpack(fmt_double1, data[4:12])
dict_data["target_joints_pos"] = unpack(fmt_double6, data[12:60])
dict_data["target_joints_vel"] = unpack(fmt_double6, data[60:108])
dict_data["target_joints_accel"] = unpack(fmt_double6, data[108:156])
dict_data["target_joints_current"] = unpack(fmt_double6, data[156:204])
dict_data["target_joints_torque"] = unpack(fmt_double6, data[204:252])
dict_data["actual_joints_pos"] = unpack(fmt_double6, data[252:300])
dict_data["actual_joints_vel"] = unpack(fmt_double6, data[300:348])
dict_data["actual_joints_current"] = unpack(fmt_double6, data[348:396])
dict_data["xyz_accelerometer"] = unpack(fmt_double3, data[396:420])
dict_data["tcp_force"] = unpack(fmt_double6, data[540:588])
dict_data["tool_pose"] = unpack(fmt_double6, data[588:636])
dict_data["tool_speed"] = unpack(fmt_double6, data[636:684])
#dict_data["digital_input"] = unpack(fmt_double6, data[636:684])
dict_data["joint_temperatures"] = unpack(fmt_double6, data[692:740])
return dict_data
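# Illustrative usage sketch (the IP address is just a placeholder):
#   send_script('movej([0, -1.57, 0, -1.57, 0, 0], a=1.0, v=0.5)', '192.168.0.10')
#   state = listen('192.168.0.10')
#   print state['actual_joints_pos']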
| mit | 9,156,183,232,854,302,000 | 32.990196 | 85 | 0.626857 | false | 3.371456 | false | false | false |
igorgai/django-custom-user | custom_user/models.py | 1 | 4588 |
"""User models."""
import django
from django.contrib.auth.models import (
AbstractBaseUser, BaseUserManager, PermissionsMixin)
from django.core.mail import send_mail
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
class EmailUserManager(BaseUserManager):
"""Custom manager for EmailUser."""
def _create_user(self, email, password,
is_staff, is_superuser, **extra_fields):
"""Create and save an EmailUser with the given email and password.
:param str email: user email
:param str password: user password
        :param bool is_staff: whether the user is a staff member
        :param bool is_superuser: whether the user is an admin
:return custom_user.models.EmailUser user: user
:raise ValueError: email is not set
"""
now = timezone.now()
if not email:
raise ValueError('The given email must be set')
email = self.normalize_email(email)
is_active = extra_fields.pop("is_active", True)
user = self.model(email=email, is_staff=is_staff, is_active=is_active,
is_superuser=is_superuser, last_login=now,
date_joined=now, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, email, password=None, **extra_fields):
"""Create and save an EmailUser with the given email and password.
:param str email: user email
:param str password: user password
:return custom_user.models.EmailUser user: regular user
"""
is_staff = extra_fields.pop("is_staff", False)
return self._create_user(email, password, is_staff, False,
**extra_fields)
def create_superuser(self, email, password, **extra_fields):
"""Create and save an EmailUser with the given email and password.
:param str email: user email
:param str password: user password
:return custom_user.models.EmailUser user: admin user
"""
return self._create_user(email, password, True, True,
**extra_fields)
class AbstractEmailUser(AbstractBaseUser, PermissionsMixin):
"""Abstract User with the same behaviour as Django's default User.
AbstractEmailUser does not have username field. Uses email as the
USERNAME_FIELD for authentication.
Use this if you need to extend EmailUser.
    Inherits from both the AbstractBaseUser and PermissionsMixin.
The following attributes are inherited from the superclasses:
* password
* last_login
* is_superuser
"""
email = models.EmailField(_('email address'), max_length=255,
unique=True, db_index=True)
is_staff = models.BooleanField(
_('staff status'), default=False, help_text=_(
'Designates whether the user can log into this admin site.'))
is_active = models.BooleanField(_('active'), default=True, help_text=_(
'Designates whether this user should be treated as '
'active. Unselect this instead of deleting accounts.'))
date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
objects = EmailUserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
class Meta:
verbose_name = _('user')
verbose_name_plural = _('users')
abstract = True
def get_full_name(self):
"""Return the email."""
return self.email
def get_short_name(self):
"""Return the email."""
return self.email
def email_user(self, subject, message, from_email=None, **kwargs):
"""Send an email to this User."""
send_mail(subject, message, from_email, [self.email], **kwargs)
# Monkey patch Django 1.7 to avoid detecting migrations
if django.VERSION[:2] == (1, 7):
last_login = AbstractEmailUser._meta.get_field('last_login')
last_login.blank = True
last_login.null = True
last_login.default = models.fields.NOT_PROVIDED
groups = AbstractEmailUser._meta.get_field('groups')
groups.help_text = _('The groups this user belongs to. A user will get '
'all permissions granted to each of their groups.')
class EmailUser(AbstractEmailUser):
"""
Concrete class of AbstractEmailUser.
Use this if you don't need to extend EmailUser.
"""
class Meta(AbstractEmailUser.Meta):
swappable = 'AUTH_USER_MODEL'
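# Typical usage sketch: point Django's AUTH_USER_MODEL setting at the concrete
# model above, e.g. in settings.py:
#   AUTH_USER_MODEL = 'custom_user.EmailUser'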
| bsd-3-clause | 7,541,121,175,570,798,000 | 32.985185 | 78 | 0.637533 | false | 4.299906 | false | false | false |
LevinJ/SSD_tensorflow_VOC | exercise/data_generator.py | 1 | 8416 |
import tensorflow as tf
from datasets import dataset_utils
from datasets import flowers
import matplotlib.pyplot as plt
import tensorflow.contrib.slim as slim
from datasets import download_and_convert_flowers
import numpy as np
from preprocessing import inception_preprocessing
flowers_data_dir = '../../data/flower'
train_dir = '/tmp/tfslim_model/'
print('Will save model to %s' % train_dir)
def display_data():
with tf.Graph().as_default():
dataset = flowers.get_split('train', flowers_data_dir)
data_provider = slim.dataset_data_provider.DatasetDataProvider(
dataset, common_queue_capacity=32, common_queue_min=1)
image, label = data_provider.get(['image', 'label'])
with tf.Session() as sess:
with slim.queues.QueueRunners(sess):
for i in range(4):
np_image, np_label = sess.run([image, label])
height, width, _ = np_image.shape
class_name = name = dataset.labels_to_names[np_label]
plt.figure()
plt.imshow(np_image)
plt.title('%s, %d x %d' % (name, height, width))
plt.axis('off')
plt.show()
return
def download_convert():
dataset_dir = flowers_data_dir
download_and_convert_flowers.run(dataset_dir)
return
def disp_data():
with tf.Graph().as_default():
dataset = flowers.get_split('train', flowers_data_dir)
data_provider = slim.dataset_data_provider.DatasetDataProvider(
dataset, common_queue_capacity=32, common_queue_min=1)
image, label,format = data_provider.get(['image', 'label', 'format'])
with tf.Session() as sess:
with slim.queues.QueueRunners(sess):
for i in range(4):
np_image, np_label,np_format = sess.run([image, label,format])
height, width, _ = np_image.shape
class_name = name = dataset.labels_to_names[np_label]
plt.figure()
plt.imshow(np_image)
plt.title('%s, %d x %d' % (name, height, width))
plt.axis('off')
plt.show()
return
def my_cnn(images, num_classes, is_training): # is_training is not used...
with slim.arg_scope([slim.max_pool2d], kernel_size=[3, 3], stride=2):
net = slim.conv2d(images, 64, [5, 5])
net = slim.max_pool2d(net)
net = slim.conv2d(net, 64, [5, 5])
net = slim.max_pool2d(net)
net = slim.flatten(net)
net = slim.fully_connected(net, 192)
net = slim.fully_connected(net, num_classes, activation_fn=None)
return net
def apply_random_image():
with tf.Graph().as_default():
# The model can handle any input size because the first layer is convolutional.
# The size of the model is determined when image_node is first passed into the my_cnn function.
# Once the variables are initialized, the size of all the weight matrices is fixed.
# Because of the fully connected layers, this means that all subsequent images must have the same
# input size as the first image.
batch_size, height, width, channels = 3, 28, 28, 3
images = tf.random_uniform([batch_size, height, width, channels], maxval=1)
# Create the model.
num_classes = 10
logits = my_cnn(images, num_classes, is_training=True)
probabilities = tf.nn.softmax(logits)
# Initialize all the variables (including parameters) randomly.
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
# Run the init_op, evaluate the model outputs and print the results:
sess.run(init_op)
probabilities = sess.run(probabilities)
print('Probabilities Shape:')
print(probabilities.shape) # batch_size x num_classes
print('\nProbabilities:')
print(probabilities)
print('\nSumming across all classes (Should equal 1):')
print(np.sum(probabilities, 1)) # Each row sums to 1
return
def load_batch(dataset, batch_size=32, height=299, width=299, is_training=False):
"""Loads a single batch of data.
Args:
dataset: The dataset to load.
batch_size: The number of images in the batch.
height: The size of each image after preprocessing.
width: The size of each image after preprocessing.
is_training: Whether or not we're currently training or evaluating.
Returns:
images: A Tensor of size [batch_size, height, width, 3], image samples that have been preprocessed.
images_raw: A Tensor of size [batch_size, height, width, 3], image samples that can be used for visualization.
labels: A Tensor of size [batch_size], whose values range between 0 and dataset.num_classes.
"""
data_provider = slim.dataset_data_provider.DatasetDataProvider(
dataset, common_queue_capacity=32,
common_queue_min=8)
image_raw, label = data_provider.get(['image', 'label'])
# Preprocess image for usage by Inception.
image = inception_preprocessing.preprocess_image(image_raw, height, width, is_training=is_training)
# Preprocess the image for display purposes.
image_raw = tf.expand_dims(image_raw, 0)
image_raw = tf.image.resize_images(image_raw, [height, width])
image_raw = tf.squeeze(image_raw)
# Batch it up.
images, images_raw, labels = tf.train.batch(
[image, image_raw, label],
batch_size=batch_size,
num_threads=1,
capacity=2 * batch_size)
return images, images_raw, labels
def train_save_model():
with tf.Graph().as_default():
tf.logging.set_verbosity(tf.logging.INFO)
dataset = flowers.get_split('train', flowers_data_dir)
images, _, labels = load_batch(dataset)
# Create the model:
logits = my_cnn(images, num_classes=dataset.num_classes, is_training=True)
# Specify the loss function:
one_hot_labels = slim.one_hot_encoding(labels, dataset.num_classes)
slim.losses.softmax_cross_entropy(logits, one_hot_labels)
total_loss = slim.losses.get_total_loss()
# Create some summaries to visualize the training process:
tf.summary.scalar('losses/Total Loss', total_loss)
# Specify the optimizer and create the train op:
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = slim.learning.create_train_op(total_loss, optimizer)
# Run the training:
final_loss = slim.learning.train(
train_op,
logdir=train_dir,
            number_of_steps=1,  # For speed, we just do 1 training step
save_summaries_secs=1)
        print('Finished training. Final batch loss %f' % final_loss)
return
def evaluate_model():
with tf.Graph().as_default():
tf.logging.set_verbosity(tf.logging.DEBUG)
dataset = flowers.get_split('train', flowers_data_dir)
images, _, labels = load_batch(dataset)
logits = my_cnn(images, num_classes=dataset.num_classes, is_training=False)
predictions = tf.argmax(logits, 1)
# Define the metrics:
names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
'eval/Accuracy': slim.metrics.streaming_accuracy(predictions, labels),
'eval/Recall@5': slim.metrics.streaming_recall_at_k(logits, labels, 5),
})
print('Running evaluation Loop...')
checkpoint_path = tf.train.latest_checkpoint(train_dir)
metric_values = slim.evaluation.evaluate_once(
master='',
checkpoint_path=checkpoint_path,
logdir=train_dir,
eval_op=names_to_updates.values(),
final_op=names_to_values.values())
names_to_values = dict(zip(names_to_values.keys(), metric_values))
for name in names_to_values:
print('%s: %f' % (name, names_to_values[name]))
return
def main():
# download_convert()
# disp_data()
# apply_random_image()
# train_save_model()
evaluate_model()
return
main()
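# The functions above are meant to be enabled one at a time from main():
# download_convert() fetches the flowers dataset and converts it to TFRecords,
# display_data()/disp_data() visualize a few samples, train_save_model() trains
# my_cnn for a single step and checkpoints it to train_dir, and evaluate_model()
# restores the latest checkpoint and reports accuracy and recall@5.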
| apache-2.0 | 5,897,464,082,682,168,000 | 36.73991 | 116 | 0.606702 | false | 3.887298 | false | false | false |
gnocchixyz/gnocchi | gnocchi/rest/api.py | 1 | 87732 |
# -*- encoding: utf-8 -*-
#
# Copyright © 2016-2018 Red Hat, Inc.
# Copyright © 2014-2015 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import functools
import itertools
import logging
import operator
import uuid
import jsonpatch
import pecan
from pecan import rest
import pyparsing
import six
from six.moves.urllib import parse as urllib_parse
import tenacity
import tooz
import voluptuous
import werkzeug.http
import gnocchi
from gnocchi import archive_policy
from gnocchi import calendar
from gnocchi import chef
from gnocchi.cli import metricd
from gnocchi import incoming
from gnocchi import indexer
from gnocchi import json
from gnocchi import resource_type
from gnocchi.rest.aggregates import exceptions
from gnocchi.rest.aggregates import processor
from gnocchi.rest import exceptions as rest_exceptions
from gnocchi import storage
from gnocchi import utils
try:
from gnocchi.rest.prometheus import remote_pb2
import snappy
PROMETHEUS_SUPPORTED = True
except ImportError:
PROMETHEUS_SUPPORTED = False
ATTRGETTER_GRANULARITY = operator.attrgetter("granularity")
LOG = logging.getLogger(__name__)
def arg_to_list(value):
if isinstance(value, list):
return value
elif value:
return [value]
return []
def abort(status_code, detail=''):
"""Like pecan.abort, but make sure detail is a string."""
if status_code == 404 and not detail:
raise RuntimeError("http code 404 must have 'detail' set")
if isinstance(detail, voluptuous.Invalid):
detail = {
'cause': 'Invalid input',
'reason': six.text_type(detail),
'detail': [six.text_type(path) for path in detail.path],
}
elif isinstance(detail, Exception):
detail = detail.jsonify()
LOG.debug("Aborting request. Code [%s]. Details [%s]", status_code, detail)
return pecan.abort(status_code, detail)
def flatten_dict_to_keypairs(d, separator=':'):
"""Generator that produces sequence of keypairs for nested dictionaries.
:param d: dictionaries which may be nested
:param separator: symbol between names
"""
for name, value in sorted(six.iteritems(d)):
if isinstance(value, dict):
for subname, subvalue in flatten_dict_to_keypairs(value,
separator):
yield ('%s%s%s' % (name, separator, subname), subvalue)
else:
yield name, value
def enforce(rule, target):
"""Return the user and project the request should be limited to.
:param rule: The rule name
:param target: The target to enforce on.
"""
creds = pecan.request.auth_helper.get_auth_info(pecan.request)
if not isinstance(target, dict):
if hasattr(target, "jsonify"):
target = target.jsonify()
else:
target = target.__dict__
# Flatten dict
target = dict(flatten_dict_to_keypairs(d=target, separator='.'))
if not pecan.request.policy_enforcer.enforce(rule, target, creds):
abort(403)
def set_resp_location_hdr(location):
location = '%s%s' % (pecan.request.script_name, location)
    # NOTE(sileht): according to PEP 3333, headers must be str in both py2
    # and py3, even though str is not the same type in the two versions
# see: http://legacy.python.org/dev/peps/pep-3333/#unicode-issues
if six.PY2 and isinstance(location, six.text_type):
location = location.encode('utf-8')
location = urllib_parse.quote(location)
pecan.response.headers['Location'] = location
def set_resp_link_hdr(marker, *args):
# NOTE(sileht): This comes from rfc5988.
# Setting prev, last is too costly/complicated, so just set next for now.
options = {}
for arg in args:
options.update(arg)
if "sorts" in options:
options["sort"] = options["sorts"]
del options["sorts"]
options["marker"] = marker
    # NOTE(sileht): To always have the same order
options = sorted(options.items())
params = urllib_parse.urlencode(options, doseq=True)
pecan.response.headers.add("Link", '<%s?%s>; rel="next"' %
(pecan.request.path_url, params))
def deserialize(expected_content_types=None):
if expected_content_types is None:
expected_content_types = ("application/json", )
mime_type, options = werkzeug.http.parse_options_header(
pecan.request.headers.get('Content-Type'))
if mime_type not in expected_content_types:
abort(415)
try:
params = json.load(pecan.request.body_file)
except Exception as e:
details = rest_exceptions.UnableToDecodeBody(e,
pecan.request.body_file)
LOG.warning(details.jsonify())
abort(400, details)
return params
def validate(schema, data, required=True):
try:
return voluptuous.Schema(schema, required=required)(data)
except voluptuous.Invalid as e:
abort(400, e)
def deserialize_and_validate(schema, required=True,
expected_content_types=None):
return validate(schema,
deserialize(expected_content_types=expected_content_types),
required)
def Timespan(value):
try:
return utils.to_timespan(value)
except ValueError as e:
raise voluptuous.Invalid(e)
def get_bool_param(name, params, default='false'):
return strtobool(name, params.get(name, default))
def strtobool(varname, v):
"""Convert a string to a boolean."""
try:
return utils.strtobool(v)
except ValueError as e:
abort(400, "Unable to parse `%s': %s" % (varname, six.text_type(e)))
RESOURCE_DEFAULT_PAGINATION = [u'revision_start:asc',
u'started_at:asc']
METRIC_DEFAULT_PAGINATION = [u'id:asc']
def get_pagination_options(params, default):
try:
opts = voluptuous.Schema({
voluptuous.Required(
"limit", default=pecan.request.conf.api.max_limit):
voluptuous.All(voluptuous.Coerce(int),
voluptuous.Range(min=1),
voluptuous.Clamp(
min=1, max=pecan.request.conf.api.max_limit)),
"marker": six.text_type,
voluptuous.Required("sort", default=default):
voluptuous.All(
voluptuous.Coerce(arg_to_list),
[six.text_type]),
}, extra=voluptuous.REMOVE_EXTRA)(params)
except voluptuous.Invalid as e:
abort(400, {"cause": "Argument value error",
"reason": str(e)})
opts['sorts'] = opts['sort']
del opts['sort']
return opts
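# For example (illustrative values), params={"limit": "10", "sort": "name:asc"}
# yields {"limit": 10, "sorts": ["name:asc"]}: the limit is coerced to int and
# clamped to pecan.request.conf.api.max_limit, and "sort" is renamed to "sorts".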
ArchivePolicyDefinitionSchema = voluptuous.Schema(
voluptuous.All([{
"granularity": Timespan,
"points": voluptuous.All(
voluptuous.Coerce(int),
voluptuous.Range(min=1),
),
"timespan": Timespan,
}], voluptuous.Length(min=1)),
)
class ArchivePolicyController(rest.RestController):
def __init__(self, archive_policy):
self.archive_policy = archive_policy
@pecan.expose('json')
def get(self):
ap = pecan.request.indexer.get_archive_policy(self.archive_policy)
if ap:
enforce("get archive policy", ap)
return ap
abort(404, six.text_type(
indexer.NoSuchArchivePolicy(self.archive_policy)))
@pecan.expose('json')
def patch(self):
ap = pecan.request.indexer.get_archive_policy(self.archive_policy)
if not ap:
abort(404, six.text_type(
indexer.NoSuchArchivePolicy(self.archive_policy)))
enforce("update archive policy", ap)
body = deserialize_and_validate(voluptuous.Schema({
voluptuous.Required("definition"): ArchivePolicyDefinitionSchema,
}))
# Validate the data
try:
ap_items = [archive_policy.ArchivePolicyItem(**item) for item in
body['definition']]
except ValueError as e:
abort(400, six.text_type(e))
try:
return pecan.request.indexer.update_archive_policy(
self.archive_policy, ap_items)
except indexer.UnsupportedArchivePolicyChange as e:
abort(400, six.text_type(e))
@pecan.expose('json')
def delete(self):
# NOTE(jd) I don't think there's any point in fetching and passing the
# archive policy here, as the rule is probably checking the actual role
# of the user, not the content of the AP.
enforce("delete archive policy", {})
try:
pecan.request.indexer.delete_archive_policy(self.archive_policy)
except indexer.NoSuchArchivePolicy as e:
abort(404, six.text_type(e))
except indexer.ArchivePolicyInUse as e:
abort(400, six.text_type(e))
class ArchivePoliciesController(rest.RestController):
@pecan.expose()
def _lookup(self, archive_policy, *remainder):
return ArchivePolicyController(archive_policy), remainder
@pecan.expose('json')
def post(self):
enforce("create archive policy", {})
# NOTE(jd): Initialize this one at run-time because we rely on conf
conf = pecan.request.conf
valid_agg_methods = list(
archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS_VALUES
)
ArchivePolicySchema = voluptuous.Schema({
voluptuous.Required("name"): six.text_type,
voluptuous.Required("back_window", default=0): voluptuous.All(
voluptuous.Coerce(int),
voluptuous.Range(min=0),
),
voluptuous.Required(
"aggregation_methods",
default=list(conf.archive_policy.default_aggregation_methods)):
valid_agg_methods,
voluptuous.Required("definition"): ArchivePolicyDefinitionSchema,
})
body = deserialize_and_validate(ArchivePolicySchema)
# Validate the data
try:
ap = archive_policy.ArchivePolicy.from_dict(body)
except ValueError as e:
abort(400, six.text_type(e))
enforce("create archive policy", ap)
try:
ap = pecan.request.indexer.create_archive_policy(ap)
except indexer.ArchivePolicyAlreadyExists as e:
abort(409, six.text_type(e))
location = "/archive_policy/" + ap.name
set_resp_location_hdr(location)
pecan.response.status = 201
return ap
@pecan.expose('json')
def get_all(self):
enforce("list archive policy", {})
return pecan.request.indexer.list_archive_policies()
class ArchivePolicyRulesController(rest.RestController):
@pecan.expose()
def _lookup(self, archive_policy_rule, *remainder):
apr = pecan.request.indexer.get_archive_policy_rule(
archive_policy_rule
)
if apr:
return ArchivePolicyRuleController(apr), remainder
abort(404, six.text_type(
indexer.NoSuchArchivePolicyRule(archive_policy_rule)))
@pecan.expose('json')
def post(self):
enforce("create archive policy rule", {})
ArchivePolicyRuleSchema = voluptuous.Schema({
voluptuous.Required("name"): six.text_type,
voluptuous.Required("metric_pattern"): six.text_type,
voluptuous.Required("archive_policy_name"): six.text_type,
})
body = deserialize_and_validate(ArchivePolicyRuleSchema)
enforce("create archive policy rule", body)
try:
ap = pecan.request.indexer.create_archive_policy_rule(
body['name'], body['metric_pattern'],
body['archive_policy_name']
)
except indexer.ArchivePolicyRuleAlreadyExists as e:
abort(409, six.text_type(e))
except indexer.NoSuchArchivePolicy as e:
abort(400, e)
location = "/archive_policy_rule/" + ap.name
set_resp_location_hdr(location)
pecan.response.status = 201
return ap
@pecan.expose('json')
def get_all(self):
enforce("list archive policy rule", {})
return pecan.request.indexer.list_archive_policy_rules()
class ArchivePolicyRuleController(rest.RestController):
def __init__(self, archive_policy_rule):
self.archive_policy_rule = archive_policy_rule
@pecan.expose('json')
def get(self):
enforce("get archive policy rule", self.archive_policy_rule)
return self.archive_policy_rule
@pecan.expose('json')
def patch(self):
ArchivePolicyRuleSchema = voluptuous.Schema({
voluptuous.Required("name"): six.text_type,
})
body = deserialize_and_validate(ArchivePolicyRuleSchema)
enforce("update archive policy rule", {})
try:
return pecan.request.indexer.update_archive_policy_rule(
self.archive_policy_rule.name, body["name"])
except indexer.UnsupportedArchivePolicyRuleChange as e:
abort(400, six.text_type(e))
@pecan.expose('json')
def delete(self):
# NOTE(jd) I don't think there's any point in fetching and passing the
# archive policy rule here, as the rule is probably checking the actual
# role of the user, not the content of the AP rule.
enforce("delete archive policy rule", {})
try:
pecan.request.indexer.delete_archive_policy_rule(
self.archive_policy_rule.name
)
except indexer.NoSuchArchivePolicyRule as e:
abort(404, six.text_type(e))
def MeasuresListSchema(measures):
try:
times = utils.to_timestamps([m['timestamp'] for m in measures])
except TypeError:
raise voluptuous.Invalid("unexpected measures format")
except ValueError as e:
raise voluptuous.Invalid("unexpected timestamp '%s'" % e)
try:
values = [float(i['value']) for i in measures]
except Exception:
raise voluptuous.Invalid("unexpected measures value")
return (incoming.Measure(t, v) for t, v in six.moves.zip(times, values))
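# A body accepted by MeasuresListSchema looks like (illustrative values):
#   [{"timestamp": "2017-01-01T12:00:00", "value": 43.1},
#    {"timestamp": "2017-01-01T12:05:00", "value": 12.0}]
# i.e. a list of objects with "timestamp" and "value" keys that is turned into
# incoming.Measure pairs.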
class MetricController(rest.RestController):
_custom_actions = {
'measures': ['POST', 'GET']
}
def __init__(self, metric):
self.metric = metric
def enforce_metric(self, rule):
enforce(rule, json.to_primitive(self.metric))
@pecan.expose('json')
def get_all(self):
self.enforce_metric("get metric")
return self.metric
@pecan.expose('json')
def post_measures(self):
self.enforce_metric("post measures")
measures = deserialize_and_validate(MeasuresListSchema)
if measures:
pecan.request.incoming.add_measures(self.metric.id, measures)
pecan.response.status = 202
@pecan.expose('json')
def get_measures(self, start=None, stop=None, aggregation='mean',
granularity=None, resample=None, refresh=False,
**param):
self.enforce_metric("get measures")
if resample:
if not granularity:
abort(400, 'A granularity must be specified to resample')
try:
resample = (resample if calendar.GROUPINGS.get(resample) else
utils.to_timespan(resample))
except ValueError as e:
abort(400, six.text_type(e))
if granularity is None:
granularity = [d.granularity
for d in self.metric.archive_policy.definition]
start, stop, _, _, _ = validate_qs(
start=start, stop=stop)
else:
start, stop, granularity, _, _ = validate_qs(
start=start, stop=stop, granularity=granularity)
if aggregation not in self.metric.archive_policy.aggregation_methods:
abort(404, {
"cause": "Aggregation method does not exist for this metric",
"detail": {
"metric": self.metric.id,
"aggregation_method": aggregation,
},
})
aggregations = []
for g in sorted(granularity, reverse=True):
agg = self.metric.archive_policy.get_aggregation(
aggregation, g)
if agg is None:
abort(404, six.text_type(
storage.AggregationDoesNotExist(
self.metric, aggregation, g)
))
aggregations.append(agg)
if (strtobool("refresh", refresh) and
pecan.request.incoming.has_unprocessed(self.metric.id)):
try:
pecan.request.chef.refresh_metrics(
[self.metric],
pecan.request.conf.api.operation_timeout)
except chef.SackAlreadyLocked:
abort(503, 'Unable to refresh metric: %s. Metric is locked. '
'Please try again.' % self.metric.id)
try:
results = pecan.request.storage.get_aggregated_measures(
{self.metric: aggregations},
start, stop, resample)[self.metric]
return [(timestamp, results[key].aggregation.granularity, value)
for key in sorted(results.keys(),
reverse=True)
for timestamp, value in results[key]]
except storage.AggregationDoesNotExist as e:
abort(404, six.text_type(e))
except storage.MetricDoesNotExist:
return []
@pecan.expose('json')
def delete(self):
self.enforce_metric("delete metric")
try:
pecan.request.indexer.delete_metric(self.metric.id)
except indexer.NoSuchMetric as e:
abort(404, six.text_type(e))
class MetricsController(rest.RestController):
@pecan.expose()
def _lookup(self, id, *remainder):
try:
metric_id = uuid.UUID(id)
except ValueError:
abort(404, six.text_type(indexer.NoSuchMetric(id)))
# Load details for ACL
metrics = pecan.request.indexer.list_metrics(
attribute_filter={"=": {"id": metric_id}}, details=True)
if not metrics:
abort(404, six.text_type(indexer.NoSuchMetric(id)))
return MetricController(metrics[0]), remainder
    # NOTE(jd) Define this method as if it were a voluptuous schema – it's
    # just a smarter version of a voluptuous schema, no?
@staticmethod
def MetricSchema(definition):
creator = pecan.request.auth_helper.get_current_user(
pecan.request)
# First basic validation
schema = voluptuous.Schema({
"archive_policy_name": six.text_type,
"resource_id": functools.partial(ResourceID, creator=creator),
"name": six.text_type,
voluptuous.Optional("unit"):
voluptuous.All(six.text_type, voluptuous.Length(max=31)),
})
definition = schema(definition)
archive_policy_name = definition.get('archive_policy_name')
name = definition.get('name')
if name and '/' in name:
abort(400, "'/' is not supported in metric name")
if archive_policy_name is None:
try:
ap = pecan.request.indexer.get_archive_policy_for_metric(name)
except indexer.NoArchivePolicyRuleMatch:
# NOTE(jd) Since this is a schema-like function, we
# should/could raise ValueError, but if we do so, voluptuous
# just returns a "invalid value" with no useful message – so we
# prefer to use abort() to make sure the user has the right
# error message
abort(400, "No archive policy name specified "
"and no archive policy rule found matching "
"the metric name %s" % name)
else:
definition['archive_policy_name'] = ap.name
resource_id = definition.get('resource_id')
if resource_id is None:
original_resource_id = None
else:
if name is None:
abort(400,
{"cause": "Attribute value error",
"detail": "name",
"reason": "Name cannot be null "
"if resource_id is not null"})
original_resource_id, resource_id = resource_id
enforce("create metric", {
"creator": creator,
"archive_policy_name": archive_policy_name,
"resource_id": resource_id,
"original_resource_id": original_resource_id,
"name": name,
"unit": definition.get('unit'),
})
return definition
@pecan.expose('json')
def post(self):
creator = pecan.request.auth_helper.get_current_user(
pecan.request)
body = deserialize_and_validate(self.MetricSchema)
resource_id = body.get('resource_id')
if resource_id is not None:
resource_id = resource_id[1]
try:
m = pecan.request.indexer.create_metric(
uuid.uuid4(),
creator,
resource_id=resource_id,
name=body.get('name'),
unit=body.get('unit'),
archive_policy_name=body['archive_policy_name'])
except indexer.NoSuchArchivePolicy as e:
abort(400, six.text_type(e))
except indexer.NamedMetricAlreadyExists as e:
abort(400, e)
set_resp_location_hdr("/metric/" + str(m.id))
pecan.response.status = 201
return m
MetricListSchema = voluptuous.Schema({
"user_id": six.text_type,
"project_id": six.text_type,
"creator": six.text_type,
"name": six.text_type,
"id": six.text_type,
"unit": six.text_type,
"archive_policy_name": six.text_type,
"status": voluptuous.Any("active", "delete"),
}, extra=voluptuous.REMOVE_EXTRA)
@classmethod
@pecan.expose('json')
def get_all(cls, **kwargs):
filtering = cls.MetricListSchema(kwargs)
# Compat with old user/project API
provided_user_id = filtering.pop('user_id', None)
provided_project_id = filtering.pop('project_id', None)
if provided_user_id is None and provided_project_id is None:
provided_creator = filtering.pop('creator', None)
else:
provided_creator = (
(provided_user_id or "")
+ ":"
+ (provided_project_id or "")
)
pagination_opts = get_pagination_options(kwargs,
METRIC_DEFAULT_PAGINATION)
attr_filters = []
if provided_creator is not None:
attr_filters.append({"=": {"creator": provided_creator}})
for k, v in six.iteritems(filtering):
attr_filters.append({"=": {k: v}})
policy_filter = pecan.request.auth_helper.get_metric_policy_filter(
pecan.request, "list metric")
resource_policy_filter = (
pecan.request.auth_helper.get_resource_policy_filter(
pecan.request, "list metric", resource_type=None,
prefix="resource")
)
try:
metrics = pecan.request.indexer.list_metrics(
attribute_filter={"and": attr_filters},
policy_filter=policy_filter,
resource_policy_filter=resource_policy_filter,
**pagination_opts)
if metrics and len(metrics) >= pagination_opts['limit']:
set_resp_link_hdr(str(metrics[-1].id), kwargs, pagination_opts)
return metrics
except indexer.InvalidPagination as e:
abort(400, six.text_type(e))
_MetricsSchema = voluptuous.Schema({
six.text_type: voluptuous.Any(utils.UUID,
MetricsController.MetricSchema),
})
def MetricsSchema(data):
# NOTE(jd) Before doing any kind of validation, copy the metric name
# into the metric definition. This is required so we have the name
# available when doing the metric validation with its own MetricSchema,
# and so we can do things such as applying archive policy rules.
if isinstance(data, dict):
for metric_name, metric_def in six.iteritems(data):
if isinstance(metric_def, dict):
metric_def['name'] = metric_name
return _MetricsSchema(data)
class NamedMetricController(rest.RestController):
def __init__(self, resource_id, resource_type):
self.resource_id = resource_id
self.resource_type = resource_type
@pecan.expose()
def _lookup(self, name, *remainder):
m = pecan.request.indexer.list_metrics(
details=True,
attribute_filter={"and": [
{"=": {"name": name}},
{"=": {"resource_id": self.resource_id}},
]})
if m:
return MetricController(m[0]), remainder
resource = pecan.request.indexer.get_resource(self.resource_type,
self.resource_id)
if resource:
abort(404, six.text_type(indexer.NoSuchMetric(name)))
else:
abort(404, six.text_type(indexer.NoSuchResource(self.resource_id)))
@pecan.expose('json')
def post(self):
resource = pecan.request.indexer.get_resource(
self.resource_type, self.resource_id)
if not resource:
abort(404, six.text_type(indexer.NoSuchResource(self.resource_id)))
enforce("update resource", resource)
metrics = deserialize_and_validate(MetricsSchema)
try:
r = pecan.request.indexer.update_resource(
self.resource_type,
self.resource_id,
metrics=metrics,
append_metrics=True,
create_revision=False)
except (indexer.NoSuchMetric,
indexer.NoSuchArchivePolicy,
ValueError) as e:
abort(400, six.text_type(e))
except indexer.NamedMetricAlreadyExists as e:
abort(409, six.text_type(e))
except indexer.NoSuchResource as e:
abort(404, six.text_type(e))
return r.metrics
@pecan.expose('json')
def get_all(self):
resource = pecan.request.indexer.get_resource(
self.resource_type, self.resource_id)
if not resource:
abort(404, six.text_type(indexer.NoSuchResource(self.resource_id)))
enforce("get resource", resource)
return pecan.request.indexer.list_metrics(
attribute_filter={"=": {"resource_id": self.resource_id}})
class ResourceHistoryController(rest.RestController):
def __init__(self, resource_id, resource_type):
self.resource_id = resource_id
self.resource_type = resource_type
@pecan.expose('json')
def get(self, **kwargs):
details = get_bool_param('details', kwargs)
pagination_opts = get_pagination_options(
kwargs, RESOURCE_DEFAULT_PAGINATION)
resource = pecan.request.indexer.get_resource(
self.resource_type, self.resource_id)
if not resource:
abort(404, six.text_type(indexer.NoSuchResource(self.resource_id)))
enforce("get resource", resource)
try:
resources = pecan.request.indexer.list_resources(
self.resource_type,
attribute_filter={"=": {"id": self.resource_id}},
details=details,
history=True,
**pagination_opts
)
if resources and len(resources) >= pagination_opts['limit']:
marker = "%s@%s" % (resources[-1].id, resources[-1].revision)
set_resp_link_hdr(marker, kwargs, pagination_opts)
return resources
except indexer.IndexerException as e:
abort(400, six.text_type(e))
def etag_precondition_check(obj):
etag, lastmodified = obj.etag, obj.lastmodified
# NOTE(sileht): Checks and order come from rfc7232
    # in webob, the '*' and the absence of the header are handled by
    # if_match.__contains__() and if_none_match.__contains__()
    # and are identical...
if etag not in pecan.request.if_match:
abort(412)
elif (not pecan.request.environ.get("HTTP_IF_MATCH")
and pecan.request.if_unmodified_since
and pecan.request.if_unmodified_since < lastmodified):
abort(412)
if etag in pecan.request.if_none_match:
if pecan.request.method in ['GET', 'HEAD']:
abort(304)
else:
abort(412)
elif (not pecan.request.environ.get("HTTP_IF_NONE_MATCH")
and pecan.request.if_modified_since
and (pecan.request.if_modified_since >=
lastmodified)
and pecan.request.method in ['GET', 'HEAD']):
abort(304)
def etag_set_headers(obj):
pecan.response.etag = obj.etag
pecan.response.last_modified = obj.lastmodified
def AttributesPath(value):
if value.startswith("/attributes"):
return value
raise ValueError("Only attributes can be modified")
ResourceTypeJsonPatchSchema = voluptuous.Schema([{
"op": voluptuous.Any("add", "remove"),
"path": AttributesPath,
voluptuous.Optional("value"): dict,
}])
class ResourceTypeController(rest.RestController):
def __init__(self, name):
self._name = name
@pecan.expose('json')
def get(self):
try:
rt = pecan.request.indexer.get_resource_type(self._name)
except indexer.NoSuchResourceType as e:
abort(404, six.text_type(e))
enforce("get resource type", rt)
return rt
@pecan.expose('json')
def patch(self):
# NOTE(sileht): should we check for "application/json-patch+json"
# Content-Type ?
try:
rt = pecan.request.indexer.get_resource_type(self._name)
except indexer.NoSuchResourceType as e:
abort(404, six.text_type(e))
enforce("update resource type", rt)
# Ensure this is a valid jsonpatch dict
patch = deserialize_and_validate(
ResourceTypeJsonPatchSchema,
expected_content_types=["application/json-patch+json"])
# Add new attributes to the resource type
rt_json_current = rt.jsonify()
try:
rt_json_next = jsonpatch.apply_patch(rt_json_current, patch)
except jsonpatch.JsonPatchException as e:
abort(400, six.text_type(e))
del rt_json_next['state']
# Validate that the whole new resource_type is valid
schema = pecan.request.indexer.get_resource_type_schema()
try:
rt_json_next = voluptuous.Schema(schema.for_update, required=True)(
rt_json_next)
except voluptuous.Error as e:
abort(400, "Invalid input: %s" % e)
# Get only newly formatted and deleted attributes
add_attrs = {k: v for k, v in rt_json_next["attributes"].items()
if k not in rt_json_current["attributes"]}
del_attrs = [k for k in rt_json_current["attributes"]
if k not in rt_json_next["attributes"]]
update_attrs = self.retrieve_update_attrs(rt_json_current,
rt_json_next)
if update_attrs:
LOG.debug("Updating attributes [%s] for resource-type [%s]",
update_attrs, self._name)
if not add_attrs and not del_attrs and not update_attrs:
            # NOTE(sileht): just return the resource; the requested changes
            # are a no-op
return rt
try:
add_attrs = schema.attributes_from_dict(add_attrs)
update_attrs = self.create_update_attrs(schema, update_attrs)
except resource_type.InvalidResourceAttribute as e:
abort(400, "Invalid input: %s" % e)
try:
return pecan.request.indexer.update_resource_type(
self._name, add_attributes=add_attrs,
del_attributes=del_attrs, update_attributes=update_attrs)
except indexer.NoSuchResourceType as e:
abort(400, six.text_type(e))
def create_update_attrs(self, schema, update_attrs):
new_attrs = dict(map(lambda entry: (entry[0], entry[1][1]),
update_attrs.items()))
old_attrs = dict(map(lambda entry: (entry[0], entry[1][0]),
update_attrs.items()))
update_attrs_new = schema.attributes_from_dict(new_attrs)
update_attrs_new.sort(key=lambda attr: attr.name)
update_attrs_old = schema.attributes_from_dict(old_attrs)
update_attrs_old.sort(key=lambda attr: attr.name)
update_attrs = []
for i in range(len(update_attrs_new)):
update_attrs.append((update_attrs_new[i],
update_attrs_old[i]))
return update_attrs
def retrieve_update_attrs(self, rt_json_current, rt_json_next):
update_attrs = {}
for k, v in rt_json_current["attributes"].items():
if k in rt_json_next["attributes"]:
self.validate_types(k, rt_json_next, v)
should_be_updated = False
for kc, vc in v.items():
if vc != rt_json_next["attributes"][k][kc]:
should_be_updated = True
break
if should_be_updated:
update_attrs[k] = (v, rt_json_next["attributes"][k])
return update_attrs
def validate_types(self, attribute, new_json, old_json):
old_type = old_json['type']
new_type = new_json["attributes"][attribute]['type']
if new_type != old_type:
msg = "Type update is not available yet. Changing %s to %s " \
"for attribute %s of resource %s" % (old_type, new_type,
attribute, self._name)
abort(400, msg)
@pecan.expose('json')
def delete(self):
try:
pecan.request.indexer.get_resource_type(self._name)
except indexer.NoSuchResourceType as e:
abort(404, six.text_type(e))
enforce("delete resource type", resource_type)
try:
pecan.request.indexer.delete_resource_type(self._name)
except (indexer.NoSuchResourceType,
indexer.ResourceTypeInUse) as e:
abort(400, six.text_type(e))
class ResourceTypesController(rest.RestController):
@pecan.expose()
def _lookup(self, name, *remainder):
return ResourceTypeController(name), remainder
@pecan.expose('json')
def post(self):
schema = pecan.request.indexer.get_resource_type_schema()
body = deserialize_and_validate(schema)
body["state"] = "creating"
try:
rt = schema.resource_type_from_dict(**body)
except resource_type.InvalidResourceAttribute as e:
abort(400, "Invalid input: %s" % e)
enforce("create resource type", body)
try:
rt = pecan.request.indexer.create_resource_type(rt)
except indexer.ResourceTypeAlreadyExists as e:
abort(409, six.text_type(e))
set_resp_location_hdr("/resource_type/" + rt.name)
pecan.response.status = 201
return rt
@pecan.expose('json')
def get_all(self, **kwargs):
enforce("list resource type", {})
try:
return pecan.request.indexer.list_resource_types()
except indexer.IndexerException as e:
abort(400, six.text_type(e))
def ResourceSchema(schema):
base_schema = {
voluptuous.Optional('started_at'): utils.to_datetime,
voluptuous.Optional('ended_at'): utils.to_datetime,
voluptuous.Optional('user_id'): voluptuous.Any(None, six.text_type),
voluptuous.Optional('project_id'): voluptuous.Any(None, six.text_type),
voluptuous.Optional('metrics'): MetricsSchema,
}
base_schema.update(schema)
return base_schema
class ResourceController(rest.RestController):
def __init__(self, resource_type, id):
self._resource_type = resource_type
creator = pecan.request.auth_helper.get_current_user(
pecan.request)
try:
self.id = utils.ResourceUUID(id, creator)
except ValueError:
abort(404, six.text_type(indexer.NoSuchResource(id)))
self.metric = NamedMetricController(str(self.id), self._resource_type)
self.history = ResourceHistoryController(str(self.id),
self._resource_type)
@pecan.expose('json')
def get(self):
resource = pecan.request.indexer.get_resource(
self._resource_type, self.id, with_metrics=True)
if resource:
enforce("get resource", resource)
etag_precondition_check(resource)
etag_set_headers(resource)
return resource
abort(404, six.text_type(indexer.NoSuchResource(self.id)))
@pecan.expose('json')
def patch(self):
resource = pecan.request.indexer.get_resource(
self._resource_type, self.id, with_metrics=True)
if not resource:
abort(404, six.text_type(indexer.NoSuchResource(self.id)))
enforce("update resource", resource)
etag_precondition_check(resource)
body = deserialize_and_validate(
schema_for(self._resource_type),
required=False)
if len(body) == 0:
etag_set_headers(resource)
return resource
for k, v in six.iteritems(body):
if k != 'metrics' and getattr(resource, k) != v:
create_revision = True
break
else:
if 'metrics' not in body:
# No need to go further, we assume the db resource
# doesn't change between the get and update
return resource
create_revision = False
try:
resource = pecan.request.indexer.update_resource(
self._resource_type,
self.id,
create_revision=create_revision,
**body)
except (indexer.NoSuchMetric,
indexer.NoSuchArchivePolicy,
ValueError) as e:
abort(400, six.text_type(e))
except indexer.NoSuchResource as e:
abort(404, six.text_type(e))
etag_set_headers(resource)
return resource
@pecan.expose('json')
def delete(self):
resource = pecan.request.indexer.get_resource(
self._resource_type, self.id)
if not resource:
abort(404, six.text_type(indexer.NoSuchResource(self.id)))
enforce("delete resource", resource)
etag_precondition_check(resource)
try:
pecan.request.indexer.delete_resource(self.id)
except indexer.NoSuchResource as e:
abort(404, six.text_type(e))
def schema_for(resource_type):
resource_type = pecan.request.indexer.get_resource_type(resource_type)
return ResourceSchema(resource_type.schema)
def ResourceUUID(value, creator):
try:
return utils.ResourceUUID(value, creator)
except ValueError as e:
raise voluptuous.Invalid(e)
def ResourceID(value, creator):
"""Convert value to a resource ID.
:return: A tuple (original_resource_id, resource_id)
"""
return (six.text_type(value), ResourceUUID(value, creator))
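# For example (illustrative), ResourceID("server-01", creator) returns
# ("server-01", <uuid>), where the UUID is computed by utils.ResourceUUID from
# the original ID (and the creator when the ID is not already a UUID); a value
# that is already a UUID string is simply parsed and returned as the second
# element.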
class ResourcesController(rest.RestController):
def __init__(self, resource_type):
self._resource_type = resource_type
@pecan.expose()
def _lookup(self, id, *remainder):
return ResourceController(self._resource_type, id), remainder
@pecan.expose('json')
def post(self):
        # NOTE(sileht): we need to copy the dict because we modify it below,
        # and we don't want later calls to see the added "id" key
schema = dict(schema_for(self._resource_type))
creator = pecan.request.auth_helper.get_current_user(
pecan.request)
schema["id"] = functools.partial(ResourceID, creator=creator)
body = deserialize_and_validate(schema)
body["original_resource_id"], body["id"] = body["id"]
target = {
"resource_type": self._resource_type,
}
target.update(body)
enforce("create resource", target)
rid = body['id']
del body['id']
try:
resource = pecan.request.indexer.create_resource(
self._resource_type, rid, creator,
**body)
except (ValueError,
indexer.NoSuchMetric,
indexer.NoSuchArchivePolicy) as e:
abort(400, six.text_type(e))
except indexer.ResourceAlreadyExists as e:
abort(409, six.text_type(e))
set_resp_location_hdr("/resource/"
+ self._resource_type + "/"
+ six.text_type(resource.id))
etag_set_headers(resource)
pecan.response.status = 201
return resource
@pecan.expose('json')
def get_all(self, **kwargs):
details = get_bool_param('details', kwargs)
history = get_bool_param('history', kwargs)
pagination_opts = get_pagination_options(
kwargs, RESOURCE_DEFAULT_PAGINATION)
json_attrs = arg_to_list(kwargs.get('attrs', None))
policy_filter = pecan.request.auth_helper.get_resource_policy_filter(
pecan.request, "list resource", self._resource_type)
try:
            # FIXME(sileht): the next API version should return
            # {'resources': [...], 'links': [ ... pagination rel ...]}
resources = pecan.request.indexer.list_resources(
self._resource_type,
attribute_filter=policy_filter,
details=details,
history=history,
**pagination_opts
)
if resources and len(resources) >= pagination_opts['limit']:
if history:
marker = "%s@%s" % (resources[-1].id,
resources[-1].revision)
else:
marker = str(resources[-1].id)
set_resp_link_hdr(marker, kwargs, pagination_opts)
return [r.jsonify(json_attrs) for r in resources]
except indexer.IndexerException as e:
abort(400, six.text_type(e))
@pecan.expose('json')
def delete(self, **kwargs):
# NOTE(sileht): Don't allow empty filter, this is going to delete
# the entire database.
if pecan.request.body:
attr_filter = deserialize_and_validate(ResourceSearchSchema)
elif kwargs.get("filter"):
attr_filter = QueryStringSearchAttrFilter.parse(kwargs["filter"])
else:
attr_filter = None
        # voluptuous checks everything, but it is better to have this
        # check here as well.
if not attr_filter:
abort(400, "caution: the query can not be empty, or it will \
delete entire database")
policy_filter = pecan.request.auth_helper.get_resource_policy_filter(
pecan.request,
"delete resources", self._resource_type)
if policy_filter:
attr_filter = {"and": [policy_filter, attr_filter]}
try:
delete_num = pecan.request.indexer.delete_resources(
self._resource_type, attribute_filter=attr_filter)
except indexer.IndexerException as e:
abort(400, six.text_type(e))
return {"deleted": delete_num}
class ResourcesByTypeController(rest.RestController):
@pecan.expose('json')
def get_all(self):
return dict(
(rt.name,
pecan.request.application_url + '/resource/' + rt.name)
for rt in pecan.request.indexer.list_resource_types())
@pecan.expose()
def _lookup(self, resource_type, *remainder):
try:
pecan.request.indexer.get_resource_type(resource_type)
except indexer.NoSuchResourceType as e:
abort(404, six.text_type(e))
return ResourcesController(resource_type), remainder
class QueryStringSearchAttrFilter(object):
uninary_operators = ("not", )
binary_operator = (u">=", u"<=", u"!=", u">", u"<", u"=", u"==", u"eq",
u"ne", u"lt", u"gt", u"ge", u"le", u"in", u"like", u"≠",
u"≥", u"≤")
multiple_operators = (u"and", u"or", u"∧", u"∨")
operator = pyparsing.Regex(u"|".join(binary_operator))
null = pyparsing.Regex("None|none|null").setParseAction(
pyparsing.replaceWith(None))
boolean = "False|True|false|true"
boolean = pyparsing.Regex(boolean).setParseAction(
lambda t: t[0].lower() == "true")
hex_string = lambda n: pyparsing.Word(pyparsing.hexnums, exact=n)
uuid_string = pyparsing.Combine(
hex_string(8) + (pyparsing.Optional("-") + hex_string(4)) * 3 +
pyparsing.Optional("-") + hex_string(12))
number = r"[+-]?\d+(:?\.\d*)?(:?[eE][+-]?\d+)?"
number = pyparsing.Regex(number).setParseAction(lambda t: float(t[0]))
identifier = pyparsing.Word(pyparsing.alphas, pyparsing.alphanums + "_")
quoted_string = pyparsing.QuotedString('"') | pyparsing.QuotedString("'")
comparison_term = pyparsing.Forward()
in_list = pyparsing.Group(
pyparsing.Suppress('[') +
pyparsing.Optional(pyparsing.delimitedList(comparison_term)) +
pyparsing.Suppress(']'))("list")
comparison_term << (null | boolean | uuid_string | identifier | number |
quoted_string | in_list)
condition = pyparsing.Group(comparison_term + operator + comparison_term)
expr = pyparsing.infixNotation(condition, [
("not", 1, pyparsing.opAssoc.RIGHT, ),
("and", 2, pyparsing.opAssoc.LEFT, ),
("∧", 2, pyparsing.opAssoc.LEFT, ),
("or", 2, pyparsing.opAssoc.LEFT, ),
("∨", 2, pyparsing.opAssoc.LEFT, ),
])
@classmethod
def _parsed_query2dict(cls, parsed_query):
result = None
while parsed_query:
part = parsed_query.pop()
if part in cls.binary_operator:
result = {part: {parsed_query.pop(): result}}
elif part in cls.multiple_operators:
if result.get(part):
result[part].append(
cls._parsed_query2dict(parsed_query.pop()))
else:
result = {part: [result]}
elif part in cls.uninary_operators:
result = {part: result}
elif isinstance(part, pyparsing.ParseResults):
kind = part.getName()
if kind == "list":
res = part.asList()
else:
res = cls._parsed_query2dict(part)
if result is None:
result = res
elif isinstance(result, dict):
list(result.values())[0].append(res)
else:
result = part
return result
@classmethod
def _parse(cls, query):
try:
parsed_query = cls.expr.parseString(query, parseAll=True)[0]
except pyparsing.ParseException as e:
raise abort(400, "Invalid filter: %s" % str(e))
return cls._parsed_query2dict(parsed_query)
@classmethod
def parse(cls, query):
attr_filter = cls._parse(query)
return validate(ResourceSearchSchema, attr_filter, required=True)
def ResourceSearchSchema(v):
return _ResourceSearchSchema()(v)
# NOTE(sileht): indexer will cast this type to the real attribute
# type, here we just want to be sure this is not a dict or a list
ResourceSearchSchemaAttributeValue = voluptuous.Any(
six.text_type, float, int, bool, None)
NotIDKey = voluptuous.All(six.text_type, voluptuous.NotIn(["id"]))
def _ResourceSearchSchema():
user = pecan.request.auth_helper.get_current_user(
pecan.request)
_ResourceUUID = functools.partial(ResourceUUID, creator=user)
return voluptuous.Schema(
voluptuous.All(
voluptuous.Length(min=0, max=1),
{
voluptuous.Any(
u"=", u"==", u"eq",
u"<", u"lt",
u">", u"gt",
u"<=", u"≤", u"le",
u">=", u"≥", u"ge",
u"!=", u"≠", u"ne",
): voluptuous.All(
voluptuous.Length(min=1, max=1),
{"id": _ResourceUUID,
NotIDKey: ResourceSearchSchemaAttributeValue},
),
u"like": voluptuous.All(
voluptuous.Length(min=1, max=1),
{NotIDKey: ResourceSearchSchemaAttributeValue},
),
u"in": voluptuous.All(
voluptuous.Length(min=1, max=1),
{"id": voluptuous.All(
[_ResourceUUID],
voluptuous.Length(min=1)),
NotIDKey: voluptuous.All(
[ResourceSearchSchemaAttributeValue],
voluptuous.Length(min=1))}
),
voluptuous.Any(
u"and", u"∨",
u"or", u"∧",
): voluptuous.All(
[ResourceSearchSchema], voluptuous.Length(min=1)
),
u"not": ResourceSearchSchema,
}
)
)
class SearchResourceTypeController(rest.RestController):
def __init__(self, resource_type):
self._resource_type = resource_type
def _search(self, **kwargs):
if pecan.request.body:
attr_filter = deserialize_and_validate(ResourceSearchSchema)
elif kwargs.get("filter"):
attr_filter = QueryStringSearchAttrFilter.parse(kwargs["filter"])
else:
attr_filter = None
details = get_bool_param('details', kwargs)
history = get_bool_param('history', kwargs)
pagination_opts = get_pagination_options(
kwargs, RESOURCE_DEFAULT_PAGINATION)
policy_filter = pecan.request.auth_helper.get_resource_policy_filter(
pecan.request, "search resource", self._resource_type)
if policy_filter:
if attr_filter:
attr_filter = {"and": [
policy_filter,
attr_filter
]}
else:
attr_filter = policy_filter
resources = pecan.request.indexer.list_resources(
self._resource_type,
attribute_filter=attr_filter,
details=details,
history=history,
**pagination_opts)
if resources and len(resources) >= pagination_opts['limit']:
if history:
marker = "%s@%s" % (resources[-1].id,
resources[-1].revision)
else:
marker = str(resources[-1].id)
set_resp_link_hdr(marker, kwargs, pagination_opts)
return resources
@pecan.expose('json')
def post(self, **kwargs):
json_attrs = arg_to_list(kwargs.get('attrs', None))
try:
return [r.jsonify(json_attrs) for r in self._search(**kwargs)]
except indexer.IndexerException as e:
abort(400, six.text_type(e))
class SearchResourceController(rest.RestController):
@pecan.expose()
def _lookup(self, resource_type, *remainder):
try:
pecan.request.indexer.get_resource_type(resource_type)
except indexer.NoSuchResourceType as e:
abort(404, six.text_type(e))
return SearchResourceTypeController(resource_type), remainder
def _MetricSearchSchema(v):
"""Helper method to indirect the recursivity of the search schema"""
return SearchMetricController.MetricSearchSchema(v)
def _MetricSearchOperationSchema(v):
"""Helper method to indirect the recursivity of the search schema"""
return SearchMetricController.MetricSearchOperationSchema(v)
class SearchMetricController(rest.RestController):
MetricSearchOperationSchema = voluptuous.Schema(
voluptuous.All(
voluptuous.Length(min=1, max=1),
{
voluptuous.Any(
u"=", u"==", u"eq",
u"<", u"lt",
u">", u"gt",
u"<=", u"≤", u"le",
u">=", u"≥", u"ge",
u"!=", u"≠", u"ne",
u"%", u"mod",
u"+", u"add",
u"-", u"sub",
u"*", u"×", u"mul",
u"/", u"÷", u"div",
u"**", u"^", u"pow",
): voluptuous.Any(
float, int,
voluptuous.All(
[float, int,
voluptuous.Any(_MetricSearchOperationSchema)],
voluptuous.Length(min=2, max=2),
),
),
},
)
)
MetricSearchSchema = voluptuous.Schema(
voluptuous.Any(
MetricSearchOperationSchema,
voluptuous.All(
voluptuous.Length(min=1, max=1),
{
voluptuous.Any(
u"and", u"∨",
u"or", u"∧",
u"not",
): [_MetricSearchSchema],
}
)
)
)
class MeasureQuery(object):
binary_operators = {
u"=": operator.eq,
u"==": operator.eq,
u"eq": operator.eq,
u"<": operator.lt,
u"lt": operator.lt,
u">": operator.gt,
u"gt": operator.gt,
u"<=": operator.le,
u"≤": operator.le,
u"le": operator.le,
u">=": operator.ge,
u"≥": operator.ge,
u"ge": operator.ge,
u"!=": operator.ne,
u"≠": operator.ne,
u"ne": operator.ne,
u"%": operator.mod,
u"mod": operator.mod,
u"+": operator.add,
u"add": operator.add,
u"-": operator.sub,
u"sub": operator.sub,
u"*": operator.mul,
u"×": operator.mul,
u"mul": operator.mul,
u"/": operator.truediv,
u"÷": operator.truediv,
u"div": operator.truediv,
u"**": operator.pow,
u"^": operator.pow,
u"pow": operator.pow,
}
multiple_operators = {
u"or": any,
u"∨": any,
u"and": all,
u"∧": all,
}
def __init__(self, tree):
self._eval = self.build_evaluator(tree)
def __call__(self, value):
return self._eval(value)
def build_evaluator(self, tree):
try:
operator, nodes = list(tree.items())[0]
except Exception:
return lambda value: tree
try:
op = self.multiple_operators[operator]
except KeyError:
try:
op = self.binary_operators[operator]
except KeyError:
raise self.InvalidQuery("Unknown operator %s" % operator)
return self._handle_binary_op(op, nodes)
return self._handle_multiple_op(op, nodes)
def _handle_multiple_op(self, op, nodes):
elements = [self.build_evaluator(node) for node in nodes]
return lambda value: op((e(value) for e in elements))
def _handle_binary_op(self, op, node):
try:
iterator = iter(node)
except Exception:
return lambda value: op(value, node)
nodes = list(iterator)
if len(nodes) != 2:
raise self.InvalidQuery(
"Binary operator %s needs 2 arguments, %d given" %
(op, len(nodes)))
node0 = self.build_evaluator(node[0])
node1 = self.build_evaluator(node[1])
return lambda value: op(node0(value), node1(value))
class InvalidQuery(Exception):
pass
@pecan.expose('json')
def post(self, metric_id, start=None, stop=None, aggregation='mean',
granularity=None):
metrics = pecan.request.indexer.list_metrics(
attribute_filter={"in": {"id": arg_to_list(metric_id)}})
for metric in metrics:
enforce("search metric", metric)
if not pecan.request.body:
abort(400, "No query specified in body")
query = deserialize_and_validate(self.MetricSearchSchema)
if start is not None:
try:
start = utils.to_timestamp(start)
except Exception:
abort(400, "Invalid value for start")
if stop is not None:
try:
stop = utils.to_timestamp(stop)
except Exception:
abort(400, "Invalid value for stop")
try:
predicate = self.MeasureQuery(query)
except self.MeasureQuery.InvalidQuery as e:
abort(400, six.text_type(e))
if granularity is not None:
granularity = sorted(
map(utils.to_timespan, arg_to_list(granularity)),
reverse=True)
metrics_and_aggregations = collections.defaultdict(list)
for metric in metrics:
if granularity is None:
granularity = sorted((
d.granularity
for d in metric.archive_policy.definition),
reverse=True)
for gr in granularity:
agg = metric.archive_policy.get_aggregation(
aggregation, gr)
if agg is None:
abort(400,
storage.AggregationDoesNotExist(
metric, aggregation, gr))
metrics_and_aggregations[metric].append(agg)
try:
timeseries = pecan.request.storage.get_aggregated_measures(
metrics_and_aggregations, start, stop)
except storage.MetricDoesNotExist as e:
# This can happen if all the metrics have been created but one
# doesn't have any measures yet.
abort(400, e)
return {
str(metric.id): [
(timestamp, aggregation.granularity, value)
for aggregation, ts in six.iteritems(aggregations_and_ts)
for timestamp, value in ts
if predicate(value)
]
for metric, aggregations_and_ts in six.iteritems(timeseries)
}
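# NOTE: illustrative sketch only, not part of the upstream module. It shows the
# kind of query tree MetricSearchSchema accepts and how MeasureQuery evaluates
# it against a single measure value; the thresholds are arbitrary.
def _example_measure_query():
    query = {"and": [{">=": 10}, {"<": 100}]}
    predicate = SearchMetricController.MeasureQuery(query)
    return predicate(42), predicate(7)  # -> (True, False)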
class ResourcesMetricsMeasuresBatchController(rest.RestController):
@staticmethod
def BackwardCompatibleMeasuresList(v):
v = voluptuous.Schema(
voluptuous.Any(MeasuresListSchema,
{voluptuous.Optional("archive_policy_name"):
six.text_type,
voluptuous.Optional("unit"):
six.text_type,
"measures": MeasuresListSchema}),
required=True)(v)
if isinstance(v, dict):
return v
else:
# Old format
return {"measures": v}
@pecan.expose('json')
def post(self, create_metrics=False):
creator = pecan.request.auth_helper.get_current_user(
pecan.request)
MeasuresBatchSchema = voluptuous.Schema(
{functools.partial(ResourceID, creator=creator):
{six.text_type: self.BackwardCompatibleMeasuresList}})
body = deserialize_and_validate(MeasuresBatchSchema)
known_metrics = []
unknown_metrics = []
unknown_resources = []
body_by_rid = {}
attribute_filter = {"or": []}
for original_resource_id, resource_id in body:
names = list(body[(original_resource_id, resource_id)].keys())
if names:
attribute_filter["or"].append({"and": [
{"=": {"resource_id": resource_id}},
{"in": {"name": names}}]})
if not attribute_filter["or"]:
pecan.response.status = 202
return
all_metrics = collections.defaultdict(list)
for metric in pecan.request.indexer.list_metrics(
attribute_filter=attribute_filter):
all_metrics[metric.resource_id].append(metric)
for original_resource_id, resource_id in body:
r = body[(original_resource_id, resource_id)]
body_by_rid[resource_id] = r
names = list(r.keys())
metrics = all_metrics[resource_id]
known_names = [m.name for m in metrics]
if strtobool("create_metrics", create_metrics):
already_exists_names = []
for name in names:
if name not in known_names:
metric_data = {"name": name}
for attr in ["archive_policy_name", "unit"]:
if attr in r[name]:
metric_data[attr] = r[name][attr]
metric = MetricsController.MetricSchema(metric_data)
try:
m = pecan.request.indexer.create_metric(
uuid.uuid4(),
creator=creator,
resource_id=resource_id,
name=metric.get('name'),
unit=metric.get('unit'),
archive_policy_name=metric[
'archive_policy_name'])
except indexer.NamedMetricAlreadyExists as e:
already_exists_names.append(e.metric_name)
except indexer.NoSuchResource:
unknown_resources.append({
'resource_id': six.text_type(resource_id),
'original_resource_id': original_resource_id})
break
except indexer.IndexerException as e:
                            # This catches NoSuchArchivePolicy, which is
                            # unlikely but still possible
abort(400, six.text_type(e))
else:
known_metrics.append(m)
if already_exists_names:
# Add metrics created in the meantime
known_names.extend(already_exists_names)
known_metrics.extend(
pecan.request.indexer.list_metrics(
attribute_filter={"and": [
{"=": {"resource_id": resource_id}},
{"in": {"name": already_exists_names}},
]}))
elif len(names) != len(metrics):
unknown_metrics.extend(
["%s/%s" % (six.text_type(resource_id), m)
for m in names if m not in known_names])
known_metrics.extend(metrics)
if unknown_resources:
abort(400, {"cause": "Unknown resources",
"detail": unknown_resources})
if unknown_metrics:
abort(400, "Unknown metrics: %s" % ", ".join(
sorted(unknown_metrics)))
for metric in known_metrics:
enforce("post measures", metric)
pecan.request.incoming.add_measures_batch(
dict((metric.id,
body_by_rid[metric.resource_id][metric.name]["measures"])
for metric in known_metrics))
pecan.response.status = 202
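# NOTE: illustrative sketch only, not part of the upstream module. A resource
# batch payload maps resource ids to named metrics; each metric accepts either
# the bare measures list (old format) or a dict that can also carry the
# archive policy and unit used when create_metrics is enabled. The id, names
# and values below are made up.
_EXAMPLE_RESOURCE_BATCH_PAYLOAD = {
    "75C44741-CC60-4033-804E-2D3098C7D2E9": {
        "cpu_util": {
            "archive_policy_name": "low",
            "unit": "%",
            "measures": [{"timestamp": "2017-01-01T12:00:00", "value": 1.5}],
        },
        "disk.iops": [{"timestamp": "2017-01-01T12:00:00", "value": 42}],
    },
}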
class MetricsMeasuresBatchController(rest.RestController):
    # NOTE(sileht): we don't allow mixing both formats, so that we don't have
    # to deal with id collisions that can occur between a metric_id and a
    # resource_id. While JSON allows duplicate keys in a dict payload, only
    # the last one is retained by the Python json module when building the
    # dict.
MeasuresBatchSchema = voluptuous.Schema(
{utils.UUID: MeasuresListSchema}
)
@pecan.expose("json")
def post(self):
body = deserialize_and_validate(self.MeasuresBatchSchema)
metrics = pecan.request.indexer.list_metrics(
attribute_filter={"in": {"id": list(body.keys())}})
if len(metrics) != len(body):
missing_metrics = sorted(set(body) - set(m.id for m in metrics))
abort(400, "Unknown metrics: %s" % ", ".join(
six.moves.map(str, missing_metrics)))
for metric in metrics:
enforce("post measures", metric)
pecan.request.incoming.add_measures_batch(
dict((metric.id, body[metric.id]) for metric in
metrics))
pecan.response.status = 202
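# NOTE: illustrative sketch only, not part of the upstream module. The metric
# batch endpoint takes a flat mapping of metric UUIDs to measures; the UUID
# and values below are made up.
_EXAMPLE_METRIC_BATCH_PAYLOAD = {
    "6868da77-fa82-4e67-aba9-270c5a98cbcb": [
        {"timestamp": "2017-01-01T12:00:00", "value": 4.2},
    ],
}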
class SearchController(object):
resource = SearchResourceController()
metric = SearchMetricController()
class AggregationResourceController(rest.RestController):
def __init__(self, resource_type, metric_name):
self.resource_type = resource_type
self.metric_name = metric_name
@pecan.expose('json')
def post(self, start=None, stop=None, aggregation='mean',
reaggregation=None, granularity=None, needed_overlap=100.0,
groupby=None, fill=None, refresh=False, resample=None,
**kwargs):
# First, set groupby in the right format: a sorted list of unique
# strings.
groupby = sorted(set(arg_to_list(groupby)))
# NOTE(jd) Sort by groupby so we are sure we do not return multiple
# groups when using itertools.groupby later.
try:
resources = SearchResourceTypeController(
self.resource_type)._search(sort=groupby,
filter=kwargs.get("filter"))
except indexer.InvalidPagination:
abort(400, "Invalid groupby attribute")
except indexer.IndexerException as e:
abort(400, six.text_type(e))
if resources is None:
return []
if not groupby:
metrics = list(filter(None,
(r.get_metric(self.metric_name)
for r in resources)))
return AggregationController.get_cross_metric_measures_from_objs(
metrics, start, stop, aggregation, reaggregation,
granularity, needed_overlap, fill, refresh, resample)
def groupper(r):
return tuple((attr, r[attr]) for attr in groupby)
results = []
for key, resources in itertools.groupby(resources, groupper):
metrics = list(filter(None,
(r.get_metric(self.metric_name)
for r in resources)))
results.append({
"group": dict(key),
"measures": AggregationController.get_cross_metric_measures_from_objs( # noqa
metrics, start, stop, aggregation, reaggregation,
granularity, needed_overlap, fill, refresh, resample)
})
return results
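# NOTE: illustrative sketch only, not part of the upstream module. It shows why
# the _search() call above sorts by the groupby attributes: itertools.groupby
# only merges *adjacent* equal keys, so unsorted input would yield duplicate
# groups. The "flavor" attribute is made up.
def _example_groupby_needs_sorted_input():
    import itertools
    rows = [{"flavor": "m1"}, {"flavor": "m2"}, {"flavor": "m1"}]
    unsorted_groups = [k for k, _ in
                       itertools.groupby(rows, lambda r: r["flavor"])]
    ordered = sorted(rows, key=lambda r: r["flavor"])
    sorted_groups = [k for k, _ in
                     itertools.groupby(ordered, lambda r: r["flavor"])]
    return unsorted_groups, sorted_groups  # (['m1', 'm2', 'm1'], ['m1', 'm2'])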
FillSchema = voluptuous.Schema(
voluptuous.Any(voluptuous.Coerce(float), "null", "dropna",
msg="Must be a float, 'dropna' or 'null'"))
def validate_qs(start=None, stop=None, granularity=None,
needed_overlap=None, fill=None):
if needed_overlap is not None:
try:
needed_overlap = float(needed_overlap)
except ValueError:
abort(400, {"cause": "Argument value error",
"detail": "needed_overlap",
"reason": "Must be a number"})
if start is not None:
try:
start = utils.to_timestamp(start)
except Exception:
abort(400, {"cause": "Argument value error",
"detail": "start",
"reason": "Must be a datetime or a timestamp"})
if stop is not None:
try:
stop = utils.to_timestamp(stop)
except Exception:
abort(400, {"cause": "Argument value error",
"detail": "stop",
"reason": "Must be a datetime or a timestamp"})
if granularity is not None:
try:
granularity = [utils.to_timespan(granularity)]
except ValueError as e:
abort(400, {"cause": "Argument value error",
"detail": "granularity",
"reason": six.text_type(e)})
if fill is not None:
try:
fill = FillSchema(fill)
except voluptuous.Error as e:
abort(400, {"cause": "Argument value error",
"detail": "fill",
"reason": str(e)})
return start, stop, granularity, needed_overlap, fill
class AggregationController(rest.RestController):
_custom_actions = {
'metric': ['POST', 'GET'],
}
@pecan.expose()
def _lookup(self, object_type, resource_type, key, metric_name,
*remainder):
if object_type != "resource" or key != "metric":
# NOTE(sileht): we want the raw 404 message here
            # so use pecan directly
pecan.abort(404)
try:
pecan.request.indexer.get_resource_type(resource_type)
except indexer.NoSuchResourceType as e:
abort(404, six.text_type(e))
return AggregationResourceController(resource_type,
metric_name), remainder
@staticmethod
def get_cross_metric_measures_from_objs(metrics, start=None, stop=None,
aggregation='mean',
reaggregation=None,
granularity=None,
needed_overlap=100.0, fill=None,
refresh=False, resample=None):
start, stop, granularity, needed_overlap, fill = validate_qs(
start, stop, granularity, needed_overlap, fill)
if reaggregation is None:
reaggregation = aggregation
for metric in metrics:
enforce("get metric", metric)
number_of_metrics = len(metrics)
if number_of_metrics == 0:
return []
if resample:
if not granularity:
abort(400, 'A granularity must be specified to resample')
try:
resample = (resample if calendar.GROUPINGS.get(resample) else
utils.to_timespan(resample))
except ValueError as e:
abort(400, six.text_type(e))
if granularity is None:
granularities = (
definition.granularity
for m in metrics
for definition in m.archive_policy.definition
)
# granularities_in_common
granularity = [
g
for g, occurrence in six.iteritems(
collections.Counter(granularities))
if occurrence == len(metrics)
]
if not granularity:
abort(400, exceptions.UnAggregableTimeseries(
list((metric.id, aggregation)
for metric in metrics),
'No granularity match'))
aggregations = set()
for metric in metrics:
for g in granularity:
agg = metric.archive_policy.get_aggregation(
aggregation, g)
if agg is None:
abort(404, six.text_type(
storage.AggregationDoesNotExist(metric, aggregation, g)
))
aggregations.add(agg)
aggregations = sorted(aggregations, key=ATTRGETTER_GRANULARITY,
reverse=True)
operations = ["aggregate", reaggregation, []]
if resample:
operations[2].extend(
["resample", aggregation, resample,
["metric"] + [[str(m.id), aggregation]
for m in metrics]]
)
else:
operations[2].extend(
["metric"] + [[str(m.id), aggregation]
for m in metrics]
)
try:
if strtobool("refresh", refresh):
metrics_to_update = [
m for m in metrics
if pecan.request.incoming.has_unprocessed(m.id)]
for m in metrics_to_update:
try:
pecan.request.chef.refresh_metrics(
[m], pecan.request.conf.api.operation_timeout)
except chef.SackAlreadyLocked:
abort(503, 'Unable to refresh metric: %s. '
'Metric is locked. '
'Please try again.' % m.id)
if number_of_metrics == 1:
# NOTE(sileht): don't do the aggregation if we only have one
# metric
metric = metrics[0]
if (aggregation
not in metric.archive_policy.aggregation_methods):
abort(404, {
"cause":
"Aggregation method does not exist for this metric",
"detail": {
"metric": str(metric.id),
"aggregation_method": aggregation,
},
})
try:
results = pecan.request.storage.get_aggregated_measures(
{metric: aggregations}, start, stop, resample)[metric]
return [(timestamp, results[key].aggregation.granularity,
value)
for key in sorted(results.keys(),
reverse=True)
for timestamp, value in results[key]]
except storage.MetricDoesNotExist:
return []
return processor.get_measures(
pecan.request.storage,
[processor.MetricReference(m, aggregation) for m in metrics],
operations, start, stop,
granularity, needed_overlap, fill)["aggregated"]
except exceptions.UnAggregableTimeseries as e:
abort(400, e)
except storage.AggregationDoesNotExist as e:
abort(404, six.text_type(e))
MetricIDsSchema = [utils.UUID]
@pecan.expose('json')
def get_metric(self, metric=None, start=None, stop=None,
aggregation='mean', reaggregation=None, granularity=None,
needed_overlap=100.0, fill=None,
refresh=False, resample=None):
if pecan.request.method == 'GET':
try:
metric_ids = voluptuous.Schema(
self.MetricIDsSchema, required=True)(arg_to_list(metric))
except voluptuous.Error as e:
abort(400, "Invalid input: %s" % e)
else:
self._workaround_pecan_issue_88()
metric_ids = deserialize_and_validate(self.MetricIDsSchema)
metric_ids = [six.text_type(m) for m in metric_ids]
# Check RBAC policy
metrics = pecan.request.indexer.list_metrics(
attribute_filter={"in": {"id": metric_ids}})
missing_metric_ids = (set(metric_ids)
- set(six.text_type(m.id) for m in metrics))
if missing_metric_ids:
            # Return one of the missing ones in the error
abort(404, six.text_type(storage.MetricDoesNotExist(
missing_metric_ids.pop())))
return self.get_cross_metric_measures_from_objs(
metrics, start, stop, aggregation, reaggregation,
granularity, needed_overlap, fill, refresh, resample)
post_metric = get_metric
def _workaround_pecan_issue_88(self):
# FIXME(sileht): https://github.com/pecan/pecan/pull/88
if pecan.request.path_info.startswith("/aggregation/resource"):
pecan.abort(405)
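# NOTE: illustrative sketch only, not part of the upstream module. It mirrors
# the "granularities in common" selection in get_cross_metric_measures_from_objs
# above: a granularity is kept only if every metric's archive policy defines
# it. The timespans used here are arbitrary.
def _example_granularities_in_common():
    import collections
    per_metric = [[300, 3600], [300, 86400], [300, 3600]]  # seconds
    counter = collections.Counter(g for gs in per_metric for g in gs)
    return [g for g, occurrences in counter.items()
            if occurrences == len(per_metric)]  # -> [300]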
class CapabilityController(rest.RestController):
@staticmethod
@pecan.expose('json')
def get():
return dict(aggregation_methods=set(
archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS))
class StatusController(rest.RestController):
@staticmethod
@pecan.expose('json')
def get(details=True):
enforce("get status", {})
try:
members_req = pecan.request.coordinator.get_members(
metricd.MetricProcessor.GROUP_ID)
except tooz.NotImplemented:
members_req = None
try:
report = pecan.request.incoming.measures_report(
strtobool("details", details))
except incoming.ReportGenerationError:
abort(503, 'Unable to generate status. Please retry.')
report_dict = {"storage": {"summary": report['summary']}}
if 'details' in report:
report_dict["storage"]["measures_to_process"] = report['details']
report_dict['metricd'] = {}
if members_req:
members = members_req.get()
caps = [
pecan.request.coordinator.get_member_capabilities(
metricd.MetricProcessor.GROUP_ID, member)
for member in members
]
report_dict['metricd']['processors'] = [
member.decode() for member in members
]
members_data = {}
for member, cap in six.moves.zip(members, caps):
caps_data = {
six.ensure_str(k): v
for k, v in six.iteritems(cap.get())
}
members_data[member.decode()] = caps_data
report_dict['metricd']['statistics'] = members_data
else:
report_dict['metricd']['processors'] = None
report_dict['metricd']['statistics'] = {}
return report_dict
class MetricsBatchController(object):
measures = MetricsMeasuresBatchController()
class ResourcesMetricsBatchController(object):
measures = ResourcesMetricsMeasuresBatchController()
class ResourcesBatchController(object):
metrics = ResourcesMetricsBatchController()
class BatchController(object):
metrics = MetricsBatchController()
resources = ResourcesBatchController()
# Retry with exponential backoff, waiting at most 60 seconds between attempts
@tenacity.retry(
wait=tenacity.wait_exponential(multiplier=0.5, max=60),
retry=tenacity.retry_if_exception_type(
(indexer.NoSuchResource, indexer.ResourceAlreadyExists,
indexer.ResourceTypeAlreadyExists,
indexer.NamedMetricAlreadyExists)))
def get_or_create_resource_and_metrics(
creator, rid, original_resource_id, metric_names,
resource_attributes,
resource_type, resource_type_attributes=None):
try:
r = pecan.request.indexer.get_resource(resource_type, rid,
with_metrics=True)
except indexer.NoSuchResourceType:
if resource_type_attributes:
enforce("create resource type", {
'name': resource_type,
'state': 'creating',
'attributes': resource_type_attributes,
})
schema = pecan.request.indexer.get_resource_type_schema()
rt = schema.resource_type_from_dict(
resource_type, resource_type_attributes, 'creating')
pecan.request.indexer.create_resource_type(rt)
raise tenacity.TryAgain
else:
raise
except indexer.UnexpectedResourceTypeState as e:
# NOTE(sileht): Currently created by another thread
if not e.state.endswith("_error"):
raise tenacity.TryAgain
if r:
enforce("update resource", r)
exists_metric_names = [m.name for m in r.metrics]
metrics = MetricsSchema(dict(
(m, {}) for m in metric_names
if m not in exists_metric_names
))
if metrics:
return pecan.request.indexer.update_resource(
resource_type, rid,
metrics=metrics,
append_metrics=True,
create_revision=False
).metrics
else:
return r.metrics
else:
metrics = MetricsSchema(dict((m, {}) for m in metric_names))
target = {
"id": rid,
"resource_type": resource_type,
"creator": creator,
"original_resource_id": original_resource_id,
"metrics": metrics,
}
target.update(resource_attributes)
enforce("create resource", target)
        kwargs = resource_attributes  # no copy needed, it is not used afterwards
kwargs['metrics'] = metrics
kwargs['original_resource_id'] = original_resource_id
try:
return pecan.request.indexer.create_resource(
resource_type, rid, creator, **kwargs
).metrics
except indexer.ResourceAlreadyExists as e:
            # NOTE(sileht): ensure the rid is not registered within another
            # resource type.
r = pecan.request.indexer.get_resource('generic', rid)
if r.type != resource_type:
abort(409, e)
raise
class PrometheusWriteController(rest.RestController):
PROMETHEUS_RESOURCE_TYPE = {
"instance": {"type": "string",
"min_length": 1,
"max_length": 512,
"required": True},
"job": {"type": "string",
"min_length": 1,
"max_length": 512,
"required": True}
}
@pecan.expose()
def post(self):
buf = snappy.uncompress(pecan.request.body)
f = remote_pb2.WriteRequest()
f.ParseFromString(buf)
measures_by_rid = collections.defaultdict(dict)
for ts in f.timeseries:
attrs = dict((label.name, label.value) for label in ts.labels)
original_rid = (attrs.get("job", "none"),
attrs.get("instance", "none"))
name = attrs['__name__']
if ts.samples:
data = [{'timestamp': s.timestamp_ms / 1000.0,
'value': s.value} for s in ts.samples]
measures_by_rid[original_rid][name] = validate(
MeasuresListSchema, data)
creator = pecan.request.auth_helper.get_current_user(pecan.request)
measures_to_batch = {}
for (job, instance), measures in measures_by_rid.items():
original_rid = '%s@%s' % (job, instance)
rid = ResourceUUID(original_rid, creator=creator)
metric_names = list(measures.keys())
timeout = pecan.request.conf.api.operation_timeout
metrics = get_or_create_resource_and_metrics.retry_with(
stop=tenacity.stop_after_delay(timeout))(
creator, rid, original_rid, metric_names,
dict(job=job, instance=instance),
"prometheus", self.PROMETHEUS_RESOURCE_TYPE)
for metric in metrics:
enforce("post measures", metric)
measures_to_batch.update(
dict((metric.id, measures[metric.name]) for metric in
metrics if metric.name in measures))
pecan.request.incoming.add_measures_batch(measures_to_batch)
pecan.response.status = 202
class PrometheusController(object):
write = PrometheusWriteController()
class V1Controller(object):
def __init__(self):
# FIXME(sileht): split controllers to avoid lazy loading
from gnocchi.rest.aggregates import api as agg_api
from gnocchi.rest import influxdb
self.sub_controllers = {
"search": SearchController(),
"archive_policy": ArchivePoliciesController(),
"archive_policy_rule": ArchivePolicyRulesController(),
"metric": MetricsController(),
"batch": BatchController(),
"resource": ResourcesByTypeController(),
"resource_type": ResourceTypesController(),
"aggregation": AggregationController(),
"capabilities": CapabilityController(),
"status": StatusController(),
"aggregates": agg_api.AggregatesController(),
"influxdb": influxdb.InfluxDBController(),
}
for name, ctrl in self.sub_controllers.items():
setattr(self, name, ctrl)
if PROMETHEUS_SUPPORTED:
setattr(self, "prometheus", PrometheusController())
@pecan.expose('json')
def index(self):
return {
"version": "1.0",
"links": [
{"rel": "self",
"href": pecan.request.application_url}
] + [
{"rel": name,
"href": pecan.request.application_url + "/" + name}
for name in sorted(self.sub_controllers)
]
}
class VersionsController(object):
@staticmethod
@pecan.expose('json')
def index():
return {
"build": gnocchi.__version__,
"versions": [
{
"status": "CURRENT",
"links": [
{
"rel": "self",
"href": pecan.request.application_url + "/v1/"
}
],
"id": "v1.0",
"updated": "2015-03-19"
}
]
}
|
apache-2.0
| -4,405,489,096,522,094,000
| 35.578223
| 94
| 0.557312
| false
| 4.272807
| false
| false
| false
|
Royal-Society-of-New-Zealand/NZ-ORCID-Hub
|
orcid_api_v3/models/employment_v30.py
|
1
|
13727
|
# coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.created_date_v30 import CreatedDateV30 # noqa: F401,E501
from orcid_api_v3.models.external_i_ds_v30 import ExternalIDsV30 # noqa: F401,E501
from orcid_api_v3.models.fuzzy_date_v30 import FuzzyDateV30 # noqa: F401,E501
from orcid_api_v3.models.last_modified_date_v30 import LastModifiedDateV30 # noqa: F401,E501
from orcid_api_v3.models.organization_v30 import OrganizationV30 # noqa: F401,E501
from orcid_api_v3.models.source_v30 import SourceV30 # noqa: F401,E501
from orcid_api_v3.models.url_v30 import UrlV30 # noqa: F401,E501
class EmploymentV30(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'created_date': 'CreatedDateV30',
'last_modified_date': 'LastModifiedDateV30',
'source': 'SourceV30',
'put_code': 'int',
'path': 'str',
'department_name': 'str',
'role_title': 'str',
'start_date': 'FuzzyDateV30',
'end_date': 'FuzzyDateV30',
'organization': 'OrganizationV30',
'url': 'UrlV30',
'external_ids': 'ExternalIDsV30',
'display_index': 'str',
'visibility': 'str'
}
attribute_map = {
'created_date': 'created-date',
'last_modified_date': 'last-modified-date',
'source': 'source',
'put_code': 'put-code',
'path': 'path',
'department_name': 'department-name',
'role_title': 'role-title',
'start_date': 'start-date',
'end_date': 'end-date',
'organization': 'organization',
'url': 'url',
'external_ids': 'external-ids',
'display_index': 'display-index',
'visibility': 'visibility'
}
def __init__(self, created_date=None, last_modified_date=None, source=None, put_code=None, path=None, department_name=None, role_title=None, start_date=None, end_date=None, organization=None, url=None, external_ids=None, display_index=None, visibility=None): # noqa: E501
"""EmploymentV30 - a model defined in Swagger""" # noqa: E501
self._created_date = None
self._last_modified_date = None
self._source = None
self._put_code = None
self._path = None
self._department_name = None
self._role_title = None
self._start_date = None
self._end_date = None
self._organization = None
self._url = None
self._external_ids = None
self._display_index = None
self._visibility = None
self.discriminator = None
if created_date is not None:
self.created_date = created_date
if last_modified_date is not None:
self.last_modified_date = last_modified_date
if source is not None:
self.source = source
if put_code is not None:
self.put_code = put_code
if path is not None:
self.path = path
if department_name is not None:
self.department_name = department_name
if role_title is not None:
self.role_title = role_title
if start_date is not None:
self.start_date = start_date
if end_date is not None:
self.end_date = end_date
if organization is not None:
self.organization = organization
if url is not None:
self.url = url
if external_ids is not None:
self.external_ids = external_ids
if display_index is not None:
self.display_index = display_index
if visibility is not None:
self.visibility = visibility
@property
def created_date(self):
"""Gets the created_date of this EmploymentV30. # noqa: E501
:return: The created_date of this EmploymentV30. # noqa: E501
:rtype: CreatedDateV30
"""
return self._created_date
@created_date.setter
def created_date(self, created_date):
"""Sets the created_date of this EmploymentV30.
:param created_date: The created_date of this EmploymentV30. # noqa: E501
:type: CreatedDateV30
"""
self._created_date = created_date
@property
def last_modified_date(self):
"""Gets the last_modified_date of this EmploymentV30. # noqa: E501
:return: The last_modified_date of this EmploymentV30. # noqa: E501
:rtype: LastModifiedDateV30
"""
return self._last_modified_date
@last_modified_date.setter
def last_modified_date(self, last_modified_date):
"""Sets the last_modified_date of this EmploymentV30.
:param last_modified_date: The last_modified_date of this EmploymentV30. # noqa: E501
:type: LastModifiedDateV30
"""
self._last_modified_date = last_modified_date
@property
def source(self):
"""Gets the source of this EmploymentV30. # noqa: E501
:return: The source of this EmploymentV30. # noqa: E501
:rtype: SourceV30
"""
return self._source
@source.setter
def source(self, source):
"""Sets the source of this EmploymentV30.
:param source: The source of this EmploymentV30. # noqa: E501
:type: SourceV30
"""
self._source = source
@property
def put_code(self):
"""Gets the put_code of this EmploymentV30. # noqa: E501
:return: The put_code of this EmploymentV30. # noqa: E501
:rtype: int
"""
return self._put_code
@put_code.setter
def put_code(self, put_code):
"""Sets the put_code of this EmploymentV30.
:param put_code: The put_code of this EmploymentV30. # noqa: E501
:type: int
"""
self._put_code = put_code
@property
def path(self):
"""Gets the path of this EmploymentV30. # noqa: E501
:return: The path of this EmploymentV30. # noqa: E501
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""Sets the path of this EmploymentV30.
:param path: The path of this EmploymentV30. # noqa: E501
:type: str
"""
self._path = path
@property
def department_name(self):
"""Gets the department_name of this EmploymentV30. # noqa: E501
:return: The department_name of this EmploymentV30. # noqa: E501
:rtype: str
"""
return self._department_name
@department_name.setter
def department_name(self, department_name):
"""Sets the department_name of this EmploymentV30.
:param department_name: The department_name of this EmploymentV30. # noqa: E501
:type: str
"""
self._department_name = department_name
@property
def role_title(self):
"""Gets the role_title of this EmploymentV30. # noqa: E501
:return: The role_title of this EmploymentV30. # noqa: E501
:rtype: str
"""
return self._role_title
@role_title.setter
def role_title(self, role_title):
"""Sets the role_title of this EmploymentV30.
:param role_title: The role_title of this EmploymentV30. # noqa: E501
:type: str
"""
self._role_title = role_title
@property
def start_date(self):
"""Gets the start_date of this EmploymentV30. # noqa: E501
:return: The start_date of this EmploymentV30. # noqa: E501
:rtype: FuzzyDateV30
"""
return self._start_date
@start_date.setter
def start_date(self, start_date):
"""Sets the start_date of this EmploymentV30.
:param start_date: The start_date of this EmploymentV30. # noqa: E501
:type: FuzzyDateV30
"""
if start_date is None:
raise ValueError("Invalid value for `start_date`, must not be `None`") # noqa: E501
self._start_date = start_date
@property
def end_date(self):
"""Gets the end_date of this EmploymentV30. # noqa: E501
:return: The end_date of this EmploymentV30. # noqa: E501
:rtype: FuzzyDateV30
"""
return self._end_date
@end_date.setter
def end_date(self, end_date):
"""Sets the end_date of this EmploymentV30.
:param end_date: The end_date of this EmploymentV30. # noqa: E501
:type: FuzzyDateV30
"""
self._end_date = end_date
@property
def organization(self):
"""Gets the organization of this EmploymentV30. # noqa: E501
:return: The organization of this EmploymentV30. # noqa: E501
:rtype: OrganizationV30
"""
return self._organization
@organization.setter
def organization(self, organization):
"""Sets the organization of this EmploymentV30.
:param organization: The organization of this EmploymentV30. # noqa: E501
:type: OrganizationV30
"""
if organization is None:
raise ValueError("Invalid value for `organization`, must not be `None`") # noqa: E501
self._organization = organization
@property
def url(self):
"""Gets the url of this EmploymentV30. # noqa: E501
:return: The url of this EmploymentV30. # noqa: E501
:rtype: UrlV30
"""
return self._url
@url.setter
def url(self, url):
"""Sets the url of this EmploymentV30.
:param url: The url of this EmploymentV30. # noqa: E501
:type: UrlV30
"""
self._url = url
@property
def external_ids(self):
"""Gets the external_ids of this EmploymentV30. # noqa: E501
:return: The external_ids of this EmploymentV30. # noqa: E501
:rtype: ExternalIDsV30
"""
return self._external_ids
@external_ids.setter
def external_ids(self, external_ids):
"""Sets the external_ids of this EmploymentV30.
:param external_ids: The external_ids of this EmploymentV30. # noqa: E501
:type: ExternalIDsV30
"""
self._external_ids = external_ids
@property
def display_index(self):
"""Gets the display_index of this EmploymentV30. # noqa: E501
:return: The display_index of this EmploymentV30. # noqa: E501
:rtype: str
"""
return self._display_index
@display_index.setter
def display_index(self, display_index):
"""Sets the display_index of this EmploymentV30.
:param display_index: The display_index of this EmploymentV30. # noqa: E501
:type: str
"""
self._display_index = display_index
@property
def visibility(self):
"""Gets the visibility of this EmploymentV30. # noqa: E501
:return: The visibility of this EmploymentV30. # noqa: E501
:rtype: str
"""
return self._visibility
@visibility.setter
def visibility(self, visibility):
"""Sets the visibility of this EmploymentV30.
:param visibility: The visibility of this EmploymentV30. # noqa: E501
:type: str
"""
allowed_values = ["LIMITED", "REGISTERED_ONLY", "PUBLIC", "PRIVATE", "public", "private",
"limited", "registered-only"] # noqa: E501
if visibility not in allowed_values:
raise ValueError(
"Invalid value for `visibility` ({0}), must be one of {1}" # noqa: E501
.format(visibility, allowed_values)
)
self._visibility = visibility
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(EmploymentV30, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, EmploymentV30):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
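# NOTE: illustrative sketch only, not part of the generated model. It shows the
# usual round trip offered by swagger-generated classes; the attribute values
# are made up.
def _example_employment_round_trip():
    employment = EmploymentV30(department_name="Research",
                               role_title="Data Analyst",
                               visibility="PUBLIC")
    return employment.to_dict()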
|
mit
| -2,466,509,074,726,212,000
| 28.394004
| 276
| 0.585197
| false
| 3.786759
| false
| false
| false
|
Agicia/lpod-python
|
utils.py
|
1
|
17649
|
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2009-2010 Ars Aperta, Itaapy, Pierlis, Talend.
#
# Authors: David Versmisse <david.versmisse@itaapy.com>
# Hervé Cauwelier <herve@itaapy.com>
# Romain Gauthier <romain@itaapy.com>
#
# This file is part of Lpod (see: http://lpod-project.org).
# Lpod is free software; you can redistribute it and/or modify it under
# the terms of either:
#
# a) the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option)
# any later version.
# Lpod is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Lpod. If not, see <http://www.gnu.org/licenses/>.
#
# b) the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Import from the Standard Library
from datetime import date, datetime, timedelta
from decimal import Decimal as dec
from os import getcwd
from os.path import splitdrive, join, sep
from re import search
from sys import _getframe, modules
from warnings import warn
# Import from lpod
from datatype import Boolean, Date, DateTime, Duration
CELL_TYPES = ('boolean', 'currency', 'date', 'float', 'percentage', 'string',
'time')
STYLE_FAMILIES = ('paragraph', 'text', 'section', 'table', 'table-column',
'table-row', 'table-cell', 'table-page', 'chart',
'default', 'drawing-page', 'graphic', 'presentation',
'control', 'ruby', 'list', 'number', 'page-layout',
'presentation-page-layout', 'font-face', 'master-page')
NOTE_CLASSES = ('footnote', 'endnote')
# This DPI is computed to have:
# 640 px (width of your wiki) <==> 17 cm (width of a normal ODT page)
DPI = 640 * dec('2.54') / 17
######################################################################
# Private API
######################################################################
def _get_abspath(local_path):
"""Returns the absolute path to the required file.
"""
mname = _getframe(1).f_globals.get('__name__')
if mname == '__main__' or mname == '__init__':
mpath = getcwd()
else:
module = modules[mname]
if hasattr(module, '__path__'):
mpath = module.__path__[0]
elif '.' in mname:
mpath = modules[mname[:mname.rfind('.')]].__path__[0]
else:
mpath = mname
drive, mpath = splitdrive(mpath)
mpath = drive + join(mpath, local_path)
# Make it working with Windows. Internally we use always the "/".
if sep == '\\':
mpath = mpath.replace(sep, '/')
return mpath
def _make_xpath_query(element_name, family=None, text_style=None,
draw_id=None, draw_name=None, draw_style=None, draw_text_style=None,
table_name=None, table_style=None, style_name=None,
display_name=None, note_class=None, text_id=None, text_name=None,
office_name=None, office_title=None, outline_level=None, level=None,
page_layout=None, master_page=None, parent_style=None,
presentation_class=None, position=None, **kw):
query = [element_name]
attributes = kw
if text_style:
attributes['text:style-name'] = text_style
if family:
attributes['style:family'] = family
if draw_id:
attributes['draw:id'] = draw_id
if draw_name:
attributes['draw:name'] = draw_name
if draw_style:
attributes['draw:style-name'] = draw_style
if draw_text_style:
attributes['draw:text-style-name'] = draw_text_style
if table_name:
attributes['table:name'] = table_name
if table_style:
attributes['table:style-name'] = table_style
if style_name:
attributes['style:name'] = style_name
if display_name:
attributes['style:display-name'] = display_name
if note_class:
attributes['text:note-class'] = note_class
if text_id:
attributes['text:id'] = text_id
if text_name:
attributes['text:name'] = text_name
if office_name:
attributes['office:name'] = office_name
if office_title:
attributes['office:title'] = office_title
if outline_level:
attributes['text:outline-level'] = outline_level
if level:
attributes['text:level'] = level
if page_layout:
attributes['style:page-layout-name'] = page_layout
if master_page:
attributes['draw:master-page-name'] = master_page
if parent_style:
attributes['style:parent-style-name'] = parent_style
if presentation_class:
attributes['presentation:class'] = presentation_class
# Sort attributes for reproducible test cases
for qname in sorted(attributes):
value = attributes[qname]
if value is True:
query.append(u'[@%s]' % qname)
else:
query.append(u'[@%s="%s"]' % (qname, unicode(value)))
query = ''.join(query)
if position is not None:
# A position argument that mimics the behaviour of a python's list
if position >= 0:
position = str(position + 1)
elif position == -1:
position = 'last()'
else:
position = 'last()-%d' % (abs(position) - 1)
query = u'(%s)[%s]' % (query, position)
return query
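# NOTE: illustrative sketch only, not part of the upstream module. It shows the
# kind of XPath string _make_xpath_query() builds; the style name is made up.
def _example_xpath_query():
    # -> u'(descendant::text:p[@text:style-name="Standard"])[last()]'
    return _make_xpath_query('descendant::text:p', text_style=u'Standard',
                             position=-1)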
# These are listed exhaustively for keeping count of
# implemented style types
family_mapping = {
'paragraph': ('style:style', 'paragraph'),
'text': ('style:style', 'text'),
'section': ('style:style', 'section'),
'table': ('style:style', 'table'),
'table-column': ('style:style', 'table-column'),
'table-row': ('style:style', 'table-row'),
'table-cell': ('style:style', 'table-cell'),
'drawing-page': ('style:style', 'drawing-page'),
'graphic': ('style:style', 'graphic'),
'presentation': ('style:style', 'presentation'),
# False families
'list': ('text:list-style', None),
'outline': ('text:outline-style', None),
'page-layout': ('style:page-layout', None),
'presentation-page-layout': ('style:presentation-page-layout', None),
'master-page': ('style:master-page', None),
'font-face': ('style:font-face', None),
'number': ('number:number-style', None),
'percentage': ('number:percentage-style', None),
'time': ('number:time-style', None),
'date': ('number:date-style', None),
}
def _get_style_tagname(family):
if family not in family_mapping:
raise ValueError, "unknown family: " + family
return family_mapping[family]
def _get_style_family(name):
for family, (tagname, famattr) in family_mapping.iteritems():
if tagname == name:
return family
return None
def _expand_properties(properties):
# This mapping is not exhaustive, it only contains cases where replacing
# '_' with '-' and adding the "fo:" prefix is not enough
mapping = {# text
'font': 'style:font-name',
'size': 'fo:font-size',
'weight': 'fo:font-weight',
'style': 'fo:font-style',
'underline': 'style:text-underline-style',
'display': 'text:display',
'outline': 'style:text-outline',
'family_generic': 'style:font-family-generic',
'pitch': 'style:font-pitch',
# compliance with office suites
'font_style_name': 'style:font-style-name',
# paragraph
'align': 'fo:text-align',
'align-last': 'fo:text-align-last',
'indent': 'fo:text-indent',
'together': 'fo:keep-together',
# TODO 'page-break-before': 'fo:page-break-before',
# TODO 'page-break-after': 'fo:page-break-after',
'shadow': 'fo:text-shadow',
# Graphic
'stroke': 'draw:stroke',
'fill_color': 'draw:fill-color',
'fill_image_width': 'draw:fill-image-width',
'fill_image_height': 'draw:fill-image-height',
'textarea_vertical_align': 'draw:textarea-vertical-align',
'line_distance': 'draw:line-distance',
'guide_overhang': 'draw:guide-overhang',
'guide_distance': 'draw:guide-distance'
}
def map_key(key):
key = mapping.get(key, key).replace('_', '-')
if ":" not in key:
key = "fo:" + key
return key
if type(properties) is dict:
expanded = {}
for key, value in properties.iteritems():
key = map_key(key)
expanded[key] = value
elif type(properties) is list:
expanded = []
for key in properties:
key = map_key(key)
expanded.append(key)
return expanded
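# NOTE: illustrative sketch only, not part of the upstream module. It shows how
# shorthand style keys are expanded into qualified ODF attribute names.
def _example_expand_properties():
    # -> {'fo:font-size': '12pt', 'fo:text-align': 'center'}
    return _expand_properties({'size': '12pt', 'align': 'center'})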
def _merge_dicts(d, *args, **kw):
"""Merge two or more dictionaries into a new dictionary object.
"""
new_d = d.copy()
for dic in args:
new_d.update(dic)
new_d.update(kw)
return new_d
#
# Non-public yet useful helpers
#
def _get_elements(context, element_name, content=None, url=None,
svg_title=None, svg_desc=None, dc_creator=None, dc_date=None, **kw):
query = _make_xpath_query(element_name, **kw)
elements = context.get_elements(query)
# Filter the elements with the regex (TODO use XPath)
if content is not None:
elements = [element for element in elements if element.match(content)]
if url is not None:
filtered = []
for element in elements:
url_attr = element.get_attribute('xlink:href')
if search(url, url_attr) is not None:
filtered.append(element)
elements = filtered
if dc_date is not None:
# XXX Date or DateTime?
dc_date = DateTime.encode(dc_date)
for variable, childname in [
(svg_title, 'svg:title'),
(svg_desc, 'svg:desc'),
(dc_creator, 'descendant::dc:creator'),
(dc_date, 'descendant::dc:date')]:
if not variable:
continue
filtered = []
for element in elements:
child = element.get_element(childname)
if child and child.match(variable):
filtered.append(element)
elements = filtered
return elements
def _get_element(context, element_name, position, **kw):
    # TODO Transmit the position so we don't load the whole list
result = _get_elements(context, element_name, **kw)
try:
return result[position]
except IndexError:
return None
def _set_value_and_type(element, value=None, value_type=None, text=None,
currency=None):
# Remove possible previous value and type
for name in ('office:value-type', 'office:boolean-value',
'office:value', 'office:date-value', 'office:string-value',
'office:time-value', 'table:formula'):
try:
element.del_attribute(name)
except KeyError:
pass
if type(value) is bool:
if value_type is None:
value_type = 'boolean'
if text is None:
text = u'true' if value else u'false'
value = Boolean.encode(value)
elif isinstance(value, (int, float, long, dec)):
if value_type is None:
value_type = 'float'
if text is None:
text = unicode(value)
value = str(value)
elif type(value) is date:
if value_type is None:
value_type = 'date'
if text is None:
text = unicode(Date.encode(value))
value = Date.encode(value)
elif type(value) is datetime:
if value_type is None:
value_type = 'date'
if text is None:
text = unicode(DateTime.encode(value))
value = DateTime.encode(value)
elif type(value) is str:
if value_type is None:
value_type = 'string'
if text is None:
text = unicode(value)
elif type(value) is unicode:
if value_type is None:
value_type = 'string'
if text is None:
text = value
elif type(value) is timedelta:
if value_type is None:
value_type = 'time'
if text is None:
text = unicode(Duration.encode(value))
value = Duration.encode(value)
elif value is not None:
raise TypeError, 'type "%s" is unknown' % type(value)
if value_type is not None:
element.set_attribute('office:value-type', value_type)
if value_type == 'boolean':
element.set_attribute('office:boolean-value', value)
elif value_type == 'currency':
element.set_attribute('office:value', value)
element.set_attribute('office:currency', currency)
elif value_type == 'date':
element.set_attribute('office:date-value', value)
elif value_type in ('float', 'percentage'):
element.set_attribute('office:value', value)
elif value_type == 'string':
element.set_attribute('office:string-value', value)
elif value_type == 'time':
element.set_attribute('office:time-value', value)
return text
######################################################################
# Public API
######################################################################
def get_value(element, value_type=None, try_get_text=True):
"""Only for "with office:value-type" elements
"""
if value_type is None:
value_type = element.get_attribute('office:value-type')
if value_type == 'boolean':
value = element.get_attribute('office:boolean-value')
return Boolean.decode(value)
elif value_type in ('float', 'percentage', 'currency'):
value = dec(element.get_attribute('office:value'))
# Return 3 instead of 3.0 if possible
if int(value) == value:
return int(value)
return value
elif value_type == 'date':
value = element.get_attribute('office:date-value')
if 'T' in value:
return DateTime.decode(value)
else:
return Date.decode(value)
elif value_type == 'string':
value = element.get_attribute('office:string-value')
if value is not None:
return unicode(value)
if try_get_text:
value = []
for para in element.get_elements('text:p'):
value.append(para.get_text(recursive=True))
if value:
return u"\n".join(value)
return None
elif value_type == 'time':
value = element.get_attribute('office:time-value')
return Duration.decode(value)
elif value_type is None:
return None
raise ValueError, 'unexpected value type "%s"' % value_type
def set_value(element, value):
"""Only for "with office:value-type" elements
"""
tag = element.get_tag()
# A table:cell ?
if tag == 'table:table-cell':
element.clear()
text = _set_value_and_type(element, value=value)
element.set_text_content(text)
return
# A text:variable-set ?
if tag == 'text:variable-set':
name = element.get_attribute('text:name')
display = element.get_attribute('text:display')
element.clear()
text = _set_value_and_type(element, value=value)
element.set_attribute('text:name', name)
if display is not None:
element.set_attribute('text:display', display)
element.set_text(text)
return
# A text:user-field-decl ?
if tag == 'text:user-field-decl':
name = element.get_attribute('text:name')
element.clear()
_set_value_and_type(element, value=value)
element.set_attribute('text:name', name)
return
# Else => error
raise ValueError, 'set_value: unexpected element "%s"' % tag
def convert_unicode(text):
"""Mostly used to compare lxml serialization to what is expected.
"""
result = []
for c in text:
code = ord(c)
if code >= 128:
result.append('&#%d;' % code)
else:
result.append(c)
return ''.join(result)
def oooc_to_ooow(formula):
"""Convert (proprietary) formula from calc format to writer format.
Arguments:
formula -- unicode
Return: unicode
"""
prefix, formula = formula.split(":=", 1)
assert "oooc" in prefix
# Convert cell addresses
formula = formula.replace("[.", "<").replace(":.", ":").replace("]", ">")
# Convert functions
formula = formula.replace("SUM(", "sum ").replace(")", "")
return "ooow:" + formula
def obsolete(old_name, new_func, *args, **kw):
def decorate(*dec_args, **dec_kw):
new_name = new_func.__name__
if args:
new_name += '(' + ', '.join(repr(x) for x in args) + ')'
message = '"%s" is obsolete, call "%s" instead' % (old_name,
new_name)
warn(message, category=DeprecationWarning)
return new_func(*(dec_args + args), **dec_kw)
return decorate
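# NOTE: illustrative sketch only, not part of the upstream module. It shows how
# obsolete() is meant to keep a deprecated alias around; both helper names are
# made up.
def _example_obsolete_alias():
    def new_helper(value):
        return value
    old_helper = obsolete('old_helper', new_helper)
    return old_helper(42)  # emits a DeprecationWarning, then returns 42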
def isiterable(obj):
if isinstance(obj, basestring):
return False
try:
iter(obj)
except TypeError:
return False
return True
|
apache-2.0
| -884,333,073,897,304,000
| 32.808429
| 78
| 0.576609
| false
| 3.862552
| false
| false
| false
|
L33thium/xu4fanctl
|
xu4fanctl-1/sbin/fanctl.py
|
1
|
2490
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
##########
## Fan control for odroid xu4
## When the temperature hits hiTmp, manage the fan speed until it drops to loTmp, then stop.
## The steps bias the fan towards slowing down rather than speeding up, for silence.
## Recommended governor: conservative
############################
import os, sys, signal, re, time, collections
# settings
hiTmp = 90
loTmp = 50
stepUp = 20
stepDown = 5
minSpd = 22 # in percent
# files location
if os.path.isdir("/sys/devices/odroid_fan.14"):
fanctl = "/sys/devices/odroid_fan.14"
elif os.path.isdir("/sys/devices/odroid_fan.13"):
fanctl = "/sys/devices/odroid_fan.13"
fTmp = "/sys/devices/10060000.tmu/temp"
fMode = fanctl+"/fan_mode"
fSpd = fanctl+"/pwm_duty"
class fan():
def __init__(self):
self.tmpLst = collections.deque(maxlen=300)
def setManual(self):
with open(fMode, "w") as f:
f.write("0")
def setAuto(self):
with open(fMode, "w") as f:
f.write("1")
def getTmp(self):
with open(fTmp, "r") as f:
t = f.read()
tmps = re.findall("[0-9]{5}", t)
tmps = map(int, tmps)
#temp = max(tmps) / 1000
temp = sum(tmps) / len(tmps) / 1000
self.tmpLst.append(temp)
tmpAvg = float(sum(self.tmpLst) / len(self.tmpLst))
return [temp, tmpAvg]
def cool(self):
delta = hiTmp - loTmp + 20
tmps = self.getTmp()
temp = tmps[0]
tmpAvg = tmps[1]
time.sleep(1)
while tmpAvg > loTmp:
tmps = self.getTmp()
temp = tmps[0]
tmpAvg = tmps[1]
diff = tmpAvg - loTmp
percent = int(float(diff) / float(delta) * 100)
if temp >= hiTmp:
self.setSpd(100)
else:
self.setSpd(percent)
time.sleep(1)
def setSpd(self, percent=0):
if percent > 100:
percent = 100
pwm = int(float(percent) * 255 / 100)
if pwm < 58 and pwm > 1:
pwm = 58
if pwm < 1: pwm = 1
with open(fSpd, "r") as f:
curPwm = int(f.read())
if not pwm == curPwm:
with open(fSpd, "w") as f:
f.write(str(pwm))
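# NOTE: illustrative sketch only, not part of the original script. It mirrors
# the percent -> pwm_duty mapping used by fan.setSpd(): the duty cycle is
# scaled to 0..255 and clamped to a floor of 58 (about 23%, cf. minSpd above)
# so the fan never stalls.
def _example_pwm_for(percent):
    pwm = int(float(min(percent, 100)) * 255 / 100)
    if 1 < pwm < 58:
        pwm = 58
    if pwm < 1:
        pwm = 1
    return pwm  # _example_pwm_for(50) -> 127, _example_pwm_for(10) -> 58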
class GracefulKiller:
kill_now = False
def __init__(self):
signal.signal(signal.SIGINT, self.exit_gracefully)
signal.signal(signal.SIGTERM, self.exit_gracefully)
def exit_gracefully(self,signum, frame):
self.kill_now = True
def main():
killer = GracefulKiller()
done = False
fan.setManual()
fan.setSpd(0)
while not done:
if killer.kill_now:
fan.setAuto()
break
if fan.getTmp()[0] > hiTmp:
fan.cool()
time.sleep(1)
if __name__ == "__main__":
fan = fan()
try:
main()
except Exception as error:
print('caught this error: ' + repr(error))
fan.setAuto()
|
gpl-3.0
| 5,299,237,560,384,031,000
| 21.232143
| 70
| 0.626908
| false
| 2.49
| false
| false
| false
|
peterloron/archive
|
archive.py
|
1
|
5459
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Migrates files older than specified date from source to destination.
"""
import argparse
import os
import logging
import datetime
import time
import shutil
import random
from threading import Thread
from Queue import Queue
parser = argparse.ArgumentParser(description='Moves old files to a new location.')
parser.add_argument('-a', action="store", dest="age", default=90, type=int)
parser.add_argument('-s', action="store", dest="source_root")
parser.add_argument('-d', action="store", dest="dest_root")
parser.add_argument('-n', action="store_true", dest="no_op", default=False)
parser.add_argument('-t', action="store", dest="num_worker_threads", default=5, type=int)
parser.add_argument('--debug', action="store_true", dest="debug_mode", default=False)
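# NOTE: illustrative usage only, not part of the original script; the paths
# below are made up. A typical dry run (-n) that would report files older than
# 180 days using 8 worker threads without touching anything:
#   ./archive.py -s /data/projects -d /mnt/archive/projects -a 180 -t 8 -n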
shouldIKeepGoing = True
random.seed()
LOG_FILENAME = './archive.log'
logging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG)
# Thread worker. Handles moving the file
def fileHandler(thread_id, args, q):
global shouldIKeepGoing
while shouldIKeepGoing:
(source_target, dest_target) = q.get()
if not args.no_op:
try:
shutil.move(source_target, dest_target)
except Exception, err:
logging.error("Failure while moving file -- %s" % err)
exit()
logging.info("[%d]Moved: %s to %s" % (thread_id, source_target, dest_target))
if args.debug_mode:
print("[%d]Moved: %s to %s" % (thread_id, source_target, dest_target))
q.task_done()
def main():
global shouldIKeepGoing
args = parser.parse_args()
count = 0
AGE_INTERVAL = datetime.timedelta(days=args.age)
NOW = datetime.datetime.now()
file_queue = Queue()
logging.info("***************************************************************")
logging.info("Starting archive run at %s" % time.strftime("%c"))
logging.info("Source: %s" % args.source_root)
logging.info("Dest: %s" % args.dest_root)
logging.info("Age cutoff: %d" % args.age)
# Go through the files in the directory and see if any need to be moved
try:
# fire up some worker threads
for i in range(args.num_worker_threads):
worker = Thread(target=fileHandler, args=(i, args, file_queue,))
worker.setDaemon(True)
worker.start()
for root, dirs, files in os.walk(str(args.source_root), topdown=False):
logging.info("Checking %s..." % root)
for thefile in files:
count = count + 1
source_target = os.path.join(root, thefile)
if os.path.islink(source_target):
                    continue  # skip symlinks
stats = os.stat(source_target)
mod_date = datetime.datetime.fromtimestamp(stats.st_mtime)
                acc_date = datetime.datetime.fromtimestamp(stats.st_atime)
if args.debug_mode:
print("Source: %s" % source_target)
print("ATIME: %s" % acc_date.strftime("%c"))
print("MTIME: %s" % mod_date.strftime("%c"))
if (NOW - acc_date) > AGE_INTERVAL:
dest_target_path = os.path.join(args.dest_root, os.path.relpath(root, args.source_root))
dest_target = os.path.join(dest_target_path, thefile)
# create the directory if needed
if not os.path.exists(dest_target_path):
if not args.no_op:
os.makedirs(dest_target_path)
logging.info("Created dir: %s" % (dest_target_path))
if args.debug_mode:
print("Created dir: %s" % (dest_target_path))
# add to queue
file_queue.put((source_target, dest_target))
# wait for threads to be done processing the queue items
while not file_queue.empty():
time.sleep(0.1)
# Go through the directories and remove them if we can
for thedir in dirs:
target = os.path.join(root, thedir)
try:
if args.debug_mode:
print("Removing directory: %s" % target)
if not args.no_op:
os.rmdir(target)
logging.info("Removed directory: %s" % target)
except OSError, err:
if args.debug_mode:
print("RMDIR Failed: %s" % err)
continue
# finally, check the root source directory to see if it is now empty and can be removed.
try:
if args.debug_mode:
print("Removing directory: %s" % root)
if not args.no_op:
os.rmdir(root)
logging.info("Removed directory: %s" % root)
except OSError, err:
if args.debug_mode:
print("RMDIR Failed: %s" % err)
logging.info("Processed %d files in %d seconds." % (count, (datetime.datetime.now() - NOW).seconds))
logging.info("Done.")
except KeyboardInterrupt:
shouldIKeepGoing = False
raise
except Exception, err:
logging.error("Failure -- %s" % err)
exit()
# Start program
if __name__ == "__main__":
main()
|
mit
| -2,543,240,342,558,028,300
| 35.885135
| 108
| 0.549551
| false
| 4.040711
| false
| false
| false
|
tanium/pytan
|
EXAMPLES/PYTAN_API/ask_manual_question_complex_query1.py
|
1
|
5859
|
#!/usr/bin/env python
"""
Ask a manual question using human strings by referencing the names of two sensors.
Supply 3 parameters for the second sensor, one of which is not a valid parameter (and will be ignored).
Supply one option to the second sensor.
Supply two question filters that limit the rows returned in the result to computers whose Operating System sensor contains Windows or does not contain Windows.
Supply two question options that 'or' the two question filters and ignore the case of any values while matching the question filters.
"""
# import the basic python packages we need
import os
import sys
import tempfile
import pprint
import traceback
# disable python from generating a .pyc file
sys.dont_write_bytecode = True
# change me to the path of pytan if this script is not running from EXAMPLES/PYTAN_API
pytan_loc = "~/gh/pytan"
pytan_static_path = os.path.join(os.path.expanduser(pytan_loc), 'lib')
# Determine our script name, script dir
my_file = os.path.abspath(sys.argv[0])
my_dir = os.path.dirname(my_file)
# try to automatically determine the pytan lib directory by assuming it is in '../../lib/'
parent_dir = os.path.dirname(my_dir)
pytan_root_dir = os.path.dirname(parent_dir)
lib_dir = os.path.join(pytan_root_dir, 'lib')
# add pytan_loc and lib_dir to the PYTHONPATH variable
path_adds = [lib_dir, pytan_static_path]
[sys.path.append(aa) for aa in path_adds if aa not in sys.path]
# import pytan
import pytan
# create a dictionary of arguments for the pytan handler
handler_args = {}
# establish our connection info for the Tanium Server
handler_args['username'] = "Administrator"
handler_args['password'] = "Tanium2015!"
handler_args['host'] = "10.0.1.240"
handler_args['port'] = "443" # optional
handler_args['trusted_certs'] = "certs"
# optional, level 0 is no output except warnings/errors
# level 1 through 12 are more and more verbose
handler_args['loglevel'] = 1
# optional, use a debug format for the logging output (uses two lines per log entry)
handler_args['debugformat'] = False
# optional, this saves all response objects to handler.session.ALL_REQUESTS_RESPONSES
# very useful for capturing the full exchange of XML requests and responses
handler_args['record_all_requests'] = True
# instantiate a handler using all of the arguments in the handler_args dictionary
print "...CALLING: pytan.handler() with args: {}".format(handler_args)
handler = pytan.Handler(**handler_args)
# print out the handler string
print "...OUTPUT: handler string: {}".format(handler)
# setup the arguments for the handler() class
kwargs = {}
kwargs["question_filters"] = [u'Operating System, that contains:Windows',
u'Operating System, that does not contain:Windows']
kwargs["sensors"] = [u'Computer Name',
u'Folder Contents{folderPath=C:\\Program Files, invalidparam=test}, that regex match:.*Shared.*, opt:max_data_age:3600']
kwargs["question_options"] = [u'ignore_case', u'or']
kwargs["qtype"] = u'manual'
print "...CALLING: handler.ask with args: {}".format(kwargs)
response = handler.ask(**kwargs)
print "...OUTPUT: Type of response: ", type(response)
print "...OUTPUT: Pretty print of response:"
print pprint.pformat(response)
print "...OUTPUT: Equivalent Question if it were to be asked in the Tanium Console: "
print response['question_object'].query_text
if response['question_results']:
# call the export_obj() method to convert response to CSV and store it in out
export_kwargs = {}
export_kwargs['obj'] = response['question_results']
export_kwargs['export_format'] = 'csv'
print "...CALLING: handler.export_obj() with args {}".format(export_kwargs)
out = handler.export_obj(**export_kwargs)
# trim the output if it is more than 15 lines long
if len(out.splitlines()) > 15:
out = out.splitlines()[0:15]
out.append('..trimmed for brevity..')
out = '\n'.join(out)
print "...OUTPUT: CSV Results of response: "
print out
'''STDOUT from running this:
...CALLING: pytan.handler() with args: {'username': 'Administrator', 'record_all_requests': True, 'loglevel': 1, 'debugformat': False, 'host': '10.0.1.240', 'password': 'Tanium2015!', 'port': '443'}
...OUTPUT: handler string: PyTan v2.1.4 Handler for Session to 10.0.1.240:443, Authenticated: True, Platform Version: 6.5.314.4301
...CALLING: handler.ask with args: {'question_filters': [u'Operating System, that contains:Windows', u'Operating System, that does not contain:Windows'], 'sensors': [u'Computer Name', u'Folder Contents{folderPath=C:\\Program Files, invalidparam=test}, that regex match:.*Shared.*, opt:max_data_age:3600'], 'question_options': [u'ignore_case', u'or'], 'qtype': u'manual'}
2015-09-14 20:14:17,578 INFO pytan.pollers.QuestionPoller: ID 809: Reached Threshold of 99% (3 of 3)
...OUTPUT: Type of response: <type 'dict'>
...OUTPUT: Pretty print of response:
{'poller_object': <pytan.pollers.QuestionPoller object at 0x11b347e90>,
'poller_success': True,
'question_object': <taniumpy.object_types.question.Question object at 0x11b34d590>,
'question_results': <taniumpy.object_types.result_set.ResultSet object at 0x11b34ddd0>}
...OUTPUT: Equivalent Question if it were to be asked in the Tanium Console:
Get Computer Name and Folder Contents[C:\Program Files, test] containing "Shared" from all machines with ( Operating System containing "Windows" or any Operating System not containing "Windows" )
...CALLING: handler.export_obj() with args {'export_format': 'csv', 'obj': <taniumpy.object_types.result_set.ResultSet object at 0x11b34ddd0>}
...OUTPUT: CSV Results of response:
Computer Name,"Folder Contents[C:\Program Files, test]"
c1u14-virtual-machine.(none),[current result unavailable]
WIN-6U71ED4M23D,[current result unavailable]
TPT1.pytanlab.com,[current result unavailable]
'''
'''STDERR from running this:
'''
|
mit
| 7,895,586,940,574,294,000
| 42.69403
| 370
| 0.731866
| false
| 3.440399
| false
| false
| false
|
Ricyteach/parmatter
|
src/parmatter/group/meta.py
|
1
|
7243
|
from ..utilities import args_kwargs_from_args
from collections import OrderedDict as od, namedtuple as nt
import parse
class SpecialAttrsMeta(type):
'''A base metaclass that removes special attribute names from the namespace
prior to passing them for initialization.
Special attributes are designated by the attribute "_special".
Any _special attributes not defined at runtime are ignored.'''
def __new__(mcls, name, bases, mapping):
cls = super().__new__(mcls,name,bases,mapping)
sentinel = object()
reserved_mapping = {n:mapping.pop(n, sentinel) for n in mcls._special}
for k,v in ((k,v) for k,v in reserved_mapping.items() if v is not sentinel):
setattr(cls, k, v)
return cls
@classmethod
def special_check(meta, **kwargs):
'''Check to make sure there are no conflicts with special attribute names.'''
try:
special = meta._special
# check for reserved names
for n in special:
try:
                    raise ValueError('The attribute name "{}" is reserved (got {!r}).'.format(n, kwargs[n]))
except KeyError:
continue
# no special names
except AttributeError:
pass
class FormatGroupMeta(SpecialAttrsMeta):
'''A metaclass that produces classes defining lines composed of
formatting members with optional line prefixes and separators between members.
Formatter type must provide a static args_parse() method with a signature of:
args, kwargs = FormatterType.args_parse(*args)
f = FormatterType(*args, **kwargs)
Usage:
class LineDef(metaclass=FormatGroupMeta):
_formatter_type = CustomStaticFormatter
_prefix = 'my prefix'
_sep = ', '
a = '{: 5>s}', 'foo'
b = '{: 10>f}', 0
c = '{}'
'''
_special = '_prefix _sep _formatter_type _formatters'.split()
def __init__(cls, name, bases, mapping):
formatter_type = cls._formatter_type
formatter_defs = {k:v for k,v in mapping.items() if not k.startswith('_') and not callable(v)}
formatter_args = {}
formatter_kwargs = {}
# build the formatter args, kwargs using formatter_type.args_parse
for k,args in formatter_defs.items():
args = [args] if isinstance(args, str) else args
formatter_args[k], formatter_kwargs[k] = formatter_type.args_parse(*args)
formatters = (formatter_type(*formatter_args[k], **formatter_kwargs[k]) for k in formatter_defs)
# pass each set of args and kwargs to the formatter type
cls._formatters = {k:formatter for k,formatter in zip(formatter_defs,formatters)}
# attempt to grab extra types dict from an existing compiler (assume all of them are identical)
try:
cls._extra_types = next(iter(cls._formatters.values()))._parser._extra_types
# no existing compiler
except (AttributeError, StopIteration):
pass
cls.__init__(name,bases,mapping)
def format(cls, *args, _asdict=True, _popmappings=True, **unified_namespace):
'''Return a combined formatted string using joined formatter members.
Mapping objects can represent individual member argslists/namespaces and the values
will be appended to the args of the member name matching the key.
        Additional keyword arguments are passed to all formatters as a "universal namespace".
_popmappings:
If True any Mapping object at the end of the args list is a member namespace. It will
be spun out as the args via the name of that member or method as a key.
_asdict:
If True any object in args list that includes an .asdict or ._asdict attribute will
be treated as a Mapping object via the name of that member or method as a key.'''
# optionally remove any mappings from the args list
if _popmappings:
# the slice of args in which to look for mappings (end to beginning)
slc=slice(-1,None,-1)
# spin out any Mapping (or optionally .asdict/._asdict) objects starting from the end of args
args, kwargs_from_args = args_kwargs_from_args(args, slc=slc, asdict=_asdict, ignore_conflicts=True, terminate_on_failure=True)
else:
args, kwargs_from_args = args, {}
# argslist to be passed to each formatter member on a per-member basis
try:
# use unpacking to disallow multiple argslists to same member name
format_args = od(**kwargs_from_args, **od((k,a) for k,a in zip(cls._formatters, args)))
except TypeError as exc:
if 'multiple values for keyword argument' in str(exc):
key_conflict = next(k for k,_ in zip(cls._formatters, args) if k in kwargs_from_args)
raise TypeError('Multiple argument sets provided under member name: {}.'.format(key_conflict)) from None
else:
raise
# convert any single namespace arguments to an args list
format_args = od((k,(a if not isinstance(a,str) and hasattr(a, '__iter__') else [a])) for k,a in format_args.items())
return cls._prefix + cls._sep.join(formatter.format(*format_args.get(member,[]), **unified_namespace) for member,formatter in cls._formatters.items())
def unformat(cls, string, evaluate_result=True):
'''Inverse of format. Match my format group to the string exactly.
Return a parse.Result or parse.Match instance or None if there's no match.
'''
fmat_str = (cls._sep if cls._sep else ' ').join(member._format_str for member in cls)
# try to get extra type from precompiled parser set at initialization
try:
extra_types = cls._extra_types
# parser wasn't precompiled so just assume the default
except AttributeError:
extra_types = dict(s=str)
print('fmat_str:\n', fmat_str, 'string:\n', string[len(cls._prefix):], sep='\n')
result = parse.parse(fmat_str, string[len(cls._prefix):], extra_types, evaluate_result=evaluate_result)
# replace default output tuple with namedtuple
if result is not None and result.fixed:
result.fixed=list(result.fixed)
def is_positional_field(member_parse):
return member_parse[1:3]!=(None,None) and (member_parse[1] == '' or parse.parse('{:d}',member_parse[1]) is not None or parse.parse('{:d}{}',member_parse[1]) is not None)
fixed_counts=[len([member_parse for member_parse in member.parse(member._format_str) if is_positional_field(member_parse)]) for member in cls]
results=[]
for count in fixed_counts:
r=[]
for _ in range(count):
r.append(result.fixed.pop(0))
results.append(r)
NT=nt(cls.__name__+'Data', ' '.join(cls._formatters))
result.fixed=NT(*(r if len(r)>1 else r[0] for r in results))
return result
def __iter__(cls):
yield from cls._formatters.values()
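# Hedged usage sketch (illustration only, not part of the original module):
# assuming a concrete formatter type such as the CustomStaticFormatter named in
# the class docstring above, a line definition built with FormatGroupMeta is
# driven roughly like this:
#
#     line = LineDef.format('spam', 1.0, 42)   # '<prefix>' + members joined by _sep
#     result = LineDef.unformat(line)          # parse.Result; result.fixed is a
#                                              # namedtuple called LineDefData
#
# format() maps positional arguments to members in declaration order, and
# unformat() matches the joined member format strings back against the input.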
|
bsd-2-clause
| 441,881,972,316,338,240
| 51.875912
| 185
| 0.619219
| false
| 4.319022
| false
| false
| false
|
FireBladeNooT/Medusa_1_6
|
medusa/providers/torrent/html/scc.py
|
1
|
7435
|
# coding=utf-8
# Author: Idan Gutman
# Modified by jkaberg, https://github.com/jkaberg for SceneAccess
#
# This file is part of Medusa.
#
# Medusa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Medusa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Medusa. If not, see <http://www.gnu.org/licenses/>.
"""Provider code for SCC."""
from __future__ import unicode_literals
import re
import traceback
from requests.compat import urljoin
from requests.utils import dict_from_cookiejar
from ..torrent_provider import TorrentProvider
from .... import logger, tv_cache
from ....bs4_parser import BS4Parser
from ....helper.common import convert_size, try_int
class SCCProvider(TorrentProvider):
"""SceneAccess Torrent provider."""
def __init__(self):
"""Initialize the class."""
super(self.__class__, self).__init__('SceneAccess')
# Credentials
self.username = None
self.password = None
# URLs
self.url = 'https://sceneaccess.eu'
self.urls = {
'login': urljoin(self.url, 'login'),
'search': urljoin(self.url, 'all?search={string}&method=1&{cats}'),
}
# Proper Strings
self.proper_strings = ['PROPER', 'REPACK', 'REAL']
# Miscellaneous Options
self.categories = {
# Archive, non-scene HD, non-scene SD;
# need to include non-scene because WEB-DL packs get added to those categories
'Season': 'c26=26&c44=44&c45=45',
# TV HD, TV SD, non-scene HD, non-scene SD, foreign XviD, foreign x264
'Episode': 'c11=11&c17=17&c27=27&c33=33&c34=34&c44=44&c45=45',
# Season + Episode
'RSS': 'c11=11&c17=17&c26=26&c27=27&c33=33&c34=34&c44=44&c45=45',
}
# Torrent Stats
self.minseed = None
self.minleech = None
# Cache
self.cache = tv_cache.TVCache(self, min_time=20)
def search(self, search_strings, age=0, ep_obj=None):
"""
Search a provider and parse the results.
:param search_strings: A dict with mode (key) and the search value (value)
:param age: Not used
:param ep_obj: Not used
:returns: A list of search results (structure)
"""
results = []
if not self.login():
return results
for mode in search_strings:
logger.log('Search mode: {0}'.format(mode), logger.DEBUG)
for search_string in search_strings[mode]:
if mode != 'RSS':
logger.log('Search string: {search}'.format
(search=search_string), logger.DEBUG)
search_url = self.urls['search'].format(string=self._strip_year(search_string),
cats=self.categories[mode])
response = self.get_url(search_url, returns='response')
if not response or not response.text:
logger.log('No data returned from provider', logger.DEBUG)
continue
results += self.parse(response.text, mode)
return results
def parse(self, data, mode):
"""
Parse search results for items.
:param data: The raw response from a search
:param mode: The current mode used to search, e.g. RSS
:return: A list of items found
"""
items = []
with BS4Parser(data, 'html5lib') as html:
torrent_table = html.find('table', attrs={'id': 'torrents-table'})
torrent_rows = torrent_table('tr') if torrent_table else []
# Continue only if at least one release is found
if len(torrent_rows) < 2:
logger.log('Data returned from provider does not contain any torrents', logger.DEBUG)
return items
for row in torrent_rows[1:]:
try:
title = row.find('td', class_='ttr_name').find('a').get('title')
torrent_url = row.find('td', class_='td_dl').find('a').get('href')
download_url = urljoin(self.url, torrent_url)
if not all([title, torrent_url]):
continue
seeders = try_int(row.find('td', class_='ttr_seeders').get_text(), 1)
leechers = try_int(row.find('td', class_='ttr_leechers').get_text())
# Filter unseeded torrent
if seeders < min(self.minseed, 1):
if mode != 'RSS':
logger.log("Discarding torrent because it doesn't meet the "
"minimum seeders: {0}. Seeders: {1}".format
(title, seeders), logger.DEBUG)
continue
torrent_size = row.find('td', class_='ttr_size').contents[0]
size = convert_size(torrent_size) or -1
item = {
'title': title,
'link': download_url,
'size': size,
'seeders': seeders,
'leechers': leechers,
'pubdate': None,
}
if mode != 'RSS':
logger.log('Found result: {0} with {1} seeders and {2} leechers'.format
(title, seeders, leechers), logger.DEBUG)
items.append(item)
except (AttributeError, TypeError, KeyError, ValueError, IndexError):
logger.log('Failed parsing provider. Traceback: {0!r}'.format
(traceback.format_exc()), logger.ERROR)
return items
def login(self):
"""Login method used for logging in before doing search and torrent downloads."""
if any(dict_from_cookiejar(self.session.cookies).values()):
return True
login_params = {
'username': self.username,
'password': self.password,
'submit': 'come on in',
}
response = self.get_url(self.urls['login'], post_data=login_params, returns='response')
if not response or not response.text:
logger.log('Unable to connect to provider', logger.WARNING)
return False
if any([re.search(r'Username or password incorrect', response.text),
re.search(r'<title>SceneAccess \| Login</title>', response.text), ]):
logger.log('Invalid username or password. Check your settings', logger.WARNING)
return False
return True
@staticmethod
def _strip_year(search_string):
"""Remove brackets from search string year."""
if not search_string:
return search_string
return re.sub(r'\((?P<year>\d{4})\)', '\g<year>', search_string)
provider = SCCProvider()
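# Hedged usage sketch (illustration only; inside Medusa the search_strings
# structure is assembled by the core search code):
#
#     provider.username = 'user'        # hypothetical credentials
#     provider.password = 'secret'
#     results = provider.search({'Episode': ['Show Name S01E01']})
#
# Each entry in `results` is a dict with 'title', 'link', 'size', 'seeders',
# 'leechers' and 'pubdate' keys, as built in parse() above.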
|
gpl-3.0
| -8,627,801,519,026,012,000
| 36.550505
| 101
| 0.551715
| false
| 4.198193
| false
| false
| false
|
strus38/WPaaS
|
wpars/glance.py
|
1
|
5431
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Code inspired from Docker and modified to fit our needs
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
import flask
import glanceclient
from keystoneclient.v2_0 import client as keystoneclient
class GlanceStorage(object):
"""
This class stores the image layers into OpenStack Glance.
"""
disk_format = 'raw'
container_format = 'wparrip'
def __init__(self, config):
self._config = config
def _get_auth_token(self):
args = {}
for arg in ['username', 'password', 'tenant_name', 'auth_url']:
env_name = 'OS_{0}'.format(arg.upper())
if env_name not in os.environ:
raise ValueError('Cannot find env var "{0}"'.format(env_name))
args[arg] = os.environ[env_name]
keystone = keystoneclient.Client(**args)
return keystone.auth_token
def _get_endpoint(self):
if 'OS_GLANCE_URL' not in os.environ:
raise ValueError('Cannot find env var "OS_GLANCE_URL"')
return os.environ['OS_GLANCE_URL']
def _create_glance_client(self):
token = flask.request.headers.get('X-Meta-Auth-Token')
endpoint = flask.request.headers.get('X-Meta-Glance-Endpoint')
if not token:
token = self._get_auth_token()
if not endpoint:
endpoint = self._get_endpoint()
return glanceclient.Client('1', endpoint=endpoint, token=token)
def _read_image_info_file(image_name):
try:
f = open(image_local+'/'+image_name, "r")
except IOError:
return None
else:
with f:
obj = json.loads(f.read())
return obj
def _init_path(self, path, create=True):
"""This resolve a standard Wparrip <image>.info file
and returns: glance_image obj, property_name
!The image_id should be in sync with what Glance has!
If property name is None, we want to reach the image_data
"""
localpath, filename = os.path.split(path)
obj_res = _read_image_info_file(path)
if not 'id' in obj_res:
raise ValueError('Invalid image info file: {0}'.format(path))
image_id = obj_res['id']
glance = self._create_glance_client()
image = self._find_image_by_id(glance, image_id)
if not image and create is True:
if 'X-Meta-Glance-Image-Id' in flask.request.headers:
try:
i = glance.images.get(
flask.request.headers['X-Meta-Glance-Image-Id'])
if i.status == 'queued':
# We allow taking existing images only when queued
image = i
image.update(properties={'id': image_id},
purge_props=False)
except Exception:
pass
if not image:
image = glance.images.create(
disk_format=self.disk_format,
container_format=self.container_format,
properties={'id': image_id})
try:
image.update(is_public=True, purge_props=False)
except Exception:
pass
propname = 'meta_{0}'.format(filename)
if filename == 'layer':
propname = None
return image, propname
def _find_image_by_id(self, glance, image_id):
filters = {
'disk_format': self.disk_format,
'container_format': self.container_format,
'properties': {'id': image_id}
}
images = [i for i in glance.images.list(filters=filters)]
if images:
return images[0]
def _clear_images_name(self, glance, image_name):
images = glance.images.list(filters={'name': image_name})
for image in images:
image.update(name=None, purge_props=False)
def get_content(self, path):
(image, propname) = self._init_path(path, False)
if not propname:
raise ValueError('Wrong call (should be stream_read)')
if not image or propname not in image.properties:
raise IOError('No such image {0}'.format(path))
return image.properties[propname]
def put_content(self, path, content):
(image, propname) = self._init_path(path)
if not propname:
raise ValueError('Wrong call (should be stream_write)')
props = {propname: content}
image.update(properties=props, purge_props=False)
def stream_read(self, path):
(image, propname) = self._init_path(path, False)
if propname:
raise ValueError('Wrong call (should be get_content)')
if not image:
raise IOError('No such image {0}'.format(path))
return image.data(do_checksum=False)
def stream_write(self, path, fp):
(image, propname) = self._init_path(path)
if propname:
raise ValueError('Wrong call (should be put_content)')
image.update(data=fp, purge_props=False)
def exists(self, path):
(image, propname) = self._init_path(path, False)
if not image:
return False
if not propname:
return True
return (propname in image.properties)
def remove(self, path):
(image, propname) = self._init_path(path, False)
if not image:
return
if propname:
# Delete only the image property
props = image.properties
if propname in props:
del props[propname]
image.update(properties=props)
return
image.delete()
def get_size(self, path):
(image, propname) = self._init_path(path, False)
if not image:
raise OSError('No such image: \'{0}\''.format(path))
return image.size
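# Hedged usage sketch (illustration only): the driver pulls its Keystone
# credentials and the Glance endpoint from the environment unless they are
# supplied via the X-Meta-Auth-Token / X-Meta-Glance-Endpoint request headers:
#
#     export OS_USERNAME=... OS_PASSWORD=... OS_TENANT_NAME=... OS_AUTH_URL=...
#     export OS_GLANCE_URL=http://glance.example:9292
#
# get_content()/put_content() then store small metadata blobs as image
# properties, while paths whose filename is 'layer' are streamed as the image
# data itself through stream_read()/stream_write().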
|
apache-2.0
| -8,204,466,227,459,851,000
| 29.511236
| 75
| 0.690112
| false
| 3.14294
| false
| false
| false
|
csala/zato
|
code/zato-common/src/zato/common/test/__init__.py
|
1
|
11503
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2012 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
from datetime import datetime
from random import choice, randint
from unittest import TestCase
from uuid import uuid4
# anyjson
from anyjson import loads
# base32_crockford
from base32_crockford import decode
# Bunch
from bunch import Bunch
# mock
from mock import MagicMock, Mock
# nose
from nose.tools import eq_
# six
from six import string_types
# SQLAlchemy
from sqlalchemy import create_engine
# Zato
from zato.common import CHANNEL, DATA_FORMAT, SIMPLE_IO
from zato.common.log_message import CID_LENGTH
from zato.common.odb import model
from zato.common.util import new_cid
def rand_bool():
return choice((True, False))
def rand_csv(count=3):
return ','.join(str(elem) for elem in rand_int(count=count))
def rand_dict():
out = {}
funcs = [rand_bool, rand_int, rand_string]
for x in range(rand_int(30)):
out[choice(funcs)()] = choice(funcs)()
return out
def rand_list():
out = []
funcs = [rand_bool, rand_int, rand_string]
for x in range(rand_int(30)):
out.append(choice(funcs)())
return out
def rand_list_of_dicts():
out = []
for x in range(rand_int(30)):
out.append(rand_dict())
return out
def rand_opaque():
return rand_object()
rand_nested = rand_opaque
def rand_datetime():
return datetime.utcnow().isoformat() # Random in the sense of not repeating
def rand_int(start=1, stop=100, count=1):
if count == 1:
return randint(start, stop)
else:
return [randint(start, stop) for x in range(count)]
def rand_float(start=1.0, stop=100.0):
return float(rand_int(start, stop))
def rand_string(count=1):
if count == 1:
return 'a' + uuid4().hex
else:
return ['a' + uuid4().hex for x in range(count)]
def rand_unicode():
return u'ϠϡϢϣϤϥϦϧϨϩϪϫϬϭ'
def rand_object():
return object()
def rand_date_utc(as_string=False):
value = datetime.utcnow() # Now is as random as any other date
if as_string:
return value.isoformat()
return value
def is_like_cid(cid):
""" Raises ValueError if the cid given on input does not look like a genuine CID
produced by zato.common.util.new_cid
"""
if not isinstance(cid, string_types):
raise ValueError('CID `{}` should be string like instead of `{}`'.format(cid, type(cid)))
len_given = len(cid)
len_expected = CID_LENGTH + 1 # CID_LENGTH doesn't count 'K' in
if len_given != len_expected:
raise ValueError('CID `{}` should have length `{}` instead of `{}`'.format(cid, len_expected, len_given))
if not cid.startswith('K'):
raise ValueError('CID `{}` should start with `K`'.format(cid))
value = decode(cid[1:])
    if (value >> 128) != 0:
        raise ValueError('CID `{}` does not fit in 128 bits'.format(value))
return True
class Expected(object):
""" A container for the data a test expects the service to return.
"""
def __init__(self):
self.data = []
def add(self, item):
self.data.append(item)
def get_data(self):
if not self.data or len(self.data) > 1:
return self.data
else:
return self.data[0]
class FakeBrokerClient(object):
def __init__(self):
self.publish_args = []
self.publish_kwargs = []
self.invoke_async_args = []
self.invoke_async_kwargs = []
def publish(self, *args, **kwargs):
raise NotImplementedError()
def invoke_async(self, *args, **kwargs):
self.invoke_async_args.append(args)
self.invoke_async_kwargs.append(kwargs)
class FakeKVDB(object):
class FakeConn(object):
def __init__(self):
self.setnx_args = None
self.setnx_return_value = True
self.expire_args = None
self.delete_args = None
def return_none(self, *ignored_args, **ignored_kwargs):
return None
get = hget = return_none
def setnx(self, *args):
self.setnx_args = args
return self.setnx_return_value
def expire(self, *args):
self.expire_args = args
def delete(self, args):
self.delete_args = args
def __init__(self):
self.conn = self.FakeConn()
def translate(self, *ignored_args, **ignored_kwargs):
raise NotImplementedError()
class FakeServices(object):
def __getitem__(self, ignored):
return {'slow_threshold': 1234}
class FakeServiceStore(object):
def __init__(self, name_to_impl_name=None, impl_name_to_service=None):
self.services = FakeServices()
self.name_to_impl_name = name_to_impl_name or {}
self.impl_name_to_service = impl_name_to_service or {}
def new_instance(self, impl_name):
return self.impl_name_to_service[impl_name]()
class FakeServer(object):
""" A fake mock server used in test cases.
"""
def __init__(self, service_store_name_to_impl_name=None, service_store_impl_name_to_service=None):
self.kvdb = FakeKVDB()
self.service_store = FakeServiceStore(service_store_name_to_impl_name, service_store_impl_name_to_service)
self.fs_server_config = Bunch()
self.fs_server_config.misc = Bunch()
self.fs_server_config.misc.zeromq_connect_sleep = 0.1
self.fs_server_config.misc.internal_services_may_be_deleted = False
self.repo_location = rand_string()
self.delivery_store = None
self.user_config = Bunch()
class ForceTypeWrapper(object):
""" Makes comparison between two ForceType elements use their names.
"""
def __init__(self, value):
self.value = value
def __cmp__(self, other):
# Compare to either other's name or to other directly. In the latter case it means it's a plain string name
# of a SIO attribute.
return cmp(self.value.name, getattr(other, 'name', other))
class ServiceTestCase(TestCase):
def invoke(self, class_, request_data, expected, mock_data={}, channel=CHANNEL.HTTP_SOAP, job_type=None,
data_format=DATA_FORMAT.JSON, service_store_name_to_impl_name=None, service_store_impl_name_to_service=None):
""" Sets up a service's invocation environment, then invokes and returns
an instance of the service.
"""
instance = class_()
worker_store = MagicMock()
worker_store.worker_config = MagicMock
worker_store.worker_config.outgoing_connections = MagicMock(return_value=(None, None, None, None))
worker_store.worker_config.cloud_openstack_swift = MagicMock(return_value=None)
worker_store.worker_config.cloud_aws_s3 = MagicMock(return_value=None)
worker_store.invoke_matcher.is_allowed = MagicMock(return_value=True)
simple_io_config = {
'int_parameters': SIMPLE_IO.INT_PARAMETERS.VALUES,
'int_parameter_suffixes': SIMPLE_IO.INT_PARAMETERS.SUFFIXES,
'bool_parameter_prefixes': SIMPLE_IO.BOOL_PARAMETERS.SUFFIXES,
}
class_.update(
instance, channel, FakeServer(service_store_name_to_impl_name, service_store_impl_name_to_service),
None, worker_store, new_cid(), request_data, request_data, simple_io_config=simple_io_config,
data_format=data_format, job_type=job_type)
def get_data(self, *ignored_args, **ignored_kwargs):
return expected.get_data()
instance.get_data = get_data
for attr_name, mock_path_data_list in mock_data.iteritems():
setattr(instance, attr_name, Mock())
attr = getattr(instance, attr_name)
for mock_path_data in mock_path_data_list:
for path, value in mock_path_data.iteritems():
splitted = path.split('.')
new_path = '.return_value.'.join(elem for elem in splitted) + '.return_value'
attr.configure_mock(**{new_path:value})
broker_client_publish = getattr(self, 'broker_client_publish', None)
if broker_client_publish:
instance.broker_client = FakeBrokerClient()
instance.broker_client.publish = broker_client_publish
instance.call_hooks('before')
instance.handle()
instance.call_hooks('after')
return instance
def _check_sio_request_input(self, instance, request_data):
for k, v in request_data.iteritems():
self.assertEquals(getattr(instance.request.input, k), v)
sio_keys = set(getattr(instance.SimpleIO, 'input_required', []))
sio_keys.update(set(getattr(instance.SimpleIO, 'input_optional', [])))
given_keys = set(request_data.keys())
diff = sio_keys ^ given_keys
self.assertFalse(diff, 'There should be no difference between sio_keys {} and given_keys {}, diff {}'.format(
sio_keys, given_keys, diff))
def check_impl(self, service_class, request_data, response_data, response_elem, mock_data={}):
expected_data = sorted(response_data.items())
instance = self.invoke(service_class, request_data, None, mock_data)
self._check_sio_request_input(instance, request_data)
if response_data:
if not isinstance(instance.response.payload, basestring):
response = loads(instance.response.payload.getvalue())[response_elem] # Raises KeyError if 'response_elem' doesn't match
else:
response = loads(instance.response.payload)[response_elem]
self.assertEqual(sorted(response.items()), expected_data)
def check_impl_list(self, service_class, item_class, request_data, # noqa
response_data, request_elem, response_elem, mock_data={}): # noqa
expected_keys = response_data.keys()
expected_data = tuple(response_data for x in range(rand_int(10)))
expected = Expected()
for datum in expected_data:
item = item_class()
for key in expected_keys:
value = getattr(datum, key)
setattr(item, key, value)
expected.add(item)
instance = self.invoke(service_class, request_data, expected, mock_data)
response = loads(instance.response.payload.getvalue())[response_elem]
for idx, item in enumerate(response):
expected = expected_data[idx]
given = Bunch(item)
for key in expected_keys:
given_value = getattr(given, key)
expected_value = getattr(expected, key)
eq_(given_value, expected_value)
self._check_sio_request_input(instance, request_data)
def wrap_force_type(self, elem):
return ForceTypeWrapper(elem)
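# Hedged usage sketch (illustration only, with hypothetical service/test names):
# a concrete test module would subclass ServiceTestCase and drive a service
# through check_impl, e.g.
#
#     class GetExampleTestCase(ServiceTestCase):
#         def test_response(self):
#             request = {'cluster_id': rand_int()}
#             response = {'id': rand_int(), 'name': rand_string()}
#             self.check_impl(GetExample, request, response,
#                             'zato_example_get_response')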
class ODBTestCase(TestCase):
def setUp(self):
self.engine = create_engine('sqlite:///:memory:')
model.Base.metadata.create_all(self.engine)
def tearDown(self):
model.Base.metadata.drop_all(self.engine)
|
gpl-3.0
| -7,934,916,597,127,886,000
| 32.205202
| 136
| 0.61511
| false
| 3.799272
| true
| false
| false
|
rarmknecht/nlpfun
|
basic_info.py
|
1
|
6347
|
#!/usr/bin/python2
# Randy Armknecht
# 19 Feb 2014
#
# Playing around with the Natural Language Processing Toolkit (nltk)
# http://www.nltk.org/
#
from __future__ import division
import sys
import nltk
from nltk.corpus import cmudict
from nltk.corpus import stopwords
from pprint import pprint
from hyphen import Hyphenator as hy
DICT = cmudict.dict()
SYLLABLE_AVG = 1.66
# START - Implemented from http://www.slideshare.net/pbpimpale/natural-language-toolkit-nltk-basics
def unusual_words(text):
text_vocab = set(w.lower() for w in text if w.isalpha())
english_vocab = set(w.lower() for w in nltk.corpus.words.words())
unusual = text_vocab.difference(english_vocab)
return sorted(unusual)
def problem_words(text):
return sorted(set(w.lower() for w in text if not w.isalpha()))
def content_fraction(text):
stopwords = nltk.corpus.stopwords.words('english')
content = [w for w in text if w.lower() not in stopwords]
return len(content) / len(text)
def plot_word_freq(text):
text_vocab = [w.lower() for w in text if w.isalpha()]
fdist = nltk.FreqDist(text_vocab)
fdist.plot()
def long_words(text,length=10):
text_vocab = [w.lower() for w in text if w.isalpha()]
return set([w for w in text_vocab if len(w) > length])
def topic_words(text,length=7,freq=7):
text_vocab = [w.lower() for w in text if w.isalpha()]
fdist = nltk.FreqDist(text_vocab)
return sorted([w for w in set(text_vocab) if len(w) > length and fdist[w] > freq])
def vocab_size(text):
return len(set(text))
def vocab_richness(text):
return len(text) / vocab_size(text)
def word_context(text,word):
return text.concordance(word)
# END - Implemented from http://www.slideshare.net/pbpimpale/natural-language-toolkit-nltk-basics
def get_raw(fname):
data = ""
with open(fname) as f:
data = f.read()
return data
def massage_raw(raw):
modified = ''.join([character for character in raw if ord(character) < 128])
sentences = nltk.sent_tokenize(modified)
words = nltk.word_tokenize(modified)
tokens = []
stops = [unicode(word) for word in stopwords.words('english')] + [',', '.', '?', '!', ':', ';', '-', ')', '(']
for w in words:
if w not in stops:
tokens.append(w)
return (nltk.Text(tokens), sentences)
def nsyl(word):
return len([i for i in DICT[word.lower()][0] if i[-1].isdigit()])
# return [len(list(y for y in x if y[-1].isdigit())) for x in DICT[word.lower()]][0]
# http://stackoverflow.com/a/5615724 translated to python
def count_syllables(word):
# Special Cases
if word in ['ll', 'noye', 'shae']:
return 1
# Back to Our Regular Scheduled Programming
vowels = ['a','e','i','o','u','y']
curword = word
syls = 0
lastWasVowel = False
for wc in curword:
foundVowel = False
for v in vowels:
# Don't Count Diphthongs
if v == wc and lastWasVowel:
foundVowel = True
lastWasVowel = True
                break
elif v == wc and not lastWasVowel:
syls += 1
foundVowel = True
lastWasVowel = True
                break
# If Fully cycle and no vowel found, set lastWasVowel to False
if not foundVowel:
lastWasVowel = False
# Remove es, it's usually silent
if len(curword) > 2 and curword[-2:] == "es":
syls -= 1
elif len(curword) > 1 and curword[-1] == "e":
syls -= 1
return syls
# Modified form of https://gist.github.com/drinks/2483508
def flesch_kincaid(text,sentences):
syllables = []
misses = []
words = [word for word in text if (len(word) > 1) or (word.lower() in ['a', 'i'])]
for word in words:
try:
ns = nsyl(word)
syllables.append(ns)
except KeyError:
n = count_syllables(word.lower())
if n == 0:
misses.append(word.lower())
else:
syllables.append(n)
word_count = len(words) - len(misses)
sentence_count = len(sentences)
syllable_count = sum(syllables)
#m_dist = nltk.FreqDist(misses)
#for t in m_dist.keys():
# print m_dist[t], t, count_syllables(t)
#for m in set(misses):
# print "%s %d" % (m, m_dist[m])
words_sents = word_count / sentence_count
syl_words = syllable_count / word_count
if word_count > 0 and sentence_count > 0:
results = {
'words': word_count,
'syllables': syllable_count,
'missed_count': len(misses),
'missed_pct': len(misses) / (word_count + len(misses)),
'sentences': sentence_count,
'grade_level': (0.39 * words_sents) + (11.8 * syl_words) - 15.59,
'reading_ease': 206.835 - (1.015 * words_sents) - (84.6 * syl_words),
}
return results
# From: http://engineroom.trackmaven.com/blog/monthly-challenge-natural-language-processing/
def top10_bigrams(words):
bigram_measure = nltk.collocations.BigramAssocMeasures()
bigram_finder = nltk.collocations.BigramCollocationFinder.from_words(words)
# Filter to top 20 results; otherwise processing is long
bigram_finder.apply_freq_filter(20)
for bigram in bigram_finder.score_ngrams(bigram_measure.raw_freq)[:10]:
print(bigram)
# Modified the above to print trigrams, and look at words with a frequency of at least 10
def top10_trigrams(words):
trigram_measure = nltk.collocations.TrigramAssocMeasures()
trigram_finder = nltk.collocations.TrigramCollocationFinder.from_words(words)
# Filter at least 10 instances of each word, and measure based on pmi metric
# http://www.nltk.org/api/nltk.metrics.html#nltk.metrics.association.NgramAssocMeasures.pmi
trigram_finder.apply_freq_filter(10)
for trigram in trigram_finder.score_ngrams(trigram_measure.pmi)[:10]:
print(trigram)
if __name__ == "__main__":
    if len(sys.argv) != 2:
print("Usage: %s <text_file>" % (sys.argv[0]))
sys.exit(0)
(text,sentences) = massage_raw(get_raw(sys.argv[1]))
pprint(flesch_kincaid(text,sentences))
print("\nBigrams\n====================")
top10_bigrams(text)
print("\nTrigrams\n====================")
top10_trigrams(text)
|
mit
| -6,004,009,814,675,770,000
| 31.382653
| 114
| 0.618245
| false
| 3.202321
| false
| false
| false
|
by46/recipe
|
templates/python.lib/{{cookiecutter.project_safe_name}}/setup.py
|
1
|
1964
|
from __future__ import print_function
import io
import os.path
import re
from distutils.text_file import TextFile
from setuptools import find_packages, setup
home = os.path.abspath(os.path.dirname(__file__))
missing = object()
def read_description(*files, **kwargs):
encoding = kwargs.get('encoding', 'utf-8')
sep = kwargs.get('sep', '\n')
buf = [io.open(name, encoding=encoding).read() for name in files]
return sep.join(buf)
def read_dependencies(requirements=missing):
if requirements is None:
return []
if requirements is missing:
requirements = 'requirements.txt'
if not os.path.isfile(requirements):
return []
text = TextFile(requirements, lstrip_ws=True)
try:
return text.readlines()
finally:
text.close()
def read_version(version_file):
    with open(version_file) as fd:
result = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE)
return result.group(1) if result else '0.0.1'
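# Example (hedged): if the package __init__.py contains a line such as
# __version__ = '1.2.3', read_version() returns '1.2.3'; when no such line is
# found it falls back to '0.0.1'.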
setup(
name='{{cookiecutter.project_slug}}',
version=read_version('{{cookiecutter.project_slug}}/__init__.py'),
license='The MIT License',
description='demo',
author='recipe',
author_email='recipe@newegg.com',
install_requires=read_dependencies(),
include_package_data=True,
packages=find_packages(),
classifiers=[
'Programming Language :: Python',
'Development Status :: 3 - Alpha',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Software Development',
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Software Distribution',
'Topic :: System :: Systems Administration',
]
)
|
mit
| -6,274,117,673,014,789,000
| 29.215385
| 71
| 0.6222
| false
| 4.008163
| false
| false
| false
|
jeffersonfparil/GTWAS_POOL_RADseq_SIM
|
simulateVAR.py
|
1
|
3264
|
#!/usr/bin/env python
import os, sys, math, random
import numpy as np
from Bio import SeqIO
from Bio import Seq
work_DIR = sys.argv[1]
input_SEQ = sys.argv[2]
output_SEQ = sys.argv[3]
output_VCF = sys.argv[4]
varPerScaf = int(sys.argv[5]) #number of variants per scaffold
# #for testing:
# work_DIR = "/mnt/SIMULATED/DNA"
# input_SEQ = "Fixed.fasta"
# output_SEQ = "Variant.fasta"
# output_VCF = "Variant.vcf"
# varPerScaf = 20
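# Example invocation (hedged; matches the positional arguments parsed above):
#   ./simulateVAR.py /mnt/SIMULATED/DNA Fixed.fasta Variant.fasta Variant.vcf 20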
os.chdir(work_DIR)
#(1->2) SIMULATE ALLELIC VARIANTS FROM A REFERENCE GENOME AND
# SPIT OUT THE VCF AND FASTA FILES WITH THE SIMULATED MUTATIONS
fasta_sequences = SeqIO.parse(input_SEQ,'fasta')
fasta_index = SeqIO.index(input_SEQ,'fasta')
NEW_FASTA = []
VCF = np.array([np.repeat(0, 9)])
for fasta in fasta_sequences:
name, sequence = fasta.id, str(fasta.seq)
LEN = len(sequence)
POS = np.random.choice(range(LEN), replace=False, size=varPerScaf)
for i in range(len(POS)):
availPOS = set(range(LEN)) - set(POS)
while sequence[POS[i]] != "A" and sequence[POS[i]] != "T" and sequence[POS[i]] != "C" and sequence[POS[i]] != "G":
POS[i] = np.random.choice(list(availPOS), replace=False, size=1)
POS.sort()
MUT = np.random.choice(["Substitution", "Duplication", "Deletion"], size=len(POS), p=[0.9, 0.01, 0.09])
#CH0 = np.random.chisquare(0.3, varPerScaf)
CH0 = np.random.choice([1], varPerScaf) #bcftools does not seem to like more than 1 base in the reference
CH1 = np.random.chisquare(0.3, varPerScaf)
CHR = []
for cha in CH0:
CHR.append(int(math.ceil(cha)))
CHA = []
for cha in CH1:
CHA.append(int(math.ceil(cha)))
REF=[]
ALT=[]
for i in range(len(POS)):
if MUT[i] == "Substitution":
SUB=[]
for s in range(int(CHA[i])):
SUB.append(random.choice(["A", "T", "C", "G"]))
while "".join(SUB) == sequence[POS[i]:POS[i]+CHR[i]]:
SUB=[]
for s in range(int(CHA[i])):
SUB.append(random.choice(["A", "T", "C", "G"]))
sequence2 = sequence[:POS[i]] + "".join(SUB) + sequence[POS[i]+CHR[i]:]
REF.append(sequence[POS[i]:POS[i]+CHR[i]])
ALT.append("".join(SUB))
else:
if MUT[i] == "Duplication":
sequence2 = sequence[:POS[i]+CHR[i]] + sequence[POS[i]:POS[i]+CHR[i]] + sequence[POS[i]+CHR[i]+1:]
REF.append(sequence[POS[i]:POS[i]+CHR[i]])
ALT.append(sequence[POS[i]:POS[i]+CHR[i]] + sequence[POS[i]:POS[i]+CHR[i]])
else:
sequence2 = sequence[:POS[i]] + sequence[POS[i]+1:]
#REF.append(sequence[POS[i]-1:POS[i]+CHR[i]])
#ALT.append(sequence2[POS[i]-1:POS[i]])
REF.append(sequence[POS[i]:POS[i]+CHR[i]])
ALT.append('<DEL>')
#fasta.seq = Seq.Seq(sequence2)
#NEW_FASTA.append(fasta)
CHROM = np.repeat(name, varPerScaf)
POS = POS + 1
ID = np.repeat(".", varPerScaf)
QUAL = np.repeat(".", varPerScaf)
FILTER = np.repeat("PASS", varPerScaf)
INFO = np.repeat(".", varPerScaf)
FORMAT = np.repeat("GT", varPerScaf)
vcf = np.stack((CHROM, POS, ID, REF, ALT, QUAL, FILTER, INFO, FORMAT), axis=-1)
VCF = np.concatenate((VCF, vcf), axis=0)
#FASTA OUTPUT:
#SeqIO.write(NEW_FASTA, output_SEQ, "fasta")
#WRITE VCF FILE:
VCF = VCF[1:len(VCF)]
np.savetxt("outPy.txt", VCF, fmt='%s' ,delimiter="\t")
os.system("cat VCF.header outPy.txt > " + output_VCF)
os.system("rm outPy.txt")
###:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::###
|
gpl-3.0
| 9,102,283,531,665,851,000
| 34.48913
| 116
| 0.630208
| false
| 2.452292
| false
| false
| false
|
edosedgar/xs-pkg
|
deep_learning/HW2/notmnist.py
|
1
|
2059
|
import os
import numpy as np
from scipy.misc import imread,imresize
from urllib.request import urlretrieve
def load_notmnist(path=".", letters='ABCDEFGHIJ',
img_shape=(28,28),test_size=0.25,one_hot=False):
root = os.path.join(path, "notMNIST_small")
# download data if it's missing. If you have any problems, go to the urls and load it manually.
if not os.path.exists(root):
print("Downloading data...")
urlretrieve(
"http://yaroslavvb.com/upload/notMNIST/notMNIST_small.tar.gz",
"notMNIST_small.tar.gz")
print("Extracting ...")
import tarfile
with tarfile.open("notMNIST_small.tar.gz", "r:gz") as tar:
tar.extractall(path=path)
data,labels = [],[]
print("Parsing...")
for letter in sorted(os.listdir(root)):
if letter not in letters: continue
for img_name in sorted(os.listdir(os.path.join(root, letter))):
img_path = os.path.join(root, letter, img_name)
try:
data.append(imresize(imread(img_path), img_shape))
labels.append(letter,)
except:
print("found broken img: %s [it's ok if <10 images are broken]" % img_path)
data = np.stack(data)[:,None].astype('float32')
data = (data - np.mean(data)) / np.std(data)
#convert classes to ints
letter_to_i = {l:i for i,l in enumerate(letters)}
labels = np.array(list(map(letter_to_i.get, labels)))
if one_hot:
labels = (np.arange(np.max(labels) + 1)[None,:] == labels[:, None]).astype('float32')
#split into train/test
np.random.seed(666)
permutation = np.arange(len(data))
np.random.shuffle(permutation)
data = data[permutation]
labels = labels[permutation]
n_train_samples = int(round(len(data) * (1.0 - test_size)))
X_train, X_test = data[:n_train_samples], data[n_train_samples:]
y_train, y_test = labels[:n_train_samples], labels[n_train_samples:]
return X_train, y_train, X_test, y_test
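if __name__ == "__main__":
    # Hedged usage sketch: downloads notMNIST_small on the first run and prints
    # the shapes of the resulting splits (requires an older scipy that still
    # provides scipy.misc.imread/imresize, as imported above).
    X_train, y_train, X_test, y_test = load_notmnist(letters='AB', one_hot=True)
    print("train:", X_train.shape, y_train.shape)
    print("test: ", X_test.shape, y_test.shape)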
|
gpl-2.0
| 1,673,140,495,444,828,200
| 34.5
| 99
| 0.606605
| false
| 3.420266
| true
| false
| false
|
yobibyte/DeepFried2
|
DeepFried2/utils.py
|
1
|
2973
|
import DeepFried2 as df
import numpy as _np
from warnings import warn as _warn
from numbers import Number as _Number
def create_param_state_as(other, initial_value=0, prefix='state_for_'):
return df.th.shared(other.get_value()*0 + initial_value,
broadcastable=other.broadcastable,
name=prefix + str(other.name)
)
def _check_dtype_mistake(dtype):
"""
It's a very common mistake (at least for me) to pass-in a float64 when I
really want to pass in a `floatX`, and it would go unnoticed and slow-down
the computations a lot if I wouldn't check it here.
"""
if _np.issubdtype(dtype, _np.floating) and dtype != df.floatX:
_warn("Input array of floating-point dtype {} != df.floatX detected. Is this really what you want?".format(dtype))
def make_tensor(dtype, ndim, name):
_check_dtype_mistake(dtype)
return df.th.tensor.TensorType(dtype, (False,) * ndim)(name)
def tensors_for_ndarrays(datas, basename):
if isinstance(datas, _np.ndarray):
return make_tensor(datas.dtype, datas.ndim, basename)
if isinstance(datas, (list, tuple)):
return [tensors_for_ndarrays(data, "{}_{}".format(basename, i)) for i, data in enumerate(datas)]
# Could potentially make it "any iterable" by removing above check.
# But would need to guarantee we never iterate over it twice, which is harder!
raise TypeError("I only understand lists or tuples of numpy arrays! (possibly nested)")
def count_params(module, learnable_only=True):
return sum(p.get_value().size for p in module.parameters(learnable_only=learnable_only))
def flatten(what, types=(list, tuple), none_to_empty=False):
if what is None and none_to_empty:
return []
if not isinstance(what, types):
return [what]
# NOTE: I actually timed that this is faster than the comprehension,
# even though it probably doesn't matter :)
# 350us vs 250us
ret = []
for sub in what:
ret += flatten(sub, types=types, none_to_empty=none_to_empty)
return ret
def expand(tup, ndim, name=None, expand_nonnum=False):
if isinstance(tup, (tuple, list)) and len(tup) == ndim:
return tup
if isinstance(tup, _Number) or expand_nonnum:
return (tup,) * ndim
if not expand_nonnum:
return tup
raise ValueError("Bad number of dimensions{}: is {} but should be {}.".format((" for " + name) if name else "", len(tup), ndim))
def typename(obj):
return type(obj).__name__
def pad(symb_input, padding):
assert symb_input.ndim == len(padding), "symb_input ({}d) and padding ({}d) must have the same dimensionality".format(symb_input.ndim, len(padding))
padded_shape = tuple((s+2*p) for s,p in zip(symb_input.shape, padding))
padded_input = df.T.zeros(padded_shape)
slicing = [slice(None) if p == 0 else slice(p,s+p) for s,p in zip(symb_input.shape, padding)]
return df.T.set_subtensor(padded_input[slicing], symb_input)
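if __name__ == "__main__":
    # Hedged usage sketch for the pure-Python helpers above (importing this
    # module still requires DeepFried2/Theano to be installed).
    print(flatten([1, [2, (3,)], None], none_to_empty=True))  # [1, 2, 3]
    print(expand(3, ndim=2))       # (3, 3)
    print(expand((4, 5), ndim=2))  # (4, 5)
    print(typename(object()))      # object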
|
mit
| -2,337,813,472,281,590,300
| 34.392857
| 152
| 0.671712
| false
| 3.456977
| false
| false
| false
|
adael/goldminer
|
goldminer/draw.py
|
1
|
11274
|
from math import ceil
from bearlibterminal import terminal
from goldminer import settings, texts, colors
from goldminer.actor import Actor
from goldminer.inventory import Inventory
from goldminer.history import History
from goldminer.geom import Rect
from goldminer.items import Item
from goldminer.worldmap import Tile
from goldminer.util import chunks
class Border:
def __init__(self, top, bottom, left, right, topLeft, topRight, bottomLeft, bottomRight):
self.top = top
self.bottom = bottom
self.left = left
self.right = right
self.topLeft = topLeft
self.topRight = topRight
self.bottomLeft = bottomLeft
self.bottomRight = bottomRight
color_stack = []
double_border = Border(
top=0x2550,
bottom=0x2550,
left=0x2551,
right=0x2551,
topLeft=0x2554,
topRight=0x2557,
bottomLeft=0x255A,
bottomRight=0x255D
)
single_border = Border(
top=0x2500,
bottom=0x2500,
left=0x2502,
right=0x2502,
topLeft=0x250C,
topRight=0x2510,
bottomLeft=0x2514,
bottomRight=0x2518
)
def push_colors():
color_stack.append((terminal.state(terminal.TK_COLOR), terminal.state(terminal.TK_BKCOLOR)))
def pop_colors():
(fg, bg) = color_stack.pop()
terminal.color(fg)
terminal.bkcolor(bg)
def color_for_value(value, colors=None):
if not colors:
colors = ["dark red", "red", "orange", "yellow", "dark green", "green"]
ncolors = len(colors) - 1
percent = round(value * ncolors / 100, 0)
index = int(min(ncolors, max(0, percent)))
return colors[index]
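# Example (hedged): with the default palette, color_for_value(0) yields
# "dark red" and color_for_value(100) yields "green"; values in between map
# onto the intermediate buckets.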
def draw_double_line(x, y, width):
draw_line(x, y, width, "[U+2550]")
def draw_line(x, y, width, code="[U+2500]"):
terminal.print_(x, y, code * width)
def draw_progress_label(x, y, label, value, max_value, color):
label += " [color={}]{}[color=white]/{}".format(color, value, max_value)
terminal.print_(x, y, label)
def draw_progress(x, y, width, percent, color, bkcolor="dark gray"):
fill_width = int(percent * width / 100)
terminal.print_(x, y, "[bkcolor={}]".format(bkcolor) + (" " * width))
terminal.print_(x, y, "[bkcolor={}]".format(color) + (" " * fill_width))
def draw_rect(rect_, border=double_border):
draw_box(rect_.left, rect_.top, rect_.right - 1, rect_.bottom - 1, border)
def draw_box(x1, y1, x2, y2, border=double_border):
for cx in range(x1, x2):
terminal.put(cx, y1, border.top)
terminal.put(cx, y2, border.bottom)
for cy in range(y1, y2):
terminal.put(x1, cy, border.left)
terminal.put(x2, cy, border.right)
terminal.put(x1, y1, border.topLeft)
terminal.put(x2, y1, border.topRight)
terminal.put(x2, y2, border.bottomRight)
terminal.put(x1, y2, border.bottomLeft)
def draw_corners(x1, y1, x2, y2, border=single_border):
terminal.put(x1, y1, border.topLeft)
terminal.put(x2, y1, border.topRight)
terminal.put(x2, y2, border.bottomRight)
terminal.put(x1, y2, border.bottomLeft)
def draw_window(rect_, caption, color="white", bkcolor="black"):
push_colors()
terminal.color(color)
terminal.bkcolor(bkcolor)
terminal.clear_area(rect_.x, rect_.y, rect_.width, rect_.height)
draw_line(rect_.x + 1, rect_.y + 2, rect_.width - 2, "[U+2594]")
draw_rect(rect_)
terminal.print_(rect_.center_x, rect_.y + 1, "[align=center]" + caption)
pop_colors()
def draw_select_box(control, x, y):
padding_left = 2
w, h = calculate_select_box_dimension(control)
w += padding_left
index = 0
py = 0
for item in control.items:
color = colors.white
if item.active and control.item_focused_index == index:
color = colors.yellow
elif not item.active:
color = colors.gray
box = "[bbox={}]".format(w - padding_left)
(_, height) = terminal.measure(box + item.label)
terminal.color(color)
terminal.print_(x + 2, y + py, box + item.label)
if index == control.item_focused_index:
terminal.color(color)
terminal.put(x, y + py, ">")
py += height
index += 1
def calculate_select_box_dimension(ctrl):
w, h = 3, 3
for item in ctrl.items:
w = max(len(item.label), w)
for item in ctrl.items:
box = "[bbox={}]".format(w)
(_, m) = terminal.measure(box + item.label)
h = max(m, h)
return w, h
# GenerateWorldState
def draw_generate_world():
terminal.color(colors.black)
terminal.bkcolor(colors.white_ice)
terminal.clear()
terminal.print_(10, 10, "Generating world...")
# PlayingState
def draw_game_layout():
terminal.color(colors.beige)
draw_rect(settings.screen_rect)
draw_rect(settings.map_window_rect)
draw_rect(settings.gui_rect)
draw_rect(settings.status_rect)
def draw_world(world):
terminal.clear()
draw_game_layout()
draw_world_map(world.camera, world.world_map)
draw_world_actors(world.camera, world.actors)
draw_world_player(world.camera, world.player)
draw_actor_stats(world.player)
draw_history(world.player.history)
world.player.history.trim()
terminal.refresh()
def draw_world_map(camera, world_map):
for x, y in settings.map_rect:
px, py = camera.camera_to_map(x, y)
if world_map.inside_map(px, py):
draw_tile(world_map.tile(px, py), x, y)
def draw_world_actors(camera, actors):
for actor in actors:
x, y = camera.map_to_camera(actor.x, actor.y)
draw_actor(actor, x, y)
def draw_world_player(camera, player):
x, y = camera.map_to_camera(player.x, player.y)
draw_player(player, x, y)
if player.orientation:
push_colors()
(px, py) = camera.map_to_camera(*player.looking_position())
terminal.color(terminal.pick_color(px, py))
terminal.bkcolor("#222222")
terminal.put(px, py, terminal.pick(px, py))
pop_colors()
def draw_tile(tile: Tile, x, y):
if not tile.explored:
return
draw_char(x, y, tile.char, tile.color if tile.in_sight else colors.not_in_sight)
def draw_actor(actor, x, y):
draw_entity(actor, x, y)
def draw_player(player: Actor, x, y):
draw_entity(player, x, y)
def draw_chest(chest, x, y):
draw_entity(chest, x, y)
def draw_entity(entity, x, y):
draw_char(x, y, entity.char, entity.color)
def draw_char(x, y, char, color):
terminal.color(color)
terminal.put(x, y, char)
def draw_actor_stats(actor):
r = settings.gui_rect
terminal.color('azure')
draw_rect(r)
x = r.left + 2
y = r.top + 2
width = r.width - 4
draw_gui_stat(actor.fighter.hp, x, y, width, settings.hp_colors)
y += 3
draw_gui_stat(actor.fighter.water, x, y, width, settings.water_colors)
y += 3
draw_gui_stat(actor.fighter.food, x, y, width, settings.food_colors)
y += 3
draw_gui_stat(actor.fighter.fatigue, x, y, width, colors.get_bright_range(colors.brown))
y += 3
terminal.print_(x, y, "Position: {}x{}".format(actor.x, actor.y))
y += 4
terminal.color("#AA6939")
terminal.print_(x, y, "Inventory:")
draw_double_line(x, y + 1, width)
draw_mini_inventory(actor.inventory, x, y + 3, width)
def draw_gui_stat(stat, x, y, width, colors, bkcolor="dark gray"):
color = color_for_value(stat.percent, colors)
draw_progress_label(x, y, stat.label, int(round(stat.value, 0)), stat.max_value, color)
draw_progress(x, y + 1, width, stat.percent, color, bkcolor)
def draw_mini_inventory(inventory: Inventory, x: int, y: int, width: int):
"""
It draws the in-game mini-inventory
"""
items = ["[color={}]{} [/color]".format(item.color, item.char) for item in inventory.items]
while len(items) < inventory.capacity:
items.append("[color=#404040]- [/color]")
lines = chunks(items, ceil(width/2))
for line_items in lines:
terminal.print_(x, y, "[bbox={}]".format(width) + "".join(line_items))
y += 1
def draw_history(history: History):
r = settings.status_rect
x, y = r.x + 1, r.bottom - 2
color = "white"
for msgtime, msg in reversed(history.messages):
if y <= r.y:
return
s = "{} [color={}][bbox={}]{}".format(msgtime.strftime("%H:%M:%S"), color, r.width, msg)
terminal.print_(x, y, s)
(_, mh) = terminal.measure(s)
y -= mh
color = "dark gray"
# MenuState
def draw_menu_state(lst):
terminal.clear()
caption = ".*{Gold Miner}*."
terminal.color("yellow")
terminal.print_(10, 10, caption)
draw_double_line(10, 11, len(caption))
draw_select_box(lst, 10, 13)
terminal.refresh()
def draw_menu_option_state(lst):
terminal.clear_area(30, 14, 60, 30)
terminal.color("yellow")
terminal.print_(30, 14, "Screen size")
draw_double_line(30, 15, len("Screen size"))
draw_select_box(lst, 30, 16)
terminal.refresh()
def draw_inventory_window(inventory: Inventory, selected_index):
draw_window(settings.gui_rect, "Inventory window", colors.inventory_item_hover_bg, colors.inventory_bk_color)
if inventory.is_empty():
inner_width = settings.gui_rect.width - 2
px = settings.gui_rect.x + 4
py = settings.gui_rect.y + 4
msg = texts.pick(texts.inventory_is_empty)
terminal.print_(px, py, "[bbox={}][color={}]{}".format(inner_width, colors.teal, msg))
terminal.print_(px, py + 2, "[bbox={}][color={}]<< {}".format(inner_width, colors.white, texts.press_back))
else:
draw_inventory_state_items(inventory.items, selected_index)
terminal.refresh()
# Inventory state
def draw_inventory_state_items(items, selected_index):
line_x = settings.gui_rect.x + 1
line_y = settings.gui_rect.y + 3
line_w = settings.gui_rect.width - 3
item_w = 2
item_h = 3
index = 0
for item in items:
text_x = line_x + 4
text_y = line_y + 1
if index == selected_index:
item_bg = colors.inventory_item_hover_bg
item_fg = colors.inventory_item_hover_fg
else:
item_bg = colors.inventory_bk_color
item_fg = colors.inventory_item_fg
label = "[bbox={}][color=white] {}[/color]".format(line_w, item.description)
_, mh = terminal.measure(label)
cy = mh
# draw icon
terminal.bkcolor(colors.inventory_bk_color)
terminal.color(colors.white)
draw_corners(line_x, line_y, line_x + item_w, line_y + item_w)
terminal.color(item.color)
terminal.put(line_x + 1, line_y + 1, item.char)
# draw highlight
terminal.bkcolor(item_bg)
terminal.clear_area(text_x, line_y, line_w - 4, item_h)
# draw text
terminal.print_(text_x, text_y, label)
# restore background color
terminal.bkcolor(colors.black)
# calculations
line_y += max(3, cy + 1)
index += 1
def draw_view_item_window(lst, item: Item):
rect = Rect.from_rect(settings.gui_rect)
draw_window(rect, item.description, colors.white, colors.inventory_bk_color)
terminal.bkcolor(colors.inventory_bk_color)
draw_select_box(lst, rect.x + 1, rect.y + 3)
terminal.refresh()
|
mit
| -7,870,676,331,743,460,000
| 26.700246
| 115
| 0.621873
| false
| 3.122992
| false
| false
| false
|
igoumiri/pyMST
|
adhoc/cyleq.py
|
1
|
7837
|
# old16 is to preserve the code that was adding one element to all the
# radial arrays in adhoc.py and cyleq.py.
# Generic cylindrical equilibrium solutions
def zfunc(rho, bz, bq, lam, press):
return -lam * bq - press / (bz**2 + bq**2) * bz
def qfunc(rho, bz, bq, lam, press):
if rho == 0.0:
return (lam * bz)
else:
return (lam * bz) - (1.0/rho + press / (bz**2 + bq**2) ) * bq
#def press(rho, beta0):
def press_quadratic(rho, beta0):
"""Pressure function that returns quadratic p, gradp."""
p = (beta0 / 2.0)*(1 - rho**2)
gradp = (beta0 / 2.0) * (-2.0 * rho)
return p, gradp
def press_cubic(rho, beta0):
"""Pressure function that returns matched cubic p, gradp.
Found that
p/p0 = 1 - (4/3)rho**3+(1/3)rho**12
    (matched polynomial with dp/dr(rho=1) = 0)
closely matches measured p profile from Biewer's thesis
I like this type of polynomial since dp/dr = 0 at edge
is required physically.
Note the quartic profile case for completeness:
p = (beta0/2.0)*(-4*rho**3+3*rho**4)
gradp = ( beta0/2.0 ) * ( -12.0*rho**2 + 12.0*rho**3)
"""
p = (beta0 / 2.0)*(1.0 - (4.0/3.0)*rho**3+(1.0/3.0)*rho**12)
gradp = (beta0 / 2.0)*(-4.0*rho**2 + 4.0*rho**11)
return p, gradp
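# Hedged example (not part of the original module): a small helper that just
# evaluates the two closed-form pressure models above. The beta0 value and the
# radial grid size are illustrative assumptions.
def _example_pressure_profiles(beta0=0.1, n=5):
    import numpy as np
    rho = np.linspace(0.0, 1.0, n)
    p_quad, gradp_quad = press_quadratic(rho, beta0)
    p_cub, gradp_cub = press_cubic(rho, beta0)
    # The matched cubic is constructed so gradp_cub -> 0 at rho = 1, while the
    # quadratic profile keeps a finite slope at the edge.
    return (p_quad, gradp_quad), (p_cub, gradp_cub)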
def lam_to_eq(lam, pars, ip,
pmodel='quadratic', beta=0.07, n=51, ret='all',
corr='cyl', d=0.01, a=0.52, Ra=1.50):
"""
Given 1D lambda-profile function and ip as a scaling quantity,
return various field quantities from the cylindrical equilibrium
model.
Note ip must be in mks for this to work right, while ip comes from
MDSplus in kA.
lam: a function that takes in radius x and parameters pars
and outputs lambda at that x. Note that lam in this file
always means lambda * b, the local inverse-scale length
times the minor radius at which the plasma current vanishes.
beta: the average pressure over Bpw**2/(2*mu0), i. e. 'poloidal beta'.
n: number of radial points.
a and Ra: minor and major radius of measurements in mks
ret='scalars': returns ip, btw, btave, b0, beta0 as a tuple,
for use in optimization codes like simplex.
ret='all' (default): returns things from 'scalars', as well as
Ra, a, d, Rb, b, rho, bq, bz, jq, jz, p, gradp, q, lam,
all as named in a dictionary.
"""
import numpy as np
import scipy.integrate as sig
mu0 = np.pi * 4E-7
# The original JSS value:
m_max = 4 # beta iterations
# KJM 2012-02 to use conditional loop with tolerance.
# m_max = 10 # beta iterations
h = 1.0 / n
hh = h / 2.0
# Normalized radial coordinate
rho = np.linspace(0.0, 1.0, n)
# Define B arrays.
bz = np.zeros(n)
bq = np.zeros(n)
# Integrate pressure gradient for profile and average pressure
# factor.
if pmodel == 'quadratic':
press = press_quadratic
elif pmodel == 'cubic':
press = press_cubic
p, gradp = press(rho, 2.0)
p = p - p[-1]
avg_p_fac = 0.5 / sig.simps(rho*p, rho)
# beta0_tol = 1E-3
# 1E-3 gives same number of iterations as m_max=4 with no condition.
for m in range(m_max): #loop for different beta
if m == 0: #first time, zero beta
beta0 = 0.0
else: #next times, derive beta0 for given beta
#general pressure profile
beta0 = avg_p_fac * beta * bq[-1]**2
# print beta0, abs(beta0 - beta0_old) / beta0
# if abs(beta0 - beta0_old) / beta0 < beta0_tol:
# break
# beta0_old = beta0
# print beta0
bz[0] = 1.0 #axis values of
bq[0] = 0.0 #field components
for i in range(n-1):
x = rho[i]
y = lam(x, *pars)
p, z = press(x, beta0)
t1_z = h * zfunc(x, bz[i], bq[i], y, z)
t1_q = h * qfunc(x, bz[i], bq[i], y, z)
x = rho[i] + hh
y = lam(x, *pars)
p, z = press(x, beta0)
t2_z = h * zfunc(x, bz[i]+t1_z/2.0, bq[i]+t1_q/2.0, y, z)
t2_q = h * qfunc(x, bz[i]+t1_z/2.0, bq[i]+t1_q/2.0, y, z)
t3_z = h * zfunc(x, bz[i]+t2_z/2.0, bq[i]+t2_q/2.0, y, z)
t3_q = h * qfunc(x, bz[i]+t2_z/2.0, bq[i]+t2_q/2.0, y, z)
x = rho[i+1]
y = lam(x, *pars)
p, z = press(x, beta0)
t4_z = h * zfunc(x, bz[i]+t3_z, bq[i]+t3_q, y, z)
t4_q = h * qfunc(x, bz[i]+t3_z, bq[i]+t3_q, y, z)
bz[i+1] = bz[i] + (t1_z + 2.0*t2_z + 2.0*t3_z + t4_z) / 6.0
bq[i+1] = bq[i] + (t1_q + 2.0*t2_q + 2.0*t3_q + t4_q) / 6.0
# print m
# Calculate corrections to fields.
#d = 0.01 # outboard gap between LCFS & shell, in meters
if corr == 'tor':
b = a - d / (1.0 - a / Ra) #LCFS plasma radius, in meters
Rb = Ra + a - b - d #LCFS plasma major radius, in meters
# Note b = 0.504694, Rb = 1.50531 for MST.
# Toroidal geometry factors
tg_a = Ra * (1.0 - np.sqrt(1.0 - (a / Ra)**2) )
tg_b = Rb * (1.0 - np.sqrt(1.0 - (b / Rb)**2) )
elif corr == 'cyl':
b = a - d #LCFS plasma radius, in meters
Rb = Ra + a - b - d #LCFS plasma major radius, in meters
# Note b = 0.51, Rb = Ra = 1.5 for MST.
# Get final field profiles, where bz is done before bq to avoid a bug.
bpw = mu0 * ip / 2.0 / np.pi / a
bpw_b = bpw * a / b
bz = bz * bpw_b / bq[-1]
bq = bq * bpw_b / bq[-1]
btave_b = 2.0 * sig.simps(rho * bz, rho)
# New beta0 value may be slightly inconsistent with fields,
# so recalculate it.
beta0 = avg_p_fac * beta * bq[-1]**2
# Find BTW and BTAVE using values at/inside LCFS
if corr == 'tor':
btw = bz[-1] / tg_b * tg_a / (a / b)**2
btave = ( btave_b + bz[-1] * (tg_a / tg_b - 1.0) ) / (a / b)**2
elif corr == 'cyl':
btw = bz[-1]
btave = ( btave_b * b**2 + btw * (a**2 - b**2) ) / a**2
if ret == 'scalars':
return ip, btw, btave, bz[0], beta0
elif ret == 'all':
# Get pressure and gradient in MKS.
p, gradp = press(rho, beta0)
p = bz[0] * bz[0] / mu0 * p
gradp = bz[0] * bz[0] / mu0 / b * gradp
# Safety factor q = r * bt / (Ra * bp)
#q = deriv(r * bz) / deriv(Ra * bq)
y = lam(0.0, *pars)
q = 2.0 * b / Rb / y + np.zeros(n)
q[1:] = rho[1:] * b * bz[1:] / Rb / bq[1:]
# Added 2015-10, KM
q[0] = np.polyval(np.polyfit(rho[1:4], q[1:4], 2), rho[0])
# Get parallel current in MKS.
y = lam(rho, *pars)
jq = y * bq / mu0 / b
jz = y * bz / mu0 / b
# Add perpendicular current for ASSUMED pressure profile.
bb = bz * bz + bq * bq
jq = jq + bz / bb * gradp
jz = jz - bq / bb * gradp
# Get total poloidal and toroidal fluxes (not per radian).
r = rho*b
psi = 2.0*np.pi*Ra*sig.cumtrapz(
np.append(bq, bpw), np.append(r, a), initial=0.0)
Psi = psi[-1]
psi = psi[:-1]
phi = 2.0*np.pi*sig.cumtrapz(
np.append(r, a)*np.append(bz, btw), np.append(r, a),
initial=0.0)
Phi = phi[-1]
phi = phi[:-1]
return {
'ip':ip, 'bpw':bpw, 'btw':btw, 'btave':btave, 'b0':bz[0],
'beta0':beta0, 'F':btw/btave, 'Theta':bpw/btave,
'bpw_b':bpw_b, 'btw_b':bz[-1], 'btave_b':btave_b,
'b0':bz[0], 'beta0':beta0,
'a':a, 'Ra':Ra, 'd':d, 'b':b, 'Rb':Rb, 'rho':rho, 'r':r,
'bq':bq, 'bz':bz, 'jq':jq, 'jz':jz,
'Psi':Psi, 'psi':psi, 'Phi':Phi, 'phi':phi,
'p':p, 'gradp':gradp,
'q':q, 'lam':y,
'pars':pars, 'pmodel':pmodel, 'beta':beta,
'corr':corr
}
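# Hedged usage sketch (not in the original file): lam_to_eq expects a callable
# lambda-profile plus its parameters. The alpha-model profile, the parameter
# values (3.2, 4.0) and the 200 kA plasma current below are illustrative
# assumptions; per the docstring, ip must be given in amperes.
def _example_lam_to_eq():
    def lam_alpha(x, lam0, alpha):
        # simple alpha-model: lambda(x) * b = lam0 * (1 - x**alpha)
        return lam0 * (1.0 - x**alpha)
    eq = lam_to_eq(lam_alpha, (3.2, 4.0), 200e3,
                   pmodel='cubic', beta=0.07, n=51, ret='all')
    # F = btw/btave and Theta = bpw/btave, as built in the return dictionary above.
    return eq['F'], eq['Theta'], eq['q']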
|
mit
| -1,738,313,170,859,749,000
| 35.451163
| 74
| 0.511803
| false
| 2.62634
| false
| false
| false
|
ownport/ansiblite
|
src/ansiblite/playbook/handler.py
|
1
|
2018
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansiblite.errors import AnsibleError
from ansiblite.playbook.attribute import FieldAttribute
from ansiblite.playbook.task import Task
class Handler(Task):
_listen = FieldAttribute(isa='list')
def __init__(self, block=None, role=None, task_include=None):
self._flagged_hosts = []
super(Handler, self).__init__(block=block, role=role, task_include=task_include)
def __repr__(self):
''' returns a human readable representation of the handler '''
return "HANDLER: %s" % self.get_name()
@staticmethod
def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
t = Handler(block=block, role=role, task_include=task_include)
return t.load_data(data, variable_manager=variable_manager, loader=loader)
def flag_for_host(self, host):
        # assert isinstance(host, Host)
if host not in self._flagged_hosts:
self._flagged_hosts.append(host)
def has_triggered(self, host):
return host in self._flagged_hosts
def serialize(self):
result = super(Handler, self).serialize()
result['is_handler'] = True
return result
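# Hedged illustration (not part of the original module): how the flagging API above
# is typically used. The handler and host objects are placeholders for whatever the
# calling strategy code holds; flag_for_host() records a notification and
# has_triggered() checks it.
def _example_handler_flow(handler, host):
    if not handler.has_triggered(host):
        handler.flag_for_host(host)
    return handler.has_triggered(host)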
|
gpl-3.0
| -1,476,278,930,421,509,400
| 35.690909
| 97
| 0.701685
| false
| 3.873321
| false
| false
| false
|
wavelets/GroundHog
|
groundhog/layers/rec_layers.py
|
1
|
34050
|
"""
Recurrent layers.
TODO: write more documentation
"""
__docformat__ = 'restructedtext en'
__authors__ = ("Razvan Pascanu "
"KyungHyun Cho "
"Caglar Gulcehre ")
__contact__ = "Razvan Pascanu <r.pascanu@gmail>"
import numpy
import copy
import theano
import theano.tensor as TT
# Nicer interface of scan
from theano.sandbox.scan import scan
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from groundhog import utils
from groundhog.utils import sample_weights, \
sample_weights_classic,\
init_bias, \
constant_shape
from basic import Layer
class RecurrentMultiLayer(Layer):
"""
Constructs a recurrent layer whose transition from h_tm1 to h_t is given
by an MLP or logistic regression. In our ICLR submission this is a
DT-RNN model.
"""
def __init__(self,
rng,
n_hids=[500,500],
activation = [TT.tanh, TT.tanh],
scale=.01,
sparsity = -1,
activ_noise=0.,
weight_noise=False,
dropout = 1.,
init_fn='sample_weights',
bias_fn='init_bias',
bias_scale = 0.,
grad_scale = 1.,
profile = 0,
name=None):
"""
:type rng: numpy random generator
:param rng: numpy random generator
:type n_in: int
:param n_in: number of inputs units
:type n_hids: list of ints
:param n_hids: Number of hidden units on each layer of the MLP
:type activation: string/function or list of
:param activation: Activation function for the embedding layers. If
a list it needs to have a value for each layer. If not, the same
activation will be applied to all layers
:type scale: float or list of
:param scale: depending on the initialization function, it can be
the standard deviation of the Gaussian from which the weights
are sampled or the largest singular value. If a single value it
will be used for each layer, otherwise it has to have one value
for each layer
:type sparsity: int or list of
:param sparsity: if a single value, it will be used for each layer,
otherwise it has to be a list with as many values as layers. If
negative, it means the weight matrix is dense. Otherwise it
means this many randomly selected input units are connected to
an output unit
:type weight_noise: bool
:param weight_noise: If true, the model is used with weight noise
(and the right shared variable are constructed, to keep track of the
noise)
:type dropout: float
:param dropout: the probability with which hidden units are dropped
from the hidden layer. If set to 1, dropout is not used
:type init_fn: string or function
:param init_fn: function used to initialize the weights of the
layer. We recommend using either `sample_weights_classic` or
`sample_weights` defined in the utils
:type bias_fn: string or function
:param bias_fn: function used to initialize the biases. We recommend
using `init_bias` defined in the utils
:type bias_scale: float
:param bias_scale: argument passed to `bias_fn`, depicting the scale
of the initial bias
:type grad_scale: float or theano scalar
:param grad_scale: factor with which the gradients with respect to
the parameters of this layer are scaled. It is used for
differentiating between the different parameters of a model.
:type name: string
:param name: name of the layer (used to name parameters). NB: in
this library names are very important because certain parts of the
code relies on name to disambiguate between variables, therefore
each layer should have a unique name.
"""
self.grad_scale = grad_scale
if type(n_hids) not in (list, tuple):
n_hids = [n_hids]
n_layers = len(n_hids)
if type(scale) not in (list, tuple):
scale = [scale] * n_layers
if type(sparsity) not in (list, tuple):
sparsity = [sparsity] * n_layers
for idx, sp in enumerate(sparsity):
if sp < 0: sparsity[idx] = n_hids[idx]
if type(activation) not in (list, tuple):
activation = [activation] * n_layers
if type(bias_scale) not in (list, tuple):
bias_scale = [bias_scale] * (n_layers-1)
if type(bias_fn) not in (list, tuple):
bias_fn = [bias_fn] * (n_layers-1)
if type(init_fn) not in (list, tuple):
init_fn = [init_fn] * n_layers
for dx in xrange(n_layers):
if dx < n_layers-1:
if type(bias_fn[dx]) is str or type(bias_fn[dx]) is unicode:
bias_fn[dx] = eval(bias_fn[dx])
if type(init_fn[dx]) is str or type(init_fn[dx]) is unicode:
init_fn[dx] = eval(init_fn[dx])
if type(activation[dx]) is str or type(activation[dx]) is unicode:
activation[dx] = eval(activation[dx])
self.scale = scale
self.n_layers = n_layers
self.sparsity = sparsity
self.activation = activation
self.n_hids = n_hids
self.bias_scale = bias_scale
self.bias_fn = bias_fn
self.init_fn = init_fn
self.weight_noise = weight_noise
self.activ_noise = activ_noise
self.profile = profile
self.dropout = dropout
assert rng is not None, "random number generator should not be empty!"
super(RecurrentMultiLayer, self).__init__(n_hids[0],
n_hids[-1],
rng,
name)
self.trng = RandomStreams(self.rng.randint(int(1e6)))
self.params = []
self._init_params()
def _init_params(self):
self.W_hhs = []
self.b_hhs = []
for dx in xrange(self.n_layers):
W_hh = self.init_fn[dx](self.n_hids[(dx-1)%self.n_layers],
self.n_hids[dx],
self.sparsity[dx],
self.scale[dx],
rng=self.rng)
self.W_hhs.append(theano.shared(value=W_hh, name="W%d_%s" %
(dx,self.name)))
if dx > 0:
self.b_hhs.append(theano.shared(
self.bias_fn[dx-1](self.n_hids[dx],
self.bias_scale[dx-1],
self.rng),
name='b%d_%s' %(dx, self.name)))
self.params = [x for x in self.W_hhs] + [x for x in self.b_hhs]
self.params_grad_scale = [self.grad_scale for x in self.params]
if self.weight_noise:
self.nW_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_hhs]
self.nb_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.b_hhs]
self.noise_params = [x for x in self.nW_hhs] + [x for x in self.nb_hhs]
self.noise_params_shape_fn = [constant_shape(x.get_value().shape)
for x in self.noise_params]
def step_fprop(self,
state_below,
mask=None,
dpmask=None,
state_before=None,
use_noise=True,
no_noise_bias=False):
"""
Constructs the computational graph of a single step of the recurrent
layer.
:type state_below: theano variable
:param state_below: the input to the layer
:type mask: None or theano variable
:param mask: mask describing the length of each sequence in a
minibatch
:type state_before: theano variable
:param state_before: the previous value of the hidden state of the
layer
:type use_noise: bool
:param use_noise: flag saying if weight noise should be used in
computing the output of this layer
:type no_noise_bias: bool
:param no_noise_bias: flag saying if weight noise should be added to
the bias as well
"""
rval = []
if self.weight_noise and use_noise and self.noise_params:
W_hhs = [(x+y) for x, y in zip(self.W_hhs, self.nW_hhs)]
if not no_noise_bias:
                b_hhs = [(x+y) for x, y in zip(self.b_hhs, self.nb_hhs)]
else:
b_hhs = self.b_hhs
else:
W_hhs = self.W_hhs
b_hhs = self.b_hhs
preactiv = TT.dot(state_before, W_hhs[0]) +state_below
h = self.activation[0](preactiv)
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,:h.shape[1]]
dpidx = h.shape[1]
else:
h = h * dpmask[:h.shape[0]]
dpidx = h.shape[0]
else:
h = h * self.dropout
rval +=[h]
for dx in xrange(1, self.n_layers):
preactiv = TT.dot(h, W_hhs[dx]) + b_hhs[dx-1]
h = self.activation[dx](preactiv)
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,dpidx:dpidx+h.shape[1]]
dpidx = dpidx + h.shape[1]
else:
h = h * dpmask[dpidx:dpidx+h.shape[0]]
dpidx = dpidx + h.shape[0]
else:
h = h * self.dropout
rval += [h]
if mask is not None:
if h.ndim ==2 and mask.ndim==1:
mask = mask.dimshuffle(0,'x')
h = mask * h + (1-mask) * state_before
rval[-1] = h
return rval
def fprop(self,
state_below,
mask=None,
init_state=None,
n_steps=None,
batch_size=None,
use_noise=True,
truncate_gradient=-1,
no_noise_bias = False):
"""
Evaluates the forward through a recurrent layer
:type state_below: theano variable
:param state_below: the input of the recurrent layer
:type mask: None or theano variable
:param mask: mask describing the length of each sequence in a
minibatch
:type init_state: theano variable or None
:param init_state: initial state for the hidden layer
:type n_steps: None or int or theano scalar
        :param n_steps: Number of steps the recurrent network does
:type batch_size: int
:param batch_size: the size of the minibatch over which scan runs
:type use_noise: bool
:param use_noise: flag saying if weight noise should be used in
computing the output of this layer
:type truncate_gradient: int
:param truncate_gradient: If negative, no truncation is used,
otherwise truncated BPTT is used, where you go backwards only this
amount of steps
:type no_noise_bias: bool
:param no_noise_bias: flag saying if weight noise should be added to
the bias as well
"""
if theano.config.floatX=='float32':
floatX = numpy.float32
else:
floatX = numpy.float64
if n_steps is None:
n_steps = state_below.shape[0]
if batch_size and batch_size != 1:
n_steps = n_steps / batch_size
if batch_size is None and state_below.ndim == 3:
batch_size = state_below.shape[1]
if state_below.ndim == 2 and \
(not isinstance(batch_size,int) or batch_size > 1):
state_below = state_below.reshape((n_steps, batch_size, self.nin))
if not init_state:
if not isinstance(batch_size, int) or batch_size != 1:
init_state = TT.alloc(floatX(0), batch_size, self.nhid)
else:
init_state = TT.alloc(floatX(0), self.nhid)
if mask:
inps = [state_below, mask]
fn = lambda x,y,z : self.step_fprop(x,y,None, z, use_noise=use_noise,
no_noise_bias=no_noise_bias)
else:
inps = [state_below]
fn = lambda tx, ty: self.step_fprop(tx, None, None, ty,
use_noise=use_noise,
no_noise_bias=no_noise_bias)
if self.dropout < 1. and use_noise:
# build dropout mask outside scan
allhid = numpy.sum(self.n_hids)
shape = state_below.shape
if state_below.ndim == 3:
alldpmask = self.trng.binomial(
(n_steps, batch_size, allhid),
n = 1, p = self.dropout, dtype=state_below.dtype)
else:
alldpmask = self.trng.binomial(
(n_steps, allhid),
n = 1, p = self.dropout, dtype=state_below.dtype)
inps.append(alldpmask)
if mask:
fn = lambda x,y,z,u : self.step_fprop(x,y,z,u,use_noise=use_noise)
else:
fn = lambda tx, ty, tu: self.step_fprop(tx,None,ty,tu,
use_noise=use_noise)
rval, updates = theano.scan(fn,
sequences = inps,
outputs_info = [None]*(self.n_layers-1) +
[init_state],
name='layer_%s'%self.name,
profile=self.profile,
truncate_gradient = truncate_gradient,
n_steps = n_steps)
if not isinstance(rval,(list, tuple)):
rval = [rval]
new_h = rval[-1]
self.out = rval[-1]
self.rval = rval
self.updates =updates
return self.out
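# Hedged usage sketch (not part of the original file): building a small DT-RNN
# transition layer. The layer sizes and hyper-parameters are illustrative, and a
# working Theano/GroundHog setup (the Layer base class, sample_weights, init_bias)
# is assumed.
def _example_build_dtrnn():
    rng = numpy.random.RandomState(123)
    layer = RecurrentMultiLayer(rng,
                                n_hids=[100, 100],
                                activation=[TT.tanh, TT.tanh],
                                scale=.01,
                                dropout=1.,
                                name='rec_example')
    # fprop expects state_below shaped (n_steps, batch_size, n_hids[0]) and unrolls
    # the per-step transition MLP with theano.scan.
    return layer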
class RecurrentMultiLayerInp(RecurrentMultiLayer):
"""
Similar to the RecurrentMultiLayer, with the exception that the input is
fed into the top layer of the MLP (rather than being an input to the
MLP).
"""
def _init_params(self):
self.W_hhs = []
self.b_hhs = []
for dx in xrange(self.n_layers):
W_hh = self.init_fn[dx](self.n_hids[(dx-1)%self.n_layers],
self.n_hids[dx],
self.sparsity[dx],
self.scale[dx],
rng=self.rng)
self.W_hhs.append(theano.shared(value=W_hh, name="W%d_%s" %
(dx,self.name)))
if dx < self.n_layers-1:
self.b_hhs.append(theano.shared(
self.bias_fn[dx](self.n_hids[dx],
self.bias_scale[dx],
self.rng),
name='b%d_%s' %(dx, self.name)))
self.params = [x for x in self.W_hhs] + [x for x in self.b_hhs]
self.params_grad_scale = [self.grad_scale for x in self.params]
self.restricted_params = [x for x in self.params]
if self.weight_noise:
self.nW_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_hhs]
self.nb_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.b_hhs]
self.noise_params = [x for x in self.nW_hhs] + [x for x in self.nb_hhs]
self.noise_params_shape_fn = [constant_shape(x.get_value().shape)
for x in self.noise_params]
def step_fprop(self,
state_below,
mask=None,
dpmask=None,
state_before=None,
no_noise_bias=False,
use_noise=True):
"""
See parent class
"""
rval = []
if self.weight_noise and use_noise and self.noise_params:
            W_hhs = [(x+y) for x, y in zip(self.W_hhs,self.nW_hhs)]
if not no_noise_bias:
b_hhs = [(x+y) for x, y in zip(self.b_hhs,self.nb_hhs)]
else:
b_hhs = self.b_hhs
else:
W_hhs = self.W_hhs
b_hhs = self.b_hhs
h = self.activation[0](TT.dot(state_before,
W_hhs[0])+b_hhs[0])
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,:h.shape[1]]
dpidx = h.shape[1]
else:
h = h * dpmask[:h.shape[0]]
dpidx = h.shape[0]
else:
h = h * self.dropout
rval += [h]
for dx in xrange(1, self.n_layers-1):
h = self.activation[dx](TT.dot(h,
W_hhs[dx])+b_hhs[dx])
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,dpidx:dpidx+h.shape[1]]
dpidx = dpidx + h.shape[1]
else:
h = h * dpmask[dpidx:dpidx+h.shape[0]]
dpidx = dpidx + h.shape[0]
else:
h = h * self.dropout
rval += [h]
h = self.activation[-1](TT.dot(h, W_hhs[-1]) + state_below)
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,dpidx:dpidx+h.shape[1]]
dpidx = dpidx + h.shape[1]
else:
h = h * dpmask[dpidx:dpidx+h.shape[0]]
dpidx = dpidx + h.shape[0]
else:
h = h * self.dropout
rval += [h]
if mask is not None:
if h.ndim ==2 and mask.ndim==1:
mask = mask.dimshuffle(0,'x')
h = mask * h + (1-mask) * state_before
rval[-1] = h
return rval
class RecurrentMultiLayerShortPath(RecurrentMultiLayer):
"""
A similar layer to RecurrentMultiLayer (the DT-RNN), with the difference
that we have shortcut connections in the MLP representing the transition
from previous hidden state to the next
"""
def _init_params(self):
self.W_hhs = []
self.b_hhs = []
self.W_shortp = []
for dx in xrange(self.n_layers):
W_hh = self.init_fn[dx](self.n_hids[(dx-1)%self.n_layers],
self.n_hids[dx],
self.sparsity[dx],
self.scale[dx],
rng=self.rng)
self.W_hhs.append(theano.shared(value=W_hh, name="W%d_%s" %
(dx,self.name)))
if dx > 0:
W_shp = self.init_fn[dx](self.n_hids[self.n_layers-1],
self.n_hids[dx],
self.sparsity[dx],
self.scale[dx],
rng=self.rng)
self.W_shortp.append(theano.shared(value=W_shp,
name='W_s%d_%s'%(dx,self.name)))
self.b_hhs.append(theano.shared(
self.bias_fn[dx-1](self.n_hids[dx],
self.bias_scale[dx-1],
self.rng),
name='b%d_%s' %(dx, self.name)))
self.params = [x for x in self.W_hhs] + [x for x in self.b_hhs] +\
[x for x in self.W_shortp]
self.params_grad_scale = [self.grad_scale for x in self.params]
self.restricted_params = [x for x in self.params]
if self.weight_noise:
self.nW_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_hhs]
self.nb_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.b_hhs]
self.nW_shortp = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_shortp]
self.noise_params = [x for x in self.nW_hhs] + [x for x in self.nb_hhs] + [x for x in self.nW_shortp]
self.noise_params_shape_fn = [constant_shape(x.get_value().shape) for x in self.noise_params]
def step_fprop(self,
state_below,
mask=None,
dpmask=None,
state_before=None,
no_noise_bias=False,
use_noise=True):
"""
See parent class
"""
rval = []
if self.weight_noise and use_noise and self.noise_params:
W_hhs = [(x+y) for x, y in zip(self.W_hhs,self.nW_hhs)]
if not no_noise_bias:
b_hhs = [(x+y) for x, y in zip(self.b_hhs,self.nb_hhs)]
else:
b_hhs = self.b_hhs
W_shp = [(x+y) for x, y in zip(self.W_shortp,self.nW_shortp)]
else:
W_hhs = self.W_hhs
b_hhs = self.b_hhs
W_shp = self.W_shortp
h = self.activation[0](TT.dot(state_before,
W_hhs[0])+state_below)
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,:h.shape[1]]
dpidx = h.shape[1]
else:
h = h * dpmask[:h.shape[0]]
dpidx = h.shape[0]
else:
h = h * self.dropout
rval += [h]
for dx in xrange(1, self.n_layers):
h = self.activation[dx](TT.dot(h,
W_hhs[dx])+
TT.dot(state_before,
W_shp[dx-1])+b_hhs[dx-1])
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,dpidx:dpidx+h.shape[1]]
dpidx = dpidx + h.shape[1]
else:
h = h * dpmask[dpidx:dpidx+h.shape[0]]
dpidx = dpidx + h.shape[0]
else:
h = h * self.dropout
rval += [h]
if mask is not None:
if h.ndim ==2 and mask.ndim==1:
mask = mask.dimshuffle(0,'x')
h = mask * h + (1-mask) * state_before
rval[-1] = h
return rval
class RecurrentMultiLayerShortPathInp(RecurrentMultiLayer):
"""
Similar to the RecurrentMultiLayerShortPath class, just that the input
is fed into the last layer of the MLP (similar to
RecurrentMultiLayerInp).
"""
def _init_params(self):
self.W_hhs = []
self.b_hhs = []
self.W_shortp = []
for dx in xrange(self.n_layers):
W_hh = self.init_fn[dx](self.n_hids[(dx-1)%self.n_layers],
self.n_hids[dx],
self.sparsity[dx],
self.scale[dx],
rng=self.rng)
self.W_hhs.append(theano.shared(value=W_hh, name="W%d_%s" %
(dx,self.name)))
if dx > 0:
W_shp = self.init_fn[dx](self.n_hids[self.n_layers-1],
self.n_hids[dx],
self.sparsity[dx],
self.scale[dx],
rng=self.rng)
self.W_shortp.append(theano.shared(value=W_shp,
name='W_s%d_%s'%(dx,self.name)))
if dx < self.n_layers-1:
self.b_hhs.append(theano.shared(
self.bias_fn[dx](self.n_hids[dx],
self.bias_scale[dx],
self.rng),
name='b%d_%s' %(dx, self.name)))
self.params = [x for x in self.W_hhs] + [x for x in self.b_hhs] +\
[x for x in self.W_shortp]
self.restricted_params = [x for x in self.params]
self.params_grad_scale = [self.grad_scale for x in self.params]
if self.weight_noise:
self.nW_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_hhs]
self.nb_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.b_hhs]
self.nW_shortp = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_shortp]
self.noise_params = [x for x in self.nW_hhs] + [x for x in self.nb_hhs] + [x for x in self.nW_shortp]
self.noise_params_shape_fn = [constant_shape(x.get_value().shape) for x in self.noise_params]
def step_fprop(self,
state_below,
mask=None,
dpmask=None,
state_before=None,
no_noise_bias=False,
use_noise=True):
"""
See parent class
"""
rval = []
if self.weight_noise and use_noise and self.noise_params:
W_hhs = [(x+y) for x, y in zip(self.W_hhs, self.nW_hhs)]
if not no_noise_bias:
b_hhs = [(x+y) for x, y in zip(self.b_hhs, self.nb_hhs)]
else:
b_hhs = self.b_hhs
W_shp = [(x+y) for x, y in zip(self.W_shortp, self.nW_shortp)]
else:
W_hhs = self.W_hhs
b_hhs = self.b_hhs
W_shp = self.W_shortp
h = self.activation[0](TT.dot(state_before,
W_hhs[0])+b_hhs[0])
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,:h.shape[1]]
dpidx = h.shape[1]
else:
h = h * dpmask[:h.shape[0]]
dpidx = h.shape[0]
else:
h = h * self.dropout
rval += [h]
for dx in xrange(1, self.n_layers-1):
h = self.activation[dx](TT.dot(h,
W_hhs[dx])+
TT.dot(state_before,
W_shp[dx-1])+b_hhs[dx])
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,dpidx:dpidx+h.shape[1]]
dpidx = dpidx + h.shape[1]
else:
h = h * dpmask[dpidx:dpidx+h.shape[0]]
dpidx = dpidx + h.shape[0]
else:
h = h * self.dropout
rval += [h]
h = self.activation[-1](TT.dot(h, W_hhs[-1]) +
TT.dot(state_before, W_shp[-1])+state_below)
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,:h.shape[1]]
dpidx = h.shape[1]
else:
h = h * dpmask[:h.shape[0]]
dpidx = h.shape[0]
else:
h = h * self.dropout
rval +=[h]
if mask is not None:
if h.ndim ==2 and mask.ndim==1:
mask = mask.dimshuffle(0,'x')
h = mask * h + (1-mask) * state_before
            rval[-1] = h
return rval
class RecurrentMultiLayerShortPathInpAll(RecurrentMultiLayer):
"""
Similar to RecurrentMultiLayerShortPathInp class, just that the input is
fed to all layers of the MLP depicting the deep transition between h_tm1
to h_t.
"""
def _init_params(self):
self.W_hhs = []
self.W_shortp = []
for dx in xrange(self.n_layers):
W_hh = self.init_fn[dx](self.n_hids[(dx-1)%self.n_layers],
self.n_hids[dx],
self.sparsity[dx],
self.scale[dx],
rng=self.rng)
self.W_hhs.append(theano.shared(value=W_hh, name="W%d_%s" %
(dx,self.name)))
if dx > 0:
W_shp = self.init_fn[dx](self.n_hids[self.n_layers-1],
self.n_hids[dx],
self.sparsity[dx],
self.scale[dx],
rng=self.rng)
self.W_shortp.append(theano.shared(value=W_shp,
name='W_s%d_%s'%(dx,self.name)))
self.params = [x for x in self.W_hhs] +\
[x for x in self.W_shortp]
self.params_grad_scale = [self.grad_scale for x in self.params]
self.restricted_params = [x for x in self.params]
if self.weight_noise:
self.nW_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_hhs]
self.nW_shortp = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_shortp]
self.noise_params = [x for x in self.nW_hhs] + [x for x in self.nW_shortp]
self.noise_params_shape_fn = [constant_shape(x.get_value().shape) for x in self.noise_params]
def step_fprop(self,
state_below,
mask=None,
dpmask=None,
state_before=None,
no_noise_bias=False,
use_noise=True):
"""
See parent class
"""
rval = []
if self.weight_noise and use_noise and self.noise_params:
W_hhs = [(x+y) for x, y in zip(self.W_hhs,self.nW_hhs)]
W_shp = [(x+y) for x, y in zip(self.W_shortp,self.nW_shortp)]
else:
W_hhs = self.W_hhs
W_shp = self.W_shortp
def slice_state_below(dx, sb = state_below):
st = 0
for p in xrange(dx):
st += self.n_hids[p]
ed = st + self.n_hids[dx]
if sb.ndim == 1:
return sb[st:ed]
else:
return sb[:,st:ed]
h = self.activation[0](TT.dot(state_before, W_hhs[0]) + slice_state_below(0))
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,:h.shape[1]]
dpidx = h.shape[1]
else:
h = h * dpmask[:h.shape[0]]
dpidx = h.shape[0]
else:
h = h * self.dropout
rval += [h]
for dx in xrange(1, self.n_layers):
h = self.activation[dx](TT.dot(h, W_hhs[dx]) +
TT.dot(state_before, W_shp[dx-1]) +
slice_state_below(dx))
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,dpidx:dpidx+h.shape[1]]
dpidx = dpidx + h.shape[1]
else:
h = h * dpmask[dpidx:dpidx+h.shape[0]]
dpidx = dpidx + h.shape[0]
else:
h = h * self.dropout
rval += [h]
if mask is not None:
if h.ndim ==2 and mask.ndim==1:
mask = mask.dimshuffle(0,'x')
h = mask * h + (1-mask) * state_before
rval[-1] = h
return rval
|
bsd-3-clause
| -443,794,965,629,435,000
| 39.729665
| 113
| 0.472041
| false
| 3.732734
| false
| false
| false
|
mufaddalq/cloudstack-datera-driver
|
api/test/integration/api/test/account/testCreateAccount.py
|
1
|
2136
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest  # used by the __main__ block below
import factory
import marvin
from marvin import cloudstackTestCase
from marvin.integration.lib.base import *
from marvin.integration.lib import utils
class AccountFactory(factory.Factory):
FACTORY_FOR = createAccount.createAccountCmd
    firstname = 'firstname-'+utils.random_gen()
    lastname = 'lastname-'+utils.random_gen()
email = factory.lazy_attribute(lambda e: '{0}.{1}@cloudstack.org'.format(e.firstname, e.lastname).lower())
class AdminAccountFactory(AccountFactory):
accounttype = 1
class UserAccountFactory(AccountFactory):
accounttype = 0
class TestCreateAccount(cloudstackTestCase):
def setUp(self):
self.apiClient = self.testClient.getApiClient()
        self.userApiClient = self.testClient.getUserApiClient('test'+utils.random_gen(), 'ROOT')
def test_createAccountAsAdmin(self):
"""
creates an account for a user as admin
"""
Account.create(self.apiClient, services=None)
from marvin.cloudstackAPI.createAccount import createAccountCmd
self.assertEqual(True, False)
def test_createAccountAsUser(self):
"""
negative: create account as a user
"""
self.assertEqual(True, False)
def tearDown(self):
self.apiClient.close()
self.userApiClient.close()
if __name__ == '__main__':
unittest.main()
|
apache-2.0
| 2,810,052,616,260,023,300
| 34.016393
| 110
| 0.722378
| false
| 4.076336
| true
| false
| false
|
lrq3000/pyFileFixity
|
pyFileFixity/lib/gooey/gui/build_spec_validator.py
|
1
|
1627
|
'''
Validates that the json has meaningful keys
'''
import itertools
a = {
'required' : [
{
'component': 'TextField',
'data': {
'display_name': 'filename',
'help_text': 'path to file you want to process',
'command_args': ['-f', '--infile']
}
},
{
'component': 'FileChooser',
'data': {
'display_name': 'Output Location',
'help_text': 'Where to save the file',
'command_args': ['-o', '--outfile']
}
}
],
'optional' : [
{
'component': 'RadioGroup',
'data': [
{
'display_name': 'Output Location',
'help_text': 'Where to save the file',
'command_args': ['-o', '--outfile']
}, {
'display_name': 'Output Location',
'help_text': 'Where to save the file',
'command_args': ['-o', '--outfile']
}
]
}
]
}
VALID_WIDGETS = (
'FileChooser',
'DirChooser',
'DateChooser',
'TextField',
'Dropdown',
'Counter',
'RadioGroup'
)
class MalformedBuildSpecException(Exception):
pass
def validate(json_string):
required = json_string.get('required')
optional = json_string.get('optional')
if not required or not optional:
raise MalformedBuildSpecException("All objects must be children of 'required,' or 'optional'")
objects = [item for key in json_string for item in json_string[key]]
for obj in objects:
if obj['component'] not in VALID_WIDGETS:
raise MalformedBuildSpecException("Invalid Component name: {0}".format(obj['component']))
if __name__ == '__main__':
validate(a)
|
mit
| 2,159,226,274,328,498,200
| 19.594937
| 98
| 0.559312
| false
| 3.723112
| false
| false
| false
|
nblago/utils
|
src/utils/tns_query.py
|
1
|
2954
|
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 14 14:21:41 2018
Script to query TNS with different parameters:
either for transients discovered between two different dates,
or a cone search radius around a given RA, DEC
@author: nadiablago
"""
from __future__ import print_function
try:
# For Python 3.0 and later
from urllib.request import urlopen
except ImportError:
# Fall back to Python 2's urllib2
from urllib2 import urlopen
import sys, os
#reload(sys)
#sys.setdefaultencoding('utf8')
import numpy as np
from astropy.table import Table
from astropy.coordinates import SkyCoord
from astropy.table import Column
import astropy.units as u
import sys
if sys.version_info[0] < 3:
from StringIO import StringIO
else:
from io import StringIO
def get_tns_date2date(date1, date2):
'''
Queries the TNS and obtains the targets reported between two dates.
    It parses the coordinates and transforms them into decimals.
It writes a csv table with RA, DEC in degrees, which is directly ingestable into a postresql file.
date1: in the format of: YYYY-MM-DD
date2: in the format of: YYYY-MM-DD
'''
url = "https://wis-tns.weizmann.ac.il/search?&date_start%5Bdate%5D={0}&date_end%5Bdate%5D={1}&format=csv&num_page=500".format(date1, date2)
cont_url = urlopen(url)
    cont = cont_url.read().decode('utf-8')  # bytes -> str so StringIO/Table.read also work on Python 3
t = Table.read(StringIO(cont), format='csv')
coords = np.array([t["RA"], t["DEC"]]).T
c = SkyCoord(coords, frame='icrs', unit=(u.hourangle, u.deg))
radeg = Column(c.ra, name='RA')
decdeg = Column(c.dec, name='DEC')
t.remove_column("RA")
t.remove_column("DEC")
t.add_column(radeg, index=1)
t.add_column(decdeg, index=2)
t.write("tns_query_%s_%s.csv"%(date1, date2), format="csv")
return t
def get_tns_ra_dec(ra, dec, rad=15):
'''
Queries the TNS and obtains the targets reported for the specified RA, DEC position.
Provided that ASASSN targets are there, a 7 arcsec position error is expected.
    By default we will use 15 arcsec.
ra: float
position in degrees
dec: float
position in degrees
rad: float, optional
Search radius in arcseconds.
'''
url = "https://wis-tns.weizmann.ac.il/search?&name=&ra={0}&decl={1}&radius={2}&coords_unit=arcsec&format=csv".format(ra, dec, rad)
cont_url = urlopen(url)
    cont = cont_url.read().decode('utf-8')  # bytes -> str so StringIO/Table.read also work on Python 3
t = Table.read(StringIO(cont), format='ascii.csv')
if len(t) > 0:
coords = np.array([t["RA"], t["DEC"]]).T
c = SkyCoord(coords, frame='icrs', unit=(u.hourangle, u.deg))
basecoord = SkyCoord(ra, dec, frame='icrs', unit=(u.deg, u.deg))
#In case there are several objects in the match radius, we select the closest one
dist = c.separation(basecoord)
closest = t[np.argmin(dist)]
else:
closest = None
return closest
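# Hedged usage examples (not part of the original file). The date range and the
# coordinates are arbitrary placeholders, and both helpers query the live TNS
# server (the date query also writes a csv file as a side effect).
def _example_queries():
    recent = get_tns_date2date("2018-01-01", "2018-01-07")
    closest = get_tns_ra_dec(188.860, 8.162, rad=15)
    return recent, closest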
|
mit
| 9,166,153,378,906,148,000
| 28.55
| 143
| 0.646919
| false
| 3.315376
| false
| false
| false
|
Letractively/aha-gae
|
aha/wsgi/appinit.py
|
1
|
3932
|
# -*- coding: utf-8 -*-
# appinits.py
# Init functions for application
__author__ = 'Atsushi Shibata <shibata@webcore.co.jp>'
__docformat__ = 'plaintext'
__licence__ = 'BSD'
__all__ = ['initConfig', 'initPlugins', 'run', 'get_app']
import os
import sys
import re
import logging
import wsgiref.handlers
def initConfig(basedir):
"""
Initialize config object
"""
# add the project's directory to the import path list.
sys.path = [basedir,
os.path.join(basedir, 'application'),
os.path.join(basedir, 'lib')]+sys.path
import aha
config = aha.Config()
# setup the templates location
config.application_dir = os.path.join(basedir, 'application')
config.messages_dir = os.path.join(config.application_dir, 'messages')
config.template_dirs = [os.path.join(config.application_dir, 'template'),
'plugin']
config.debug = False
config.useappstatus = False
if os.environ.get('SERVER_SOFTWARE', '').startswith('Dev'):
config.debug = True
return config
def initPlugins(basedir):
"""
Initialize the installed plugins
"""
plugin_root = os.path.join(basedir, 'plugin')
if os.path.exists(plugin_root):
plugins = os.listdir(plugin_root)
for plugin in plugins:
if not re.match('^__|^\.', plugin):
try:
exec('from plugin import %s' % plugin)
except ImportError, e:
from traceback import format_exc
logging.error('Unable to import %s' % (plugin))
logging.error(format_exc())
except SyntaxError, e:
from traceback import format_exc
logging.error('Unable to import name %s' % (plugin))
logging.error(format_exc())
_debugged_app = None
def run(debug = False, useappstatus = False, dispatcher = None):
"""
A function to run wsgi server
"""
from aha.wsgi.cwsgiapp import CustomHandler
app = get_app(debug, dispatcher)
if useappstatus:
from google.appengine.ext.appstats import recording
        app = recording.appstats_wsgi_middleware(app)
from google.appengine.ext.webapp.util import run_wsgi_app
run_wsgi_app(app)
else:
CustomHandler().run(app)
def get_app(debug = False, dispatcher = None):
"""
A function to get wsgi server object.
"""
if debug:
# use our debug.utils with Jinja2 templates
from aha.wsgi.cwsgiapp import (CWSGIApplication, MainHandler)
from aha.wsgi.debug import utils
app = CWSGIApplication(
[(r'.*', MainHandler)],
debug = debug)
sys.modules['werkzeug.debug.utils'] = utils
import inspect
inspect.getsourcefile = inspect.getfile
patch_werkzeug()
from werkzeug import DebuggedApplication
global _debugged_app
if not _debugged_app:
_debugged_app = app = DebuggedApplication(app, evalex = True)
else:
app = _debugged_app
return app
else:
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext.webapp import WSGIApplication
from aha.wsgi.cwsgiapp import MainHandler
app = WSGIApplication(
[(r'.*', MainHandler)],
debug = debug)
return app
def patch_werkzeug():
"""
A function to patch werkzeug to make it work on app engine
"""
from werkzeug.debug.console import HTMLStringO
def seek(self, n, mode=0):
pass
def readline(self):
if len(self._buffer) == 0:
return ''
ret = self._buffer[0]
del self._buffer[0]
return ret
# Apply all other patches.
HTMLStringO.seek = seek
HTMLStringO.readline = readline
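# Hedged usage sketch (not part of the original module): a typical entry point is
# assumed to wire these helpers together roughly as below; the way basedir is
# derived is illustrative.
def _example_main():
    basedir = os.path.dirname(os.path.abspath(__file__))
    config = initConfig(basedir)
    initPlugins(basedir)
    run(debug=config.debug, useappstatus=config.useappstatus)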
|
bsd-3-clause
| -6,219,672,598,750,610,000
| 26.893617
| 77
| 0.591302
| false
| 4.121593
| true
| false
| false
|
shea256/coinrpc
|
coinrpc/config.py
|
1
|
1552
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
coinrpc
~~~~~
:copyright: (c) 2014 by Halfmoon Labs
:license: MIT, see LICENSE for more details.
"""
import os
from commontools import log
NAMECOIND_ENABLED = True
BITCOIND_ENABLED = False
DEBUG = True
#--------------------------------------------------
if NAMECOIND_ENABLED:
NAMECOIND_USE_HTTPS = True
try:
NAMECOIND_PORT = os.environ['NAMECOIND_PORT']
NAMECOIND_SERVER = os.environ['NAMECOIND_SERVER']
NAMECOIND_USER = os.environ['NAMECOIND_USER']
NAMECOIND_PASSWD = os.environ['NAMECOIND_PASSWD']
except:
#log.debug("Namecoind not configured")
#default settings with a public server
NAMECOIND_PORT = 8332
NAMECOIND_SERVER = '107.170.167.141'
NAMECOIND_USER = 'opennamesystem'
NAMECOIND_PASSWD = 'opennamesystem'
try:
NAMECOIND_WALLET_PASSPHRASE = os.environ['NAMECOIND_WALLET_PASSPHRASE']
except:
NAMECOIND_WALLET_PASSPHRASE = ''
#--------------------------------------------------
if BITCOIND_ENABLED:
BITCOIND_USE_HTTPS = True
try:
BITCOIND_PORT = os.environ['BITCOIND_PORT']
BITCOIND_SERVER = os.environ['BITCOIND_SERVER']
BITCOIND_USER = os.environ['BITCOIND_USER']
BITCOIND_PASSWD = os.environ['BITCOIND_PASSWD']
BITCOIND_WALLET_PASSPHRASE = os.environ['BITCOIND_WALLET_PASSPHRASE']
except:
#log.debug("Bitcoind not configured")
BITCOIND_PORT = 5005
BITCOIND_SERVER = BITCOIND_USER = BITCOIND_PASSWD = ''
try:
BITCOIND_WALLET_PASSPHRASE = os.environ['BITCOIND_WALLET_PASSPHRASE']
except:
BITCOIND_WALLET_PASSPHRASE = ''
|
mit
| -7,431,524,886,803,951,000
| 24.048387
| 73
| 0.67268
| false
| 2.746903
| false
| false
| false
|
rustychris/stomel
|
src/equilateral_paver.py
|
1
|
5386
|
# Make a grid with all equilateral triangles
# Currently only supports a rectangular domain, constant density,
# and either vertical or horizontal orientation
import trigrid
import numpy as np
class EquilateralPaver(trigrid.TriGrid):
def __init__(self,L,W,dens,orientation='horizontal',**kwargs):
super(EquilateralPaver,self).__init__(**kwargs)
self.L = L # x dimension
self.W = W # y dimension
self.dens = dens
self.orientation = orientation
if self.orientation == 'vertical':
self.L,self.W = self.W,self.L
self.create_grid()
if self.orientation == 'vertical':
self.L,self.W = self.W,self.L
self.points = self.points[:,::-1]
self.cells = self.cells[:,::-1]
self.renumber()
def create_grid(self):
# first, how many rows - here we assume orientation is horizontal,
# so the left and right sides are ragged.
cos30 = np.cos(30*np.pi/180.)
n_rows = self.W / (cos30 * self.dens)
# to make sure that the first and last points line up, we need an
# even number of rows of cells:
n_rows = 2 * int( (n_rows+1.0)/ 2 )
self.n_rows = n_rows
# Let the length L be fudge-able - as in we prefer perfectly equilateral triangles
# over a perfectly L-length grid. the width W can still be exact.
dens = self.W / (n_rows * cos30)
print "That will make n_rows=%d and adjusted edge length %f"%(n_rows,dens)
# this is the number of cells...
n_cols = int(self.L / dens)
self.n_cols = n_cols
# Stack them up
for r in range(n_rows+1):
y = self.W * float(r)/n_rows
odd = r%2
x_off = odd * 0.5*dens
for c in range(n_cols+1):
x = x_off + dens*float(c)
n = self.add_node( np.array([x,y]) )
if c > 0:
if r==0:
self.add_edge(n-1,n,cright=-1,marker=1)
elif r==n_rows:
self.add_edge(n-1,n,cleft=-1,marker=1)
else:
self.add_edge(n,n-1)
# HERE: need to finish adding in the markers and closed boundary code.
if r>0:
cright=-2
cleft=-2
marker = 0
if odd:
if c==0:
cleft=-1
marker=1
elif c==n_cols:
cright=-1
marker=1
self.add_edge(n-(n_cols+1),n,marker=marker,cleft=cleft,cright=cright)
if c<n_cols:
self.add_edge(n,n-n_cols)
else:
if c==0:
cleft=-1
marker=1
elif c==n_cols:
cright=-1
marker=1
self.add_edge(n-(n_cols+1),n,cleft=cleft,cright=cright,marker=marker)
if c>0:
self.add_edge(n,n-(n_cols+1)-1)
class RotatedEquilateralPaver(EquilateralPaver):
""" Create a ragged-edged grid where the triangles are rotated the given
angle, in radians, CCW from parallel to the x-axis.
"""
def __init__(self,L,W,dens,angle=0,**kwargs):
self.final_L = L
self.final_W = W
# find the L and W needed to still be big enough after we've rotated -
# adding a bit of extra to avoid funny edge effects:
Lprime = L*np.cos(angle) + W*np.sin(angle) + 4*dens
Wprime = W*np.cos(angle) + L*np.sin(angle) + 4*dens
super(RotatedEquilateralPaver,self).__init__(L=Lprime, W=Wprime, dens=dens, **kwargs)
self.rotate_grid(angle)
self.trim_grid()
self.renumber()
def rotate_grid(self,angle):
""" rotates the oversized grid and translates to get the origin in the right place.
"""
# translate to get centered on the extra bit we asked for:
self.points[:] -= 2*self.dens
# rotate
self.points[:] = trigrid.rot(angle,self.points)
# and get our origin to a nice place
self.points[:,0] += self.final_L * np.sin(angle)**2
self.points[:,1] -= self.final_L * np.sin(angle)*np.cos(angle)
def trim_grid(self):
""" with the oversized grid created, and the origin correctly placed, remove points
and associated edges/cells that fall outside the actual footprint
"""
to_delete = (self.points[:,0] < 0) | (self.points[:,0]>self.final_L) | \
(self.points[:,1] < 0) | (self.points[:,1]>self.final_W)
for n in np.nonzero(to_delete)[0]:
self.delete_node(n,remove_edges=True)
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    #ep = EquilateralPaver(10000.,5000.,500.,orientation='horizontal')
    #ep.plot()
    ep = RotatedEquilateralPaver(10000.,5000.,510.,angle=15*np.pi/180.)
    plt.cla()
    ep.plot()
|
gpl-2.0
| -8,226,103,744,813,507,000
| 35.890411
| 93
| 0.492573
| false
| 3.803672
| false
| false
| false
|
hugobranquinho/ines
|
ines/middlewares/gzipper.py
|
1
|
3689
|
# -*- coding: utf-8 -*-
from io import BytesIO
from gzip import compress as gzip_compress
from pyramid.decorator import reify
from ines.middlewares import Middleware
class Gzip(Middleware):
name = 'gzip'
def __init__(self, config, application, **settings):
super(Gzip, self).__init__(config, application, **settings)
self.compress_level = int(settings.get('compress_level') or 9)
self.content_types = (
settings.get('content_types', '').split()
or ['text/', 'application/', 'image/svg'])
self.all_content_types = '*' in self.content_types
def __call__(self, environ, start_response):
return GzipMiddlewareSession(self)(environ, start_response)
class GzipMiddlewareSession(object):
def __init__(self, middleware):
self.middleware = middleware
self.compressible = False
self.status = None
self.headers = []
self.exc_info = None
def __call__(self, environ, start_response):
if 'gzip' not in environ.get('HTTP_ACCEPT_ENCODING', ''):
return self.middleware.application(environ, start_response)
self.start_response = start_response
app_iter = self.middleware.application(environ, self.gzip_start_response)
if app_iter is not None and self.compressible:
binary = gzip_compress(b''.join(app_iter), self.middleware.compress_level)
if hasattr(app_iter, 'close'):
app_iter.close()
self.remove_header('content-length')
self.headers.append(('content-encoding', 'gzip'))
self.set_header('content-length', len(binary))
start_response(self.status, self.headers, self.exc_info)
return [binary]
return app_iter
@reify
def buffer(self):
return BytesIO()
def remove_header(self, name):
i = len(self.headers)
name = name.lower()
for key, value in reversed(self.headers):
i -= 1
if key == name:
self.headers.pop(i)
def get_header(self, name):
name = name.lower()
for key, value in self.headers:
if key == name:
return value
def in_headers(self, name):
name = name.lower()
for key, value in self.headers:
if key == name:
return True
else:
return False
def set_header(self, name, new_value):
name = name.lower()
for i, (key, value) in enumerate(self.headers):
if key == name:
self.headers[i] = (name, str(new_value))
break
else:
self.headers.append((name, str(new_value)))
def gzip_start_response(self, status, headers, exc_info=None):
self.headers = [(key.lower(), value) for key, value in headers]
if not self.in_headers('content-encoding'):
content_type = self.get_header('content-type')
if content_type and 'zip' not in content_type:
content_type = content_type.split(';')[0]
if self.middleware.all_content_types:
self.compressible = True
else:
for start_content_type in self.middleware.content_types:
if content_type.startswith(start_content_type):
self.compressible = True
break
if self.compressible:
self.status = status
self.exc_info = exc_info
return self.buffer.write
return self.start_response(status, headers, exc_info)
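# Hedged configuration sketch (not part of the original module): the middleware only
# reads 'compress_level' and 'content_types' from its settings, so values along
# these lines are assumed to be passed in by the ines configuration layer.
_EXAMPLE_GZIP_SETTINGS = {
    'compress_level': '6',                      # parsed with int()
    'content_types': 'text/ application/json',  # whitespace-separated prefixes, or '*'
}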
|
mit
| 4,223,264,974,435,183,600
| 32.234234
| 86
| 0.564381
| false
| 4.216
| false
| false
| false
|
marianotepper/csnmf
|
csnmf/third_party/mrnmf/nmf_process_algorithms.py
|
1
|
4252
|
"""
Copyright (c) 2014, Austin R. Benson, David F. Gleich,
Purdue University, and Stanford University.
All rights reserved.
This file is part of MRNMF and is under the BSD 2-Clause License,
which can be found at http://opensource.org/licenses/BSD-2-Clause
Copyright (c) 2015, Mariano Tepper,
Duke University.
All rights reserved.
Mariano Tepper made the following changes to this file:
- modified names and line lengths to adhere more closely to PEP8
- changed docstrings
- some numpy operations are more numpy-ish now.
- small edits, refactoring, and cleanups
- removed some code
"""
import numpy as np
from scipy.optimize import nnls
def spa(data, r, colnorms):
"""
Successive projection algorithm (SPA) for NMF. This algorithm
computes the column indices.
:param data: The data matrix.
:type data: numpy.ndarray
:param r: The target separation rank.
:type r: int
:param colnorms: The column L1 norms.
:type colnorms: numpy.ndarray
:return: A list of r columns chosen by SPA.
:rtype: list of int
"""
idx = np.nonzero(colnorms)
x = np.copy(data)
x[:, idx] /= colnorms[idx]
cols = []
m, n = x.shape
for _ in xrange(r):
col_norms = np.linalg.norm(x, ord=2, axis=0)
col_norms[cols] = -1
col_ind = np.argmax(col_norms)
cols.append(col_ind)
col = np.atleast_2d(x[:, col_ind]) # col is a row vector
x = np.dot(np.eye(m) - np.dot(col.T, col) / col_norms[col_ind], x)
return cols
def xray(x, r):
"""
X-ray algorithm for NMF. This algorithm computes the column
indices.
:param x: The data matrix.
:type x: numpy.ndarray
:param r: The target separation rank.
:type r: int
:return: A list of r columns chosen by X-ray.
:rtype: list of int
"""
cols = []
R = np.copy(x)
while len(cols) < r:
# Loop until we choose a column that has not been selected.
while True:
p = np.random.random((1, x.shape[0]))
scores = np.linalg.norm(np.dot(R.T, x), ord=2, axis=0)
scores /= np.squeeze(np.dot(p, x))
scores[cols] = -1 # IMPORTANT
best_col = np.argmax(scores)
if best_col in cols:
# Re-try
continue
else:
cols.append(best_col)
H, rel_res = nnls_frob(x, cols)
R = x - np.dot(x[:, cols], H)
break
return cols
def nnls_frob(x, cols):
"""
Compute H, the coefficient matrix, by nonnegative least squares
to minimize the Frobenius norm. Given the data matrix X and the
columns cols, H is
.. math:: \arg\min_{Y \ge 0} \| X - X(:, cols) H \|_F.
:param X: The data matrix.
:type X: numpy.ndarray
:param cols: The column indices.
:type cols: list of int
:return: The matrix H and the relative residual.
"""
ncols = x.shape[1]
x_sel = x[:, cols]
H = np.zeros((len(cols), ncols))
for i in xrange(ncols):
sol, res = nnls(x_sel, x[:, i])
H[:, i] = sol
rel_res = np.linalg.norm(x - np.dot(x_sel, H), 'fro')
rel_res /= np.linalg.norm(x, 'fro')
return H, rel_res
def select_columns(data, alg, r, colnorms=None):
""" Compute an approximate separable NMF of the matrix data. By
compute, we mean choose r columns and a best fitting coefficient
matrix H. The r columns are selected by the 'alg' option, which
is one of 'SPA' or 'XRAY'. The coefficient matrix H is the
one that produces the smallest Frobenius norm error.
:param data: The data matrix.
:type data: numpy.ndarray
:param alg: Choice of algorithm for computing the columns. One of
'SPA' or 'XRAY'.
:type alg: string
:param r: The target separation rank.
:type r: int
:param colnorms: The column L1 norms, needed only by SPA.
:type colnorms: numpy.ndarray
:return The selected columns, the matrix H, and the relative residual.
"""
if alg == 'XRAY':
cols = xray(data, r)
elif alg == 'SPA':
cols = spa(data, r, colnorms)
else:
raise Exception('Unknown algorithm: {0}'.format(alg))
return cols
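# Hedged usage sketch (not in the original file): build a synthetic separable matrix
# and ask SPA for its generating columns. The sizes and the seed are arbitrary;
# colnorms is only needed for 'SPA', as documented above.
def _example_separable_recovery(m=30, n=20, r=4, seed=0):
    rng = np.random.RandomState(seed)
    w = rng.rand(m, r)
    h = np.hstack((np.eye(r), rng.rand(r, n - r)))
    data = np.dot(w, h)                 # first r columns generate the rest
    colnorms = np.linalg.norm(data, ord=1, axis=0)
    cols = select_columns(data, 'SPA', r, colnorms=colnorms)
    _, rel_res = nnls_frob(data, cols)  # residual of the induced factorization
    return cols, rel_res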
|
bsd-2-clause
| 8,538,298,718,568,246,000
| 30.264706
| 74
| 0.601834
| false
| 3.39075
| false
| false
| false
|
timeartist/flask_chutes
|
flask_chutes/__init__.py
|
1
|
3171
|
from flask_sockets import Sockets
from flask import Flask
from redis import StrictRedis
from json import loads, dumps
from multiprocessing import Process
from gevent import sleep, Greenlet
from geventwebsocket.exceptions import WebSocketError
processes = {}
def enable_chutes(app, endpoint='/chutes'):
'''
Factory method to add the chutes socket endpoint to your existing Flask app
Input:
app - Flask App Object to be extended
Returns:
None
'''
assert isinstance(app, Flask)
connection = app.config['REDIS_CONN']
r = StrictRedis(**connection)
sockets = Sockets(app)
@sockets.route(endpoint)
def _chutes(ws):
try:
i = 0
redis_key = None
channel = None
while True:
if i == 0:
msg = ws.receive()
print msg
sign_on = loads(msg)
channel = sign_on['channel']
if channel not in processes:
processes[channel] = []
redis_key = 'c:%s'%channel
i += 1
ps = r.pubsub()
ps.subscribe(redis_key)
process = Greenlet(socket_sentinel_publish, *(ws, ps))
process.start()
processes[channel].append(process)
process = Greenlet(socket_sentinel_client_listener, *(ws, r, redis_key))
process.start()
processes[channel].append(process)
resp = r.blpop(redis_key, 30)
print resp
if ws.closed:
print 'Websocket Connection Closed by Client'
break
if resp and isinstance(resp[-1], (str, unicode)):
print 'WS:', channel, '->', resp[-1]
ws.send(resp[-1])
else:
ws.send(dumps({'data':None}))
except WebSocketError, e:
_processes = processes[channel]
for process in _processes:
process.kill()
class Chute(object):
def __init__(self, channel, **kwargs):
self.r = StrictRedis(**kwargs)
self.channel = channel
self._r_key = 'c:%s'%channel
def send(self, data, timeout=90):
self.r.lpush(self._r_key, data)
self.r.expire(self._r_key, timeout)
def publish(self, data):
self.r.publish(self._r_key, data)
def listen(self):
ps = self.r.pubsub()
        ps.subscribe(self._r_key)
for item in ps.listen():
yield item
def socket_sentinel_publish(ws, ps):
for msg in ps.listen():
print msg
if msg:
ws.send(msg['data'])
def socket_sentinel_client_listener(ws, r, channel):
while True:
msg = ws.receive()
print msg
r.publish(channel, msg)
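# Hedged usage sketch (not part of the original module): wiring the chutes endpoint
# into a Flask app and pushing one message from server-side code. The Redis
# connection parameters and the channel name are placeholders.
def _example_setup():
    app = Flask(__name__)
    app.config['REDIS_CONN'] = {'host': 'localhost', 'port': 6379, 'db': 0}
    enable_chutes(app)
    chute = Chute('demo-channel', host='localhost', port=6379, db=0)
    chute.send(dumps({'data': 'hello'}))
    return app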
|
mit
| -6,333,327,705,612,059,000
| 28.091743
| 92
| 0.48029
| false
| 4.754123
| false
| false
| false
|
LLNL/spack
|
var/spack/repos/builtin/packages/precice/package.py
|
2
|
6339
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Precice(CMakePackage):
"""preCICE (Precise Code Interaction Coupling Environment) is a
coupling library for partitioned multi-physics simulations.
Partitioned means that preCICE couples existing programs (solvers)
capable of simulating a subpart of the complete physics involved in
a simulation."""
homepage = 'https://www.precice.org'
git = 'https://github.com/precice/precice.git'
url = 'https://github.com/precice/precice/archive/v1.2.0.tar.gz'
maintainers = ['fsimonis', 'MakisH']
version('develop', branch='develop')
version('2.0.2', sha256='72864480f32696e7b6da94fd404ef5cd6586e2e1640613e46b75f1afac8569ed')
version('2.0.1', sha256='e4fe2d2063042761ab325f8c802f88ae088c90862af288ad1a642967d074bd50')
version('2.0.0', sha256='c8979d366f06e35626a8da08a1c589df77ec13972eb524a1ba99a011e245701f')
version('1.6.1', sha256='7d0c54faa2c69e52304f36608d93c408629868f16f3201f663a0f9b2008f0763')
version('1.6.0', sha256='c3b16376fda9eb3449adb6cc3c1e267c3dc792a5d118e37d93a32a59b5a4bc6f')
version('1.5.2', sha256='051e0d7655a91f8681901e5c92812e48f33a5779309e2f104c99f5a687e1a418')
version('1.5.1', sha256='fbe151f1a9accf9154362c70d15254935d4f594d189982c3a99fdb3dd9d9e665')
version('1.5.0', sha256='a2a794becd08717e3049252134ae35692fed71966ed32e22cca796a169c16c3e')
version('1.4.1', sha256='dde4882edde17882340f9f601941d110d5976340bd71af54c6e6ea22ae56f1a5')
version('1.4.0', sha256='3499bfc0941fb9f004d5e32eb63d64f93e17b4057fab3ada1cde40c8311bd466')
version('1.3.0', sha256='610322ba1b03df8e8f7d060d57a6a5afeabd5db4e8c4a638d04ba4060a3aec96')
version('1.2.0', sha256='0784ecd002092949835151b90393beb6e9e7a3e9bd78ffd40d18302d6da4b05b')
# Skip version 1.1.1 entirely, the cmake was lacking install.
variant('mpi', default=True, description='Enable MPI support')
variant('petsc', default=True, description='Enable PETSc support')
variant('python', default=False, description='Enable Python support')
variant('shared', default=True, description='Build shared libraries')
depends_on('cmake@3.5:', type='build')
depends_on('cmake@3.10.2:', type='build', when='@1.4:')
depends_on('boost@1.60.0:')
depends_on('boost@1.65.1:', when='@1.4:')
depends_on('boost@:1.72.99', when='@:2.0.2')
depends_on('eigen@3.2:')
depends_on('eigen@:3.3.7', type='build', when='@:1.5') # bug in prettyprint
depends_on('libxml2')
depends_on('mpi', when='+mpi')
depends_on('petsc@3.6:', when='+petsc')
# Python 3 support was added in version 2.0
depends_on('python@2.7:2.8', when='@:1.9+python', type=('build', 'run'))
depends_on('python@3:', when='@2:+python', type=('build', 'run'))
# numpy 1.17+ requires Python 3
depends_on('py-numpy@:1.16', when='@:1.9+python', type=('build', 'run'))
depends_on('py-numpy@1.17:', when='@2:+python', type=('build', 'run'))
# We require C++11 compiler support as well as
# library support for time manipulators (N2071, N2072)
conflicts('%gcc@:4')
conflicts('%clang@:3.7')
conflicts('%intel@:14')
conflicts('%pgi@:14')
def cmake_args(self):
"""Populate cmake arguments for precice."""
spec = self.spec
# The xSDK installation policies were implemented after 1.5.2
xsdk_mode = spec.satisfies("@1.6:")
# Select the correct CMake variables by version
mpi_option = "MPI"
if spec.satisfies("@2:"):
mpi_option = "PRECICE_MPICommunication"
petsc_option = "PETSC"
if spec.satisfies("@2:"):
petsc_option = "PRECICE_PETScMapping"
python_option = "PYTHON"
if spec.satisfies("@2:"):
python_option = "PRECICE_PythonActions"
def variant_bool(feature, on='ON', off='OFF'):
"""Ternary for spec variant to ON/OFF string"""
if feature in spec:
return on
return off
cmake_args = [
'-DBUILD_SHARED_LIBS:BOOL=%s' % variant_bool('+shared'),
]
cmake_args.append('-D%s:BOOL=%s' % (mpi_option, variant_bool('+mpi')))
# Boost
if xsdk_mode:
cmake_args.append('-DTPL_ENABLE_BOOST=ON')
cmake_args.append('-DBOOST_ROOT=%s' % spec['boost'].prefix)
# Eigen3
if xsdk_mode:
cmake_args.append('-DTPL_ENABLE_EIGEN3=ON')
cmake_args.append(
'-DEIGEN3_INCLUDE_DIR=%s' % spec['eigen'].headers.directories[0])
# LibXML2
if xsdk_mode:
cmake_args.append('-DTPL_ENABLE_LIBXML2=ON')
libxml2_includes = spec['libxml2'].headers.directories[0]
cmake_args.extend([
'-DLIBXML2_INCLUDE_DIRS=%s' % libxml2_includes,
'-DLIBXML2_LIBRARIES=%s' % spec['libxml2'].libs[0],
])
# PETSc
if '+petsc' in spec:
if xsdk_mode:
cmake_args.append('-DTPL_ENABLE_PETSC:BOOL=ON')
else:
cmake_args.append('-D%s:BOOL=ON' % petsc_option)
cmake_args.extend([
'-DPETSC_DIR=%s' % spec['petsc'].prefix,
'-DPETSC_ARCH=.'
])
else:
cmake_args.append('-D%s:BOOL=OFF' % petsc_option)
# Python
if '+python' in spec:
python_library = spec['python'].libs[0]
python_include = spec['python'].headers.directories[0]
numpy_include = join_path(
spec['py-numpy'].prefix,
spec['python'].package.site_packages_dir,
'numpy', 'core', 'include')
if xsdk_mode:
cmake_args.append('-DTPL_ENABLE_PYTHON:BOOL=ON')
else:
cmake_args.append('-D%s:BOOL=ON' % python_option)
cmake_args.extend([
'-DPYTHON_INCLUDE_DIR=%s' % python_include,
'-DNumPy_INCLUDE_DIR=%s' % numpy_include,
'-DPYTHON_LIBRARY=%s' % python_library
])
else:
cmake_args.append('-D%s:BOOL=OFF' % python_option)
return cmake_args
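# Editorial note: an illustrative command line for this package (the exact spec
# is an example, not a tested configuration):
#
#   spack install precice@2.0.2 +mpi+petsc~python ^openmpi
#
# For a 2.x spec like this, cmake_args() above emits
# -DPRECICE_MPICommunication:BOOL=ON and, since the xSDK policies apply from
# version 1.6 on, -DTPL_ENABLE_PETSC:BOOL=ON.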
|
lgpl-2.1
| -7,378,302,579,113,582,000
| 41.26
| 95
| 0.62218
| false
| 3.015699
| false
| false
| false
|
fdemian/Morpheus
|
api/routes/Alerts.py
|
1
|
4990
|
import json
from api.model.sessionHelper import get_session
from api.model.models import Notification
from api.authentication.AuthenticatedHandler import AuthenticatedHandler
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
from tornado.gen import coroutine
from api.Utils import authenticated
class AlertsHandler(AuthenticatedHandler):
def data_received(self, chunk):
pass
# GET /alerts
@authenticated
def get(self):
if not self.settings['notifications_enabled']:
response = {'message': "Notifications disabled."}
self.set_status(501, 'Error')
self.set_header("Access-Control-Allow-Origin", "*")
self.write(response)
return
session_object = get_session()
session = session_object()
current_user = self.current_user
alerts = session.query(Notification).filter(Notification.user_id == current_user, Notification.read == False)\
.order_by(Notification.id.desc())\
.all()
data = []
for notification in alerts:
json_notification = {
'id': notification.id,
'type': notification.type,
'text': notification.text,
'link': notification.link,
'read': notification.read
}
data.append(json_notification)
response = {"notifications": data}
self.set_status(200, 'Ok ')
self.set_header("Access-Control-Allow-Origin", "*")
self.write(response)
return
# TODO: change to POST method?
# -- REASON: Successful response returns a body (and shouldn't).
    # -- Otherwise the method is the same as PUT.
# PUT /alerts
@authenticated
def put(self):
request = self.request.body.decode("utf-8")
json_request = json.loads(request)
session_object = get_session()
session = session_object()
try:
notification_id = json_request["id"]
notification = session.query(Notification).filter(Notification.id == notification_id).one()
# Modify all the fields.
notification.type = json_request["type"]
notification.text = json_request["text"]
notification.link = json_request["link"]
notification.read = json_request["read"]
session.commit()
status = 200
status_str = 'Ok'
response = {'id': notification_id}
except NoResultFound:
status = 500
status_str = "Error"
            response = {'message': 'No notification with the id {0} found.'.format(notification_id)}
except MultipleResultsFound:
status = 500
status_str = "Error"
            response = {'message': 'More than one notification with the id {0} was found.'.format(notification_id)}
self.set_header("Content-Type", "application/jsonp;charset=UTF-8")
self.set_header("Access-Control-Allow-Origin", "*")
self.set_status(status, status_str)
self.write(response)
return
@coroutine
def post(self):
response = {"message": "This is not a valid method for this resource."}
self.set_status(405, 'Error')
self.set_header("Access-Control-Allow-Origin", "*")
self.write(response)
return
@coroutine
def delete(self):
response = {"message": "This is not a valid method for this resource."}
self.set_status(405, 'Error')
self.set_header("Access-Control-Allow-Origin", "*")
self.write(response)
return
@coroutine
def trace(self):
response = {"message": "This is not a valid method for this resource."}
self.set_status(405, 'Error')
self.set_header("Access-Control-Allow-Origin", "*")
self.write(response)
return
@coroutine
def connect(self):
response = {"message": "This is not a valid method for this resource."}
self.set_status(405, 'Error')
self.set_header("Access-Control-Allow-Origin", "*")
self.write(response)
return
@coroutine
def options(self):
response = {"message": "This is not a valid method for this resource."}
self.set_status(405, 'Error')
self.set_header("Access-Control-Allow-Origin", "*")
self.write(response)
return
@coroutine
def patch(self):
response = {"message": "This is not a valid method for this resource."}
self.set_status(405, 'Error')
self.set_header("Access-Control-Allow-Origin", "*")
self.write(response)
return
@coroutine
def head(self):
response = {"message": "This is not a valid method for this resource."}
self.set_status(405, 'Error')
self.set_header("Access-Control-Allow-Origin", "*")
self.write(response)
return
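# Editorial note: an illustrative request body for PUT /alerts (field names
# taken from the handler above; the values are examples only):
#
#   {"id": 42, "type": "mention", "text": "You were mentioned in a post",
#    "link": "/posts/17", "read": true}
#
# A successful update answers with {"id": 42}.  GET /alerts returns the
# authenticated user's unread notifications under the "notifications" key.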
|
bsd-2-clause
| 4,065,950,719,263,779,000
| 29.426829
| 118
| 0.589379
| false
| 4.331597
| false
| false
| false
|
piotrmaslanka/vanad
|
interfaces/vanad.py
|
1
|
7505
|
from __future__ import division
from socket import socket, AF_INET, SOCK_STREAM
from time import time
from select import select
from struct import pack, unpack
from threading import Lock
RQT_GET = 0x00
RQT_ASSIGN = 0x01
RQT_DELETE = 0x02
def REQ_to_bytes(request, tablespace, key, value):
return pack('!BBLL', request, tablespace, len(key), len(value)) + key + value
def GET_to_bytes(tablespace, key):
return REQ_to_bytes(RQT_GET, tablespace, key, '')
def ASSIGN_to_bytes(tablespace, key, value):
return REQ_to_bytes(RQT_ASSIGN, tablespace, key, value)
def DELETE_to_bytes(tablespace, key):
return REQ_to_bytes(RQT_DELETE, tablespace, key, '')
def scan_frame(frame):
"""
    Scans a Vanad server reply frame and checks whether it is a complete, valid frame.
If this cannot be a valid frame, it will raise an exception of
undefined type and arguments.
Will return values if this is a valid frame
@return: tuple (int resultcode, bytearray data)
"""
    # Unpack the header. Will throw if there are not enough bytes
resultcode, data_len = unpack('!BL', str(frame[:5]))
# Check if frame is OK with length, if not - throw Exception
if len(frame) != 5 + data_len: raise Exception
# Extract data and rest of the data
data = frame[5:5+data_len]
return resultcode, data
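# Editorial note: a small round-trip sketch of the reply-frame layout parsed by
# scan_frame: a '!BL' header (1-byte result code, 4-byte big-endian payload
# length) followed by the payload.  Kept in a function so nothing runs on import.
def _example_scan_frame():
    reply = bytearray(pack('!BL', 0x00, 5) + 'hello')
    return scan_frame(reply) # -> (0, bytearray(b'hello'))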
class VanadConnection(object):
"""
Class that represents a connection to a Vanad database
Will autoreconnect upon detecting socket lossage and repeat the query, as needed
Will behave smoothly even if user orders a query in the middle of database's
restart.
    Will connect only if there's a need to do so.
If database is reliably down for longer periods of time, this WILL HANG!
"""
def __init__(self, address, connect_timeout=4, txrx_timeout=4, eo_timeout=8):
"""
Connect to a remote database.
@type address: tuple of (str address, int port)
@param address: SOCK_STREAM-compatible address of target database
@type connect_timeout: int
@param connect_timeout: timeout in seconds that will be used during
connecting to database
@type txrx_timeout: int
@param txrx_timeout: timeout for send/recv operations
        @type eo_timeout: ignored
        @param eo_timeout: accepted only for compatibility with legacy applications; ignored.
"""
self.lock = Lock()
self.connect_timeout = connect_timeout
self.txrx_timeout = txrx_timeout
self.remote_address = address
self.connected = False
self.last_activity = 0 # an int with time() of last activity
self.socket = None # a socket.socket object will be here
self.default_tablespace = 0 # default tablespace
def __shut_sock(self):
try:
self.socket.close()
except:
pass
self.socket = None
self.connected = False
def __ensure_connected(self, force_reconnect=False):
"""PRIVATE METHOD.
Ensured that connection to database is on.
If it isn't, it will make it so.
If it can't be done, it will hang."""
if time() - self.last_activity > 3: # Connection down
self.__shut_sock()
while (not self.connected) or force_reconnect: # Assure that you are connected
# we don't close our sockets here, because closing a socket might take a while
# we just plainly discard it. Mail me if you got a better idea.
self.socket = socket(AF_INET, SOCK_STREAM)
self.socket.settimeout(self.connect_timeout)
try:
self.socket.connect(self.remote_address)
except: # timeout or active denial
try:
self.socket.close()
except:
pass
self.socket = None
else:
self.connected = True
self.last_activity = time()
self.socket.settimeout(self.txrx_timeout)
def set_default_tablespace(self, id):
"""
Sets a new tablespace as default one
@type id: int in (0..255)
@param id: number of new default tablespace
"""
self.default_tablespace = id
def __transact(self, to_send):
"""
Transacts with the database. Will return value that got returned.
Will raise exception if it could not be completed, and should be retried.
"""
# Send now
self.socket.sendall(to_send)
# Now, wait for reception
recvdata = bytearray()
while True:
k = self.socket.recv(1024)
if len(k) == 0: raise Exception # server closed connection
recvdata.extend(k)
try:
result, value = scan_frame(recvdata)
except: # Frame not ready yet
pass
else: # Frame completed
break
self.last_activity = time() # Note the activity
if result == 0x01: return None # Not found for GET's
if len(value) == 0: return None # None and empty string have same meaning
return value
def get(self, key, tablespace=None):
"""
Fetches a record from database.
@type key: str
@param key: Key to fetch with
@type tablespace: int in (0..255), or None
@param tablespace: number of tablespace to fetch from. If None,
default tablespace will be used
"""
self.lock.acquire()
if tablespace == None: tablespace = self.default_tablespace
self.__ensure_connected()
while True:
try:
f = self.__transact(GET_to_bytes(tablespace, key))
self.lock.release()
return f
except:
self.__ensure_connected(force_reconnect=True)
def assign(self, key, value, tablespace=None):
"""
Writes a record to database
@type key: str
@param key: Key to write
@type value: str
@param value: Value to write
@type tablespace: int in (0..255), or None
@param tablespace: number of tablespace to write to. If None,
default tablespace will be used
"""
self.lock.acquire()
if tablespace == None: tablespace = self.default_tablespace
self.__ensure_connected()
while True:
try:
self.__transact(ASSIGN_to_bytes(tablespace, key, value))
self.lock.release()
return
except:
self.__ensure_connected(force_reconnect=True)
def delete(self, key, tablespace=None):
"""
Deletes a record from database
@type key: str
@param key: Key to delete
@type tablespace: int in (0..255), or None
@param tablespace: number of tablespace to write to. If None,
default tablespace will be used
"""
self.lock.acquire()
if tablespace == None: tablespace = self.default_tablespace
self.__ensure_connected()
while True:
try:
self.__transact(DELETE_to_bytes(tablespace, key))
self.lock.release()
return
except:
self.__ensure_connected(force_reconnect=True)
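# Editorial note: a minimal client sketch, not part of the original module.
# The address is an example only; calling this function requires a running
# Vanad server.
def _example_usage():
    conn = VanadConnection(('localhost', 9100))
    conn.set_default_tablespace(0)
    conn.assign('user:1', 'piotr')
    value = conn.get('user:1') # -> 'piotr'
    conn.delete('user:1')
    return value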
|
gpl-3.0
| -2,702,130,170,785,214,500
| 31.489177
| 90
| 0.582678
| false
| 4.320668
| false
| false
| false
|
fboender/miniorganizer
|
src/lib/kiwi/db/sqlalch.py
|
1
|
6532
|
##
## Copyright (C) 2007 Async Open Source
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public License
## as published by the Free Software Foundation; either version 2
## of the License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
##
## Author(s): Ali Afshar <aafshar@gmail.com>
## Johan Dahlin <jdahlin@async.com.br>
##
"""
SQLAlchemy integration for Kiwi
"""
from sqlalchemy import and_, or_
from kiwi.db.query import NumberQueryState, StringQueryState, \
DateQueryState, DateIntervalQueryState, QueryExecuter, \
NumberIntervalQueryState
from kiwi.interfaces import ISearchFilter
class SQLAlchemyQueryExecuter(QueryExecuter):
def __init__(self, session):
QueryExecuter.__init__(self)
self.session = session
self.table = None
self._query_callbacks = []
self._filter_query_callbacks = {}
self._query = self._default_query
self._full_text_indexes = {}
#
# Public API
#
def set_table(self, table):
"""
        Sets the SQLAlchemy table/object for this executer
        @param table: an SQLAlchemy table or mapped class
"""
self.table = table
def add_query_callback(self, callback):
"""
Adds a generic query callback
@param callback: a callable
"""
if not callable(callback):
raise TypeError
self._query_callbacks.append(callback)
def add_filter_query_callback(self, search_filter, callback):
"""
Adds a query callback for the filter search_filter
@param search_filter: a search filter
@param callback: a callable
"""
if not ISearchFilter.providedBy(search_filter):
raise TypeError
if not callable(callback):
raise TypeError
l = self._filter_query_callbacks.setdefault(search_filter, [])
l.append(callback)
def set_query(self, callback):
"""
Overrides the default query mechanism.
        @param callback: a callable which will take two arguments:
(query, connection)
"""
if callback is None:
callback = self._default_query
elif not callable(callback):
raise TypeError
self._query = callback
#
# QueryBuilder
#
def search(self, states):
"""
Execute a search.
@param states:
"""
if self.table is None:
raise ValueError("table cannot be None")
table = self.table
queries = []
for state in states:
search_filter = state.filter
assert state.filter
# Column query
if search_filter in self._columns:
query = self._construct_state_query(
table, state, self._columns[search_filter])
if query:
queries.append(query)
# Custom per filter/state query.
elif search_filter in self._filter_query_callbacks:
for callback in self._filter_query_callbacks[search_filter]:
query = callback(state)
if query:
queries.append(query)
else:
if (self._query == self._default_query and
not self._query_callbacks):
raise ValueError(
"You need to add a search column or a query callback "
"for filter %s" % (search_filter))
for callback in self._query_callbacks:
query = callback(states)
if query:
queries.append(query)
if queries:
query = and_(*queries)
else:
query = None
result = self._query(query)
return result
#
# Private
#
def _default_query(self, query):
return self.session.query(self.table).select(query)
def _construct_state_query(self, table, state, columns):
queries = []
for column in columns:
query = None
table_field = getattr(table.c, column)
if isinstance(state, NumberQueryState):
query = self._parse_number_state(state, table_field)
elif isinstance(state, NumberIntervalQueryState):
query = self._parse_number_interval_state(state, table_field)
elif isinstance(state, StringQueryState):
query = self._parse_string_state(state, table_field)
elif isinstance(state, DateQueryState):
query = self._parse_date_state(state, table_field)
elif isinstance(state, DateIntervalQueryState):
query = self._parse_date_interval_state(state, table_field)
else:
raise NotImplementedError(state.__class__.__name__)
if query:
queries.append(query)
if queries:
return or_(*queries)
def _parse_number_state(self, state, table_field):
if state.value is not None:
return table_field == state.value
def _parse_number_interval_state(self, state, table_field):
queries = []
if state.start:
queries.append(table_field >= state.start)
if state.end:
queries.append(table_field <= state.end)
if queries:
return and_(*queries)
def _parse_string_state(self, state, table_field):
if state.text is not None:
text = '%%%s%%' % state.text.lower()
return table_field.like(text)
def _parse_date_state(self, state, table_field):
if state.date:
return table_field == state.date
def _parse_date_interval_state(self, state, table_field):
queries = []
if state.start:
queries.append(table_field >= state.start)
if state.end:
queries.append(table_field <= state.end)
if queries:
return and_(*queries)
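# Editorial note: a heavily simplified wiring sketch, not from the original
# code base.  `session`, `table`, `search_filter` and `states` are hypothetical
# stand-ins; only the order of calls on the executer is the point.
def _example_usage(session, table, search_filter, states):
    executer = SQLAlchemyQueryExecuter(session)
    executer.set_table(table)
    executer.add_filter_query_callback(
        search_filter,
        lambda state: table.c.name.like('%%%s%%' % state.text))
    return executer.search(states)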
|
gpl-3.0
| -430,581,801,005,307,300
| 31.497512
| 78
| 0.58068
| false
| 4.467852
| false
| false
| false
|
celliern/triflow
|
triflow/core/simulation.py
|
1
|
15719
|
#!/usr/bin/env python
# coding=utf8
import inspect
import logging
import pprint
import time
import warnings
from collections import namedtuple
from uuid import uuid1
import pendulum
import streamz
import tqdm
from numpy import isclose
from . import schemes
from ..plugins.container import TriflowContainer
logging.getLogger(__name__).addHandler(logging.NullHandler())
logging = logging.getLogger(__name__)
def is_interactive():
import __main__ as main
return not hasattr(main, '__file__')
tqdm = tqdm.tqdm_notebook if is_interactive() else tqdm.tqdm
class Timer:
def __init__(self, last, total):
self.last = last
self.total = total
def __repr__(self):
repr = """last: {last}
total: {total}"""
return repr.format(last=(pendulum.now()
.subtract(
seconds=self.last)
.diff()),
total=(pendulum.now()
.subtract(
seconds=self.total)
.diff()))
def null_hook(t, fields, pars):
return fields, pars
PostProcess = namedtuple(
"PostProcess", ["name", "function", "description"])
class Simulation(object):
"""High level container used to run simulation build on triflow Model.
This object is an iterable which will yield every time step until the
    parameter 'tmax' is reached, if provided.
By default, the solver use a 6th order ROW solver, an implicit method
with integrated time-stepping.
Parameters
----------
model : triflow.Model
Contain finite difference approximation and routine of the dynamical
system
fields : triflow.BaseFields or dict (any mappable)
triflow container or mappable filled with initial conditions
parameters : dict
physical parameters of the simulation
dt : float
time stepping for output. if time_stepping is False, the internal
time stepping will be the same.
t : float, optional, default 0.
initial time
tmax : float, optional, default None
        Control the end of the simulation. If None (the default), the
        computation will continue until interrupted by the user (using Ctrl-C
or a SIGTERM signal).
id : None, optional
        Name of the simulation. A short unique id will be generated if not
provided.
hook : callable, optional, default null_hook.
Any callable taking the actual time, fields and parameters and
return modified fields and parameters.
Will be called every internal time step and can be used to include
        time dependent or conditional parameters, boundary conditions...
The default null_hook has no impact on the computation.
scheme : callable, optional, default triflow.schemes.RODASPR
        A callable object which takes the simulation state and returns
        the next step.
        Its signature is scheme.__call__(t, fields, dt, pars, hook)
        and it should return the next time and the updated fields.
        It takes the model and extra positional and named arguments.
time_stepping : boolean, default True
        Indicate if the time step is controlled by an algorithm dependent on
the temporal scheme (see the doc on time stepping for extra info).
**kwargs
extra arguments passed to the scheme.
Attributes
----------
dt : float
output time step
fields : triflow.Fields
triflow container filled with actual data
i : int
actual iteration
id : str
name of the simulation
model : triflow.Model
triflow Model used in the simulation
parameters : dict
physical parameters of the simulation
status : str
status of the simulation, one of the following one:
('created', 'running', 'finished', 'failed')
t : float
actual time
tmax : float or None, default None
stopping time of the simulation. Not stopping if set to None.
Properties
----------
post_processes: list of triflow.core.simulation.PostProcess
contain all the post processing function attached to the simulation.
container: triflow.TriflowContainer
give access to the attached container, if any.
timer: triflow.core.simulation.Timer
return the cpu time of the previous step and the total running time of
the simulation.
stream: streamz.Stream
Streamz starting point, fed by the simulation state after each
time_step. This interface is used for post-processing, saving the data
on disk by the TriflowContainer and display the fields in real-time.
Examples
--------
>>> import numpy as np
>>> import triflow
>>> model = triflow.Model(["k1 * dxxU",
... "k2 * dxxV"],
... ["U", "V"],
... ["k1", "k2"])
>>> x = np.linspace(0, 100, 1000, endpoint=False)
>>> U = np.cos(x * 2 * np.pi / 100)
>>> V = np.sin(x * 2 * np.pi / 100)
>>> fields = model.fields_template(x=x, U=U, V=V)
>>> pars = {'k1': 1, 'k2': 1, 'periodic': True}
>>> simulation = triflow.Simulation(model, fields, pars, dt=5., tmax=50.)
>>> for t, fields in simulation:
... pass
>>> print(t)
50.0
""" # noqa
def __init__(self, model, fields, parameters, dt, t=0, tmax=None,
id=None, hook=null_hook,
scheme=schemes.RODASPR,
time_stepping=True, **kwargs):
def intersection_kwargs(kwargs, function):
"""Inspect the function signature to identify the relevant keys
in a dictionary of named parameters.
"""
func_signature = inspect.signature(function)
func_parameters = func_signature.parameters
kwargs = {key: value
for key, value
in kwargs.items() if key in func_parameters}
return kwargs
kwargs["time_stepping"] = time_stepping
self.id = str(uuid1())[:6] if not id else id
self.model = model
self.parameters = parameters
self.fields = model.fields_template(**fields)
self.t = t
self.user_dt = self.dt = dt
self.tmax = tmax
self.i = 0
self._stream = streamz.Stream()
self._pprocesses = []
self._scheme = scheme(model,
**intersection_kwargs(kwargs,
scheme.__init__))
if (time_stepping and
self._scheme not in [schemes.RODASPR,
schemes.ROS3PRL,
schemes.ROS3PRw]):
self._scheme = schemes.time_stepping(
self._scheme,
**intersection_kwargs(kwargs,
schemes.time_stepping))
self.status = 'created'
self._total_running = 0
self._last_running = 0
self._created_timestamp = pendulum.now()
self._started_timestamp = None
self._last_timestamp = None
self._actual_timestamp = pendulum.now()
self._hook = hook
self._container = None
self._iterator = self.compute()
def _compute_one_step(self, t, fields, pars):
"""
Compute one step of the simulation, then update the timers.
"""
fields, pars = self._hook(t, fields, pars)
self.dt = (self.tmax - t
if self.tmax and (t + self.dt >= self.tmax)
else self.dt)
before_compute = time.process_time()
t, fields = self._scheme(t, fields, self.dt,
pars, hook=self._hook)
after_compute = time.process_time()
self._last_running = after_compute - before_compute
self._total_running += self._last_running
self._last_timestamp = self._actual_timestamp
self._actual_timestamp = pendulum.now()
return t, fields, pars
def compute(self):
"""Generator which yield the actual state of the system every dt.
Yields
------
tuple : t, fields
Actual time and updated fields container.
"""
fields = self.fields
t = self.t
pars = self.parameters
self._started_timestamp = pendulum.now()
self.stream.emit(self)
try:
while True:
t, fields, pars = self._compute_one_step(t, fields, pars)
self.i += 1
self.t = t
self.fields = fields
self.parameters = pars
for pprocess in self.post_processes:
pprocess.function(self)
self.stream.emit(self)
yield self.t, self.fields
if self.tmax and (isclose(self.t, self.tmax)):
self._end_simulation()
return
except RuntimeError:
self.status = 'failed'
raise
def _end_simulation(self):
if self.container:
self.container.flush()
self.container.merge()
def run(self, progress=True, verbose=False):
"""Compute all steps of the simulation. Be careful: if tmax is not set,
        this function will result in an infinite loop.
Returns
-------
(t, fields):
last time and result fields.
"""
        total_iter = int(self.tmax // self.user_dt) if self.tmax else None
log = logging.info if verbose else logging.debug
if progress:
with tqdm(initial=(self.i if self.i < total_iter else total_iter),
total=total_iter) as pbar:
for t, fields in self:
pbar.update(1)
log("%s running: t: %g" % (self.id, t))
try:
return t, fields
except UnboundLocalError:
warnings.warn("Simulation already ended")
for t, fields in self:
log("%s running: t: %g" % (self.id, t))
try:
return t, fields
except UnboundLocalError:
warnings.warn("Simulation already ended")
def __repr__(self):
repr = """{simulation_name:=^30}
created: {created_date}
started: {started_date}
last: {last_date}
time: {t:g}
iteration: {iter:g}
last step: {step_time}
total time: {running_time}
Physical parameters
-------------------
{parameters}
Hook function
-------------
{hook_source}
=========== Model ===========
{model_repr}"""
repr = repr.format(simulation_name=" %s " % self.id,
parameters="\n\t".join(
[("%s:" % key).ljust(12) +
pprint.pformat(value)
for key, value
in self.parameters.items()]),
t=self.t,
iter=self.i,
model_repr=self.model,
hook_source=inspect.getsource(self._hook),
step_time=(None if not self._last_running else
pendulum.now()
.subtract(
seconds=self._last_running)
.diff()),
running_time=(pendulum.now()
.subtract(
seconds=self._total_running)
.diff()),
created_date=(self._created_timestamp
.to_cookie_string()),
started_date=(self._started_timestamp
.to_cookie_string()
if self._started_timestamp
else "None"),
last_date=(self._last_timestamp
.to_cookie_string()
if self._last_timestamp
else "None"))
return repr
def attach_container(self, path=None, save="all",
mode="w", nbuffer=50, force=False):
"""add a Container to the simulation which allows some
persistance to the simulation.
Parameters
----------
path : str or None (default: None)
path for the container. If None (the default), the data lives only
in memory (and are available with `simulation.container`)
mode : str, optional
"a" or "w" (default "w")
save : str, optional
"all" will save every time-step,
"last" will only get the last time step
nbuffer : int, optional
wait until nbuffer data in the Queue before save on disk.
timeout : int, optional
wait until timeout since last flush before save on disk.
force : bool, optional (default False)
if True, remove the target folder if not empty. if False, raise an
error.
"""
self._container = TriflowContainer("%s/%s" % (path, self.id)
if path else None,
save=save,
mode=mode, metadata=self.parameters,
force=force, nbuffer=nbuffer)
self._container.connect(self.stream)
return self._container
@property
def post_processes(self):
return self._pprocesses
@property
def stream(self):
return self._stream
@property
def container(self):
return self._container
@property
def timer(self):
return Timer(self._last_running, self._total_running)
def add_post_process(self, name, post_process, description=""):
"""add a post-process
Parameters
----------
name : str
            name of the post-process
        post_process : callback (function or a class with a __call__ method
          or a streamz.Stream).
            this callback has to accept the simulation state as parameter
            and return the modified simulation state.
            if a streamz.Stream is provided, it will be plugged in with the
            previous streamz (and ultimately to the initial_stream). All these
            streams accept and return the simulation state.
description : str, optional, Default is "".
give extra information about the post-processing
"""
self._pprocesses.append(PostProcess(name=name,
function=post_process,
description=description))
self._pprocesses[-1].function(self)
def remove_post_process(self, name):
"""remove a post-process
Parameters
----------
name : str
name of the post-process to remove.
"""
self._pprocesses = [post_process
for post_process in self._pprocesses
if post_process.name != name]
def __iter__(self):
return self.compute()
def __next__(self):
return next(self._iterator)
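# Editorial note: a short sketch extending the doctest in the Simulation
# docstring above; the no-op post-process and the in-memory container are
# illustrative only, and nothing below runs on import.
def _example_usage(model, fields, pars):
    simulation = Simulation(model, fields, pars, dt=5., tmax=50.)
    simulation.attach_container(path=None, save="all") # data stays in memory
    simulation.add_post_process("noop", lambda simul: simul,
                                description="illustrative no-op")
    t, fields = simulation.run(progress=False)
    return t, fields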
|
gpl-3.0
| 905,709,783,713,381,900
| 34.888128
| 79
| 0.531204
| false
| 4.69364
| false
| false
| false
|
gencer/sentry
|
src/sentry/api/endpoints/group_events.py
|
1
|
2698
|
from __future__ import absolute_import
import six
from sentry import tagstore
from sentry.api.base import DocSection, EnvironmentMixin
from sentry.api.bases import GroupEndpoint
from sentry.api.serializers import serialize
from sentry.api.paginator import DateTimePaginator
from sentry.models import Environment, Event, Group
from sentry.search.utils import parse_query
from sentry.utils.apidocs import scenario, attach_scenarios
from rest_framework.response import Response
from sentry.search.utils import InvalidQuery
from django.db.models import Q
@scenario('ListAvailableSamples')
def list_available_samples_scenario(runner):
group = Group.objects.filter(project=runner.default_project).first()
runner.request(method='GET', path='/issues/%s/events/' % group.id)
class GroupEventsEndpoint(GroupEndpoint, EnvironmentMixin):
doc_section = DocSection.EVENTS
@attach_scenarios([list_available_samples_scenario])
def get(self, request, group):
"""
List an Issue's Events
``````````````````````
This endpoint lists an issue's events.
:pparam string issue_id: the ID of the issue to retrieve.
:auth: required
"""
events = Event.objects.filter(
group_id=group.id,
)
query = request.GET.get('query')
if query:
try:
query_kwargs = parse_query(group.project, query, request.user)
except InvalidQuery as exc:
return Response({'detail': six.text_type(exc)}, status=400)
if query_kwargs['query']:
q = Q(message__icontains=query_kwargs['query'])
if len(query) == 32:
q |= Q(event_id__exact=query_kwargs['query'])
events = events.filter(q)
if query_kwargs['tags']:
try:
environment_id = self._get_environment_id_from_request(
request, group.project.organization_id)
except Environment.DoesNotExist:
event_ids = []
else:
event_ids = tagstore.get_group_event_ids(
group.project_id, group.id, environment_id, query_kwargs['tags'])
if event_ids:
events = events.filter(
id__in=event_ids,
)
else:
events = events.none()
return self.paginate(
request=request,
queryset=events,
order_by='-datetime',
on_results=lambda x: serialize(x, request.user),
paginator_cls=DateTimePaginator,
)
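# Editorial note: illustrative requests against this endpoint (the issue id and
# query values are examples only, following the scenario above):
#
#   GET /issues/1234/events/                        -> paginated events, newest first
#   GET /issues/1234/events/?query=ConnectionError  -> events whose message matches
#
# A 32-character query string is additionally matched against event_id, per the
# `len(query) == 32` branch in get().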
|
bsd-3-clause
| 6,260,924,192,562,631,000
| 31.902439
| 89
| 0.588213
| false
| 4.504174
| false
| false
| false
|
derekjchow/models
|
research/object_detection/core/target_assigner.py
|
1
|
29856
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base target assigner module.
The job of a TargetAssigner is, for a given set of anchors (bounding boxes) and
groundtruth detections (bounding boxes), to assign classification and regression
targets to each anchor as well as weights to each anchor (specifying, e.g.,
which anchors should not contribute to training loss).
It assigns classification/regression targets by performing the following steps:
1) Computing pairwise similarity between anchors and groundtruth boxes using a
provided RegionSimilarity Calculator
2) Computing a matching based on the similarity matrix using a provided Matcher
3) Assigning regression targets based on the matching and a provided BoxCoder
4) Assigning classification targets based on the matching and groundtruth labels
Note that TargetAssigners only operate on detections from a single
image at a time, so any logic for applying a TargetAssigner to multiple
images must be handled externally.
"""
import tensorflow as tf
from object_detection.box_coders import faster_rcnn_box_coder
from object_detection.box_coders import mean_stddev_box_coder
from object_detection.core import box_coder as bcoder
from object_detection.core import box_list
from object_detection.core import matcher as mat
from object_detection.core import region_similarity_calculator as sim_calc
from object_detection.core import standard_fields as fields
from object_detection.matchers import argmax_matcher
from object_detection.matchers import bipartite_matcher
from object_detection.utils import shape_utils
class TargetAssigner(object):
"""Target assigner to compute classification and regression targets."""
def __init__(self,
similarity_calc,
matcher,
box_coder,
negative_class_weight=1.0):
"""Construct Object Detection Target Assigner.
Args:
similarity_calc: a RegionSimilarityCalculator
matcher: an object_detection.core.Matcher used to match groundtruth to
anchors.
box_coder: an object_detection.core.BoxCoder used to encode matching
groundtruth boxes with respect to anchors.
negative_class_weight: classification weight to be associated to negative
anchors (default: 1.0). The weight must be in [0., 1.].
Raises:
ValueError: if similarity_calc is not a RegionSimilarityCalculator or
if matcher is not a Matcher or if box_coder is not a BoxCoder
"""
if not isinstance(similarity_calc, sim_calc.RegionSimilarityCalculator):
raise ValueError('similarity_calc must be a RegionSimilarityCalculator')
if not isinstance(matcher, mat.Matcher):
raise ValueError('matcher must be a Matcher')
if not isinstance(box_coder, bcoder.BoxCoder):
raise ValueError('box_coder must be a BoxCoder')
self._similarity_calc = similarity_calc
self._matcher = matcher
self._box_coder = box_coder
self._negative_class_weight = negative_class_weight
@property
def box_coder(self):
return self._box_coder
# TODO(rathodv): move labels, scores, and weights to groundtruth_boxes fields.
def assign(self,
anchors,
groundtruth_boxes,
groundtruth_labels=None,
unmatched_class_label=None,
groundtruth_weights=None):
"""Assign classification and regression targets to each anchor.
For a given set of anchors and groundtruth detections, match anchors
to groundtruth_boxes and assign classification and regression targets to
each anchor as well as weights based on the resulting match (specifying,
e.g., which anchors should not contribute to training loss).
Anchors that are not matched to anything are given a classification target
of self._unmatched_cls_target which can be specified via the constructor.
Args:
anchors: a BoxList representing N anchors
groundtruth_boxes: a BoxList representing M groundtruth boxes
groundtruth_labels: a tensor of shape [M, d_1, ... d_k]
with labels for each of the ground_truth boxes. The subshape
[d_1, ... d_k] can be empty (corresponding to scalar inputs). When set
to None, groundtruth_labels assumes a binary problem where all
ground_truth boxes get a positive label (of 1).
unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
If set to None, unmatched_cls_target is set to be [0] for each anchor.
groundtruth_weights: a float tensor of shape [M] indicating the weight to
assign to all anchors match to a particular groundtruth box. The weights
must be in [0., 1.]. If None, all weights are set to 1. Generally no
groundtruth boxes with zero weight match to any anchors as matchers are
aware of groundtruth weights. Additionally, `cls_weights` and
`reg_weights` are calculated using groundtruth weights as an added
safety.
Returns:
cls_targets: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k],
where the subshape [d_1, ..., d_k] is compatible with groundtruth_labels
which has shape [num_gt_boxes, d_1, d_2, ... d_k].
cls_weights: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k],
representing weights for each element in cls_targets.
reg_targets: a float32 tensor with shape [num_anchors, box_code_dimension]
reg_weights: a float32 tensor with shape [num_anchors]
match: a matcher.Match object encoding the match between anchors and
groundtruth boxes, with rows corresponding to groundtruth boxes
and columns corresponding to anchors.
Raises:
ValueError: if anchors or groundtruth_boxes are not of type
box_list.BoxList
"""
if not isinstance(anchors, box_list.BoxList):
      raise ValueError('anchors must be a BoxList')
if not isinstance(groundtruth_boxes, box_list.BoxList):
      raise ValueError('groundtruth_boxes must be a BoxList')
if unmatched_class_label is None:
unmatched_class_label = tf.constant([0], tf.float32)
if groundtruth_labels is None:
groundtruth_labels = tf.ones(tf.expand_dims(groundtruth_boxes.num_boxes(),
0))
groundtruth_labels = tf.expand_dims(groundtruth_labels, -1)
unmatched_shape_assert = shape_utils.assert_shape_equal(
shape_utils.combined_static_and_dynamic_shape(groundtruth_labels)[1:],
shape_utils.combined_static_and_dynamic_shape(unmatched_class_label))
labels_and_box_shapes_assert = shape_utils.assert_shape_equal(
shape_utils.combined_static_and_dynamic_shape(
groundtruth_labels)[:1],
shape_utils.combined_static_and_dynamic_shape(
groundtruth_boxes.get())[:1])
if groundtruth_weights is None:
num_gt_boxes = groundtruth_boxes.num_boxes_static()
if not num_gt_boxes:
num_gt_boxes = groundtruth_boxes.num_boxes()
groundtruth_weights = tf.ones([num_gt_boxes], dtype=tf.float32)
# set scores on the gt boxes
scores = 1 - groundtruth_labels[:, 0]
groundtruth_boxes.add_field(fields.BoxListFields.scores, scores)
with tf.control_dependencies(
[unmatched_shape_assert, labels_and_box_shapes_assert]):
match_quality_matrix = self._similarity_calc.compare(groundtruth_boxes,
anchors)
match = self._matcher.match(match_quality_matrix,
valid_rows=tf.greater(groundtruth_weights, 0))
reg_targets = self._create_regression_targets(anchors,
groundtruth_boxes,
match)
cls_targets = self._create_classification_targets(groundtruth_labels,
unmatched_class_label,
match)
reg_weights = self._create_regression_weights(match, groundtruth_weights)
cls_weights = self._create_classification_weights(match,
groundtruth_weights)
# convert cls_weights from per-anchor to per-class.
class_label_shape = tf.shape(cls_targets)[1:]
weights_shape = tf.shape(cls_weights)
weights_multiple = tf.concat(
[tf.ones_like(weights_shape), class_label_shape],
axis=0)
for _ in range(len(cls_targets.get_shape()[1:])):
cls_weights = tf.expand_dims(cls_weights, -1)
cls_weights = tf.tile(cls_weights, weights_multiple)
num_anchors = anchors.num_boxes_static()
if num_anchors is not None:
reg_targets = self._reset_target_shape(reg_targets, num_anchors)
cls_targets = self._reset_target_shape(cls_targets, num_anchors)
reg_weights = self._reset_target_shape(reg_weights, num_anchors)
cls_weights = self._reset_target_shape(cls_weights, num_anchors)
return cls_targets, cls_weights, reg_targets, reg_weights, match
def _reset_target_shape(self, target, num_anchors):
"""Sets the static shape of the target.
Args:
target: the target tensor. Its first dimension will be overwritten.
num_anchors: the number of anchors, which is used to override the target's
first dimension.
Returns:
A tensor with the shape info filled in.
"""
target_shape = target.get_shape().as_list()
target_shape[0] = num_anchors
target.set_shape(target_shape)
return target
def _create_regression_targets(self, anchors, groundtruth_boxes, match):
"""Returns a regression target for each anchor.
Args:
anchors: a BoxList representing N anchors
groundtruth_boxes: a BoxList representing M groundtruth_boxes
match: a matcher.Match object
Returns:
reg_targets: a float32 tensor with shape [N, box_code_dimension]
"""
matched_gt_boxes = match.gather_based_on_match(
groundtruth_boxes.get(),
unmatched_value=tf.zeros(4),
ignored_value=tf.zeros(4))
matched_gt_boxlist = box_list.BoxList(matched_gt_boxes)
if groundtruth_boxes.has_field(fields.BoxListFields.keypoints):
groundtruth_keypoints = groundtruth_boxes.get_field(
fields.BoxListFields.keypoints)
matched_keypoints = match.gather_based_on_match(
groundtruth_keypoints,
unmatched_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]),
ignored_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]))
matched_gt_boxlist.add_field(fields.BoxListFields.keypoints,
matched_keypoints)
matched_reg_targets = self._box_coder.encode(matched_gt_boxlist, anchors)
match_results_shape = shape_utils.combined_static_and_dynamic_shape(
match.match_results)
# Zero out the unmatched and ignored regression targets.
unmatched_ignored_reg_targets = tf.tile(
self._default_regression_target(), [match_results_shape[0], 1])
matched_anchors_mask = match.matched_column_indicator()
reg_targets = tf.where(matched_anchors_mask,
matched_reg_targets,
unmatched_ignored_reg_targets)
return reg_targets
def _default_regression_target(self):
"""Returns the default target for anchors to regress to.
Default regression targets are set to zero (though in
this implementation what these targets are set to should
not matter as the regression weight of any box set to
regress to the default target is zero).
Returns:
default_target: a float32 tensor with shape [1, box_code_dimension]
"""
return tf.constant([self._box_coder.code_size*[0]], tf.float32)
def _create_classification_targets(self, groundtruth_labels,
unmatched_class_label, match):
"""Create classification targets for each anchor.
    Assign a classification target for each anchor to the matching
groundtruth label that is provided by match. Anchors that are not matched
to anything are given the target self._unmatched_cls_target
Args:
groundtruth_labels: a tensor of shape [num_gt_boxes, d_1, ... d_k]
with labels for each of the ground_truth boxes. The subshape
[d_1, ... d_k] can be empty (corresponding to scalar labels).
unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
Returns:
a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k], where the
subshape [d_1, ..., d_k] is compatible with groundtruth_labels which has
shape [num_gt_boxes, d_1, d_2, ... d_k].
"""
return match.gather_based_on_match(
groundtruth_labels,
unmatched_value=unmatched_class_label,
ignored_value=unmatched_class_label)
def _create_regression_weights(self, match, groundtruth_weights):
"""Set regression weight for each anchor.
Only positive anchors are set to contribute to the regression loss, so this
method returns a weight of 1 for every positive anchor and 0 for every
negative anchor.
Args:
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
groundtruth_weights: a float tensor of shape [M] indicating the weight to
assign to all anchors match to a particular groundtruth box.
Returns:
a float32 tensor with shape [num_anchors] representing regression weights.
"""
return match.gather_based_on_match(
groundtruth_weights, ignored_value=0., unmatched_value=0.)
def _create_classification_weights(self,
match,
groundtruth_weights):
"""Create classification weights for each anchor.
Positive (matched) anchors are associated with a weight of
positive_class_weight and negative (unmatched) anchors are associated with
a weight of negative_class_weight. When anchors are ignored, weights are set
to zero. By default, both positive/negative weights are set to 1.0,
but they can be adjusted to handle class imbalance (which is almost always
the case in object detection).
Args:
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
groundtruth_weights: a float tensor of shape [M] indicating the weight to
assign to all anchors match to a particular groundtruth box.
Returns:
a float32 tensor with shape [num_anchors] representing classification
weights.
"""
return match.gather_based_on_match(
groundtruth_weights,
ignored_value=0.,
unmatched_value=self._negative_class_weight)
def get_box_coder(self):
"""Get BoxCoder of this TargetAssigner.
Returns:
BoxCoder object.
"""
return self._box_coder
# TODO(rathodv): This method pulls in all the implementation dependencies into
# core. Therefore it's best to have this factory method outside of core.
def create_target_assigner(reference, stage=None,
negative_class_weight=1.0, use_matmul_gather=False):
"""Factory function for creating standard target assigners.
Args:
reference: string referencing the type of TargetAssigner.
stage: string denoting stage: {proposal, detection}.
negative_class_weight: classification weight to be associated to negative
anchors (default: 1.0)
    use_matmul_gather: whether to use matrix multiplication based gather which
      is better suited for TPUs.
Returns:
TargetAssigner: desired target assigner.
Raises:
ValueError: if combination reference+stage is invalid.
"""
if reference == 'Multibox' and stage == 'proposal':
similarity_calc = sim_calc.NegSqDistSimilarity()
matcher = bipartite_matcher.GreedyBipartiteMatcher()
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder()
elif reference == 'FasterRCNN' and stage == 'proposal':
similarity_calc = sim_calc.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.7,
unmatched_threshold=0.3,
force_match_for_each_row=True,
use_matmul_gather=use_matmul_gather)
box_coder = faster_rcnn_box_coder.FasterRcnnBoxCoder(
scale_factors=[10.0, 10.0, 5.0, 5.0])
elif reference == 'FasterRCNN' and stage == 'detection':
similarity_calc = sim_calc.IouSimilarity()
# Uses all proposals with IOU < 0.5 as candidate negatives.
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
negatives_lower_than_unmatched=True,
use_matmul_gather=use_matmul_gather)
box_coder = faster_rcnn_box_coder.FasterRcnnBoxCoder(
scale_factors=[10.0, 10.0, 5.0, 5.0])
elif reference == 'FastRCNN':
similarity_calc = sim_calc.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.1,
force_match_for_each_row=False,
negatives_lower_than_unmatched=False,
use_matmul_gather=use_matmul_gather)
box_coder = faster_rcnn_box_coder.FasterRcnnBoxCoder()
else:
raise ValueError('No valid combination of reference and stage.')
return TargetAssigner(similarity_calc, matcher, box_coder,
negative_class_weight=negative_class_weight)
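# Editorial note: a minimal TF1-style sketch, not part of the original file.
# The box coordinates and the two-class one-hot labels are toy values, and
# nothing below runs on import.
def _example_assign():
  assigner = create_target_assigner('FasterRCNN', stage='detection')
  anchors = box_list.BoxList(
      tf.constant([[0., 0., 1., 1.], [0., 0., .5, .5]], tf.float32))
  gt_boxes = box_list.BoxList(tf.constant([[0., 0., 1., 1.]], tf.float32))
  gt_labels = tf.constant([[0., 1.]], tf.float32)
  return assigner.assign(anchors, gt_boxes, gt_labels,
                         unmatched_class_label=tf.constant([1., 0.], tf.float32))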
def batch_assign_targets(target_assigner,
anchors_batch,
gt_box_batch,
gt_class_targets_batch,
unmatched_class_label=None,
gt_weights_batch=None):
"""Batched assignment of classification and regression targets.
Args:
target_assigner: a target assigner.
anchors_batch: BoxList representing N box anchors or list of BoxList objects
with length batch_size representing anchor sets.
gt_box_batch: a list of BoxList objects with length batch_size
representing groundtruth boxes for each image in the batch
gt_class_targets_batch: a list of tensors with length batch_size, where
each tensor has shape [num_gt_boxes_i, classification_target_size] and
num_gt_boxes_i is the number of boxes in the ith boxlist of
gt_box_batch.
unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
gt_weights_batch: A list of 1-D tf.float32 tensors of shape
[num_boxes] containing weights for groundtruth boxes.
Returns:
batch_cls_targets: a tensor with shape [batch_size, num_anchors,
num_classes],
batch_cls_weights: a tensor with shape [batch_size, num_anchors,
num_classes],
batch_reg_targets: a tensor with shape [batch_size, num_anchors,
box_code_dimension]
batch_reg_weights: a tensor with shape [batch_size, num_anchors],
match_list: a list of matcher.Match objects encoding the match between
anchors and groundtruth boxes for each image of the batch,
with rows of the Match objects corresponding to groundtruth boxes
and columns corresponding to anchors.
Raises:
ValueError: if input list lengths are inconsistent, i.e.,
batch_size == len(gt_box_batch) == len(gt_class_targets_batch)
and batch_size == len(anchors_batch) unless anchors_batch is a single
BoxList.
"""
if not isinstance(anchors_batch, list):
anchors_batch = len(gt_box_batch) * [anchors_batch]
if not all(
isinstance(anchors, box_list.BoxList) for anchors in anchors_batch):
raise ValueError('anchors_batch must be a BoxList or list of BoxLists.')
if not (len(anchors_batch)
== len(gt_box_batch)
== len(gt_class_targets_batch)):
raise ValueError('batch size incompatible with lengths of anchors_batch, '
'gt_box_batch and gt_class_targets_batch.')
cls_targets_list = []
cls_weights_list = []
reg_targets_list = []
reg_weights_list = []
match_list = []
if gt_weights_batch is None:
gt_weights_batch = [None] * len(gt_class_targets_batch)
for anchors, gt_boxes, gt_class_targets, gt_weights in zip(
anchors_batch, gt_box_batch, gt_class_targets_batch, gt_weights_batch):
(cls_targets, cls_weights,
reg_targets, reg_weights, match) = target_assigner.assign(
anchors, gt_boxes, gt_class_targets, unmatched_class_label, gt_weights)
cls_targets_list.append(cls_targets)
cls_weights_list.append(cls_weights)
reg_targets_list.append(reg_targets)
reg_weights_list.append(reg_weights)
match_list.append(match)
batch_cls_targets = tf.stack(cls_targets_list)
batch_cls_weights = tf.stack(cls_weights_list)
batch_reg_targets = tf.stack(reg_targets_list)
batch_reg_weights = tf.stack(reg_weights_list)
return (batch_cls_targets, batch_cls_weights, batch_reg_targets,
batch_reg_weights, match_list)
def batch_assign_confidences(target_assigner,
anchors_batch,
gt_box_batch,
gt_class_confidences_batch,
gt_weights_batch=None,
unmatched_class_label=None,
include_background_class=True,
implicit_class_weight=1.0):
"""Batched assignment of classification and regression targets.
  The differences between batch_assign_confidences and batch_assign_targets:
- 'batch_assign_targets' supports scalar (agnostic), vector (multiclass) and
tensor (high-dimensional) targets. 'batch_assign_confidences' only support
scalar (agnostic) and vector (multiclass) targets.
- 'batch_assign_targets' assumes the input class tensor using the binary
one/K-hot encoding. 'batch_assign_confidences' takes the class confidence
scores as the input, where 1 means positive classes, 0 means implicit
negative classes, and -1 means explicit negative classes.
- 'batch_assign_confidences' assigns the targets in the similar way as
'batch_assign_targets' except that it gives different weights for implicit
and explicit classes. This allows user to control the negative gradients
pushed differently for implicit and explicit examples during the training.
Args:
target_assigner: a target assigner.
anchors_batch: BoxList representing N box anchors or list of BoxList objects
with length batch_size representing anchor sets.
gt_box_batch: a list of BoxList objects with length batch_size
representing groundtruth boxes for each image in the batch
gt_class_confidences_batch: a list of tensors with length batch_size, where
each tensor has shape [num_gt_boxes_i, classification_target_size] and
num_gt_boxes_i is the number of boxes in the ith boxlist of
gt_box_batch. Note that in this tensor, 1 means explicit positive class,
-1 means explicit negative class, and 0 means implicit negative class.
gt_weights_batch: A list of 1-D tf.float32 tensors of shape
[num_gt_boxes_i] containing weights for groundtruth boxes.
unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
include_background_class: whether or not gt_class_confidences_batch includes
the background class.
implicit_class_weight: the weight assigned to implicit examples.
Returns:
batch_cls_targets: a tensor with shape [batch_size, num_anchors,
num_classes],
batch_cls_weights: a tensor with shape [batch_size, num_anchors,
num_classes],
batch_reg_targets: a tensor with shape [batch_size, num_anchors,
box_code_dimension]
batch_reg_weights: a tensor with shape [batch_size, num_anchors],
match_list: a list of matcher.Match objects encoding the match between
anchors and groundtruth boxes for each image of the batch,
with rows of the Match objects corresponding to groundtruth boxes
and columns corresponding to anchors.
Raises:
ValueError: if input list lengths are inconsistent, i.e.,
batch_size == len(gt_box_batch) == len(gt_class_targets_batch)
and batch_size == len(anchors_batch) unless anchors_batch is a single
BoxList, or if any element in gt_class_confidences_batch has rank > 2.
"""
if not isinstance(anchors_batch, list):
anchors_batch = len(gt_box_batch) * [anchors_batch]
if not all(
isinstance(anchors, box_list.BoxList) for anchors in anchors_batch):
raise ValueError('anchors_batch must be a BoxList or list of BoxLists.')
if not (len(anchors_batch)
== len(gt_box_batch)
== len(gt_class_confidences_batch)):
raise ValueError('batch size incompatible with lengths of anchors_batch, '
'gt_box_batch and gt_class_confidences_batch.')
cls_targets_list = []
cls_weights_list = []
reg_targets_list = []
reg_weights_list = []
match_list = []
if gt_weights_batch is None:
gt_weights_batch = [None] * len(gt_class_confidences_batch)
for anchors, gt_boxes, gt_class_confidences, gt_weights in zip(
anchors_batch, gt_box_batch, gt_class_confidences_batch,
gt_weights_batch):
if (gt_class_confidences is not None and
len(gt_class_confidences.get_shape().as_list()) > 2):
raise ValueError('The shape of the class target is not supported. ',
gt_class_confidences.get_shape())
cls_targets, _, reg_targets, _, match = target_assigner.assign(
anchors, gt_boxes, gt_class_confidences, unmatched_class_label,
groundtruth_weights=gt_weights)
if include_background_class:
cls_targets_without_background = tf.slice(
cls_targets, [0, 1], [-1, -1])
else:
cls_targets_without_background = cls_targets
positive_mask = tf.greater(cls_targets_without_background, 0.0)
negative_mask = tf.less(cls_targets_without_background, 0.0)
explicit_example_mask = tf.logical_or(positive_mask, negative_mask)
positive_anchors = tf.reduce_any(positive_mask, axis=-1)
regression_weights = tf.to_float(positive_anchors)
regression_targets = (
reg_targets * tf.expand_dims(regression_weights, axis=-1))
regression_weights_expanded = tf.expand_dims(regression_weights, axis=-1)
cls_targets_without_background = (
cls_targets_without_background * (1 - tf.to_float(negative_mask)))
cls_weights_without_background = (
(1 - implicit_class_weight) * tf.to_float(explicit_example_mask)
+ implicit_class_weight)
if include_background_class:
cls_weights_background = (
(1 - implicit_class_weight) * regression_weights_expanded
+ implicit_class_weight)
classification_weights = tf.concat(
[cls_weights_background, cls_weights_without_background], axis=-1)
cls_targets_background = 1 - regression_weights_expanded
classification_targets = tf.concat(
[cls_targets_background, cls_targets_without_background], axis=-1)
else:
classification_targets = cls_targets_without_background
classification_weights = cls_weights_without_background
cls_targets_list.append(classification_targets)
cls_weights_list.append(classification_weights)
reg_targets_list.append(regression_targets)
reg_weights_list.append(regression_weights)
match_list.append(match)
batch_cls_targets = tf.stack(cls_targets_list)
batch_cls_weights = tf.stack(cls_weights_list)
batch_reg_targets = tf.stack(reg_targets_list)
batch_reg_weights = tf.stack(reg_weights_list)
return (batch_cls_targets, batch_cls_weights, batch_reg_targets,
batch_reg_weights, match_list)
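# Illustrative sketch, added after the fact and not part of the original module: the
# per-anchor mask/weight logic from the loop above, reproduced with NumPy on a tiny
# hand-made confidence matrix. The convention matches the docstring: 1 = explicit
# positive, 0 = implicit negative, -1 = explicit negative. The helper name, the array
# contents and the 0.25 implicit weight are illustration-only assumptions.
import numpy as np
def _confidence_masks_sketch(cls_targets_without_background, implicit_class_weight=0.25):
  """Return (targets, weights) per anchor, the way the loop above derives them."""
  positive_mask = cls_targets_without_background > 0.0
  negative_mask = cls_targets_without_background < 0.0
  explicit_example_mask = np.logical_or(positive_mask, negative_mask)
  # Explicit negatives (-1) are zeroed out in the targets...
  targets = cls_targets_without_background * (1 - negative_mask.astype(np.float32))
  # ...but explicit entries (positive or negative) keep full weight, while implicit
  # negatives (0) only receive implicit_class_weight.
  weights = ((1 - implicit_class_weight) * explicit_example_mask.astype(np.float32)
             + implicit_class_weight)
  return targets, weights
if __name__ == '__main__':
  conf = np.array([[1.0, 0.0, -1.0],
                   [0.0, 0.0, 0.0]], dtype=np.float32)
  sketch_targets, sketch_weights = _confidence_masks_sketch(conf)
  assert sketch_targets.tolist() == [[1.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
  # sketch_weights -> [[1.0, 0.25, 1.0], [0.25, 0.25, 0.25]]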
|
apache-2.0
| -8,006,630,844,042,189,000
| 45.796238
| 80
| 0.675744
| false
| 4.046077
| false
| false
| false
|
caihaibin/Blog
|
handlers/blog.py
|
1
|
6084
|
import datetime
import config
import PyRSS2Gen
from google.appengine.ext import webapp
from models import blog
import view
class IndexHandler(webapp.RequestHandler):
def get(self):
query = blog.Post.all()
query.filter('publish =', True)
query.order('-pub_date')
template_values = {'page_title': 'Home',
}
page = view.Page()
page.render_paginated_query(self, query, 'posts', 'templates/index.html', template_values)
class PostHandler(webapp.RequestHandler):
def get(self, year, month, day, slug):
year = int(year)
month = int(month)
day = int(day)
# Build the time span to check for the given slug
start_date = datetime.datetime(year, month, day)
time_delta = datetime.timedelta(days=1)
end_date = start_date + time_delta
# Create a query to check for slug uniqueness in the specified time span
query = blog.Post.all()
query.filter('publish =', True)
query.filter('pub_date >= ', start_date)
query.filter('pub_date < ', end_date)
query.filter('slug = ', slug)
post = query.get()
        if post is None:
page = view.Page()
page.render_error(self, 404)
else:
template_values = {
'post': post,
}
page = view.Page()
page.render(self, 'templates/post.html', template_values)
class TagHandler(webapp.RequestHandler):
def get(self, tag):
query = blog.Post.all()
query.filter('publish =', True)
query.filter('tags = ', tag)
query.order('-pub_date')
template_values = {'page_title': 'Posts tagged "%s"' % (tag),
'page_description': 'Posts tagged "%s"' % (tag),
}
page = view.Page()
page.render_paginated_query(self, query, 'posts', 'templates/post_list.html', template_values)
class YearHandler(webapp.RequestHandler):
def get(self, year):
year = int(year)
# Build the time span to check for posts
start_date = datetime.datetime(year, 1, 1)
end_date = datetime.datetime(year + 1, 1, 1)
# Create a query to find posts in the given time span
query = blog.Post.all()
query.filter('publish =', True)
query.filter('pub_date >= ', start_date)
query.filter('pub_date < ', end_date)
query.order('-pub_date')
template_values = {'page_title': 'Yearly Post Archive: %d' % (year),
'page_description': 'Yearly Post Archive: %d' % (year),
}
page = view.Page()
page.render_paginated_query(self, query, 'posts', 'templates/post_list.html', template_values)
class MonthHandler(webapp.RequestHandler):
def get(self, year, month):
year = int(year)
month = int(month)
# Build the time span to check for posts
start_date = datetime.datetime(year, month, 1)
end_year = year if month < 12 else year + 1
end_month = month + 1 if month < 12 else 1
end_date = datetime.datetime(end_year, end_month, 1)
# Create a query to find posts in the given time span
query = blog.Post.all()
query.filter('publish =', True)
query.filter('pub_date >= ', start_date)
query.filter('pub_date < ', end_date)
query.order('-pub_date')
month_text = start_date.strftime('%B %Y')
template_values = {'page_title': 'Monthly Post Archive: %s' % (month_text),
'page_description': 'Monthly Post Archive: %s' % (month_text),
}
page = view.Page()
page.render_paginated_query(self, query, 'posts', 'templates/post_list.html', template_values)
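# Sketch added for illustration (not part of the original handlers): the month-boundary
# computation used by MonthHandler.get above, pulled out as a standalone helper. The
# helper name _month_span is hypothetical and is not referenced by the handlers.
def _month_span(year, month):
    """Return (start, end) datetimes spanning the given month, handling December rollover."""
    start = datetime.datetime(year, month, 1)
    end_year = year if month < 12 else year + 1
    end_month = month + 1 if month < 12 else 1
    return start, datetime.datetime(end_year, end_month, 1)
# Example: _month_span(2023, 12) -> (datetime(2023, 12, 1, 0, 0), datetime(2024, 1, 1, 0, 0))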
class DayHandler(webapp.RequestHandler):
def get(self, year, month, day):
year = int(year)
month = int(month)
day = int(day)
# Build the time span to check for posts
start_date = datetime.datetime(year, month, day)
time_delta = datetime.timedelta(days=1)
end_date = start_date + time_delta
# Create a query to find posts in the given time span
query = blog.Post.all()
query.filter('publish =', True)
query.filter('pub_date >= ', start_date)
query.filter('pub_date < ', end_date)
query.order('-pub_date')
day_text = start_date.strftime('%x')
template_values = {'page_title': 'Daily Post Archive: %s' % (day_text),
'page_description': 'Daily Post Archive: %s' % (day_text),
}
page = view.Page()
page.render_paginated_query(self, query, 'posts', 'templates/post_list.html', template_values)
class RSS2Handler(webapp.RequestHandler):
def get(self):
query = blog.Post.all()
query.filter('publish =', True)
query.order('-pub_date')
posts = query.fetch(10)
rss_items = []
for post in posts:
item = PyRSS2Gen.RSSItem(title=post.title,
link="%s%s" % (config.SETTINGS['url'], post.get_absolute_url()),
description=post.excerpt_html or post.body_html,
guid=PyRSS2Gen.Guid("%s%s" % (config.SETTINGS['url'], post.get_absolute_url())),
pubDate=post.pub_date
)
rss_items.append(item)
rss = PyRSS2Gen.RSS2(title=config.SETTINGS['title'],
link=config.SETTINGS['url'],
description=config.SETTINGS['description'],
lastBuildDate=datetime.datetime.now(),
items=rss_items
)
rss_xml = rss.to_xml()
self.response.headers['Content-Type'] = 'application/rss+xml'
self.response.out.write(rss_xml)
|
mit
| -8,702,056,349,484,685,000
| 34.372093
| 117
| 0.545529
| false
| 4.007905
| true
| false
| false
|
zoho/books-python-wrappers
|
books/service/ZohoBooks.py
|
1
|
7982
|
#$Id$#
from books.api.ContactsApi import ContactsApi
from books.api.ContactPersonsApi import ContactPersonsApi
from books.api.EstimatesApi import EstimatesApi
from books.api.InvoicesApi import InvoicesApi
from books.api.RecurringInvoicesApi import RecurringInvoicesApi
from books.api.CreditNotesApi import CreditNotesApi
from books.api.CustomerPaymentsApi import CustomerPaymentsApi
from books.api.ExpensesApi import ExpensesApi
from books.api.RecurringExpensesApi import RecurringExpensesApi
from books.api.BillsApi import BillsApi
from books.api.VendorPaymentsApi import VendorPaymentsApi
from books.api.BankAccountsApi import BankAccountsApi
from books.api.BankTransactionsApi import BankTransactionsApi
from books.api.BankRulesApi import BankRulesApi
from books.api.ChartOfAccountsApi import ChartOfAccountsApi
from books.api.JournalsApi import JournalsApi
from books.api.BaseCurrencyAdjustmentApi import BaseCurrencyAdjustmentApi
from books.api.ProjectsApi import ProjectsApi
from books.api.SettingsApi import SettingsApi
from books.api.ItemsApi import ItemsApi
from books.api.OrganizationsApi import OrganizationsApi
from books.api.UsersApi import UsersApi
class ZohoBooks:
"""
This class is used to create an object for books service and to provide instance for all APIs.
"""
def __init__(self, authtoken, organization_id):
"""Initialize the parameters for Zoho books.
Args:
authtoken(str): User's Authtoken.
organization_id(str): User's Organization id.
"""
self.authtoken=authtoken
self.organization_id=organization_id
def get_contacts_api(self):
"""Get instance for contacts api.
Returns:
instance: Contacts api instance.
"""
contacts_api = ContactsApi(self.authtoken, self.organization_id)
return contacts_api
def get_contact_persons_api(self):
"""Get instance for contact persons api.
Returns:
instance: Contact persons api.
"""
contact_persons_api = ContactPersonsApi(self.authtoken,
self.organization_id)
return contact_persons_api
def get_estimates_api(self):
"""Get instance for estimates api.
Returns:
instance: Estimates api.
"""
estimates_api = EstimatesApi(self.authtoken, self.organization_id)
return estimates_api
def get_invoices_api(self):
"""Get instance for invoice api.
Returns:
instance: Invoice api.
"""
invoices_api = InvoicesApi(self.authtoken, self.organization_id)
return invoices_api
def get_recurring_invoices_api(self):
"""Get instance for recurring invoices api.
Returns:
instance: Recurring invoice api.
"""
recurring_invoices_api = RecurringInvoicesApi(self.authtoken, \
self.organization_id)
return recurring_invoices_api
def get_creditnotes_api(self):
"""Get instance for creditnotes api.
Returns:
instance: Creditnotes api.
"""
creditnotes_api = CreditNotesApi(self.authtoken, self.organization_id)
return creditnotes_api
def get_customer_payments_api(self):
"""Get instance for customer payments api.
Returns:
instance: Customer payments api.
"""
customer_payments_api = CustomerPaymentsApi(self.authtoken,
self.organization_id)
return customer_payments_api
def get_expenses_api(self):
"""Get instance for expenses api.
Returns:
instance: Expenses api.
"""
expenses_api = ExpensesApi(self.authtoken, self.organization_id)
return expenses_api
def get_recurring_expenses_api(self):
"""Get instance for recurring expenses api.
Returns:
instance: Recurring expenses api.
"""
recurring_expenses_api = RecurringExpensesApi(self.authtoken,
self.organization_id)
return recurring_expenses_api
def get_bills_api(self):
"""Get instance for bills api.
Returns:
instance: Bills api
"""
bills_api = BillsApi(self.authtoken, self.organization_id)
return bills_api
def get_vendor_payments_api(self):
"""Get instance for vendor payments api.
Returns:
instance: vendor payments api
"""
vendor_payments_api = VendorPaymentsApi(self.authtoken,
self.organization_id)
return vendor_payments_api
def get_bank_accounts_api(self):
"""Get instancce for bank accounts api.
Returns:
instance: Bank accounts api.
"""
bank_accounts_api = BankAccountsApi(self.authtoken,
self.organization_id)
return bank_accounts_api
def get_bank_transactions_api(self):
"""Get instance for bank transactions api.
Returns:
instance: Bank Transactions api.
"""
bank_transactions_api = BankTransactionsApi(self.authtoken,
self.organization_id)
return bank_transactions_api
def get_bank_rules_api(self):
"""Get instance for bank rules api.
Returns:
instance: Bank rules api.
"""
bank_rules_api = BankRulesApi(self.authtoken, self.organization_id)
return bank_rules_api
def get_chart_of_accounts_api(self):
"""Get instancce for chart of accounts api
Returns:
instance: Chart of accounts api.
"""
chart_of_accounts_api = ChartOfAccountsApi(self.authtoken,
self.organization_id)
return chart_of_accounts_api
def get_journals_api(self):
"""Get instance for journals api.
Returns:
instance: Journals api.
"""
journals_api = JournalsApi(self.authtoken, self.organization_id)
return journals_api
def get_base_currency_adjustment_api(self):
"""Get instance for base currency adjustment api
Returns:
instance: Base currency adjustments api.
"""
base_currency_adjustment_api = BaseCurrencyAdjustmentApi(\
self.authtoken, self.organization_id)
return base_currency_adjustment_api
def get_projects_api(self):
"""Get instance for projects api.
Returns:
instance: Projects api.
"""
projects_api = ProjectsApi(self.authtoken, self.organization_id)
return projects_api
def get_settings_api(self):
"""Get instance for settings api.
Returns:
instance: Settings api.
"""
settings_api = SettingsApi(self.authtoken, self.organization_id)
return settings_api
def get_items_api(self):
"""Get instance for items api.
Returns:
instance: Items api.
"""
items_api = ItemsApi(self.authtoken, self.organization_id)
return items_api
def get_users_api(self):
"""Get instance for users api.
Returns:
instance: Users api.
"""
users_api = UsersApi(self.authtoken, self.organization_id)
return users_api
def get_organizations_api(self):
"""Get instance for organizations api.
Returns:
instance: Organizations api.
"""
organizations_api = OrganizationsApi(self.authtoken, self.organization_id)
return organizations_api
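# Usage sketch, added for illustration and not part of the original wrapper: create the
# service object once and pull the API instances you need from it. The authtoken and
# organization id below are placeholders, not real credentials.
if __name__ == '__main__':
    books = ZohoBooks('{auth_token}', '{organization_id}')
    contacts_api = books.get_contacts_api()
    invoices_api = books.get_invoices_api()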
|
mit
| -840,055,033,120,695,400
| 28.562963
| 98
| 0.609997
| false
| 4.439377
| false
| false
| false
|
philtgun/horse
|
horse-welcome.py
|
1
|
1475
|
#!/usr/bin/env python
import RPi.GPIO as GPIO
import time
import requests
def greeting():
"Plays greeting audio via REST API"
r = requests.get("http://localhost:3000/playAudio/welcome.mp3");
def decCount(i):
if i > 0:
i -= 1
T_POLL = 0.5 # sec
T_INSIDE_ACTIVE = 20 # sec
T_WELCOME_DELAY = 2 # sec
T_WELCOME_COOLDOWN = 60 # 1 min
T_DOOR_INACTIVE = 300 # 5 min
PIN_PIR = 15
PIN_DOOR = 14
GPIO.setmode(GPIO.BCM)
GPIO.setup(PIN_PIR, GPIO.IN)
GPIO.setup(PIN_DOOR, GPIO.IN, pull_up_down=GPIO.PUD_UP)
isInBefore = False # nobody
doorOpenBefore = False
countIn = 0
countDoor = 0
print "Starting"
while True:
doorOpen = GPIO.input(PIN_DOOR)
isIn = GPIO.input(PIN_PIR)
print "[DEBUG] doorOpen: {}, isIn: {}, countIn: {}, countDoor {}".format(doorOpen, isIn, countIn, countDoor)
if doorOpen and not doorOpenBefore:
print "Somebody's opened the door"
countDoor = T_INSIDE_ACTIVE / T_POLL
if isIn and not isInBefore:
if countDoor > 0 and countIn == 0:
print "Hello!"
time.sleep(T_WELCOME_DELAY)
greeting()
time.sleep(T_WELCOME_COOLDOWN)
            countDoor = 0  # don't let the same door event trigger another greeting
            # countIn = 0 # probably redundant
else:
print "Somebody's leaving! No hello for {} min".format(T_DOOR_INACTIVE / 60)
countIn = T_DOOR_INACTIVE / T_POLL
time.sleep(T_POLL)
isInBefore = isIn
doorOpenBefore = doorOpen
countDoor -= 1 if countDoor > 0 else 0
countIn -= 1 if countIn > 0 else 0
|
mit
| 4,151,467,977,349,972,500
| 21.348485
| 110
| 0.661695
| false
| 2.711397
| false
| false
| false
|
jennywoites/MUSSA
|
MUSSA_Flask/app/API_Rest/GeneradorPlanCarreras/GeneradorPLE/OptimizadorCodigoPulp.py
|
1
|
4966
|
from app.API_Rest.GeneradorPlanCarreras.Constantes import *
MENOR_IGUAL = 0
MAYOR_IGUAL = 1
def obtener_variables_candidatas(parametros):
variables_candidatas = {}
with open(parametros.nombre_archivo_pulp, 'r') as arch:
for linea in arch:
linea = linea.rstrip('\n')
ecuacion = linea.split("prob += (")
            if len(ecuacion) < 2:  # not the line we are looking for
continue
menor_igual = ecuacion[1].split(" <= 0")
mayor_igual = ecuacion[1].split(" >= 0")
            variable_actual = menor_igual[0]  # or mayor_igual[0]; both splits leave the variable expression
            acumulados = variables_candidatas.get(variable_actual, [0,0])
            acumulados[MENOR_IGUAL] += 1 if len(menor_igual) == 2 else 0
            acumulados[MAYOR_IGUAL] += 1 if len(mayor_igual) == 2 else 0
            variables_candidatas[variable_actual] = acumulados
return variables_candidatas
def obtener_variables_a_eliminar(parametros):
variables_candidatas = obtener_variables_candidatas(parametros)
variables_a_eliminar = []
for candidata in variables_candidatas:
multiples_variables = candidata.split()
        if len(multiples_variables) > 1:  # only useful when it is a single variable
continue
acumulados = variables_candidatas[candidata]
if acumulados[MENOR_IGUAL] == acumulados[MAYOR_IGUAL] == 1:
variables_a_eliminar.append(candidata)
return variables_a_eliminar
def reemplazar_todas_las_apariciones(texto, valor_a_reeemplazar, nuevo_valor):
anterior = ""
while (anterior != texto):
anterior = texto
texto = texto.replace(valor_a_reeemplazar, nuevo_valor)
return texto
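# Illustration added for clarity (not in the original module): unlike a single
# str.replace, this helper keeps replacing until the text stops changing, i.e. it
# reaches a fixed point. For example, collapsing runs of 'a':
#   "aaa".replace("aa", "a")                            -> "aa"
#   reemplazar_todas_las_apariciones("aaa", "aa", "a")  -> "a"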
def define_variable_mayor_a_cero(linea):
inicio = "prob += ("
final = " >= 0)"
linea_aux = linea[len(inicio):]
if final in linea_aux:
linea_aux = linea_aux.replace(final, "")
variable_mayor_a_cero = linea_aux.split()
if len(variable_mayor_a_cero) == 1:
return True
return False
def define_variable_menor_a_infinito(linea):
inicio = "prob += ("
final = " <= 0 + (1 - 0) * {})".format(INFINITO)
linea_aux = linea[len(inicio):]
if final in linea_aux:
linea_aux = linea_aux.replace(final, "")
variable_menor_a_infinito = linea_aux.split()
if len(variable_menor_a_infinito) == 1:
return True
return False
def reemplazar_productos_franjas_por_cero(parametros, linea):
for franja in range(parametros.franja_minima, parametros.franja_maxima +1):
producto = " {} * 0".format(franja)
linea = reemplazar_todas_las_apariciones(linea, producto, " 0")
return linea
def limpiar_linea(parametros, linea, variables_a_eliminar):
for variable in variables_a_eliminar:
if variable not in linea:
continue
if "LpVariable" in linea:
return "" #La linea no se escribe, no es necesario revisar las demas variables
if "arch.write" in linea:
return """ arch.write("{};0" + '\\n')\n""".format(variable)
linea = reemplazar_todas_las_apariciones(linea, variable, "0")
linea = reemplazar_apariciones_suma_cero(linea)
linea = reemplazar_productos_franjas_por_cero(parametros, linea)
linea = reemplazar_apariciones_suma_cero(linea)
return linea
def reemplazar_apariciones_suma_cero(linea):
linea = reemplazar_todas_las_apariciones(linea, "+ 0 ", "")
linea = reemplazar_todas_las_apariciones(linea, "- 0 ", "")
linea = reemplazar_todas_las_apariciones(linea, " 0 + 0 ", "0")
linea = reemplazar_todas_las_apariciones(linea, "(0 + 0)", "0")
linea = reemplazar_todas_las_apariciones(linea, " 0 + 0)", "0)")
return linea
def limpiar_archivo(parametros, variables_a_eliminar, arch, arch_optimizado):
for linea in arch:
linea = limpiar_linea(parametros, linea, variables_a_eliminar)
if not linea:
continue
if linea == "prob += (0 <= 0)\n" or linea == "prob += (0 >= 0)\n":
continue #Es una tautologia, no hace falta escribirla
if define_variable_mayor_a_cero(linea):
continue #Todas las variables de este problema son mayores o iguales que 0
if define_variable_menor_a_infinito(linea):
continue #Todas las variables son menores a infinito, es una ecuacion anulable
arch_optimizado.write(linea)
def guardar_archivo_optimizado(parametros, variables_a_eliminar):
with open(parametros.nombre_archivo_pulp, 'r') as arch:
with open(parametros.nombre_archivo_pulp_optimizado, 'w') as arch_optimizado:
limpiar_archivo(parametros, variables_a_eliminar, arch, arch_optimizado)
def optimizar_codigo_pulp(parametros):
variables_a_eliminar = obtener_variables_a_eliminar(parametros)
guardar_archivo_optimizado(parametros, variables_a_eliminar)
|
gpl-3.0
| -6,937,123,647,893,344,000
| 32.106667
| 90
| 0.638743
| false
| 2.88051
| false
| false
| false
|
platformio/platformio-core
|
platformio/debug/config/blackmagic.py
|
1
|
1286
|
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from platformio.debug.config.base import DebugConfigBase
class BlackmagicDebugConfig(DebugConfigBase):
GDB_INIT_SCRIPT = """
define pio_reset_halt_target
set language c
set *0xE000ED0C = 0x05FA0004
set $busy = (*0xE000ED0C & 0x4)
while ($busy)
set $busy = (*0xE000ED0C & 0x4)
end
set language auto
end
define pio_reset_run_target
pio_reset_halt_target
end
target extended-remote $DEBUG_PORT
monitor swdp_scan
attach 1
set mem inaccessible-by-default off
$LOAD_CMDS
$INIT_BREAK
set language c
set *0xE000ED0C = 0x05FA0004
set $busy = (*0xE000ED0C & 0x4)
while ($busy)
set $busy = (*0xE000ED0C & 0x4)
end
set language auto
"""
|
apache-2.0
| 6,271,656,748,231,535,000
| 25.244898
| 74
| 0.734059
| false
| 3.247475
| false
| false
| false
|
ntt-sic/heat
|
heat/engine/resources/route_table.py
|
1
|
6007
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.engine import clients
from heat.openstack.common import log as logging
from heat.engine import properties
from heat.engine import resource
from heat.engine.resources.neutron import neutron
from heat.engine.resources.vpc import VPC
if clients.neutronclient is not None:
from neutronclient.common.exceptions import NeutronClientException
logger = logging.getLogger(__name__)
class RouteTable(resource.Resource):
PROPERTIES = (
VPC_ID, TAGS,
) = (
'VpcId', 'Tags',
)
_TAG_KEYS = (
TAG_KEY, TAG_VALUE,
) = (
'Key', 'Value',
)
properties_schema = {
VPC_ID: properties.Schema(
properties.Schema.STRING,
_('VPC ID for where the route table is created.'),
required=True
),
TAGS: properties.Schema(
properties.Schema.LIST,
schema=properties.Schema(
properties.Schema.MAP,
_('List of tags to be attached to this resource.'),
schema={
TAG_KEY: properties.Schema(
properties.Schema.STRING,
required=True
),
TAG_VALUE: properties.Schema(
properties.Schema.STRING,
required=True
),
},
implemented=False,
)
),
}
def handle_create(self):
client = self.neutron()
props = {'name': self.physical_resource_name()}
router = client.create_router({'router': props})['router']
self.resource_id_set(router['id'])
def check_create_complete(self, *args):
client = self.neutron()
attributes = client.show_router(
self.resource_id)['router']
if not neutron.NeutronResource.is_built(attributes):
return False
network_id = self.properties.get(self.VPC_ID)
default_router = VPC.router_for_vpc(client, network_id)
if default_router and default_router.get('external_gateway_info'):
# the default router for the VPC is connected
# to the external router, so do it for this too.
external_network_id = default_router[
'external_gateway_info']['network_id']
client.add_gateway_router(self.resource_id, {
'network_id': external_network_id})
return True
def handle_delete(self):
client = self.neutron()
router_id = self.resource_id
try:
client.delete_router(router_id)
except NeutronClientException as ex:
if ex.status_code != 404:
raise ex
# just in case this router has been added to a gateway, remove it
try:
client.remove_gateway_router(router_id)
except NeutronClientException as ex:
if ex.status_code != 404:
raise ex
class SubnetRouteTableAssociation(resource.Resource):
PROPERTIES = (
ROUTE_TABLE_ID, SUBNET_ID,
) = (
'RouteTableId', 'SubnetId',
)
properties_schema = {
ROUTE_TABLE_ID: properties.Schema(
properties.Schema.STRING,
_('Route table ID.'),
required=True
),
SUBNET_ID: properties.Schema(
properties.Schema.STRING,
_('Subnet ID.'),
required=True
),
}
def handle_create(self):
client = self.neutron()
subnet_id = self.properties.get(self.SUBNET_ID)
router_id = self.properties.get(self.ROUTE_TABLE_ID)
#remove the default router association for this subnet.
try:
previous_router = self._router_for_subnet(subnet_id)
if previous_router:
client.remove_interface_router(
previous_router['id'],
{'subnet_id': subnet_id})
except NeutronClientException as ex:
if ex.status_code != 404:
raise ex
client.add_interface_router(
router_id, {'subnet_id': subnet_id})
def _router_for_subnet(self, subnet_id):
client = self.neutron()
subnet = client.show_subnet(
subnet_id)['subnet']
network_id = subnet['network_id']
return VPC.router_for_vpc(client, network_id)
def handle_delete(self):
client = self.neutron()
subnet_id = self.properties.get(self.SUBNET_ID)
router_id = self.properties.get(self.ROUTE_TABLE_ID)
try:
client.remove_interface_router(router_id, {
'subnet_id': subnet_id})
except NeutronClientException as ex:
if ex.status_code != 404:
raise ex
# add back the default router
try:
default_router = self._router_for_subnet(subnet_id)
if default_router:
client.add_interface_router(
default_router['id'], {'subnet_id': subnet_id})
except NeutronClientException as ex:
if ex.status_code != 404:
raise ex
def resource_mapping():
if clients.neutronclient is None:
return {}
return {
'AWS::EC2::RouteTable': RouteTable,
'AWS::EC2::SubnetRouteTableAssociation': SubnetRouteTableAssociation,
}
|
apache-2.0
| -2,251,101,202,301,618,700
| 30.615789
| 78
| 0.575828
| false
| 4.334055
| false
| false
| false
|
otsaloma/gaupol
|
gaupol/agents/open.py
|
1
|
19435
|
# -*- coding: utf-8 -*-
# Copyright (C) 2005 Osmo Salomaa
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Opening subtitle files and creating new projects."""
import aeidon
import gaupol
import os
from aeidon.i18n import _
from gi.repository import Gtk
class OpenAgent(aeidon.Delegate):
"""Opening subtitle files and creating new projects."""
@aeidon.deco.export
def add_page(self, page):
"""Add `page` to the application."""
self.pages.append(page)
page.connect("close-request", self._on_page_close_request)
page.project.connect("action-done", self._on_project_action_done)
page.project.connect("action-redone", self._on_project_action_redone)
page.project.connect("action-undone", self._on_project_action_undone)
callback = self._on_tab_widget_button_press_event
page.tab_widget.connect("button-press-event", callback, page)
self.connect_view_signals(page.view)
page.project.clipboard.set_texts(self.clipboard.get_texts())
scroller = Gtk.ScrolledWindow()
policy = Gtk.PolicyType.AUTOMATIC
scroller.set_policy(policy, policy)
scroller.add(page.view)
self.notebook.append_page(scroller, page.tab_widget)
self.notebook.set_tab_reorderable(scroller, True)
self.notebook.child_set_property(scroller, "tab-expand", True)
self.notebook.child_set_property(scroller, "tab-fill", True)
self.notebook.show_all()
self.set_current_page(page)
self.emit("page-added", page)
@aeidon.deco.export
def add_to_recent_files(self, path, format, doc):
"""Add `path` to recent files managed by the recent manager."""
# XXX: The group field is not available for Python,
# we cannot differentiate between main and translation files.
# https://bugzilla.gnome.org/show_bug.cgi?id=695970
uri = aeidon.util.path_to_uri(path)
recent = Gtk.RecentData()
recent.mime_type = format.mime_type
recent.app_name = "gaupol"
recent.app_exec = "gaupol %F"
self.recent_manager.add_full(uri, recent)
@aeidon.deco.export
def append_file(self, path, encoding=None):
"""Append subtitles from file at `path` to the current project."""
encodings = self._get_encodings(encoding)
doc = aeidon.documents.MAIN
temp = self._open_file(path, encodings, doc, check_open=False)
gaupol.util.set_cursor_busy(self.window)
current = self.get_current_page()
offset = current.project.subtitles[-1].end
temp.project.shift_positions(None, offset)
rows = self._append_subtitles(current, temp.project.subtitles)
amount = len(rows)
current.view.set_focus(rows[0], None)
current.view.select_rows(rows)
current.view.scroll_to_row(rows[0])
basename = temp.get_main_basename()
message = _('Appended {amount:d} subtitles from "{basename}"')
self.flash_message(message.format(**locals()))
gaupol.util.set_cursor_normal(self.window)
def _append_subtitles(self, page, subtitles):
"""Append `subtitles` to `page` and return new indices."""
n = len(page.project.subtitles)
indices = list(range(n, n + len(subtitles)))
page.project.block("action-done")
page.project.insert_subtitles(indices, subtitles)
page.project.set_action_description(
aeidon.registers.DO, _("Appending file"))
page.project.unblock("action-done")
return tuple(indices)
def _check_file_exists(self, path):
"""Raise :exc:`gaupol.Default` if no file at `path`."""
gaupol.util.raise_default(not os.path.isfile(path))
def _check_file_not_open(self, path):
"""Raise :exc:`gaupol.Default` if file at `path` already open."""
for page in self.pages:
files = [page.project.main_file, page.project.tran_file]
paths = [x.path for x in files if x]
if not path in paths: continue
self.set_current_page(page)
message = _('File "{}" is already open')
self.flash_message(message.format(os.path.basename(path)))
raise gaupol.Default
def _check_file_size(self, path):
"""Raise :exc:`gaupol.Default` if size of file at `path` too large."""
size_mb = os.stat(path).st_size / 1048576
if size_mb <= 1: return
basename = os.path.basename(path)
self._show_size_warning_dialog(basename, size_mb)
def _check_sort_count(self, path, sort_count):
"""Raise :exc:`gaupol.Default` if `sort_count` too large."""
if sort_count <= 0: return
basename = os.path.basename(path)
self._show_sort_warning_dialog(basename, sort_count)
@aeidon.deco.export
def connect_view_signals(self, view):
"""Connect to signals emitted by `view`."""
view.connect_selection_changed(self._on_view_selection_changed)
view.connect_after("move-cursor", self._on_view_move_cursor)
view.connect("button-press-event", self._on_view_button_press_event)
for column in view.get_columns():
renderer = column.get_cells()[0]
callback = self._on_view_renderer_edited
renderer.connect("edited", callback, column)
callback = self._on_view_renderer_editing_started
renderer.connect("editing-started", callback, column)
callback = self._on_view_renderer_editing_canceled
renderer.connect("editing-canceled", callback, column)
button = column.get_widget().get_ancestor(Gtk.Button)
callback = self._on_view_header_button_press_event
button.connect("button-press-event", callback)
def _get_encodings(self, first=None):
"""Return a sequence of encodings to try when opening files."""
encodings = [first]
if gaupol.conf.encoding.try_locale:
encoding = aeidon.encodings.get_locale_code()
encodings.append(encoding)
encodings += gaupol.conf.encoding.fallback
try_auto = gaupol.conf.encoding.try_auto
if try_auto and aeidon.util.chardet_available():
encodings.append("auto")
encodings = list(filter(None, encodings))
encodings = encodings or ["utf_8"]
return tuple(aeidon.util.get_unique(encodings))
@aeidon.deco.export
@aeidon.deco.silent(gaupol.Default)
def _on_append_file_activate(self, *args):
"""Append subtitles from file to the current project."""
gaupol.util.set_cursor_busy(self.window)
dialog = gaupol.AppendDialog(self.window)
gaupol.util.set_cursor_normal(self.window)
response = gaupol.util.run_dialog(dialog)
paths = dialog.get_filenames()
encoding = dialog.get_encoding()
dialog.destroy()
if response != Gtk.ResponseType.OK: return
if not paths: return
gaupol.util.iterate_main()
self.append_file(paths[0], encoding)
@aeidon.deco.export
def _on_new_project_activate(self, *args):
"""Create a new project."""
if gaupol.fields.TRAN_TEXT in gaupol.conf.editor.visible_fields:
gaupol.conf.editor.visible_fields.remove(gaupol.fields.TRAN_TEXT)
page = gaupol.Page(next(self.counter))
page.project.insert_subtitles((0,), register=None)
self.add_page(page)
@aeidon.deco.export
def _on_notebook_drag_data_received(self, notebook, context, x, y,
selection_data, info, time):
"""Open main files from dragged URIs."""
uris = selection_data.get_uris()
paths = list(map(aeidon.util.uri_to_path, uris))
videos = list(filter(aeidon.util.is_video_file, paths))
subtitles = list(set(paths) - set(videos))
self.open_main(subtitles)
if self.get_current_page() and len(videos) == 1:
self.load_video(videos[0])
@aeidon.deco.export
@aeidon.deco.silent(gaupol.Default)
def _on_open_main_files_activate(self, *args):
"""Open main files."""
doc = aeidon.documents.MAIN
paths, encoding = self._select_files(_("Open"), doc)
self.open_main(paths, encoding)
@aeidon.deco.export
@aeidon.deco.silent(gaupol.Default)
def _on_open_translation_file_activate(self, *args):
"""Open a translation file."""
page = self.get_current_page()
if page.project.tran_changed:
self._show_translation_warning_dialog(page)
doc = aeidon.documents.TRAN
paths, encoding = self._select_files(_("Open Translation"), doc)
self.open_translation(paths[0], encoding)
@aeidon.deco.export
def _on_select_video_file_activate(self, *args):
"""Select a video file."""
gaupol.util.set_cursor_busy(self.window)
page = self.get_current_page()
path = page.project.video_path
title = _("Select Video")
label = _("_Select")
dialog = gaupol.VideoDialog(self.window, title, label)
if page.project.main_file is not None:
directory = os.path.dirname(page.project.main_file.path)
dialog.set_current_folder(directory)
if page.project.video_path is not None:
dialog.set_filename(page.project.video_path)
gaupol.util.set_cursor_normal(self.window)
response = gaupol.util.run_dialog(dialog)
path = dialog.get_filename()
dialog.destroy()
if response != Gtk.ResponseType.OK: return
page.project.video_path = path
self.update_gui()
@aeidon.deco.export
def _on_split_project_activate(self, *args):
"""Split the current project in two."""
gaupol.util.flash_dialog(gaupol.SplitDialog(self.window, self))
def _open_file(self, path, encodings, doc, check_open=True):
"""Open file at `path` and return corresponding page if successful."""
self._check_file_exists(path)
if check_open:
self._check_file_not_open(path)
self._check_file_size(path)
basename = os.path.basename(path)
page = (gaupol.Page() if doc == aeidon.documents.MAIN
else self.get_current_page())
for encoding in encodings:
with aeidon.util.silent(UnicodeError):
n = self._try_open_file(page, doc, path, encoding)
self._check_sort_count(path, n)
return page
# Report if all codecs failed to decode file.
self._show_encoding_error_dialog(basename)
raise gaupol.Default
@aeidon.deco.export
@aeidon.deco.silent(gaupol.Default)
def open_main(self, path, encoding=None):
"""Open file at `path` as a main file."""
if gaupol.fields.TRAN_TEXT in gaupol.conf.editor.visible_fields:
gaupol.conf.editor.visible_fields.remove(gaupol.fields.TRAN_TEXT)
encodings = self._get_encodings(encoding)
gaupol.util.set_cursor_busy(self.window)
for path in aeidon.util.flatten([path]):
try:
# Skip files that are already open,
# but show a status message when that happens.
self._check_file_not_open(path)
except gaupol.Default:
continue
try:
page = self._open_file(path, encodings, aeidon.documents.MAIN)
except gaupol.Default:
gaupol.util.set_cursor_normal(self.window)
raise # gaupol.Default
self.add_page(page)
format = page.project.main_file.format
self.add_to_recent_files(path, format, aeidon.documents.MAIN)
# Refresh view to get row heights etc. correct.
page.view.set_focus(0, page.view.columns.MAIN_TEXT)
gaupol.util.set_cursor_normal(self.window)
self.update_gui()
@aeidon.deco.export
@aeidon.deco.silent(gaupol.Default)
def open_translation(self, path, encoding=None, align_method=None):
"""Open file at `path` as a translation file."""
if align_method is not None:
gaupol.conf.file.align_method = align_method
encodings = self._get_encodings(encoding)
page = self._open_file(path, encodings, aeidon.documents.TRAN)
gaupol.util.set_cursor_busy(self.window)
col = page.view.columns.TRAN_TEXT
if not page.view.get_column(col).get_visible():
self.get_column_action(gaupol.fields.TRAN_TEXT).activate()
format = page.project.tran_file.format
self.add_to_recent_files(path, format, aeidon.documents.TRAN)
gaupol.util.set_cursor_normal(self.window)
def _select_files(self, title, doc):
"""Show a :class:`gaupol.OpenDialog` to select files."""
gaupol.util.set_cursor_busy(self.window)
dialog = gaupol.OpenDialog(self.window, title, doc)
page = self.get_current_page()
if page is not None and page.project.main_file is not None:
directory = os.path.dirname(page.project.main_file.path)
dialog.set_current_folder(directory)
gaupol.util.set_cursor_normal(self.window)
response = gaupol.util.run_dialog(dialog)
paths = dialog.get_filenames()
encoding = dialog.get_encoding()
dialog.destroy()
gaupol.util.raise_default(response != Gtk.ResponseType.OK)
gaupol.util.iterate_main()
return paths, encoding
def _show_encoding_error_dialog(self, basename):
"""Show an error dialog after failing to decode file."""
title = _('Failed to decode file "{}" with all attempted codecs').format(basename)
message = _("Please try to open the file with a different character encoding.")
dialog = gaupol.ErrorDialog(self.window, title, message)
dialog.add_button(_("_OK"), Gtk.ResponseType.OK)
dialog.set_default_response(Gtk.ResponseType.OK)
gaupol.util.flash_dialog(dialog)
def _show_format_error_dialog(self, basename):
"""Show an error dialog after failing to recognize file format."""
title = _('Failed to recognize format of file "{}"').format(basename)
message = _("Please check that the file you are trying to open is a subtitle file of a format supported by Gaupol.")
dialog = gaupol.ErrorDialog(self.window, title, message)
dialog.add_button(_("_OK"), Gtk.ResponseType.OK)
dialog.set_default_response(Gtk.ResponseType.OK)
gaupol.util.flash_dialog(dialog)
def _show_io_error_dialog(self, basename, message):
"""Show an error dialog after failing to read file."""
title = _('Failed to open file "{}"').format(basename)
dialog = gaupol.ErrorDialog(self.window, title, message)
dialog.add_button(_("_OK"), Gtk.ResponseType.OK)
dialog.set_default_response(Gtk.ResponseType.OK)
gaupol.util.flash_dialog(dialog)
def _show_parse_error_dialog(self, basename, format):
"""Show an error dialog after failing to parse file."""
title = _('Failed to parse file "{}"').format(basename)
message = _("Please check that the file you are trying to open is a valid {} file.").format(format.label)
dialog = gaupol.ErrorDialog(self.window, title, message)
dialog.add_button(_("_OK"), Gtk.ResponseType.OK)
dialog.set_default_response(Gtk.ResponseType.OK)
gaupol.util.flash_dialog(dialog)
def _show_size_warning_dialog(self, basename, size):
"""Show a warning dialog when trying to open a large file."""
title = _('Open abnormally large file "{}"?').format(basename)
message = _("Size of the file is {:.1f} MB, which is abnormally large for a text-based subtitle file. Please, check that you are not trying to open a binary file.").format(size)
dialog = gaupol.WarningDialog(self.window, title, message)
dialog.add_button(_("_Cancel"), Gtk.ResponseType.NO)
dialog.add_button(_("_Open"), Gtk.ResponseType.YES)
dialog.set_default_response(Gtk.ResponseType.NO)
response = gaupol.util.flash_dialog(dialog)
gaupol.util.raise_default(response != Gtk.ResponseType.YES)
def _show_sort_warning_dialog(self, basename, count):
"""Show a warning dialog when subtitles have been sorted."""
title = _('Open unsorted file "{}"?').format(basename)
message = _("The order of {:d} subtitles needs to be changed. If {:d} sounds like a lot, the file may be erroneously composed.")
message = message.format(count, count)
dialog = gaupol.WarningDialog(self.window, title, message)
dialog.add_button(_("_Cancel"), Gtk.ResponseType.NO)
dialog.add_button(_("_Open"), Gtk.ResponseType.YES)
dialog.set_default_response(Gtk.ResponseType.YES)
response = gaupol.util.flash_dialog(dialog)
gaupol.util.raise_default(response != Gtk.ResponseType.YES)
def _show_translation_warning_dialog(self, page):
"""Show a warning dialog if opening a new translation file."""
title = _('Save changes to translation document "{}" before opening a new one?').format(page.get_translation_basename())
message = _("If you don't save, changes will be permanently lost.")
dialog = gaupol.WarningDialog(self.window, title, message)
dialog.add_button(_("Open _Without Saving"), Gtk.ResponseType.NO)
dialog.add_button(_("_Cancel"), Gtk.ResponseType.CANCEL)
dialog.add_button(_("_Save"), Gtk.ResponseType.YES)
dialog.set_default_response(Gtk.ResponseType.YES)
response = gaupol.util.flash_dialog(dialog)
if response == Gtk.ResponseType.YES:
return self.save_translation(page)
gaupol.util.raise_default(response != Gtk.ResponseType.NO)
def _try_open_file(self, page, doc, path, encoding, **kwargs):
"""Try to open file at `path` and return subtitle sort count."""
if encoding == "auto":
encoding = aeidon.encodings.detect(path)
if encoding is None: raise UnicodeError
kwargs["align_method"] = gaupol.conf.file.align_method
basename = os.path.basename(path)
try:
return page.project.open(doc, path, encoding, **kwargs)
except aeidon.FormatError:
self._show_format_error_dialog(basename)
except IOError as error:
self._show_io_error_dialog(basename, str(error))
except aeidon.ParseError:
bom_encoding = aeidon.encodings.detect_bom(path)
encoding = bom_encoding or encoding
with aeidon.util.silent(Exception):
format = aeidon.util.detect_format(path, encoding)
self._show_parse_error_dialog(basename, format)
raise gaupol.Default
|
gpl-3.0
| 3,600,033,596,375,179,000
| 45.944444
| 185
| 0.639516
| false
| 3.712512
| false
| false
| false
|
ULHPC/modules
|
easybuild/easybuild-framework/easybuild/tools/filetools.py
|
1
|
38368
|
# #
# Copyright 2009-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
Set of file tools.
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
@author: Toon Willems (Ghent University)
@author: Ward Poelmans (Ghent University)
@author: Fotis Georgatos (Uni.Lu, NTUA)
"""
import glob
import os
import re
import shutil
import stat
import time
import urllib2
import zlib
from vsc.utils import fancylogger
import easybuild.tools.environment as env
from easybuild.tools.build_log import EasyBuildError, print_msg # import build_log must stay, to use of EasyBuildLog
from easybuild.tools.config import build_option
from easybuild.tools import run
_log = fancylogger.getLogger('filetools', fname=False)
# easyblock class prefix
EASYBLOCK_CLASS_PREFIX = 'EB_'
# character map for encoding strings
STRING_ENCODING_CHARMAP = {
r' ': "_space_",
r'!': "_exclamation_",
r'"': "_quotation_",
r'#': "_hash_",
r'$': "_dollar_",
r'%': "_percent_",
r'&': "_ampersand_",
r'(': "_leftparen_",
r')': "_rightparen_",
r'*': "_asterisk_",
r'+': "_plus_",
r',': "_comma_",
r'-': "_minus_",
r'.': "_period_",
r'/': "_slash_",
r':': "_colon_",
r';': "_semicolon_",
r'<': "_lessthan_",
r'=': "_equals_",
r'>': "_greaterthan_",
r'?': "_question_",
r'@': "_atsign_",
r'[': "_leftbracket_",
r'\'': "_apostrophe_",
r'\\': "_backslash_",
r']': "_rightbracket_",
r'^': "_circumflex_",
r'_': "_underscore_",
r'`': "_backquote_",
r'{': "_leftcurly_",
r'|': "_verticalbar_",
r'}': "_rightcurly_",
r'~': "_tilde_",
}
try:
# preferred over md5/sha modules, but only available in Python 2.5 and more recent
import hashlib
md5_class = hashlib.md5
sha1_class = hashlib.sha1
except ImportError:
import md5, sha
md5_class = md5.md5
sha1_class = sha.sha
# default checksum for source and patch files
DEFAULT_CHECKSUM = 'md5'
# map of checksum types to checksum functions
CHECKSUM_FUNCTIONS = {
'md5': lambda p: calc_block_checksum(p, md5_class()),
'sha1': lambda p: calc_block_checksum(p, sha1_class()),
'adler32': lambda p: calc_block_checksum(p, ZlibChecksum(zlib.adler32)),
'crc32': lambda p: calc_block_checksum(p, ZlibChecksum(zlib.crc32)),
'size': lambda p: os.path.getsize(p),
}
class ZlibChecksum(object):
"""
wrapper class for adler32 and crc32 checksums to
match the interface of the hashlib module
"""
def __init__(self, algorithm):
self.algorithm = algorithm
self.checksum = algorithm(r'') # use the same starting point as the module
self.blocksize = 64 # The same as md5/sha1
def update(self, data):
"""Calculates a new checksum using the old one and the new data"""
self.checksum = self.algorithm(data, self.checksum)
def hexdigest(self):
"""Return hex string of the checksum"""
return '0x%s' % (self.checksum & 0xffffffff)
def read_file(path, log_error=True):
"""Read contents of file at given path, in a robust way."""
f = None
# note: we can't use try-except-finally, because Python 2.4 doesn't support it as a single block
try:
f = open(path, 'r')
txt = f.read()
f.close()
return txt
except IOError, err:
# make sure file handle is always closed
if f is not None:
f.close()
if log_error:
raise EasyBuildError("Failed to read %s: %s", path, err)
else:
return None
def write_file(path, txt, append=False):
"""Write given contents to file at given path (overwrites current file contents!)."""
f = None
# note: we can't use try-except-finally, because Python 2.4 doesn't support it as a single block
try:
mkdir(os.path.dirname(path), parents=True)
if append:
f = open(path, 'a')
else:
f = open(path, 'w')
f.write(txt)
f.close()
except IOError, err:
# make sure file handle is always closed
if f is not None:
f.close()
raise EasyBuildError("Failed to write to %s: %s", path, err)
def remove_file(path):
"""Remove file at specified path."""
try:
if os.path.exists(path):
os.remove(path)
except OSError, err:
raise EasyBuildError("Failed to remove %s: %s", path, err)
def extract_file(fn, dest, cmd=None, extra_options=None, overwrite=False):
"""
Given filename fn, try to extract in directory dest
- returns the directory name in case of success
"""
if not os.path.isfile(fn):
raise EasyBuildError("Can't extract file %s: no such file", fn)
mkdir(dest, parents=True)
# use absolute pathnames from now on
abs_dest = os.path.abspath(dest)
# change working directory
try:
_log.debug("Unpacking %s in directory %s.", fn, abs_dest)
os.chdir(abs_dest)
except OSError, err:
raise EasyBuildError("Can't change to directory %s: %s", abs_dest, err)
if not cmd:
cmd = extract_cmd(fn, overwrite=overwrite)
else:
# complete command template with filename
cmd = cmd % fn
if not cmd:
raise EasyBuildError("Can't extract file %s with unknown filetype", fn)
if extra_options:
cmd = "%s %s" % (cmd, extra_options)
run.run_cmd(cmd, simple=True)
return find_base_dir()
def which(cmd):
"""Return (first) path in $PATH for specified command, or None if command is not found."""
paths = os.environ.get('PATH', '').split(os.pathsep)
for path in paths:
cmd_path = os.path.join(path, cmd)
        # only accept the path if the command is there, and both readable and executable
if os.access(cmd_path, os.R_OK | os.X_OK):
_log.info("Command %s found at %s" % (cmd, cmd_path))
return cmd_path
_log.warning("Could not find command '%s' (with permissions to read/execute it) in $PATH (%s)" % (cmd, paths))
return None
def det_common_path_prefix(paths):
"""Determine common path prefix for a given list of paths."""
if not isinstance(paths, list):
raise EasyBuildError("det_common_path_prefix: argument must be of type list (got %s: %s)", type(paths), paths)
elif not paths:
return None
# initial guess for common prefix
prefix = paths[0]
found_common = False
while not found_common and prefix != os.path.dirname(prefix):
prefix = os.path.dirname(prefix)
found_common = all([p.startswith(prefix) for p in paths])
if found_common:
# prefix may be empty string for relative paths with a non-common prefix
return prefix.rstrip(os.path.sep) or None
else:
return None
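# Examples added for illustration (not part of the original module):
#   det_common_path_prefix(['/a/b/c/d', '/a/b/e']) -> '/a/b'
#   det_common_path_prefix(['/a/b', '/c/d']) -> None (only the root is shared)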
def download_file(filename, url, path):
"""Download a file from the given URL, to the specified path."""
_log.debug("Trying to download %s from %s to %s", filename, url, path)
timeout = build_option('download_timeout')
if timeout is None:
# default to 10sec timeout if none was specified
        # default system timeout (used if nothing is specified) may be infinite (?)
timeout = 10
_log.debug("Using timeout of %s seconds for initiating download" % timeout)
# make sure directory exists
basedir = os.path.dirname(path)
mkdir(basedir, parents=True)
# try downloading, three times max.
downloaded = False
max_attempts = 3
attempt_cnt = 0
while not downloaded and attempt_cnt < max_attempts:
try:
# urllib2 does the right thing for http proxy setups, urllib does not!
url_fd = urllib2.urlopen(url, timeout=timeout)
_log.debug('response code for given url %s: %s' % (url, url_fd.getcode()))
write_file(path, url_fd.read())
_log.info("Downloaded file %s from url %s to %s" % (filename, url, path))
downloaded = True
url_fd.close()
except urllib2.HTTPError as err:
if 400 <= err.code <= 499:
_log.warning("URL %s was not found (HTTP response code %s), not trying again" % (url, err.code))
break
else:
_log.warning("HTTPError occured while trying to download %s to %s: %s" % (url, path, err))
attempt_cnt += 1
except IOError as err:
_log.warning("IOError occurred while trying to download %s to %s: %s" % (url, path, err))
attempt_cnt += 1
except Exception, err:
raise EasyBuildError("Unexpected error occurred when trying to download %s to %s: %s", url, path, err)
if not downloaded and attempt_cnt < max_attempts:
_log.info("Attempt %d of downloading %s to %s failed, trying again..." % (attempt_cnt, url, path))
if downloaded:
_log.info("Successful download of file %s from url %s to path %s" % (filename, url, path))
return path
else:
_log.warning("Download of %s to %s failed, done trying" % (url, path))
return None
def find_easyconfigs(path, ignore_dirs=None):
"""
Find .eb easyconfig files in path
"""
if os.path.isfile(path):
return [path]
if ignore_dirs is None:
ignore_dirs = []
# walk through the start directory, retain all files that end in .eb
files = []
path = os.path.abspath(path)
for dirpath, dirnames, filenames in os.walk(path, topdown=True):
for f in filenames:
if not f.endswith('.eb') or f == 'TEMPLATE.eb':
continue
spec = os.path.join(dirpath, f)
_log.debug("Found easyconfig %s" % spec)
files.append(spec)
# ignore subdirs specified to be ignored by replacing items in dirnames list used by os.walk
dirnames[:] = [d for d in dirnames if not d in ignore_dirs]
return files
def search_file(paths, query, short=False, ignore_dirs=None, silent=False):
"""
Search for a particular file (only prints)
"""
if ignore_dirs is None:
ignore_dirs = ['.git', '.svn']
if not isinstance(ignore_dirs, list):
raise EasyBuildError("search_file: ignore_dirs (%s) should be of type list, not %s",
ignore_dirs, type(ignore_dirs))
# compile regex, case-insensitive
query = re.compile(query, re.I)
var_lines = []
hit_lines = []
var_index = 1
var = None
for path in paths:
hits = []
hit_in_path = False
print_msg("Searching (case-insensitive) for '%s' in %s " % (query.pattern, path), log=_log, silent=silent)
for (dirpath, dirnames, filenames) in os.walk(path, topdown=True):
for filename in filenames:
if query.search(filename):
if not hit_in_path:
var = "CFGS%d" % var_index
var_index += 1
hit_in_path = True
hits.append(os.path.join(dirpath, filename))
# do not consider (certain) hidden directories
# note: we still need to consider e.g., .local !
# replace list elements using [:], so os.walk doesn't process deleted directories
# see http://stackoverflow.com/questions/13454164/os-walk-without-hidden-folders
dirnames[:] = [d for d in dirnames if not d in ignore_dirs]
hits = sorted(hits)
if hits:
common_prefix = det_common_path_prefix(hits)
if short and common_prefix is not None and len(common_prefix) > len(var) * 2:
var_lines.append("%s=%s" % (var, common_prefix))
hit_lines.extend([" * %s" % os.path.join('$%s' % var, fn[len(common_prefix) + 1:]) for fn in hits])
else:
hit_lines.extend([" * %s" % fn for fn in hits])
for line in var_lines + hit_lines:
print_msg(line, log=_log, silent=silent, prefix=False)
def compute_checksum(path, checksum_type=DEFAULT_CHECKSUM):
"""
Compute checksum of specified file.
@param path: Path of file to compute checksum for
@param checksum_type: Type of checksum ('adler32', 'crc32', 'md5' (default), 'sha1', 'size')
"""
if not checksum_type in CHECKSUM_FUNCTIONS:
raise EasyBuildError("Unknown checksum type (%s), supported types are: %s",
checksum_type, CHECKSUM_FUNCTIONS.keys())
try:
checksum = CHECKSUM_FUNCTIONS[checksum_type](path)
except IOError, err:
raise EasyBuildError("Failed to read %s: %s", path, err)
except MemoryError, err:
_log.warning("A memory error occured when computing the checksum for %s: %s" % (path, err))
checksum = 'dummy_checksum_due_to_memory_error'
return checksum
def calc_block_checksum(path, algorithm):
"""Calculate a checksum of a file by reading it into blocks"""
# We pick a blocksize of 16 MB: it's a multiple of the internal
# blocksize of md5/sha1 (64) and gave the best speed results
try:
# in hashlib, blocksize is a class parameter
blocksize = algorithm.blocksize * 262144 # 2^18
except AttributeError, err:
blocksize = 16777216 # 2^24
_log.debug("Using blocksize %s for calculating the checksum" % blocksize)
try:
f = open(path, 'rb')
for block in iter(lambda: f.read(blocksize), r''):
algorithm.update(block)
f.close()
except IOError, err:
raise EasyBuildError("Failed to read %s: %s", path, err)
return algorithm.hexdigest()
def verify_checksum(path, checksums):
"""
Verify checksum of specified file.
    @param path: path of file to verify checksum of
    @param checksums: checksum value(s) (and type, optionally, default is MD5), e.g., 'af314', ('sha', '5ec1b')
"""
# if no checksum is provided, pretend checksum to be valid
if checksums is None:
return True
# make sure we have a list of checksums
if not isinstance(checksums, list):
checksums = [checksums]
for checksum in checksums:
if isinstance(checksum, basestring):
# default checksum type unless otherwise specified is MD5 (most common(?))
typ = DEFAULT_CHECKSUM
elif isinstance(checksum, tuple) and len(checksum) == 2:
typ, checksum = checksum
else:
raise EasyBuildError("Invalid checksum spec '%s', should be a string (MD5) or 2-tuple (type, value).",
checksum)
actual_checksum = compute_checksum(path, typ)
_log.debug("Computed %s checksum for %s: %s (correct checksum: %s)" % (typ, path, actual_checksum, checksum))
if actual_checksum != checksum:
return False
# if we land here, all checksums have been verified to be correct
return True
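# Usage sketch added for illustration (not part of the original module): the checksum
# spec accepted by verify_checksum is either a bare string (treated as MD5) or a
# (type, value) tuple; '/tmp/example.tar.gz' is a placeholder path.
#   md5sum = compute_checksum('/tmp/example.tar.gz') # default checksum type: md5
#   sha1sum = compute_checksum('/tmp/example.tar.gz', 'sha1')
#   verify_checksum('/tmp/example.tar.gz', [md5sum, ('sha1', sha1sum)]) # -> True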
def find_base_dir():
"""
Try to locate a possible new base directory
- this is typically a single subdir, e.g. from untarring a tarball
- when extracting multiple tarballs in the same directory,
expect only the first one to give the correct path
"""
def get_local_dirs_purged():
# e.g. always purge the log directory
ignoreDirs = ["easybuild"]
lst = os.listdir(os.getcwd())
for ignDir in ignoreDirs:
if ignDir in lst:
lst.remove(ignDir)
return lst
lst = get_local_dirs_purged()
new_dir = os.getcwd()
while len(lst) == 1:
new_dir = os.path.join(os.getcwd(), lst[0])
if not os.path.isdir(new_dir):
break
try:
os.chdir(new_dir)
except OSError, err:
raise EasyBuildError("Changing to dir %s from current dir %s failed: %s", new_dir, os.getcwd(), err)
lst = get_local_dirs_purged()
# make sure it's a directory, and not a (single) file that was in a tarball for example
while not os.path.isdir(new_dir):
new_dir = os.path.dirname(new_dir)
_log.debug("Last dir list %s" % lst)
_log.debug("Possible new dir %s found" % new_dir)
return new_dir
def extract_cmd(filepath, overwrite=False):
"""
    Determine the file type of the file at 'filepath', and return the matching extract command
- based on file suffix
- better to use Python magic?
"""
filename = os.path.basename(filepath)
exts = [x.lower() for x in filename.split('.')]
target = '.'.join(exts[:-1])
cmd_tmpl = None
# gzipped or gzipped tarball
if exts[-1] in ['gz']:
if exts[-2] in ['tar']:
# unzip .tar.gz in one go
cmd_tmpl = "tar xzf %(filepath)s"
else:
cmd_tmpl = "gunzip -c %(filepath)s > %(target)s"
elif exts[-1] in ['tgz', 'gtgz']:
cmd_tmpl = "tar xzf %(filepath)s"
# bzipped or bzipped tarball
elif exts[-1] in ['bz2']:
if exts[-2] in ['tar']:
cmd_tmpl = 'tar xjf %(filepath)s'
else:
cmd_tmpl = "bunzip2 %(filepath)s"
elif exts[-1] in ['tbz', 'tbz2', 'tb2']:
cmd_tmpl = "tar xjf %(filepath)s"
# xzipped or xzipped tarball
elif exts[-1] in ['xz']:
if exts[-2] in ['tar']:
cmd_tmpl = "unxz %(filepath)s --stdout | tar x"
else:
cmd_tmpl = "unxz %(filepath)s"
elif exts[-1] in ['txz']:
cmd_tmpl = "unxz %(filepath)s --stdout | tar x"
# tarball
elif exts[-1] in ['tar']:
cmd_tmpl = "tar xf %(filepath)s"
# zip file
elif exts[-1] in ['zip']:
if overwrite:
cmd_tmpl = "unzip -qq -o %(filepath)s"
else:
cmd_tmpl = "unzip -qq %(filepath)s"
if cmd_tmpl is None:
raise EasyBuildError('Unknown file type for file %s (%s)', filepath, exts)
return cmd_tmpl % {'filepath': filepath, 'target': target}
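# Hedged illustration (not part of the original module): extract_cmd() only
# derives a shell command string from the file suffix, so it can be exercised
# without touching the filesystem; the archive names below are made up.
def _example_extract_cmd():
    """Sketch: derive extraction commands for a few common archive types."""
    return [
        extract_cmd('/tmp/foo-1.0.tar.gz'),           # "tar xzf /tmp/foo-1.0.tar.gz"
        extract_cmd('/tmp/bar.txt.bz2'),              # "bunzip2 /tmp/bar.txt.bz2"
        extract_cmd('/tmp/baz.zip', overwrite=True),  # "unzip -qq -o /tmp/baz.zip"
    ]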
def det_patched_files(path=None, txt=None, omit_ab_prefix=False):
"""Determine list of patched files from a patch."""
# expected format: "+++ path/to/patched/file"
# also take into account the 'a/' or 'b/' prefix that may be used
patched_regex = re.compile(r"^\s*\+{3}\s+(?P<ab_prefix>[ab]/)?(?P<file>\S+)", re.M)
if path is not None:
try:
f = open(path, 'r')
txt = f.read()
f.close()
except IOError, err:
raise EasyBuildError("Failed to read patch %s: %s", path, err)
elif txt is None:
raise EasyBuildError("Either a file path or a string representing a patch should be supplied")
patched_files = []
for match in patched_regex.finditer(txt):
patched_file = match.group('file')
if not omit_ab_prefix and match.group('ab_prefix') is not None:
patched_file = match.group('ab_prefix') + patched_file
if patched_file in ['/dev/null']:
_log.debug("Ignoring patched file %s" % patched_file)
else:
patched_files.append(patched_file)
return patched_files
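# Hedged illustration (not part of the original module): det_patched_files()
# also accepts the patch contents as a string via 'txt', so no file is needed;
# the unified diff below is a minimal made-up example.
def _example_det_patched_files():
    """Sketch: list the files touched by a small unified diff."""
    patch_txt = '\n'.join([
        "--- a/src/main.c",
        "+++ b/src/main.c",
        "@@ -1 +1 @@",
        "-old",
        "+new",
    ])
    return det_patched_files(txt=patch_txt)  # ['b/src/main.c']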
def guess_patch_level(patched_files, parent_dir):
"""Guess patch level based on list of patched files and specified directory."""
patch_level = None
for patched_file in patched_files:
        # locate file by stripping off leading directories
tf2 = patched_file.split(os.path.sep)
n_paths = len(tf2)
path_found = False
level = None
for level in range(n_paths):
if os.path.isfile(os.path.join(parent_dir, *tf2[level:])):
path_found = True
break
if path_found:
patch_level = level
break
else:
_log.debug('No match found for %s, trying next patched file...' % patched_file)
return patch_level
def apply_patch(patch_file, dest, fn=None, copy=False, level=None):
"""
Apply a patch to source code in directory dest
- assume unified diff created with "diff -ru old new"
"""
if not os.path.isfile(patch_file):
raise EasyBuildError("Can't find patch %s: no such file", patch_file)
if fn and not os.path.isfile(fn):
raise EasyBuildError("Can't patch file %s: no such file", fn)
if not os.path.isdir(dest):
raise EasyBuildError("Can't patch directory %s: no such directory", dest)
# copy missing files
if copy:
try:
shutil.copy2(patch_file, dest)
_log.debug("Copied patch %s to dir %s" % (patch_file, dest))
return 'ok'
except IOError, err:
raise EasyBuildError("Failed to copy %s to dir %s: %s", patch_file, dest, err)
# use absolute paths
apatch = os.path.abspath(patch_file)
adest = os.path.abspath(dest)
if not level:
# guess value for -p (patch level)
# - based on +++ lines
# - first +++ line that matches an existing file determines guessed level
# - we will try to match that level from current directory
patched_files = det_patched_files(path=apatch)
if not patched_files:
raise EasyBuildError("Can't guess patchlevel from patch %s: no testfile line found in patch", apatch)
patch_level = guess_patch_level(patched_files, adest)
if patch_level is None: # patch_level can also be 0 (zero), so don't use "not patch_level"
# no match
raise EasyBuildError("Can't determine patch level for patch %s from directory %s", patch_file, adest)
else:
_log.debug("Guessed patch level %d for patch %s" % (patch_level, patch_file))
else:
patch_level = level
_log.debug("Using specified patch level %d for patch %s" % (patch_level, patch_file))
try:
os.chdir(adest)
_log.debug("Changing to directory %s" % adest)
except OSError, err:
raise EasyBuildError("Can't change to directory %s: %s", adest, err)
patch_cmd = "patch -b -p%d -i %s" % (patch_level, apatch)
result = run.run_cmd(patch_cmd, simple=True)
if not result:
raise EasyBuildError("Patching with patch %s failed", patch_file)
return result
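# Hedged usage sketch (not part of the original module): applies a patch to an
# unpacked source tree, letting the patch level be guessed from the '+++'
# lines as described above; both paths are hypothetical.
def _example_apply_patch():
    """Sketch: patch an extracted source directory."""
    return apply_patch('/tmp/fix-build.patch', '/tmp/foo-1.0')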
def modify_env(old, new):
"""NO LONGER SUPPORTED: use modify_env from easybuild.tools.environment instead"""
_log.nosupport("moved modify_env to easybuild.tools.environment", "2.0")
def convert_name(name, upper=False):
"""
Converts name so it can be used as variable name
"""
# no regexps
charmap = {
'+': 'plus',
'-': 'min'
}
for ch, new in charmap.items():
name = name.replace(ch, new)
if upper:
return name.upper()
else:
return name
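# Hedged illustration (not part of the original module): convert_name() only
# remaps the characters in 'charmap' above, so the results are predictable.
def _example_convert_name():
    """Sketch: turn software names into valid variable-name fragments."""
    assert convert_name('c++') == 'cplusplus'
    assert convert_name('zlib-1.2', upper=True) == 'ZLIBMIN1.2'
    return convert_name('g++-multilib')  # 'gplusplusminmultilib'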
def adjust_permissions(name, permissionBits, add=True, onlyfiles=False, onlydirs=False, recursive=True,
group_id=None, relative=True, ignore_errors=False):
"""
Add or remove (if add is False) permissionBits from all files (if onlydirs is False)
and directories (if onlyfiles is False) in path
"""
name = os.path.abspath(name)
if recursive:
_log.info("Adjusting permissions recursively for %s" % name)
allpaths = [name]
for root, dirs, files in os.walk(name):
paths = []
if not onlydirs:
paths += files
if not onlyfiles:
paths += dirs
for path in paths:
allpaths.append(os.path.join(root, path))
else:
_log.info("Adjusting permissions for %s" % name)
allpaths = [name]
failed_paths = []
fail_cnt = 0
for path in allpaths:
try:
if relative:
# relative permissions (add or remove)
perms = os.stat(path)[stat.ST_MODE]
if add:
os.chmod(path, perms | permissionBits)
else:
os.chmod(path, perms & ~permissionBits)
else:
# hard permissions bits (not relative)
os.chmod(path, permissionBits)
if group_id:
                # only change the group id if the current gid is different from what we want
cur_gid = os.stat(path).st_gid
if not cur_gid == group_id:
_log.debug("Changing group id of %s to %s" % (path, group_id))
os.chown(path, -1, group_id)
else:
_log.debug("Group id of %s is already OK (%s)" % (path, group_id))
except OSError, err:
if ignore_errors:
# ignore errors while adjusting permissions (for example caused by bad links)
_log.info("Failed to chmod/chown %s (but ignoring it): %s" % (path, err))
fail_cnt += 1
else:
failed_paths.append(path)
if failed_paths:
raise EasyBuildError("Failed to chmod/chown several paths: %s (last error: %s)", failed_paths, err)
    # we ignore some errors, but if there are too many, something is definitely wrong
fail_ratio = fail_cnt / float(len(allpaths))
max_fail_ratio = 0.5
if fail_ratio > max_fail_ratio:
raise EasyBuildError("%.2f%% of permissions/owner operations failed (more than %.2f%%), "
"something must be wrong...", 100 * fail_ratio, 100 * max_fail_ratio)
elif fail_cnt > 0:
_log.debug("%.2f%% of permissions/owner operations failed, ignoring that..." % (100 * fail_ratio))
def patch_perl_script_autoflush(path):
# patch Perl script to enable autoflush,
# so that e.g. run_cmd_qa receives all output to answer questions
txt = read_file(path)
origpath = "%s.eb.orig" % path
write_file(origpath, txt)
_log.debug("Patching Perl script %s for autoflush, original script copied to %s" % (path, origpath))
# force autoflush for Perl print buffer
lines = txt.split('\n')
newtxt = '\n'.join([
lines[0], # shebang line
"\nuse IO::Handle qw();",
"STDOUT->autoflush(1);\n", # extra newline to separate from actual script
] + lines[1:])
write_file(path, newtxt)
def mkdir(path, parents=False, set_gid=None, sticky=None):
"""
Create a directory
    @param path: path of the directory to create
@param parents: create parent directories if needed (mkdir -p)
@param set_gid: set group ID bit, to make subdirectories and files inherit group
    @param sticky: set the sticky bit on this directory (a.k.a. the restricted deletion flag),
                   to prevent users from removing or renaming files in this directory
"""
if set_gid is None:
set_gid = build_option('set_gid_bit')
if sticky is None:
sticky = build_option('sticky_bit')
if not os.path.isabs(path):
path = os.path.abspath(path)
# exit early if path already exists
if not os.path.exists(path):
_log.info("Creating directory %s (parents: %s, set_gid: %s, sticky: %s)", path, parents, set_gid, sticky)
# set_gid and sticky bits are only set on new directories, so we need to determine the existing parent path
existing_parent_path = os.path.dirname(path)
try:
if parents:
# climb up until we hit an existing path or the empty string (for relative paths)
while existing_parent_path and not os.path.exists(existing_parent_path):
existing_parent_path = os.path.dirname(existing_parent_path)
os.makedirs(path)
else:
os.mkdir(path)
except OSError, err:
raise EasyBuildError("Failed to create directory %s: %s", path, err)
# set group ID and sticky bits, if desired
bits = 0
if set_gid:
bits |= stat.S_ISGID
if sticky:
bits |= stat.S_ISVTX
if bits:
try:
new_subdir = path[len(existing_parent_path):].lstrip(os.path.sep)
new_path = os.path.join(existing_parent_path, new_subdir.split(os.path.sep)[0])
adjust_permissions(new_path, bits, add=True, relative=True, recursive=True, onlydirs=True)
except OSError, err:
raise EasyBuildError("Failed to set groud ID/sticky bit: %s", err)
else:
_log.debug("Not creating existing path %s" % path)
def path_matches(path, paths):
"""Check whether given path matches any of the provided paths."""
if not os.path.exists(path):
return False
for somepath in paths:
if os.path.exists(somepath) and os.path.samefile(path, somepath):
return True
return False
def rmtree2(path, n=3):
"""Wrapper around shutil.rmtree to make it more robust when used on NFS mounted file systems."""
ok = False
for i in range(0, n):
try:
shutil.rmtree(path)
ok = True
break
except OSError, err:
_log.debug("Failed to remove path %s with shutil.rmtree at attempt %d: %s" % (path, n, err))
time.sleep(2)
if not ok:
raise EasyBuildError("Failed to remove path %s with shutil.rmtree, even after %d attempts.", path, n)
else:
_log.info("Path %s successfully removed." % path)
def move_logs(src_logfile, target_logfile):
"""Move log file(s)."""
mkdir(os.path.dirname(target_logfile), parents=True)
src_logfile_len = len(src_logfile)
try:
# there may be multiple log files, due to log rotation
app_logs = glob.glob('%s*' % src_logfile)
for app_log in app_logs:
# retain possible suffix
new_log_path = target_logfile + app_log[src_logfile_len:]
# retain old logs
if os.path.exists(new_log_path):
i = 0
oldlog_backup = "%s_%d" % (new_log_path, i)
while os.path.exists(oldlog_backup):
i += 1
oldlog_backup = "%s_%d" % (new_log_path, i)
shutil.move(new_log_path, oldlog_backup)
_log.info("Moved existing log file %s to %s" % (new_log_path, oldlog_backup))
# move log to target path
shutil.move(app_log, new_log_path)
_log.info("Moved log file %s to %s" % (src_logfile, new_log_path))
except (IOError, OSError), err:
raise EasyBuildError("Failed to move log file(s) %s* to new log file %s*: %s" ,
src_logfile, target_logfile, err)
def cleanup(logfile, tempdir, testing):
"""Cleanup the specified log file and the tmp directory"""
if not testing and logfile is not None:
try:
for log in glob.glob('%s*' % logfile):
os.remove(log)
except OSError, err:
raise EasyBuildError("Failed to remove log file(s) %s*: %s", logfile, err)
print_msg('temporary log file(s) %s* have been removed.' % (logfile), log=None, silent=testing)
if not testing and tempdir is not None:
try:
shutil.rmtree(tempdir, ignore_errors=True)
except OSError, err:
raise EasyBuildError("Failed to remove temporary directory %s: %s", tempdir, err)
print_msg('temporary directory %s has been removed.' % (tempdir), log=None, silent=testing)
def copytree(src, dst, symlinks=False, ignore=None):
"""
    Copied from Lib/shutil.py in Python 2.7, since we need this to work with Python 2.4 as well
    (and this code can be improved)...
Recursively copy a directory tree using copy2().
The destination directory must not already exist.
If exception(s) occur, an Error is raised with a list of reasons.
If the optional symlinks flag is true, symbolic links in the
source tree result in symbolic links in the destination tree; if
it is false, the contents of the files pointed to by symbolic
links are copied.
The optional ignore argument is a callable. If given, it
is called with the `src` parameter, which is the directory
being visited by copytree(), and `names` which is the list of
`src` contents, as returned by os.listdir():
callable(src, names) -> ignored_names
Since copytree() is called recursively, the callable will be
called once for each directory that is copied. It returns a
list of names relative to the `src` directory that should
not be copied.
XXX Consider this example code rather than the ultimate tool.
"""
class Error(EnvironmentError):
pass
try:
WindowsError # @UndefinedVariable
except NameError:
WindowsError = None
names = os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
_log.debug("copytree: skipping copy of %s" % ignored_names)
os.makedirs(dst)
errors = []
for name in names:
if name in ignored_names:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if symlinks and os.path.islink(srcname):
linkto = os.readlink(srcname)
os.symlink(linkto, dstname)
elif os.path.isdir(srcname):
copytree(srcname, dstname, symlinks, ignore)
else:
# Will raise a SpecialFileError for unsupported file types
shutil.copy2(srcname, dstname)
# catch the Error from the recursive copytree so that we can
# continue with other files
except Error, err:
errors.extend(err.args[0])
except EnvironmentError, why:
errors.append((srcname, dstname, str(why)))
try:
shutil.copystat(src, dst)
except OSError, why:
if WindowsError is not None and isinstance(why, WindowsError):
# Copying file access times may fail on Windows
pass
else:
errors.extend((src, dst, str(why)))
if errors:
raise Error, errors
def encode_string(name):
"""
This encoding function handles funky software names ad infinitum, like:
example: '0_foo+0x0x#-$__'
becomes: '0_underscore_foo_plus_0x0x_hash__minus__dollar__underscore__underscore_'
The intention is to have a robust escaping mechanism for names like c++, C# et al
It has been inspired by the concepts seen at, but in lowercase style:
* http://fossies.org/dox/netcdf-4.2.1.1/escapes_8c_source.html
* http://celldesigner.org/help/CDH_Species_01.html
* http://research.cs.berkeley.edu/project/sbp/darcsrepo-no-longer-updated/src/edu/berkeley/sbp/misc/ReflectiveWalker.java
and can be extended freely as per ISO/IEC 10646:2012 / Unicode 6.1 names:
* http://www.unicode.org/versions/Unicode6.1.0/
For readability of >2 words, it is suggested to use _CamelCase_ style.
So, yes, '_GreekSmallLetterEtaWithPsiliAndOxia_' *could* indeed be a fully
valid software name; software "electron" in the original spelling anyone? ;-)
"""
# do the character remapping, return same char by default
result = ''.join(map(lambda x: STRING_ENCODING_CHARMAP.get(x, x), name))
return result
def decode_string(name):
"""Decoding function to revert result of encode_string."""
result = name
for (char, escaped_char) in STRING_ENCODING_CHARMAP.items():
result = re.sub(escaped_char, char, result)
return result
def encode_class_name(name):
"""return encoded version of class name"""
return EASYBLOCK_CLASS_PREFIX + encode_string(name)
def decode_class_name(name):
"""Return decoded version of class name."""
if not name.startswith(EASYBLOCK_CLASS_PREFIX):
# name is not encoded, apparently
return name
else:
name = name[len(EASYBLOCK_CLASS_PREFIX):]
return decode_string(name)
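# Hedged illustration (not part of the original module): round-trips a 'funky'
# name through encode_string()/decode_string() and shows the easyblock class
# name helper; the exact escaped form depends on STRING_ENCODING_CHARMAP.
def _example_encode_decode():
    """Sketch: encode a software name and decode it back."""
    encoded = encode_string('c++')
    decoded = decode_string(encoded)  # expected to give back 'c++'
    return encoded, decoded, encode_class_name('zlib')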
def run_cmd(cmd, log_ok=True, log_all=False, simple=False, inp=None, regexp=True, log_output=False, path=None):
"""NO LONGER SUPPORTED: use run_cmd from easybuild.tools.run instead"""
_log.nosupport("run_cmd was moved from easybuild.tools.filetools to easybuild.tools.run", '2.0')
def run_cmd_qa(cmd, qa, no_qa=None, log_ok=True, log_all=False, simple=False, regexp=True, std_qa=None, path=None):
"""NO LONGER SUPPORTED: use run_cmd_qa from easybuild.tools.run instead"""
_log.nosupport("run_cmd_qa was moved from easybuild.tools.filetools to easybuild.tools.run", '2.0')
def parse_log_for_error(txt, regExp=None, stdout=True, msg=None):
"""NO LONGER SUPPORTED: use parse_log_for_error from easybuild.tools.run instead"""
_log.nosupport("parse_log_for_error was moved from easybuild.tools.filetools to easybuild.tools.run", '2.0')
def det_size(path):
"""
Determine total size of given filepath (in bytes).
"""
installsize = 0
try:
# walk install dir to determine total size
for (dirpath, _, filenames) in os.walk(path):
for filename in filenames:
fullpath = os.path.join(dirpath, filename)
if os.path.exists(fullpath):
installsize += os.path.getsize(fullpath)
except OSError, err:
_log.warn("Could not determine install size: %s" % err)
return installsize
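# Hedged usage sketch (not part of the original module): reports the total
# size of a hypothetical installation directory in bytes.
def _example_det_size():
    """Sketch: determine how much disk space an installation occupies."""
    return det_size('/tmp/software/foo/1.0')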
| mit | -7,227,287,605,544,151,000 | 34.362212 | 125 | 0.607225 | false | 3.771923 | false | false | false |
| awadhn/robotframework-run-keyword-async | runKeywordAsync/runKeywordAsync.py | 1 | 3148 |
import sys
import os
import time
from robot.libraries.BuiltIn import BuiltIn
from robot.output.logger import LOGGER
class runKeywordAsync:
def __init__(self):
self._thread_pool = {}
self._last_thread_handle = 1
#self._robot_log_level = BuiltIn().get_variable_value("${LOG_LEVEL}")
def run_method_async(self, keyword, *args, **kwargs):
#BuiltIn().set_log_level("NONE")
handle = self._last_thread_handle
thread = self._threaded_method(keyword, *args, **kwargs)
thread.start()
self._thread_pool[handle] = thread
self._last_thread_handle += 1
return handle
def run_keyword_async(self, keyword, *args):
#BuiltIn().set_log_level("NONE")
handle = self._last_thread_handle
thread = self._threaded(keyword, *args)
thread.start()
self._thread_pool[handle] = thread
self._last_thread_handle += 1
return handle
def wait_async_all(self, timeout=60):
timeout = int(timeout)
results = []
for thread in self._thread_pool:
try:
result = self._thread_pool[thread].result_queue.get(True, timeout)
results.append(result)
except:
#BuiltIn().set_log_level(self._robot_log_level)
for thread in self._thread_pool:
self._thread_pool[thread].terminate()
raise Exception("Process " + str(thread) + " Failed")
#BuiltIn().set_log_level(self._robot_log_level)
self._thread_pool = {}
self._last_thread_handle = 1
return results
def get_async_return(self, handle, timeout=60):
timeout = int(timeout)
if handle in self._thread_pool:
try:
result = self._thread_pool[handle].result_queue.get(True, timeout)
del self._thread_pool[handle]
                # self._robot_log_level is never initialised in __init__, so this call is disabled
                #BuiltIn().set_log_level(self._robot_log_level)
return result
except:
raise Exception("Process " + str(handle) + " Failed")
else:
raise Exception("Passed Process id " + str(handle) + " is not a valid id")
def _threaded_method(self, keyword, *args, **kwargs):
from multiprocessing import Queue
from multiprocessing import Process
def wrapped_f(q, *args, **kwargs):
            ''' Calls the keyword via BuiltIn().call_method and puts the result in a queue '''
ret = BuiltIn().call_method(keyword, *args, **kwargs)
q.put(ret)
q = Queue()
th = Process(target=wrapped_f, args=(q,)+args, kwargs=kwargs)
th.result_queue = q
return th
def _threaded(self, keyword, *args):
from multiprocessing import Queue
from multiprocessing import Process
def wrapped_f(q, *args):
            ''' Runs the keyword via BuiltIn().run_keyword and puts the result in a queue '''
LOGGER.unregister_xml_logger()
ret = BuiltIn().run_keyword(keyword, *args)
q.put(ret)
q = Queue()
th = Process(target=wrapped_f, args=(q,)+args)
th.result_queue = q
return th
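# Hedged usage sketch (not part of the original library): shows how the
# keywords map onto Python calls once Robot Framework has imported this
# library; the keyword name and timeout are illustrative, and the function is
# never called here since it needs a running Robot Framework context.
def _example_usage():
    lib = runKeywordAsync()
    lib.run_keyword_async("Log", "runs in a separate process")
    return lib.wait_async_all(timeout=30)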
| mit | 6,771,067,669,261,232,000 | 34.772727 | 86 | 0.576239 | false | 4.020434 | false | false | false |
| zooniverse/panoptes-cli | panoptes_cli/commands/workflow.py | 1 | 5255 |
import yaml
import click
from panoptes_cli.scripts.panoptes import cli
from panoptes_client import Workflow
@cli.group()
def workflow():
"""Contains commands for managing workflows."""
pass
@workflow.command()
@click.argument('workflow-id', required=False, type=int)
@click.option(
'--project-id',
'-p',
help="List workflows linked to the given project.",
required=False,
type=int,
)
@click.option(
'--quiet',
'-q',
is_flag=True,
help='Only print workflow IDs (omit names).',
)
def ls(workflow_id, project_id, quiet):
"""Lists workflow IDs and names."""
if workflow_id and not project_id:
workflow = Workflow.find(workflow_id)
if quiet:
click.echo(workflow.id)
else:
echo_workflow(workflow)
return
args = {}
if project_id:
args['project_id'] = project_id
if workflow_id:
args['workflow_id'] = workflow_id
workflows = Workflow.where(**args)
if quiet:
click.echo(" ".join([w.id for w in workflows]))
else:
for workflow in workflows:
echo_workflow(workflow)
@workflow.command()
@click.argument('workflow-id', required=True)
def info(workflow_id):
    """Displays details of the given workflow in YAML format."""
    workflow = Workflow.find(workflow_id)
click.echo(yaml.dump(workflow.raw))
@workflow.command(name='retire-subjects')
@click.argument('workflow-id', type=int)
@click.argument('subject-ids', type=int, nargs=-1)
@click.option(
'--reason',
'-r',
help="The reason for retiring the subject.",
type=click.Choice((
'classification_count',
'flagged',
'blank',
'consensus',
'other'
)),
default='other'
)
def retire_subjects(workflow_id, subject_ids, reason):
"""
Retires subjects from the given workflow.
The subjects will no longer be served to volunteers for classification.
"""
workflow = Workflow.find(workflow_id)
workflow.retire_subjects(subject_ids, reason)
@workflow.command(name='add-subject-sets')
@click.argument('workflow-id', type=int)
@click.argument('subject-set-ids', type=int, nargs=-1)
def add_subject_sets(workflow_id, subject_set_ids):
"""Links existing subject sets to the given workflow."""
workflow = Workflow.find(workflow_id)
workflow.add_subject_sets(subject_set_ids)
@workflow.command(name='remove-subject-sets')
@click.argument('workflow-id', type=int)
@click.argument('subject-set-ids', type=int, nargs=-1)
def remove_subject_sets(workflow_id, subject_set_ids):
"""Unlinks the given subject sets from the given workflow."""
workflow = Workflow.find(workflow_id)
workflow.remove_subject_sets(subject_set_ids)
@workflow.command()
@click.argument('workflow-id', type=int)
def activate(workflow_id):
"""Activates the given workflow."""
workflow = Workflow.find(workflow_id)
workflow.active = True
workflow.save()
@workflow.command()
@click.argument('workflow-id', type=int)
def deactivate(workflow_id):
"""Deactivates the given workflow."""
workflow = Workflow.find(workflow_id)
workflow.active = False
workflow.save()
@workflow.command(name="download-classifications")
@click.argument('workflow-id', required=True, type=int)
@click.argument('output-file', required=True, type=click.File('wb'))
@click.option(
'--generate',
'-g',
help="Generates a new export before downloading.",
is_flag=True
)
@click.option(
'--generate-timeout',
'-T',
help=(
"Time in seconds to wait for new export to be ready. Defaults to "
"unlimited. Has no effect unless --generate is given."
),
required=False,
type=int,
)
def download_classifications(
workflow_id,
output_file,
generate,
generate_timeout
):
"""
Downloads a workflow-specific classifications export for the given workflow.
OUTPUT_FILE will be overwritten if it already exists. Set OUTPUT_FILE to -
to output to stdout.
"""
workflow = Workflow.find(workflow_id)
if generate:
click.echo("Generating new export...", err=True)
export = workflow.get_export(
'classifications',
generate=generate,
wait_timeout=generate_timeout
)
with click.progressbar(
export.iter_content(chunk_size=1024),
label='Downloading',
length=(int(export.headers.get('content-length')) / 1024 + 1),
file=click.get_text_stream('stderr'),
) as chunks:
for chunk in chunks:
output_file.write(chunk)
@workflow.command()
@click.option(
'--force',
'-f',
is_flag=True,
help='Delete without asking for confirmation.',
)
@click.argument('workflow-ids', required=True, nargs=-1, type=int)
def delete(force, workflow_ids):
    """Deletes the given workflows, asking for confirmation unless --force is given."""
    for workflow_id in workflow_ids:
workflow = Workflow.find(workflow_id)
if not force:
click.confirm(
'Delete workflow {} ({})?'.format(
workflow_id,
workflow.display_name,
),
abort=True,
)
workflow.delete()
def echo_workflow(workflow):
click.echo(
u'{} {}'.format(
workflow.id,
workflow.display_name
)
)
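# Hedged usage sketch (not part of the original module): drives the 'workflow'
# command group in-process with click's test runner; the project ID is made up.
def _example_cli():
    """Sketch: list a project's workflows without spawning a subprocess."""
    from click.testing import CliRunner
    result = CliRunner().invoke(cli, ['workflow', 'ls', '--project-id', '1234', '--quiet'])
    return result.output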
| apache-2.0 | 8,488,461,908,351,289,000 | 24.028571 | 80 | 0.634634 | false | 3.740214 | false | false | false |
| noironetworks/group-based-policy | gbpservice/contrib/nfp_service/reference_configurator/scripts/configure_fw_rules.py | 1 | 3818 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from subprocess import call
from subprocess import PIPE
from subprocess import Popen
import sys
from oslo_log import log as logging
from oslo_serialization import jsonutils
LOG = logging.getLogger(__name__)
class ConfigureIPtables(object):
def __init__(self, json_blob):
ps = Popen(["sysctl", "net.ipv4.ip_forward"], stdout=PIPE)
output = ps.communicate()[0]
if "0" in output:
LOG.info("Enabling IP forwarding ...")
call(["sysctl", "-w", "net.ipv4.ip_forward=1"])
else:
LOG.info("IP forwarding already enabled")
try:
self.rules_json = jsonutils.loads(json_blob)
except ValueError:
sys.exit('Given json_blob is not a valid json')
def update_chain(self):
ps = Popen(["iptables", "-L"], stdout=PIPE)
output = ps.communicate()[0]
# check if chain is present if not create new chain
if "testchain" not in output:
LOG.info("Creating new chain ...")
call(["iptables", "-F"])
call(["iptables", "-N", "testchain"])
call(
["iptables", "-t", "filter",
"-A", "FORWARD", "-j", "testchain"])
call(["iptables", "-A", "FORWARD", "-j", "DROP"])
# flush chain of existing rules
call(["iptables", "-F", "testchain"])
# return
# Update chain with new rules
LOG.info("Updating chain with new rules ...")
count = 0
for rule in self.rules_json.get('rules'):
LOG.info("adding rule %(count)d", {'count': count})
try:
action_values = ["LOG", "ACCEPT"]
action = rule['action'].upper()
if action not in action_values:
sys.exit(
"Action %s is not valid action! Please enter "
"valid action (LOG or ACCEPT)" % (action))
service = rule['service'].split('/')
except KeyError as e:
sys.exit('KeyError: Rule does not have key %s' % (e))
if len(service) > 1:
ps = Popen(["iptables", "-A", "testchain", "-p", service[
0], "--dport", service[1], "-j", action],
stdout=PIPE)
else:
ps = Popen(
["iptables", "-A", "testchain", "-p", service[0],
"-j", action], stdout=PIPE)
output = ps.communicate()[0]
if output:
LOG.error("Unable to add rule to chain due to: %(msg)s",
{'msg': output})
count = count + 1
ps = Popen(["iptables", "-A", "testchain", "-m", "state", "--state",
"ESTABLISHED,RELATED", "-j", "ACCEPT"], stdout=PIPE)
output = ps.communicate()[0]
if output:
LOG.error("Unable to add rule to chain due to: %(output)s",
{'output': output})
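# Hedged usage sketch (not part of the original script): shows the JSON shape
# update_chain() expects; it is only defined, never called, because running it
# would modify the host's iptables rules. Service/action values are made up.
def _example_update_chain():
    example_blob = ('{"rules": [{"action": "accept", "service": "tcp/80"},'
                    ' {"action": "log", "service": "icmp"}]}')
    ConfigureIPtables(example_blob).update_chain()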
def main():
if len(sys.argv) < 2:
sys.exit('Usage: %s json-blob' % sys.argv[0])
else:
json_blob = sys.argv[1]
test = ConfigureIPtables(json_blob)
test.update_chain()
if __name__ == "__main__":
main()
| apache-2.0 | 8,109,188,629,917,404,000 | 35.711538 | 78 | 0.529073 | false | 4.172678 | true | false | false |
| NixaSoftware/CVis | venv/lib/python2.7/site-packages/pandas/core/indexes/multi.py | 1 | 99739 |
# pylint: disable=E1101,E1103,W0232
import datetime
import warnings
from functools import partial
from sys import getsizeof
import numpy as np
from pandas._libs import index as libindex, lib, Timestamp
from pandas.compat import range, zip, lrange, lzip, map
from pandas.compat.numpy import function as nv
from pandas import compat
from pandas.core.dtypes.common import (
_ensure_int64,
_ensure_platform_int,
is_object_dtype,
is_iterator,
is_list_like,
is_scalar)
from pandas.core.dtypes.missing import isna, array_equivalent
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.common import (_any_not_none,
_values_from_object,
is_bool_indexer,
is_null_slice,
is_true_slices)
import pandas.core.base as base
from pandas.util._decorators import (Appender, cache_readonly,
deprecate, deprecate_kwarg)
import pandas.core.common as com
import pandas.core.missing as missing
import pandas.core.algorithms as algos
from pandas.io.formats.printing import pprint_thing
from pandas.core.config import get_option
from pandas.core.indexes.base import (
Index, _ensure_index,
_get_na_value, InvalidIndexError,
_index_shared_docs)
from pandas.core.indexes.frozen import (
FrozenNDArray, FrozenList, _ensure_frozen)
import pandas.core.indexes.base as ibase
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(
dict(klass='MultiIndex',
target_klass='MultiIndex or list of tuples'))
class MultiIndex(Index):
"""
A multi-level, or hierarchical, index object for pandas objects
Parameters
----------
levels : sequence of arrays
The unique labels for each level
labels : sequence of arrays
Integers for each level designating which label at each location
sortorder : optional int
Level of sortedness (must be lexicographically sorted by that
level)
names : optional sequence of objects
Names for each of the index levels. (name is accepted for compat)
copy : boolean, default False
Copy the meta-data
verify_integrity : boolean, default True
Check that the levels/labels are consistent and valid
Examples
---------
A new ``MultiIndex`` is typically constructed using one of the helper
methods :meth:`MultiIndex.from_arrays`, :meth:`MultiIndex.from_product`
and :meth:`MultiIndex.from_tuples`. For example (using ``.from_arrays``):
>>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
>>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))
MultiIndex(levels=[[1, 2], ['blue', 'red']],
labels=[[0, 0, 1, 1], [1, 0, 1, 0]],
names=['number', 'color'])
See further examples for how to construct a MultiIndex in the doc strings
of the mentioned helper methods.
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/advanced.html>`_ for more.
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex
MultiIndex.from_product : Create a MultiIndex from the cartesian product
of iterables
MultiIndex.from_tuples : Convert list of tuples to a MultiIndex
Index : The base pandas Index type
"""
# initialize to zero-length tuples to make everything work
_typ = 'multiindex'
_names = FrozenList()
_levels = FrozenList()
_labels = FrozenList()
_comparables = ['names']
rename = Index.set_names
def __new__(cls, levels=None, labels=None, sortorder=None, names=None,
copy=False, verify_integrity=True, _set_identity=True,
name=None, **kwargs):
# compat with Index
if name is not None:
names = name
if levels is None or labels is None:
raise TypeError("Must pass both levels and labels")
if len(levels) != len(labels):
raise ValueError('Length of levels and labels must be the same.')
if len(levels) == 0:
raise ValueError('Must pass non-zero number of levels/labels')
result = object.__new__(MultiIndex)
# we've already validated levels and labels, so shortcut here
result._set_levels(levels, copy=copy, validate=False)
result._set_labels(labels, copy=copy, validate=False)
if names is not None:
# handles name validation
result._set_names(names)
if sortorder is not None:
result.sortorder = int(sortorder)
else:
result.sortorder = sortorder
if verify_integrity:
result._verify_integrity()
if _set_identity:
result._reset_identity()
return result
def _verify_integrity(self, labels=None, levels=None):
"""
Parameters
----------
labels : optional list
Labels to check for validity. Defaults to current labels.
levels : optional list
Levels to check for validity. Defaults to current levels.
Raises
------
ValueError
            * if the lengths of levels and labels don't match, or any label would
exceed level bounds
"""
# NOTE: Currently does not check, among other things, that cached
        # nlevels matches, nor that sortorder matches the actual sort order.
labels = labels or self.labels
levels = levels or self.levels
if len(levels) != len(labels):
raise ValueError("Length of levels and labels must match. NOTE:"
" this index is in an inconsistent state.")
label_length = len(self.labels[0])
for i, (level, label) in enumerate(zip(levels, labels)):
if len(label) != label_length:
raise ValueError("Unequal label lengths: %s" %
([len(lab) for lab in labels]))
if len(label) and label.max() >= len(level):
raise ValueError("On level %d, label max (%d) >= length of"
" level (%d). NOTE: this index is in an"
" inconsistent state" % (i, label.max(),
len(level)))
def _get_levels(self):
return self._levels
def _set_levels(self, levels, level=None, copy=False, validate=True,
verify_integrity=False):
# This is NOT part of the levels property because it should be
# externally not allowed to set levels. User beware if you change
# _levels directly
if validate and len(levels) == 0:
raise ValueError('Must set non-zero number of levels.')
if validate and level is None and len(levels) != self.nlevels:
raise ValueError('Length of levels must match number of levels.')
if validate and level is not None and len(levels) != len(level):
raise ValueError('Length of levels must match length of level.')
if level is None:
new_levels = FrozenList(
_ensure_index(lev, copy=copy)._shallow_copy()
for lev in levels)
else:
level = [self._get_level_number(l) for l in level]
new_levels = list(self._levels)
for l, v in zip(level, levels):
new_levels[l] = _ensure_index(v, copy=copy)._shallow_copy()
new_levels = FrozenList(new_levels)
if verify_integrity:
self._verify_integrity(levels=new_levels)
names = self.names
self._levels = new_levels
if any(names):
self._set_names(names)
self._tuples = None
self._reset_cache()
def set_levels(self, levels, level=None, inplace=False,
verify_integrity=True):
"""
Set new levels on MultiIndex. Defaults to returning
new index.
Parameters
----------
levels : sequence or list of sequence
new level(s) to apply
level : int, level name, or sequence of int/level names (default None)
level(s) to set (None for all levels)
inplace : bool
if True, mutates in place
verify_integrity : bool (default True)
if True, checks that levels and labels are compatible
Returns
-------
new index (of same type and class...etc)
Examples
--------
>>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
(2, u'one'), (2, u'two')],
names=['foo', 'bar'])
>>> idx.set_levels([['a','b'], [1,2]])
MultiIndex(levels=[[u'a', u'b'], [1, 2]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'foo', u'bar'])
>>> idx.set_levels(['a','b'], level=0)
MultiIndex(levels=[[u'a', u'b'], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'foo', u'bar'])
>>> idx.set_levels(['a','b'], level='bar')
MultiIndex(levels=[[1, 2], [u'a', u'b']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'foo', u'bar'])
>>> idx.set_levels([['a','b'], [1,2]], level=[0,1])
MultiIndex(levels=[[u'a', u'b'], [1, 2]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'foo', u'bar'])
"""
if level is not None and not is_list_like(level):
if not is_list_like(levels):
raise TypeError("Levels must be list-like")
if is_list_like(levels[0]):
raise TypeError("Levels must be list-like")
level = [level]
levels = [levels]
elif level is None or is_list_like(level):
if not is_list_like(levels) or not is_list_like(levels[0]):
raise TypeError("Levels must be list of lists-like")
if inplace:
idx = self
else:
idx = self._shallow_copy()
idx._reset_identity()
idx._set_levels(levels, level=level, validate=True,
verify_integrity=verify_integrity)
if not inplace:
return idx
# remove me in 0.14 and change to read only property
__set_levels = deprecate("setting `levels` directly",
partial(set_levels, inplace=True,
verify_integrity=True),
alt_name="set_levels")
levels = property(fget=_get_levels, fset=__set_levels)
def _get_labels(self):
return self._labels
def _set_labels(self, labels, level=None, copy=False, validate=True,
verify_integrity=False):
if validate and level is None and len(labels) != self.nlevels:
raise ValueError("Length of labels must match number of levels")
if validate and level is not None and len(labels) != len(level):
raise ValueError('Length of labels must match length of levels.')
if level is None:
new_labels = FrozenList(
_ensure_frozen(lab, lev, copy=copy)._shallow_copy()
for lev, lab in zip(self.levels, labels))
else:
level = [self._get_level_number(l) for l in level]
new_labels = list(self._labels)
for l, lev, lab in zip(level, self.levels, labels):
new_labels[l] = _ensure_frozen(
lab, lev, copy=copy)._shallow_copy()
new_labels = FrozenList(new_labels)
if verify_integrity:
self._verify_integrity(labels=new_labels)
self._labels = new_labels
self._tuples = None
self._reset_cache()
def set_labels(self, labels, level=None, inplace=False,
verify_integrity=True):
"""
Set new labels on MultiIndex. Defaults to returning
new index.
Parameters
----------
labels : sequence or list of sequence
new labels to apply
level : int, level name, or sequence of int/level names (default None)
level(s) to set (None for all levels)
inplace : bool
if True, mutates in place
verify_integrity : bool (default True)
if True, checks that levels and labels are compatible
Returns
-------
new index (of same type and class...etc)
Examples
--------
>>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
(2, u'one'), (2, u'two')],
names=['foo', 'bar'])
>>> idx.set_labels([[1,0,1,0], [0,0,1,1]])
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[1, 0, 1, 0], [0, 0, 1, 1]],
names=[u'foo', u'bar'])
>>> idx.set_labels([1,0,1,0], level=0)
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[1, 0, 1, 0], [0, 1, 0, 1]],
names=[u'foo', u'bar'])
>>> idx.set_labels([0,0,1,1], level='bar')
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 0, 1, 1]],
names=[u'foo', u'bar'])
>>> idx.set_labels([[1,0,1,0], [0,0,1,1]], level=[0,1])
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[1, 0, 1, 0], [0, 0, 1, 1]],
names=[u'foo', u'bar'])
"""
if level is not None and not is_list_like(level):
if not is_list_like(labels):
raise TypeError("Labels must be list-like")
if is_list_like(labels[0]):
raise TypeError("Labels must be list-like")
level = [level]
labels = [labels]
elif level is None or is_list_like(level):
if not is_list_like(labels) or not is_list_like(labels[0]):
raise TypeError("Labels must be list of lists-like")
if inplace:
idx = self
else:
idx = self._shallow_copy()
idx._reset_identity()
idx._set_labels(labels, level=level, verify_integrity=verify_integrity)
if not inplace:
return idx
# remove me in 0.14 and change to readonly property
__set_labels = deprecate("setting labels directly",
partial(set_labels, inplace=True,
verify_integrity=True),
alt_name="set_labels")
labels = property(fget=_get_labels, fset=__set_labels)
def copy(self, names=None, dtype=None, levels=None, labels=None,
deep=False, _set_identity=False, **kwargs):
"""
Make a copy of this object. Names, dtype, levels and labels can be
passed and will be set on new copy.
Parameters
----------
names : sequence, optional
dtype : numpy dtype or pandas type, optional
levels : sequence, optional
labels : sequence, optional
Returns
-------
copy : MultiIndex
Notes
-----
In most cases, there should be no functional difference from using
``deep``, but if ``deep`` is passed it will attempt to deepcopy.
This could be potentially expensive on large MultiIndex objects.
"""
name = kwargs.get('name')
names = self._validate_names(name=name, names=names, deep=deep)
if deep:
from copy import deepcopy
if levels is None:
levels = deepcopy(self.levels)
if labels is None:
labels = deepcopy(self.labels)
else:
if levels is None:
levels = self.levels
if labels is None:
labels = self.labels
return MultiIndex(levels=levels, labels=labels, names=names,
sortorder=self.sortorder, verify_integrity=False,
_set_identity=_set_identity)
def __array__(self, dtype=None):
""" the array interface, return my values """
return self.values
def view(self, cls=None):
""" this is defined as a copy with the same identity """
result = self.copy()
result._id = self._id
return result
def _shallow_copy_with_infer(self, values=None, **kwargs):
# On equal MultiIndexes the difference is empty.
# Therefore, an empty MultiIndex is returned GH13490
if len(values) == 0:
return MultiIndex(levels=[[] for _ in range(self.nlevels)],
labels=[[] for _ in range(self.nlevels)],
**kwargs)
return self._shallow_copy(values, **kwargs)
@Appender(_index_shared_docs['_shallow_copy'])
def _shallow_copy(self, values=None, **kwargs):
if values is not None:
if 'name' in kwargs:
kwargs['names'] = kwargs.pop('name', None)
# discards freq
kwargs.pop('freq', None)
return MultiIndex.from_tuples(values, **kwargs)
return self.view()
@cache_readonly
def dtype(self):
return np.dtype('O')
def _is_memory_usage_qualified(self):
""" return a boolean if we need a qualified .info display """
def f(l):
return 'mixed' in l or 'string' in l or 'unicode' in l
return any([f(l) for l in self._inferred_type_levels])
@Appender(Index.memory_usage.__doc__)
def memory_usage(self, deep=False):
# we are overwriting our base class to avoid
# computing .values here which could materialize
        # a tuple representation unnecessarily
return self._nbytes(deep)
@cache_readonly
def nbytes(self):
""" return the number of bytes in the underlying data """
return self._nbytes(False)
def _nbytes(self, deep=False):
"""
return the number of bytes in the underlying data
deeply introspect the level data if deep=True
include the engine hashtable
        *this is an internal routine*
"""
# for implementations with no useful getsizeof (PyPy)
objsize = 24
level_nbytes = sum((i.memory_usage(deep=deep) for i in self.levels))
label_nbytes = sum((i.nbytes for i in self.labels))
names_nbytes = sum((getsizeof(i, objsize) for i in self.names))
result = level_nbytes + label_nbytes + names_nbytes
# include our engine hashtable
result += self._engine.sizeof(deep=deep)
return result
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value)
"""
attrs = [
('levels', ibase.default_pprint(self._levels,
max_seq_items=False)),
('labels', ibase.default_pprint(self._labels,
max_seq_items=False))]
if _any_not_none(*self.names):
attrs.append(('names', ibase.default_pprint(self.names)))
if self.sortorder is not None:
attrs.append(('sortorder', ibase.default_pprint(self.sortorder)))
return attrs
def _format_space(self):
return "\n%s" % (' ' * (len(self.__class__.__name__) + 1))
def _format_data(self, name=None):
# we are formatting thru the attributes
return None
def __len__(self):
return len(self.labels[0])
def _get_names(self):
return FrozenList(level.name for level in self.levels)
def _set_names(self, names, level=None, validate=True):
"""
sets names on levels. WARNING: mutates!
Note that you generally want to set this *after* changing levels, so
that it only acts on copies
"""
# GH 15110
# Don't allow a single string for names in a MultiIndex
if names is not None and not is_list_like(names):
raise ValueError('Names should be list-like for a MultiIndex')
names = list(names)
if validate and level is not None and len(names) != len(level):
raise ValueError('Length of names must match length of level.')
if validate and level is None and len(names) != self.nlevels:
raise ValueError('Length of names must match number of levels in '
'MultiIndex.')
if level is None:
level = range(self.nlevels)
else:
level = [self._get_level_number(l) for l in level]
# set the name
for l, name in zip(level, names):
self.levels[l].rename(name, inplace=True)
names = property(fset=_set_names, fget=_get_names,
doc="Names of levels in MultiIndex")
def _reference_duplicate_name(self, name):
"""
        Returns True if the name referred to in self.names is duplicated.
"""
# count the times name equals an element in self.names.
return sum(name == n for n in self.names) > 1
def _format_native_types(self, na_rep='nan', **kwargs):
new_levels = []
new_labels = []
# go through the levels and format them
for level, label in zip(self.levels, self.labels):
level = level._format_native_types(na_rep=na_rep, **kwargs)
# add nan values, if there are any
mask = (label == -1)
if mask.any():
nan_index = len(level)
level = np.append(level, na_rep)
label = label.values()
label[mask] = nan_index
new_levels.append(level)
new_labels.append(label)
# reconstruct the multi-index
mi = MultiIndex(levels=new_levels, labels=new_labels, names=self.names,
sortorder=self.sortorder, verify_integrity=False)
return mi.values
@Appender(_index_shared_docs['_get_grouper_for_level'])
def _get_grouper_for_level(self, mapper, level):
indexer = self.labels[level]
level_index = self.levels[level]
if mapper is not None:
# Handle group mapping function and return
level_values = self.levels[level].take(indexer)
grouper = level_values.map(mapper)
return grouper, None, None
labels, uniques = algos.factorize(indexer, sort=True)
if len(uniques) > 0 and uniques[0] == -1:
# Handle NAs
mask = indexer != -1
ok_labels, uniques = algos.factorize(indexer[mask],
sort=True)
labels = np.empty(len(indexer), dtype=indexer.dtype)
labels[mask] = ok_labels
labels[~mask] = -1
if len(uniques) < len(level_index):
# Remove unobserved levels from level_index
level_index = level_index.take(uniques)
grouper = level_index.take(labels)
return grouper, labels, level_index
@property
def _constructor(self):
return MultiIndex.from_tuples
@cache_readonly
def inferred_type(self):
return 'mixed'
@staticmethod
def _from_elements(values, labels=None, levels=None, names=None,
sortorder=None):
return MultiIndex(levels, labels, names, sortorder=sortorder)
def _get_level_number(self, level):
try:
count = self.names.count(level)
if count > 1:
raise ValueError('The name %s occurs multiple times, use a '
'level number' % level)
level = self.names.index(level)
except ValueError:
if not isinstance(level, int):
raise KeyError('Level %s not found' % str(level))
elif level < 0:
level += self.nlevels
if level < 0:
orig_level = level - self.nlevels
raise IndexError('Too many levels: Index has only %d '
'levels, %d is not a valid level number' %
(self.nlevels, orig_level))
# Note: levels are zero-based
elif level >= self.nlevels:
raise IndexError('Too many levels: Index has only %d levels, '
'not %d' % (self.nlevels, level + 1))
return level
_tuples = None
@cache_readonly
def _engine(self):
# choose our engine based on our size
# the hashing based MultiIndex for larger
        # sizes, and the MultiIndexObjectEngine for smaller
# xref: https://github.com/pandas-dev/pandas/pull/16324
l = len(self)
if l > 10000:
return libindex.MultiIndexHashEngine(lambda: self, l)
return libindex.MultiIndexObjectEngine(lambda: self.values, l)
@property
def values(self):
if self._tuples is not None:
return self._tuples
values = []
for lev, lab in zip(self.levels, self.labels):
# Need to box timestamps, etc.
box = hasattr(lev, '_box_values')
# Try to minimize boxing.
if box and len(lev) > len(lab):
taken = lev._box_values(algos.take_1d(lev._values, lab))
elif box:
taken = algos.take_1d(lev._box_values(lev._values), lab,
fill_value=_get_na_value(lev.dtype.type))
else:
taken = algos.take_1d(np.asarray(lev._values), lab)
values.append(taken)
self._tuples = lib.fast_zip(values)
return self._tuples
# fml
@property
def _is_v1(self):
return False
@property
def _is_v2(self):
return False
@property
def _has_complex_internals(self):
# to disable groupby tricks
return True
@cache_readonly
def is_monotonic(self):
"""
return if the index is monotonic increasing (only equal or
increasing) values.
"""
return self.is_monotonic_increasing
@cache_readonly
def is_monotonic_increasing(self):
"""
return if the index is monotonic increasing (only equal or
increasing) values.
"""
# reversed() because lexsort() wants the most significant key last.
values = [self._get_level_values(i).values
for i in reversed(range(len(self.levels)))]
try:
sort_order = np.lexsort(values)
return Index(sort_order).is_monotonic
except TypeError:
# we have mixed types and np.lexsort is not happy
return Index(self.values).is_monotonic
@cache_readonly
def is_monotonic_decreasing(self):
"""
return if the index is monotonic decreasing (only equal or
decreasing) values.
"""
# monotonic decreasing if and only if reverse is monotonic increasing
return self[::-1].is_monotonic_increasing
@cache_readonly
def is_unique(self):
return not self.duplicated().any()
@cache_readonly
def _have_mixed_levels(self):
""" return a boolean list indicated if we have mixed levels """
return ['mixed' in l for l in self._inferred_type_levels]
@cache_readonly
def _inferred_type_levels(self):
""" return a list of the inferred types, one for each level """
return [i.inferred_type for i in self.levels]
@cache_readonly
def _hashed_values(self):
""" return a uint64 ndarray of my hashed values """
from pandas.core.util.hashing import hash_tuples
return hash_tuples(self)
def _hashed_indexing_key(self, key):
"""
validate and return the hash for the provided key
*this is internal for use for the cython routines*
        Parameters
---------
key : string or tuple
Returns
-------
np.uint64
Notes
-----
we need to stringify if we have mixed levels
"""
from pandas.core.util.hashing import hash_tuples, hash_tuple
if not isinstance(key, tuple):
return hash_tuples(key)
if not len(key) == self.nlevels:
raise KeyError
def f(k, stringify):
if stringify and not isinstance(k, compat.string_types):
k = str(k)
return k
key = tuple([f(k, stringify)
for k, stringify in zip(key, self._have_mixed_levels)])
return hash_tuple(key)
@Appender(base._shared_docs['duplicated'] % _index_doc_kwargs)
def duplicated(self, keep='first'):
from pandas.core.sorting import get_group_index
from pandas._libs.hashtable import duplicated_int64
shape = map(len, self.levels)
ids = get_group_index(self.labels, shape, sort=False, xnull=False)
return duplicated_int64(ids, keep)
@Appender(ibase._index_shared_docs['fillna'])
def fillna(self, value=None, downcast=None):
# isna is not implemented for MultiIndex
raise NotImplementedError('isna is not defined for MultiIndex')
@Appender(_index_shared_docs['dropna'])
def dropna(self, how='any'):
nans = [label == -1 for label in self.labels]
if how == 'any':
indexer = np.any(nans, axis=0)
elif how == 'all':
indexer = np.all(nans, axis=0)
else:
raise ValueError("invalid how option: {0}".format(how))
new_labels = [label[~indexer] for label in self.labels]
return self.copy(labels=new_labels, deep=True)
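    # Hedged illustration (not part of the original source): a missing entry is
    # encoded as label -1, so dropna(how='any') drops that row while
    # dropna(how='all') keeps it when the other level is present, e.g.:
    #   >>> mi = MultiIndex.from_arrays([[1, np.nan, 2], ['a', 'b', 'c']])
    #   >>> len(mi.dropna())       # -> 2
    #   >>> len(mi.dropna('all'))  # -> 3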
def get_value(self, series, key):
# somewhat broken encapsulation
from pandas.core.indexing import maybe_droplevels
# Label-based
s = _values_from_object(series)
k = _values_from_object(key)
def _try_mi(k):
# TODO: what if a level contains tuples??
loc = self.get_loc(k)
new_values = series._values[loc]
new_index = self[loc]
new_index = maybe_droplevels(new_index, k)
return series._constructor(new_values, index=new_index,
name=series.name).__finalize__(self)
try:
return self._engine.get_value(s, k)
except KeyError as e1:
try:
return _try_mi(key)
except KeyError:
pass
try:
return libindex.get_value_at(s, k)
except IndexError:
raise
except TypeError:
# generator/iterator-like
if is_iterator(key):
raise InvalidIndexError(key)
else:
raise e1
except Exception: # pragma: no cover
raise e1
except TypeError:
# a Timestamp will raise a TypeError in a multi-index
# rather than a KeyError, try it here
# note that a string that 'looks' like a Timestamp will raise
# a KeyError! (GH5725)
if (isinstance(key, (datetime.datetime, np.datetime64)) or
(compat.PY3 and isinstance(key, compat.string_types))):
try:
return _try_mi(key)
except (KeyError):
raise
except:
pass
try:
return _try_mi(Timestamp(key))
except:
pass
raise InvalidIndexError(key)
def _get_level_values(self, level):
"""
Return vector of label values for requested level,
equal to the length of the index
**this is an internal method**
Parameters
----------
level : int level
Returns
-------
values : ndarray
"""
unique = self.levels[level]
labels = self.labels[level]
filled = algos.take_1d(unique._values, labels,
fill_value=unique._na_value)
values = unique._shallow_copy(filled)
return values
def get_level_values(self, level):
"""
Return vector of label values for requested level,
equal to the length of the index.
Parameters
----------
level : int or str
``level`` is either the integer position of the level in the
MultiIndex, or the name of the level.
Returns
-------
values : Index
``values`` is a level of this MultiIndex converted to
a single :class:`Index` (or subclass thereof).
Examples
---------
Create a MultiIndex:
>>> mi = pd.MultiIndex.from_arrays((list('abc'), list('def')))
>>> mi.names = ['level_1', 'level_2']
Get level values by supplying level as either integer or name:
>>> mi.get_level_values(0)
Index(['a', 'b', 'c'], dtype='object', name='level_1')
>>> mi.get_level_values('level_2')
Index(['d', 'e', 'f'], dtype='object', name='level_2')
"""
level = self._get_level_number(level)
values = self._get_level_values(level)
return values
def format(self, space=2, sparsify=None, adjoin=True, names=False,
na_rep=None, formatter=None):
if len(self) == 0:
return []
stringified_levels = []
for lev, lab in zip(self.levels, self.labels):
na = na_rep if na_rep is not None else _get_na_rep(lev.dtype.type)
if len(lev) > 0:
formatted = lev.take(lab).format(formatter=formatter)
# we have some NA
mask = lab == -1
if mask.any():
formatted = np.array(formatted, dtype=object)
formatted[mask] = na
formatted = formatted.tolist()
else:
# weird all NA case
formatted = [pprint_thing(na if isna(x) else x,
escape_chars=('\t', '\r', '\n'))
for x in algos.take_1d(lev._values, lab)]
stringified_levels.append(formatted)
result_levels = []
for lev, name in zip(stringified_levels, self.names):
level = []
if names:
level.append(pprint_thing(name,
escape_chars=('\t', '\r', '\n'))
if name is not None else '')
level.extend(np.array(lev, dtype=object))
result_levels.append(level)
if sparsify is None:
sparsify = get_option("display.multi_sparse")
if sparsify:
sentinel = ''
# GH3547
# use value of sparsify as sentinel, unless it's an obvious
# "Truthey" value
if sparsify not in [True, 1]:
sentinel = sparsify
# little bit of a kludge job for #1217
result_levels = _sparsify(result_levels, start=int(names),
sentinel=sentinel)
if adjoin:
from pandas.io.formats.format import _get_adjustment
adj = _get_adjustment()
return adj.adjoin(space, *result_levels).split('\n')
else:
return result_levels
def _to_safe_for_reshape(self):
""" convert to object if we are a categorical """
return self.set_levels([i._to_safe_for_reshape() for i in self.levels])
def to_frame(self, index=True):
"""
Create a DataFrame with the levels of the MultiIndex as columns.
.. versionadded:: 0.20.0
Parameters
----------
index : boolean, default True
Set the index of the returned DataFrame as the original MultiIndex.
Returns
-------
DataFrame : a DataFrame containing the original MultiIndex data.
"""
from pandas import DataFrame
result = DataFrame({(name or level):
self._get_level_values(level)
for name, level in
zip(self.names, range(len(self.levels)))},
copy=False)
if index:
result.index = self
return result
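    # Hedged illustration (not part of the original source): each level becomes
    # a column keyed by its name (or by its level number when unnamed), e.g.:
    #   >>> mi = MultiIndex.from_arrays([[1, 2], ['a', 'b']],
    #   ...                             names=['num', 'char'])
    #   >>> mi.to_frame(index=False).shape
    #   (2, 2)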
def to_hierarchical(self, n_repeat, n_shuffle=1):
"""
Return a MultiIndex reshaped to conform to the
shapes given by n_repeat and n_shuffle.
Useful to replicate and rearrange a MultiIndex for combination
with another Index with n_repeat items.
Parameters
----------
n_repeat : int
Number of times to repeat the labels on self
n_shuffle : int
Controls the reordering of the labels. If the result is going
to be an inner level in a MultiIndex, n_shuffle will need to be
            greater than one. The size of each label must be divisible by
n_shuffle.
Returns
-------
MultiIndex
Examples
--------
>>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
(2, u'one'), (2, u'two')])
>>> idx.to_hierarchical(3)
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
"""
levels = self.levels
labels = [np.repeat(x, n_repeat) for x in self.labels]
# Assumes that each label is divisible by n_shuffle
labels = [x.reshape(n_shuffle, -1).ravel(order='F') for x in labels]
names = self.names
return MultiIndex(levels=levels, labels=labels, names=names)
@property
def is_all_dates(self):
return False
def is_lexsorted(self):
"""
Return True if the labels are lexicographically sorted
"""
return self.lexsort_depth == self.nlevels
@cache_readonly
def lexsort_depth(self):
if self.sortorder is not None:
if self.sortorder == 0:
return self.nlevels
else:
return 0
int64_labels = [_ensure_int64(lab) for lab in self.labels]
for k in range(self.nlevels, 0, -1):
if lib.is_lexsorted(int64_labels[:k]):
return k
return 0
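    # A minimal illustration (hedged sketch): lexsort_depth reports how many
    # leading levels are lexicographically sorted, which is what label-based
    # slicing relies on.
    #   >>> import pandas as pd
    #   >>> pd.MultiIndex.from_arrays([[1, 1, 2], ['a', 'b', 'a']]).lexsort_depth
    #   2
    #   >>> pd.MultiIndex.from_arrays([[2, 1, 1], ['a', 'b', 'a']]).lexsort_depth
    #   0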
@classmethod
def from_arrays(cls, arrays, sortorder=None, names=None):
"""
Convert arrays to MultiIndex
Parameters
----------
arrays : list / sequence of array-likes
Each array-like gives one level's value for each data point.
len(arrays) is the number of levels.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level)
Returns
-------
index : MultiIndex
Examples
--------
>>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
>>> MultiIndex.from_arrays(arrays, names=('number', 'color'))
See Also
--------
MultiIndex.from_tuples : Convert list of tuples to MultiIndex
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables
"""
# Check if lengths of all arrays are equal or not,
# raise ValueError, if not
for i in range(1, len(arrays)):
if len(arrays[i]) != len(arrays[i - 1]):
raise ValueError('all arrays must be same length')
from pandas.core.categorical import _factorize_from_iterables
labels, levels = _factorize_from_iterables(arrays)
if names is None:
names = [getattr(arr, "name", None) for arr in arrays]
return MultiIndex(levels=levels, labels=labels, sortorder=sortorder,
names=names, verify_integrity=False)
@classmethod
def from_tuples(cls, tuples, sortorder=None, names=None):
"""
Convert list of tuples to MultiIndex
Parameters
----------
tuples : list / sequence of tuple-likes
Each tuple is the index of one row/column.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level)
Returns
-------
index : MultiIndex
Examples
--------
>>> tuples = [(1, u'red'), (1, u'blue'),
(2, u'red'), (2, u'blue')]
>>> MultiIndex.from_tuples(tuples, names=('number', 'color'))
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables
"""
if len(tuples) == 0:
if names is None:
msg = 'Cannot infer number of levels from empty list'
raise TypeError(msg)
arrays = [[]] * len(names)
elif isinstance(tuples, (np.ndarray, Index)):
if isinstance(tuples, Index):
tuples = tuples._values
arrays = list(lib.tuples_to_object_array(tuples).T)
elif isinstance(tuples, list):
arrays = list(lib.to_object_array_tuples(tuples).T)
else:
arrays = lzip(*tuples)
return MultiIndex.from_arrays(arrays, sortorder=sortorder, names=names)
@classmethod
def from_product(cls, iterables, sortorder=None, names=None):
"""
Make a MultiIndex from the cartesian product of multiple iterables
Parameters
----------
iterables : list / sequence of iterables
Each iterable has unique labels for each level of the index.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of strings or None
Names for the levels in the index.
Returns
-------
index : MultiIndex
Examples
--------
>>> numbers = [0, 1, 2]
>>> colors = [u'green', u'purple']
>>> MultiIndex.from_product([numbers, colors],
names=['number', 'color'])
MultiIndex(levels=[[0, 1, 2], [u'green', u'purple']],
labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]],
names=[u'number', u'color'])
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex
MultiIndex.from_tuples : Convert list of tuples to MultiIndex
"""
from pandas.core.categorical import _factorize_from_iterables
from pandas.core.reshape.util import cartesian_product
labels, levels = _factorize_from_iterables(iterables)
labels = cartesian_product(labels)
return MultiIndex(levels, labels, sortorder=sortorder, names=names)
def _sort_levels_monotonic(self):
"""
.. versionadded:: 0.20.0
This is an *internal* function.
        Create a new MultiIndex from the current one, with the items in each
        level sorted monotonically. This does not actually make the entire
        MultiIndex monotonic, just the levels.
The resulting MultiIndex will have the same outward
appearance, meaning the same .values and ordering. It will also
be .equals() to the original.
Returns
-------
MultiIndex
Examples
--------
>>> i = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]])
>>> i
MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]])
        >>> i._sort_levels_monotonic()
MultiIndex(levels=[['a', 'b'], ['aa', 'bb']],
labels=[[0, 0, 1, 1], [1, 0, 1, 0]])
"""
if self.is_lexsorted() and self.is_monotonic:
return self
new_levels = []
new_labels = []
for lev, lab in zip(self.levels, self.labels):
if lev.is_monotonic:
new_levels.append(lev)
new_labels.append(lab)
continue
# indexer to reorder the levels
indexer = lev.argsort()
lev = lev.take(indexer)
# indexer to reorder the labels
indexer = _ensure_int64(indexer)
ri = lib.get_reverse_indexer(indexer, len(indexer))
lab = algos.take_1d(ri, lab)
new_levels.append(lev)
new_labels.append(lab)
return MultiIndex(new_levels, new_labels,
names=self.names, sortorder=self.sortorder,
verify_integrity=False)
def remove_unused_levels(self):
"""
        Create a new MultiIndex from the current one, removing unused levels,
        meaning levels that are not expressed in the labels.
The resulting MultiIndex will have the same outward
appearance, meaning the same .values and ordering. It will also
be .equals() to the original.
.. versionadded:: 0.20.0
Returns
-------
MultiIndex
Examples
--------
>>> i = pd.MultiIndex.from_product([range(2), list('ab')])
MultiIndex(levels=[[0, 1], ['a', 'b']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]])
>>> i[2:]
MultiIndex(levels=[[0, 1], ['a', 'b']],
labels=[[1, 1], [0, 1]])
The 0 from the first level is not represented
and can be removed
>>> i[2:].remove_unused_levels()
MultiIndex(levels=[[1], ['a', 'b']],
labels=[[0, 0], [0, 1]])
"""
new_levels = []
new_labels = []
changed = False
for lev, lab in zip(self.levels, self.labels):
uniques = algos.unique(lab)
# nothing unused
if len(uniques) == len(lev):
new_levels.append(lev)
new_labels.append(lab)
continue
changed = True
# labels get mapped from uniques to 0:len(uniques)
label_mapping = np.zeros(len(lev))
label_mapping[uniques] = np.arange(len(uniques))
lab = label_mapping[lab]
# new levels are simple
lev = lev.take(uniques)
new_levels.append(lev)
new_labels.append(lab)
result = self._shallow_copy()
if changed:
result._reset_identity()
result._set_levels(new_levels, validate=False)
result._set_labels(new_labels, validate=False)
return result
@property
def nlevels(self):
return len(self.levels)
@property
def levshape(self):
return tuple(len(x) for x in self.levels)
@Appender(_index_shared_docs['__contains__'] % _index_doc_kwargs)
def __contains__(self, key):
hash(key)
try:
self.get_loc(key)
return True
except LookupError:
return False
contains = __contains__
def __reduce__(self):
"""Necessary for making this object picklable"""
d = dict(levels=[lev for lev in self.levels],
labels=[label for label in self.labels],
sortorder=self.sortorder, names=list(self.names))
return ibase._new_Index, (self.__class__, d), None
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, dict):
levels = state.get('levels')
labels = state.get('labels')
sortorder = state.get('sortorder')
names = state.get('names')
elif isinstance(state, tuple):
nd_state, own_state = state
levels, labels, sortorder, names = own_state
self._set_levels([Index(x) for x in levels], validate=False)
self._set_labels(labels)
self._set_names(names)
self.sortorder = sortorder
self._verify_integrity()
self._reset_identity()
def __getitem__(self, key):
if is_scalar(key):
retval = []
for lev, lab in zip(self.levels, self.labels):
if lab[key] == -1:
retval.append(np.nan)
else:
retval.append(lev[lab[key]])
return tuple(retval)
else:
if is_bool_indexer(key):
key = np.asarray(key)
sortorder = self.sortorder
else:
# cannot be sure whether the result will be sorted
sortorder = None
if isinstance(key, Index):
key = np.asarray(key)
new_labels = [lab[key] for lab in self.labels]
return MultiIndex(levels=self.levels, labels=new_labels,
names=self.names, sortorder=sortorder,
verify_integrity=False)
@Appender(_index_shared_docs['take'] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True,
fill_value=None, **kwargs):
nv.validate_take(tuple(), kwargs)
indices = _ensure_platform_int(indices)
taken = self._assert_take_fillable(self.labels, indices,
allow_fill=allow_fill,
fill_value=fill_value,
na_value=-1)
return MultiIndex(levels=self.levels, labels=taken,
names=self.names, verify_integrity=False)
def _assert_take_fillable(self, values, indices, allow_fill=True,
fill_value=None, na_value=None):
""" Internal method to handle NA filling of take """
# only fill if we are passing a non-None fill_value
if allow_fill and fill_value is not None:
if (indices < -1).any():
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
raise ValueError(msg)
taken = [lab.take(indices) for lab in self.labels]
mask = indices == -1
if mask.any():
masked = []
for new_label in taken:
label_values = new_label.values()
label_values[mask] = na_value
masked.append(FrozenNDArray(label_values))
taken = masked
else:
taken = [lab.take(indices) for lab in self.labels]
return taken
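    # A minimal sketch of the take/fill behaviour above (illustrative only):
    # with allow_fill=True and a non-None fill_value, -1 indices become missing
    # entries in every level, which scalar __getitem__ renders as NaN.
    #   >>> import numpy as np
    #   >>> import pandas as pd
    #   >>> mi = pd.MultiIndex.from_arrays([[1, 2], ['a', 'b']])
    #   >>> mi.take([0, -1], allow_fill=True, fill_value=np.nan)[1]
    #   (nan, nan)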
def append(self, other):
"""
        Append a collection of Index objects together
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
"""
if not isinstance(other, (list, tuple)):
other = [other]
if all((isinstance(o, MultiIndex) and o.nlevels >= self.nlevels)
for o in other):
arrays = []
for i in range(self.nlevels):
label = self._get_level_values(i)
appended = [o._get_level_values(i) for o in other]
arrays.append(label.append(appended))
return MultiIndex.from_arrays(arrays, names=self.names)
to_concat = (self.values, ) + tuple(k._values for k in other)
new_tuples = np.concatenate(to_concat)
# if all(isinstance(x, MultiIndex) for x in other):
try:
return MultiIndex.from_tuples(new_tuples, names=self.names)
except:
return Index(new_tuples)
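    # A minimal sketch (illustrative): appending a compatible MultiIndex works
    # level-wise and keeps the names; anything else falls back to tuples.
    #   >>> import pandas as pd
    #   >>> a = pd.MultiIndex.from_tuples([(1, 'x')], names=['n', 'c'])
    #   >>> b = pd.MultiIndex.from_tuples([(2, 'y')], names=['n', 'c'])
    #   >>> list(a.append(b))
    #   [(1, 'x'), (2, 'y')]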
def argsort(self, *args, **kwargs):
return self.values.argsort(*args, **kwargs)
@deprecate_kwarg(old_arg_name='n', new_arg_name='repeats')
def repeat(self, repeats, *args, **kwargs):
nv.validate_repeat(args, kwargs)
return MultiIndex(levels=self.levels,
labels=[label.view(np.ndarray).repeat(repeats)
for label in self.labels], names=self.names,
sortorder=self.sortorder, verify_integrity=False)
def where(self, cond, other=None):
raise NotImplementedError(".where is not supported for "
"MultiIndex operations")
def drop(self, labels, level=None, errors='raise'):
"""
Make new MultiIndex with passed list of labels deleted
Parameters
----------
labels : array-like
Must be a list of tuples
level : int or level name, default None
Returns
-------
dropped : MultiIndex
"""
if level is not None:
return self._drop_from_level(labels, level)
try:
if not isinstance(labels, (np.ndarray, Index)):
labels = com._index_labels_to_array(labels)
indexer = self.get_indexer(labels)
mask = indexer == -1
if mask.any():
if errors != 'ignore':
raise ValueError('labels %s not contained in axis' %
labels[mask])
indexer = indexer[~mask]
except Exception:
pass
inds = []
for label in labels:
try:
loc = self.get_loc(label)
# get_loc returns either an integer, a slice, or a boolean
# mask
if isinstance(loc, int):
inds.append(loc)
elif isinstance(loc, slice):
inds.extend(lrange(loc.start, loc.stop))
elif is_bool_indexer(loc):
if self.lexsort_depth == 0:
warnings.warn('dropping on a non-lexsorted multi-index'
' without a level parameter may impact '
'performance.',
PerformanceWarning,
stacklevel=3)
loc = loc.nonzero()[0]
inds.extend(loc)
else:
msg = 'unsupported indexer of type {}'.format(type(loc))
raise AssertionError(msg)
except KeyError:
if errors != 'ignore':
raise
return self.delete(inds)
def _drop_from_level(self, labels, level):
labels = com._index_labels_to_array(labels)
i = self._get_level_number(level)
index = self.levels[i]
values = index.get_indexer(labels)
mask = ~algos.isin(self.labels[i], values)
return self[mask]
def droplevel(self, level=0):
"""
Return Index with requested level removed. If MultiIndex has only 2
levels, the result will be of Index type not MultiIndex.
Parameters
----------
level : int/level name or list thereof
Notes
-----
Does not check if result index is unique or not
Returns
-------
index : Index or MultiIndex
"""
levels = level
if not isinstance(levels, (tuple, list)):
levels = [level]
new_levels = list(self.levels)
new_labels = list(self.labels)
new_names = list(self.names)
levnums = sorted(self._get_level_number(lev) for lev in levels)[::-1]
for i in levnums:
new_levels.pop(i)
new_labels.pop(i)
new_names.pop(i)
if len(new_levels) == 1:
# set nan if needed
mask = new_labels[0] == -1
result = new_levels[0].take(new_labels[0])
if mask.any():
result = result.putmask(mask, np.nan)
result.name = new_names[0]
return result
else:
return MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
def swaplevel(self, i=-2, j=-1):
"""
Swap level i with level j. Do not change the ordering of anything
Parameters
----------
i, j : int, string (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
-------
swapped : MultiIndex
.. versionchanged:: 0.18.1
The indexes ``i`` and ``j`` are now optional, and default to
the two innermost levels of the index.
"""
new_levels = list(self.levels)
new_labels = list(self.labels)
new_names = list(self.names)
i = self._get_level_number(i)
j = self._get_level_number(j)
new_levels[i], new_levels[j] = new_levels[j], new_levels[i]
new_labels[i], new_labels[j] = new_labels[j], new_labels[i]
new_names[i], new_names[j] = new_names[j], new_names[i]
return MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
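    # A minimal sketch (illustrative): with the defaults the two innermost
    # levels trade places, names included.
    #   >>> import pandas as pd
    #   >>> mi = pd.MultiIndex.from_arrays([[1, 2], ['a', 'b']],
    #   ...                                names=['num', 'let'])
    #   >>> mi.swaplevel().names
    #   FrozenList(['let', 'num'])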
def reorder_levels(self, order):
"""
Rearrange levels using input order. May not drop or duplicate levels
        Parameters
        ----------
        order : list of int or list of str
            List representing the new level order. Reference a level by its
            number (position) or by its name (label).
        """
order = [self._get_level_number(i) for i in order]
if len(order) != self.nlevels:
raise AssertionError('Length of order must be same as '
'number of levels (%d), got %d' %
(self.nlevels, len(order)))
new_levels = [self.levels[i] for i in order]
new_labels = [self.labels[i] for i in order]
new_names = [self.names[i] for i in order]
return MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
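    # A minimal sketch (illustrative): the full permutation must be supplied,
    # either by position or by level name.
    #   >>> import pandas as pd
    #   >>> mi = pd.MultiIndex.from_arrays([[1, 2], ['a', 'b']],
    #   ...                                names=['num', 'let'])
    #   >>> mi.reorder_levels(['let', 'num']).names
    #   FrozenList(['let', 'num'])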
def __getslice__(self, i, j):
return self.__getitem__(slice(i, j))
def _get_labels_for_sorting(self):
"""
        we categorize our labels by using the
        available categories (all, not just observed),
        excluding any missing ones (-1); this is in preparation
        for sorting, where we need to disambiguate that -1 is not
        a valid value
"""
from pandas.core.categorical import Categorical
def cats(label):
return np.arange(np.array(label).max() + 1 if len(label) else 0,
dtype=label.dtype)
return [Categorical.from_codes(label, cats(label), ordered=True)
for label in self.labels]
def sortlevel(self, level=0, ascending=True, sort_remaining=True):
"""
Sort MultiIndex at the requested level. The result will respect the
original ordering of the associated factor at that level.
Parameters
----------
level : list-like, int or str, default 0
If a string is given, must be a name of the level
If list-like must be names or ints of levels.
ascending : boolean, default True
False to sort in descending order
Can also be a list to specify a directed ordering
        sort_remaining : bool, default True
            Sort by the remaining levels after ``level``.
Returns
-------
sorted_index : pd.MultiIndex
Resulting index
indexer : np.ndarray
Indices of output values in original index
"""
from pandas.core.sorting import indexer_from_factorized
if isinstance(level, (compat.string_types, int)):
level = [level]
level = [self._get_level_number(lev) for lev in level]
sortorder = None
# we have a directed ordering via ascending
if isinstance(ascending, list):
if not len(level) == len(ascending):
raise ValueError("level must have same length as ascending")
from pandas.core.sorting import lexsort_indexer
indexer = lexsort_indexer([self.labels[lev] for lev in level],
orders=ascending)
# level ordering
else:
labels = list(self.labels)
shape = list(self.levshape)
# partition labels and shape
primary = tuple(labels.pop(lev - i) for i, lev in enumerate(level))
primshp = tuple(shape.pop(lev - i) for i, lev in enumerate(level))
if sort_remaining:
primary += primary + tuple(labels)
primshp += primshp + tuple(shape)
else:
sortorder = level[0]
indexer = indexer_from_factorized(primary, primshp,
compress=False)
if not ascending:
indexer = indexer[::-1]
indexer = _ensure_platform_int(indexer)
new_labels = [lab.take(indexer) for lab in self.labels]
new_index = MultiIndex(labels=new_labels, levels=self.levels,
names=self.names, sortorder=sortorder,
verify_integrity=False)
return new_index, indexer
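    # A minimal sketch (illustrative): sortlevel returns both the sorted index
    # and the indexer that was applied to obtain it.
    #   >>> import pandas as pd
    #   >>> mi = pd.MultiIndex.from_arrays([[2, 1], ['b', 'a']])
    #   >>> new_mi, indexer = mi.sortlevel(0)
    #   >>> list(new_mi), list(indexer)
    #   ([(1, 'a'), (2, 'b')], [1, 0])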
def _convert_listlike_indexer(self, keyarr, kind=None):
"""
Parameters
----------
keyarr : list-like
Indexer to convert.
Returns
-------
tuple (indexer, keyarr)
            indexer is an ndarray, or None if it cannot be converted
keyarr are tuple-safe keys
"""
indexer, keyarr = super(MultiIndex, self)._convert_listlike_indexer(
keyarr, kind=kind)
# are we indexing a specific level
if indexer is None and len(keyarr) and not isinstance(keyarr[0],
tuple):
level = 0
_, indexer = self.reindex(keyarr, level=level)
# take all
if indexer is None:
indexer = np.arange(len(self))
check = self.levels[0].get_indexer(keyarr)
mask = check == -1
if mask.any():
raise KeyError('%s not in index' % keyarr[mask])
return indexer, keyarr
@Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
method = missing.clean_reindex_fill_method(method)
target = _ensure_index(target)
# empty indexer
if is_list_like(target) and not len(target):
return _ensure_platform_int(np.array([]))
if not isinstance(target, MultiIndex):
try:
target = MultiIndex.from_tuples(target)
except (TypeError, ValueError):
# let's instead try with a straight Index
if method is None:
return Index(self.values).get_indexer(target,
method=method,
limit=limit,
tolerance=tolerance)
if not self.is_unique:
raise Exception('Reindexing only valid with uniquely valued Index '
'objects')
if method == 'pad' or method == 'backfill':
if tolerance is not None:
raise NotImplementedError("tolerance not implemented yet "
'for MultiIndex')
indexer = self._get_fill_indexer(target, method, limit)
elif method == 'nearest':
raise NotImplementedError("method='nearest' not implemented yet "
'for MultiIndex; see GitHub issue 9365')
else:
# we may not compare equally because of hashing if we
# don't have the same dtypes
if self._inferred_type_levels != target._inferred_type_levels:
return Index(self.values).get_indexer(target.values)
indexer = self._engine.get_indexer(target)
return _ensure_platform_int(indexer)
@Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs)
def get_indexer_non_unique(self, target):
return super(MultiIndex, self).get_indexer_non_unique(target)
def reindex(self, target, method=None, level=None, limit=None,
tolerance=None):
"""
Create index with target's values (move/add/delete values as necessary)
Returns
-------
new_index : pd.MultiIndex
Resulting index
indexer : np.ndarray or None
Indices of output values in original index
"""
# GH6552: preserve names when reindexing to non-named target
# (i.e. neither Index nor Series).
preserve_names = not hasattr(target, 'names')
if level is not None:
if method is not None:
raise TypeError('Fill method not supported if level passed')
# GH7774: preserve dtype/tz if target is empty and not an Index.
# target may be an iterator
target = ibase._ensure_has_len(target)
if len(target) == 0 and not isinstance(target, Index):
idx = self.levels[level]
attrs = idx._get_attributes_dict()
attrs.pop('freq', None) # don't preserve freq
target = type(idx)._simple_new(np.empty(0, dtype=idx.dtype),
**attrs)
else:
target = _ensure_index(target)
target, indexer, _ = self._join_level(target, level, how='right',
return_indexers=True,
keep_order=False)
else:
target = _ensure_index(target)
if self.equals(target):
indexer = None
else:
if self.is_unique:
indexer = self.get_indexer(target, method=method,
limit=limit,
tolerance=tolerance)
else:
raise Exception("cannot handle a non-unique multi-index!")
if not isinstance(target, MultiIndex):
if indexer is None:
target = self
elif (indexer >= 0).all():
target = self.take(indexer)
else:
# hopefully?
target = MultiIndex.from_tuples(target)
if (preserve_names and target.nlevels == self.nlevels and
target.names != self.names):
target = target.copy(deep=False)
target.names = self.names
return target, indexer
def get_slice_bound(self, label, side, kind):
if not isinstance(label, tuple):
label = label,
return self._partial_tup_index(label, side=side)
def slice_locs(self, start=None, end=None, step=None, kind=None):
"""
For an ordered MultiIndex, compute the slice locations for input
labels.
The input labels can be tuples representing partial levels, e.g. for a
MultiIndex with 3 levels, you can pass a single value (corresponding to
the first level), or a 1-, 2-, or 3-tuple.
Parameters
----------
start : label or tuple, default None
If None, defaults to the beginning
end : label or tuple
If None, defaults to the end
step : int or None
Slice step
kind : string, optional, defaults None
Returns
-------
(start, end) : (int, int)
Notes
-----
This method only works if the MultiIndex is properly lex-sorted. So,
if only the first 2 levels of a 3-level MultiIndex are lexsorted,
you can only pass two levels to ``.slice_locs``.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abbd'), list('deff')],
... names=['A', 'B'])
Get the slice locations from the beginning of 'b' in the first level
until the end of the multiindex:
>>> mi.slice_locs(start='b')
(1, 4)
Like above, but stop at the end of 'b' in the first level and 'f' in
the second level:
>>> mi.slice_locs(start='b', end=('b', 'f'))
(1, 3)
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
"""
# This function adds nothing to its parent implementation (the magic
# happens in get_slice_bound method), but it adds meaningful doc.
return super(MultiIndex, self).slice_locs(start, end, step, kind=kind)
def _partial_tup_index(self, tup, side='left'):
if len(tup) > self.lexsort_depth:
raise UnsortedIndexError(
'Key length (%d) was greater than MultiIndex'
' lexsort depth (%d)' %
(len(tup), self.lexsort_depth))
n = len(tup)
start, end = 0, len(self)
zipped = zip(tup, self.levels, self.labels)
for k, (lab, lev, labs) in enumerate(zipped):
section = labs[start:end]
if lab not in lev:
if not lev.is_type_compatible(lib.infer_dtype([lab])):
raise TypeError('Level type mismatch: %s' % lab)
# short circuit
loc = lev.searchsorted(lab, side=side)
if side == 'right' and loc >= 0:
loc -= 1
return start + section.searchsorted(loc, side=side)
idx = lev.get_loc(lab)
if k < n - 1:
end = start + section.searchsorted(idx, side='right')
start = start + section.searchsorted(idx, side='left')
else:
return start + section.searchsorted(idx, side=side)
def get_loc(self, key, method=None):
"""
Get location for a label or a tuple of labels as an integer, slice or
boolean mask.
Parameters
----------
key : label or tuple of labels (one for each level)
method : None
Returns
-------
loc : int, slice object or boolean mask
If the key is past the lexsort depth, the return may be a
boolean mask array, otherwise it is always a slice or int.
Examples
        --------
>>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')])
>>> mi.get_loc('b')
slice(1, 3, None)
>>> mi.get_loc(('b', 'e'))
1
Notes
        -----
The key cannot be a slice, list of same-level labels, a boolean mask,
or a sequence of such. If you want to use those, use
:meth:`MultiIndex.get_locs` instead.
See also
--------
Index.get_loc : get_loc method for (single-level) index.
MultiIndex.slice_locs : Get slice location given start label(s) and
end label(s).
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
"""
if method is not None:
raise NotImplementedError('only the default get_loc method is '
'currently supported for MultiIndex')
def _maybe_to_slice(loc):
"""convert integer indexer to boolean mask or slice if possible"""
if not isinstance(loc, np.ndarray) or loc.dtype != 'int64':
return loc
loc = lib.maybe_indices_to_slice(loc, len(self))
if isinstance(loc, slice):
return loc
mask = np.empty(len(self), dtype='bool')
mask.fill(False)
mask[loc] = True
return mask
if not isinstance(key, tuple):
loc = self._get_level_indexer(key, level=0)
return _maybe_to_slice(loc)
keylen = len(key)
if self.nlevels < keylen:
raise KeyError('Key length ({0}) exceeds index depth ({1})'
''.format(keylen, self.nlevels))
if keylen == self.nlevels and self.is_unique:
def _maybe_str_to_time_stamp(key, lev):
if lev.is_all_dates and not isinstance(key, Timestamp):
try:
return Timestamp(key, tz=getattr(lev, 'tz', None))
except Exception:
pass
return key
key = _values_from_object(key)
key = tuple(map(_maybe_str_to_time_stamp, key, self.levels))
return self._engine.get_loc(key)
# -- partial selection or non-unique index
# break the key into 2 parts based on the lexsort_depth of the index;
        # the first part returns a contiguous slice of the index; the 2nd part
# needs linear search within the slice
i = self.lexsort_depth
lead_key, follow_key = key[:i], key[i:]
start, stop = (self.slice_locs(lead_key, lead_key)
if lead_key else (0, len(self)))
if start == stop:
raise KeyError(key)
if not follow_key:
return slice(start, stop)
warnings.warn('indexing past lexsort depth may impact performance.',
PerformanceWarning, stacklevel=10)
loc = np.arange(start, stop, dtype='int64')
for i, k in enumerate(follow_key, len(lead_key)):
mask = self.labels[i][loc] == self.levels[i].get_loc(k)
if not mask.all():
loc = loc[mask]
if not len(loc):
raise KeyError(key)
return (_maybe_to_slice(loc) if len(loc) != stop - start else
slice(start, stop))
def get_loc_level(self, key, level=0, drop_level=True):
"""
Get both the location for the requested label(s) and the
resulting sliced index.
Parameters
----------
key : label or sequence of labels
level : int/level name or list thereof, optional
drop_level : bool, default True
if ``False``, the resulting index will not drop any level.
Returns
-------
loc : A 2-tuple where the elements are:
Element 0: int, slice object or boolean array
Element 1: The resulting sliced multiindex/index. If the key
contains all levels, this will be ``None``.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')],
... names=['A', 'B'])
>>> mi.get_loc_level('b')
(slice(1, 3, None), Index(['e', 'f'], dtype='object', name='B'))
>>> mi.get_loc_level('e', level='B')
(array([False, True, False], dtype=bool),
Index(['b'], dtype='object', name='A'))
>>> mi.get_loc_level(['b', 'e'])
(1, None)
See Also
        --------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such
"""
def maybe_droplevels(indexer, levels, drop_level):
if not drop_level:
return self[indexer]
            # kludge around
orig_index = new_index = self[indexer]
levels = [self._get_level_number(i) for i in levels]
for i in sorted(levels, reverse=True):
try:
new_index = new_index.droplevel(i)
except:
# no dropping here
return orig_index
return new_index
if isinstance(level, (tuple, list)):
if len(key) != len(level):
raise AssertionError('Key for location must have same '
'length as number of levels')
result = None
for lev, k in zip(level, key):
loc, new_index = self.get_loc_level(k, level=lev)
if isinstance(loc, slice):
mask = np.zeros(len(self), dtype=bool)
mask[loc] = True
loc = mask
result = loc if result is None else result & loc
return result, maybe_droplevels(result, level, drop_level)
level = self._get_level_number(level)
# kludge for #1796
if isinstance(key, list):
key = tuple(key)
if isinstance(key, tuple) and level == 0:
try:
if key in self.levels[0]:
indexer = self._get_level_indexer(key, level=level)
new_index = maybe_droplevels(indexer, [0], drop_level)
return indexer, new_index
except TypeError:
pass
if not any(isinstance(k, slice) for k in key):
# partial selection
# optionally get indexer to avoid re-calculation
def partial_selection(key, indexer=None):
if indexer is None:
indexer = self.get_loc(key)
ilevels = [i for i in range(len(key))
if key[i] != slice(None, None)]
return indexer, maybe_droplevels(indexer, ilevels,
drop_level)
if len(key) == self.nlevels:
if self.is_unique:
# here we have a completely specified key, but are
# using some partial string matching here
# GH4758
all_dates = [(l.is_all_dates and
not isinstance(k, compat.string_types))
for k, l in zip(key, self.levels)]
can_index_exactly = any(all_dates)
if (any([l.is_all_dates
for k, l in zip(key, self.levels)]) and
not can_index_exactly):
indexer = self.get_loc(key)
# we have a multiple selection here
if (not isinstance(indexer, slice) or
indexer.stop - indexer.start != 1):
return partial_selection(key, indexer)
key = tuple(self[indexer].tolist()[0])
return (self._engine.get_loc(
_values_from_object(key)), None)
else:
return partial_selection(key)
else:
return partial_selection(key)
else:
indexer = None
for i, k in enumerate(key):
if not isinstance(k, slice):
k = self._get_level_indexer(k, level=i)
if isinstance(k, slice):
# everything
if k.start == 0 and k.stop == len(self):
k = slice(None, None)
else:
k_index = k
if isinstance(k, slice):
if k == slice(None, None):
continue
else:
raise TypeError(key)
if indexer is None:
indexer = k_index
else: # pragma: no cover
indexer &= k_index
if indexer is None:
indexer = slice(None, None)
ilevels = [i for i in range(len(key))
if key[i] != slice(None, None)]
return indexer, maybe_droplevels(indexer, ilevels, drop_level)
else:
indexer = self._get_level_indexer(key, level=level)
return indexer, maybe_droplevels(indexer, [level], drop_level)
def _get_level_indexer(self, key, level=0, indexer=None):
# return an indexer, boolean array or a slice showing where the key is
# in the totality of values
# if the indexer is provided, then use this
level_index = self.levels[level]
labels = self.labels[level]
def convert_indexer(start, stop, step, indexer=indexer, labels=labels):
# given the inputs and the labels/indexer, compute an indexer set
# if we have a provided indexer, then this need not consider
# the entire labels set
r = np.arange(start, stop, step)
if indexer is not None and len(indexer) != len(labels):
                # we have an indexer which maps the locations in the labels
                # that we have already selected (and is not an indexer for the
                # entire set); otherwise this is wasteful, so we only need to
                # examine locations that are in this set. The only magic here
                # is that the result maps back to the set that we have
                # selected
from pandas import Series
mapper = Series(indexer)
indexer = labels.take(_ensure_platform_int(indexer))
result = Series(Index(indexer).isin(r).nonzero()[0])
m = result.map(mapper)._values
else:
m = np.zeros(len(labels), dtype=bool)
m[np.in1d(labels, r,
assume_unique=Index(labels).is_unique)] = True
return m
if isinstance(key, slice):
            # handle a slice, returning a slice if we can
# otherwise a boolean indexer
try:
if key.start is not None:
start = level_index.get_loc(key.start)
else:
start = 0
if key.stop is not None:
stop = level_index.get_loc(key.stop)
else:
stop = len(level_index) - 1
step = key.step
except KeyError:
# we have a partial slice (like looking up a partial date
# string)
start = stop = level_index.slice_indexer(key.start, key.stop,
key.step, kind='loc')
step = start.step
if isinstance(start, slice) or isinstance(stop, slice):
# we have a slice for start and/or stop
# a partial date slicer on a DatetimeIndex generates a slice
# note that the stop ALREADY includes the stopped point (if
# it was a string sliced)
return convert_indexer(start.start, stop.stop, step)
elif level > 0 or self.lexsort_depth == 0 or step is not None:
                # need to have the same semantics here as right-searching,
                # as when we are using a slice
# so include the stop+1 (so we include stop)
return convert_indexer(start, stop + 1, step)
else:
# sorted, so can return slice object -> view
i = labels.searchsorted(start, side='left')
j = labels.searchsorted(stop, side='right')
return slice(i, j, step)
else:
loc = level_index.get_loc(key)
if isinstance(loc, slice):
return loc
elif level > 0 or self.lexsort_depth == 0:
return np.array(labels == loc, dtype=bool)
i = labels.searchsorted(loc, side='left')
j = labels.searchsorted(loc, side='right')
return slice(i, j)
def get_locs(self, seq):
"""
Get location for a given label/slice/list/mask or a sequence of such as
an array of integers.
Parameters
----------
seq : label/slice/list/mask or a sequence of such
You should use one of the above for each level.
If a level should not be used, set it to ``slice(None)``.
Returns
-------
locs : array of integers suitable for passing to iloc
Examples
        --------
>>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')])
>>> mi.get_locs('b')
array([1, 2], dtype=int64)
>>> mi.get_locs([slice(None), ['e', 'f']])
array([1, 2], dtype=int64)
>>> mi.get_locs([[True, False, True], slice('e', 'f')])
array([2], dtype=int64)
See also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.slice_locs : Get slice location given start label(s) and
end label(s).
"""
# must be lexsorted to at least as many levels
true_slices = [i for (i, s) in enumerate(is_true_slices(seq)) if s]
if true_slices and true_slices[-1] >= self.lexsort_depth:
raise UnsortedIndexError('MultiIndex slicing requires the index '
'to be lexsorted: slicing on levels {0}, '
'lexsort depth {1}'
.format(true_slices, self.lexsort_depth))
# indexer
# this is the list of all values that we want to select
n = len(self)
indexer = None
def _convert_to_indexer(r):
# return an indexer
if isinstance(r, slice):
m = np.zeros(n, dtype=bool)
m[r] = True
r = m.nonzero()[0]
elif is_bool_indexer(r):
if len(r) != n:
raise ValueError("cannot index with a boolean indexer "
"that is not the same length as the "
"index")
r = r.nonzero()[0]
from .numeric import Int64Index
return Int64Index(r)
def _update_indexer(idxr, indexer=indexer):
if indexer is None:
indexer = Index(np.arange(n))
if idxr is None:
return indexer
return indexer & idxr
for i, k in enumerate(seq):
if is_bool_indexer(k):
# a boolean indexer, must be the same length!
k = np.asarray(k)
indexer = _update_indexer(_convert_to_indexer(k),
indexer=indexer)
elif is_list_like(k):
# a collection of labels to include from this level (these
# are or'd)
indexers = None
for x in k:
try:
idxrs = _convert_to_indexer(
self._get_level_indexer(x, level=i,
indexer=indexer))
indexers = (idxrs if indexers is None
else indexers | idxrs)
except KeyError:
# ignore not founds
continue
if indexers is not None:
indexer = _update_indexer(indexers, indexer=indexer)
else:
from .numeric import Int64Index
# no matches we are done
return Int64Index([])._values
elif is_null_slice(k):
# empty slice
indexer = _update_indexer(None, indexer=indexer)
elif isinstance(k, slice):
# a slice, include BOTH of the labels
indexer = _update_indexer(_convert_to_indexer(
self._get_level_indexer(k, level=i, indexer=indexer)),
indexer=indexer)
else:
# a single label
indexer = _update_indexer(_convert_to_indexer(
self.get_loc_level(k, level=i, drop_level=False)[0]),
indexer=indexer)
# empty indexer
if indexer is None:
return Int64Index([])._values
return indexer._values
def truncate(self, before=None, after=None):
"""
Slice index between two labels / tuples, return new MultiIndex
Parameters
----------
before : label or tuple, can be partial. Default None
None defaults to start
after : label or tuple, can be partial. Default None
None defaults to end
Returns
-------
truncated : MultiIndex
"""
if after and before and after < before:
raise ValueError('after < before')
i, j = self.levels[0].slice_locs(before, after)
left, right = self.slice_locs(before, after)
new_levels = list(self.levels)
new_levels[0] = new_levels[0][i:j]
new_labels = [lab[left:right] for lab in self.labels]
new_labels[0] = new_labels[0] - i
return MultiIndex(levels=new_levels, labels=new_labels,
verify_integrity=False)
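    # A minimal sketch (illustrative): truncate keeps only the entries whose
    # first-level label lies between `before` and `after`.
    #   >>> import pandas as pd
    #   >>> mi = pd.MultiIndex.from_arrays([[1, 2, 3], list('abc')])
    #   >>> list(mi.truncate(before=2))
    #   [(2, 'b'), (3, 'c')]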
def equals(self, other):
"""
Determines if two MultiIndex objects have the same labeling information
(the levels themselves do not necessarily have to be the same)
See also
--------
equal_levels
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
if not isinstance(other, MultiIndex):
return array_equivalent(self._values,
_values_from_object(_ensure_index(other)))
if self.nlevels != other.nlevels:
return False
if len(self) != len(other):
return False
for i in range(self.nlevels):
slabels = self.labels[i]
slabels = slabels[slabels != -1]
svalues = algos.take_nd(np.asarray(self.levels[i]._values),
slabels, allow_fill=False)
olabels = other.labels[i]
olabels = olabels[olabels != -1]
ovalues = algos.take_nd(np.asarray(other.levels[i]._values),
olabels, allow_fill=False)
            # since we use NaT for both datetime64 and timedelta64
            # we can have a situation where a level is typed, say,
            # timedelta64 in self (IOW it has other values than NaT)
            # but typed datetime64 in other (where it's all NaT)
            # yet these are equivalent
if len(svalues) == 0 and len(ovalues) == 0:
continue
if not array_equivalent(svalues, ovalues):
return False
return True
def equal_levels(self, other):
"""
Return True if the levels of both MultiIndex objects are the same
"""
if self.nlevels != other.nlevels:
return False
for i in range(self.nlevels):
if not self.levels[i].equals(other.levels[i]):
return False
return True
def union(self, other):
"""
Form the union of two MultiIndex objects, sorting if possible
Parameters
----------
other : MultiIndex or array / Index of tuples
Returns
-------
Index
        Examples
        --------
        >>> index.union(index2)
"""
self._assert_can_do_setop(other)
other, result_names = self._convert_can_do_setop(other)
if len(other) == 0 or self.equals(other):
return self
uniq_tuples = lib.fast_unique_multiple([self._values, other._values])
return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0,
names=result_names)
def intersection(self, other):
"""
Form the intersection of two MultiIndex objects, sorting if possible
Parameters
----------
other : MultiIndex or array / Index of tuples
Returns
-------
Index
"""
self._assert_can_do_setop(other)
other, result_names = self._convert_can_do_setop(other)
if self.equals(other):
return self
self_tuples = self._values
other_tuples = other._values
uniq_tuples = sorted(set(self_tuples) & set(other_tuples))
if len(uniq_tuples) == 0:
return MultiIndex(levels=[[]] * self.nlevels,
labels=[[]] * self.nlevels,
names=result_names, verify_integrity=False)
else:
return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0,
names=result_names)
def difference(self, other):
"""
Compute sorted set difference of two MultiIndex objects
Returns
-------
diff : MultiIndex
"""
self._assert_can_do_setop(other)
other, result_names = self._convert_can_do_setop(other)
if len(other) == 0:
return self
if self.equals(other):
return MultiIndex(levels=[[]] * self.nlevels,
labels=[[]] * self.nlevels,
names=result_names, verify_integrity=False)
difference = sorted(set(self._values) - set(other._values))
if len(difference) == 0:
return MultiIndex(levels=[[]] * self.nlevels,
labels=[[]] * self.nlevels,
names=result_names, verify_integrity=False)
else:
return MultiIndex.from_tuples(difference, sortorder=0,
names=result_names)
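    # A minimal sketch of the set operations above (illustrative): union,
    # intersection and difference all operate on the index tuples.
    #   >>> import pandas as pd
    #   >>> a = pd.MultiIndex.from_tuples([(1, 'x'), (2, 'y')])
    #   >>> b = pd.MultiIndex.from_tuples([(2, 'y')])
    #   >>> list(a.difference(b)), list(a.intersection(b))
    #   ([(1, 'x')], [(2, 'y')])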
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
if not is_object_dtype(np.dtype(dtype)):
raise TypeError('Setting %s dtype to anything other than object '
'is not supported' % self.__class__)
elif copy is True:
return self._shallow_copy()
return self
def _convert_can_do_setop(self, other):
result_names = self.names
if not hasattr(other, 'names'):
if len(other) == 0:
other = MultiIndex(levels=[[]] * self.nlevels,
labels=[[]] * self.nlevels,
verify_integrity=False)
else:
msg = 'other must be a MultiIndex or a list of tuples'
try:
other = MultiIndex.from_tuples(other)
except:
raise TypeError(msg)
else:
result_names = self.names if self.names == other.names else None
return other, result_names
def insert(self, loc, item):
"""
Make new MultiIndex inserting new item at location
Parameters
----------
loc : int
item : tuple
Must be same length as number of levels in the MultiIndex
Returns
-------
new_index : Index
"""
# Pad the key with empty strings if lower levels of the key
# aren't specified:
if not isinstance(item, tuple):
item = (item, ) + ('', ) * (self.nlevels - 1)
elif len(item) != self.nlevels:
raise ValueError('Item must have length equal to number of '
'levels.')
new_levels = []
new_labels = []
for k, level, labels in zip(item, self.levels, self.labels):
if k not in level:
# have to insert into level
# must insert at end otherwise you have to recompute all the
# other labels
lev_loc = len(level)
level = level.insert(lev_loc, k)
else:
lev_loc = level.get_loc(k)
new_levels.append(level)
new_labels.append(np.insert(_ensure_int64(labels), loc, lev_loc))
return MultiIndex(levels=new_levels, labels=new_labels,
names=self.names, verify_integrity=False)
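    # A minimal sketch (illustrative): a scalar item is padded with empty
    # strings for the missing levels, as the comment above describes.
    #   >>> import pandas as pd
    #   >>> mi = pd.MultiIndex.from_tuples([(1, 'x')])
    #   >>> mi.insert(1, 2)[1]
    #   (2, '')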
def delete(self, loc):
"""
Make new index with passed location deleted
Returns
-------
new_index : MultiIndex
"""
new_labels = [np.delete(lab, loc) for lab in self.labels]
return MultiIndex(levels=self.levels, labels=new_labels,
names=self.names, verify_integrity=False)
get_major_bounds = slice_locs
__bounds = None
@property
def _bounds(self):
"""
Return or compute and return slice points for level 0, assuming
sortedness
"""
if self.__bounds is None:
inds = np.arange(len(self.levels[0]))
self.__bounds = self.labels[0].searchsorted(inds)
return self.__bounds
def _wrap_joined_index(self, joined, other):
names = self.names if self.names == other.names else None
return MultiIndex.from_tuples(joined, names=names)
@Appender(Index.isin.__doc__)
def isin(self, values, level=None):
if level is None:
values = MultiIndex.from_tuples(values,
names=self.names).values
return algos.isin(self.values, values)
else:
num = self._get_level_number(level)
levs = self.levels[num]
labs = self.labels[num]
sought_labels = levs.isin(values).nonzero()[0]
if levs.size == 0:
return np.zeros(len(labs), dtype=np.bool_)
else:
return np.lib.arraysetops.in1d(labs, sought_labels)
MultiIndex._add_numeric_methods_disabled()
MultiIndex._add_numeric_methods_add_sub_disabled()
MultiIndex._add_logical_methods_disabled()
def _sparsify(label_list, start=0, sentinel=''):
pivoted = lzip(*label_list)
k = len(label_list)
result = pivoted[:start + 1]
prev = pivoted[start]
for cur in pivoted[start + 1:]:
sparse_cur = []
for i, (p, t) in enumerate(zip(prev, cur)):
if i == k - 1:
sparse_cur.append(t)
result.append(sparse_cur)
break
if p == t:
sparse_cur.append(sentinel)
else:
sparse_cur.extend(cur[i:])
result.append(sparse_cur)
break
prev = cur
return lzip(*result)
def _get_na_rep(dtype):
return {np.datetime64: 'NaT', np.timedelta64: 'NaT'}.get(dtype, 'NaN')
|
apache-2.0
| -8,210,810,238,779,362,000
| 34.406106
| 79
| 0.524389
| false
| 4.388569
| false
| false
| false
|
ricardoy/coccimorph
|
coccimorph/content.py
|
1
|
23248
|
import math
import numpy as np
import os
import pandas as pd
from coccimorph.aux import load_image
import cv2
RED = (0, 0, 255)
fowl_species = [
'E. acervulina',
'E. maxima',
'E. brunetti',
'E. mitis',
'E. praecox',
'E. tenella',
'E. necatrix'
]
rabbit_species = [
'E. coecicola',
'E. exigua',
'E. flavescens',
'E. intestinalis',
'E. irresidua',
'E. magna',
'E. media',
'E. perforans',
'E. piriformis',
'E. stiedai',
'E. vejdovskyi'
]
basedir = os.path.dirname(__file__) + '/../prototypes'
def dilate(ima):
'''
Morphological dilation of binary matrix ima using
as default the structuring element(SE)
[0 1 0
1 1 1
0 1 0]
:param ima: a binary array
:return:
'''
dx, dy = ima.shape
se = [[0, 1, 0], [1, 1, 1], [0, 1, 0]]
ima_temp = np.zeros((dx, 500), dtype=np.int)
for m in range(dx):
for n in range(dy):
ima_temp[m, n] = ima[m, n]
for m in range(1, dx - 1):
for n in range(1, dy - 1):
if ima_temp[m, n] == 1:
for i in range(3):
for j in range(3):
mw = m - 1
nw = n - 1
if ima[mw + i, nw + j] == 0:
ima[mw + i, nw + j] = ima[mw + i, nw + j] or se[i][j]
return ima
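# A minimal sketch of dilate (illustrative only, using the module-level numpy
# import): a single foreground pixel grows into the cross-shaped structuring
# element after one dilation.
#   >>> ima = np.zeros((5, 5), dtype=np.int)
#   >>> ima[2, 2] = 1
#   >>> dilate(ima)[1:4, 1:4].tolist()
#   [[0, 1, 0], [1, 1, 1], [0, 1, 0]]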
def erode(ima: np.ndarray):
dx, dy = ima.shape
ima_temp = np.zeros((dx, 500), dtype=np.int)
for m in range(dx):
for n in range(dy):
ima_temp[m, n] = ima[m, n]
for m in range(1, dx - 1):
for n in range(1, dy - 1):
if ima_temp[m, n] == 1:
aux = 1
aux *= ima_temp[m, n]
aux *= ima_temp[m - 1, n]
aux *= ima_temp[m + 1, n]
aux *= ima_temp[m, n - 1]
aux *= ima_temp[m, n + 1]
ima[m, n] = aux
for i in range(dx):
ima[i, 0] = 0
ima[i, dy - 1] = 0
for i in range(dy):
ima[0, i] = 0
ima[dx - 1, i] = 0
return ima
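# A minimal sketch of erode (illustrative only): a pixel survives only if its
# whole 4-neighbourhood is set, so an isolated pixel disappears.
#   >>> ima = np.zeros((5, 5), dtype=np.int)
#   >>> ima[2, 2] = 1
#   >>> int(erode(ima)[2, 2])
#   0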
class FeatureExtractor:
def __init__(self, filename, scale):
self.img = load_image(filename, scale)
self.height, self.width, _ = self.img.shape
self.img_gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
self.ima = np.zeros((self.height, self.width), dtype=np.int)
self.vx = []
self.vy = []
self.wEnt = None
self.obj_entropy = 0.0
self.obj_size = 0.0
self.mcc = None
def set_co_matrix(self, d: int):
aux_mcc = np.zeros((256, 256), dtype=np.int)
ro = 0
for x in range(self.height):
for y in range(self.width-d):
if self.ima[x, y] > 0 and self.ima[x, y + d] > 0:
# if aux_mcc[self.ima[x, y], self.ima[x, y + d]] > 0:
# print(self.ima[x, y], self.ima[x, y + d], aux_mcc[self.ima[x, y], self.ima[x, y + d]])
aux_mcc[self.ima[x, y], self.ima[x, y + d]] += 1
ro += 1
for x in range(self.height):
y = self.width-1
while y > d - 1:
if self.ima[x, y] > 0 and self.ima[x, y - d] > 0:
# if aux_mcc[self.ima[x, y], self.ima[x, y - d]] > 0:
# print(self.ima[x, y], self.ima[x, y - d], aux_mcc[self.ima[x, y], self.ima[x, y - d]])
aux_mcc[self.ima[x, y], self.ima[x, y - d]] += 1
ro += 1
y -= 1
# print('ro', ro)
# self.ima.tofile('/tmp/ima_novo')
self.mcc = aux_mcc / float(ro)
# with open('/tmp/mcc_novo', 'w') as fh:
# for i in range(255):
#
# for j in range(255):
# fh.write('%.14f ' % (self.mcc[i][j]))
#
# fh.write('%.14f' % (self.mcc[i][255]))
# fh.write('\n')
#
# print('soma total mcc', np.sum(aux_mcc), np.std(self.mcc) ** 2)
def mcc_asm(self):
return np.sum(np.power(self.mcc, 2))
def mcc_con(self):
sm = 0.0
for i in range(256):
for j in range(256):
sm += self.mcc[i, j]*(i-j)*(i-j)
return sm
def mcc_idf(self):
sm = 0.0
for i in range(256):
for j in range(256):
sm += self.mcc[i, j] / float(1 + (i-j)*(i-j))
return sm
def mcc_ent(self):
sm = 0.0
for i in range(256):
for j in range(256):
if self.mcc[i, j] > 0:
sm += self.mcc[i, j]*np.log(self.mcc[i, j])
return sm * sm / 2.
# sm = np.sum(self.mcc * np.log(self.mcc))
# return sm * sm / 2.0
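    # Summary of the texture features above (for reference): these are the
    # standard co-occurrence (Haralick-style) descriptors over the normalized
    # matrix self.mcc:
    #   ASM      = sum_ij mcc[i, j] ** 2
    #   contrast = sum_ij mcc[i, j] * (i - j) ** 2
    #   IDF      = sum_ij mcc[i, j] / (1 + (i - j) ** 2)
    #   entropy  = (sum_ij mcc[i, j] * log(mcc[i, j])) ** 2 / 2  (as coded here)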
def eigens(self):
c = np.zeros(4, dtype=np.float)
mean_x = np.average(self.vx)
mean_y = np.average(self.vy)
sum0 = 0.
sum1 = 0.
sum2 = 0.
sum3 = 0.
for i in range(len(self.vx)):
sum0 += (self.vx[i] - mean_x) * (self.vx[i] - mean_x)
sum1 += (self.vx[i] - mean_x) * (self.vy[i] - mean_y)
sum2 += (self.vy[i] - mean_y) * (self.vx[i] - mean_x)
sum3 += (self.vy[i] - mean_y) * (self.vy[i] - mean_y)
n = len(self.vx)
c[0] = sum0/n
c[1] = sum1/n
c[2] = sum2/n
c[3] = sum3/n
k = np.reshape(c, (-1, 2))
# print('k', k)
# compute eigen vectors and eigen values
eigenvalues, eigenvectors = np.linalg.eigh(k)
# print('autovalores', eigenvalues)
#
# print('eigenvectors\n', eigenvectors)
evec_inv = np.linalg.inv(eigenvectors)
# transform to new space using inverse matrix of eigen vectors
vx1 = np.zeros(n, dtype=np.float)
vy1 = np.zeros(n, dtype=np.float)
# print('inversa: ', evec_inv)
for i in range(n):
vx_w = evec_inv[0, 0] * self.vx[i] + evec_inv[0, 1] * self.vy[i]
vy_w = evec_inv[1, 0] * self.vx[i] + evec_inv[1, 1] * self.vy[i]
vx1[i] = vx_w
vy1[i] = vy_w
# vx1 = -1 * vx1
# vy1 = -1 * vy1
# with open('/tmp/novo', 'w') as fh:
# fh.write('valor de vx1\n')
# for blah in vx1:
# fh.write(str(blah))
# fh.write('\n')
# exit()
meanvx1 = np.average(vx1)
meanvy1 = np.average(vy1)
vx1 = vx1 - meanvx1
vy1 = vy1 - meanvy1
vx2 = np.copy(vx1)
vy2 = np.copy(vy1)
# searching for diameters
# highX = np.max(vx1)
# lessX = np.min(vx1)
# highY = np.max(vy1)
# lessY = np.min(vy1)
highX = float('-Inf')
lessX = float('Inf')
highY = float('-Inf')
lessY = float('Inf')
for i in range(len(self.vx)):
if int(vx1[i]) == 0 and vy1[i] > highY:
highY = vy1[i]
if int(vx1[i]) == 0 and vy1[i] < lessY:
lessY = vy1[i]
if int(vy1[i]) == 0 and vx1[i] > highX:
highX = vx1[i]
if int(vy1[i]) == 0 and vx1[i] < lessX:
lessX = vx1[i]
# print('meanvx1', meanvx1, 'meanvy1', meanvy1)
# print('highX', highX, 'lessX', lessX)
# print('highY', highY, 'lessY', lessY)
# print('high diameter', (highY - lessY + 1))
self.high_diameter = highY - lessY + 1
self.less_diameter = highX - lessX + 1
        # reflect according to principal components
if np.abs(int(eigenvalues[0])) > np.abs(int(eigenvalues[1])):
for i in range(n):
vy1[i] = -1. * vy1[i]
vx2[i] = -1. * vx2[i]
else:
for i in range(n):
vx1[i] = -1. * vx1[i]
vy2[i] = -1. * vy2[i]
# translate to original localization
vx1 = vx1 + meanvx1
vy1 = vy1 + meanvy1
vx2 = vx2 + meanvx1
vy2 = vy2 + meanvy1
# return to original base
for i in range(n):
vx_w = eigenvectors[0,0]*vx1[i] + eigenvectors[0,1]*vy1[i]
vy_w = eigenvectors[1,0]*vx1[i] + eigenvectors[1,1]*vy1[i]
vx1[i] = vx_w
vy1[i] = vy_w
vx_w = eigenvectors[0,0]*vx2[i] + eigenvectors[0,1]*vy2[i]
vy_w = eigenvectors[1,0]*vx2[i] + eigenvectors[1,1]*vy2[i]
vx2[i] = vx_w
vy2[i] = vy_w
# compute the symmetry
highX1 = float('-Inf')
highY1 = float('-Inf')
highX2 = float('-Inf')
highY2 = float('-Inf')
for i in range(len(self.vx)):
if int(round(vx1[i])) > highX1:
highX1 = int(round(vx1[i]))
if int(round(vy1[i])) > highY1:
highY1 = int(round(vy1[i]))
if int(round(vx2[i])) > highX2:
highX2 = int(round(vx2[i]))
if int(round(vy2[i])) > highY2:
highY2 = int(round(vy2[i]))
"""
TODO: original program was +3... this and the 500 columns look like
hard constraints over the image size
"""
highX1 += 3
highY1 += 3
highX2 += 3
highY2 += 3
        # create temporary matrices to compute erosion, dilation and the symmetry rate
ima3a = np.zeros((highX1, highY1))
ima3b = np.zeros((highX2, highY2))
try:
assert (np.max(self.vx) < highX1)
except AssertionError:
print('Constraint for max(vx) < highX1 does not hold!')
print(np.max(self.vx), highX1)
try:
assert (np.max(self.vx) < highX2)
except AssertionError as e:
print('Constraint for max(vx) < highX2 does not hold!')
print(np.max(self.vx), highX2)
        # TODO: write a better bound for the image dimensions
ima2a = np.zeros((highX1*2, 500), dtype=np.int)
ima2b = np.zeros((highX2*2, 500), dtype=np.int)
ima4a = np.zeros((highX1*2, 500), dtype=np.int)
ima4b = np.zeros((highX2*2, 500), dtype=np.int)
for i in range(n):
ima2a[int(self.vx[i]), int(self.vy[i])] = 1
ima2b[int(self.vx[i]), int(self.vy[i])] = 1
ima3a[int(np.round(vx1[i])), int(np.round(vy1[i]))] = 1
ima3b[int(np.round(vx2[i])), int(np.round(vy2[i]))] = 1
ima3a = dilate(ima3a)
ima3a = erode(ima3a)
for i in range(highX1):
for j in range(highY1):
ima4a[i, j] = ima2a[i, j] + ima3a[i, j]
ima3b = dilate(ima3b)
ima3b = erode(ima3b)
for i in range(highX2):
for j in range(highY2):
ima4b[i, j] = ima2b[i, j] + ima3b[i, j]
# compute symmetry index for high principal component
sa_one = 0
sa_two = 0
for i in range(highX1):
for j in range(highY1):
if ima4a[i, j] == 1:
sa_one += 1
if ima4a[i, j] == 2:
sa_two += 1
self.sym_high_pc = float(sa_one) / sa_two
# compute symmetry index for less principal component
sa_one = 0
sa_two = 0
for i in range(highX2):
for j in range(highY2):
if ima4b[i, j] == 1:
sa_one += 1
if ima4b[i, j] == 2:
sa_two += 1
self.sym_less_pc = float(sa_one) / sa_two
def _round(self, x):
f = np.vectorize(int)
return f(np.round(x))
def content_read(self, f1, f2, n):
sm = 0.0
x_max = float('-Inf')
x_min = float('Inf')
y_max = float('-Inf')
y_min = float('Inf')
for i in range(n):
if f1[i] > x_max:
x_max = f1[i]
if f1[i] < x_min:
x_min = f1[i]
if f2[i] > y_max:
y_max = f2[i]
if f2[i] < y_min:
y_min = f2[i]
self.ima[int(f1[i]), int(f2[i])] = 1
self.img[int(f1[i]), int(f2[i])] = RED
cx = int(np.average(f1))
cy = int(np.average(f2))
# print(len(f1))
# print(len(f2))
# print('cx:', cx)
# print('cy:', cy)
# print('average:', np.average(self.img_gray))
self.ima[cx][cy] = int(self.img_gray[cx, cy])
# print('centro', int(self.img_gray[cx, cy]))
sm += self.ima[cx][cy] * np.log(self.ima[cx][cy])
self.vx.append(cx)
self.vy.append(cy)
self.wEnt = np.zeros(256, dtype=np.float)
sw2 = 0
# print('x: ', x_min, x_max, "y:", y_min, y_max)
#
# print('size vx:', len(self.vx))
k = 0
while k < len(self.vx):
lx = self.vx[k]
ly = self.vy[k]
            if lx > int(x_min) - 1 and lx < int(x_max) + 1 and ly > int(y_min) - 1 and ly < int(y_max) + 1:
self.contour_and_entropy(lx + 1, ly)
self.contour_and_entropy(lx - 1, ly)
self.contour_and_entropy(lx, ly + 1)
self.contour_and_entropy(lx, ly - 1)
else:
sw2 = 1
k += 1
if sw2 == 0:
sm = 0.0
for i in range(256):
self.wEnt[i] = self.wEnt[i] / float(len(self.vx))
if self.wEnt[i] > 0:
sm = sm + self.wEnt[i] * np.log(self.wEnt[i])
self.obj_entropy = sm*sm/2.0
self.obj_size = len(self.vx)
else:
self.obj_entropy = 0.0
self.obj_size = 0.0
# print('entropy:', self.obj_entropy)
# print('size:', self.obj_size)
#
# print('height', self.height, 'width', self.width)
#
# print('pixel', self.img[65, 135]) # [240 254 243]
# print('gray: ', self.img_gray[65, 135]) # 249 here, 250 c++
# print('pixel', self.img[65, 136])
# print('gray: ', self.img_gray[65, 136])
#
#
# for i in range(self.height):
# print('aaa')
# for j in range(self.width):
# print(i, j, self.ima[i][j], end=', ')
#
# print(self.ima.shape)
def contour_and_entropy(self, i, j):
if self.ima[i, j] == 0:
self.vx.append(i)
self.vy.append(j)
self.ima[i, j] = self.img_gray[i, j]
self.wEnt[self.ima[i, j]] = self.wEnt[self.ima[i, j]] + 1
def generate_similarity_classifier_fowl():
kl = []
for i in range(1, 8):
filename = 'kl9596_%d.txt' % (i)
kl.append(read_csv(basedir, filename))
ml_w = read_csv(basedir, 'ml9596.txt')
acerto_medio = [25.637479,
26.916101,
25.665415,
27.480373,
25.245048,
25.213264,
25.585858]
pw = np.repeat(0.14285, 7)
return ClassificaGauss(kl, ml_w, acerto_medio, pw, fowl_species)
def generate_similarity_classifier_rabbit():
kl = []
for i in range(1, 12):
filename = 'klrabbit_%d.txt' % (i)
kl.append(read_csv(basedir, filename))
ml_w = read_csv(basedir, 'mlrabbit.txt')
acerto_medio = [19.302075,
27.880435,
22.425938,
21.380911,
23.390403,
22.006214,
17.269468,
20.519256,
22.786217,
19.94028,
21.71183]
pw = np.repeat(0.090909091, 11)
return ClassificaGauss(kl, ml_w, acerto_medio, pw, rabbit_species)
class ClassificaGauss(object):
def __init__(self, kl, ml_w, acerto_medio, pw, species):
self.kl = kl
self.ml_w = ml_w
self.acerto_medio = acerto_medio
self.pw = pw
self.species = species
def classify(self, x):
class_density_value = []
for i, kl_w in enumerate(self.kl):
class_density_value.append(self._find_class_density(x, kl_w, i + 1))
# for x in class_density_value:
# print('density:', x)
        # size by the number of species so the rabbit classifier (11 classes)
        # is handled as well as the fowl one (7 classes)
        taxa_acerto = np.zeros(len(self.species), dtype=np.float)
        for i in range(len(self.species)):
if class_density_value[i] > 0.0:
taxa_acerto[i] = class_density_value[i] * 100. / self.acerto_medio[i]
classification = dict()
for i in reversed(np.argsort(taxa_acerto)):
if taxa_acerto[i] > 0.0:
classification[self.species[i]] = taxa_acerto[i]
return classification
def _find_class_density(self, x, kl_w, w_especie):
gx = .0
if not math.isclose(np.linalg.det(kl_w), 0): # det(kl_w) != 0.0
mx = np.zeros((1, 13), dtype=np.float)
mxt = np.zeros((13, 1), dtype=np.float)
for i in range(13):
mx[0, i] = x[i] - self.ml_w[w_especie-1, i]
# print('x_i:', x[i])
# print('subtraendo:', self.ml_w[w_especie-1, i])
# print('mx:', mx[0, i])
mxt[i, 0] = x[i] - self.ml_w[w_especie-1, i]
mx_inv = np.dot(mx, np.linalg.inv(kl_w))
mx_inv_mx = np.dot(mx_inv, mxt)
# print('mx shape', mx.shape)
# print('inv shape', np.linalg.inv(kl_w).shape)
# print('mx_inv', mx_inv.shape)
#
# print('x', x)
# print('mx', mx)
aa = mx_inv_mx[0, 0]
# print('aa:', aa)
bb = np.linalg.det(kl_w)
# print('det:', bb)
cc = np.log(bb)
# cc = round(cc, 4)
# print('log:', cc)
# print ('aa:', aa, ' bb:', bb, ' cc:', cc)
gx = (-0.5) * aa - (0.5 * cc)
if not math.isclose(self.pw[w_especie-1], 0.0):
gx = gx + np.log(self.pw[w_especie-1])
# print('gx: ', gx)
return gx
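# --- Illustrative sketch (not part of the original class) ---
# _find_class_density above evaluates the usual Gaussian discriminant
#   g(x) = -0.5 * (x - mu)^T K^{-1} (x - mu) - 0.5 * ln|K| + ln P(w),
# where mu is the class mean row of ml_w, K the class covariance kl_w and
# P(w) the prior pw.  A compact numpy equivalent, with hypothetical argument
# names (mean, cov, prior), might read:
def _gaussian_discriminant_sketch(x, mean, cov, prior):
    diff = np.asarray(x, dtype=float) - np.asarray(mean, dtype=float)
    maha = float(np.dot(np.dot(diff, np.linalg.inv(cov)), diff))  # Mahalanobis term
    gx = -0.5 * maha - 0.5 * np.log(np.linalg.det(cov))
    if prior > 0.0:
        gx += np.log(prior)
    return gx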
def generate_probability_classifier_rabbit():
fq = []
for i in range(1, 14):
filename = 'freqRabbit_%d.txt' % (i)
fq.append(np.array(read_csv(basedir, filename), dtype=np.float64))
per_w = read_csv(basedir, 'PerRabbit.txt')
vpriori = np.repeat(0.090909091, 11)
return ClassificaProb(fq, per_w, vpriori, rabbit_species)
def generate_probability_classifier_fowl():
fq = []
for i in range(1, 14):
filename = 'freqFowl_%d.txt' % (i)
fq.append(np.array(read_csv(basedir, filename), dtype=np.float64))
per_w = read_csv(basedir, 'PerFowl.txt')
vpriori = np.repeat(0.14285, 7)
return ClassificaProb(fq, per_w, vpriori, fowl_species)
class ClassificaProb:
def __init__(self, fq, per_w, vpriori, species):
self.fq = fq
self.per_w = per_w
self.vpriori = vpriori
self.species = species
self.nclass = len(species)
def classify(self, x):
self._find_posteriori(x, self.fq[0], self.fq[0], 0)
for i in range(1, 13):
self._find_posteriori(x, self.fq[i - 1], self.fq[i], i)
"""
The last frequency matrix stores the final classification results;
detection is done locating the percetil where the last feature is.
Then, the column of the percentil elected is the posterior probability
classification.
"""
wflag = False
taxa_acerto = np.zeros(self.nclass, dtype=np.float)
for wcont in range(self.nclass):
wper = self.per_w[12, wcont]
if not wflag and x[12] <= wper:
for i in range(self.nclass):
taxa_acerto[i] = self.fq[-1][i, wcont] * 100
wflag = True
if not wflag:
"""
If the element is greater than higher value, it is considered
in last percentil
"""
for i in range(self.nclass):
taxa_acerto[i] = self.fq[-1][i, -1] * 100
classification = dict()
for i in reversed(np.argsort(taxa_acerto)):
if taxa_acerto[i] > 1e-4:
classification[self.species[i]] = taxa_acerto[i]
return classification
def _find_posteriori(self, x, fq0, fq2, w_feature):
"""
Computes the posterior probability of the frequency matrix; this approach
is based on the Dirichlet density (frequency and percentiles matrices).
:param x: features vector
:param fq0: previous frequency matrix
:param fq2: current frequency matrix
:param w_feature:
"""
wsum = 0.0
aa = 0.0
wper = 0.0
        # TODO: these loops could probably be simplified
for i in range(self.nclass):
wsum = 0.0
for j in range(self.nclass):
aa = fq2[i, j]
aa = aa * (2.0 / self.nclass)
fq2[i, j] = aa
wsum += aa
for j in range(self.nclass):
aa = fq2[i, j]
if wsum > 0.0:
aa = aa / wsum
fq2[i, j] = aa
if w_feature == 0:
for i in range(self.nclass):
wsum = 0.0
for j in range(self.nclass):
aa = fq2[j, i]
aa = aa * self.vpriori[j]
fq2[j, i] = aa
wsum += aa
for j in range(self.nclass):
aa = fq2[j, i]
if wsum > 0.0:
aa = aa / wsum
fq2[j, i] = aa
else:
wflag = False
for wcont in range(self.nclass):
"""
if the number of features is greater than 0,
the correct percentil was found in the previous matrix
and the column-percentil will be the priori probability
"""
wper = self.per_w[w_feature-1, wcont]
if not wflag and x[w_feature-1] <= wper:
for i in range(self.nclass):
self.vpriori[i] = fq0[i, wcont]
wflag = True
if not wflag:
"""
if the element is greater than the highest value, it is
connsidered in last percentil
"""
for i in range(self.nclass):
self.vpriori[i] = fq0[i, self.nclass-1]
for i in range(self.nclass):
wsum = 0.0
for j in range(self.nclass):
"""
frequency matrix is multiplied by the new priori
probability vector, computed from the previous matrix
"""
aa = fq2[j, i]
aa = aa * self.vpriori[j]
fq2[j, i] = aa
wsum += aa
for j in range(self.nclass):
aa = fq2[j, i]
if wsum > 0.0:
aa = aa / wsum
fq2[j, i] = aa
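# --- Illustrative sketch (not part of the original module) ---
# Assuming `basedir`, the species lists and the kl*/ml*/freq*/Per* data files
# referenced above are available, the two classifiers could be applied to a
# hypothetical 13-element feature vector `x` like this:
def _classify_sample_sketch(x):
    gauss = generate_similarity_classifier_fowl()   # Gaussian similarity classifier
    prob = generate_probability_classifier_fowl()   # percentile/frequency classifier
    return {
        'similarity': gauss.classify(x),   # {species: score}
        'probability': prob.classify(x),
    }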
def read_csv(basedir, filename):
return np.array(pd.read_csv('%s/%s'%(basedir, filename), sep='\s+', header=None).as_matrix(), dtype=np.float64)
|
gpl-3.0
| 6,436,709,685,596,016,000
| 30.843836
| 115
| 0.462789
| false
| 3.116921
| false
| false
| false
|
xju2/hzzws
|
bsubs/submit_limit.py
|
1
|
2128
|
#!/usr/bin/env python
import os
import sys
import commands
workdir = os.getcwd()
submit = True
do_hist = False
do_scalar = True
exe = "/afs/cern.ch/user/x/xju/work/h4l/h4lcode/hzzws/bsubs/run_limit.sh"
cal_opt = "pvalue" # limit,pvalue
data_opt = "obs" #obs, exp
ws_name = "combWS"
mu_name = "xs"
#mG_low = 500
#mG_hi = 3500
mG_low = 200
mG_hi = 2000
mG_step = 10
#kappa_list = [0.00, 0.01, 0.06, 0.1]
kappa_list = [0.01]
n_mG = int((mG_hi - mG_low)/mG_step)
out_name = workdir
if do_hist:
input_ws = "/afs/cern.ch/user/x/xju/work/diphoton/limits_hist_floating/inputs/2015_Graviton_histfactory_EKHI_v6.root"
data_name = "combDatabinned"
out_name += "/histofactory/"
else:
#input_ws = "/afs/cern.ch/user/x/xju/work/diphoton/limits_hist_floating/inputs/2015_Graviton_2D_EKHI_200.root"
#input_ws = "/afs/cern.ch/user/x/xju/work/diphoton/limits_hist_floating/inputs/2015_Graviton_2D_EKHI_200_Mar23.root"
#out_name += "/functional_Mar23/"
input_ws = "/afs/cern.ch/user/x/xju/work/HWWStatisticsCode/workspaces/2015_Scalar_2D_v4.root"
data_name = "combData"
out_name += "/scalar_2d/"
goodjobs = []
badjobs = []
print out_name
for kappa in kappa_list:
for mG in range(mG_low, mG_hi+mG_step, mG_step):
if not do_scalar:
fix_vars = "mG:"+str(mG)+",GkM:"+str(kappa)
else:
width = mG*kappa
fix_vars = "mX:"+str(mG)+",wX:"+str(width)
run_cmd = exe+" "+input_ws+" "+ws_name+" "+mu_name+" "+\
data_name+" "+fix_vars+" "+cal_opt+" "+data_opt+" "+out_name
if not submit: print run_cmd
#-G u_zp -q 8nh for atlas sources
#-G ATLASWISC_GEN -q wisc for wisconsin sources
bsubs_cmd = "bsub -q wisc -R 'pool>4000' -C 0 -o" + \
workdir+ "/output "+ run_cmd
if submit:
status,output=commands.getstatusoutput(bsubs_cmd)
else:
continue
if status != 0:
print output
badjobs.append(0)
else:
goodjobs.append(1)
print "Good jobs: "+ str(len(goodjobs))+", "+str(len(badjobs))+" failed!"
|
mit
| -4,650,468,770,263,121,000
| 28.555556
| 121
| 0.598214
| false
| 2.557692
| false
| false
| false
|
g2p/systems
|
lib/systems/plugins/apache2/a2mod.py
|
1
|
1252
|
# vim: set fileencoding=utf-8 sw=2 ts=2 et :
from __future__ import absolute_import
from systems.dsl import resource, transition
from systems.registry import get_registry
from systems.typesystem import AttrType, ResourceType, EResource
from systems.util.templates import build_and_render
class A2Mod(EResource):
"""
An apache2 module.
"""
def expand_into(self, rg):
name = self.id_attrs['name']
enabled = self.wanted_attrs['enabled']
apache2 = rg.add_to_top(resource('AptitudePackage',
name='apache2.2-common',
))
cmd = '/usr/sbin/a2%smod' % { True: 'en', False: 'dis', }[enabled]
endis = rg.add_transition(transition('Command',
cmdline=[cmd, name, ]
),
depends=[apache2])
# We don't need to restart everytime, which takes some time.
reload = rg.add_transition(transition('Command',
cmdline=['/usr/sbin/invoke-rc.d', 'apache2', 'restart', ],
),
depends=[endis],
)
def register():
restype = ResourceType('A2Mod', A2Mod,
id_type={
'name': AttrType(
pytype=str),
},
state_type={
'enabled': AttrType(
default_value=True,
pytype=bool),
})
get_registry().resource_types.register(restype)
|
gpl-2.0
| -8,509,602,508,099,704,000
| 25.638298
| 70
| 0.626198
| false
| 3.468144
| false
| false
| false
|
amaurywalbert/twitter
|
n6/n6_alters_timeline_collect_v5.0_50egos.py
|
2
|
14123
|
# -*- coding: latin1 -*-
################################################################################################
#
#
import tweepy, datetime, sys, time, json, os, os.path, shutil, time, struct, random
import multi_oauth_n6
# Script that holds the keys for Twitter authentication
reload(sys)
sys.setdefaultencoding('utf-8')
######################################################################################################################################################################
## Status - Version 5.0 - Collect the timeline of the alters (followers of the egos' friends) - Alters' timeline - identify the set of retweets in each timeline.
##
##
## 5.1 - Uses Tweepy to handle the authentications...
##
##
## SAVE ONLY WHAT IS STRICTLY NEEDED TO SAVE DISK SPACE. Collecting full tweets takes up a lot of space.
##
## NOTE: Twitter has blocked several accounts on suspicion of spam... be extra careful with the scripts that were created.
##
## STATUS - Collecting - OK - Save BINARY files containing the retweet id and the author id, built from the egos' list of alters.
##
## STATUS - Redo the collection until there are no more "Rate Limit Exceeded" messages - each such message means one user was left uncollected.
##
##
######################################################################################################################################################################
######################################################################################################################################################################
#
# Performs application authentication.
#
######################################################################################################################################################################
def autentication(auths):
global key
key += 1
if (key >= key_limit):
key = key_init
print
print("######################################################################")
print ("Autenticando usando chave número: "+str(key)+"/"+str(key_limit))
print("######################################################################\n")
time.sleep(wait)
api_key = tweepy.API(auths[key])
return (api_key)
######################################################################################################################################################################
#
# Converts datetime values so they can be stored in JSON format
#
######################################################################################################################################################################
class DateTimeEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime.datetime):
encoded_object = list(obj.timetuple())[0:6]
else:
encoded_object =json.JSONEncoder.default(self, obj)
return encoded_object
################################################################################################
# Reads the binary files containing the retweet ids and their authors' ids
################################################################################################
def read_arq_bin(file):
with open(file, 'r') as f:
f.seek(0,2)
tamanho = f.tell()
f.seek(0)
retweets_list = []
while f.tell() < tamanho:
buffer = f.read(timeline_struct.size)
retweet, user = timeline_struct.unpack(buffer)
status = {'retweet':retweet, 'user':user}
retweets_list.append(status)
return retweets_list
################################################################################################
# Reads the binary files containing the followers' ids
################################################################################################
def read_arq_followers_bin(file):
with open(file, 'r') as f:
f.seek(0,2)
tamanho = f.tell()
f.seek(0)
followers_file = []
while f.tell() < tamanho:
buffer = f.read(followers_struct.size)
follower = followers_struct.unpack(buffer)
followers_file.append(follower[0])
return followers_file
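# --- Illustrative sketch (not part of the original script) ---
# The binary files handled above are plain fixed-size records packed with the
# struct module: each timeline record holds two longs ('ll' = retweet id and
# author id) and each follower record a single long ('l').  A minimal
# round-trip, with hypothetical ids:
def _struct_roundtrip_sketch():
    rec = struct.Struct('ll')
    packed = rec.pack(123456789, 987654321)      # bytes as written to a .dat file
    retweet_id, author_id = rec.unpack(packed)   # values read back from the file
    return retweet_id, author_id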
######################################################################################################################################################################
#
# Tweepy - Performs the lookup and returns the timeline of a specific user
#
######################################################################################################################################################################
def get_timeline(user):  # Collect the timeline
global key
global dictionary
global api
global i
timeline = []
try:
        for page in tweepy.Cursor(api.user_timeline,id=user, count=200).pages(16):  # Returns the last 3200 tweets (16*200)
for tweet in page:
timeline.append(tweet)
return (timeline)
except tweepy.error.RateLimitError as e:
print("Limite de acesso à API excedido. User: "+str(user)+" - Autenticando novamente... "+str(e))
api = autentication(auths)
except tweepy.error.TweepError as e:
        agora = datetime.datetime.strftime(datetime.datetime.now(), '%Y%m%d%H%M')  # Current instant as YearMonthDayHourMinute
        error = {}
        with open(error_dir+"timeline_collect.err", "a+") as outfile:  # Open the error file for appending
if e.message:
error = {'user':user,'reason': e.message,'date':agora, 'key':key}
outfile.write(json.dumps(error, cls=DateTimeEncoder, separators=(',', ':'))+"\n")
print error
else:
error = {'user':user,'reason': str(e),'date':agora, 'key':key}
outfile.write(json.dumps(error, cls=DateTimeEncoder, separators=(',', ':'))+"\n")
print error
try:
if e.message[0]['code'] == 32 or e.message[0]['code'] == 215:
key = random.randint(key_init,key_limit)
api = autentication(auths)
            if e.message[0]['code'] == 34:  # Non-existent user
                dictionary[user] = user  # Add the collected user to the in-memory table
                with open(data_dir+str(user)+".json", "w") as f:  # Create an empty file
print ("Usuário inexistente. User: "+str(user)+" - Arquivo criado com sucesso!")
i +=1
except Exception as e2:
print ("E2: "+str(e2))
try:
            if e.message == 'Not authorized.':  # Unauthorised user
                dictionary[user] = user  # Add the collected user to the in-memory table
                with open(data_dir+str(user)+".json", "w") as f:  # Create an empty file
print ("Usuário não autorizada. User: "+str(user)+" - Arquivo criado com sucesso!")
i +=1
except Exception as e3:
print ("E3: "+str(e3))
######################################################################################################################################################################
#
# Fetches and saves the users' timelines
#
######################################################################################################################################################################
def save_timeline(j,k,l,user):
    global i  # number of users already collected / number of files in the directory
    # Dictionary - hash table holding the users already collected
    global dictionary
    # Call the collection function and get back the user's list of tweets
    t = 0  # Number of tweets for this user
timeline = get_timeline(user)
if timeline:
try:
with open(data_dir+str(user)+".dat", "w+b") as f:
for status in timeline:
if hasattr(status, 'retweeted_status'):
t+=1
                        f.write(timeline_struct.pack(status.retweeted_status.id, status.retweeted_status.user.id))  # Write the retweet id and the author id to the user's binary file
###
            # retweets_list = read_arq_bin(data_dir+str(user)+".dat")  # Helper to convert the binary file back into json-style dicts.
# print retweets_list
####
            dictionary[user] = user  # Add the collected user to the in-memory table
i +=1
print ("Egos_Friend nº: "+str(j)+" - Alter("+str(k)+"/"+str(l)+"): "+str(user)+" coletado com sucesso. "+str(t)+" retweets. Total de usuários coletados: "+str(i))
except Exception as e:
            agora = datetime.datetime.strftime(datetime.datetime.now(), '%Y%m%d%H%M')  # Current instant as YearMonthDayHourMinute
            with open(error_dir+"timeline_collect.err", "a+") as outfile:  # Open the error file for appending
if e.message:
error = {'user':user,'reason': e.message,'date':agora}
else:
error = {'user':user,'reason': str(e),'date':agora}
outfile.write(json.dumps(error, cls=DateTimeEncoder, separators=(',', ':'))+"\n")
print error
if os.path.exists(data_dir+str(user)+".dat"):
os.remove(data_dir+str(user)+".dat")
######################################################################################################################################################################
######################################################################################################################################################################
#
# Main method of the program.
# Checks and collects the timeline of each user specified in the files.
#
######################################################################################################################################################################
######################################################################################################################################################################
def main():
    global i  # number of users already collected / number of files in the directory
    j = 0  # Ordinal number of the ego's friend currently being processed
    k = 0  # Ordinal number of the alter (follower) currently being processed
    for file in os.listdir(followers_collected_dir):  # Scan the directory.
j+=1
        followers_list = read_arq_followers_bin(followers_collected_dir+file)  # List of alters (followers) of one of the ego's friends
        l = len(followers_list)  # Number of followers of the ego's friend
for follower in followers_list:
k+=1
if not dictionary.has_key(follower):
                save_timeline(j,k,l,follower)  # Start the collection routine
print
print("######################################################################")
print("Coleta finalizada!")
print("######################################################################\n")
######################################################################################################################################################################
#
# PROGRAM START
#
######################################################################################################################################################################
################################### DEFINE WHETHER THIS IS A TEST RUN OR NOT!!! ### ['auths_ok'] OR ['auths_test'] ################
oauth_keys = multi_oauth_n6.keys()
auths = oauth_keys['auths_ok']
################################### CONFIGURE THE FOLLOWING LINES ####################################################
######################################################################################################################
key_init = 0 #################################################### These two lines assign the keys used by each script
key_limit = len(auths) #################################################### Use all keys (size of the key list)
key = random.randint(key_init,key_limit) ###################################### Start the script from a random key of the key set
followers_collected_dir = "/home/amaury/coleta/n5/alters_followers/50/bin/"#### Directory with the already collected friend sets of the egos. Each file holds the set of followers of one friend.
data_dir = "/home/amaury/coleta/n6/timeline_collect/alters/bin/" ############## Directory used to store the output files
error_dir = "/home/amaury/coleta/n6/timeline_collect/alters/error/" ########### Directory used to store the error files
formato = 'll' ####################################################### Two longs ('ll'): the retweet id followed by the author id
timeline_struct = struct.Struct(formato) ###################################### Initialise the struct object used to store records in this exact layout in the binary file
formato_followers = 'l' ############################################## A single long ('l') per record: the follower id
followers_struct = struct.Struct(formato_followers) ########################### Initialise the struct object used to store records in this exact layout in the binary file
wait = 60
dictionary = {} #################################################### {key:value} table to speed up lookups of users already collected
######################################################################################################################
######################################################################################################################
######################################################################################################################
# Create the directories used to store the files
if not os.path.exists(data_dir):
os.makedirs(data_dir)
if not os.path.exists(error_dir):
os.makedirs(error_dir)
###### Initialising the dictionary - a hash table built from the files already collected.
print
print("######################################################################")
print ("Criando tabela hash...")
i = 0  # Counts how many users have already been collected (all files in the directory)
for file in os.listdir(data_dir):
user_id = file.split(".dat")
user_id = long(user_id[0])
dictionary[user_id] = user_id
i+=1
print ("Tabela hash criada com sucesso...")
print("######################################################################\n")
# Authentication
api = autentication(auths)
# Run the main method
if __name__ == "__main__": main()
|
gpl-3.0
| 3,922,565,552,435,736,600
| 50.547794
| 198
| 0.461341
| false
| 3.788165
| false
| false
| false
|
Kagee/youtube-dl
|
youtube_dl/extractor/teamcoco.py
|
1
|
3246
|
from __future__ import unicode_literals
import base64
import re
from .common import InfoExtractor
from ..utils import qualities
class TeamcocoIE(InfoExtractor):
_VALID_URL = r'http://teamcoco\.com/video/(?P<video_id>[0-9]+)?/?(?P<display_id>.*)'
_TESTS = [
{
'url': 'http://teamcoco.com/video/80187/conan-becomes-a-mary-kay-beauty-consultant',
'md5': '3f7746aa0dc86de18df7539903d399ea',
'info_dict': {
'id': '80187',
'ext': 'mp4',
'title': 'Conan Becomes A Mary Kay Beauty Consultant',
'description': 'Mary Kay is perhaps the most trusted name in female beauty, so of course Conan is a natural choice to sell their products.',
'age_limit': 0,
}
}, {
'url': 'http://teamcoco.com/video/louis-ck-interview-george-w-bush',
'md5': 'cde9ba0fa3506f5f017ce11ead928f9a',
'info_dict': {
'id': '19705',
'ext': 'mp4',
'description': 'Louis C.K. got starstruck by George W. Bush, so what? Part one.',
'title': 'Louis C.K. Interview Pt. 1 11/3/11',
'age_limit': 0,
}
}
]
_VIDEO_ID_REGEXES = (
r'"eVar42"\s*:\s*(\d+)',
r'Ginger\.TeamCoco\.openInApp\("video",\s*"([^"]+)"',
r'"id_not"\s*:\s*(\d+)'
)
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
display_id = mobj.group('display_id')
webpage = self._download_webpage(url, display_id)
video_id = mobj.group('video_id')
if not video_id:
video_id = self._html_search_regex(
self._VIDEO_ID_REGEXES, webpage, 'video id')
embed_url = 'http://teamcoco.com/embed/v/%s' % video_id
embed = self._download_webpage(
embed_url, video_id, 'Downloading embed page')
encoded_data = self._search_regex(
r'"preload"\s*:\s*"([^"]+)"', embed, 'encoded data')
data = self._parse_json(
base64.b64decode(encoded_data.encode('ascii')).decode('utf-8'), video_id)
formats = []
get_quality = qualities(['500k', '480p', '1000k', '720p', '1080p'])
for filed in data['files']:
m_format = re.search(r'(\d+(k|p))\.mp4', filed['url'])
if m_format is not None:
format_id = m_format.group(1)
else:
format_id = filed['bitrate']
tbr = (
int(filed['bitrate'])
if filed['bitrate'].isdigit()
else None)
formats.append({
'url': filed['url'],
'ext': 'mp4',
'tbr': tbr,
'format_id': format_id,
'quality': get_quality(format_id),
})
self._sort_formats(formats)
return {
'id': video_id,
'display_id': display_id,
'formats': formats,
'title': data['title'],
'thumbnail': data.get('thumb', {}).get('href'),
'description': data.get('teaser'),
'age_limit': self._family_friendly_search(webpage),
}
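# A minimal sketch (not part of the extractor) of how the qualities() helper
# used above behaves: it returns a ranking function over the listed format
# ids, so later entries get a higher preference.
def _qualities_sketch():
    rank = qualities(['500k', '480p', '1000k', '720p', '1080p'])
    return rank('720p') > rank('500k')   # True: '720p' is preferred over '500k'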
|
unlicense
| -4,121,415,404,715,677,700
| 34.282609
| 156
| 0.496303
| false
| 3.460554
| false
| false
| false
|
qpython-android/QPython3-core
|
pybuild/packages/pandas.py
|
1
|
1975
|
from ..source import GitSource
from ..package import Package
from ..patch import LocalPatch
from ..util import target_arch
import os
class Pandas(Package):
source = GitSource('https://github.com/AIPYX/pandas.git', alias='pandas', branch='qpyc/0.23.4')
patches = [
#LocalPatch('0001-cross-compile'),
#LocalPatch('0001-add-ftello64'),
]
#use_gcc = True
def prepare(self):
#self.run(['cp', self.filesdir / 'site.cfg', './'])
pass
def build(self):
#self.system("find . -iname '*.pyx' -exec cython {} \;")
PY_BRANCH = os.getenv('PY_BRANCH')
PY_M_BRANCH = os.getenv('PY_M_BRANCH')
self.run([
'python',
'setup.py',
'cython',
])
self.run([
'python',
'setup.py',
'build_ext',
f'-I../../build/target/python/usr/include/python{PY_BRANCH}.{PY_M_BRANCH}'\
f':../../build/target/openblas/usr/include'\
f':{self.env["ANDROID_NDK"]}/sources/cxx-stl/gnu-libstdc++/4.9/include'\
f':{self.env["ANDROID_NDK"]}/sources/cxx-stl/gnu-libstdc++/4.9/libs/armeabi-v7a/include',
f'-L../../build/target/python/usr/lib'\
f':../../build/target/openblas/usr/lib:{self.env["ANDROID_NDK"]}/toolchains/renderscript/prebuilt/linux-x86_64/platform/arm'\
f':{self.env["ANDROID_NDK"]}/toolchains/arm-linux-androideabi-4.9/prebuilt/linux-x86_64/lib/gcc/arm-linux-androideabi/4.9.x/armv7-a'\
f':{self.env["ANDROID_NDK"]}/sources/cxx-stl/gnu-libstdc++/4.9/libs/armeabi-v7a'\
f':{self.env["ANDROID_NDK"]}/toolchains/arm-linux-androideabi-4.9/prebuilt/linux-x86_64/arm-linux-androideabi/lib/armv7-a',
f'-lpython{PY_BRANCH}.{PY_M_BRANCH},m,gnustl_static,atomic'
])
self.run([
'python',
'setup.py',
'build_py',
])
def refresh(self):
return True
|
apache-2.0
| 1,866,403,884,767,679,500
| 35.574074
| 145
| 0.56557
| false
| 3.07154
| false
| false
| false
|
cylc/cylc
|
cylc/flow/cycling/iso8601.py
|
1
|
34605
|
# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Date-time cycling by point, interval, and sequence classes."""
from functools import lru_cache
import re
from metomi.isodatetime.data import Calendar, Duration, CALENDAR
from metomi.isodatetime.dumpers import TimePointDumper
from metomi.isodatetime.timezone import (
get_local_time_zone, get_local_time_zone_format, TimeZoneFormatMode)
from metomi.isodatetime.exceptions import IsodatetimeError
from cylc.flow.time_parser import CylcTimeParser
from cylc.flow.cycling import (
PointBase, IntervalBase, SequenceBase, ExclusionBase, cmp_to_rich, cmp
)
from cylc.flow.exceptions import (
CylcConfigError,
IntervalParsingError,
PointParsingError,
SequenceDegenerateError
)
from cylc.flow.wallclock import get_current_time_string
from cylc.flow.parsec.validate import IllegalValueError
CYCLER_TYPE_ISO8601 = "iso8601"
CYCLER_TYPE_SORT_KEY_ISO8601 = "b"
DATE_TIME_FORMAT = "CCYYMMDDThhmm"
EXPANDED_DATE_TIME_FORMAT = "+XCCYYMMDDThhmm"
NEW_DATE_TIME_REC = re.compile("T")
WARNING_PARSE_EXPANDED_YEAR_DIGITS = (
"(incompatible with [cylc]cycle point num expanded year digits = %s ?)")
class SuiteSpecifics:
"""Store suite-setup-specific constants and utilities here."""
ASSUMED_TIME_ZONE = None
DUMP_FORMAT = None
NUM_EXPANDED_YEAR_DIGITS = None
abbrev_util = None
interval_parser = None
point_parser = None
recurrence_parser = None
iso8601_parsers = None
class ISO8601Point(PointBase):
"""A single point in an ISO8601 date time sequence."""
TYPE = CYCLER_TYPE_ISO8601
TYPE_SORT_KEY = CYCLER_TYPE_SORT_KEY_ISO8601
__slots__ = ('value')
@classmethod
def from_nonstandard_string(cls, point_string):
"""Standardise a date-time string."""
return ISO8601Point(str(point_parse(point_string))).standardise()
def add(self, other):
"""Add an Interval to self."""
return ISO8601Point(self._iso_point_add(self.value, other.value))
def __cmp__(self, other):
# Compare other (point) to self.
if other is None:
return -1
if self.TYPE != other.TYPE:
return cmp(self.TYPE_SORT_KEY, other.TYPE_SORT_KEY)
if self.value == other.value:
return 0
return self._iso_point_cmp(self.value, other.value)
def standardise(self):
"""Reformat self.value into a standard representation."""
try:
self.value = str(point_parse(self.value))
except IsodatetimeError as exc:
if self.value.startswith("+") or self.value.startswith("-"):
message = WARNING_PARSE_EXPANDED_YEAR_DIGITS % (
SuiteSpecifics.NUM_EXPANDED_YEAR_DIGITS)
else:
message = str(exc)
raise PointParsingError(type(self), self.value, message)
return self
def sub(self, other):
"""Subtract a Point or Interval from self."""
if isinstance(other, ISO8601Point):
return ISO8601Interval(
self._iso_point_sub_point(self.value, other.value))
return ISO8601Point(
self._iso_point_sub_interval(self.value, other.value))
def __hash__(self):
return hash(self.value)
@staticmethod
@lru_cache(10000)
def _iso_point_add(point_string, interval_string):
"""Add the parsed point_string to the parsed interval_string."""
point = point_parse(point_string)
interval = interval_parse(interval_string)
return str(point + interval)
@staticmethod
@lru_cache(10000)
def _iso_point_cmp(point_string, other_point_string):
"""Compare the parsed point_string to the other one."""
point = point_parse(point_string)
other_point = point_parse(other_point_string)
return cmp(point, other_point)
@staticmethod
@lru_cache(10000)
def _iso_point_sub_interval(point_string, interval_string):
"""Return the parsed point_string minus the parsed interval_string."""
point = point_parse(point_string)
interval = interval_parse(interval_string)
return str(point - interval)
@staticmethod
@lru_cache(10000)
def _iso_point_sub_point(point_string, other_point_string):
"""Return the difference between the two parsed point strings."""
point = point_parse(point_string)
other_point = point_parse(other_point_string)
return str(point - other_point)
# TODO: replace __cmp__ infrastructure
cmp_to_rich(ISO8601Point)
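# A minimal usage sketch (not part of the module): point/interval arithmetic
# delegates to the cached *_parse helpers defined at the bottom of this file,
# so init() must have been called first to set up the suite-specific parsers.
# The literal strings below are illustrative only.
def _point_arithmetic_sketch():
    init(time_zone="Z")                       # illustrative configuration
    point = ISO8601Point("20000101T0000Z")
    later = point + ISO8601Interval("PT6H")   # a new ISO8601Point six hours on
    span = later.sub(point)                   # the ISO8601Interval between them
    return later, span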
class ISO8601Interval(IntervalBase):
"""The interval between points in an ISO8601 date time sequence."""
NULL_INTERVAL_STRING = "P0Y"
TYPE = CYCLER_TYPE_ISO8601
TYPE_SORT_KEY = CYCLER_TYPE_SORT_KEY_ISO8601
__slots__ = ('value')
@classmethod
def get_null(cls):
"""Return a null interval."""
return ISO8601Interval("P0Y")
@classmethod
def get_null_offset(cls):
"""Return a null offset."""
return ISO8601Interval("+P0Y")
def get_inferred_child(self, string):
"""Return an instance with 'string' amounts of my non-zero units."""
interval = interval_parse(self.value)
amount_per_unit = int(string)
unit_amounts = {}
for attribute in ["years", "months", "weeks", "days",
"hours", "minutes", "seconds"]:
if getattr(interval, attribute):
unit_amounts[attribute] = amount_per_unit
interval = Duration(**unit_amounts)
return ISO8601Interval(str(interval))
def standardise(self):
"""Format self.value into a standard representation."""
try:
self.value = str(interval_parse(self.value))
except IsodatetimeError:
raise IntervalParsingError(type(self), self.value)
return self
def add(self, other):
"""Add other to self (point or interval) c.f. ISO 8601."""
if isinstance(other, ISO8601Interval):
return ISO8601Interval(
self._iso_interval_add(self.value, other.value))
return other + self
def cmp_(self, other):
"""Compare another interval with this one."""
return self._iso_interval_cmp(self.value, other.value)
def sub(self, other):
"""Subtract another interval from this one."""
return ISO8601Interval(
self._iso_interval_sub(self.value, other.value))
def __abs__(self):
"""Return an interval with absolute values of this one's values."""
return ISO8601Interval(
self._iso_interval_abs(self.value, self.NULL_INTERVAL_STRING))
def __mul__(self, factor):
"""Return an interval with v * factor for v in this one's values."""
return ISO8601Interval(self._iso_interval_mul(self.value, factor))
def __bool__(self):
"""Return whether this interval has any non-null values."""
return self._iso_interval_nonzero(self.value)
@staticmethod
@lru_cache(10000)
def _iso_interval_abs(interval_string, other_interval_string):
"""Return the absolute (non-negative) value of an interval_string."""
interval = interval_parse(interval_string)
other = interval_parse(other_interval_string)
if interval < other:
return str(interval * -1)
return interval_string
@staticmethod
@lru_cache(10000)
def _iso_interval_add(interval_string, other_interval_string):
"""Return one parsed interval_string plus the other one."""
interval = interval_parse(interval_string)
other = interval_parse(other_interval_string)
return str(interval + other)
@staticmethod
@lru_cache(10000)
def _iso_interval_cmp(interval_string, other_interval_string):
"""Compare one parsed interval_string with the other one."""
interval = interval_parse(interval_string)
other = interval_parse(other_interval_string)
return cmp(interval, other)
@staticmethod
@lru_cache(10000)
def _iso_interval_sub(interval_string, other_interval_string):
"""Subtract one parsed interval_string from the other one."""
interval = interval_parse(interval_string)
other = interval_parse(other_interval_string)
return str(interval - other)
@staticmethod
@lru_cache(10000)
def _iso_interval_mul(interval_string, factor):
"""Multiply one parsed interval_string's values by factor."""
interval = interval_parse(interval_string)
return str(interval * factor)
@staticmethod
@lru_cache(10000)
def _iso_interval_nonzero(interval_string):
"""Return whether the parsed interval_string is a null interval."""
interval = interval_parse(interval_string)
return bool(interval)
class ISO8601Exclusions(ExclusionBase):
"""A collection of ISO8601Sequences that represent excluded sequences.
The object is able to determine if points are within any of its
grouped exclusion sequences. The Python ``in`` and ``not in`` operators
may be used on this object to determine if a point is in the collection
of exclusion sequences."""
def __init__(self, excl_points, start_point, end_point=None):
super(ISO8601Exclusions, self).__init__(start_point, end_point)
self.build_exclusions(excl_points)
def build_exclusions(self, excl_points):
for point in excl_points:
try:
# Try making an ISO8601Sequence
exclusion = ISO8601Sequence(point, self.exclusion_start_point,
self.exclusion_end_point)
self.exclusion_sequences.append(exclusion)
except (AttributeError, TypeError, ValueError):
# Try making an ISO8601Point
exclusion_point = ISO8601Point.from_nonstandard_string(
str(point)) if point else None
if exclusion_point not in self.exclusion_points:
self.exclusion_points.append(exclusion_point)
class ISO8601Sequence(SequenceBase):
"""A sequence of ISO8601 date time points separated by an interval.
Note that an ISO8601Sequence object (may) contain
ISO8601ExclusionSequences"""
TYPE = CYCLER_TYPE_ISO8601
TYPE_SORT_KEY = CYCLER_TYPE_SORT_KEY_ISO8601
_MAX_CACHED_POINTS = 100
__slots__ = ('dep_section', 'context_start_point', 'context_end_point',
'offset', '_cached_first_point_values',
'_cached_next_point_values', '_cached_valid_point_booleans',
'_cached_recent_valid_points', 'spec', 'abbrev_util',
'recurrence', 'exclusions', 'step', 'value')
@classmethod
def get_async_expr(cls, start_point=None):
"""Express a one-off sequence at the initial cycle point."""
if start_point is None:
return "R1"
return "R1/" + str(start_point)
def __init__(self, dep_section, context_start_point=None,
context_end_point=None):
SequenceBase.__init__(
self, dep_section, context_start_point, context_end_point)
self.dep_section = dep_section
if context_start_point is None:
self.context_start_point = context_start_point
elif isinstance(context_start_point, ISO8601Point):
self.context_start_point = context_start_point
else:
self.context_start_point = ISO8601Point.from_nonstandard_string(
context_start_point)
if context_end_point is None:
self.context_end_point = None
elif isinstance(context_end_point, ISO8601Point):
self.context_end_point = context_end_point
else:
self.context_end_point = ISO8601Point.from_nonstandard_string(
context_end_point)
self.offset = ISO8601Interval.get_null()
self._cached_first_point_values = {}
self._cached_next_point_values = {}
self._cached_valid_point_booleans = {}
self._cached_recent_valid_points = []
self.spec = dep_section
self.abbrev_util = CylcTimeParser(self.context_start_point,
self.context_end_point,
SuiteSpecifics.iso8601_parsers)
# Parse_recurrence returns an isodatetime TimeRecurrence object
# and a list of exclusion strings.
self.recurrence, excl_points = self.abbrev_util.parse_recurrence(
dep_section)
# Determine the exclusion start point and end point
try:
exclusion_start_point = ISO8601Point.from_nonstandard_string(
str(self.recurrence.start_point))
except IsodatetimeError:
exclusion_start_point = self.context_start_point
try:
exclusion_end_point = ISO8601Point.from_nonstandard_string(
str(self.recurrence.end_point))
except IsodatetimeError:
exclusion_end_point = self.context_end_point
self.exclusions = []
# Creating an exclusions object instead
if excl_points:
try:
self.exclusions = ISO8601Exclusions(
excl_points,
exclusion_start_point,
exclusion_end_point)
except AttributeError:
pass
self.step = ISO8601Interval(str(self.recurrence.duration))
self.value = str(self.recurrence)
# Concatenate the strings in exclusion list
if self.exclusions:
self.value += '!' + str(self.exclusions)
def get_interval(self):
"""Return the interval between points in this sequence."""
return self.step
def get_offset(self):
"""Deprecated: return the offset used for this sequence."""
return self.offset
def set_offset(self, i_offset):
"""Deprecated: alter state to i_offset the entire sequence."""
if self.recurrence.start_point is not None:
self.recurrence.start_point += interval_parse(str(i_offset))
if self.recurrence.end_point is not None:
self.recurrence.end_point += interval_parse(str(i_offset))
self._cached_first_point_values = {}
self._cached_next_point_values = {}
self._cached_valid_point_booleans = {}
self._cached_recent_valid_points = []
        self.value = str(self.recurrence)
if self.exclusions:
self.value += '!' + str(self.exclusions)
@lru_cache(100)
def is_on_sequence(self, point):
"""Return True if point is on-sequence."""
# Iterate starting at recent valid points, for speed.
if self.exclusions and point in self.exclusions:
return False
for valid_point in reversed(self._cached_recent_valid_points):
if valid_point == point:
return True
if valid_point > point:
continue
next_point = valid_point
while next_point is not None and next_point < point:
next_point = self.get_next_point_on_sequence(next_point)
if next_point is None:
continue
if next_point == point:
return True
return self.recurrence.get_is_valid(point_parse(point.value))
def is_valid(self, point):
"""Return True if point is on-sequence and in-bounds."""
try:
return self._cached_valid_point_booleans[point.value]
except KeyError:
is_valid = self.is_on_sequence(point)
if (len(self._cached_valid_point_booleans) >
self._MAX_CACHED_POINTS):
self._cached_valid_point_booleans.popitem()
self._cached_valid_point_booleans[point.value] = is_valid
return is_valid
def get_prev_point(self, point):
"""Return the previous point < point, or None if out of bounds."""
# may be None if out of the recurrence bounds
res = None
prev_point = self.recurrence.get_prev(point_parse(point.value))
if prev_point:
res = ISO8601Point(str(prev_point))
if res == point:
raise SequenceDegenerateError(self.recurrence,
SuiteSpecifics.DUMP_FORMAT,
res, point)
# Check if res point is in the list of exclusions
# If so, check the previous point by recursion.
# Once you have found a point that is *not* in the exclusion
# list, you can return it.
if self.exclusions and res in self.exclusions:
return self.get_prev_point(res)
return res
def get_nearest_prev_point(self, point):
"""Return the largest point < some arbitrary point."""
if self.is_on_sequence(point):
return self.get_prev_point(point)
p_iso_point = point_parse(point.value)
prev_cycle_point = None
for recurrence_iso_point in self.recurrence:
# Is recurrence point greater than arbitrary point?
if recurrence_iso_point > p_iso_point:
break
recurrence_cycle_point = ISO8601Point(str(recurrence_iso_point))
if self.exclusions and recurrence_cycle_point in self.exclusions:
break
prev_cycle_point = recurrence_cycle_point
if prev_cycle_point is None:
return None
if prev_cycle_point == point:
raise SequenceDegenerateError(
self.recurrence, SuiteSpecifics.DUMP_FORMAT,
prev_cycle_point, point
)
# Check all exclusions
if self.exclusions and prev_cycle_point in self.exclusions:
return self.get_prev_point(prev_cycle_point)
return prev_cycle_point
def get_next_point(self, point):
"""Return the next point > p, or None if out of bounds."""
try:
return ISO8601Point(self._cached_next_point_values[point.value])
except KeyError:
pass
# Iterate starting at recent valid points, for speed.
for valid_point in reversed(self._cached_recent_valid_points):
if valid_point >= point:
continue
next_point = valid_point
excluded = False
while next_point is not None and (next_point <= point or excluded):
excluded = False
next_point = self.get_next_point_on_sequence(next_point)
if next_point and next_point in self.exclusions:
excluded = True
if next_point is not None:
self._check_and_cache_next_point(point, next_point)
return next_point
# Iterate starting at the beginning.
p_iso_point = point_parse(point.value)
for recurrence_iso_point in self.recurrence:
if recurrence_iso_point > p_iso_point:
next_point = ISO8601Point(str(recurrence_iso_point))
if next_point and next_point in self.exclusions:
continue
self._check_and_cache_next_point(point, next_point)
return next_point
return None
def _check_and_cache_next_point(self, point, next_point):
"""Verify and cache the get_next_point return info."""
# Verify next_point != point.
if next_point == point:
raise SequenceDegenerateError(
self.recurrence, SuiteSpecifics.DUMP_FORMAT,
next_point, point
)
# Cache the answer for point -> next_point.
if (len(self._cached_next_point_values) >
self._MAX_CACHED_POINTS):
self._cached_next_point_values.popitem()
self._cached_next_point_values[point.value] = next_point.value
# Cache next_point as a valid starting point for this recurrence.
        if (len(self._cached_recent_valid_points) >
                self._MAX_CACHED_POINTS):
self._cached_recent_valid_points.pop(0)
self._cached_recent_valid_points.append(next_point)
def get_next_point_on_sequence(self, point):
"""Return the on-sequence point > point assuming that point is
on-sequence, or None if out of bounds."""
result = None
next_point = self.recurrence.get_next(point_parse(point.value))
if next_point:
result = ISO8601Point(str(next_point))
if result == point:
raise SequenceDegenerateError(
self.recurrence, SuiteSpecifics.DUMP_FORMAT,
point, result
)
# Check it is in the exclusions list now
if result and result in self.exclusions:
return self.get_next_point_on_sequence(result)
return result
def get_first_point(self, point):
"""Return the first point >= to point, or None if out of bounds."""
try:
return ISO8601Point(self._cached_first_point_values[point.value])
except KeyError:
pass
p_iso_point = point_parse(point.value)
for recurrence_iso_point in self.recurrence:
if recurrence_iso_point >= p_iso_point:
first_point_value = str(recurrence_iso_point)
ret = ISO8601Point(first_point_value)
# Check multiple exclusions
if ret and ret in self.exclusions:
return self.get_next_point_on_sequence(ret)
if (len(self._cached_first_point_values) >
self._MAX_CACHED_POINTS):
self._cached_first_point_values.popitem()
self._cached_first_point_values[point.value] = (
first_point_value)
return ret
return None
def get_start_point(self):
"""Return the first point in this sequence, or None."""
for recurrence_iso_point in self.recurrence:
point = ISO8601Point(str(recurrence_iso_point))
# Check for multiple exclusions
if not self.exclusions or point not in self.exclusions:
return point
return None
def get_stop_point(self):
"""Return the last point in this sequence, or None if unbounded."""
if (self.recurrence.repetitions is not None or (
(self.recurrence.start_point is not None or
self.recurrence.min_point is not None) and
(self.recurrence.end_point is not None or
self.recurrence.max_point is not None))):
curr = None
prev = None
for recurrence_iso_point in self.recurrence:
prev = curr
curr = recurrence_iso_point
ret = ISO8601Point(str(curr))
if self.exclusions and ret in self.exclusions:
return ISO8601Point(str(prev))
return ret
return None
def __eq__(self, other):
# Return True if other (sequence) is equal to self.
if self.TYPE != other.TYPE:
return False
if self.value == other.value:
return True
return False
def __lt__(self, other):
return self.value < other.value
def __str__(self):
return self.value
def __hash__(self):
return hash(self.value)
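# A minimal usage sketch (not part of the module): after init() has set up the
# parsers, a sequence is built from a recurrence expression plus a context
# start point, and can then be walked point by point.  The expressions used
# here are illustrative only.
def _sequence_sketch():
    init(time_zone="Z")                               # illustrative configuration
    seq = ISO8601Sequence("PT6H", "20000101T0000Z")   # repeat every six hours from the start point
    start = seq.get_start_point()                     # first on-sequence point
    return start, seq.get_next_point(start)           # and the following point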
def _get_old_anchor_step_recurrence(anchor, step, start_point):
"""Return a string representing an old-format recurrence translation."""
anchor_point = ISO8601Point.from_nonstandard_string(anchor)
# We may need to adjust the anchor downwards if it is ahead of the start.
if start_point is not None:
while anchor_point >= start_point + step:
anchor_point -= step
return str(anchor_point) + "/" + str(step)
def ingest_time(value, now=None):
"""Handle relative, truncated and prev/next cycle points.
Args:
value (str):
The string containing the prev()/next() stuff.
now (metomi.isodatetime.data.TimePoint):
A time point to use as the context for resolving the value.
"""
# remove extraneous whitespace from cycle point
value = value.replace(" ", "")
parser = SuiteSpecifics.point_parser
# integer point or old-style date-time cycle point format
is_integer = bool(re.match(r"\d+$", value))
# iso8601 expanded year
is_expanded = (
(value.startswith("-") or value.startswith("+"))
and "P" not in value
)
# prev() or next()
is_prev_next = "next" in value or "previous" in value
# offset from now (±P...)
is_offset = value.startswith("P") or value.startswith("-P")
if (
is_integer
or is_expanded
):
# we don't need to do any fancy processing
return value
# parse the timepoint if needed
if is_prev_next or is_offset:
# `value` isn't necessarily valid ISO8601
timepoint = None
is_truncated = None
else:
timepoint = parser.parse(value)
# missing date-time components off the front (e.g. 01T00)
is_truncated = timepoint.truncated
if not any((is_prev_next, is_offset, is_truncated)):
return value
if now is None:
now = parser.parse(get_current_time_string())
else:
now = parser.parse(now)
# correct for year in 'now' if year only,
# or year and time, specified in input
if re.search(r"\(-\d{2}[);T]", value):
now.year += 1
# correct for month in 'now' if year and month only,
# or year, month and time, specified in input
elif re.search(r"\(-\d{4}[);T]", value):
now.month_of_year += 1
# perform whatever transformation is required
offset = None
if is_prev_next:
cycle_point, offset = prev_next(value, now, parser)
elif is_offset:
cycle_point = now
offset = value
else: # is_truncated
cycle_point = now + timepoint
if offset is not None:
# add/subtract offset duration to/from chosen timepoint
duration_parser = SuiteSpecifics.interval_parser
offset = offset.replace('+', '')
offset = duration_parser.parse(offset)
cycle_point = cycle_point + offset
return str(cycle_point)
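# A minimal usage sketch (not part of the module): given an explicit "now",
# ingest_time() turns relative offsets and prev()/next() expressions into
# concrete cycle point strings (the exact output format depends on the
# configured parsers).  The inputs below are illustrative only.
def _ingest_time_sketch():
    init(time_zone="Z")                                  # illustrative configuration
    return (
        ingest_time("PT1H", now="20000101T0000Z"),       # one hour after "now"
        ingest_time("next(T00)", now="20000101T0500Z"),  # next occurrence of hour 00
    )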
def prev_next(value, now, parser):
"""Handle prev() and next() syntax.
Args:
value (str):
The string containing the prev()/next() stuff.
now (metomi.isodatetime.data.TimePoint):
A time point to use as the context for resolving the value.
parser (metomi.isodatetime.parsers.TimePointParser):
A time point parser.
Returns
tuple - (cycle_point, offset)
"""
    # are we in gregorian mode (or some other eccentric calendar)?
if CALENDAR.mode != Calendar.MODE_GREGORIAN:
raise CylcConfigError(
'prev()/next() syntax must be used with integer or gregorian'
f' cycling modes ("{value}")'
)
# break down cycle point into constituent parts.
direction, tmp = value.split("(")
tmp, offset = tmp.split(")")
if offset.strip() == '':
offset = None
else:
offset = offset.strip()
timepoints = tmp.split(";")
# for use with 'previous' below.
go_back = {
"minute_of_hour": "PT1M",
"hour_of_day": "PT1H",
"day_of_week": "P1D",
"day_of_month": "P1D",
"day_of_year": "P1D",
"week_of_year": "P1W",
"month_of_year": "P1M",
"year_of_decade": "P1Y",
"decade_of_century": "P10Y",
"year_of_century": "P1Y",
"century": "P100Y"}
for i_time, my_time in enumerate(timepoints):
parsed_point = parser.parse(my_time.strip())
timepoints[i_time] = parsed_point + now
if direction == 'previous':
# for 'previous' determine next largest unit,
# from go_back dict (defined outside 'for' loop), and
# subtract 1 of it from each timepoint
duration_parser = SuiteSpecifics.interval_parser
next_unit = parsed_point.get_smallest_missing_property_name()
timepoints[i_time] = (
timepoints[i_time] -
duration_parser.parse(go_back[next_unit]))
my_diff = [abs(my_time - now) for my_time in timepoints]
cycle_point = timepoints[my_diff.index(min(my_diff))]
    # ensure truncated dates do not have
    # time from 'now' included
if 'T' not in value.split(')')[0]:
cycle_point.hour_of_day = 0
cycle_point.minute_of_hour = 0
cycle_point.second_of_minute = 0
# ensure month and day from 'now' are not included
# where they did not appear in the truncated datetime
# NOTE: this may break when the order of tick over
# for time point is reversed!!!
# https://github.com/metomi/isodatetime/pull/101
# case 1 - year only
if re.search(r"\(-\d{2}[);T]", value):
cycle_point.month_of_year = 1
cycle_point.day_of_month = 1
# case 2 - month only or year and month
elif re.search(r"\(-(-\d{2}|\d{4})[;T)]", value):
cycle_point.day_of_month = 1
return cycle_point, offset
def init_from_cfg(cfg):
"""Initialise global variables (yuk) based on the configuration."""
num_expanded_year_digits = cfg['cylc'][
'cycle point num expanded year digits']
time_zone = cfg['cylc']['cycle point time zone']
custom_dump_format = cfg['cylc']['cycle point format']
assume_utc = cfg['cylc']['UTC mode']
cycling_mode = cfg['scheduling']['cycling mode']
init(
num_expanded_year_digits=num_expanded_year_digits,
custom_dump_format=custom_dump_format,
time_zone=time_zone,
assume_utc=assume_utc,
cycling_mode=cycling_mode
)
def init(num_expanded_year_digits=0, custom_dump_format=None, time_zone=None,
assume_utc=False, cycling_mode=None):
"""Initialise suite-setup-specific information."""
if cycling_mode in Calendar.default().MODES:
Calendar.default().set_mode(cycling_mode)
if time_zone is None:
if assume_utc:
time_zone = "Z"
time_zone_hours_minutes = (0, 0)
else:
time_zone = get_local_time_zone_format(TimeZoneFormatMode.reduced)
time_zone_hours_minutes = get_local_time_zone()
else:
time_zone_hours_minutes = TimePointDumper().get_time_zone(time_zone)
SuiteSpecifics.ASSUMED_TIME_ZONE = time_zone_hours_minutes
SuiteSpecifics.NUM_EXPANDED_YEAR_DIGITS = num_expanded_year_digits
if custom_dump_format is None:
if num_expanded_year_digits > 0:
SuiteSpecifics.DUMP_FORMAT = EXPANDED_DATE_TIME_FORMAT + time_zone
else:
SuiteSpecifics.DUMP_FORMAT = DATE_TIME_FORMAT + time_zone
else:
SuiteSpecifics.DUMP_FORMAT = custom_dump_format
if "+X" not in custom_dump_format and num_expanded_year_digits:
raise IllegalValueError(
'cycle point format',
('cylc', 'cycle point format'),
SuiteSpecifics.DUMP_FORMAT
)
SuiteSpecifics.iso8601_parsers = CylcTimeParser.initiate_parsers(
dump_format=SuiteSpecifics.DUMP_FORMAT,
num_expanded_year_digits=num_expanded_year_digits,
assumed_time_zone=SuiteSpecifics.ASSUMED_TIME_ZONE
)
(SuiteSpecifics.point_parser,
SuiteSpecifics.interval_parser,
SuiteSpecifics.recurrence_parser) = SuiteSpecifics.iso8601_parsers
SuiteSpecifics.abbrev_util = CylcTimeParser(
None, None, SuiteSpecifics.iso8601_parsers
)
def get_dump_format():
"""Return cycle point string dump format."""
return SuiteSpecifics.DUMP_FORMAT
def get_point_relative(offset_string, base_point):
"""Create a point from offset_string applied to base_point."""
try:
interval = ISO8601Interval(str(interval_parse(offset_string)))
except IsodatetimeError:
return ISO8601Point(str(
SuiteSpecifics.abbrev_util.parse_timepoint(
offset_string, context_point=_point_parse(base_point.value))
))
else:
return base_point + interval
def interval_parse(interval_string):
"""Parse an interval_string into a proper Duration class."""
try:
return _interval_parse(interval_string).copy()
except Exception:
try:
return -1 * _interval_parse(
interval_string.replace("-", "", 1)).copy()
except Exception:
return _interval_parse(
interval_string.replace("+", "", 1)).copy()
def is_offset_absolute(offset_string):
"""Return True if offset_string is a point rather than an interval."""
try:
interval_parse(offset_string)
except Exception:
return True
else:
return False
@lru_cache(10000)
def _interval_parse(interval_string):
"""Parse an interval_string into a proper Duration object."""
return SuiteSpecifics.interval_parser.parse(interval_string)
def point_parse(point_string):
"""Parse a point_string into a proper TimePoint object."""
return _point_parse(point_string).copy()
@lru_cache(10000)
def _point_parse(point_string):
"""Parse a point_string into a proper TimePoint object."""
if "%" in SuiteSpecifics.DUMP_FORMAT:
# May be a custom not-quite ISO 8601 dump format.
try:
return SuiteSpecifics.point_parser.strptime(
point_string, SuiteSpecifics.DUMP_FORMAT)
except IsodatetimeError:
pass
# Attempt to parse it in ISO 8601 format.
return SuiteSpecifics.point_parser.parse(point_string)
|
gpl-3.0
| -7,118,441,874,249,039,000
| 35.970085
| 79
| 0.615102
| false
| 3.975643
| false
| false
| false
|
kfix/SleepProxyServer
|
sleepproxy/sniff.py
|
1
|
1143
|
from select import select
from threading import Event, Thread
from scapy.config import conf
from scapy.data import ETH_P_ALL, MTU
class SnifferThread(Thread):
"""A thread which runs a scapy sniff, and can be stopped"""
def __init__(self, prn, filterexp, iface):
Thread.__init__(self) #make this a greenlet?
self._prn = prn
self._filterexp = filterexp
self._iface = iface
self._stop_recd = Event()
def run(self):
self._sniff()
def stop(self):
self._stop_recd.set()
def _sniff(self):
sock = conf.L2listen(type=ETH_P_ALL, filter=self._filterexp, iface=self._iface)
while 1:
try:
sel = select([sock], [], [], 1)
if sock in sel[0]:
p = sock.recv(MTU)
if p is None:
break
self._prn(p)
if self._stop_recd.is_set():
print "Breaking out of sniffer thread %s" % (self, )
break
except KeyboardInterrupt:
break
sock.close()
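# A minimal usage sketch (not part of the module); the interface name and BPF
# filter below are assumptions, and the callback simply receives each packet:
def _sniffer_example():
    def handle(pkt):
        print "sniffed %r" % (pkt.summary(),)
    t = SnifferThread(handle, filterexp="udp port 5353", iface="eth0")  # illustrative iface
    t.start()     # runs the scapy sniff loop in the background
    # ... later ...
    t.stop()      # sets the stop event; the loop exits within about a second
    t.join()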
|
bsd-2-clause
| 825,196,674,331,822,000
| 28.307692
| 87
| 0.504812
| false
| 3.822742
| false
| false
| false
|
midokura/python-midonetclient
|
src/midonetclient/application.py
|
1
|
18841
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Midokura PTE LTD.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Tomoe Sugihara <tomoe@midokura.com>, Midokura
# @author: Ryu Ishimoto <ryu@midokura.com>, Midokura
# @author: Artem Dmytrenko <art@midokura.com>, Midokura
import os
from midonetclient import vendor_media_type
from midonetclient.ad_route import AdRoute
from midonetclient.bgp import Bgp
from midonetclient.bridge import Bridge
from midonetclient.chain import Chain
from midonetclient.host import Host
from midonetclient.port import Port
from midonetclient.port_group import PortGroup
from midonetclient.ip_addr_group import IpAddrGroup
from midonetclient.resource_base import ResourceBase
from midonetclient.route import Route
from midonetclient.router import Router
from midonetclient.rule import Rule
from midonetclient.tenant import Tenant
from midonetclient.tunnel_zone import TunnelZone
from midonetclient.write_version import WriteVersion
from midonetclient.system_state import SystemState
from midonetclient.host_version import HostVersion
from midonetclient.load_balancer import LoadBalancer
from midonetclient.vip import VIP
from midonetclient.pool import Pool
from midonetclient.pool_member import PoolMember
from midonetclient.health_monitor import HealthMonitor
from midonetclient.pool_statistic import PoolStatistic
from midonetclient.vtep import Vtep
class Application(ResourceBase):
media_type = vendor_media_type.APPLICATION_JSON_V5
ID_TOKEN = '{id}'
IP_ADDR_TOKEN = '{ipAddr}'
def __init__(self, uri, dto, auth):
super(Application, self).__init__(uri, dto, auth)
def get_ad_route_template(self):
return self.dto['adRouteTemplate']
def get_bgp_template(self):
return self.dto['bgpTemplate']
def get_bridge_template(self):
return self.dto['bridgeTemplate']
def get_chain_template(self):
return self.dto['chainTemplate']
def get_host_template(self):
return self.dto['hostTemplate']
def get_port_group_template(self):
return self.dto['portGroupTemplate']
def get_ip_addr_group_template(self):
return self.dto['ipAddrGroupTemplate']
def get_port_template(self):
return self.dto['portTemplate']
def get_route_template(self):
return self.dto['routeTemplate']
def get_router_template(self):
return self.dto['routerTemplate']
def get_rule_template(self):
return self.dto['ruleTemplate']
def get_tenant_template(self):
return self.dto['tenantTemplate']
def get_tunnel_zone_template(self):
return self.dto['tunnelZoneTemplate']
def get_vtep_template(self):
return self.dto['vtepTemplate']
def get_write_version_uri(self):
return self.dto['writeVersion']
def get_system_state_uri(self):
return self.dto['systemState']
def get_host_versions_uri(self):
return self.dto['hostVersions']
#L4LB resources
def get_load_balancers_uri(self):
return self.dto['loadBalancers']
def get_vips_uri(self):
return self.dto['vips']
def get_pools_uri(self):
return self.dto['pools']
def get_pool_members_uri(self):
return self.dto['poolMembers']
def get_ports_uri(self):
return self.dto['ports']
def get_health_monitors_uri(self):
return self.dto['healthMonitors']
def get_pool_statistics_uri(self):
return self.dto['poolStatistics']
def get_load_balancer_template(self):
return self.dto['loadBalancerTemplate']
def get_vip_template(self):
return self.dto['vipTemplate']
def get_pool_template(self):
return self.dto['poolTemplate']
def get_pool_member_template(self):
return self.dto['poolMemberTemplate']
def get_health_monitor_template(self):
return self.dto['healthMonitorTemplate']
def get_pool_statistic_template(self):
return self.dto['poolStatisticTemplate']
def get_tenants(self, query):
headers = {'Accept':
vendor_media_type.APPLICATION_TENANT_COLLECTION_JSON}
return self.get_children(self.dto['tenants'], query, headers, Tenant)
def get_routers(self, query):
headers = {'Accept':
vendor_media_type.APPLICATION_ROUTER_COLLECTION_JSON}
return self.get_children(self.dto['routers'], query, headers, Router)
def get_bridges(self, query):
headers = {'Accept':
vendor_media_type.APPLICATION_BRIDGE_COLLECTION_JSON}
return self.get_children(self.dto['bridges'], query, headers, Bridge)
def get_ports(self, query):
headers = {'Accept':
vendor_media_type.APPLICATION_PORT_COLLECTION_JSON}
return self.get_children(self.dto['ports'], query, headers, Port)
def get_port_groups(self, query):
headers = {'Accept':
vendor_media_type.APPLICATION_PORTGROUP_COLLECTION_JSON}
return self.get_children(self.dto['portGroups'], query, headers,
PortGroup)
def get_ip_addr_groups(self, query):
headers = {'Accept':
vendor_media_type.APPLICATION_IP_ADDR_GROUP_COLLECTION_JSON}
return self.get_children(self.dto['ipAddrGroups'], query, headers,
IpAddrGroup)
def get_chains(self, query):
headers = {'Accept':
vendor_media_type.APPLICATION_CHAIN_COLLECTION_JSON}
return self.get_children(self.dto['chains'], query, headers, Chain)
def get_tunnel_zones(self, query):
headers = {'Accept':
vendor_media_type.APPLICATION_TUNNEL_ZONE_COLLECTION_JSON}
return self.get_children(self.dto['tunnelZones'], query, headers,
TunnelZone)
def get_tunnel_zone(self, id_):
return self._get_resource_by_id(TunnelZone, self.dto['tunnelZones'],
self.get_tunnel_zone_template(), id_)
def get_hosts(self, query):
headers = {'Accept':
vendor_media_type.APPLICATION_HOST_COLLECTION_JSON}
return self.get_children(self.dto['hosts'], query, headers, Host)
def delete_ad_route(self, id_):
return self._delete_resource_by_id(self.get_ad_route_template(), id_)
def get_ad_route(self, id_):
return self._get_resource_by_id(AdRoute, self.dto['adRoutes'],
self.get_ad_route_template(), id_)
def delete_bgp(self, id_):
return self._delete_resource_by_id(self.get_bgp_template(), id_)
def get_bgp(self, id_):
return self._get_resource_by_id(Bgp, None, self.get_bgp_template(),
id_)
def delete_bridge(self, id_):
return self._delete_resource_by_id(self.get_bridge_template(), id_)
def get_bridge(self, id_):
return self._get_resource_by_id(Bridge, self.dto['bridges'],
self.get_bridge_template(), id_)
def delete_chain(self, id_):
return self._delete_resource_by_id(self.get_chain_template(), id_)
def get_chain(self, id_):
return self._get_resource_by_id(Chain, self.dto['chains'],
self.get_chain_template(), id_)
def get_host(self, id_):
return self._get_resource_by_id(Host, self.dto['hosts'],
self.get_host_template(), id_)
def delete_port_group(self, id_):
return self._delete_resource_by_id(self.get_port_group_template(), id_)
def get_port_group(self, id_):
return self._get_resource_by_id(PortGroup, self.dto['portGroups'],
self.get_port_group_template(), id_)
def delete_ip_addr_group(self, id_):
return self._delete_resource_by_id(self.get_ip_addr_group_template(),
id_)
def get_ip_addr_group(self, id_):
return self._get_resource_by_id(IpAddrGroup, self.dto['ipAddrGroups'],
self.get_ip_addr_group_template(), id_)
def delete_port(self, id_):
return self._delete_resource_by_id(self.get_port_template(), id_)
def get_port(self, id_):
return self._get_resource_by_id(Port, None,
self.get_port_template(), id_)
def delete_route(self, id_):
return self._delete_resource_by_id(self.get_route_template(), id_)
def get_route(self, id_):
return self._get_resource_by_id(Route, None, self.get_route_template(),
id_)
def delete_router(self, id_):
return self._delete_resource_by_id(self.get_router_template(), id_)
def get_router(self, id_):
return self._get_resource_by_id(Router, self.dto['routers'],
self.get_router_template(), id_)
def delete_rule(self, id_):
return self._delete_resource_by_id(self.get_rule_template(), id_)
def get_rule(self, id_):
return self._get_resource_by_id(Rule, None, self.get_rule_template(),
id_)
def get_tenant(self, id_):
return self._get_resource_by_id(Tenant, self.dto['tenants'],
self.get_tenant_template(), id_)
def add_router(self):
return Router(self.dto['routers'], {}, self.auth)
def add_bridge(self):
return Bridge(self.dto['bridges'], {}, self.auth)
def add_port_group(self):
return PortGroup(self.dto['portGroups'], {}, self.auth)
def add_ip_addr_group(self):
return IpAddrGroup(self.dto['ipAddrGroups'], {}, self.auth)
def add_chain(self):
return Chain(self.dto['chains'], {}, self.auth)
def add_tunnel_zone(self):
return TunnelZone(self.dto['tunnelZones'], {}, self.auth)
def add_gre_tunnel_zone(self):
return TunnelZone(
self.dto['tunnelZones'], {'type': 'gre'}, self.auth,
vendor_media_type.APPLICATION_GRE_TUNNEL_ZONE_HOST_JSON,
vendor_media_type.APPLICATION_GRE_TUNNEL_ZONE_HOST_COLLECTION_JSON)
def add_vxlan_tunnel_zone(self):
return TunnelZone(
self.dto['tunnelZones'], {'type': 'vxlan'}, self.auth,
vendor_media_type.APPLICATION_TUNNEL_ZONE_HOST_JSON,
vendor_media_type.APPLICATION_TUNNEL_ZONE_HOST_COLLECTION_JSON)
def add_vtep_tunnel_zone(self):
return TunnelZone(
self.dto['tunnelZones'], {'type': 'vtep'}, self.auth,
vendor_media_type.APPLICATION_TUNNEL_ZONE_HOST_JSON,
vendor_media_type.APPLICATION_TUNNEL_ZONE_HOST_COLLECTION_JSON)
def get_write_version(self):
return self._get_resource(WriteVersion, None,
self.get_write_version_uri())
def get_system_state(self):
return self._get_resource(SystemState, None,
self.get_system_state_uri())
def get_host_versions(self, query):
headers = {'Accept':
vendor_media_type.APPLICATION_HOST_VERSION_JSON}
return self.get_children(self.dto['hostVersions'],
query, headers, HostVersion)
def _create_uri_from_template(self, template, token, value):
return template.replace(token, value)
def _get_resource(self, clazz, create_uri, uri):
return clazz(create_uri, {'uri': uri}, self.auth).get(
headers={'Content-Type': clazz.media_type,
'Accept': clazz.media_type})
def _get_resource_by_id(self, clazz, create_uri,
template, id_):
uri = self._create_uri_from_template(template,
self.ID_TOKEN,
id_)
return self._get_resource(clazz, create_uri, uri)
def _get_resource_by_ip_addr(self, clazz, create_uri,
template, ip_address):
uri = self._create_uri_from_template(template,
self.IP_ADDR_TOKEN,
ip_address)
return self._get_resource(clazz, create_uri, uri)
def _delete_resource_by_id(self, template, id_):
uri = self._create_uri_from_template(template,
self.ID_TOKEN,
id_)
self.auth.do_request(uri, 'DELETE')
def _delete_resource_by_ip_addr(self, template, ip_address):
uri = self._create_uri_from_template(template,
self.IP_ADDR_TOKEN,
ip_address)
self.auth.do_request(uri, 'DELETE')
def _upload_resource(self, clazz, create_uri, uri, body, headers):
return clazz(create_uri, {'uri': uri}, self.auth)\
.upload(create_uri, body, headers=headers)
#L4LB resources
def get_load_balancers(self, query):
headers = {'Accept':
vendor_media_type.APPLICATION_LOAD_BALANCER_COLLECTION_JSON}
return self.get_children(self.dto['loadBalancers'],
query, headers, LoadBalancer)
def get_vips(self, query):
headers = {'Accept':
vendor_media_type.APPLICATION_VIP_COLLECTION_JSON}
return self.get_children(self.dto['vips'], query, headers, VIP)
def get_pools(self, query):
headers = {'Accept':
vendor_media_type.APPLICATION_POOL_COLLECTION_JSON}
return self.get_children(self.dto['pools'], query, headers, Pool)
def get_pool_members(self, query):
headers = {'Accept':
vendor_media_type.APPLICATION_POOL_MEMBER_COLLECTION_JSON}
return self.get_children(self.dto['poolMembers'],
query, headers, PoolMember)
def get_health_monitors(self, query):
headers = {'Accept':
vendor_media_type.APPLICATION_HEALTH_MONITOR_COLLECTION_JSON}
return self.get_children(self.dto['healthMonitors'],
query, headers, HealthMonitor)
def get_pool_statistics(self, query):
headers = {'Accept':
vendor_media_type.APPLICATION_POOL_STATISTIC_COLLECTION_JSON}
return self.get_children(self.dto['poolStatistics'],
query, headers, PoolStatistic)
def get_load_balancer(self, id_):
return self._get_resource_by_id(LoadBalancer,
self.dto['loadBalancers'],
self.get_load_balancer_template(),
id_)
def get_vip(self, id_):
return self._get_resource_by_id(VIP,
self.dto['vips'],
self.get_vip_template(),
id_)
def get_pool(self, id_):
return self._get_resource_by_id(Pool,
self.dto['pools'],
self.get_pool_template(),
id_)
def get_pool_member(self, id_):
return self._get_resource_by_id(PoolMember,
self.dto['poolMembers'],
self.get_pool_member_template(),
id_)
def get_health_monitor(self, id_):
return self._get_resource_by_id(HealthMonitor,
self.dto['healthMonitors'],
self.get_health_monitor_template(),
id_)
def get_pool_statistic(self, id_):
return self._get_resource_by_id(PoolStatistic,
self.dto['poolStatistic'],
self.get_pool_statistic_template(),
id_)
def delete_load_balancer(self, id_):
return self._delete_resource_by_id(
self.get_load_balancer_template(), id_)
def delete_vip(self, id_):
return self._delete_resource_by_id(self.get_vip_template(), id_)
def delete_pool(self, id_):
return self._delete_resource_by_id(self.get_pool_template(), id_)
def delete_pool_member(self, id_):
return self._delete_resource_by_id(
self.get_pool_member_template(), id_)
def delete_health_monitor(self, id_):
return self._delete_resource_by_id(
self.get_health_monitor_template(), id_)
def delete_pool_statistic(self, id_):
return self._delete_resource_by_id(
self.get_pool_statistic_template(), id_)
def add_load_balancer(self):
return LoadBalancer(self.dto['loadBalancers'], {}, self.auth)
def add_vip(self):
return VIP(self.dto['vips'], {}, self.auth)
def add_pool(self):
return Pool(self.dto['pools'], {}, self.auth)
def add_pool_member(self):
return PoolMember(self.dto['poolMembers'], {}, self.auth)
def add_health_monitor(self):
return HealthMonitor(self.dto['healthMonitors'], {}, self.auth)
def add_pool_statistic(self):
return PoolStatistic(self.dto['poolStatistics'], {}, self.auth)
def get_vteps(self):
headers = {'Accept':
vendor_media_type.APPLICATION_VTEP_COLLECTION_JSON}
return self.get_children(self.dto['vteps'], {}, headers, Vtep)
def add_vtep(self):
return Vtep(self.dto['vteps'], {}, self.auth)
def get_vtep(self, mgmt_ip):
return self._get_resource_by_ip_addr(Vtep,
self.dto['vteps'],
self.get_vtep_template(),
mgmt_ip)
def delete_vtep(self, mgmt_ip):
return self._delete_resource_by_ip_addr(self.get_vtep_template(),
mgmt_ip)
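# Illustrative sketch (not part of the original module): how the {id} token in
# the template URIs returned by the get_*_template() methods gets expanded
# before the REST call is issued. The template string and UUID below are
# hypothetical values for demonstration only.
def _example_expand_router_template():
    template = 'http://localhost:8080/midonet-api/routers/{id}'
    router_id = '01234567-89ab-cdef-0123-456789abcdef'
    # Mirrors Application._create_uri_from_template(template, ID_TOKEN, id_),
    # which get_router()/delete_router() use to build the resource URI.
    return template.replace(Application.ID_TOKEN, router_id)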
|
apache-2.0
| -5,466,880,693,771,139,000
| 37.217039
| 80
| 0.581869
| false
| 3.918677
| false
| false
| false
|
sheppard/django-rest-framework
|
tests/test_generics.py
|
1
|
19410
|
from __future__ import unicode_literals
import pytest
from django.db import models
from django.shortcuts import get_object_or_404
from django.test import TestCase
from django.utils import six
from rest_framework import generics, renderers, serializers, status
from rest_framework.response import Response
from rest_framework.test import APIRequestFactory
from tests.models import (
BasicModel, ForeignKeySource, ForeignKeyTarget, RESTFrameworkModel
)
factory = APIRequestFactory()
# Models
class SlugBasedModel(RESTFrameworkModel):
text = models.CharField(max_length=100)
slug = models.SlugField(max_length=32)
# Model for regression test for #285
class Comment(RESTFrameworkModel):
email = models.EmailField()
content = models.CharField(max_length=200)
created = models.DateTimeField(auto_now_add=True)
# Serializers
class BasicSerializer(serializers.ModelSerializer):
class Meta:
model = BasicModel
class ForeignKeySerializer(serializers.ModelSerializer):
class Meta:
model = ForeignKeySource
class SlugSerializer(serializers.ModelSerializer):
slug = serializers.ReadOnlyField()
class Meta:
model = SlugBasedModel
fields = ('text', 'slug')
# Views
class RootView(generics.ListCreateAPIView):
queryset = BasicModel.objects.all()
serializer_class = BasicSerializer
class InstanceView(generics.RetrieveUpdateDestroyAPIView):
queryset = BasicModel.objects.exclude(text='filtered out')
serializer_class = BasicSerializer
class FKInstanceView(generics.RetrieveUpdateDestroyAPIView):
queryset = ForeignKeySource.objects.all()
serializer_class = ForeignKeySerializer
class SlugBasedInstanceView(InstanceView):
"""
    An instance view for a model with a slug field, looked up by slug.
"""
queryset = SlugBasedModel.objects.all()
serializer_class = SlugSerializer
lookup_field = 'slug'
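# Illustrative wiring (not part of the original test module): how the generic
# views above would typically be exposed in a urlconf. The URL patterns and
# names below are assumptions for demonstration only; the tests themselves
# call the views directly through APIRequestFactory.
from django.conf.urls import url
example_urlpatterns = [
    url(r'^basic/$', RootView.as_view(), name='basicmodel-list'),
    url(r'^basic/(?P<pk>[0-9]+)/$', InstanceView.as_view(),
        name='basicmodel-detail'),
    url(r'^slug/(?P<slug>[-\w]+)/$', SlugBasedInstanceView.as_view(),
        name='slugbasedmodel-detail'),
]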
# Tests
class TestRootView(TestCase):
def setUp(self):
"""
Create 3 BasicModel instances.
"""
items = ['foo', 'bar', 'baz']
for item in items:
BasicModel(text=item).save()
self.objects = BasicModel.objects
self.data = [
{'id': obj.id, 'text': obj.text}
for obj in self.objects.all()
]
self.view = RootView.as_view()
def test_get_root_view(self):
"""
        GET requests to ListCreateAPIView should return a list of objects.
"""
request = factory.get('/')
with self.assertNumQueries(1):
response = self.view(request).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, self.data)
def test_post_root_view(self):
"""
POST requests to ListCreateAPIView should create a new object.
"""
data = {'text': 'foobar'}
request = factory.post('/', data, format='json')
with self.assertNumQueries(1):
response = self.view(request).render()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data, {'id': 4, 'text': 'foobar'})
created = self.objects.get(id=4)
self.assertEqual(created.text, 'foobar')
def test_put_root_view(self):
"""
PUT requests to ListCreateAPIView should not be allowed
"""
data = {'text': 'foobar'}
request = factory.put('/', data, format='json')
with self.assertNumQueries(0):
response = self.view(request).render()
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
self.assertEqual(response.data, {"detail": 'Method "PUT" not allowed.'})
def test_delete_root_view(self):
"""
DELETE requests to ListCreateAPIView should not be allowed
"""
request = factory.delete('/')
with self.assertNumQueries(0):
response = self.view(request).render()
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
self.assertEqual(response.data, {"detail": 'Method "DELETE" not allowed.'})
def test_post_cannot_set_id(self):
"""
POST requests to create a new object should not be able to set the id.
"""
data = {'id': 999, 'text': 'foobar'}
request = factory.post('/', data, format='json')
with self.assertNumQueries(1):
response = self.view(request).render()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data, {'id': 4, 'text': 'foobar'})
created = self.objects.get(id=4)
self.assertEqual(created.text, 'foobar')
def test_post_error_root_view(self):
"""
POST requests to ListCreateAPIView in HTML should include a form error.
"""
data = {'text': 'foobar' * 100}
request = factory.post('/', data, HTTP_ACCEPT='text/html')
response = self.view(request).render()
expected_error = '<span class="help-block">Ensure this field has no more than 100 characters.</span>'
self.assertIn(expected_error, response.rendered_content.decode('utf-8'))
EXPECTED_QUERIES_FOR_PUT = 2
class TestInstanceView(TestCase):
def setUp(self):
"""
Create 3 BasicModel instances.
"""
items = ['foo', 'bar', 'baz', 'filtered out']
for item in items:
BasicModel(text=item).save()
self.objects = BasicModel.objects.exclude(text='filtered out')
self.data = [
{'id': obj.id, 'text': obj.text}
for obj in self.objects.all()
]
self.view = InstanceView.as_view()
self.slug_based_view = SlugBasedInstanceView.as_view()
def test_get_instance_view(self):
"""
GET requests to RetrieveUpdateDestroyAPIView should return a single object.
"""
request = factory.get('/1')
with self.assertNumQueries(1):
response = self.view(request, pk=1).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, self.data[0])
def test_post_instance_view(self):
"""
POST requests to RetrieveUpdateDestroyAPIView should not be allowed
"""
data = {'text': 'foobar'}
request = factory.post('/', data, format='json')
with self.assertNumQueries(0):
response = self.view(request).render()
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
self.assertEqual(response.data, {"detail": 'Method "POST" not allowed.'})
def test_put_instance_view(self):
"""
PUT requests to RetrieveUpdateDestroyAPIView should update an object.
"""
data = {'text': 'foobar'}
request = factory.put('/1', data, format='json')
with self.assertNumQueries(EXPECTED_QUERIES_FOR_PUT):
response = self.view(request, pk='1').render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(dict(response.data), {'id': 1, 'text': 'foobar'})
updated = self.objects.get(id=1)
self.assertEqual(updated.text, 'foobar')
def test_patch_instance_view(self):
"""
PATCH requests to RetrieveUpdateDestroyAPIView should update an object.
"""
data = {'text': 'foobar'}
request = factory.patch('/1', data, format='json')
with self.assertNumQueries(EXPECTED_QUERIES_FOR_PUT):
response = self.view(request, pk=1).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, {'id': 1, 'text': 'foobar'})
updated = self.objects.get(id=1)
self.assertEqual(updated.text, 'foobar')
def test_delete_instance_view(self):
"""
DELETE requests to RetrieveUpdateDestroyAPIView should delete an object.
"""
request = factory.delete('/1')
with self.assertNumQueries(2):
response = self.view(request, pk=1).render()
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertEqual(response.content, six.b(''))
ids = [obj.id for obj in self.objects.all()]
self.assertEqual(ids, [2, 3])
def test_get_instance_view_incorrect_arg(self):
"""
        GET requests with an incorrect pk type should raise 404, not 500.
Regression test for #890.
"""
request = factory.get('/a')
with self.assertNumQueries(0):
response = self.view(request, pk='a').render()
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_put_cannot_set_id(self):
"""
PUT requests to create a new object should not be able to set the id.
"""
data = {'id': 999, 'text': 'foobar'}
request = factory.put('/1', data, format='json')
with self.assertNumQueries(EXPECTED_QUERIES_FOR_PUT):
response = self.view(request, pk=1).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, {'id': 1, 'text': 'foobar'})
updated = self.objects.get(id=1)
self.assertEqual(updated.text, 'foobar')
def test_put_to_deleted_instance(self):
"""
PUT requests to RetrieveUpdateDestroyAPIView should return 404 if
an object does not currently exist.
"""
self.objects.get(id=1).delete()
data = {'text': 'foobar'}
request = factory.put('/1', data, format='json')
with self.assertNumQueries(1):
response = self.view(request, pk=1).render()
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_put_to_filtered_out_instance(self):
"""
        PUT requests to the URL of an instance that is filtered out should
        not be able to create new objects.
"""
data = {'text': 'foo'}
filtered_out_pk = BasicModel.objects.filter(text='filtered out')[0].pk
request = factory.put('/{0}'.format(filtered_out_pk), data, format='json')
response = self.view(request, pk=filtered_out_pk).render()
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_patch_cannot_create_an_object(self):
"""
PATCH requests should not be able to create objects.
"""
data = {'text': 'foobar'}
request = factory.patch('/999', data, format='json')
with self.assertNumQueries(1):
response = self.view(request, pk=999).render()
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertFalse(self.objects.filter(id=999).exists())
def test_put_error_instance_view(self):
"""
Incorrect PUT requests in HTML should include a form error.
"""
data = {'text': 'foobar' * 100}
request = factory.put('/', data, HTTP_ACCEPT='text/html')
response = self.view(request, pk=1).render()
expected_error = '<span class="help-block">Ensure this field has no more than 100 characters.</span>'
self.assertIn(expected_error, response.rendered_content.decode('utf-8'))
class TestFKInstanceView(TestCase):
def setUp(self):
"""
        Create 3 ForeignKeySource instances, each with its own ForeignKeyTarget.
"""
items = ['foo', 'bar', 'baz']
for item in items:
t = ForeignKeyTarget(name=item)
t.save()
ForeignKeySource(name='source_' + item, target=t).save()
self.objects = ForeignKeySource.objects
self.data = [
{'id': obj.id, 'name': obj.name}
for obj in self.objects.all()
]
self.view = FKInstanceView.as_view()
class TestOverriddenGetObject(TestCase):
"""
Test cases for a RetrieveUpdateDestroyAPIView that does NOT use the
queryset/model mechanism but instead overrides get_object()
"""
def setUp(self):
"""
Create 3 BasicModel instances.
"""
items = ['foo', 'bar', 'baz']
for item in items:
BasicModel(text=item).save()
self.objects = BasicModel.objects
self.data = [
{'id': obj.id, 'text': obj.text}
for obj in self.objects.all()
]
class OverriddenGetObjectView(generics.RetrieveUpdateDestroyAPIView):
"""
Example detail view for override of get_object().
"""
serializer_class = BasicSerializer
def get_object(self):
pk = int(self.kwargs['pk'])
return get_object_or_404(BasicModel.objects.all(), id=pk)
self.view = OverriddenGetObjectView.as_view()
def test_overridden_get_object_view(self):
"""
GET requests to RetrieveUpdateDestroyAPIView should return a single object.
"""
request = factory.get('/1')
with self.assertNumQueries(1):
response = self.view(request, pk=1).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, self.data[0])
# Regression test for #285
class CommentSerializer(serializers.ModelSerializer):
class Meta:
model = Comment
exclude = ('created',)
class CommentView(generics.ListCreateAPIView):
serializer_class = CommentSerializer
model = Comment
class TestCreateModelWithAutoNowAddField(TestCase):
def setUp(self):
self.objects = Comment.objects
self.view = CommentView.as_view()
def test_create_model_with_auto_now_add_field(self):
"""
Regression test for #285
https://github.com/tomchristie/django-rest-framework/issues/285
"""
data = {'email': 'foobar@example.com', 'content': 'foobar'}
request = factory.post('/', data, format='json')
response = self.view(request).render()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
created = self.objects.get(id=1)
self.assertEqual(created.content, 'foobar')
# Test for particularly ugly regression with m2m in browsable API
class ClassB(models.Model):
name = models.CharField(max_length=255)
class ClassA(models.Model):
name = models.CharField(max_length=255)
children = models.ManyToManyField(ClassB, blank=True, null=True)
class ClassASerializer(serializers.ModelSerializer):
children = serializers.PrimaryKeyRelatedField(
many=True, queryset=ClassB.objects.all()
)
class Meta:
model = ClassA
class ExampleView(generics.ListCreateAPIView):
serializer_class = ClassASerializer
queryset = ClassA.objects.all()
class TestM2MBrowsableAPI(TestCase):
def test_m2m_in_browsable_api(self):
"""
Test for particularly ugly regression with m2m in browsable API
"""
request = factory.get('/', HTTP_ACCEPT='text/html')
view = ExampleView().as_view()
response = view(request).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
class InclusiveFilterBackend(object):
def filter_queryset(self, request, queryset, view):
return queryset.filter(text='foo')
class ExclusiveFilterBackend(object):
def filter_queryset(self, request, queryset, view):
return queryset.filter(text='other')
class TwoFieldModel(models.Model):
field_a = models.CharField(max_length=100)
field_b = models.CharField(max_length=100)
class DynamicSerializerView(generics.ListCreateAPIView):
queryset = TwoFieldModel.objects.all()
renderer_classes = (renderers.BrowsableAPIRenderer, renderers.JSONRenderer)
def get_serializer_class(self):
if self.request.method == 'POST':
class DynamicSerializer(serializers.ModelSerializer):
class Meta:
model = TwoFieldModel
fields = ('field_b',)
else:
class DynamicSerializer(serializers.ModelSerializer):
class Meta:
model = TwoFieldModel
return DynamicSerializer
class TestFilterBackendAppliedToViews(TestCase):
def setUp(self):
"""
Create 3 BasicModel instances to filter on.
"""
items = ['foo', 'bar', 'baz']
for item in items:
BasicModel(text=item).save()
self.objects = BasicModel.objects
self.data = [
{'id': obj.id, 'text': obj.text}
for obj in self.objects.all()
]
def test_get_root_view_filters_by_name_with_filter_backend(self):
"""
        GET requests to ListCreateAPIView should return a filtered list.
"""
root_view = RootView.as_view(filter_backends=(InclusiveFilterBackend,))
request = factory.get('/')
response = root_view(request).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 1)
self.assertEqual(response.data, [{'id': 1, 'text': 'foo'}])
def test_get_root_view_filters_out_all_models_with_exclusive_filter_backend(self):
"""
        GET requests to ListCreateAPIView should return an empty list when all models are filtered out.
"""
root_view = RootView.as_view(filter_backends=(ExclusiveFilterBackend,))
request = factory.get('/')
response = root_view(request).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, [])
def test_get_instance_view_filters_out_name_with_filter_backend(self):
"""
GET requests to RetrieveUpdateDestroyAPIView should raise 404 when model filtered out.
"""
instance_view = InstanceView.as_view(filter_backends=(ExclusiveFilterBackend,))
request = factory.get('/1')
response = instance_view(request, pk=1).render()
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertEqual(response.data, {'detail': 'Not found.'})
def test_get_instance_view_will_return_single_object_when_filter_does_not_exclude_it(self):
"""
GET requests to RetrieveUpdateDestroyAPIView should return a single object when not excluded
"""
instance_view = InstanceView.as_view(filter_backends=(InclusiveFilterBackend,))
request = factory.get('/1')
response = instance_view(request, pk=1).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, {'id': 1, 'text': 'foo'})
def test_dynamic_serializer_form_in_browsable_api(self):
"""
        The browsable API form for DynamicSerializerView should be built from
        the POST serializer and therefore expose only field_b.
"""
view = DynamicSerializerView.as_view()
request = factory.get('/')
response = view(request).render()
self.assertContains(response, 'field_b')
self.assertNotContains(response, 'field_a')
class TestGuardedQueryset(TestCase):
def test_guarded_queryset(self):
class QuerysetAccessError(generics.ListAPIView):
queryset = BasicModel.objects.all()
def get(self, request):
return Response(list(self.queryset))
view = QuerysetAccessError.as_view()
request = factory.get('/')
with pytest.raises(RuntimeError):
view(request).render()
|
bsd-2-clause
| 8,100,674,122,528,714,000
| 34.680147
| 109
| 0.631685
| false
| 4.076875
| true
| false
| false
|
wwitzel3/awx
|
awx/main/tests/unit/api/serializers/conftest.py
|
1
|
1711
|
import mock
import pytest
@pytest.fixture
def get_related_assert():
def fn(model_obj, related, resource_name, related_resource_name):
assert related_resource_name in related
assert related[related_resource_name] == '/api/v2/%s/%d/%s/' % (resource_name, model_obj.pk, related_resource_name)
return fn
@pytest.fixture
def get_related_mock_and_run():
def fn(serializer_class, model_obj):
serializer = serializer_class()
related = serializer.get_related(model_obj)
return related
return fn
@pytest.fixture
def test_get_related(get_related_assert, get_related_mock_and_run):
def fn(serializer_class, model_obj, resource_name, related_resource_name):
related = get_related_mock_and_run(serializer_class, model_obj)
get_related_assert(model_obj, related, resource_name, related_resource_name)
return related
return fn
@pytest.fixture
def get_summary_fields_assert():
def fn(summary, summary_field_name):
assert summary_field_name in summary
return fn
@pytest.fixture
def get_summary_fields_mock_and_run():
def fn(serializer_class, model_obj):
serializer = serializer_class()
serializer.show_capabilities = []
serializer.context['view'] = mock.Mock(kwargs={})
return serializer.get_summary_fields(model_obj)
return fn
@pytest.fixture
def test_get_summary_fields(get_summary_fields_mock_and_run, get_summary_fields_assert):
def fn(serializer_class, model_obj, summary_field_name):
summary = get_summary_fields_mock_and_run(serializer_class, model_obj)
get_summary_fields_assert(summary, summary_field_name)
return summary
return fn
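# Illustrative sketch (not part of the original file): how a serializer unit
# test might consume the test_get_related fixture above. A test like this
# would normally live in a test module next to this conftest; the serializer
# class, model object and the 'widgets'/'labels' names are mock stand-ins so
# the example stays self-contained.
def test_example_get_related_usage(test_get_related):
    model_obj = mock.Mock(pk=42)
    fake_serializer_class = mock.Mock()
    fake_serializer_class.return_value.get_related.return_value = {
        'labels': '/api/v2/widgets/42/labels/'
    }
    related = test_get_related(fake_serializer_class, model_obj,
                               'widgets', 'labels')
    assert related['labels'] == '/api/v2/widgets/42/labels/'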
|
apache-2.0
| 6,837,339,957,018,921,000
| 30.685185
| 123
| 0.6955
| false
| 3.648188
| true
| false
| false
|
BV-DR/foamBazar
|
ideFoam/inputFiles/transportProperties.py
|
1
|
1464
|
from ideFoam.inputFiles import ReadWriteFile, getFilePath
from PyFoam.Basics.DataStructures import DictProxy
from os.path import join
from ideFoam.inputFiles.compatOF import water, air
"""
Convenience class to simply write "TransportProperties"
"""
class TransportProperties(ReadWriteFile) :
"""
    TransportProperties dictionary
"""
@classmethod
def Build(cls , case, rhoWater = 1000 , nuWater = 1e-6, rhoAir = 1. , nuAir = 1.48e-05, sigma = 0.0 , application = "foamStar") :
res = cls( name = join(case, getFilePath("transportProperties") ), read = False )
res.header["class"] = "dictionary"
if application == "foamStar" : res["phases"] = ["water" , "air"]
dw = DictProxy()
dw["transportModel"] = "Newtonian"
dw["nu"] = "nu [0 2 -1 0 0 0 0] {}".format(nuWater)
dw["rho"] = "rho [1 -3 0 0 0 0 0] {}".format(rhoWater)
res['"'+water[application]+'"'] = dw
da = DictProxy()
        da["transportModel"] = "Newtonian"
da["nu"] = "nu [0 2 -1 0 0 0 0] {}".format(nuAir)
da["rho"] = "rho [1 -3 0 0 0 0 0] {}".format(rhoAir)
res['"'+air[application]+'"'] = da
res[r"sigma"] = "sigma [1 0 -2 0 0 0 0] {}".format(sigma)
return res
if __name__ == "__main__" :
print(TransportProperties.Build("test" , application = "foamExtend"))
|
gpl-3.0
| 7,376,739,367,757,549,000
| 32.272727
| 134
| 0.551913
| false
| 3.428571
| false
| false
| false
|
caspartse/QQ-Groups-Spider
|
vendor/pyexcel/plugins/renderers/sqlalchemy.py
|
1
|
2226
|
"""
pyexcel.plugin.renderers.sqlalchemy
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    Export data into database tables
:copyright: (c) 2015-2017 by Onni Software Ltd.
:license: New BSD License
"""
from pyexcel_io import save_data
import pyexcel_io.database.common as sql
from pyexcel._compact import OrderedDict
from pyexcel.renderer import DbRenderer
import pyexcel.internal.common as common
class SQLAlchemyRenderer(DbRenderer):
"""Import data into database"""
def render_sheet_to_stream(self, file_stream, sheet,
init=None, mapdict=None, **keywords):
headers = common.get_sheet_headers(sheet)
importer = sql.SQLTableImporter(file_stream[0])
adapter = sql.SQLTableImportAdapter(file_stream[1])
adapter.column_names = headers
adapter.row_initializer = init
adapter.column_name_mapping_dict = mapdict
importer.append(adapter)
save_data(importer, {adapter.get_name(): sheet.get_internal_array()},
file_type=self._file_type, **keywords)
def render_book_to_stream(self, file_stream, book,
inits=None, mapdicts=None, **keywords):
session, tables = file_stream
thebook = book
initializers = inits
colnames_array = common.get_book_headers_in_array(book)
if initializers is None:
initializers = [None] * len(tables)
if mapdicts is None:
mapdicts = [None] * len(tables)
scattered = zip(tables, colnames_array, mapdicts, initializers)
importer = sql.SQLTableImporter(session)
for each_table in scattered:
adapter = sql.SQLTableImportAdapter(each_table[0])
adapter.column_names = each_table[1]
adapter.column_name_mapping_dict = each_table[2]
adapter.row_initializer = each_table[3]
importer.append(adapter)
to_store = OrderedDict()
for sheet in thebook:
            # Note: book.to_dict() would bring in column_names, which would
            # corrupt the data, so read the internal arrays directly
to_store[sheet.name] = sheet.get_internal_array()
save_data(importer, to_store, file_type=self._file_type, **keywords)
|
mit
| -2,864,051,646,709,129,700
| 38.75
| 77
| 0.624888
| false
| 3.989247
| false
| false
| false
|
gkc1000/pyscf
|
pyscf/nao/m_gpaw_wfsx.py
|
1
|
1781
|
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
import os
import sys
import numpy as np
from numpy import zeros, empty
import warnings
class gpaw_wfsx_c():
def __init__(self, calc):
"""
Gathers the information on the available wavefunctions
(Kohn-Sham or Hartree-Fock orbitals)
"""
assert calc.wfs.mode.lower()=='lcao'
    self.nreim = 1 # keep only the real part, even though GPAW LCAO coefficients are complex in general
self.nspin = calc.get_number_of_spins()
self.norbs = calc.setups.nao
self.nbands= calc.parameters['nbands']
self.k2xyz = calc.parameters['kpts']
self.nkpoints = len(self.k2xyz)
self.ksn2e = np.zeros((self.nkpoints, self.nspin, self.nbands))
for ik in range(self.nkpoints):
for spin in range(self.nspin):
self.ksn2e[ik, spin, :] = calc.wfs.collect_eigenvalues(spin,ik)
# Import wavefunctions from GPAW calculator
self.x = np.zeros((self.nkpoints, self.nspin, self.nbands, self.norbs, self.nreim))
for k in range(calc.wfs.kd.nibzkpts):
for s in range(calc.wfs.nspins):
C_nM = calc.wfs.collect_array('C_nM', k, s)
self.x[k, s, :, :, 0] = C_nM.real
|
apache-2.0
| -6,172,492,844,675,328,000
| 36.104167
| 87
| 0.691746
| false
| 3.310409
| false
| false
| false
|
jrversteegh/softsailor
|
deps/scipy-0.10.0b2/scipy/misc/__init__.py
|
2
|
1914
|
"""
==========================================
Miscellaneous routines (:mod:`scipy.misc`)
==========================================
.. currentmodule:: scipy.misc
Various utilities that don't have another home.
Note that the Python Imaging Library (PIL) is not a dependency
of SciPy and therefore the `pilutil` module is not available on
systems that don't have PIL installed.
.. autosummary::
:toctree: generated/
bytescale - Byte scales an array (image)
central_diff_weights - Weights for an n-point central m-th derivative
comb - Combinations of N things taken k at a time, "N choose k"
   derivative - Find the n-th derivative of a function at a point
factorial - The factorial function, n! = special.gamma(n+1)
factorial2 - Double factorial, (n!)!
factorialk - (...((n!)!)!...)! where there are k '!'
fromimage - Return a copy of a PIL image as a numpy array
imfilter - Simple filtering of an image
imread - Read an image file from a filename
imresize - Resize an image
imrotate - Rotate an image counter-clockwise
imsave - Save an array to an image file
imshow - Simple showing of an image through an external viewer
info - Get help information for a function, class, or module
lena - Get classic image processing example image Lena
pade - Pade approximation to function as the ratio of two polynomials
radon -
toimage - Takes a numpy array and returns a PIL image
"""
__all__ = ['who', 'source', 'info', 'doccer']
import doccer
from common import *
from numpy import who, source, info as _info
import sys
def info(object=None,maxwidth=76,output=sys.stdout,toplevel='scipy'):
return _info(object, maxwidth, output, toplevel)
info.__doc__ = _info.__doc__
del sys
try:
from pilutil import *
__all__ += pilutil.__all__
except ImportError:
pass
__all__ += common.__all__
from numpy.testing import Tester
test = Tester().test
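# Illustrative usage sketch (not part of the original module): a few of the
# routines listed in the module docstring above.
def _example_misc_usage():
    from scipy.misc import comb, factorial, central_diff_weights
    assert comb(5, 2) == 10.0        # "5 choose 2"
    assert factorial(4) == 24.0      # 4!
    # Weights of a 3-point central difference for the first derivative:
    # array([-0.5, 0., 0.5])
    return central_diff_weights(3)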
|
gpl-3.0
| 5,945,414,943,550,416,000
| 30.9
| 72
| 0.673981
| false
| 3.812749
| false
| false
| false
|
naresh21/synergetics-edx-platform
|
lms/djangoapps/edcast/api.py
|
1
|
3170
|
import logging
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import status
from student.models import User
from enrollment import api
from course_modes.models import CourseMode
from openedx.core.lib.exceptions import CourseNotFoundError
from enrollment.errors import (
CourseEnrollmentError,
CourseModeNotFoundError,
CourseEnrollmentExistsError
)
from .authentication import JSONWebTokenAuthenticationQS
log = logging.getLogger("Edcast")
class EdcastUserEnroll(APIView):
authentication_classes = [JSONWebTokenAuthenticationQS]
def post(self, request):
data = request.json or request.data
user_details = data.get("user")
course_details = data.get("payment")
course_id = course_details.get("course_id")
email = user_details.get("email")
try:
user = User.objects.get(email=email)
except User.DoesNotExist:
log.exception("Invalid user trying to enroll course")
error = {"error_message": "Invalid user"}
return Response(error, status=status.HTTP_401_UNAUTHORIZED)
try:
username = user.username
enrollment = api.get_enrollment(username, unicode(course_id))
if not enrollment:
api.add_enrollment(username, unicode(course_id), mode=CourseMode.HONOR, is_active=True)
return Response({})
except CourseNotFoundError:
log.exception("Invalid course id.")
return Response(
status=status.HTTP_400_BAD_REQUEST,
data={
"message": u"No course '{course_id}' found for enrollment".format(course_id=course_id)
})
except CourseModeNotFoundError:
            log.exception("Course mode not defined for the {course_id}".format(course_id=course_id))
return Response(
status=status.HTTP_400_BAD_REQUEST,
data={
"message": (
u"The [{mode}] course mode is expired or otherwise unavailable for course run [{course_id}]."
).format(mode=CourseMode.HONOR, course_id=course_id)
})
except CourseEnrollmentExistsError as error:
log.warning('An enrollment already exists for user [%s] in course run [%s].', username, course_id)
return Response(data={
"error": "User already enrolled in the {course_id}".format(course_id=course_id)
})
except CourseEnrollmentError:
log.exception("An error occurred while creating the new course enrollment for user "
"[%s] in course run [%s]", username, course_id)
return Response(
status=status.HTTP_400_BAD_REQUEST,
data={
"message": (
u"An error occurred while creating the new course enrollment for user "
u"'{username}' in course '{course_id}'"
).format(username=username, course_id=course_id)
}
)
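# Illustrative sketch (not part of the original module): the request body
# shape that EdcastUserEnroll.post() reads. The values are placeholders
# inferred from the code above, not taken from Edcast documentation.
EXAMPLE_ENROLL_PAYLOAD = {
    "user": {"email": "learner@example.com"},
    "payment": {"course_id": "course-v1:ORG+NUM+RUN"},
}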
|
agpl-3.0
| 7,598,553,352,521,704,000
| 37.658537
| 117
| 0.603785
| false
| 4.738416
| false
| false
| false
|
Stargrazer82301/CAAPR
|
CAAPR/CAAPR_AstroMagic/PTS/pts/core/basics/quantity.py
|
1
|
3116
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.core.basics.quantity Contains the Quantity class, representing floating point values with a certain
# uncertainty
# -----------------------------------------------------------------
# Import standard modules
import math
# -----------------------------------------------------------------
class Quantity(object):
"""
This class ...
"""
def __init__(self, value, error=None):
"""
The constructor ...
"""
# Set the attributes
self.value = value
self.error = error
# -----------------------------------------------------------------
@property
def relative_error(self):
"""
This function ...
:return:
"""
return self.error / self.value
# -----------------------------------------------------------------
def __add__(self, quantity):
"""
This function ...
:param quantity:
:return:
"""
value = self.value + quantity.value
error = math.sqrt(math.pow(self.error, 2) + math.pow(quantity.error, 2))
return Quantity(value, error)
# -----------------------------------------------------------------
def __sub__(self, quantity):
"""
This function ...
:param quantity:
:return:
"""
value = self.value - quantity.value
error = math.sqrt(math.pow(self.error, 2) + math.pow(quantity.error, 2))
return Quantity(value, error)
# -----------------------------------------------------------------
def __mul__(self, quantity):
"""
This function ...
:param quantity:
:return:
"""
value = self.value * quantity.value
error = math.sqrt(math.pow(quantity.value * self.error, 2) + math.pow(self.value * quantity.error, 2))
return Quantity(value, error)
# -----------------------------------------------------------------
def __div__(self, quantity):
"""
This function ...
:param quantity:
:return:
"""
value = self.value / quantity.value
error = math.fabs(value) * math.sqrt(math.pow(self.relative_error, 2) + math.pow(quantity.relative_error, 2))
return Quantity(value, error)
# -----------------------------------------------------------------
def __truediv__(self, quantity):
"""
This function ...
:param quantity:
:return:
"""
value = self.value / quantity.value
error = math.fabs(value) * math.sqrt(math.pow(self.relative_error, 2) + math.pow(quantity.relative_error, 2))
return Quantity(value, error)
# -----------------------------------------------------------------
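# -----------------------------------------------------------------
# Illustrative worked example (not part of the original module): quadrature
# error propagation for a product, checked against the __mul__ formula above.
def _example_error_propagation():
    a = Quantity(10.0, 0.5)
    b = Quantity(3.0, 0.2)
    c = a * b
    # value: 10.0 * 3.0 = 30.0
    # error: sqrt((3.0*0.5)**2 + (10.0*0.2)**2) = sqrt(2.25 + 4.0) = 2.5
    assert c.value == 30.0
    assert abs(c.error - 2.5) < 1e-12
    return c
# -----------------------------------------------------------------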
|
mit
| 8,029,924,372,909,666,000
| 25.853448
| 117
| 0.402889
| false
| 5
| false
| false
| false
|
cloudwatt/contrail-controller
|
src/opserver/opserver.py
|
1
|
79582
|
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
#
# Opserver
#
# Operational State Server for VNC
#
from gevent import monkey
monkey.patch_all()
try:
from collections import OrderedDict
except ImportError:
# python 2.6 or earlier, use backport
from ordereddict import OrderedDict
from uveserver import UVEServer
import sys
import ConfigParser
import bottle
import json
import uuid
import argparse
import time
import redis
import base64
import socket
import struct
import errno
import copy
import datetime
import pycassa
from analytics_db import AnalyticsDb
from pycassa.pool import ConnectionPool
from pycassa.columnfamily import ColumnFamily
from pysandesh.util import UTCTimestampUsec
from pysandesh.sandesh_base import *
from pysandesh.sandesh_session import SandeshWriter
from pysandesh.gen_py.sandesh_trace.ttypes import SandeshTraceRequest
from pysandesh.connection_info import ConnectionState
from pysandesh.gen_py.process_info.ttypes import ConnectionType,\
ConnectionStatus
from sandesh_common.vns.ttypes import Module, NodeType
from sandesh_common.vns.constants import ModuleNames, CategoryNames,\
ModuleCategoryMap, Module2NodeType, NodeTypeNames, ModuleIds,\
INSTANCE_ID_DEFAULT, COLLECTOR_DISCOVERY_SERVICE_NAME,\
ANALYTICS_API_SERVER_DISCOVERY_SERVICE_NAME
from sandesh.viz.constants import _TABLES, _OBJECT_TABLES,\
_OBJECT_TABLE_SCHEMA, _OBJECT_TABLE_COLUMN_VALUES, \
_STAT_TABLES, STAT_OBJECTID_FIELD, STAT_VT_PREFIX, \
STAT_TIME_FIELD, STAT_TIMEBIN_FIELD, STAT_UUID_FIELD, \
STAT_SOURCE_FIELD, SOURCE, MODULE
from sandesh.viz.constants import *
from sandesh.analytics.ttypes import *
from sandesh.analytics.cpuinfo.ttypes import ProcessCpuInfo
from sandesh.discovery.ttypes import CollectorTrace
from opserver_util import OpServerUtils
from opserver_util import ServicePoller
from cpuinfo import CpuInfoData
from sandesh_req_impl import OpserverSandeshReqImpl
from sandesh.analytics_database.ttypes import *
from sandesh.analytics_database.constants import PurgeStatusString
from overlay_to_underlay_mapper import OverlayToUnderlayMapper, \
OverlayToUnderlayMapperError
_ERRORS = {
errno.EBADMSG: 400,
errno.ENOBUFS: 403,
errno.EINVAL: 404,
errno.ENOENT: 410,
errno.EIO: 500,
errno.EBUSY: 503
}
@bottle.error(400)
@bottle.error(403)
@bottle.error(404)
@bottle.error(410)
@bottle.error(500)
@bottle.error(503)
def opserver_error(err):
return err.body
#end opserver_error
class LinkObject(object):
def __init__(self, name, href):
self.name = name
self.href = href
# end __init__
# end class LinkObject
def obj_to_dict(obj):
# Non-null fields in object get converted to json fields
return dict((k, v) for k, v in obj.__dict__.iteritems())
# end obj_to_dict
def redis_query_start(host, port, redis_password, qid, inp):
redish = redis.StrictRedis(db=0, host=host, port=port,
password=redis_password)
for key, value in inp.items():
redish.hset("QUERY:" + qid, key, json.dumps(value))
query_metadata = {}
query_metadata['enqueue_time'] = OpServerUtils.utc_timestamp_usec()
redish.hset("QUERY:" + qid, 'query_metadata', json.dumps(query_metadata))
redish.hset("QUERY:" + qid, 'enqueue_time',
OpServerUtils.utc_timestamp_usec())
redish.lpush("QUERYQ", qid)
res = redish.blpop("REPLY:" + qid, 10)
if res is None:
return None
# Put the status back on the queue for the use of the status URI
redish.lpush("REPLY:" + qid, res[1])
resp = json.loads(res[1])
return int(resp["progress"])
# end redis_query_start
def redis_query_status(host, port, redis_password, qid):
redish = redis.StrictRedis(db=0, host=host, port=port,
password=redis_password)
resp = {"progress": 0}
chunks = []
    # For now, the number of chunks will always be 1
res = redish.lrange("REPLY:" + qid, -1, -1)
if not res:
return None
chunk_resp = json.loads(res[0])
ttl = redish.ttl("REPLY:" + qid)
if int(ttl) != -1:
chunk_resp["ttl"] = int(ttl)
query_time = redish.hmget("QUERY:" + qid, ["start_time", "end_time"])
chunk_resp["start_time"] = query_time[0]
chunk_resp["end_time"] = query_time[1]
if chunk_resp["progress"] == 100:
chunk_resp["href"] = "/analytics/query/%s/chunk-final/%d" % (qid, 0)
chunks.append(chunk_resp)
resp["progress"] = chunk_resp["progress"]
resp["chunks"] = chunks
return resp
# end redis_query_status
def redis_query_chunk_iter(host, port, redis_password, qid, chunk_id):
redish = redis.StrictRedis(db=0, host=host, port=port,
password=redis_password)
iters = 0
fin = False
while not fin:
#import pdb; pdb.set_trace()
# Keep the result line valid while it is being read
redish.persist("RESULT:" + qid + ":" + str(iters))
elems = redish.lrange("RESULT:" + qid + ":" + str(iters), 0, -1)
yield elems
if elems == []:
fin = True
else:
redish.delete("RESULT:" + qid + ":" + str(iters), 0, -1)
iters += 1
return
# end redis_query_chunk_iter
def redis_query_chunk(host, port, redis_password, qid, chunk_id):
res_iter = redis_query_chunk_iter(host, port, redis_password, qid, chunk_id)
dli = u''
starter = True
fin = False
yield u'{"value": ['
outcount = 0
while not fin:
#import pdb; pdb.set_trace()
# Keep the result line valid while it is being read
elems = res_iter.next()
fin = True
for elem in elems:
fin = False
outcount += 1
if starter:
dli += '\n' + elem
starter = False
else:
dli += ', ' + elem
if not fin:
yield dli + '\n'
dli = u''
if outcount == 0:
yield '\n' + u']}'
else:
yield u']}'
return
# end redis_query_chunk
def redis_query_result(host, port, redis_password, qid):
try:
status = redis_query_status(host, port, redis_password, qid)
except redis.exceptions.ConnectionError:
# Update connection info
ConnectionState.update(conn_type = ConnectionType.REDIS,
name = 'Query', status = ConnectionStatus.DOWN,
message = 'Query[%s] result : Connection Error' % (qid),
server_addrs = ['%s:%d' % (host, port)])
yield bottle.HTTPError(_ERRORS[errno.EIO],
'Failure in connection to the query DB')
except Exception as e:
# Update connection info
ConnectionState.update(conn_type = ConnectionType.REDIS,
name = 'Query', status = ConnectionStatus.DOWN,
message = 'Query[%s] result : Exception: %s' % (qid, str(e)),
server_addrs = ['%s:%d' % (host, port)])
        sandesh_global._logger.error("Exception: %s" % e)
yield bottle.HTTPError(_ERRORS[errno.EIO], 'Error: %s' % e)
else:
if status is None:
yield bottle.HTTPError(_ERRORS[errno.ENOENT],
'Invalid query id (or) query result purged from DB')
if status['progress'] == 100:
for chunk in status['chunks']:
chunk_id = int(chunk['href'].rsplit('/', 1)[1])
for gen in redis_query_chunk(host, port, redis_password, qid,
chunk_id):
yield gen
else:
yield {}
# Update connection info
ConnectionState.update(conn_type = ConnectionType.REDIS,
message = None,
status = ConnectionStatus.UP,
server_addrs = ['%s:%d' % (host, port)],
name = 'Query')
return
# end redis_query_result
def redis_query_result_dict(host, port, redis_password, qid):
stat = redis_query_status(host, port, redis_password, qid)
prg = int(stat["progress"])
res = []
if (prg < 0) or (prg == 100):
done = False
gen = redis_query_result(host, port, redis_password, qid)
result = u''
while not done:
try:
result += gen.next()
#import pdb; pdb.set_trace()
except StopIteration:
done = True
res = (json.loads(result))['value']
return prg, res
# end redis_query_result_dict
def redis_query_info(redish, qid):
query_data = {}
query_dict = redish.hgetall('QUERY:' + qid)
query_metadata = json.loads(query_dict['query_metadata'])
del query_dict['query_metadata']
query_data['query_id'] = qid
query_data['query'] = str(query_dict)
query_data['enqueue_time'] = query_metadata['enqueue_time']
return query_data
# end redis_query_info
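# Illustrative sketch (not part of the original module): how the redis query
# helpers above fit together for a single query. The host/port defaults and
# the query dictionary fields are placeholders; in production the
# /analytics/query REST API drives this flow rather than calling the helpers
# directly.
def _example_redis_query_roundtrip(host='127.0.0.1', port=6379,
                                   redis_password=None):
    qid = str(uuid.uuid1())
    query = {'table': 'MessageTable',
             'start_time': 'now-10m', 'end_time': 'now',
             'select_fields': ['MessageTS', 'Source', 'Xmlmessage']}
    if redis_query_start(host, port, redis_password, qid, query) is None:
        return None  # query engine did not pick up the request in time
    # Poll the REPLY list until the query engine reports completion.
    while True:
        status = redis_query_status(host, port, redis_password, qid)
        if status is None:
            return None
        prg = int(status['progress'])
        if prg < 0:
            return None  # query failed
        if prg == 100:
            break
        time.sleep(1)
    # Stream chunk 0 and decode the accumulated {"value": [...]} payload.
    result = u''
    for piece in redis_query_chunk(host, port, redis_password, qid, 0):
        result += piece
    return json.loads(result)['value']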
class OpStateServer(object):
def __init__(self, logger, redis_password=None):
self._logger = logger
self._redis_list = []
self._redis_password= redis_password
# end __init__
def update_redis_list(self, redis_list):
self._redis_list = redis_list
# end update_redis_list
def redis_publish(self, msg_type, destination, msg):
# Get the sandesh encoded in XML format
sandesh = SandeshWriter.encode_sandesh(msg)
msg_encode = base64.b64encode(sandesh)
redis_msg = '{"type":"%s","destination":"%s","message":"%s"}' \
% (msg_type, destination, msg_encode)
# Publish message in the Redis bus
for redis_server in self._redis_list:
redis_inst = redis.StrictRedis(redis_server[0],
redis_server[1], db=0,
password=self._redis_password)
try:
redis_inst.publish('analytics', redis_msg)
except redis.exceptions.ConnectionError:
# Update connection info
ConnectionState.update(conn_type = ConnectionType.REDIS,
name = 'UVE', status = ConnectionStatus.DOWN,
message = 'Connection Error',
server_addrs = ['%s:%d' % (redis_server[0], \
redis_server[1])])
self._logger.error('No Connection to Redis [%s:%d].'
'Failed to publish message.' \
% (redis_server[0], redis_server[1]))
return True
# end redis_publish
# end class OpStateServer
class OpServer(object):
"""
    This class provides a REST API to get the operational state of the
    Contrail VNS system.
The supported **GET** APIs are:
* ``/analytics/virtual-network/<name>``
* ``/analytics/virtual-machine/<name>``
* ``/analytics/vrouter/<name>``:
* ``/analytics/bgp-router/<name>``
* ``/analytics/bgp-peer/<name>``
* ``/analytics/xmpp-peer/<name>``
* ``/analytics/collector/<name>``
* ``/analytics/tables``:
* ``/analytics/table/<table>``:
* ``/analytics/table/<table>/schema``:
* ``/analytics/table/<table>/column-values``:
* ``/analytics/table/<table>/column-values/<column>``:
* ``/analytics/query/<queryId>``
* ``/analytics/query/<queryId>/chunk-final/<chunkId>``
* ``/analytics/send-tracebuffer/<source>/<module>/<name>``
* ``/analytics/operation/analytics-data-start-time``
The supported **POST** APIs are:
* ``/analytics/query``:
* ``/analytics/operation/database-purge``:
"""
def __new__(cls, *args, **kwargs):
obj = super(OpServer, cls).__new__(cls, *args, **kwargs)
bottle.route('/', 'GET', obj.homepage_http_get)
bottle.route('/analytics', 'GET', obj.analytics_http_get)
bottle.route('/analytics/uves', 'GET', obj.uves_http_get)
bottle.route('/analytics/alarms', 'GET', obj.alarms_http_get)
bottle.route(
'/analytics/virtual-networks', 'GET', obj.uve_list_http_get)
bottle.route(
'/analytics/virtual-machines', 'GET', obj.uve_list_http_get)
bottle.route(
'/analytics/service-instances', 'GET', obj.uve_list_http_get)
bottle.route('/analytics/service-chains', 'GET', obj.uve_list_http_get)
bottle.route('/analytics/vrouters', 'GET', obj.uve_list_http_get)
bottle.route('/analytics/bgp-routers', 'GET', obj.uve_list_http_get)
bottle.route('/analytics/bgp-peers', 'GET', obj.uve_list_http_get)
bottle.route('/analytics/xmpp-peers', 'GET', obj.uve_list_http_get)
bottle.route('/analytics/collectors', 'GET', obj.uve_list_http_get)
bottle.route('/analytics/generators', 'GET', obj.uve_list_http_get)
bottle.route('/analytics/config-nodes', 'GET', obj.uve_list_http_get)
bottle.route(
'/analytics/virtual-network/<name>', 'GET', obj.uve_http_get)
bottle.route(
'/analytics/virtual-machine/<name>', 'GET', obj.uve_http_get)
bottle.route(
'/analytics/service-instance/<name>', 'GET', obj.uve_http_get)
bottle.route(
'/analytics/service-chain/<name>', 'GET', obj.uve_http_get)
bottle.route('/analytics/vrouter/<name>', 'GET', obj.uve_http_get)
bottle.route('/analytics/bgp-router/<name>', 'GET', obj.uve_http_get)
bottle.route('/analytics/bgp-peer/<name>', 'GET', obj.uve_http_get)
bottle.route('/analytics/xmpp-peer/<name>', 'GET', obj.uve_http_get)
bottle.route('/analytics/collector/<name>', 'GET', obj.uve_http_get)
bottle.route('/analytics/generator/<name>', 'GET', obj.uve_http_get)
bottle.route('/analytics/config-node/<name>', 'GET', obj.uve_http_get)
bottle.route('/analytics/query', 'POST', obj.query_process)
bottle.route('/analytics/query/<queryId>', 'GET', obj.query_status_get)
bottle.route('/analytics/query/<queryId>/chunk-final/<chunkId>',
'GET', obj.query_chunk_get)
bottle.route('/analytics/queries', 'GET', obj.show_queries)
bottle.route('/analytics/tables', 'GET', obj.tables_process)
bottle.route('/analytics/operation/database-purge',
'POST', obj.process_purge_request)
bottle.route('/analytics/operation/analytics-data-start-time',
'GET', obj._get_analytics_data_start_time)
bottle.route('/analytics/table/<table>', 'GET', obj.table_process)
bottle.route(
'/analytics/table/<table>/schema', 'GET', obj.table_schema_process)
for i in range(0, len(_TABLES)):
if len(_TABLES[i].columnvalues) > 0:
bottle.route('/analytics/table/<table>/column-values',
'GET', obj.column_values_process)
bottle.route('/analytics/table/<table>/column-values/<column>',
'GET', obj.column_process)
bottle.route('/analytics/send-tracebuffer/<source>/<module>/<instance_id>/<name>',
'GET', obj.send_trace_buffer)
bottle.route('/documentation/<filename:path>', 'GET',
obj.documentation_http_get)
for uve in UVE_MAP:
bottle.route(
'/analytics/uves/' + uve + 's', 'GET', obj.uve_list_http_get)
bottle.route(
'/analytics/uves/' + uve + '/<name>', 'GET', obj.uve_http_get)
bottle.route(
'/analytics/uves/' + uve, 'POST', obj.uve_http_post)
bottle.route(
'/analytics/alarms/' + uve + 's', 'GET', obj.alarm_list_http_get)
bottle.route(
'/analytics/alarms/' + uve + '/<name>', 'GET', obj.alarm_http_get)
bottle.route(
'/analytics/alarms/' + uve, 'POST', obj.alarm_http_post)
return obj
# end __new__
def disc_publish(self):
try:
import discoveryclient.client as client
except:
try:
# TODO: Try importing from the server. This should go away..
import discovery.client as client
except:
raise Exception('Could not get Discovery Client')
data = {
'ip-address': self._args.host_ip,
'port': self._args.rest_api_port,
}
self.disc = client.DiscoveryClient(
self._args.disc_server_ip,
self._args.disc_server_port,
ModuleNames[Module.OPSERVER])
self._logger.info("Disc Publish to %s : %d - %s"
% (self._args.disc_server_ip,
self._args.disc_server_port, str(data)))
self.disc.publish(ANALYTICS_API_SERVER_DISCOVERY_SERVICE_NAME, data)
# end
def __init__(self):
self._args = None
self._parse_args()
self._homepage_links = []
self._homepage_links.append(
LinkObject('documentation', '/documentation/index.html'))
self._homepage_links.append(LinkObject('analytics', '/analytics'))
super(OpServer, self).__init__()
module = Module.OPSERVER
self._moduleid = ModuleNames[module]
node_type = Module2NodeType[module]
self._node_type_name = NodeTypeNames[node_type]
if self._args.worker_id:
self._instance_id = self._args.worker_id
else:
self._instance_id = INSTANCE_ID_DEFAULT
self._hostname = socket.gethostname()
if self._args.dup:
self._hostname += 'dup'
opserver_sandesh_req_impl = OpserverSandeshReqImpl(self)
sandesh_global.init_generator(self._moduleid, self._hostname,
self._node_type_name, self._instance_id,
self._args.collectors, 'opserver_context',
int(self._args.http_server_port),
['opserver.sandesh'])
sandesh_global.set_logging_params(
enable_local_log=self._args.log_local,
category=self._args.log_category,
level=self._args.log_level,
file=self._args.log_file,
enable_syslog=self._args.use_syslog,
syslog_facility=self._args.syslog_facility)
ConnectionState.init(sandesh_global, self._hostname, self._moduleid,
self._instance_id,
staticmethod(ConnectionState.get_process_state_cb),
NodeStatusUVE, NodeStatus)
# Trace buffer list
self.trace_buf = [
{'name':'DiscoveryMsg', 'size':1000}
]
# Create trace buffers
for buf in self.trace_buf:
sandesh_global.trace_buffer_create(name=buf['name'], size=buf['size'])
self._logger = sandesh_global._logger
self._get_common = self._http_get_common
self._put_common = self._http_put_common
self._delete_common = self._http_delete_common
self._post_common = self._http_post_common
self._collector_pool = None
self._state_server = OpStateServer(self._logger, self._args.redis_password)
self._uve_server = UVEServer(('127.0.0.1',
self._args.redis_server_port),
self._logger,
self._args.redis_password)
self._LEVEL_LIST = []
for k in SandeshLevel._VALUES_TO_NAMES:
if (k < SandeshLevel.UT_START):
d = {}
d[k] = SandeshLevel._VALUES_TO_NAMES[k]
self._LEVEL_LIST.append(d)
self._CATEGORY_MAP =\
dict((ModuleNames[k], [CategoryNames[ce] for ce in v])
for k, v in ModuleCategoryMap.iteritems())
self.disc = None
if self._args.disc_server_ip:
self.disc_publish()
else:
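            # No discovery server configured: build the redis-uve list
            # statically from the redis_uve_list config/CLI argument.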
self.redis_uve_list = []
try:
if type(self._args.redis_uve_list) is str:
self._args.redis_uve_list = self._args.redis_uve_list.split()
for redis_uve in self._args.redis_uve_list:
redis_ip_port = redis_uve.split(':')
redis_ip_port = (redis_ip_port[0], int(redis_ip_port[1]))
self.redis_uve_list.append(redis_ip_port)
except Exception as e:
self._logger.error('Failed to parse redis_uve_list: %s' % e)
else:
self._state_server.update_redis_list(self.redis_uve_list)
self._uve_server.update_redis_uve_list(self.redis_uve_list)
self._analytics_links = ['uves', 'alarms', 'tables', 'queries']
self._VIRTUAL_TABLES = copy.deepcopy(_TABLES)
for t in _OBJECT_TABLES:
obj = query_table(
name=t, display_name=_OBJECT_TABLES[t].objtable_display_name,
schema=_OBJECT_TABLE_SCHEMA,
columnvalues=_OBJECT_TABLE_COLUMN_VALUES)
self._VIRTUAL_TABLES.append(obj)
for t in _STAT_TABLES:
stat_id = t.stat_type + "." + t.stat_attr
scols = []
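            # Build the schema for this stat table: source/time/time-bin/UUID/
            # COUNT columns first, then SUM() and CLASS() aggregates for every
            # numeric attribute, plus an object-id column if the stat does not
            # already define one.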
keyln = stat_query_column(name=STAT_SOURCE_FIELD, datatype='string', index=True)
scols.append(keyln)
tln = stat_query_column(name=STAT_TIME_FIELD, datatype='int', index=False)
scols.append(tln)
tcln = stat_query_column(name="CLASS(" + STAT_TIME_FIELD + ")",
datatype='int', index=False)
scols.append(tcln)
teln = stat_query_column(name=STAT_TIMEBIN_FIELD, datatype='int', index=False)
scols.append(teln)
tecln = stat_query_column(name="CLASS(" + STAT_TIMEBIN_FIELD+ ")",
datatype='int', index=False)
scols.append(tecln)
uln = stat_query_column(name=STAT_UUID_FIELD, datatype='uuid', index=False)
scols.append(uln)
cln = stat_query_column(name="COUNT(" + t.stat_attr + ")",
datatype='int', index=False)
scols.append(cln)
isname = False
for aln in t.attributes:
if aln.name==STAT_OBJECTID_FIELD:
isname = True
scols.append(aln)
if aln.datatype in ['int','double']:
sln = stat_query_column(name= "SUM(" + aln.name + ")",
datatype=aln.datatype, index=False)
scols.append(sln)
scln = stat_query_column(name= "CLASS(" + aln.name + ")",
datatype=aln.datatype, index=False)
scols.append(scln)
if not isname:
keyln = stat_query_column(name=STAT_OBJECTID_FIELD, datatype='string', index=True)
scols.append(keyln)
sch = query_schema_type(type='STAT', columns=scols)
stt = query_table(
name = STAT_VT_PREFIX + "." + stat_id,
display_name = t.display_name,
schema = sch,
columnvalues = [STAT_OBJECTID_FIELD, SOURCE])
self._VIRTUAL_TABLES.append(stt)
self._analytics_db = AnalyticsDb(self._logger,
self._args.cassandra_server_list,
self._args.redis_query_port,
self._args.redis_password)
bottle.route('/', 'GET', self.homepage_http_get)
bottle.route('/analytics', 'GET', self.analytics_http_get)
bottle.route('/analytics/uves', 'GET', self.uves_http_get)
bottle.route('/analytics/alarms', 'GET', self.alarms_http_get)
bottle.route(
'/analytics/virtual-networks', 'GET', self.uve_list_http_get)
bottle.route(
'/analytics/virtual-machines', 'GET', self.uve_list_http_get)
bottle.route(
'/analytics/service-instances', 'GET', self.uve_list_http_get)
bottle.route(
'/analytics/service-chains', 'GET', self.uve_list_http_get)
bottle.route('/analytics/vrouters', 'GET', self.uve_list_http_get)
bottle.route('/analytics/bgp-routers', 'GET', self.uve_list_http_get)
bottle.route('/analytics/collectors', 'GET', self.uve_list_http_get)
bottle.route('/analytics/generators', 'GET', self.uve_list_http_get)
bottle.route('/analytics/config-nodes', 'GET', self.uve_list_http_get)
bottle.route(
'/analytics/virtual-network/<name>', 'GET', self.uve_http_get)
bottle.route(
'/analytics/virtual-machine/<name>', 'GET', self.uve_http_get)
bottle.route(
'/analytics/service-instance/<name>', 'GET', self.uve_http_get)
bottle.route(
'/analytics/service-chain/<name>', 'GET', self.uve_http_get)
bottle.route('/analytics/vrouter/<name>', 'GET', self.uve_http_get)
bottle.route('/analytics/bgp-router/<name>', 'GET', self.uve_http_get)
bottle.route('/analytics/collector/<name>', 'GET', self.uve_http_get)
bottle.route('/analytics/generator/<name>', 'GET', self.uve_http_get)
bottle.route('/analytics/config-node/<name>', 'GET', self.uve_http_get)
bottle.route('/analytics/query', 'POST', self.query_process)
bottle.route(
'/analytics/query/<queryId>', 'GET', self.query_status_get)
bottle.route('/analytics/query/<queryId>/chunk-final/<chunkId>',
'GET', self.query_chunk_get)
bottle.route('/analytics/queries', 'GET', self.show_queries)
bottle.route('/analytics/tables', 'GET', self.tables_process)
bottle.route('/analytics/operation/database-purge',
'POST', self.process_purge_request)
bottle.route('/analytics/operation/analytics-data-start-time',
'GET', self._get_analytics_data_start_time)
bottle.route('/analytics/table/<table>', 'GET', self.table_process)
bottle.route('/analytics/table/<table>/schema',
'GET', self.table_schema_process)
for i in range(0, len(self._VIRTUAL_TABLES)):
if len(self._VIRTUAL_TABLES[i].columnvalues) > 0:
bottle.route('/analytics/table/<table>/column-values',
'GET', self.column_values_process)
bottle.route('/analytics/table/<table>/column-values/<column>',
'GET', self.column_process)
bottle.route('/analytics/send-tracebuffer/<source>/<module>/<instance_id>/<name>',
'GET', self.send_trace_buffer)
bottle.route('/documentation/<filename:path>',
'GET', self.documentation_http_get)
for uve in UVE_MAP:
bottle.route(
'/analytics/uves/' + uve + 's', 'GET', self.uve_list_http_get)
bottle.route(
'/analytics/uves/' + uve + '/<name>', 'GET', self.uve_http_get)
bottle.route(
'/analytics/uves/' + uve, 'POST', self.uve_http_post)
bottle.route(
'/analytics/alarms/' + uve + 's', 'GET', self.alarm_list_http_get)
bottle.route(
'/analytics/alarms/' + uve + '/<name>', 'GET', self.alarm_http_get)
bottle.route(
'/analytics/alarms/' + uve, 'POST', self.alarm_http_post)
# end __init__
def _parse_args(self, args_str=' '.join(sys.argv[1:])):
'''
Eg. python opserver.py --host_ip 127.0.0.1
--redis_server_port 6379
--redis_query_port 6379
--redis_password
--collectors 127.0.0.1:8086
--cassandra_server_list 127.0.0.1:9160
--http_server_port 8090
--rest_api_port 8081
--rest_api_ip 0.0.0.0
--log_local
--log_level SYS_DEBUG
--log_category test
--log_file <stdout>
--use_syslog
--syslog_facility LOG_USER
--worker_id 0
--redis_uve_list 127.0.0.1:6379
'''
# Source any specified config/ini file
# Turn off help, so we print all options in response to -h
conf_parser = argparse.ArgumentParser(add_help=False)
conf_parser.add_argument("-c", "--conf_file",
help="Specify config file", metavar="FILE")
args, remaining_argv = conf_parser.parse_known_args(args_str.split())
defaults = {
'host_ip' : "127.0.0.1",
'collectors' : ['127.0.0.1:8086'],
'cassandra_server_list' : ['127.0.0.1:9160'],
'http_server_port' : 8090,
'rest_api_port' : 8081,
'rest_api_ip' : '0.0.0.0',
'log_local' : False,
'log_level' : 'SYS_DEBUG',
'log_category' : '',
'log_file' : Sandesh._DEFAULT_LOG_FILE,
'use_syslog' : False,
'syslog_facility' : Sandesh._DEFAULT_SYSLOG_FACILITY,
'dup' : False,
'redis_uve_list' : ['127.0.0.1:6379']
}
redis_opts = {
'redis_server_port' : 6379,
'redis_query_port' : 6379,
'redis_password' : None,
}
disc_opts = {
'disc_server_ip' : None,
'disc_server_port' : 5998,
}
config = None
if args.conf_file:
config = ConfigParser.SafeConfigParser()
config.read([args.conf_file])
defaults.update(dict(config.items("DEFAULTS")))
if 'REDIS' in config.sections():
redis_opts.update(dict(config.items('REDIS')))
if 'DISCOVERY' in config.sections():
disc_opts.update(dict(config.items('DISCOVERY')))
# Override with CLI options
        # Don't suppress add_help here so it will handle -h
parser = argparse.ArgumentParser(
# Inherit options from config_parser
parents=[conf_parser],
# print script description with -h/--help
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
defaults.update(redis_opts)
defaults.update(disc_opts)
parser.set_defaults(**defaults)
parser.add_argument("--host_ip",
help="Host IP address")
parser.add_argument("--redis_server_port",
type=int,
help="Redis server port")
parser.add_argument("--redis_query_port",
type=int,
help="Redis query port")
parser.add_argument("--redis_password",
help="Redis server password")
parser.add_argument("--collectors",
help="List of Collector IP addresses in ip:port format",
nargs="+")
parser.add_argument("--http_server_port",
type=int,
help="HTTP server port")
parser.add_argument("--rest_api_port",
type=int,
help="REST API port")
parser.add_argument("--rest_api_ip",
help="REST API IP address")
parser.add_argument("--log_local", action="store_true",
help="Enable local logging of sandesh messages")
parser.add_argument(
"--log_level",
help="Severity level for local logging of sandesh messages")
parser.add_argument(
"--log_category",
help="Category filter for local logging of sandesh messages")
parser.add_argument("--log_file",
help="Filename for the logs to be written to")
parser.add_argument("--use_syslog",
action="store_true",
help="Use syslog for logging")
parser.add_argument("--syslog_facility",
help="Syslog facility to receive log lines")
parser.add_argument("--disc_server_ip",
help="Discovery Server IP address")
parser.add_argument("--disc_server_port",
type=int,
help="Discovery Server port")
parser.add_argument("--dup", action="store_true",
help="Internal use")
parser.add_argument("--redis_uve_list",
help="List of redis-uve in ip:port format. For internal use only",
nargs="+")
parser.add_argument(
"--worker_id",
help="Worker Id")
parser.add_argument("--cassandra_server_list",
help="List of cassandra_server_ip in ip:port format",
nargs="+")
self._args = parser.parse_args(remaining_argv)
if type(self._args.collectors) is str:
self._args.collectors = self._args.collectors.split()
if type(self._args.redis_uve_list) is str:
self._args.redis_uve_list = self._args.redis_uve_list.split()
if type(self._args.cassandra_server_list) is str:
self._args.cassandra_server_list = self._args.cassandra_server_list.split()
# end _parse_args
def get_args(self):
return self._args
# end get_args
def get_http_server_port(self):
return int(self._args.http_server_port)
# end get_http_server_port
def get_uve_server(self):
return self._uve_server
# end get_uve_server
def homepage_http_get(self):
json_body = {}
json_links = []
base_url = bottle.request.urlparts.scheme + \
'://' + bottle.request.urlparts.netloc
for link in self._homepage_links:
json_links.append(
{'link': obj_to_dict(
LinkObject(link.name, base_url + link.href))})
json_body = \
{"href": base_url,
"links": json_links
}
return json_body
# end homepage_http_get
def documentation_http_get(self, filename):
return bottle.static_file(
filename, root='/usr/share/doc/contrail-analytics-api/html')
# end documentation_http_get
def _http_get_common(self, request):
return (True, '')
# end _http_get_common
def _http_put_common(self, request, obj_dict):
return (True, '')
# end _http_put_common
def _http_delete_common(self, request, id):
return (True, '')
# end _http_delete_common
def _http_post_common(self, request, obj_dict):
return (True, '')
# end _http_post_common
@staticmethod
def _get_redis_query_ip_from_qid(qid):
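        # Query ids are generated in _query() as uuid.uuid1(<host IPv4 packed
        # as an int>), so the uuid's node field - the last hyphen-separated
        # segment - can be unpacked back into the dotted-quad address of the
        # redis instance that owns the query.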
try:
ip = qid.rsplit('-', 1)[1]
redis_ip = socket.inet_ntop(socket.AF_INET,
struct.pack('>I', int(ip, 16)))
except Exception as err:
return None
return redis_ip
# end _get_redis_query_ip_from_qid
def _query_status(self, request, qid):
resp = {}
redis_query_ip = OpServer._get_redis_query_ip_from_qid(qid)
if redis_query_ip is None:
return bottle.HTTPError(_ERRORS[errno.EINVAL],
'Invalid query id')
try:
resp = redis_query_status(host=redis_query_ip,
port=int(self._args.redis_query_port),
redis_password=self._args.redis_password,
qid=qid)
except redis.exceptions.ConnectionError:
# Update connection info
ConnectionState.update(conn_type = ConnectionType.REDIS,
name = 'Query', status = ConnectionStatus.DOWN,
message = 'Query[%s] status : Connection Error' % (qid),
server_addrs = ['%s:%s' % (redis_query_ip, \
str(self._args.redis_query_port))])
return bottle.HTTPError(_ERRORS[errno.EIO],
'Failure in connection to the query DB')
except Exception as e:
# Update connection info
ConnectionState.update(conn_type = ConnectionType.REDIS,
name = 'Query', status = ConnectionStatus.DOWN,
message = 'Query[%s] status : Exception %s' % (qid, str(e)),
server_addrs = ['%s:%s' % (redis_query_ip, \
str(self._args.redis_query_port))])
self._logger.error("Exception: %s" % e)
return bottle.HTTPError(_ERRORS[errno.EIO], 'Error: %s' % e)
else:
if resp is None:
return bottle.HTTPError(_ERRORS[errno.ENOENT],
'Invalid query id or Abandoned query id')
resp_header = {'Content-Type': 'application/json'}
resp_code = 200
self._logger.debug("query [%s] status: %s" % (qid, resp))
return bottle.HTTPResponse(
json.dumps(resp), resp_code, resp_header)
# end _query_status
def _query_chunk(self, request, qid, chunk_id):
redis_query_ip = OpServer._get_redis_query_ip_from_qid(qid)
if redis_query_ip is None:
yield bottle.HTTPError(_ERRORS[errno.EINVAL],
'Invalid query id')
try:
done = False
gen = redis_query_chunk(host=redis_query_ip,
port=int(self._args.redis_query_port),
redis_password=self._args.redis_password,
qid=qid, chunk_id=chunk_id)
bottle.response.set_header('Content-Type', 'application/json')
while not done:
try:
yield gen.next()
except StopIteration:
done = True
except redis.exceptions.ConnectionError:
# Update connection info
ConnectionState.update(conn_type = ConnectionType.REDIS,
name = 'Query', status = ConnectionStatus.DOWN,
message = 'Query [%s] chunk #%d : Connection Error' % \
(qid, chunk_id),
server_addrs = ['%s:%s' % (redis_query_ip, \
str(self._args.redis_query_port))])
yield bottle.HTTPError(_ERRORS[errno.EIO],
'Failure in connection to the query DB')
except Exception as e:
# Update connection info
ConnectionState.update(conn_type = ConnectionType.REDIS,
name = 'Query', status = ConnectionStatus.DOWN,
message = 'Query [%s] chunk #%d : Exception %s' % \
(qid, chunk_id, str(e)),
server_addrs = ['%s:%s' % (redis_query_ip, \
str(self._args.redis_query_port))])
self._logger.error("Exception: %s" % str(e))
yield bottle.HTTPError(_ERRORS[errno.ENOENT], 'Error: %s' % e)
else:
self._logger.info(
"Query [%s] chunk #%d read at time %d"
% (qid, chunk_id, time.time()))
# end _query_chunk
def _query(self, request):
reply = {}
try:
redis_query_ip, = struct.unpack('>I', socket.inet_pton(
socket.AF_INET, self._args.host_ip))
qid = str(uuid.uuid1(redis_query_ip))
self._logger.info("Starting Query %s" % qid)
tabl = ""
for key, value in request.json.iteritems():
if key == "table":
tabl = value
self._logger.info("Table is " + tabl)
tabn = None
for i in range(0, len(self._VIRTUAL_TABLES)):
if self._VIRTUAL_TABLES[i].name == tabl:
tabn = i
if (tabn is not None):
tabtypes = {}
for cols in self._VIRTUAL_TABLES[tabn].schema.columns:
if cols.datatype in ['long', 'int']:
tabtypes[cols.name] = 'int'
elif cols.datatype in ['ipv4']:
tabtypes[cols.name] = 'ipv4'
else:
tabtypes[cols.name] = 'string'
self._logger.info(str(tabtypes))
if (tabn is None):
if not tabl.startswith("StatTable."):
reply = bottle.HTTPError(_ERRORS[errno.ENOENT],
'Table %s not found' % tabl)
yield reply
return
else:
self._logger.info("Schema not known for dynamic table %s" % tabl)
if tabl == OVERLAY_TO_UNDERLAY_FLOW_MAP:
overlay_to_underlay_map = OverlayToUnderlayMapper(
request.json, self._args.host_ip,
self._args.rest_api_port, self._logger)
try:
yield overlay_to_underlay_map.process_query()
except OverlayToUnderlayMapperError as e:
yield bottle.HTTPError(_ERRORS[errno.EIO], str(e))
return
prg = redis_query_start('127.0.0.1',
int(self._args.redis_query_port),
self._args.redis_password,
qid, request.json)
if prg is None:
# Update connection info
ConnectionState.update(conn_type = ConnectionType.REDIS,
name = 'Query', status = ConnectionStatus.DOWN,
message = 'Query[%s] Query Engine not responding' % qid,
server_addrs = ['127.0.0.1' + ':' +
str(self._args.redis_query_port)])
self._logger.error('QE Not Responding')
yield bottle.HTTPError(_ERRORS[errno.EBUSY],
'Query Engine is not responding')
return
except redis.exceptions.ConnectionError:
# Update connection info
ConnectionState.update(conn_type = ConnectionType.REDIS,
name = 'Query', status = ConnectionStatus.DOWN,
message = 'Query[%s] Connection Error' % (qid),
server_addrs = ['127.0.0.1' + ':' +
str(self._args.redis_query_port)])
yield bottle.HTTPError(_ERRORS[errno.EIO],
'Failure in connection to the query DB')
except Exception as e:
# Update connection info
ConnectionState.update(conn_type = ConnectionType.REDIS,
name = 'Query', status = ConnectionStatus.DOWN,
message = 'Query[%s] Exception: %s' % (qid, str(e)),
server_addrs = ['127.0.0.1' + ':' +
str(self._args.redis_query_port)])
self._logger.error("Exception: %s" % str(e))
yield bottle.HTTPError(_ERRORS[errno.EIO],
'Error: %s' % e)
else:
redish = None
if prg < 0:
cod = -prg
self._logger.error(
"Query Failed. Found Error %s" % errno.errorcode[cod])
reply = bottle.HTTPError(_ERRORS[cod], errno.errorcode[cod])
yield reply
else:
self._logger.info(
"Query Accepted at time %d , Progress %d"
% (time.time(), prg))
# In Async mode, we should return with "202 Accepted" here
                # and also give back the status URI "/analytics/query/<qid>".
                # OpServer's client will poll the status URI.
if request.get_header('Expect') == '202-accepted' or\
request.get_header('Postman-Expect') == '202-accepted':
href = '/analytics/query/%s' % (qid)
resp_data = json.dumps({'href': href})
yield bottle.HTTPResponse(
resp_data, 202, {'Content-type': 'application/json'})
else:
for gen in self._sync_query(request, qid):
yield gen
# end _query
def _sync_query(self, request, qid):
        # In Sync mode, keep polling the query status until the final result
        # is available
try:
self._logger.info("Polling %s for query result" % ("REPLY:" + qid))
prg = 0
done = False
while not done:
gevent.sleep(1)
resp = redis_query_status(host='127.0.0.1',
port=int(
self._args.redis_query_port),
redis_password=self._args.redis_password,
qid=qid)
# We want to print progress only if it has changed
if int(resp["progress"]) == prg:
continue
self._logger.info(
"Query Progress is %s time %d" % (str(resp), time.time()))
prg = int(resp["progress"])
# Either there was an error, or the query is complete
if (prg < 0) or (prg == 100):
done = True
if prg < 0:
cod = -prg
self._logger.error("Found Error %s" % errno.errorcode[cod])
reply = bottle.HTTPError(_ERRORS[cod], errno.errorcode[cod])
yield reply
return
            # In Sync mode, it's time to read the final result. Status is in
            # "resp".
done = False
gen = redis_query_result(host='127.0.0.1',
port=int(self._args.redis_query_port),
redis_password=self._args.redis_password,
qid=qid)
bottle.response.set_header('Content-Type', 'application/json')
while not done:
try:
yield gen.next()
except StopIteration:
done = True
'''
final_res = {}
prg, final_res['value'] =\
redis_query_result_dict(host=self._args.redis_server_ip,
port=int(self._args.redis_query_port),
qid=qid)
yield json.dumps(final_res)
'''
except redis.exceptions.ConnectionError:
# Update connection info
ConnectionState.update(conn_type = ConnectionType.REDIS,
name = 'Query', status = ConnectionStatus.DOWN,
message = 'Sync Query[%s] Connection Error' % qid,
server_addrs = ['127.0.0.1' + ':' +
str(self._args.redis_query_port)])
yield bottle.HTTPError(_ERRORS[errno.EIO],
'Failure in connection to the query DB')
except Exception as e:
# Update connection info
ConnectionState.update(conn_type = ConnectionType.REDIS,
name = 'Query', status = ConnectionStatus.DOWN,
message = 'Sync Query[%s] Exception: %s' % (qid, str(e)),
server_addrs = ['127.0.0.1' + ':' +
str(self._args.redis_query_port)])
self._logger.error("Exception: %s" % str(e))
yield bottle.HTTPError(_ERRORS[errno.EIO],
'Error: %s' % e)
else:
# Update connection info
ConnectionState.update(conn_type = ConnectionType.REDIS,
name = 'Query', status = ConnectionStatus.UP,
message = None,
server_addrs = ['127.0.0.1' + ':' +
str(self._args.redis_query_port)])
self._logger.info(
"Query Result available at time %d" % time.time())
return
# end _sync_query
def query_process(self):
self._post_common(bottle.request, None)
result = self._query(bottle.request)
return result
# end query_process
def query_status_get(self, queryId):
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
abort(code, msg)
return self._query_status(bottle.request, queryId)
# end query_status_get
def query_chunk_get(self, queryId, chunkId):
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
abort(code, msg)
return self._query_chunk(bottle.request, queryId, int(chunkId))
# end query_chunk_get
def show_queries(self):
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
abort(code, msg)
queries = {}
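        # Gather queries still waiting in the QUERYQ list plus those owned by
        # this host's query engine, classifying the latter as processing,
        # abandoned (no status) or errored (negative progress).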
try:
redish = redis.StrictRedis(db=0, host='127.0.0.1',
port=int(self._args.redis_query_port),
password=self._args.redis_password)
pending_queries = redish.lrange('QUERYQ', 0, -1)
pending_queries_info = []
for query_id in pending_queries:
query_data = redis_query_info(redish, query_id)
pending_queries_info.append(query_data)
queries['pending_queries'] = pending_queries_info
processing_queries = redish.lrange(
'ENGINE:' + socket.gethostname(), 0, -1)
processing_queries_info = []
abandoned_queries_info = []
error_queries_info = []
for query_id in processing_queries:
status = redis_query_status(host='127.0.0.1',
port=int(
self._args.redis_query_port),
redis_password=self._args.redis_password,
qid=query_id)
query_data = redis_query_info(redish, query_id)
if status is None:
abandoned_queries_info.append(query_data)
elif status['progress'] < 0:
query_data['error_code'] = status['progress']
error_queries_info.append(query_data)
else:
query_data['progress'] = status['progress']
processing_queries_info.append(query_data)
queries['queries_being_processed'] = processing_queries_info
queries['abandoned_queries'] = abandoned_queries_info
queries['error_queries'] = error_queries_info
except redis.exceptions.ConnectionError:
# Update connection info
ConnectionState.update(conn_type = ConnectionType.REDIS,
name = 'Query', status = ConnectionStatus.DOWN,
message = 'Show queries : Connection Error',
server_addrs = ['127.0.0.1' + ':' +
str(self._args.redis_query_port)])
return bottle.HTTPError(_ERRORS[errno.EIO],
'Failure in connection to the query DB')
except Exception as err:
# Update connection info
ConnectionState.update(conn_type = ConnectionType.REDIS,
name = 'Query', status = ConnectionStatus.DOWN,
message = 'Show queries : Exception %s' % str(err),
server_addrs = ['127.0.0.1' + ':' +
str(self._args.redis_query_port)])
self._logger.error("Exception in show queries: %s" % str(err))
return bottle.HTTPError(_ERRORS[errno.EIO], 'Error: %s' % err)
else:
return json.dumps(queries)
# end show_queries
@staticmethod
def _get_tfilter(cfilt):
tfilter = {}
for tfilt in cfilt:
afilt = tfilt.split(':')
try:
attr_list = tfilter[afilt[0]]
except KeyError:
tfilter[afilt[0]] = set()
attr_list = tfilter[afilt[0]]
finally:
if len(afilt) > 1:
attr_list.add(afilt[1])
tfilter[afilt[0]] = attr_list
return tfilter
# end _get_tfilter
@staticmethod
def _uve_filter_set(req):
sfilter = None
mfilter = None
tfilter = None
kfilter = None
any_filter = False
if 'sfilt' in req.keys():
any_filter = True
sfilter = req.sfilt
if 'mfilt' in req.keys():
any_filter = True
mfilter = req.mfilt
if 'cfilt' in req.keys():
any_filter = True
infos = req.cfilt.split(',')
tfilter = OpServer._get_tfilter(infos)
if 'kfilt' in req.keys():
any_filter = True
kfilter = req.kfilt.split(',')
return any_filter, kfilter, sfilter, mfilter, tfilter
# end _uve_filter_set
@staticmethod
def _uve_http_post_filter_set(req):
try:
kfilter = req['kfilt']
if not isinstance(kfilter, list):
raise ValueError('Invalid kfilt')
except KeyError:
kfilter = ['*']
try:
sfilter = req['sfilt']
except KeyError:
sfilter = None
try:
mfilter = req['mfilt']
except KeyError:
mfilter = None
try:
cfilt = req['cfilt']
if not isinstance(cfilt, list):
raise ValueError('Invalid cfilt')
except KeyError:
tfilter = None
else:
tfilter = OpServer._get_tfilter(cfilt)
return True, kfilter, sfilter, mfilter, tfilter
# end _uve_http_post_filter_set
def _uve_alarm_http_post(self, is_alarm):
(ok, result) = self._post_common(bottle.request, None)
if not ok:
(code, msg) = result
abort(code, msg)
uve_type = bottle.request.url.rsplit('/', 1)[1]
try:
uve_tbl = UVE_MAP[uve_type]
except Exception as e:
yield bottle.HTTPError(_ERRORS[errno.EINVAL],
'Invalid table name')
else:
try:
req = bottle.request.json
_, kfilter, sfilter, mfilter, tfilter = \
OpServer._uve_http_post_filter_set(req)
except Exception as err:
yield bottle.HTTPError(_ERRORS[errno.EBADMSG], err)
bottle.response.set_header('Content-Type', 'application/json')
yield u'{"value": ['
first = True
for key in kfilter:
if key.find('*') != -1:
uve_name = uve_tbl + ':*'
for gen in self._uve_server.multi_uve_get(uve_name, True,
kfilter, sfilter,
mfilter, tfilter,
is_alarm):
if first:
yield u'' + json.dumps(gen)
first = False
else:
yield u', ' + json.dumps(gen)
yield u']}'
return
first = True
for key in kfilter:
uve_name = uve_tbl + ':' + key
rsp = self._uve_server.get_uve(uve_name, True, sfilter,
mfilter, tfilter,
is_alarm=is_alarm)
if rsp != {}:
data = {'name': key, 'value': rsp}
if first:
yield u'' + json.dumps(data)
first = False
else:
yield u', ' + json.dumps(data)
yield u']}'
# end _uve_alarm_http_post
def uve_http_post(self):
return self._uve_alarm_http_post(is_alarm=False)
# end uve_http_post
def alarm_http_post(self):
return self._uve_alarm_http_post(is_alarm=True)
# end alarm_http_post
def _uve_alarm_http_get(self, name, is_alarm):
# common handling for all resource get
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
abort(code, msg)
uve_type = bottle.request.url.rsplit('/', 2)[1]
try:
uve_tbl = UVE_MAP[uve_type]
except Exception as e:
yield {}
else:
bottle.response.set_header('Content-Type', 'application/json')
uve_name = uve_tbl + ':' + name
req = bottle.request.query
flat = False
if 'flat' in req.keys():
flat = True
any_filter, kfilter, sfilter, mfilter, tfilter = \
OpServer._uve_filter_set(req)
if any_filter:
flat = True
uve_name = uve_tbl + ':' + name
if name.find('*') != -1:
flat = True
yield u'{"value": ['
first = True
for gen in self._uve_server.multi_uve_get(uve_name, flat,
kfilter, sfilter,
mfilter, tfilter,
is_alarm):
if first:
yield u'' + json.dumps(gen)
first = False
else:
yield u', ' + json.dumps(gen)
yield u']}'
else:
rsp = self._uve_server.get_uve(uve_name, flat, sfilter,
mfilter, tfilter,
is_alarm=is_alarm)
yield json.dumps(rsp)
# end _uve_alarm_http_get
def uve_http_get(self, name):
return self._uve_alarm_http_get(name, is_alarm=False)
# end uve_http_get
def alarm_http_get(self, name):
return self._uve_alarm_http_get(name, is_alarm=True)
# end alarm_http_get
def _uve_alarm_list_http_get(self, is_alarm):
# common handling for all resource get
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
abort(code, msg)
arg_line = bottle.request.url.rsplit('/', 1)[1]
uve_args = arg_line.split('?')
uve_type = uve_args[0][:-1]
if len(uve_args) != 1:
uve_filters = ''
filters = uve_args[1].split('&')
filters = \
[filt for filt in filters if filt[:len('kfilt')] != 'kfilt']
if len(filters):
uve_filters = '&'.join(filters)
else:
uve_filters = 'flat'
else:
uve_filters = 'flat'
try:
uve_tbl = UVE_MAP[uve_type]
except Exception as e:
return {}
else:
bottle.response.set_header('Content-Type', 'application/json')
req = bottle.request.query
_, kfilter, sfilter, mfilter, tfilter = \
OpServer._uve_filter_set(req)
uve_list = self._uve_server.get_uve_list(
uve_tbl, kfilter, sfilter, mfilter, tfilter, True, is_alarm)
uve_or_alarm = 'alarms' if is_alarm else 'uves'
base_url = bottle.request.urlparts.scheme + '://' + \
bottle.request.urlparts.netloc + \
'/analytics/%s/%s/' % (uve_or_alarm, uve_type)
uve_links =\
[obj_to_dict(LinkObject(uve,
base_url + uve + "?" + uve_filters))
for uve in uve_list]
return json.dumps(uve_links)
# end _uve_alarm_list_http_get
def uve_list_http_get(self):
return self._uve_alarm_list_http_get(is_alarm=False)
# end uve_list_http_get
def alarm_list_http_get(self):
return self._uve_alarm_list_http_get(is_alarm=True)
# end alarm_list_http_get
def analytics_http_get(self):
# common handling for all resource get
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
abort(code, msg)
base_url = bottle.request.urlparts.scheme + '://' + \
bottle.request.urlparts.netloc + '/analytics/'
analytics_links = [obj_to_dict(LinkObject(link, base_url + link))
for link in self._analytics_links]
return json.dumps(analytics_links)
# end analytics_http_get
def _uves_alarms_http_get(self, is_alarm):
# common handling for all resource get
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
abort(code, msg)
uve_or_alarm = 'alarms' if is_alarm else 'uves'
base_url = bottle.request.urlparts.scheme + '://' + \
bottle.request.urlparts.netloc + '/analytics/%s/' % (uve_or_alarm)
uvetype_links =\
[obj_to_dict(
LinkObject(uvetype + 's', base_url + uvetype + 's'))
for uvetype in UVE_MAP]
return json.dumps(uvetype_links)
# end _uves_alarms_http_get
def uves_http_get(self):
return self._uves_alarms_http_get(is_alarm=False)
# end uves_http_get
def alarms_http_get(self):
return self._uves_alarms_http_get(is_alarm=True)
# end alarms_http_get
def send_trace_buffer(self, source, module, instance_id, name):
response = {}
trace_req = SandeshTraceRequest(name)
if module not in ModuleIds:
response['status'] = 'fail'
response['error'] = 'Invalid module'
return json.dumps(response)
module_id = ModuleIds[module]
node_type = Module2NodeType[module_id]
node_type_name = NodeTypeNames[node_type]
if self._state_server.redis_publish(msg_type='send-tracebuffer',
destination=source + ':' +
node_type_name + ':' + module +
':' + instance_id,
msg=trace_req):
response['status'] = 'pass'
else:
response['status'] = 'fail'
response['error'] = 'No connection to Redis'
return json.dumps(response)
# end send_trace_buffer
def tables_process(self):
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
abort(code, msg)
base_url = bottle.request.urlparts.scheme + '://' + \
bottle.request.urlparts.netloc + '/analytics/table/'
json_links = []
for i in range(0, len(self._VIRTUAL_TABLES)):
link = LinkObject(self._VIRTUAL_TABLES[
i].name, base_url + self._VIRTUAL_TABLES[i].name)
tbl_info = obj_to_dict(link)
tbl_info['type'] = self._VIRTUAL_TABLES[i].schema.type
if (self._VIRTUAL_TABLES[i].display_name is not None):
tbl_info['display_name'] =\
self._VIRTUAL_TABLES[i].display_name
json_links.append(tbl_info)
return json.dumps(json_links)
# end tables_process
def process_purge_request(self):
self._post_common(bottle.request, None)
if ("application/json" not in bottle.request.headers['Content-Type']):
self._logger.error('Content-type is not JSON')
response = {
'status': 'failed', 'reason': 'Content-type is not JSON'}
return bottle.HTTPResponse(
json.dumps(response), _ERRORS[errno.EBADMSG],
{'Content-type': 'application/json'})
analytics_start_time = self._analytics_db._get_analytics_start_time()
if (analytics_start_time == None):
self._logger.info("Failed to get the analytics start time")
response = {'status': 'failed',
'reason': 'Failed to get the analytics start time'}
return bottle.HTTPResponse(
json.dumps(response), _ERRORS[errno.EIO],
{'Content-type': 'application/json'})
purge_input = None
if ("purge_input" in bottle.request.json.keys()):
value = bottle.request.json["purge_input"]
if (type(value) is int):
if ((value <= 100) and (value > 0)):
current_time = UTCTimestampUsec()
purge_input = analytics_start_time + (float((value)*
(float(current_time) - float(analytics_start_time))))/100
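                    # The cutoff timestamp sits N% of the way from the
                    # analytics start time to the current time, i.e. purging
                    # N% drops roughly the oldest N% of the data window.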
else:
response = {'status': 'failed',
'reason': 'Valid % range is [1, 100]'}
return bottle.HTTPResponse(
json.dumps(response), _ERRORS[errno.EBADMSG],
{'Content-type': 'application/json'})
elif (type(value) is unicode):
try:
purge_input = OpServerUtils.convert_to_utc_timestamp_usec(value)
except:
response = {'status': 'failed',
'reason': 'Valid time formats are: \'%Y %b %d %H:%M:%S.%f\', '
'\'now\', \'now-h/m/s\', \'-/h/m/s\' in purge_input'}
return bottle.HTTPResponse(
json.dumps(response), _ERRORS[errno.EBADMSG],
{'Content-type': 'application/json'})
else:
response = {'status': 'failed',
'reason': 'Valid purge_input format is % or time'}
return bottle.HTTPResponse(
json.dumps(response), _ERRORS[errno.EBADMSG],
{'Content-type': 'application/json'})
else:
response = {'status': 'failed',
'reason': 'purge_input not specified'}
return bottle.HTTPResponse(
json.dumps(response), _ERRORS[errno.EBADMSG],
{'Content-type': 'application/json'})
if (purge_input <= analytics_start_time):
response = {'status': 'failed',
'reason': 'purge input is less than analytics start time'}
return bottle.HTTPResponse(
json.dumps(response), _ERRORS[errno.EIO],
{'Content-type': 'application/json'})
res = self._analytics_db.get_analytics_db_purge_status(
self._state_server._redis_list)
if (res == None):
purge_request_ip, = struct.unpack('>I', socket.inet_pton(
socket.AF_INET, self._args.host_ip))
purge_id = str(uuid.uuid1(purge_request_ip))
resp = self._analytics_db.set_analytics_db_purge_status(purge_id,
purge_input)
if (resp == None):
gevent.spawn(self.db_purge_operation, purge_input, purge_id)
response = {'status': 'started', 'purge_id': purge_id}
return bottle.HTTPResponse(json.dumps(response), 200,
{'Content-type': 'application/json'})
elif (resp['status'] == 'failed'):
return bottle.HTTPResponse(json.dumps(resp), _ERRORS[errno.EBUSY],
{'Content-type': 'application/json'})
elif (res['status'] == 'running'):
return bottle.HTTPResponse(json.dumps(res), 200,
{'Content-type': 'application/json'})
elif (res['status'] == 'failed'):
return bottle.HTTPResponse(json.dumps(res), _ERRORS[errno.EBUSY],
{'Content-type': 'application/json'})
# end process_purge_request
def db_purge_operation(self, purge_input, purge_id):
self._logger.info("purge_id %s START Purging!" % str(purge_id))
purge_stat = DatabasePurgeStats()
purge_stat.request_time = UTCTimestampUsec()
purge_info = DatabasePurgeInfo()
self._analytics_db.number_of_purge_requests += 1
purge_info.number_of_purge_requests = \
self._analytics_db.number_of_purge_requests
total_rows_deleted = self._analytics_db.db_purge(purge_input, purge_id)
self._analytics_db.delete_db_purge_status()
end_time = UTCTimestampUsec()
duration = end_time - purge_stat.request_time
purge_stat.purge_id = purge_id
if (total_rows_deleted < 0):
purge_stat.purge_status = PurgeStatusString[PurgeStatus.FAILURE]
self._logger.error("purge_id %s purging Failed" % str(purge_id))
else:
purge_stat.purge_status = PurgeStatusString[PurgeStatus.SUCCESS]
self._logger.info("purge_id %s purging DONE" % str(purge_id))
purge_stat.rows_deleted = total_rows_deleted
purge_stat.duration = duration
purge_info.name = self._hostname
purge_info.stats = [purge_stat]
purge_data = DatabasePurge(data=purge_info)
purge_data.send()
#end db_purge_operation
def _get_analytics_data_start_time(self):
analytics_start_time = self._analytics_db._get_analytics_start_time()
response = {'analytics_data_start_time': analytics_start_time}
return bottle.HTTPResponse(
json.dumps(response), 200, {'Content-type': 'application/json'})
# end _get_analytics_data_start_time
def table_process(self, table):
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
abort(code, msg)
base_url = bottle.request.urlparts.scheme + '://' + \
bottle.request.urlparts.netloc + '/analytics/table/' + table + '/'
json_links = []
for i in range(0, len(self._VIRTUAL_TABLES)):
if (self._VIRTUAL_TABLES[i].name == table):
link = LinkObject('schema', base_url + 'schema')
json_links.append(obj_to_dict(link))
if len(self._VIRTUAL_TABLES[i].columnvalues) > 0:
link = LinkObject(
'column-values', base_url + 'column-values')
json_links.append(obj_to_dict(link))
break
return json.dumps(json_links)
# end table_process
def table_schema_process(self, table):
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
abort(code, msg)
for i in range(0, len(self._VIRTUAL_TABLES)):
if (self._VIRTUAL_TABLES[i].name == table):
return json.dumps(self._VIRTUAL_TABLES[i].schema,
default=lambda obj: obj.__dict__)
return (json.dumps({}))
# end table_schema_process
def column_values_process(self, table):
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
abort(code, msg)
base_url = bottle.request.urlparts.scheme + '://' + \
bottle.request.urlparts.netloc + \
'/analytics/table/' + table + '/column-values/'
json_links = []
for i in range(0, len(self._VIRTUAL_TABLES)):
if (self._VIRTUAL_TABLES[i].name == table):
for col in self._VIRTUAL_TABLES[i].columnvalues:
link = LinkObject(col, base_url + col)
json_links.append(obj_to_dict(link))
break
return (json.dumps(json_links))
# end column_values_process
def generator_info(self, table, column):
if ((column == MODULE) or (column == SOURCE)):
sources = []
moduleids = []
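            # NGENERATORS members are colon-separated generator ids; field 0
            # is the source (hostname) and field 2 the module name.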
for redis_uve in self.redis_uve_list:
redish = redis.StrictRedis(
db=1,
host=redis_uve[0],
port=redis_uve[1],
password=self._args.redis_password)
try:
for key in redish.smembers("NGENERATORS"):
source = key.split(':')[0]
module = key.split(':')[2]
if (sources.count(source) == 0):
sources.append(source)
if (moduleids.count(module) == 0):
moduleids.append(module)
except Exception as e:
self._logger.error('Exception: %s' % e)
if column == MODULE:
return moduleids
elif column == SOURCE:
return sources
elif (column == 'Category'):
return self._CATEGORY_MAP
elif (column == 'Level'):
return self._LEVEL_LIST
elif (column == STAT_OBJECTID_FIELD):
objtab = None
for t in _STAT_TABLES:
stat_table = STAT_VT_PREFIX + "." + \
t.stat_type + "." + t.stat_attr
if (table == stat_table):
objtab = t.obj_table
break
if (objtab != None) and (objtab != "None"):
#import pdb; pdb.set_trace()
return list(self._uve_server.get_uve_list(objtab,
None, None, None, None, False))
return []
# end generator_info
def column_process(self, table, column):
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
abort(code, msg)
for i in range(0, len(self._VIRTUAL_TABLES)):
if (self._VIRTUAL_TABLES[i].name == table):
if self._VIRTUAL_TABLES[i].columnvalues.count(column) > 0:
return (json.dumps(self.generator_info(table, column)))
return (json.dumps([]))
# end column_process
def start_uve_server(self):
self._uve_server.run()
#end start_uve_server
def start_webserver(self):
pipe_start_app = bottle.app()
try:
bottle.run(app=pipe_start_app, host=self._args.rest_api_ip,
port=self._args.rest_api_port, server='gevent')
except Exception as e:
self._logger.error("Exception: %s" % e)
sys.exit()
# end start_webserver
def cpu_info_logger(self):
opserver_cpu_info = CpuInfoData()
while True:
mod_cpu_info = ModuleCpuInfo()
mod_cpu_info.module_id = self._moduleid
mod_cpu_info.instance_id = self._instance_id
mod_cpu_info.cpu_info = opserver_cpu_info.get_cpu_info(
system=False)
mod_cpu_state = ModuleCpuState()
mod_cpu_state.name = self._hostname
# At some point, the following attributes will be deprecated in favor of cpu_info
mod_cpu_state.module_cpu_info = [mod_cpu_info]
mod_cpu_state.opserver_cpu_share = mod_cpu_info.cpu_info.cpu_share
mod_cpu_state.opserver_mem_virt =\
mod_cpu_info.cpu_info.meminfo.virt
opserver_cpu_state_trace = ModuleCpuStateTrace(data=mod_cpu_state)
opserver_cpu_state_trace.send()
aly_cpu_state = AnalyticsCpuState()
aly_cpu_state.name = self._hostname
aly_cpu_info = ProcessCpuInfo()
aly_cpu_info.module_id= self._moduleid
aly_cpu_info.inst_id = self._instance_id
aly_cpu_info.cpu_share = mod_cpu_info.cpu_info.cpu_share
aly_cpu_info.mem_virt = mod_cpu_info.cpu_info.meminfo.virt
aly_cpu_state.cpu_info = [aly_cpu_info]
aly_cpu_state_trace = AnalyticsCpuStateTrace(data=aly_cpu_state)
aly_cpu_state_trace.send()
gevent.sleep(60)
#end cpu_info_logger
def poll_collector_list(self):
'''
Analytics node may be brought up/down any time. For UVE aggregation,
Opserver needs to know the list of all Analytics nodes (redis-uves).
        Presently, the Discovery server supports only a pull mechanism to get
        the Publisher list. Periodically poll the Collector list [in lieu of
        redis-uve nodes] from the discovery.
** Remove this code when the push mechanism to update the discovery clients
on the addition/deletion of Publisher nodes for a given service is
supported by the Discovery server.
'''
if self.disc:
while True:
self.redis_uve_list = []
try:
sub_obj = \
self.disc.subscribe(COLLECTOR_DISCOVERY_SERVICE_NAME, 0)
collectors = sub_obj.info
except Exception as e:
self._logger.error('Failed to get collector-list from ' \
'discovery server')
else:
if collectors:
disc_trace = CollectorTrace()
disc_trace.collectors = []
for collector in collectors:
self.redis_uve_list.append((collector['ip-address'],
self._args.redis_server_port))
disc_trace.collectors.append(collector['ip-address'])
disc_trace.trace_msg(name='DiscoveryMsg')
self._uve_server.update_redis_uve_list(self.redis_uve_list)
self._state_server.update_redis_list(self.redis_uve_list)
if self.redis_uve_list:
gevent.sleep(60)
else:
gevent.sleep(5)
# end poll_collector_list
def disc_cb(self, clist):
'''
Analytics node may be brought up/down any time. For UVE aggregation,
Opserver needs to know the list of all Analytics nodes (redis-uves).
        Periodically poll the Collector list [in lieu of
        redis-uve nodes] from the discovery.
'''
newlist = []
for elem in clist:
(ipaddr,port) = elem
newlist.append((ipaddr, self._args.redis_server_port))
self._uve_server.update_redis_uve_list(newlist)
self._state_server.update_redis_list(newlist)
def main():
opserver = OpServer()
gevs = [
gevent.spawn(opserver.start_webserver),
gevent.spawn(opserver.cpu_info_logger),
gevent.spawn(opserver.start_uve_server)]
if opserver.disc:
sp = ServicePoller(opserver._logger, CollectorTrace, opserver.disc, \
COLLECTOR_DISCOVERY_SERVICE_NAME, opserver.disc_cb)
sp.start()
gevs.append(sp)
gevent.joinall(gevs)
if __name__ == '__main__':
main()
|
apache-2.0
| -6,446,021,524,382,836,000
| 40.362786
| 98
| 0.526124
| false
| 3.963247
| false
| false
| false
|
borevitzlab/Gigavision
|
libs/IPCamera.py
|
1
|
21632
|
import datetime
import logging.config
import os
import re
import time
import numpy
import requests
from requests.auth import HTTPBasicAuth, HTTPDigestAuth
from xml.etree import ElementTree
from PIL import Image
from io import BytesIO
try:
logging.config.fileConfig("logging.ini")
except:
pass
exiv2_exists = False
try:
import pyexiv2
exiv2_exists = True
except Exception as e:
logging.debug("Couldnt import pyexiv2: {}".format(str(e)))
class IPCamera(object):
def __init__(self, identifier=None, config=None, **kwargs):
if not config:
config = dict()
self.config = config.copy()
self.return_parser = config.get("return_parser", "plaintext")
e = os.environ.get("RETURN_PARSER", None)
e = os.environ.get("CAMERA_RETURN_PARSER", e)
self.return_parser = e if e is not None else self.return_parser
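        # Most settings below follow the same pattern: the config value can be
        # overridden by a generic environment variable, which in turn can be
        # overridden by a CAMERA_-prefixed one.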
self.logger = logging.getLogger(identifier)
self.identifier = identifier
self.camera_name = config.get("camera_name", identifier)
self.interval = int(config.get("interval", 300))
self.current_capture_time = datetime.datetime.now()
self._image = None
self._notified = []
format_str = config.get("format_url", "http://{HTTP_login}@{ip}{command}")
e = os.environ.get("FORMAT_URL", None)
e = os.environ.get("CAMERA_FORMAT_URL", e)
format_str = e if e is not None else format_str
self.auth_type = config.get("auth_type", "basic")
e = os.environ.get("AUTH_TYPE", None)
e = os.environ.get("CAMERA_AUTH_TYPE", e)
self.auth_type = e if e is not None else self.auth_type
self.auth_object = None
username = config.get("username", "admin")
e = os.environ.get("AUTH_USERNAME", None)
e = os.environ.get("CAMERA_AUTH_USERNAME", e)
username = e if e is not None else username
password = config.get("password", "admin")
e = os.environ.get("AUTH_PASSWORD", None)
e = os.environ.get("CAMERA_AUTH_PASSWORD", e)
        password = e if e is not None else password
if format_str.startswith("http://{HTTP_login}@"):
format_str = format_str.replace("{HTTP_login}@", "")
self.auth_object = HTTPBasicAuth(username, password)
self.auth_object_digest = HTTPDigestAuth(username, password)
self.auth_object = self.auth_object_digest if self.auth_type == "digest" else self.auth_object
self._HTTP_login = config.get("HTTP_login", "{user}:{password}").format(
user=username,
password=password)
ip = config.get("ip", "192.168.1.101:81")
ip = os.environ.get("IP", ip)
ip = os.environ.get("CAMERA_IP", ip)
        self._url = format_str.format(
            ip=ip,
            HTTP_login=self._HTTP_login,
            command="{command}")
self._image_size = config.get("image_size", [1920, 1080])
self._image_size = os.environ.get("CAMERA_IMAGE_SIZE", self._image_size)
        if type(self._image_size) is str:
            self._image_size = re.split(r"[\W+|\||,|x|:]", self._image_size)
            self._image_size = [int(float(x)) for x in self._image_size]
self.image_quality = config.get("image_quality", 100)
self.image_quality = os.environ.get("CAMERA_IMAGE_QUALITY", self.image_quality)
# no autofocus modes by default.
self._autofocus_modes = config.get("autofocus_modes", [])
self._hfov_list = config.get("horizontal_fov_list",
[71.664, 58.269, 47.670, 40.981, 33.177, 25.246, 18.126, 12.782, 9.217, 7.050,
5.82])
self._vfov_list = config.get("vertical_fov_list",
[39.469, 33.601, 26.508, 22.227, 16.750, 13.002, 10.324, 7.7136, 4.787, 3.729,
2.448])
self._hfov = self._vfov = None
self._zoom_list = config.get("zoom_list", [50, 150, 250, 350, 450, 550, 650, 750, 850, 950, 1000])
self._focus_range = config.get("focus_range", [1, 99999])
# set commands from the rest of the config.
self.command_urls = config.get('urls', {})
self.return_keys = config.get("keys", {})
self.logger.info(self.status)
def _make_request(self, command_string, *args, **kwargs):
"""
Makes a generic request formatting the command string and applying the authentication.
:param command_string: command string like read stream raw
:type command_string: str
:param args:
:param kwargs:
:return:
"""
url = self._url.format(*args, command=command_string, **kwargs)
if "&" in url and "?" not in url:
url = url.replace("&", "?", 1)
response = None
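        # If the configured auth scheme is rejected with a 401, retry the same
        # request once using HTTP digest auth before giving up.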
try:
response = requests.get(url, timeout=60, auth=self.auth_object)
if response.status_code == 401:
self.logger.debug("Auth is not basic, trying digest")
response = requests.get(url, timeout=60, auth=self.auth_object_digest)
if response.status_code not in [200, 204]:
self.logger.error(
"[{}] - {}\n{}".format(str(response.status_code), str(response.reason), str(response.url)))
return
return response
except Exception as e:
self.logger.error("Some exception got raised {}".format(str(e)))
return
def _read_stream(self, command_string, *args, **kwargs):
"""
opens a url with the current HTTP_login string
:type command_string: str
:param command_string: url to go to with parameters
:return: string of data returned from the camera
"""
response = self._make_request(command_string, *args, **kwargs)
if response is None:
return
return response.text
def _read_stream_raw(self, command_string, *args, **kwargs):
"""
opens a url with the current HTTP_login string
:param command_string: url to go to with parameters
:type command_string: str
:return: string of data returned from the camera
"""
response = self._make_request(command_string, *args, **kwargs)
if response is None:
return
return response.content
def _get_cmd(self, cmd):
cmd_str = self.command_urls.get(cmd, None)
        if not cmd_str and cmd not in self._notified:
            print("No command available for \"{}\"".format(cmd))
            self._notified.append(cmd)
            return None, None
keys = self.return_keys.get(cmd, [])
if type(keys) not in (list, tuple):
keys = [keys]
return cmd_str, keys
@staticmethod
def get_value_from_xml(message_xml, *args):
"""
        gets float, int or string values from an xml string where the key is
        the tag of the first element with value as text.
        :param message_xml: the xml to search in.
:param args: list of keys to find values for.
:rtype: dict
:return: dict of arg: value pairs requested
"""
return_values = dict()
if not len(args):
return return_values
if not len(message_xml):
return return_values
# apparently, there is an issue parsing when the ptz returns INVALID XML (WTF?)
# these seem to be the tags that get mutilated.
illegal = ['\n', '\t', '\r',
"<CPStatusMsg>", "</CPStatusMsg>", "<Text>",
"</Text>", "<Type>Info</Type>", "<Type>Info",
"Info</Type>", "</Type>", "<Type>"]
for ill in illegal:
message_xml = message_xml.replace(ill, "")
root_element = ElementTree.Element("invalidation_tag")
try:
root_element = ElementTree.fromstring(message_xml)
except Exception as e:
print(str(e))
print("Couldnt parse XML!!!")
print(message_xml)
            return_values = dict()
for key in args:
target_ele = root_element.find(key)
if target_ele is None:
continue
value = target_ele.text.replace(' ', '')
if value is None:
continue
types = [float, int, str]
for t in types:
try:
return_values[key] = t(value)
break
except ValueError:
pass
else:
print("Couldnt cast an xml element text attribute to str. What are you feeding the xml parser?")
return return_values
@staticmethod
def get_value_from_plaintext(message, *args):
"""
        gets float, int or string values from a plaintext message of
        "key = value" lines.
:param message:
:param args: list of keys to find values for.
:rtype: dict
:return: dict of arg: value pairs requested
"""
return_values = dict()
if not len(args):
return return_values
if not len(message):
return return_values
for line in message.split("\n"):
line = line.replace("= ", "=").replace(" =", "=").strip()
name, value = line.partition("=")[::2]
name, value = name.strip(), value.strip()
types = [float, int, str]
if name in args:
for t in types:
try:
v = t(value)
if str(v).lower() in ['yes', 'no', 'true', 'false', 'on', 'off']:
v = str(v).lower() in ['yes', 'true', 'on']
return_values[name] = v
break
except ValueError:
pass
else:
print("Couldnt cast an plaintext element text attribute to str. What are you feeding the parser?")
return return_values
def get_value_from_stream(self, stream, *keys):
"""
Gets a value from some text data (xml or plaintext = separated values)
returns a dict of "key":value pairs.
:param stream: text data to search for values
:type stream: str
:param keys:
:type keys: list
:return: dict of values
:rtype: dict
"""
if self.return_parser == 'plaintext':
return self.get_value_from_plaintext(stream, *keys)
elif self.return_parser == 'xml':
return self.get_value_from_xml(stream, *keys)
else:
return dict()
def encode_write_image(self, img: Image, fn: str) -> list:
"""
        takes a PIL image and writes it to disk in the configured output
        formats (jpg and tiff by default)
        also tries to add exif data to the images
        :param PIL.Image img: image to write
:param str fn: filename
:return: files successfully written.
:rtype: list(str)
"""
# output types must be valid!
fnp = os.path.splitext(fn)[0]
successes = list()
output_types = ["jpg", "tiff"]
e = os.environ.get("OUTPUT_TYPES", None)
if e is not None:
            output_types = re.split(r"[\W+|\||,|:]", e)
for ext in output_types:
fn = "{}.{}".format(fnp, ext)
s = False
try:
if ext in ("tiff", "tif"):
if fn.endswith(".tiff"):
fn = fn[:-1]
img.save(fn, format="TIFF", compression='tiff_lzw')
if ext in ("jpeg", "jpg"):
img.save(fn, format="JPEG", quality=95, optimize=True, progressive=True, subsampling="4:4:4")
else:
img.save(fn)
s = True
except Exception as e:
self.logger.error("Couldnt write image")
self.logger.error(e)
# im = Image.fromarray(np.uint8(img))
# s = cv2.imwrite(fn, img)
if s:
successes.append(fn)
try:
# set exif data
if exiv2_exists:
meta = pyexiv2.ImageMetadata(fn)
meta.read()
for k, v in self.exif.items():
try:
meta[k] = v
except:
pass
meta.write()
except Exception as e:
self.logger.debug("Couldnt write the appropriate metadata: {}".format(str(e)))
return successes
def capture_image(self, filename=None) -> numpy.array:
"""
        Captures an image with the IP camera, using requests.get to acquire the image.
:param filename: filename without extension to capture to.
:return: list of filenames (of captured images) if filename was specified, otherwise a numpy array of the image.
:rtype: numpy.array or list
"""
st = time.time()
cmd, keys = self._get_cmd("get_image")
if "{width}" in cmd and "{height}" in cmd:
cmd = cmd.format(width=self._image_size[0], height=self.image_size[1])
if not cmd:
self.logger.error("No capture command, this is wrong...")
return self._image
url = self._url.format(command=cmd)
for x in range(10):
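            # Try the capture up to 10 times, sleeping briefly between
            # failures; the for/else below reports an error only if every
            # attempt failed.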
try:
# fast method
a = self._read_stream_raw(cmd)
# b = numpy.fromstring(a, numpy.uint8)
self._image = Image.open(BytesIO(a))
if filename:
rfiles = self.encode_write_image(self._image, filename)
self.logger.debug("Took {0:.2f}s to capture".format(time.time() - st))
return rfiles
else:
self.logger.debug("Took {0:.2f}s to capture".format(time.time() - st))
break
except Exception as e:
self.logger.error("Capture from network camera failed {}".format(str(e)))
time.sleep(0.2)
else:
self.logger.error("All capture attempts (10) for network camera failed.")
return self._image
# def set_fov_from_zoom(self):
# self._hfov = numpy.interp(self._zoom_position, self.zoom_list, self.hfov_list)
# self._vfov = numpy.interp(self._zoom_position, self.zoom_list, self.vfov_list)
@property
def image_quality(self) -> float:
"""
Image quality as a percentage.
:getter: cached.
:setter: to camera.
:rtype: float
"""
return self._image_quality
@image_quality.setter
def image_quality(self, value: float):
        assert (1 <= value <= 100)
        # cache the value so the image_quality getter has something to return
        self._image_quality = value
cmd, keys = self._get_cmd("get_image_quality")
if cmd:
self._read_stream(cmd.format(value))
@property
def image_size(self) -> list:
"""
Image resolution in pixels, tuple of (width, height)
:getter: from camera.
:setter: to camera.
:rtype: tuple
"""
cmd, keys = self._get_cmd("get_image_size")
if cmd:
stream = self._read_stream(cmd)
            output = self.get_value_from_stream(stream, *keys)
            width, height = self._image_size
            for k, v in output.items():
if "width" in k:
width = v
if "height" in k:
height = v
self._image_size = [width, height]
return self._image_size
@image_size.setter
def image_size(self, value):
assert type(value) in (list, tuple), "image size is not a list or tuple!"
        assert len(value) == 2, "image size doesn't have 2 elements; width and height are required"
value = list(value)
cmd, keys = self._get_cmd("set_image_size")
if cmd:
self._read_stream(cmd.format(width=value[0], height=value[1]))
self._image_size = value
@property
def focus_mode(self) -> str:
"""
TODO: this is broken, returns the dict of key: value not value
Focus Mode
When setting, the mode provided must be in 'focus_modes'
:getter: from camera.
:setter: to camera.
:rtype: str
"""
cmd, keys = self._get_cmd("get_focus_mode")
if not cmd:
return None
stream_output = self._read_stream(cmd)
return self.get_value_from_stream(stream_output, keys)['mode']
@focus_mode.setter
def focus_mode(self, mode: str):
assert (self._autofocus_modes is not None)
if str(mode).upper() not in [x.upper() for x in self._autofocus_modes]:
print("Focus mode not in list of supported focus modes, not setting.")
return
cmd, keys = self._get_cmd("set_focus_mode")
if cmd:
self._read_stream(cmd.format(mode=mode))
@property
def focus_position(self):
"""
Focal position as an absolute value.
:getter: from camera.
:setter: to camera.
:rtype: float
"""
cmd, keys = self._get_cmd("get_focus")
if not cmd:
return None
stream_output = self._read_stream(cmd)
result = self.get_value_from_stream(stream_output, keys)
return next(iter(result), float(99999))
@focus_position.setter
def focus_position(self, absolute_position):
self.logger.debug("Setting focus position to {}".format(absolute_position))
cmd, key = self._get_cmd("set_focus")
if not cmd:
return
assert (self._focus_range is not None and absolute_position is not None)
absolute_position = min(self._focus_range[1], max(self._focus_range[0], absolute_position))
assert (self._focus_range[0] <= absolute_position <= self._focus_range[1])
self._read_stream(cmd.format(focus=absolute_position))
def focus(self):
"""
focuses the camera by cycling it through its autofocus modes.
"""
self.logger.debug("Focusing...")
tempfocus = self.focus_mode
cmd, key = self._get_cmd("set_autofocus_mode")
if not cmd or not self._autofocus_modes:
return
for mode in self._autofocus_modes:
self.focus_mode = mode
time.sleep(2)
self.focus_mode = tempfocus
self._read_stream(cmd.format(mode=self._autofocus_modes[0]))
time.sleep(2)
self.logger.debug("Focus complete.")
@property
def focus_range(self):
"""
Information about the focus of the camera
:return: focus type, focus max, focus min
:rtype: list [str, float, float]
"""
cmd, keys = self._get_cmd("get_focus_range")
if not cmd:
return None
stream_output = self._read_stream(cmd)
values = self.get_value_from_stream(stream_output, keys)
return values[2:0:-1]
@property
def hfov_list(self):
"""
List of horizontal FoV values corresponding to the zoom list.
:getter: cached.
:setter: cache.
:rtype: list(float)
"""
return self._hfov_list
@hfov_list.setter
def hfov_list(self, value):
assert type(value) in (list, tuple), "must be either list or tuple"
# assert len(value) == len(self._zoom_list), "must be the same length as zoom list"
self._hfov_list = list(value)
@property
def vfov_list(self):
"""
List of vertical FoV values corresponding to the zoom list.
:getter: cached.
:setter: cache.
:rtype: list(float)
"""
return self._vfov_list
@vfov_list.setter
def vfov_list(self, value):
assert type(value) in (list, tuple), "must be either list or tuple"
# assert len(value) == len(self._zoom_list), "must be the same length as zoom list"
self._vfov_list = list(value)
@property
def hfov(self):
"""
Horizontal FoV
:getter: calculated using cached zoom_position, zoom_list and hfov_list.
:setter: cache.
:rtype: float
"""
# self._hfov = numpy.interp(self._zoom_position, self.zoom_list, self.hfov_list)
return self._hfov
@hfov.setter
def hfov(self, value: float):
self._hfov = value
@property
def vfov(self):
"""
Vertical FoV
:getter: calculated using cached zoom_position, zoom_list and vfov_list.
:setter: cache.
:rtype: float
"""
# self._vfov = numpy.interp(self._zoom_position, self.zoom_list, self.vfov_list)
return self._vfov
@vfov.setter
def vfov(self, value: float):
self._vfov = value
@property
def status(self) -> str:
"""
Helper property for a string of the current zoom/focus status.
:return: informative string of zoom_pos zoom_range focus_pos focus_range
:rtype: str
"""
# fmt_string = "zoom_pos:\t{}\nzoom_range:\t{}"
fmt_string = "".join(("\nfocus_pos:\t{}\nfocus_range:\t{}"))
return fmt_string.format(self.focus_position, self.focus_range)
|
mit
| 2,764,234,713,448,507,000
| 34.873964
| 120
| 0.546644
| false
| 3.947445
| true
| false
| false
|
emijrp/pywikibot-core
|
tests/namespace_tests.py
|
1
|
11867
|
# -*- coding: utf-8 -*-
"""Tests for the Namespace class."""
#
# (C) Pywikibot team, 2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id$'
from collections import Iterable
from pywikibot.site import Namespace
from tests.aspects import unittest, TestCase, AutoDeprecationTestCase
import sys
if sys.version_info[0] > 2:
basestring = (str, )
unicode = str
# Default namespaces which should work in any MW wiki
_base_builtin_ns = {
'Media': -2,
'Special': -1,
'': 0,
'Talk': 1,
'User': 2,
'User talk': 3,
'Project': 4,
'Project talk': 5,
'MediaWiki': 8,
'MediaWiki talk': 9,
'Template': 10,
'Template talk': 11,
'Help': 12,
'Help talk': 13,
'Category': 14,
'Category talk': 15,
}
image_builtin_ns = dict(_base_builtin_ns)
image_builtin_ns['Image'] = 6
image_builtin_ns['Image talk'] = 7
file_builtin_ns = dict(_base_builtin_ns)
file_builtin_ns['File'] = 6
file_builtin_ns['File talk'] = 7
builtin_ns = dict(list(image_builtin_ns.items()) + list(file_builtin_ns.items()))
class TestNamespaceObject(TestCase):
"""Test cases for Namespace class."""
net = False
def testNamespaceTypes(self):
"""Test cases for methods manipulating namespace names."""
ns = Namespace.builtin_namespaces(use_image_name=False)
self.assertIsInstance(ns, dict)
self.assertTrue(all(x in ns for x in range(0, 16)))
self.assertTrue(all(isinstance(key, int)
for key in ns))
self.assertTrue(all(isinstance(val, Iterable)
for val in ns.values()))
self.assertTrue(all(isinstance(name, basestring)
for val in ns.values()
for name in val))
# Use a namespace object as a dict key
self.assertEqual(ns[ns[6]], ns[6])
def testNamespaceConstructor(self):
"""Test Namespace constructor and basic attributes."""
kwargs = {u'case': u'first-letter'}
y = Namespace(id=6, custom_name=u'dummy', canonical_name=u'File',
aliases=[u'Image', u'Immagine'], **kwargs)
self.assertEqual(y.id, 6)
self.assertEqual(y.custom_name, u'dummy')
self.assertEqual(y.canonical_name, u'File')
self.assertNotEqual(y.custom_name, u'Dummy')
self.assertNotEqual(y.canonical_name, u'file')
self.assertIn(u'Image', y.aliases)
self.assertIn(u'Immagine', y.aliases)
self.assertEqual(len(y), 4)
self.assertEqual(list(y), ['dummy', u'File', u'Image', u'Immagine'])
self.assertEqual(y.case, u'first-letter')
def testNamespaceNameCase(self):
"""Namespace names are always case-insensitive."""
kwargs = {u'case': u'first-letter'}
y = Namespace(id=6, custom_name=u'dummy', canonical_name=u'File',
aliases=[u'Image', u'Immagine'], **kwargs)
self.assertIn(u'dummy', y)
self.assertIn(u'Dummy', y)
self.assertIn(u'file', y)
self.assertIn(u'File', y)
self.assertIn(u'image', y)
self.assertIn(u'Image', y)
self.assertIn(u'immagine', y)
self.assertIn(u'Immagine', y)
def testNamespaceToString(self):
"""Test Namespace string representations."""
ns = Namespace.builtin_namespaces(use_image_name=False)
self.assertEqual(str(ns[0]), ':')
self.assertEqual(str(ns[1]), 'Talk:')
self.assertEqual(str(ns[6]), ':File:')
self.assertEqual(unicode(ns[0]), u':')
self.assertEqual(unicode(ns[1]), u'Talk:')
self.assertEqual(unicode(ns[6]), u':File:')
kwargs = {u'case': u'first-letter'}
y = Namespace(id=6, custom_name=u'ملف', canonical_name=u'File',
aliases=[u'Image', u'Immagine'], **kwargs)
self.assertEqual(str(y), ':File:')
if sys.version_info[0] <= 2:
self.assertEqual(unicode(y), u':ملف:')
self.assertEqual(y.canonical_prefix(), ':File:')
self.assertEqual(y.custom_prefix(), u':ملف:')
def testNamespaceCompare(self):
"""Test Namespace comparison operators."""
a = Namespace(id=0, canonical_name=u'')
self.assertEqual(a, 0)
self.assertEqual(a, '')
self.assertNotEqual(a, None)
x = Namespace(id=6, custom_name=u'dummy', canonical_name=u'File',
aliases=[u'Image', u'Immagine'])
y = Namespace(id=6, custom_name=u'ملف', canonical_name=u'File',
aliases=[u'Image', u'Immagine'])
z = Namespace(id=7, custom_name=u'dummy', canonical_name=u'File',
aliases=[u'Image', u'Immagine'])
self.assertEqual(x, x)
self.assertEqual(x, y)
self.assertNotEqual(x, a)
self.assertNotEqual(x, z)
self.assertEqual(x, 6)
self.assertEqual(x, u'dummy')
self.assertEqual(x, u'Dummy')
self.assertEqual(x, u'file')
self.assertEqual(x, u'File')
self.assertEqual(x, u':File')
self.assertEqual(x, u':File:')
self.assertEqual(x, u'File:')
self.assertEqual(x, u'image')
self.assertEqual(x, u'Image')
self.assertEqual(y, u'ملف')
self.assertLess(a, x)
self.assertGreater(x, a)
self.assertGreater(z, x)
self.assertIn(6, [x, y, z])
self.assertNotIn(8, [x, y, z])
def testNamespaceNormalizeName(self):
"""Test Namespace.normalize_name."""
self.assertEqual(Namespace.normalize_name(u'File'), u'File')
self.assertEqual(Namespace.normalize_name(u':File'), u'File')
self.assertEqual(Namespace.normalize_name(u'File:'), u'File')
self.assertEqual(Namespace.normalize_name(u':File:'), u'File')
self.assertEqual(Namespace.normalize_name(u''), u'')
self.assertEqual(Namespace.normalize_name(u':'), False)
self.assertEqual(Namespace.normalize_name(u'::'), False)
self.assertEqual(Namespace.normalize_name(u':::'), False)
self.assertEqual(Namespace.normalize_name(u':File::'), False)
self.assertEqual(Namespace.normalize_name(u'::File:'), False)
self.assertEqual(Namespace.normalize_name(u'::File::'), False)
def test_repr(self):
"""Test Namespace.__repr__."""
a = Namespace(id=0, canonical_name=u'Foo')
s = repr(a)
r = "Namespace(id=0, custom_name=%r, canonical_name=%r, aliases=[])" \
% (unicode('Foo'), unicode('Foo'))
self.assertEqual(s, r)
a.defaultcontentmodel = 'bar'
s = repr(a)
r = "Namespace(id=0, custom_name=%r, canonical_name=%r, aliases=[], defaultcontentmodel=%r)" \
% (unicode('Foo'), unicode('Foo'), unicode('bar'))
self.assertEqual(s, r)
a.case = 'upper'
s = repr(a)
r = "Namespace(id=0, custom_name=%r, canonical_name=%r, aliases=[], case=%r, defaultcontentmodel=%r)" \
% (unicode('Foo'), unicode('Foo'), unicode('upper'), unicode('bar'))
self.assertEqual(s, r)
b = eval(repr(a))
self.assertEqual(a, b)
class TestNamespaceDictDeprecated(AutoDeprecationTestCase):
"""Test static/classmethods in Namespace replaced by NamespacesDict."""
net = False
def test_resolve(self):
"""Test Namespace.resolve."""
namespaces = Namespace.builtin_namespaces(use_image_name=False)
main_ns = namespaces[0]
file_ns = namespaces[6]
special_ns = namespaces[-1]
self.assertEqual(Namespace.resolve([6]), [file_ns])
self.assertEqual(Namespace.resolve(['File']), [file_ns])
self.assertEqual(Namespace.resolve(['6']), [file_ns])
self.assertEqual(Namespace.resolve([file_ns]), [file_ns])
self.assertEqual(Namespace.resolve([file_ns, special_ns]),
[file_ns, special_ns])
self.assertEqual(Namespace.resolve([file_ns, file_ns]),
[file_ns, file_ns])
self.assertEqual(Namespace.resolve(6), [file_ns])
self.assertEqual(Namespace.resolve('File'), [file_ns])
self.assertEqual(Namespace.resolve('6'), [file_ns])
self.assertEqual(Namespace.resolve(file_ns), [file_ns])
self.assertEqual(Namespace.resolve(0), [main_ns])
self.assertEqual(Namespace.resolve('0'), [main_ns])
self.assertEqual(Namespace.resolve(-1), [special_ns])
self.assertEqual(Namespace.resolve('-1'), [special_ns])
self.assertEqual(Namespace.resolve('File:'), [file_ns])
self.assertEqual(Namespace.resolve(':File'), [file_ns])
self.assertEqual(Namespace.resolve(':File:'), [file_ns])
self.assertEqual(Namespace.resolve('Image:'), [file_ns])
self.assertEqual(Namespace.resolve(':Image'), [file_ns])
self.assertEqual(Namespace.resolve(':Image:'), [file_ns])
self.assertRaises(TypeError, Namespace.resolve, [True])
self.assertRaises(TypeError, Namespace.resolve, [False])
self.assertRaises(TypeError, Namespace.resolve, [None])
self.assertRaises(TypeError, Namespace.resolve, True)
self.assertRaises(TypeError, Namespace.resolve, False)
self.assertRaises(TypeError, Namespace.resolve, None)
self.assertRaises(KeyError, Namespace.resolve, -10)
self.assertRaises(KeyError, Namespace.resolve, '-10')
self.assertRaises(KeyError, Namespace.resolve, 'foo')
self.assertRaises(KeyError, Namespace.resolve, ['foo'])
self.assertRaisesRegex(KeyError,
r'Namespace identifier\(s\) not recognised: -10',
Namespace.resolve, [-10, 0])
self.assertRaisesRegex(KeyError,
r'Namespace identifier\(s\) not recognised: foo',
Namespace.resolve, [0, 'foo'])
self.assertRaisesRegex(KeyError,
r'Namespace identifier\(s\) not recognised: -10,-11',
Namespace.resolve, [-10, 0, -11])
def test_lookup_name(self):
"""Test Namespace.lookup_name."""
file_nses = Namespace.builtin_namespaces(use_image_name=False)
image_nses = Namespace.builtin_namespaces(use_image_name=True)
for name, ns_id in builtin_ns.items():
file_ns = Namespace.lookup_name(name, file_nses)
self.assertIsInstance(file_ns, Namespace)
image_ns = Namespace.lookup_name(name, image_nses)
self.assertIsInstance(image_ns, Namespace)
with self.disable_assert_capture():
self.assertEqual(file_ns.id, ns_id)
self.assertEqual(image_ns.id, ns_id)
class TestNamespaceCollections(TestCase):
"""Test how Namespace interact when in collections."""
net = False
def test_set(self):
"""Test converting sequence of Namespace to a set."""
namespaces = Namespace.builtin_namespaces(use_image_name=False)
self.assertTrue(all(isinstance(x, int) for x in namespaces))
self.assertTrue(all(isinstance(x, int) for x in namespaces.keys()))
self.assertTrue(all(isinstance(x, Namespace)
for x in namespaces.values()))
namespaces_set = set(namespaces)
self.assertEqual(len(namespaces), len(namespaces_set))
self.assertTrue(all(isinstance(x, int) for x in namespaces_set))
def test_set_minus(self):
"""Test performing set minus operation on set of Namespace objects."""
namespaces = Namespace.builtin_namespaces(use_image_name=False)
excluded_namespaces = set([-1, -2])
positive_namespaces = set(namespaces) - excluded_namespaces
self.assertEqual(len(namespaces),
len(positive_namespaces) + len(excluded_namespaces))
if __name__ == '__main__':
try:
unittest.main()
except SystemExit:
pass
|
mit
| 8,047,064,952,597,964,000
| 35.922118
| 111
| 0.599477
| false
| 3.795069
| true
| false
| false
|
MoroGasper/client
|
client/plugins/hoster/http.py
|
1
|
26525
|
# -*- coding: utf-8 -*-
"""Copyright (C) 2013 COLDWELL AG
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import re
import gevent
from gevent.lock import Semaphore
from contextlib import closing
import base64
from ... import core, hoster, account as account_module, event, logger
from ...scheme import Column, transaction
from bs4 import BeautifulSoup
from requests.auth import HTTPBasicAuth, HTTPDigestAuth
log = logger.get('http')
@event.register('account.domain:changed')
def _(e, account, old):
if re.match(r'^w:', account.domain):
account._domain = hoster.wildcard(account.domain)
elif re.match(r'^r:', account.domain):
account._domain = hoster.regexp(account.domain)
else:
account._domain = re.compile(re.quote(account.domain))
class Account(account_module.Profile, account_module.HttpAccount):
scheme = Column('api', read_only=False)
# options
auth_method = Column('api', read_only=False)
cookies = Column('api', read_only=False)
headers = Column('api', read_only=False)
def __init__(self, **kwargs):
account_module.Profile.__init__(self, **kwargs)
account_module.HttpAccount.__init__(self, **kwargs)
if not self.cookies:
self.cookies = {}
if not self.headers:
self.headers = {}
def get_login_data(self):
data = account_module.Profile.get_login_data(self)
data.update(dict(auth_method=self.auth_method, cookies=self.cookies, headers=self.headers))
return data
def match(self, file):
if self.scheme is not None and self.scheme != file.split_url.scheme:
return False
if not account_module.Profile.match(self, file):
return False
return True
def _http_request(self, func, *args, **kwargs):
self._http_request_prepare(kwargs)
if self.cookies:
if 'cookies' not in kwargs:
kwargs['cookies'] = dict()
kwargs['cookies'].update(self.cookies)
if self.headers:
if 'headers' not in kwargs:
kwargs['headers'] = dict()
kwargs['headers'].update(self.headers)
if self.auth_method and (self.username or self.password):
if self.auth_method == 'basic':
kwargs['auth'] = HTTPBasicAuth(self.username, self.password)
elif self.auth_method == 'digest':
kwargs['auth'] = HTTPDigestAuth(self.username, self.password)
else:
self.fatal('unknown auth method: {}'.format(self.auth_method))
return func(*args, **kwargs)
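# Illustrative sketch (assumption only, not from the original source): with
# auth_method set to "basic" or "digest" and a username/password configured,
# _http_request ends up delegating roughly like this:
#
#   func(url, auth=HTTPBasicAuth(self.username, self.password),
#        cookies=self.cookies, headers=self.headers)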
def on_initialize(self):
pass
@hoster.host
class this:
model = hoster.HttpHoster
account_model = Account
name = "http"
priority = 150
patterns = [
hoster.Matcher('https?')
]
config = [
hoster.cfg('domains', dict(), dict),
hoster.cfg('send_crawl_domains', False, bool, description='Report domain names that have no plugin')
]
_crawl_mime_types = 'text/.*'
_download_mime_types = '.*/.*'
input_lock = Semaphore()
def load_icon(hostname):
return base64.b64decode(_http_default_icon)
def get_hostname(file):
return file.split_url.host
def on_check(file):
# check if we have a multi hoster account for this file
acc = hoster.get_multihoster_account('check', multi_match, file)
if acc:
oldacc = file.account
try:
file.log.info('trying multihoster {}, on_check of {}'.format(acc.name, file.url))
acc.hoster.get_download_context(file, acc)
return acc.hoster.on_check(file)
except gevent.GreenletExit:
raise
except BaseException as e:
log.exception(e)
file.account = oldacc
# default check code
with closing(file.account.get(file.url, referer=file.referer, stream=True)) as resp:
if resp.status_code in (301, 302, 303, 307):
return [hoster.urljoin(file.url, resp.headers['Location'])]
hoster.http_response_errors(file, resp)
content_type = None
if 'Content-Type' in resp.headers:
content_type = re.sub('; .*$', '', resp.headers['Content-Type'])
content_length = None
if 'Content-Length' in resp.headers:
content_length = int(resp.headers['Content-Length'])
content_disposition = None
if resp.headers.get('Content-Disposition', None) not in (None, 'attachment'):
content_disposition = resp.headers['Content-Disposition']
if content_disposition or (content_length and content_length > hoster.MB(2)): # or 'accept-ranges' in resp.headers:
return _on_check_download(file, resp, content_type, content_length, content_disposition)
if content_type:
if re.match(_crawl_mime_types, content_type):
return _on_check_crawl(file, resp, content_type, content_length, content_disposition)
elif re.match(_download_mime_types, content_type):
return _on_check_download(file, resp, content_type, content_length, content_disposition)
file.delete_after_greenlet()
def _on_check_download(file, resp, content_type, content_length, content_disposition):
if content_disposition:
name = hoster.contentdisposition.parse(content_disposition)
else:
path = hoster.urlsplit(file.url).path
name = os.path.basename(path)
file.set_infos(name, size=content_length)
def _on_check_crawl(file, resp, content_type, content_length, content_disposition):
# TODO: ask if the file should be parsed or downloaded
if False:
return _on_check_download(file, resp, content_type, content_length, content_disposition)
# send domain to backend
if this.config.send_crawl_domains:
domain = file.split_url.host
log.send('info', 'unknown domain: {}'.format(domain))
# parse data
data = resp.text
data = data.replace('\\/', '/')  # lazy way to also catch JSON-encoded links
links = hoster.collect_links(data)
def _collect(tag, attr):
for i in soup.select(tag):
url = i.get(attr)
if url:
url = hoster.urljoin(file.url, url)
if url not in links:
links.add(url)
try:
soup = BeautifulSoup(data)
_collect('a', 'href')
_collect('img', 'src')
title = soup.select('title')
if title:
title = title[0].text
except UnicodeEncodeError as e:
file.log.warning('error: {}'.format(e))
title = file.url
# filter links
hoster_links = []
anonymous_links = []
if not links:
return file.no_download_link()
for url in links:
try:
host = hoster.find(url, {'ftp', 'http', 'torrent'})
except ValueError:
continue
link = {'url': url, 'referer': file.url}
if host:
link['host'], link['pmatch'] = host
hoster_links.append(link)
#elif re.search(r'\.(jpe?g|gif|png|avi|flv|mkv|rar|zip|vob|srt|sub|mp3|mp4|ogg|opus)$', url):
elif re.search(r'\.(avi|flv|mkv|rar|zip|vob|srt|sub|mp3|mp4|ogg|opus)$', url):
anonymous_links.append(link)
if hoster_links:
core.add_links(hoster_links)
elif anonymous_links:
hostname = file.split_url.host
with input_lock:
if hostname in this.config.domains:
add = this.config.domains[hostname]
else:
remember, add = file.input_remember_button(['Found #{num} links on #{domain}. Do you want to add them?', dict(num=len(anonymous_links), domain=hostname)])
if add is None:
add = False
elif remember:
with transaction:
this.config.domains[hostname] = add
if add:
core.add_links(anonymous_links, package_name=title)
file.delete_after_greenlet()
def get_download_context(file):
# check if we have a multi hoster account for this file
acc = hoster.get_multihoster_account('download', multi_match, file)
if acc:
# context already set
return
else:
# default http download
file.set_download_context(
account=this.get_account('download', file),
download_func=on_download,
download_next_func=this.on_download_next)
def on_download(chunk):
return chunk.file.url
def multi_match(acc, hostname):
for host in acc.compatible_hosts:
if host.match(hostname):
return True
return False
_http_default_icon = """
iVBORw0KGgoAAAANSUhEUgAAAIAAAACACAYAAADDPmHLAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJ
bWFnZVJlYWR5ccllPAAAA2ZpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADw/eHBhY2tldCBiZWdp
bj0i77u/IiBpZD0iVzVNME1wQ2VoaUh6cmVTek5UY3prYzlkIj8+IDx4OnhtcG1ldGEgeG1sbnM6
eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IkFkb2JlIFhNUCBDb3JlIDUuMy1jMDExIDY2LjE0
NTY2MSwgMjAxMi8wMi8wNi0xNDo1NjoyNyAgICAgICAgIj4gPHJkZjpSREYgeG1sbnM6cmRmPSJo
dHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4gPHJkZjpEZXNjcmlw
dGlvbiByZGY6YWJvdXQ9IiIgeG1sbnM6eG1wTU09Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEu
MC9tbS8iIHhtbG5zOnN0UmVmPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvc1R5cGUvUmVz
b3VyY2VSZWYjIiB4bWxuczp4bXA9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC8iIHhtcE1N
Ok9yaWdpbmFsRG9jdW1lbnRJRD0ieG1wLmRpZDpDOUFGQzg0RTk4NDhFMjExOTk5OUYzRjU5RTY2
REU0MSIgeG1wTU06RG9jdW1lbnRJRD0ieG1wLmRpZDoxMkJGMThDMDRBOTYxMUUyQUREMzk3MTM0
MjU4QjYzNyIgeG1wTU06SW5zdGFuY2VJRD0ieG1wLmlpZDoxMkJGMThCRjRBOTYxMUUyQUREMzk3
MTM0MjU4QjYzNyIgeG1wOkNyZWF0b3JUb29sPSJBZG9iZSBQaG90b3Nob3AgQ1M2IChXaW5kb3dz
KSI+IDx4bXBNTTpEZXJpdmVkRnJvbSBzdFJlZjppbnN0YW5jZUlEPSJ4bXAuaWlkOkQ4RjUxNDg4
OTI0QUUyMTFCOTM4QUIwMzc5MzZFQTExIiBzdFJlZjpkb2N1bWVudElEPSJ4bXAuZGlkOkM5QUZD
ODRFOTg0OEUyMTE5OTk5RjNGNTlFNjZERTQxIi8+IDwvcmRmOkRlc2NyaXB0aW9uPiA8L3JkZjpS
REY+IDwveDp4bXBtZXRhPiA8P3hwYWNrZXQgZW5kPSJyIj8+zznWBAAALhFJREFUeNrsfQd8HFed
/5uZrdquVe+yJFu2JduJu9NNQhqpJoEkcJcLhDsfJJcQLglcAe4DgeN/pAB/4IADjlASSIFUSEIq
xMHEJU5c4m5Zve6utH135t535o00Gu2uVtKuLCl5yfPujnanvO/v/dr7/X6Pu/r75JS2Z83783Fa
D+2LaV9CezPti2gvpd3LupV2A+0O9v1h2hO0h2kfYL2H9qO0H6D9XdoP0j5EFlgzLIBnKKf9TNo3
0X46ALca+BKv1US8ViMpLDASl9lIrEaBmA20CzwROJ5wHCFGgZNPEE2IDiJRCpAkTySRrIgmkyQU
S5LhaIIMhuNkMBKTX8MJsZcRxE7aX6f9T7R3vU8As9vMtJ9D+0W0X+wwGZprXBZS5bCQYpuZuCjw
VgqyqPmBpL5K2mOSfBzHjPT76mBYKKHI37Op3xo7CSWOEn80XjIQjJ3dNRK5rX04QkZiSRDEs7T/
nvZXQE/zaTC5eSICjLSfT/u1tF/Z5Clw13sKSJ27gLgspjGAGcpSFuBPPKb/njTh2Ljz0g8cPUIJ
gnQEwqTNHybHfGEf/dNvaX+Y9j/SHn+fAGZOAFW0v7q40Fa/tMhG6gvtxMTzyuzVAz2L4I/jDuwL
cVEkJykhHBkcIUd94U569Ge0/4j2I+8TwPQIwEX7a5vrvK1rKz0KW08FyCkGf+L3JBKJJ8nRoSD5
88khHH6G9m/S/tJcIwB+DnOnAtDHmdWeeQW+rFvQDyaqcDYXO8nNq2u5CxuKL61xWl6kf36T9ssw
8d4ngMxNoP1Xm6rcG8+o8c4r8Nn/oy1JP1Q6C8j5DaXkksbi1Q0e6xP08F9o/+D7BJBGLNH+7fWV
rsvPri2a1+CznzF9RSIlDis5u66YXLioeG2dy/oHZj0sfp8Axrd71le4tp5XX0ySCwR8/TXKnFZy
3qJisqnaA1P2bdq/xpxT73kC2LqhwnU32H5SWnjga7+H52v0Osj1rZWmVaWOu+mhPbSf914ggCVM
xuvbFrD+llIXEeCYWcDgj11XIgI1aVvL3GRzvbeR+Q7ugz9qIRIAHG2gdPjckzoTcDPtv7y+pVLw
FJjeM+Crn9ErqKJ4w4oqbmWp4zb6cTvtyxYSAVQyV+l21SmiAb+V9seubi4zVbqs7znwx1+DIy2l
brKxytPKLIWP5N3cWvqhvIO/kTlA7qX9cR348PK9dHFjcclSajO/t8Ef+73baiKVdrNJ4MiHB8Jx
Oz34ou4n84YDfJTd/E9Z14IPL98zm+u8Va1U7mMQJFEincNh4o/ESUKU3pPgq+crLDCT0yo8pKXY
/jl65FHClqfyIZfzZcv/K+1fpv152r+gAx+LO4/By7em0iM/8P6+AHnqUG+Usb4ycAergS9wmAzE
aTYQm0kgeF9AX+1G+pl2K31vNQjy1RYS+OoLx3GktdyN1yve7h0GFwW/7p3rBABw/8fj8n58yD9w
kr7/mKr0adoDGyrdm2Hu4Um3tQ+SV9sG2+jxD9P+V/VL4YToCSdiFb2hGHSIciYyQBzV0JuYblG6
ocojrAMhLSDwRxVEkWqDJdQy4ri1u3sCWG7GqmjHXCUAODN+43K6L6XgJxn4fbrZf9PqcufWc+qL
5Cd8+Xgf+Uun/0UmLvp05xtifW+Ga3rfaB/qXF3hNvEct6DA115jcbEDQSzNu7oDr9EDH6D92Fwj
AMj0J5xOx9nstr9C+6s68FfR/h3IfJFS9ts9foD/XXrs1hRcItt21apS57wCH3c6HIuTrkCYnAxE
5ONNXpu8ZpDpGk1eEAFXv7PL/wJRgmLa5woByAodBX8TT1VX35D/TUYAeqXvkUuaSqzFBWb5QCwp
e/oDMwAf7WNLiuxzHnx87g9GleARCnogmoAL+Gnan4I11jUSfWh1ebK8qciR9hoYrYZCO+510a5u
mQjOSsE1Z50AnAr49k2mAoH0d/ugyN3IHD7a9mOq9DW0UHNPdX60ljjJi8cHPknf/gdRAjKn2mpp
P7vMblFcx3MIfMzySCJJuocjpJ2CftQXjjBzGIAjPuC47lnW7ujyPxJLiBuWl7nkc6S6hkwEXjt9
XnHJnp7hJ5kTLXSqCAA860nMfCM1UuJBeUZ/XZXZmtn/t6eXOq4+o1pZ2lUfBmvmVB8o2tEVgK7w
w+nM/k1VHm4uge8LR0knneEIEaPKawcD+2lmDWUCCt/d/Hbf8C9EIl3VSolAXQ/RX0OkNw1xkBSl
9Xv7Rn5FD109XS46EwKQ1+wh8402OihxnvgDgUOMALTgQ1u/T5b7OkDweUWpk1AC+PQ0CeCGpkJ7
TsDHjEU08NHBIDnuC5FSm5lUu6yyPZ4OfAUMkc7yqDzL9/eP4JF2YFKwmb57ig4ccMFrKKjfpj/a
ugzOMVW3SSFamikHpZ8v39cf/Ab9eMdsE8CPnE7n5UabKN9jJCTP/s+C8+m+94Pzags9pZRNiyk8
fIVWM6l3W1ce84XPpUdensL111TYzUudFuNohO90wMe99wyHybv9I+TAQLCfHnoQAFLlbP2bXX7Y
3RuWeu1ClctCyhwWYuB5EoolqCyPUAUuRE74I8gpeI7Ncsz2nhlyVczkf9zXNxKkN/y5pSUaIpCk
ceOHj4spkUQS4mepiIH/5NezRQB3OZ2uGwG+RO9XjBhJMBR4gVG9dvZft6LEcem6ysJx/Env4VtJ
uQAlgNumSAAfW0qVP2maAZxRKp8PDY6Q15WYPazCIXgTEb1qWPdLjJsV7R8YuYh2EMNFbrPB5Ysm
DjPA0WGbx/LgT/nnfQpHubOFigN4RlM7ujhS4y6AjvFj+uEd2vflmwAQ0/ZVgxXgJwjPUcVvMIBb
+bwOfPgEvg5wk5P49hfJbLwHA1xDe1uWzqbrFhXapgX+u/3D5OUTA53MPf1jkjlqF1zh56wbKPjV
ubLBs5lolAhgFmxdBhEqpXaFIxdidbnLRpVI6APrU3DhnK0FIM3qQbfXIRBeUfLFqLy0D5n3pu67
t51R5a4pc1gnXdhBP6/WixPdksU9wP7ddXqZswSZPlNl+zA9KfgwPS+k/V/I1EK2E7MIvtpuoTL+
sf09gQniS/uEVa4CQjniClUHywcBwHj/lcvtcAmmJJOfPAmH5NyHb6ZSEgUWv58JfPWBlkPWEfLJ
DIse5WwWvnRpU8nyTTXeUTk4FZmPLKBzar1O5qT6AJn7DYN9w97+kb8cGRgZ96DaRTC8a1L8IXCq
nZcPAvgGVfpWGQrEsaFNGkk4HNqtevx0bXfPSDQr8FVg6Kx207fXpxBTt9N+4Mxqzw23rq/n6jy2
aYGvcpvFdKAubCjG4sGzjOjmegNL//BbPYGOzkB4Avhjz8aRDdQshuJNsowxzJYAELx4i9mOC4qj
sz+uaP7fVb+kS/LY2RWMjiIw2ZIuyKqlVE7WvY2Mxc2fjfOsKXfde+PKaudp5e5Rh89M4/bBMrcs
LTdSJRXm5zdI6jC1udTg9r3mLx1DsUg8kXYMim0WiAKEl30pVwQAZ8/3PF4Hl5Tio7YTT8crMBzw
M7Yst4ujS7W/6wxEE7242WzX8z0WM2ktcSAU6mPMHHuZsvvWjdVeYjcbRs3IXDl5EHhxeqWHrCp1
/jNRglXsc5wIttH+heO+4OgM0T8vPtcXylL0dqazzZgAvkhZfx3P5D7Ax8WTMfkWnp3Ehbu7L6Rw
AXw7ThWwnZ1DJJ4QNQ8wXo61FDvI8mL7zy5tLPnYresXcfUe+4zs/Mk8fDy9k9MpZ9lQ4YJ18zsy
t7Ol0O490B98/thgcNzBseeV5DiCVWVOWEr/Nak3b5KQsOUwlRxekyBh/jHw0WJBjsRisXuY7ZlK
BKC1lNrMZ5Y7LGRf3zD5xdsd/hP+8H3bO32tJp6zVDgt42S5zG5MBlJPZTxmp5Rn9y6Q3tcXIE8e
7Im1D0fvpx8/M801idlur/QEo5+sdlnN0J204I+ahhYjCcbii4djyTcyWTqTUfs9bo/DKEqJceDD
9h8ZGY4zz1emtquX6gEY5GcP9yJ1GsEMiA5a9mrb4P/et+2odJDa5Fxa8ZAf8OFYOzY4Qn6084S0
rX0ItnMT7Qi98pH50eArubvdF9LEEUjjxgA+g2qXvLz8NZIhFzETAay1mK2XG6zSOPDl8KuErC8h
csc/yY3u7gnGKPh9Qwx81VeAqho30r6J/m3Xn070k6FwTL5GPsHH+buHw+Tlo33kpeMD8DquY1ZH
G5l/7XsHBoJv4Hn04KtjgHWMGqflNPr2yukQwOetdiNl/OI48EEKybh8lTeyuMlDfaFYGwN/R4q/
4xxrdnQHtj64p73/rW5/XsDH/QciMbK9fZA8fah33+Gh0BXMVn6TzN+GR7v1r50+iefSjQG1dtwy
F/jXqRIAnC6XCRYyAXzZ/E/Il9ibxU1CcUdY+M5JvoMqBRteOTEQEUUx5+Dv7vaR3+zr6trTO7yV
flxJ+xNkYTRw4Sd6R6Kpxwq6gNlIymwm1E46dyoE8HGXy2WQpMQE8PESV+zQg/of6czAUXMwiweB
TPnOubWFFo7ncsr2T/pD5M1OP0TOCkZoCbJwGpw9GwByKvBV/0uNwgXunAoB3Gi0cinBl0VAUh7D
7hw+yH1ryp0XIWYgl+Bj+fQPR/rw8W+Isqiz0NrfN3ttpbIlkAJ89b3XZladeY3ZEECL2WRZSoRE
SvDxfzIh+wRSxqKl4QKZ2tbVZc5bNlQXKgEjOVT4dnbJSj2SLV9YgOBj9t9ZSTV9MQP4ssimbxo8
NgzJJ7IhgAssVjORZOVvIvg5bohne6C52CH7sXNp6r07MExlfwDrFJ8nC7N9YonXVq6uiKYDH68D
4ThRbCzyd0RZSs9IABsFE0kLPkeUjBX9iabBBcCOHr10cYlRdvrkEPzhaJy8dmIQDp0byMRgjQYy
S5m3eWzg6Xcho1jKAvwuaorDzLabBFRLvWAyAljJG0ha8OV1AF7+mWMGD4BVv2fPqS1017ttOffw
Hegbwdu7yMToGMQSIEMZ4VNXz2MCuKmp0FaF6qfZgO+n4OOzQVGwr81EAPhcx/FiWvDRDEaZQqrS
3d0kpd9MmPkbKt2NrSzKJZfg4x+zQX4sj+66WPZ9fnN9UeHFTSVY9HmEKLkL/DwDH+N3F1YzpSzB
VxViUXnUKxkHSUkAToEXTBInpgUf/xqNMvdfMg3w0e5fU+7aLJd+y1Po9mKvvBqGKNlCZmLe21ps
/+EVS8qMGLiiAjO5rrWSay1xICLoScaR5mo7i3EztX+fPl8tZv9UwEdL0NcCI+9inFCZzHoC4AUh
I/h4bzDJdLNyGg9zK9X4t26sKSRJMT/g41/kG6ypcDmp/Y8ZXruyxHHJynK3fO/qYCBa6bQKN9ji
Jbu6AxALV2Xp3JrttnxZke3rSmFaQgEUSLnTShLS1MBXx8gki2/xYqJEMk8ggBA8cenBV44bzfKb
86Y4+3HRe5eVOPMKvvq3FVS8WARhK1V85KpcckClbsBEORSNfs8gNG1rH9rGOMZccxQdHookyKqK
MYk2XfDxj6hgCJ/A7alEgE8UkzIHSA0+Jx/ijCIVAyYsFVdm+RAoefKryxeXCo5UXqs8ZOwoVbjs
xEj1AaRnYS0AoeCpAigQlewyG6DUVs9BDnBoOJbIytSbDHyJ+QSMPNesYqfnAFRMSD2cyJdKgpgS
fPXkdkcBGRqMIaX7m5M8QDHtj32g3uuq1ioueQRfXQPY0+0nO7v9mNkIqcX6Rgm6zSjwViMvF5mw
UAKx0u62GIg/moCJeGyOEUD7SCwZo2NukgGYAfiyU4jCitozcVGCbvGQAWxbZ7fvEpP8RZwgpgRf
fW+xU91qkNzMPG1iBo31kU1V7sbmIuesgS/LsngC4GMZ+lIyfqcPIRhPltBe3E/ilYwoShmBROYg
B4Db9Ug0mVxqFoQZga+Ot6CAuUEmgBQX3JGISReZTOnBl99RMeB0OpcEAgEoT49qfo+Vh/9mpkbN
6nLX+tMrPOOSGvINvsw3B2RfwDfIxG1eMKBdrO+ZJ6bfkUiMEoBVmDH4ZGy2rhvVAXTKW5uYlDKC
r9TkoSaFQ5bn/07GR9QiA/bQunLnNde1VKzfUFU4q+DjdSQWJ1SzR47etxaI2/dwSBNcOxPwJaYf
0dYCJFM5QbqSyWRG8FVHsWCViMfjwTLrp3Qu4K9u7wq8csIXnhWFTzswuK9D/fLs/wqZQd78HGt9
EWY6zRR8xfqRoAhC6a0DASzWcYHDWO+fDHyFC4jE6hDUwa7QEAEo6COvtw91gBVzswQ+2mA4Svb0
DkOR+8EMBrxoDoGPEPl/r6KmbC7Al01BxRLAh2UggHN1CzuHotFImJOlQ3rw1U+8SSSFxW7Yzz8j
49cLwYKvfu5on//YUHBWwMcJDyuy/z/IzDJ2YXf9TZbfXUfyU+SZY8/xszNqCqn+J+QEfPU9i7uR
OQAiYpfqHv4tMS6MwZkGfJkL0P/sbgMpLPQgz+7LOlEAD9vlzx7uHTkyODzu6fJRlqU/GCH7+4NY
AHpwhoOPQIJlzGEyWbu+2mFGlbO3Se7SzLDW/3BTYcG/nd9QzNnMxpyCrxllmQBQZ2et7gZeSUQl
zXRODb56CIGjNo9sUCD48AYdESBv8ILnj/b37e0NjIv8zXVNnoMDQVUpTVkuBfek7ZM0pLz9nDmx
MrWN2BrmgobiljKb6YfMvJpJg6/+5cVe2zUoGIWV11yDrzHHZQKAvDtTdxMvRNjFRkPC04A/Wr2C
T5LyWi8+IN/+QzoiQPTvxtfaBg/s7PTJ7CfX1bh8kRg5PBTaBadTtiM9CREgmvlpdr7iNN+Bybuq
wCzICS11Suzd7TMkgBqPxbgO3km4fPMBPhlzi3t59nCbdYrgn4LBkRGeqgbZgK++NVDLv2pRKTwI
v9ESARtoZKds3N7pe+yNkwPEH4nltBRbXFlaDJA0NXnSgT0JEdxfUmBC4MrjzKmlb2vKbGY5fEbe
CUSphXA146qpGlyw/0D7L4lSPDtNk/Ow8ga+5nfFMgcotZlqdKwOHrGnE5HswcdRo8VInEUmsqi5
wsJmzsd1Aw3ZumV3z/DWX+/tDL/bN6w9zYzq8NlNsghqmAbImf6+qzcU+8OKEvsZjLPpvZw31rqt
o4MMubOq1IkbSVWwabvbbNjfVGj73vpK93VEyYI+M92F8wm+6hIG9iAAa6NHXj/XZwn+MjQSYwtD
WYBPFRWLReEYVpdAGpdXwrL4X/gEVIeTZqARnr2SioQXXz8xQIbCUR3QU6/AydyklYwt57Ldh9W4
ZUV26DZ3s2PYp3hHc5Ht7yqc1nH3Uq2IAcTeaWMMmlxmw9ozaotII5XrhTYLWVPp5pieMcEbC0dN
PsFX/iZTgBXACE1KAMW1OjHwzPBwoI0TDdmBbx0vLrBW0LKmjisrL0Uu4BOqba0RCSgpd/7e/pEb
H9vf3bWb6gaJpEimW34Vn4usRi4dF5iBPvDcyUBkV5UC9D1MHLy2ocrTsqTIKYM1XoRxIBZEHN2s
OXxlhcM8bsEEefyLC22tqXQGFITKL/ij55B3TbYjKLPEZlrF3INac/CH4eHklMEfDSgVRFJaV0Aq
qkuxILNb1TU0Ay4xLrF4R7f/qz/bczIMS0FFdKqFl50WeTI1TgLs9VPkEjj1/dgFdMuyCm5TjefK
K5eV8yV2S5rFLaRjycRyt0Z5vKrYbhl3QjxLtfI9WC3V4zmAlFfwNSLAPuoKXlEix3jerOMC3x/o
HwwL4FKTgU904LPviVKSFFVaoBeAPb/A2J5dxw1GmAlZv73Dd+9Pd58M7u3105sUNYM7edVtp6IH
NGUAk2dey29NkQv8kloYbcF4Qp65YtqAFmXYTVQcLS+2wzn2NbbKuN6ujYNgXzbQ77WUODAW94+7
Rym/4GvT8TEgw9GESJqLZAK4SSe7kE3z41BAnBx8biL4Yx8kYvcYyGmbFnEVVSXIz8OW69fo7XPm
PYQCteivnf57fr6nffDNjkHiC0UV/0EG8OUd3zMogqxtLvZ66qsqSj/BXKzZEgG44Yi8i4mUGXz1
b0rpO3k871le7OBVMaVfz0fyJuW+sBwuIUqs3iMmJag1r+DznPxmhFe1ATgcqHZqV71ZGi7wtf7e
gQgnGaYFPqd1GEl0BtUUkCWt1ZXFxV5UtfwT7WekIATsioGAzeq9fSM3P3GwZ9vTB7sJqmQlsFCV
pt6+w5Q+WJW1T5ZVOklppVVVRJuzFANY2l5cYDBMCr52xe1DS8q408rdN9ar9QxTBHP0heKEVxTY
X9D+ksNsbBRZ/B8nqZGAuZ75o06+JAggnGBJhMsVMXAr0YQNE6WI8b0hXwKe/2mDP+ZUkojVwZPq
xU7SsKTyDJfLCSL4HdEka2gIAat5qOC5iZpFy7e1D9330DudvX9u6yft/hAVEclx0xFbyGDyZVjg
ubKw2Egcbp40N1fYmL8iG31gqddqNBAuO/DVW0rKvgHLpDF8I7EksZlNbpvZyCXZjJS1c0oBoVii
l88x+BpUQjh3fzSuDKTNaCTryl1QSD6t4wJfp7rAyWRcmCH4Y3+Dq8NZZCCNrUWkvrH8cuZLf1Tr
StW5beHjRy3iqmO+8BWvnBh48OF3OodeO95PTlAFDaXZWaJEVRpQ/6GhocIs5zxg6bLaRurry1pI
djEDqzxyTeLswR/v25g8aUPSumjpPwYKPpXNeOYLg7FEzKirATAT8PGZiYABXl5rRrAkk1HLFC7w
BeaTVhtWcv4+5ItRqjTMGHztchdqDReWmcjasxr4puaqqylH2MbWD1DEQUjBFeLMrMRqXSk10S54
vX3ou4/v72p7hoqJYsUUXKST57jpreVVtlFFFbkPDZQLVVdO1AdS6AGt2LgqH+Cn0vbBJUXFs3k/
s56+Kqd65wh8vOcVRPpkDhCOjxZgI9BWN1S6vWTiTl/PDg0N/XSgIyTnBuYC/DH/EicTgqvYQJas
KCbLV9ScVVxciMLNR2n/ItFkIem4QpxZFuBYtb5IYklfOI73+g0Xr64oL6owmMYSXhSdJEnKq7PS
B1aMavF5Bh//GOhADceSao1itD1JScoZ+FqHIwjghBJ2PMaCVpbJk/82dVA0RHBHZ3tPZ3AwnlPw
tb+TqBS0URldv9RF1mxYVFNXX/4louyu8SzTqr0ZVvcOMjPz33QAfq6i0qG7rnJPTpdAli2bVB9Y
aaMcYDbA1xwBUaoVyy5X8vpyBL40isdxEMBxZNNqzQxkzZxb6zWxwdQadIO0f/TYoa5EMpoC1BmC
Px4ciQhmkZTVWMiGcxqF1pV1F1VWlPwPUQpTIKsFYWglWSz3XlZaUrjW6TFMAF99X1VjIw2L0uoD
sIxcyaQ0K+Bj9geiiRgZq8AKjC5VEluUe0ZiFujBqDGOpwL+WPAcIwB/dGLp0eWlTuxaeZ4qHzVc
ANuW3e3rjdKbMKQEbxz4ZDrgj1+A4ihXsHs4UrvERjad22BYeVrtBVVVJYg8RvkZBGOgxNvpZGKi
ixxVU1nlIEq+40TwVYW0ieoDNVVj+oCGgOCk+sIJVOfk8j3zR82+h4gStYy21iLwJWy1U76HYFzs
iyakO8JJMW4cXVvPHnyNB1MmgH39uDHdA+F6iOWn7QG2yKIlgm92dfZ8r/fEiFwzMCP43AzB1x+n
QNo9PKlbbCNnntcknLa6/ry6urL/53I6djD/waNMJwBBXFdZUbzKXWRKC/6oj4Ket7K6YJw+oCGC
e98dCD5xhMU35gt8iW1kwcZcbZeC/au/Ytvj9bPl5LOoDX8MiR4Clz34oOOEovbt46w/2Y8z+j+1
utYh8Pw4CsdlUbqNatnY8Aj19SWdZv1oeWXJ5aW1yPFP5h/8jOfhiEgZ2fBQgviGIsRHZ6zNZqbm
np043YYJ59H/Vr1WR1uYvLO3A9VPsfFCSEP0SM7btbrCXVvK1gFyDT7GOxJPBpkCjgpgCHDZbjfy
rVFxTEREEiK48Nnsp/Dc/shh5LdgL4Q481OnA182MSnMCc4Ay87Fsz+/44vEJ3iccKqV5S5yWpnz
ghS+AciNj3R19L7Q20bZI284heArgycYJOIpEUg9FRWnry8lS1rdUwIfx6spMTc1lI/qAxqCR4LJ
R3Z0+mKReDLn4I+5swWbzSg8YDHwryPAhb62xiWNwqd8eSVT0g0sxuLDw3HxM1GRRCASMoEvWxnK
RN8jB3KpwQp9rLS7PnpXFgVe2TeAwsNrdESAkJErOtt7/9B7nLJHTjhF4Gd/nkzgsycmi5udpLY6
pT6AyiJ3dwRCY2FtOQJffQ+LHIDLSZwCb0yy9+rv8NkqcE67kbuP+QjOZaf6/7AWJMJnBF9ZB5Af
9k2iUZre6B4tNjgxdNthMZLN9UVmJl+LdUQAd+3lnR29j/UcC476buYj+KP5DiRJLYO0+sD9VB/4
7VHoAzkGPxs7HxMyRtl8lLJ7SgjLaX+JKY3w4Lp5LjP4yjK7/KRvaAngtfZARGaj6VbcGgvtZF2l
G6FjT6r2soYI8LTXdHb23NtNiSAaFOcc+CRL8NXjLo9AWlsqU/kHMBw37e8fOd4zEplV8PXr+SCC
pCQTwkeYq/xfuLFlx5Tgw4pQKv3K1twoAXQMxxIH/NF4WjMHBVxXlrqwrQuUo0dYTJyWCKAy3NHV
1fOJfbtPRvy9cfn0cwV8bgrgq27u2jo7WZxeH7h2Z5c/FmU5e7MJvvZ7WKKGK99ARLuBE1fGtFFV
E2Y+UsJ4LE4dYIt84+zmZzvU/WjSxO3j1KsrPVAKUe3jlymIAA3Bk2ccOdh5tLctJJuJ8xF8laab
l7lIXWp9AFr6nRizocjsgA8LACVeRMLFzQI/avrJawfQFZLKazrw5SAtQckH1gaEqO332O+WTJK0
ARm0WtlmZQtRNlocFQcaQkBx6FUn23p+cmy/jwT9Y0Un5yL4ZBz4E/WBmlpbOn3gW4cGQ49j947A
LIAPf0A4KSHrqY6+Ps7zghzHIXJCkPYkzLtM4HNsGQhYpyKAV475wr64tlp3mrh9mJoaTvCC1iWr
IQLYmTf19Q1cuW9PW3v38ZC8z9BcBH/CdXXXcxcKZGVren2gPxg9xsurdfkBHzMd4AcT0sNEiTiG
B/TqaFJaSid9EXNXP2oQ+LTgy44bbPErymbjK6kIAHbg7076wxnBV4+B1awqd5O1Fa6NjB2uSkEE
RA32aGvreeCNV48kBjrjY+biPABf/U1dvZ0saUypD2BArw1E4zGByw/4kNsUfOhdCE3Xpr1Blg+o
3CgpcWnBxwmNSlnZ35Gx7XEn+M5/fVTejCi7jB15k6cSFzmrphDWwTbVWZRCJAwzx8XKw4c6nnn3
rX7i71O2nZ0e+GTa4JNpgK/qA0uXu0h9TUp9ADb158AFBJLrmS+D/3gK8FPFLaYFX1lple9u3AbT
egJ47ogv1I0IoWzTtUQWAHnV0nLLihLHd1iwRkUabgBT5dLBId+5+/aefGX/7j6ZEMY4QrbgZ6PM
pVmNnAb4YxwjKVsGTB9YqiOCb/ujicc4ScxJDJ8MPseTUELChtwfJZOnu39GGJ240gRbHiXlKfvH
SurzmQgAVPTTI0PBrMDX2sAo/4bCi2uV7dfgS/8k0ZSg0RECZNC5g0P+c/a+c/K5P714WOrrTIw6
JmcKPpdqnXqG4Kvf9XgEctoKObTo1yniB24ajiWPCjma+aGkBG19SxbgIxfxo3G5DN5E8OU1BCXw
9KcsiGbs+aw/mVDcEeFUh29eXcupLsippGuhIfHzyECQ7O0fwWLGPxLd/kIpQq5wAMGoH6+uKrWV
VtiI3ckrJeunCX524mNq4I995sn+vQGy/1AnTN5P6Djdatpft5sEU1yanp0PmR9JSs+xsLhsKpf9
wMpLN4di8ZTgywRpNMvbCxPdFnKpCADtyQ82FH+oarQc+dTTtfChzRcir7YN4PBTLErnLf2FdMTg
YuFcN9oKrBvLytzEW2IldgclBk6aE+CPupXpsO7aNUSOnuhGbOKDOiL4tNMsfAd+/aQ0NfDlIpeE
CzGXezY1jpAJtd8gxg1xUZoAPt4XmIyUigRgcJn+x+kI4Nxqp+WlCxpKx+3aOZ10LfimoVi+fnJI
Yn4DLCq9PgkhoCHBA/mKWwqs1tUlVNksLLJQk8xAOF6aVfBTh7tRTjeUJH98+QjkJQps7NcRwW9s
RuHDCLkXp+DhwyJTQuL8JPsC1r+w8OL1YbWaqA58nM9iNlP5zyG45+VsCQBtxyWNxaeXOKzTBl/7
wLgRmJgoGtU1EsWq2n2MIKJZEEMdUbKXUbLlnPraUnt5TQGx2fhTBr76/tjRENn5Vnuq+AFws50F
Bn5RXMrevYsJk5D4bAkAJvifBTHOJURpAvhoSGeIcoadTDRNaJlq5X/pXbXC1wzBl/Vn+opU6nPq
i8mFDSXrGzwFDzGHBghhhfbCKYpOH6f9O4wIvMdO9NzS2TacPfgkP+DLbGqRnSxdXAH/wLd1xAsQ
rwklxKgxhYcuc9JGVg2i/btmLpkWfEw6lnX0pXQnyUQATx0ZCm/v8IdmDL729/iey2oiayo9ZMuy
8sJ1le7bvFYjdIO97EaXpSEC7crjRUXF9uzB5/IDvmJbi6SlxUUaassQsfxxHRFg5t0ha/VTCuDM
qm21CWRVDCFkKcBHw35CYZHbznSwlC2TCEA7v9ZpeX5zQ4laXXJG4KezKOSggniC9AxHyJtd/jeJ
pmhVCnFwdlGh+5XV60tlP/2pBB//Dg+LVBeIE58vTg4c6UinDzxsM3DXxkRVH8icsZMkwmQiAPrR
W0YpYYspMYQTwAfRmRXZ/0G97a9thkmo7IUTgcjT7/YPX9rodeQFfPU9ZFVAiU7+3iT39PXqWtfs
gy9xZGREJD4Z7CgZ9AVJ30AgwZxbO5g3EK+pqo1/KpiQTrcIXKMoiRlm/qTTH6uvSJL5iYUTbeF4
avCVTTNMAP/pTOBnQwBon6Ua/Afr3QVGrDzlA3xZNIgiOTgYhF/9Vxnu5aqKsqKNJWVmJfwgX+BL
PAmOJInfp4IdomD7E2xm79AADtGVcrt5nQjDjL6W2vbbKBGYY0kpPfijIfsyNssYR1nDXlsNHDEJ
9Nlj8URa8I30IaKcAKfAHZOBmw0BINvm3r29gbtay9x5AR8NW7wyT1U4DfuHNvOVqhpHbsEH2EHK
xn0x2qNkiILdO+BPpgE7rV2exV5JcIrdTkXAdzOBLynJgXhIv4knBQZWmBvFMlBCB/GC8TQyXw72
Qiyh2QSRjbDxd3NBAGhf3t0zvKXQamqswKYPOQYfAQ1/7fRLzMeert1YW1O6rNBrnJDkkTX4FOxQ
MEkCvgTx+aN0do+CfUADNjqCLYOZBiQLwGXJRpT8hLVqF2R/e/q4fQgILpmA3VAQo3cWS+PbTwW+
vCcQFaURiT9MWNXWXBEAZuXNLx4fePGGFVZurGzBzMEHdQ8pwRQvailWN/tRYOeL1dX27MGnBk4Y
YFPOjZnNwBbZNd7UgT2SA7AxlssZ0OvYawtl+QZ1mRhL6NnE7UsZlnQzgY+YgThvxB8+lU40TZcA
CPMiPfB2t/82RRTkBnyAdUxZfMqk/N3SuKi82ubiR0lvHPh0ZkfCogx2wA82HqYy2ycy8aVV0Hbl
CGzVBbtW0083C1yBgaUeIb07IYlyActYlhk7mYI5JgMflzWZqOInyfEKL2X7EFMhALS79/QOb7aZ
DCvqCm0zBh8tnkySY75wJwtUSDX7QW13V9XaZfCxEKOAHaedym1/mPT2y2Af0snsXSwOIRdgl2mA
xuxeY+I5r1HgWCEsROkqMfvJ0Tz+qeXqzQR8HLEYjWD9SPa4ayqATpUA4La9blv70HaH2WDzFphn
BD5am7KpxA9J+u3a7mpuqivsaBuhcpvO7H6fpAN7B3O4BHIEtpO5TVU2vo6y1moKOBFYjl6SAR4T
Z5ainSvw4fCJcQaw0etSudYztckcQekaFmke+mhLpTwFpgs+2Pej+xAIIPv6O1LMfpRY+73G1lbB
9k92g1kCDntypU5uL7EaeF7Ox5c1cHmHrbFny1F+fq7Ah5vZYraQkCjnBvx6qkAayPQaLrT+nR7/
Z1vL3QS+jamCL1fICspL3U+o4KdoXSSLHUqzBBtu72YN0OgrLQbehHw6pEtBjsAkQGh9TC1WmevK
HDkEX/b2mcwA/97pgD8TAkC7852+kUaO4y5fVuKaMviY/Ur84XjlTwUzXdHGKbDyWuZAWc/AXm0W
eAdSqXle0SZEBnZCA26uq3HlC3y4jC0U/LDEYwLdOV0QpysC1IaQqBdXlTrXLy526ERBevDRIvEE
efZQ70E2KyUys+bVzey1dpNQispekI8hKrRRik2UlMSJ8eOfn4yd2QA/SgQsq28mM9gcyzDDgceF
L9vdE3iV47nmpkI7EbMAH4fahuR7/v40wLfpnCvrzAZ+kddqJG6UqzcbCer5IKauX03XYilv0gIA
H7zLbDQBfDivLiMz3BltpgSA1kf7Bbu6/K9Qtr6oQUsEacDHp739I2Hm+s3UUJqrRTe7l5faTAJm
t5N2VO9CmpSkM0f7T1GuXr7Bt1Bbn2r8iOu7gI09OdUEgNZO+/k7u/x/pCy2HiuHYlrwCekOyMof
AkKGdCusTTp7exWd2dZCq4m4LAY58thiNIw/ryRNAH9gAYIPto+ZT8E/xsBvzwVwuSIAwpZBz9rd
HXiBgt/c5HWMB0bzXO3Dsu0Px88VqmMFry6zwe2lYMusnAJeYDYoRR3zWJNnnsl8sP3zM1hNp5QA
CLuxc/b0DD8litLaJSVOpQKp9nnpf/WeAkz332LKe+TZbaSz20AflB8tm5p61fG9Bz5MPYAfIQLS
7xAS15tLwGZqBWRS1H6xrMh2RVORczR0S9INXr5q7y4U8OHkMSumHrjlDZOtUE6n8SQ/DTe6ZV9/
8L/e6fYTXzj2PvjTcO/Cw0fBRxj9lnyAn08OoG1wUf5odYXLriaavA9+5lU9s9EIZQ+rlkivezif
4PAk/w0PsHFHp//tg/3D42B9H/zx4GP9wWo2A3yUzt+Yb/BniwDQkDixbn//yP1PHOiW+kYimjF6
H3yEcZmpeSsaLVJE4u9nltE7swHMbBGA7P0lyhZp57/RPnT48MAwESekUr/3wEcAp9ViRiTPYWbi
3U6ySwiddwSgNoR+raDc4D+fOdgTa/eFRkO63kvgw7yzGk0kabTE6Kz/T6JkR70422DMhhKYqcHz
961Kh/mialcBKWQBJgsZfIQZQMPnBQOydhDr8E9ECV07Je1UE4DakL3ylWqnZW0VJQQ3259nIYGv
AG+Qc/Uo8HDqYJ/E5071wM8VAlDXAuDp+mKZzbS6xl1AvDazHH41n8FHGDgUPIKiDyKHiCaEaz9F
Zr4EvuAIQNuQy46slksaCm1cTOLk0HFpnoAPSkZNHpRlCUs8/vgM7d8kU4jWfa8TgNqQBIntbP/W
bhLKYCeLBKVOpQmFrU81+AAdWTwoxYZqXKwgE/ZFRsDrkbk6wHOdANSGuAAsgSIY9YoCI+9GwAfK
4ikRuqcGfBReRj0fQY5H4NUijPDbIz4PSZnxuT6w84UAtA2RvNhjF9VCLjbyXLO8ZQq4A6UIEAT8
C2KOweckZa91EB7PKTGFqPzBCi+jmhc0elQ/i86nwZyPBKBv2M9oE+uI528WOK4Ykb4CN5ZBpKZm
KcU0lHK4bAt1eb1dzRDGe16uV6hsc4tENFEBGsSFCBw1j/B11jvm8+AtBAJI1bC/z2KiBJxiM2mU
vkN2j5d1C+Mkap2/EJu58MANsA4Zjo0r32Wgw1YfWmgD9X8CDACB5yXKKmEKkwAAAABJRU5ErkJg
gg==
"""
|
gpl-3.0
| 7,281,487,483,231,738,000
| 52.585859
| 170
| 0.837135
| false
| 1.744951
| false
| false
| false
|
our-city-app/oca-backend
|
src/solutions/common/restapi/reservation.py
|
1
|
12531
|
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
from datetime import datetime
from types import NoneType
from rogerthat.rpc import users
from rogerthat.rpc.service import BusinessException
from rogerthat.to import ReturnStatusTO, RETURNSTATUS_TO_SUCCESS
from rogerthat.utils import get_epoch_from_datetime
from mcfw.consts import MISSING
from mcfw.restapi import rest
from mcfw.rpc import returns, arguments
from solutions.common.dal.reservations import get_restaurant_settings, get_restaurant_reservation
from solutions.common.to.reservation import RestaurantShiftTO, RestaurantSettingsTO, RestaurantShiftDetailsTO, \
TimestampTO, RestaurantReservationTO, RestaurantReservationStatisticsTO, RestaurantBrokenReservationTO, TableTO, \
DeleteTableStatusTO, DeleteTableReservationTO
@rest("/common/restaurant/settings/load", "get", read_only_access=True)
@returns(RestaurantSettingsTO)
@arguments()
def load_shifts():
service_user = users.get_current_user()
session_ = users.get_current_session()
service_identity = session_.service_identity
settings = get_restaurant_settings(service_user, service_identity)
return RestaurantSettingsTO.fromRestaurantSettingsObject(settings)
@rest("/common/restaurant/settings/shifts/save", "post")
@returns(ReturnStatusTO)
@arguments(shifts=[RestaurantShiftTO])
def save_shifts(shifts):
from solutions.common.bizz.reservation import save_shifts as save_shifts_bizz
try:
service_user = users.get_current_user()
session_ = users.get_current_session()
service_identity = session_.service_identity
save_shifts_bizz(service_user, service_identity, shifts)
return RETURNSTATUS_TO_SUCCESS
except BusinessException, e:
return ReturnStatusTO.create(False, e.message)
@rest("/common/restaurant/reservations", "get", read_only_access=True)
@returns([RestaurantShiftDetailsTO])
@arguments(year=int, month=int, day=int, hour=int, minute=int)
def get_reservations(year, month, day, hour, minute):
from solutions.common.bizz.reservation import get_shift_by_datetime, get_next_shift
service_user = users.get_current_user()
session_ = users.get_current_session()
service_identity = session_.service_identity
result = list()
shift, start_time = get_shift_by_datetime(service_user, service_identity, datetime(year, month, day, hour, minute))
if shift:
details = RestaurantShiftDetailsTO()
details.shift = RestaurantShiftTO.fromShift(shift)
details.start_time = TimestampTO.fromDatetime(start_time)
details.reservations = list()
for reservation in get_restaurant_reservation(service_user, service_identity, start_time):
details.reservations.append(RestaurantReservationTO.fromReservation(reservation))
result.append(details)
shift, start_time = get_next_shift(service_user, service_identity, shift, start_time)
if shift:
details = RestaurantShiftDetailsTO()
details.shift = RestaurantShiftTO.fromShift(shift)
details.start_time = TimestampTO.fromDatetime(start_time)
details.reservations = list()
for reservation in get_restaurant_reservation(service_user, service_identity, start_time):
details.reservations.append(RestaurantReservationTO.fromReservation(reservation))
result.append(details)
return result
@rest("/common/restaurant/reservations/broken", "get", read_only_access=True)
@returns([RestaurantBrokenReservationTO])
@arguments()
def get_broken_reservations():
from solutions.common.dal.reservations import get_broken_reservations as dal_get_broken_reservations
service_user = users.get_current_user()
session_ = users.get_current_session()
service_identity = session_.service_identity
settings = get_restaurant_settings(service_user, service_identity)
result = []
for reservation in dal_get_broken_reservations(service_user, service_identity):
alternative_shifts = [shift.name for shift in settings.get_shifts().values() if reservation.date.isoweekday() in shift.days]
result.append(RestaurantBrokenReservationTO.fromReservation(reservation, alternative_shifts))
return result
@rest("/common/restaurant/reservations/move_shift", "post")
@returns(NoneType)
@arguments(reservation_key=unicode, shift_name=unicode)
def move_reservation_to_shift(reservation_key, shift_name):
from solutions.common.bizz.reservation import move_reservation
service_user = users.get_current_user()
session_ = users.get_current_session()
service_identity = session_.service_identity
move_reservation(service_user, service_identity, reservation_key, shift_name)
@rest("/common/restaurant/reservations/notified", "post")
@returns(NoneType)
@arguments(reservation_key=unicode)
def reservation_cancelled_notified(reservation_key):
from solutions.common.bizz.reservation import cancel_reservation
service_user = users.get_current_user()
cancel_reservation(service_user, reservation_key, True)
@rest("/common/restaurant/reservations/send_cancel_via_app", "post")
@returns(NoneType)
@arguments(reservation_keys=[unicode])
def reservation_send_cancel_via_app(reservation_keys):
from solutions.common.bizz.reservation import cancel_reservations
service_user = users.get_current_user()
cancel_reservations(service_user, reservation_keys)
@rest("/common/restaurant/reservations", "post")
@returns(unicode)
@arguments(year=int, month=int, day=int, hour=int, minute=int, name=unicode, people=int, comment=unicode, phone=unicode, force=bool)
def submit_reservation(year, month, day, hour, minute, name, people, comment, phone, force):
from solutions.common.bizz.reservation import reserve_table
service_user = users.get_current_user()
session_ = users.get_current_session()
service_identity = session_.service_identity
return reserve_table(service_user, service_identity, None, get_epoch_from_datetime(datetime(year, month, day, hour, minute)), people, name, phone, comment, force)
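# Illustrative request sketch (hypothetical values, not from the original
# source): a client posts the broken-down date plus the reservation details to
# /common/restaurant/reservations and gets back the status string returned by
# reserve_table, e.g.
#
#   {"year": 2020, "month": 5, "day": 1, "hour": 19, "minute": 30,
#    "name": "Jane", "people": 4, "comment": "window seat",
#    "phone": "+3212345678", "force": false}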
@rest("/common/restaurant/reservation-stats", "get", read_only_access=True)
@returns(RestaurantReservationStatisticsTO)
@arguments(year=int, month=int, day=int)
def get_statistics(year, month, day):
from solutions.common.bizz.reservation import get_statistics as get_statistics_bizz
service_user = users.get_current_user()
session_ = users.get_current_session()
service_identity = session_.service_identity
date = datetime(year, month, day)
return get_statistics_bizz(service_user, service_identity, date)
@rest("/common/restaurant/reservation/arrived", "post")
@returns(RestaurantReservationTO)
@arguments(reservation_key=unicode)
def toggle_reservation_arrived(reservation_key):
from solutions.common.bizz.reservation import toggle_reservation_arrived as toggle_reservation_arrived_bizz
reservation = toggle_reservation_arrived_bizz(users.get_current_user(), reservation_key)
return RestaurantReservationTO.fromReservation(reservation)
@rest("/common/restaurant/reservation/cancelled", "post")
@returns(RestaurantReservationTO)
@arguments(reservation_key=unicode)
def toggle_reservation_cancelled(reservation_key):
from solutions.common.bizz.reservation import toggle_reservation_cancelled as toggle_reservation_cancelled_bizz
reservation = toggle_reservation_cancelled_bizz(users.get_current_user(), reservation_key)
return RestaurantReservationTO.fromReservation(reservation)
@rest("/common/restaurant/reservation/edit", "post")
@returns(unicode)
@arguments(reservation_key=unicode, people=int, comment=unicode, force=bool, new_date=TimestampTO)
def edit_reservation(reservation_key, people, comment, force=True, new_date=None):
from solutions.common.bizz.reservation import edit_reservation as edit_reservation_bizz
new_epoch = 0
if new_date:
new_epoch = get_epoch_from_datetime(datetime(new_date.year, new_date.month, new_date.day, new_date.hour, new_date.minute))
return edit_reservation_bizz(users.get_current_user(), reservation_key, people, comment, force, True if new_date else False, new_epoch)
@rest("/common/restaurant/reservation/edit_tables", "post")
@returns(ReturnStatusTO)
@arguments(reservation_key=unicode, tables=[(int, long)])
def edit_reservation_tables(reservation_key, tables):
from solutions.common.bizz.reservation import edit_reservation_tables as edit_reservation_tables_bizz
try:
edit_reservation_tables_bizz(users.get_current_user(), reservation_key, tables)
return RETURNSTATUS_TO_SUCCESS
except BusinessException, e:
return ReturnStatusTO.create(False, e.message)
@rest("/common/restaurant/reservation/reply", "post")
@returns(ReturnStatusTO)
@arguments(email=unicode, app_id=unicode, message=unicode, reservation_key=unicode)
def reply_reservation(email, app_id, message, reservation_key=None):
from solutions.common.bizz.reservation import reply_reservation as reply_reservation_bizz
try:
if reservation_key is MISSING:
reservation_key = None
reply_reservation_bizz(users.get_current_user(), email, app_id, message, reservation_key)
return RETURNSTATUS_TO_SUCCESS
except BusinessException, e:
return ReturnStatusTO.create(False, e.message)
@rest("/common/restaurant/settings/tables/add", "post")
@returns(ReturnStatusTO)
@arguments(table=TableTO)
def add_table(table):
from solutions.common.bizz.reservation import add_table as add_table_bizz
try:
service_user = users.get_current_user()
session_ = users.get_current_session()
service_identity = session_.service_identity
add_table_bizz(service_user, service_identity, table)
return RETURNSTATUS_TO_SUCCESS
except BusinessException, e:
return ReturnStatusTO.create(False, e.message)
@rest("/common/restaurant/settings/tables/update", "post")
@returns(ReturnStatusTO)
@arguments(table=TableTO)
def update_table(table):
from solutions.common.bizz.reservation import update_table as update_table_bizz
try:
service_user = users.get_current_user()
session_ = users.get_current_session()
service_identity = session_.service_identity
update_table_bizz(service_user, service_identity, table)
return RETURNSTATUS_TO_SUCCESS
except BusinessException, e:
return ReturnStatusTO.create(False, e.message)
@rest("/common/restaurant/settings/tables/delete", "post")
@returns(DeleteTableStatusTO)
@arguments(table_id=(int, long), force=bool)
def delete_table(table_id, force):
from solutions.common.bizz.reservation import get_shift_by_datetime, delete_table as delete_table_bizz
service_user = users.get_current_user()
session_ = users.get_current_session()
service_identity = session_.service_identity
dtsTO = DeleteTableStatusTO()
status, reservations = delete_table_bizz(service_user, service_identity, table_id, force)
dtsTO.success = status
dtsTO.reservations = list()
if not status:
for r in reservations:
dtrTO = DeleteTableReservationTO()
dtrTO.reservation = RestaurantReservationTO.fromReservation(r)
shift, start_time = get_shift_by_datetime(service_user, service_identity, r.date)
if shift:
details = RestaurantShiftDetailsTO()
details.shift = RestaurantShiftTO.fromShift(shift)
details.start_time = TimestampTO.fromDatetime(start_time)
details.reservations = list()
for reservation in get_restaurant_reservation(service_user, service_identity, start_time):
details.reservations.append(RestaurantReservationTO.fromReservation(reservation))
dtrTO.shift = details
else:
dtrTO.shift = None
dtsTO.reservations.append(dtrTO)
return dtsTO
|
apache-2.0
| -8,618,700,201,135,557,000
| 46.828244
| 166
| 0.743676
| false
| 3.815773
| false
| false
| false
|
os-cloud-storage/openstack-workload-disaster-recovery
|
dragon/openstack/common/processutils.py
|
1
|
9182
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import os
import random
import shlex
import signal
from eventlet.green import subprocess
from eventlet import greenthread
from dragon.openstack.common.gettextutils import _
from dragon.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class InvalidArgumentError(Exception):
def __init__(self, message=None):
super(InvalidArgumentError, self).__init__(message)
class UnknownArgumentError(Exception):
def __init__(self, message=None):
super(UnknownArgumentError, self).__init__(message)
class ProcessExecutionError(Exception):
def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
description=None):
self.exit_code = exit_code
self.stderr = stderr
self.stdout = stdout
self.cmd = cmd
self.description = description
if description is None:
description = "Unexpected error while running command."
if exit_code is None:
exit_code = '-'
message = ("%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r"
% (description, cmd, exit_code, stdout, stderr))
super(ProcessExecutionError, self).__init__(message)
class NoRootWrapSpecified(Exception):
def __init__(self, message=None):
super(NoRootWrapSpecified, self).__init__(message)
def _subprocess_setup():
# Python installs a SIGPIPE handler by default. This is usually not what
# non-Python subprocesses expect.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def execute(*cmd, **kwargs):
"""
Helper method to shell out and execute a command through subprocess with
optional retry.
:param cmd: Passed to subprocess.Popen.
:type cmd: string
:param process_input: Send to opened process.
:type process_input: string
:param check_exit_code: Single bool, int, or list of allowed exit
codes. Defaults to [0]. Raise
:class:`ProcessExecutionError` unless
program exits with one of these codes.
:type check_exit_code: boolean, int, or [int]
:param delay_on_retry: True | False. Defaults to True. If set to True,
wait a short amount of time before retrying.
:type delay_on_retry: boolean
:param attempts: How many times to retry cmd.
:type attempts: int
:param run_as_root: True | False. Defaults to False. If set to True,
the command is prefixed by the command specified
in the root_helper kwarg.
:type run_as_root: boolean
:param root_helper: command to prefix to commands called with
run_as_root=True
:type root_helper: string
:param shell: whether or not there should be a shell used to
execute this command. Defaults to false.
:type shell: boolean
:returns: (stdout, stderr) from process execution
:raises: :class:`UnknownArgumentError` on
receiving unknown arguments
:raises: :class:`ProcessExecutionError`
"""
process_input = kwargs.pop('process_input', None)
check_exit_code = kwargs.pop('check_exit_code', [0])
ignore_exit_code = False
delay_on_retry = kwargs.pop('delay_on_retry', True)
attempts = kwargs.pop('attempts', 1)
run_as_root = kwargs.pop('run_as_root', False)
root_helper = kwargs.pop('root_helper', '')
shell = kwargs.pop('shell', False)
if isinstance(check_exit_code, bool):
ignore_exit_code = not check_exit_code
check_exit_code = [0]
elif isinstance(check_exit_code, int):
check_exit_code = [check_exit_code]
if kwargs:
raise UnknownArgumentError(_('Got unknown keyword args '
'to utils.execute: %r') % kwargs)
if run_as_root and os.geteuid() != 0:
if not root_helper:
raise NoRootWrapSpecified(
message=('Command requested root, but did not specify a root '
'helper.'))
cmd = shlex.split(root_helper) + list(cmd)
cmd = map(str, cmd)
while attempts > 0:
attempts -= 1
try:
LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd))
_PIPE = subprocess.PIPE # pylint: disable=E1101
if os.name == 'nt':
preexec_fn = None
close_fds = False
else:
preexec_fn = _subprocess_setup
close_fds = True
obj = subprocess.Popen(cmd,
stdin=_PIPE,
stdout=_PIPE,
stderr=_PIPE,
close_fds=close_fds,
preexec_fn=preexec_fn,
shell=shell)
result = None
if process_input is not None:
result = obj.communicate(process_input)
else:
result = obj.communicate()
obj.stdin.close() # pylint: disable=E1101
_returncode = obj.returncode # pylint: disable=E1101
if _returncode:
LOG.debug(_('Result was %s') % _returncode)
if not ignore_exit_code and _returncode not in check_exit_code:
(stdout, stderr) = result
raise ProcessExecutionError(exit_code=_returncode,
stdout=stdout,
stderr=stderr,
cmd=' '.join(cmd))
return result
except ProcessExecutionError:
if not attempts:
raise
else:
LOG.debug(_('%r failed. Retrying.'), cmd)
if delay_on_retry:
greenthread.sleep(random.randint(20, 200) / 100.0)
finally:
# NOTE(termie): this appears to be necessary to let the subprocess
# call clean something up in between calls, without
# it two execute calls in a row hangs the second one
greenthread.sleep(0)
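# Illustrative usage sketch (not part of the original module):
#
#   out, err = execute('cat', '/etc/hostname')                   # must exit 0
#   out, err = execute('ls', 'missing', check_exit_code=[0, 2])  # tolerate exit 2
#   out, err = execute('umount', '/mnt/vol', run_as_root=True,
#                      root_helper='sudo my-rootwrap /etc/rootwrap.conf')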
def trycmd(*args, **kwargs):
"""
A wrapper around execute() to more easily handle warnings and errors.
Returns an (out, err) tuple of strings containing the output of
the command's stdout and stderr. If 'err' is not empty then the
command can be considered to have failed.
:discard_warnings True | False. Defaults to False. If set to True,
then for succeeding commands, stderr is cleared
"""
discard_warnings = kwargs.pop('discard_warnings', False)
try:
out, err = execute(*args, **kwargs)
failed = False
except ProcessExecutionError, exn:
out, err = '', str(exn)
failed = True
if not failed and discard_warnings and err:
# Handle commands that output to stderr but otherwise succeed
err = ''
return out, err
def ssh_execute(ssh, cmd, process_input=None,
addl_env=None, check_exit_code=True):
LOG.debug(_('Running cmd (SSH): %s'), cmd)
if addl_env:
raise InvalidArgumentError(_('Environment not supported over SSH'))
if process_input:
# This is (probably) fixable if we need it...
raise InvalidArgumentError(_('process_input not supported over SSH'))
stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
channel = stdout_stream.channel
# NOTE(justinsb): This seems suspicious...
# ...other SSH clients have buffering issues with this approach
stdout = stdout_stream.read()
stderr = stderr_stream.read()
stdin_stream.close()
exit_status = channel.recv_exit_status()
# exit_status == -1 if no exit code was returned
if exit_status != -1:
LOG.debug(_('Result was %s') % exit_status)
if check_exit_code and exit_status != 0:
raise ProcessExecutionError(exit_code=exit_status,
stdout=stdout,
stderr=stderr,
cmd=cmd)
return (stdout, stderr)
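# Illustrative sketch (not part of the original module): using trycmd() so that
# stderr output from an otherwise successful command is not treated as failure.
# The command shown is an assumption for demonstration purposes.
def _demo_trycmd_usage():
    out, err = trycmd('df', '-h', discard_warnings=True)
    if err:
        LOG.debug(_('Command failed: %r'), err)
    return out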
|
apache-2.0
| -8,191,133,469,897,886,000
| 36.174089
| 79
| 0.574167
| false
| 4.489976
| false
| false
| false
|
jinzekid/codehub
|
python/code_snippet/DataAnalysis/1.py
|
1
|
1107
|
# -*- encoding:utf-8 -*-
import jieba.analyse
from os import path
from scipy.misc import imread
import matplotlib as mpl
mpl.use('TkAgg')
import matplotlib.pyplot as plt
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
list_test = [1,2,3,4,5]
for i in list_test:
print(i)
if __name__ == "__main__":
mpl.rcParams['font.sans-serif'] = ['FangSong']
#mpl.rcParams['axes.unicode_minus'] = False
content = open("testing.txt","rb").read()
# tags extraction based on TF-IDF algorithm
tags = jieba.analyse.extract_tags(content, topK=100, withWeight=False)
text =" ".join(tags)
#text = unicode(text)
# read the mask
d = path.dirname(__file__)
trump_coloring = imread(path.join(d, "Trump.jpg"))
wc = WordCloud(font_path='simsun.ttc',
background_color="white", max_words=300, mask=trump_coloring,
max_font_size=40, random_state=42)
# generate word cloud
wc.generate(text)
# generate color from image
image_colors = ImageColorGenerator(trump_coloring)
plt.imshow(wc)
plt.axis("off")
plt.show()
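# Illustrative extension (not part of the original script): apply the colors
# sampled from the mask image. The ImageColorGenerator built above is never
# used by the original code; calling recolor() with it tints the cloud to
# match the source picture.
def recolor_with_mask(wordcloud, color_generator):
    # Recolors the word cloud in place and returns it for plotting.
    return wordcloud.recolor(color_func=color_generator)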
|
gpl-3.0
| 6,991,800,573,141,162,000
| 24.744186
| 74
| 0.65673
| false
| 3.227405
| false
| false
| false
|
J77D/fallout-terminal
|
fallout_hack.py
|
1
|
7441
|
import curses
import random
import os
from fallout_functions import slowWrite
from fallout_functions import upperInput
################## text strings ######################
HEADER_TEXT = 'ROBCO INDUSTRIES (TM) TERMLINK PROTOCOL'
################## global "constants" ################
# number of characters for hex digits and spaces
CONST_CHARS = 16
# position of the attempt squares
SQUARE_X = 19
SQUARE_Y = 3
LOGIN_ATTEMPTS = 4
HEADER_LINES = 5
# amount of time to pause after correct password input
LOGIN_PAUSE = 3000
# starting number for hex generation
START_HEX = 0xf650
# list of possible symbols for password hiding
SYMBOLS = '!@#$%^*()_-+={}[]|\\:;\'",<>./?'
################## functions #########################
def generateHex(n):
"""
generates n numbers starting at START_HEX and increasing by 12 each time
"""
num = START_HEX
list = []
for i in xrange(n):
list.append(num)
num += 12
return list
def getSymbols(n):
"""
return n random symbols
"""
count = len(SYMBOLS)
result = ""
for i in xrange(n):
result += SYMBOLS[random.randint(0, count - 1)]
return result
def getPasswords():
"""
Returns an array of strings to be used as the password and the decoys
"""
groups = []
# script file / password file location
__location__ = os.path.realpath(os.path.join(os.getcwd(),
os.path.dirname(__file__)))
# read from passwords.txt
with open(os.path.join(__location__, "passwords.txt")) as pwfile:
for line in pwfile:
if not line.strip():
groups.append([])
elif len(groups) > 0:
groups[len(groups) - 1].append(line[:-1])
passwords = groups[random.randint(0, len(groups) - 1)]
random.shuffle(passwords)
return passwords
def getFiller(length, passwords):
"""
Return a string of symbols with potential passwords mixed in
length - the length of the string to create
passwords - an array of passwords to hide in the symbols
"""
filler = getSymbols(length)
# add the passwords to the symbols
pwdLen = len(passwords[0])
pwdCount = len(passwords)
i = 0
for pwd in passwords:
# skip a distance based on total size to cover then place a password
maxSkip = length / pwdCount - pwdLen
i += random.randint(maxSkip - 2, maxSkip)
filler = filler[:i] + pwd + filler[i + pwdLen:]
i += pwdLen
return filler
def initScreen(scr):
"""
Fill the screen to prepare for password entry
scr - curses window returned from curses.initscr()
"""
size = scr.getmaxyx()
height = size[0]
width = size[1]
fillerHeight = height - HEADER_LINES
hexes = generateHex(fillerHeight * 2)
hexCol1 = hexes[:fillerHeight]
hexCol2 = hexes[fillerHeight:]
# generate the symbols and passwords
fillerLength = width / 2 * fillerHeight
passwords = getPasswords()
filler = getFiller(fillerLength, passwords)
fillerCol1 = filler[:len(filler) / 2]
fillerCol2 = filler[len(filler) / 2:]
# each column of symbols and passwords should be 1/4 of the screen
fillerWidth = width / 4
# print the header stuff
slowWrite(scr, HEADER_TEXT)
slowWrite(scr, '\nENTER PASSWORD NOW\n\n')
slowWrite(scr, str(LOGIN_ATTEMPTS) + ' ATTEMPT(S) LEFT: ')
for i in xrange(LOGIN_ATTEMPTS):
scr.addch(curses.ACS_BLOCK)
slowWrite(scr, ' ')
slowWrite(scr, '\n\n')
# print the hex and filler
for i in xrange(fillerHeight):
slowWrite(scr, "0x%X %s" % (hexCol1[i], fillerCol1[i * fillerWidth: (i + 1) * fillerWidth]), 1)
if i < fillerHeight - 1:
scr.addstr('\n')
for i in xrange(fillerHeight):
scr.move(HEADER_LINES + i, CONST_CHARS / 2 + fillerWidth)
slowWrite(scr, '0x%X %s' % (hexCol2[i], fillerCol2[i * fillerWidth: (i + 1) * fillerWidth]), 1)
scr.refresh()
return passwords
def moveInput(scr, inputPad):
"""
moves the input pad to display all text then a blank line then the cursor
"""
size = scr.getmaxyx()
height = size[0]
width = size[1]
inputPad.addstr('\n>')
# cursor position relative to inputPad
cursorPos = inputPad.getyx()
inputPad.refresh(0, 0,
height - cursorPos[0] - 1,
width / 2 + CONST_CHARS,
height - 1,
width - 1)
def userInput(scr, passwords):
"""
let the user attempt to crack the password
scr - curses window returned from curses.initscr()
passwords - array of passwords hidden in the symbols
"""
size = scr.getmaxyx()
height = size[0]
width = size[1]
# set up a pad for user input
inputPad = curses.newpad(height, width / 2 + CONST_CHARS)
attempts = LOGIN_ATTEMPTS
# randomly pick a password from the list
pwd = passwords[random.randint(0, len(passwords) - 1)]
curses.noecho()
while attempts > 0:
# move the curser to the correct spot for typing
scr.move(height - 1, width / 2 + CONST_CHARS + 1)
# scroll user input up as the user tries passwords
moveInput(scr, inputPad)
guess = upperInput(scr, False, False)
cursorPos = inputPad.getyx()
# write under the last line of text
inputPad.move(cursorPos[0] - 1, cursorPos[1] - 1)
inputPad.addstr('>' + guess.upper() + '\n')
# user got password right
if guess.upper() == pwd.upper():
inputPad.addstr('>Exact match!\n')
inputPad.addstr('>Please wait\n')
inputPad.addstr('>while system\n')
inputPad.addstr('>is accessed.\n')
moveInput(scr, inputPad)
curses.napms(LOGIN_PAUSE)
return pwd
# wrong password
else:
pwdLen = len(pwd)
matched = 0
try:
for i in xrange(pwdLen):
if pwd[i].upper() == guess[i].upper():
matched += 1
except IndexError:
pass # user did not enter enough letters
inputPad.addstr('>Entry denied\n')
inputPad.addstr('>' + str(matched) + '/' + str(pwdLen) +
' correct.\n')
attempts -= 1
# show remaining attempts
scr.move(SQUARE_Y, 0)
scr.addstr(str(attempts))
scr.move(SQUARE_Y, SQUARE_X)
for i in xrange(LOGIN_ATTEMPTS):
if i < attempts:
scr.addch(curses.ACS_BLOCK)
else:
scr.addstr(' ')
scr.addstr(' ')
# Out of attempts
return None
def runLogin(scr):
"""
Start the login portion of the terminal
Returns the password if the user correctly guesses it
"""
curses.use_default_colors()
size = scr.getmaxyx()
width = size[1]
height = size[0]
random.seed()
# set screen to initial position
scr.erase()
scr.move(0, 0)
passwords = initScreen(scr)
return userInput(scr, passwords)
def beginLogin():
"""
Initialize curses and start the login process
Returns the password if the user correctly guesses it
"""
return curses.wrapper(runLogin)
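# Illustrative sketch (not part of the original module): running the hacking
# mini-game on its own. The success/failure handling below is an assumption;
# in the full terminal, beginLogin() feeds into a larger menu flow.
def _demo_run():
    password = beginLogin()
    if password is not None:
        print 'Access granted with password: %s' % password
    else:
        print 'Terminal locked out.'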
|
mit
| -8,222,715,190,287,671,000
| 26.356618
| 103
| 0.57452
| false
| 3.829645
| false
| false
| false
|
rozim/KaggleFindingElo
|
generate-game2json.py
|
1
|
1513
|
#!/usr/bin/python
import chess.pgn
import sys
import cjson
import time
def StudyGame(game):
headers = game.headers
node = game
ply = 0
positions = []
while True:
board = node.board()
node = node.variations[0]
p = {'ply': ply,
'num_legal_moves': len(board.legal_moves),
'san': node.san(),
'move': str(node.move),
'fen': board.fen()}
if board.is_check():
p['in_check'] = True
positions.append(p)
ply += 1
if not node.variations:
break
last_board = node.board()
g = {
'event': headers['Event'],
'game_ply': ply,
'result': headers['Result'],
'positions': positions,
'is_mate': last_board.is_checkmate(),
'is_stalemate': last_board.is_stalemate()
}
if 'WhiteElo' in headers:
g['white_elo'] = int(headers['WhiteElo'])
g['black_elo'] = int(headers['BlackElo'])
return g
t0 = time.time()
for fn in sys.argv[1:]:
f = file(fn)
n = 0
mod = 1
while True:
game = chess.pgn.read_game(f)
if game is None:
break
g = StudyGame(game)
with file('generated/game2json/%05d.json' % int(g['event']), 'w') as cur_f:
cur_f.write(cjson.encode(g))
n += 1
if n % mod == 0:
print "%6d %.1f" % (n, time.time() - t0)
sys.stdout.flush()
mod *= 2
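# Illustrative usage note (assumption, not part of the original script):
#   python generate-game2json.py games.pgn more_games.pgn
# Each game is written to generated/game2json/<event>.json, where <event>
# comes from the PGN Event header and is expected to be numeric.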
|
mit
| 2,885,319,990,388,454,400
| 23.803279
| 83
| 0.484468
| false
| 3.354767
| false
| false
| false
|
bartdag/recodoc2
|
recodoc2/apps/doc/management/commands/parsedoc.py
|
1
|
1160
|
from __future__ import unicode_literals
from django.core.management.base import NoArgsCommand
from doc.actions import parse_doc
from optparse import make_option
from docutil.commands_util import recocommand
from docutil.str_util import smart_decode
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list + (
make_option('--pname', action='store', dest='pname',
default='-1', help='Project unix name'),
make_option('--dname', action='store', dest='dname',
default='-1', help='Document name'),
make_option('--release', action='store', dest='release',
default='-1', help='Project Release'),
make_option('--skip_refs', action='store_true', dest='skip_refs',
default=False, help='Skip code reference identification'),
)
help = "Parse document model"
@recocommand
def handle_noargs(self, **options):
pname = smart_decode(options.get('pname'))
dname = smart_decode(options.get('dname'))
release = smart_decode(options.get('release'))
skip = options.get('skip_refs')
parse_doc(pname, dname, release, not skip)
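# Illustrative usage sketch (assumption, not part of the original command); the
# option names mirror the make_option declarations above, while the values are
# hypothetical:
#   python manage.py parsedoc --pname=myproject --dname=user-manual --release=1.0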
|
bsd-3-clause
| 3,877,381,356,536,306,000
| 40.428571
| 73
| 0.650862
| false
| 4.013841
| false
| false
| false
|
FrancoisRheaultUS/dipy
|
doc/examples/denoise_nlmeans.py
|
5
|
2864
|
"""
==============================================
Denoise images using Non-Local Means (NLMEANS)
==============================================
Using the non-local means filter [Coupe08]_ and [Coupe11]_ you can denoise
3D or 4D images and boost the SNR of your datasets. You can also decide between
modeling the noise as Gaussian or Rician (default).
"""
import numpy as np
import nibabel as nib
import matplotlib.pyplot as plt
from time import time
from dipy.denoise.nlmeans import nlmeans
from dipy.denoise.noise_estimate import estimate_sigma
from dipy.data import get_fnames
from dipy.io.image import load_nifti
dwi_fname, dwi_bval_fname, dwi_bvec_fname = get_fnames('sherbrooke_3shell')
data, affine = load_nifti(dwi_fname)
mask = data[..., 0] > 80
# We select only one volume for the example to run quickly.
data = data[..., 1]
print("vol size", data.shape)
# Before denoising we need an estimate of the noise standard deviation.
"""
In order to call ``nlmeans`` first you need to estimate the standard
deviation of the noise. We use N=4 since the Sherbrooke dataset was acquired
on a 1.5T Siemens scanner with a 4 array head coil.
"""
sigma = estimate_sigma(data, N=4)
t = time()
"""
Calling the main function ``nlmeans``
"""
t = time()
den = nlmeans(data, sigma=sigma, mask=mask, patch_radius=1,
block_radius=1, rician=True)
print("total time", time() - t)
"""
Let us plot the axial slice of the denoised output
"""
axial_middle = data.shape[2] // 2
before = data[:, :, axial_middle].T
after = den[:, :, axial_middle].T
difference = np.abs(after.astype(np.float64) - before.astype(np.float64))
difference[~mask[:, :, axial_middle].T] = 0
fig, ax = plt.subplots(1, 3)
ax[0].imshow(before, cmap='gray', origin='lower')
ax[0].set_title('before')
ax[1].imshow(after, cmap='gray', origin='lower')
ax[1].set_title('after')
ax[2].imshow(difference, cmap='gray', origin='lower')
ax[2].set_title('difference')
plt.savefig('denoised.png', bbox_inches='tight')
"""
.. figure:: denoised.png
:align: center
**Showing the axial slice before (left) and after (middle) NLMEANS denoising, and their difference (right)**
"""
nib.save(nib.Nifti1Image(den, affine), 'denoised.nii.gz')
"""
An improved version of non-local means denoising is adaptive soft coefficient
matching, please refer to :ref:`example_denoise_ascm` for more details.
References
----------
.. [Coupe08] P. Coupe, P. Yger, S. Prima, P. Hellier, C. Kervrann, C. Barillot,
"An Optimized Blockwise Non Local Means Denoising Filter for 3D Magnetic
Resonance Images", IEEE Transactions on Medical Imaging, 27(4):425-441, 2008
.. [Coupe11] Pierrick Coupe, Jose Manjon, Montserrat Robles, Louis Collins.
"Adaptive Multiresolution Non-Local Means Filter for 3D MR Image Denoising"
IET Image Processing, Institution of Engineering and Technology, 2011
.. include:: ../links_names.inc
"""
|
bsd-3-clause
| 7,466,975,111,948,228,000
| 26.538462
| 79
| 0.688198
| false
| 3.005247
| false
| false
| false
|
contractvm/libcontractvm
|
libcontractvm/WalletExplorer.py
|
1
|
3035
|
# Copyright (c) 2015 Davide Gessa
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from pycoin.networks import *
from pycoin.key import Key
from pycoin.key.BIP32Node import BIP32Node
from pycoin import encoding
from pycoin.ecdsa import is_public_pair_valid, generator_secp256k1, public_pair_for_x, secp256k1
from pycoin.serialize import b2h, h2b
from pycoin.tx import *
from pycoin.tx.tx_utils import sign_tx, create_tx
from pycoin.tx.Spendable import Spendable
from pycoin.tx.TxOut import TxOut
from pycoin.tx.script import tools
from pycoin.encoding import bitcoin_address_to_hash160_sec, is_sec_compressed, public_pair_to_sec, secret_exponent_to_wif, public_pair_to_bitcoin_address, wif_to_tuple_of_secret_exponent_compressed, sec_to_public_pair, public_pair_to_hash160_sec, wif_to_secret_exponent
from pycoin.tx.pay_to import address_for_pay_to_script, build_hash160_lookup
import logging
import json
import requests
import binascii
import random
import time
from libcontractvm import Wallet
from . import Log
logger = logging.getLogger('libcontractvm')
class WalletExplorer (Wallet.Wallet):
def __init__ (self, chain = 'XTN', address = None, wif = None, wallet_file = None):
self.lockedspendables = []
super (WalletExplorer, self).__init__ (chain, address, wif, wallet_file)
def _chaincodeToChainSoName (self, code):
if self.chain == 'XTN':
code = 'BTCTEST'
elif self.chain == 'XDT':
code = 'DOGETEST'
elif self.chain == 'XLT':
code = 'LTCTEST'
else:
code = self.chain
return code
def _spendables (self, value):
code = self._chaincodeToChainSoName (self.chain)
u = 'https://chain.so/api/v2/get_tx_unspent/'+code+'/'+self.address
#print (u)
while True:
try:
d = requests.get (u, headers={'content-type': 'application/json'}).json()
except:
time.sleep (5)
continue
sps = []
tot = 0
random.shuffle (d['data']['txs'])
for s in d['data']['txs']:
#if int (s['confirmations']) > 0:
txid = s['txid'] #''
#for x in range (len (s['txid']), -2, -2):
# txid += s['txid'][x:x+2]
if (txid+':'+str (s['output_no'])) in self.lockedspendables:
#print ('Locked spendable')
continue
tot += int (float (s['value']) * 100000000)
sps.append (Spendable.from_dict ({'coin_value': int (float (s['value']) * 100000000),
'script_hex': s['script_hex'], 'tx_hash_hex': txid, 'tx_out_index': int (s['output_no'])}))
self.lockedspendables.append (txid+':'+str (s['output_no']))
if tot >= value:
#print (sps)
return sps
return sps
def getBalance (self):
code = self._chaincodeToChainSoName (self.chain)
u = 'https://chain.so/api/v2/get_address_balance/'+code+'/'+self.address
while True:
try:
d = requests.get (u, headers={'content-type': 'application/json'}).json()
except:
time.sleep (5)
continue
return float (d['data']['confirmed_balance']) + float (d['data']['unconfirmed_balance'])
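# Illustrative usage sketch (not part of the original module): creating a
# Bitcoin-testnet wallet and querying its balance through the chain.so
# explorer. The wallet file name is hypothetical.
def _demo_wallet_balance():
    wallet = WalletExplorer(chain='XTN', wallet_file='wallet.json')
    return wallet.getBalance()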
|
mit
| -7,089,695,856,140,569,000
| 29.049505
| 269
| 0.679407
| false
| 2.949466
| false
| false
| false
|
vividvilla/olaf
|
olaf/__init__.py
|
1
|
3809
|
# -*- coding: utf-8 -*-
"""
Olaf
~~~~~~~
Flask main app
:copyright: (c) 2015 by Vivek R.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import shutil
import click
default_theme = 'basic'
module_path = os.path.dirname(os.path.abspath(__file__))
contents_dir = '_contents'
posts_dir = 'posts'
pages_dir = 'pages'
content_extension = '.md'
def get_current_dir():
return os.getcwd()
def is_valid_path(path):
"""
check if path exists
"""
if not os.path.exists(path):
click.secho(
'path "{}" does not exist'.format(path), fg='red')
raise OSError('path "{}" does not exist'.format(path))
return True
def is_valid_site():
"""
check if the current path is a valid site directory
"""
config_path = os.path.join(get_current_dir(), 'config.py')
# check if inside site directory
if not os.path.exists(config_path):
click.secho(
'Cannot find config file, please make sure'
' you are inside the site directory', fg='red')
raise OSError('Cannot find config file, please make sure'
' you are inside the site directory')
return True
def get_themes_list(path):
"""
Get list of themes from a given themes path
"""
if not os.path.exists(path):
child_dir = []
else:
child_dir = os.walk(path).next()[1]
valid_themes = []
for dir in child_dir:
if (os.listdir(os.path.join(path, dir))
and not dir.startswith('.')):
valid_themes.append(
dict(name=dir, path=os.path.join(path, dir)))
return valid_themes
def get_theme_by_name(theme):
# get list of inbuilt themes
inbuilt_themes = get_themes_list(os.path.join(module_path, 'themes'))
# get list of custom themes
custom_themes = get_themes_list(os.path.join(get_current_dir(), 'themes'))
# check for theme in inbuilt themes directory
theme_exists_in_inbuilt = [
item['name'] for item in inbuilt_themes if item['name'] == theme]
# check for theme in custom themes directory
theme_exists_in_custom = [
item['name'] for item in custom_themes if item['name'] == theme]
theme_path = None
if theme_exists_in_inbuilt:
# If theme in bundled themes list then get from default themes directory
theme_path = os.path.join(module_path, 'themes', theme)
elif theme_exists_in_custom:
# If theme not found in bundled themes then get from sites directory
theme_path = os.path.join(get_current_dir(), 'themes', theme)
return theme_path
def get_default_theme_name(theme):
"""
get theme from config or set it default
"""
# return theme name if its set via commandline argument
if theme:
return theme
# load config file
config_path = os.path.join(get_current_dir(), 'config.py')
sys.path.append(os.path.dirname(os.path.expanduser(config_path)))
import config
# If theme specified as option then ignore other rules
# else get from config file, if not found in config file set default theme
if config.SITE.get('theme'):
return config.SITE['theme']
else:
return default_theme
def create_project_site(project_name):
try:
# create project directory
os.mkdir(os.path.join(get_current_dir(), project_name))
except OSError:
raise
try:
# copy config file
shutil.copyfile(
os.path.join(module_path, 'config-sample.py'),
os.path.join(get_current_dir(), project_name, 'config.py'))
except IOError:
raise
try:
# create init file
open(
os.path.join(get_current_dir(), project_name, '__init__.py'), 'a'
).close()
# disqus file
open(
os.path.join(get_current_dir(), project_name, 'disqus.html'), 'a'
).close()
# create contents directory
os.mkdir(os.path.join(get_current_dir(), project_name, contents_dir))
os.mkdir(
os.path.join(get_current_dir(), project_name, contents_dir, posts_dir))
os.mkdir(
os.path.join(get_current_dir(), project_name, contents_dir, pages_dir))
except OSError:
raise
return True
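# Illustrative usage sketch (not part of the original module): scaffolding a
# new site and resolving the theme it should use. The project name is
# hypothetical, and get_default_theme_name() assumes it is called from inside
# the generated site directory.
def _demo_scaffold():
    create_project_site('my-blog')
    theme_name = get_default_theme_name(None)
    return get_theme_by_name(theme_name)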
|
mit
| -7,437,390,118,091,616,000
| 23.416667
| 75
| 0.688107
| false
| 3.011067
| true
| false
| false
|
mefly2012/platform
|
src/clean_validate/qyxx_hzp_pro_prod_cert.py
|
1
|
3642
|
# -*- coding: utf-8 -*-
import sys
import re
reload(sys)
sys.setdefaultencoding('utf-8')
from common import public
class qyxx_hzp_pro_prod_cert():
"""中标"""
need_check_ziduan = [
u'bbd_dotime',
u'company_name',
u'location',
u'produce_address',
u'issue_date',
u'validdate',
u'certificate_no',
u'details',
u'bbd_url',
u'province',
u'product_name',
u'bbd_source'
]
def check_bbd_dotime(self, indexstr, ustr):
"""dotime 清洗验证"""
ret = None
if ustr and len(ustr):
if not public.bbd_dotime_date_format(ustr):
ret = u"不合法日期"
return ret
def check_company_name(self, indexstr, ustr):
"""企业名称 清洗验证"""
ret = None
if ustr and len(ustr.strip()):
if not public.has_count_hz(ustr, 2):
ret = u'没有2个汉字'
else:
ret = u'为空'
return ret
def check_location(self, indexstr, ustr):
"""住所 清洗验证"""
ret = None
if ustr and len(ustr.strip()):
if not public.has_count_hz(ustr, 2):
ret = u'没有2个汉字'
else:
ret = u'为空'
return ret
def check_produce_address(self, indexstr, ustr):
"""生产地址 清洗验证"""
ret = None
if ustr and len(ustr):
if not public.has_count_hz(ustr, 2):
ret = u'没有2个汉字'
return ret
def check_issue_date(self, indexstr, ustr):
"""发证日期 清洗验证"""
ret = None
if ustr and len(ustr):
if not public.date_format(ustr):
ret = u"不合法日期"
else:
ret = u'为空'
return ret
def check_validdate(self, indexstr, ustr):
"""有效期 清洗验证"""
ret = None
if ustr and len(ustr):
if not public.date_format(ustr):
ret = u"不合法日期"
else:
ret = u'为空'
return ret
def check_certificate_no(self, indexstr, ustr):
"""证书编号/许可证编号 清洗验证"""
ret = None
if ustr and len(ustr.strip()):
ret = None
# if not re.compile(u'^XK\d{2}-\d{3} \d{4}$').match(ustr):
# ret = u'不符合格式'
else:
ret = u'为空'
return ret
def check_details(self, indexstr, ustr):
"""明细 清洗验证"""
ret = None
if ustr and len(ustr.strip()):
if not public.has_count_hz(ustr, 2):
ret = u'没有2个汉字'
else:
ret = u'为空'
return ret
def check_bbd_url(self, indexstr, ustr):
"""url 清洗验证"""
ret = None
return ret
def check_province(self, indexstr, ustr):
"""省份 清洗验证"""
ret = None
if ustr and len(ustr.strip()):
if ustr not in public.PROVINCE:
ret = u'不是合法省份'
else:
ret = u'为空'
return ret
def check_product_name(self, indexstr, ustr):
"""产品名称 清洗验证"""
ret = None
if ustr and len(ustr.strip()):
if not re.compile(u'^[\u4e00-\u9fa5]{1,}$').match(ustr):
ret = u'不纯汉字'
else:
ret = u'为空'
return ret
def check_bbd_source(self, indexstr, ustr):
"""数据源 清洗验证"""
ret = None
return ret
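# Illustrative usage sketch (not part of the original module): running a single
# field check. The sample values are hypothetical; each check_* method returns
# None when the value passes and a short description of the problem otherwise.
def _demo_check():
    checker = qyxx_hzp_pro_prod_cert()
    return checker.check_issue_date(u'issue_date', u'2015-01-01')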
|
apache-2.0
| 4,407,941,176,787,903,000
| 23.529412
| 70
| 0.476019
| false
| 2.928885
| false
| false
| false
|
slgobinath/SafeEyes
|
safeeyes/rpc.py
|
1
|
3412
|
#!/usr/bin/env python
# Safe Eyes is a utility to remind you to take break frequently
# to protect your eyes from eye strain.
# Copyright (C) 2017 Gobinath
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
RPC server and client implementation.
"""
import logging
from threading import Thread
from xmlrpc.server import SimpleXMLRPCServer
from xmlrpc.client import ServerProxy
class RPCServer:
"""
An asynchronous RPC server.
"""
def __init__(self, port, context):
self.__running = False
logging.info('Setting up an RPC server on port %d', port)
self.__server = SimpleXMLRPCServer(("localhost", port), logRequests=False, allow_none=True)
self.__server.register_function(context['api']['show_settings'], 'show_settings')
self.__server.register_function(context['api']['show_about'], 'show_about')
self.__server.register_function(context['api']['enable_safeeyes'], 'enable_safeeyes')
self.__server.register_function(context['api']['disable_safeeyes'], 'disable_safeeyes')
self.__server.register_function(context['api']['take_break'], 'take_break')
self.__server.register_function(context['api']['status'], 'status')
self.__server.register_function(context['api']['quit'], 'quit')
def start(self):
"""
Start the RPC server.
"""
if not self.__running:
self.__running = True
logging.info('Start the RPC server')
server_thread = Thread(target=self.__server.serve_forever)
server_thread.start()
def stop(self):
"""
Stop the server.
"""
if self.__running:
logging.info('Stop the RPC server')
self.__running = False
self.__server.shutdown()
class RPCClient:
"""
An RPC client to communicate with the RPC server.
"""
def __init__(self, port):
self.port = port
self.proxy = ServerProxy('http://localhost:%d/' % self.port, allow_none=True)
def show_settings(self):
"""
Show the settings dialog.
"""
self.proxy.show_settings()
def show_about(self):
"""
Show the about dialog.
"""
self.proxy.show_about()
def enable_safeeyes(self):
"""
Enable Safe Eyes.
"""
self.proxy.enable_safeeyes()
def disable_safeeyes(self):
"""
Disable Safe Eyes.
"""
self.proxy.disable_safeeyes(None)
def take_break(self):
"""
Take a break now.
"""
self.proxy.take_break()
def status(self):
"""
Return the status of Safe Eyes
"""
return self.proxy.status()
def quit(self):
"""
Quit Safe Eyes.
"""
self.proxy.quit()
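# Illustrative usage sketch (not part of the original module): pairing the
# server and the client on an arbitrary local port. The port number and the
# context wiring are assumptions; in Safe Eyes the context dictionary comes
# from the application core.
def _demo_rpc(context):
    server = RPCServer(7201, context)
    server.start()
    client = RPCClient(7201)
    status = client.status()
    server.stop()
    return status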
|
gpl-3.0
| -7,180,652,859,256,321,000
| 29.19469
| 99
| 0.611079
| false
| 4.120773
| false
| false
| false
|
sagiss/sardana
|
src/sardana/taurus/qt/qtgui/extra_macroexecutor/macroparameterseditor/customeditors/senv.py
|
1
|
14391
|
#!/usr/bin/env python
##############################################################################
##
## This file is part of Sardana
##
## http://www.sardana-controls.org/
##
## Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
## Sardana is free software: you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## Sardana is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with Sardana. If not, see <http://www.gnu.org/licenses/>.
##
##############################################################################
from taurus.external.qt import Qt
from taurus import Database
from taurus.core.taurusbasetypes import TaurusElementType
from taurus.core.taurusdatabase import TaurusAttrInfo
from taurus.qt.qtgui.input import TaurusAttrListComboBox
from taurus.qt.qtgui.tree import TaurusDbTreeWidget
from taurus.qt.qtgui.resource import getThemeIcon
from sardana.taurus.qt.qtgui.extra_macroexecutor.macroparameterseditor.macroparameterseditor import MacroParametersEditor
from sardana.taurus.qt.qtgui.extra_macroexecutor.macroparameterseditor.parameditors import LineEditParam, ParamBase, ComboBoxParam, CheckBoxParam, DirPathParam, MSAttrListComboBoxParam
from sardana.taurus.qt.qtgui.extra_macroexecutor.macroparameterseditor.model import ParamEditorModel
from sardana.taurus.qt.qtgui.extra_macroexecutor.common import MSAttrListComboBox
class SenvEditor(Qt.QWidget, MacroParametersEditor):
def __init__(self, parent=None):
Qt.QWidget.__init__(self, parent)
MacroParametersEditor.__init__(self)
self.valueWidget = None
def initComponents(self):
self.setLayout(Qt.QFormLayout())
self.layout().addRow(Qt.QLabel("Setting environment variable:", self))
self.nameComboBox = ComboBoxParam(self)
self.nameComboBox.addItems(["ActiveMntGrp", "ExtraColumns", "JsonRecorder", "ScanFile", "ScanDir"])
self.nameComboBox.setEditable(True)
self.connect(self.nameComboBox, Qt.SIGNAL("currentIndexChanged(int)"), self.onNameComboBoxChanged)
self.layout().addRow("name:", self.nameComboBox)
nameIndex = self.model().index(0, 1, self.rootIndex())
self.nameComboBox.setIndex(nameIndex)
def setRootIndex(self, rootIndex):
self._rootIndex = rootIndex
self.initComponents()
def rootIndex(self):
return self._rootIndex
def model(self):
return self._model
def setModel(self, model):
self._model = model
if isinstance(model, ParamEditorModel):
self.setRootIndex(Qt.QModelIndex())
def onNameComboBoxChanged(self, index):
text = str(self.nameComboBox.currentText())
if self.valueWidget is not None:
label = self.layout().labelForField(self.valueWidget)
if label is not None:
self.layout().removeWidget(label)
label.setParent(None)
label = None
self.layout().removeWidget(self.valueWidget)
self.valueWidget.resetValue()
self.valueWidget.setParent(None)
self.valueWidget = None
self.valueWidget, label = getSenvValueEditor(text, self)
paramRepeatIndex = self.model().index(1, 0, self.rootIndex())
repeatIndex = paramRepeatIndex.child(0, 0)
valueIndex = repeatIndex.child(0, 1)
self.valueWidget.setIndex(valueIndex)
if label:
self.layout().addRow(label, self.valueWidget)
else:
self.layout().addRow(self.valueWidget)
def getSenvValueEditor(envName, parent):
"""Factory method, requires: string, and QWidget as a parent for returned editor.
Factory returns a tuple of widget and a label for it.
:return: (Qt.QWidget, str) """
label = "value:"
if envName == "ActiveMntGrp":
editor = MSAttrListComboBoxParam(parent)
editor.setUseParentModel(True)
editor.setModel("/MeasurementGroupList")
elif envName == "ExtraColumns":
editor = ExtraColumnsEditor(parent)
label = None
elif envName == "JsonRecorder":
editor = CheckBoxParam(parent)
elif envName == "ScanDir":
editor = DirPathParam(parent)
elif envName == "ScanFile":
editor = LineEditParam(parent)
else:
editor = LineEditParam(parent)
return editor, label
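# Illustrative sketch (not part of the original module): how the factory above
# is typically consumed. The parent widget and layout below are hypothetical.
# editor, label = getSenvValueEditor("ScanDir", parentWidget)
# if label:
#     layout.addRow(label, editor)
# else:
#     layout.addRow(editor)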
class ExtraColumnsEditor(ParamBase, Qt.QWidget):
def __init__(self, parent=None, paramModel=None):
ParamBase.__init__(self, paramModel)
Qt.QWidget.__init__(self, parent)
self.setLayout(Qt.QVBoxLayout())
self.layout().setContentsMargins(0, 0, 0, 0)
addNewColumnButton = Qt.QPushButton(getThemeIcon("list-add") , "Add new column...", self)
removeSelectedColumnsButton = Qt.QPushButton(getThemeIcon("list-remove") , "Remove selected...", self)
buttonsLayout = Qt.QHBoxLayout()
buttonsLayout.addWidget(addNewColumnButton)
buttonsLayout.addWidget(removeSelectedColumnsButton)
self.layout().addLayout(buttonsLayout)
self.extraColumnsTable = ExtraColumnsTable(self)
self.extraColumnsModel = ExtraColumnsModel()
self.extraColumnsTable.setModel(self.extraColumnsModel)
self.extraColumnsTable.setItemDelegate(ExtraColumnsDelegate(self.extraColumnsTable))
self.layout().addWidget(self.extraColumnsTable)
self.connect(addNewColumnButton, Qt.SIGNAL("clicked()"), self.onAddNewColumn)
self.connect(removeSelectedColumnsButton, Qt.SIGNAL("clicked()"), self.onRemoveSelectedColumns)
self.connect(self.extraColumnsModel, Qt.SIGNAL("dataChanged (const QModelIndex&,const QModelIndex&)"), self.onExtraColumnsChanged)
self.connect(self.extraColumnsModel, Qt.SIGNAL("modelReset()"), self.onExtraColumnsChanged)
def getValue(self):
return repr(self.extraColumnsTable.model().columns())
def setValue(self, value):
try:
columns = eval(value)
except:
columns = []
self.extraColumnsTable.setColumns(columns)
def onAddNewColumn(self):
self.extraColumnsTable.insertRows()
self.emit(Qt.SIGNAL("modelChanged()"))
def onRemoveSelectedColumns(self):
self.extraColumnsTable.removeRows()
self.emit(Qt.SIGNAL("modelChanged()"))
def onExtraColumnsChanged(self):
self.emit(Qt.SIGNAL("modelChanged()"))
class ExtraColumnsTable(Qt.QTableView):
def __init__(self, parent):
Qt.QTableView.__init__(self, parent)
self.setSelectionBehavior(Qt.QAbstractItemView.SelectRows)
self.setSelectionMode(Qt.QAbstractItemView.ExtendedSelection)
def setColumns(self, columns):
if columns == None: columns = []
self.model().setColumns(columns)
self.resizeColumnsToContents()
def insertRows(self):
self.model().insertRows(self.model().rowCount())
def removeRows(self):
rows = [index.row() for index in self.selectedIndexes()]
rows = list(set(rows))
rows.sort(reverse=True)
for row in rows:
self.model().removeRows(row)
class ExtraColumnsDelegate(Qt.QItemDelegate):
def __init__(self, parent=None):
Qt.QItemDelegate.__init__(self, parent)
db = Database()
self.host = db.getNormalName()
def createEditor(self, parent, option, index):
if index.column() == 1:
self.combo_attr_tree_widget = TaurusDbTreeWidget(perspective=TaurusElementType.Device)
self.combo_attr_tree_widget.setModel(self.host)
treeView = self.combo_attr_tree_widget.treeView()
qmodel = self.combo_attr_tree_widget.getQModel()
editor = Qt.QComboBox(parent)
editor.setModel(qmodel)
editor.setMaxVisibleItems(20)
editor.setView(treeView)
elif index.column() == 2:
editor = MSAttrListComboBox(parent)
editor.setUseParentModel(True)
editor.setModel("/InstrumentList")
else:
editor = Qt.QItemDelegate.createEditor(self, parent, option, index)
return editor
def setEditorData(self, editor, index):
if index.column() == 2:
text = Qt.from_qvariant(index.model().data(index, Qt.Qt.DisplayRole), str)
editor.setCurrentText(text)
else:
Qt.QItemDelegate.setEditorData(self, editor, index)
def setModelData(self, editor, model, index):
column = index.column()
if column == 1:
selectedItems = self.combo_attr_tree_widget.selectedItems()
if not len(selectedItems) == 1: return
taurusTreeAttributeItem = selectedItems[0]
itemData = taurusTreeAttributeItem.itemData()
if isinstance(itemData, TaurusAttrInfo):
model.setData(index, Qt.QVariant(itemData.fullName()))
elif column == 2:
model.setData(index, Qt.QVariant(editor.currentText()))
else:
Qt.QItemDelegate.setModelData(self, editor, model, index)
def sizeHint(self, option, index):
if index.column() == 0:
fm = option.fontMetrics
text = Qt.from_qvariant(index.model().data(index, Qt.Qt.DisplayRole), str)
document = Qt.QTextDocument()
document.setDefaultFont(option.font)
document.setHtml(text)
size = Qt.QSize(document.idealWidth() + 5, fm.height())
elif index.column() == 1:
editor = self.createEditor(self.parent(), option, index)
if editor is None:
size = Qt.QItemDelegate.sizeHint(self, option, index)
else:
size = editor.sizeHint()
editor.hide()
editor.setParent(None)
# editor.destroy()
else:
size = Qt.QItemDelegate.sizeHint(self, option, index)
return size
class ExtraColumnsModel(Qt.QAbstractTableModel):
def __init__(self, columns=None):
if columns is None: columns = []
Qt.QAbstractItemModel.__init__(self)
self.__columns = columns
def setColumns(self, columns):
self.__columns = columns
self.reset()
def columns(self):
return self.__columns
def rowCount(self, index=Qt.QModelIndex()):
return len(self.__columns)
def columnCount(self, index=Qt.QModelIndex()):
return 3
def data(self, index, role=Qt.Qt.DisplayRole):
if not index.isValid() or not (0 <= index.row() < self.rowCount()):
return Qt.QVariant()
row = index.row()
column = index.column()
#Display Role
if role == Qt.Qt.DisplayRole:
if column == 0: return Qt.QVariant(Qt.QString(self.__columns[row]['label']))
elif column == 1: return Qt.QVariant(Qt.QString(self.__columns[row]['model']))
elif column == 2: return Qt.QVariant(Qt.QString(self.__columns[row]['instrument']))
return Qt.QVariant()
def headerData(self, section, orientation, role=Qt.Qt.DisplayRole):
if role == Qt.Qt.TextAlignmentRole:
if orientation == Qt.Qt.Horizontal:
return Qt.QVariant(int(Qt.Qt.AlignLeft | Qt.Qt.AlignVCenter))
return Qt.QVariant(int(Qt.Qt.AlignRight | Qt.Qt.AlignVCenter))
if role != Qt.Qt.DisplayRole:
return Qt.QVariant()
#So this is DisplayRole...
if orientation == Qt.Qt.Horizontal:
if section == 0: return Qt.QVariant("Label")
elif section == 1: return Qt.QVariant("Attribute")
elif section == 2: return Qt.QVariant("Instrument")
return Qt.QVariant()
else:
return Qt.QVariant(Qt.QString.number(section + 1))
def flags(self, index):
flags = Qt.Qt.ItemIsEnabled | Qt.Qt.ItemIsSelectable
if index.isValid():
column = index.column()
if column in (0, 1, 2):
flags |= Qt.Qt.ItemIsEditable
return flags
def setData(self, index, value=None, role=Qt.Qt.EditRole):
if index.isValid() and (0 <= index.row() < self.rowCount()):
row = index.row()
column = index.column()
value = Qt.from_qvariant(value, str)
if column == 0: self.__columns[row]['label'] = value
elif column == 1: self.__columns[row]['model'] = value
elif column == 2: self.__columns[row]['instrument'] = value
self.emit(Qt.SIGNAL("dataChanged(QModelIndex,QModelIndex)"), index, index)
return True
return False
def insertRows(self, row, rows=1, parentindex=None):
if parentindex is None: parentindex = Qt.QModelIndex()
first = row
last = row + rows - 1
self.beginInsertRows(parentindex, first, last)
for row in range(first, last + 1):
self.insertRow(row)
self.endInsertRows()
return True
def insertRow(self, row, parentIndex=None):
self.__columns.insert(row, {'label':'', 'model':'', 'instrument':''})
def removeRows(self, row, rows=1, parentindex=None):
if parentindex is None: parentindex = Qt.QModelIndex()
first = row
last = row + rows - 1
self.beginRemoveRows(parentindex, first, last)
for row in range(first, last + 1):
self.removeRow(row)
self.endRemoveRows()
return True
def removeRow(self, row, parentIndex=None):
self.__columns.pop(row)
CUSTOM_EDITOR = SenvEditor
if __name__ == "__main__":
import sys
import taurus
from taurus.qt.qtgui.application import TaurusApplication
app = TaurusApplication(sys.argv)
args = app.get_command_line_args()
editor = SenvEditor()
macroServer = taurus.Device(args[0])
macroInfoObj = macroServer.getMacroInfoObj("senv")
macroNode = MacroNode()
editor.setMacroNode(macroNode)
editor.show()
sys.exit(app.exec_())
|
lgpl-3.0
| 7,709,384,933,967,993,000
| 37.47861
| 184
| 0.637968
| false
| 3.920185
| false
| false
| false
|
quaddra/engage
|
python_pkg/engage/drivers/standard/memcached__1_4/driver.py
|
1
|
7343
|
"""Service manager for memcached
"""
import os
import os.path
import shutil
import sys
import time
# fix path if necessary (if running from source or running as test)
try:
import engage.utils
except:
sys.exc_clear()
dir_to_add_to_python_path = os.path.abspath((os.path.join(os.path.dirname(__file__), "../../../..")))
sys.path.append(dir_to_add_to_python_path)
import engage.drivers.service_manager as service_manager
import engage.drivers.resource_metadata as resource_metadata
import engage.utils.path as iupath
import engage_utils.process as iuprocess
import engage.utils.http as iuhttp
import engage.utils.log_setup
import engage.utils.file as iufile
import engage.utils.timeout as iutimeout
import engage.drivers.utils
from engage.drivers.password_repo_mixin import PasswordRepoMixin
from engage.drivers.action import *
import engage.drivers.genforma.macports_pkg as macports_pkg
import engage.drivers.genforma.aptget as aptget
logger = engage.utils.log_setup.setup_script_logger(__name__)
from engage.utils.user_error import ScriptErrInf, UserError
import gettext
_ = gettext.gettext
errors = { }
def define_error(error_code, msg):
global errors
error_info = ScriptErrInf(__name__, error_code, msg)
errors[error_info.error_code] = error_info
# error codes
ERR_MEMCACHED_BUILD_FAILED = 1
ERR_MEMCACHED_NO_INSTALL_DIR = 2
ERR_MEMCACHED_NO_EXEC_FOUND = 3
ERR_MEMCACHED_START_FAILED = 4
ERR_MEMCACHED_STOP_FAILED = 5
ERR_MEMCACHED_EXITED = 6
ERR_MEMCACHED_UNKNOWN_OSTYPE = 7
define_error(ERR_MEMCACHED_BUILD_FAILED,
_("Memcached build failed"))
define_error(ERR_MEMCACHED_NO_INSTALL_DIR,
_("Post install check failed: missing installation directory '%(dir)s'"))
define_error(ERR_MEMCACHED_NO_EXEC_FOUND,
_("Post install check failed: missing executable in directory '%(dir)s'"))
define_error(ERR_MEMCACHED_START_FAILED,
_("Memcached daemon execution failed in resource %(id)s"))
define_error(ERR_MEMCACHED_STOP_FAILED,
_("Memcached daemon stop failed"))
define_error(ERR_MEMCACHED_EXITED,
_("Memcached daemon appears to have exited after startup"))
define_error(ERR_MEMCACHED_UNKNOWN_OSTYPE,
_("Installation on unknown os type %(ostype)s"))
def get_packages_filename():
return engage.drivers.utils.get_packages_filename(__file__)
def make_context(resource_json, sudo_password_fn, dry_run=False):
ctx = Context(resource_json, logger, __file__,
sudo_password_fn=sudo_password_fn,
dry_run=dry_run)
ctx.check_port("input_ports.host",
os_type=str,
log_directory=str,
sudo_password=str)
ctx.check_port("output_ports.cache",
host=str,
port=int,
provider=str,
home=str)
if ctx.props.input_ports.host.os_type == 'linux':
ctx.add("memcached_exe", "/usr/bin/memcached")
# we stick the linux pid file where it would go if memcached
# is started by the os. This handles the case where the
# server is rebooted and we want to see if memcached is running.
ctx.add("pidfile", os.path.join("/var/run/memcached.pid"))
elif ctx.props.input_ports.host.os_type == 'mac-osx':
ctx.add("memcached_exe", "/opt/local/bin/memcached")
# this is hack: we should really have separate drivers for macports
# and aptget
ctx.add("input_ports.macports.macports_exe", "/opt/local/bin/port")
ctx.add("pidfile", os.path.join(ctx.props.output_ports.cache.home, "memcached.pid"))
else:
raise UserError(errors[ERR_MEMCACHED_UNKNOWN_OSTYPE], msg_args={'ostype': ctx.props.input_ports.host.os_type})
ctx.add("logfile", os.path.join(ctx.props.input_ports.host.log_directory, "memcached.log"))
ctx.add("memsize", 64)
return ctx
@make_action
def start_memcached(self):
"""We start memcached as a daemon process. The pidfile is created
by memcached.
"""
p = self.ctx.props
memcached_args = [p.memcached_exe, "-d", "-P", p.pidfile,
"-m", str(p.memsize)]
if os.geteuid()==0:
memcached_args.extend(["-u", "root"])
rc = procutils.run_and_log_program(memcached_args,
None, self.ctx.logger)
if rc != 0:
raise UserError(errors[ERR_MEMCACHED_START_FAILED],
msg_args={"id":p.id},
developer_msg="rc was %d" % rc)
self.ctx.logger.debug("memcached daemon started successfully")
class Manager(service_manager.Manager, PasswordRepoMixin):
def __init__(self, metadata, dry_run=False):
package_name = "%s %s" % (metadata.key["name"],
metadata.key["version"])
service_manager.Manager.__init__(self, metadata, package_name)
self.ctx = make_context(metadata.to_json(),
sudo_password_fn=self._get_sudo_password,
dry_run=dry_run)
def validate_pre_install(self):
pass
def get_pid_file_path(self):
return self.ctx.props.pidfile
def install(self, package):
r = self.ctx.r
p = self.ctx.props
home_path = p.output_ports.cache.home
# on linux, use apt-get
if p.input_ports.host.os_type == 'linux':
# use apt-get
r(aptget.install, ['memcached'])
elif p.input_ports.host.os_type == 'mac-osx':
# otherwise install using macports
r(macports_pkg.port_install, ['memcached'])
else:
raise UserError(errors[ERR_MEMCACHED_UNKNOWN_OSTYPE], msg_args={'ostype': p.input_ports.host.os_type})
# home_path used for pidfile
r(ensure_dir_exists, home_path)
self.validate_post_install()
def is_installed(self):
p = self.ctx.props
rv = self.ctx.rv
if not os.path.exists(p.output_ports.cache.home):
return False
if p.input_ports.host.os_type == 'linux':
return rv(aptget.is_pkg_installed, 'memcached')
elif p.input_ports.host.os_type == 'mac-osx':
return rv(macports_pkg.is_installed, "memcached")
else:
raise UserError(errors[ERR_MEMCACHED_UNKNOWN_OSTYPE], msg_args={'ostype': p.input_ports.host.os_type})
def validate_post_install(self):
r = self.ctx.r
p = self.ctx.props
home_path = p.output_ports.cache.home
r(check_dir_exists, home_path)
if p.input_ports.host.os_type == 'linux':
r(aptget.check_installed, "memcached")
elif p.input_ports.host.os_type == 'mac-osx':
r(macports_pkg.check_installed, "memcached")
else:
raise UserError(errors[ERR_MEMCACHED_UNKNOWN_OSTYPE], msg_args={'ostype': p.input_ports.host.os_type})
def start(self):
p = self.ctx.props
self.ctx.r(start_memcached)
# make sure that it is up
self.ctx.poll_rv(10, 1.0, lambda x: x, get_server_status,
p.pidfile)
def is_running(self):
return self.ctx.rv(get_server_status, self.ctx.props.pidfile)!=None
def stop(self):
r = self.ctx.r
p = self.ctx.props
r(stop_server, p.pidfile, force_stop=True, timeout_tries=20)
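# Illustrative sketch (assumption, not part of the original driver): the rough
# shape of the resource JSON consumed by make_context(). The values shown are
# hypothetical; the required fields follow the check_port() calls above.
# resource_json = {
#     "id": "memcached-1",
#     "key": {"name": "memcached", "version": "1.4"},
#     "input_ports": {"host": {"os_type": "linux",
#                              "log_directory": "/var/log/app",
#                              "sudo_password": "sudo_password"}},
#     "output_ports": {"cache": {"host": "localhost", "port": 11211,
#                                "provider": "memcached",
#                                "home": "/var/lib/app/memcached"}},
# }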
|
apache-2.0
| 4,515,107,118,665,952,000
| 37.244792
| 105
| 0.625221
| false
| 3.495002
| false
| false
| false
|
enricopesce/ec2_snap
|
ec2_snap_exec.py
|
1
|
3106
|
import logging
import boto3
import datetime
logger = logging.getLogger()
logger.setLevel(logging.INFO)
simulate = False
def get_instance_tag(id, tag_name):
res_ec2 = boto3.resource('ec2')
tags = res_ec2.Instance(id).tags
if tags is not None:
for tag in tags:
if tag['Key'] == tag_name:
return tag['Value']
return id
def get_volume_tag(id, tag_name):
res_ec2 = boto3.resource('ec2')
tags = res_ec2.Volume(id).tags
if tags is not None:
for tag in tags:
if tag['Key'] == tag_name:
return tag['Value']
return id
def snapshots_by_instance(instance, delete_date, mode):
devices = instance.block_device_mappings
inst_id = instance.instance_id
inst_name = get_instance_tag(inst_id, "Name")
mode_type = "HOT-SNAPSHOT"
try:
if mode == 'cold':
res_instance = boto3.resource('ec2').Instance(inst_id)
res_instance.stop(DryRun=simulate)
logging.info("Stopping instance %s" % inst_name)
res_instance.wait_until_stopped()
logging.info("Stopped instance %s" % inst_name)
mode_type = "COLD-SNAPSHOT"
for dev in devices:
if dev.get('Ebs', None) is None:
continue
vol_id = dev['Ebs']['VolumeId']
vol_name = get_volume_tag(vol_id, "Name")
dev_name = dev['DeviceName']
volume = boto3.resource('ec2').Volume(vol_id)
logging.info("Snapshotting instance %s (%s) mode %s device %s" % (inst_id, inst_name, mode_type, dev_name))
res_snap = volume.create_snapshot(DryRun=simulate)
res_snap.create_tags(DryRun=simulate, Tags=[{'Key': 'Name', 'Value': vol_name},
{'Key': 'DeviceName', 'Value': dev_name},
{'Key': 'InstanceName', 'Value': inst_name},
{'Key': 'VolumeID', 'Value': vol_id},
{'Key': 'SnapMode', 'Value': mode_type},
{'Key': 'DeleteOn', 'Value': delete_date}])
logging.info("Snapshots finished")
if mode == "cold":
logging.info("Starting instance %s %s" % (inst_id, inst_name))
res_instance.start(DryRun=simulate)
except Exception as e:
logging.error("Unexpected error: %s" % e)
return
#lambda call
def ec2_snap_exec(event, context):
try:
days = int(event['retention'])
instance = boto3.resource('ec2').Instance(event['instance_id'])
delete_date = datetime.date.today() + datetime.timedelta(days=days)
mode = event['mode']
except Exception as e:
logging.error("Unexpected error: %s" % e)
else:
snapshots_by_instance(instance, delete_date.strftime('%Y-%m-%d'), mode)
return
params = {'instance_id': 'i-a44d9064', 'retention': '15', 'mode': 'hot'}
print params
ec2_snap_exec(params, '')
|
gpl-3.0
| 1,819,731,942,005,402,600
| 34.295455
| 119
| 0.538635
| false
| 3.764848
| false
| false
| false
|
dianchang/flask-debugtoolbar
|
flask_debugtoolbar/panels/routes.py
|
1
|
1876
|
from collections import OrderedDict
from flask_debugtoolbar.panels import DebugPanel
from flask import current_app
_ = lambda x: x
class RoutesDebugPanel(DebugPanel):
"""
A panel to display Flask app routes.
"""
name = 'Routes'
has_content = True
def nav_title(self):
return _('Routes')
def title(self):
return _('Routes')
def url(self):
return ''
def process_request(self, request):
pass
def content(self):
context = self.context.copy()
blueprints = {}
raw_endpoints = {}
for endpoint, _rules in current_app.url_map._rules_by_endpoint.iteritems():
if any(item in endpoint for item in ['_debug_toolbar', 'debugtoolbar', 'static']):
continue
for rule in _rules:
rule.methods = sorted(filter(lambda x: x not in ['HEAD', 'OPTIONS'], rule.methods))
if '.' in endpoint:
blueprint_name = endpoint.split('.')[0]
if not blueprint_name in blueprints:
blueprints[blueprint_name] = {}
blueprints[blueprint_name][endpoint] = _rules
else:
raw_endpoints[endpoint] = _rules
# Reorder
blueprints = OrderedDict(sorted(blueprints.iteritems()))
for key in blueprints.keys():
blueprints[key] = OrderedDict(sorted(blueprints[key].iteritems()))
raw_endpoints = OrderedDict(sorted(raw_endpoints.iteritems()))
context.update({
'blueprints': blueprints,
'raw_endpoints': raw_endpoints
})
return self.render('panels/routes.html', context)
def remove_http_methods(rules):
"""Do not show HEAD, OPTION methods."""
for rule in rules:
rule.methods = sorted(filter(lambda x: x not in ['HEAD', 'OPTIONS'], rule.methods))
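# Illustrative usage sketch (assumption, not part of the original module):
# enabling this panel by listing its dotted path in the toolbar configuration.
# app.config['DEBUG_TB_PANELS'] = [
#     'flask_debugtoolbar.panels.routes.RoutesDebugPanel',
#     # ...plus whichever default panels you want to keep
# ]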
|
bsd-3-clause
| -7,090,675,488,362,132,000
| 30.266667
| 99
| 0.585288
| false
| 4.332564
| false
| false
| false
|
google-research/google-research
|
poem/cv_mim/train.py
|
1
|
1327
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pose representation training with TFRecord inputs."""
from absl import app
from absl import flags
import tensorflow as tf
from poem.core import common
from poem.core import input_generator
from poem.core import keypoint_profiles
from poem.core import tfe_input_layer
from poem.cv_mim import train_base
FLAGS = flags.FLAGS
flags.adopt_module_key_flags(train_base)
def main(_):
train_base.run(
input_dataset_class=tf.data.TFRecordDataset,
common_module=common,
keypoint_profiles_module=keypoint_profiles,
input_example_parser_creator=tfe_input_layer.create_tfe_parser,
keypoint_preprocessor_3d=input_generator.preprocess_keypoints_3d)
if __name__ == '__main__':
app.run(main)
|
apache-2.0
| 8,310,518,479,641,039,000
| 29.860465
| 74
| 0.75584
| false
| 3.605978
| false
| false
| false
|
same-moon/smutils
|
windows/juggler/pyhook/hookit.py
|
1
|
9077
|
import os, collections, threading, Queue, time, commands  # commands is used by CommandThread.run
import win32api
import win32com.client
import pythoncom, pyHook
"""
MessageName: key down
Message: 256
Time: 112416317
Window: 197094
WindowName: Emacs/Python <ruttbe@LAGER> hookit.py
Ascii: 120 x
Key: X
KeyID: 88
ScanCode: 45
Extended: 0
Injected: 0
Alt 0
Transition 0
"""
# Globals
last_window = None
bufs = { 'Emacs': { 'active_window' : None,
'buf': [] },
'VS': { 'active_window' : None,
'buf': [] } }
valid_abbrev_chars = [chr(x) for x in range(ord('0'), ord('0') + 10)]
valid_abbrev_chars += [chr(x) for x in range(ord('A'), ord('A') + 26)]
valid_abbrev_chars += [chr(x) for x in range(ord('a'), ord('a') + 26)]
shell = win32com.client.Dispatch("WScript.Shell")
JUGGLER_DEFNS = os.getenv("JUGGLER_DEFNS")
JUGGLER_AUTOHOTKEY_SCRIPT = os.getenv("JUGGLER_AUTOHOTKEY_SCRIPT")
assert JUGGLER_DEFNS
assert JUGGLER_AUTOHOTKEY_SCRIPT
langs = 'global python javascript'.split()
expanding_now = False
# map from lang to abbrev to text
defns = collections.defaultdict(dict)
class CommandThread(threading.Thread):
def __init__(self, cmd):
threading.Thread.__init__(self)
self.cmd = cmd
def run(self):
(status, output) = commands.getstatusoutput(self.cmd)
print 'CommandThread: (status, output) of [%s] is (%d, %s)' % \
(self.cmd, status, output)
class HelperThread(threading.Thread):
def __init__(self, q):
threading.Thread.__init__(self)
self.q = q
def run(self):
while True:
item = q.get()
if item is None:
break
action, arg = item
if action == 'SendKeys':
time.sleep(1)
shell.SendKeys(arg)
q = Queue.Queue()
helper = HelperThread(q)
helper.setDaemon(True)
helper.start()
def process_lang(lang):
for fn in os.listdir(os.path.join(JUGGLER_DEFNS, lang)):
fn2 = os.path.join(JUGGLER_DEFNS, lang, fn)
with open(fn2) as f:
txt = f.read()
defns[lang][fn] = txt
print defns
def sendkeys(keystrokes):
print 'sendkeys(%s) called' % (repr(keystrokes))
shell.SendKeys(keystrokes)
# see http://ss64.com/vb/sendkeys.html or better yet https://msdn.microsoft.com/en-us/library/aa266279%28v=vs.60%29.aspx
def executeAbbrevEmacs(lang, abbrev):
global expanding_now
if lang in defns:
if abbrev in defns[lang]:
assert not expanding_now
expanding_now = True
replacement = defns[lang][abbrev]
sendkeys("{BACKSPACE}" * len(abbrev))
replacements = []
lastChar = None
for char in replacement:
if char == '\n':
if len(replacements) > 0 and replacements[-1] == '\r':
replacements[-1] = '\r\n'
else:
replacements.append('\n')
elif char == 'l' and lastChar == '\\':
replacements[-1] = '\\l'
elif char == 'r' and lastChar == '\\':
replacements[-1] = '\\r'
elif char == '>' and len(replacements) >= 9 and replacements[-9:] == ['<','e','n','d','p','o','i','n','t']:
replacements[-9:] = ['<endpoint>']
else:
replacements.append(char)
lastChar = char
print 'replacements are', replacements
endpointActive = False
for sequence in replacements:
if sequence in ['\n', '\r\n']:
sendkeys("^o^f")
elif sequence == r'\l':
sendkeys("{Left}")
elif sequence == r'\r':
sendkeys("{Right}")
elif sequence == ':':
sendkeys("^q:")
elif sequence in ['{', '}', '[', ']', '+', '^', '%', '~', '(', ')']:
sendkeys("{%s}" % (sequence))
elif sequence == '<endpoint>':
sendkeys("%xjuggler-make-endpoint-marker{ENTER}")
endpointActive = True
else:
sendkeys(sequence)
if endpointActive:
sendkeys("%xjuggler-goto-endpoint-marker{ENTER}")
expanding_now = False
return True
return False
def executeAbbrevVS(lang, abbrev):
# global executing_now
# if lang in defns:
# if abbrev in defns[lang]:
# replacement = defns[lang][abbrev]
# executing_now = True
# shell.SendKeys("{BACKSPACE}" * len(abbrev))
# replacements = []
# for char in replacement:
# if char == '\n':
# if len(replacements) > 0 and replacements[-1] == '\r':
# replacements[-1] = '\r\n'
# else:
# replacements.append('\n')
# else:
# replacements.append(char)
# print 'replacements are', replacements
# for sequence in replacements:
# if sequence in ['\n', '\r\n']:
# shell.SendKeys("{ENTER}")
# else:
# shell.SendKeys(sequence)
# executing_now = False
# return True
return False
"""
x bar7 foo foo foo foo foo foo
bar7
ff
lklkjlkj bar7
bar7
x y z bar7
if foo:
"""
def get_editor(event):
window = event.WindowName
if window.startswith("Emacs/"):
return 'Emacs'
elif 'Microsoft Visual Studio' in window:
return 'VS'
return None
# import win32ui
# wnd = win32ui.GetForegroundWindow()
# print wnd.GetWindowText()
def get_lang(event):
if event.WindowName.startswith("Emacs/") and '<' in event.WindowName:
return event.WindowName.split('Emacs/')[1].split('<')[0].strip().lower()
return None
def try_expand_abbrev(editor, candidate, event):
lang = get_lang(event)
executed = False
cmd = 'executeAbbrev' + editor
execution_cmd = globals()[cmd]
if lang:
executed = execution_cmd(lang, candidate)
print '%s(%s, %s) returned %s' % (cmd, lang, candidate, executed)
if not executed:
print '%s(%s, %s) in global returned %s' % (cmd, 'global', candidate, executed)
executed = execution_cmd('global', candidate)
return executed
def intercepted_VS_keys(event):
if event.Ascii == 14: # Ctrl-n
# sendkeys('{DOWN}')
q.put(('SendKeys', "{DOWN}"))
elif event.Ascii == 16: # Ctrl-p
# sendkeys('{UP}')
q.put(('SendKeys', "{UP}"))
else:
return False
return True
def OnKeyUp(event):
print 'key up event:', event
if not q.empty():
try:
item = q.get_nowait()
action, arg = item
if action == "SendKeys":
sendkeys(arg)
except Queue.Empty:
pass
def OnKeyDown(event):
if expanding_now:
return True
print 'key down event:', event
editor = get_editor(event)
if not editor:
return True
global last_window, bufs
print bufs
# intercept for VS first
if editor == 'VS':
intercepted = intercepted_VS_keys(event)
if intercepted:
return False
prev_window, buf = bufs[editor]['active_window'], bufs[editor]['buf']
if prev_window is None:
assert buf == []
bufs[editor]['active_window'] = event.Window
elif event.Window != prev_window:
bufs[editor]['active_window'] = event.Window
bufs[editor]['buf'] = []
return True
k = event.Ascii
if chr(k) in valid_abbrev_chars:
buf.append(chr(k))
else:
if chr(k) == ' ':
candidate = ''.join(buf)
executed = try_expand_abbrev(editor, candidate, event)
bufs[editor]['buf'] = []
if executed:
return False # disable other handlers
bufs[editor]['buf'] = []
# print 'MessageName:',event.MessageName
# print 'Message:',event.Message
# print 'Time:',event.Time
# print 'Window:',event.Window
# print 'WindowName:',event.WindowName
print 'Ascii:', event.Ascii, chr(event.Ascii)
# print 'Key:', event.Key
# print 'KeyID:', event.KeyID
# print 'ScanCode:', event.ScanCode
# print 'Extended:', event.Extended
# print 'Injected:', event.Injected
# print 'Alt', event.Alt
# print 'Transition', event.Transition
# print '---'
# return True to pass the event to other handlers
return True
def main():
for lang in os.listdir(JUGGLER_DEFNS):
if lang in langs:
process_lang(lang)
# create a hook manager
hm = pyHook.HookManager()
# watch for all key events
hm.KeyDown = OnKeyDown
hm.KeyUp = OnKeyUp
# set the hook
hm.HookKeyboard()
# wait forever
pythoncom.PumpMessages()
if __name__ == "__main__": # when run as a script
main()
|
mit
| 169,636,825,907,364,860
| 30.19244
| 123
| 0.539495
| false
| 3.735391
| false
| false
| false
|
YiqunPeng/Leetcode-pyq
|
solutions/298BinaryTreeLongestConsecutiveSequence.py
|
1
|
1902
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
# dfs, post order, no global variable
def longestConsecutive(self, root):
"""
:type root: TreeNode
:rtype: int
"""
def dfs(node, ans):
if not node:
return 0, 0
if not node.left and not node.right:
return 1, max(ans, 1)
res = 1
left, la = dfs(node.left, ans)
right, ra = dfs(node.right, ans)
if node.left and node.left.val == node.val + 1:
res = max(left + 1, res)
if node.right and node.right.val == node.val + 1:
res = max(right + 1, res)
return res, max(ans, la, ra, res)
return dfs(root, 0)[1]
# dfs, post order traverse, use global variable
# def __init__(self):
# self.ans = 0
# def longestConsecutive(self, root):
# """
# :type root: TreeNode
# :rtype: int
# """
# def dfs(node):
# if not node:
# return 0
# if not node.left and not node.right:
# self.ans = max(self.ans, 1)
# return 1
# res = 1
# left = dfs(node.left)
# right = dfs(node.right)
# if node.left and node.left.val == node.val + 1:
# res = max(left + 1, res)
# if node.right and node.right.val == node.val + 1:
# res = max(right + 1, res)
# self.ans = max(self.ans, res)
# return res
# dfs(root)
# return self.ans
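# A hedged usage sketch (not part of the original solution): TreeNode below just
# mirrors the commented-out definition at the top of this file, and the tree
# 1 -> 3 -> (2, 4), 4 -> 5 has longest consecutive path 3-4-5, i.e. length 3.
if __name__ == '__main__':
    class TreeNode:
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None
    root = TreeNode(1)
    root.right = TreeNode(3)
    root.right.left = TreeNode(2)
    root.right.right = TreeNode(4)
    root.right.right.right = TreeNode(5)
    assert Solution().longestConsecutive(root) == 3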
|
gpl-3.0
| -9,160,639,779,577,161,000
| 27.402985
| 63
| 0.419558
| false
| 3.819277
| false
| false
| false
|
UCL-dataspring/cluster-code
|
bluclobber/model/corpus.py
|
1
|
2359
|
import glob
import os
import traceback
from book import Book
from archive import Archive
from functools import reduce
from dataset import DataSet
from ..harness.mapreduce import MapReduce
from ..harness.utils import merge
from ..harness.decomposer import Decomposer
import logging
class Corpus(DataSet):
def __init__(self, path=None, communicator=None):
if type(path)==str:
path+='/*.zip'
super(Corpus, self).__init__(Archive,path,communicator )
def analyse_by_book_in_archives(self, mapper, reducer, subsample=1, shuffler=None):
partition=Corpus(Decomposer(self.paths, self.communicator))
harness=MapReduce(self.loadingMap(mapper), reducer, self.communicator,
prepartitioned=True, subsample=subsample, shuffler=shuffler )
return harness.execute(partition)
def analyse_by_book(self, mapper, reducer, subsample=1, shuffler=None):
harness = MapReduce(self.loadingMap(mapper), reducer, self.communicator, subsample, shuffler=shuffler)
return harness.execute(self)
def analyse(self,mapper, reducer, subsample=1, bybook=False, shuffler=None):
if bybook:
self.logger.info("Analysing by book")
return self.analyse_by_book_in_archives(mapper, reducer, subsample, shuffler)
else:
self.logger.info("Analysing by archive")
return self.analyse_by_file(self.loadingMap(mapper), reducer, subsample, shuffler)
def loadingMap(self, mapper):
def _map(book):
self.logger.debug("Loading book")
try:
book.load()
except Exception as exception:
self.logger.warn("Problem loading " + book.code + " in " + book.archive.path)
self.logger.warn(traceback.format_exc())
self.logger.warn(str(exception))
self.logger.debug("Loaded book")
try:
self.logger.debug("Considering book")
result= mapper(book)
self.logger.debug("Considered book")
return result
except Exception as exception:
self.logger.warn("Problem parsing " + book.code + " in " + book.archive.path)
self.logger.warn(traceback.format_exc())
self.logger.warn(str(exception))
return _map
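# A minimal usage sketch, left as a comment because it assumes an MPI-style
# communicator and a directory of zip archives outside this repository:
#   corpus = Corpus('/path/to/archives', communicator)   # hypothetical path
#   total = corpus.analyse(mapper=lambda book: 1,
#                          reducer=lambda a, b: a + b,
#                          bybook=True)
# analyse() wraps the user-supplied mapper with loadingMap(), so each Book is
# loaded, and any load or parse error is logged, before the mapper sees it.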
|
mit
| 1,672,840,677,074,940,200
| 38.316667
| 110
| 0.635863
| false
| 3.998305
| false
| false
| false
|
canesin/FEVal
|
feval/ShapeFunctions.py
|
1
|
37904
|
# -*- coding: iso-8859-1 -*-
#============================================================
#
# This file is part of FEval, a module for the
# evaluation of Finite Element results
#
#  License:  FEval is provided under the GNU General Public License (GPL)
#
# Authors: Martin Lüthi, tinu@tnoo.net
#
# Homepage: http://feval.sourceforge.net
#
# History: long, long, most of it in 2000
#            2001.09.21 (ml): Code cleaned up for initial release
#
# Purpose: Provide Finite Element shape functions
#
#============================================================
import numpy as N
try:
import scipy.linalg as LA
except:
print 'could not import scipy.linalg!'
pass
## calculate coordinates and weights of Gauss points
## (cf. Numerical Recipes in C++, p.157)
##
## Results are the same as in Bathe 1982 for the first 7 digits
#============================================================================
# try to use Psyco (psyco.sourceforge.net)
# if configured, this will speed up things considerably
# try:
# import psyco
# from psyco.classes import *
# except ImportError:
# class _psyco:
# def jit(self): pass
# def bind(self, f): pass
# def proxy(self, f): return f
# psyco = _psyco()
class ShapeFunctionPrototype:
    """Defines the prototype of an interpolation function
cornernodes defines the nodes at the geometrical corner
We use MARC numbering, i.e. corner nodes first, anticlockwise
"""
dim, nnodes = 0, 0 # dimension and number of nodes
nsides = 0 # number of element sides
cornernodes = N.array([]) # list of corner nodes
sidenodes = N.array([]) # list of nodes on the side with index
    nextnodes = N.array([]) # list of nodes that are adjacent to the node with index
lcoordGauss = None
gaussShape = None
gaussShapeInv = None
def __init__(self):
self.f = N.zeros(self.nnodes, N.float_)
self.df = N.zeros( (self.dim, self.nnodes), N.float_)
def __call__(self, args):
return self.calcShape(args)
    # must be overridden by the shape function
def calcShape(self, lcoord):
return None
    # must be overridden by the shape function
def calcShapeDeriv(self, lcoord):
return None
def nextPattern(self, lcoord):
return [0]
def calcGauss(self):
"""Calculate the inverse of the shape functions at the Gauss points"""
a = []
for lc in self.lcoordGauss:
a.append(N.array(self.calcShape(lc)))
self.gaussShape = N.take(N.array(a), self.cornernodes, 1)
#self.gaussShapeInv = LA.inverse(self.gaussShape)
self.gaussShapeInv = LA.pinv(self.gaussShape)
class ShapeFunction_Line2(ShapeFunctionPrototype):
"""Element function for linear element defined
0-------1
"""
name = 'Line2'
dim, nnodes = 2, 2
cornernodes = N.array([0,1])
nsides = 2
sidetype = 'Point1'
sidenodes = N.array(
[[0],
[1],
])
nextnodes = N.array(
[[1],
[0],
])
gaussDist = 0.577350269189626 # 1./N.sqrt(3)
lcoordGauss = N.array([ [-1.],
[ 1.],
])*gaussDist
def calcShape(self, lcoord):
x = lcoord[0]
return 0.5*N.array([
1.0-x,
1.0+x ])
def calcShapeDeriv(self, lcoord):
x = lcoord[0]
return N.array([ -0.5, 0.5 ])
def nextPattern(self, lcoord):
x = lcoord * 1.01
if x > 1: return [1]
elif x < -1: return [0]
else: return None
class ShapeFunction_Tri3(ShapeFunctionPrototype):
"""Element function for linear element defined in MARC Manual B2.4-1,
Taylor&Hughes (1981), p. 49
2
/ \
/ \
/ \
0-------1
"""
name = 'Tri3'
dim, nnodes = 2, 3
cornernodes = N.array([0,1,2])
nsides = 3
sidetype = 'Line2'
sidenodes = N.array(
[[0,1],
[1,2],
[2,0],
])
nextnodes = N.array(
[[1,2],
[0,2],
[0,1],
])
triangles = N.array([[0,1,2]])
    #!!!! wrong, this is still from quads
gaussDist = 0.577350269189626 # 1./N.sqrt(3)
lcoordGauss = N.array([ [-1., -1.],
[ 1., -1.],
[-1., 1.],
[ 1., 1.] ])*gaussDist
# def calcShape(self, lcoord):
# x, y = lcoord
# return N.array([
# 1.-x-y,
# x,
# y ])
def calcShape(self, lcoord):
x, y = lcoord
# 0.5*(x+1) [-1,1] -> x [0,1]
x = 0.5*(x+1)
y = 0.5*(y+1)
return N.array([
1.-x-y,
x,
y ])
def calcShapeDeriv(self, lcoord):
x, y = lcoord
self.df[0,0] = -0.5
self.df[0,1] = 0.5
self.df[0,2] = 0.
self.df[1,0] = -0.5
self.df[1,1] = 0.
self.df[1,2] = 0.5
return self.df
def nextPattern(self, lcoord):
x,y = lcoord / max(N.absolute(lcoord)) * 1.01
if x+y > 1: return [1,2]
elif y < 0: return [0,1]
elif x < 0: return [2,0]
else: return None
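# A small sanity check for Tri3 (a hedged example, not in the original module):
# the local corner lcoord = (-1, -1) maps to the first barycentric corner, so
#   ShapeFunction_Tri3()([-1., -1.])   # -> array([1., 0., 0.])
# and for any local coordinate the three returned values sum to 1.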
class ShapeFunction_Tri6(ShapeFunctionPrototype):
"""Element function for linear element defined in MARC Manual B2.4-1,
Taylor&Hughes (1981), p. 49
2
/ \
5 4
/ \
0---3---1
"""
name = 'Tri6'
dim, nnodes = 2, 6
cornernodes = N.array([0,1,2])
nsides = 3
sidetype = 'Line3'
sidenodes = N.array(
[[0,3,1],
[1,4,2],
[2,5,0],
])
nextnodes = N.array(
[[1,2],
[0,2],
[0,1],
])
#triangles = N.array([[0,1,2]])
    #!!!! wrong, this is still from quads
gaussDist = 0.577350269189626 # 1./N.sqrt(3)
lcoordGauss = N.array([ [-1., -1.],
[ 1., -1.],
[-1., 1.],
[ 1., 1.] ])*gaussDist
def calcShape(self, lcoord):
xi1, xi2 = lcoord
# 0.5*(x+1) [-1,1] -> x [0,1]
y = 0.5*(xi1+1.)
z = 0.5*(xi2+1.)
x = 1. - y - z
return N.array([
2.*x*(x-0.5),
2.*y*(y-0.5),
2.*z*(z-0.5),
4.*y*z,
4.*z*x,
4.*x*y,
])
def calcShapeDeriv(self, lcoord):
        # replace the original undefined-name crash marker ('stop') with an explicit
        # error; the derivative expressions below are known to be incorrect
        raise NotImplementedError("ShapeFunction_Tri6.calcShapeDeriv is not implemented correctly")
xi1, xi2 = lcoord
# 0.5*(x+1) [-1,1] -> x [0,1]
zeta1 = 0.5*(xi1+1.)
zeta2 = 0.5*(xi2+1.)
zeta0 = 1. - zeta1 - zeta2
self.df[0,0] = 4.*zeta0-1.
self.df[0,1] = 4.*zeta1-1.
self.df[0,2] = 4.*zeta2-1.
self.df[0,3] = 4.*zeta2-1.
self.df[0,4] = 4.*zeta2-1.
self.df[0,5] = 4.*zeta2-1.
self.df[1,0] = -0.5
self.df[1,1] = 0.
self.df[1,2] = 0.5
return self.df
def nextPattern(self, lcoord):
x,y = lcoord / max(N.absolute(lcoord)) * 1.01
if x+y > 1: return [1,2]
elif y < 0: return [0,1]
elif x < 0: return [2,0]
else: return None
# def nextPattern(self, lcoord):
# xi1, xi2 = lcoord / max(N.absolute(lcoord)) * 1.01
# # 0.5*(x+1) [-1,1] -> x [0,1]
# y = 0.5*(xi1+1.)
# z = 0.5*(xi2+1.)
# x = 1. - y - z
# #x,y = lcoord
# if x < 0: return [0,1]
# elif y < 0: return [1,2]
# elif z < 0: return [2,0]
# else: return None
class ShapeFunction_Quad4(ShapeFunctionPrototype):
"""Element function for linear element defined in MARC Manual B2.4-1,
Taylor&Hughes (1981), p. 49
3-------2
| |
| |
| |
0-------1
"""
name = 'Quad4'
dim, nnodes = 2, 4
cornernodes = N.array([0,1,2,3])
nsides = 4
sidetype = 'Line2'
sidenodes = N.array(
[[0,1],
[1,2],
[2,3],
[3,0],
])
nextnodes = N.array(
[[1,3],
[0,2],
[1,3],
[0,2],
])
triangles = N.array([[0,1,3],
[1,2,3]])
gaussDist = 0.577350269189626 # 1./N.sqrt(3)
lcoordGauss = N.array([ [-1., -1.],
[ 1., -1.],
[-1., 1.],
[ 1., 1.] ])*gaussDist
def calcShape(self, lcoord):
x, y = lcoord
xy = x*y
return 0.25*N.array([
1.0-x-y+xy,
1.0+x-y-xy,
1.0+x+y+xy,
1.0-x+y-xy ])
def calcShapeDeriv(self, lcoord):
x, y = lcoord
return 0.25*N.array([
[ -1.0+y, 1.0-y, 1.0+y, -1.0-y],
[ -1.0+x, -1.0-x, 1.0+x, 1.0-x ]])
def nextPattern(self, lcoord):
x,y = lcoord / max(N.absolute(lcoord)) * 1.01
if x > 1: return [1,2]
elif x < -1: return [3,0]
elif y > 1: return [2,3]
elif y < -1: return [0,1]
else: return None
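# A quick check for Quad4 (a hedged example, not in the original module): at the
# element centre every node weight is 0.25 and the four weights sum to 1
# (partition of unity), while each row of calcShapeDeriv sums to 0:
#   ShapeFunction_Quad4()([0., 0.])   # -> array([0.25, 0.25, 0.25, 0.25])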
class ShapeFunction_Quad8(ShapeFunctionPrototype):
"""Element function for quadratic element defined in MARC Manual B2.7-1
Taylor&Hughes (1981), p. 50
Element nodes numbering is the same as for MARC
3-----6-----2
|(5) (6) (7)|
| |
7(3) (4)5
| |
|(0) (1) (2)|
0-----4-----1
"""
name = 'Quad8'
dim, nnodes = 2, 8
cornernodes = [0,1,2,3]
nsides = 4
sidetype = 'Line3'
sidenodes = N.array(
[[0,4,1],
[1,5,2],
[2,6,3],
[3,7,0],
])
nextnodes = N.array(
[[1,3],
[0,2],
[1,3],
[0,2],
])
triangles = N.array([[7,0,4],
[4,1,5],
[5,2,6],
[6,3,7],
[7,4,5],
[5,6,7]])
gaussDist = 0.774596669241483 # = N.sqrt(0.6)
lcoordGauss = N.array([ [-1., -1.],
[ 0., -1.],
[ 1., -1.],
[-1., 0.],
[ 1., 0.],
[-1., 1.],
[ 0., 1.],
[ 1., 1.] ])*gaussDist
def calcShape(self, lcoord):
x, y = lcoord
xx, yy, xy = x*x, y*y, x*y
xxy, xyy = xx*y, x*yy
return 0.25*N.array([
# the corner nodes
(-1.0+xy+xx+yy-xxy-xyy),
(-1.0-xy+xx+yy-xxy+xyy),
(-1.0+xy+xx+yy+xxy+xyy),
(-1.0-xy+xx+yy+xxy-xyy),
# the mid-side nodes
2.*(1.0-y-xx+xxy),
2*(1.0+x-yy-xyy),
2*(1.0+y-xx-xxy),
2*(1.0-x-yy+xyy)])
def calcShapeDeriv(self, lcoord):
x, y = lcoord
xx, yy, xy = x*x, y*y, x*y
xxy, xyy, xy2 = xx*y, x*yy, xy*xy
return 0.25*N.array([
[
# the corner nodes
y+xx-xy2-yy,
y+xx-xy2+yy,
y+xx+xy2+yy,
y+xx+xy2-yy,
# the mid-side nodes
(-x+xy)*4.,
(1.0-yy)*2.,
(-x-xy)*4.,
(-1.0+yy)*2.,
],[
# the corner nodes
x+yy-xx-xy2,
x+yy-xx+xy2,
x+yy+xx+xy2,
x+yy+xx-xy2,
# the mid-side nodes
(-1.0+xx)*2.,
(-y-xy)*4.,
(1.0-xx)*2.,
(-y+xy)*4.]])
def nextPattern(self, lcoord):
x,y = lcoord / max(N.absolute(lcoord)) * 1.01
if x > 1: return [1,2]
elif x < -1: return [3,0]
elif y > 1: return [2,3]
elif y < -1: return [0,1]
else: return None
class ShapeFunction_Quad9(ShapeFunctionPrototype):
"""Element function for quadratic element defined in MARC Manual B2.7-1
Taylor&Hughes (1981), p. 50
Element nodes numbering is the same as for MARC
3-----6-----2
|(5) (6) (7)|
| |
7(3) 8 (4)5
| |
|(0) (1) (2)|
0-----4-----1
"""
name = 'Quad9'
dim, nnodes = 2, 9
cornernodes = [0,1,2,3]
nsides = 4
sidetype = 'Line3'
sidenodes = N.array(
[[0,4,1],
[1,5,2],
[2,6,3],
[3,7,0],
])
nextnodes = N.array(
[[1,3],
[0,2],
[1,3],
[0,2],
])
triangles = N.array([[7,0,4],
[4,1,5],
[5,2,6],
[6,3,7],
[7,4,5],
[5,6,7]])
gaussDist = 0.774596669241483 # = N.sqrt(0.6)
lcoordGauss = N.array([ [-1., -1.],
[ 0., -1.],
[ 1., -1.],
[-1., 0.],
[ 1., 0.],
[-1., 1.],
[ 0., 1.],
[ 1., 1.] ])*gaussDist
def calcShape(self, lcoord):
        # explicit error instead of printing and then hitting an undefined name ('stop')
        raise NotImplementedError("ShapeFunction_Quad9.calcShape is not implemented correctly")
x, y = lcoord
xx, yy, xy = x*x, y*y, x*y
xxy, xyy = xx*y, x*yy
return 0.25*N.array([
# the corner nodes
(-1.0+xy+xx+yy-xxy-xyy),
(-1.0-xy+xx+yy-xxy+xyy),
(-1.0+xy+xx+yy+xxy+xyy),
(-1.0-xy+xx+yy+xxy-xyy),
# the mid-side nodes
2.*(1.0-y-xx+xxy),
2*(1.0+x-yy-xyy),
2*(1.0+y-xx-xxy),
2*(1.0-x-yy+xyy)])
def calcShapeDeriv(self, lcoord):
        # explicit error instead of printing and then hitting an undefined name ('stop')
        raise NotImplementedError("ShapeFunction_Quad9.calcShapeDeriv is not implemented correctly")
x, y = lcoord
xx, yy, xy = x*x, y*y, x*y
xxy, xyy, xy2 = xx*y, x*yy, xy*xy
return 0.25*N.array([
[
# the corner nodes
y+xx-xy2-yy,
y+xx-xy2+yy,
y+xx+xy2+yy,
y+xx+xy2-yy,
# the mid-side nodes
(-x+xy)*4.,
(1.0-yy)*2.,
(-x-xy)*4.,
(-1.0+yy)*2.,
],[
# the corner nodes
x+yy-xx-xy2,
x+yy-xx+xy2,
x+yy+xx+xy2,
x+yy+xx-xy2,
# the mid-side nodes
(-1.0+xx)*2.,
(-y-xy)*4.,
(1.0-xx)*2.,
(-y+xy)*4.]])
def nextPattern(self, lcoord):
x,y = lcoord / max(N.absolute(lcoord)) * 1.01
if x > 1: return [1,2]
elif x < -1: return [3,0]
elif y > 1: return [2,3]
elif y < -1: return [0,1]
else: return None
class ShapeFunction_Hex8(ShapeFunctionPrototype):
"""Element function for linear element defined in MARC Manual B2.4-1,
Taylor&Hughes (1981), p. 49
The integration points (in parentheses) are located at unexpected
locations (for MARC)!
7---------------6
/|(6) (7)/|
/ | / |
/ | / |
/ | / |
/ (4)| (5)/ |
4---------------5 |
| | | |
| 3---------|-----2
| / (2) | (3)/
| / | /
| / | /
| / | /
|/ (0) (1)|/
0---------------1
7-------6
/| /|
/ | / |
4-------5 |
| 3----|--2
| / | /
|/ |/
0-------1
"""
name = 'Hex8'
dim, nnodes = 3, 8
cornernodes = [0,1,2,3,4,5,6,7]
nsides = 6
sidetype = 'Quad4'
sidenodes = N.array(
[[0,3,2,1],
[0,1,5,4],
[1,2,6,5],
[2,3,7,6],
[3,0,4,7],
[4,5,6,7],
])
nextnodes = N.array(
[[1,3,4],
[0,2,5],
[1,3,6],
[0,2,7],
[0,5,7],
[1,4,6],
[2,5,7],
[3,4,6],
])
gaussDist = 0.577350269189626 # = 1./N.sqrt(3)
lcoordGauss = N.array([ [-1., -1., -1.],
[ 1., -1., -1.],
[-1., 1., -1.],
[ 1., 1., -1.],
[-1., -1., 1.],
[ 1., -1., 1.],
[-1., 1., 1.],
[ 1., 1., 1.]])*gaussDist
# lcoordGauss = N.array([ [-1., -1., -1.],
# [ 1., -1., -1.],
# [ 1., 1., -1.],
# [-1., 1., -1.],
# [-1., -1., 1.],
# [ 1., -1., 1.],
# [ 1., 1., 1.],
# [-1., 1., 1.]])*gaussDist
def calcShape(self, lcoord):
x, y, z = lcoord
xy, xz, yz = x*y, x*z, y*z
xyz = x*y*z
return 0.125*N.array([
1.0-x-y-z+xy+xz+yz-xyz, # -1,-1,-1,
1.0+x-y-z-xy-xz+yz+xyz, # 1,-1,-1,
1.0+x+y-z+xy-xz-yz-xyz, # 1, 1,-1,
1.0-x+y-z-xy+xz-yz+xyz, # -1, 1,-1,
1.0-x-y+z+xy-xz-yz+xyz, # -1,-1, 1,
1.0+x-y+z-xy+xz-yz-xyz, # 1,-1, 1,
1.0+x+y+z+xy+xz+yz+xyz, # 1, 1, 1,
1.0-x+y+z-xy-xz+yz-xyz]) # -1, 1, 1,
def calcShapeDeriv(self, lcoord):
x, y, z = lcoord
xy, xz, yz = x*y, x*z, y*z
self.df[0,0] = -1.0+y+z-yz
self.df[1,0] = -1.0+x+z-xz
self.df[2,0] = -1.0+x+y-xy
self.df[0,1] = 1.0-y-z+yz
self.df[1,1] = -1.0-x+z+xz
self.df[2,1] = -1.0-x+y+xy
self.df[0,2] = 1.0+y-z-yz
self.df[1,2] = 1.0+x-z-xz
self.df[2,2] = -1.0-x-y-xy
self.df[0,3] = -1.0-y+z+yz
self.df[1,3] = 1.0-x-z+xz
self.df[2,3] = -1.0+x-y+xy
self.df[0,4] = -1.0+y-z+yz
self.df[1,4] = -1.0+x-z+xz
self.df[2,4] = 1.0-x-y+xy
self.df[0,5] = 1.0-y+z-yz
self.df[1,5] = -1.0-x-z-xz
self.df[2,5] = 1.0+x-y-xy
self.df[0,6] = 1.0+y+z+yz
self.df[1,6] = 1.0+x+z+xz
self.df[2,6] = 1.0+x+y+xy
self.df[0,7] = -1.0-y-z-yz
self.df[1,7] = 1.0-x+z-xz
self.df[2,7] = 1.0-x+y-xy
self.df = self.df/8.0
return self.df
def nextPattern(self, lcoord):
x,y,z = lcoord / max(N.absolute(lcoord)) * 1.01
if x > 1: return self.sidenodes[2] #[1,2,6,5]
elif x < -1: return self.sidenodes[4] #[0,4,7,3]
elif y > 1: return self.sidenodes[3] #[2,3,7,6]
elif y < -1: return self.sidenodes[1] #[0,1,5,4]
elif z > 1: return self.sidenodes[5] #[4,5,6,7]
elif z < -1: return self.sidenodes[0] #[0,3,2,1]
else: return None
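# The eight expanded expressions in Hex8.calcShape above are the standard trilinear
# shape functions N_i = (1 + x*x_i)*(1 + y*y_i)*(1 + z*z_i)/8, where (x_i, y_i, z_i)
# in {-1, +1}^3 is the corner listed in the trailing comment of each line; at any
# local coordinate inside the element the eight values sum to 1.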
class ShapeFunction_Hex20(ShapeFunctionPrototype):
"""Element function for linear element defined in MARC Manual B2.4-1,
Taylor&Hughes (1981), p. 49
Here we adopt the numbering from Libmesh, i.e. the second level
    of second order nodes comes before the 3rd level
The integration points (in parentheses) are located at unexpected
locations (for MARC)!
# 7-------14------6
# /|(6) (7)/|
# / | / |
# 15 | 13 |
# / 19 / 18
# / (4)| (5)/ |
# 4-------12------5 |
# | | | |
# | 3------10-|-----2
# | / (2) | (3)/
# 16 / 17 /
# | 11 | 9
# | / | /
# |/ (0) (1)|/
# 0-------8-------1
7-------18------6
/|(6) (7)/|
/ | / |
19 | 17 |
/ 15 / 14
/ (4)| (5)/ |
4-------16------5 |
| | | |
| 3------10-|-----2
| / (2) | (3)/
12 / 13 /
| 11 | 9
| / | /
|/ (0) (1)|/
0-------8-------1
16 - 12
17 - 13
18 - 14
19 - 15
12 - 16
13 - 17
14 - 18
15 - 19
"""
name = 'Hex20'
dim, nnodes = 3, 20
cornernodes = [0,1,2,3,4,5,6,7]
nsides = 6
sidetype = 'Quad8'
sidenodes = N.array(
[[0,3,2,1,11,10,9,8], # side 0
[0,1,5,4,8, 13, 16, 12], # side 1
[1,2,6,5,9, 14, 17, 13], # side 2
[2,3,7,6,10, 15, 18, 14], # side 3
[3,0,4,7,11, 12, 19, 15], # side 4
[4,5,6,7,16, 17, 18, 19] # side 5
])
nextnodes = N.array(
[[1,3,4],
[0,2,5],
[1,3,6],
[0,2,7],
[0,5,7],
[1,4,6],
[2,5,7],
[3,4,6],
])
gaussDist = 0.774596669241483 # = N.sqrt(0.6)
lcoordGauss = N.array([ [-1., -1., -1.],
[ 1., -1., -1.],
[-1., 1., -1.],
[ 1., 1., -1.],
[-1., -1., 1.],
[ 1., -1., 1.],
[-1., 1., 1.],
[ 1., 1., 1.]])*gaussDist
def calcShape(self, lcoord):
x, y, z = lcoord
xy, xz, yz = x*y, x*z, y*z
xx, yy, zz = x*x, y*y, z*z
xyz, xxy, xxz, xyy = xy*z, xx*y, xx*z, x*yy
yyz, xzz, yzz = yy*z, x*zz, y*zz
xxyz, xyyz, xyzz = xxy*z, xyy*z, xyz*z
self.f[0] = (x+y+z-xyz+xx+yy+zz-xxy-xyy-xxz-xzz-yyz-yzz+ \
xxyz+xyyz+xyzz-2.0)/8.0
self.f[1] = (-x+y+z+xyz+xx+yy+zz-xxy+xyy-xxz+xzz-yyz-yzz+ \
xxyz-xyyz-xyzz-2.0)/8.0
self.f[2] = (-x-y+z-xyz+xx+yy+zz+xxy+xyy-xxz+xzz-yyz+yzz- \
xxyz-xyyz+xyzz-2.0)/8.0
self.f[3] = (x-y+z+xyz+xx+yy+zz+xxy-xyy-xxz-xzz-yyz+yzz- \
xxyz+xyyz-xyzz-2.0)/8.0
self.f[4] = (x+y-z+xyz+xx+yy+zz-xxy-xyy+xxz-xzz+yyz-yzz- \
xxyz-xyyz+xyzz-2.0)/8.0
self.f[5] = (-x+y-z-xyz+xx+yy+zz-xxy+xyy+xxz+xzz+yyz-yzz- \
xxyz+xyyz-xyzz-2.0)/8.0
self.f[6] = (-x-y-z+xyz+xx+yy+zz+xxy+xyy+xxz+xzz+yyz+yzz+ \
xxyz+xyyz+xyzz-2.0)/8.0
self.f[7] = (x-y-z-xyz+xx+yy+zz+xxy-xyy+xxz-xzz+yyz+yzz+ \
xxyz-xyyz-xyzz-2.0)/8.0
self.f[8] = (1.0-z-y+yz-xx+xxz+xxy-xxyz)/4.0
self.f[9] = (1.0-z+x-xz-yy+yyz-xyy+xyyz)/4.0
self.f[10] = (1.0-z+y-yz-xx+xxz-xxy+xxyz)/4.0
self.f[11] = (1.0-z-x+xz-yy+yyz+xyy-xyyz)/4.0
self.f[16] = (1.0+z-y-yz-xx-xxz+xxy+xxyz)/4.0
self.f[17] = (1.0+z+x+xz-yy-yyz-xyy-xyyz)/4.0
self.f[18] = (1.0+z+y+yz-xx-xxz-xxy-xxyz)/4.0
self.f[19] = (1.0+z-x-xz-yy-yyz+xyy+xyyz)/4.0
self.f[12] = (1.0-y-x+xy-zz+yzz+xzz-xyzz)/4.0
self.f[13] = (1.0-y+x-xy-zz+yzz-xzz+xyzz)/4.0
self.f[14] = (1.0+y+x+xy-zz-yzz-xzz-xyzz)/4.0
self.f[15] = (1.0+y-x-xy-zz-yzz+xzz+xyzz)/4.0
return self.f
def calcShapeDeriv(self, lcoord):
x, y, z = lcoord
xy, xz, yz = x*y, x*z, y*z
xx, yy, zz = x*x, y*y, z*z
xyz, xxy, xxz, xyy = xy*z, xx*y, xx*z, x*yy
yyz, xzz, yzz = yy*z, x*zz, y*zz
self.df[0, 0] = 1.0-yz+2.0*x-2.0*xy-yy-2.0*xz-zz+2.0*xyz+yyz+yzz
self.df[1, 0] = 1.0-xz+2.0*y-xx-2.0*xy-2.0*yz-zz+xxz+2.0*xyz+xzz
self.df[2, 0] = 1.0-xy+2.0*z-xx-2.0*xz-yy-2.0*yz+xxy+xyy+2.0*xyz
self.df[0, 1] = -1.0+yz+2.0*x-2.0*xy+yy-2.0*xz+zz+2.0*xyz-yyz-yzz
self.df[1, 1] = 1.0+xz+2.0*y-xx+2.0*xy-2.0*yz-zz+xxz-2.0*xyz-xzz
self.df[2, 1] = 1.0+xy+2.0*z-xx+2.0*xz-yy-2.0*yz+xxy-xyy-2.0*xyz
self.df[0, 2] = -1.0-yz+2.0*x+2.0*xy+yy-2.0*xz+zz-2.0*xyz-yyz+yzz
self.df[1, 2] = -1.0-xz+2.0*y+xx+2.0*xy-2.0*yz+zz-xxz-2.0*xyz+xzz
self.df[2, 2] = 1.0-xy+2.0*z-xx+2.0*xz-yy+2.0*yz-xxy-xyy+2.0*xyz
self.df[0, 3] = 1.0+yz+2.0*x+2.0*xy-yy-2.0*xz-zz-2.0*xyz+yyz-yzz
self.df[1, 3] = -1.0+xz+2.0*y+xx-2.0*xy-2.0*yz+zz-xxz+2.0*xyz-xzz
self.df[2, 3] = 1.0+xy+2.0*z-xx-2.0*xz-yy+2.0*yz-xxy+xyy-2.0*xyz
self.df[0, 4] = 1.0+yz+2.0*x-2.0*xy-yy+2.0*xz-zz-2.0*xyz-yyz+yzz
self.df[1, 4] = 1.0+xz+2.0*y-xx-2.0*xy+2.0*yz-zz-xxz-2.0*xyz+xzz
self.df[2, 4] = -1.0+xy+2.0*z+xx-2.0*xz+yy-2.0*yz-xxy-xyy+2.0*xyz
self.df[0, 5] = -1.0-yz+2.0*x-2.0*xy+yy+2.0*xz+zz-2.0*xyz+yyz-yzz
self.df[1, 5] = 1.0-xz+2.0*y-xx+2.0*xy+2.0*yz-zz-xxz+2.0*xyz-xzz
self.df[2, 5] = -1.0-xy+2.0*z+xx+2.0*xz+yy-2.0*yz-xxy+xyy-2.0*xyz
self.df[0, 6] = -1.0+yz+2.0*x+2.0*xy+yy+2.0*xz+zz+2.0*xyz+yyz+yzz
self.df[1, 6] = -1.0+xz+2.0*y+xx+2.0*xy+2.0*yz+zz+xxz+2.0*xyz+xzz
self.df[2, 6] = -1.0+xy+2.0*z+xx+2.0*xz+yy+2.0*yz+xxy+xyy+2.0*xyz
self.df[0, 7] = 1.0-yz+2.0*x+2.0*xy-yy+2.0*xz-zz+2.0*xyz-yyz-yzz
self.df[1, 7] = -1.0-xz+2.0*y+xx-2.0*xy+2.0*yz+zz+xxz-2.0*xyz-xzz
self.df[2, 7] = -1.0-xy+2.0*z+xx-2.0*xz+yy+2.0*yz+xxy-xyy-2.0*xyz
self.df[:, 0:8] = self.df[:, 0:8]/2.0
self.df[0, 8] = -2.0*x+2.0*xz+2.0*xy-2.0*xyz
self.df[1, 8] = -1.0+z+xx-xxz
self.df[2, 8] = -1.0+y+xx-xxy
self.df[0, 9] = 1.0-z-yy+yyz
self.df[1, 9] = -2.0*y+2.0*yz-2.0*xy+2.0*xyz
self.df[2, 9] = -1.0-x+yy+xyy
self.df[0, 10] = -2.0*x+2.0*xz-2.0*xy+2.0*xyz
self.df[1, 10] = 1.0-z-xx+xxz
self.df[2, 10] = -1.0-y+xx+xxy
self.df[0, 11] = -1.0+z+yy-yyz
self.df[1, 11] = -2.0*y+2.0*yz+2.0*xy-2.0*xyz
self.df[2, 11] = -1.0+x+yy-xyy
self.df[0, 16] = -2*x-2*xz+2*xy+2*xyz
self.df[1, 16] = -1.0-z+xx+xxz
self.df[2, 16] = 1.0-y-xx+xxy
self.df[0, 17] = 1.0+z-yy-yyz
self.df[1, 17] = -2*y-2*yz-2*xy-2*xyz
self.df[2, 17] = 1.0+x-yy-xyy
self.df[0, 18] = -2*x-2*xz-2*xy-2*xyz
self.df[1, 18] = 1.0+z-xx-xxz
self.df[2, 18] = 1.0+y-xx-xxy
self.df[0, 19] = -1.0-z+yy+yyz
self.df[1, 19] = -2*y-2*yz+2*xy+2*xyz
self.df[2, 19] = 1.0-x-yy+xyy
self.df[0, 12] = -1.0+y+zz-yzz
self.df[1, 12] = -1.0+x+zz-xzz
self.df[2, 12] = -2*z+2*yz+2*xz-2*xyz
self.df[0, 13] = 1.0-y-zz+yzz
self.df[1, 13] = -1.0-x+zz+xzz
self.df[2, 13] = -2*z+2*yz-2*xz+2*xyz
self.df[0, 14] = 1.0+y-zz-yzz
self.df[1, 14] = 1.0+x-zz-xzz
self.df[2, 14] = -2*z-2*yz-2*xz-2*xyz
self.df[0, 15] = -1.0-y+zz+yzz
self.df[1, 15] = 1.0-x-zz+xzz
self.df[2, 15] = -2*z-2*yz+2*xz+2*xyz
self.df = self.df/4.0
return self.df
def nextPattern(self, lcoord):
x,y,z = lcoord / max(N.absolute(lcoord)) * 1.01
if x > 1: return [1,2,6,5]
elif x < -1: return [0,3,7,4]
elif y > 1: return [2,3,7,6]
elif y < -1: return [0,1,5,4]
elif z > 1: return [4,5,6,7]
elif z < -1: return [0,1,2,3]
else: return None
class ShapeFunction_Hex27(ShapeFunctionPrototype):
"""Element function for linear element defined in MARC Manual B2.4-1,
Taylor&Hughes (1981), p. 49
Here we adopt the numbering from Libmesh, i.e. the second level
of second order nodes comes before the 3rd level
The integration points (in parentheses) are located at unexpected
locations (for MARC)!
7-------18------6
/|(6) (7)/|
/ | / |
19 | [25] 17 |
/ 15 [23] / 14 center node: 26
/ (4)| (5)/ |
4-------16------5 |
| [24]| | [22]|
| 3------10-|-----2
| / (2) | (3)/
12 / [21] 13 /
| 11 [20] | 9
| / | /
|/ (0) (1)|/
0-------8-------1
"""
name = 'Hex27'
dim, nnodes = 3, 27
cornernodes = [0,1,2,3,4,5,6,7]
nsides = 6
sidetype = 'Quad9'
sidenodes = N.array([
[0, 3, 2, 1, 11, 10, 9, 8, 20], # Side 0 (exodus: 5) 20 -> 22
[0, 1, 5, 4, 8, 13, 16, 12, 21], # Side 1 (exodus: 1) 21 -> 26
[1, 2, 6, 5, 9, 14, 17, 13, 22], # Side 2 (exodus: 2) 22 -> 25
[2, 3, 7, 6, 10, 15, 18, 14, 23], # Side 3 (exodus: 3) 23 -> 27
[3, 0, 4, 7, 11, 12, 19, 15, 24], # Side 4 (exodus: 4) 24 -> 24
[4, 5, 6, 7, 16, 17, 18, 19, 25] # Side 5 (exodus: 6) 25 -> 23
])
nextnodes = N.array(
[[1,3,4],
[0,2,5],
[1,3,6],
[0,2,7],
[0,5,7],
[1,4,6],
[2,5,7],
[3,4,6],
])
gaussDist = 0.774596669241483 # = N.sqrt(0.6)
lcoordGauss = N.array([ [-1., -1., -1.],
[ 1., -1., -1.],
[-1., 1., -1.],
[ 1., 1., -1.],
[-1., -1., 1.],
[ 1., -1., 1.],
[-1., 1., 1.],
[ 1., 1., 1.]])*gaussDist
def calcShape(self, lcoord):
print 'not implemented'
return None
# x, y, z = lcoord
# xy, xz, yz = x*y, x*z, y*z
# xx, yy, zz = x*x, y*y, z*z
# xyz, xxy, xxz, xyy = xy*z, xx*y, xx*z, x*yy
# yyz, xzz, yzz = yy*z, x*zz, y*zz
# xxyz, xyyz, xyzz = xxy*z, xyy*z, xyz*z
# self.f[0] = (x+y+z-xyz+xx+yy+zz-xxy-xyy-xxz-xzz-yyz-yzz+ \
# xxyz+xyyz+xyzz-2.0)/8.0
# self.f[1] = (-x+y+z+xyz+xx+yy+zz-xxy+xyy-xxz+xzz-yyz-yzz+ \
# xxyz-xyyz-xyzz-2.0)/8.0
# self.f[2] = (-x-y+z-xyz+xx+yy+zz+xxy+xyy-xxz+xzz-yyz+yzz- \
# xxyz-xyyz+xyzz-2.0)/8.0
# self.f[3] = (x-y+z+xyz+xx+yy+zz+xxy-xyy-xxz-xzz-yyz+yzz- \
# xxyz+xyyz-xyzz-2.0)/8.0
# self.f[4] = (x+y-z+xyz+xx+yy+zz-xxy-xyy+xxz-xzz+yyz-yzz- \
# xxyz-xyyz+xyzz-2.0)/8.0
# self.f[5] = (-x+y-z-xyz+xx+yy+zz-xxy+xyy+xxz+xzz+yyz-yzz- \
# xxyz+xyyz-xyzz-2.0)/8.0
# self.f[6] = (-x-y-z+xyz+xx+yy+zz+xxy+xyy+xxz+xzz+yyz+yzz+ \
# xxyz+xyyz+xyzz-2.0)/8.0
# self.f[7] = (x-y-z-xyz+xx+yy+zz+xxy-xyy+xxz-xzz+yyz+yzz+ \
# xxyz-xyyz-xyzz-2.0)/8.0
# self.f[8] = (1.0-z-y+yz-xx+xxz+xxy-xxyz)/4.0
# self.f[9] = (1.0-z+x-xz-yy+yyz-xyy+xyyz)/4.0
# self.f[10] = (1.0-z+y-yz-xx+xxz-xxy+xxyz)/4.0
# self.f[11] = (1.0-z-x+xz-yy+yyz+xyy-xyyz)/4.0
# self.f[12] = (1.0+z-y-yz-xx-xxz+xxy+xxyz)/4.0
# self.f[13] = (1.0+z+x+xz-yy-yyz-xyy-xyyz)/4.0
# self.f[14] = (1.0+z+y+yz-xx-xxz-xxy-xxyz)/4.0
# self.f[15] = (1.0+z-x-xz-yy-yyz+xyy+xyyz)/4.0
# self.f[16] = (1.0-y-x+xy-zz+yzz+xzz-xyzz)/4.0
# self.f[17] = (1.0-y+x-xy-zz+yzz-xzz+xyzz)/4.0
# self.f[18] = (1.0+y+x+xy-zz-yzz-xzz-xyzz)/4.0
# self.f[19] = (1.0+y-x-xy-zz-yzz+xzz+xyzz)/4.0
# return self.f
def calcShapeDeriv(self, lcoord):
print 'not implemented'
return None
# x, y, z = lcoord
# xy, xz, yz = x*y, x*z, y*z
# xx, yy, zz = x*x, y*y, z*z
# xyz, xxy, xxz, xyy = xy*z, xx*y, xx*z, x*yy
# yyz, xzz, yzz = yy*z, x*zz, y*zz
# self.df[0, 0] = 1.0-yz+2.0*x-2.0*xy-yy-2.0*xz-zz+2.0*xyz+yyz+yzz
# self.df[1, 0] = 1.0-xz+2.0*y-xx-2.0*xy-2.0*yz-zz+xxz+2.0*xyz+xzz
# self.df[2, 0] = 1.0-xy+2.0*z-xx-2.0*xz-yy-2.0*yz+xxy+xyy+2.0*xyz
# self.df[0, 1] = -1.0+yz+2.0*x-2.0*xy+yy-2.0*xz+zz+2.0*xyz-yyz-yzz
# self.df[1, 1] = 1.0+xz+2.0*y-xx+2.0*xy-2.0*yz-zz+xxz-2.0*xyz-xzz
# self.df[2, 1] = 1.0+xy+2.0*z-xx+2.0*xz-yy-2.0*yz+xxy-xyy-2.0*xyz
# self.df[0, 2] = -1.0-yz+2.0*x+2.0*xy+yy-2.0*xz+zz-2.0*xyz-yyz+yzz
# self.df[1, 2] = -1.0-xz+2.0*y+xx+2.0*xy-2.0*yz+zz-xxz-2.0*xyz+xzz
# self.df[2, 2] = 1.0-xy+2.0*z-xx+2.0*xz-yy+2.0*yz-xxy-xyy+2.0*xyz
# self.df[0, 3] = 1.0+yz+2.0*x+2.0*xy-yy-2.0*xz-zz-2.0*xyz+yyz-yzz
# self.df[1, 3] = -1.0+xz+2.0*y+xx-2.0*xy-2.0*yz+zz-xxz+2.0*xyz-xzz
# self.df[2, 3] = 1.0+xy+2.0*z-xx-2.0*xz-yy+2.0*yz-xxy+xyy-2.0*xyz
# self.df[0, 4] = 1.0+yz+2.0*x-2.0*xy-yy+2.0*xz-zz-2.0*xyz-yyz+yzz
# self.df[1, 4] = 1.0+xz+2.0*y-xx-2.0*xy+2.0*yz-zz-xxz-2.0*xyz+xzz
# self.df[2, 4] = -1.0+xy+2.0*z+xx-2.0*xz+yy-2.0*yz-xxy-xyy+2.0*xyz
# self.df[0, 5] = -1.0-yz+2.0*x-2.0*xy+yy+2.0*xz+zz-2.0*xyz+yyz-yzz
# self.df[1, 5] = 1.0-xz+2.0*y-xx+2.0*xy+2.0*yz-zz-xxz+2.0*xyz-xzz
# self.df[2, 5] = -1.0-xy+2.0*z+xx+2.0*xz+yy-2.0*yz-xxy+xyy-2.0*xyz
# self.df[0, 6] = -1.0+yz+2.0*x+2.0*xy+yy+2.0*xz+zz+2.0*xyz+yyz+yzz
# self.df[1, 6] = -1.0+xz+2.0*y+xx+2.0*xy+2.0*yz+zz+xxz+2.0*xyz+xzz
# self.df[2, 6] = -1.0+xy+2.0*z+xx+2.0*xz+yy+2.0*yz+xxy+xyy+2.0*xyz
# self.df[0, 7] = 1.0-yz+2.0*x+2.0*xy-yy+2.0*xz-zz+2.0*xyz-yyz-yzz
# self.df[1, 7] = -1.0-xz+2.0*y+xx-2.0*xy+2.0*yz+zz+xxz-2.0*xyz-xzz
# self.df[2, 7] = -1.0-xy+2.0*z+xx-2.0*xz+yy+2.0*yz+xxy-xyy-2.0*xyz
# self.df[:, 0:8] = self.df[:, 0:8]/2.0
# self.df[0, 8] = -2.0*x+2.0*xz+2.0*xy-2.0*xyz
# self.df[1, 8] = -1.0+z+xx-xxz
# self.df[2, 8] = -1.0+y+xx-xxy
# self.df[0, 9] = 1.0-z-yy+yyz
# self.df[1, 9] = -2.0*y+2.0*yz-2.0*xy+2.0*xyz
# self.df[2, 9] = -1.0-x+yy+xyy
# self.df[0, 10] = -2.0*x+2.0*xz-2.0*xy+2.0*xyz
# self.df[1, 10] = 1.0-z-xx+xxz
# self.df[2, 10] = -1.0-y+xx+xxy
# self.df[0, 11] = -1.0+z+yy-yyz
# self.df[1, 11] = -2.0*y+2.0*yz+2.0*xy-2.0*xyz
# self.df[2, 11] = -1.0+x+yy-xyy
# self.df[0, 12] = -2*x-2*xz+2*xy+2*xyz
# self.df[1, 12] = -1.0-z+xx+xxz
# self.df[2, 12] = 1.0-y-xx+xxy
# self.df[0, 13] = 1.0+z-yy-yyz
# self.df[1, 13] = -2*y-2*yz-2*xy-2*xyz
# self.df[2, 13] = 1.0+x-yy-xyy
# self.df[0, 14] = -2*x-2*xz-2*xy-2*xyz
# self.df[1, 14] = 1.0+z-xx-xxz
# self.df[2, 14] = 1.0+y-xx-xxy
# self.df[0, 15] = -1.0-z+yy+yyz
# self.df[1, 15] = -2*y-2*yz+2*xy+2*xyz
# self.df[2, 15] = 1.0-x-yy+xyy
# self.df[0, 16] = -1.0+y+zz-yzz
# self.df[1, 16] = -1.0+x+zz-xzz
# self.df[2, 16] = -2*z+2*yz+2*xz-2*xyz
# self.df[0, 17] = 1.0-y-zz+yzz
# self.df[1, 17] = -1.0-x+zz+xzz
# self.df[2, 17] = -2*z+2*yz-2*xz+2*xyz
# self.df[0, 18] = 1.0+y-zz-yzz
# self.df[1, 18] = 1.0+x-zz-xzz
# self.df[2, 18] = -2*z-2*yz-2*xz-2*xyz
# self.df[0, 19] = -1.0-y+zz+yzz
# self.df[1, 19] = 1.0-x-zz+xzz
# self.df[2, 19] = -2*z-2*yz+2*xz+2*xyz
# self.df = self.df/4.0
# return self.df
def nextPattern(self, lcoord):
x,y,z = lcoord / max(N.absolute(lcoord)) * 1.01
if x > 1: return [1,2,6,5]
elif x < -1: return [0,3,7,4]
elif y > 1: return [2,3,7,6]
elif y < -1: return [0,1,5,4]
elif z > 1: return [4,5,6,7]
elif z < -1: return [0,1,2,3]
else: return None
# all shape functions are registered here
shapeFunctions = {
'Line2': ShapeFunction_Line2,
'Tri3': ShapeFunction_Tri3,
'Tri6': ShapeFunction_Tri6,
'Quad4': ShapeFunction_Quad4,
'Quad8': ShapeFunction_Quad8,
'Quad9': ShapeFunction_Quad9,
'Hex8' : ShapeFunction_Hex8,
'Hex20': ShapeFunction_Hex20,
'Hex27': ShapeFunction_Hex27
}
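# Typical lookup through the registry (a hedged sketch; calcGauss relies on the
# scipy.linalg pseudo-inverse imported at the top of this module):
#   quad = shapeFunctions['Quad4']()
#   quad.calcGauss()             # fills gaussShape and gaussShapeInv
#   weights = quad([0.3, -0.2])  # shape function values at a local coordinate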
if __name__ == '__main__':
sh6 = ShapeFunction_Tri6()
sh3 = ShapeFunction_Tri3()
def shape(zeta):
zeta1, zeta2 = zeta
zeta0 = 1. - zeta1 - zeta2
return [2.*zeta0*(zeta0-0.5),
2.*zeta1*(zeta1-0.5),
2.*zeta2*(zeta2-0.5),
4.*zeta1*zeta2,
4.*zeta2*zeta0,
4.*zeta0*zeta1,
]
print shape([0.,0.])
print sh6([-1.,-1.])
print sh3([-1.,-1.])
print '----------------------'
print shape([1.,0.])
print sh6([1.,-1.])
print sh3([1.,-1.])
print '----------------------'
print shape([0.,1.])
print sh6([-1.,1.])
print sh3([-1.,1.])
print '----------------------'
print shape([0.5,0.5])
print sh6([0.,0.])
print sh3([0.,0.])
print '----------------------'
print shape([0.,0.5])
print sh6([-1.,0.])
print sh3([-1.,0.])
print '----------------------'
print shape([0.5,0.])
print sh6([0.,-1.])
print sh3([0.,-1.])
print '----------------------'
print shape([0.3,0.4])
print sh6([-0.4,-0.2])
print sh3([-0.4,-0.2])
# for n, sf in shapeFunctions.items():
# print '===== %s =====' % n
# s = sf()
# s.calcGauss()
# print s.gaussShapeInv
|
gpl-2.0
| -1,197,586,550,653,715,200
| 31.675862
| 93
| 0.400828
| false
| 2.429121
| false
| false
| false
|
gibil5/openhealth
|
models/management/mgt_patient_line.py
|
1
|
2816
|
# -*- coding: utf-8 -*-
"""
Management Patient Line
Should contain class methods
Created: 20 Jun 2019
Last update: 27 Oct 2020
"""
from __future__ import print_function
from __future__ import absolute_import
from openerp import models, fields, api
from .management_db import ManagementDb
#from openerp.addons.openhealth.models.patient import pat_vars
from ..patient import pat_vars
class MgtPatientLine(models.Model):
"""
Patient line
"""
_name = 'openhealth.management.patient.line'
_order = 'amount_total desc'
# ----------------------------------------------------- Const ------------------
_MODEL = "openhealth.management.patient.line"
# ----------------------------------------------------- Class methods ----------
# Create
@classmethod
#def create_oh(cls, patient_id, management_id, env):
def create_oh(cls, patient, management_id, env):
#print('Class method - create')
#print(cls)
#print(patient_id, management_id)
# create
patient_line = env.create({
'patient': patient.id,
'management_id': management_id,
})
#cls.sex = patient.sex
#cls.age = patient.age
return patient_line
# Count
@classmethod
def count_oh(cls, patient_id, management_id, env):
#print('Class method - count')
#print(cls)
#print(patient_id, management_id)
# count
count = env.search_count([
('patient', '=', patient_id),
('management_id', '=', management_id),
],
#order='x_serial_nr asc',
#limit=1,
)
return count
# ----------------------------------------------------------- Handles ----------
# Management
management_id = fields.Many2one(
'openhealth.management',
ondelete='cascade',
)
# Patient
patient = fields.Many2one(
'oeh.medical.patient',
#string='Paciente',
)
# -------------------------------------------------------------- Vars ----------
amount_total = fields.Float()
count_total = fields.Integer()
age = fields.Char(
#string="Edad",
)
sex = fields.Selection(
selection=pat_vars.get_sex_type_list(),
#string="Sexo",
#required=False,
)
# ----------------------------------------------------------- Update -------------------------
# Update
@api.multi
def update(self):
"""
Update
"""
print()
print('** MgtPatientLine - Update')
# Update vars
self.sex = self.patient.sex
self.age = self.patient.age
# Calc Amount total - All sales ever
self.amount_total = 0
self.count_total = 0
# Get Orders
#orders, count = mgt_db.get_orders_filter_by_patient_fast(self, self.patient.id)
orders, count = ManagementDb.get_orders_filter_by_patient(self, self.patient.id)
for order in orders:
self.amount_total = self.amount_total + order.x_amount_flow
for line in order.order_line:
self.count_total = self.count_total + line.product_uom_qty
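# A hedged usage sketch (assumes a parent openhealth.management record and a
# patient recordset, which live outside this file):
#   Line = self.env['openhealth.management.patient.line']
#   if MgtPatientLine.count_oh(patient.id, management.id, Line) == 0:
#       line = MgtPatientLine.create_oh(patient, management.id, Line)
#       line.update()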
|
agpl-3.0
| 2,078,314,972,504,652,000
| 22.663866
| 94
| 0.57848
| false
| 3.309048
| false
| false
| false
|
Fokko/incubator-airflow
|
airflow/contrib/example_dags/example_winrm_operator.py
|
1
|
2457
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# --------------------------------------------------------------------------------
# Written By: Ekhtiar Syed
# Last Update: 8th April 2016
# Caveat: This Dag will not run because of missing scripts.
# The purpose of this is to give you a sample of a real world example DAG!
# --------------------------------------------------------------------------------
# --------------------------------------------------------------------------------
# Load The Dependencies
# --------------------------------------------------------------------------------
"""
This is an example dag for using the WinRMOperator.
"""
from datetime import timedelta
import airflow
from airflow.contrib.hooks.winrm_hook import WinRMHook
from airflow.contrib.operators.winrm_operator import WinRMOperator
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
default_args = {
'owner': 'Airflow',
'start_date': airflow.utils.dates.days_ago(2)
}
with DAG(
dag_id='POC_winrm_parallel',
default_args=default_args,
schedule_interval='0 0 * * *',
dagrun_timeout=timedelta(minutes=60)
) as dag:
cmd = 'ls -l'
run_this_last = DummyOperator(task_id='run_this_last')
winRMHook = WinRMHook(ssh_conn_id='ssh_POC1')
t1 = WinRMOperator(
task_id="wintask1",
command='ls -altr',
winrm_hook=winRMHook
)
t2 = WinRMOperator(
task_id="wintask2",
command='sleep 60',
winrm_hook=winRMHook
)
t3 = WinRMOperator(
task_id="wintask3",
command='echo \'luke test\' ',
winrm_hook=winRMHook
)
[t1, t2, t3] >> run_this_last
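    # The list on the left of ">>" gives run_this_last three upstream tasks, so the
    # three WinRM commands run in parallel and run_this_last only starts after all
    # of them have completed.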
|
apache-2.0
| -5,874,133,508,260,719,000
| 31.76
| 82
| 0.607245
| false
| 3.956522
| false
| false
| false
|
mikewesner-wf/glasshouse
|
glasshouse.indigoPlugin/Contents/Server Plugin/werkzeug/testsuite/wsgi.py
|
1
|
10308
|
# -*- coding: utf-8 -*-
"""
werkzeug.testsuite.wsgi
~~~~~~~~~~~~~~~~~~~~~~~
Tests the WSGI utilities.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import unittest
from os import path
from cStringIO import StringIO
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.wrappers import BaseResponse
from werkzeug.exceptions import BadRequest, ClientDisconnected
from werkzeug.test import Client, create_environ, run_wsgi_app
from werkzeug import wsgi
class WSGIUtilsTestCase(WerkzeugTestCase):
def test_shareddatamiddleware_get_file_loader(self):
app = wsgi.SharedDataMiddleware(None, {})
assert callable(app.get_file_loader('foo'))
def test_shared_data_middleware(self):
def null_application(environ, start_response):
start_response('404 NOT FOUND', [('Content-Type', 'text/plain')])
yield 'NOT FOUND'
app = wsgi.SharedDataMiddleware(null_application, {
'/': path.join(path.dirname(__file__), 'res'),
'/sources': path.join(path.dirname(__file__), 'res'),
'/pkg': ('werkzeug.debug', 'shared')
})
for p in '/test.txt', '/sources/test.txt':
app_iter, status, headers = run_wsgi_app(app, create_environ(p))
self.assert_equal(status, '200 OK')
self.assert_equal(''.join(app_iter).strip(), 'FOUND')
app_iter, status, headers = run_wsgi_app(app, create_environ('/pkg/debugger.js'))
contents = ''.join(app_iter)
assert '$(function() {' in contents
app_iter, status, headers = run_wsgi_app(app, create_environ('/missing'))
self.assert_equal(status, '404 NOT FOUND')
self.assert_equal(''.join(app_iter).strip(), 'NOT FOUND')
def test_get_host(self):
env = {'HTTP_X_FORWARDED_HOST': 'example.org',
'SERVER_NAME': 'bullshit', 'HOST_NAME': 'ignore me dammit'}
self.assert_equal(wsgi.get_host(env), 'example.org')
assert wsgi.get_host(create_environ('/', 'http://example.org')) \
== 'example.org'
def test_responder(self):
def foo(environ, start_response):
return BaseResponse('Test')
client = Client(wsgi.responder(foo), BaseResponse)
response = client.get('/')
self.assert_equal(response.status_code, 200)
self.assert_equal(response.data, 'Test')
def test_pop_path_info(self):
original_env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b///c'}
# regular path info popping
def assert_tuple(script_name, path_info):
self.assert_equal(env.get('SCRIPT_NAME'), script_name)
self.assert_equal(env.get('PATH_INFO'), path_info)
env = original_env.copy()
pop = lambda: wsgi.pop_path_info(env)
assert_tuple('/foo', '/a/b///c')
self.assert_equal(pop(), 'a')
assert_tuple('/foo/a', '/b///c')
self.assert_equal(pop(), 'b')
assert_tuple('/foo/a/b', '///c')
self.assert_equal(pop(), 'c')
assert_tuple('/foo/a/b///c', '')
assert pop() is None
def test_peek_path_info(self):
env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/aaa/b///c'}
self.assert_equal(wsgi.peek_path_info(env), 'aaa')
self.assert_equal(wsgi.peek_path_info(env), 'aaa')
def test_limited_stream(self):
class RaisingLimitedStream(wsgi.LimitedStream):
def on_exhausted(self):
raise BadRequest('input stream exhausted')
io = StringIO('123456')
stream = RaisingLimitedStream(io, 3)
self.assert_equal(stream.read(), '123')
self.assert_raises(BadRequest, stream.read)
io = StringIO('123456')
stream = RaisingLimitedStream(io, 3)
self.assert_equal(stream.tell(), 0)
self.assert_equal(stream.read(1), '1')
self.assert_equal(stream.tell(), 1)
self.assert_equal(stream.read(1), '2')
self.assert_equal(stream.tell(), 2)
self.assert_equal(stream.read(1), '3')
self.assert_equal(stream.tell(), 3)
self.assert_raises(BadRequest, stream.read)
io = StringIO('123456\nabcdefg')
stream = wsgi.LimitedStream(io, 9)
self.assert_equal(stream.readline(), '123456\n')
self.assert_equal(stream.readline(), 'ab')
io = StringIO('123456\nabcdefg')
stream = wsgi.LimitedStream(io, 9)
self.assert_equal(stream.readlines(), ['123456\n', 'ab'])
io = StringIO('123456\nabcdefg')
stream = wsgi.LimitedStream(io, 9)
self.assert_equal(stream.readlines(2), ['12'])
self.assert_equal(stream.readlines(2), ['34'])
self.assert_equal(stream.readlines(), ['56\n', 'ab'])
io = StringIO('123456\nabcdefg')
stream = wsgi.LimitedStream(io, 9)
self.assert_equal(stream.readline(100), '123456\n')
io = StringIO('123456\nabcdefg')
stream = wsgi.LimitedStream(io, 9)
self.assert_equal(stream.readlines(100), ['123456\n', 'ab'])
io = StringIO('123456')
stream = wsgi.LimitedStream(io, 3)
self.assert_equal(stream.read(1), '1')
self.assert_equal(stream.read(1), '2')
self.assert_equal(stream.read(), '3')
self.assert_equal(stream.read(), '')
io = StringIO('123456')
stream = wsgi.LimitedStream(io, 3)
self.assert_equal(stream.read(-1), '123')
def test_limited_stream_disconnection(self):
io = StringIO('A bit of content')
# disconnect detection on out of bytes
stream = wsgi.LimitedStream(io, 255)
with self.assert_raises(ClientDisconnected):
stream.read()
# disconnect detection because file close
io = StringIO('x' * 255)
io.close()
stream = wsgi.LimitedStream(io, 255)
with self.assert_raises(ClientDisconnected):
stream.read()
def test_path_info_extraction(self):
x = wsgi.extract_path_info('http://example.com/app', '/app/hello')
self.assert_equal(x, u'/hello')
x = wsgi.extract_path_info('http://example.com/app',
'https://example.com/app/hello')
self.assert_equal(x, u'/hello')
x = wsgi.extract_path_info('http://example.com/app/',
'https://example.com/app/hello')
self.assert_equal(x, u'/hello')
x = wsgi.extract_path_info('http://example.com/app/',
'https://example.com/app')
self.assert_equal(x, u'/')
x = wsgi.extract_path_info(u'http://☃.net/', u'/fööbär')
self.assert_equal(x, u'/fööbär')
x = wsgi.extract_path_info(u'http://☃.net/x', u'http://☃.net/x/fööbär')
self.assert_equal(x, u'/fööbär')
env = create_environ(u'/fööbär', u'http://☃.net/x/')
x = wsgi.extract_path_info(env, u'http://☃.net/x/fööbär')
self.assert_equal(x, u'/fööbär')
x = wsgi.extract_path_info('http://example.com/app/',
'https://example.com/a/hello')
assert x is None
x = wsgi.extract_path_info('http://example.com/app/',
'https://example.com/app/hello',
collapse_http_schemes=False)
assert x is None
def test_get_host_fallback(self):
assert wsgi.get_host({
'SERVER_NAME': 'foobar.example.com',
'wsgi.url_scheme': 'http',
'SERVER_PORT': '80'
}) == 'foobar.example.com'
assert wsgi.get_host({
'SERVER_NAME': 'foobar.example.com',
'wsgi.url_scheme': 'http',
'SERVER_PORT': '81'
}) == 'foobar.example.com:81'
def test_get_current_url_unicode(self):
env = create_environ()
env['QUERY_STRING'] = 'foo=bar&baz=blah&meh=\xcf'
rv = wsgi.get_current_url(env)
self.assertEqual(rv, 'http://localhost/?foo=bar&baz=blah&meh=%CF')
def test_multi_part_line_breaks(self):
data = 'abcdef\r\nghijkl\r\nmnopqrstuvwxyz\r\nABCDEFGHIJK'
test_stream = StringIO(data)
lines = list(wsgi.make_line_iter(test_stream, limit=len(data), buffer_size=16))
self.assert_equal(lines, ['abcdef\r\n', 'ghijkl\r\n', 'mnopqrstuvwxyz\r\n', 'ABCDEFGHIJK'])
data = 'abc\r\nThis line is broken by the buffer length.\r\nFoo bar baz'
test_stream = StringIO(data)
lines = list(wsgi.make_line_iter(test_stream, limit=len(data), buffer_size=24))
self.assert_equal(lines, ['abc\r\n', 'This line is broken by the buffer length.\r\n', 'Foo bar baz'])
def test_multi_part_line_breaks_problematic(self):
data = 'abc\rdef\r\nghi'
for x in xrange(1, 10):
test_stream = StringIO(data)
lines = list(wsgi.make_line_iter(test_stream, limit=len(data), buffer_size=4))
assert lines == ['abc\r', 'def\r\n', 'ghi']
def test_iter_functions_support_iterators(self):
data = ['abcdef\r\nghi', 'jkl\r\nmnopqrstuvwxyz\r', '\nABCDEFGHIJK']
lines = list(wsgi.make_line_iter(data))
self.assert_equal(lines, ['abcdef\r\n', 'ghijkl\r\n', 'mnopqrstuvwxyz\r\n', 'ABCDEFGHIJK'])
def test_make_chunk_iter(self):
data = ['abcdefXghi', 'jklXmnopqrstuvwxyzX', 'ABCDEFGHIJK']
rv = list(wsgi.make_chunk_iter(data, 'X'))
self.assert_equal(rv, ['abcdef', 'ghijkl', 'mnopqrstuvwxyz', 'ABCDEFGHIJK'])
data = 'abcdefXghijklXmnopqrstuvwxyzXABCDEFGHIJK'
test_stream = StringIO(data)
rv = list(wsgi.make_chunk_iter(test_stream, 'X', limit=len(data), buffer_size=4))
self.assert_equal(rv, ['abcdef', 'ghijkl', 'mnopqrstuvwxyz', 'ABCDEFGHIJK'])
def test_lines_longer_buffer_size(self):
data = '1234567890\n1234567890\n'
for bufsize in xrange(1, 15):
lines = list(wsgi.make_line_iter(StringIO(data), limit=len(data), buffer_size=4))
self.assert_equal(lines, ['1234567890\n', '1234567890\n'])
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(WSGIUtilsTestCase))
return suite
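# A direct way to run only this module (normally the werkzeug test runner collects
# suite() itself); this assumes nothing beyond the unittest import above:
#   if __name__ == '__main__':
#       unittest.TextTestRunner(verbosity=2).run(suite())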
|
apache-2.0
| 2,790,075,410,506,452,500
| 39.301961
| 109
| 0.587331
| false
| 3.448658
| true
| false
| false
|
romanarranz/PPR
|
P1/calculaGanancia.py
|
1
|
2508
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from sys import argv
import re
# If the program receives fewer than 3 arguments, or the first and second arguments are the same file
if len(argv) < 3 or argv[1] == argv[2]:
print "Error la sintaxis es:"
print "\t$",argv[0]," output/floydS.dat"," output/floyd1D.dat"
print "\t$",argv[0]," output/floydS.dat"," output/floyd2D.dat"
else:
archivoFloydS = argv[1]
archivoFloydP = argv[2]
flujoArchivoS = open(archivoFloydS)
flujoArchivoP = open(archivoFloydP)
    # create an empty dictionary
ganancia = {}
    # <== Collect the times from the sequential file
# =========================================>
print "Flujo de %r:" % archivoFloydS
    # For each line
for linea in flujoArchivoS:
        # build a list using the character '\t' as delimiter
arrayLinea = re.split(r'\t+', linea.rstrip('\t'))
        # replace the newline with the empty string in every element of the list
arrayLinea = ([elemento.replace('\n', '') for elemento in arrayLinea])
if arrayLinea:
print "\tarrayLinea: ", arrayLinea
clave = int(arrayLinea[0])
ganancia[clave] = float(arrayLinea[1])
else:
print "\tNo match"
flujoArchivoS.close()
print ""
    # <== Collect the times from the parallel file
# =========================================>
print "Flujo de %r:" % archivoFloydP
    # For each line
for linea in flujoArchivoP:
        # build a list using the character '\t' as delimiter
arrayLinea = re.split(r'\t+', linea.rstrip('\t'))
        # replace the newline with the empty string in every element of the list
arrayLinea = ([elemento.replace('\n', '') for elemento in arrayLinea])
if arrayLinea:
print "\tarrayLinea: ", arrayLinea
clave = int(arrayLinea[0])
            # divide the sequential time by the parallel time and store it as the dictionary value
ganancia[clave] = ganancia[clave]/float(arrayLinea[1])
else:
print "\tNo match"
flujoArchivoP.close()
print ""
    # <== Print the dictionary
# =========================================>
print "Diccionario ganancia"
for key, value in sorted(ganancia.iteritems()):
print "\t",key, value
    # <== Save the dictionary to a ganancia.dat file
# =========================================>
archivoSalida = "ganancia"+archivoFloydP[-6:]
flujoSalida = open("output/"+archivoSalida, 'w')
for key, value in sorted(ganancia.iteritems()):
linea = str(key)+'\t'+str(value)+'\n'
s = str(linea)
flujoSalida.write(s)
flujoSalida.close()
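# Expected input format (an assumption inferred from the parsing above): each .dat
# file holds one "<problem_size>\t<time>" pair per line, so ganancia[size] ends up
# as T_sequential / T_parallel (the speedup) and is written to
# output/ganancia1D.dat or output/ganancia2D.dat depending on argv[2].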
|
mit
| -8,811,057,606,774,372,000
| 31.141026
| 112
| 0.647247
| false
| 2.610417
| false
| false
| false
|
pernici/sympy
|
sympy/polys/tests/test_euclidtools.py
|
1
|
20892
|
"""Tests for Euclidean algorithms, GCDs, LCMs and polynomial remainder sequences. """
from sympy.polys.euclidtools import (
dup_gcdex, dup_half_gcdex, dup_invert,
dup_euclidean_prs, dmp_euclidean_prs,
dup_primitive_prs, dmp_primitive_prs,
dup_subresultants, dmp_subresultants,
dup_prs_resultant, dmp_prs_resultant,
dmp_zz_collins_resultant,
dmp_qq_collins_resultant,
dup_resultant, dmp_resultant,
dup_discriminant, dmp_discriminant,
dup_zz_heu_gcd, dmp_zz_heu_gcd,
dup_qq_heu_gcd, dmp_qq_heu_gcd,
dup_rr_prs_gcd, dmp_rr_prs_gcd,
dup_ff_prs_gcd, dmp_ff_prs_gcd,
dup_inner_gcd, dmp_inner_gcd,
dup_lcm, dmp_lcm,
dmp_content, dmp_primitive,
dup_cancel, dmp_cancel)
from sympy.polys.densebasic import (
dmp_one_p,
dup_LC, dmp_LC,
dup_normal, dmp_normal)
from sympy.polys.densearith import (
dup_add,
dup_mul, dmp_mul,
dup_exquo)
from sympy.polys.densetools import (
dup_diff)
from sympy.polys.specialpolys import (
f_4, f_5, f_6,
dmp_fateman_poly_F_1,
dmp_fateman_poly_F_2,
dmp_fateman_poly_F_3)
from sympy.polys.domains import ZZ, QQ
def test_dup_gcdex():
f = dup_normal([1,-2,-6,12,15], QQ)
g = dup_normal([1,1,-4,-4], QQ)
s = [QQ(-1,5),QQ(3,5)]
t = [QQ(1,5),QQ(-6,5),QQ(2)]
h = [QQ(1),QQ(1)]
assert dup_half_gcdex(f, g, QQ) == (s, h)
assert dup_gcdex(f, g, QQ) == (s, t, h)
f = dup_normal([1,4,0,-1,1], QQ)
g = dup_normal([1,0,-1,1], QQ)
s, t, h = dup_gcdex(f, g, QQ)
S, T, H = dup_gcdex(g, f, QQ)
assert dup_add(dup_mul(s, f, QQ),
dup_mul(t, g, QQ), QQ) == h
assert dup_add(dup_mul(S, g, QQ),
dup_mul(T, f, QQ), QQ) == H
f = dup_normal([2,0], QQ)
g = dup_normal([1,0,-16], QQ)
s = [QQ(1,32),QQ(0)]
t = [QQ(-1,16)]
h = [QQ(1)]
assert dup_half_gcdex(f, g, QQ) == (s, h)
assert dup_gcdex(f, g, QQ) == (s, t, h)
def test_dup_invert():
assert dup_invert([QQ(2),QQ(0)], [QQ(1),QQ(0),QQ(-16)], QQ) == [QQ(1,32),QQ(0)]
def test_dup_euclidean_prs():
f = QQ.map([1, 0, 1, 0, -3, -3, 8, 2, -5])
g = QQ.map([3, 0, 5, 0, -4, -9, 21])
assert dup_euclidean_prs(f, g, QQ) == [f, g,
[-QQ(5,9), QQ(0,1), QQ(1,9), QQ(0,1), -QQ(1,3)],
[-QQ(117,25), -QQ(9,1), QQ(441,25)],
[QQ(233150,19773), -QQ(102500,6591)],
[-QQ(1288744821,543589225)]]
def test_dup_primitive_prs():
f = ZZ.map([1, 0, 1, 0, -3, -3, 8, 2, -5])
g = ZZ.map([3, 0, 5, 0, -4, -9, 21])
assert dup_primitive_prs(f, g, ZZ) == [f, g,
[-ZZ(5), ZZ(0), ZZ(1), ZZ(0), -ZZ(3)],
[ZZ(13), ZZ(25), -ZZ(49)],
[ZZ(4663), -ZZ(6150)],
[ZZ(1)]]
def test_dup_subresultants():
assert dup_resultant([], [], ZZ) == ZZ(0)
assert dup_resultant([ZZ(1)], [], ZZ) == ZZ(0)
assert dup_resultant([], [ZZ(1)], ZZ) == ZZ(0)
f = dup_normal([1,0,1,0,-3,-3,8,2,-5], ZZ)
g = dup_normal([3,0,5,0,-4,-9,21], ZZ)
a = dup_normal([15,0,-3,0,9], ZZ)
b = dup_normal([65,125,-245], ZZ)
c = dup_normal([9326,-12300], ZZ)
d = dup_normal([260708], ZZ)
assert dup_subresultants(f, g, ZZ) == [f, g, a, b, c, d]
assert dup_resultant(f, g, ZZ) == dup_LC(d, ZZ)
f = dup_normal([1,-2,1], ZZ)
g = dup_normal([1,0,-1], ZZ)
a = dup_normal([2,-2], ZZ)
assert dup_subresultants(f, g, ZZ) == [f, g, a]
assert dup_resultant(f, g, ZZ) == 0
f = dup_normal([1,0, 1], ZZ)
g = dup_normal([1,0,-1], ZZ)
a = dup_normal([-2], ZZ)
assert dup_subresultants(f, g, ZZ) == [f, g, a]
assert dup_resultant(f, g, ZZ) == 4
f = dup_normal([1,0,-1], ZZ)
g = dup_normal([1,-1,0,2], ZZ)
assert dup_resultant(f, g, ZZ) == 0
f = dup_normal([3,0,-1,0], ZZ)
g = dup_normal([5,0,1], ZZ)
assert dup_resultant(f, g, ZZ) == 64
f = dup_normal([1,-2,7], ZZ)
g = dup_normal([1,0,-1,5], ZZ)
assert dup_resultant(f, g, ZZ) == 265
f = dup_normal([1,-6,11,-6], ZZ)
g = dup_normal([1,-15,74,-120], ZZ)
assert dup_resultant(f, g, ZZ) == -8640
f = dup_normal([1,-6,11,-6], ZZ)
g = dup_normal([1,-10,29,-20], ZZ)
assert dup_resultant(f, g, ZZ) == 0
f = dup_normal([1,0,0,-1], ZZ)
g = dup_normal([1,2,2,-1], ZZ)
assert dup_resultant(f, g, ZZ) == 16
f = dup_normal([1,0,0,0,0,0,0,0,-2], ZZ)
g = dup_normal([1,-1], ZZ)
assert dup_resultant(f, g, ZZ) == -1
def test_dmp_subresultants():
assert dmp_resultant([[]], [[]], 1, ZZ) == []
assert dmp_prs_resultant([[]], [[]], 1, ZZ)[0] == []
assert dmp_zz_collins_resultant([[]], [[]], 1, ZZ) == []
assert dmp_qq_collins_resultant([[]], [[]], 1, ZZ) == []
assert dmp_resultant([[ZZ(1)]], [[]], 1, ZZ) == []
assert dmp_resultant([[ZZ(1)]], [[]], 1, ZZ) == []
assert dmp_resultant([[ZZ(1)]], [[]], 1, ZZ) == []
assert dmp_resultant([[]], [[ZZ(1)]], 1, ZZ) == []
assert dmp_prs_resultant([[]], [[ZZ(1)]], 1, ZZ)[0] == []
assert dmp_zz_collins_resultant([[]], [[ZZ(1)]], 1, ZZ) == []
assert dmp_qq_collins_resultant([[]], [[ZZ(1)]], 1, ZZ) == []
f = dmp_normal([[3,0],[],[-1,0,0,-4]], 1, ZZ)
g = dmp_normal([[1],[1,0,0,0],[-9]], 1, ZZ)
a = dmp_normal([[3,0,0,0,0],[1,0,-27,4]], 1, ZZ)
b = dmp_normal([[-3,0,0,-12,1,0,-54,8,729,-216,16]], 1, ZZ)
r = dmp_LC(b, ZZ)
assert dmp_subresultants(f, g, 1, ZZ) == [f, g, a, b]
assert dmp_resultant(f, g, 1, ZZ) == r
assert dmp_prs_resultant(f, g, 1, ZZ)[0] == r
assert dmp_zz_collins_resultant(f, g, 1, ZZ) == r
assert dmp_qq_collins_resultant(f, g, 1, ZZ) == r
f = dmp_normal([[-1],[],[],[5]], 1, ZZ)
g = dmp_normal([[3,1],[],[]], 1, ZZ)
a = dmp_normal([[45,30,5]], 1, ZZ)
b = dmp_normal([[675,675,225,25]], 1, ZZ)
r = dmp_LC(b, ZZ)
assert dmp_subresultants(f, g, 1, ZZ) == [f, g, a]
assert dmp_resultant(f, g, 1, ZZ) == r
assert dmp_prs_resultant(f, g, 1, ZZ)[0] == r
assert dmp_zz_collins_resultant(f, g, 1, ZZ) == r
assert dmp_qq_collins_resultant(f, g, 1, ZZ) == r
f = [[[[[6]]]], [[[[-3]]], [[[-2]], [[]]]], [[[[1]], [[]]], [[[]]]]]
g = [[[[[1]]]], [[[[-1], [-1, 0]]]], [[[[1, 0], []]]]]
r = [[[[1]], [[-3], [-3, 0]], [[9, 0], []]], [[[-2], [-2, 0]], [[6],
[12, 0], [6, 0, 0]], [[-18, 0], [-18, 0, 0], []]], [[[4, 0],
[]], [[-12, 0], [-12, 0, 0], []], [[36, 0, 0], [], []]]]
assert dmp_zz_collins_resultant(f, g, 4, ZZ) == r
f = [[[[[QQ(1,1)]]]], [[[[QQ(-1,2)]]], [[[QQ(-1,3)]], [[]]]], [[[[QQ(1,6)]], [[]]], [[[]]]]]
g = [[[[[QQ(1,1)]]]], [[[[QQ(-1,1)], [QQ(-1,1), QQ(0, 1)]]]], [[[[QQ(1,1), QQ(0,1)], []]]]]
r = [[[[QQ(1,36)]], [[QQ(-1,12)], [QQ(-1,12), QQ(0,1)]], [[QQ(1,4), QQ(0,1)], []]],
[[[QQ(-1,18)], [QQ(-1,18), QQ(0,1)]], [[QQ(1,6)], [QQ(1,3), QQ(0,1)], [QQ(1,6),
QQ(0,1), QQ(0,1)]], [[QQ(-1,2), QQ(0,1)], [QQ(-1,2), QQ(0,1), QQ(0,1)], []]],
[[[QQ(1,9), QQ(0,1)], []], [[QQ(-1,3), QQ(0,1)], [QQ(-1,3), QQ(0,1), QQ(0,1)], []],
[[QQ(1,1), QQ(0,1), QQ(0,1)], [], []]]]
assert dmp_qq_collins_resultant(f, g, 4, QQ) == r
def test_dup_discriminant():
assert dup_discriminant([], ZZ) == 0
assert dup_discriminant([1,0], ZZ) == 1
assert dup_discriminant([1,3,9,-13], ZZ) == -11664
assert dup_discriminant([5,0,1,0,0,2], ZZ) == 31252160
assert dup_discriminant([1,2,6,-22,13], ZZ) == 0
assert dup_discriminant([12,0,0,15,30,1,0,1], ZZ) == -220289699947514112
def test_dmp_discriminant():
assert dmp_discriminant([], 0, ZZ) == 0
assert dmp_discriminant([[]], 1, ZZ) == []
assert dmp_discriminant([[1,0]], 1, ZZ) == []
assert dmp_discriminant([1,3,9,-13], 0, ZZ) == -11664
assert dmp_discriminant([5,0,1,0,0,2], 0, ZZ) == 31252160
assert dmp_discriminant([1,2,6,-22,13], 0, ZZ) == 0
assert dmp_discriminant([12,0,0,15,30,1,0,1], 0, ZZ) == -220289699947514112
assert dmp_discriminant([[1,0],[],[2,0]], 1, ZZ) == [-8,0,0]
assert dmp_discriminant([[1,0,2],[]], 1, ZZ) == [1]
assert dmp_discriminant([[[1],[]],[[1,0]]], 2, ZZ) == [[1]]
assert dmp_discriminant([[[[1]],[[]]],[[[1],[]]],[[[1,0]]]], 3, ZZ) == \
[[[-4, 0]], [[1], [], []]]
assert dmp_discriminant([[[[[1]]],[[[]]]],[[[[1]],[[]]]],[[[[1],[]]]],[[[[1,0]]]]], 4, ZZ) == \
[[[[-27,0,0]]],[[[18,0],[]],[[-4],[],[],[]]],[[[-4,0]],[[1],[],[]],[[]],[[]]]]
def test_dup_gcd():
assert dup_zz_heu_gcd([], [], ZZ) == ([], [], [])
assert dup_rr_prs_gcd([], [], ZZ) == ([], [], [])
assert dup_zz_heu_gcd([2], [], ZZ) == ([2], [1], [])
assert dup_rr_prs_gcd([2], [], ZZ) == ([2], [1], [])
assert dup_zz_heu_gcd([-2], [], ZZ) == ([2], [-1], [])
assert dup_rr_prs_gcd([-2], [], ZZ) == ([2], [-1], [])
assert dup_zz_heu_gcd([], [-2], ZZ) == ([2], [], [-1])
assert dup_rr_prs_gcd([], [-2], ZZ) == ([2], [], [-1])
assert dup_zz_heu_gcd([], [2,4], ZZ) == ([2,4], [], [1])
assert dup_rr_prs_gcd([], [2,4], ZZ) == ([2,4], [], [1])
assert dup_zz_heu_gcd([2,4], [], ZZ) == ([2,4], [1], [])
assert dup_rr_prs_gcd([2,4], [], ZZ) == ([2,4], [1], [])
assert dup_zz_heu_gcd([2], [2], ZZ) == ([2], [1], [1])
assert dup_rr_prs_gcd([2], [2], ZZ) == ([2], [1], [1])
assert dup_zz_heu_gcd([-2], [2], ZZ) == ([2], [-1], [1])
assert dup_rr_prs_gcd([-2], [2], ZZ) == ([2], [-1], [1])
assert dup_zz_heu_gcd([2], [-2], ZZ) == ([2], [1], [-1])
assert dup_rr_prs_gcd([2], [-2], ZZ) == ([2], [1], [-1])
assert dup_zz_heu_gcd([-2], [-2], ZZ) == ([2], [-1], [-1])
assert dup_rr_prs_gcd([-2], [-2], ZZ) == ([2], [-1], [-1])
assert dup_zz_heu_gcd([1,2,1], [1], ZZ) == ([1], [1, 2, 1], [1])
assert dup_rr_prs_gcd([1,2,1], [1], ZZ) == ([1], [1, 2, 1], [1])
assert dup_zz_heu_gcd([1,2,1], [2], ZZ) == ([1], [1, 2, 1], [2])
assert dup_rr_prs_gcd([1,2,1], [2], ZZ) == ([1], [1, 2, 1], [2])
assert dup_zz_heu_gcd([2,4,2], [2], ZZ) == ([2], [1, 2, 1], [1])
assert dup_rr_prs_gcd([2,4,2], [2], ZZ) == ([2], [1, 2, 1], [1])
assert dup_zz_heu_gcd([2], [2,4,2], ZZ) == ([2], [1], [1, 2, 1])
assert dup_rr_prs_gcd([2], [2,4,2], ZZ) == ([2], [1], [1, 2, 1])
assert dup_zz_heu_gcd([2,4,2], [1,1], ZZ) == ([1, 1], [2, 2], [1])
assert dup_rr_prs_gcd([2,4,2], [1,1], ZZ) == ([1, 1], [2, 2], [1])
assert dup_zz_heu_gcd([1,1], [2,4,2], ZZ) == ([1, 1], [1], [2, 2])
assert dup_rr_prs_gcd([1,1], [2,4,2], ZZ) == ([1, 1], [1], [2, 2])
f, g = [1, -31], [1, 0]
assert dup_zz_heu_gcd(f, g, ZZ) == ([1], f, g)
assert dup_rr_prs_gcd(f, g, ZZ) == ([1], f, g)
f = [1,8,21,22,8]
g = [1,6,11,6]
h = [1,3,2]
cff = [1,5,4]
cfg = [1,3]
assert dup_zz_heu_gcd(f, g, ZZ) == (h, cff, cfg)
assert dup_rr_prs_gcd(f, g, ZZ) == (h, cff, cfg)
f = [1,0,0,0,-4]
g = [1,0,4,0, 4]
h = [1,0,2]
cff = [1,0,-2]
cfg = [1,0, 2]
assert dup_zz_heu_gcd(f, g, ZZ) == (h, cff, cfg)
assert dup_rr_prs_gcd(f, g, ZZ) == (h, cff, cfg)
f = [1,0,1,0,-3,-3,8,2,-5]
g = [3,0,5,-0,-4,-9,21]
h = [1]
cff = f
cfg = g
assert dup_zz_heu_gcd(f, g, ZZ) == (h, cff, cfg)
assert dup_rr_prs_gcd(f, g, ZZ) == (h, cff, cfg)
f = dup_normal([1,0,1,0,-3,-3,8,2,-5], QQ)
g = dup_normal([3,0,5,-0,-4,-9,21], QQ)
h = dup_normal([1], QQ)
assert dup_qq_heu_gcd(f, g, QQ) == (h, cff, cfg)
assert dup_ff_prs_gcd(f, g, QQ) == (h, cff, cfg)
f = [-352518131239247345597970242177235495263669787845475025293906825864749649589178600387510272,
0, 0, 0, 0, 0, 0,
46818041807522713962450042363465092040687472354933295397472942006618953623327997952,
0, 0, 0, 0, 0, 0,
378182690892293941192071663536490788434899030680411695933646320291525827756032,
0, 0, 0, 0, 0, 0,
112806468807371824947796775491032386836656074179286744191026149539708928,
0, 0, 0, 0, 0, 0,
-12278371209708240950316872681744825481125965781519138077173235712,
0, 0, 0, 0, 0, 0,
289127344604779611146960547954288113529690984687482920704,
0, 0, 0, 0, 0, 0,
19007977035740498977629742919480623972236450681,
0, 0, 0, 0, 0, 0,
311973482284542371301330321821976049]
g = [365431878023781158602430064717380211405897160759702125019136,
0, 0, 0, 0, 0, 0,
197599133478719444145775798221171663643171734081650688,
0, 0, 0, 0, 0, 0,
-9504116979659010018253915765478924103928886144,
0, 0, 0, 0, 0, 0,
-311973482284542371301330321821976049]
f = dup_normal(f, ZZ)
g = dup_normal(g, ZZ)
assert dup_zz_heu_gcd(f, dup_diff(f, 1, ZZ), ZZ)[0] == g
assert dup_rr_prs_gcd(f, dup_diff(f, 1, ZZ), ZZ)[0] == g
f = [QQ(1,2),QQ(1),QQ(1,2)]
g = [QQ(1,2),QQ(1,2)]
h = [QQ(1), QQ(1)]
assert dup_qq_heu_gcd(f, g, QQ) == (h, g, [QQ(1,2)])
assert dup_ff_prs_gcd(f, g, QQ) == (h, g, [QQ(1,2)])
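# Worked example, added for illustration (not part of the original tests): each
# GCD routine returns a triple (h, cff, cfg) with h = gcd(f, g), cff = f/h and
# cfg = g/h.  For the case f = [1,8,21,22,8], g = [1,6,11,6] above:
#
#     x**4 + 8*x**3 + 21*x**2 + 22*x + 8 = (x**2 + 3*x + 2)*(x**2 + 5*x + 4)
#     x**3 + 6*x**2 + 11*x + 6           = (x**2 + 3*x + 2)*(x + 3)
#
# so h = [1,3,2], cff = [1,5,4] and cfg = [1,3], matching the expected values.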
def test_dmp_gcd():
assert dmp_zz_heu_gcd([[]], [[]], 1, ZZ) == ([[]], [[]], [[]])
assert dmp_rr_prs_gcd([[]], [[]], 1, ZZ) == ([[]], [[]], [[]])
assert dmp_zz_heu_gcd([[2]], [[]], 1, ZZ) == ([[2]], [[1]], [[]])
assert dmp_rr_prs_gcd([[2]], [[]], 1, ZZ) == ([[2]], [[1]], [[]])
assert dmp_zz_heu_gcd([[-2]], [[]], 1, ZZ) == ([[2]], [[-1]], [[]])
assert dmp_rr_prs_gcd([[-2]], [[]], 1, ZZ) == ([[2]], [[-1]], [[]])
assert dmp_zz_heu_gcd([[]], [[-2]], 1, ZZ) == ([[2]], [[]], [[-1]])
assert dmp_rr_prs_gcd([[]], [[-2]], 1, ZZ) == ([[2]], [[]], [[-1]])
assert dmp_zz_heu_gcd([[]], [[2],[4]], 1, ZZ) == ([[2],[4]], [[]], [[1]])
assert dmp_rr_prs_gcd([[]], [[2],[4]], 1, ZZ) == ([[2],[4]], [[]], [[1]])
assert dmp_zz_heu_gcd([[2],[4]], [[]], 1, ZZ) == ([[2],[4]], [[1]], [[]])
assert dmp_rr_prs_gcd([[2],[4]], [[]], 1, ZZ) == ([[2],[4]], [[1]], [[]])
assert dmp_zz_heu_gcd([[2]], [[2]], 1, ZZ) == ([[2]], [[1]], [[1]])
assert dmp_rr_prs_gcd([[2]], [[2]], 1, ZZ) == ([[2]], [[1]], [[1]])
assert dmp_zz_heu_gcd([[-2]], [[2]], 1, ZZ) == ([[2]], [[-1]], [[1]])
assert dmp_rr_prs_gcd([[-2]], [[2]], 1, ZZ) == ([[2]], [[-1]], [[1]])
assert dmp_zz_heu_gcd([[2]], [[-2]], 1, ZZ) == ([[2]], [[1]], [[-1]])
assert dmp_rr_prs_gcd([[2]], [[-2]], 1, ZZ) == ([[2]], [[1]], [[-1]])
assert dmp_zz_heu_gcd([[-2]], [[-2]], 1, ZZ) == ([[2]], [[-1]], [[-1]])
assert dmp_rr_prs_gcd([[-2]], [[-2]], 1, ZZ) == ([[2]], [[-1]], [[-1]])
assert dmp_zz_heu_gcd([[1],[2],[1]], [[1]], 1, ZZ) == ([[1]], [[1], [2], [1]], [[1]])
assert dmp_rr_prs_gcd([[1],[2],[1]], [[1]], 1, ZZ) == ([[1]], [[1], [2], [1]], [[1]])
assert dmp_zz_heu_gcd([[1],[2],[1]], [[2]], 1, ZZ) == ([[1]], [[1], [2], [1]], [[2]])
assert dmp_rr_prs_gcd([[1],[2],[1]], [[2]], 1, ZZ) == ([[1]], [[1], [2], [1]], [[2]])
assert dmp_zz_heu_gcd([[2],[4],[2]], [[2]], 1, ZZ) == ([[2]], [[1], [2], [1]], [[1]])
assert dmp_rr_prs_gcd([[2],[4],[2]], [[2]], 1, ZZ) == ([[2]], [[1], [2], [1]], [[1]])
assert dmp_zz_heu_gcd([[2]], [[2],[4],[2]], 1, ZZ) == ([[2]], [[1]], [[1], [2], [1]])
assert dmp_rr_prs_gcd([[2]], [[2],[4],[2]], 1, ZZ) == ([[2]], [[1]], [[1], [2], [1]])
assert dmp_zz_heu_gcd([[2],[4],[2]], [[1],[1]], 1, ZZ) == ([[1], [1]], [[2], [2]], [[1]])
assert dmp_rr_prs_gcd([[2],[4],[2]], [[1],[1]], 1, ZZ) == ([[1], [1]], [[2], [2]], [[1]])
assert dmp_zz_heu_gcd([[1],[1]], [[2],[4],[2]], 1, ZZ) == ([[1], [1]], [[1]], [[2], [2]])
assert dmp_rr_prs_gcd([[1],[1]], [[2],[4],[2]], 1, ZZ) == ([[1], [1]], [[1]], [[2], [2]])
assert dmp_zz_heu_gcd([[[[1,2,1]]]], [[[[2,2]]]], 3, ZZ) == ([[[[1,1]]]], [[[[1,1]]]], [[[[2]]]])
assert dmp_rr_prs_gcd([[[[1,2,1]]]], [[[[2,2]]]], 3, ZZ) == ([[[[1,1]]]], [[[[1,1]]]], [[[[2]]]])
f, g = [[[[1,2,1],[1,1],[]]]], [[[[1,2,1]]]]
h, cff, cfg = [[[[1,1]]]], [[[[1,1],[1],[]]]], [[[[1,1]]]]
assert dmp_zz_heu_gcd(f, g, 3, ZZ) == (h, cff, cfg)
assert dmp_rr_prs_gcd(f, g, 3, ZZ) == (h, cff, cfg)
assert dmp_zz_heu_gcd(g, f, 3, ZZ) == (h, cfg, cff)
assert dmp_rr_prs_gcd(g, f, 3, ZZ) == (h, cfg, cff)
f, g, h = dmp_fateman_poly_F_1(2, ZZ)
H, cff, cfg = dmp_zz_heu_gcd(f, g, 2, ZZ)
assert H == h and dmp_mul(H, cff, 2, ZZ) == f \
and dmp_mul(H, cfg, 2, ZZ) == g
H, cff, cfg = dmp_rr_prs_gcd(f, g, 2, ZZ)
assert H == h and dmp_mul(H, cff, 2, ZZ) == f \
and dmp_mul(H, cfg, 2, ZZ) == g
f, g, h = dmp_fateman_poly_F_1(4, ZZ)
H, cff, cfg = dmp_zz_heu_gcd(f, g, 4, ZZ)
assert H == h and dmp_mul(H, cff, 4, ZZ) == f \
and dmp_mul(H, cfg, 4, ZZ) == g
f, g, h = dmp_fateman_poly_F_1(6, ZZ)
H, cff, cfg = dmp_zz_heu_gcd(f, g, 6, ZZ)
assert H == h and dmp_mul(H, cff, 6, ZZ) == f \
and dmp_mul(H, cfg, 6, ZZ) == g
f, g, h = dmp_fateman_poly_F_1(8, ZZ)
H, cff, cfg = dmp_zz_heu_gcd(f, g, 8, ZZ)
assert H == h and dmp_mul(H, cff, 8, ZZ) == f \
and dmp_mul(H, cfg, 8, ZZ) == g
f, g, h = dmp_fateman_poly_F_2(2, ZZ)
H, cff, cfg = dmp_zz_heu_gcd(f, g, 2, ZZ)
assert H == h and dmp_mul(H, cff, 2, ZZ) == f \
and dmp_mul(H, cfg, 2, ZZ) == g
H, cff, cfg = dmp_rr_prs_gcd(f, g, 2, ZZ)
assert H == h and dmp_mul(H, cff, 2, ZZ) == f \
and dmp_mul(H, cfg, 2, ZZ) == g
f, g, h = dmp_fateman_poly_F_3(2, ZZ)
H, cff, cfg = dmp_zz_heu_gcd(f, g, 2, ZZ)
assert H == h and dmp_mul(H, cff, 2, ZZ) == f \
and dmp_mul(H, cfg, 2, ZZ) == g
H, cff, cfg = dmp_rr_prs_gcd(f, g, 2, ZZ)
assert H == h and dmp_mul(H, cff, 2, ZZ) == f \
and dmp_mul(H, cfg, 2, ZZ) == g
f, g, h = dmp_fateman_poly_F_3(4, ZZ)
H, cff, cfg = dmp_inner_gcd(f, g, 4, ZZ)
assert H == h and dmp_mul(H, cff, 4, ZZ) == f \
and dmp_mul(H, cfg, 4, ZZ) == g
f = [[QQ(1,2)],[QQ(1)],[QQ(1,2)]]
g = [[QQ(1,2)],[QQ(1,2)]]
h = [[QQ(1)],[QQ(1)]]
assert dmp_qq_heu_gcd(f, g, 1, QQ) == (h, g, [[QQ(1,2)]])
assert dmp_ff_prs_gcd(f, g, 1, QQ) == (h, g, [[QQ(1,2)]])
def test_dup_lcm():
assert dup_lcm([2], [6], ZZ) == [6]
assert dup_lcm([2,0,0,0], [6,0], ZZ) == [6,0,0,0]
assert dup_lcm([2,0,0,0], [3,0], ZZ) == [6,0,0,0]
assert dup_lcm([1,1,0], [1,0], ZZ) == [1,1,0]
assert dup_lcm([1,1,0], [2,0], ZZ) == [2,2,0]
assert dup_lcm([1,2,0], [1,0], ZZ) == [1,2,0]
assert dup_lcm([2,1,0], [1,0], ZZ) == [2,1,0]
assert dup_lcm([2,1,0], [2,0], ZZ) == [4,2,0]
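# Sanity check, added for illustration (not part of the original tests): over
# ZZ the expected values satisfy gcd(f, g)*lcm(f, g) == f*g up to sign and
# content.  E.g. for f = x**2 + x ([1,1,0]) and g = 2*x ([2,0]) the gcd is x
# and the lcm is 2*x**2 + 2*x ([2,2,0]); indeed x*(2*x**2 + 2*x) equals
# (x**2 + x)*(2*x) = 2*x**3 + 2*x**2.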
def test_dmp_lcm():
assert dmp_lcm([[2]], [[6]], 1, ZZ) == [[6]]
assert dmp_lcm([[1],[]], [[1,0]], 1, ZZ) == [[1,0],[]]
assert dmp_lcm([[2],[],[],[]], [[6,0,0],[]], 1, ZZ) == [[6,0,0],[],[],[]]
assert dmp_lcm([[2],[],[],[]], [[3,0,0],[]], 1, ZZ) == [[6,0,0],[],[],[]]
assert dmp_lcm([[1,0],[],[]], [[1,0,0],[]], 1, ZZ) == [[1,0,0],[],[]]
f = [[2,-3,-2,3,0,0],[]]
g = [[1,0,-2,0,1,0]]
h = [[2,-3,-4,6,2,-3,0,0],[]]
assert dmp_lcm(f, g, 1, ZZ) == h
f = [[1],[-3,0],[-9,0,0],[-5,0,0,0]]
g = [[1],[6,0],[12,0,0],[10,0,0,0],[3,0,0,0,0]]
h = [[1],[1,0],[-18,0,0],[-50,0,0,0],[-47,0,0,0,0],[-15,0,0,0,0,0]]
assert dmp_lcm(f, g, 1, ZZ) == h
def test_dmp_content():
assert dmp_content([[-2]], 1, ZZ) == [2]
f, g, F = [ZZ(3),ZZ(2),ZZ(1)], [ZZ(1)], []
for i in xrange(0, 5):
g = dup_mul(g, f, ZZ)
F.insert(0, g)
assert dmp_content(F, 1, ZZ) == f
assert dmp_one_p(dmp_content(f_4, 2, ZZ), 1, ZZ)
assert dmp_one_p(dmp_content(f_5, 2, ZZ), 1, ZZ)
assert dmp_one_p(dmp_content(f_6, 3, ZZ), 2, ZZ)
def test_dmp_primitive():
assert dmp_primitive([[]], 1, ZZ) == ([], [[]])
assert dmp_primitive([[1]], 1, ZZ) == ([1], [[1]])
f, g, F = [ZZ(3),ZZ(2),ZZ(1)], [ZZ(1)], []
for i in xrange(0, 5):
g = dup_mul(g, f, ZZ)
F.insert(0, g)
assert dmp_primitive(F, 1, ZZ) == (f,
[ dup_exquo(c, f, ZZ) for c in F ])
cont, f = dmp_primitive(f_4, 2, ZZ)
assert dmp_one_p(cont, 1, ZZ) and f == f_4
cont, f = dmp_primitive(f_5, 2, ZZ)
assert dmp_one_p(cont, 1, ZZ) and f == f_5
cont, f = dmp_primitive(f_6, 3, ZZ)
assert dmp_one_p(cont, 2, ZZ) and f == f_6
def test_dup_cancel():
f = ZZ.map([2, 0, -2])
g = ZZ.map([1, -2, 1])
p = [ZZ(2), ZZ(2)]
q = [ZZ(1), -ZZ(1)]
assert dup_cancel(f, g, ZZ) == (p, q)
assert dup_cancel(f, g, ZZ, multout=False) == (ZZ(1), ZZ(1), p, q)
f = [-ZZ(1),-ZZ(2)]
g = [ ZZ(3),-ZZ(4)]
F = [ ZZ(1), ZZ(2)]
G = [-ZZ(3), ZZ(4)]
    assert dup_cancel(f, g, ZZ) == (f, g)
    assert dup_cancel(F, G, ZZ) == (f, g)
def test_dmp_cancel():
f = ZZ.map([[2], [0], [-2]])
g = ZZ.map([[1], [-2], [1]])
p = [[ZZ(2)], [ZZ(2)]]
q = [[ZZ(1)], [-ZZ(1)]]
assert dmp_cancel(f, g, 1, ZZ) == (p, q)
assert dmp_cancel(f, g, 1, ZZ, multout=False) == (ZZ(1), ZZ(1), p, q)
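# Worked example, added for illustration (not part of the original tests):
# dup_cancel removes the common factor of a rational function.  Above,
# [2, 0, -2] encodes 2*x**2 - 2 and [1, -2, 1] encodes x**2 - 2*x + 1, and
#
#     (2*x**2 - 2)/(x**2 - 2*x + 1) = 2*(x - 1)*(x + 1)/(x - 1)**2
#                                   = (2*x + 2)/(x - 1),
#
# which is exactly the expected pair (p, q) == ([2, 2], [1, -1]).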
|
bsd-3-clause
| 8,572,424,864,415,290,000
| 32.970732
| 101
| 0.448736
| false
| 2.235873
| true
| false
| false
|
botswana-harvard/eit
|
eit/config/urls.py
|
1
|
3578
|
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.conf.urls import patterns, include, url
from django.contrib.auth.decorators import login_required
from django.conf import settings
from django.views.generic import RedirectView
from django.db.models import get_models
import django_databrowse
from edc.subject.rule_groups.classes import site_rule_groups
from edc.subject.lab_tracker.classes import site_lab_tracker
from edc.dashboard.section.classes import site_sections
from edc.subject.visit_schedule.classes import site_visit_schedules
from edc.lab.lab_profile.classes import site_lab_profiles
from edc.dashboard.subject.views import additional_requisition
from django.contrib import admin
admin.autodiscover()
site_lab_profiles.autodiscover()
from dajaxice.core import dajaxice_autodiscover, dajaxice_config
dajaxice_autodiscover()
from eit.apps.eit.eit_app_configuration.classes import EitAppConfiguration
EitAppConfiguration().prepare()
site_visit_schedules.autodiscover()
site_visit_schedules.build_all()
site_rule_groups.autodiscover()
site_lab_tracker.autodiscover()
# data_manager.prepare()
site_sections.autodiscover()
site_sections.update_section_lists()
APP_NAME = settings.APP_NAME
for model in get_models():
try:
django_databrowse.site.register(model)
except:
pass
urlpatterns = patterns(
'',
(r'^admin/doc/', include('django.contrib.admindocs.urls')),
(r'^admin/logout/$', RedirectView.as_view(url='/{app_name}/logout/'.format(app_name=APP_NAME))),
(r'^admin/', include(admin.site.urls)),
)
urlpatterns += patterns(
'',
url(dajaxice_config.dajaxice_url, include('dajaxice.urls')),
)
# this is for additional_requisitions
urlpatterns += patterns(
'',
url(r'^{app_name}/dashboard/visit/add_requisition/'.format(app_name=APP_NAME),
additional_requisition,
name="add_requisition"),
)
urlpatterns += patterns(
'',
url(r'^databrowse/(.*)', login_required(django_databrowse.site.root)),
)
urlpatterns += patterns(
'',
url(r'^{app_name}/section/labclinic/'.format(app_name=APP_NAME),
include('edc.lab.lab_clinic_api.urls'), name="section_url_name"),
)
urlpatterns += patterns(
'',
url(r'^{app_name}/dashboard/'.format(app_name=APP_NAME),
include('eit.apps.{app_name}_dashboard.urls'.format(app_name=APP_NAME))),
)
urlpatterns += patterns(
'',
url(r'^{app_name}/login/'.format(app_name=APP_NAME),
'django.contrib.auth.views.login',
name='{app_name}_login'.format(app_name=APP_NAME)),
url(r'^{app_name}/logout/'.format(app_name=APP_NAME),
'django.contrib.auth.views.logout_then_login',
name='{app_name}_logout'.format(app_name=APP_NAME)),
url(r'^{app_name}/password_change/'.format(app_name=APP_NAME),
'django.contrib.auth.views.password_change',
name='password_change_url'.format(app_name=APP_NAME)),
url(r'^{app_name}/password_change_done/'.format(app_name=APP_NAME),
'django.contrib.auth.views.password_change_done',
name='password_change_done'.format(app_name=APP_NAME)),
)
urlpatterns += patterns(
'',
url(r'^{app_name}/section/'.format(app_name=APP_NAME), include('edc.dashboard.section.urls'), name='section'),
)
urlpatterns += patterns(
'',
url(r'^{app_name}/$'.format(app_name=APP_NAME),
RedirectView.as_view(url='/{app_name}/section/'.format(app_name=APP_NAME))),
url(r'', RedirectView.as_view(url='/{app_name}/section/'.format(app_name=APP_NAME))),
)
urlpatterns += staticfiles_urlpatterns()
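# Illustration only (the APP_NAME value is assumed, not read from settings):
# with APP_NAME = 'eit', the dashboard pattern above resolves to
# r'^eit/dashboard/' and includes 'eit.apps.eit_dashboard.urls', while the
# login URL is named 'eit_login', so templates can reverse it with
# {% url 'eit_login' %}.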
|
gpl-3.0
| -2,307,491,408,388,197,000
| 32.754717
| 114
| 0.707658
| false
| 3.264599
| false
| false
| false
|
vaginessa/Pentesting-Scripts
|
InSpy.py
|
1
|
8805
|
#!/usr/bin/python
# InSpy - A LinkedIn employee enumerator
# This script enumerates employees from any organization
# using LinkedIn. Please note that this will not harvest all
# employees within a given organization.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Jonathan Broche
# Contact: @g0jhonny
# Version: 1.0
# Date: 2015-08-10
#
# usage: ./inspy.py -c <company> [-d dept/title] [-e email output format] [-i input file with dept/titles] [-o output file]
# example: ./inspy.py -c abc -e flast@abc.com -o abc_employees.txt
import requests, BeautifulSoup, argparse, signal, time, datetime, os
start_time = time.time()
class colors:
lightblue = "\033[1;36m"
blue = "\033[1;34m"
normal = "\033[0;00m"
red = "\033[1;31m"
yellow = "\033[1;33m"
white = "\033[1;37m"
green = "\033[1;32m"
#----------------------------------------#
# HARVEST USERS #
#----------------------------------------#
def inspy_enum(company, dept, ifile):
try:
dept_dictionary = ['sales', 'hr', 'marketing', 'finance', 'accounting', 'director', 'administrative', 'lawyer', 'it', 'security']
employees = {}
if dept is not None:
dept_dictionary = [dept.lower()]
if ifile is not None:
try:
if os.path.exists(ifile):
with open(ifile, 'r') as f:
dept_dictionary = []
for line in f.readlines():
if line.rstrip():
dept_dictionary.append(line.rstrip())
except IOError as e:
print "{}[!]{} Problem opening the file. {}".format(e)
for dd in dept_dictionary:
print "{}[*]{} Searching for employees working at {} with '{}' in their title".format(colors.lightblue, colors.normal, company, dd)
try:
                response = requests.get('https://www.linkedin.com/title/{}-at-{}'.format(dd.replace(' ', '-'), company.replace(' ', '-')), timeout=2)
if response.status_code == 200:
soup = BeautifulSoup.BeautifulSoup(response.text)
else:
raise Exception
            except requests.exceptions.Timeout:
                print "{}[!]{} Timeout enumerating the {} department".format(colors.red, colors.normal, dd)
                continue  # nothing was fetched, so skip this department
            except requests.exceptions.ConnectionError:
                print "{}[!]{} Connection error.".format(colors.red, colors.normal)
                continue
            except requests.exceptions.HTTPError:
                print "{}[!]{} HTTP error.".format(colors.red, colors.normal)
                continue
#get employee names
for n, t in zip(soup.findAll('h3', { "class" : "name" }), soup.findAll('p', { "class" : "headline" })):
name = u''.join(n.getText()).encode('utf-8')
title = u''.join(t.getText()).encode('utf-8')
if not name in employees:
employees[name] = title
return employees
except Exception as e:
print "{}[!]{} Error harvesting users. {}".format(colors.red, colors.normal, e)
#----------------------------------------#
# EMAILS #
#----------------------------------------#
def format_email(names, eformat):
emails = []
for name in names:
spaces = []
for x,y in enumerate(name):
if ' ' in y:
spaces.append(x)
if eformat[:eformat.find('@')] == 'flast':
emails.append('{}{}{}'.format(name[0], name[(spaces[-1]+1):], eformat[eformat.find('@'):]))
elif eformat[:eformat.find('@')] == 'lfirst':
emails.append('{}{}{}'.format(name[spaces[-1]+1], name[0:spaces[0]], eformat[eformat.find('@'):]))
elif eformat[:eformat.find('@')] == 'first.last':
emails.append('{}.{}{}'.format(name[0:spaces[0]], name[(spaces[-1]+1):], eformat[eformat.find('@'):]))
elif eformat[:eformat.find('@')] == 'last.first':
emails.append('{}.{}{}'.format(name[(spaces[-1]+1):], name[0:spaces[0]], eformat[eformat.find('@'):]))
return [e.lower() for e in emails]
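# Illustration only (the name and domain below are hypothetical): for the
# single name 'Jane Doe', the formats handled above would yield
#
#     flast@acme.com       -> jdoe@acme.com
#     lfirst@acme.com      -> djane@acme.com
#     first.last@acme.com  -> jane.doe@acme.com
#     last.first@acme.com  -> doe.jane@acme.com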
#----------------------------------------#
# OUTPUT #
#----------------------------------------#
def output(employees, email, company, ofile):
counter = 0
ge, be = {}, {}
print '\n'
if email:
for k, e in zip(employees, email):
if company in employees[k].lower():
if ',' in k:
be[e] = '{}, {}'.format(k, employees[k])
else:
ge[e] = '{}, {}'.format(k, employees[k])
print "{}[*]{} {}, {}, {}".format(colors.green, colors.normal, k.replace('&', '&'), employees[k].replace('&', '&'), e)
counter +=1
else:
for k in employees:
if company in employees[k].lower():
ge[k] = employees[k]
print "{}[*]{} {} {}".format(colors.green, colors.normal, k.replace('&', '&'), employees[k].replace('&', '&'))
counter +=1
if be:
print "\n{}[!]{} The following employees have commas in their names. Their emails were not accurate.".format(colors.red, colors.normal)
for k in be:
print "{}[*]{} {}".format(colors.yellow, colors.normal, be[k])
if ofile:
with open(ofile, 'w') as f:
f.write("\n" + "-" * 69 + "\n" + "InSpy Output" + "\n" + "-" * 69 + "\n\n")
if [e for e in ge.keys() if '@' in e]: #if emails in keys
f.write("\n" + "E-mails" + "\n" + "-" * 25 + "\n\n")
for k in ge.keys():
f.write(k+'\n')
f.write("\n" + "All" + "\n" + "-" * 25 + "\n\n")
for k in ge:
f.write('{}, {}\n'.format(ge[k], k))
else:
for k in ge:
f.write('{}, {}\n'.format(k, ge[k]))
print "\n{}[*]{} Done! {}{}{} employees found.".format(colors.lightblue, colors.normal, colors.green, counter, colors.normal)
print "{}[*]{} Completed in {:.1f}s\n".format(colors.lightblue, colors.normal, time.time()-start_time)
#----------------------------------------#
# MAIN #
#----------------------------------------#
def main():
print "\n " + "-" * 74 + "\n " + colors.white + "InSpy v1.0 - LinkedIn Employee Enumerator by Jonathan Broche (@g0jhonny)\n " + colors.normal + "-" * 74 + "\n "
parser = argparse.ArgumentParser(description='InSpy - A LinkedIn employee enumerator by Jonathan Broche (@g0jhonny)')
parser.add_argument('-c', '--company', required=True, help='Company name')
parser.add_argument('-d', '--dept', nargs='?', const='', help='Department or title to query employees against. Inspy searches through a predefined list by default.')
    parser.add_argument('-e', '--emailformat', help='Email output format. Acceptable formats: first.last@xyz.com, last.first@xyz.com, flast@xyz.com, lfirst@xyz.com')
parser.add_argument('-i', '--inputfilename', nargs='?', const='', help='File with list of departments or titles to query employees against (one item per line)')
parser.add_argument('-o', '--outfilename', nargs='?', const='', help='Output results to text file')
args = parser.parse_args()
employees = inspy_enum(args.company, args.dept, args.inputfilename)
if args.emailformat:
if args.emailformat.find('@') and args.emailformat[:args.emailformat.find('@')] in {'flast', 'lfirst', 'first.last', 'last.first'}:
e = format_email(employees.keys(), args.emailformat)
output(employees, e,args.company.lower(), args.outfilename)
else:
print "{}[!]{} Please provide a valid email address format (i.e., flast@xyz.com, lfirst@xyz.com, first.last@xyz.com, last.first@xyz.com)".format(colors.red, colors.normal)
else:
if employees is not None:
output(employees,'',args.company.lower(), args.outfilename)
if __name__ == '__main__':
main()
|
gpl-3.0
| -8,991,955,847,205,722,000
| 44.864583
| 183
| 0.527655
| false
| 3.738854
| false
| false
| false
|
r3dact3d/tweeter
|
tweepy/chucknorrisTwitBot.py
|
1
|
1485
|
#!/usr/bin/env python
# written by r3dact3d (brady)
import requests
import tweepy
from random import choice
from config import *
'''
Chucknorris.io is free and will always be! However, as maintaining this service costs $$$,
we are glad to be sponsored by Jugendstil.io.
'''
# Available Categories - I did it this way so specific categories could be removed if you want... but Chuck would not approve.
chuckagories = ["explicit", "dev", "movie", "food", "celebrity", "science", "sport", "political", "religion", "animal", "history", "music", "travel", "career", "money", "fashion"]
chuckagory = choice(chuckagories)
url = 'https://api.chucknorris.io/jokes/random'
# Set up OAuth and integrate with API
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)
api = tweepy.API(auth)
def tweet(payLoad):
try:
print(payLoad)
if payLoad != '\n':
api.update_status(payLoad)
else:
pass
except tweepy.TweepError as e:
print(e.reason)
def chuck(url, params):
myParams = {
        'category' : params,
}
page = requests.get(url, params=myParams)
if page.status_code == 200:
output = page.json()
chuckSays = output['value']
payLoad = '#chucknorris "%s"' % (chuckSays[:125])
tweet(payLoad)
else:
print('Something went wrong with the API, someone is in big trouble if Chuck finds out!')
exit()
chuck(url, chuckagory)
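# Sketch of the expected config.py (the variable names match the ones imported
# above; the values are placeholders, not real credentials):
#
#     # config.py
#     CONSUMER_KEY = 'your-consumer-key'
#     CONSUMER_SECRET = 'your-consumer-secret'
#     ACCESS_TOKEN = 'your-access-token'
#     ACCESS_SECRET = 'your-access-secret'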
|
unlicense
| 7,604,012,989,098,353,000
| 29.9375
| 179
| 0.657912
| false
| 3.132911
| false
| false
| false
|
Dikovinka/HRCS
|
API/permissions.py
|
1
|
2558
|
from rest_framework import permissions
from API.models import *
class IsOwnerOrReadOnly(permissions.BasePermission):
"""
Custom permission to only allow owners of an object to edit it.
"""
def has_object_permission(self, request, view, obj):
# Read permissions are allowed to any request,
# so we'll always allow GET, HEAD or OPTIONS requests.
if request.method in permissions.SAFE_METHODS:
return True
# Write permissions are only allowed to the owner of the snippet.
return obj.owner == request.user
class OnlyPMorQALeadCanEdit(permissions.BasePermission):
"""
Custom permission to only allow PM and QA Leads to some object.
"""
def has_object_permission(self, request, view, obj):
if isinstance(obj, Project):
project = obj
elif isinstance(obj, (ProjectTeam, Issue)):
project = obj.project
elif isinstance(obj, (Worklog, IssueAttachment, IssueLink, Comment)):
project = obj.issue.project
else:
return False
leads = ProjectTeam.objects.filter(project=project, team_role__in=['PM', 'QALEAD'])
team = ProjectTeam.objects.filter(project=project)
# Read permissions are allowed to any request,
# so we'll always allow GET, HEAD or OPTIONS requests.
if request.method in permissions.SAFE_METHODS and request.user in [member.user for member in team]:
return True
# Write permissions are only allowed to the qa lead or PM
if request.user in [member.user for member in leads]:
return True
# Superuser has full access to all endpoints
return request.user and request.user.is_staff
class IsProjectTeamOnly(permissions.BasePermission):
"""
Custom permission to only allow PM and QA Leads to some object.
"""
def has_object_permission(self, request, view, obj):
if isinstance(obj, Project):
project = obj
elif isinstance(obj, (ProjectTeam, Issue)):
project = obj.project
elif isinstance(obj, (Worklog, IssueAttachment, IssueLink, Comment)):
project = obj.issue.project
else:
return False
team = ProjectTeam.objects.filter(project=project)
# Write permissions are only allowed to the project team
if request.user in [member.user for member in team]:
return True
# Superuser has full access to all endpoints
        return request.user and request.user.is_staff
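

# Hedged usage sketch (not part of the original module): these permission
# classes are meant to be plugged into DRF views via permission_classes.  The
# viewset and serializer below are hypothetical illustrations; only Issue and
# OnlyPMorQALeadCanEdit come from this project.
from rest_framework import serializers, viewsets


class _SketchIssueSerializer(serializers.ModelSerializer):  # hypothetical
    class Meta:
        model = Issue
        fields = '__all__'


class _SketchIssueViewSet(viewsets.ModelViewSet):  # hypothetical
    # Team members get read-only access; only the PM or QA lead may modify.
    queryset = Issue.objects.all()
    serializer_class = _SketchIssueSerializer
    permission_classes = [OnlyPMorQALeadCanEdit]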
|
apache-2.0
| -8,638,617,535,118,737,000
| 37.772727
| 107
| 0.654808
| false
| 4.543517
| false
| false
| false
|