| repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| insertion/opinion-mining | Spider/scrapy/getComment/getComment/spiders/getcomment.py | 1 | 2917 |
# -*- coding: utf-8 -*-
import scrapy
import json
from getComment.items import GetcommentItem
import codecs
# Cookies must be included; otherwise some pages cannot be accessed (no permission)
class GetcommentSpider(scrapy.Spider):
name = "getComment"
allowed_domains = ["douban.com"]
cookie={ '__utma':"30149280.901747088.1445074673.1463148044.1463205092.69",
'__utma':"223695111.47263706.1446025707.1463148044.1463205092.27",
'__utmb':"30149280.0.10.1463205092",
'__utmb':"223695111.0.10.1463205092",
'__utmc':"30149280",
'__utmc':"223695111",
'__utmv':"30149280.13938",
'__utmz':"30149280.1463051064.63.51.utmcsr=baidu|utmccn=(organic)|utmcmd=organic",
'__utmz':"223695111.1463035423.19.13.utmcsr=baidu|utmccn=(organic)|utmcmd=organic",
'_pk_id.100001.4cf6':"54f6d2f316960e51.1446025708.27.1463205379.1463148922.",
'_pk_ref.100001.4cf6':'["","",1463204969,"http://www.baidu.com/link?url=YQLEs5QV1zmk47dXRps0dqtoMVwYwRFUN5-N9639eoU21p9BFeaxhNRstgUq9Vvs&wd=&eqid=f68d50f40003ae9a000000035734261a"]',
'_pk_ses.100001.4cf6':"*",
'ap':"1",
'bid':'"8P5Iz4n5Ws8"',
'ck':"8vtY",
'ct':"y",
'dbcl2':'"59034306:TCI0yjpqBT4"',
'gr_user_id':"8121958b-b647-4f44-bc4a-6ce28baf2d5d",
'll':'"118163"',
'ps':"y",
'push_doumail_num':"38",
'push_noty_num':"6",
'ue':'"398758695@qq.com"',
'viewed':'"1756954_1052241_1831698_25952655_1231361_7906768_24703171_3288908_2305237_6510682"',
}
    # cookie is a plain dict; note that duplicate keys (e.g. '__utma') collapse, so only the last value is kept
start_urls = []
def __init__(self):
file=open('film_URL.json')
for line in file.readlines():
js=json.loads(line)
url=js['url'][0]+'comments'
self.start_urls.append(url)
file.close()
def parse(self, response):
filmname=response.xpath('//*[@id="content"]/h1/text()').extract()[0]+'.json'
        # extract() returns a list; its first element is a unicode string
file=codecs.open(filmname,'ab',encoding='utf-8')
next=response.xpath('//*[@id="paginator"]/a[@class="next"]/@href').extract()
item=GetcommentItem()
item['comment']=response.xpath('//*[@id="comments"]/div[@class="comment-item"]/div[2]/p/text()').extract()
item['title'] =response.xpath('//*[@id="comments"]/div[@class="comment-item"]/div[2]/h3/span[2]/span[1]/@title').extract()
commentlines = json.dumps(dict(item),ensure_ascii=False) + "\n"
file.write(commentlines)
if next:
next_url=response.url.split('?')[0]+next[0]
if int(next[0].split('=')[1].split('&')[0]) < 10000:
                # only fetch the first 10,000 comments
return scrapy.Request(next_url,self.parse,cookies=self.cookie)
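    # --- Editorial note: a minimal sketch, not part of the original spider ---
    # `start_urls` by itself does not attach `self.cookie` to the very first
    # requests; one way to send the cookies on the initial requests as well is
    # to override the standard `start_requests` hook:
    def start_requests(self):
        for url in self.start_urls:
            yield scrapy.Request(url, callback=self.parse, cookies=self.cookie)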
| mit | 7,418,473,381,262,514,000 | 46.116667 | 195 | 0.576937 | false | 2.728764 | false | false | false |
| OrbitzWorldwide/droned | droned/lib/droned/responders/events.py | 1 | 2722 |
###############################################################################
# Copyright 2006 to the present, Orbitz Worldwide, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from droned.models.event import Event
from droned.responders import responder, dispatch
@responder(pattern="^subscribe (?P<event>\S+)", form="subscribe <event>|all", help="Subscribe to event notifications")
def subscribe(conversation, event):
subscriptions = set(conversation.context.get('subscriptions', set()))
if event == 'all':
events = Event.objects
else:
events = [ Event(event) ]
for event in events:
conversation.say("Subscribed to %s events" % event.name)
subscriptions.add(event.name)
event.subscribe(conversation.notify)
conversation.context['subscriptions'] = subscriptions
@responder(pattern="^unsubscribe (?P<event>\S+)", form="unsubscribe <event>|all", help="Unsubscribe from event notifications")
def unsubscribe(conversation, event):
subscriptions = set(conversation.context.get('subscriptions', []))
if event == 'all':
eventList = Event.objects
else:
eventList = [ Event(event) ]
for event in eventList:
conversation.say("Unsubscribed from %s events" % event.name)
event.unsubscribe(conversation.notify)
subscriptions.discard(event.name)
conversation.context['subscriptions'] = sorted(subscriptions)
@responder(pattern=".*<b>(?P<event>\S+)</b>.*occurred \((?P<string>.*)\).*")
def notification(conversation, event, string):
#hopefully you know how to parse this string
if Event.exists(event):
context = {
'conversation': conversation,
'message': string,
'event': event,
}
Event(event).fire(**context)
@responder(pattern="<b>Announcement from .*</b> ::: (?P<string>.*)")
def annoucement(conversation, string):
return dispatch(conversation, string)
@responder(pattern="Sorry I don't know what you mean by that.*")
def circular_conversation(conversation, *args):
"""Blackhole these circular conversations"""
return
| apache-2.0 | -4,877,406,572,939,446,000 | 39.029412 | 126 | 0.646951 | false | 4.14939 | false | false | false |
| mohanprasath/Course-Work | data_analysis/uh_data_analysis_with_python/hy-data-analysis-with-python-spring-2020/part02-e07_file_extensions/test/test_file_extensions.py | 1 | 1537 |
#!/usr/bin/env python3
import unittest
from unittest.mock import patch
from tmc import points
from tmc.utils import load, get_out
module_name="src.file_extensions"
file_extensions = load(module_name, "file_extensions")
main = load(module_name, "main")
class FileExtensions(unittest.TestCase):
@points('p02-07.1')
def test_first(self):
correct_d = {'txt': ['file1.txt', 'file2.txt'],
'pdf': ['mydocument.pdf'],
'gz': ['archive.tar.gz']}
no_extension, d = file_extensions("src/filenames.txt")
self.assertEqual(no_extension, ["test"],
msg="There should be exactly one filename without an extension!")
self.assertEqual(d, correct_d, msg="The dictionary of files with an extension is incorrect!")
@points('p02-07.1')
def test_calls(self):
with patch('builtins.open', side_effect=open) as o:
file_extensions("src/filenames.txt")
o.assert_called_once()
@points('p02-07.2')
def test_main(self):
with patch('src.file_extensions.file_extensions', side_effect=[([], {})]) as fe:
main()
self.assertEqual(fe.call_count, 1,
msg="You should call function 'file_extensions' from main!")
result = get_out().split('\n')
self.assertEqual(len(result), 1, msg="Expected one line of output!")
self.assertEqual(result[0], "0 files with no extension")
if __name__ == '__main__':
unittest.main()
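# --- Editorial sketch, not part of the course material ---
# The tests above imply that `file_extensions(filename)` opens the file exactly
# once and returns (names_without_extension, {extension: [filenames]}), where
# the extension is the text after the last dot. A minimal sketch of such an
# implementation (the name `file_extensions_sketch` is hypothetical):
def file_extensions_sketch(filename):
    no_extension = []
    extensions = {}
    with open(filename) as f:
        for line in f:
            name = line.strip()
            if not name:
                continue
            if '.' in name:
                ext = name.rsplit('.', 1)[-1]
                extensions.setdefault(ext, []).append(name)
            else:
                no_extension.append(name)
    return no_extension, extensions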
| gpl-3.0 | -542,415,368,093,614,340 | 33.931818 | 101 | 0.595316 | false | 3.785714 | true | false | false |
| brchiu/tensorflow | tensorflow/python/ops/metrics_impl.py | 1 | 162465 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of tf.metrics module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.compat import compat
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import confusion_matrix
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import sets
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import distribution_strategy_context
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.tf_export import tf_export
def metric_variable(shape, dtype, validate_shape=True, name=None):
"""Create variable in `GraphKeys.(LOCAL|METRIC_VARIABLES)` collections.
If running in a `DistributionStrategy` context, the variable will be
"replica local". This means:
* The returned object will be a container with separate variables
per replica of the model.
* When writing to the variable, e.g. using `assign_add` in a metric
update, the update will be applied to the variable local to the
replica.
* To get a metric's result value, we need to sum the variable values
across the replicas before computing the final answer. Furthermore,
the final answer should be computed once instead of in every
replica. Both of these are accomplished by running the computation
of the final result value inside
`distribution_strategy_context.get_replica_context().merge_call(fn)`.
Inside the `merge_call()`, ops are only added to the graph once
and access to a replica-local variable in a computation returns
the sum across all replicas.
Args:
shape: Shape of the created variable.
dtype: Type of the created variable.
validate_shape: (Optional) Whether shape validation is enabled for
the created variable.
name: (Optional) String name of the created variable.
Returns:
A (non-trainable) variable initialized to zero, or if inside a
`DistributionStrategy` scope a replica-local variable container.
"""
# Note that synchronization "ON_READ" implies trainable=False.
return variable_scope.variable(
lambda: array_ops.zeros(shape, dtype),
collections=[
ops.GraphKeys.LOCAL_VARIABLES, ops.GraphKeys.METRIC_VARIABLES
],
validate_shape=validate_shape,
synchronization=variable_scope.VariableSynchronization.ON_READ,
aggregation=variable_scope.VariableAggregation.SUM,
name=name)
def _remove_squeezable_dimensions(predictions, labels, weights):
"""Squeeze or expand last dim if needed.
Squeezes last dim of `predictions` or `labels` if their rank differs by 1
(using confusion_matrix.remove_squeezable_dimensions).
Squeezes or expands last dim of `weights` if its rank differs by 1 from the
new rank of `predictions`.
If `weights` is scalar, it is kept scalar.
This will use static shape if available. Otherwise, it will add graph
operations, which could result in a performance hit.
Args:
predictions: Predicted values, a `Tensor` of arbitrary dimensions.
labels: Optional label `Tensor` whose dimensions match `predictions`.
weights: Optional weight scalar or `Tensor` whose dimensions match
`predictions`.
Returns:
Tuple of `predictions`, `labels` and `weights`. Each of them possibly has
the last dimension squeezed, `weights` could be extended by one dimension.
"""
predictions = ops.convert_to_tensor(predictions)
if labels is not None:
labels, predictions = confusion_matrix.remove_squeezable_dimensions(
labels, predictions)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
if weights is None:
return predictions, labels, None
weights = ops.convert_to_tensor(weights)
weights_shape = weights.get_shape()
weights_rank = weights_shape.ndims
if weights_rank == 0:
return predictions, labels, weights
predictions_shape = predictions.get_shape()
predictions_rank = predictions_shape.ndims
if (predictions_rank is not None) and (weights_rank is not None):
# Use static rank.
if weights_rank - predictions_rank == 1:
weights = array_ops.squeeze(weights, [-1])
elif predictions_rank - weights_rank == 1:
weights = array_ops.expand_dims(weights, [-1])
else:
# Use dynamic rank.
weights_rank_tensor = array_ops.rank(weights)
rank_diff = weights_rank_tensor - array_ops.rank(predictions)
def _maybe_expand_weights():
return control_flow_ops.cond(
math_ops.equal(rank_diff, -1),
lambda: array_ops.expand_dims(weights, [-1]), lambda: weights)
# Don't attempt squeeze if it will fail based on static check.
if ((weights_rank is not None) and
(not weights_shape.dims[-1].is_compatible_with(1))):
maybe_squeeze_weights = lambda: weights
else:
maybe_squeeze_weights = lambda: array_ops.squeeze(weights, [-1])
def _maybe_adjust_weights():
return control_flow_ops.cond(
math_ops.equal(rank_diff, 1), maybe_squeeze_weights,
_maybe_expand_weights)
# If weights are scalar, do nothing. Otherwise, try to add or remove a
# dimension to match predictions.
weights = control_flow_ops.cond(
math_ops.equal(weights_rank_tensor, 0), lambda: weights,
_maybe_adjust_weights)
return predictions, labels, weights
def _maybe_expand_labels(labels, predictions):
"""If necessary, expand `labels` along last dimension to match `predictions`.
Args:
labels: `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN]. The latter implies
num_labels=1, in which case the result is an expanded `labels` with shape
[D1, ... DN, 1].
predictions: `Tensor` with shape [D1, ... DN, num_classes].
Returns:
`labels` with the same rank as `predictions`.
Raises:
ValueError: if `labels` has invalid shape.
"""
with ops.name_scope(None, 'expand_labels', (labels, predictions)) as scope:
labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels)
# If sparse, expand sparse shape.
if isinstance(labels, sparse_tensor.SparseTensor):
return control_flow_ops.cond(
math_ops.equal(
array_ops.rank(predictions),
array_ops.size(labels.dense_shape) + 1),
lambda: sparse_ops.sparse_reshape( # pylint: disable=g-long-lambda
labels,
shape=array_ops.concat((labels.dense_shape, (1,)), 0),
name=scope),
lambda: labels)
# Otherwise, try to use static shape.
labels_rank = labels.get_shape().ndims
if labels_rank is not None:
predictions_rank = predictions.get_shape().ndims
if predictions_rank is not None:
if predictions_rank == labels_rank:
return labels
if predictions_rank == labels_rank + 1:
return array_ops.expand_dims(labels, -1, name=scope)
raise ValueError(
'Unexpected labels shape %s for predictions shape %s.' %
(labels.get_shape(), predictions.get_shape()))
# Otherwise, use dynamic shape.
return control_flow_ops.cond(
math_ops.equal(array_ops.rank(predictions),
array_ops.rank(labels) + 1),
lambda: array_ops.expand_dims(labels, -1, name=scope), lambda: labels)
def _safe_div(numerator, denominator, name):
"""Divides two tensors element-wise, returning 0 if the denominator is <= 0.
Args:
numerator: A real `Tensor`.
denominator: A real `Tensor`, with dtype matching `numerator`.
name: Name for the returned op.
Returns:
0 if `denominator` <= 0, else `numerator` / `denominator`
"""
if compat.forward_compatible(2018, 11, 1):
return math_ops.div_no_nan(numerator, denominator, name=name)
t = math_ops.truediv(numerator, denominator)
zero = array_ops.zeros_like(t, dtype=denominator.dtype)
condition = math_ops.greater(denominator, zero)
zero = math_ops.cast(zero, t.dtype)
return array_ops.where(condition, t, zero, name=name)
def _safe_scalar_div(numerator, denominator, name):
"""Divides two values, returning 0 if the denominator is 0.
Args:
numerator: A scalar `float64` `Tensor`.
denominator: A scalar `float64` `Tensor`.
name: Name for the returned op.
Returns:
0 if `denominator` == 0, else `numerator` / `denominator`
"""
numerator.get_shape().with_rank_at_most(1)
denominator.get_shape().with_rank_at_most(1)
return _safe_div(numerator, denominator, name=name)
def _streaming_confusion_matrix(labels, predictions, num_classes, weights=None):
"""Calculate a streaming confusion matrix.
Calculates a confusion matrix. For estimation over a stream of data,
the function creates an `update_op` operation.
Args:
labels: A `Tensor` of ground truth labels with shape [batch size] and of
type `int32` or `int64`. The tensor will be flattened if its rank > 1.
predictions: A `Tensor` of prediction results for semantic labels, whose
shape is [batch size] and type `int32` or `int64`. The tensor will be
flattened if its rank > 1.
num_classes: The possible number of labels the prediction task can
have. This value must be provided, since a confusion matrix of
dimension = [num_classes, num_classes] will be allocated.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
Returns:
total_cm: A `Tensor` representing the confusion matrix.
update_op: An operation that increments the confusion matrix.
"""
# Local variable to accumulate the predictions in the confusion matrix.
total_cm = metric_variable(
[num_classes, num_classes], dtypes.float64, name='total_confusion_matrix')
# Cast the type to int64 required by confusion_matrix_ops.
predictions = math_ops.to_int64(predictions)
labels = math_ops.to_int64(labels)
num_classes = math_ops.to_int64(num_classes)
# Flatten the input if its rank > 1.
if predictions.get_shape().ndims > 1:
predictions = array_ops.reshape(predictions, [-1])
if labels.get_shape().ndims > 1:
labels = array_ops.reshape(labels, [-1])
if (weights is not None) and (weights.get_shape().ndims > 1):
weights = array_ops.reshape(weights, [-1])
# Accumulate the prediction to current confusion matrix.
current_cm = confusion_matrix.confusion_matrix(
labels, predictions, num_classes, weights=weights, dtype=dtypes.float64)
update_op = state_ops.assign_add(total_cm, current_cm)
return total_cm, update_op
def _aggregate_across_replicas(metrics_collections, metric_value_fn, *args):
"""Aggregate metric value across replicas."""
def fn(distribution, *a):
"""Call `metric_value_fn` in the correct control flow context."""
if hasattr(distribution.extended, '_outer_control_flow_context'):
# If there was an outer context captured before this method was called,
# then we enter that context to create the metric value op. If the
      # captured context is `None`, ops.control_dependencies(None) gives the
# desired behavior. Else we use `Enter` and `Exit` to enter and exit the
# captured context.
# This special handling is needed because sometimes the metric is created
# inside a while_loop (and perhaps a TPU rewrite context). But we don't
# want the value op to be evaluated every step or on the TPU. So we
# create it outside so that it can be evaluated at the end on the host,
      # once the update ops have been evaluated.
# pylint: disable=protected-access
if distribution.extended._outer_control_flow_context is None:
with ops.control_dependencies(None):
metric_value = metric_value_fn(distribution, *a)
else:
distribution.extended._outer_control_flow_context.Enter()
metric_value = metric_value_fn(distribution, *a)
distribution.extended._outer_control_flow_context.Exit()
# pylint: enable=protected-access
else:
metric_value = metric_value_fn(distribution, *a)
if metrics_collections:
ops.add_to_collections(metrics_collections, metric_value)
return metric_value
return distribution_strategy_context.get_replica_context().merge_call(
fn, args=args)
@tf_export('metrics.mean')
def mean(values,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the (weighted) mean of the given values.
The `mean` function creates two local variables, `total` and `count`
that are used to compute the average of `values`. This average is ultimately
returned as `mean` which is an idempotent operation that simply divides
`total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `mean`.
`update_op` increments `total` with the reduced sum of the product of `values`
and `weights`, and it increments `count` with the reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A `Tensor` of arbitrary dimensions.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`values`, and must be broadcastable to `values` (i.e., all dimensions must
be either `1`, or the same as the corresponding `values` dimension).
metrics_collections: An optional list of collections that `mean`
should be added to.
updates_collections: An optional list of collections that `update_op`
should be added to.
name: An optional variable_scope name.
Returns:
mean: A `Tensor` representing the current mean, the value of `total` divided
by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_value`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.mean is not supported when eager execution '
'is enabled.')
with variable_scope.variable_scope(name, 'mean', (values, weights)):
values = math_ops.to_float(values)
total = metric_variable([], dtypes.float32, name='total')
count = metric_variable([], dtypes.float32, name='count')
if weights is None:
num_values = math_ops.to_float(array_ops.size(values))
else:
values, _, weights = _remove_squeezable_dimensions(
predictions=values, labels=None, weights=weights)
weights = weights_broadcast_ops.broadcast_weights(
math_ops.to_float(weights), values)
values = math_ops.multiply(values, weights)
num_values = math_ops.reduce_sum(weights)
update_total_op = state_ops.assign_add(total, math_ops.reduce_sum(values))
with ops.control_dependencies([values]):
update_count_op = state_ops.assign_add(count, num_values)
def compute_mean(_, t, c):
return _safe_div(t, math_ops.maximum(c, 0), name='value')
mean_t = _aggregate_across_replicas(
metrics_collections, compute_mean, total, count)
update_op = _safe_div(update_total_op,
math_ops.maximum(update_count_op, 0),
name='update_op')
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return mean_t, update_op
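# --- Usage sketch (editorial addition, not part of the original module) ---
# In graph mode `mean` returns `(mean_value, update_op)`: `update_op` is run
# once per batch to accumulate `total`/`count`, and `mean_value` is evaluated
# at the end. The helper below is illustrative only; the import and the
# function name are assumptions made for this sketch.
from tensorflow.python.ops import variables as variables_lib  # only for the sketch below
def _example_streaming_mean(session, batches):
  """Hypothetical helper: build `mean`, run `update_op` per batch, read result."""
  values_ph = array_ops.placeholder(dtypes.float32, shape=[None])
  mean_value, update_op = mean(values_ph)
  # Metric variables live in LOCAL_VARIABLES, so initialize them after creation.
  session.run(variables_lib.local_variables_initializer())
  for batch in batches:
    session.run(update_op, feed_dict={values_ph: batch})
  return session.run(mean_value)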
@tf_export('metrics.accuracy')
def accuracy(labels,
predictions,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Calculates how often `predictions` matches `labels`.
The `accuracy` function creates two local variables, `total` and
`count` that are used to compute the frequency with which `predictions`
matches `labels`. This frequency is ultimately returned as `accuracy`: an
idempotent operation that simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `accuracy`.
Internally, an `is_correct` operation computes a `Tensor` with elements 1.0
where the corresponding elements of `predictions` and `labels` match and 0.0
otherwise. Then `update_op` increments `total` with the reduced sum of the
product of `weights` and `is_correct`, and it increments `count` with the
reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `Tensor` whose shape matches
`predictions`.
predictions: The predicted values, a `Tensor` of any shape.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `accuracy` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
accuracy: A `Tensor` representing the accuracy, the value of `total` divided
by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `accuracy`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.accuracy is not supported when eager '
'execution is enabled.')
predictions, labels, weights = _remove_squeezable_dimensions(
predictions=predictions, labels=labels, weights=weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
if labels.dtype != predictions.dtype:
predictions = math_ops.cast(predictions, labels.dtype)
is_correct = math_ops.to_float(math_ops.equal(predictions, labels))
return mean(is_correct, weights, metrics_collections, updates_collections,
name or 'accuracy')
def _confusion_matrix_at_thresholds(labels,
predictions,
thresholds,
weights=None,
includes=None):
"""Computes true_positives, false_negatives, true_negatives, false_positives.
This function creates up to four local variables, `true_positives`,
`true_negatives`, `false_positives` and `false_negatives`.
`true_positive[i]` is defined as the total weight of values in `predictions`
above `thresholds[i]` whose corresponding entry in `labels` is `True`.
`false_negatives[i]` is defined as the total weight of values in `predictions`
at most `thresholds[i]` whose corresponding entry in `labels` is `True`.
`true_negatives[i]` is defined as the total weight of values in `predictions`
at most `thresholds[i]` whose corresponding entry in `labels` is `False`.
`false_positives[i]` is defined as the total weight of values in `predictions`
above `thresholds[i]` whose corresponding entry in `labels` is `False`.
For estimation of these metrics over a stream of data, for each metric the
function respectively creates an `update_op` operation that updates the
variable and returns its value.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` whose shape matches `predictions`. Will be cast to
`bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
includes: Tuple of keys to return, from 'tp', 'fn', 'tn', fp'. If `None`,
default to all four.
Returns:
values: Dict of variables of shape `[len(thresholds)]`. Keys are from
`includes`.
update_ops: Dict of operations that increments the `values`. Keys are from
`includes`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`includes` contains invalid keys.
"""
all_includes = ('tp', 'fn', 'tn', 'fp')
if includes is None:
includes = all_includes
else:
for include in includes:
if include not in all_includes:
raise ValueError('Invalid key: %s.' % include)
with ops.control_dependencies([
check_ops.assert_greater_equal(
predictions,
math_ops.cast(0.0, dtype=predictions.dtype),
message='predictions must be in [0, 1]'),
check_ops.assert_less_equal(
predictions,
math_ops.cast(1.0, dtype=predictions.dtype),
message='predictions must be in [0, 1]')
]):
predictions, labels, weights = _remove_squeezable_dimensions(
predictions=math_ops.to_float(predictions),
labels=math_ops.cast(labels, dtype=dtypes.bool),
weights=weights)
num_thresholds = len(thresholds)
# Reshape predictions and labels.
predictions_2d = array_ops.reshape(predictions, [-1, 1])
labels_2d = array_ops.reshape(
math_ops.cast(labels, dtype=dtypes.bool), [1, -1])
# Use static shape if known.
num_predictions = predictions_2d.get_shape().as_list()[0]
# Otherwise use dynamic shape.
if num_predictions is None:
num_predictions = array_ops.shape(predictions_2d)[0]
thresh_tiled = array_ops.tile(
array_ops.expand_dims(array_ops.constant(thresholds), [1]),
array_ops.stack([1, num_predictions]))
# Tile the predictions after thresholding them across different thresholds.
pred_is_pos = math_ops.greater(
array_ops.tile(array_ops.transpose(predictions_2d), [num_thresholds, 1]),
thresh_tiled)
if ('fn' in includes) or ('tn' in includes):
pred_is_neg = math_ops.logical_not(pred_is_pos)
# Tile labels by number of thresholds
label_is_pos = array_ops.tile(labels_2d, [num_thresholds, 1])
if ('fp' in includes) or ('tn' in includes):
label_is_neg = math_ops.logical_not(label_is_pos)
if weights is not None:
weights = weights_broadcast_ops.broadcast_weights(
math_ops.to_float(weights), predictions)
weights_tiled = array_ops.tile(
array_ops.reshape(weights, [1, -1]), [num_thresholds, 1])
thresh_tiled.get_shape().assert_is_compatible_with(
weights_tiled.get_shape())
else:
weights_tiled = None
values = {}
update_ops = {}
if 'tp' in includes:
true_p = metric_variable(
[num_thresholds], dtypes.float32, name='true_positives')
is_true_positive = math_ops.to_float(
math_ops.logical_and(label_is_pos, pred_is_pos))
if weights_tiled is not None:
is_true_positive *= weights_tiled
update_ops['tp'] = state_ops.assign_add(true_p,
math_ops.reduce_sum(
is_true_positive, 1))
values['tp'] = true_p
if 'fn' in includes:
false_n = metric_variable(
[num_thresholds], dtypes.float32, name='false_negatives')
is_false_negative = math_ops.to_float(
math_ops.logical_and(label_is_pos, pred_is_neg))
if weights_tiled is not None:
is_false_negative *= weights_tiled
update_ops['fn'] = state_ops.assign_add(false_n,
math_ops.reduce_sum(
is_false_negative, 1))
values['fn'] = false_n
if 'tn' in includes:
true_n = metric_variable(
[num_thresholds], dtypes.float32, name='true_negatives')
is_true_negative = math_ops.to_float(
math_ops.logical_and(label_is_neg, pred_is_neg))
if weights_tiled is not None:
is_true_negative *= weights_tiled
update_ops['tn'] = state_ops.assign_add(true_n,
math_ops.reduce_sum(
is_true_negative, 1))
values['tn'] = true_n
if 'fp' in includes:
false_p = metric_variable(
[num_thresholds], dtypes.float32, name='false_positives')
is_false_positive = math_ops.to_float(
math_ops.logical_and(label_is_neg, pred_is_pos))
if weights_tiled is not None:
is_false_positive *= weights_tiled
update_ops['fp'] = state_ops.assign_add(false_p,
math_ops.reduce_sum(
is_false_positive, 1))
values['fp'] = false_p
return values, update_ops
def _aggregate_variable(v, collections):
f = lambda distribution, value: distribution.read_var(value)
return _aggregate_across_replicas(collections, f, v)
@tf_export('metrics.auc')
def auc(labels,
predictions,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
curve='ROC',
name=None,
summation_method='trapezoidal'):
"""Computes the approximate AUC via a Riemann sum.
The `auc` function creates four local variables, `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` that are used to
compute the AUC. To discretize the AUC curve, a linearly spaced set of
thresholds is used to compute pairs of recall and precision values. The area
under the ROC-curve is therefore computed using the height of the recall
  values by the false positive rate, while the area under the PR-curve is
  computed using the height of the precision values by the recall.
This value is ultimately returned as `auc`, an idempotent operation that
computes the area under a discretized curve of precision versus recall values
(computed using the aforementioned variables). The `num_thresholds` variable
controls the degree of discretization with larger numbers of thresholds more
closely approximating the true AUC. The quality of the approximation may vary
dramatically depending on `num_thresholds`.
For best results, `predictions` should be distributed approximately uniformly
in the range [0, 1] and not peaked around 0 or 1. The quality of the AUC
approximation may be poor if this is not the case. Setting `summation_method`
to 'minoring' or 'majoring' can help quantify the error in the approximation
by providing lower or upper bound estimate of the AUC.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `auc`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` whose shape matches `predictions`. Will be cast to
`bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
    num_thresholds: The number of thresholds to use when discretizing the ROC
      curve.
metrics_collections: An optional list of collections that `auc` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
curve: Specifies the name of the curve to be computed, 'ROC' [default] or
'PR' for the Precision-Recall-curve.
name: An optional variable_scope name.
summation_method: Specifies the Riemann summation method used
(https://en.wikipedia.org/wiki/Riemann_sum): 'trapezoidal' [default] that
applies the trapezoidal rule; 'careful_interpolation', a variant of it
differing only by a more correct interpolation scheme for PR-AUC -
interpolating (true/false) positives but not the ratio that is precision;
'minoring' that applies left summation for increasing intervals and right
summation for decreasing intervals; 'majoring' that does the opposite.
Note that 'careful_interpolation' is strictly preferred to 'trapezoidal'
(to be deprecated soon) as it applies the same method for ROC, and a
better one (see Davis & Goadrich 2006 for details) for the PR curve.
Returns:
auc: A scalar `Tensor` representing the current area-under-curve.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables
appropriately and whose value matches `auc`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.auc is not supported when eager execution '
'is enabled.')
with variable_scope.variable_scope(name, 'auc',
(labels, predictions, weights)):
if curve != 'ROC' and curve != 'PR':
raise ValueError('curve must be either ROC or PR, %s unknown' % (curve))
kepsilon = 1e-7 # to account for floating point imprecisions
thresholds = [
(i + 1) * 1.0 / (num_thresholds - 1) for i in range(num_thresholds - 2)
]
thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]
values, update_ops = _confusion_matrix_at_thresholds(
labels, predictions, thresholds, weights)
# Add epsilons to avoid dividing by 0.
epsilon = 1.0e-6
def interpolate_pr_auc(tp, fp, fn):
"""Interpolation formula inspired by section 4 of Davis & Goadrich 2006.
Note here we derive & use a closed formula not present in the paper
- as follows:
Modeling all of TP (true positive weight),
FP (false positive weight) and their sum P = TP + FP (positive weight)
as varying linearly within each interval [A, B] between successive
thresholds, we get
Precision = (TP_A + slope * (P - P_A)) / P
with slope = dTP / dP = (TP_B - TP_A) / (P_B - P_A).
The area within the interval is thus (slope / total_pos_weight) times
int_A^B{Precision.dP} = int_A^B{(TP_A + slope * (P - P_A)) * dP / P}
int_A^B{Precision.dP} = int_A^B{slope * dP + intercept * dP / P}
where intercept = TP_A - slope * P_A = TP_B - slope * P_B, resulting in
int_A^B{Precision.dP} = TP_B - TP_A + intercept * log(P_B / P_A)
Bringing back the factor (slope / total_pos_weight) we'd put aside, we get
slope * [dTP + intercept * log(P_B / P_A)] / total_pos_weight
where dTP == TP_B - TP_A.
Note that when P_A == 0 the above calculation simplifies into
int_A^B{Precision.dTP} = int_A^B{slope * dTP} = slope * (TP_B - TP_A)
which is really equivalent to imputing constant precision throughout the
first bucket having >0 true positives.
Args:
tp: true positive counts
fp: false positive counts
fn: false negative counts
Returns:
pr_auc: an approximation of the area under the P-R curve.
"""
dtp = tp[:num_thresholds - 1] - tp[1:]
p = tp + fp
prec_slope = _safe_div(
dtp,
math_ops.maximum(p[:num_thresholds - 1] - p[1:], 0),
name='prec_slope')
intercept = tp[1:] - math_ops.multiply(prec_slope, p[1:])
safe_p_ratio = array_ops.where(
math_ops.logical_and(p[:num_thresholds - 1] > 0, p[1:] > 0),
_safe_div(p[:num_thresholds - 1],
math_ops.maximum(p[1:], 0),
name='recall_relative_ratio'),
array_ops.ones_like(p[1:]))
return math_ops.reduce_sum(
_safe_div(
prec_slope * (dtp + intercept * math_ops.log(safe_p_ratio)),
math_ops.maximum(tp[1:] + fn[1:], 0),
name='pr_auc_increment'),
name='interpolate_pr_auc')
def compute_auc(tp, fn, tn, fp, name):
"""Computes the roc-auc or pr-auc based on confusion counts."""
if curve == 'PR':
if summation_method == 'trapezoidal':
logging.warning(
'Trapezoidal rule is known to produce incorrect PR-AUCs; '
'please switch to "careful_interpolation" instead.')
elif summation_method == 'careful_interpolation':
# This one is a bit tricky and is handled separately.
return interpolate_pr_auc(tp, fp, fn)
rec = math_ops.div(tp + epsilon, tp + fn + epsilon)
if curve == 'ROC':
fp_rate = math_ops.div(fp, fp + tn + epsilon)
x = fp_rate
y = rec
else: # curve == 'PR'.
prec = math_ops.div(tp + epsilon, tp + fp + epsilon)
x = rec
y = prec
if summation_method in ('trapezoidal', 'careful_interpolation'):
# Note that the case ('PR', 'careful_interpolation') has been handled
# above.
return math_ops.reduce_sum(
math_ops.multiply(x[:num_thresholds - 1] - x[1:],
(y[:num_thresholds - 1] + y[1:]) / 2.),
name=name)
elif summation_method == 'minoring':
return math_ops.reduce_sum(
math_ops.multiply(x[:num_thresholds - 1] - x[1:],
math_ops.minimum(y[:num_thresholds - 1], y[1:])),
name=name)
elif summation_method == 'majoring':
return math_ops.reduce_sum(
math_ops.multiply(x[:num_thresholds - 1] - x[1:],
math_ops.maximum(y[:num_thresholds - 1], y[1:])),
name=name)
else:
raise ValueError('Invalid summation_method: %s' % summation_method)
# sum up the areas of all the trapeziums
def compute_auc_value(_, values):
return compute_auc(values['tp'], values['fn'], values['tn'], values['fp'],
'value')
auc_value = _aggregate_across_replicas(
metrics_collections, compute_auc_value, values)
update_op = compute_auc(update_ops['tp'], update_ops['fn'],
update_ops['tn'], update_ops['fp'], 'update_op')
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return auc_value, update_op
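# --- Usage sketch (editorial addition, not part of the original module) ---
# For PR curves the docstring above recommends 'careful_interpolation' over the
# default 'trapezoidal' rule; this thin wrapper is illustrative only and its
# name is an assumption.
def _example_pr_auc(labels, predictions, weights=None, num_thresholds=200):
  """Hypothetical helper: PR-AUC using the recommended interpolation scheme."""
  return auc(labels, predictions, weights=weights,
             num_thresholds=num_thresholds, curve='PR',
             summation_method='careful_interpolation')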
@tf_export('metrics.mean_absolute_error')
def mean_absolute_error(labels,
predictions,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the mean absolute error between the labels and predictions.
The `mean_absolute_error` function creates two local variables,
`total` and `count` that are used to compute the mean absolute error. This
average is weighted by `weights`, and it is ultimately returned as
`mean_absolute_error`: an idempotent operation that simply divides `total` by
`count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`mean_absolute_error`. Internally, an `absolute_errors` operation computes the
absolute value of the differences between `predictions` and `labels`. Then
`update_op` increments `total` with the reduced sum of the product of
`weights` and `absolute_errors`, and it increments `count` with the reduced
sum of `weights`
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` of the same shape as `predictions`.
predictions: A `Tensor` of arbitrary shape.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`mean_absolute_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
mean_absolute_error: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_absolute_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.mean_absolute_error is not supported '
'when eager execution is enabled.')
predictions, labels, weights = _remove_squeezable_dimensions(
predictions=predictions, labels=labels, weights=weights)
absolute_errors = math_ops.abs(predictions - labels)
return mean(absolute_errors, weights, metrics_collections,
updates_collections, name or 'mean_absolute_error')
@tf_export('metrics.mean_cosine_distance')
def mean_cosine_distance(labels,
predictions,
dim,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the cosine distance between the labels and predictions.
The `mean_cosine_distance` function creates two local variables,
`total` and `count` that are used to compute the average cosine distance
between `predictions` and `labels`. This average is weighted by `weights`,
and it is ultimately returned as `mean_distance`, which is an idempotent
operation that simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`mean_distance`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` of arbitrary shape.
predictions: A `Tensor` of the same shape as `labels`.
dim: The dimension along which the cosine distance is computed.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension). Also,
dimension `dim` must be `1`.
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
mean_distance: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.mean_cosine_distance is not supported when '
'eager execution is enabled.')
predictions, labels, weights = _remove_squeezable_dimensions(
predictions=predictions, labels=labels, weights=weights)
radial_diffs = math_ops.multiply(predictions, labels)
radial_diffs = math_ops.reduce_sum(
radial_diffs, reduction_indices=[
dim,
], keepdims=True)
mean_distance, update_op = mean(radial_diffs, weights, None, None, name or
'mean_cosine_distance')
mean_distance = math_ops.subtract(1.0, mean_distance)
update_op = math_ops.subtract(1.0, update_op)
if metrics_collections:
ops.add_to_collections(metrics_collections, mean_distance)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return mean_distance, update_op
@tf_export('metrics.mean_per_class_accuracy')
def mean_per_class_accuracy(labels,
predictions,
num_classes,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Calculates the mean of the per-class accuracies.
Calculates the accuracy for each class, then takes the mean of that.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates the accuracy of each class and returns
them.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` of ground truth labels with shape [batch size] and of
type `int32` or `int64`. The tensor will be flattened if its rank > 1.
predictions: A `Tensor` of prediction results for semantic labels, whose
shape is [batch size] and type `int32` or `int64`. The tensor will be
flattened if its rank > 1.
num_classes: The possible number of labels the prediction task can
have. This value must be provided, since two variables with shape =
[num_classes] will be allocated.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
      `mean_per_class_accuracy` should be added to.
updates_collections: An optional list of collections `update_op` should be
added to.
name: An optional variable_scope name.
Returns:
mean_accuracy: A `Tensor` representing the mean per class accuracy.
update_op: An operation that updates the accuracy tensor.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.mean_per_class_accuracy is not supported '
'when eager execution is enabled.')
with variable_scope.variable_scope(name, 'mean_accuracy',
(predictions, labels, weights)):
labels = math_ops.to_int64(labels)
# Flatten the input if its rank > 1.
if labels.get_shape().ndims > 1:
labels = array_ops.reshape(labels, [-1])
if predictions.get_shape().ndims > 1:
predictions = array_ops.reshape(predictions, [-1])
# Check if shape is compatible.
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
total = metric_variable([num_classes], dtypes.float32, name='total')
count = metric_variable([num_classes], dtypes.float32, name='count')
ones = array_ops.ones([array_ops.size(labels)], dtypes.float32)
if labels.dtype != predictions.dtype:
predictions = math_ops.cast(predictions, labels.dtype)
is_correct = math_ops.to_float(math_ops.equal(predictions, labels))
if weights is not None:
if weights.get_shape().ndims > 1:
weights = array_ops.reshape(weights, [-1])
weights = math_ops.to_float(weights)
is_correct *= weights
ones *= weights
update_total_op = state_ops.scatter_add(total, labels, ones)
update_count_op = state_ops.scatter_add(count, labels, is_correct)
def compute_mean_accuracy(_, count, total):
per_class_accuracy = _safe_div(
count, math_ops.maximum(total, 0), name=None)
mean_accuracy_v = math_ops.reduce_mean(
per_class_accuracy, name='mean_accuracy')
return mean_accuracy_v
mean_accuracy_v = _aggregate_across_replicas(
metrics_collections, compute_mean_accuracy, count, total)
update_op = _safe_div(update_count_op,
math_ops.maximum(update_total_op, 0),
name='update_op')
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return mean_accuracy_v, update_op
@tf_export('metrics.mean_iou')
def mean_iou(labels,
predictions,
num_classes,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Calculate per-step mean Intersection-Over-Union (mIOU).
Mean Intersection-Over-Union is a common evaluation metric for
semantic image segmentation, which first computes the IOU for each
semantic class and then computes the average over classes.
IOU is defined as follows:
IOU = true_positive / (true_positive + false_positive + false_negative).
The predictions are accumulated in a confusion matrix, weighted by `weights`,
and mIOU is then calculated from it.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `mean_iou`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` of ground truth labels with shape [batch size] and of
type `int32` or `int64`. The tensor will be flattened if its rank > 1.
predictions: A `Tensor` of prediction results for semantic labels, whose
shape is [batch size] and type `int32` or `int64`. The tensor will be
flattened if its rank > 1.
num_classes: The possible number of labels the prediction task can
have. This value must be provided, since a confusion matrix of
dimension = [num_classes, num_classes] will be allocated.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `mean_iou`
should be added to.
updates_collections: An optional list of collections `update_op` should be
added to.
name: An optional variable_scope name.
Returns:
mean_iou: A `Tensor` representing the mean intersection-over-union.
update_op: An operation that increments the confusion matrix.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.mean_iou is not supported when '
'eager execution is enabled.')
with variable_scope.variable_scope(name, 'mean_iou',
(predictions, labels, weights)):
# Check if shape is compatible.
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
total_cm, update_op = _streaming_confusion_matrix(labels, predictions,
num_classes, weights)
def compute_mean_iou(_, total_cm):
"""Compute the mean intersection-over-union via the confusion matrix."""
sum_over_row = math_ops.to_float(math_ops.reduce_sum(total_cm, 0))
sum_over_col = math_ops.to_float(math_ops.reduce_sum(total_cm, 1))
cm_diag = math_ops.to_float(array_ops.diag_part(total_cm))
denominator = sum_over_row + sum_over_col - cm_diag
# The mean is only computed over classes that appear in the
# label or prediction tensor. If the denominator is 0, we need to
# ignore the class.
num_valid_entries = math_ops.reduce_sum(
math_ops.cast(
math_ops.not_equal(denominator, 0), dtype=dtypes.float32))
# If the value of the denominator is 0, set it to 1 to avoid
# zero division.
denominator = array_ops.where(
math_ops.greater(denominator, 0), denominator,
array_ops.ones_like(denominator))
iou = math_ops.div(cm_diag, denominator)
# If the number of valid entries is 0 (no classes) we return 0.
result = array_ops.where(
math_ops.greater(num_valid_entries, 0),
math_ops.reduce_sum(iou, name='mean_iou') / num_valid_entries, 0)
return result
# TODO(priyag): Use outside_compilation if in TPU context.
mean_iou_v = _aggregate_across_replicas(
metrics_collections, compute_mean_iou, total_cm)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return mean_iou_v, update_op
@tf_export('metrics.mean_relative_error')
def mean_relative_error(labels,
predictions,
normalizer,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the mean relative error by normalizing with the given values.
The `mean_relative_error` function creates two local variables,
`total` and `count` that are used to compute the mean relative absolute error.
This average is weighted by `weights`, and it is ultimately returned as
`mean_relative_error`: an idempotent operation that simply divides `total` by
`count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
  `mean_relative_error`. Internally, a `relative_errors` operation divides the
absolute value of the differences between `predictions` and `labels` by the
`normalizer`. Then `update_op` increments `total` with the reduced sum of the
product of `weights` and `relative_errors`, and it increments `count` with the
reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` of the same shape as `predictions`.
predictions: A `Tensor` of arbitrary shape.
normalizer: A `Tensor` of the same shape as `predictions`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`mean_relative_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
mean_relative_error: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_relative_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.mean_relative_error is not supported when '
'eager execution is enabled.')
predictions, labels, weights = _remove_squeezable_dimensions(
predictions=predictions, labels=labels, weights=weights)
predictions, normalizer = confusion_matrix.remove_squeezable_dimensions(
predictions, normalizer)
predictions.get_shape().assert_is_compatible_with(normalizer.get_shape())
relative_errors = array_ops.where(
math_ops.equal(normalizer, 0.0), array_ops.zeros_like(labels),
math_ops.div(math_ops.abs(labels - predictions), normalizer))
return mean(relative_errors, weights, metrics_collections,
updates_collections, name or 'mean_relative_error')
@tf_export('metrics.mean_squared_error')
def mean_squared_error(labels,
predictions,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the mean squared error between the labels and predictions.
The `mean_squared_error` function creates two local variables,
`total` and `count` that are used to compute the mean squared error.
This average is weighted by `weights`, and it is ultimately returned as
`mean_squared_error`: an idempotent operation that simply divides `total` by
`count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`mean_squared_error`. Internally, a `squared_error` operation computes the
element-wise square of the difference between `predictions` and `labels`. Then
`update_op` increments `total` with the reduced sum of the product of
`weights` and `squared_error`, and it increments `count` with the reduced sum
of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` of the same shape as `predictions`.
predictions: A `Tensor` of arbitrary shape.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`mean_squared_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
mean_squared_error: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_squared_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.mean_squared_error is not supported when '
'eager execution is enabled.')
predictions, labels, weights = _remove_squeezable_dimensions(
predictions=predictions, labels=labels, weights=weights)
squared_error = math_ops.square(labels - predictions)
return mean(squared_error, weights, metrics_collections, updates_collections,
name or 'mean_squared_error')
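# Illustrative sketch only (not part of the library): how `mean_squared_error`
# accumulates `total` and `count` over a batch in graph mode. The helper name,
# the toy tensors and the function-local imports below are assumptions.
def _example_mean_squared_error_usage():
  """Minimal usage sketch for `mean_squared_error` (assumes graph mode)."""
  from tensorflow.python.client import session as session_lib
  from tensorflow.python.framework import constant_op
  from tensorflow.python.ops import variables
  labels = constant_op.constant([1.0, 2.0, 3.0])
  predictions = constant_op.constant([1.0, 4.0, 3.0])
  mse, update_op = mean_squared_error(labels, predictions)
  with session_lib.Session() as sess:
    sess.run(variables.local_variables_initializer())
    sess.run(update_op)    # squared errors 0, 4, 0 -> total = 4, count = 3.
    return sess.run(mse)   # 4 / 3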
@tf_export('metrics.mean_tensor')
def mean_tensor(values,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the element-wise (weighted) mean of the given tensors.
In contrast to the `mean` function which returns a scalar with the
mean, this function returns an average tensor with the same shape as the
input tensors.
The `mean_tensor` function creates two local variables,
`total_tensor` and `count_tensor` that are used to compute the average of
`values`. This average is ultimately returned as `mean` which is an idempotent
operation that simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `mean`.
`update_op` increments `total` with the reduced sum of the product of `values`
and `weights`, and it increments `count` with the reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A `Tensor` of arbitrary dimensions.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`values`, and must be broadcastable to `values` (i.e., all dimensions must
be either `1`, or the same as the corresponding `values` dimension).
metrics_collections: An optional list of collections that `mean`
should be added to.
updates_collections: An optional list of collections that `update_op`
should be added to.
name: An optional variable_scope name.
Returns:
mean: A float `Tensor` representing the current mean, the value of `total`
divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_value`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.mean_tensor is not supported when '
'eager execution is enabled.')
with variable_scope.variable_scope(name, 'mean', (values, weights)):
values = math_ops.to_float(values)
total = metric_variable(
values.get_shape(), dtypes.float32, name='total_tensor')
count = metric_variable(
values.get_shape(), dtypes.float32, name='count_tensor')
num_values = array_ops.ones_like(values)
if weights is not None:
values, _, weights = _remove_squeezable_dimensions(
predictions=values, labels=None, weights=weights)
weights = weights_broadcast_ops.broadcast_weights(
math_ops.to_float(weights), values)
values = math_ops.multiply(values, weights)
num_values = math_ops.multiply(num_values, weights)
update_total_op = state_ops.assign_add(total, values)
with ops.control_dependencies([values]):
update_count_op = state_ops.assign_add(count, num_values)
compute_mean = lambda _, t, c: _safe_div(
t, math_ops.maximum(c, 0), name='value')
mean_t = _aggregate_across_replicas(
metrics_collections, compute_mean, total, count)
update_op = _safe_div(update_total_op,
math_ops.maximum(update_count_op, 0),
name='update_op')
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return mean_t, update_op
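# Illustrative sketch only (not part of the library): `mean_tensor` keeps an
# element-wise running mean across batches. The helper name, the placeholder
# and the function-local imports below are assumptions.
def _example_mean_tensor_usage():
  """Minimal usage sketch for `mean_tensor` over two batches (graph mode)."""
  from tensorflow.python.client import session as session_lib
  from tensorflow.python.ops import variables
  values = array_ops.placeholder(dtypes.float32, shape=[2])
  mean_val, update_op = mean_tensor(values)
  with session_lib.Session() as sess:
    sess.run(variables.local_variables_initializer())
    sess.run(update_op, feed_dict={values: [0.0, 2.0]})
    sess.run(update_op, feed_dict={values: [4.0, 2.0]})
    return sess.run(mean_val)  # [2.0, 2.0]: element-wise mean of both batches.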
@tf_export('metrics.percentage_below')
def percentage_below(values,
threshold,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the percentage of values less than the given threshold.
The `percentage_below` function creates two local variables,
`total` and `count` that are used to compute the percentage of `values` that
fall below `threshold`. This rate is weighted by `weights`, and it is
ultimately returned as `percentage` which is an idempotent operation that
simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`percentage`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A numeric `Tensor` of arbitrary size.
threshold: A scalar threshold.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`values`, and must be broadcastable to `values` (i.e., all dimensions must
be either `1`, or the same as the corresponding `values` dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
percentage: A `Tensor` representing the current mean, the value of `total`
divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.percentage_below is not supported when '
'eager execution is enabled.')
is_below_threshold = math_ops.to_float(math_ops.less(values, threshold))
return mean(is_below_threshold, weights, metrics_collections,
updates_collections, name or 'percentage_below_threshold')
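# Illustrative sketch only (not part of the library): `percentage_below` is
# just `mean` applied to the indicator `values < threshold`. The helper name,
# the toy tensor and the function-local imports below are assumptions.
def _example_percentage_below_usage():
  """Minimal usage sketch for `percentage_below` (assumes graph mode)."""
  from tensorflow.python.client import session as session_lib
  from tensorflow.python.framework import constant_op
  from tensorflow.python.ops import variables
  values = constant_op.constant([0.1, 0.5, 0.9, 1.5])
  pct, update_op = percentage_below(values, threshold=1.0)
  with session_lib.Session() as sess:
    sess.run(variables.local_variables_initializer())
    sess.run(update_op)
    return sess.run(pct)  # 0.75: three of the four values are below 1.0.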
def _count_condition(values,
weights=None,
metrics_collections=None,
updates_collections=None):
"""Sums the weights of cases where the given values are True.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A `bool` `Tensor` of arbitrary size.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`values`, and must be broadcastable to `values` (i.e., all dimensions must
be either `1`, or the same as the corresponding `values` dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
check_ops.assert_type(values, dtypes.bool)
count = metric_variable([], dtypes.float32, name='count')
values = math_ops.to_float(values)
if weights is not None:
with ops.control_dependencies((check_ops.assert_rank_in(
weights, (0, array_ops.rank(values))),)):
weights = math_ops.to_float(weights)
values = math_ops.multiply(values, weights)
value_tensor = _aggregate_variable(count, metrics_collections)
update_op = state_ops.assign_add(count, math_ops.reduce_sum(values))
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return value_tensor, update_op
@tf_export('metrics.false_negatives')
def false_negatives(labels,
predictions,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the total number of false negatives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.false_negatives is not supported when '
'eager execution is enabled.')
with variable_scope.variable_scope(name, 'false_negatives',
(predictions, labels, weights)):
predictions, labels, weights = _remove_squeezable_dimensions(
predictions=math_ops.cast(predictions, dtype=dtypes.bool),
labels=math_ops.cast(labels, dtype=dtypes.bool),
weights=weights)
is_false_negative = math_ops.logical_and(
math_ops.equal(labels, True), math_ops.equal(predictions, False))
return _count_condition(is_false_negative, weights, metrics_collections,
updates_collections)
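# Illustrative sketch only (not part of the library): counting false negatives
# (label True, prediction False). The same pattern applies to the
# false_positives, true_negatives and true_positives metrics defined below.
# The helper name, the toy tensors and the function-local imports are
# assumptions.
def _example_false_negatives_usage():
  """Minimal usage sketch for `false_negatives` (assumes graph mode)."""
  from tensorflow.python.client import session as session_lib
  from tensorflow.python.framework import constant_op
  from tensorflow.python.ops import variables
  labels = constant_op.constant([True, True, False, True])
  predictions = constant_op.constant([True, False, False, False])
  fn, update_op = false_negatives(labels, predictions)
  with session_lib.Session() as sess:
    sess.run(variables.local_variables_initializer())
    sess.run(update_op)
    return sess.run(fn)  # 2.0: two examples are labeled True, predicted False.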
@tf_export('metrics.false_negatives_at_thresholds')
def false_negatives_at_thresholds(labels,
predictions,
thresholds,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes false negatives at provided threshold values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` whose shape matches `predictions`. Will be cast to
`bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `false_negatives`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
false_negatives: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that updates the `false_negatives` variable and
returns its current value.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.false_negatives_at_thresholds is not '
'supported when eager execution is enabled.')
with variable_scope.variable_scope(name, 'false_negatives',
(predictions, labels, weights)):
values, update_ops = _confusion_matrix_at_thresholds(
labels, predictions, thresholds, weights=weights, includes=('fn',))
fn_value = _aggregate_variable(values['fn'], metrics_collections)
if updates_collections:
ops.add_to_collections(updates_collections, update_ops['fn'])
return fn_value, update_ops['fn']
@tf_export('metrics.false_positives')
def false_positives(labels,
predictions,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Sum the weights of false positives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.false_positives is not supported when '
'eager execution is enabled.')
with variable_scope.variable_scope(name, 'false_positives',
(predictions, labels, weights)):
predictions, labels, weights = _remove_squeezable_dimensions(
predictions=math_ops.cast(predictions, dtype=dtypes.bool),
labels=math_ops.cast(labels, dtype=dtypes.bool),
weights=weights)
is_false_positive = math_ops.logical_and(
math_ops.equal(labels, False), math_ops.equal(predictions, True))
return _count_condition(is_false_positive, weights, metrics_collections,
updates_collections)
@tf_export('metrics.false_positives_at_thresholds')
def false_positives_at_thresholds(labels,
predictions,
thresholds,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes false positives at provided threshold values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` whose shape matches `predictions`. Will be cast to
`bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `false_positives`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
false_positives: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that updates the `false_positives` variable and
returns its current value.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.false_positives_at_thresholds is not '
'supported when eager execution is enabled.')
with variable_scope.variable_scope(name, 'false_positives',
(predictions, labels, weights)):
values, update_ops = _confusion_matrix_at_thresholds(
labels, predictions, thresholds, weights=weights, includes=('fp',))
fp_value = _aggregate_variable(values['fp'], metrics_collections)
if updates_collections:
ops.add_to_collections(updates_collections, update_ops['fp'])
return fp_value, update_ops['fp']
@tf_export('metrics.true_negatives')
def true_negatives(labels,
predictions,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Sum the weights of true_negatives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.true_negatives is not '
'supported when eager execution is enabled.')
with variable_scope.variable_scope(name, 'true_negatives',
(predictions, labels, weights)):
predictions, labels, weights = _remove_squeezable_dimensions(
predictions=math_ops.cast(predictions, dtype=dtypes.bool),
labels=math_ops.cast(labels, dtype=dtypes.bool),
weights=weights)
is_true_negative = math_ops.logical_and(
math_ops.equal(labels, False), math_ops.equal(predictions, False))
return _count_condition(is_true_negative, weights, metrics_collections,
updates_collections)
@tf_export('metrics.true_negatives_at_thresholds')
def true_negatives_at_thresholds(labels,
predictions,
thresholds,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes true negatives at provided threshold values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` whose shape matches `predictions`. Will be cast to
`bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `true_negatives`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
true_negatives: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that updates the `true_negatives` variable and
returns its current value.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.true_negatives_at_thresholds is not '
'supported when eager execution is enabled.')
with variable_scope.variable_scope(name, 'true_negatives',
(predictions, labels, weights)):
values, update_ops = _confusion_matrix_at_thresholds(
labels, predictions, thresholds, weights=weights, includes=('tn',))
tn_value = _aggregate_variable(values['tn'], metrics_collections)
if updates_collections:
ops.add_to_collections(updates_collections, update_ops['tn'])
return tn_value, update_ops['tn']
@tf_export('metrics.true_positives')
def true_positives(labels,
predictions,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Sum the weights of true_positives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.true_positives is not '
'supported when eager execution is enabled.')
with variable_scope.variable_scope(name, 'true_positives',
(predictions, labels, weights)):
predictions, labels, weights = _remove_squeezable_dimensions(
predictions=math_ops.cast(predictions, dtype=dtypes.bool),
labels=math_ops.cast(labels, dtype=dtypes.bool),
weights=weights)
is_true_positive = math_ops.logical_and(
math_ops.equal(labels, True), math_ops.equal(predictions, True))
return _count_condition(is_true_positive, weights, metrics_collections,
updates_collections)
@tf_export('metrics.true_positives_at_thresholds')
def true_positives_at_thresholds(labels,
predictions,
thresholds,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes true positives at provided threshold values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` whose shape matches `predictions`. Will be cast to
`bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `true_positives`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
true_positives: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that updates the `true_positives` variable and
returns its current value.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.true_positives_at_thresholds is not '
'supported when eager execution is enabled.')
with variable_scope.variable_scope(name, 'true_positives',
(predictions, labels, weights)):
values, update_ops = _confusion_matrix_at_thresholds(
labels, predictions, thresholds, weights=weights, includes=('tp',))
tp_value = _aggregate_variable(values['tp'], metrics_collections)
if updates_collections:
ops.add_to_collections(updates_collections, update_ops['tp'])
return tp_value, update_ops['tp']
@tf_export('metrics.precision')
def precision(labels,
predictions,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the precision of the predictions with respect to the labels.
The `precision` function creates two local variables,
`true_positives` and `false_positives`, that are used to compute the
precision. This value is ultimately returned as `precision`, an idempotent
operation that simply divides `true_positives` by the sum of `true_positives`
and `false_positives`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision`. `update_op` weights each prediction by the corresponding value in
`weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `precision` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
precision: Scalar float `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately and whose value matches
`precision`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.precision is not '
'supported when eager execution is enabled.')
with variable_scope.variable_scope(name, 'precision',
(predictions, labels, weights)):
predictions, labels, weights = _remove_squeezable_dimensions(
predictions=math_ops.cast(predictions, dtype=dtypes.bool),
labels=math_ops.cast(labels, dtype=dtypes.bool),
weights=weights)
true_p, true_positives_update_op = true_positives(
labels,
predictions,
weights,
metrics_collections=None,
updates_collections=None,
name=None)
false_p, false_positives_update_op = false_positives(
labels,
predictions,
weights,
metrics_collections=None,
updates_collections=None,
name=None)
def compute_precision(tp, fp, name):
return array_ops.where(
math_ops.greater(tp + fp, 0), math_ops.div(tp, tp + fp), 0, name)
def once_across_replicas(_, true_p, false_p):
return compute_precision(true_p, false_p, 'value')
p = _aggregate_across_replicas(metrics_collections, once_across_replicas,
true_p, false_p)
update_op = compute_precision(true_positives_update_op,
false_positives_update_op, 'update_op')
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return p, update_op
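# Illustrative sketch only (not part of the library): precision is
# true_positives / (true_positives + false_positives). The helper name, the
# toy tensors and the function-local imports below are assumptions.
def _example_precision_usage():
  """Minimal usage sketch for `precision` (assumes graph mode)."""
  from tensorflow.python.client import session as session_lib
  from tensorflow.python.framework import constant_op
  from tensorflow.python.ops import variables
  labels = constant_op.constant([True, False, True, False])
  predictions = constant_op.constant([True, True, False, False])
  prec, update_op = precision(labels, predictions)
  with session_lib.Session() as sess:
    sess.run(variables.local_variables_initializer())
    sess.run(update_op)
    return sess.run(prec)  # 0.5: one true positive and one false positive.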
@tf_export('metrics.precision_at_thresholds')
def precision_at_thresholds(labels,
predictions,
thresholds,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes precision values for different `thresholds` on `predictions`.
The `precision_at_thresholds` function creates four local variables,
`true_positives`, `true_negatives`, `false_positives` and `false_negatives`
for various values of thresholds. `precision[i]` is defined as the total
weight of values in `predictions` above `thresholds[i]` whose corresponding
entry in `labels` is `True`, divided by the total weight of values in
`predictions` above `thresholds[i]` (`true_positives[i] / (true_positives[i] +
false_positives[i])`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `auc` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
precision: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables that
are used in the computation of `precision`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.precision_at_thresholds is not '
'supported when eager execution is enabled.')
with variable_scope.variable_scope(name, 'precision_at_thresholds',
(predictions, labels, weights)):
values, update_ops = _confusion_matrix_at_thresholds(
labels, predictions, thresholds, weights, includes=('tp', 'fp'))
# Avoid division by zero.
epsilon = 1e-7
def compute_precision(tp, fp, name):
return math_ops.div(tp, epsilon + tp + fp, name='precision_' + name)
def precision_across_replicas(_, values):
return compute_precision(values['tp'], values['fp'], 'value')
prec = _aggregate_across_replicas(
metrics_collections, precision_across_replicas, values)
update_op = compute_precision(update_ops['tp'], update_ops['fp'],
'update_op')
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return prec, update_op
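# Illustrative sketch only (not part of the library): precision evaluated at
# several decision thresholds at once. The helper name, the toy tensors and
# the function-local imports below are assumptions.
def _example_precision_at_thresholds_usage():
  """Minimal usage sketch for `precision_at_thresholds` (graph mode)."""
  from tensorflow.python.client import session as session_lib
  from tensorflow.python.framework import constant_op
  from tensorflow.python.ops import variables
  labels = constant_op.constant([True, False, True, False])
  predictions = constant_op.constant([0.9, 0.6, 0.4, 0.1])
  prec, update_op = precision_at_thresholds(
      labels, predictions, thresholds=[0.3, 0.7])
  with session_lib.Session() as sess:
    sess.run(variables.local_variables_initializer())
    sess.run(update_op)
    # Threshold 0.3 predicts 3 positives (2 correct); threshold 0.7 predicts
    # 1 positive (correct), so the result is approximately [0.667, 1.0].
    return sess.run(prec)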
@tf_export('metrics.recall')
def recall(labels,
predictions,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the recall of the predictions with respect to the labels.
The `recall` function creates two local variables, `true_positives`
and `false_negatives`, that are used to compute the recall. This value is
ultimately returned as `recall`, an idempotent operation that simply divides
`true_positives` by the sum of `true_positives` and `false_negatives`.
For estimation of the metric over a stream of data, the function creates an
`update_op` that updates these variables and returns the `recall`. `update_op`
weights each prediction by the corresponding value in `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `recall` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
recall: Scalar float `Tensor` with the value of `true_positives` divided
by the sum of `true_positives` and `false_negatives`.
update_op: `Operation` that increments `true_positives` and
`false_negatives` variables appropriately and whose value matches
`recall`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
    raise RuntimeError('tf.metrics.recall is not supported when '
                       'eager execution is enabled.')
with variable_scope.variable_scope(name, 'recall',
(predictions, labels, weights)):
predictions, labels, weights = _remove_squeezable_dimensions(
predictions=math_ops.cast(predictions, dtype=dtypes.bool),
labels=math_ops.cast(labels, dtype=dtypes.bool),
weights=weights)
true_p, true_positives_update_op = true_positives(
labels,
predictions,
weights,
metrics_collections=None,
updates_collections=None,
name=None)
false_n, false_negatives_update_op = false_negatives(
labels,
predictions,
weights,
metrics_collections=None,
updates_collections=None,
name=None)
def compute_recall(true_p, false_n, name):
return array_ops.where(
math_ops.greater(true_p + false_n, 0),
math_ops.div(true_p, true_p + false_n), 0, name)
def once_across_replicas(_, true_p, false_n):
return compute_recall(true_p, false_n, 'value')
rec = _aggregate_across_replicas(
metrics_collections, once_across_replicas, true_p, false_n)
update_op = compute_recall(true_positives_update_op,
false_negatives_update_op, 'update_op')
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return rec, update_op
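# Illustrative sketch only (not part of the library): recall is
# true_positives / (true_positives + false_negatives). The helper name, the
# toy tensors and the function-local imports below are assumptions.
def _example_recall_usage():
  """Minimal usage sketch for `recall` (assumes graph mode)."""
  from tensorflow.python.client import session as session_lib
  from tensorflow.python.framework import constant_op
  from tensorflow.python.ops import variables
  labels = constant_op.constant([True, False, True, True])
  predictions = constant_op.constant([True, True, False, True])
  rec, update_op = recall(labels, predictions)
  with session_lib.Session() as sess:
    sess.run(variables.local_variables_initializer())
    sess.run(update_op)
    return sess.run(rec)  # 2/3: two of the three positive labels are found.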
def _at_k_name(name, k=None, class_id=None):
if k is not None:
name = '%s_at_%d' % (name, k)
else:
    name = '%s_at_k' % name
if class_id is not None:
name = '%s_class%d' % (name, class_id)
return name
def _select_class_id(ids, selected_id):
"""Filter all but `selected_id` out of `ids`.
Args:
ids: `int64` `Tensor` or `SparseTensor` of IDs.
selected_id: Int id to select.
Returns:
`SparseTensor` of same dimensions as `ids`. This contains only the entries
equal to `selected_id`.
"""
ids = sparse_tensor.convert_to_tensor_or_sparse_tensor(ids)
if isinstance(ids, sparse_tensor.SparseTensor):
return sparse_ops.sparse_retain(ids, math_ops.equal(ids.values,
selected_id))
# TODO(ptucker): Make this more efficient, maybe add a sparse version of
# tf.equal and tf.reduce_any?
# Shape of filled IDs is the same as `ids` with the last dim collapsed to 1.
ids_shape = array_ops.shape(ids, out_type=dtypes.int64)
ids_last_dim = array_ops.size(ids_shape) - 1
filled_selected_id_shape = math_ops.reduced_shape(ids_shape,
array_ops.reshape(
ids_last_dim, [1]))
# Intersect `ids` with the selected ID.
filled_selected_id = array_ops.fill(filled_selected_id_shape,
math_ops.to_int64(selected_id))
result = sets.set_intersection(filled_selected_id, ids)
return sparse_tensor.SparseTensor(
indices=result.indices, values=result.values, dense_shape=ids_shape)
def _maybe_select_class_id(labels, predictions_idx, selected_id=None):
"""If class ID is specified, filter all other classes.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
predictions_idx: `int64` `Tensor` of class IDs, with shape [D1, ... DN, k]
where N >= 1. Commonly, N=1 and `predictions_idx` has shape
[batch size, k].
selected_id: Int id to select.
Returns:
Tuple of `labels` and `predictions_idx`, possibly with classes removed.
"""
if selected_id is None:
return labels, predictions_idx
return (_select_class_id(labels, selected_id),
_select_class_id(predictions_idx, selected_id))
def _sparse_true_positive_at_k(labels,
predictions_idx,
class_id=None,
weights=None,
name=None):
"""Calculates true positives for recall@k and precision@k.
If `class_id` is specified, calculate binary true positives for `class_id`
only.
If `class_id` is not specified, calculate metrics for `k` predicted vs
  `n` label classes, where `n` is the 2nd dimension of `labels`.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
match `labels`.
class_id: Class for which we want binary metrics.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
name: Name of operation.
Returns:
A [D1, ... DN] `Tensor` of true positive counts.
"""
with ops.name_scope(name, 'true_positives',
(predictions_idx, labels, weights)):
labels, predictions_idx = _maybe_select_class_id(labels, predictions_idx,
class_id)
tp = sets.set_size(sets.set_intersection(predictions_idx, labels))
tp = math_ops.to_double(tp)
if weights is not None:
with ops.control_dependencies((weights_broadcast_ops.assert_broadcastable(
weights, tp),)):
weights = math_ops.to_double(weights)
tp = math_ops.multiply(tp, weights)
return tp
def _streaming_sparse_true_positive_at_k(labels,
predictions_idx,
k=None,
class_id=None,
weights=None,
name=None):
"""Calculates weighted per step true positives for recall@k and precision@k.
If `class_id` is specified, calculate binary true positives for `class_id`
only.
If `class_id` is not specified, calculate metrics for `k` predicted vs
`n` label classes, where `n` is the 2nd dimension of `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
match `labels`.
k: Integer, k for @k metric. This is only used for default op name.
class_id: Class for which we want binary metrics.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
name: Name of new variable, and namespace for other dependent ops.
Returns:
A tuple of `Variable` and update `Operation`.
Raises:
ValueError: If `weights` is not `None` and has an incompatible shape.
"""
with ops.name_scope(name, _at_k_name('true_positive', k, class_id=class_id),
(predictions_idx, labels, weights)) as scope:
tp = _sparse_true_positive_at_k(
predictions_idx=predictions_idx,
labels=labels,
class_id=class_id,
weights=weights)
batch_total_tp = math_ops.to_double(math_ops.reduce_sum(tp))
var = metric_variable([], dtypes.float64, name=scope)
return var, state_ops.assign_add(var, batch_total_tp, name='update')
def _sparse_false_negative_at_k(labels,
predictions_idx,
class_id=None,
weights=None):
"""Calculates false negatives for recall@k.
  If `class_id` is specified, calculate binary false negatives for `class_id`
only.
If `class_id` is not specified, calculate metrics for `k` predicted vs
  `n` label classes, where `n` is the 2nd dimension of `labels`.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
match `labels`.
class_id: Class for which we want binary metrics.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
Returns:
A [D1, ... DN] `Tensor` of false negative counts.
"""
with ops.name_scope(None, 'false_negatives',
(predictions_idx, labels, weights)):
labels, predictions_idx = _maybe_select_class_id(labels, predictions_idx,
class_id)
fn = sets.set_size(
sets.set_difference(predictions_idx, labels, aminusb=False))
fn = math_ops.to_double(fn)
if weights is not None:
with ops.control_dependencies((weights_broadcast_ops.assert_broadcastable(
weights, fn),)):
weights = math_ops.to_double(weights)
fn = math_ops.multiply(fn, weights)
return fn
def _streaming_sparse_false_negative_at_k(labels,
predictions_idx,
k,
class_id=None,
weights=None,
name=None):
"""Calculates weighted per step false negatives for recall@k.
  If `class_id` is specified, calculate binary false negatives for `class_id`
only.
If `class_id` is not specified, calculate metrics for `k` predicted vs
`n` label classes, where `n` is the 2nd dimension of `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
match `labels`.
k: Integer, k for @k metric. This is only used for default op name.
class_id: Class for which we want binary metrics.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
name: Name of new variable, and namespace for other dependent ops.
Returns:
A tuple of `Variable` and update `Operation`.
Raises:
ValueError: If `weights` is not `None` and has an incompatible shape.
"""
with ops.name_scope(name, _at_k_name('false_negative', k, class_id=class_id),
(predictions_idx, labels, weights)) as scope:
fn = _sparse_false_negative_at_k(
predictions_idx=predictions_idx,
labels=labels,
class_id=class_id,
weights=weights)
batch_total_fn = math_ops.to_double(math_ops.reduce_sum(fn))
var = metric_variable([], dtypes.float64, name=scope)
return var, state_ops.assign_add(var, batch_total_fn, name='update')
@tf_export('metrics.recall_at_k')
def recall_at_k(labels,
predictions,
k,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes recall@k of the predictions with respect to sparse labels.
If `class_id` is specified, we calculate recall by considering only the
entries in the batch for which `class_id` is in the label, and computing
the fraction of them for which `class_id` is in the top-k `predictions`.
If `class_id` is not specified, we'll calculate recall as how often on
average a class among the labels of a batch entry is in the top-k
`predictions`.
  `recall_at_k` creates two local variables,
`true_positive_at_<k>` and `false_negative_at_<k>`, that are used to compute
the recall_at_k frequency. This frequency is ultimately returned as
`recall_at_<k>`: an idempotent operation that simply divides
`true_positive_at_<k>` by total (`true_positive_at_<k>` +
`false_negative_at_<k>`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`recall_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
indicating the top `k` `predictions`. Set operations applied to `top_k` and
`labels` calculate the true positives and false negatives weighted by
`weights`. Then `update_op` increments `true_positive_at_<k>` and
`false_negative_at_<k>` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
num_labels=1. N >= 1 and num_labels is the number of target classes for
the associated prediction. Commonly, N=1 and `labels` has shape
[batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values
should be in range [0, num_classes), where num_classes is the last
dimension of `predictions`. Values outside this range always count
towards `false_negative_at_<k>`.
predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes].
The final dimension contains the logit values for each class. [D1, ... DN]
must match `labels`.
k: Integer, k for @k metric.
class_id: Integer class ID for which we want binary metrics. This should be
in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If class_id is outside this range, the method returns NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
recall: Scalar `float64` `Tensor` with the value of `true_positives` divided
by the sum of `true_positives` and `false_negatives`.
update_op: `Operation` that increments `true_positives` and
`false_negatives` variables appropriately, and whose value matches
`recall`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.recall_at_k is not '
'supported when eager execution is enabled.')
with ops.name_scope(name, _at_k_name('recall', k, class_id=class_id),
(predictions, labels, weights)) as scope:
_, top_k_idx = nn.top_k(predictions, k)
return recall_at_top_k(
labels=labels,
predictions_idx=top_k_idx,
k=k,
class_id=class_id,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=scope)
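# Illustrative sketch only (not part of the library): recall@2 on a toy
# three-class problem with one sparse label per example. The helper name, the
# toy tensors and the function-local imports below are assumptions.
def _example_recall_at_k_usage():
  """Minimal usage sketch for `recall_at_k` (assumes graph mode)."""
  from tensorflow.python.client import session as session_lib
  from tensorflow.python.framework import constant_op
  from tensorflow.python.ops import variables
  labels = constant_op.constant([[0], [2]], dtype=dtypes.int64)
  predictions = constant_op.constant([[0.6, 0.3, 0.1],
                                      [0.5, 0.4, 0.1]])
  rec, update_op = recall_at_k(labels, predictions, k=2)
  with session_lib.Session() as sess:
    sess.run(variables.local_variables_initializer())
    sess.run(update_op)
    # Class 0 is in the top-2 for the first example; class 2 is not in the
    # top-2 for the second, so recall@2 is 0.5.
    return sess.run(rec)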
@tf_export('metrics.recall_at_top_k')
def recall_at_top_k(labels,
predictions_idx,
k=None,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes recall@k of top-k predictions with respect to sparse labels.
Differs from `recall_at_k` in that predictions must be in the form of top `k`
class indices, whereas `recall_at_k` expects logits. Refer to `recall_at_k`
for more details.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
num_labels=1. N >= 1 and num_labels is the number of target classes for
the associated prediction. Commonly, N=1 and `labels` has shape
[batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values
should be in range [0, num_classes), where num_classes is the last
dimension of `predictions`. Values outside this range always count
towards `false_negative_at_<k>`.
predictions_idx: Integer `Tensor` with shape [D1, ... DN, k] where N >= 1.
Commonly, N=1 and predictions has shape [batch size, k]. The final
dimension contains the top `k` predicted class indices. [D1, ... DN] must
match `labels`.
k: Integer, k for @k metric. Only used for the default op name.
class_id: Integer class ID for which we want binary metrics. This should be
in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If class_id is outside this range, the method returns NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
recall: Scalar `float64` `Tensor` with the value of `true_positives` divided
by the sum of `true_positives` and `false_negatives`.
update_op: `Operation` that increments `true_positives` and
`false_negatives` variables appropriately, and whose value matches
`recall`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
"""
with ops.name_scope(name, _at_k_name('recall', k, class_id=class_id),
(predictions_idx, labels, weights)) as scope:
labels = _maybe_expand_labels(labels, predictions_idx)
top_k_idx = math_ops.to_int64(predictions_idx)
tp, tp_update = _streaming_sparse_true_positive_at_k(
predictions_idx=top_k_idx,
labels=labels,
k=k,
class_id=class_id,
weights=weights)
fn, fn_update = _streaming_sparse_false_negative_at_k(
predictions_idx=top_k_idx,
labels=labels,
k=k,
class_id=class_id,
weights=weights)
def compute_recall(_, tp, fn):
return math_ops.div(tp, math_ops.add(tp, fn), name=scope)
metric = _aggregate_across_replicas(
metrics_collections, compute_recall, tp, fn)
update = math_ops.div(
tp_update, math_ops.add(tp_update, fn_update), name='update')
if updates_collections:
ops.add_to_collections(updates_collections, update)
return metric, update
@tf_export('metrics.recall_at_thresholds')
def recall_at_thresholds(labels,
predictions,
thresholds,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes various recall values for different `thresholds` on `predictions`.
The `recall_at_thresholds` function creates four local variables,
`true_positives`, `true_negatives`, `false_positives` and `false_negatives`
for various values of thresholds. `recall[i]` is defined as the total weight
of values in `predictions` above `thresholds[i]` whose corresponding entry in
`labels` is `True`, divided by the total weight of `True` values in `labels`
(`true_positives[i] / (true_positives[i] + false_negatives[i])`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `recall`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `recall` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
recall: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables that
are used in the computation of `recall`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.recall_at_thresholds is not '
'supported when eager execution is enabled.')
with variable_scope.variable_scope(name, 'recall_at_thresholds',
(predictions, labels, weights)):
values, update_ops = _confusion_matrix_at_thresholds(
labels, predictions, thresholds, weights, includes=('tp', 'fn'))
# Avoid division by zero.
epsilon = 1e-7
def compute_recall(tp, fn, name):
return math_ops.div(tp, epsilon + tp + fn, name='recall_' + name)
def recall_across_replicas(_, values):
return compute_recall(values['tp'], values['fn'], 'value')
rec = _aggregate_across_replicas(
metrics_collections, recall_across_replicas, values)
update_op = compute_recall(update_ops['tp'], update_ops['fn'], 'update_op')
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return rec, update_op
@tf_export('metrics.root_mean_squared_error')
def root_mean_squared_error(labels,
predictions,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the root mean squared error between the labels and predictions.
The `root_mean_squared_error` function creates two local variables,
`total` and `count` that are used to compute the root mean squared error.
This average is weighted by `weights`, and it is ultimately returned as
`root_mean_squared_error`: an idempotent operation that takes the square root
of the division of `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`root_mean_squared_error`. Internally, a `squared_error` operation computes
the element-wise square of the difference between `predictions` and `labels`.
Then `update_op` increments `total` with the reduced sum of the product of
`weights` and `squared_error`, and it increments `count` with the reduced sum
of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` of the same shape as `predictions`.
predictions: A `Tensor` of arbitrary shape.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`root_mean_squared_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
root_mean_squared_error: A `Tensor` representing the current mean, the value
of `total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `root_mean_squared_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.root_mean_squared_error is not '
'supported when eager execution is enabled.')
predictions, labels, weights = _remove_squeezable_dimensions(
predictions=predictions, labels=labels, weights=weights)
mse, update_mse_op = mean_squared_error(labels, predictions, weights, None,
None, name or
'root_mean_squared_error')
once_across_replicas = lambda _, mse: math_ops.sqrt(mse)
rmse = _aggregate_across_replicas(
metrics_collections, once_across_replicas, mse)
update_rmse_op = math_ops.sqrt(update_mse_op)
if updates_collections:
ops.add_to_collections(updates_collections, update_rmse_op)
return rmse, update_rmse_op
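# Illustrative sketch only (not part of the library): RMSE is the square root
# of the streaming mean squared error. The helper name, the toy tensors and
# the function-local imports below are assumptions.
def _example_root_mean_squared_error_usage():
  """Minimal usage sketch for `root_mean_squared_error` (graph mode)."""
  from tensorflow.python.client import session as session_lib
  from tensorflow.python.framework import constant_op
  from tensorflow.python.ops import variables
  labels = constant_op.constant([1.0, 2.0, 3.0])
  predictions = constant_op.constant([2.0, 2.0, 5.0])
  rmse, update_op = root_mean_squared_error(labels, predictions)
  with session_lib.Session() as sess:
    sess.run(variables.local_variables_initializer())
    sess.run(update_op)
    return sess.run(rmse)  # sqrt((1 + 0 + 4) / 3) = sqrt(5/3), roughly 1.29.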
@tf_export('metrics.sensitivity_at_specificity')
def sensitivity_at_specificity(labels,
predictions,
specificity,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the specificity at a given sensitivity.
The `sensitivity_at_specificity` function creates four local
variables, `true_positives`, `true_negatives`, `false_positives` and
`false_negatives` that are used to compute the sensitivity at the given
specificity value. The threshold for the given specificity value is computed
and used to evaluate the corresponding sensitivity.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`sensitivity`. `update_op` increments the `true_positives`, `true_negatives`,
`false_positives` and `false_negatives` counts with the weight of each case
found in the `predictions` and `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
For additional information about specificity and sensitivity, see the
following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
specificity: A scalar value in range `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use for matching the given
specificity.
metrics_collections: An optional list of collections that `sensitivity`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
sensitivity: A scalar `Tensor` representing the sensitivity at the given
`specificity` value.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables
appropriately and whose value matches `sensitivity`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`specificity` is not between 0 and 1, or if either `metrics_collections`
or `updates_collections` are not a list or tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.sensitivity_at_specificity is not '
'supported when eager execution is enabled.')
if specificity < 0 or specificity > 1:
raise ValueError('`specificity` must be in the range [0, 1].')
with variable_scope.variable_scope(name, 'sensitivity_at_specificity',
(predictions, labels, weights)):
kepsilon = 1e-7 # to account for floating point imprecisions
thresholds = [
(i + 1) * 1.0 / (num_thresholds - 1) for i in range(num_thresholds - 2)
]
thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]
values, update_ops = _confusion_matrix_at_thresholds(
labels, predictions, thresholds, weights)
def compute_sensitivity_at_specificity(tp, tn, fp, fn, name):
specificities = math_ops.div(tn, tn + fp + kepsilon)
tf_index = math_ops.argmin(math_ops.abs(specificities - specificity), 0)
tf_index = math_ops.cast(tf_index, dtypes.int32)
# Now, we have the implicit threshold, so compute the sensitivity:
return math_ops.div(tp[tf_index], tp[tf_index] + fn[tf_index] + kepsilon,
name)
def sensitivity_across_replicas(_, values):
return compute_sensitivity_at_specificity(
values['tp'], values['tn'], values['fp'], values['fn'], 'value')
sensitivity = _aggregate_across_replicas(
metrics_collections, sensitivity_across_replicas, values)
update_op = compute_sensitivity_at_specificity(
update_ops['tp'], update_ops['tn'], update_ops['fp'], update_ops['fn'],
'update_op')
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return sensitivity, update_op
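# A minimal usage sketch (illustrative only): `eval_labels` is assumed to be a
# bool-castable label tensor and `eval_scores` a score tensor in [0, 1]; the
# 0.95 target specificity is an arbitrary example value.
def _sensitivity_at_specificity_usage_sketch(eval_labels, eval_scores):
  sens, sens_update_op = sensitivity_at_specificity(
      eval_labels, eval_scores, specificity=0.95)
  # `sens_update_op` accumulates confusion-matrix counts per batch; `sens`
  # reports sensitivity at the threshold closest to 95% specificity.
  return sens, sens_update_op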
def _expand_and_tile(tensor, multiple, dim=0, name=None):
"""Slice `tensor` shape in 2, then tile along the sliced dimension.
A new dimension is inserted in shape of `tensor` before `dim`, then values are
tiled `multiple` times along the new dimension.
Args:
tensor: Input `Tensor` or `SparseTensor`.
multiple: Integer, number of times to tile.
dim: Integer, dimension along which to tile.
name: Name of operation.
Returns:
`Tensor` result of expanding and tiling `tensor`.
Raises:
ValueError: if `multiple` is less than 1, or `dim` is not in
`[-rank(tensor), rank(tensor)]`.
"""
if multiple < 1:
raise ValueError('Invalid multiple %s, must be > 0.' % multiple)
with ops.name_scope(name, 'expand_and_tile',
(tensor, multiple, dim)) as scope:
# Sparse.
tensor = sparse_tensor.convert_to_tensor_or_sparse_tensor(tensor)
if isinstance(tensor, sparse_tensor.SparseTensor):
if dim < 0:
expand_dims = array_ops.reshape(
array_ops.size(tensor.dense_shape) + dim, [1])
else:
expand_dims = [dim]
expanded_shape = array_ops.concat(
(array_ops.slice(tensor.dense_shape, [0], expand_dims), [1],
array_ops.slice(tensor.dense_shape, expand_dims, [-1])),
0,
name='expanded_shape')
expanded = sparse_ops.sparse_reshape(
tensor, shape=expanded_shape, name='expand')
if multiple == 1:
return expanded
return sparse_ops.sparse_concat(
dim - 1 if dim < 0 else dim, [expanded] * multiple, name=scope)
# Dense.
expanded = array_ops.expand_dims(
tensor, dim if (dim >= 0) else (dim - 1), name='expand')
if multiple == 1:
return expanded
ones = array_ops.ones_like(array_ops.shape(tensor))
tile_multiples = array_ops.concat(
(ones[:dim], (multiple,), ones[dim:]), 0, name='multiples')
return array_ops.tile(expanded, tile_multiples, name=scope)
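# For intuition: given a dense `tensor` of shape [batch_size, num_labels],
# `_expand_and_tile(tensor, multiple=k, dim=-1)` first expands it to
# [batch_size, 1, num_labels] and then tiles it to [batch_size, k, num_labels],
# which is how labels are replicated per top-k prediction below.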
def _num_relevant(labels, k):
"""Computes number of relevant values for each row in labels.
For labels with shape [D1, ... DN, num_labels], this is the minimum of
`num_labels` and `k`.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels].
k: Integer, k for @k metric.
Returns:
Integer `Tensor` of shape [D1, ... DN], where each value is the number of
relevant values for that row.
Raises:
ValueError: if inputs have invalid dtypes or values.
"""
if k < 1:
raise ValueError('Invalid k=%s.' % k)
with ops.name_scope(None, 'num_relevant', (labels,)) as scope:
# For SparseTensor, calculate separate count for each row.
labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels)
if isinstance(labels, sparse_tensor.SparseTensor):
return math_ops.minimum(sets.set_size(labels), k, name=scope)
# For dense Tensor, calculate scalar count based on last dimension, and
# tile across labels shape.
labels_shape = array_ops.shape(labels)
labels_size = labels_shape[-1]
num_relevant_scalar = math_ops.minimum(labels_size, k)
return array_ops.fill(labels_shape[0:-1], num_relevant_scalar, name=scope)
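# For intuition: dense `labels` of shape [batch_size, 5] with k=3 yield a
# [batch_size] tensor filled with 3, while a sparse row holding only 2 label
# ids contributes min(2, 3) = 2.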
def _sparse_average_precision_at_top_k(labels, predictions_idx):
"""Computes average precision@k of predictions with respect to sparse labels.
From en.wikipedia.org/wiki/Information_retrieval#Average_precision, formula
for each row is:
AveP = sum_{i=1...k} P_{i} * rel_{i} / num_relevant_items
A "row" is the elements in dimension [D1, ... DN] of `predictions_idx`,
`labels`, and the result `Tensors`. In the common case, this is [batch_size].
Each row of the results contains the average precision for that row.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
num_labels=1. N >= 1 and num_labels is the number of target classes for
the associated prediction. Commonly, N=1 and `labels` has shape
[batch_size, num_labels]. [D1, ... DN] must match `predictions_idx`.
Values should be in range [0, num_classes).
predictions_idx: Integer `Tensor` with shape [D1, ... DN, k] where N >= 1.
Commonly, N=1 and `predictions_idx` has shape [batch size, k]. The final
dimension must be set and contains the top `k` predicted class indices.
[D1, ... DN] must match `labels`. Values should be in range
[0, num_classes).
Returns:
`float64` `Tensor` of shape [D1, ... DN], where each value is the average
precision for that row.
Raises:
ValueError: if the last dimension of predictions_idx is not set.
"""
with ops.name_scope(None, 'average_precision',
(predictions_idx, labels)) as scope:
predictions_idx = math_ops.to_int64(predictions_idx, name='predictions_idx')
if predictions_idx.get_shape().ndims == 0:
raise ValueError('The rank of predictions_idx must be at least 1.')
k = predictions_idx.get_shape().as_list()[-1]
if k is None:
raise ValueError('The last dimension of predictions_idx must be set.')
labels = _maybe_expand_labels(labels, predictions_idx)
# Expand dims to produce [D1, ... DN, k, 1] tensor. This gives us a separate
# prediction for each k, so we can calculate separate true positive values
# for each k.
predictions_idx_per_k = array_ops.expand_dims(
predictions_idx, -1, name='predictions_idx_per_k')
# Replicate labels k times to produce [D1, ... DN, k, num_labels] tensor.
labels_per_k = _expand_and_tile(
labels, multiple=k, dim=-1, name='labels_per_k')
# The following tensors are all of shape [D1, ... DN, k], containing values
# per row, per k value.
# `relevant_per_k` (int32) - Relevance indicator, 1 if the prediction at
# that k value is correct, 0 otherwise. This is the "rel_{i}" term from
# the formula above.
# `tp_per_k` (int32) - True positive counts.
# `retrieved_per_k` (int32) - Number of predicted values at each k. This is
# the precision denominator.
# `precision_per_k` (float64) - Precision at each k. This is the "P_{i}"
# term from the formula above.
# `relevant_precision_per_k` (float64) - Relevant precisions; i.e.,
# precisions at all k for which relevance indicator is true.
relevant_per_k = _sparse_true_positive_at_k(
labels_per_k, predictions_idx_per_k, name='relevant_per_k')
tp_per_k = math_ops.cumsum(relevant_per_k, axis=-1, name='tp_per_k')
retrieved_per_k = math_ops.cumsum(
array_ops.ones_like(relevant_per_k), axis=-1, name='retrieved_per_k')
precision_per_k = math_ops.div(
math_ops.to_double(tp_per_k),
math_ops.to_double(retrieved_per_k),
name='precision_per_k')
relevant_precision_per_k = math_ops.multiply(
precision_per_k,
math_ops.to_double(relevant_per_k),
name='relevant_precision_per_k')
# Reduce along k dimension to get the sum, yielding a [D1, ... DN] tensor.
precision_sum = math_ops.reduce_sum(
relevant_precision_per_k, reduction_indices=(-1,), name='precision_sum')
# Divide by number of relevant items to get average precision. These are
# the "num_relevant_items" and "AveP" terms from the formula above.
num_relevant_items = math_ops.to_double(_num_relevant(labels, k))
return math_ops.div(precision_sum, num_relevant_items, name=scope)
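# Worked example for intuition: with label set {1, 3} and top-3 predictions
# [3, 0, 1], hits occur at ranks 1 and 3 with precisions 1/1 and 2/3;
# num_relevant = min(2, 3) = 2, so AveP = (1 + 2/3) / 2 = 5/6 for that row.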
def _streaming_sparse_average_precision_at_top_k(labels,
predictions_idx,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes average precision@k of predictions with respect to sparse labels.
  `sparse_average_precision_at_top_k` creates two local variables,
  `average_precision_at_<k>/total` and `average_precision_at_<k>/max`, that
  are used to compute the mean average precision. This mean is ultimately
  returned as `average_precision_at_<k>`: an idempotent operation that simply
  divides `average_precision_at_<k>/total` by `average_precision_at_<k>/max`.
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `average_precision_at_<k>`. Internally, per-example average precision is
  computed from `labels` and `predictions_idx`, weighted by `weights`, and
  `update_op` adds the batch totals to `average_precision_at_<k>/total` and
  `average_precision_at_<k>/max`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
num_labels=1. N >= 1 and num_labels is the number of target classes for
the associated prediction. Commonly, N=1 and `labels` has shape
[batch_size, num_labels]. [D1, ... DN] must match `predictions_idx`.
Values should be in range [0, num_classes).
predictions_idx: Integer `Tensor` with shape [D1, ... DN, k] where N >= 1.
Commonly, N=1 and `predictions_idx` has shape [batch size, k]. The final
dimension contains the top `k` predicted class indices. [D1, ... DN] must
match `labels`. Values should be in range [0, num_classes).
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
mean_average_precision: Scalar `float64` `Tensor` with the mean average
precision values.
update: `Operation` that increments variables appropriately, and whose
value matches `metric`.
"""
with ops.name_scope(name, 'average_precision_at_top_k',
(predictions_idx, labels, weights)) as scope:
# Calculate per-example average precision, and apply weights.
average_precision = _sparse_average_precision_at_top_k(
predictions_idx=predictions_idx, labels=labels)
if weights is not None:
weights = weights_broadcast_ops.broadcast_weights(
math_ops.to_double(weights), average_precision)
average_precision = math_ops.multiply(average_precision, weights)
# Create accumulation variables and update ops for max average precision and
# total average precision.
with ops.name_scope(None, 'max', (average_precision,)) as max_scope:
# `max` is the max possible precision. Since max for any row is 1.0:
# - For the unweighted case, this is just the number of rows.
# - For the weighted case, it's the sum of the weights broadcast across
# `average_precision` rows.
max_var = metric_variable([], dtypes.float64, name=max_scope)
if weights is None:
batch_max = math_ops.to_double(
array_ops.size(average_precision, name='batch_max'))
else:
batch_max = math_ops.reduce_sum(weights, name='batch_max')
max_update = state_ops.assign_add(max_var, batch_max, name='update')
with ops.name_scope(None, 'total', (average_precision,)) as total_scope:
total_var = metric_variable([], dtypes.float64, name=total_scope)
batch_total = math_ops.reduce_sum(average_precision, name='batch_total')
total_update = state_ops.assign_add(total_var, batch_total, name='update')
# Divide total by max to get mean, for both vars and the update ops.
def precision_across_replicas(_, total_var, max_var):
return _safe_scalar_div(total_var, max_var, name='mean')
mean_average_precision = _aggregate_across_replicas(
metrics_collections, precision_across_replicas, total_var, max_var)
update = _safe_scalar_div(total_update, max_update, name=scope)
if updates_collections:
ops.add_to_collections(updates_collections, update)
return mean_average_precision, update
@tf_export('metrics.sparse_average_precision_at_k')
@deprecated(None, 'Use average_precision_at_k instead')
def sparse_average_precision_at_k(labels,
predictions,
k,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Renamed to `average_precision_at_k`, please use that method instead."""
return average_precision_at_k(
labels=labels,
predictions=predictions,
k=k,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@tf_export('metrics.average_precision_at_k')
def average_precision_at_k(labels,
predictions,
k,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes average precision@k of predictions with respect to sparse labels.
  `average_precision_at_k` creates two local variables,
  `average_precision_at_<k>/total` and `average_precision_at_<k>/max`, that
  are used to compute the mean average precision. This mean is ultimately
  returned as `average_precision_at_<k>`: an idempotent operation that simply
  divides `average_precision_at_<k>/total` by `average_precision_at_<k>/max`.
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `average_precision_at_<k>`. Internally, a `top_k` operation computes a
  `Tensor` of the top `k` class indices of `predictions`; per-example average
  precision is then computed from those indices and `labels`, weighted by
  `weights`, and `update_op` adds the batch totals to
  `average_precision_at_<k>/total` and `average_precision_at_<k>/max`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
num_labels=1. N >= 1 and num_labels is the number of target classes for
the associated prediction. Commonly, N=1 and `labels` has shape
[batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values
should be in range [0, num_classes), where num_classes is the last
dimension of `predictions`. Values outside this range are ignored.
predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
N >= 1. Commonly, N=1 and `predictions` has shape
[batch size, num_classes]. The final dimension contains the logit values
for each class. [D1, ... DN] must match `labels`.
k: Integer, k for @k metric. This will calculate an average precision for
range `[1,k]`, as documented above.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
mean_average_precision: Scalar `float64` `Tensor` with the mean average
precision values.
update: `Operation` that increments variables appropriately, and whose
value matches `metric`.
Raises:
ValueError: if k is invalid.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
    raise RuntimeError('tf.metrics.average_precision_at_k is not '
                       'supported when eager execution is enabled.')
if k < 1:
raise ValueError('Invalid k=%s.' % k)
with ops.name_scope(name, _at_k_name('average_precision', k),
(predictions, labels, weights)) as scope:
# Calculate top k indices to produce [D1, ... DN, k] tensor.
_, predictions_idx = nn.top_k(predictions, k)
return _streaming_sparse_average_precision_at_top_k(
labels=labels,
predictions_idx=predictions_idx,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=scope)
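# A minimal usage sketch (illustrative only): `eval_labels` is assumed to be an
# int64 [batch_size, num_labels] tensor of class ids and `eval_logits` a float
# [batch_size, num_classes] tensor; k=5 is an arbitrary example value.
def _average_precision_at_k_usage_sketch(eval_labels, eval_logits):
  map_at_5, map_update_op = average_precision_at_k(
      labels=eval_labels, predictions=eval_logits, k=5)
  # Run `map_update_op` per batch; `map_at_5` is the running mean AveP@5.
  return map_at_5, map_update_op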
def _sparse_false_positive_at_k(labels,
predictions_idx,
class_id=None,
weights=None):
"""Calculates false positives for precision@k.
If `class_id` is specified, calculate binary true positives for `class_id`
only.
If `class_id` is not specified, calculate metrics for `k` predicted vs
  `n` label classes, where `n` is the 2nd dimension of `labels`.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
match `labels`.
class_id: Class for which we want binary metrics.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
Returns:
A [D1, ... DN] `Tensor` of false positive counts.
"""
with ops.name_scope(None, 'false_positives',
(predictions_idx, labels, weights)):
labels, predictions_idx = _maybe_select_class_id(labels, predictions_idx,
class_id)
fp = sets.set_size(
sets.set_difference(predictions_idx, labels, aminusb=True))
fp = math_ops.to_double(fp)
if weights is not None:
with ops.control_dependencies((weights_broadcast_ops.assert_broadcastable(
weights, fp),)):
weights = math_ops.to_double(weights)
fp = math_ops.multiply(fp, weights)
return fp
def _streaming_sparse_false_positive_at_k(labels,
predictions_idx,
k=None,
class_id=None,
weights=None,
name=None):
"""Calculates weighted per step false positives for precision@k.
If `class_id` is specified, calculate binary true positives for `class_id`
only.
If `class_id` is not specified, calculate metrics for `k` predicted vs
`n` label classes, where `n` is the 2nd dimension of `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
match `labels`.
k: Integer, k for @k metric. This is only used for default op name.
class_id: Class for which we want binary metrics.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
name: Name of new variable, and namespace for other dependent ops.
Returns:
A tuple of `Variable` and update `Operation`.
Raises:
ValueError: If `weights` is not `None` and has an incompatible shape.
"""
with ops.name_scope(name, _at_k_name('false_positive', k, class_id=class_id),
(predictions_idx, labels, weights)) as scope:
fp = _sparse_false_positive_at_k(
predictions_idx=predictions_idx,
labels=labels,
class_id=class_id,
weights=weights)
batch_total_fp = math_ops.to_double(math_ops.reduce_sum(fp))
var = metric_variable([], dtypes.float64, name=scope)
return var, state_ops.assign_add(var, batch_total_fp, name='update')
@tf_export('metrics.precision_at_top_k')
def precision_at_top_k(labels,
predictions_idx,
k=None,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes precision@k of the predictions with respect to sparse labels.
Differs from `sparse_precision_at_k` in that predictions must be in the form
of top `k` class indices, whereas `sparse_precision_at_k` expects logits.
Refer to `sparse_precision_at_k` for more details.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
num_labels=1. N >= 1 and num_labels is the number of target classes for
the associated prediction. Commonly, N=1 and `labels` has shape
[batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values
should be in range [0, num_classes), where num_classes is the last
dimension of `predictions`. Values outside this range are ignored.
predictions_idx: Integer `Tensor` with shape [D1, ... DN, k] where
N >= 1. Commonly, N=1 and predictions has shape [batch size, k].
The final dimension contains the top `k` predicted class indices.
[D1, ... DN] must match `labels`.
k: Integer, k for @k metric. Only used for the default op name.
class_id: Integer class ID for which we want binary metrics. This should be
      in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If `class_id` is outside this range, the method returns
NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
precision: Scalar `float64` `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately, and whose value matches
`precision`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.precision_at_top_k is not '
'supported when eager execution is enabled.')
with ops.name_scope(name, _at_k_name('precision', k, class_id=class_id),
(predictions_idx, labels, weights)) as scope:
labels = _maybe_expand_labels(labels, predictions_idx)
top_k_idx = math_ops.to_int64(predictions_idx)
tp, tp_update = _streaming_sparse_true_positive_at_k(
predictions_idx=top_k_idx,
labels=labels,
k=k,
class_id=class_id,
weights=weights)
fp, fp_update = _streaming_sparse_false_positive_at_k(
predictions_idx=top_k_idx,
labels=labels,
k=k,
class_id=class_id,
weights=weights)
def precision_across_replicas(_, tp, fp):
return math_ops.div(tp, math_ops.add(tp, fp), name=scope)
metric = _aggregate_across_replicas(
metrics_collections, precision_across_replicas, tp, fp)
update = math_ops.div(
tp_update, math_ops.add(tp_update, fp_update), name='update')
if updates_collections:
ops.add_to_collections(updates_collections, update)
return metric, update
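# A minimal usage sketch (illustrative only): useful when the top-k class
# indices were already materialized, e.g. by a separate `nn.top_k` call.
# `eval_labels` and `eval_top_k_idx` are assumed caller-provided tensors.
def _precision_at_top_k_usage_sketch(eval_labels, eval_top_k_idx):
  prec, prec_update_op = precision_at_top_k(
      labels=eval_labels, predictions_idx=eval_top_k_idx, k=5)
  return prec, prec_update_op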
@tf_export('metrics.sparse_precision_at_k')
@deprecated(None, 'Use precision_at_k instead')
def sparse_precision_at_k(labels,
predictions,
k,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Renamed to `precision_at_k`, please use that method instead."""
return precision_at_k(
labels=labels,
predictions=predictions,
k=k,
class_id=class_id,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@tf_export('metrics.precision_at_k')
def precision_at_k(labels,
predictions,
k,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes precision@k of the predictions with respect to sparse labels.
If `class_id` is specified, we calculate precision by considering only the
entries in the batch for which `class_id` is in the top-k highest
`predictions`, and computing the fraction of them for which `class_id` is
indeed a correct label.
If `class_id` is not specified, we'll calculate precision as how often on
average a class among the top-k classes with the highest predicted values
of a batch entry is correct and can be found in the label for that entry.
`precision_at_k` creates two local variables,
`true_positive_at_<k>` and `false_positive_at_<k>`, that are used to compute
the precision@k frequency. This frequency is ultimately returned as
`precision_at_<k>`: an idempotent operation that simply divides
`true_positive_at_<k>` by total (`true_positive_at_<k>` +
`false_positive_at_<k>`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
indicating the top `k` `predictions`. Set operations applied to `top_k` and
`labels` calculate the true positives and false positives weighted by
`weights`. Then `update_op` increments `true_positive_at_<k>` and
`false_positive_at_<k>` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
num_labels=1. N >= 1 and num_labels is the number of target classes for
the associated prediction. Commonly, N=1 and `labels` has shape
[batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values
should be in range [0, num_classes), where num_classes is the last
dimension of `predictions`. Values outside this range are ignored.
predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes].
The final dimension contains the logit values for each class. [D1, ... DN]
must match `labels`.
k: Integer, k for @k metric.
class_id: Integer class ID for which we want binary metrics. This should be
      in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If `class_id` is outside this range, the method returns
NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
precision: Scalar `float64` `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately, and whose value matches
`precision`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
    raise RuntimeError('tf.metrics.precision_at_k is not '
                       'supported when eager execution is enabled.')
with ops.name_scope(name, _at_k_name('precision', k, class_id=class_id),
(predictions, labels, weights)) as scope:
_, top_k_idx = nn.top_k(predictions, k)
return precision_at_top_k(
labels=labels,
predictions_idx=top_k_idx,
k=k,
class_id=class_id,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=scope)
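# A minimal usage sketch (illustrative only): binary precision@k restricted to
# a single class id. `eval_labels` and `eval_logits` are assumed to be shaped
# as documented above; class id 7 and k=5 are arbitrary example values.
def _precision_at_k_usage_sketch(eval_labels, eval_logits):
  prec_cls7, prec_update_op = precision_at_k(
      labels=eval_labels, predictions=eval_logits, k=5, class_id=7)
  # Only batch entries whose top-5 predictions include class 7 contribute.
  return prec_cls7, prec_update_op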
@tf_export('metrics.specificity_at_sensitivity')
def specificity_at_sensitivity(labels,
predictions,
sensitivity,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the specificity at a given sensitivity.
The `specificity_at_sensitivity` function creates four local
variables, `true_positives`, `true_negatives`, `false_positives` and
`false_negatives` that are used to compute the specificity at the given
sensitivity value. The threshold for the given sensitivity value is computed
and used to evaluate the corresponding specificity.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`specificity`. `update_op` increments the `true_positives`, `true_negatives`,
`false_positives` and `false_negatives` counts with the weight of each case
found in the `predictions` and `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
For additional information about specificity and sensitivity, see the
following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
sensitivity: A scalar value in range `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use for matching the given
sensitivity.
metrics_collections: An optional list of collections that `specificity`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
specificity: A scalar `Tensor` representing the specificity at the given
      `sensitivity` value.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables
appropriately and whose value matches `specificity`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`sensitivity` is not between 0 and 1, or if either `metrics_collections`
or `updates_collections` are not a list or tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.specificity_at_sensitivity is not '
'supported when eager execution is enabled.')
if sensitivity < 0 or sensitivity > 1:
raise ValueError('`sensitivity` must be in the range [0, 1].')
with variable_scope.variable_scope(name, 'specificity_at_sensitivity',
(predictions, labels, weights)):
kepsilon = 1e-7 # to account for floating point imprecisions
thresholds = [
(i + 1) * 1.0 / (num_thresholds - 1) for i in range(num_thresholds - 2)
]
thresholds = [0.0 - kepsilon] + thresholds + [1.0 - kepsilon]
values, update_ops = _confusion_matrix_at_thresholds(
labels, predictions, thresholds, weights)
def compute_specificity_at_sensitivity(tp, tn, fp, fn, name):
"""Computes the specificity at the given sensitivity.
Args:
tp: True positives.
tn: True negatives.
fp: False positives.
fn: False negatives.
name: The name of the operation.
Returns:
The specificity using the aggregated values.
"""
sensitivities = math_ops.div(tp, tp + fn + kepsilon)
# We'll need to use this trick until tf.argmax allows us to specify
# whether we should use the first or last index in case of ties.
min_val = math_ops.reduce_min(math_ops.abs(sensitivities - sensitivity))
indices_at_minval = math_ops.equal(
math_ops.abs(sensitivities - sensitivity), min_val)
indices_at_minval = math_ops.to_int64(indices_at_minval)
indices_at_minval = math_ops.cumsum(indices_at_minval)
tf_index = math_ops.argmax(indices_at_minval, 0)
tf_index = math_ops.cast(tf_index, dtypes.int32)
# Now, we have the implicit threshold, so compute the specificity:
return math_ops.div(tn[tf_index], tn[tf_index] + fp[tf_index] + kepsilon,
name)
def specificity_across_replicas(_, values):
return compute_specificity_at_sensitivity(
values['tp'], values['tn'], values['fp'], values['fn'], 'value')
specificity = _aggregate_across_replicas(
metrics_collections, specificity_across_replicas, values)
update_op = compute_specificity_at_sensitivity(
update_ops['tp'], update_ops['tn'], update_ops['fp'], update_ops['fn'],
'update_op')
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return specificity, update_op
|
apache-2.0
| -4,886,929,974,480,028,000
| 43.016527
| 80
| 0.658819
| false
| 4.06325
| false
| false
| false
|
marzig76/blexplor
|
opcode.py
|
1
|
3840
|
"""A simple module housing Bitcoin Script OP Code values."""
class opcode(object):
"""Define Bitcoin Script OP Code values."""
opcodes = {}
# Constants
opcodes['OP_FALSE'] = 0x00
opcodes['OP_1NEGATE'] = 0x4f
opcodes['OP_TRUE'] = 0x51
opcodes['OP_2'] = 0x52
opcodes['OP_3'] = 0x53
opcodes['OP_4'] = 0x54
opcodes['OP_5'] = 0x55
opcodes['OP_6'] = 0x56
opcodes['OP_7'] = 0x57
opcodes['OP_8'] = 0x58
opcodes['OP_9'] = 0x59
opcodes['OP_10'] = 0x5a
opcodes['OP_11'] = 0x5b
opcodes['OP_12'] = 0x5c
opcodes['OP_13'] = 0x5d
opcodes['OP_14'] = 0x5e
opcodes['OP_15'] = 0x5f
opcodes['OP_16'] = 0x60
# Flow Control
opcodes['OP_NOP'] = 0x61
opcodes['OP_IF'] = 0x63
opcodes['OP_NOTIF'] = 0x64
opcodes['OP_ELSE'] = 0x67
opcodes['OP_ENDIF'] = 0x68
opcodes['OP_VERIFY'] = 0x69
opcodes['OP_RETURN'] = 0x6a
# Stack
opcodes['OP_TOALTSTACK'] = 0x6b
opcodes['OP_FROMALTSTACK'] = 0x6c
opcodes['OP_IFDUP'] = 0x73
opcodes['OP_DEPTH'] = 0x74
opcodes['OP_DROP'] = 0x75
opcodes['OP_DUP'] = 0x76
opcodes['OP_NIP'] = 0x77
opcodes['OP_OVER'] = 0x78
opcodes['OP_PICK'] = 0x79
opcodes['OP_ROLL'] = 0x7a
opcodes['OP_ROT'] = 0x7b
opcodes['OP_SWAP'] = 0x7c
opcodes['OP_TUCK'] = 0x7d
opcodes['OP_2DROP'] = 0x6d
opcodes['OP_2DUP'] = 0x6e
opcodes['OP_3DUP'] = 0x6f
opcodes['OP_2OVER'] = 0x70
opcodes['OP_2ROT'] = 0x71
opcodes['OP_2SWAP'] = 0x72
# Splice
opcodes['OP_CAT'] = 0x7e
opcodes['OP_SUBSTR'] = 0x7f
opcodes['OP_LEFT'] = 0x80
opcodes['OP_RIGHT'] = 0x81
opcodes['OP_SIZE'] = 0x82
# Bitwise logic
opcodes['OP_INVERT'] = 0x83
opcodes['OP_AND'] = 0x84
opcodes['OP_OR'] = 0x85
opcodes['OP_XOR'] = 0x86
opcodes['OP_EQUAL'] = 0x87
opcodes['OP_EQUALVERIFY'] = 0x88
# Arithmetic
opcodes['OP_1ADD'] = 0x8b
opcodes['OP_1SUB'] = 0x8c
opcodes['OP_2MUL'] = 0x8d
opcodes['OP_2DIV'] = 0x8e
opcodes['OP_NEGATE'] = 0x8f
opcodes['OP_ABS'] = 0x90
opcodes['OP_NOT'] = 0x91
opcodes['OP_0NOTEQUAL'] = 0x92
opcodes['OP_ADD'] = 0x93
opcodes['OP_SUB'] = 0x94
opcodes['OP_MUL'] = 0x95
opcodes['OP_DIV'] = 0x96
opcodes['OP_MOD'] = 0x97
opcodes['OP_LSHIFT'] = 0x98
opcodes['OP_RSHIFT'] = 0x99
opcodes['OP_BOOLAND'] = 0x9a
opcodes['OP_BOOLOR'] = 0x9b
opcodes['OP_NUMEQUAL'] = 0x9c
opcodes['OP_NUMEQUALVERIFY'] = 0x9d
opcodes['OP_NUMNOTEQUAL'] = 0x9e
opcodes['OP_LESSTHAN'] = 0x9f
opcodes['OP_GREATERTHAN'] = 0xa0
opcodes['OP_LESSTHANOREQUAL'] = 0xa1
opcodes['OP_GREATERTHANOREQUAL'] = 0xa2
opcodes['OP_MIN'] = 0xa3
opcodes['OP_MAX'] = 0xa4
opcodes['OP_WITHIN'] = 0xa5
# Crypto
opcodes['OP_RIPEMD160'] = 0xa6
opcodes['OP_SHA1'] = 0xa7
opcodes['OP_SHA256'] = 0xa8
opcodes['OP_HASH160'] = 0xa9
opcodes['OP_HASH256'] = 0xaa
    opcodes['OP_CODESEPARATOR'] = 0xab
opcodes['OP_CHECKSIG'] = 0xac
opcodes['OP_CHECKSIGVERIFY'] = 0xad
opcodes['OP_CHECKMULTISIG'] = 0xae
opcodes['OP_CHECKMULTISIGVERIFY'] = 0xaf
# Locktime
opcodes['OP_CHECKLOCKTIMEVERIFY'] = 0xb1
opcodes['OP_CHECKSEQUENCEVERIFY'] = 0xb2
# Pseudo-words
opcodes['OP_PUBKEYHASH'] = 0xfd
opcodes['OP_PUBKEY'] = 0xfe
opcodes['OP_INVALIDOPCODE'] = 0xff
# Reserved words
opcodes['OP_RESERVED'] = 0x50
opcodes['OP_VER'] = 0x62
opcodes['OP_VERIF'] = 0x65
opcodes['OP_VERNOTIF'] = 0x66
opcodes['OP_RESERVED1'] = 0x89
opcodes['OP_RESERVED2'] = 0x8a
opcodes['OP_NOP1'] = 0xb0
opcodes['OP_NOP4'] = 0xb3
opcodes['OP_NOP5'] = 0xb4
opcodes['OP_NOP6'] = 0xb5
opcodes['OP_NOP7'] = 0xb6
opcodes['OP_NOP8'] = 0xb7
opcodes['OP_NOP9'] = 0xb8
opcodes['OP_NOP10'] = 0xb9
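# A small illustrative helper (not part of the original module): shows one way
# the `opcodes` table can be used for reverse lookups when rendering raw
# script bytes, e.g. lookup_opcode_name(0x76) -> 'OP_DUP'.
def lookup_opcode_name(byte_value):
    """Return the OP_* name mapped to a byte value, or None if unknown."""
    for name, value in opcode.opcodes.items():
        if value == byte_value:
            return name
    return None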
|
gpl-3.0
| 5,800,116,986,678,497,000
| 26.826087
| 60
| 0.589323
| false
| 2.696629
| false
| false
| false
|
shadycuz/cloudatcost-ansible-module
|
cac_inv.py
|
1
|
6017
|
#!/usr/bin/env python
"""
CloudAtCost external inventory script. Automatically finds hosts and
returns them under the host group 'cloudatcost'
Some code borrowed from linode.py inventory script by Dan Slimmon
"""
import os.path
# import re
import sys
import argparse
# from time import time
from cacpy import CACPy
# import ConfigParser
try:
import json
except ImportError:
import simplejson as json
_group = 'cloudatcost' # a default group
_prepend = 'cloud_' # Prepend all CloudAtCost data, to avoid conflicts
class CloudAtCostInventory(object):
def __init__(self):
"""Main execution path."""
self.api_key = None
self.api_user = None
self.args = self.parse_cli_args()
self.inventory = {}
self.groups = []
# CloudAtCost API Object
self.setupAPI()
self.update_inventory()
# Data to print
if self.args.host:
data_to_print = self.get_host_info(self.args.host)
elif self.args.list:
# Generates output
groups = {}
for group in self.groups:
groups[group]= [server['label'] for server
in self.inventory if server['label'] and server['group_label'] == group]
meta = {
'_meta': {
'hostvars': dict((server['label'],
self.get_host_info(label=server['label']))
for server in self.inventory)
}
}
data_to_print = groups.copy()
data_to_print.update(meta)
else:
data_to_print = "Error: Invalid options"
print(json_format_dict(data_to_print, True))
def update_inventory(self):
"""Makes a CloudAtCost API call to get the list of servers."""
res = self.api.get_server_info()
if res['status'] == 'ok':
self.inventory = res['data']
for server in self.inventory:
server['isnew'] = False
if not server['label']:
server['label'] = server['servername']
server['group_label'] = 'New'
if 'New' not in self.groups:
self.groups.append('New')
server['isnew'] = True
else:
if ' ' in server['label']:
split = (server['label']).split()
server['label'] = split[1]
if split[0] not in self.groups:
self.groups.append(split[0])
server['group_label'] = split[0]
else:
if _group not in self.groups:
self.groups.append(_group)
server['group_label'] = _group
else:
print("Looks like CloudAtCost's API is down:")
print("")
print(res)
sys.exit(1)
def get_server(self, server_id=None, label=None):
"""Gets details about a specific server."""
for server in self.inventory:
if (server_id and server['id'] == server_id) or \
(label and server['label'] == label):
return server
return None
def get_host_info(self, label):
"""Get variables about a specific host."""
server = self.get_server(label=label)
if not server:
return json_format_dict({}, True)
retval = {}
        for (key, value) in server.items():
retval["{}{}".format(_prepend, key)] = value
# Set the SSH host information, so these inventory items can be used if
# their labels aren't FQDNs
retval['ansible_ssh_host'] = server["ip"]
retval['ansible_host'] = server["ip"]
if server['isnew'] or 'New' in server['group_label']:
retval['ansible_ssh_pass'] = server["rootpass"]
retval['ansible_pass'] = server["rootpass"]
return retval
def setupAPI(self):
# Setup the api_key
if not self.api_key:
try:
self.api_key = os.environ['CAC_API_KEY']
            except KeyError:
                print("Please provide API Key.")
sys.exit(1)
# Setup the api_user
if not self.api_user:
try:
self.api_user = os.environ['CAC_API_USER']
            except KeyError:
                print("Please provide API User.")
sys.exit(1)
# setup the auth
try:
self.api = CACPy(self.api_user, self.api_key)
self.api.get_resources()
        except Exception as e:
            print("Failed to contact CloudAtCost API.")
            print("")
            print(e)
sys.exit(1)
@staticmethod
def parse_cli_args():
"""Command line argument processing"""
parser = argparse.ArgumentParser(
description='Produce an Ansible Inventory file based on CloudAtCost')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--list', action='store_true', default=True,
help='List servers (default: True)')
group.add_argument('--host', action='store',
help='Get all the variables about a specific server')
parser.add_argument('--refresh-cache', action='store_true',
default=False,
help='Force refresh of cache by making API requests to CloudAtCost (default: False - use cache files)')
return parser.parse_args()
def json_format_dict(data, pretty=False):
"""Converts a dict to a JSON object and dumps it as a formatted string.
:param data: string
"""
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
CloudAtCostInventory()
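# Usage note (illustrative, based on the environment lookups above): export
# CAC_API_USER and CAC_API_KEY before running, then point Ansible at this
# script as a dynamic inventory, e.g. `ansible -i cac_inv.py all -m ping`.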
|
mit
| -8,786,081,238,855,413,000
| 32.243094
| 131
| 0.525179
| false
| 4.41129
| false
| false
| false
|
joferkington/tutorials
|
1412_Tuning_and_AVO/tuning_wedge_v2.py
|
1
|
10772
|
"""
Python script to generate a zero-offset synthetic from a 3-layer wedge model.
Created by: Wes Hamlyn
Create Date: 19-Aug-2014
Last Mod: 5-Feb-2015
-addition of bandpass wavelet
This script is provided without warranty of any kind.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
###########################################################
#
# DEFINE MODELING PARAMETERS HERE
#
# 3-Layer Model Parameters [Layer1, Layer2, Layer 3]
vp_mod = [2500.0, 2600.0, 2550.0] # P-wave velocity (m/s)
vs_mod = [1200.0, 1300.0, 1200.0] # S-wave velocity (m/s)
rho_mod= [1.95, 2.0, 1.98] # Density (g/cc)
dz_min = 0.0 # Minimum thickness of Layer 2 (m)
dz_max = 60.0 # Maximum thickness of Layer 2 (m)
dz_step= 1.0 # Thickness step from trace-to-trace (normally 1.0 m)
# Wavelet Parameters
wvlt_type = 'bandpass' # Valid values: 'ricker' or 'bandpass'
wvlt_length= 0.128 # Wavelet length in seconds
wvlt_phase = 0.0 # Wavelet phase in degrees
wvlt_scalar = 1.0 # Multiplier to scale wavelet amplitude (default = 1.0)
wvlt_cfreq = 30.0 # Ricker wavelet central frequency
f1 = 5.0 # Bandpass wavelet low truncation frequency
f2 = 10.0 # Bandpass wavelet low cut frequency
f3 = 50.0 # Bandpass wavelet high cut frequency
f4 = 65.0 # Bandpass wavelet high truncation frequency
# Trace Parameters
tmin = 0.0
tmax = 0.5
dt = 0.0001 # changing this from 0.0001 can affect the display quality
# Plot Parameters
min_plot_time = 0.15
max_plot_time = 0.3
excursion = 2
###########################################################
#
# FUNCTIONS DEFINITIONS
#
def plot_vawig(axhdl, data, t, excursion, highlight=None):
import numpy as np
import matplotlib.pyplot as plt
[ntrc, nsamp] = data.shape
t = np.hstack([0, t, t.max()])
for i in range(0, ntrc):
tbuf = excursion * data[i] / np.max(np.abs(data)) + i
tbuf = np.hstack([i, tbuf, i])
if i==highlight:
lw = 2
else:
lw = 0.5
axhdl.plot(tbuf, t, color='black', linewidth=lw)
plt.fill_betweenx(t, tbuf, i, where=tbuf>i, facecolor=[0.6,0.6,1.0], linewidth=0)
plt.fill_betweenx(t, tbuf, i, where=tbuf<i, facecolor=[1.0,0.7,0.7], linewidth=0)
axhdl.set_xlim((-excursion, ntrc+excursion))
axhdl.xaxis.tick_top()
axhdl.xaxis.set_label_position('top')
axhdl.invert_yaxis()
def ricker(cfreq, phase, dt, wvlt_length):
'''
Calculate a ricker wavelet
Usage:
------
t, wvlt = wvlt_ricker(cfreq, phase, dt, wvlt_length)
cfreq: central frequency of wavelet in Hz
phase: wavelet phase in degrees
dt: sample rate in seconds
wvlt_length: length of wavelet in seconds
'''
import numpy as np
import scipy.signal as signal
    nsamp = int(wvlt_length/dt + 1)
    # Symmetric time axis for the wavelet; np.linspace needs an integer
    # sample count.
    t = np.linspace(-wvlt_length/2, (wvlt_length-dt)/2, int(round(wvlt_length/dt)))
wvlt = (1.0 - 2.0*(np.pi**2)*(cfreq**2)*(t**2)) * np.exp(-(np.pi**2)*(cfreq**2)*(t**2))
if phase != 0:
phase = phase*np.pi/180.0
wvlth = signal.hilbert(wvlt)
wvlth = np.imag(wvlth)
wvlt = np.cos(phase)*wvlt - np.sin(phase)*wvlth
return t, wvlt
def wvlt_bpass(f1, f2, f3, f4, phase, dt, wvlt_length):
'''
Calculate a trapezoidal bandpass wavelet
Usage:
------
t, wvlt = wvlt_ricker(f1, f2, f3, f4, phase, dt, wvlt_length)
f1: Low truncation frequency of wavelet in Hz
f2: Low cut frequency of wavelet in Hz
f3: High cut frequency of wavelet in Hz
f4: High truncation frequency of wavelet in Hz
phase: wavelet phase in degrees
dt: sample rate in seconds
wvlt_length: length of wavelet in seconds
'''
    from numpy.fft import fft, ifft, fftfreq, fftshift, ifftshift
    import scipy.signal as signal  # needed for the optional phase rotation
nsamp = int(wvlt_length/dt + 1)
freq = fftfreq(nsamp, dt)
freq = fftshift(freq)
aspec = freq*0.0
pspec = freq*0.0
# Calculate slope and y-int for low frequency ramp
M1 = 1/(f2-f1)
b1 = -M1*f1
# Calculate slop and y-int for high frequency ramp
M2 = -1/(f4-f3)
b2 = -M2*f4
# Build initial frequency and filter arrays
freq = fftfreq(nsamp, dt)
freq = fftshift(freq)
filt = np.zeros(nsamp)
# Build LF ramp
idx = np.nonzero((np.abs(freq)>=f1) & (np.abs(freq)<f2))
filt[idx] = M1*np.abs(freq)[idx]+b1
# Build central filter flat
idx = np.nonzero((np.abs(freq)>=f2) & (np.abs(freq)<=f3))
filt[idx] = 1.0
# Build HF ramp
idx = np.nonzero((np.abs(freq)>f3) & (np.abs(freq)<=f4))
filt[idx] = M2*np.abs(freq)[idx]+b2
# Unshift the frequencies and convert filter to fourier coefficients
filt2 = ifftshift(filt)
Af = filt2*np.exp(np.zeros(filt2.shape)*1j)
# Convert filter to time-domain wavelet
wvlt = fftshift(ifft(Af))
wvlt = np.real(wvlt)
wvlt = wvlt/np.max(np.abs(wvlt)) # normalize wavelet by peak amplitude
# Generate array of wavelet times
t = np.linspace(-wvlt_length*0.5, wvlt_length*0.5, nsamp)
# Apply phase rotation if desired
if phase != 0:
phase = phase*np.pi/180.0
wvlth = signal.hilbert(wvlt)
wvlth = np.imag(wvlth)
wvlt = np.cos(phase)*wvlt - np.sin(phase)*wvlth
return t, wvlt
def calc_rc(vp_mod, rho_mod):
'''
rc_int = calc_rc(vp_mod, rho_mod)
'''
nlayers = len(vp_mod)
nint = nlayers - 1
rc_int = []
for i in range(0, nint):
buf1 = vp_mod[i+1]*rho_mod[i+1]-vp_mod[i]*rho_mod[i]
buf2 = vp_mod[i+1]*rho_mod[i+1]+vp_mod[i]*rho_mod[i]
buf3 = buf1/buf2
rc_int.append(buf3)
return rc_int
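# Worked example for the model above: vp_mod and rho_mod give acoustic
# impedances of 4875, 5200 and 5049 (m/s * g/cc), so calc_rc returns roughly
# [+0.032, -0.015] for the two interfaces.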
def calc_times(z_int, vp_mod):
'''
t_int = calc_times(z_int, vp_mod)
'''
nlayers = len(vp_mod)
nint = nlayers - 1
t_int = []
for i in range(0, nint):
if i == 0:
tbuf = z_int[i]/vp_mod[i]
t_int.append(tbuf)
else:
zdiff = z_int[i]-z_int[i-1]
tbuf = 2*zdiff/vp_mod[i] + t_int[i-1]
t_int.append(tbuf)
return t_int
def digitize_model(rc_int, t_int, t):
'''
rc = digitize_model(rc, t_int, t)
rc = reflection coefficients corresponding to interface times
t_int = interface times
t = regularly sampled time series defining model sampling
'''
import numpy as np
nlayers = len(rc_int)
nint = nlayers - 1
nsamp = len(t)
rc = list(np.zeros(nsamp,dtype='float'))
lyr = 0
for i in range(0, nsamp):
if t[i] >= t_int[lyr]:
rc[i] = rc_int[lyr]
lyr = lyr + 1
if lyr > nint:
break
return rc
##########################################################
#
# COMPUTATIONS BELOW HERE...
#
# Some handy constants
nlayers = len(vp_mod)
nint = nlayers - 1
nmodel = int((dz_max-dz_min)/dz_step+1)
# Generate wavelet
if wvlt_type == 'ricker':
wvlt_t, wvlt_amp = ricker(wvlt_cfreq, wvlt_phase, dt, wvlt_length)
elif wvlt_type == 'bandpass':
wvlt_t, wvlt_amp = wvlt_bpass(f1, f2, f3, f4, wvlt_phase, dt, wvlt_length)
# Apply amplitude scale factor to wavelet (to match seismic amplitude values)
wvlt_amp = wvlt_scalar * wvlt_amp
# Calculate reflectivities from model parameters
rc_int = calc_rc(vp_mod, rho_mod)
syn_zo = []
rc_zo = []
lyr_times = []
for model in range(0, nmodel):
# Calculate interface depths
z_int = [500.0]
z_int.append(z_int[0]+dz_min+dz_step*model)
# Calculate interface times
t_int = calc_times(z_int, vp_mod)
lyr_times.append(t_int)
# Digitize 3-layer model
nsamp = int((tmax-tmin)/dt) + 1
t = []
for i in range(0,nsamp):
t.append(i*dt)
rc = digitize_model(rc_int, t_int, t)
rc_zo.append(rc)
# Convolve wavelet with reflectivities
syn_buf = np.convolve(rc, wvlt_amp, mode='same')
syn_buf = list(syn_buf)
syn_zo.append(syn_buf)
print "finished step %i" % (model)
syn_zo = np.array(syn_zo)
t = np.array(t)
lyr_times = np.array(lyr_times)
lyr_indx = np.array(np.round(lyr_times/dt), dtype='int16')
# Use the transpose because rows are traces;
# columns are time samples.
tuning_trace = np.argmax(np.abs(syn_zo.T)) % syn_zo.T.shape[1]
tuning_thickness = tuning_trace * dz_step
# Plotting Code
[ntrc, nsamp] = syn_zo.shape
fig = plt.figure(figsize=(12, 14))
fig.set_facecolor('white')
gs = gridspec.GridSpec(3, 1, height_ratios=[1, 1, 1])
ax0 = fig.add_subplot(gs[0])
ax0.plot(lyr_times[:,0], color='blue', lw=1.5)
ax0.plot(lyr_times[:,1], color='red', lw=1.5)
ax0.set_ylim((min_plot_time,max_plot_time))
ax0.invert_yaxis()
ax0.set_xlabel('Thickness (m)')
ax0.set_ylabel('Time (s)')
plt.text(2,
min_plot_time + (lyr_times[0,0] - min_plot_time)/2.,
'Layer 1',
fontsize=16)
plt.text(dz_max/dz_step - 2,
lyr_times[-1,0] + (lyr_times[-1,1] - lyr_times[-1,0])/2.,
'Layer 2',
fontsize=16,
horizontalalignment='right')
plt.text(2,
lyr_times[0,0] + (max_plot_time - lyr_times[0,0])/2.,
'Layer 3',
fontsize=16)
plt.gca().xaxis.tick_top()
plt.gca().xaxis.set_label_position('top')
ax0.set_xlim((-excursion, ntrc+excursion))
ax1 = fig.add_subplot(gs[1])
plot_vawig(ax1, syn_zo, t, excursion, highlight=tuning_trace)
ax1.plot(lyr_times[:,0], color='blue', lw=1.5)
ax1.plot(lyr_times[:,1], color='red', lw=1.5)
ax1.set_ylim((min_plot_time,max_plot_time))
ax1.invert_yaxis()
ax1.set_xlabel('Thickness (m)')
ax1.set_ylabel('Time (s)')
ax2 = fig.add_subplot(gs[2])
ax2.plot(syn_zo[:,lyr_indx[:,0]], color='blue')
ax2.set_xlim((-excursion, ntrc+excursion))
ax2.axvline(tuning_trace, color='k', lw=2)
ax2.grid()
ax2.set_title('Upper interface amplitude')
ax2.set_xlabel('Thickness (m)')
ax2.set_ylabel('Amplitude')
plt.text(tuning_trace + 2,
plt.ylim()[0] * 1.1,
'tuning thickness = {0} m'.format(str(tuning_thickness)),
fontsize=16)
plt.savefig('figure_1.png')
plt.show()
|
apache-2.0
| -3,679,153,622,030,970,000
| 24.79602
| 91
| 0.562755
| false
| 2.808133
| false
| false
| false
|
d-Rickyy-b/Python-BlackJackBot
|
blackjack/game/card.py
|
1
|
1732
|
# -*- coding: utf-8 -*-
from enum import Enum
class Card(object):
class Type(Enum):
NUMBER = "card_number"
JACK = "card_jack"
QUEEN = "card_queen"
KING = "card_king"
ACE = "card_ace"
symbols = ["♥", "♦", "♣", "♠"]
value_str = ["2", "3", "4", "5", "6", "7", "8", "9", "10", "Jack", "Queen", "King", "Ace"]
def __init__(self, card_id):
self.card_id = card_id
def is_ace(self):
return self.value == 11
@property
def symbol(self):
return self.symbols[self.card_id // 13]
@property
def value(self):
values = [2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10, 11]
return values[self.card_id % 13]
@property
def face(self):
return self.value_str[self.card_id % 13]
@property
def type(self):
if (self.card_id % 13) in range(0, 9):
return Card.Type.NUMBER
elif (self.card_id % 13) == 9:
return Card.Type.JACK
elif (self.card_id % 13) == 10:
return Card.Type.QUEEN
elif (self.card_id % 13) == 11:
return Card.Type.KING
elif (self.card_id % 13) == 12:
return Card.Type.ACE
else:
raise ValueError("card_id '{}' can't be mapped to card type!".format(self.card_id))
@property
def str_id(self):
str_ids = ["card_2", "card_3", "card_4", "card_5", "card_6",
"card_7", "card_8", "card_9", "card_10",
"card_jack", "card_queen", "card_king", "card_ace"]
return str_ids[self.card_id % 13]
def __str__(self):
return "{} {}".format(self.symbol, self.face)
def __repr__(self):
return self.__str__()
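# A minimal usage sketch (illustrative only, not part of the original module):
# card ids run 0..51, where id // 13 selects the suit and id % 13 the face.
if __name__ == "__main__":
    example_card = Card(12)  # 13th card of the first suit, i.e. its ace
    print("{} has value {}".format(example_card, example_card.value))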
|
gpl-3.0
| -4,021,522,245,757,826,600
| 26.806452
| 95
| 0.493039
| false
| 3.040564
| false
| false
| false
|
luboslenco/cyclesgame
|
blender/arm/handlers.py
|
1
|
5807
|
import os
import sys
import bpy
import importlib
from bpy.app.handlers import persistent
import arm.utils
import arm.props as props
import arm.make as make
import arm.make_state as state
import arm.api
@persistent
def on_depsgraph_update_post(self):
if state.proc_build != None:
return
# Recache
if hasattr(bpy.context, 'evaluated_depsgraph_get'):
depsgraph = bpy.context.evaluated_depsgraph_get()
else: # TODO: deprecated
depsgraph = bpy.context.depsgraph
for update in depsgraph.updates:
uid = update.id
if hasattr(uid, 'arm_cached'):
# uid.arm_cached = False # TODO: does not trigger update
if isinstance(uid, bpy.types.Mesh) and uid.name in bpy.data.meshes:
bpy.data.meshes[uid.name].arm_cached = False
elif isinstance(uid, bpy.types.Curve) and uid.name in bpy.data.curves:
bpy.data.curves[uid.name].arm_cached = False
elif isinstance(uid, bpy.types.MetaBall) and uid.name in bpy.data.metaballs:
bpy.data.metaballs[uid.name].arm_cached = False
elif isinstance(uid, bpy.types.Armature) and uid.name in bpy.data.armatures:
bpy.data.armatures[uid.name].arm_cached = False
elif isinstance(uid, bpy.types.NodeTree) and uid.name in bpy.data.node_groups:
bpy.data.node_groups[uid.name].arm_cached = False
elif isinstance(uid, bpy.types.Material) and uid.name in bpy.data.materials:
bpy.data.materials[uid.name].arm_cached = False
# Send last operator to Krom
wrd = bpy.data.worlds['Arm']
if state.proc_play != None and \
state.target == 'krom' and \
wrd.arm_live_patch:
ops = bpy.context.window_manager.operators
if len(ops) > 0 and ops[-1] != None:
send_operator(ops[-1])
def send_operator(op):
if hasattr(bpy.context, 'object') and bpy.context.object != None:
obj = bpy.context.object.name
if op.name == 'Move':
vec = bpy.context.object.location
js = 'var o = iron.Scene.active.getChild("' + obj + '"); o.transform.loc.set(' + str(vec[0]) + ', ' + str(vec[1]) + ', ' + str(vec[2]) + '); o.transform.dirty = true;'
make.write_patch(js)
elif op.name == 'Resize':
vec = bpy.context.object.scale
js = 'var o = iron.Scene.active.getChild("' + obj + '"); o.transform.scale.set(' + str(vec[0]) + ', ' + str(vec[1]) + ', ' + str(vec[2]) + '); o.transform.dirty = true;'
make.write_patch(js)
elif op.name == 'Rotate':
vec = bpy.context.object.rotation_euler.to_quaternion()
js = 'var o = iron.Scene.active.getChild("' + obj + '"); o.transform.rot.set(' + str(vec[1]) + ', ' + str(vec[2]) + ', ' + str(vec[3]) + ' ,' + str(vec[0]) + '); o.transform.dirty = true;'
make.write_patch(js)
else: # Rebuild
make.patch()
def always():
# Force ui redraw
if state.redraw_ui and context_screen != None:
for area in context_screen.areas:
if area.type == 'VIEW_3D' or area.type == 'PROPERTIES':
area.tag_redraw()
state.redraw_ui = False
# TODO: depsgraph.updates only triggers material trees
space = arm.utils.logic_editor_space(context_screen)
if space != None:
space.node_tree.arm_cached = False
return 0.5
appended_py_paths = []
context_screen = None
@persistent
def on_load_post(context):
global appended_py_paths
global context_screen
context_screen = bpy.context.screen
props.init_properties_on_load()
reload_blend_data()
bpy.ops.arm.sync_proxy()
wrd = bpy.data.worlds['Arm']
wrd.arm_recompile = True
arm.api.drivers = dict()
# Load libraries
if os.path.exists(arm.utils.get_fp() + '/Libraries'):
libs = os.listdir(arm.utils.get_fp() + '/Libraries')
for lib in libs:
if os.path.isdir(arm.utils.get_fp() + '/Libraries/' + lib):
fp = arm.utils.get_fp() + '/Libraries/' + lib
if fp not in appended_py_paths and os.path.exists(fp + '/blender.py'):
appended_py_paths.append(fp)
sys.path.append(fp)
import blender
importlib.reload(blender)
blender.register()
sys.path.remove(fp)
# Show trait users as collections
arm.utils.update_trait_collections()
def reload_blend_data():
armory_pbr = bpy.data.node_groups.get('Armory PBR')
if armory_pbr == None:
load_library('Armory PBR')
def load_library(asset_name):
if bpy.data.filepath.endswith('arm_data.blend'): # Prevent load in library itself
return
sdk_path = arm.utils.get_sdk_path()
data_path = sdk_path + '/armory/blender/data/arm_data.blend'
data_names = [asset_name]
# Import
data_refs = data_names.copy()
with bpy.data.libraries.load(data_path, link=False) as (data_from, data_to):
data_to.node_groups = data_refs
for ref in data_refs:
ref.use_fake_user = True
def register():
bpy.app.handlers.load_post.append(on_load_post)
bpy.app.handlers.depsgraph_update_post.append(on_depsgraph_update_post)
# bpy.app.handlers.undo_post.append(on_undo_post)
bpy.app.timers.register(always, persistent=True)
# TODO: On windows, on_load_post is not called when opening .blend file from explorer
if arm.utils.get_os() == 'win' and arm.utils.get_fp() != '':
on_load_post(None)
reload_blend_data()
def unregister():
bpy.app.handlers.load_post.remove(on_load_post)
bpy.app.handlers.depsgraph_update_post.remove(on_depsgraph_update_post)
# bpy.app.handlers.undo_post.remove(on_undo_post)
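# Typical wiring sketch (assumed package layout): an addon's __init__.py would
# usually import this module and call its register()/unregister() hooks, e.g.
#
#     from . import handlers
#     handlers.register()    # on addon enable
#     handlers.unregister()  # on addon disable
#
# The relative import above is an assumption for illustration only.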
|
lgpl-3.0
| -1,077,355,546,783,990,800
| 37.713333
| 200
| 0.610126
| false
| 3.370284
| false
| false
| false
|
cloudify-cosmo/softlayer-python
|
SoftLayer/CLI/loadbal/service_add.py
|
1
|
1663
|
"""Adds a new load balancer service."""
# :license: MIT, see LICENSE for more details.
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import loadbal
import click
@click.command()
@click.argument('identifier')
@click.option('--enabled / --disabled',
required=True,
help="Create the service as enable or disabled")
@click.option('--port',
required=True,
help="The port number for the service",
type=click.INT)
@click.option('--weight',
required=True,
type=click.INT,
help="The weight of the service")
@click.option('--healthcheck-type',
required=True,
help="The health check type")
@click.option('--ip-address', '--ip',
required=True,
help="The IP of the service")
@environment.pass_env
def cli(env, identifier, enabled, port, weight, healthcheck_type, ip_address):
"""Adds a new load balancer service."""
mgr = SoftLayer.LoadBalancerManager(env.client)
loadbal_id, group_id = loadbal.parse_id(identifier)
# check if the IP is valid
ip_address_id = None
if ip_address:
ip_service = env.client['Network_Subnet_IpAddress']
ip_record = ip_service.getByIpAddress(ip_address)
ip_address_id = ip_record['id']
mgr.add_service(loadbal_id,
group_id,
ip_address_id=ip_address_id,
enabled=enabled,
port=port,
weight=weight,
hc_type=healthcheck_type)
return 'Load balancer service is being added!'
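# Usage sketch with click's test runner; the identifier and option values are
# made up, and a real invocation also needs the SoftLayer environment object
# normally injected by @environment.pass_env.
#
#     from click.testing import CliRunner
#     runner = CliRunner()
#     result = runner.invoke(cli, ['12345:67890', '--enabled', '--port', '80',
#                                  '--weight', '10', '--healthcheck-type', 'HTTP',
#                                  '--ip', '10.0.0.5'])
#     print(result.output)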
|
mit
| 6,626,000,125,855,541,000
| 30.980769
| 78
| 0.591702
| false
| 4.136816
| false
| false
| false
|
ppyordanov/HCI_4_Future_Cities
|
Server/src/virtualenv/Lib/encodings/koi8_r.py
|
1
|
13245
|
""" Python Character Mapping Codec koi8_r generated from 'MAPPINGS/VENDORS/MISC/KOI8-R.TXT' with gencodec.py.
""" # "
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self, input, errors='strict'):
return codecs.charmap_encode(input, errors, encoding_table)
def decode(self, input, errors='strict'):
return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input, self.errors, encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec, codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='koi8-r',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u2500' # 0x80 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u2502' # 0x81 -> BOX DRAWINGS LIGHT VERTICAL
u'\u250c' # 0x82 -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2510' # 0x83 -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x84 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2518' # 0x85 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u251c' # 0x86 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2524' # 0x87 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u252c' # 0x88 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u2534' # 0x89 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u253c' # 0x8A -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u2580' # 0x8B -> UPPER HALF BLOCK
u'\u2584' # 0x8C -> LOWER HALF BLOCK
u'\u2588' # 0x8D -> FULL BLOCK
u'\u258c' # 0x8E -> LEFT HALF BLOCK
u'\u2590' # 0x8F -> RIGHT HALF BLOCK
u'\u2591' # 0x90 -> LIGHT SHADE
u'\u2592' # 0x91 -> MEDIUM SHADE
u'\u2593' # 0x92 -> DARK SHADE
u'\u2320' # 0x93 -> TOP HALF INTEGRAL
u'\u25a0' # 0x94 -> BLACK SQUARE
u'\u2219' # 0x95 -> BULLET OPERATOR
u'\u221a' # 0x96 -> SQUARE ROOT
u'\u2248' # 0x97 -> ALMOST EQUAL TO
u'\u2264' # 0x98 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0x99 -> GREATER-THAN OR EQUAL TO
u'\xa0' # 0x9A -> NO-BREAK SPACE
u'\u2321' # 0x9B -> BOTTOM HALF INTEGRAL
u'\xb0' # 0x9C -> DEGREE SIGN
u'\xb2' # 0x9D -> SUPERSCRIPT TWO
u'\xb7' # 0x9E -> MIDDLE DOT
u'\xf7' # 0x9F -> DIVISION SIGN
u'\u2550' # 0xA0 -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u2551' # 0xA1 -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2552' # 0xA2 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
u'\u0451' # 0xA3 -> CYRILLIC SMALL LETTER IO
u'\u2553' # 0xA4 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
u'\u2554' # 0xA5 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2555' # 0xA6 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
u'\u2556' # 0xA7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
u'\u2557' # 0xA8 -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u2558' # 0xA9 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
u'\u2559' # 0xAA -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
u'\u255a' # 0xAB -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u255b' # 0xAC -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
u'\u255c' # 0xAD -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
u'\u255d' # 0xAE -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u255e' # 0xAF -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
u'\u255f' # 0xB0 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
u'\u2560' # 0xB1 -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2561' # 0xB2 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
u'\u0401' # 0xB3 -> CYRILLIC CAPITAL LETTER IO
u'\u2562' # 0xB4 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
u'\u2563' # 0xB5 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2564' # 0xB6 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
u'\u2565' # 0xB7 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
u'\u2566' # 0xB8 -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2567' # 0xB9 -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
u'\u2568' # 0xBA -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
u'\u2569' # 0xBB -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u256a' # 0xBC -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
u'\u256b' # 0xBD -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
u'\u256c' # 0xBE -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\xa9' # 0xBF -> COPYRIGHT SIGN
u'\u044e' # 0xC0 -> CYRILLIC SMALL LETTER YU
u'\u0430' # 0xC1 -> CYRILLIC SMALL LETTER A
u'\u0431' # 0xC2 -> CYRILLIC SMALL LETTER BE
u'\u0446' # 0xC3 -> CYRILLIC SMALL LETTER TSE
u'\u0434' # 0xC4 -> CYRILLIC SMALL LETTER DE
u'\u0435' # 0xC5 -> CYRILLIC SMALL LETTER IE
u'\u0444' # 0xC6 -> CYRILLIC SMALL LETTER EF
u'\u0433' # 0xC7 -> CYRILLIC SMALL LETTER GHE
u'\u0445' # 0xC8 -> CYRILLIC SMALL LETTER HA
u'\u0438' # 0xC9 -> CYRILLIC SMALL LETTER I
u'\u0439' # 0xCA -> CYRILLIC SMALL LETTER SHORT I
u'\u043a' # 0xCB -> CYRILLIC SMALL LETTER KA
u'\u043b' # 0xCC -> CYRILLIC SMALL LETTER EL
u'\u043c' # 0xCD -> CYRILLIC SMALL LETTER EM
u'\u043d' # 0xCE -> CYRILLIC SMALL LETTER EN
u'\u043e' # 0xCF -> CYRILLIC SMALL LETTER O
u'\u043f' # 0xD0 -> CYRILLIC SMALL LETTER PE
u'\u044f' # 0xD1 -> CYRILLIC SMALL LETTER YA
u'\u0440' # 0xD2 -> CYRILLIC SMALL LETTER ER
u'\u0441' # 0xD3 -> CYRILLIC SMALL LETTER ES
u'\u0442' # 0xD4 -> CYRILLIC SMALL LETTER TE
u'\u0443' # 0xD5 -> CYRILLIC SMALL LETTER U
u'\u0436' # 0xD6 -> CYRILLIC SMALL LETTER ZHE
u'\u0432' # 0xD7 -> CYRILLIC SMALL LETTER VE
u'\u044c' # 0xD8 -> CYRILLIC SMALL LETTER SOFT SIGN
u'\u044b' # 0xD9 -> CYRILLIC SMALL LETTER YERU
u'\u0437' # 0xDA -> CYRILLIC SMALL LETTER ZE
u'\u0448' # 0xDB -> CYRILLIC SMALL LETTER SHA
u'\u044d' # 0xDC -> CYRILLIC SMALL LETTER E
u'\u0449' # 0xDD -> CYRILLIC SMALL LETTER SHCHA
u'\u0447' # 0xDE -> CYRILLIC SMALL LETTER CHE
u'\u044a' # 0xDF -> CYRILLIC SMALL LETTER HARD SIGN
u'\u042e' # 0xE0 -> CYRILLIC CAPITAL LETTER YU
u'\u0410' # 0xE1 -> CYRILLIC CAPITAL LETTER A
u'\u0411' # 0xE2 -> CYRILLIC CAPITAL LETTER BE
u'\u0426' # 0xE3 -> CYRILLIC CAPITAL LETTER TSE
u'\u0414' # 0xE4 -> CYRILLIC CAPITAL LETTER DE
u'\u0415' # 0xE5 -> CYRILLIC CAPITAL LETTER IE
u'\u0424' # 0xE6 -> CYRILLIC CAPITAL LETTER EF
u'\u0413' # 0xE7 -> CYRILLIC CAPITAL LETTER GHE
u'\u0425' # 0xE8 -> CYRILLIC CAPITAL LETTER HA
u'\u0418' # 0xE9 -> CYRILLIC CAPITAL LETTER I
u'\u0419' # 0xEA -> CYRILLIC CAPITAL LETTER SHORT I
u'\u041a' # 0xEB -> CYRILLIC CAPITAL LETTER KA
u'\u041b' # 0xEC -> CYRILLIC CAPITAL LETTER EL
u'\u041c' # 0xED -> CYRILLIC CAPITAL LETTER EM
u'\u041d' # 0xEE -> CYRILLIC CAPITAL LETTER EN
u'\u041e' # 0xEF -> CYRILLIC CAPITAL LETTER O
u'\u041f' # 0xF0 -> CYRILLIC CAPITAL LETTER PE
u'\u042f' # 0xF1 -> CYRILLIC CAPITAL LETTER YA
u'\u0420' # 0xF2 -> CYRILLIC CAPITAL LETTER ER
u'\u0421' # 0xF3 -> CYRILLIC CAPITAL LETTER ES
u'\u0422' # 0xF4 -> CYRILLIC CAPITAL LETTER TE
u'\u0423' # 0xF5 -> CYRILLIC CAPITAL LETTER U
u'\u0416' # 0xF6 -> CYRILLIC CAPITAL LETTER ZHE
u'\u0412' # 0xF7 -> CYRILLIC CAPITAL LETTER VE
u'\u042c' # 0xF8 -> CYRILLIC CAPITAL LETTER SOFT SIGN
u'\u042b' # 0xF9 -> CYRILLIC CAPITAL LETTER YERU
u'\u0417' # 0xFA -> CYRILLIC CAPITAL LETTER ZE
u'\u0428' # 0xFB -> CYRILLIC CAPITAL LETTER SHA
u'\u042d' # 0xFC -> CYRILLIC CAPITAL LETTER E
u'\u0429' # 0xFD -> CYRILLIC CAPITAL LETTER SHCHA
u'\u0427' # 0xFE -> CYRILLIC CAPITAL LETTER CHE
u'\u042a' # 0xFF -> CYRILLIC CAPITAL LETTER HARD SIGN
)
### Encoding table
encoding_table = codecs.charmap_build(decoding_table)
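# Round-trip sketch using the tables above directly; the sample text is arbitrary
# Cyrillic, and charmap_encode/charmap_decode return (output, length) tuples.
if __name__ == '__main__':
    _sample = u'\u041f\u0440\u0438\u0432\u0435\u0442'
    _encoded, _ = codecs.charmap_encode(_sample, 'strict', encoding_table)
    _decoded, _ = codecs.charmap_decode(_encoded, 'strict', decoding_table)
    assert _decoded == _sample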
|
mit
| 2,934,191,392,544,551,000
| 41.588424
| 109
| 0.595092
| false
| 2.853911
| false
| false
| false
|
LamCiuLoeng/fd
|
rpac/controllers/pdf.py
|
1
|
2302
|
# -*- coding: utf-8 -*-
import os
import json
# turbogears imports
from tg import expose, redirect, validate, flash, session, request, config
from tg.decorators import *
# third party imports
from repoze.what import authorize
from repoze.what.predicates import not_anonymous, in_group, has_permission
from sqlalchemy.sql.expression import and_
# project specific imports
from rpac.lib.base import BaseController
from rpac.model import *
from rpac.util.common import *
from rpac.util.layout_pdf import gen_pdf
__all__ = ['PdfController', 'PdfLayoutController']
class PdfController( BaseController ):
# Uncomment this line if your controller requires an authenticated user
# allow_only = authorize.in_group( 'Admin' )
allow_only = authorize.not_anonymous()
@expose()
def index(self, **kw):
header = None
# details = None
hid = kw.get('id', None)
if hid:
header = qry( OrderHeader ).filter( and_( OrderHeader.active == 0 , OrderHeader.id == hid ) ).first()
if header and header.dtls:
details = [(d.id, d.itemCode) for d in header.dtls]
# print details
pdf_zip_file = gen_pdf(header.no, details)
return serveFile(unicode(pdf_zip_file))
class PdfLayoutController( BaseController ):
# Uncomment this line if your controller requires an authenticated user
# allow_only = authorize.in_group( 'Admin' )
@expose('rpac.templates.pdf.index')
def index(self, **kw):
detail = None
data = None
detail_id = kw.get('id', None)
if detail_id:
detail = qry( OrderDetail ).filter( and_( OrderDetail.active == 0 , OrderDetail.id == detail_id ) ).first()
if detail:
item_code = detail.itemCode
template_dir = config.get('pdf_template_dir')
pdf_template = os.path.join(template_dir, '%s.mak' % item_code)
# print item_code, pdf_template
if os.path.exists(pdf_template):
# print os.path.exists(pdf_template)
data = detail.layoutValue
data = json.loads(data) if data else None
override_template(self.index, 'mako:rpac.templates.pdf.%s' % item_code)
# print data
return dict(data=data)
|
mit
| -5,420,445,439,386,341,000
| 31.422535
| 119
| 0.632493
| false
| 3.767594
| false
| false
| false
|
anyonedev/anyonedev-monitor-agent
|
monitor/metrics/log/Nginx.py
|
1
|
2320
|
'''
Created on 2014-11-18
@author: hongye
'''
import re
import time
from metrics.log.AgentParser import detect
class NginxAccessLogLineParser(object):
ipP = r"?P<ip>[\d.]*"
timeP = r"""?P<time>\[[^\[\]]*\]"""
requestP = r"""?P<request>\"[^\"]*\""""
statusP = r"?P<status>\d+"
bodyBytesSentP = r"?P<bodyByteSent>\d+"
referP = r"""?P<refer>\"[^\"]*\""""
userAgentP = r"""?P<userAgent>\"[^\"]*\""""
userOperatorSystems = re.compile(r'\([^\(\)]*\)')
userBrowers = re.compile(r'[^\)]*\"')
nginxLogPattern = re.compile(r"(%s)\ -\ -\ (%s)\ (%s)\ (%s)\ (%s)\ (%s)\ (%s)" % (ipP, timeP, requestP, statusP, bodyBytesSentP, referP, userAgentP), re.VERBOSE)
def parse(self, line):
matchs = self.nginxLogPattern.match(line)
if matchs != None:
values = dict()
groups = matchs.groups()
values["ip"] = groups[0]
values["request"] = groups[2]
values["status"] = groups[3]
values["body_bytes_sent"] = groups[4]
values["refer"] = groups[5]
userAgent = groups[6]
values["user_agent"] = userAgent
t = groups[1]
if t != None:
values["time"] = int(time.mktime(time.strptime(t, '[%d/%b/%Y:%H:%M:%S %z]')))
if len(userAgent) > 20:
agent = detect(userAgent)
os = agent.get("os")
if os != None:
values["os_name"] = os.get("name")
values["os_version"] = os.get("version")
if agent.get("bot") != None:
values["is_bot"] = agent.get("bot")
browser = agent.get("browser")
if browser != None:
values["browser_name"] =browser.get("name")
values["browser_version"] = browser.get("version")
platform = agent.get("platform")
if platform != None:
values["platform_name"] = platform.get("name")
values["platform_version"] = platform.get("version")
return values
return None
def nginx_access_log_parser():
return NginxAccessLogLineParser()
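# Usage sketch: the log line below is made up but follows the combined access-log
# layout the regex above expects; parsing the timestamp's %z offset assumes Python 3.
if __name__ == '__main__':
    sample_line = ('93.184.216.34 - - [18/Nov/2014:08:30:01 +0800] '
                   '"GET /index.html HTTP/1.1" 200 612 "-" '
                   '"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36"')
    parser = nginx_access_log_parser()
    print(parser.parse(sample_line))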
|
gpl-2.0
| 4,690,549,868,023,794,000
| 34.151515
| 166
| 0.471983
| false
| 3.717949
| false
| false
| false
|
jcollado/pic2map
|
tests/test_cli.py
|
1
|
6128
|
# -*- coding: utf-8 -*-
"""Command Line Interface test cases."""
import argparse
import logging
import os
import tempfile
import unittest
from StringIO import StringIO
from mock import (
MagicMock as Mock,
patch,
)
from pic2map.cli import (
add,
count,
main,
parse_arguments,
remove,
serve,
valid_directory,
)
class MainTests(unittest.TestCase):
"""Main function test cases."""
def setUp(self):
"""Patch parse_arguments function."""
self.parse_arguments_patcher = patch('pic2map.cli.parse_arguments')
self.parse_arguments = self.parse_arguments_patcher.start()
self.logging_patcher = patch('pic2map.cli.logging')
self.logging_patcher.start()
def test_func_called(self):
"""Command function is called."""
argv = Mock()
function = Mock()
args = argparse.Namespace(
log_level=logging.WARNING,
func=function,
)
self.parse_arguments.return_value = args
main(argv)
function.assert_called_once_with(args)
def tearDown(self):
"""Undo the patching."""
self.parse_arguments_patcher.stop()
self.logging_patcher.stop()
class CommandFunctionTests(unittest.TestCase):
"""Command function test cases."""
def setUp(self):
"""Patch dependencies."""
self.tree_explorer_patcher = patch('pic2map.cli.TreeExplorer')
self.tree_explorer_cls = self.tree_explorer_patcher.start()
self.filter_gps_metadata_patcher = (
patch('pic2map.cli.filter_gps_metadata'))
self.filter_gps_metadata = self.filter_gps_metadata_patcher.start()
self.transform_metadata_to_row_patcher = (
patch('pic2map.cli.transform_metadata_to_row'))
self.transform_metadata_to_row = (
self.transform_metadata_to_row_patcher.start())
self.location_db_patcher = patch('pic2map.cli.LocationDB')
self.location_cls = self.location_db_patcher.start()
def tearDown(self):
"""Undo the patching."""
self.tree_explorer_patcher.stop()
self.filter_gps_metadata_patcher.stop()
self.transform_metadata_to_row_patcher.stop()
self.location_db_patcher.stop()
def test_add(self):
"""Add command function."""
tree_explorer = self.tree_explorer_cls()
paths = Mock()
tree_explorer.paths.return_value = paths
metadata_record = Mock()
metadata_records = [metadata_record]
self.filter_gps_metadata.return_value = metadata_records
row = Mock()
self.transform_metadata_to_row.return_value = row
database = self.location_cls().__enter__()
directory = 'some directory'
args = argparse.Namespace(directory=directory)
add(args)
self.tree_explorer_cls.assert_called_with(directory)
self.filter_gps_metadata.assert_called_once_with(paths)
self.transform_metadata_to_row.assert_called_once_with(metadata_record)
database.insert.assert_called_with([row])
def test_remove(self):
"""Remove command function."""
directory = 'some directory'
args = argparse.Namespace(directory=directory)
remove(args)
database = self.location_cls().__enter__()
database.delete.assert_called_once_with(directory)
def test_count(self):
"""Count command function."""
file_count = 10
database = self.location_cls().__enter__()
database.count.return_value = file_count
args = argparse.Namespace()
with patch('sys.stdout', new_callable=StringIO) as stdout:
count(args)
self.assertEqual(stdout.getvalue(), '{}\n'.format(file_count))
def test_serve(self):
"""Serve command function."""
args = argparse.Namespace()
with patch('pic2map.cli.app') as app:
serve(args)
app.run.assert_called_once_with(debug=True)
class ValidDirectoryTest(unittest.TestCase):
"""Valid directory test cases."""
def test_valid_directory(self):
"""Valid directory path."""
temp_directory = tempfile.mkdtemp()
try:
self.assertTrue(
valid_directory(temp_directory),
temp_directory,
)
finally:
os.rmdir(temp_directory)
def test_invalid_directory(self):
"""Invalid directory."""
with tempfile.NamedTemporaryFile() as temp_file:
with self.assertRaises(argparse.ArgumentTypeError):
valid_directory(temp_file.name)
def test_unreadable_directory(self):
"""Unreadable diretory."""
temp_directory = tempfile.mkdtemp()
try:
os.chmod(temp_directory, 0)
with self.assertRaises(argparse.ArgumentTypeError):
valid_directory(temp_directory)
finally:
os.rmdir(temp_directory)
class ParseArgumentsTest(unittest.TestCase):
"""Parse arguments test case."""
def test_add_command(self):
"""Add command."""
directory = 'some directory'
with patch('pic2map.cli.valid_directory') as valid_directory_func:
valid_directory_func.return_value = directory
args = parse_arguments(['add', directory])
self.assertEqual(args.directory, directory)
self.assertEqual(args.func, add)
def test_remove(self):
"""Remove command."""
directory = 'some directory'
with patch('pic2map.cli.valid_directory') as valid_directory_func:
valid_directory_func.return_value = directory
args = parse_arguments(['remove', directory])
self.assertEqual(args.directory, directory)
self.assertEqual(args.func, remove)
def test_count(self):
"""Count command."""
args = parse_arguments(['count'])
self.assertEqual(args.func, count)
def test_serve_command(self):
"""Serve command."""
args = parse_arguments(['serve'])
self.assertEqual(args.func, serve)
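# These tests target Python 2 (StringIO, mock); a typical invocation would be
# something along the lines of `python -m unittest discover tests` from the
# project root, though the project's own runner configuration may differ.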
|
mit
| -5,578,770,176,831,160,000
| 29.79397
| 79
| 0.615862
| false
| 4.191518
| true
| false
| false
|
USF-COT/trdi_adcp_readers
|
trdi_adcp_readers/readers.py
|
1
|
28613
|
import numpy as np
import dask.array as darr
from dask import compute, delayed
from dask.bag import from_delayed, from_sequence
from pandas import Timedelta
from xarray import Variable, IndexVariable, DataArray, Dataset
from trdi_adcp_readers.pd0.pd0_parser_sentinelV import (ChecksumError,
ReadChecksumError,
ReadHeaderError)
from trdi_adcp_readers.pd15.pd0_converters import (
PD15_file_to_PD0,
PD15_string_to_PD0
)
from trdi_adcp_readers.pd0.pd0_parser import parse_pd0_bytearray
from trdi_adcp_readers.pd0.pd0_parser_sentinelV import parse_sentinelVpd0_bytearray
from IPython import embed
def read_PD15_file(path, header_lines=0, return_pd0=False):
pd0_bytes = PD15_file_to_PD0(path, header_lines)
data = parse_pd0_bytearray(pd0_bytes)
if return_pd0:
return data, pd0_bytes
else:
return data
def read_PD15_hex(hex_string, return_pd0=False):
pd15_byte_string = hex_string.decode("hex")
pd0_bytes = PD15_string_to_PD0(pd15_byte_string)
data = parse_pd0_bytearray(pd0_bytes)
if return_pd0:
return data, pd0_bytes
else:
return data
def read_PD15_string(string, return_pd0=False):
pd0_bytes = PD15_string_to_PD0(string)
data = parse_pd0_bytearray(pd0_bytes)
if return_pd0:
return data, pd0_bytes
else:
return data
def _alloc_timestamp(item):
if type(item)==dict:
return item['timestamp']
else:
return item # NaNs marking bad ensembles.
def _alloc_timestamp_parts(part): # Each partition is an array of dicts.
return np.array([ens['timestamp'] for ens in part if type(ens)==dict]) # Exclude NaNs marking bad ensembles.
@delayed
def _addtarr(t, dt):
return darr.array([tn + dt for tn in t])
def _alloc_hpr(ensblk, group, varname):
phisc = 0.01 # Scale heading, pitch and roll by 0.01. Sentinel V manual, p. 259.
return darr.array([ensarr[group][varname]*phisc for ensarr in ensblk
if type(ensarr)==dict])
def _alloc_beam5(ensblk, group):
return np.array([ensarr[group]['data'] for ensarr in ensblk
if type(ensarr)==dict])
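def _alloc_janus(ensblk, group):
    # Assumed four-beam (Janus) counterpart of _alloc_beam5, referenced by
    # ensembles2dataset_dask below; it stacks each ensemble's 2D data block.
    return np.array([ensarr[group]['data'] for ensarr in ensblk
                     if type(ensarr)==dict])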
# def _alloc_beam5vel(ensblk):
# arrs = darr.array([])
# for ensarr in ensblk:
# if type(ensarr)==dict:
# arr = darr.from_array(np.array(ensarr['velocity_beam5']['data']), chunks=1)
# arrs = darr.concatenate((arrs, arr), axis=1)
# else:
# continue
#     return arrs
# def alloc_2dvars(ensarr):
# vjanus = ensarr['velocity_janus']['data']
# b1[:, n] = vjanus[:, 0]
# b2[:, n] = vjanus[:, 1]
# b3[:, n] = vjanus[:, 2]
# b4[:, n] = vjanus[:, 3]
# # b5[:, n] = ensarr['velocity_beam5']['data'].squeeze()
# # corjanus = ensarr['correlation_janus']['data']
# # cor1[:, n] = corjanus[:, 0]
# # cor2[:, n] = corjanus[:, 1]
# # cor3[:, n] = corjanus[:, 2]
# # cor4[:, n] = corjanus[:, 3]
# # cor5[:, n] = ensarr['correlation_beam5']['data'].squeeze()
# # intjanus = ensarr['echo_intensity_janus']['data']
# # int1[:, n] = intjanus[:, 0]
# # int2[:, n] = intjanus[:, 1]
# # int3[:, n] = intjanus[:, 2]
# # int4[:, n] = intjanus[:, 3]
# # int5[:, n] = ensarr['echo_intensity_beam5']['data'].squeeze()
def _bag2DataArray(bg, chunks, **kwargs):
    # Materialize the bag into a chunked dask array and wrap it in a DataArray.
    return DataArray(darr.from_array(np.array(bg.compute()), chunks=chunks),
                     **kwargs)
def ensembles2dataset_dask(ensdict, ncfpath, dsattrs={}, chunks=10,
verbose=True, print_every=1000):
"""
Convert a dictionary of ensembles into an xarray Dataset object
using dask.delayed to keep memory usage feasible.
"""
mms2ms = 1e-3
n=0
# fbadens = np.array(ensdict_aux)==None
# nt = len(ensdict) - np.sum(fbadens)
# embed()
ensdict0 = None
while ensdict0 is None:
ensdict0 = ensdict[n].compute()
n+=1
nz = ensdict0['fixed_leader_janus']['number_of_cells']
fixj = ensdict0['fixed_leader_janus'].compute()
fix5 = ensdict0['fixed_leader_beam5'].compute()
# Add ping offset to get beam 5's timestamps.
dt5 = fix5['ping_offset_time'] # In milliseconds.
dt5 = np.array(Timedelta(dt5, unit='ms'))
th = fixj['beam_angle']
assert th==25 # Always 25 degrees.
th = th*np.pi/180.
Cth = np.cos(th)
# Construct along-beam/vertical axes.
cm2m = 1e-2
r1janus = fixj['bin_1_distance']*cm2m
r1b5 = fix5['bin_1_distance']*cm2m
ncj = fixj['number_of_cells']
nc5 = fix5['number_of_cells']
lcj = fixj['depth_cell_length']*cm2m
lc5 = fix5['depth_cell_length']*cm2m
Lj = ncj*lcj # Distance from center of bin 1 to the center of last bin (Janus).
L5 = nc5*lc5 # Distance from center of bin 1 to the center of last bin (beam 5).
rb = r1janus + np.arange(0, Lj, lcj) # Distance from xducer head
# (Janus).
zab = Cth*rb # Vertical distance from xducer head
# (Janus).
zab5 = r1b5 + np.arange(0, L5, lc5) # Distance from xducer head, also
# depth for the vertical beam.
rb = IndexVariable('z', rb, attrs={'units':'meters', 'long_name':"along-beam distance from the xducer's face to the center of the bins, for beams 1-4 (Janus)"})
zab = IndexVariable('z', zab, attrs={'units':'meters', 'long_name':"vertical distance from the instrument's head to the center of the bins, for beams 1-4 (Janus)"})
zab5 = IndexVariable('z5', zab5, attrs={'units':'meters', 'long_name':"vertical distance from xducer face to the center of the bins, for beam 5 (vertical)"})
ensdict = from_sequence(ensdict)
tjanus = ensdict.map_partitions(_alloc_timestamp_parts)
t5 = _addtarr(tjanus, dt5)
if verbose: print("Unpacking timestamps.")
time = IndexVariable('time', tjanus.compute(), attrs={'long_name':'timestamps for beams 1-4 (Janus)'})
time5 = IndexVariable('time5', t5.compute(), attrs={'long_name':'timestamps for beam 5 (vertical)'})
if verbose: print("Done unpacking timestamps.")
coords0 = dict(time=time)
coords = dict(z=zab, time=time, rb=rb)
coords5 = dict(z5=zab5, time5=time5)
dims = ['z', 'time']
dims5 = ['z5', 'time5']
dims0 = ['time']
coordsdict = coords0
if verbose: print("Allocating heading, pitch, roll.")
svars = ['heading', 'pitch', 'roll']
long_names = svars
units = ['degrees']*3
grp = 'variable_leader_janus'
vars1d = dict()
for vname,lname,unit in zip(svars,long_names,units):
if verbose: print(vname)
wrk = ensdict.map_partitions(_alloc_hpr, grp, vname)
# wrk = darr.from_array(np.array(wrk.compute()), chunks=chunks)
        kwda = dict(coords=coordsdict, dims=dims0,
                    attrs=dict(units=unit, long_name=lname))
        wrk2 = delayed(_bag2DataArray)(wrk, chunks, **kwda)
vars1d.update({vname:wrk2})
del(wrk, wrk2)
ds2hpr = Dataset(data_vars=vars1d, coords=coordsdict)
ds2hpr = ds2hpr.to_netcdf(ncfpath, compute=False, mode='w')
if verbose: print("Saving heading, pitch, roll.")
ds2hpr.compute()
if verbose: print("Done saving heading, pitch, roll.")
del(ds2hpr)
coordsdict = coords5
# Load beam 5 variables into memory to
# be able to put them in a chunked DataArray.
if verbose: print("Allocating beam 5 variables.")
grps = ['velocity_beam5', 'correlation_beam5', 'echo_intensity_beam5']
long_names = ['Beam 5 velocity', 'Beam 5 correlation', 'Beam 5 echo amplitude']
units = ['mm/s, positive toward xducer face', 'unitless', 'dB']
vars5 = dict()
for grp,lname,unit in zip(grps,long_names,units):
if verbose: print(grp)
wrk = ensdict.map_partitions(_alloc_beam5, grp)
wrk = darr.from_array(np.array(wrk.compute()).T, chunks=(1, chunks))
wrk = DataArray(wrk, coords=coordsdict, dims=dims5, attrs=dict(units=unit, long_name=lname))
vars5.update({grp:wrk})
del(wrk)
ds5 = Dataset(data_vars=vars5, coords=coordsdict)
ds5 = ds5.to_netcdf(ncfpath, compute=False, mode='a')
if verbose: print("Saving beam 5 variables.")
ds5.compute()
if verbose: print("Done saving beam 5 variables.")
del(ds5)
embed()
coordsdict = coords
# Load beams 1-4 variables into memory to
# be able to put them in a chunked DataArray.
if verbose: print("Allocating Janus variables.")
grps = ['velocity_janus', 'correlation_janus', 'echo_intensity_janus']
long_names = ['Janus velocity', 'Janus correlation', 'Janus echo amplitude']
units = ['mm/s, positive toward xducer face', 'unitless', 'dB']
    varsj = dict()
for grp,lname,unit in zip(grps,long_names,units):
if verbose: print(grp)
wrk = ensdict.map_partitions(_alloc_janus, grp)
wrk = darr.from_array(np.array(wrk.compute()).T, chunks=(1, chunks))
wrk = DataArray(wrk, coords=coordsdict, dims=dims5, attrs=dict(units=unit, long_name=lname))
        varsj.update({grp:wrk})
del(wrk)
dsj = Dataset(data_vars=varsj, coords=coordsdict)
dsj = dsj.to_netcdf(ncfpath, compute=False, mode='a')
if verbose: print("Saving Janus variables.")
dsj.compute()
if verbose: print("Done saving Janus variables.")
del(dsj)
long_names = ('Beam 1 velocity', 'Beam 2 velocity',
'Beam 3 velocity', 'Beam 4 velocity',
'Beam 5 velocity',
'Beam 1 correlation', 'Beam 2 correlation',
'Beam 3 correlation', 'Beam 4 correlation',
'Beam 5 correlation',
'Beam 1 echo amplitude', 'Beam 2 echo amplitude',
'Beam 3 echo amplitude', 'Beam 4 echo amplitude',
'Beam 5 echo amplitude',
'heading', 'pitch', 'roll')
units = ('m/s, positive toward xducer face',
'm/s, positive toward xducer face',
'm/s, positive toward xducer face',
'm/s, positive toward xducer face',
'm/s, positive toward xducer face',
'no units', 'no units', 'no units', 'no units',
'no units',
'dB', 'dB', 'dB', 'dB',
'dB',
'degrees', 'degrees', 'degrees')
names = ('b1', 'b2', 'b3', 'b4', 'b5',
'cor1', 'cor2', 'cor3', 'cor4', 'cor5',
'int1', 'int2', 'int3', 'int4', 'int5',
'phi1', 'phi2', 'phi3')
# data_vars = {}
#
# sk = darr.zeros((nz, nt), chunks=chunks)*np.nan # Beam vels stored in mm/s
# # as int64 to save memory.
# b1, b2, b3, b4 = sk.copy(), sk.copy(), sk.copy(), sk.copy()
# # embed()
# sk0 = darr.zeros(nt, chunks=chunks)*np.nan
# cor1, cor2, cor3, cor4 = sk.copy(), sk.copy(), sk.copy(), sk.copy()
# int1, int2, int3, int4 = sk.copy(), sk.copy(), sk.copy(), sk.copy()
# b5, cor5, int5 = sk.copy(), sk.copy(), sk.copy()
# heading, pitch, roll = sk0.copy(), sk0.copy(), sk0.copy()
# tjanus = []
# ensdict = np.array(ensdict)[~fbadens]
# ensdict = ensdict.tolist()
arrs = (b1, b2, b3, b4, b5,
cor1, cor2, cor3, cor4, cor5,
int1, int2, int3, int4, int5,
heading, pitch, roll)
# pressure, temperature, salinity, soundspeed)
for arr,name,long_name,unit in zip(arrs,names,long_names,units):
if 'Beam5' in long_name:
coordsn = coords5
dimsn = dims
elif 'phi' in name:
coordsn = coords0
dimsn = dims0
else:
coordsn = coords
dimsn = dims
da = DataArray(arr, coords=coordsn, dims=dimsn, attrs=dict(units=unit, long_name=long_name))
data_vars.update({name:da})
allcoords.update(coords)
allcoords.update(coords5)
ds = Dataset(data_vars=data_vars, coords=allcoords, attrs=dsattrs)
return ds
def ensembles2dataset(ensdict, dsattrs={}, verbose=False, print_every=1000):
"""
Convert a dictionary of ensembles into an xarray Dataset object.
"""
mms2ms = 1e-3
fbadens = np.array([not isinstance(ens, dict) for ens in ensdict])
nt = len(ensdict) - np.sum(fbadens)
n=0
ensdict0 = np.nan
while not isinstance(ensdict0, dict):
ensdict0 = ensdict[n]
n+=1
nz = ensdict0['fixed_leader_janus']['number_of_cells']
sk = np.ma.zeros((nz, nt))*np.nan # Beam vels stored in mm/s
# as int64 to save memory.
b1, b2, b3, b4 = sk.copy(), sk.copy(), sk.copy(), sk.copy()
sk0 = np.ma.zeros(nt)*np.nan
cor1, cor2, cor3, cor4 = sk.copy(), sk.copy(), sk.copy(), sk.copy()
int1, int2, int3, int4 = sk.copy(), sk.copy(), sk.copy(), sk.copy()
b5, cor5, int5 = sk.copy(), sk.copy(), sk.copy()
heading, pitch, roll = sk0.copy(), sk0.copy(), sk0.copy()
tjanus = []
ensdict = np.array(ensdict)[~fbadens]
ensdict = ensdict.tolist()
n=0
for ensarr in ensdict:
tjanus.append(ensarr['timestamp'])
heading[n] = ensarr['variable_leader_janus']['heading']
pitch[n] = ensarr['variable_leader_janus']['pitch']
roll[n] = ensarr['variable_leader_janus']['roll']
vjanus = ensarr['velocity_janus']['data']
b1[:, n] = vjanus[:, 0]
b2[:, n] = vjanus[:, 1]
b3[:, n] = vjanus[:, 2]
b4[:, n] = vjanus[:, 3]
b5[:, n] = ensarr['velocity_beam5']['data'].squeeze()
corjanus = ensarr['correlation_janus']['data']
cor1[:, n] = corjanus[:, 0]
cor2[:, n] = corjanus[:, 1]
cor3[:, n] = corjanus[:, 2]
cor4[:, n] = corjanus[:, 3]
cor5[:, n] = ensarr['correlation_beam5']['data'].squeeze()
intjanus = ensarr['echo_intensity_janus']['data']
int1[:, n] = intjanus[:, 0]
int2[:, n] = intjanus[:, 1]
int3[:, n] = intjanus[:, 2]
int4[:, n] = intjanus[:, 3]
int5[:, n] = ensarr['echo_intensity_beam5']['data'].squeeze()
n+=1
if verbose and not n%print_every: print(n)
fixj = ensdict0['fixed_leader_janus']
fix5 = ensdict0['fixed_leader_beam5']
# Add ping offset to get beam 5's timestamps.
dt5 = fix5['ping_offset_time'] # In milliseconds.
dt5 = np.array(Timedelta(dt5, unit='ms'))
t5 = tjanus + dt5
th = fixj['beam_angle']
assert th==25 # Always 25 degrees.
th = th*np.pi/180.
Cth = np.cos(th)
# Construct along-beam/vertical axes.
cm2m = 1e-2
r1janus = fixj['bin_1_distance']*cm2m
r1b5 = fix5['bin_1_distance']*cm2m
ncj = fixj['number_of_cells']
nc5 = fix5['number_of_cells']
lcj = fixj['depth_cell_length']*cm2m
lc5 = fix5['depth_cell_length']*cm2m
Lj = ncj*lcj # Distance from center of bin 1 to the center of last bin (Janus).
L5 = nc5*lc5 # Distance from center of bin 1 to the center of last bin (beam 5).
rb = r1janus + np.arange(0, Lj, lcj) # Distance from xducer head
# (Janus).
zab = Cth*rb # Vertical distance from xducer head
# (Janus).
zab5 = r1b5 + np.arange(0, L5, lc5) # Distance from xducer head, also
# depth for the vertical beam.
rb = IndexVariable('z', rb, attrs={'units':'meters', 'long_name':"along-beam distance from the xducer's face to the center of the bins, for beams 1-4 (Janus)"})
zab = IndexVariable('z', zab, attrs={'units':'meters', 'long_name':"vertical distance from the instrument's head to the center of the bins, for beams 1-4 (Janus)"})
zab5 = IndexVariable('z', zab5, attrs={'units':'meters', 'long_name':"vertical distance from xducer face to the center of the bins, for beam 5 (vertical)"})
time = IndexVariable('time', tjanus, attrs={'long_name':'timestamp for beams 1-4 (Janus)'})
time5 = IndexVariable('time', t5, attrs={'long_name':'timestamp for beam 5 (vertical)'})
coords0 = [('time', time)]
coords = [('z', zab), ('time', time)]
coords5 = [('z5', zab5), ('time5', time5)]
dims = ['z', 'time']
dims0 = ['time']
# Convert velocities to m/s.
b1, b2, b3, b4, b5 = b1*mms2ms, b2*mms2ms, b3*mms2ms, b4*mms2ms, b5*mms2ms
# Scale heading, pitch and roll. Sentinel V manual, p. 259.
phisc = 0.01
heading *= phisc
pitch *= phisc
roll *= phisc
arrs = (b1, b2, b3, b4, b5,
cor1, cor2, cor3, cor4, cor5,
int1, int2, int3, int4, int5,
heading, pitch, roll)
# pressure, temperature, salinity, soundspeed)
long_names = ('Beam 1 velocity', 'Beam 2 velocity',
'Beam 3 velocity', 'Beam 4 velocity',
'Beam 5 velocity',
'Beam 1 correlation', 'Beam 2 correlation',
'Beam 3 correlation', 'Beam 4 correlation',
'Beam 5 correlation',
'Beam 1 echo amplitude', 'Beam 2 echo amplitude',
'Beam 3 echo amplitude', 'Beam 4 echo amplitude',
'Beam 5 echo amplitude',
'heading', 'pitch', 'roll')
units = ('m/s, positive toward xducer face',
'm/s, positive toward xducer face',
'm/s, positive toward xducer face',
'm/s, positive toward xducer face',
'm/s, positive toward xducer face',
'no units', 'no units', 'no units', 'no units',
'no units',
'dB', 'dB', 'dB', 'dB',
'dB',
'degrees', 'degrees', 'degrees')
names = ('b1', 'b2', 'b3', 'b4', 'b5',
'cor1', 'cor2', 'cor3', 'cor4', 'cor5',
'int1', 'int2', 'int3', 'int4', 'int5',
'phi1', 'phi2', 'phi3')
data_vars = {}
for arr,name,long_name,unit in zip(arrs,names,long_names,units):
if 'Beam5' in long_name:
coordsn = coords5
dimsn = dims
elif 'phi' in name:
coordsn = coords0
dimsn = dims0
else:
coordsn = coords
dimsn = dims
if 'int' in name:
arr *= 0.45 # Scale factor for echo intensity, see Sentinel V manual
# Sentinel V manual p. 264.
da = DataArray(arr, coords=coordsn, dims=dimsn, attrs=dict(units=unit, long_name=long_name))
data_vars.update({name:da})
allcoords = {'rb':rb} # Along-beam distance for slanted beams.
allcoords.update(coords)
allcoords.update(coords5)
ds = Dataset(data_vars=data_vars, coords=allcoords, attrs=dsattrs)
return ds
def read_PD0_file(path, header_lines=0, return_pd0=False, all_ensembles=True,
format='sentinel', use_dask=True, chunks=100,
debug=False, verbose=True, print_every=1e3):
"""Read a TRDI Workhorse or Sentinel V *.pd0 file."""
pd0_bytes = bytearray()
with open(path, 'rb') as f:
pd0_bytes = bytearray(f.read())
f.close()
if all_ensembles:
pd0reader = read_PD0_bytes_ensembles
kwread = dict(verbose=verbose, print_every=print_every,
use_dask=use_dask, chunks=chunks)
else:
pd0reader = read_PD0_bytes
kwread = dict()
ret = pd0reader(pd0_bytes, return_pd0=return_pd0, format=format, **kwread)
if return_pd0:
data, t, fixed_attrs, BAD_ENS, fbad_ens, errortype_count, pd0_bytes = ret
else:
data, t, fixed_attrs, BAD_ENS, fbad_ens, errortype_count = ret
if verbose:
nens = len(t)
nbadens = len(fbad_ens)
ngoodens = nens - nbadens
pbadens = 100.*nbadens/nens
print("")
print("Skipped %d/%d bad ensembles (%.2f%%)."%(nbadens, nens, pbadens))
print("---Breakdown of dud ensembles---")
print("*Bad checksums: %d"%errortype_count['bad_checksum'])
print("*Could not read ensemble's checksum: %d"%errortype_count['read_checksum'])
print("*Could not read ensemble's header: %d"%errortype_count['read_header'])
if debug:
if return_pd0:
ret = data, t, fixed_attrs, BAD_ENS, fbad_ens, errortype_count, pd0_bytes
else:
ret = data, t, fixed_attrs, BAD_ENS, fbad_ens, errortype_count
else:
if return_pd0:
ret = data, t, fixed_attrs, pd0_bytes
else:
ret = data, t, fixed_attrs
return ret
def read_PD0_bytes_ensembles(PD0_BYTES, return_pd0=False, headerid='\x7f\x7f',
format='sentinel', use_dask=True, chunks=1e4,
verbose=True, print_every=1000):
"""
Finds the hex positions in the bytearray that identify the header of each
ensemble. Then read each ensemble into a dictionary and accumulates them
in a list.
"""
chunks = int(chunks)
if format=='workhorse':
parsepd0 = parse_pd0_bytearray
elif format=='sentinel':
parsepd0 = parse_sentinelVpd0_bytearray
else:
print('Unknown *.pd0 format')
# Split segments of the byte array per ensemble.
ensbytes = PD0_BYTES.split(headerid)
ensbytes = [headerid + ens for ens in ensbytes] # Prepend header id back.
ensbytes = ensbytes[1:] # First entry is empty, cap it off.
nens = len(ensbytes)
nensm = nens - 1
fbad_ens = []
BAD_ENS = []
# embed()
# Get timestamps for all ensembles.
# Note that these timestamps indicate the Janus' (i.e., beams 1-4) pings,
# which will not necessarily be the same as the vertical beam's timestamp.
t = np.empty(nens, dtype=object)
if use_dask:
DATA = darr.from_array(np.array([], dtype=object, ndmin=1), chunks=chunks)
ntotalchunks = nens//chunks
rem_ens = nens%chunks
has_tail=rem_ens>0
if has_tail: ntotalchunks+=1 # Last chunk takes remaining ensembles.
DATAbuffskel = np.empty(chunks, dtype=object)
DATAbuff = DATAbuffskel.copy()
daNaN = darr.from_array(np.array(np.nan, ndmin=1), chunks=1)
cont_inchnk=0
else:
DATA = np.empty(nens, dtype=object)
nChecksumError, nReadChecksumError, nReadHeaderError = 0, 0, 0
cont=0
cont_inchnk=0
for ensb in ensbytes:
try:
if use_dask:
dd = delayed(parsepd0)(ensb)
else:
dd = parsepd0(ensb)
# embed()
t[cont] = dd['timestamp']
except (ChecksumError, ReadChecksumError, ReadHeaderError) as E:
t[cont] = np.nan
fbad_ens.append(cont) # Store index of bad ensemble.
# BAD_ENS.append(ens) # Store bytes of the bad ensemble.
# Which type of error was it?
if isinstance(E, ChecksumError):
nChecksumError += 1
elif isinstance(E, ReadChecksumError):
nReadChecksumError += 1
elif isinstance(E, ReadHeaderError):
nReadHeaderError += 1
if use_dask:
if cont_inchnk==chunks:
DATA = darr.concatenate((DATA, daNaN.copy()))
DATAbuff = DATAbuffskel.copy()
cont_inchnk=0
else:
DATAbuff[cont_inchnk] = np.nan
cont_inchnk+=1
if has_tail and cont==nensm: # Save the last chunk.
DATA = darr.concatenate((DATA, daNaN.copy()))
else:
DATA[cont] = np.nan
cont+=1
continue
if use_dask:
if cont_inchnk==chunks:
DATA = darr.concatenate((DATA, darr.from_array(DATAbuff, chunks=chunks)))
DATAbuff = DATAbuffskel.copy()
cont_inchnk=0
# embed()
else:
DATAbuff[cont_inchnk] = dd
cont_inchnk+=1
if has_tail and cont==nensm: # Save the last chunk.
DATA = darr.concatenate((DATA, darr.from_array(DATAbuff, chunks=chunks)))
else:
DATA[cont] = dd
cont+=1
if verbose and not cont%print_every: print("Ensemble %d"%cont)
errortype_count = dict(bad_checksum=nChecksumError,
read_checksum=nReadChecksumError,
read_header=nReadHeaderError)
# Extract ensemble-independent fields (store in xr.Dataset attributes).
# fixed_attrs = _pick_misc(DATA) # FIXME
fixed_attrs = []
# embed()
if return_pd0:
ret = (DATA, t, fixed_attrs, BAD_ENS, fbad_ens, errortype_count, PD0_BYTES)
else:
ret = (DATA, t, fixed_attrs, BAD_ENS, fbad_ens, errortype_count)
return ret
def read_PD0_bytes(pd0_bytes, return_pd0=False, format='sentinel'):
if format=='workhorse':
data = parse_pd0_bytearray(pd0_bytes)
elif format=='sentinel':
data = parse_sentinelVpd0_bytearray(pd0_bytes)
else:
print('Unknown *.pd0 format')
if return_pd0:
ret = data, pd0_bytes
else:
ret = data
return ret
def inspect_PD0_file(path, format='sentinelV'):
"""
Fetches and organizes metadata on instrument setup
and organizes them in a table.
"""
raise NotImplementedError()
confparams = ['data_source', # START Header.
'number_of_bytes',
'address_offsets',
'number_of_data_types', # END Header.
'system_power', # START fixed_leader_janus.
'system_configuration_MSB',
'sensor_source',
'system_configuration_LSB',
'system_bandwidth',
'number_of_cells',
'pings_per_ensemble',
'false_target_threshold',
'serial_number',
'lag_length',
'sensor_available',
'depth_cell_length',
'beam_angle',
'error_velocity_threshold',
'coordinate_transformation_process',
'heading_bias',
'transmit_pulse_length',
'heading_alignment',
'starting_depth_cell',
'number_of_beams',
'low_correlation_threshold',
'simulation_data_flag',
'cpu_firmware_version',
'transmit_lag_distance',
'ending_depth_cell',
'minimum_percentage_water_profile_pings',
'signal_processing_mode',
'blank_after_transmit',
'bin_1_distance', # END fixed_leader_janus.
'depth_cell_length', # START fixed_leader_beam5.
'vertical_mode',
'ping_offset_time',
'vertical_lag_length',
'transmit_pulse_length',
'number_of_cells',
'bin_1_distance',
'transmit_code_elements',
'pings_per_ensemble', # END fixed_leader_beam5.
'roll_standard_deviation', # START variable_leader_janus.
'error_status_word',
'attitude',
'contamination_sensor',
'attitude_temperature',
'temperature',
'speed_of_sound',
'pitch_standard_deviation',
'pressure_variance',
'heading_standard_deviation',
'pressure',
'transmit_current',
'ensemble_roll_over',
'depth_of_transducer',
'bit_result',
'ambient_temperature',
'salinity',
'pressure_positive',
'pressure_negative',
'transmit_voltage', # END variable_leader_janus.
]
def _pick_misc(d, confparams=confparams):
"""
Check whether the configuration parameters change over ensembles.
If not, replace them with a single value.
"""
dconfparams = dict()
d.reverse()
while d:
dn = d.pop()
for group in dn.keys():
for param in dn[group].keys():
if param in confparams:
dconfparams.update({param:dconfparams[param].extend(dn[group][param])})
ddesc = np.unique([dnn['descriptors'] for dnn in d if dnn is not None]) # Array of lists.
if ddesc.size==1: # If all the lists store the exact same strings.
dconfparams['descriptors'] = ddesc
else:
# print("Warning: Some setup parameters changed during deployment.")
pass
return dconfparams
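# Usage sketch (the file name is hypothetical; use_dask=False keeps the parsed
# ensembles as plain dicts so they can go straight into ensembles2dataset):
#
#     data, t, fixed_attrs = read_PD0_file('deployment.pd0', format='sentinel',
#                                          use_dask=False, verbose=True)
#     ds = ensembles2dataset(data)
#     ds.to_netcdf('deployment.nc')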
|
mit
| -928,729,607,118,611,300
| 36.648684
| 168
| 0.567924
| false
| 3.372186
| false
| false
| false
|
stratton-oakcoin/oakcoin
|
contrib/devtools/security-check.py
|
1
|
8216
|
#!/usr/bin/env python
# Copyright (c) 2015-2016 The Oakcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Perform basic ELF security checks on a series of executables.
Exit status will be 0 if successful, and the program will be silent.
Otherwise the exit status will be 1 and it will log which executables failed which checks.
Needs `readelf` (for ELF) and `objdump` (for PE).
'''
from __future__ import division,print_function,unicode_literals
import subprocess
import sys
import os
READELF_CMD = os.getenv('READELF', '/usr/bin/readelf')
OBJDUMP_CMD = os.getenv('OBJDUMP', '/usr/bin/objdump')
NONFATAL = {'HIGH_ENTROPY_VA'} # checks which are non-fatal for now but only generate a warning
def check_ELF_PIE(executable):
'''
Check for position independent executable (PIE), allowing for address space randomization.
'''
p = subprocess.Popen([READELF_CMD, '-h', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
ok = False
for line in stdout.split(b'\n'):
line = line.split()
if len(line)>=2 and line[0] == b'Type:' and line[1] == b'DYN':
ok = True
return ok
def get_ELF_program_headers(executable):
'''Return type and flags for ELF program headers'''
p = subprocess.Popen([READELF_CMD, '-l', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
in_headers = False
count = 0
headers = []
for line in stdout.split(b'\n'):
if line.startswith(b'Program Headers:'):
in_headers = True
if line == b'':
in_headers = False
if in_headers:
if count == 1: # header line
ofs_typ = line.find(b'Type')
ofs_offset = line.find(b'Offset')
ofs_flags = line.find(b'Flg')
ofs_align = line.find(b'Align')
if ofs_typ == -1 or ofs_offset == -1 or ofs_flags == -1 or ofs_align == -1:
raise ValueError('Cannot parse elfread -lW output')
elif count > 1:
typ = line[ofs_typ:ofs_offset].rstrip()
flags = line[ofs_flags:ofs_align].rstrip()
headers.append((typ, flags))
count += 1
return headers
def check_ELF_NX(executable):
'''
Check that no sections are writable and executable (including the stack)
'''
have_wx = False
have_gnu_stack = False
for (typ, flags) in get_ELF_program_headers(executable):
if typ == b'GNU_STACK':
have_gnu_stack = True
if b'W' in flags and b'E' in flags: # section is both writable and executable
have_wx = True
return have_gnu_stack and not have_wx
def check_ELF_RELRO(executable):
'''
Check for read-only relocations.
GNU_RELRO program header must exist
Dynamic section must have BIND_NOW flag
'''
have_gnu_relro = False
for (typ, flags) in get_ELF_program_headers(executable):
# Note: not checking flags == 'R': here as linkers set the permission differently
# This does not affect security: the permission flags of the GNU_RELRO program header are ignored, the PT_LOAD header determines the effective permissions.
# However, the dynamic linker need to write to this area so these are RW.
# Glibc itself takes care of mprotecting this area R after relocations are finished.
# See also http://permalink.gmane.org/gmane.comp.gnu.binutils/71347
if typ == b'GNU_RELRO':
have_gnu_relro = True
have_bindnow = False
p = subprocess.Popen([READELF_CMD, '-d', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
for line in stdout.split(b'\n'):
tokens = line.split()
if len(tokens)>1 and tokens[1] == b'(BIND_NOW)' or (len(tokens)>2 and tokens[1] == b'(FLAGS)' and b'BIND_NOW' in tokens[2]):
have_bindnow = True
return have_gnu_relro and have_bindnow
def check_ELF_Canary(executable):
'''
Check for use of stack canary
'''
p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
ok = False
for line in stdout.split(b'\n'):
if b'__stack_chk_fail' in line:
ok = True
return ok
def get_PE_dll_characteristics(executable):
'''
Get PE DllCharacteristics bits.
Returns a tuple (arch,bits) where arch is 'i386:x86-64' or 'i386'
and bits is the DllCharacteristics value.
'''
p = subprocess.Popen([OBJDUMP_CMD, '-x', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
arch = ''
bits = 0
for line in stdout.split('\n'):
tokens = line.split()
if len(tokens)>=2 and tokens[0] == 'architecture:':
arch = tokens[1].rstrip(',')
if len(tokens)>=2 and tokens[0] == 'DllCharacteristics':
bits = int(tokens[1],16)
return (arch,bits)
IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA = 0x0020
IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE = 0x0040
IMAGE_DLL_CHARACTERISTICS_NX_COMPAT = 0x0100
def check_PE_DYNAMIC_BASE(executable):
'''PIE: DllCharacteristics bit 0x40 signifies dynamicbase (ASLR)'''
(arch,bits) = get_PE_dll_characteristics(executable)
reqbits = IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE
return (bits & reqbits) == reqbits
# On 64 bit, must support high-entropy 64-bit address space layout randomization in addition to DYNAMIC_BASE
# to have secure ASLR.
def check_PE_HIGH_ENTROPY_VA(executable):
'''PIE: DllCharacteristics bit 0x20 signifies high-entropy ASLR'''
(arch,bits) = get_PE_dll_characteristics(executable)
if arch == 'i386:x86-64':
reqbits = IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA
else: # Unnecessary on 32-bit
assert(arch == 'i386')
reqbits = 0
return (bits & reqbits) == reqbits
def check_PE_NX(executable):
'''NX: DllCharacteristics bit 0x100 signifies nxcompat (DEP)'''
(arch,bits) = get_PE_dll_characteristics(executable)
return (bits & IMAGE_DLL_CHARACTERISTICS_NX_COMPAT) == IMAGE_DLL_CHARACTERISTICS_NX_COMPAT
CHECKS = {
'ELF': [
('PIE', check_ELF_PIE),
('NX', check_ELF_NX),
('RELRO', check_ELF_RELRO),
('Canary', check_ELF_Canary)
],
'PE': [
('DYNAMIC_BASE', check_PE_DYNAMIC_BASE),
('HIGH_ENTROPY_VA', check_PE_HIGH_ENTROPY_VA),
('NX', check_PE_NX)
]
}
def identify_executable(executable):
    with open(executable, 'rb') as f:
magic = f.read(4)
if magic.startswith(b'MZ'):
return 'PE'
elif magic.startswith(b'\x7fELF'):
return 'ELF'
return None
if __name__ == '__main__':
retval = 0
for filename in sys.argv[1:]:
try:
etype = identify_executable(filename)
if etype is None:
print('%s: unknown format' % filename)
retval = 1
continue
failed = []
warning = []
for (name, func) in CHECKS[etype]:
if not func(filename):
if name in NONFATAL:
warning.append(name)
else:
failed.append(name)
if failed:
print('%s: failed %s' % (filename, ' '.join(failed)))
retval = 1
if warning:
print('%s: warning %s' % (filename, ' '.join(warning)))
except IOError:
print('%s: cannot open' % filename)
retval = 1
exit(retval)
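# Example invocation (binary paths are illustrative):
#
#     READELF=/usr/bin/readelf OBJDUMP=/usr/bin/objdump \
#         contrib/devtools/security-check.py src/oakcoind src/qt/oakcoin-qt
#
# A silent run with exit status 0 means every fatal check passed; HIGH_ENTROPY_VA
# failures are only reported as warnings (see NONFATAL above).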
|
mit
| -7,381,608,130,979,772,000
| 37.037037
| 163
| 0.617454
| false
| 3.603509
| false
| false
| false
|
dims/oslo.utils
|
oslo/utils/openstack/common/log.py
|
1
|
26577
|
# Copyright 2011 OpenStack Foundation.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""OpenStack logging handler.
This module adds to logging functionality by adding the option to specify
a context object when calling the various log methods. If the context object
is not specified, default formatting is used. Additionally, an instance uuid
may be passed as part of the log message, which is intended to make it easier
for admins to find messages related to a specific instance.
It also allows setting of formatting information through conf.
"""
import inspect
import itertools
import logging
import logging.config
import logging.handlers
import os
import re
import sys
import traceback
from oslo.config import cfg
import six
from six import moves
from oslo.utils.openstack.common.gettextutils import _
from oslo.utils.openstack.common import jsonutils
from oslo.utils.openstack.common import local
from oslo.utils import importutils
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password']
# NOTE(ldbragst): Let's build a list of regex objects using the list of
# _SANITIZE_KEYS we already have. This way, we only have to add the new key
# to the list of _SANITIZE_KEYS and we can generate regular expressions
# for XML and JSON automatically.
_SANITIZE_PATTERNS = []
_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])',
r'(<%(key)s>).*?(</%(key)s>)',
r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])',
r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])',
r'([\'"].*?%(key)s[\'"]\s*,\s*\'--?[A-z]+\'\s*,\s*u?[\'"])'
'.*?([\'"])',
r'(%(key)s\s*--?[A-z]+\s*).*?([\s])']
for key in _SANITIZE_KEYS:
for pattern in _FORMAT_PATTERNS:
reg_ex = re.compile(pattern % {'key': key}, re.DOTALL)
_SANITIZE_PATTERNS.append(reg_ex)
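# For example, a message such as '{"admin_pass": "s3kr3t"}' can be scrubbed by
# substituting each captured prefix/suffix pair, roughly:
#
#     for pattern in _SANITIZE_PATTERNS:
#         message = re.sub(pattern, r'\g<1>***\g<2>', message)
#
# This is only the substitution shape; a masking helper would typically make the
# replacement string configurable.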
common_cli_opts = [
cfg.BoolOpt('debug',
short='d',
default=False,
help='Print debugging output (set logging level to '
'DEBUG instead of default WARNING level).'),
cfg.BoolOpt('verbose',
short='v',
default=False,
help='Print more verbose output (set logging level to '
'INFO instead of default WARNING level).'),
]
logging_cli_opts = [
cfg.StrOpt('log-config-append',
metavar='PATH',
deprecated_name='log-config',
help='The name of a logging configuration file. This file '
'is appended to any existing logging configuration '
'files. For details about logging configuration files, '
'see the Python logging module documentation.'),
cfg.StrOpt('log-format',
metavar='FORMAT',
help='DEPRECATED. '
'A logging.Formatter log message format string which may '
'use any of the available logging.LogRecord attributes. '
'This option is deprecated. Please use '
'logging_context_format_string and '
'logging_default_format_string instead.'),
cfg.StrOpt('log-date-format',
default=_DEFAULT_LOG_DATE_FORMAT,
metavar='DATE_FORMAT',
help='Format string for %%(asctime)s in log records. '
'Default: %(default)s .'),
cfg.StrOpt('log-file',
metavar='PATH',
deprecated_name='logfile',
help='(Optional) Name of log file to output to. '
'If no default is set, logging will go to stdout.'),
cfg.StrOpt('log-dir',
deprecated_name='logdir',
help='(Optional) The base directory used for relative '
'--log-file paths.'),
cfg.BoolOpt('use-syslog',
default=False,
help='Use syslog for logging. '
'Existing syslog format is DEPRECATED during I, '
'and will change in J to honor RFC5424.'),
cfg.BoolOpt('use-syslog-rfc-format',
# TODO(bogdando) remove or use True after existing
# syslog format deprecation in J
default=False,
help='(Optional) Enables or disables syslog rfc5424 format '
'for logging. If enabled, prefixes the MSG part of the '
'syslog message with APP-NAME (RFC5424). The '
'format without the APP-NAME is deprecated in I, '
'and will be removed in J.'),
cfg.StrOpt('syslog-log-facility',
default='LOG_USER',
help='Syslog facility to receive log lines.')
]
generic_log_opts = [
cfg.BoolOpt('use_stderr',
default=True,
help='Log output to standard error.')
]
log_opts = [
cfg.StrOpt('logging_context_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [%(request_id)s %(user_identity)s] '
'%(instance)s%(message)s',
help='Format string to use for log messages with context.'),
cfg.StrOpt('logging_default_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [-] %(instance)s%(message)s',
help='Format string to use for log messages without context.'),
cfg.StrOpt('logging_debug_format_suffix',
default='%(funcName)s %(pathname)s:%(lineno)d',
help='Data to append to log format when level is DEBUG.'),
cfg.StrOpt('logging_exception_prefix',
default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
'%(instance)s',
help='Prefix each line of exception output with this format.'),
cfg.ListOpt('default_log_levels',
default=[
'amqp=WARN',
'amqplib=WARN',
'boto=WARN',
'qpid=WARN',
'sqlalchemy=WARN',
'suds=INFO',
'oslo.messaging=INFO',
'iso8601=WARN',
'requests.packages.urllib3.connectionpool=WARN'
],
help='List of logger=LEVEL pairs.'),
cfg.BoolOpt('publish_errors',
default=False,
help='Enables or disables publication of error events.'),
cfg.BoolOpt('fatal_deprecations',
default=False,
help='Enables or disables fatal status of deprecations.'),
# NOTE(mikal): there are two options here because sometimes we are handed
# a full instance (and could include more information), and other times we
# are just handed a UUID for the instance.
cfg.StrOpt('instance_format',
default='[instance: %(uuid)s] ',
help='The format for an instance that is passed with the log '
'message. '),
cfg.StrOpt('instance_uuid_format',
default='[instance: %(uuid)s] ',
help='The format for an instance UUID that is passed with the '
'log message. '),
]
CONF = cfg.CONF
CONF.register_cli_opts(common_cli_opts)
CONF.register_cli_opts(logging_cli_opts)
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)
# our new audit level
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
# module aware of it so it acts like other levels.
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')
try:
NullHandler = logging.NullHandler
except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7
class NullHandler(logging.Handler):
def handle(self, record):
pass
def emit(self, record):
pass
def createLock(self):
self.lock = None
def _dictify_context(context):
if context is None:
return None
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
context = context.to_dict()
return context
def _get_binary_name():
return os.path.basename(inspect.stack()[-1][1])
def _get_log_file_path(binary=None):
logfile = CONF.log_file
logdir = CONF.log_dir
if logfile and not logdir:
return logfile
if logfile and logdir:
return os.path.join(logdir, logfile)
if logdir:
binary = binary or _get_binary_name()
return '%s.log' % (os.path.join(logdir, binary),)
return None
def mask_password(message, secret="***"):
"""Replace password with 'secret' in message.
:param message: The string which includes security information.
:param secret: value with which to replace passwords.
:returns: The unicode value of message with the password fields masked.
For example:
>>> mask_password("'adminPass' : 'aaaaa'")
"'adminPass' : '***'"
>>> mask_password("'admin_pass' : 'aaaaa'")
"'admin_pass' : '***'"
>>> mask_password('"password" : "aaaaa"')
'"password" : "***"'
>>> mask_password("'original_password' : 'aaaaa'")
"'original_password' : '***'"
>>> mask_password("u'original_password' : u'aaaaa'")
"u'original_password' : u'***'"
"""
message = six.text_type(message)
# NOTE(ldbragst): Check to see if anything in message contains any key
# specified in _SANITIZE_KEYS, if not then just return the message since
# we don't have to mask any passwords.
if not any(key in message for key in _SANITIZE_KEYS):
return message
secret = r'\g<1>' + secret + r'\g<2>'
for pattern in _SANITIZE_PATTERNS:
message = re.sub(pattern, secret, message)
return message
class BaseLoggerAdapter(logging.LoggerAdapter):
def audit(self, msg, *args, **kwargs):
self.log(logging.AUDIT, msg, *args, **kwargs)
class LazyAdapter(BaseLoggerAdapter):
def __init__(self, name='unknown', version='unknown'):
self._logger = None
self.extra = {}
self.name = name
self.version = version
@property
def logger(self):
if not self._logger:
self._logger = getLogger(self.name, self.version)
return self._logger
class ContextAdapter(BaseLoggerAdapter):
warn = logging.LoggerAdapter.warning
def __init__(self, logger, project_name, version_string):
self.logger = logger
self.project = project_name
self.version = version_string
self._deprecated_messages_sent = dict()
@property
def handlers(self):
return self.logger.handlers
def deprecated(self, msg, *args, **kwargs):
"""Call this method when a deprecated feature is used.
If the system is configured for fatal deprecations then the message
is logged at the 'critical' level and :class:`DeprecatedConfig` will
be raised.
Otherwise, the message will be logged (once) at the 'warn' level.
:raises: :class:`DeprecatedConfig` if the system is configured for
fatal deprecations.
"""
stdmsg = _("Deprecated: %s") % msg
if CONF.fatal_deprecations:
self.critical(stdmsg, *args, **kwargs)
raise DeprecatedConfig(msg=stdmsg)
# Using a list because a tuple with dict can't be stored in a set.
sent_args = self._deprecated_messages_sent.setdefault(msg, list())
if args in sent_args:
# Already logged this message, so don't log it again.
return
sent_args.append(args)
self.warn(stdmsg, *args, **kwargs)
def process(self, msg, kwargs):
# NOTE(mrodden): catch any Message/other object and
# coerce to unicode before they can get
# to the python logging and possibly
# cause string encoding trouble
if not isinstance(msg, six.string_types):
msg = six.text_type(msg)
if 'extra' not in kwargs:
kwargs['extra'] = {}
extra = kwargs['extra']
context = kwargs.pop('context', None)
if not context:
context = getattr(local.store, 'context', None)
if context:
extra.update(_dictify_context(context))
instance = kwargs.pop('instance', None)
instance_uuid = (extra.get('instance_uuid') or
kwargs.pop('instance_uuid', None))
instance_extra = ''
if instance:
instance_extra = CONF.instance_format % instance
elif instance_uuid:
instance_extra = (CONF.instance_uuid_format
% {'uuid': instance_uuid})
extra['instance'] = instance_extra
extra.setdefault('user_identity', kwargs.pop('user_identity', None))
extra['project'] = self.project
extra['version'] = self.version
extra['extra'] = extra.copy()
return msg, kwargs
class JSONFormatter(logging.Formatter):
def __init__(self, fmt=None, datefmt=None):
# NOTE(jkoelker) we ignore the fmt argument, but its still there
# since logging.config.fileConfig passes it.
self.datefmt = datefmt
def formatException(self, ei, strip_newlines=True):
lines = traceback.format_exception(*ei)
if strip_newlines:
lines = [moves.filter(
lambda x: x,
line.rstrip().splitlines()) for line in lines]
lines = list(itertools.chain(*lines))
return lines
def format(self, record):
message = {'message': record.getMessage(),
'asctime': self.formatTime(record, self.datefmt),
'name': record.name,
'msg': record.msg,
'args': record.args,
'levelname': record.levelname,
'levelno': record.levelno,
'pathname': record.pathname,
'filename': record.filename,
'module': record.module,
'lineno': record.lineno,
'funcname': record.funcName,
'created': record.created,
'msecs': record.msecs,
'relative_created': record.relativeCreated,
'thread': record.thread,
'thread_name': record.threadName,
'process_name': record.processName,
'process': record.process,
'traceback': None}
if hasattr(record, 'extra'):
message['extra'] = record.extra
if record.exc_info:
message['traceback'] = self.formatException(record.exc_info)
return jsonutils.dumps(message)
def _create_logging_excepthook(product_name):
def logging_excepthook(exc_type, value, tb):
extra = {'exc_info': (exc_type, value, tb)}
getLogger(product_name).critical(
"".join(traceback.format_exception_only(exc_type, value)),
**extra)
return logging_excepthook
class LogConfigError(Exception):
message = _('Error loading logging config %(log_config)s: %(err_msg)s')
def __init__(self, log_config, err_msg):
self.log_config = log_config
self.err_msg = err_msg
def __str__(self):
return self.message % dict(log_config=self.log_config,
err_msg=self.err_msg)
def _load_log_config(log_config_append):
try:
logging.config.fileConfig(log_config_append,
disable_existing_loggers=False)
except moves.configparser.Error as exc:
raise LogConfigError(log_config_append, six.text_type(exc))
def setup(product_name, version='unknown'):
"""Setup logging."""
if CONF.log_config_append:
_load_log_config(CONF.log_config_append)
else:
_setup_logging_from_conf(product_name, version)
sys.excepthook = _create_logging_excepthook(product_name)
def set_defaults(logging_context_format_string):
cfg.set_defaults(log_opts,
logging_context_format_string=
logging_context_format_string)
def _find_facility_from_conf():
facility_names = logging.handlers.SysLogHandler.facility_names
facility = getattr(logging.handlers.SysLogHandler,
CONF.syslog_log_facility,
None)
if facility is None and CONF.syslog_log_facility in facility_names:
facility = facility_names.get(CONF.syslog_log_facility)
if facility is None:
valid_facilities = facility_names.keys()
consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
valid_facilities.extend(consts)
raise TypeError(_('syslog facility must be one of: %s') %
', '.join("'%s'" % fac
for fac in valid_facilities))
return facility
class RFCSysLogHandler(logging.handlers.SysLogHandler):
def __init__(self, *args, **kwargs):
self.binary_name = _get_binary_name()
# Do not use super() unless type(logging.handlers.SysLogHandler)
# is 'type' (Python 2.7).
# Use old style calls, if the type is 'classobj' (Python 2.6)
logging.handlers.SysLogHandler.__init__(self, *args, **kwargs)
def format(self, record):
# Do not use super() unless type(logging.handlers.SysLogHandler)
# is 'type' (Python 2.7).
# Use old style calls, if the type is 'classobj' (Python 2.6)
msg = logging.handlers.SysLogHandler.format(self, record)
msg = self.binary_name + ' ' + msg
return msg
def _setup_logging_from_conf(project, version):
log_root = getLogger(None).logger
for handler in log_root.handlers:
log_root.removeHandler(handler)
if CONF.use_syslog:
facility = _find_facility_from_conf()
# TODO(bogdando) use the format provided by RFCSysLogHandler
# after existing syslog format deprecation in J
if CONF.use_syslog_rfc_format:
syslog = RFCSysLogHandler(address='/dev/log',
facility=facility)
else:
syslog = logging.handlers.SysLogHandler(address='/dev/log',
facility=facility)
log_root.addHandler(syslog)
logpath = _get_log_file_path()
if logpath:
filelog = logging.handlers.WatchedFileHandler(logpath)
log_root.addHandler(filelog)
if CONF.use_stderr:
streamlog = ColorHandler()
log_root.addHandler(streamlog)
elif not logpath:
# pass sys.stdout as a positional argument
# python2.6 calls the argument strm, in 2.7 it's stream
streamlog = logging.StreamHandler(sys.stdout)
log_root.addHandler(streamlog)
if CONF.publish_errors:
handler = importutils.import_object(
"oslo.utils.openstack.common.log_handler.PublishErrorsHandler",
logging.ERROR)
log_root.addHandler(handler)
datefmt = CONF.log_date_format
for handler in log_root.handlers:
# NOTE(alaski): CONF.log_format overrides everything currently. This
# should be deprecated in favor of context aware formatting.
if CONF.log_format:
handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
datefmt=datefmt))
log_root.info('Deprecated: log_format is now deprecated and will '
'be removed in the next release')
else:
handler.setFormatter(ContextFormatter(project=project,
version=version,
datefmt=datefmt))
if CONF.debug:
log_root.setLevel(logging.DEBUG)
elif CONF.verbose:
log_root.setLevel(logging.INFO)
else:
log_root.setLevel(logging.WARNING)
for pair in CONF.default_log_levels:
mod, _sep, level_name = pair.partition('=')
logger = logging.getLogger(mod)
# NOTE(AAzza) in python2.6 Logger.setLevel doesn't convert string name
# to integer code.
if sys.version_info < (2, 7):
level = logging.getLevelName(level_name)
logger.setLevel(level)
else:
logger.setLevel(level_name)
_loggers = {}
def getLogger(name='unknown', version='unknown'):
if name not in _loggers:
_loggers[name] = ContextAdapter(logging.getLogger(name),
name,
version)
return _loggers[name]
def getLazyLogger(name='unknown', version='unknown'):
"""Returns lazy logger.
Creates a pass-through logger that does not create the real logger
until it is really needed and delegates all calls to the real logger
once it is created.
"""
return LazyAdapter(name, version)
class WritableLogger(object):
"""A thin wrapper that responds to `write` and logs."""
def __init__(self, logger, level=logging.INFO):
self.logger = logger
self.level = level
def write(self, msg):
self.logger.log(self.level, msg.rstrip())
class ContextFormatter(logging.Formatter):
"""A context.RequestContext aware formatter configured through flags.
The flags used to set format strings are: logging_context_format_string
and logging_default_format_string. You can also specify
logging_debug_format_suffix to append extra formatting if the log level is
debug.
For information about what variables are available for the formatter see:
http://docs.python.org/library/logging.html#formatter
If available, uses the context value stored in TLS - local.store.context
"""
def __init__(self, *args, **kwargs):
"""Initialize ContextFormatter instance
Takes additional keyword arguments which can be used in the message
format string.
:keyword project: project name
:type project: string
:keyword version: project version
:type version: string
"""
self.project = kwargs.pop('project', 'unknown')
self.version = kwargs.pop('version', 'unknown')
logging.Formatter.__init__(self, *args, **kwargs)
def format(self, record):
"""Uses contextstring if request_id is set, otherwise default."""
# store project info
record.project = self.project
record.version = self.version
# store request info
context = getattr(local.store, 'context', None)
if context:
d = _dictify_context(context)
for k, v in d.items():
setattr(record, k, v)
# NOTE(sdague): default the fancier formatting params
# to an empty string so we don't throw an exception if
# they get used
for key in ('instance', 'color', 'user_identity'):
if key not in record.__dict__:
record.__dict__[key] = ''
if record.__dict__.get('request_id'):
fmt = CONF.logging_context_format_string
else:
fmt = CONF.logging_default_format_string
if (record.levelno == logging.DEBUG and
CONF.logging_debug_format_suffix):
fmt += " " + CONF.logging_debug_format_suffix
if sys.version_info < (3, 2):
self._fmt = fmt
else:
self._style = logging.PercentStyle(fmt)
self._fmt = self._style._fmt
# Cache this on the record, Logger will respect our formatted copy
if record.exc_info:
record.exc_text = self.formatException(record.exc_info, record)
return logging.Formatter.format(self, record)
def formatException(self, exc_info, record=None):
"""Format exception output with CONF.logging_exception_prefix."""
if not record:
return logging.Formatter.formatException(self, exc_info)
stringbuffer = moves.StringIO()
traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
None, stringbuffer)
lines = stringbuffer.getvalue().split('\n')
stringbuffer.close()
if CONF.logging_exception_prefix.find('%(asctime)') != -1:
record.asctime = self.formatTime(record, self.datefmt)
formatted_lines = []
for line in lines:
pl = CONF.logging_exception_prefix % record.__dict__
fl = '%s%s' % (pl, line)
formatted_lines.append(fl)
return '\n'.join(formatted_lines)
class ColorHandler(logging.StreamHandler):
LEVEL_COLORS = {
logging.DEBUG: '\033[00;32m', # GREEN
logging.INFO: '\033[00;36m', # CYAN
logging.AUDIT: '\033[01;36m', # BOLD CYAN
logging.WARN: '\033[01;33m', # BOLD YELLOW
logging.ERROR: '\033[01;31m', # BOLD RED
logging.CRITICAL: '\033[01;31m', # BOLD RED
}
def format(self, record):
record.color = self.LEVEL_COLORS[record.levelno]
return logging.StreamHandler.format(self, record)
class DeprecatedConfig(Exception):
message = _("Fatal call to deprecated config: %(msg)s")
def __init__(self, msg):
super(Exception, self).__init__(self.message % dict(msg=msg))
|
apache-2.0
| 858,290,176,509,701,200
| 35.506868
| 79
| 0.588027
| false
| 4.144238
| true
| false
| false
|
benfitzpatrick/cylc
|
lib/cylc/gui/combo_logviewer.py
|
1
|
3938
|
#!/usr/bin/env python
# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
# Copyright (C) 2008-2016 NIWA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Cylc gui log viewer, with a combo box for log file selection."""
import gtk
import os
from parsec.OrderedDict import OrderedDict
from cylc.gui.logviewer import logviewer
from cylc.gui.tailer import Tailer
from cylc.task_id import TaskID
class ComboLogViewer(logviewer):
"""Implement a viewer for task jobs in the "cylc gui".
    It has a combo box for log file selection.
task_id -- The NAME.POINT of a task proxy.
filenames -- The names of the task job logs.
    cmd_tmpls -- A dict mapping file names to alternate commands used to
                 tail-follow the file.
init_active_index -- The index for selecting the initial log file.
"""
LABEL_TEXT = "Choose Log File: "
def __init__(self, task_id, filenames, cmd_tmpls, init_active_index):
self.filenames = OrderedDict()
name_str, point_str = TaskID.split(task_id)
for filename in filenames:
try:
f_point_str, f_name_str, f_submit_num_str, f_base_name = (
filename.rsplit(os.sep, 4)[1:])
if (f_point_str == point_str and f_name_str == name_str and
int(f_submit_num_str) and f_base_name):
name = f_submit_num_str + os.sep + f_base_name
if ":" in filename:
name += " (%s)" % (filename.split(":", 1)[0])
except ValueError:
name = filename
self.filenames[name] = filename
self.init_active_index = init_active_index
self.cmd_tmpls = cmd_tmpls
logviewer.__init__(
self, task_id, None, filenames[self.init_active_index])
def connect(self):
"""Connect to the selected log file tailer."""
try:
cmd_tmpl = self.cmd_tmpls[self.filename]
except (KeyError, TypeError):
cmd_tmpl = None
self.t = Tailer(self.logview, self.filename, cmd_tmpl=cmd_tmpl)
self.t.start()
def create_gui_panel(self):
"""Create the panel."""
logviewer.create_gui_panel(self)
label = gtk.Label(self.LABEL_TEXT)
combobox = gtk.combo_box_new_text()
for name in self.filenames:
combobox.append_text(name)
combobox.connect("changed", self.switch_log)
if self.init_active_index:
combobox.set_active(self.init_active_index)
else:
combobox.set_active(0)
self.hbox.pack_end(combobox, False)
self.hbox.pack_end(label, False)
def switch_log(self, callback):
"""Switch to another file, if necessary."""
if self.t is None:
return False
model = callback.get_model()
index = callback.get_active()
name = model[index][0]
filename = self.filenames[name]
if filename != self.filename:
self.filename = filename
self.t.stop()
self.t.join()
logbuffer = self.logview.get_buffer()
pos_start, pos_end = logbuffer.get_bounds()
self.reset_logbuffer()
logbuffer.delete(pos_start, pos_end)
self.log_label.set_text(name)
self.connect()
return False
|
gpl-3.0
| -7,878,608,303,728,449,000
| 34.477477
| 79
| 0.611732
| false
| 3.841951
| false
| false
| false
|
icyflame/batman
|
scripts/spamremove.py
|
1
|
3721
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Script to remove links that are being or have been spammed.
Usage:
python pwb.py spamremove www.spammedsite.com
It will use Special:Linksearch to find the pages on the wiki that link to
that site, then for each page make a proposed change consisting of removing
all the lines where that url occurs. You can choose to:
* accept the changes as proposed
* edit the page yourself to remove the offending link
* not change the page in question
Command line options:
-always Do not ask, but remove the lines automatically. Be very
careful in using this option!
-namespace: Filters the search to a given namespace. If this is specified
multiple times it will search all given namespaces
"""
#
# (C) Pywikibot team, 2007-2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
#
import pywikibot
from pywikibot import i18n
from pywikibot.editor import TextEditor
def main(*args):
"""
Process command line arguments and perform task.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: list of unicode
"""
always = False
namespaces = []
spamSite = ''
for arg in pywikibot.handle_args(args):
if arg == "-always":
always = True
elif arg.startswith('-namespace:'):
try:
namespaces.append(int(arg[len('-namespace:'):]))
except ValueError:
namespaces.append(arg[len('-namespace:'):])
else:
spamSite = arg
if not spamSite:
pywikibot.bot.suggest_help(missing_parameters=['spam site'])
return False
mysite = pywikibot.Site()
pages = mysite.exturlusage(spamSite, namespaces=namespaces, content=True)
summary = i18n.twtranslate(mysite, 'spamremove-remove',
{'url': spamSite})
for i, p in enumerate(pages, 1):
text = p.text
if spamSite not in text:
continue
# Show the title of the page we're working on.
# Highlight the title in purple.
pywikibot.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<"
% p.title())
lines = text.split('\n')
newpage = []
lastok = ""
for line in lines:
if spamSite in line:
if lastok:
pywikibot.output(lastok)
pywikibot.output('\03{lightred}%s\03{default}' % line)
lastok = None
else:
newpage.append(line)
if line.strip():
if lastok is None:
pywikibot.output(line)
lastok = line
if always:
answer = "y"
else:
answer = pywikibot.input_choice(
u'\nDelete the red lines?',
[('yes', 'y'), ('no', 'n'), ('edit', 'e')],
'n', automatic_quit=False)
if answer == "n":
continue
elif answer == "e":
editor = TextEditor()
newtext = editor.edit(text, highlight=spamSite,
jumpIndex=text.find(spamSite))
else:
newtext = "\n".join(newpage)
if newtext != text:
p.text = newtext
p.save(summary)
else:
if "i" not in locals():
pywikibot.output('No page found.')
elif i == 1:
            pywikibot.output('1 page done.')
else:
pywikibot.output('%d pages done.' % i)
if __name__ == '__main__':
main()
|
mit
| -2,946,422,371,415,227,400
| 29.008065
| 79
| 0.552808
| false
| 4.277011
| false
| false
| false
|
diegojromerolopez/djanban
|
src/djanban/apps/dev_times/migrations/0009_auto_20170515_1731.py
|
1
|
1516
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-15 15:31
from __future__ import unicode_literals
from django.db import migrations
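# adjust_spent_time scales a member's raw spent time by the first spent time
# factor whose (start_date, end_date) window contains the given date; if no
# window matches, the raw spent time is returned unchanged.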
def adjust_spent_time(member, spent_time, date):
spent_time_factors = member.spent_time_factors.all()
for spent_time_factor in spent_time_factors:
if (spent_time_factor.start_date is None and spent_time_factor.end_date is None) or \
(spent_time_factor.start_date <= date and spent_time_factor.end_date is None) or \
(spent_time_factor.start_date <= date <= spent_time_factor.end_date):
adjusted_value = spent_time * spent_time_factor.factor
return adjusted_value
return spent_time
def update_adjusted_spent_time(apps, schema):
DailySpentTime = apps.get_model("dev_times", "DailySpentTime")
for daily_spent_time in DailySpentTime.objects.all():
if daily_spent_time.spent_time is None:
daily_spent_time.adjusted_spent_time = None
else:
daily_spent_time.adjusted_spent_time = adjust_spent_time(
daily_spent_time.member, daily_spent_time.spent_time, daily_spent_time.date
)
DailySpentTime.objects.filter(id=daily_spent_time.id).update(adjusted_spent_time=daily_spent_time.adjusted_spent_time)
class Migration(migrations.Migration):
dependencies = [
('dev_times', '0008_dailyspenttime_adjusted_spent_time'),
]
operations = [
migrations.RunPython(update_adjusted_spent_time)
]
|
mit
| 604,419,573,935,151,400
| 37.871795
| 126
| 0.668206
| false
| 3.437642
| false
| false
| false
|
azumimuo/family-xbmc-addon
|
plugin.video.citerkita/resources/lib/googledocs.py
|
1
|
2310
|
# -*- coding: utf-8 -*-
'''
Genesis Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,json
from resources.lib import client
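# resolve() turns a Google Drive/Docs preview URL into a list of direct stream
# URLs parsed from the page's "fmt_stream_map", ordered 1080p -> HD -> SD,
# while tag() maps the itag code embedded in a stream URL to a quality label.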
def resolve(url):
try:
url = url.split('/preview', 1)[0]
url = url.replace('drive.google.com', 'docs.google.com')
result = client.request(url)
result = re.compile('"fmt_stream_map",(".+?")').findall(result)[0]
u = json.loads(result)
u = [i.split('|')[-1] for i in u.split(',')]
u = sum([tag(i) for i in u], [])
url = []
try: url += [[i for i in u if i['quality'] == '1080p'][0]]
except: pass
try: url += [[i for i in u if i['quality'] == 'HD'][0]]
except: pass
try: url += [[i for i in u if i['quality'] == 'SD'][0]]
except: pass
if url == []: return
return url
except:
return
def tag(url):
quality = re.compile('itag=(\d*)').findall(url)
quality += re.compile('=m(\d*)$').findall(url)
try: quality = quality[0]
except: return []
if quality in ['37', '137', '299', '96', '248', '303', '46']:
return [{'quality': '1080p', 'url': url}]
elif quality in ['22', '84', '136', '298', '120', '95', '247', '302', '45', '102']:
return [{'quality': 'HD', 'url': url}]
elif quality in ['35', '44', '135', '244', '94']:
return [{'quality': 'SD', 'url': url}]
elif quality in ['18', '34', '43', '82', '100', '101', '134', '243', '93']:
return [{'quality': 'SD', 'url': url}]
elif quality in ['5', '6', '36', '83', '133', '242', '92', '132']:
return [{'quality': 'SD', 'url': url}]
else:
return []
|
gpl-2.0
| -8,715,977,691,981,637,000
| 32
| 87
| 0.554545
| false
| 3.521341
| false
| false
| false
|
dpineo/gadann
|
gadann/model.py
|
1
|
7347
|
#
# GADANN - GPU Accelerated Deep Artificial Neural Network
#
# Copyright (C) 2014 Daniel Pineo (daniel@pineo.net)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import numpy
import copy
import logging
import time
import cv2
from .tensor import Tensor
from .updater import SgdUpdater
from . import kernels
logger = logging.getLogger(__name__)
# -------------- NeuralNetworkModel ----------------
class NeuralNetworkModel(object):
def __init__(self, layers, input_shape=None, updater=SgdUpdater()):
self.layers = []
for layer_n, layer in enumerate(layers):
if 'shape' not in layer:
layer['shape'] = input_shape[1:]
if 'input_shape' not in layer:
layer['input_shape'] = input_shape
if 'name' not in layer:
layer['name'] = 'Layer ' + str(layer_n)
if 'updater' not in layer:
layer['updater'] = copy.copy(updater)
self.layers.append(layers[layer_n]['layer'](**layer))
input_shape = self.layers[-1].output_shape
'''
def add(self, layer, **kwargs):
if 'shape' not in kwargs:
layer['shape'] = input_shape[1:]
if 'input_shape' not in kwargs:
layer['input_shape'] = input_shape
if 'name' not in layer:
layer['name'] = 'Layer ' + str(layer_n)
if 'updater' not in layer:
layer['updater'] = copy.copy(updater)
self.layers.append(layer(**kwargs))
input_shape = self.layers[-1].output_shape
'''
def classify(self, features):
probabilities = self.probability(features)
return Tensor(numpy.argmax(probabilities.get(), axis=1))
'''
def probability(self, features):
predictions = self.predict(features)
return tensor.Tensor(numpy.argmax(predictions.get(), axis=1))
confidences = numpy.max(predictions.get(), axis=1)
'''
def evaluate(self, features, labels):
classifications = (self.classify(f) for f in features)
return numpy.fromiter(((l.get().flatten() == c.get().flatten()).mean()
for (l, c) in zip(labels, classifications)), float).mean()
def probability(self, input):
for layer in self.layers:
input = layer.fprop(input)
assert(not numpy.isnan(input.get()).any())
return input
# return reduce(lambda x,l: l.fprop(x), self.layers, input)
def __str__(self):
return self.__class__.__name__ + '\n' + '\n'.join([str(l) for l in self.layers])
def show(self):
for layer_n, layer in enumerate(self.layers):
if not layer.params:
continue
weights = layer.params['w']
for deconv_layer in reversed(self.layers[:layer_n]):
weights = deconv_layer.bprop(weights)
cv2.imshow(layer.name, weights.mosaic().get() + .5)
cv2.waitKey(1)
def train_backprop(self, features, labels, n_epochs):
logger.info("Training (batch gradient descent)")
for epoch in range(n_epochs):
logger.info("Epoch " + str(epoch),)
start_time = time.time()
for n, (batch_features, batch_targets) in enumerate(zip(features, labels)):
# Save the activations during the forward pass, they will be used to
# compute the gradients during the backward pass
layer_activations = [batch_features]
# Forward pass
for layer in self.layers:
layer_activations.append(layer.fprop(layer_activations[-1]))
# Error delta
output_error = layer_activations[-1] - batch_targets
# Backward pass
for layer in reversed(self.layers):
input_error = layer.bprop(output_error, layer_activations.pop())
grads = layer.gradient(layer_activations[-1], output_error)
layer.update(grads)
output_error = input_error
logger.info(' Epoch {} Time={:.3f}'.format(epoch, time.time()-start_time))
self.show()
def train_contrastive_divergence(self, features, n_epochs):
logger.info("Training (contrastive divergence)")
for layer in self.layers[:-2]: # don't train the last linear & softmax layers
logger.info("training " + layer.name)
# skip the layer if it has no parameters to train
if not layer.params:
continue
for epoch in range(n_epochs):
reconstruction_error_avg = 0
start_time = time.time()
for batch_n, v in enumerate(features):
# Gibbs sampling
p_h_given_v = kernels.logistic(layer.fprop(v))
h_sample = kernels.sample(p_h_given_v)
pos_grads = layer.gradient(v, h_sample)
p_v_given_h = kernels.logistic(layer.bprop(h_sample))
v_sample = kernels.sample(p_v_given_h)
ph = kernels.logistic(layer.fprop(v_sample))
h = kernels.sample(ph)
neg_grads = layer.gradient(v, h)
                    # Gradient of the log likelihood wrt the parameters
grads = {k: (neg_grads[k] - pos_grads[k]) / features.batch_size for k in pos_grads.keys()}
# Update parameters wrt the gradients
layer.update(grads)
# Running average of reconstruction error
reconstruction_error = ((v-p_v_given_h)**2).sum()/v.size
reconstruction_error_avg = .1*reconstruction_error + .9*reconstruction_error_avg
self.show()
# print model.updater.status()
logger.info(' Epoch {} Time={:.3f} Error={:.6f}'.format(epoch, time.time()-start_time, reconstruction_error))
self.show()
            # propagate the input data through the layer
features = features.apply_batchwise(layer.fprop)
|
mit
| 8,249,523,668,082,480,000
| 39.275281
| 127
| 0.572887
| false
| 4.205495
| false
| false
| false
|
Jellby/ASEP-MD
|
Tests/scripts/gen2gromacs.py
|
1
|
9989
|
#!/usr/bin/python
# Modify the solute geometry and charges in Gromacs .gro and .top files
# Use with 5 arguments:
# 1 (read): generic system file
# 2 (read): .top file
# 3 (read): .gro file
# 4 (write): modified .top file
# 5 (write): modified .gro file
import sys
import re
import math
import copy
#=============================
# Get input arguments
try:
system_input = sys.argv[1]
except IndexError:
sys.exit("Missing input file")
try:
top_input = sys.argv[2]
except IndexError:
sys.exit("Missing input file")
try:
gro_input = sys.argv[3]
except IndexError:
sys.exit("Missing input file")
try:
top_output = sys.argv[4]
except IndexError:
sys.exit("Missing output file")
try:
gro_output = sys.argv[5]
except IndexError:
sys.exit("Missing output file")
#=============================
# Function to replace a word in a string
# (keeping the alignment if possible)
def rep_word ( words, num, new ):
l = len(words[num])
words[num] = new.rjust(l)
#=============================
# Function to displace a molecule, matching an atom with reference
def displace ( mol1, mol2, at ):
disp = {}
disp["x"] = mol1[at]["x"]-mol2[at]["x"]
disp["y"] = mol1[at]["y"]-mol2[at]["y"]
disp["z"] = mol1[at]["z"]-mol2[at]["z"]
old = copy.deepcopy(mol2)
for i in range(len(mol2)):
mol2[i]["x"] = old[i]["x"]+disp["x"]
mol2[i]["y"] = old[i]["y"]+disp["y"]
mol2[i]["z"] = old[i]["z"]+disp["z"]
return
#=============================
# Function to superpose molecules
# see: Acta Crystallogr. Sect. A 61 (2005), 478
# J. Comput. Chem. 31 (2010), 1561
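# The optimal rotation is obtained from the largest eigenvalue of the 4x4
# quaternion key matrix K: the eigenvalue is found by Newton iteration on the
# quartic characteristic polynomial, the corresponding eigenvector (a unit
# quaternion) is read off the adjugate of K - lambda*I, and it is finally
# converted to a rotation matrix applied to mol2.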
def superpose ( mol1, mol2 ):
center1 = { "x": 0.0, "y": 0.0, "z": 0.0 }
for i in range(len(mol1)):
center1["x"] += mol1[i]["x"]
center1["y"] += mol1[i]["y"]
center1["z"] += mol1[i]["z"]
center1["x"] = center1["x"]/len(mol1)
center1["y"] = center1["y"]/len(mol1)
center1["z"] = center1["z"]/len(mol1)
for i in range(len(mol1)):
mol1[i]["x"] -= center1["x"]
mol1[i]["y"] -= center1["y"]
mol1[i]["z"] -= center1["z"]
G1 = 0
for i in range(len(mol1)):
G1 += mol1[i]["x"]**2+mol1[i]["y"]**2+mol1[i]["z"]**2
# only use first atoms of mol2 to superpose
center2 = { "x": 0.0, "y": 0.0, "z": 0.0 }
for i in range(len(mol1)):
center2["x"] += mol2[i]["x"]
center2["y"] += mol2[i]["y"]
center2["z"] += mol2[i]["z"]
center2["x"] = center2["x"]/len(mol1)
center2["y"] = center2["y"]/len(mol1)
center2["z"] = center2["z"]/len(mol1)
# but move the whole mol2
for i in range(len(mol2)):
mol2[i]["x"] -= center2["x"]
mol2[i]["y"] -= center2["y"]
mol2[i]["z"] -= center2["z"]
G2 = 0
for i in range(len(mol2)):
G2 += mol2[i]["x"]**2+mol2[i]["y"]**2+mol2[i]["z"]**2
M = {}
for i in ["x", "y", "z"]:
for j in ["x", "y", "z"]:
M[i+j] = 0
for k in range(len(mol1)):
M[i+j] += mol1[k][i] * mol2[k][j]
K = []
K.append( [ M["xx"]+M["yy"]+M["zz"], M["yz"]-M["zy"], M["zx"]-M["xz"], M["xy"]-M["yx"] ] )
K.append( [ M["yz"]-M["zy"], M["xx"]-M["yy"]-M["zz"], M["xy"]+M["yx"], M["xz"]+M["zx"] ] )
K.append( [ M["zx"]-M["xz"], M["xy"]+M["yx"], M["yy"]-M["xx"]-M["zz"], M["yz"]+M["zy"] ] )
K.append( [ M["xy"]-M["yx"], M["xz"]+M["zx"], M["yz"]+M["zy"], M["zz"]-M["xx"]-M["yy"] ] )
coef = []
D = (M["xy"]**2+M["xz"]**2-M["yx"]**2-M["zx"]**2)**2
E = (-M["xx"]**2+M["yy"]**2+M["zz"]**2+M["yz"]**2+M["zy"]**2-2*(M["yy"]*M["zz"]-M["yz"]*M["zy"]))*\
(-M["xx"]**2+M["yy"]**2+M["zz"]**2+M["yz"]**2+M["zy"]**2+2*(M["yy"]*M["zz"]-M["yz"]*M["zy"]))
F = (-(M["xz"]+M["zx"])*(M["yz"]-M["zy"])+(M["xy"]-M["yx"])*(M["xx"]-M["yy"]-M["zz"]))*\
(-(M["xz"]-M["zx"])*(M["yz"]+M["zy"])+(M["xy"]-M["yx"])*(M["xx"]-M["yy"]+M["zz"]))
G = (-(M["xz"]+M["zx"])*(M["yz"]+M["zy"])-(M["xy"]+M["yx"])*(M["xx"]+M["yy"]-M["zz"]))*\
(-(M["xz"]-M["zx"])*(M["yz"]-M["zy"])-(M["xy"]+M["yx"])*(M["xx"]+M["yy"]+M["zz"]))
H = ( (M["xy"]+M["yx"])*(M["yz"]+M["zy"])+(M["xz"]+M["zx"])*(M["xx"]-M["yy"]+M["zz"]))*\
(-(M["xy"]-M["yx"])*(M["yz"]-M["zy"])+(M["xz"]+M["zx"])*(M["xx"]+M["yy"]+M["zz"]))
I = ( (M["xy"]+M["yx"])*(M["yz"]-M["zy"])+(M["xz"]-M["zx"])*(M["xx"]-M["yy"]-M["zz"]))*\
(-(M["xy"]-M["yx"])*(M["yz"]+M["zy"])+(M["xz"]-M["zx"])*(M["xx"]+M["yy"]-M["zz"]))
coef.append( D+E+F+G+H+I )
coef.append( -8.0*( M["xx"]*M["yy"]*M["zz"]+M["xy"]*M["yz"]*M["zx"]+M["xz"]*M["yx"]*M["zy"]
-M["xx"]*M["yz"]*M["zy"]-M["xy"]*M["yx"]*M["zz"]-M["xz"]*M["yy"]*M["zx"] ) )
coef.append( -2.0*( M["xx"]**2+M["xy"]**2+M["xz"]**2+M["yx"]**2+M["yy"]**2+M["yz"]**2+M["zx"]**2+M["zy"]**2+M["zz"]**2 ) )
coef.append( 0.0 )
coef.append( 1.0 )
root_old = 0.0
root = 0.5*(G1+G2)
while (math.fabs(root-root_old) > 1.0e-6):
root_old = root
P = root**4+coef[2]*root**2+coef[1]*root+coef[0]
dP = 4*root**3+2*coef[2]*root+coef[1]
root -= P/dP
for i in range(len(K)):
K[i][i] -= root
for i in range(len(K)):
vect = []
for j in range(len(K)):
adj = copy.deepcopy(K)
del adj[i]
for k in range(len(adj)):
del adj[k][j]
det = adj[0][0]*adj[1][1]*adj[2][2]+adj[0][1]*adj[1][2]*adj[2][0]+adj[0][2]*adj[1][0]*adj[2][1] \
-adj[0][0]*adj[1][2]*adj[2][1]-adj[0][1]*adj[1][0]*adj[2][2]-adj[0][2]*adj[1][1]*adj[2][0]
det *= (-1)**(i+j)
vect.append(det)
norm = math.sqrt(vect[0]**2+vect[1]**2+vect[2]**2+vect[3]**2)
if (norm > 1.0e-6):
vect[0] = -vect[0]/norm
vect[1] = vect[1]/norm
vect[2] = vect[2]/norm
vect[3] = vect[3]/norm
break
M["xx"] =vect[0]**2+vect[1]**2-vect[2]**2-vect[3]**2
M["yy"] =vect[0]**2-vect[1]**2+vect[2]**2-vect[3]**2
M["zz"] =vect[0]**2-vect[1]**2-vect[2]**2+vect[3]**2
M["xy"] =2.0*(vect[1]*vect[2]-vect[0]*vect[3])
M["yx"] =2.0*(vect[1]*vect[2]+vect[0]*vect[3])
M["yz"] =2.0*(vect[2]*vect[3]-vect[0]*vect[1])
M["zy"] =2.0*(vect[2]*vect[3]+vect[0]*vect[1])
M["zx"] =2.0*(vect[1]*vect[3]-vect[0]*vect[2])
M["xz"] =2.0*(vect[1]*vect[3]+vect[0]*vect[2])
old = copy.deepcopy(mol2)
for i in range(len(mol2)):
mol2[i]["x"] = M["xx"]*old[i]["x"]+M["xy"]*old[i]["y"]+M["xz"]*old[i]["z"]+center1["x"]
mol2[i]["y"] = M["yx"]*old[i]["x"]+M["yy"]*old[i]["y"]+M["yz"]*old[i]["z"]+center1["y"]
mol2[i]["z"] = M["zx"]*old[i]["x"]+M["zy"]*old[i]["y"]+M["zz"]*old[i]["z"]+center1["z"]
return
#=============================
# Read the system file
# Skip the file until the solute section is found
file_system = open(system_input, "r")
for line in file_system:
if (re.match("Solute",line)):
break
# Skip name and number of molecules
file_system.next()
file_system.next()
# Read coordinates and charges
mol = []
num = int(file_system.next())
for i in range(num):
tmp = dict(zip(("x","y","z","q"),file_system.next().split()[4:8]))
tmp["x"] = float(tmp["x"])
tmp["y"] = float(tmp["y"])
tmp["z"] = float(tmp["z"])
tmp["q"] = float(tmp["q"])
mol.append(tmp)
file_system.close()
#=============================
# Read the topology file
# and write the modified charges
file_top = open(top_input, "r")
file_top_out = open(top_output, "w")
# Skip to the definition of the first molecule's atoms
for line in file_top:
file_top_out.write(line)
if (re.match("\[\s*atoms\s*\]",line)):
break
# Replace the 7th word (the charge) with the new charge
for i in range(num):
line = file_top.next()
# Skip comment lines
while (re.match("\s*;", line)):
file_top_out.write(line)
line = file_top.next()
words = re.findall("(\s*\S+)",line)
rep_word(words, 6, " "+str(mol[i]["q"]))
file_top_out.write("".join(words)+"\n")
# Copy the rest of the file unchanged
for line in file_top:
file_top_out.write(line)
file_top.close()
file_top_out.close()
#=============================
# Read the coordinates file
# and write the modified coordinates
coord_prec = "11.6"
veloc_prec = "11.7"
format_str = "%%5d%%5s%%5s%%5d%%%sf%%%sf%%%sf%%%sf%%%sf%%%sf\n" % (coord_prec, coord_prec, coord_prec, veloc_prec, veloc_prec, veloc_prec)
file_gro = open(gro_input, "r")
file_gro_out = open(gro_output, "w")
# First read the solute coordinates
file_gro.next()
file_gro.next()
mol_gro = []
for i in range(num):
line = file_gro.next()
dots = [match.start() for match in re.finditer("\.", line[20:])]
width = dots[1]-dots[0]
tmp = dict(zip(("x","y","z"), [line[j:j+width] for j in range(20, len(line), width)]))
tmp["x"] = float(tmp["x"])*10
tmp["y"] = float(tmp["y"])*10
tmp["z"] = float(tmp["z"])*10
mol_gro.append(tmp)
# Modify the input coordinates to fit the original orientation
superpose ( mol_gro, mol )
# Back to the top of the file
file_gro.seek(0)
# Copy title and total number of atoms
file_gro_out.write(file_gro.next())
numtot = int(file_gro.next())
file_gro_out.write("%5d\n" % numtot)
# Read the atom coordinates and velocities
for i in range(numtot):
line = file_gro.next()
dots = [match.start() for match in re.finditer("\.", line[20:])]
width = dots[1]-dots[0]
tmp = dict(zip(("x","y","z","vx","vy","vz"), [line[j:j+width] for j in range(20, len(line), width)]))
tmp["resnum"] = int(line[0:5])
tmp["resname"] = line[5:10]
tmp["atname"] = line[10:15]
tmp["atnum"] = int(line[15:20])
# For the solute, write the new coordinates, in nm
if (i < num):
tmp["x"] = 0.1*mol[i]["x"]
tmp["y"] = 0.1*mol[i]["y"]
tmp["z"] = 0.1*mol[i]["z"]
else:
tmp["x"] = float(tmp["x"])
tmp["y"] = float(tmp["y"])
tmp["z"] = float(tmp["z"])
# Write the velocities if present
if "vx" in tmp:
tmp["vx"] = float(tmp["vx"])
tmp["vy"] = float(tmp["vy"])
tmp["vz"] = float(tmp["vz"])
else:
tmp["vx"] = 0.0
tmp["vy"] = 0.0
tmp["vz"] = 0.0
file_gro_out.write(format_str % \
(tmp["resnum"], tmp["resname"], tmp["atname"], tmp["atnum"], tmp["x"], tmp["y"], tmp["z"], tmp["vx"], tmp["vy"], tmp["vz"]))
# Copy the cell tensor
file_gro_out.write(file_gro.next())
file_gro.close()
file_gro_out.close()
|
gpl-3.0
| 6,437,913,414,592,867,000
| 30.11838
| 138
| 0.513565
| false
| 2.36538
| false
| false
| false
|
gammapy/enrico
|
enrico/plotting.py
|
1
|
25543
|
import os
from distutils.version import LooseVersion
import numpy as np
try:
import astropy.io.fits as fits
except ImportError:
import pyfits as fits
import pyLikelihood
import matplotlib
matplotlib.use('Agg')
matplotlib.rc('font', **{'family': 'serif', 'serif': ['Computer Modern'], 'size': 15})
matplotlib.rc('text', usetex=True)
import matplotlib.pyplot as plt
from enrico.constants import MEV_TO_ERG, ERG_TO_MEV
from enrico.config import get_config
from enrico import utils
from enrico import Loggin
from enrico.extern.astropy_bayesian_blocks import bayesian_blocks
class Params:
"""Collection of Plotting parameters like Energy bounds,
colors, file name, etc...."""
def __init__(self, srcname, Emin=100, Emax=3e5,
PlotName="LAT_SED", LineColor=2,
PointColor = 1, N = 2000):
self.Emin = Emin #Energy bounds
self.Emax = Emax
self.N = N #Number of points for the TGraph
self.srcname = srcname # Source of interest
self.PlotName = PlotName #file name
#color options
self.LineColor = LineColor
self.PointColor = PointColor
class Result(Loggin.Message):
"""Helper class to get the results from a (Un)BinnedAnalysis object
and compute the SED and errors"""
def __init__(self, Fit, pars):
super(Result,self).__init__()
Loggin.Message.__init__(self)
self.Fit = Fit
self.Model = Fit[pars.srcname].funcs['Spectrum'].genericName()
self.ptsrc = pyLikelihood.PointSource_cast(Fit[pars.srcname].src)
self.covar = np.array(utils.GetCovar(pars.srcname, self.Fit, False))
self.srcpars = pyLikelihood.StringVector()
Fit[pars.srcname].src.spectrum().getFreeParamNames(self.srcpars)
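    # The decorrelation energy is taken as the energy at which the relative
    # SED error (Err/SED) is smallest; the differential flux, SED value and
    # their errors at that energy are stored on the instance.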
def GetDecorrelationEnergy(self,par):
self.E, self.SED = self.MakeSED(par)
self.Err = self.MakeSEDError(par)
i=np.argmin(self.Err/self.SED)
self.decE = self.E[i]
self.decFlux = self.SED[i]/self.E[i]**2*ERG_TO_MEV
self.decFluxerr = self.Err[i]/self.E[i]**2*ERG_TO_MEV
self.decSED = self.SED[i]
self.decSEDerr = self.Err[i]
def _DumpSED(self,par):
"""Save the energy, E2.dN/dE, and corresponding error in an ascii file
The count and residuals plot vs E is also made"""
try:
self.decE
except NameError:
self.GetDecorrelationEnergy(par)
self.info("Decorrelation energy : %4.2e MeV"% self.decE)
self.info("Diffential flux at the Decorrelation energy : %2.2e +/- %2.2e ph/cm2/s/MeV" \
%(self.decFlux, self.decFluxerr))
self.info("SED value at the Decorrelation energy : %2.2e +/- %2.2e erg/cm2/s" \
%(self.decSED, self.decSEDerr))
try:
self.CountsPlot(par)
except Exception as e:
print((type(e))) # the exception instance
print((e.args)) # arguments stored in .args
print(e) # __str__ allows args to be printed directly,
#raise
# Save all in ascii file
# log(E) log (E**2*dN/dE) log(E**2*dN/dE_err) is_dot (0,1) is_upper (0,1)
save_file = open(par.PlotName + '.dat', 'w')
save_file.write("# log(E) log (E**2*dN/dE) Error on log(E**2*dN/dE) \n")
for i in range(par.N):
save_file.write("%12.4e %12.4e %12.4e \n" % (self.E[i], self.SED[i], self.Err[i]))
save_file.close()
def MakeFlux(self, params):
"""Compute differential Flux distribution and
corresponding energy and return a numpy array"""
E = np.logspace(np.log10(params.Emin), np.log10(params.Emax), params.N)
Flux = np.zeros(params.N)
for i in range(params.N):
Flux[i] = self.dNde(E[i])
return E, Flux
def MakeSED(self, pars):
"""Compute Spectral energy distribution and corresponding energy
and return a numpy array"""
E = np.logspace(np.log10(pars.Emin), np.log10(pars.Emax), pars.N)
nuFnu = np.zeros(pars.N)
for i in range(pars.N):
nuFnu[i] = MEV_TO_ERG * E[i] ** 2 * self.dNde(E[i]) #Mev to Ergs
return E, nuFnu
def MakeSEDError(self, pars):
"""@todo: document me"""
estep = np.log(pars.Emax / pars.Emin) / (pars.N - 1)
energies = pars.Emin * np.exp(estep * np.arange(np.float(pars.N)))
err = np.zeros(pars.N)
j = 0
for ene in energies:
arg = pyLikelihood.dArg(ene)
partials = np.zeros(len(self.srcpars))
for i in range(len(self.srcpars)):
x = self.srcpars[i]
partials[i] = self.ptsrc.spectrum().derivByParam(arg, x)
err[j] = np.sqrt(np.dot(partials, np.dot(self.covar, partials)))
j += 1
return MEV_TO_ERG * energies ** 2 * err #Mev to Ergs
def dNde(self, energy):
arg = pyLikelihood.dArg(energy)
return self.ptsrc.spectrum()(arg)
def CountsPlot(self, Parameter):
"""@todo: document me"""
imName = "tmp.fits"
filebase = Parameter.PlotName
total = np.array([])
obs = np.array([])
obs_err = np.array([])
emax = np.array([])
emin = np.array([])
src = np.array([])
# Summed Likelihood has no writeCountsSpectra
# but we can do it component by component
for comp in self.Fit.components:
#self.Fit.writeCountsSpectra(imName)
try:
comp.writeCountsSpectra(imName)
image = fits.open(imName)
#loop on the source names to find the good one
j = 0
for ID in image[1].data.names:
if ID == Parameter.srcname:
indice = j
j += 1
for jn in range(len(image[3].data.field(0))):
energymin = image[3].data.field(1)[jn]
energymax = image[3].data.field(0)[jn]
if energymax in emax and energymin in emin:
k = np.where(energymax==emax)
obs[k] = obs[k] + image[1].data.field(0)[jn]
obs_err[k] = np.sqrt(obs[k])
src[k] = src[k] + image[1].data.field(indice)[jn]
for i in range(len(image[1].data.names) - 1):
total[k] = total[k] + image[1].data.field(i + 1)[jn]
else:
emax = np.append(emax, energymax)
emin = np.append(emin, energymin)
obs = np.append(obs,image[1].data.field(0)[jn])
obs_err = np.append(obs_err,\
np.sqrt(image[1].data.field(0)[jn]))
src = np.append(src, image[1].data.field(indice)[jn])
total = np.append(total,0)
for i in range(len(image[1].data.names) - 1):
total[-1] = total[-1] + image[1].data.field(i + 1)[jn]
except RuntimeError as e:
print("Exception RuntimeError ocurred: ")
print((type(e)))
print((e.args))
print(e)
break
            except IndexError as e:
                print("Exception IndexError occurred (component unavailable): ")
print((type(e)))
print((e.args))
print(e)
continue
# Sort by energy
energy_order = np.argsort(emin)
src = src[energy_order]
obs = obs[energy_order]
obs_err = obs_err[energy_order]
total = total[energy_order]
emin = emin[energy_order]
emax = emax[energy_order]
other = np.array(total - src)
Nbin = len(src)
E = np.array((emax + emin) / 2.)
err_E = np.array((emax - emin) / 2.)
total = np.array(total)
residual = np.zeros(Nbin)
Dres = np.zeros(Nbin)
plt.figure()
plt.loglog()
plt.title('Counts plot')
plt.xlabel("E (MeV) ")
plt.ylabel("Counts / bin")
plt.errorbar(E,obs,xerr=err_E,yerr=obs_err,fmt='o',color="red",ls='None',label="Data")
plt.plot(E,src,ls='dashed',color="blue",label=Parameter.srcname.replace("_"," "))
plt.plot(E,other,ls='solid',color="green",label="Other Sources")
plt.plot(E,total,lw=1.5,ls='solid',label="All Sources")
plt.legend()
plt.tight_layout()
plt.savefig(filebase + "_CountsPlot.png", dpi=150, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=None)
plt.figure()
plt.title('Residuals plot')
plt.semilogx()
for i in range(Nbin):
try:
residual[i] = (obs[i] - total[i]) / total[i]
Dres[i] = (obs_err[i] / total[i])
except:
residual[i] = 0.
Dres[i] = 0.
if residual[i] == -1.:
residual[i] = 0.
ymin = min(residual) - max(Dres)
ymax = max(residual) + max(Dres)
plt.ylim(ymax = ymax, ymin = ymin)
plt.xlim(xmin = min(E)*0.3, xmax = max(E)*2)
plt.xlabel("E (MeV) ")
plt.ylabel("(counts-model)/model")
plt.errorbar(E,residual,xerr=err_E,yerr=Dres,fmt='o',color="red",ls='None',label="Data")
zero = np.zeros(2)
Ezero = np.array([1e-5, 1e10])
plt.plot(Ezero,zero,lw=1.5,ls='solid',color='black')
plt.tight_layout()
plt.savefig(filebase + "ResPlot.png", dpi=150, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=None)
os.system("rm " + imName)
image.close()
# def PlotFoldedLC(Time, TimeErr, Flux, FluxErr, tag="Flux (photon cm^{-2} s^{-1})"):
# _, tgraph, arrows = PlotLC(Time, TimeErr, Flux, FluxErr, tag)
# xmin = 0
# xmax = 1
# if max(FluxErr)==0:
# ymin = 0.
# ymax = max(Flux)*1.3
# else:
# ymin = np.min(min(Flux) - max(FluxErr) * 1.3, 0.)
# ymax = (max(Flux) + max(FluxErr)) * 1.3
# gh = ROOT.TH2F("ghflux", "", 80, xmin, xmax, 100, ymin, ymax)
# gh.SetStats(000)
# gh.SetXTitle("Orbital Phase")
# gh.SetYTitle(tag)
# return gh, tgraph, arrows
def GetDataPoints(config,pars,ignore_missing_bins=False):
"""Collect the data points/UL and generate a TGraph for the points
and a list of TArrow for the UL. All is SED format"""
#Preparation + declaration of arrays
arrows = []
NEbin = int(config['Ebin']['NumEnergyBins'])
lEmax = np.log10(float(config['energy']['emax']))
lEmin = np.log10(float(config['energy']['emin']))
Epoint = np.zeros(NEbin)
EpointErrp = np.zeros(NEbin)
EpointErrm = np.zeros(NEbin)
Fluxpoint = np.zeros(NEbin)
FluxpointErrp = np.zeros(NEbin)
FluxpointErrm = np.zeros(NEbin)
uplim = np.zeros(NEbin,dtype=int)
ener = np.logspace(lEmin, lEmax, NEbin + 1)
mes = Loggin.Message()
mes.info("Save Ebin results in ",pars.PlotName+".Ebin.dat")
dumpfile = open(pars.PlotName+".Ebin.dat",'w')
dumpfile.write("# Energy (MeV)\tEmin (MeV)\tEmax (MeV)\tE**2. dN/dE (erg.cm-2s-1)\tGaussianError\tMinosNegativeError\tMinosPositiveError\n")
from enrico.constants import EbinPath
for i in range(NEbin):#Loop over the energy bins
#E = int(pow(10, (np.log10(ener[i + 1]) + np.log10(ener[i])) / 2))
filename = (config['out'] + '/'+EbinPath+str(NEbin)+'/' + config['target']['name'] +
"_" + str(i) + ".conf")
try:#read the config file of each data points
CurConf = get_config(filename)
mes.info("Reading "+filename)
results = utils.ReadResult(CurConf)
except:
if not ignore_missing_bins:
mes.warning("cannot read the Results of energy bin "+ str(i))
continue
#fill the energy arrays
#Epoint[i] = results.get("Scale")
#if Epoint[i] in [results.get("Emin"),results.get("Emax")]:
#### <---- is this a mistake?? does not make much sense to me
Epoint[i] = 10**((np.log10(results.get("Emin"))+np.log10(results.get("Emax")))/2.)
#Epoint[i] = int(pow(10, (np.log10(ener[i + 1]) + np.log10(ener[i])) / 2))
EpointErrm[i] = Epoint[i] - results.get("Emin")
EpointErrp[i] = results.get("Emax") - Epoint[i]
dprefactor = 0
#Compute the flux or the UL (in SED format)
if 'Ulvalue' in results:
PrefUl = utils.Prefactor(results.get("Ulvalue"),results.get("Index"),
results.get("Emin"),results.get("Emax"),Epoint[i])
Fluxpoint[i] = MEV_TO_ERG * PrefUl * Epoint[i] ** 2
uplim[i] = 1
else : #Not an UL : compute points + errors
Fluxpoint[i] = MEV_TO_ERG * results.get("Prefactor") * Epoint[i] ** 2
dprefactor = results.get("dPrefactor")
try:
down = abs(results.get("dPrefactor-"))
up = results.get("dPrefactor+")
if down==0 or up ==0 :
mes.error("cannot get Error value")
FluxpointErrp[i] = MEV_TO_ERG * up * Epoint[i] ** 2
FluxpointErrm[i] = MEV_TO_ERG * down * Epoint[i] ** 2
except:
try:
err = MEV_TO_ERG * dprefactor * Epoint[i] ** 2
FluxpointErrp[i] = err
FluxpointErrm[i] = err
except:
pass
mes.info("Energy bins results")
print(("Energy = ",Epoint[i]))
#Save the data point in a ascii file
if 'Ulvalue' in results:
dumpfile.write(str(Epoint[i])+"\t"+str(results.get("Emin"))+"\t"+str( results.get("Emax"))+"\t"+str(Fluxpoint[i])+"\t0\t0\t0\n")
print(("E**2. dN/dE = ",Fluxpoint[i]))
else:
dumpfile.write(str(Epoint[i])+"\t"+str(results.get("Emin"))+"\t"+str( results.get("Emax"))+"\t"+str(Fluxpoint[i])+"\t"+str( MEV_TO_ERG * dprefactor * Epoint[i] ** 2)+"\t"+str(FluxpointErrm[i])+"\t"+str(FluxpointErrp[i])+"\n")
print(("E**2. dN/dE = ",Fluxpoint[i]," + ",FluxpointErrp[i]," - ",FluxpointErrm[i]))
dumpfile.close()
return Epoint, Fluxpoint, EpointErrm, EpointErrp, FluxpointErrm, FluxpointErrp, uplim
def plot_errorbar_withuls(x,xerrm,xerrp,y,yerrm,yerrp,uplim,bblocks=False):
""" plot an errorbar plot with upper limits. Optionally compute and draw bayesian blocks (bblocks) """
# plt.errorbar(Epoint, Fluxpoint, xerr=[EpointErrm, EpointErrp], yerr=[FluxpointErrm, FluxpointErrp],fmt='o',color='black',ls='None',uplims=uplim)
uplim = np.asarray(uplim,dtype=bool) # It is an array of 1 and 0s, needs to be a bool array.
# make sure that the arrays are numpy arrays and not lists.
x = np.asarray(x)
xerrm = np.asarray(xerrm)
xerrp = np.asarray(xerrp)
y = np.asarray(y)
yerrm = np.asarray(yerrm)
yerrp = np.asarray(yerrp)
# Get the strict upper limit (best fit value + error, then set the error to 0 and the lower error to 20% of the value)
y[uplim] += yerrp[uplim]
yerrm[uplim] = 0
yerrp[uplim] = 0
optimal_markersize = (0.5+4./(1.+np.log10(len(y))))
optimal_errorlinewidth = (0.2+2./(1.+4.*np.log10(len(y))))
# Plot the significant points
plt.errorbar(x[~uplim], y[~uplim],
xerr=[xerrm[~uplim], xerrp[~uplim]],
yerr=[yerrm[~uplim], yerrp[~uplim]],
lw=optimal_errorlinewidth,
fmt='o',ms=optimal_markersize,capsize=0,zorder=10,
color='black',ls='None',uplims=False,label='LAT data')
# Plot the upper limits. For some reason, matplotlib draws the arrows inverted for uplim and lolim [?]
# This is a known issue fixed in matplotlib 1.4: https://github.com/matplotlib/matplotlib/pull/2452
if LooseVersion(matplotlib.__version__) < LooseVersion("1.4.0"):
plt.errorbar(x[uplim], y[uplim],
xerr=[xerrm[uplim], xerrp[uplim]],
yerr=[yerrm[uplim], yerrp[uplim]],
fmt='o',markersize=0,capsize=0,zorder=-1,
lw=optimal_errorlinewidth,
color='0.50',ls='None',lolims=False)
plt.errorbar(x[uplim], 0.8*y[uplim],
yerr=[0.2*y[uplim], 0.2*y[uplim]],
fmt='o',markersize=0,capsize=optimal_markersize/1.5,zorder=-1,
lw=optimal_errorlinewidth,
color='0.50',ls='None',lolims=True)
else:
plt.errorbar(x[uplim], y[uplim],
xerr=[xerrm[uplim], xerrp[uplim]],
yerr=[yerrm[uplim], yerrp[uplim]],
lw=optimal_errorlinewidth,
fmt='o',markersize=0,capsize=0,zorder=-1,
color='0.50',ls='None',uplims=False)
plt.errorbar(x[uplim], y[uplim],
yerr=[0.2*y[uplim], 0.2*y[uplim]],
lw=optimal_errorlinewidth,
fmt='o',markersize=0,capsize=optimal_markersize/1.5,zorder=-1,
color='0.50',ls='None',uplims=True)
if bblocks and len(x[~uplim])>2:
yerr = 0.5*(yerrm+yerrp)
# Set the value and error for the uls.
yerr[uplim] = y[uplim] #min(y[yerr>0]+yerr[yerr>0])
y[uplim] = 0
edges = bayesian_blocks(x,y,yerr,fitness='measures',p0=0.5)
#edges = bayesian_blocks(x[yerr>0],y[yerr>0],yerr[yerr>0],fitness='measures',p0=0.1)
xvalues = 0.5*(edges[:-1]+edges[1:])
xerrors = 0.5*(edges[1:]-edges[:-1])
yvalues = []
yerrors = []
for k in range(len(edges)-1):
xmin,xmax = edges[k],edges[k+1]
filt = (x>=xmin)*(x<=xmax)*(yerr>0)
sum_inv_square = np.sum(1./yerr[filt]**2)
yvalues.append(np.sum(y[filt]/yerr[filt]**2)/sum_inv_square)
yerrors.append(1./np.sqrt(sum_inv_square))
yvalues = np.asarray(yvalues)
yerrors = np.asarray(yerrors)
# Plot the significant points
ystep = []
ystepmin = []
ystepmax = []
xstep = []
for k in range(len(xvalues)):
for _ in range(2):
ystep.append(yvalues[k]) # appended twice, once per block edge, to build the step outline
ystepmin.append(yvalues[k]-yerrors[k]) # lower edge of the error band
ystepmax.append(yvalues[k]+yerrors[k]) # upper edge of the error band
xstep.append(xvalues[k]-xerrors[k])
xstep.append(xvalues[k]+xerrors[k])
plt.step(xstep, ystep,
color='#d62728',zorder=-10,
ls='solid')
plt.fill_between(xstep, ystepmin, ystepmax,
color='#d62728',zorder=-10, alpha=0.5)
plt.errorbar(xvalues, yvalues,
xerr=xerrors,yerr=yerrors,
marker=None,ms=0,capsize=0,color='#d62728',zorder=-10,
ls='None',label='bayesian blocks')
plt.legend(loc=0,fontsize='small',numpoints=1)
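# Illustrative sketch (not part of the original module): exercising
# plot_errorbar_withuls() with synthetic numbers. In the pipeline the inputs
# come from GetDataPoints(); the values below are made up for demonstration.
def _demo_plot_errorbar_withuls():
    """Plot three detections and one upper limit on a fresh figure."""
    e = np.array([1e2, 1e3, 1e4, 1e5])            # bin centres in MeV
    de = 0.3 * e                                  # energy bin half-widths
    f = np.array([3e-11, 2e-11, 1e-11, 5e-12])    # E^2 dN/dE in erg cm^-2 s^-1
    df = 0.2 * f                                  # 20% flux errors
    ul = [0, 0, 0, 1]                             # last point is an upper limit
    plt.figure()
    plt.loglog()
    plot_errorbar_withuls(e, de, de, f, df, df, ul, bblocks=False)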
def plot_bayesianblocks(xmin, xmax, y, yerrm, yerrp, uplim):
# Set the value and error for the uls.
yerrm[uplim] = y[uplim]
yerrp[uplim] = y[uplim]
y[uplim] = 0.
xvalues = 0.5*(xmax+xmin)
xerrors = 0.5*(xmax-xmin)
# Plot the significant points
ystep = []
ystepmin = []
ystepmax = []
xstep = []
for k in range(len(xvalues)):
for _ in range(2):
ystep.append(y[k]) # appended twice, once per block edge, to build the step outline
ystepmin.append(y[k]-yerrm[k]) # lower edge of the error band
ystepmax.append(y[k]+yerrp[k]) # upper edge of the error band
xstep.append(xmin[k])
xstep.append(xmax[k])
plt.step(xstep, ystep,
color='#d62728',zorder=-10,
ls='solid')
plt.fill_between(xstep, ystepmin, ystepmax,
color='#d62728',zorder=-10, alpha=0.5)
plt.errorbar(xvalues, y,
xerr=xerrors,yerr=[yerrm, yerrp],
marker=None,ms=0,capsize=0,color='#d62728',zorder=-10,
ls='None')
def PlotSED(config,pars,ignore_missing_bins=False):
"""plot a nice SED with a butterfly and points"""
# Read the ascii file where the butterfly is stored
filebase = utils._SpecFileName(config)
lines = open(filebase + '.dat', 'r').readlines()
SED = []
E = []
Err = []
for i in range(len(lines) - 1):
words = lines[i + 1].split()
if float(words[0])<pars.Emax :
E.append(float(words[0]))
SED.append(float(words[1]))
Err.append(float(words[2]))
ilen = len(SED)
#From dN/dE to SED
Fluxp = np.array(SED)*np.exp(np.array(Err)/np.array(SED))
Fluxm = np.array(SED)*np.exp(-np.array(Err)/np.array(SED))
ErrorFlux = np.zeros(2 * ilen + 1)
ErrorE = np.zeros(2 * ilen + 1)
#Compute the butterfly and close it
for i in range(ilen):
ErrorFlux[i] = Fluxp[i]
ErrorE[i] = E[i]
for i in range(ilen):
ErrorFlux[ilen + i] = Fluxm[ilen - i - 1]
ErrorE[ilen + i] = E[ilen - i - 1]
ErrorFlux[-1] = Fluxp[0]
ErrorE[-1] = E[0]
#Actually make the plot
plt.figure()
plt.title(pars.PlotName.split("/")[-1].replace('_', r'\_'))
name = pars.PlotName.split("/")[-1]
plt.loglog()
plt.xlabel(r"Energy (MeV)")
plt.ylabel(r"$\mathrm{E^2\ dN/dE}\ \mathrm{(erg\ cm^{-2} s^{-1})}$")
plt.plot(E,SED,"-r",label='LAT model')
plt.plot(ErrorE,ErrorFlux,"-r")
#Plot points
NEbin = int(config['Ebin']['NumEnergyBins'])
if NEbin > 0:
Epoint, Fluxpoint, EpointErrm, EpointErrp, FluxpointErrm, FluxpointErrp, uplim = GetDataPoints(config,pars,ignore_missing_bins) #collect data points
plot_errorbar_withuls(Epoint,EpointErrm,EpointErrp,Fluxpoint,FluxpointErrm,FluxpointErrp,uplim)
#print uplim
#print FluxpointErrm
#print FluxpointErrp
#Set meaningful axes limits
xlim = plt.xlim()
ylim = plt.ylim()
xlim = (max([20,xlim[0]]),min([2e6,xlim[1]]))
ylim = (max([1e-14,ylim[0]]),min([1e-8,ylim[1]]))
plt.xlim(xlim)
plt.ylim(ylim)
# turn them into log10 scale
#xticks = plt.xticks()[0]
#xticklabels = np.array(np.log10(xticks),dtype=int)
#plt.xticks(xticks,xticklabels)
#plt.xlabel('$\mathrm{\log_{10}\mathbf{(Energy)} \\ \\ [MeV]}$')
plt.legend(fontsize='small',ncol=1,\
loc=3,numpoints=1)#,framealpha=0.75)
#Upper horizontal secondary axis with frequency
#Plt2 = plt.twiny()
#Plt2.set_xscale('log')
#Plt2.set_xlim(2.417990504024163e+20 *np.array(xlim))
#Plt2.set_xticklabels(np.array(np.log10(Plt2.get_xticks()),dtype=int))
#Plt2.set_xlabel('$\mathrm{\log_{10}\mathbf{(Frequency)} \\ \\ [Hz]}$')
#save the canvas
#plt.grid()
plt.tight_layout()
plt.savefig("%s.png" %filebase, dpi=150, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=None)
def PlotUL(pars,config,ULFlux,Index):
#Compute the SED
E = np.logspace(np.log10(pars.Emin), np.log10(pars.Emax), pars.N)
SED = MEV_TO_ERG * E ** 2 * (-Index+1)*ULFlux* np.power(E,-Index)/(np.power(pars.Emax,-Index+1)-np.power(pars.Emin,-Index+1))
#Actually make the plot
plt.xlabel(r"E [MeV]")
plt.ylabel(r"$\mathrm{E^2\ dN/dE}\ \mathrm{(erg\ cm^{-2} s^{-1})}$")
plt.loglog()
plt.plot(E,SED,"-",color='black')
# Plot the upper limits. For some reason, matplotlib draws the arrows inverted for uplim and lolim [?]
# This is a known issue fixed in matplotlib 1.4: https://github.com/matplotlib/matplotlib/pull/2452
if LooseVersion(matplotlib.__version__) < LooseVersion("1.4.0"):
plt.errorbar([E[0],E[-1]], [SED[0],SED[-1]], yerr=[SED[0]*0.8,SED[-1]*0.8],fmt='.',color='black',ls='None',lolims=[1,1])
else:
plt.errorbar([E[0],E[-1]], [SED[0],SED[-1]], yerr=[SED[0]*0.8,SED[-1]*0.8],fmt='.',color='black',ls='None',uplims=[1,1])
#save the plot
filebase = utils._SpecFileName(config)
plt.tight_layout()
plt.savefig(filebase + '.png', dpi=150, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=None)
def plot_sed_fromconfig(config,ignore_missing_bins=False):
config = get_config(config)
utils.mkdir_p(config["out"]+"/Spectrum")
srcname = config['target']['name']
Emin = config['energy']['emin']
Emax = config['energy']['emax']
filename = utils._SpecFileName(config)
Param = Params(srcname, Emin=Emin, Emax=Emax, PlotName=filename)
Result = utils.ReadResult(config)
# if the TS > ts limit plot the butterfly, if not draw UL
if Result["TS"]> config['UpperLimit']['TSlimit'] :
PlotSED(config,Param,ignore_missing_bins)
else :
try :
PlotUL(Param,config,Result['Ulvalue'],config['UpperLimit']['SpectralIndex'])
except :
print("Not able to plot an upper limit in a SED diagram. UL computed?")
|
bsd-3-clause
| 6,686,314,298,672,276,000
| 39.803514
| 238
| 0.565086
| false
| 3.093122
| true
| false
| false
|
auth0/auth0-python
|
auth0/v3/authentication/delegated.py
|
1
|
1118
|
from .base import AuthenticationBase
class Delegated(AuthenticationBase):
"""Delegated authentication endpoints.
Args:
domain (str): Your auth0 domain (e.g: username.auth0.com)
"""
def get_token(self, client_id, target, api_type, grant_type,
id_token=None, refresh_token=None, scope='openid'):
"""Obtain a delegation token.
"""
if id_token and refresh_token:
raise ValueError('Only one of id_token or refresh_token '
'can be None')
data = {
'client_id': client_id,
'grant_type': grant_type,
'target': target,
'scope': scope,
'api_type': api_type,
}
if id_token:
data.update({'id_token': id_token})
elif refresh_token:
data.update({'refresh_token': refresh_token})
else:
raise ValueError('Either id_token or refresh_token must '
'have a value')
return self.post('{}://{}/delegation'.format(self.protocol, self.domain), data=data)
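# Illustrative usage sketch (not part of the original module). The domain,
# client id and token below are placeholders; exactly one of id_token or
# refresh_token must be supplied, as enforced by get_token() above.
def _example_delegation_call():
    delegated = Delegated('my-tenant.auth0.com')
    return delegated.get_token(
        client_id='MY_CLIENT_ID',
        target='TARGET_CLIENT_ID',
        api_type='app',
        grant_type='urn:ietf:params:oauth:grant-type:jwt-bearer',
        id_token='AN_ID_TOKEN')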
|
mit
| -5,919,315,672,139,504,000
| 29.216216
| 92
| 0.538462
| false
| 4.171642
| false
| false
| false
|
maas/maas
|
src/maasserver/exceptions.py
|
1
|
6877
|
# Copyright 2012-2017 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Exceptions."""
__all__ = [
"ClusterUnavailable",
"MAASException",
"MAASAPIBadRequest",
"MAASAPIException",
"MAASAPINotFound",
"NodeStateViolation",
"NodeGroupMisconfiguration",
"NoScriptsFound",
"IteratorReusedError",
"PowerProblem",
"StaticIPAddressExhaustion",
"StaticIPAddressTypeClash",
"UnresolvableHost",
]
import http.client
import json
from django.core.exceptions import ValidationError
from django.http import HttpResponse, HttpResponseRedirect
class MAASException(Exception):
"""Base class for MAAS' exceptions."""
class CannotDeleteUserException(Exception):
"""User can't be deleted."""
class MAASAPIException(Exception):
"""Base class for MAAS' API exceptions.
:ivar api_error: The HTTP code that should be returned when this error
is raised in the API (defaults to 500: "Internal Server Error").
"""
api_error = int(http.client.INTERNAL_SERVER_ERROR)
def make_http_response(self):
"""Create an :class:`HttpResponse` representing this exception."""
encoding = "utf-8"
return HttpResponse(
status=self.api_error,
content=str(self).encode(encoding),
content_type="text/plain; charset=%s" % encoding,
)
class MAASAPIBadRequest(MAASAPIException):
api_error = int(http.client.BAD_REQUEST)
class MAASAPINotFound(MAASAPIException):
api_error = int(http.client.NOT_FOUND)
class MAASAPIForbidden(MAASAPIException):
api_error = int(http.client.FORBIDDEN)
class MAASAPIValidationError(MAASAPIBadRequest, ValidationError):
"""A validation error raised during a MAAS API request."""
def make_http_response(self):
"""Create an :class:`HttpResponse` representing this exception."""
content_type = b"application/json"
if hasattr(self, "error_dict"):
messages = json.dumps(self.message_dict)
elif len(self.messages) == 1:
messages = self.messages[0]
content_type = b"text/plain"
else:
messages = json.dumps(self.messages)
encoding = b"utf-8"
return HttpResponse(
status=self.api_error,
content=messages,
content_type=b"%s; charset=%s" % (content_type, encoding),
)
class Unauthorized(MAASAPIException):
"""HTTP error 401: Unauthorized. Login required."""
api_error = int(http.client.UNAUTHORIZED)
class NodeStateViolation(MAASAPIException):
"""Operation on node not possible given node's current state."""
api_error = int(http.client.CONFLICT)
class NodesNotAvailable(NodeStateViolation):
"""Requested node(s) are not available to be acquired."""
api_error = int(http.client.CONFLICT)
class Redirect(MAASAPIException):
"""Redirect. The exception message is the target URL."""
api_error = int(http.client.FOUND)
def make_http_response(self):
return HttpResponseRedirect(str(self))
class NodeGroupMisconfiguration(MAASAPIException):
"""Node Groups (aka Cluster Controllers) are misconfigured.
This might mean that more than one controller is marked as managing the
same network.
"""
api_error = int(http.client.CONFLICT)
class ClusterUnavailable(MAASAPIException):
"""A Cluster Controller is not available for RPC queries."""
api_error = int(http.client.SERVICE_UNAVAILABLE)
class IteratorReusedError(Exception):
"""Raise when a :class:`UseOnceIterator` gets reused."""
class StaticIPAddressExhaustion(MAASAPIException):
"""Raised when no more static IPs are available during allocation."""
api_error = int(http.client.SERVICE_UNAVAILABLE)
class IPAddressCheckFailed(MAASAPIException):
"""IP address allocation checks failed."""
api_error = int(http.client.SERVICE_UNAVAILABLE)
class StaticIPAddressUnavailable(MAASAPIException):
"""Raised when a requested IP is not available."""
api_error = int(http.client.NOT_FOUND)
class StaticIPAddressOutOfRange(MAASAPIException):
"""Raised when a requested IP is not in an acceptable range."""
api_error = int(http.client.FORBIDDEN)
class StaticIPAddressTypeClash(MAASAPIException):
"""Raised when trying to allocate an IP for a MAC where one of another
type already exists."""
api_error = int(http.client.CONFLICT)
class StaticIPAlreadyExistsForMACAddress(MAASAPIException):
"""Raised when trying to allocate a static IP for a non-node MAC
where a node with that MAC already exists."""
api_error = int(http.client.CONFLICT)
class StaticIPAddressConflict(MAASAPIException):
"""Raised when trying to allocate a static IP that doesn't belong to
the network the MAC address is connected to."""
api_error = int(http.client.CONFLICT)
class StaticIPAddressForbidden(MAASAPIException):
"""Raised when trying to allocate a static IP that belongs to a
dynamic range."""
api_error = int(http.client.CONFLICT)
class NodeActionError(MAASException):
"""Raised when there is an error performing a NodeAction."""
def __init__(self, error):
# Avoid circular imports.
from maasserver.clusterrpc.utils import get_error_message_for_exception
if isinstance(error, Exception):
super().__init__(get_error_message_for_exception(error))
else:
super().__init__(error)
class UnresolvableHost(MAASException):
"""Raised when a hostname can't be resolved to an IP address."""
class MissingBootImage(MAASException):
"""Raised when a boot image is expected to exists."""
class PreseedError(MAASException):
"""Raised when issue generating the preseed."""
class PowerProblem(MAASAPIException):
"""Raised when there's a problem with a power operation.
This could be a problem with parameters, a problem with the power
controller, or something else. The exception text will contain more
information.
"""
api_error = int(http.client.SERVICE_UNAVAILABLE)
class PodProblem(MAASAPIException):
"""Raised when there's a problem with a pod operation.
This could be a problem with parameters, a problem with the pod's
controller, or something else. The exception text will contain more
information.
"""
api_error = int(http.client.SERVICE_UNAVAILABLE)
class NoScriptsFound(MAASException):
"""Raised when no Scripts are found based on user input."""
class StorageClearProblem(MAASAPIException):
"""Raised when an issue occurs that prevents the clearing of a machine's
storage configuration."""
class NetworkingResetProblem(MAASException):
"""Raised when an issue occurs that prevents resetting networking configuration."""
|
agpl-3.0
| -2,348,278,307,039,550,500
| 26.842105
| 87
| 0.703359
| false
| 4.140277
| false
| false
| false
|
mscook/pyParaTools
|
ParaUtils.py
|
1
|
8999
|
"""Utility methods for paramagnetic observables """
import math
from numpy import *
def ZXZRot(A, B, G, scal=1.0):
"""
Builds the ZXZ rotation matrix given 3 Euler Angles. See:
http://mathworld.wolfram.com/EulerAngles.html
@param A : The (A)lpha angle
@type A : float
@param B : The (B)eta angle
@type B : float
@param G : The (G)amma angle
@type G : float
@param scal: (OPTIONAL) scale factor applied to the rotation matrix,
used for the X-tensor frame determination
@type scal : float
"""
rot = zeros((3,3))
ca = math.cos(math.radians(A))
cb = math.cos(math.radians(B))
cg = math.cos(math.radians(G))
sa = math.sin(math.radians(A))
sb = math.sin(math.radians(B))
sg = math.sin(math.radians(G))
rot[0][0] = (( cg * ca) - (cb * sa * sg))*scal
rot[0][1] = (( cg * sa) + (cb * ca * sg))*scal
rot[0][2] = (( sg * sb))*scal
rot[1][0] = ((-sg * ca) - (cb * sa * cg))*scal
rot[1][1] = ((-sg * sa) + (cb * ca * cg))*scal
rot[1][2] = (( cg * sb))*scal
rot[2][0] = (( sb * sa))*scal
rot[2][1] = ((-sb * ca))*scal
rot[2][2] = (cb)*scal
return rot
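# Illustrative check (not part of the original module): a proper rotation matrix
# is orthonormal, so R.R^T should equal the identity. The Euler angles below are
# arbitrary example values in degrees.
def _check_zxz_orthonormal(A=30.0, B=45.0, G=60.0):
    """Return True if ZXZRot(A, B, G) is orthonormal."""
    R = ZXZRot(A, B, G)
    return allclose(dot(R, R.T), eye(3))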
def ZYZRot(A, B, G, scal=1.0):
"""
Builds the ZYZ rotation matrix given 3 Euler Angles. See:
http://mathworld.wolfram.com/EulerAngles.html
@param A: The (A)lpha angle
@type A : float
@param B: The (B)eta angle
@type B : float
@param G: The (G)amma angle
@type G : float
@param scal: (OPTIONAL) scale factor applied to the rotation matrix,
used for the X-tensor frame determination
@type scal : float
"""
rot = zeros((3,3))
ca = math.cos(math.radians(A))
cb = math.cos(math.radians(B))
cg = math.cos(math.radians(G))
sa = math.sin(math.radians(A))
sb = math.sin(math.radians(B))
sg = math.sin(math.radians(G))
rot[0][0] = ((-sg * sa) + (cb * ca * cg))*scal
rot[0][1] = (( sg * ca) + (cb * sa * cg))*scal
rot[0][2] = (( -cg * sb))*scal
rot[1][0] = ((-cg * sa) - (cb * ca * sg))*scal
rot[1][1] = ((cg * ca) - (cb * sa * sg))*scal
rot[1][2] = (( sg * sb))*scal
rot[2][0] = (( sb * ca))*scal
rot[2][1] = ((sb * sa))*scal
rot[2][2] = (cb)*scal
return rot
def RotX90():
"""
Builds the rotation matrix for a 90 deg rotation about X:
[[1, 0, 0], [0, 0, 1], [0, -1, 0]]
"""
rot = zeros((3,3))
rot[0][0] = 1.0
rot[1][2] = 1.0
rot[2][1] = -1.0
return rot
def RotY90():
"""
Builds the rotation matrix for a 90 deg rotation about Y:
[[0, 0, -1], [0, 1, 0], [1, 0, 0]]
"""
rot = zeros((3,3))
rot[0][2] = -1.0
rot[1][1] = 1.0
rot[2][0] = 1.0
return rot
def RotZ90():
"""
Builds the rotation matrix for a 90 deg rotation about Z:
[[0, 1, 0], [-1, 0, 0], [0, 0, 1]]
"""
rot = zeros((3,3))
rot[0][1] = 1.0
rot[1][0] = -1.0
rot[2][2] = 1.0
return rot
def correctRofAngles(cosv, sinv):
#TODO: Check that this is correct
if (cosv <= math.pi/2.0):
if (sinv < 0.0):
sinv = sinv + 2*math.pi
return sinv
else:
return sinv
else:
if(sinv > 0.0):
return cosv
else:
return -1*(cosv) +2*math.pi
def ABGFromRotMatrixZYZ(rotMat):
#TODO: Check these are correct!
#TODO: Add the corresponding ZXZ method
b_c = math.acos(rotMat[2,2])
a_c = math.acos(rotMat[2,0]/math.sin(b_c))
g_c = math.acos(-1*rotMat[0,2]/math.sin(b_c))
a_s = math.asin(rotMat[2,1]/math.sin(b_c))
g_s = math.asin(rotMat[1,2]/math.sin(b_c))
aE = correctRofAngles(a_c, a_s)
bE = b_c
gE = correctRofAngles(g_c, g_s)
return aE, bE, gE
def FromVVU(AxorRh):
"""
Convert from van Vleck units (vvu = m^3 / 3.77x10^-35)
@param AxorRh: Axial or Rhombic component
@type AxorRh : float
"""
return AxorRh/(1./((12*math.pi))*10000)
def ToVVU(AxorRh):
"""
Convert to van Vleck units (vvu = m^3 / 3.77x10^-35)
@param AxorRh: Axial or Rhombic component
@type AxorRh : float
"""
return AxorRh*(1./((12*math.pi))*10000)
def FixAngle(angle):
"""
To fix up the angles after optimization as they are not [0:2pi] bound
@param angle: An Euler angle determined from the optimization
@type angle: float
"""
while angle > 0.0:
angle = angle - 360.0
while angle < 0.0:
angle = angle + 360.0
return angle
def SwapVals(val1, val2):
temp = 0.0
temp = val1
val1 = val2
val2 = temp
return val1, val2
def lookupMGR(spin_type):
"""
Return the gyromagnetic ratios for the coupling.
See: http://nmrwiki.org/wiki/index.php?title=Gyromagnetic_ratio
"""
#TODO: These need to be checked
PI2 = 2*math.pi
H1mgr = (PI2*42.576)*1e6
C13mgr = (PI2*10.705)*1e6
Nmgr = []
N14mgr = (PI2*3.0766)*1e6
N15mgr = (PI2*-4.315)*1e6
Nmgr.append(N14mgr)
Nmgr.append(N15mgr)
O17mgr = (PI2*-5.7716)*1e6
mgr = {'H':H1mgr, 'C':C13mgr, 'N':Nmgr, 'O':O17mgr}
return mgr[spin_type]
def rdcScal(S, g1, g2, B0, temp):
"""
Scaling constant.for RDC calculations
"""
#TODO: These need to be checked
hbar = 1.05457148e-34
kboltz = 1.3806503e-23
scal = -S*g1*g2*hbar*B0*B0 / (8*15*math.pi*math.pi*kboltz*temp)
return scal*0.01
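# Illustrative sketch (not part of the original module): combining lookupMGR()
# and rdcScal() for a 15N-1H coupling. The order parameter, field strength (in
# Tesla) and temperature below are assumed example values only.
def _example_nh_rdc_scaling(S=1.0, B0=18.8, temp=298.0):
    """Return the RDC scaling constant for an N-H pair at the given field."""
    g_h = lookupMGR('H')
    g_n = lookupMGR('N')[1]  # second entry of the [14N, 15N] list is 15N
    return rdcScal(S, g_h, g_n, B0, temp)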
def FitSummary(soln,cov,info,mesg,success, p0, y_meas, tof):
scal = 1.0
if tof == 2 or tof == 3:
#The effective strength of the X-tensor is 1/2ved in monomer fits
scal = 2.0
f_type = { \
0:'Standard X-tensor optimization', \
1:'Standard X-tensor optimization (fixed metal position)', \
2:'X-tensor optimization to dimer', \
3:'X-tensor optimization to dimer (fixed metal position)', \
4:'2 X-tensors to a monomer', \
5:'2 X-tensors (1 fixed metal site) to a monomer', \
6:'2 X-tensors (2 fixed metal sites) to a monomer' }
print 80*'-'
print "Fitting Results: ", f_type[tof]
print 80*'-'
if success==1:
print "We have converged to a minima"
else:
print "We have failed to converge"
print "REASON:", mesg
# calculate final chi square
chisq=sum(info["fvec"]*info["fvec"])
dof=len(y_meas)-len(p0)
# chisq, sqrt(chisq/dof) agrees with gnuplot
print "* Converged with chi squared: ",chisq
print "* Degrees of freedom, dof: ", dof
print "* RMS of residuals (i.e. sqrt(chisq/dof)): ", sqrt(chisq/dof)
print "* Reduced chisq (i.e. variance of residuals): ", chisq/dof
print
# uncertainties are calculated as per gnuplot, "fixing" the result
# for non unit values of the reduced chisq.
# values at min match gnuplot
print "Fitted parameters at minimum, with 68% C.I.:"
print "%s%7s%11s%13s" % ("Param", "Init", "Final", "Error")
#NOTE: The confidence intervals may not be correct due to conversion to VVU etc.
if tof == 0 or tof == 2 or tof ==4:
for i,pmin in enumerate(soln):
if i == 3 or i == 4 or i == 11 or i == 12:
#NOTE: The scal factor is dimer specific
print "%3i %7s %13.4f +/- %8f"%(i+1,FromVVU(p0[i]),scal*(FromVVU(pmin)),scal*(FromVVU(sqrt(cov[i,i])*sqrt(chisq/dof))))
elif i == 5 or i == 6 or i ==7 or i == 13 or i == 14 or i == 15:
print "%3i %7s %13.4f +/- %8f"%(i+1,FixAngle(p0[i]),FixAngle(pmin),sqrt(cov[i,i])*sqrt(chisq/dof))
else:
print "%3i %7s %13.4f +/- %8f"%(i+1,p0[i],pmin,sqrt(cov[i,i])*sqrt(chisq/dof))
if tof == 1 or tof == 3 or tof == 5:
for i,pmin in enumerate(soln):
if i == 0 or i == 1 or i == 8 or i == 9:
#NOTE: The scal factor is dimer specific
print "%3i %7s %13.4f +/- %8f"%(i+1,FromVVU(p0[i]),scal*(FromVVU(pmin)),scal*(FromVVU(sqrt(cov[i,i])*sqrt(chisq/dof))))
elif i == 2 or i == 3 or i ==4 or i == 10 or i == 11 or i == 12:
print "%3i %7s %13.4f +/- %8f"%(i+1,FixAngle(p0[i]),FixAngle(pmin),sqrt(cov[i,i])*sqrt(chisq/dof))
else:
print "%3i %7s %13.4f +/- %8f"%(i+1,p0[i],pmin,sqrt(cov[i,i])*sqrt(chisq/dof))
if tof == 6:
for i,pmin in enumerate(soln):
if i == 0 or i == 1 or i == 5 or i == 6:
#NOTE: The scal factor is dimer specific
print "%3i %7s %13.4f +/- %8f"%(i+1,FromVVU(p0[i]),scal*(FromVVU(pmin)),scal*(FromVVU(sqrt(cov[i,i])*sqrt(chisq/dof))))
elif i == 2 or i == 3 or i == 4 or i == 7 or i == 8 or i == 9:
print "%3i %7s %13.4f +/- %8f"%(i+1,FixAngle(p0[i]),FixAngle(pmin),sqrt(cov[i,i])*sqrt(chisq/dof))
else:
print "%3i %7s %13.4f +/- %8f"%(i+1,p0[i],pmin,sqrt(cov[i,i])*sqrt(chisq/dof))
print 80*'-'
print
return chisq/dof
|
apache-2.0
| -5,894,408,450,203,394,000
| 32.206642
| 137
| 0.536615
| false
| 2.737755
| false
| false
| false
|
nathanielvarona/airflow
|
airflow/utils/log/json_formatter.py
|
1
|
2206
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""json_formatter module stores all related to ElasticSearch specific logger classes"""
import json
import logging
from airflow.utils.helpers import merge_dicts
class JSONFormatter(logging.Formatter):
"""JSONFormatter instances are used to convert a log record to json."""
# pylint: disable=too-many-arguments
def __init__(self, fmt=None, datefmt=None, style='%', json_fields=None, extras=None):
super().__init__(fmt, datefmt, style)
if extras is None:
extras = {}
if json_fields is None:
json_fields = []
self.json_fields = json_fields
self.extras = extras
def usesTime(self):
return self.json_fields.count('asctime') > 0
def format(self, record):
super().format(record)
record_dict = {label: getattr(record, label, None) for label in self.json_fields}
if "message" in self.json_fields:
msg = record_dict["message"]
if record.exc_text:
if msg[-1:] != "\n":
msg = msg + "\n"
msg = msg + record.exc_text
if record.stack_info:
if msg[-1:] != "\n":
msg = msg + "\n"
msg = msg + self.formatStack(record.stack_info)
record_dict["message"] = msg
merged_record = merge_dicts(record_dict, self.extras)
return json.dumps(merged_record)
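# Illustrative usage sketch (not part of the original module): attaching the
# formatter to a plain stdlib handler. The chosen json_fields and the static
# "component" extra are example values only.
def example_configure_json_logger(name="example"):
    """Return a logger whose records are emitted as JSON strings."""
    handler = logging.StreamHandler()
    handler.setFormatter(JSONFormatter(json_fields=["asctime", "levelname", "message"],
                                       extras={"component": "scheduler"}))
    logger = logging.getLogger(name)
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    return logger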
|
apache-2.0
| 8,733,983,074,481,377,000
| 37.034483
| 89
| 0.644152
| false
| 4.146617
| false
| false
| false
|
pgroudas/pants
|
src/python/pants/option/option_value_container.py
|
1
|
4590
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.option.ranked_value import RankedValue
class OptionValueContainer(object):
"""A container for option values.
Implements the following functionality:
1) Attribute forwarding.
An attribute can be registered as forwarding to another attribute, and attempts
to read the source attribute's value will be read from the target attribute.
This is necessary so we can qualify registered options by the scope that registered them,
to allow re-registration in inner scopes. This is best explained by example:
Say that in global scope we register an option with two names: [-f, --foo], which writes its
value to the attribute foo. Then in the compile scope we re-register --foo but leave -f alone.
The re-registered --foo will also write to attribute foo. So now -f, which in the compile
scope is unrelated to --foo, can still stomp on its value.
With attribute forwarding we can have the global scope option write to _DEFAULT_foo__, and
the re-registered option to _COMPILE_foo__, and then have the 'f' and 'foo' attributes
forward, appropriately.
Note that only reads are forwarded. The target of the forward must be written to directly.
If the source attribute is set directly, this overrides any forwarding.
2) Value ranking.
Attribute values can be ranked, so that a given attribute's value can only be changed if
the new value has at least as high a rank as the old value. This allows an option value in
an outer scope to override that option's value in an inner scope, when the outer scope's
value comes from a higher ranked source (e.g., the outer value comes from an env var and
the inner one from config).
See ranked_value.py for more details.
Note that this container is suitable for passing as the namespace argument to argparse's
parse_args() method.
"""
def __init__(self):
self._forwardings = {} # src attribute name -> target attribute name.
def add_forwardings(self, forwardings):
"""Add attribute forwardings.
Will overwrite existing forwardings with the same source attributes.
:param forwardings: A map of source attribute name -> attribute to read source's value from.
"""
self._forwardings.update(forwardings)
def update(self, attrs):
"""Set attr values on this object from the data in the attrs dict."""
for k, v in attrs.items():
setattr(self, k, v)
def get(self, key, default=None):
# Support dict-like dynamic access. See also __getitem__ below.
if hasattr(self, key):
return getattr(self, key)
else:
return default
def __setattr__(self, key, value):
if key == '_forwardings':
return super(OptionValueContainer, self).__setattr__(key, value)
if hasattr(self, key):
existing_value = getattr(self, key)
if isinstance(existing_value, RankedValue):
existing_rank = existing_value.rank
else:
# Values without rank are assumed to be flag values set by argparse.
existing_rank = RankedValue.FLAG
else:
existing_rank = RankedValue.NONE
if isinstance(value, RankedValue):
new_rank = value.rank
else:
# Values without rank are assumed to be flag values set by argparse.
new_rank = RankedValue.FLAG
if new_rank >= existing_rank:
# We set values from outer scopes before values from inner scopes, so
# in case of equal rank we overwrite. That way the inner scope value wins.
super(OptionValueContainer, self).__setattr__(key, value)
def __getitem__(self, key):
# Support natural dynamic access, options[key_var] is more idiomatic than
# getattr(option, key_var).
return getattr(self, key)
def __getattr__(self, key):
# Note: Called only if regular attribute lookup fails, so accesses
# to non-forwarded attributes will be handled the normal way.
if key == '_forwardings':
# In case we get called in copy/deepcopy, which don't invoke the ctor.
raise AttributeError
if key not in self._forwardings:
raise AttributeError('No such forwarded attribute: {}'.format(key))
val = getattr(self, self._forwardings[key])
if isinstance(val, RankedValue):
return val.value
else:
return val
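# Illustrative sketch (not part of the original module): attribute forwarding in
# action. The attribute names follow the pattern described in the class docstring
# but are otherwise arbitrary example values; plain (non-RankedValue) values are
# treated as flag-ranked, as in __setattr__ above.
def example_forwarding():
  values = OptionValueContainer()
  values.update({'_DEFAULT_foo__': 42})
  values.add_forwardings({'foo': '_DEFAULT_foo__'})
  # All three access styles resolve through the forwarding to 42.
  return values.foo, values['foo'], values.get('foo')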
|
apache-2.0
| 3,330,725,136,526,867,000
| 38.568966
| 99
| 0.698475
| false
| 4.309859
| false
| false
| false
|
rdo-management/ironic-discoverd
|
ironic_discoverd_ramdisk/discover.py
|
1
|
8663
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import json
import logging
import os
import subprocess
import tarfile
import tempfile
import netifaces
import requests
LOG = logging.getLogger('ironic-discoverd-ramdisk')
def try_call(*cmd, **kwargs):
strip = kwargs.pop('strip', True)
kwargs['stdout'] = subprocess.PIPE
kwargs['stderr'] = subprocess.PIPE
try:
p = subprocess.Popen(cmd, **kwargs)
out, err = p.communicate()
except EnvironmentError as exc:
LOG.warn('command %s failed: %s', cmd, exc)
return
if p.returncode:
LOG.warn('command %s returned failure status %d:\n%s', cmd,
p.returncode, err.strip())
else:
return out.strip() if strip else out
def try_shell(sh, **kwargs):
strip = kwargs.pop('strip', True)
kwargs['stdout'] = subprocess.PIPE
kwargs['stderr'] = subprocess.PIPE
kwargs['shell'] = True
p = subprocess.Popen([sh], **kwargs)
out, err = p.communicate()
if p.returncode:
LOG.warn('shell script "%s" failed with code %d:\n%s', sh,
p.returncode, err.strip())
else:
return out.strip() if strip else out
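# Illustrative sketch (not part of the original module): both helpers return the
# command output on success and None on failure, so callers simply branch on
# None. 'uname -r' is just an example command.
def _example_kernel_version():
    version = try_call('uname', '-r')
    if version is None:
        version = try_shell('uname -r')
    return version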
class AccumulatedFailure(object):
"""Object accumulated failures without raising exception."""
def __init__(self):
self._failures = []
def add(self, fail, *fmt):
"""Add failure with optional formatting."""
if fmt:
fail = fail % fmt
LOG.error('%s', fail)
self._failures.append(fail)
def get_error(self):
"""Get error string or None."""
if not self._failures:
return
msg = ('The following errors were encountered during '
'hardware discovery:\n%s'
% '\n'.join('* %s' % item for item in self._failures))
return msg
def __nonzero__(self):
return bool(self._failures)
__bool__ = __nonzero__
def __repr__(self): # pragma: no cover
# This is for tests
if self:
return '<%s: %s>' % (self.__class__.__name__,
', '.join(self._failures))
else:
return '<%s: success>' % self.__class__.__name__
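# Illustrative sketch (not part of the original module): failures are collected
# instead of raised, and get_error() folds them into a single report string.
# The failure texts below are example values.
def _example_accumulated_failure():
    failures = AccumulatedFailure()
    failures.add('no network interfaces found')
    failures.add('failed to get %s information', 'RAM')
    return bool(failures), failures.get_error()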
def discover_basic_properties(data, args):
# These properties might not be present; we don't count that as a failure
data['boot_interface'] = args.bootif
data['ipmi_address'] = try_shell(
"ipmitool lan print | grep -e 'IP Address [^S]' | awk '{ print $4 }'")
LOG.info('BMC IP address: %s', data['ipmi_address'])
def discover_network_interfaces(data, failures):
data.setdefault('interfaces', {})
for iface in netifaces.interfaces():
if iface.startswith('lo'):
LOG.info('ignoring local network interface %s', iface)
continue
LOG.debug('found network interface %s', iface)
addrs = netifaces.ifaddresses(iface)
try:
mac = addrs[netifaces.AF_LINK][0]['addr']
except (KeyError, IndexError):
LOG.info('no link information for interface %s in %s',
iface, addrs)
continue
try:
ip = addrs[netifaces.AF_INET][0]['addr']
except (KeyError, IndexError):
LOG.info('no IP address for interface %s', iface)
ip = None
data['interfaces'][iface] = {'mac': mac, 'ip': ip}
if data['interfaces']:
LOG.info('network interfaces: %s', data['interfaces'])
else:
failures.add('no network interfaces found')
def discover_scheduling_properties(data, failures):
scripts = [
('cpus', "grep processor /proc/cpuinfo | wc -l"),
('cpu_arch', "lscpu | grep Architecture | awk '{ print $2 }'"),
('local_gb', "fdisk -l | grep Disk | awk '{print $5}' | head -n 1"),
]
for key, script in scripts:
data[key] = try_shell(script)
LOG.info('value for "%s" field is %s', key, data[key])
ram_info = try_shell(
"dmidecode --type memory | grep Size | awk '{ print $2; }'")
if ram_info:
total_ram = 0
for ram_record in ram_info.split('\n'):
try:
total_ram += int(ram_record)
except ValueError:
pass
data['memory_mb'] = total_ram
LOG.info('total RAM: %s MiB', total_ram)
else:
failures.add('failed to get RAM information')
for key in ('cpus', 'local_gb', 'memory_mb'):
try:
data[key] = int(data[key])
except (KeyError, ValueError, TypeError):
LOG.warn('value for %s is missing or malformed: %s',
key, data.get(key))
data[key] = None
# FIXME(dtantsur): -1 is required to give Ironic some spacing for
# partitioning and may be removed later
if data['local_gb']:
data['local_gb'] = data['local_gb'] / 1024 / 1024 / 1024 - 1
if data['local_gb'] < 1:
LOG.warn('local_gb is less than 1 GiB')
data['local_gb'] = None
def discover_additional_properties(args, data, failures):
hw_args = ('--benchmark', 'cpu', 'disk', 'mem') if args.benchmark else ()
hw_json = try_call('hardware-detect', *hw_args)
if hw_json:
try:
data['data'] = json.loads(hw_json)
except ValueError:
LOG.error('JSON value returned from hardware-detect cannot be '
'decoded:\n%s', hw_json)
failures.add('unable to get extended hardware properties')
else:
failures.add('unable to get extended hardware properties')
def discover_block_devices(data):
block_devices = try_shell(
"lsblk -no TYPE,SERIAL | grep disk | awk '{print $2}'")
if not block_devices:
LOG.warn('unable to get block devices')
return
serials = [item for item in block_devices.split('\n') if item.strip()]
data['block_devices'] = {'serials': serials}
def discover_hardware(args, data, failures):
try_call('modprobe', 'ipmi_msghandler')
try_call('modprobe', 'ipmi_devintf')
try_call('modprobe', 'ipmi_si')
discover_basic_properties(data, args)
discover_network_interfaces(data, failures)
discover_scheduling_properties(data, failures)
if args.use_hardware_detect:
discover_additional_properties(args, data, failures)
discover_block_devices(data)
def call_discoverd(args, data, failures):
data['error'] = failures.get_error()
LOG.info('posting collected data to %s', args.callback_url)
resp = requests.post(args.callback_url, data=json.dumps(data))
if resp.status_code >= 400:
LOG.error('discoverd error %d: %s',
resp.status_code,
resp.content.decode('utf-8'))
resp.raise_for_status()
return resp.json()
def collect_logs(args):
files = {args.log_file} | set(args.system_log_file or ())
with tempfile.TemporaryFile() as fp:
with tarfile.open(fileobj=fp, mode='w:gz') as tar:
with tempfile.NamedTemporaryFile() as jrnl_fp:
if try_shell("journalctl > '%s'" % jrnl_fp.name) is not None:
tar.add(jrnl_fp.name, arcname='journal')
else:
LOG.warn('failed to get system journal')
for fname in files:
if os.path.exists(fname):
tar.add(fname)
else:
LOG.warn('log file %s does not exist', fname)
fp.seek(0)
return base64.b64encode(fp.read())
def setup_ipmi_credentials(resp):
user, password = resp['ipmi_username'], resp['ipmi_password']
if try_call('ipmitool', 'user', 'set', 'name', '2', user) is None:
raise RuntimeError('failed to set IPMI user name to %s' % user)
if try_call('ipmitool', 'user', 'set', 'password', '2', password) is None:
raise RuntimeError('failed to set IPMI password')
try_call('ipmitool', 'user', 'enable', '2')
try_call('ipmitool', 'channel', 'setaccess', '1', '2',
'link=on', 'ipmi=on', 'callin=on', 'privilege=4')
def fork_and_serve_logs(args):
pass # TODO(dtantsur): implement
|
apache-2.0
| -1,589,015,766,572,319,500
| 32.191571
| 78
| 0.587441
| false
| 3.796231
| false
| false
| false
|
0x90/pyroute2
|
pyroute2/netlink/nl80211/__init__.py
|
2
|
26383
|
'''
NL80211 module
================
TODO
'''
from pyroute2.common import map_namespace
from pyroute2.netlink import genlmsg
from pyroute2.netlink.generic import GenericNetlinkSocket
from pyroute2.netlink.nlsocket import Marshal
from pyroute2.netlink import nla
from pyroute2.netlink import nla_base
# import pdb
import struct
from pyroute2.common import hexdump
# nl80211 commands
NL80211_CMD_UNSPEC = 0
NL80211_CMD_GET_WIPHY = 1
NL80211_CMD_SET_WIPHY = 2
NL80211_CMD_NEW_WIPHY = 3
NL80211_CMD_DEL_WIPHY = 4
NL80211_CMD_GET_INTERFACE = 5
NL80211_CMD_SET_INTERFACE = 6
NL80211_CMD_NEW_INTERFACE = 7
NL80211_CMD_DEL_INTERFACE = 8
NL80211_CMD_GET_KEY = 9
NL80211_CMD_SET_KEY = 10
NL80211_CMD_NEW_KEY = 11
NL80211_CMD_DEL_KEY = 12
NL80211_CMD_GET_BEACON = 13
NL80211_CMD_SET_BEACON = 14
NL80211_CMD_START_AP = 15
NL80211_CMD_NEW_BEACON = NL80211_CMD_START_AP
NL80211_CMD_STOP_AP = 16
NL80211_CMD_DEL_BEACON = NL80211_CMD_STOP_AP
NL80211_CMD_GET_STATION = 17
NL80211_CMD_SET_STATION = 18
NL80211_CMD_NEW_STATION = 19
NL80211_CMD_DEL_STATION = 20
NL80211_CMD_GET_MPATH = 21
NL80211_CMD_SET_MPATH = 22
NL80211_CMD_NEW_MPATH = 23
NL80211_CMD_DEL_MPATH = 24
NL80211_CMD_SET_BSS = 25
NL80211_CMD_SET_REG = 26
NL80211_CMD_REQ_SET_REG = 27
NL80211_CMD_GET_MESH_CONFIG = 28
NL80211_CMD_SET_MESH_CONFIG = 29
NL80211_CMD_SET_MGMT_EXTRA_IE = 30
NL80211_CMD_GET_REG = 31
NL80211_CMD_GET_SCAN = 32
NL80211_CMD_TRIGGER_SCAN = 33
NL80211_CMD_NEW_SCAN_RESULTS = 34
NL80211_CMD_SCAN_ABORTED = 35
NL80211_CMD_REG_CHANGE = 36
NL80211_CMD_AUTHENTICATE = 37
NL80211_CMD_ASSOCIATE = 38
NL80211_CMD_DEAUTHENTICATE = 39
NL80211_CMD_DISASSOCIATE = 40
NL80211_CMD_MICHAEL_MIC_FAILURE = 41
NL80211_CMD_REG_BEACON_HINT = 42
NL80211_CMD_JOIN_IBSS = 43
NL80211_CMD_LEAVE_IBSS = 44
NL80211_CMD_TESTMODE = 45
NL80211_CMD_CONNECT = 46
NL80211_CMD_ROAM = 47
NL80211_CMD_DISCONNECT = 48
NL80211_CMD_SET_WIPHY_NETNS = 49
NL80211_CMD_GET_SURVEY = 50
NL80211_CMD_NEW_SURVEY_RESULTS = 51
NL80211_CMD_SET_PMKSA = 52
NL80211_CMD_DEL_PMKSA = 53
NL80211_CMD_FLUSH_PMKSA = 54
NL80211_CMD_REMAIN_ON_CHANNEL = 55
NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL = 56
NL80211_CMD_SET_TX_BITRATE_MASK = 57
NL80211_CMD_REGISTER_FRAME = 58
NL80211_CMD_REGISTER_ACTION = NL80211_CMD_REGISTER_FRAME
NL80211_CMD_FRAME = 59
NL80211_CMD_ACTION = NL80211_CMD_FRAME
NL80211_CMD_FRAME_TX_STATUS = 60
NL80211_CMD_ACTION_TX_STATUS = NL80211_CMD_FRAME_TX_STATUS
NL80211_CMD_SET_POWER_SAVE = 61
NL80211_CMD_GET_POWER_SAVE = 62
NL80211_CMD_SET_CQM = 63
NL80211_CMD_NOTIFY_CQM = 64
NL80211_CMD_SET_CHANNEL = 65
NL80211_CMD_SET_WDS_PEER = 66
NL80211_CMD_FRAME_WAIT_CANCEL = 67
NL80211_CMD_JOIN_MESH = 68
NL80211_CMD_LEAVE_MESH = 69
NL80211_CMD_UNPROT_DEAUTHENTICATE = 70
NL80211_CMD_UNPROT_DISASSOCIATE = 71
NL80211_CMD_NEW_PEER_CANDIDATE = 72
NL80211_CMD_GET_WOWLAN = 73
NL80211_CMD_SET_WOWLAN = 74
NL80211_CMD_START_SCHED_SCAN = 75
NL80211_CMD_STOP_SCHED_SCAN = 76
NL80211_CMD_SCHED_SCAN_RESULTS = 77
NL80211_CMD_SCHED_SCAN_STOPPED = 78
NL80211_CMD_SET_REKEY_OFFLOAD = 79
NL80211_CMD_PMKSA_CANDIDATE = 80
NL80211_CMD_TDLS_OPER = 81
NL80211_CMD_TDLS_MGMT = 82
NL80211_CMD_UNEXPECTED_FRAME = 83
NL80211_CMD_PROBE_CLIENT = 84
NL80211_CMD_REGISTER_BEACONS = 85
NL80211_CMD_UNEXPECTED_4ADDR_FRAME = 86
NL80211_CMD_SET_NOACK_MAP = 87
NL80211_CMD_CH_SWITCH_NOTIFY = 88
NL80211_CMD_START_P2P_DEVICE = 89
NL80211_CMD_STOP_P2P_DEVICE = 90
NL80211_CMD_CONN_FAILED = 91
NL80211_CMD_SET_MCAST_RATE = 92
NL80211_CMD_SET_MAC_ACL = 93
NL80211_CMD_RADAR_DETECT = 94
NL80211_CMD_GET_PROTOCOL_FEATURES = 95
NL80211_CMD_UPDATE_FT_IES = 96
NL80211_CMD_FT_EVENT = 97
NL80211_CMD_CRIT_PROTOCOL_START = 98
NL80211_CMD_CRIT_PROTOCOL_STOP = 99
NL80211_CMD_GET_COALESCE = 100
NL80211_CMD_SET_COALESCE = 101
NL80211_CMD_CHANNEL_SWITCH = 102
NL80211_CMD_VENDOR = 103
NL80211_CMD_SET_QOS_MAP = 104
NL80211_CMD_ADD_TX_TS = 105
NL80211_CMD_DEL_TX_TS = 106
NL80211_CMD_GET_MPP = 107
NL80211_CMD_JOIN_OCB = 108
NL80211_CMD_LEAVE_OCB = 109
NL80211_CMD_CH_SWITCH_STARTED_NOTIFY = 110
NL80211_CMD_TDLS_CHANNEL_SWITCH = 111
NL80211_CMD_TDLS_CANCEL_CHANNEL_SWITCH = 112
NL80211_CMD_WIPHY_REG_CHANGE = 113
NL80211_CMD_MAX = NL80211_CMD_WIPHY_REG_CHANGE
(NL80211_NAMES, NL80211_VALUES) = map_namespace('NL80211_CMD_', globals())
NL80211_BSS_ELEMENTS_SSID = 0
NL80211_BSS_ELEMENTS_SUPPORTED_RATES = 1
NL80211_BSS_ELEMENTS_CHANNEL = 3
NL80211_BSS_ELEMENTS_VENDOR = 221
BSS_MEMBERSHIP_SELECTOR_HT_PHY = 127
BSS_MEMBERSHIP_SELECTOR_VHT_PHY = 126
class nl80211cmd(genlmsg):
nla_map = (('NL80211_ATTR_UNSPEC', 'none'),
('NL80211_ATTR_WIPHY', 'hex'),
('NL80211_ATTR_WIPHY_NAME', 'asciiz'),
('NL80211_ATTR_IFINDEX', 'uint32'),
('NL80211_ATTR_IFNAME', 'asciiz'),
('NL80211_ATTR_IFTYPE', 'hex'),
('NL80211_ATTR_MAC', 'l2addr'),
('NL80211_ATTR_KEY_DATA', 'hex'),
('NL80211_ATTR_KEY_IDX', 'hex'),
('NL80211_ATTR_KEY_CIPHER', 'uint32'),
('NL80211_ATTR_KEY_SEQ', 'hex'),
('NL80211_ATTR_KEY_DEFAULT', 'hex'),
('NL80211_ATTR_BEACON_INTERVAL', 'hex'),
('NL80211_ATTR_DTIM_PERIOD', 'hex'),
('NL80211_ATTR_BEACON_HEAD', 'hex'),
('NL80211_ATTR_BEACON_TAIL', 'hex'),
('NL80211_ATTR_STA_AID', 'hex'),
('NL80211_ATTR_STA_FLAGS', 'hex'),
('NL80211_ATTR_STA_LISTEN_INTERVAL', 'hex'),
('NL80211_ATTR_STA_SUPPORTED_RATES', 'hex'),
('NL80211_ATTR_STA_VLAN', 'hex'),
('NL80211_ATTR_STA_INFO', 'hex'),
('NL80211_ATTR_WIPHY_BANDS', 'hex'),
('NL80211_ATTR_MNTR_FLAGS', 'hex'),
('NL80211_ATTR_MESH_ID', 'hex'),
('NL80211_ATTR_STA_PLINK_ACTION', 'hex'),
('NL80211_ATTR_MPATH_NEXT_HOP', 'hex'),
('NL80211_ATTR_MPATH_INFO', 'hex'),
('NL80211_ATTR_BSS_CTS_PROT', 'hex'),
('NL80211_ATTR_BSS_SHORT_PREAMBLE', 'hex'),
('NL80211_ATTR_BSS_SHORT_SLOT_TIME', 'hex'),
('NL80211_ATTR_HT_CAPABILITY', 'hex'),
('NL80211_ATTR_SUPPORTED_IFTYPES', 'hex'),
('NL80211_ATTR_REG_ALPHA2', 'hex'),
('NL80211_ATTR_REG_RULES', 'hex'),
('NL80211_ATTR_MESH_CONFIG', 'hex'),
('NL80211_ATTR_BSS_BASIC_RATES', 'hex'),
('NL80211_ATTR_WIPHY_TXQ_PARAMS', 'hex'),
('NL80211_ATTR_WIPHY_FREQ', 'hex'),
('NL80211_ATTR_WIPHY_CHANNEL_TYPE', 'hex'),
('NL80211_ATTR_KEY_DEFAULT_MGMT', 'hex'),
('NL80211_ATTR_MGMT_SUBTYPE', 'hex'),
('NL80211_ATTR_IE', 'hex'),
('NL80211_ATTR_MAX_NUM_SCAN_SSIDS', 'hex'),
('NL80211_ATTR_SCAN_FREQUENCIES', 'hex'),
('NL80211_ATTR_SCAN_SSIDS', 'hex'),
('NL80211_ATTR_GENERATION', 'hex'),
('NL80211_ATTR_BSS', 'bss'),
('NL80211_ATTR_REG_INITIATOR', 'hex'),
('NL80211_ATTR_REG_TYPE', 'hex'),
('NL80211_ATTR_SUPPORTED_COMMANDS', 'hex'),
('NL80211_ATTR_FRAME', 'hex'),
('NL80211_ATTR_SSID', 'hex'),
('NL80211_ATTR_AUTH_TYPE', 'hex'),
('NL80211_ATTR_REASON_CODE', 'hex'),
('NL80211_ATTR_KEY_TYPE', 'hex'),
('NL80211_ATTR_MAX_SCAN_IE_LEN', 'hex'),
('NL80211_ATTR_CIPHER_SUITES', 'hex'),
('NL80211_ATTR_FREQ_BEFORE', 'hex'),
('NL80211_ATTR_FREQ_AFTER', 'hex'),
('NL80211_ATTR_FREQ_FIXED', 'hex'),
('NL80211_ATTR_WIPHY_RETRY_SHORT', 'hex'),
('NL80211_ATTR_WIPHY_RETRY_LONG', 'hex'),
('NL80211_ATTR_WIPHY_FRAG_THRESHOLD', 'hex'),
('NL80211_ATTR_WIPHY_RTS_THRESHOLD', 'hex'),
('NL80211_ATTR_TIMED_OUT', 'hex'),
('NL80211_ATTR_USE_MFP', 'hex'),
('NL80211_ATTR_STA_FLAGS2', 'hex'),
('NL80211_ATTR_CONTROL_PORT', 'hex'),
('NL80211_ATTR_TESTDATA', 'hex'),
('NL80211_ATTR_PRIVACY', 'hex'),
('NL80211_ATTR_DISCONNECTED_BY_AP', 'hex'),
('NL80211_ATTR_STATUS_CODE', 'hex'),
('NL80211_ATTR_CIPHER_SUITES_PAIRWISE', 'hex'),
('NL80211_ATTR_CIPHER_SUITE_GROUP', 'hex'),
('NL80211_ATTR_WPA_VERSIONS', 'hex'),
('NL80211_ATTR_AKM_SUITES', 'hex'),
('NL80211_ATTR_REQ_IE', 'hex'),
('NL80211_ATTR_RESP_IE', 'hex'),
('NL80211_ATTR_PREV_BSSID', 'hex'),
('NL80211_ATTR_KEY', 'hex'),
('NL80211_ATTR_KEYS', 'hex'),
('NL80211_ATTR_PID', 'hex'),
('NL80211_ATTR_4ADDR', 'hex'),
('NL80211_ATTR_SURVEY_INFO', 'hex'),
('NL80211_ATTR_PMKID', 'hex'),
('NL80211_ATTR_MAX_NUM_PMKIDS', 'hex'),
('NL80211_ATTR_DURATION', 'hex'),
('NL80211_ATTR_COOKIE', 'hex'),
('NL80211_ATTR_WIPHY_COVERAGE_CLASS', 'hex'),
('NL80211_ATTR_TX_RATES', 'hex'),
('NL80211_ATTR_FRAME_MATCH', 'hex'),
('NL80211_ATTR_ACK', 'hex'),
('NL80211_ATTR_PS_STATE', 'hex'),
('NL80211_ATTR_CQM', 'hex'),
('NL80211_ATTR_LOCAL_STATE_CHANGE', 'hex'),
('NL80211_ATTR_AP_ISOLATE', 'hex'),
('NL80211_ATTR_WIPHY_TX_POWER_SETTING', 'hex'),
('NL80211_ATTR_WIPHY_TX_POWER_LEVEL', 'hex'),
('NL80211_ATTR_TX_FRAME_TYPES', 'hex'),
('NL80211_ATTR_RX_FRAME_TYPES', 'hex'),
('NL80211_ATTR_FRAME_TYPE', 'hex'),
('NL80211_ATTR_CONTROL_PORT_ETHERTYPE', 'hex'),
('NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT', 'hex'),
('NL80211_ATTR_SUPPORT_IBSS_RSN', 'hex'),
('NL80211_ATTR_WIPHY_ANTENNA_TX', 'hex'),
('NL80211_ATTR_WIPHY_ANTENNA_RX', 'hex'),
('NL80211_ATTR_MCAST_RATE', 'hex'),
('NL80211_ATTR_OFFCHANNEL_TX_OK', 'hex'),
('NL80211_ATTR_BSS_HT_OPMODE', 'hex'),
('NL80211_ATTR_KEY_DEFAULT_TYPES', 'hex'),
('NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION', 'hex'),
('NL80211_ATTR_MESH_SETUP', 'hex'),
('NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX', 'hex'),
('NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX', 'hex'),
('NL80211_ATTR_SUPPORT_MESH_AUTH', 'hex'),
('NL80211_ATTR_STA_PLINK_STATE', 'hex'),
('NL80211_ATTR_WOWLAN_TRIGGERS', 'hex'),
('NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED', 'hex'),
('NL80211_ATTR_SCHED_SCAN_INTERVAL', 'hex'),
('NL80211_ATTR_INTERFACE_COMBINATIONS', 'hex'),
('NL80211_ATTR_SOFTWARE_IFTYPES', 'hex'),
('NL80211_ATTR_REKEY_DATA', 'hex'),
('NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS', 'hex'),
('NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN', 'hex'),
('NL80211_ATTR_SCAN_SUPP_RATES', 'hex'),
('NL80211_ATTR_HIDDEN_SSID', 'hex'),
('NL80211_ATTR_IE_PROBE_RESP', 'hex'),
('NL80211_ATTR_IE_ASSOC_RESP', 'hex'),
('NL80211_ATTR_STA_WME', 'hex'),
('NL80211_ATTR_SUPPORT_AP_UAPSD', 'hex'),
('NL80211_ATTR_ROAM_SUPPORT', 'hex'),
('NL80211_ATTR_SCHED_SCAN_MATCH', 'hex'),
('NL80211_ATTR_MAX_MATCH_SETS', 'hex'),
('NL80211_ATTR_PMKSA_CANDIDATE', 'hex'),
('NL80211_ATTR_TX_NO_CCK_RATE', 'hex'),
('NL80211_ATTR_TDLS_ACTION', 'hex'),
('NL80211_ATTR_TDLS_DIALOG_TOKEN', 'hex'),
('NL80211_ATTR_TDLS_OPERATION', 'hex'),
('NL80211_ATTR_TDLS_SUPPORT', 'hex'),
('NL80211_ATTR_TDLS_EXTERNAL_SETUP', 'hex'),
('NL80211_ATTR_DEVICE_AP_SME', 'hex'),
('NL80211_ATTR_DONT_WAIT_FOR_ACK', 'hex'),
('NL80211_ATTR_FEATURE_FLAGS', 'hex'),
('NL80211_ATTR_PROBE_RESP_OFFLOAD', 'hex'),
('NL80211_ATTR_PROBE_RESP', 'hex'),
('NL80211_ATTR_DFS_REGION', 'hex'),
('NL80211_ATTR_DISABLE_HT', 'hex'),
('NL80211_ATTR_HT_CAPABILITY_MASK', 'hex'),
('NL80211_ATTR_NOACK_MAP', 'hex'),
('NL80211_ATTR_INACTIVITY_TIMEOUT', 'hex'),
('NL80211_ATTR_RX_SIGNAL_DBM', 'hex'),
('NL80211_ATTR_BG_SCAN_PERIOD', 'hex'),
('NL80211_ATTR_WDEV', 'uint32'),
('NL80211_ATTR_USER_REG_HINT_TYPE', 'hex'),
('NL80211_ATTR_CONN_FAILED_REASON', 'hex'),
('NL80211_ATTR_SAE_DATA', 'hex'),
('NL80211_ATTR_VHT_CAPABILITY', 'hex'),
('NL80211_ATTR_SCAN_FLAGS', 'hex'),
('NL80211_ATTR_CHANNEL_WIDTH', 'uint32'),
('NL80211_ATTR_CENTER_FREQ1', 'hex'),
('NL80211_ATTR_CENTER_FREQ2', 'hex'),
('NL80211_ATTR_P2P_CTWINDOW', 'hex'),
('NL80211_ATTR_P2P_OPPPS', 'hex'),
('NL80211_ATTR_LOCAL_MESH_POWER_MODE', 'hex'),
('NL80211_ATTR_ACL_POLICY', 'hex'),
('NL80211_ATTR_MAC_ADDRS', 'hex'),
('NL80211_ATTR_MAC_ACL_MAX', 'hex'),
('NL80211_ATTR_RADAR_EVENT', 'hex'),
('NL80211_ATTR_EXT_CAPA', 'hex'),
('NL80211_ATTR_EXT_CAPA_MASK', 'hex'),
('NL80211_ATTR_STA_CAPABILITY', 'hex'),
('NL80211_ATTR_STA_EXT_CAPABILITY', 'hex'),
('NL80211_ATTR_PROTOCOL_FEATURES', 'hex'),
('NL80211_ATTR_SPLIT_WIPHY_DUMP', 'hex'),
('NL80211_ATTR_DISABLE_VHT', 'hex'),
('NL80211_ATTR_VHT_CAPABILITY_MASK', 'hex'),
('NL80211_ATTR_MDID', 'hex'),
('NL80211_ATTR_IE_RIC', 'hex'),
('NL80211_ATTR_CRIT_PROT_ID', 'hex'),
('NL80211_ATTR_MAX_CRIT_PROT_DURATION', 'hex'),
('NL80211_ATTR_PEER_AID', 'hex'),
('NL80211_ATTR_COALESCE_RULE', 'hex'),
('NL80211_ATTR_CH_SWITCH_COUNT', 'hex'),
('NL80211_ATTR_CH_SWITCH_BLOCK_TX', 'hex'),
('NL80211_ATTR_CSA_IES', 'hex'),
('NL80211_ATTR_CSA_C_OFF_BEACON', 'hex'),
('NL80211_ATTR_CSA_C_OFF_PRESP', 'hex'),
('NL80211_ATTR_RXMGMT_FLAGS', 'hex'),
('NL80211_ATTR_STA_SUPPORTED_CHANNELS', 'hex'),
('NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES', 'hex'),
('NL80211_ATTR_HANDLE_DFS', 'hex'),
('NL80211_ATTR_SUPPORT_5_MHZ', 'hex'),
('NL80211_ATTR_SUPPORT_10_MHZ', 'hex'),
('NL80211_ATTR_OPMODE_NOTIF', 'hex'),
('NL80211_ATTR_VENDOR_ID', 'hex'),
('NL80211_ATTR_VENDOR_SUBCMD', 'hex'),
('NL80211_ATTR_VENDOR_DATA', 'hex'),
('NL80211_ATTR_VENDOR_EVENTS', 'hex'),
('NL80211_ATTR_QOS_MAP', 'hex'),
('NL80211_ATTR_MAC_HINT', 'hex'),
('NL80211_ATTR_WIPHY_FREQ_HINT', 'hex'),
('NL80211_ATTR_MAX_AP_ASSOC_STA', 'hex'),
('NL80211_ATTR_TDLS_PEER_CAPABILITY', 'hex'),
('NL80211_ATTR_SOCKET_OWNER', 'hex'),
('NL80211_ATTR_CSA_C_OFFSETS_TX', 'hex'),
('NL80211_ATTR_MAX_CSA_COUNTERS', 'hex'),
('NL80211_ATTR_TDLS_INITIATOR', 'hex'),
('NL80211_ATTR_USE_RRM', 'hex'),
('NL80211_ATTR_WIPHY_DYN_ACK', 'hex'),
('NL80211_ATTR_TSID', 'hex'),
('NL80211_ATTR_USER_PRIO', 'hex'),
('NL80211_ATTR_ADMITTED_TIME', 'hex'),
('NL80211_ATTR_SMPS_MODE', 'hex'),
('NL80211_ATTR_OPER_CLASS', 'hex'),
('NL80211_ATTR_MAC_MASK', 'hex'),
('NL80211_ATTR_WIPHY_SELF_MANAGED_REG', 'hex'),
('NUM_NL80211_ATTR', 'hex'))
class bss(nla):
class elementsBinary(nla_base):
def binary_supported_rates(self, rawdata):
# pdb.set_trace()
string = ""
for byteRaw in rawdata:
(byte,) = struct.unpack("B", byteRaw)
r = byte & 0x7f
if r == BSS_MEMBERSHIP_SELECTOR_VHT_PHY and byte & 0x80:
string += "VHT"
elif r == BSS_MEMBERSHIP_SELECTOR_HT_PHY and byte & 0x80:
string += "HT"
else:
string += "%d.%d" % (r / 2, 5 * (r & 1))
string += "%s " % ("*" if byte & 0x80 else "")
return string
def binary_vendor(self, rawdata):
'''
Extract vendor data
'''
vendor = {}
# pdb.set_trace()
size = len(rawdata)
# if len > 4 and rawdata[0] == ms_oui[0]
# and rawdata[1] == ms_oui[1] and rawdata[2] == ms_oui[2]
if size < 3:
vendor["VENDOR_NAME"] = "Vendor specific: <too short data:"
+ hexdump(rawdata)
return vendor
def decode_nlas(self):
return
def decode(self):
nla_base.decode(self)
self.value = {}
init = self.buf.tell()
while (self.buf.tell()-init) < (self.length-4):
(msg_type, length) = struct.unpack('BB', self.buf.read(2))
data = self.buf.read(length)
if msg_type == NL80211_BSS_ELEMENTS_SSID:
self.value["SSID"] = data
if msg_type == NL80211_BSS_ELEMENTS_SUPPORTED_RATES:
supported_rates = self.binary_supported_rates(data)
self.value["SUPPORTED_RATES"] = supported_rates
if msg_type == NL80211_BSS_ELEMENTS_CHANNEL:
(channel,) = struct.unpack("B", data[0])
self.value["CHANNEL"] = channel
if msg_type == NL80211_BSS_ELEMENTS_VENDOR:
self.binary_vendor(data)
# if catch == 0:
# self.value["NL80211_BSS_ELEMENTS_UNKNOWN"+str(msg_type)]=hexdump(data)
self.buf.seek(init)
# self.value["NL80211_BSS_ELEMENTS_HEXDUMP"] =
# hexdump(self.buf.read(self.length))
self.buf.seek(init)
prefix = 'NL80211_BSS_'
nla_map = (('NL80211_BSS_UNSPEC', 'none'),
('NL80211_BSS_BSSID', 'hex'),
('NL80211_BSS_FREQUENCY', 'uint32'),
('NL80211_BSS_TSF', 'uint64'),
('NL80211_BSS_BEACON_INTERVAL', 'uint16'),
('NL80211_BSS_CAPABILITY', 'uint8'),
('NL80211_BSS_INFORMATION_ELEMENTS', 'elementsBinary'),
('NL80211_BSS_SIGNAL_MBM', 'uint32'),
('NL80211_BSS_STATUS', 'uint32'),
('NL80211_BSS_SEEN_MS_AGO', 'uint32'),
('NL80211_BSS_BEACON_IES', 'hex'),
('NL80211_BSS_CHAN_WIDTH', 'uint32'),
('NL80211_BSS_BEACON_TSF', 'uint64')
)
class MarshalNl80211(Marshal):
msg_map = {NL80211_CMD_UNSPEC: nl80211cmd,
NL80211_CMD_GET_WIPHY: nl80211cmd,
NL80211_CMD_SET_WIPHY: nl80211cmd,
NL80211_CMD_NEW_WIPHY: nl80211cmd,
NL80211_CMD_DEL_WIPHY: nl80211cmd,
NL80211_CMD_GET_INTERFACE: nl80211cmd,
NL80211_CMD_SET_INTERFACE: nl80211cmd,
NL80211_CMD_NEW_INTERFACE: nl80211cmd,
NL80211_CMD_DEL_INTERFACE: nl80211cmd,
NL80211_CMD_GET_KEY: nl80211cmd,
NL80211_CMD_SET_KEY: nl80211cmd,
NL80211_CMD_NEW_KEY: nl80211cmd,
NL80211_CMD_DEL_KEY: nl80211cmd,
NL80211_CMD_GET_BEACON: nl80211cmd,
NL80211_CMD_SET_BEACON: nl80211cmd,
NL80211_CMD_START_AP: nl80211cmd,
NL80211_CMD_NEW_BEACON: nl80211cmd,
NL80211_CMD_STOP_AP: nl80211cmd,
NL80211_CMD_DEL_BEACON: nl80211cmd,
NL80211_CMD_GET_STATION: nl80211cmd,
NL80211_CMD_SET_STATION: nl80211cmd,
NL80211_CMD_NEW_STATION: nl80211cmd,
NL80211_CMD_DEL_STATION: nl80211cmd,
NL80211_CMD_GET_MPATH: nl80211cmd,
NL80211_CMD_SET_MPATH: nl80211cmd,
NL80211_CMD_NEW_MPATH: nl80211cmd,
NL80211_CMD_DEL_MPATH: nl80211cmd,
NL80211_CMD_SET_BSS: nl80211cmd,
NL80211_CMD_SET_REG: nl80211cmd,
NL80211_CMD_REQ_SET_REG: nl80211cmd,
NL80211_CMD_GET_MESH_CONFIG: nl80211cmd,
NL80211_CMD_SET_MESH_CONFIG: nl80211cmd,
NL80211_CMD_SET_MGMT_EXTRA_IE: nl80211cmd,
NL80211_CMD_GET_REG: nl80211cmd,
NL80211_CMD_GET_SCAN: nl80211cmd,
NL80211_CMD_TRIGGER_SCAN: nl80211cmd,
NL80211_CMD_NEW_SCAN_RESULTS: nl80211cmd,
NL80211_CMD_SCAN_ABORTED: nl80211cmd,
NL80211_CMD_REG_CHANGE: nl80211cmd,
NL80211_CMD_AUTHENTICATE: nl80211cmd,
NL80211_CMD_ASSOCIATE: nl80211cmd,
NL80211_CMD_DEAUTHENTICATE: nl80211cmd,
NL80211_CMD_DISASSOCIATE: nl80211cmd,
NL80211_CMD_MICHAEL_MIC_FAILURE: nl80211cmd,
NL80211_CMD_REG_BEACON_HINT: nl80211cmd,
NL80211_CMD_JOIN_IBSS: nl80211cmd,
NL80211_CMD_LEAVE_IBSS: nl80211cmd,
NL80211_CMD_TESTMODE: nl80211cmd,
NL80211_CMD_CONNECT: nl80211cmd,
NL80211_CMD_ROAM: nl80211cmd,
NL80211_CMD_DISCONNECT: nl80211cmd,
NL80211_CMD_SET_WIPHY_NETNS: nl80211cmd,
NL80211_CMD_GET_SURVEY: nl80211cmd,
NL80211_CMD_NEW_SURVEY_RESULTS: nl80211cmd,
NL80211_CMD_SET_PMKSA: nl80211cmd,
NL80211_CMD_DEL_PMKSA: nl80211cmd,
NL80211_CMD_FLUSH_PMKSA: nl80211cmd,
NL80211_CMD_REMAIN_ON_CHANNEL: nl80211cmd,
NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL: nl80211cmd,
NL80211_CMD_SET_TX_BITRATE_MASK: nl80211cmd,
NL80211_CMD_REGISTER_FRAME: nl80211cmd,
NL80211_CMD_REGISTER_ACTION: nl80211cmd,
NL80211_CMD_FRAME: nl80211cmd,
NL80211_CMD_ACTION: nl80211cmd,
NL80211_CMD_FRAME_TX_STATUS: nl80211cmd,
NL80211_CMD_ACTION_TX_STATUS: nl80211cmd,
NL80211_CMD_SET_POWER_SAVE: nl80211cmd,
NL80211_CMD_GET_POWER_SAVE: nl80211cmd,
NL80211_CMD_SET_CQM: nl80211cmd,
NL80211_CMD_NOTIFY_CQM: nl80211cmd,
NL80211_CMD_SET_CHANNEL: nl80211cmd,
NL80211_CMD_SET_WDS_PEER: nl80211cmd,
NL80211_CMD_FRAME_WAIT_CANCEL: nl80211cmd,
NL80211_CMD_JOIN_MESH: nl80211cmd,
NL80211_CMD_LEAVE_MESH: nl80211cmd,
NL80211_CMD_UNPROT_DEAUTHENTICATE: nl80211cmd,
NL80211_CMD_UNPROT_DISASSOCIATE: nl80211cmd,
NL80211_CMD_NEW_PEER_CANDIDATE: nl80211cmd,
NL80211_CMD_GET_WOWLAN: nl80211cmd,
NL80211_CMD_SET_WOWLAN: nl80211cmd,
NL80211_CMD_START_SCHED_SCAN: nl80211cmd,
NL80211_CMD_STOP_SCHED_SCAN: nl80211cmd,
NL80211_CMD_SCHED_SCAN_RESULTS: nl80211cmd,
NL80211_CMD_SCHED_SCAN_STOPPED: nl80211cmd,
NL80211_CMD_SET_REKEY_OFFLOAD: nl80211cmd,
NL80211_CMD_PMKSA_CANDIDATE: nl80211cmd,
NL80211_CMD_TDLS_OPER: nl80211cmd,
NL80211_CMD_TDLS_MGMT: nl80211cmd,
NL80211_CMD_UNEXPECTED_FRAME: nl80211cmd,
NL80211_CMD_PROBE_CLIENT: nl80211cmd,
NL80211_CMD_REGISTER_BEACONS: nl80211cmd,
NL80211_CMD_UNEXPECTED_4ADDR_FRAME: nl80211cmd,
NL80211_CMD_SET_NOACK_MAP: nl80211cmd,
NL80211_CMD_CH_SWITCH_NOTIFY: nl80211cmd,
NL80211_CMD_START_P2P_DEVICE: nl80211cmd,
NL80211_CMD_STOP_P2P_DEVICE: nl80211cmd,
NL80211_CMD_CONN_FAILED: nl80211cmd,
NL80211_CMD_SET_MCAST_RATE: nl80211cmd,
NL80211_CMD_SET_MAC_ACL: nl80211cmd,
NL80211_CMD_RADAR_DETECT: nl80211cmd,
NL80211_CMD_GET_PROTOCOL_FEATURES: nl80211cmd,
NL80211_CMD_UPDATE_FT_IES: nl80211cmd,
NL80211_CMD_FT_EVENT: nl80211cmd,
NL80211_CMD_CRIT_PROTOCOL_START: nl80211cmd,
NL80211_CMD_CRIT_PROTOCOL_STOP: nl80211cmd,
NL80211_CMD_GET_COALESCE: nl80211cmd,
NL80211_CMD_SET_COALESCE: nl80211cmd,
NL80211_CMD_CHANNEL_SWITCH: nl80211cmd,
NL80211_CMD_VENDOR: nl80211cmd,
NL80211_CMD_SET_QOS_MAP: nl80211cmd,
NL80211_CMD_ADD_TX_TS: nl80211cmd,
NL80211_CMD_DEL_TX_TS: nl80211cmd,
NL80211_CMD_GET_MPP: nl80211cmd,
NL80211_CMD_JOIN_OCB: nl80211cmd,
NL80211_CMD_LEAVE_OCB: nl80211cmd,
NL80211_CMD_CH_SWITCH_STARTED_NOTIFY: nl80211cmd,
NL80211_CMD_TDLS_CHANNEL_SWITCH: nl80211cmd,
NL80211_CMD_TDLS_CANCEL_CHANNEL_SWITCH: nl80211cmd,
NL80211_CMD_WIPHY_REG_CHANGE: nl80211cmd}
def fix_message(self, msg):
try:
msg['event'] = NL80211_VALUES[msg['cmd']]
except Exception:
pass
class NL80211(GenericNetlinkSocket):
def __init__(self):
GenericNetlinkSocket.__init__(self)
self.marshal = MarshalNl80211()
def bind(self, groups=0, async=False):
GenericNetlinkSocket.bind(self, 'nl80211', nl80211cmd,
groups, None, async)
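# Illustrative sketch (not part of the original module): building a
# NL80211_CMD_GET_INTERFACE request. Sending it would go through the generic
# netlink machinery inherited from GenericNetlinkSocket (e.g. a dump request);
# the interface index used here is an arbitrary example value.
def _example_get_interface_msg(ifindex=2):
    msg = nl80211cmd()
    msg['cmd'] = NL80211_CMD_GET_INTERFACE
    msg['version'] = 0
    msg['attrs'] = [['NL80211_ATTR_IFINDEX', ifindex]]
    return msg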
|
apache-2.0
| -3,365,363,244,516,462,000
| 43.118729
| 95
| 0.544479
| false
| 2.992967
| false
| false
| false
|
arksu/a2client
|
etc/blender_exporter/export_a1.py
|
1
|
20410
|
# coding=utf-8
import struct
import bpy
import bmesh
from bpy_extras.io_utils import ExportHelper
import mathutils
import os.path
import math
#===========================================================================
# Custom exception class
#===========================================================================
class Error( Exception ):
def __init__(self, message):
self.message = message
bind_pose = dict()
def run(fpath, markerFilter, scaleFactor, initFrame, do_mesh, do_skeleton, do_anims):
print ("start a1 export...")
file = open(fpath, 'bw')
    # jump to the chosen initial frame so the skeleton pose is sampled there
goBack = bpy.context.scene.frame_current
bpy.context.scene.frame_set(initFrame)
# try export by udk exporter
arm, mesh = find_armature_and_mesh()
if (do_mesh):
file.write(struct.pack('<b', 1))
udk_parse_mesh(mesh, file)
else:
file.write(struct.pack('<b', 0))
correctionMatrix = mathutils.Matrix()
if (len(bpy.data.armatures) > 0 and do_skeleton):
armature = bpy.data.armatures[arm.name]
file.write(struct.pack('<b', 1)) # skeleton flag
write_skeleton(file, armature, correctionMatrix)
if (do_anims):
file.write(struct.pack('<b', 1)) # anim flag
write_all_anims(file, markerFilter, fpath, arm)
else:
file.write(struct.pack('<b', 0)) # anim flag
else:
file.write(struct.pack('<b', 0)) # skeleton flag
file.write(struct.pack('<b', 0)) # anim flag
file.close()
print ("a1 export done")
bpy.context.scene.frame_set(goBack)
return {'FINISHED'}
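# Editor's note: a hedged example of driving run() from Blender's Python
# console; the output path and marker prefix below are invented.
#
#     run("/tmp/character.a1",   # output file
#         "anim",                # timeline-marker prefix, e.g. "anim_walk_start"
#         1.0,                   # scaleFactor (not referenced in the body above)
#         0,                     # initFrame used to sample the bind pose
#         True, True, True)      # export mesh, skeleton and animations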
def udk_parse_mesh(mesh, file):
option_clamp_uv = True
#bpy.ops.object.mode_set(mode='OBJECT')
#error ? on commands for select object?
print("Mesh object:", mesh.name)
scene = bpy.context.scene
for i in scene.objects: i.select = False # deselect all objects
scene.objects.active = mesh
setmesh = mesh
mesh = triangulate_mesh(mesh)
#bpy.context.scene.objects.unlink(setmesh)
print("FACES----:",len(mesh.data.tessfaces))
discarded_face_count = 0
vertex_groups = mesh.vertex_groups
write_string(file, "a1mesh")
file.write(struct.pack('<I', len(mesh.data.tessfaces)))
if (mesh.parent):
matrix = mesh.parent.matrix_world * mesh.matrix_local
else:
matrix = mesh.matrix_local
for face in mesh.data.tessfaces:
if len(face.vertices) != 3:
raise Error("Non-triangular face (%i)" % len(face.vertices))
if not is_1d_face(face, mesh.data):
face_index = face.index
has_uv = False
face_uv = None
if len(mesh.data.uv_textures) > 0:
has_uv = True
uv_layer = mesh.data.tessface_uv_textures.active
face_uv = uv_layer.data[face_index]
#size(data) is number of texture faces. Each face has UVs
#print("DATA face uv: ",len(faceUV.uv), " >> ",(faceUV.uv[0][0]))
for i in range(3):
vert_index = face.vertices[i]
vert = mesh.data.vertices[vert_index]
#assumes 3 UVs Per face (for now)
if (has_uv):
if len(face_uv.uv) != 3:
print("WARNING: face has more or less than 3 UV coordinates - writing 0,0...")
uv = [0.0, 0.0]
else:
uv = [face_uv.uv[i][0],face_uv.uv[i][1]] #OR bottom works better # 24 for cube
else:
#print ("No UVs?")
uv = [0.0, 0.0]
#flip V coordinate because UEd requires it and DOESN'T flip it on its own like it
#does with the mesh Y coordinates. this is otherwise known as MAGIC-2
uv[1] = 1.0 - uv[1]
# clamp UV coords if udk_option_clamp_uv is True
if option_clamp_uv:
if (uv[0] > 1):
uv[0] = 1
if (uv[0] < 0):
uv[0] = 0
if (uv[1] > 1):
uv[1] = 1
if (uv[1] < 0):
uv[1] = 0
#matrix = mathutils.Matrix()
co = matrix * vert.co
# no = mesh.matrix_local * vert.normal
no = vert.normal
no.normalize()
file.write(struct.pack('<fff', co[0], co[1], co[2]))
file.write(struct.pack('<fff', no[0], no[1], no[2]))
#weight_layer = False
if (len(vert.groups) > 0):
file.write(struct.pack('<H', len(vert.groups)))
for vgroup in vert.groups:
wg = vertex_groups[vgroup.group]
vertex_weight = vgroup.weight
wname = wg.name
write_string(file, wname)
file.write(struct.pack('<f', vertex_weight))
else:
# no weight data
file.write(struct.pack('<H', 0))
file.write(struct.pack('<ff', uv[0], uv[1]))
#END if not is_1d_face(current_face, mesh.data)
else:
discarded_face_count += 1
print ("discarded_face_count ", discarded_face_count)
bpy.ops.object.mode_set(mode='OBJECT') # OBJECT mode
mesh.parent = None # unparent to avoid phantom links
bpy.context.scene.objects.unlink(mesh) # unlink
# arm - armature object
def write_skeleton(file, armature, correctionMatrix):
# global orientationTweak
print("save skeleton...")
armature_obj = bpy.data.objects[armature.name]
arm_mw = armature_obj.matrix_world
bones = armature.bones
if not bones:
print("no bones for skeleton")
return
abandonedBones = [i for i in bones
if i.parent and i.parent not in bones[:]]
if abandonedBones:
boneList = []
for ab in abandonedBones:
boneList.append("- " + str(ab.name))
print ("bones missing parents : ", boneList)
print ("bones count: ", len(bones))
# header
write_string(file, "a1skeleton")
# count
file.write(struct.pack('<H', len(bones)))
# data
for b in bones:
if not b.use_deform:
print ("not deformable bone!: ", b.name)
write_skip(file, True)
continue
write_skip(file, False)
bone_parent = b.parent
while bone_parent:
if bone_parent.use_deform:
break
bone_parent = bone_parent.parent
if bone_parent:
pn = bone_parent.name
else:
pn = ''
        mw = arm_mw * b.matrix_local  # exact world-space bone matrix
if bone_parent:
ml = bone_parent.matrix_local.inverted() * b.matrix_local
else:
ml = mw
# mw = get_mw(b)
# ml = correctionMatrix * b.matrix_local
# print ("m local : ", ml)
# print ("m world : ", mw)
# print ("parent", pn)
# print ("name: ", b.name, "---------")
write_string(file, b.name)
write_string(file, pn)
write_matrix(file, mw.inverted()) # bind
# inverted = boneMatrix.inverted()
write_matrix(file, ml) # frame
bind_pose[b.name] = ml
print("skeleton saved")
def write_all_anims(file, markerFilter, filePath, arm):
ranges = get_ranges(markerFilter)
print ("ranges : ", ranges)
if ranges:
file.write(struct.pack('<H', len(ranges)))
for r in ranges.keys():
# folder = os.path.dirname(filePath)
# animFile = os.path.join(folder, r + ".a1anim")
write_anim(file, r, ranges[r], arm)
else:
file.write(struct.pack('<H', 1))
write_anim(file, None, None, arm)
# baseFilePathEnd = filePath.rfind(".md5mesh")
# if baseFilePathEnd == -1:
# animFilePath = filePath + ".md5anim"
# else:
# animFilePath = filePath[:baseFilePathEnd] + ".md5anim"
# write_md5anim(animFilePath, prerequisites, correctionMatrix, None)
# return {'FINISHED'}
def write_anim(file, Name, frameRange, armature):
# global orientationTweak
print ("save animation... name: ", Name, " range: ", frameRange)
write_string(file, "a1anim")
if frameRange == None:
startFrame = bpy.context.scene.frame_start
endFrame = bpy.context.scene.frame_end
else:
startFrame, endFrame = frameRange
#armature = bpy.context.object.find_armature()
#armature = bpy.data.armatures[0]
bones = armature.data.bones
armObj = [o for o in bpy.data.objects if o.data == bones[0].id_data][0]
pBones = armObj.pose.bones
print ("arm :", armObj , " pbones: ", pBones)
# anim name
if Name:
write_string(file, Name)
else:
write_string(file, '')
# frames count
fcount = endFrame - startFrame + 1
file.write(struct.pack('<H', fcount))
fps = bpy.context.scene.render.fps
file.write(struct.pack('<H', fps))
# bones names
file.write(struct.pack('<H', len(bones)))
for b in bones:
write_string(file, b.name)
# print ("orientationTweak ", orientationTweak)
# frames
print ("process frames...")
for frame in range(startFrame, endFrame + 1):
bpy.context.scene.frame_set(frame)
#print("set frame ", frame)
for b in bones:
if not b.use_deform:
write_skip(file, True)
continue
write_skip(file, False)
pBone = pBones[b.name]
bone_parent = pBone.parent
while bone_parent:
if bone_parent.bone.use_deform:
break
bone_parent = bone_parent.parent
pBoneMatrix = pBone.matrix
if bone_parent:
diffMatrix = bone_parent.matrix.inverted() * (pBoneMatrix)
else:
diffMatrix = armObj.matrix_world * pBoneMatrix
# diffMatrix = orientationTweak * diffMatrix
# print ("bind_pose ", b.name, "=", bind_pose[b.name])
# print ("frame matrix=", diffMatrix)
            # if the matrices are identical, write a flag so this bone can be skipped (disabled below)
# if cmp_matrix(bind_pose[b.name], diffMatrix):
# print("equal matrix ", b.name)
# write_skip(file, True)
# else:
# write_skip(file, False)
write_matrix(file, diffMatrix)
print ("animation saved")
pass
def get_mw(bone):
ml = bone.matrix_local
if (bone.parent):
ml = get_mw(bone.parent) * ml
# else:
# ml = bpy.data.objects['Armature'].matrix_world * ml
return ml
def triangulate(bm):
while True:
nonTris = [f for f in bm.faces if len(f.verts) > 3]
if nonTris:
nt = nonTris[0]
pivotLoop = nt.loops[0]
allVerts = nt.verts
vert1 = pivotLoop.vert
wrongVerts = [vert1,
pivotLoop.link_loop_next.vert,
pivotLoop.link_loop_prev.vert]
bmesh.utils.face_split(nt, vert1, [v for v in allVerts
if v not in wrongVerts][0])
for seq in [bm.verts, bm.faces, bm.edges]: seq.index_update()
else:
break
return bm
def write_string(file, str):
l = len(str)
file.write(struct.pack('<H', l))
file.write(bytearray(str, 'ascii'))
def write_matrix(file, m):
# transpose in converter
file.write(struct.pack('<ffff', m[0][0], m[0][1], m[0][2], m[0][3]))
file.write(struct.pack('<ffff', m[1][0], m[1][1], m[1][2], m[1][3]))
file.write(struct.pack('<ffff', m[2][0], m[2][1], m[2][2], m[2][3]))
file.write(struct.pack('<ffff', m[3][0], m[3][1], m[3][2], m[3][3]))
def eps_num(n1, n2):
return (n1-n2) < 0.00001
def cmp_matrix(m1, m2):
if \
eps_num(m1[0][0], m2[0][0]) and eps_num(m1[0][1], m2[0][1]) and eps_num(m1[0][2], m2[0][2]) and eps_num(m1[0][3], m2[0][3]) and \
eps_num(m1[1][0], m2[1][0]) and eps_num(m1[1][1], m2[1][1]) and eps_num(m1[1][2], m2[1][2]) and eps_num(m1[1][3], m2[1][3]) and \
eps_num(m1[2][0], m2[2][0]) and eps_num(m1[2][1], m2[2][1]) and eps_num(m1[2][2], m2[2][2]) and eps_num(m1[2][3], m2[2][3]) and \
eps_num(m1[3][0], m2[3][0]) and eps_num(m1[3][1], m2[3][1]) and eps_num(m1[3][2], m2[3][2]) and eps_num(m1[3][3], m2[3][3]):
return True
else:
return False
def write_skip(file, skip):
if skip:
file.write(struct.pack('<b', 1))
else:
file.write(struct.pack('<b', 0))
def get_ranges(markerFilter):
markers = bpy.context.scene.timeline_markers
starts = [m for m in markers if
m.name.startswith(markerFilter)
and m.name.endswith("_start", 2)]
ends = [m for m in markers if
m.name.startswith(markerFilter)
and m.name.endswith("_end", 2)]
if not starts or not ends:
return None
else:
return find_matches(starts, ends)
def find_matches(starts, ends):
pairs = {}
for s in starts:
basename = s.name[:s.name.rfind("_start")]
matches = [e for e in ends if
e.name[:e.name.rfind("_end")] == basename]
if matches:
m = matches[0]
pairs[basename] = (min(s.frame, m.frame), max(s.frame, m.frame))
return pairs
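# Editor's note (illustrative only): with timeline markers named
# "walk_start" at frame 1 and "walk_end" at frame 30 (names invented),
# the two helpers above give
#
#     get_ranges("") == {"walk": (1, 30)}
#
# and write_all_anims() then exports one animation block per key.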
#===========================================================================
# Locate the target armature and mesh for export
# RETURNS armature, mesh
#===========================================================================
def find_armature_and_mesh():
print ("find_armature_and_mesh")
context = bpy.context
active_object = context.active_object
armature = None
mesh = None
# TODO:
# this could be more intuitive
bpy.ops.object.mode_set(mode='OBJECT')
# try the active object
if active_object and active_object.type == 'ARMATURE':
armature = active_object
# otherwise, try for a single armature in the scene
else:
all_armatures = [obj for obj in context.scene.objects if obj.type == 'ARMATURE']
if len(all_armatures) == 1:
armature = all_armatures[0]
elif len(all_armatures) > 1:
raise Error("Please select an armature in the scene")
else:
raise Error("No armatures in scene")
print ("Found armature: ", armature.name, " ", armature)
meshselected = []
parented_meshes = [obj for obj in armature.children if obj.type == 'MESH']
for obj in armature.children:
#print(dir(obj))
if obj.type == 'MESH' and obj.select == True:
meshselected.append(obj)
# try the active object
if active_object and active_object.type == 'MESH' and len(meshselected) == 0:
if active_object.parent == armature:
mesh = active_object
else:
raise Error("The selected mesh is not parented to the armature")
# otherwise, expect a single mesh parented to the armature (other object types are ignored)
else:
print("Number of meshes:",len(parented_meshes))
print("Number of meshes (selected):",len(meshselected))
if len(parented_meshes) == 1:
mesh = parented_meshes[0]
elif len(parented_meshes) > 1:
if len(meshselected) >= 1:
mesh = sortmesh(meshselected)
else:
raise Error("More than one mesh(s) parented to armature. Select object(s)!")
else:
raise Error("No mesh parented to armature")
print ("Found mesh: " +mesh.name, " ", mesh)
# if len(armature.pose.bones) == len(mesh.vertex_groups):
# print("Armature and Mesh Vertex Groups matches Ok!")
# else:
# raise Error("Armature bones:" + str(len(armature.pose.bones)) + " Mesh Vertex Groups:" + str(len(mesh.vertex_groups)) +" doesn't match!")
return armature, mesh
#copy mesh data and then merge them into one object
def meshmerge(selectedobjects):
bpy.ops.object.mode_set(mode='OBJECT')
cloneobjects = []
if len(selectedobjects) > 1:
print("selectedobjects:",len(selectedobjects))
count = 0 #reset count
for count in range(len( selectedobjects)):
#print("Index:",count)
if selectedobjects[count] != None:
me_da = selectedobjects[count].data.copy() #copy data
me_ob = selectedobjects[count].copy() #copy object
                #note: copy both the mesh data and the object, otherwise the current data/mesh would be reused
me_ob.data = me_da
bpy.context.scene.objects.link(me_ob)#link the object to the scene #current object location
print("Index:",count,"clone object",me_ob.name)
cloneobjects.append(me_ob)
#bpy.ops.object.mode_set(mode='OBJECT')
for i in bpy.data.objects: i.select = False #deselect all objects
count = 0 #reset count
#bpy.ops.object.mode_set(mode='OBJECT')
for count in range(len( cloneobjects)):
if count == 0:
bpy.context.scene.objects.active = cloneobjects[count]
print("Set Active Object:",cloneobjects[count].name)
cloneobjects[count].select = True
bpy.ops.object.join()
if len(cloneobjects) > 1:
bpy.types.Scene.udk_copy_merge = True
return cloneobjects[0]
#sort meshes so the ones located at the origin come first and off-center ones last; the selection/merge order then keeps the result centered
def sortmesh(selectmesh):
print("MESH SORTING...")
centermesh = []
notcentermesh = []
for countm in range(len(selectmesh)):
if selectmesh[countm].location.x == 0 and selectmesh[countm].location.y == 0 and selectmesh[countm].location.z == 0:
centermesh.append(selectmesh[countm])
else:
notcentermesh.append(selectmesh[countm])
selectmesh = []
for countm in range(len(centermesh)):
selectmesh.append(centermesh[countm])
for countm in range(len(notcentermesh)):
selectmesh.append(notcentermesh[countm])
if len(selectmesh) == 1:
return selectmesh[0]
else:
return meshmerge(selectmesh)
#===========================================================================
# http://en.wikibooks.org/wiki/Blender_3D:_Blending_Into_Python/Cookbook#Triangulate_NMesh
# Blender 2.5x: use the operator commands to convert the mesh to a triangle mesh
#===========================================================================
def triangulate_mesh( object ):
print("triangulateNMesh")
#print(type(object))
scene = bpy.context.scene
me_ob = object.copy()
me_ob.data = object.to_mesh(bpy.context.scene, True, 'PREVIEW') #write data object
bpy.context.scene.objects.link(me_ob)
bpy.context.scene.update()
bpy.ops.object.mode_set(mode='OBJECT')
for i in scene.objects:
i.select = False # deselect all objects
me_ob.select = True
scene.objects.active = me_ob
    print("Copy and convert the mesh just in case...")
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.select_all(action='SELECT')# select all the face/vertex/edge
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.quads_convert_to_tris()
bpy.context.scene.update()
bpy.ops.object.mode_set(mode='OBJECT')
print("Triangulated mesh")
me_ob.data = me_ob.to_mesh(bpy.context.scene, True, 'PREVIEW') #write data object
bpy.context.scene.update()
return me_ob
#===========================================================================
#RG - check to make sure the face isn't a line
#===========================================================================
def is_1d_face( face, mesh ):
#ID Vertex of id point
v0 = face.vertices[0]
v1 = face.vertices[1]
v2 = face.vertices[2]
return (mesh.vertices[v0].co == mesh.vertices[v1].co\
or mesh.vertices[v1].co == mesh.vertices[v2].co\
or mesh.vertices[v2].co == mesh.vertices[v0].co)
return False
|
gpl-3.0
| -2,436,140,333,795,629,600
| 32.747927
| 146
| 0.549069
| false
| 3.50603
| false
| false
| false
|
stormi/tsunami
|
src/primaires/salle/editeurs/redit/edt_repos.py
|
1
|
5551
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""This file defines the 'edt_repos' editor context."""
from primaires.interpreteur.editeur import Editeur
from primaires.format.fonctions import format_nb
class EdtRepos(Editeur):
    """This context allows editing the 'repos' (rest) sub-category of a detail.
    """
    def __init__(self, pere, objet=None, attribut=None):
        """Editor constructor."""
Editeur.__init__(self, pere, objet, attribut)
self.ajouter_option("s", self.opt_asseoir)
self.ajouter_option("l", self.opt_allonger)
self.ajouter_option("c", self.opt_connecteur)
    def accueil(self):
        """Welcome message for this context"""
detail = self.objet
msg = "| |tit|" + "Edition du détail '{}'".format(detail).ljust(76)
msg += "|ff||\n" + self.opts.separateur + "\n"
msg += self.aide_courte
msg += format_nb(detail.nb_places_assises,
"{nb} place{s} assise{s} ", fem=True)
msg += "(récupération : {}).\n".format(detail.facteur_asseoir)
msg += format_nb(detail.nb_places_allongees,
"{nb} place{s} allongée{s} ", fem=True)
msg += "(récupération : {}).\n".format(detail.facteur_allonger)
msg += "Connecteur : |ent|" + detail.connecteur + "|ff|\n"
return msg
    def opt_asseoir(self, arguments):
        """Sit-down option.
        Syntax: /s <nb> (<facteur>)
        """
detail = self.objet
if not arguments:
self.pere << "|err|Précisez au moins un nombre de places.|ff|"
return
nb_places = facteur = 0
try:
nb_places, facteur = arguments.split(" ")
except ValueError:
try:
nb_places = int(arguments.split(" ")[0])
assert nb_places >= 0
except (ValueError, AssertionError):
self.pere << "|err|Précisez un nombre valide et positif.|ff|"
return
try:
nb_places = int(nb_places)
facteur = float(facteur)
except ValueError:
self.pere << "|err|Précisez des nombres valides.|ff|"
return
if nb_places:
detail.peut_asseoir = True
detail.nb_places_assises = nb_places
else:
detail.peut_asseoir = False
detail.nb_places_assises = 0
if facteur:
detail.facteur_asseoir = facteur
self.actualiser()
    def opt_allonger(self, arguments):
        """Lie-down option.
        Syntax: /l <nb> (<facteur>)
        """
detail = self.objet
if not arguments:
self.pere << "|err|Précisez au moins un nombre de places.|ff|"
return
nb_places = facteur = 0
try:
nb_places, facteur = arguments.split(" ")
except ValueError:
try:
nb_places = int(arguments.split(" ")[0])
assert nb_places >= 0
except (ValueError, AssertionError):
self.pere << "|err|Précisez un nombre valide et positif.|ff|"
return
try:
nb_places = int(nb_places)
facteur = float(facteur)
except ValueError:
self.pere << "|err|Précisez des nombres valides.|ff|"
return
if nb_places:
detail.peut_allonger = True
detail.nb_places_allongees = nb_places
else:
detail.peut_allonger = False
detail.nb_places_allongees = 0
if facteur:
detail.facteur_allonger = facteur
self.actualiser()
    def opt_connecteur(self, arguments):
        """Connector option.
        Syntax: /c <connecteur>
        """
detail = self.objet
if not arguments:
self.pere << "|err|Précisez un connecteur.|ff|"
return
detail.connecteur = arguments
self.actualiser()
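# Editor's note: illustrative editor input handled by the options above
# (numbers and the connector word are invented):
#
#     /s 4 0.8   -> 4 seated places with a recovery factor of 0.8
#     /l 2 1.2   -> 2 lying places with a recovery factor of 1.2
#     /c sur     -> sets the connector word shown for this detail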
|
bsd-3-clause
| -8,722,783,594,387,688,000
| 37.151724
| 79
| 0.601952
| false
| 3.727763
| false
| false
| false
|
MSLNZ/msl-qt
|
tests/test_utils.py
|
1
|
1695
|
from msl.qt import utils, QtCore, QtWidgets, Qt, QtGui
def test_screen_geometry():
# just check that these don't raise an exception
assert isinstance(utils.screen_geometry(), QtCore.QRect)
assert isinstance(utils.screen_geometry(QtWidgets.QLabel()), QtCore.QRect)
assert isinstance(utils.screen_geometry(QtWidgets.QLabel(parent=QtWidgets.QLabel())), QtCore.QRect)
def test_drag_enter_paths():
mime = QtCore.QMimeData()
event = QtGui.QDragEnterEvent(QtCore.QPoint(0, 0), Qt.CopyAction, mime, Qt.LeftButton, Qt.NoModifier)
paths = utils.drag_drop_paths(event)
assert len(paths) == 0
url1 = QtCore.QUrl('/path/to/image.jpeg')
url1.setScheme('file')
url2 = QtCore.QUrl('') # does not pass the isValid() and scheme() == 'file' checks
url3 = QtCore.QUrl('/path/to/image.jpeg')
url3.setScheme('ftp') # does not pass the scheme() == 'file' check
url4 = QtCore.QUrl('/path/to/image.png')
url4.setScheme('file')
url5 = QtCore.QUrl('/path/to/image2.jpg')
url5.setScheme('file')
mime.setUrls([url1, url2, url3, url4, url5])
event = QtGui.QDragEnterEvent(QtCore.QPoint(0, 0), Qt.CopyAction, mime, Qt.LeftButton, Qt.NoModifier)
paths = utils.drag_drop_paths(event)
assert len(paths) == 3
assert '/path/to/image.jpeg' in paths
assert '/path/to/image.png' in paths
assert '/path/to/image2.jpg' in paths
paths = utils.drag_drop_paths(event, pattern='*.jp*g')
assert len(paths) == 2
assert '/path/to/image.jpeg' in paths
assert '/path/to/image2.jpg' in paths
paths = utils.drag_drop_paths(event, pattern='*.png')
assert len(paths) == 1
assert '/path/to/image.png' in paths
|
mit
| -982,922,349,157,547,000
| 34.3125
| 105
| 0.673746
| false
| 3.216319
| false
| false
| false
|
allenai/document-qa
|
docqa/elmo/run_on_user_text.py
|
1
|
3809
|
import argparse
import tensorflow as tf
from docqa.data_processing.qa_training_data import ParagraphAndQuestion, ParagraphAndQuestionSpec
from docqa.data_processing.text_utils import NltkAndPunctTokenizer
from docqa.elmo.lm_qa_models import ElmoQaModel
from docqa.model_dir import ModelDir
"""
Script to run a model on user provided question/context input.
Its main purpose is to be an example of how to use the model on new question/context pairs.
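An illustrative invocation (editor's addition; the model directory and the
question/context strings below are invented):
    python docqa/elmo/run_on_user_text.py model-dir "Who hosted the games?" "The games were hosted in Sydney."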
"""
def main():
parser = argparse.ArgumentParser(description="Run an ELMo model on user input")
parser.add_argument("model", help="Model directory")
parser.add_argument("question", help="Question to answer")
parser.add_argument("context", help="Context to answer the question with")
args = parser.parse_args()
    # Tokenize the input; the model expects the data to be tokenized using `NltkAndPunctTokenizer`
# Note the model expects case-sensitive input
tokenizer = NltkAndPunctTokenizer()
question = tokenizer.tokenize_paragraph_flat(args.question)
context = tokenizer.tokenize_paragraph_flat(args.context)
print("Loading model")
model_dir = ModelDir(args.model)
model = model_dir.get_model()
if not isinstance(model, ElmoQaModel):
        raise ValueError("This script is built to work with ElmoQaModel models only")
# Important! This tells the language model not to use the pre-computed word vectors,
# which are only applicable for the SQuAD dev/train sets.
# Instead the language model will use its character-level CNN to compute
# the word vectors dynamically.
model.lm_model.embed_weights_file = None
    # Tell the model the batch size and vocab to expect. This will load the needed
# word vectors and fix the batch size when building the graph / encoding the input
print("Setting up model")
voc = set(question)
voc.update(context)
model.set_input_spec(ParagraphAndQuestionSpec(batch_size=1), voc)
# Now we build the actual tensorflow graph, `best_span` and `conf` are
# tensors holding the predicted span (inclusive) and confidence scores for each
# element in the input batch
print("Build tf graph")
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
with sess.as_default():
# 17 means to limit the span to size 17 or less
best_spans, conf = model.get_prediction().get_best_span(17)
# Now restore the weights, this is a bit fiddly since we need to avoid restoring the
# bilm weights, and instead load them from the pre-computed data
all_vars = tf.global_variables() + tf.get_collection(tf.GraphKeys.SAVEABLE_OBJECTS)
lm_var_names = {x.name for x in all_vars if x.name.startswith("bilm")}
vars = [x for x in all_vars if x.name not in lm_var_names]
model_dir.restore_checkpoint(sess, vars)
# Run the initializer of the lm weights, which will load them from the lm directory
sess.run(tf.variables_initializer([x for x in all_vars if x.name in lm_var_names]))
# Now the model is ready to run
# The model takes input in the form of `ContextAndQuestion` objects, for example:
data = [ParagraphAndQuestion(context, question, None, "user-question1")]
print("Starting run")
# The model is run in two steps, first it "encodes" the paragraph/context pairs
    # into numpy arrays, then `sess` is used to run the actual model and get the predictions
encoded = model.encode(data, is_train=False) # batch of `ContextAndQuestion` -> feed_dict
best_spans, conf = sess.run([best_spans, conf], feed_dict=encoded) # feed_dict -> predictions
print("Best span: " + str(best_spans[0]))
print("Answer text: " + " ".join(context[best_spans[0][0]:best_spans[0][1]+1]))
print("Confidence: " + str(conf[0]))
if __name__ == "__main__":
main()
|
apache-2.0
| 85,362,426,133,489,180
| 45.463415
| 98
| 0.716461
| false
| 3.730656
| false
| false
| false
|
woodmd/haloanalysis
|
extpipe/utils.py
|
1
|
7826
|
import copy
import re
import glob
import numpy as np
from numpy.core import defchararray
from scipy.interpolate import RegularGridInterpolator
from astropy.io import fits
from astropy.table import Table, Column
def stack_files(files, outfile, new_cols=None):
h = fits.open(files[0])
tables = []
for hdu in h:
if isinstance(hdu,fits.BinTableHDU):
tables += [stack_tables(files,hdu.name,new_cols=new_cols)]
hdus = [fits.PrimaryHDU()]
hdus += [fits.table_to_hdu(t) for t in tables]
hdulist = fits.HDUList(hdus)
hdulist.writeto(outfile,overwrite=True)
def stack_tables(files, hdu=None, new_cols=None):
tables = []
for f in sorted(files):
tables += [Table.read(f,hdu=hdu)]
cols = []
for c in tables[0].colnames:
col = tables[0][c]
cols += [Column(name=col.name, unit=col.unit, shape=col.shape,
dtype=col.dtype)]
tab = Table(cols,meta=tables[0].meta)
for t in tables:
row = [ t[c] for c in tables[0].colnames ]
tab.add_row(row)
if new_cols is not None:
for col in new_cols:
tab.add_column(col)
return tab
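# Editor's note: a minimal usage sketch (file names and HDU name invented):
#
#     tab = stack_tables(glob.glob('fit_*.fits'), hdu='CATALOG')
#     stack_files(glob.glob('fit_*.fits'), 'stacked.fits')
#
# stack_files() takes every binary-table HDU found in the first file and
# stacks the corresponding rows from all inputs into one output FITS file.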
def load_source_rows(tab, names, key='assoc'):
"""Load the rows from a table that match a source name.
Parameters
----------
tab : `astropy.table.Table`
Table that will be searched.
names : list
List of source identifiers.
key : str
Name of the table column that will be searched for a source
matching key.
Returns
-------
outtab : `astropy.table.Table`
Table containing the subset of rows with matching source identifiers.
"""
names = [name.lower().replace(' ', '') for name in names]
col = tab[[key]].copy()
col[key] = defchararray.replace(defchararray.lower(col[key]),
' ', '')
mask = create_mask(col, {key: names})
return tab[mask]
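# Editor's note: a usage sketch with an invented source name:
#
#     rows = load_source_rows(tab, ['Mkn 421'], key='assoc')
#
# Matching is case- and whitespace-insensitive, so 'mkn421' selects the
# same rows.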
def create_mask(tab, target_def):
"""Create a table mask from a target definition."""
m = np.empty(len(tab), dtype=bool); m.fill(True)
for k,v in target_def.items():
if isinstance(v,list):
m0 = np.zeros(len(tab),dtype=bool)
for t in v:
m0 |= (tab[k] == t)
m &= m0
elif isinstance(v,dict):
m0 = np.empty(len(tab),dtype=bool)
m0.fill(True)
if 'min' in v:
m0 &= (tab[k] >= v['min'])
if 'max' in v:
m0 &= (tab[k] <= v['max'])
m &= m0
elif isinstance(v,str):
p = re.compile('([a-zA-Z_2-9][^"<>=&|!()\s*.]+)')
# regular expression should capture all column names
# that consist of a-z, A-Z, '_', and numbers at the end
# it should not capture pure numbers and numbers like '1e10'
replaced = [] # check what already has been replaced
for cname in p.findall(v):
print(cname)
if not cname in replaced:
                    if cname in tab.columns:
tab[cname]
v = v.replace(cname, "tab['{0:s}']".format(cname))
#else:
# v = v.replace(cname, "'{0:s}'".format(cname))
replaced.append(cname)
# all of the above in one line but does not work if column name starts with a number
# or if expression is not a number
#print 'Cutting on expression', p.sub(r"tab['\1']",v)
            print('Cutting on expression', v)
m &= eval(v)
return m
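# Editor's note: illustrative target definitions accepted by create_mask()
# (column names are invented):
#
#     create_mask(tab, {'class': ['bll', 'fsrq']})            # membership in a list
#     create_mask(tab, {'ts': {'min': 25.0, 'max': 1e4}})     # range cut
#     create_mask(tab, {'cut0': '(ts > 25) & (npred > 3)'})   # free-form expression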
def interp_map(z, axis0, axis1, dim=0):
    """Interpolate ``z`` along dimension ``dim`` from the grid defined by
    ``axis0`` onto the grid defined by ``axis1``."""
    s0 = z.ndim * [None]
    s0[dim] = slice(None)
    # convert to a differential quantity before interpolating
    z = z / axis0.width[tuple(s0)]
    shape = list(z.shape)
    shape[dim] = len(axis1.centers)
    zinterp = np.zeros(shape)
    # interpolate each 1D profile along the chosen dimension
    zt = np.moveaxis(z, dim, -1)
    out = np.moveaxis(zinterp, dim, -1)
    for idx in np.ndindex(zt.shape[:-1]):
        out[idx] = np.interp(axis1.centers, axis0.centers, zt[idx])
    return zinterp
class MapND(object):
"""Container class representing an n-dimensional map."""
def __init__(self, axes, data, log_interp=False):
"""
Parameters
----------
axes : list
List of `Axis` objects defining the n-dimensional grid.
data : `~numpy.ndarray`
log_interp : bool
Perform interpolation in log-space.
"""
self._axes = axes
self._data = data
self._log_interp = log_interp
points = [ax.centers for ax in axes]
if log_interp:
self._fn = RegularGridInterpolator(points, np.log(data),
bounds_error=False,
fill_value=None)
else:
self._fn = RegularGridInterpolator(points, data,
bounds_error=False,
fill_value=None)
@property
def fn(self):
return self._fn
@property
def axes(self):
return self._axes
@property
def data(self):
return self._data
def marginalize(self, dims):
data = np.squeeze(np.apply_over_axes(np.sum,self.data,axes=dims))
axes = []
for i, axis in enumerate(self.axes):
if i not in dims:
axes += [axis]
return MapND(axes, data, self._log_interp)
def slice(self, dims, vals):
axis_xvals = []
axes = []
for i, axis in enumerate(self.axes):
axis_xvals += [axis.centers]
if not i in dims:
axes += [axis]
for i, v in zip(dims,vals):
axis_xvals[i] = np.array(v,ndmin=1)
interp_xvals = np.meshgrid(*axis_xvals,indexing='ij',sparse=True)
data = np.squeeze(self.interp(tuple(interp_xvals)))
return MapND(axes, data, self._log_interp)
def interp(self, *args):
if self._log_interp:
log_vals = self._fn(*args)
log_vals[~np.isfinite(log_vals)] = -33
return np.exp(log_vals)
else:
return self._fn(*args)
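# Editor's note: a hedged usage sketch of MapND (axes and data invented):
#
#     ax_e = Axis.create_from_centers('energy', np.logspace(2, 5, 16), logscale=True)
#     ax_r = Axis.create_from_centers('radius', np.linspace(0.1, 2.0, 21))
#     m = MapND([ax_e, ax_r], data)          # data shaped (16, 21)
#     m.interp((1e3, 0.5))                   # value at a single point
#     m.marginalize([1])                     # sum over the radius dimension
#     m.slice([0], [1e3])                    # fix energy, keep the radius axis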
class Axis(object):
def __init__(self, name, edges, centers=None):
self._edges = edges
self._centers = (0.5*(edges[1:] + edges[:-1])
if centers is None else centers)
self._name = name
@staticmethod
def create_from_centers(name, centers, logscale=False):
"""Create a new axis object from a sequence of bin centers."""
if logscale:
delta = np.log(centers[1:])-np.log(centers[:-1])
else:
delta = centers[1:]-centers[:-1]
if len(delta) == 0:
delta = np.array([1.0])
else:
delta = np.insert(delta,0,delta[0])
if logscale:
edges_lo = np.log(centers) - 0.5*delta
edges_hi = np.log(centers) + 0.5*delta
edges = np.exp(np.insert(edges_hi,0,edges_lo[0]))
else:
edges_lo = centers - 0.5*delta
edges_hi = centers + 0.5*delta
edges = np.insert(edges_hi,0,edges_lo[0])
return Axis(name, edges, centers)
@property
def name(self):
return self._name
@property
def edges(self):
return self._edges
@property
def lo(self):
"""Return the lower bin edges."""
return self._edges[:-1]
@property
def hi(self):
"""Return the upper bin edges."""
return self._edges[1:]
@property
def nbin(self):
return len(self._edges)-1
@property
def centers(self):
return self._centers
@property
def width(self):
return self._edges[1:] - self._edges[:-1]
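# Editor's note: a short Axis example (values invented):
#
#     ax = Axis.create_from_centers('energy', np.array([1.0, 2.0, 4.0]),
#                                   logscale=True)
#     ax.nbin     # 3 bins reconstructed around the given centers
#     ax.width    # bin widths derived from the reconstructed edges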
|
bsd-3-clause
| -7,118,146,222,372,312,000
| 25.80137
| 89
| 0.523256
| false
| 3.72135
| false
| false
| false
|
eReuse/DeviceHub
|
ereuse_devicehub/scripts/updates/snapshot_software.py
|
1
|
1290
|
from contextlib import suppress
from pydash import find
from ereuse_devicehub.resources.device.domain import DeviceDomain
from ereuse_devicehub.resources.event.device import DeviceEventDomain
from ereuse_devicehub.scripts.updates.update import Update
class SnapshotSoftware(Update):
"""
Changes the values of SnapshotSoftware and adds it to the materialized one in devices
"""
def execute(self, database):
SNAPSHOT_SOFTWARE = {
'DDI': 'Workbench',
'Scan': 'AndroidApp',
'DeviceHubClient': 'Web'
}
for snapshot in DeviceEventDomain.get({'@type': "devices:Snapshot"}):
with suppress(KeyError):
snapshot['snapshotSoftware'] = SNAPSHOT_SOFTWARE[snapshot.get('snapshotSoftware', 'DDI')]
DeviceEventDomain.update_one_raw(snapshot['_id'], {'$set': {'snapshotSoftware': snapshot['snapshotSoftware']}})
for device in DeviceDomain.get({'events._id': snapshot['_id']}):
materialized_snapshot = find(device['events'], lambda event: event['_id'] == snapshot['_id'])
materialized_snapshot['snapshotSoftware'] = snapshot['snapshotSoftware']
DeviceDomain.update_one_raw(device['_id'], {'$set': {'events': device['events']}})
|
agpl-3.0
| 4,927,501,792,348,379,000
| 45.071429
| 123
| 0.655039
| false
| 4.372881
| false
| false
| false
|
Makeystreet/makeystreet
|
woot/apps/catalog/migrations/0139_auto__add_field_makey_is_staff_pick__add_field_makey_added_time_staff_.py
|
1
|
70054
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Makey.is_staff_pick'
db.add_column(u'catalog_makey', 'is_staff_pick',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'Makey.added_time_staff_pick'
db.add_column(u'catalog_makey', 'added_time_staff_pick',
self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Makey.is_staff_pick'
db.delete_column(u'catalog_makey', 'is_staff_pick')
# Deleting field 'Makey.added_time_staff_pick'
db.delete_column(u'catalog_makey', 'added_time_staff_pick')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'catalog.article': {
'Meta': {'object_name': 'Article'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Comment']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'new_user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.NewUser']", 'null': 'True', 'blank': 'True'}),
'rating': ('django.db.models.fields.IntegerField', [], {}),
'recommendation': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.ArticleTag']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.articleemail': {
'Meta': {'object_name': 'ArticleEmail'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'email_subscriptions'", 'null': 'True', 'to': "orm['catalog.ArticleTag']"}),
'temp_id': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.articletag': {
'Meta': {'object_name': 'ArticleTag'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url_snippet': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
'catalog.cfistoreitem': {
'Meta': {'object_name': 'CfiStoreItem'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'item': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.Product']", 'unique': 'True'}),
'likers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'cfi_store_item_likes'", 'symmetrical': 'False', 'through': "orm['catalog.LikeCfiStoreItem']", 'to': u"orm['auth.User']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.comment': {
'Meta': {'object_name': 'Comment'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.documentation': {
'Meta': {'object_name': 'Documentation'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.emailcollect': {
'Meta': {'object_name': 'EmailCollect'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '30'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'catalog.favoritemakey': {
'Meta': {'unique_together': "(('user', 'makey'),)", 'object_name': 'FavoriteMakey'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.image': {
'Meta': {'object_name': 'Image'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Comment']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'full_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_s3': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'large_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'small_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'images'", 'null': 'True', 'to': u"orm['auth.User']"})
},
'catalog.instructablestep': {
'Meta': {'ordering': "['-step']", 'object_name': 'InstructableStep'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'body': ('tinymce.models.HTMLField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iid': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50', 'null': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Image']", 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'step': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'default': 'None', 'max_length': '200', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'words': ('django.db.models.fields.IntegerField', [], {'default': '-1'})
},
'catalog.inventory': {
'Meta': {'unique_together': "(('part', 'space'),)", 'object_name': 'Inventory'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'part': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'inventory_part'", 'to': "orm['catalog.Product']"}),
'quantity': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'space': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'inventory_space'", 'to': "orm['catalog.Space']"})
},
'catalog.like': {
'Meta': {'object_name': 'Like'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likearticle': {
'Meta': {'unique_together': "(('user', 'article'),)", 'object_name': 'LikeArticle'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Article']"}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likecfistoreitem': {
'Meta': {'unique_together': "(('user', 'cfi_store_item'),)", 'object_name': 'LikeCfiStoreItem'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'cfi_store_item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.CfiStoreItem']"}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likechannel': {
'Meta': {'unique_together': "(('user', 'channel'),)", 'object_name': 'LikeChannel'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'channel': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ArticleTag']"}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likecomment': {
'Meta': {'unique_together': "(('user', 'comment'),)", 'object_name': 'LikeComment'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Comment']"}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeimage': {
'Meta': {'unique_together': "(('user', 'image'),)", 'object_name': 'LikeImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Image']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likelisting': {
'Meta': {'unique_together': "(('user', 'listing'),)", 'object_name': 'LikeListing'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'listing': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Listing']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likemakey': {
'Meta': {'unique_together': "(('user', 'makey'),)", 'object_name': 'LikeMakey'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'makeylikes'", 'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likenote': {
'Meta': {'unique_together': "(('user', 'note'),)", 'object_name': 'LikeNote'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'note': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Note']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeproduct': {
'Meta': {'unique_together': "(('user', 'product'),)", 'object_name': 'LikeProduct'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeproductdescription': {
'Meta': {'unique_together': "(('user', 'product_description'),)", 'object_name': 'LikeProductDescription'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product_description': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ProductDescription']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeproductimage': {
'Meta': {'unique_together': "(('user', 'image'),)", 'object_name': 'LikeProductImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ProductImage']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeproducttutorial': {
'Meta': {'unique_together': "(('user', 'tutorial', 'product'),)", 'object_name': 'LikeProductTutorial'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Tutorial']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeshop': {
'Meta': {'unique_together': "(('user', 'shop'),)", 'object_name': 'LikeShop'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likevideo': {
'Meta': {'unique_together': "(('user', 'video'),)", 'object_name': 'LikeVideo'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Video']"})
},
'catalog.list': {
'Meta': {'object_name': 'List'},
'access': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'access'", 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_private': ('django.db.models.fields.BooleanField', [], {}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalog.ListItem']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owner'", 'to': u"orm['auth.User']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.listgroup': {
'Meta': {'object_name': 'ListGroup'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'lists': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalog.List']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.listing': {
'Meta': {'object_name': 'Listing'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'admins': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'company': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'content': ('tinymce.models.HTMLField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'catalog.listitem': {
'Meta': {'object_name': 'ListItem'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'createdby': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.location': {
'Meta': {'object_name': 'Location'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.logidenticalproduct': {
'Meta': {'object_name': 'LogIdenticalProduct'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product1'", 'to': "orm['catalog.Product']"}),
'product2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product2'", 'to': "orm['catalog.Product']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.makey': {
'Meta': {'object_name': 'Makey'},
'about': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'added_time_staff_pick': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'as_part': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'as_makey'", 'null': 'True', 'to': "orm['catalog.Product']"}),
'as_part_new': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'as_makey'", 'null': 'True', 'to': "orm['catalog.NewProduct']"}),
'collaborators': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'collaborators'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeycomments'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Comment']"}),
'cover_pic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Image']", 'null': 'True', 'blank': 'True'}),
'credits': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'derived_from': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'forked_as'", 'null': 'True', 'to': "orm['catalog.Makey']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'documentations': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeydocumentations'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Documentation']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeyimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff_pick': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'made_in': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'makeys_made_in'", 'null': 'True', 'to': "orm['catalog.Space']"}),
'mentors': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'modules_used': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'used_in'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Makey']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'new_parts': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeys_parts'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.NewProduct']"}),
'new_tools': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeys_tools'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.NewProduct']"}),
'new_users': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeys'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.NewUser']"}),
'notes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeynotes'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Note']"}),
'removed_collaborators': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makey_removed'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'videos': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeyvideos'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Video']"}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'why': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'catalog.makeyimage': {
'Meta': {'object_name': 'MakeyImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey_id': ('django.db.models.fields.IntegerField', [], {}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.newinventory': {
'Meta': {'unique_together': "(('part', 'space'),)", 'object_name': 'NewInventory'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'part': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'new_inventory_part'", 'to': "orm['catalog.NewProduct']"}),
'quantity': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'space': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'new_inventory_space'", 'to': "orm['catalog.Space']"})
},
'catalog.newproduct': {
'Meta': {'object_name': 'NewProduct'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Image']", 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.newuser': {
'Meta': {'object_name': 'NewUser'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.note': {
'Meta': {'ordering': "['order']", 'object_name': 'Note'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Comment']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Image']", 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.product': {
'Meta': {'object_name': 'Product'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identicalto': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']", 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makeys': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'partsused'", 'blank': 'True', 'to': "orm['catalog.Makey']"}),
'makeys_as_tools': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'tools_used'", 'blank': 'True', 'to': "orm['catalog.Makey']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'sku': ('django.db.models.fields.IntegerField', [], {}),
'space_as_tools': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'tools_in_space'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Space']"}),
'tutorials': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'products'", 'blank': 'True', 'to': "orm['catalog.Tutorial']"})
},
'catalog.productdescription': {
'Meta': {'object_name': 'ProductDescription'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productdescriptions'", 'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']", 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'blank': 'True'}),
'user_or_shop': ('django.db.models.fields.BooleanField', [], {})
},
'catalog.productimage': {
'Meta': {'object_name': 'ProductImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productimages'", 'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']", 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.productreview': {
'Meta': {'object_name': 'ProductReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product_reviews'", 'to': "orm['catalog.Product']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'review': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.productshopurl': {
'Meta': {'object_name': 'ProductShopUrl'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productshopurls'", 'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.searchlog': {
'Meta': {'object_name': 'SearchLog'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.shop': {
'Meta': {'object_name': 'Shop'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'shopimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.shopreview': {
'Meta': {'object_name': 'ShopReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rating': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'review': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'shop_reviews'", 'to': "orm['catalog.Shop']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.space': {
'Meta': {'object_name': 'Space'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'admins': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'space_admins'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Comment']", 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'date_of_founding': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'facebook': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inventory': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'space_inventory'", 'symmetrical': 'False', 'through': "orm['catalog.Inventory']", 'to': "orm['catalog.Product']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'kind': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'last_updated_external': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '15', 'decimal_places': '10', 'blank': 'True'}),
'logo': ('django.db.models.fields.URLField', [], {'max_length': '400', 'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '15', 'decimal_places': '10', 'blank': 'True'}),
'map_zoom_level': ('django.db.models.fields.IntegerField', [], {'default': '13'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'space_members'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'membership_fee': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'new_inventory': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'space_new_inventory'", 'symmetrical': 'False', 'through': "orm['catalog.NewInventory']", 'to': "orm['catalog.NewProduct']"}),
'new_members': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'space_new_members'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.NewUser']"}),
'new_tools': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'space_new_tools'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.NewProduct']"}),
'no_of_members': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'twitter': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'catalog.spacereview': {
'Meta': {'object_name': 'SpaceReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rating': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'review': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'space': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'space_reviews'", 'to': "orm['catalog.Space']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.textdocumentation': {
'Meta': {'object_name': 'TextDocumentation'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'body': ('tinymce.models.HTMLField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Image']", 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'text_documentations'", 'null': 'True', 'to': "orm['catalog.Makey']"}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.TextField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.toindexstore': {
'Meta': {'object_name': 'ToIndexStore'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.topmakeys': {
'Meta': {'object_name': 'TopMakeys'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.topproducts': {
'Meta': {'object_name': 'TopProducts'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.topshops': {
'Meta': {'object_name': 'TopShops'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"})
},
'catalog.toptutorials': {
'Meta': {'object_name': 'TopTutorials'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Tutorial']"})
},
'catalog.topusers': {
'Meta': {'object_name': 'TopUsers'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.tutorial': {
'Meta': {'object_name': 'Tutorial'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'tutorialimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.upfile': {
'Meta': {'object_name': 'UpFile'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'filetype': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'files'", 'null': 'True', 'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.userflags': {
'Meta': {'object_name': 'UserFlags'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_maker_intro': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'show_makey_intro': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.userinteraction': {
'Meta': {'object_name': 'UserInteraction'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'event': ('django.db.models.fields.IntegerField', [], {}),
'event_id': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'aboutme': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'blog_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'college': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'facebook_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'following': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'followers'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.UserProfile']"}),
'github_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instructables_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'linkedin_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'default': "'Bangalore, India'", 'max_length': '255'}),
'membership': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'patent': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'profile_pic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Image']", 'null': 'True', 'blank': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'stackoverflow_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'twitter_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': u"orm['auth.User']"}),
'website_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'yt_channel_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'})
},
'catalog.video': {
'Meta': {'object_name': 'Video'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Comment']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'embed_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'site': ('django.db.models.fields.IntegerField', [], {}),
'thumb_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.votemakey': {
'Meta': {'unique_together': "(('user', 'makey'),)", 'object_name': 'VoteMakey'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalog.voteproductreview': {
'Meta': {'unique_together': "(('user', 'review'),)", 'object_name': 'VoteProductReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'review': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ProductReview']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalog.voteshopreview': {
'Meta': {'unique_together': "(('user', 'review'),)", 'object_name': 'VoteShopReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'review': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ShopReview']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalog.votespacereview': {
'Meta': {'unique_together': "(('user', 'review'),)", 'object_name': 'VoteSpaceReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'review': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.SpaceReview']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalog.votetutorial': {
'Meta': {'unique_together': "(('user', 'tutorial'),)", 'object_name': 'VoteTutorial'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Tutorial']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['catalog']
|
apache-2.0
| -7,416,513,312,088,065,000
| 83.607488
| 229
| 0.54261
| false
| 3.656836
| false
| false
| false
|
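# Note: the frozen 'models' dict in the South migration above is a serialized
# snapshot of the schema at migration time -- each key is 'app.modelname' and each
# field maps to (field class path, positional args, keyword args). As a reading
# aid, here is a minimal sketch of the Django model that the 'catalog.likeshop'
# entry corresponds to. This is illustrative only: the real definition lives in
# catalog/models.py and may carry extra methods or Meta options not captured here.
from django.contrib.auth.models import User
from django.db import models


class LikeShop(models.Model):
    # Field names and options mirror the frozen entry above (South-era Django,
    # so ForeignKey is declared without an explicit on_delete argument).
    added_time = models.DateTimeField()
    fb_like_id = models.CharField(max_length=100, default='-1', null=True, blank=True)
    is_enabled = models.BooleanField(default=True)
    score = models.IntegerField(default=0)
    shop = models.ForeignKey('catalog.Shop')
    user = models.ForeignKey(User)

    class Meta:
        unique_together = (('user', 'shop'),)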
okolisny/integration_tests
|
cfme/tests/configure/test_display_settings.py
|
1
|
1849
|
# -*- coding: utf-8 -*-
import pytest
from cfme import test_requirements
from cfme.configure.settings import visual
from cfme.fixtures import pytest_selenium as sel
from cfme.web_ui import ColorGroup, form_buttons
from cfme.utils.appliance import current_appliance
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils import version
from cfme.configure import settings # NOQA
pytestmark = [pytest.mark.tier(3),
test_requirements.settings]
colors = [
'Orange',
'Yellow',
'Green',
'Blue',
'ManageIQ-Blue',
'Black',
]
@pytest.yield_fixture(scope="module")
def set_timezone():
time_zone = visual.timezone
visual.timezone = "(GMT-10:00) Hawaii"
yield
visual.timezone = time_zone
def set_header_color(name):
cg = ColorGroup('Header Accent Color')
if cg.active != name:
cg.choose(name)
sel.click(form_buttons.save)
def is_header_color_changed(name):
cg = ColorGroup('Header Accent Color')
if cg.active == name:
return cg.status(name)
def reset_default_color(default_color):
cg = ColorGroup('Header Accent Color')
if cg.active != default_color:
cg.choose(default_color)
sel.click(form_buttons.save)
def test_timezone_setting(set_timezone):
""" Tests timezone setting
Metadata:
test_flag: visuals
"""
locator = version.pick({
version.LOWEST: ('//label[contains(@class,"control-label") and contains(., "Started On")]'
'/../div/p[contains(., "{}")]'.format("HST")),
'5.7': ('//label[contains(@class,"control-label") and contains(., "Started On")]'
'/../div/p[contains(., "{}")]'.format("-1000"))
})
navigate_to(current_appliance.server, 'DiagnosticsDetails')
assert sel.is_displayed(locator), "Timezone settings Failed"
|
gpl-2.0
| 1,625,218,247,549,686,800
| 25.414286
| 98
| 0.652244
| false
| 3.604288
| true
| false
| false
|
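# Note: the set_timezone fixture in test_display_settings.py above follows a common
# save/apply/restore pattern for appliance-wide visual settings. A minimal, generic
# sketch of the same pattern is shown below; the 'grid_view_limit' attribute is
# hypothetical -- substitute any attribute actually exposed by
# cfme.configure.settings.visual.
import pytest

from cfme.configure.settings import visual


@pytest.yield_fixture(scope="module")
def set_grid_view_limit():
    original = visual.grid_view_limit   # remember the current value
    visual.grid_view_limit = 5          # apply the value under test
    yield                               # run the tests in this module
    visual.grid_view_limit = original   # restore on module teardown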
suyashphadtare/test
|
erpnext/manufacturing/doctype/production_planning_tool/production_planning_tool.py
|
1
|
15406
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cstr, flt, cint, nowdate, add_days, comma_and
from frappe import msgprint, _
from frappe.model.document import Document
class ProductionPlanningTool(Document):
def __init__(self, arg1, arg2=None):
super(ProductionPlanningTool, self).__init__(arg1, arg2)
self.item_dict = {}
def get_so_details(self, so):
"""Pull other details from so"""
so = frappe.db.sql("""select transaction_date, customer, grand_total
from tabSales_Order where name = %s""", so, as_dict = 1)
ret = {
'sales_order_date': so and so[0]['transaction_date'] or '',
'customer' : so[0]['customer'] or '',
'grand_total': so[0]['grand_total']
}
return ret
def get_item_details(self, item_code):
""" Pull other item details from item master"""
item = frappe.db.sql("""select description, stock_uom, default_bom
from tabItem where name = %s""", item_code, as_dict =1)
ret = {
'description' : item and item[0]['description'],
'stock_uom' : item and item[0]['stock_uom'],
'bom_no' : item and item[0]['default_bom']
}
return ret
def clear_so_table(self):
self.set('pp_so_details', [])
def clear_item_table(self):
self.set('pp_details', [])
def validate_company(self):
if not self.company:
frappe.throw(_("Please enter Company"))
def get_open_sales_orders(self):
""" Pull sales orders which are pending to deliver based on criteria selected"""
so_filter = item_filter = ""
if self.from_date:
so_filter += ' and so.transaction_date >= "' + self.from_date + '"'
if self.to_date:
so_filter += ' and so.transaction_date <= "' + self.to_date + '"'
if self.customer:
so_filter += ' and so.customer = "' + self.customer + '"'
if self.fg_item:
item_filter += ' and item.name = "' + self.fg_item + '"'
open_so = frappe.db.sql("""
select distinct so.name, so.transaction_date, so.customer, so.grand_total
from tabSales_Order so, tabSales_Order_Item so_item
where so_item.parent = so.name
and so.docstatus = 1 and so.status != "Stopped"
and so.company = %s
and ifnull(so_item.qty, 0) > ifnull(so_item.delivered_qty, 0) %s
and (exists (select name from tabItem item where item.name=so_item.item_code
and (ifnull(item.is_pro_applicable, 'No') = 'Yes'
or ifnull(item.is_sub_contracted_item, 'No') = 'Yes') %s)
or exists (select name from tabPacked_Item pi
where pi.parent = so.name and pi.parent_item = so_item.item_code
and exists (select name from tabItem item where item.name=pi.item_code
and (ifnull(item.is_pro_applicable, 'No') = 'Yes'
or ifnull(item.is_sub_contracted_item, 'No') = 'Yes') %s)))
""" % ('%s', so_filter, item_filter, item_filter), self.company, as_dict=1)
self.add_so_in_table(open_so)
def add_so_in_table(self, open_so):
""" Add sales orders in the table"""
self.clear_so_table()
so_list = [d.sales_order for d in self.get('pp_so_details')]
for r in open_so:
if cstr(r['name']) not in so_list:
pp_so = self.append('pp_so_details', {})
pp_so.sales_order = r['name']
pp_so.sales_order_date = cstr(r['transaction_date'])
pp_so.customer = cstr(r['customer'])
pp_so.grand_total = flt(r['grand_total'])
def get_items_from_so(self):
""" Pull items from Sales Order, only proction item
and subcontracted item will be pulled from Packing item
and add items in the table
"""
items = self.get_items()
self.add_items(items)
def get_items(self):
so_list = filter(None, [d.sales_order for d in self.get('pp_so_details')])
if not so_list:
msgprint(_("Please enter sales order in the above table"))
return []
item_condition = ""
if self.fg_item:
item_condition = ' and so_item.item_code = "' + self.fg_item + '"'
items = frappe.db.sql("""select distinct parent, item_code, warehouse,
(qty - ifnull(delivered_qty, 0)) as pending_qty
from tabSales_Order_Item so_item
where parent in (%s) and docstatus = 1 and ifnull(qty, 0) > ifnull(delivered_qty, 0)
and exists (select * from tabItem item where item.name=so_item.item_code
and (ifnull(item.is_pro_applicable, 'No') = 'Yes'
or ifnull(item.is_sub_contracted_item, 'No') = 'Yes')) %s""" % \
(", ".join(["%s"] * len(so_list)), item_condition), tuple(so_list), as_dict=1)
if self.fg_item:
item_condition = ' and pi.item_code = "' + self.fg_item + '"'
packed_items = frappe.db.sql("""select distinct pi.parent, pi.item_code, pi.warehouse as warehouse,
(((so_item.qty - ifnull(so_item.delivered_qty, 0)) * pi.qty) / so_item.qty)
as pending_qty
from tabSales_Order_Item so_item, tabPacked_Item pi
where so_item.parent = pi.parent and so_item.docstatus = 1
and pi.parent_item = so_item.item_code
and so_item.parent in (%s) and ifnull(so_item.qty, 0) > ifnull(so_item.delivered_qty, 0)
and exists (select * from tabItem item where item.name=pi.item_code
and (ifnull(item.is_pro_applicable, 'No') = 'Yes'
or ifnull(item.is_sub_contracted_item, 'No') = 'Yes')) %s""" % \
(", ".join(["%s"] * len(so_list)), item_condition), tuple(so_list), as_dict=1)
return items + packed_items
def add_items(self, items):
self.clear_item_table()
for p in items:
item_details = frappe.db.sql("""select description, stock_uom, default_bom
from tabItem where name=%s""", p['item_code'])
pi = self.append('pp_details', {})
pi.sales_order = p['parent']
pi.warehouse = p['warehouse']
pi.item_code = p['item_code']
pi.description = item_details and item_details[0][0] or ''
pi.stock_uom = item_details and item_details[0][1] or ''
pi.bom_no = item_details and item_details[0][2] or ''
pi.so_pending_qty = flt(p['pending_qty'])
pi.planned_qty = flt(p['pending_qty'])
def validate_data(self):
self.validate_company()
for d in self.get('pp_details'):
self.validate_bom_no(d)
if not flt(d.planned_qty):
frappe.throw(_("Please enter Planned Qty for Item {0} at row {1}").format(d.item_code, d.idx))
def validate_bom_no(self, d):
if not d.bom_no:
frappe.throw(_("Please enter BOM for Item {0} at row {1}").format(d.item_code, d.idx))
else:
bom = frappe.db.sql("""select name from tabBOM where name = %s and item = %s
and docstatus = 1 and is_active = 1""",
(d.bom_no, d.item_code), as_dict = 1)
if not bom:
frappe.throw(_("Incorrect or Inactive BOM {0} for Item {1} at row {2}").format(d.bom_no, d.item_code, d.idx))
def raise_production_order(self):
"""It will raise production order (Draft) for all distinct FG items"""
self.validate_data()
from erpnext.utilities.transaction_base import validate_uom_is_integer
validate_uom_is_integer(self, "stock_uom", "planned_qty")
items = self.get_distinct_items_and_boms()[1]
pro = self.create_production_order(items)
if pro:
pro = ["""<a href="#Form/Production Order/%s" target="_blank">%s</a>""" % \
(p, p) for p in pro]
msgprint(_("{0} created").format(comma_and(pro)))
else :
msgprint(_("No Production Orders created"))
def get_distinct_items_and_boms(self):
""" Club similar BOM and item for processing
bom_dict {
bom_no: ['sales_order', 'qty']
}
"""
item_dict, bom_dict = {}, {}
for d in self.get("pp_details"):
bom_dict.setdefault(d.bom_no, []).append([d.sales_order, flt(d.planned_qty)])
item_dict[(d.item_code, d.sales_order, d.warehouse)] = {
"production_item" : d.item_code,
"sales_order" : d.sales_order,
"qty" : flt(item_dict.get((d.item_code, d.sales_order, d.warehouse),
{}).get("qty")) + flt(d.planned_qty),
"bom_no" : d.bom_no,
"description" : d.description,
"stock_uom" : d.stock_uom,
"company" : self.company,
"wip_warehouse" : "",
"fg_warehouse" : d.warehouse,
"status" : "Draft",
}
return bom_dict, item_dict
def create_production_order(self, items):
"""Create production order. Called from Production Planning Tool"""
from erpnext.manufacturing.doctype.production_order.production_order import OverProductionError
pro_list = []
for key in items:
pro = frappe.new_doc("Production Order")
pro.update(items[key])
frappe.flags.mute_messages = True
try:
pro.insert()
pro_list.append(pro.name)
except OverProductionError:
pass
frappe.flags.mute_messages = False
return pro_list
def download_raw_materials(self):
""" Create csv data for required raw material to produce finished goods"""
self.validate_data()
bom_dict = self.get_distinct_items_and_boms()[0]
self.get_raw_materials(bom_dict)
return self.get_csv()
def get_raw_materials(self, bom_dict):
""" Get raw materials considering sub-assembly items
{
"item_code": [qty_required, description, stock_uom, min_order_qty]
}
"""
item_list = []
for bom, so_wise_qty in bom_dict.items():
bom_wise_item_details = {}
if self.use_multi_level_bom:
				# get all raw materials with sub-assembly children
# Did not use qty_consumed_per_unit in the query, as it leads to rounding loss
for d in frappe.db.sql("""select fb.item_code,
ifnull(sum(ifnull(fb.qty, 0)/ifnull(bom.quantity, 1)), 0) as qty,
fb.description, fb.stock_uom, it.min_order_qty
from tabBOM_Explosion_Item fb, tabBOM bom, tabItem it
where bom.name = fb.parent and it.name = fb.item_code and ifnull(it.is_pro_applicable, 'No') = 'No'
and ifnull(it.is_sub_contracted_item, 'No') = 'No'
and fb.docstatus<2 and bom.name=%s
group by item_code, stock_uom""", bom, as_dict=1):
bom_wise_item_details.setdefault(d.item_code, d)
else:
# Get all raw materials considering SA items as raw materials,
				# so no children of SA items
for d in frappe.db.sql("""select bom_item.item_code,
ifnull(sum(ifnull(bom_item.qty, 0)/ifnull(bom.quantity, 1)), 0) as qty,
bom_item.description, bom_item.stock_uom, item.min_order_qty
from tabBOM_Item bom_item, tabBOM bom, tabItem item
where bom.name = bom_item.parent and bom.name = %s and bom_item.docstatus < 2
and bom_item.item_code = item.name
group by item_code""", bom, as_dict=1):
bom_wise_item_details.setdefault(d.item_code, d)
for item, item_details in bom_wise_item_details.items():
for so_qty in so_wise_qty:
item_list.append([item, flt(item_details.qty) * so_qty[1], item_details.description,
item_details.stock_uom, item_details.min_order_qty, so_qty[0]])
self.make_items_dict(item_list)
def make_items_dict(self, item_list):
for i in item_list:
self.item_dict.setdefault(i[0], []).append([flt(i[1]), i[2], i[3], i[4], i[5]])
def get_csv(self):
item_list = [['Item Code', 'Description', 'Stock UOM', 'Required Qty', 'Warehouse',
'Quantity Requested for Purchase', 'Ordered Qty', 'Actual Qty']]
for item in self.item_dict:
total_qty = sum([flt(d[0]) for d in self.item_dict[item]])
item_list.append([item, self.item_dict[item][0][1], self.item_dict[item][0][2], total_qty])
item_qty = frappe.db.sql("""select warehouse, indented_qty, ordered_qty, actual_qty
from tabBin where item_code = %s""", item, as_dict=1)
i_qty, o_qty, a_qty = 0, 0, 0
for w in item_qty:
i_qty, o_qty, a_qty = i_qty + flt(w.indented_qty), o_qty + flt(w.ordered_qty), a_qty + flt(w.actual_qty)
item_list.append(['', '', '', '', w.warehouse, flt(w.indented_qty),
flt(w.ordered_qty), flt(w.actual_qty)])
if item_qty:
item_list.append(['', '', '', '', 'Total', i_qty, o_qty, a_qty])
return item_list
def raise_purchase_request(self):
"""
Raise Material Request if projected qty is less than qty required
Requested qty should be shortage qty considering minimum order qty
"""
self.validate_data()
if not self.purchase_request_for_warehouse:
frappe.throw(_("Please enter Warehouse for which Material Request will be raised"))
bom_dict = self.get_distinct_items_and_boms()[0]
self.get_raw_materials(bom_dict)
if self.item_dict:
self.insert_purchase_request()
def get_requested_items(self):
item_projected_qty = self.get_projected_qty()
items_to_be_requested = frappe._dict()
for item, so_item_qty in self.item_dict.items():
requested_qty = 0
total_qty = sum([flt(d[0]) for d in so_item_qty])
if total_qty > item_projected_qty.get(item, 0):
# shortage
requested_qty = total_qty - flt(item_projected_qty.get(item))
# consider minimum order qty
requested_qty = requested_qty > flt(so_item_qty[0][3]) and \
requested_qty or flt(so_item_qty[0][3])
# distribute requested qty SO wise
for item_details in so_item_qty:
if requested_qty:
sales_order = item_details[4] or "No Sales Order"
if requested_qty <= item_details[0]:
adjusted_qty = requested_qty
else:
adjusted_qty = item_details[0]
items_to_be_requested.setdefault(item, {}).setdefault(sales_order, 0)
items_to_be_requested[item][sales_order] += adjusted_qty
requested_qty -= adjusted_qty
else:
break
# requested qty >= total so qty, due to minimum order qty
if requested_qty:
items_to_be_requested.setdefault(item, {}).setdefault("No Sales Order", 0)
items_to_be_requested[item]["No Sales Order"] += requested_qty
return items_to_be_requested
def get_projected_qty(self):
items = self.item_dict.keys()
item_projected_qty = frappe.db.sql("""select item_code, sum(projected_qty)
from tabBin where item_code in (%s) and warehouse=%s group by item_code""" %
(", ".join(["%s"]*len(items)), '%s'), tuple(items + [self.purchase_request_for_warehouse]))
return dict(item_projected_qty)
def insert_purchase_request(self):
items_to_be_requested = self.get_requested_items()
from erpnext.accounts.utils import get_fiscal_year
fiscal_year = get_fiscal_year(nowdate())[0]
purchase_request_list = []
if items_to_be_requested:
for item in items_to_be_requested:
item_wrapper = frappe.get_doc("Item", item)
pr_doc = frappe.new_doc("Material Request")
pr_doc.update({
"transaction_date": nowdate(),
"status": "Draft",
"company": self.company,
"fiscal_year": fiscal_year,
"requested_by": frappe.session.user,
"material_request_type": "Purchase"
})
for sales_order, requested_qty in items_to_be_requested[item].items():
pr_doc.append("indent_details", {
"doctype": "Material Request Item",
"__islocal": 1,
"item_code": item,
"item_name": item_wrapper.item_name,
"description": item_wrapper.description,
"uom": item_wrapper.stock_uom,
"item_group": item_wrapper.item_group,
"brand": item_wrapper.brand,
"qty": requested_qty,
"schedule_date": add_days(nowdate(), cint(item_wrapper.lead_time_days)),
"warehouse": self.purchase_request_for_warehouse,
"sales_order_no": sales_order if sales_order!="No Sales Order" else None
})
pr_doc.ignore_permissions = 1
pr_doc.submit()
purchase_request_list.append(pr_doc.name)
if purchase_request_list:
pur_req = ["""<a href="#Form/Material Request/%s" target="_blank">%s</a>""" % \
(p, p) for p in purchase_request_list]
msgprint(_("Material Requests {0} created").format(comma_and(pur_req)))
else:
msgprint(_("Nothing to request"))
|
agpl-3.0
| -4,334,333,021,142,938,000
| 37.039506
| 113
| 0.652084
| false
| 2.918909
| false
| false
| false
|
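# Note: get_distinct_items_and_boms() in production_planning_tool.py above clubs the
# planning rows two ways: bom_dict keeps one [sales_order, planned_qty] pair per row
# for each BOM, while item_dict accumulates qty per (item_code, sales_order,
# warehouse) key. The standalone sketch below mirrors only that grouping step with
# made-up row data (it drops extra fields such as description and stock_uom for
# brevity).
from collections import defaultdict

rows = [  # hypothetical 'pp_details' rows
    dict(item_code='FG-001', bom_no='BOM-FG-001', sales_order='SO-0001',
         warehouse='Stores - C', planned_qty=5.0),
    dict(item_code='FG-001', bom_no='BOM-FG-001', sales_order='SO-0001',
         warehouse='Stores - C', planned_qty=3.0),
]

bom_dict = defaultdict(list)
item_qty = {}
for d in rows:
    bom_dict[d['bom_no']].append([d['sales_order'], d['planned_qty']])
    key = (d['item_code'], d['sales_order'], d['warehouse'])
    item_qty[key] = item_qty.get(key, 0) + d['planned_qty']

# bom_dict -> {'BOM-FG-001': [['SO-0001', 5.0], ['SO-0001', 3.0]]}
# item_qty -> {('FG-001', 'SO-0001', 'Stores - C'): 8.0}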
chronicle/api-samples-python
|
common/chronicle_auth.py
|
1
|
2712
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Helper functions to access Chronicle APIs using OAuth 2.0.
Background information:
https://google-auth.readthedocs.io/en/latest/user-guide.html#service-account-private-key-files
https://developers.google.com/identity/protocols/oauth2#serviceaccount
Details about using the Google-auth library with the Requests library:
https://github.com/googleapis/google-auth-library-python/blob/master/google/auth/transport/requests.py
https://requests.readthedocs.io
"""
import argparse
import pathlib
from typing import Optional, Union
from google.auth.transport import requests
from google.oauth2 import service_account
DEFAULT_CREDENTIALS_FILE = pathlib.Path.home() / ".chronicle_credentials.json"
AUTHORIZATION_SCOPES = ["https://www.googleapis.com/auth/chronicle-backstory"]
def initialize_http_session(
credentials_file_path: Optional[Union[str, pathlib.Path]]
) -> requests.AuthorizedSession:
"""Initializes an authorized HTTP session, based on the given credentials.
Args:
credentials_file_path: Absolute or relative path to a JSON file containing
the private OAuth 2.0 credentials of a Google Cloud Platform service
account. Optional - the default is ".chronicle_credentials.json" in the
user's home directory. Keep it secret, keep it safe.
Returns:
HTTP session object to send authorized requests and receive responses.
Raises:
OSError: Failed to read the given file, e.g. not found, no read access
(https://docs.python.org/library/exceptions.html#os-exceptions).
ValueError: Invalid file contents.
"""
if not credentials_file_path:
credentials_file_path = DEFAULT_CREDENTIALS_FILE
credentials = service_account.Credentials.from_service_account_file(
str(credentials_file_path), scopes=AUTHORIZATION_SCOPES)
return requests.AuthorizedSession(credentials)
def add_argument_credentials_file(parser: argparse.ArgumentParser):
"""Adds a shared command-line argument to all the sample modules."""
parser.add_argument(
"-c",
"--credentials_file",
type=str,
help=f"credentials file path (default: '{DEFAULT_CREDENTIALS_FILE}')")
|
apache-2.0
| -6,668,738,066,000,931,000
| 36.666667
| 102
| 0.760324
| false
| 4.011834
| false
| false
| false
|
dc3-plaso/plaso
|
plaso/analysis/unique_domains_visited.py
|
1
|
2742
|
# -*- coding: utf-8 -*-
"""A plugin to generate a list of domains visited."""
import sys
if sys.version_info[0] < 3:
import urlparse
else:
from urllib import parse as urlparse # pylint: disable=no-name-in-module
from plaso.analysis import interface
from plaso.analysis import manager
from plaso.containers import reports
class UniqueDomainsVisitedPlugin(interface.AnalysisPlugin):
"""A plugin to generate a list all domains visited.
This plugin will extract domains from browser history events extracted by
Plaso. The list produced can be used to quickly determine if there has been
a visit to a site of interest, for example, a known phishing site.
"""
NAME = u'unique_domains_visited'
# Indicate that we can run this plugin during regular extraction.
ENABLE_IN_EXTRACTION = True
_DATATYPES = frozenset([
u'chrome:history:file_downloaded', u'chrome:history:page_visited',
u'firefox:places:page_visited', u'firefox:downloads:download',
u'macosx:lsquarantine', u'msiecf:redirected', u'msiecf:url',
u'msie:webcache:container', u'opera:history', u'safari:history:visit'])
def __init__(self):
"""Initializes the domains visited plugin."""
super(UniqueDomainsVisitedPlugin, self).__init__()
self._domains = []
def ExamineEvent(self, mediator, event):
"""Analyzes an event and extracts domains from it.
We only evaluate straightforward web history events, not visits which can
be inferred by TypedURLs, cookies or other means.
Args:
mediator (AnalysisMediator): mediates interactions between
analysis plugins and other components, such as storage and dfvfs.
event (EventObject): event to examine.
"""
if event.data_type not in self._DATATYPES:
return
url = getattr(event, u'url', None)
if url is None:
return
parsed_url = urlparse.urlparse(url)
domain = getattr(parsed_url, u'netloc', None)
if domain in self._domains:
# We've already found an event containing this domain.
return
self._domains.append(domain)
def CompileReport(self, mediator):
"""Compiles an analysis report.
Args:
mediator (AnalysisMediator): mediates interactions between
analysis plugins and other components, such as storage and dfvfs.
Returns:
The analysis report (instance of AnalysisReport).
"""
lines_of_text = [u'Listing domains visited by all users']
for domain in sorted(self._domains):
lines_of_text.append(domain)
lines_of_text.append(u'')
report_text = u'\n'.join(lines_of_text)
return reports.AnalysisReport(plugin_name=self.NAME, text=report_text)
manager.AnalysisPluginManager.RegisterPlugin(UniqueDomainsVisitedPlugin)
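# Minimal illustration, added and hedged, of the domain extraction performed in
# ExamineEvent above, independent of the Plaso event plumbing:
#   urlparse.urlparse(u'https://example.com/login?next=/a').netloc == u'example.com'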
|
apache-2.0
| -1,282,014,892,101,884,400
| 32.036145
| 77
| 0.710795
| false
| 3.889362
| false
| false
| false
|
fmierlo/Xamarin.Helpers
|
Helpers/update-version.py
|
1
|
1430
|
import sys
import xml.etree.ElementTree as ElementTree
def log(msg):
sys.stderr.write(msg + '\n')
class Project:
Filename = 'Helpers.csproj'
Schema = '{http://schemas.microsoft.com/developer/msbuild/2003}'
RootTag = Schema + 'Project'
Property = Schema + 'PropertyGroup'
Release = Schema + 'ReleaseVersion'
Package = Schema + 'Description'
class Version:
In = 'Version.cs.in'
Out = 'Version.cs'
def main(*args):
project_tree = ElementTree.parse(Project.Filename)
project = project_tree.getroot()
version = None
package = None
for release in project.iter(Project.Release):
version = release.text
log('Release: {}'.format(version))
break
else:
log('Error: version not found!')
return -1
for name in project.iter(Project.Package):
package = name.text
log('Package: {}'.format(package))
break
else:
log('Error: package not found!')
return -1
with open(Version.In) as input:
with open(Version.Out, 'w') as output:
content = input.read()
content = content.replace('{VersionName}', version)
content = content.replace('{PackageName}', package)
output.write(content)
    log('Wrote: {} -> {}.{} -> {}'.format(Version.In, package, version, Version.Out))
if __name__ == '__main__':
sys.exit(main(*sys.argv))
|
mit
| 6,353,014,324,797,405,000
| 27
| 94
| 0.60084
| false
| 3.923077
| false
| false
| false
|
libracore/erpnext
|
erpnext/erpnext_integrations/doctype/shopify_settings/shopify_settings.py
|
1
|
5006
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import json
from frappe import _
from frappe.model.document import Document
from frappe.utils import get_request_session
from frappe.custom.doctype.custom_field.custom_field import create_custom_fields
from erpnext.erpnext_integrations.utils import get_webhook_address
from erpnext.erpnext_integrations.doctype.shopify_log.shopify_log import make_shopify_log
class ShopifySettings(Document):
def validate(self):
if self.enable_shopify == 1:
setup_custom_fields()
self.validate_access_credentials()
self.register_webhooks()
else:
self.unregister_webhooks()
def validate_access_credentials(self):
if not (self.get_password(raise_exception=False) and self.api_key and self.shopify_url):
frappe.msgprint(_("Missing value for Password, API Key or Shopify URL"), raise_exception=frappe.ValidationError)
def register_webhooks(self):
webhooks = ["orders/create", "orders/paid", "orders/fulfilled"]
# url = get_shopify_url('admin/webhooks.json', self)
created_webhooks = [d.method for d in self.webhooks]
		url = get_shopify_url('admin/api/2019-04/webhooks.json', self)
		for method in webhooks:
			session = get_request_session()
			try:
d = session.post(url, data=json.dumps({
"webhook": {
"topic": method,
"address": get_webhook_address(connector_name='shopify_connection', method='store_request_data'),
"format": "json"
}
}), headers=get_header(self))
d.raise_for_status()
self.update_webhook_table(method, d.json())
except Exception as e:
make_shopify_log(status="Warning", message=e, exception=False)
def unregister_webhooks(self):
session = get_request_session()
deleted_webhooks = []
for d in self.webhooks:
			url = get_shopify_url('admin/api/2019-04/webhooks/{0}.json'.format(d.webhook_id), self)
try:
res = session.delete(url, headers=get_header(self))
res.raise_for_status()
deleted_webhooks.append(d)
except Exception as e:
frappe.log_error(message=frappe.get_traceback(), title=e)
for d in deleted_webhooks:
self.remove(d)
def update_webhook_table(self, method, res):
self.append("webhooks", {
"webhook_id": res['webhook']['id'],
"method": method
})
def get_shopify_url(path, settings):
if settings.app_type == "Private":
return 'https://{}:{}@{}/{}'.format(settings.api_key, settings.get_password('password'), settings.shopify_url, path)
else:
return 'https://{}/{}'.format(settings.shopify_url, path)
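# Illustrative example, added and hedged: with hypothetical Private-app settings
# api_key='key', password='pwd' and shopify_url='demo.myshopify.com',
# get_shopify_url('admin/api/2019-04/webhooks.json', settings) would return
# 'https://key:pwd@demo.myshopify.com/admin/api/2019-04/webhooks.json'.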
def get_header(settings):
header = {'Content-Type': 'application/json'}
	return header
@frappe.whitelist()
def get_series():
return {
"sales_order_series" : frappe.get_meta("Sales Order").get_options("naming_series") or "SO-Shopify-",
"sales_invoice_series" : frappe.get_meta("Sales Invoice").get_options("naming_series") or "SI-Shopify-",
"delivery_note_series" : frappe.get_meta("Delivery Note").get_options("naming_series") or "DN-Shopify-"
}
def setup_custom_fields():
custom_fields = {
"Customer": [
dict(fieldname='shopify_customer_id', label='Shopify Customer Id',
fieldtype='Data', insert_after='series', read_only=1, print_hide=1)
],
"Supplier": [
dict(fieldname='shopify_supplier_id', label='Shopify Supplier Id',
fieldtype='Data', insert_after='supplier_name', read_only=1, print_hide=1)
],
"Address": [
dict(fieldname='shopify_address_id', label='Shopify Address Id',
fieldtype='Data', insert_after='fax', read_only=1, print_hide=1)
],
"Item": [
dict(fieldname='shopify_variant_id', label='Shopify Variant Id',
fieldtype='Data', insert_after='item_code', read_only=1, print_hide=1),
dict(fieldname='shopify_product_id', label='Shopify Product Id',
fieldtype='Data', insert_after='item_code', read_only=1, print_hide=1),
dict(fieldname='shopify_description', label='Shopify Description',
fieldtype='Text Editor', insert_after='description', read_only=1, print_hide=1)
],
"Sales Order": [
dict(fieldname='shopify_order_id', label='Shopify Order Id',
fieldtype='Data', insert_after='title', read_only=1, print_hide=1)
],
"Delivery Note":[
dict(fieldname='shopify_order_id', label='Shopify Order Id',
fieldtype='Data', insert_after='title', read_only=1, print_hide=1),
dict(fieldname='shopify_fulfillment_id', label='Shopify Fulfillment Id',
fieldtype='Data', insert_after='title', read_only=1, print_hide=1)
],
"Sales Invoice": [
dict(fieldname='shopify_order_id', label='Shopify Order Id',
fieldtype='Data', insert_after='title', read_only=1, print_hide=1)
]
}
create_custom_fields(custom_fields)
|
gpl-3.0
| 6,692,700,997,614,586,000
| 36.081481
| 118
| 0.696764
| false
| 3.067402
| false
| false
| false
|
cfbolz/parsimony
|
src/tm/tm4/tm4_meta/tm4_simulator.py
|
1
|
1445
|
import string
import sys
from state import *
from tmsim import *
if __name__ == "__main__":
name = sys.argv[-1]
fileName = name + ".tm4"
path = "../tm4_files/" + fileName
try:
assert len(sys.argv) > 1
for flag in sys.argv[2:-1]:
if not (flag in ["-q", "-s", "-f"]):
int(flag)
except:
raise Exception("Usage: python tm4_simulator.py [-q] [-s] [# steps before aborting] [-f] [name of TM4 file]\n \
Enable -q if you want no program output\n \
Enable -l if you want limited program output\n \
Enable -s followed by the max number of steps if you want to stop interpreting after a certain number of commands\n \
Enable -f if you want to dump the history into a file in tm4_histories instead of the standard output.")
sttm = SingleTapeTuringMachine(path, ["_", "1", "H", "E"])
args = sys.argv[1:-1]
quiet = ("-q" in args)
limited = ("-l" in args)
numSteps = sys.maxint
if ("-s" in args):
numSteps = args[args.index("-s") + 1]
output = None
if ("-f" in args):
output = open("../tm4_histories/" + name + "_history.txt", "w")
try:
assert "-s" in args
except:
raise Exception("You can't include the -f flag without also specifying a maximum step count with the -s flag!")
sttm.run(quiet, numSteps, output)
|
mit
| -8,676,187,172,972,371,000
| 29.744681
| 129
| 0.555709
| false
| 3.705128
| false
| false
| false
|
sloria/osf.io
|
osf/models/user.py
|
1
|
64823
|
import datetime as dt
import logging
import re
import urllib
import urlparse
import uuid
from copy import deepcopy
from os.path import splitext
from flask import Request as FlaskRequest
from framework import analytics
from guardian.shortcuts import get_perms
# OSF imports
import itsdangerous
import pytz
from dirtyfields import DirtyFieldsMixin
from django.conf import settings
from django.contrib.auth.base_user import AbstractBaseUser, BaseUserManager
from django.contrib.auth.hashers import check_password
from django.contrib.auth.models import PermissionsMixin
from django.dispatch import receiver
from django.db import models
from django.db.models import Count
from django.db.models.signals import post_save
from django.utils import timezone
from framework.auth import Auth, signals, utils
from framework.auth.core import generate_verification_key
from framework.auth.exceptions import (ChangePasswordError, ExpiredTokenError,
InvalidTokenError,
MergeConfirmedRequiredError,
MergeConflictError)
from framework.exceptions import PermissionsError
from framework.sessions.utils import remove_sessions_for_user
from osf.utils.requests import get_current_request
from osf.exceptions import reraise_django_validation_errors, MaxRetriesError, UserStateError
from osf.models.base import BaseModel, GuidMixin, GuidMixinQuerySet
from osf.models.contributor import Contributor, RecentlyAddedContributor
from osf.models.institution import Institution
from osf.models.mixins import AddonModelMixin
from osf.models.session import Session
from osf.models.tag import Tag
from osf.models.validators import validate_email, validate_social, validate_history_item
from osf.utils.datetime_aware_jsonfield import DateTimeAwareJSONField
from osf.utils.fields import NonNaiveDateTimeField, LowercaseEmailField
from osf.utils.names import impute_names
from osf.utils.requests import check_select_for_update
from website import settings as website_settings
from website import filters, mails
from website.project import new_bookmark_collection
logger = logging.getLogger(__name__)
MAX_QUICKFILES_MERGE_RENAME_ATTEMPTS = 1000
def get_default_mailing_lists():
return {'Open Science Framework Help': True}
name_formatters = {
'long': lambda user: user.fullname,
'surname': lambda user: user.family_name if user.family_name else user.fullname,
'initials': lambda user: u'{surname}, {initial}.'.format(
surname=user.family_name,
initial=user.given_name_initial,
),
}
class OSFUserManager(BaseUserManager):
def create_user(self, username, password=None):
if not username:
raise ValueError('Users must have a username')
user = self.model(
username=self.normalize_email(username),
is_active=True,
date_registered=timezone.now()
)
user.set_password(password)
user.save(using=self._db)
return user
_queryset_class = GuidMixinQuerySet
def all(self):
return self.get_queryset().all()
def eager(self, *fields):
fk_fields = set(self.model.get_fk_field_names()) & set(fields)
m2m_fields = set(self.model.get_m2m_field_names()) & set(fields)
return self.select_related(*fk_fields).prefetch_related(*m2m_fields)
def create_superuser(self, username, password):
user = self.create_user(username, password=password)
user.is_superuser = True
user.is_staff = True
user.is_active = True
user.save(using=self._db)
return user
class Email(BaseModel):
address = LowercaseEmailField(unique=True, db_index=True, validators=[validate_email])
user = models.ForeignKey('OSFUser', related_name='emails', on_delete=models.CASCADE)
def __unicode__(self):
return self.address
class OSFUser(DirtyFieldsMixin, GuidMixin, BaseModel, AbstractBaseUser, PermissionsMixin, AddonModelMixin):
FIELD_ALIASES = {
'_id': 'guids___id',
'system_tags': 'tags',
}
settings_type = 'user' # Needed for addons
USERNAME_FIELD = 'username'
# Node fields that trigger an update to the search engine on save
SEARCH_UPDATE_FIELDS = {
'fullname',
'given_name',
'middle_names',
'family_name',
'suffix',
'merged_by',
'date_disabled',
'date_confirmed',
'jobs',
'schools',
'social',
}
TRACK_FIELDS = SEARCH_UPDATE_FIELDS.copy()
TRACK_FIELDS.update({'password', 'last_login'})
# TODO: Add SEARCH_UPDATE_NODE_FIELDS, for fields that should trigger a
# search update for all nodes to which the user is a contributor.
SOCIAL_FIELDS = {
'orcid': u'http://orcid.org/{}',
'github': u'http://github.com/{}',
'scholar': u'http://scholar.google.com/citations?user={}',
'twitter': u'http://twitter.com/{}',
'profileWebsites': [],
'linkedIn': u'https://www.linkedin.com/{}',
'impactStory': u'https://impactstory.org/u/{}',
'researcherId': u'http://researcherid.com/rid/{}',
'researchGate': u'https://researchgate.net/profile/{}',
'academiaInstitution': u'https://{}',
'academiaProfileID': u'.academia.edu/{}',
'baiduScholar': u'http://xueshu.baidu.com/scholarID/{}',
'ssrn': u'http://papers.ssrn.com/sol3/cf_dev/AbsByAuth.cfm?per_id={}'
}
# The primary email address for the account.
# This value is unique, but multiple "None" records exist for:
# * unregistered contributors where an email address was not provided.
# TODO: Update mailchimp subscription on username change in user.save()
# TODO: Consider making this a FK to Email with to_field='address'
# Django supports this (https://docs.djangoproject.com/en/1.11/topics/auth/customizing/#django.contrib.auth.models.CustomUser.USERNAME_FIELD)
# but some third-party apps may not.
username = models.CharField(max_length=255, db_index=True, unique=True)
# Hashed. Use `User.set_password` and `User.check_password`
# password = models.CharField(max_length=255)
fullname = models.CharField(max_length=255)
# user has taken action to register the account
is_registered = models.BooleanField(db_index=True, default=False)
# user has claimed the account
# TODO: This should be retired - it always reflects is_registered.
# While a few entries exist where this is not the case, they appear to be
# the result of a bug, as they were all created over a small time span.
is_claimed = models.BooleanField(default=False, db_index=True)
# for internal use
tags = models.ManyToManyField('Tag', blank=True)
# security emails that have been sent
# TODO: This should be removed and/or merged with system_tags
security_messages = DateTimeAwareJSONField(default=dict, blank=True)
# Format: {
# <message label>: <datetime>
# ...
# }
# user was invited (as opposed to registered unprompted)
is_invited = models.BooleanField(default=False, db_index=True)
# Per-project unclaimed user data:
# TODO: add validation
unclaimed_records = DateTimeAwareJSONField(default=dict, blank=True)
# Format: {
# <project_id>: {
# 'name': <name that referrer provided>,
# 'referrer_id': <user ID of referrer>,
# 'token': <token used for verification urls>,
# 'email': <email the referrer provided or None>,
# 'claimer_email': <email the claimer entered or None>,
# 'last_sent': <timestamp of last email sent to referrer or None>
# }
# ...
# }
# Time of last sent notification email to newly added contributors
# Format : {
# <project_id>: {
# 'last_sent': time.time()
# }
# ...
# }
contributor_added_email_records = DateTimeAwareJSONField(default=dict, blank=True)
# The user into which this account was merged
merged_by = models.ForeignKey('self', null=True, blank=True, related_name='merger')
# verification key v1: only the token string, no expiration time
# used for cas login with username and verification key
verification_key = models.CharField(max_length=255, null=True, blank=True)
# verification key v2: token, and expiration time
# used for password reset, confirm account/email, claim account/contributor-ship
verification_key_v2 = DateTimeAwareJSONField(default=dict, blank=True, null=True)
# Format: {
# 'token': <verification token>
# 'expires': <verification expiration time>
# }
email_last_sent = NonNaiveDateTimeField(null=True, blank=True)
change_password_last_attempt = NonNaiveDateTimeField(null=True, blank=True)
# Logs number of times user attempted to change their password where their
# old password was invalid
old_password_invalid_attempts = models.PositiveIntegerField(default=0)
# email verification tokens
# see also ``unconfirmed_emails``
email_verifications = DateTimeAwareJSONField(default=dict, blank=True)
# Format: {
# <token> : {'email': <email address>,
# 'expiration': <datetime>}
# }
# email lists to which the user has chosen a subscription setting
mailchimp_mailing_lists = DateTimeAwareJSONField(default=dict, blank=True)
# Format: {
# 'list1': True,
    # 'list2': False,
# ...
# }
# email lists to which the user has chosen a subscription setting,
# being sent from osf, rather than mailchimp
osf_mailing_lists = DateTimeAwareJSONField(default=get_default_mailing_lists, blank=True)
# Format: {
# 'list1': True,
    # 'list2': False,
# ...
# }
# the date this user was registered
date_registered = NonNaiveDateTimeField(db_index=True, auto_now_add=True)
# list of collaborators that this user recently added to nodes as a contributor
# recently_added = fields.ForeignField("user", list=True)
recently_added = models.ManyToManyField('self',
through=RecentlyAddedContributor,
through_fields=('user', 'contributor'),
symmetrical=False)
# Attached external accounts (OAuth)
# external_accounts = fields.ForeignField("externalaccount", list=True)
external_accounts = models.ManyToManyField('ExternalAccount', blank=True)
# CSL names
given_name = models.CharField(max_length=255, blank=True)
middle_names = models.CharField(max_length=255, blank=True)
family_name = models.CharField(max_length=255, blank=True)
suffix = models.CharField(max_length=255, blank=True)
# identity for user logged in through external idp
external_identity = DateTimeAwareJSONField(default=dict, blank=True)
# Format: {
# <external_id_provider>: {
# <external_id>: <status from ('VERIFIED, 'CREATE', 'LINK')>,
# ...
# },
# ...
# }
# Employment history
jobs = DateTimeAwareJSONField(default=list, blank=True, validators=[validate_history_item])
# Format: list of {
# 'title': <position or job title>,
# 'institution': <institution or organization>,
# 'department': <department>,
# 'location': <location>,
# 'startMonth': <start month>,
# 'startYear': <start year>,
# 'endMonth': <end month>,
# 'endYear': <end year>,
    # 'ongoing': <boolean>
# }
# Educational history
schools = DateTimeAwareJSONField(default=list, blank=True, validators=[validate_history_item])
# Format: list of {
# 'degree': <position or job title>,
# 'institution': <institution or organization>,
# 'department': <department>,
# 'location': <location>,
# 'startMonth': <start month>,
# 'startYear': <start year>,
# 'endMonth': <end month>,
# 'endYear': <end year>,
    # 'ongoing': <boolean>
# }
# Social links
social = DateTimeAwareJSONField(default=dict, blank=True, validators=[validate_social])
# Format: {
# 'profileWebsites': <list of profile websites>
# 'twitter': <twitter id>,
# }
# date the user last sent a request
date_last_login = NonNaiveDateTimeField(null=True, blank=True)
# date the user first successfully confirmed an email address
date_confirmed = NonNaiveDateTimeField(db_index=True, null=True, blank=True)
# When the user was disabled.
date_disabled = NonNaiveDateTimeField(db_index=True, null=True, blank=True)
# When the user was soft-deleted (GDPR)
deleted = NonNaiveDateTimeField(db_index=True, null=True, blank=True)
# when comments were last viewed
comments_viewed_timestamp = DateTimeAwareJSONField(default=dict, blank=True)
# Format: {
# 'Comment.root_target._id': 'timestamp',
# ...
# }
# timezone for user's locale (e.g. 'America/New_York')
timezone = models.CharField(blank=True, default='Etc/UTC', max_length=255)
# user language and locale data (e.g. 'en_US')
locale = models.CharField(blank=True, max_length=255, default='en_US')
# whether the user has requested to deactivate their account
requested_deactivation = models.BooleanField(default=False)
affiliated_institutions = models.ManyToManyField('Institution', blank=True)
notifications_configured = DateTimeAwareJSONField(default=dict, blank=True)
# The time at which the user agreed to our updated ToS and Privacy Policy (GDPR, 25 May 2018)
accepted_terms_of_service = NonNaiveDateTimeField(null=True, blank=True)
objects = OSFUserManager()
is_active = models.BooleanField(default=False)
is_staff = models.BooleanField(default=False)
def __repr__(self):
return '<OSFUser({0!r}) with guid {1!r}>'.format(self.username, self._id)
@property
def deep_url(self):
"""Used for GUID resolution."""
return '/profile/{}/'.format(self._primary_key)
@property
def url(self):
return '/{}/'.format(self._id)
@property
def absolute_url(self):
return urlparse.urljoin(website_settings.DOMAIN, self.url)
@property
def absolute_api_v2_url(self):
from website import util
return util.api_v2_url('users/{}/'.format(self._id))
@property
def api_url(self):
return '/api/v1/profile/{}/'.format(self._id)
@property
def profile_url(self):
return '/{}/'.format(self._id)
@property
def is_disabled(self):
return self.date_disabled is not None
@is_disabled.setter
def is_disabled(self, val):
"""Set whether or not this account has been disabled."""
if val and not self.date_disabled:
self.date_disabled = timezone.now()
elif val is False:
self.date_disabled = None
@property
def is_confirmed(self):
return bool(self.date_confirmed)
@property
def is_merged(self):
"""Whether or not this account has been merged into another account.
"""
return self.merged_by is not None
@property
def unconfirmed_emails(self):
# Handle when email_verifications field is None
email_verifications = self.email_verifications or {}
return [
each['email']
for each
in email_verifications.values()
]
@property
def social_links(self):
social_user_fields = {}
for key, val in self.social.items():
if val and key in self.SOCIAL_FIELDS:
if not isinstance(val, basestring):
social_user_fields[key] = val
else:
social_user_fields[key] = self.SOCIAL_FIELDS[key].format(val)
return social_user_fields
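    # Illustrative example, added and hedged: with social set to
    # {'twitter': 'osframework', 'profileWebsites': ['https://example.org']},
    # social_links would return {'twitter': u'http://twitter.com/osframework',
    # 'profileWebsites': ['https://example.org']}.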
@property
def given_name_initial(self):
"""
The user's preferred initialization of their given name.
Some users with common names may choose to distinguish themselves from
their colleagues in this way. For instance, there could be two
well-known researchers in a single field named "Robert Walker".
"Walker, R" could then refer to either of them. "Walker, R.H." could
provide easy disambiguation.
NOTE: The internal representation for this should never end with a
period. "R" and "R.H" would be correct in the prior case, but
"R.H." would not.
"""
return self.given_name[0]
@property
def email(self):
if self.has_usable_username():
return self.username
else:
return None
@property
def all_tags(self):
"""Return a queryset containing all of this user's tags (incl. system tags)."""
# Tag's default manager only returns non-system tags, so we can't use self.tags
return Tag.all_tags.filter(osfuser=self)
@property
def system_tags(self):
"""The system tags associated with this node. This currently returns a list of string
names for the tags, for compatibility with v1. Eventually, we can just return the
QuerySet.
"""
return self.all_tags.filter(system=True).values_list('name', flat=True)
@property
def csl_given_name(self):
return utils.generate_csl_given_name(self.given_name, self.middle_names, self.suffix)
def csl_name(self, node_id=None):
# disabled users are set to is_registered = False but have a fullname
if self.is_registered or self.is_disabled:
name = self.fullname
else:
name = self.get_unclaimed_record(node_id)['name']
if self.family_name and self.given_name:
"""If the user has a family and given name, use those"""
return {
'family': self.family_name,
'given': self.csl_given_name,
}
else:
""" If the user doesn't autofill his family and given name """
parsed = utils.impute_names(name)
given_name = parsed['given']
middle_names = parsed['middle']
family_name = parsed['family']
suffix = parsed['suffix']
csl_given_name = utils.generate_csl_given_name(given_name, middle_names, suffix)
return {
'family': family_name,
'given': csl_given_name,
}
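    # Illustrative example, added and hedged: for a user whose fullname is
    # 'Robert H. Walker' and who has no stored family/given names, csl_name()
    # imputes the parts and returns something like
    # {'family': u'Walker', 'given': u'Robert H'}; the exact value depends on
    # impute_names and generate_csl_given_name.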
@property
def contributor_to(self):
return self.nodes.filter(is_deleted=False, type__in=['osf.node', 'osf.registration'])
@property
def visible_contributor_to(self):
return self.nodes.filter(is_deleted=False, contributor__visible=True, type__in=['osf.node', 'osf.registration'])
def set_unusable_username(self):
"""Sets username to an unusable value. Used for, e.g. for invited contributors
and merged users.
NOTE: This is necessary because Django does not allow the username column to be nullable.
"""
if self._id:
self.username = self._id
else:
self.username = str(uuid.uuid4())
return self.username
def has_usable_username(self):
return '@' in self.username
@property
def is_authenticated(self): # Needed for django compat
return True
@property
def is_anonymous(self):
return False
def get_absolute_url(self):
return self.absolute_api_v2_url
def get_addon_names(self):
return []
# django methods
def get_full_name(self):
return self.fullname
def get_short_name(self):
return self.username
def __unicode__(self):
return self.get_short_name()
def __str__(self):
return self.get_short_name()
@property
def contributed(self):
return self.nodes.all()
@property
def can_be_merged(self):
"""The ability of the `merge_user` method to fully merge the user"""
return all((addon.can_be_merged for addon in self.get_addons()))
def merge_user(self, user):
"""Merge a registered user into this account. This user will be
a contributor on any project. if the registered user and this account
are both contributors of the same project. Then it will remove the
registered user and set this account to the highest permission of the two
and set this account to be visible if either of the two are visible on
the project.
:param user: A User object to be merged.
"""
# Attempt to prevent self merges which end up removing self as a contributor from all projects
if self == user:
raise ValueError('Cannot merge a user into itself')
# Fail if the other user has conflicts.
if not user.can_be_merged:
raise MergeConflictError('Users cannot be merged')
# Move over the other user's attributes
# TODO: confirm
for system_tag in user.system_tags.all():
self.add_system_tag(system_tag)
self.is_claimed = self.is_claimed or user.is_claimed
self.is_invited = self.is_invited or user.is_invited
self.is_superuser = self.is_superuser or user.is_superuser
self.is_staff = self.is_staff or user.is_staff
# copy over profile only if this user has no profile info
if user.jobs and not self.jobs:
self.jobs = user.jobs
if user.schools and not self.schools:
self.schools = user.schools
if user.social and not self.social:
self.social = user.social
unclaimed = user.unclaimed_records.copy()
unclaimed.update(self.unclaimed_records)
self.unclaimed_records = unclaimed
# - unclaimed records should be connected to only one user
user.unclaimed_records = {}
security_messages = user.security_messages.copy()
security_messages.update(self.security_messages)
self.security_messages = security_messages
notifications_configured = user.notifications_configured.copy()
notifications_configured.update(self.notifications_configured)
self.notifications_configured = notifications_configured
if not website_settings.RUNNING_MIGRATION:
for key, value in user.mailchimp_mailing_lists.iteritems():
# subscribe to each list if either user was subscribed
subscription = value or self.mailchimp_mailing_lists.get(key)
signals.user_merged.send(self, list_name=key, subscription=subscription)
# clear subscriptions for merged user
signals.user_merged.send(user, list_name=key, subscription=False, send_goodbye=False)
for target_id, timestamp in user.comments_viewed_timestamp.iteritems():
if not self.comments_viewed_timestamp.get(target_id):
self.comments_viewed_timestamp[target_id] = timestamp
elif timestamp > self.comments_viewed_timestamp[target_id]:
self.comments_viewed_timestamp[target_id] = timestamp
# Give old user's emails to self
user.emails.update(user=self)
for k, v in user.email_verifications.iteritems():
email_to_confirm = v['email']
if k not in self.email_verifications and email_to_confirm != user.username:
self.email_verifications[k] = v
user.email_verifications = {}
self.affiliated_institutions.add(*user.affiliated_institutions.values_list('pk', flat=True))
for service in user.external_identity:
for service_id in user.external_identity[service].iterkeys():
if not (
service_id in self.external_identity.get(service, '') and
self.external_identity[service][service_id] == 'VERIFIED'
):
# Prevent 'CREATE', merging user has already been created.
external = user.external_identity[service][service_id]
status = 'VERIFIED' if external == 'VERIFIED' else 'LINK'
if self.external_identity.get(service):
self.external_identity[service].update(
{service_id: status}
)
else:
self.external_identity[service] = {
service_id: status
}
user.external_identity = {}
# FOREIGN FIELDS
self.external_accounts.add(*user.external_accounts.values_list('pk', flat=True))
# - addons
# Note: This must occur before the merged user is removed as a
# contributor on the nodes, as an event hook is otherwise fired
# which removes the credentials.
for addon in user.get_addons():
user_settings = self.get_or_add_addon(addon.config.short_name)
user_settings.merge(addon)
user_settings.save()
# - projects where the user was a contributor
for node in user.contributed:
# Skip quickfiles
if node.is_quickfiles:
continue
# if both accounts are contributor of the same project
if node.is_contributor(self) and node.is_contributor(user):
user_permissions = node.get_permissions(user)
self_permissions = node.get_permissions(self)
permissions = max([user_permissions, self_permissions])
node.set_permissions(user=self, permissions=permissions)
visible1 = self._id in node.visible_contributor_ids
visible2 = user._id in node.visible_contributor_ids
if visible1 != visible2:
node.set_visible(user=self, visible=True, log=True, auth=Auth(user=self))
node.contributor_set.filter(user=user).delete()
else:
node.contributor_set.filter(user=user).update(user=self)
node.save()
# Skip bookmark collections
user.collection_set.exclude(is_bookmark_collection=True).update(creator=self)
from osf.models import QuickFilesNode
from osf.models import BaseFileNode
# - projects where the user was the creator
user.nodes_created.exclude(type=QuickFilesNode._typedmodels_type).update(creator=self)
# - file that the user has checked_out, import done here to prevent import error
for file_node in BaseFileNode.files_checked_out(user=user):
file_node.checkout = self
file_node.save()
# - move files in the merged user's quickfiles node, checking for name conflicts
from addons.osfstorage.models import OsfStorageFileNode
primary_quickfiles = QuickFilesNode.objects.get(creator=self)
merging_user_quickfiles = QuickFilesNode.objects.get(creator=user)
files_in_merging_user_quickfiles = merging_user_quickfiles.files.filter(type='osf.osfstoragefile')
for merging_user_file in files_in_merging_user_quickfiles:
if OsfStorageFileNode.objects.filter(node=primary_quickfiles, name=merging_user_file.name).exists():
digit = 1
split_filename = splitext(merging_user_file.name)
name_without_extension = split_filename[0]
extension = split_filename[1]
                found_digit_in_parens = re.findall(r'(?<=\()(\d)(?=\))', name_without_extension)
if found_digit_in_parens:
found_digit = int(found_digit_in_parens[0])
digit = found_digit + 1
name_without_extension = name_without_extension.replace('({})'.format(found_digit), '').strip()
new_name_format = '{} ({}){}'
new_name = new_name_format.format(name_without_extension, digit, extension)
# check if new name conflicts, update til it does not (try up to 1000 times)
rename_count = 0
while OsfStorageFileNode.objects.filter(node=primary_quickfiles, name=new_name).exists():
digit += 1
new_name = new_name_format.format(name_without_extension, digit, extension)
rename_count += 1
if rename_count >= MAX_QUICKFILES_MERGE_RENAME_ATTEMPTS:
raise MaxRetriesError('Maximum number of rename attempts has been reached')
merging_user_file.name = new_name
merging_user_file.save()
merging_user_file.node = primary_quickfiles
merging_user_file.save()
# finalize the merge
remove_sessions_for_user(user)
# - username is set to the GUID so the merging user can set it primary
# in the future (note: it cannot be set to None due to non-null constraint)
user.set_unusable_username()
user.set_unusable_password()
user.verification_key = None
user.osf_mailing_lists = {}
user.merged_by = self
user.save()
def disable_account(self):
"""
        Disables the user account: sets is_disabled to True, unsubscribes the user
        from mailchimp emails, and removes any existing sessions.
Ported from framework/auth/core.py
"""
from website import mailchimp_utils
from framework.auth import logout
try:
mailchimp_utils.unsubscribe_mailchimp(
list_name=website_settings.MAILCHIMP_GENERAL_LIST,
user_id=self._id,
username=self.username
)
except mailchimp_utils.mailchimp.ListNotSubscribedError:
pass
except mailchimp_utils.mailchimp.InvalidApiKeyError:
if not website_settings.ENABLE_EMAIL_SUBSCRIPTIONS:
pass
else:
raise
except mailchimp_utils.mailchimp.EmailNotExistsError:
pass
# Call to `unsubscribe` above saves, and can lead to stale data
self.reload()
self.is_disabled = True
# we must call both methods to ensure the current session is cleared and all existing
# sessions are revoked.
req = get_current_request()
if isinstance(req, FlaskRequest):
logout()
remove_sessions_for_user(self)
def update_is_active(self):
"""Update ``is_active`` to be consistent with the fields that
it depends on.
"""
# The user can log in if they have set a password OR
# have a verified external ID, e.g an ORCID
can_login = self.has_usable_password() or (
'VERIFIED' in sum([each.values() for each in self.external_identity.values()], [])
)
self.is_active = (
self.is_registered and
self.is_confirmed and
can_login and
not self.is_merged and
not self.is_disabled
)
# Overrides BaseModel
def save(self, *args, **kwargs):
self.update_is_active()
self.username = self.username.lower().strip() if self.username else None
dirty_fields = set(self.get_dirty_fields(check_relationship=True))
ret = super(OSFUser, self).save(*args, **kwargs)
if self.SEARCH_UPDATE_FIELDS.intersection(dirty_fields) and self.is_confirmed:
self.update_search()
self.update_search_nodes_contributors()
if 'fullname' in dirty_fields:
from osf.models.quickfiles import get_quickfiles_project_title, QuickFilesNode
quickfiles = QuickFilesNode.objects.filter(creator=self).first()
if quickfiles:
quickfiles.title = get_quickfiles_project_title(self)
quickfiles.save()
return ret
# Legacy methods
@classmethod
def create(cls, username, password, fullname, accepted_terms_of_service=None):
validate_email(username) # Raises BlacklistedEmailError if spam address
user = cls(
username=username,
fullname=fullname,
accepted_terms_of_service=accepted_terms_of_service
)
user.update_guessed_names()
user.set_password(password)
return user
def set_password(self, raw_password, notify=True):
"""Set the password for this user to the hash of ``raw_password``.
If this is a new user, we're done. If this is a password change,
then email the user about the change and clear all the old sessions
so that users will have to log in again with the new password.
:param raw_password: the plaintext value of the new password
:param notify: Only meant for unit tests to keep extra notifications from being sent
"""
had_existing_password = bool(self.has_usable_password() and self.is_confirmed)
if self.username == raw_password:
raise ChangePasswordError(['Password cannot be the same as your email address'])
super(OSFUser, self).set_password(raw_password)
if had_existing_password and notify:
mails.send_mail(
to_addr=self.username,
mail=mails.PASSWORD_RESET,
mimetype='html',
user=self,
can_change_preferences=False,
osf_contact_email=website_settings.OSF_CONTACT_EMAIL
)
remove_sessions_for_user(self)
@classmethod
def create_unconfirmed(cls, username, password, fullname, external_identity=None,
do_confirm=True, campaign=None, accepted_terms_of_service=None):
"""Create a new user who has begun registration but needs to verify
their primary email address (username).
"""
user = cls.create(username, password, fullname, accepted_terms_of_service)
user.add_unconfirmed_email(username, external_identity=external_identity)
user.is_registered = False
if external_identity:
user.external_identity.update(external_identity)
if campaign:
            # needed to prevent circular import
from framework.auth.campaigns import system_tag_for_campaign # skipci
# User needs to be saved before adding system tags (due to m2m relationship)
user.save()
user.add_system_tag(system_tag_for_campaign(campaign))
return user
@classmethod
def create_confirmed(cls, username, password, fullname):
user = cls.create(username, password, fullname)
user.is_registered = True
user.is_claimed = True
user.save() # Must save before using auto_now_add field
user.date_confirmed = user.date_registered
user.emails.create(address=username.lower().strip())
return user
def get_unconfirmed_email_for_token(self, token):
"""Return email if valid.
        :rtype: str
:raises: ExpiredTokenError if trying to access a token that is expired.
:raises: InvalidTokenError if trying to access a token that is invalid.
"""
if token not in self.email_verifications:
raise InvalidTokenError
verification = self.email_verifications[token]
# Not all tokens are guaranteed to have expiration dates
if (
'expiration' in verification and
verification['expiration'].replace(tzinfo=pytz.utc) < timezone.now()
):
raise ExpiredTokenError
return verification['email']
def get_unconfirmed_emails_exclude_external_identity(self):
"""Return a list of unconfirmed emails that are not related to external identity."""
unconfirmed_emails = []
if self.email_verifications:
for token, value in self.email_verifications.iteritems():
if not value.get('external_identity'):
unconfirmed_emails.append(value.get('email'))
return unconfirmed_emails
@property
def unconfirmed_email_info(self):
"""Return a list of dictionaries containing information about each of this
user's unconfirmed emails.
"""
unconfirmed_emails = []
email_verifications = self.email_verifications or []
for token in email_verifications:
if self.email_verifications[token].get('confirmed', False):
try:
user_merge = OSFUser.objects.get(emails__address__iexact=self.email_verifications[token]['email'])
except OSFUser.DoesNotExist:
user_merge = False
unconfirmed_emails.append({'address': self.email_verifications[token]['email'],
'token': token,
'confirmed': self.email_verifications[token]['confirmed'],
'user_merge': user_merge.email if user_merge else False})
return unconfirmed_emails
def clean_email_verifications(self, given_token=None):
email_verifications = deepcopy(self.email_verifications or {})
for token in self.email_verifications or {}:
try:
self.get_unconfirmed_email_for_token(token)
except (KeyError, ExpiredTokenError):
email_verifications.pop(token)
continue
if token == given_token:
email_verifications.pop(token)
self.email_verifications = email_verifications
def verify_password_token(self, token):
"""
Verify that the password reset token for this user is valid.
:param token: the token in verification key
:return `True` if valid, otherwise `False`
"""
if token and self.verification_key_v2:
try:
return (self.verification_key_v2['token'] == token and
self.verification_key_v2['expires'] > timezone.now())
except AttributeError:
return False
return False
def verify_claim_token(self, token, project_id):
"""Return whether or not a claim token is valid for this user for
a given node which they were added as a unregistered contributor for.
"""
try:
record = self.get_unclaimed_record(project_id)
except ValueError: # No unclaimed record for given pid
return False
return record['token'] == token
@classmethod
def create_unregistered(cls, fullname, email=None):
"""Create a new unregistered user.
"""
user = cls(
username=email,
fullname=fullname,
is_invited=True,
is_registered=False,
)
if not email:
user.set_unusable_username()
user.set_unusable_password()
user.update_guessed_names()
return user
def update_guessed_names(self):
"""Updates the CSL name fields inferred from the the full name.
"""
parsed = impute_names(self.fullname)
self.given_name = parsed['given']
self.middle_names = parsed['middle']
self.family_name = parsed['family']
self.suffix = parsed['suffix']
def add_unconfirmed_email(self, email, expiration=None, external_identity=None):
"""
Add an email verification token for a given email.
:param email: the email to confirm
        :param expiration: overwrite the default expiration time
:param external_identity: the user's external identity
:return: a token
:raises: ValueError if email already confirmed, except for login through external idp.
"""
# Note: This is technically not compliant with RFC 822, which requires
# that case be preserved in the "local-part" of an address. From
# a practical standpoint, the vast majority of email servers do
# not preserve case.
# ref: https://tools.ietf.org/html/rfc822#section-6
email = email.lower().strip()
with reraise_django_validation_errors():
validate_email(email)
if not external_identity and self.emails.filter(address=email).exists():
raise ValueError('Email already confirmed to this user.')
# If the unconfirmed email is already present, refresh the token
if email in self.unconfirmed_emails:
self.remove_unconfirmed_email(email)
verification_key = generate_verification_key(verification_type='confirm')
# handle when email_verifications is None
if not self.email_verifications:
self.email_verifications = {}
self.email_verifications[verification_key['token']] = {
'email': email,
'confirmed': False,
'expiration': expiration if expiration else verification_key['expires'],
'external_identity': external_identity,
}
return verification_key['token']
def remove_unconfirmed_email(self, email):
"""Remove an unconfirmed email addresses and their tokens."""
for token, value in self.email_verifications.iteritems():
if value.get('email') == email:
del self.email_verifications[token]
return True
return False
def remove_email(self, email):
"""Remove a confirmed email"""
if email == self.username:
raise PermissionsError("Can't remove primary email")
if self.emails.filter(address=email):
self.emails.filter(address=email).delete()
signals.user_email_removed.send(self, email=email, osf_contact_email=website_settings.OSF_CONTACT_EMAIL)
def get_confirmation_token(self, email, force=False, renew=False):
"""Return the confirmation token for a given email.
:param str email: The email to get the token for.
:param bool force: If an expired token exists for the given email, generate a new one and return it.
:param bool renew: Generate a new token and return it.
:return Return the confirmation token.
:raises: ExpiredTokenError if trying to access a token that is expired and force=False.
:raises: KeyError if there no token for the email.
"""
# TODO: Refactor "force" flag into User.get_or_add_confirmation_token
for token, info in self.email_verifications.items():
if info['email'].lower() == email.lower():
# Old records will not have an expiration key. If it's missing,
# assume the token is expired
expiration = info.get('expiration')
if renew:
new_token = self.add_unconfirmed_email(email)
self.save()
return new_token
if not expiration or (expiration and expiration < timezone.now()):
if not force:
raise ExpiredTokenError('Token for email "{0}" is expired'.format(email))
else:
new_token = self.add_unconfirmed_email(email)
self.save()
return new_token
return token
raise KeyError('No confirmation token for email "{0}"'.format(email))
def get_confirmation_url(self, email,
external=True,
force=False,
renew=False,
external_id_provider=None,
destination=None):
"""Return the confirmation url for a given email.
:param email: The email to confirm.
:param external: Use absolute or relative url.
:param force: If an expired token exists for the given email, generate a new one and return it.
:param renew: Generate a new token and return it.
:param external_id_provider: The external identity provider that authenticates the user.
:param destination: The destination page to redirect after confirmation
:return: Return the confirmation url.
:raises: ExpiredTokenError if trying to access a token that is expired.
:raises: KeyError if there is no token for the email.
"""
base = website_settings.DOMAIN if external else '/'
token = self.get_confirmation_token(email, force=force, renew=renew)
external = 'external/' if external_id_provider else ''
destination = '?{}'.format(urllib.urlencode({'destination': destination})) if destination else ''
return '{0}confirm/{1}{2}/{3}/{4}'.format(base, external, self._primary_key, token, destination)
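    # Illustrative example, added and hedged: for a hypothetical user guid 'abc12'
    # and token 'deadbeef', the default (non-external-provider) case yields a URL
    # of the form '<DOMAIN>confirm/abc12/deadbeef/', following the format string above.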
def register(self, username, password=None, accepted_terms_of_service=None):
"""Registers the user.
"""
self.username = username
if password:
self.set_password(password)
if not self.emails.filter(address=username):
self.emails.create(address=username)
self.is_registered = True
self.is_claimed = True
self.date_confirmed = timezone.now()
if accepted_terms_of_service:
self.accepted_terms_of_service = timezone.now()
self.update_search()
self.update_search_nodes()
# Emit signal that a user has confirmed
signals.user_confirmed.send(self)
return self
def confirm_email(self, token, merge=False):
"""Confirm the email address associated with the token"""
email = self.get_unconfirmed_email_for_token(token)
# If this email is confirmed on another account, abort
try:
if check_select_for_update():
user_to_merge = OSFUser.objects.filter(emails__address=email).select_for_update().get()
else:
user_to_merge = OSFUser.objects.get(emails__address=email)
except OSFUser.DoesNotExist:
user_to_merge = None
if user_to_merge and merge:
self.merge_user(user_to_merge)
elif user_to_merge:
raise MergeConfirmedRequiredError(
'Merge requires confirmation',
user=self,
user_to_merge=user_to_merge,
)
# If another user has this email as its username, get it
try:
unregistered_user = OSFUser.objects.exclude(guids___id=self._id, guids___id__isnull=False).get(username=email)
except OSFUser.DoesNotExist:
unregistered_user = None
if unregistered_user:
self.merge_user(unregistered_user)
self.save()
unregistered_user.username = None
if not self.emails.filter(address=email).exists():
self.emails.create(address=email)
# Complete registration if primary email
if email.lower() == self.username.lower():
self.register(self.username)
self.date_confirmed = timezone.now()
# Revoke token
del self.email_verifications[token]
# TODO: We can't assume that all unclaimed records are now claimed.
# Clear unclaimed records, so user's name shows up correctly on
# all projects
self.unclaimed_records = {}
self.save()
self.update_search_nodes()
return True
def update_search(self):
from website.search.search import update_user
update_user(self)
def update_search_nodes_contributors(self):
"""
Bulk update contributor name on all nodes on which the user is
a contributor.
:return:
"""
from website.search import search
search.update_contributors_async(self.id)
def update_search_nodes(self):
"""Call `update_search` on all nodes on which the user is a
contributor. Needed to add self to contributor lists in search upon
registration or claiming.
"""
for node in self.contributor_to:
node.update_search()
def update_date_last_login(self):
self.date_last_login = timezone.now()
def get_summary(self, formatter='long'):
return {
'user_fullname': self.fullname,
'user_profile_url': self.profile_url,
'user_display_name': name_formatters[formatter](self),
'user_is_claimed': self.is_claimed
}
def check_password(self, raw_password):
"""
Return a boolean of whether the raw_password was correct. Handles
hashing formats behind the scenes.
Source: https://github.com/django/django/blob/master/django/contrib/auth/base_user.py#L104
"""
def setter(raw_password):
self.set_password(raw_password, notify=False)
# Password hash upgrades shouldn't be considered password changes.
self._password = None
self.save(update_fields=['password'])
return check_password(raw_password, self.password, setter)
def change_password(self, raw_old_password, raw_new_password, raw_confirm_password):
"""Change the password for this user to the hash of ``raw_new_password``."""
raw_old_password = (raw_old_password or '').strip()
raw_new_password = (raw_new_password or '').strip()
raw_confirm_password = (raw_confirm_password or '').strip()
# TODO: Move validation to set_password
issues = []
if not self.check_password(raw_old_password):
self.old_password_invalid_attempts += 1
self.change_password_last_attempt = timezone.now()
issues.append('Old password is invalid')
elif raw_old_password == raw_new_password:
issues.append('Password cannot be the same')
elif raw_new_password == self.username:
issues.append('Password cannot be the same as your email address')
if not raw_old_password or not raw_new_password or not raw_confirm_password:
issues.append('Passwords cannot be blank')
elif len(raw_new_password) < 8:
issues.append('Password should be at least eight characters')
elif len(raw_new_password) > 256:
issues.append('Password should not be longer than 256 characters')
if raw_new_password != raw_confirm_password:
issues.append('Password does not match the confirmation')
if issues:
raise ChangePasswordError(issues)
self.set_password(raw_new_password)
self.reset_old_password_invalid_attempts()
def reset_old_password_invalid_attempts(self):
self.old_password_invalid_attempts = 0
def profile_image_url(self, size=None):
"""A generalized method for getting a user's profile picture urls.
We may choose to use some service other than gravatar in the future,
and should not commit ourselves to using a specific service (mostly
an API concern).
As long as we use gravatar, this is just a proxy to User.gravatar_url
"""
return self._gravatar_url(size)
def _gravatar_url(self, size):
return filters.gravatar(
self,
use_ssl=True,
size=size
)
@property
def display_absolute_url(self):
url = self.absolute_url
if url is not None:
return re.sub(r'https?:', '', url).strip('/')
def display_full_name(self, node=None):
"""Return the full name , as it would display in a contributor list for a
given node.
NOTE: Unclaimed users may have a different name for different nodes.
"""
if node:
unclaimed_data = self.unclaimed_records.get(str(node._id), None)
if unclaimed_data:
return unclaimed_data['name']
return self.fullname
def add_system_tag(self, tag):
if not isinstance(tag, Tag):
tag_instance, created = Tag.all_tags.get_or_create(name=tag.lower(), system=True)
else:
tag_instance = tag
if not tag_instance.system:
raise ValueError('Non-system tag passed to add_system_tag')
if not self.all_tags.filter(id=tag_instance.id).exists():
self.tags.add(tag_instance)
return tag_instance
def get_recently_added(self):
return (
each.contributor
for each in self.recentlyaddedcontributor_set.order_by('-date_added')
)
def _projects_in_common_query(self, other_user):
sqs = Contributor.objects.filter(node=models.OuterRef('pk'), user=other_user)
return (self.nodes
.filter(is_deleted=False)
.exclude(type='osf.collection')
.annotate(contrib=models.Exists(sqs))
.filter(contrib=True))
def get_projects_in_common(self, other_user):
"""Returns either a collection of "shared projects" (projects that both users are contributors for)
or just their primary keys
"""
query = self._projects_in_common_query(other_user)
return set(query.all())
def n_projects_in_common(self, other_user):
"""Returns number of "shared projects" (projects that both users are contributors for)"""
return self._projects_in_common_query(other_user).count()
def add_unclaimed_record(self, claim_origin, referrer, given_name, email=None):
"""Add a new project entry in the unclaimed records dictionary.
        :param object claim_origin: Object this unclaimed user was added to; currently `Node` or `Provider`
:param User referrer: User who referred this user.
:param str given_name: The full name that the referrer gave for this user.
:param str email: The given email address.
:returns: The added record
"""
from osf.models.provider import AbstractProvider
if isinstance(claim_origin, AbstractProvider):
if not bool(get_perms(referrer, claim_origin)):
raise PermissionsError(
'Referrer does not have permission to add a moderator to provider {0}'.format(claim_origin._id)
)
else:
if not claim_origin.can_edit(user=referrer):
raise PermissionsError(
'Referrer does not have permission to add a contributor to project {0}'.format(claim_origin._primary_key)
)
pid = str(claim_origin._id)
referrer_id = str(referrer._id)
if email:
clean_email = email.lower().strip()
else:
clean_email = None
verification_key = generate_verification_key(verification_type='claim')
try:
record = self.unclaimed_records[claim_origin._id]
except KeyError:
record = None
if record:
del record
record = {
'name': given_name,
'referrer_id': referrer_id,
'token': verification_key['token'],
'expires': verification_key['expires'],
'email': clean_email,
}
self.unclaimed_records[pid] = record
return record
def get_unclaimed_record(self, project_id):
"""Get an unclaimed record for a given project_id.
:raises: ValueError if there is no record for the given project.
"""
try:
return self.unclaimed_records[project_id]
except KeyError: # reraise as ValueError
raise ValueError('No unclaimed record for user {self._id} on node {project_id}'
.format(**locals()))
def get_claim_url(self, project_id, external=False):
"""Return the URL that an unclaimed user should use to claim their
account. Return ``None`` if there is no unclaimed_record for the given
project ID.
:param project_id: The project ID for the unclaimed record
:raises: ValueError if a record doesn't exist for the given project ID
:rtype: dict
:returns: The unclaimed record for the project
"""
uid = self._primary_key
base_url = website_settings.DOMAIN if external else '/'
unclaimed_record = self.get_unclaimed_record(project_id)
token = unclaimed_record['token']
return '{base_url}user/{uid}/{project_id}/claim/?token={token}'\
.format(**locals())
def is_affiliated_with_institution(self, institution):
"""Return if this user is affiliated with ``institution``."""
return self.affiliated_institutions.filter(id=institution.id).exists()
def update_affiliated_institutions_by_email_domain(self):
"""
Append affiliated_institutions by email domain.
:return:
"""
try:
email_domains = [email.split('@')[1].lower() for email in self.emails.values_list('address', flat=True)]
insts = Institution.objects.filter(email_domains__overlap=email_domains)
if insts.exists():
self.affiliated_institutions.add(*insts)
except IndexError:
pass
def remove_institution(self, inst_id):
try:
inst = self.affiliated_institutions.get(_id=inst_id)
except Institution.DoesNotExist:
return False
else:
self.affiliated_institutions.remove(inst)
return True
def get_activity_points(self):
return analytics.get_total_activity_count(self._id)
def get_or_create_cookie(self, secret=None):
"""Find the cookie for the given user
Create a new session if no cookie is found
:param str secret: The key to sign the cookie with
:returns: The signed cookie
"""
secret = secret or settings.SECRET_KEY
user_session = Session.objects.filter(
data__auth_user_id=self._id
).order_by(
'-modified'
).first()
if not user_session:
user_session = Session(data={
'auth_user_id': self._id,
'auth_user_username': self.username,
'auth_user_fullname': self.fullname,
})
user_session.save()
signer = itsdangerous.Signer(secret)
return signer.sign(user_session._id)
@classmethod
def from_cookie(cls, cookie, secret=None):
"""Attempt to load a user from their signed cookie
:returns: None if a user cannot be loaded else User
"""
if not cookie:
return None
secret = secret or settings.SECRET_KEY
try:
token = itsdangerous.Signer(secret).unsign(cookie)
except itsdangerous.BadSignature:
return None
user_session = Session.load(token)
if user_session is None:
return None
return cls.load(user_session.data.get('auth_user_id'))
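    # Sketch of the cookie round trip implemented by get_or_create_cookie()/from_cookie(),
    # assuming `user` is a saved OSFUser:
    #
    #     cookie = user.get_or_create_cookie()      # itsdangerous-signed session id
    #     same_user = OSFUser.from_cookie(cookie)   # loads the Session, then the user
    #     assert same_user == user
    #     OSFUser.from_cookie(b'tampered')          # returns None (BadSignature)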
def get_node_comment_timestamps(self, target_id):
""" Returns the timestamp for when comments were last viewed on a node, file or wiki.
"""
default_timestamp = dt.datetime(1970, 1, 1, 12, 0, 0, tzinfo=pytz.utc)
return self.comments_viewed_timestamp.get(target_id, default_timestamp)
def gdpr_delete(self):
"""
        This function does not remove the user object reference from our database, but it does disable the account and
        remove identifying information in a manner compliant with GDPR guidelines.
Follows the protocol described in
https://openscience.atlassian.net/wiki/spaces/PRODUC/pages/482803755/GDPR-Related+protocols
"""
from osf.models import PreprintService, AbstractNode
user_nodes = self.nodes.all()
# Validates the user isn't trying to delete things they deliberately made public.
if user_nodes.filter(type='osf.registration').exists():
raise UserStateError('You cannot delete this user because they have one or more registrations.')
if PreprintService.objects.filter(node___contributors=self, ever_public=True).exists():
raise UserStateError('You cannot delete this user because they have one or more preprints.')
        # Validates that the user isn't trying to delete nodes they are the only admin on.
personal_nodes = AbstractNode.objects.annotate(contrib_count=Count('_contributors')).filter(contrib_count__lte=1).filter(contributor__user=self)
shared_nodes = user_nodes.exclude(id__in=personal_nodes.values_list('id'))
for node in shared_nodes.exclude(type='osf.quickfilesnode'):
alternate_admins = Contributor.objects.select_related('user').filter(
node=node,
user__is_active=True,
admin=True,
).exclude(user=self)
if not alternate_admins:
raise UserStateError(
'You cannot delete node {} because it would be a node with contributors, but with no admin.'.format(
node._id))
for addon in node.get_addons():
if addon.short_name not in ('osfstorage', 'wiki') and addon.user_settings and addon.user_settings.owner.id == self.id:
raise UserStateError('You cannot delete this user because they '
'have an external account for {} attached to Node {}, '
'which has other contributors.'.format(addon.short_name, node._id))
for node in shared_nodes.all():
logger.info('Removing {self._id} as a contributor to node (pk:{node_id})...'.format(self=self, node_id=node.pk))
node.remove_contributor(self, auth=Auth(self), log=False)
        # This doesn't remove identifying info, but it ensures other users can't see the deleted user's profile, etc.
self.disable_account()
# delete all personal nodes (one contributor), bookmarks, quickfiles etc.
for node in personal_nodes.all():
logger.info('Soft-deleting node (pk: {node_id})...'.format(node_id=node.pk))
node.remove_node(auth=Auth(self))
logger.info('Clearing identifying information...')
# This removes identifying info
# hard-delete all emails associated with the user
self.emails.all().delete()
# Change name to "Deleted user" so that logs render properly
self.fullname = 'Deleted user'
self.set_unusable_username()
self.set_unusable_password()
self.given_name = ''
self.family_name = ''
self.middle_names = ''
self.mailchimp_mailing_lists = {}
self.osf_mailing_lists = {}
self.verification_key = None
self.suffix = ''
self.jobs = []
self.schools = []
self.social = []
self.unclaimed_records = {}
self.notifications_configured = {}
# Scrub all external accounts
if self.external_accounts.exists():
logger.info('Clearing identifying information from external accounts...')
for account in self.external_accounts.all():
account.oauth_key = None
account.oauth_secret = None
account.refresh_token = None
account.provider_name = 'gdpr-deleted'
account.display_name = None
account.profile_url = None
account.save()
self.external_accounts.clear()
self.external_identity = {}
self.deleted = timezone.now()
class Meta:
# custom permissions for use in the OSF Admin App
permissions = (
('view_osfuser', 'Can view user details'),
)
@receiver(post_save, sender=OSFUser)
def add_default_user_addons(sender, instance, created, **kwargs):
if created:
for addon in website_settings.ADDONS_AVAILABLE:
if 'user' in addon.added_default:
instance.add_addon(addon.short_name)
@receiver(post_save, sender=OSFUser)
def create_bookmark_collection(sender, instance, created, **kwargs):
if created:
new_bookmark_collection(instance)
# Allows this hook to be easily mock.patched
def _create_quickfiles_project(instance):
from osf.models.quickfiles import QuickFilesNode
QuickFilesNode.objects.create_for_user(instance)
@receiver(post_save, sender=OSFUser)
def create_quickfiles_project(sender, instance, created, **kwargs):
if created:
_create_quickfiles_project(instance)
|
apache-2.0
| -4,507,992,747,545,736,000
| 38.744329
| 152
| 0.62157
| false
| 4.235967
| false
| false
| false
|
NMTHydro/SWACodingMeeting
|
src/meeting3/be.py
|
1
|
1317
|
# ===============================================================================
# Copyright 2016 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= standard library imports ========================
import os
# ============= local library imports ==========================
def get_file_contents(p, root=None):
if root is None:
root = os.path.expanduser('~')
pp = os.path.join(root, p)
with open(pp, 'r') as rfile:
return [line.strip() for line in rfile]
def get_file_contents2(name):
root = os.path.expanduser('~')
pp = os.path.join(root, name)
with open(pp, 'r') as rfile:
return rfile.readlines()
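# Minimal usage sketch (paths are hypothetical):
#
#     lines = get_file_contents('data/input.txt')              # stripped lines, rooted at ~
#     lines = get_file_contents('input.txt', root='/tmp/swa')  # explicit root
#     raw = get_file_contents2('input.txt')                    # raw lines, newlines kept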
# ============= EOF =============================================
|
apache-2.0
| -8,939,215,115,441,067,000
| 34.594595
| 81
| 0.538345
| false
| 4.39
| false
| false
| false
|
postlund/home-assistant
|
homeassistant/components/konnected/binary_sensor.py
|
1
|
2668
|
"""Support for wired binary sensors attached to a Konnected device."""
import logging
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_STATE,
CONF_BINARY_SENSORS,
CONF_DEVICES,
CONF_NAME,
CONF_TYPE,
)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import DOMAIN as KONNECTED_DOMAIN, SIGNAL_SENSOR_UPDATE
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up binary sensors attached to a Konnected device from a config entry."""
data = hass.data[KONNECTED_DOMAIN]
device_id = config_entry.data["id"]
sensors = [
KonnectedBinarySensor(device_id, pin_num, pin_data)
for pin_num, pin_data in data[CONF_DEVICES][device_id][
CONF_BINARY_SENSORS
].items()
]
async_add_entities(sensors)
class KonnectedBinarySensor(BinarySensorDevice):
"""Representation of a Konnected binary sensor."""
def __init__(self, device_id, zone_num, data):
"""Initialize the Konnected binary sensor."""
self._data = data
self._device_id = device_id
self._zone_num = zone_num
self._state = self._data.get(ATTR_STATE)
self._device_class = self._data.get(CONF_TYPE)
self._unique_id = f"{device_id}-{zone_num}"
self._name = self._data.get(CONF_NAME)
@property
def unique_id(self) -> str:
"""Return the unique id."""
return self._unique_id
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def is_on(self):
"""Return the state of the sensor."""
return self._state
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def device_class(self):
"""Return the device class."""
return self._device_class
@property
def device_info(self):
"""Return the device info."""
return {
"identifiers": {(KONNECTED_DOMAIN, self._device_id)},
}
async def async_added_to_hass(self):
"""Store entity_id and register state change callback."""
self._data[ATTR_ENTITY_ID] = self.entity_id
async_dispatcher_connect(
self.hass, SIGNAL_SENSOR_UPDATE.format(self.entity_id), self.async_set_state
)
@callback
def async_set_state(self, state):
"""Update the sensor's state."""
self._state = state
self.async_schedule_update_ha_state()
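# Illustrative only: elsewhere in this integration the panel handler is expected to push
# zone changes with something like the call below (entity_id and state are hypothetical);
# async_set_state() above is the receiving end of that dispatcher signal.
#
#     from homeassistant.helpers.dispatcher import async_dispatcher_send
#     async_dispatcher_send(hass, SIGNAL_SENSOR_UPDATE.format(entity_id), 1)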
|
apache-2.0
| 1,640,747,530,150,869,800
| 28.644444
| 88
| 0.631184
| false
| 3.883552
| false
| false
| false
|
pgmillon/ansible
|
lib/ansible/module_utils/basic.py
|
1
|
107336
|
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
# Copyright (c), Toshio Kuratomi <tkuratomi@ansible.com> 2016
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
FILE_ATTRIBUTES = {
'A': 'noatime',
'a': 'append',
'c': 'compressed',
'C': 'nocow',
'd': 'nodump',
'D': 'dirsync',
'e': 'extents',
'E': 'encrypted',
'h': 'blocksize',
'i': 'immutable',
'I': 'indexed',
'j': 'journalled',
'N': 'inline',
's': 'zero',
'S': 'synchronous',
't': 'notail',
'T': 'blockroot',
'u': 'undelete',
'X': 'compressedraw',
'Z': 'compresseddirty',
}
# Ansible modules can be written in any language.
# The functions available here can be used to do many common tasks,
# to simplify development of Python modules.
import __main__
import atexit
import errno
import datetime
import grp
import fcntl
import locale
import os
import pwd
import platform
import re
import select
import shlex
import shutil
import signal
import stat
import subprocess
import sys
import tempfile
import time
import traceback
import types
from collections import deque
from itertools import chain, repeat
try:
import syslog
HAS_SYSLOG = True
except ImportError:
HAS_SYSLOG = False
try:
from systemd import journal
has_journal = True
except ImportError:
has_journal = False
HAVE_SELINUX = False
try:
import selinux
HAVE_SELINUX = True
except ImportError:
pass
# Python2 & 3 way to get NoneType
NoneType = type(None)
from ansible.module_utils._text import to_native, to_bytes, to_text
from ansible.module_utils.common.text.converters import (
jsonify,
container_to_bytes as json_dict_unicode_to_bytes,
container_to_text as json_dict_bytes_to_unicode,
)
from ansible.module_utils.common.text.formatters import (
lenient_lowercase,
bytes_to_human,
human_to_bytes,
SIZE_RANGES,
)
try:
from ansible.module_utils.common._json_compat import json
except ImportError as e:
print('\n{{"msg": "Error: ansible requires the stdlib json: {0}", "failed": true}}'.format(to_native(e)))
sys.exit(1)
AVAILABLE_HASH_ALGORITHMS = dict()
try:
import hashlib
# python 2.7.9+ and 2.7.0+
for attribute in ('available_algorithms', 'algorithms'):
algorithms = getattr(hashlib, attribute, None)
if algorithms:
break
if algorithms is None:
# python 2.5+
algorithms = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
for algorithm in algorithms:
AVAILABLE_HASH_ALGORITHMS[algorithm] = getattr(hashlib, algorithm)
# we may have been able to import md5 but it could still not be available
try:
hashlib.md5()
except ValueError:
AVAILABLE_HASH_ALGORITHMS.pop('md5', None)
except Exception:
import sha
AVAILABLE_HASH_ALGORITHMS = {'sha1': sha.sha}
try:
import md5
AVAILABLE_HASH_ALGORITHMS['md5'] = md5.md5
except Exception:
pass
from ansible.module_utils.common._collections_compat import (
KeysView,
Mapping, MutableMapping,
Sequence, MutableSequence,
Set, MutableSet,
)
from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.common.file import (
_PERM_BITS as PERM_BITS,
_EXEC_PERM_BITS as EXEC_PERM_BITS,
_DEFAULT_PERM as DEFAULT_PERM,
is_executable,
format_attributes,
get_flags_from_attributes,
)
from ansible.module_utils.common.sys_info import (
get_distribution,
get_distribution_version,
get_platform_subclass,
)
from ansible.module_utils.pycompat24 import get_exception, literal_eval
from ansible.module_utils.common.parameters import (
handle_aliases,
list_deprecations,
list_no_log_values,
PASS_VARS,
PASS_BOOLS,
)
from ansible.module_utils.six import (
PY2,
PY3,
b,
binary_type,
integer_types,
iteritems,
string_types,
text_type,
)
from ansible.module_utils.six.moves import map, reduce, shlex_quote
from ansible.module_utils.common.validation import (
check_missing_parameters,
check_mutually_exclusive,
check_required_arguments,
check_required_by,
check_required_if,
check_required_one_of,
check_required_together,
count_terms,
check_type_bool,
check_type_bits,
check_type_bytes,
check_type_float,
check_type_int,
check_type_jsonarg,
check_type_list,
check_type_dict,
check_type_path,
check_type_raw,
check_type_str,
safe_eval,
)
from ansible.module_utils.common._utils import get_all_subclasses as _get_all_subclasses
from ansible.module_utils.parsing.convert_bool import BOOLEANS, BOOLEANS_FALSE, BOOLEANS_TRUE, boolean
# Note: When getting Sequence from collections, it matches with strings. If
# this matters, make sure to check for strings before checking for sequencetype
SEQUENCETYPE = frozenset, KeysView, Sequence
PASSWORD_MATCH = re.compile(r'^(?:.+[-_\s])?pass(?:[-_\s]?(?:word|phrase|wrd|wd)?)(?:[-_\s].+)?$', re.I)
imap = map
try:
# Python 2
unicode
except NameError:
# Python 3
unicode = text_type
try:
# Python 2
basestring
except NameError:
# Python 3
basestring = string_types
_literal_eval = literal_eval
# End of deprecated names
# Internal global holding passed in params. This is consulted in case
# multiple AnsibleModules are created. Otherwise each AnsibleModule would
# attempt to read from stdin. Other code should not use this directly as it
# is an internal implementation detail
_ANSIBLE_ARGS = None
FILE_COMMON_ARGUMENTS = dict(
# These are things we want. About setting metadata (mode, ownership, permissions in general) on
# created files (these are used by set_fs_attributes_if_different and included in
# load_file_common_arguments)
mode=dict(type='raw'),
owner=dict(),
group=dict(),
seuser=dict(),
serole=dict(),
selevel=dict(),
setype=dict(),
attributes=dict(aliases=['attr']),
# The following are not about perms and should not be in a rewritten file_common_args
src=dict(), # Maybe dest or path would be appropriate but src is not
follow=dict(type='bool', default=False), # Maybe follow is appropriate because it determines whether to follow symlinks for permission purposes too
force=dict(type='bool'),
# not taken by the file module, but other action plugins call the file module so this ignores
# them for now. In the future, the caller should take care of removing these from the module
# arguments before calling the file module.
content=dict(no_log=True), # used by copy
backup=dict(), # Used by a few modules to create a remote backup before updating the file
remote_src=dict(), # used by assemble
regexp=dict(), # used by assemble
delimiter=dict(), # used by assemble
directory_mode=dict(), # used by copy
unsafe_writes=dict(type='bool'), # should be available to any module using atomic_move
)
PASSWD_ARG_RE = re.compile(r'^[-]{0,2}pass[-]?(word|wd)?')
# Used for parsing symbolic file perms
MODE_OPERATOR_RE = re.compile(r'[+=-]')
USERS_RE = re.compile(r'[^ugo]')
PERMS_RE = re.compile(r'[^rwxXstugo]')
# Used for determining if the system is running a new enough python version
# and should only restrict on our documented minimum versions
_PY3_MIN = sys.version_info[:2] >= (3, 5)
_PY2_MIN = (2, 6) <= sys.version_info[:2] < (3,)
_PY_MIN = _PY3_MIN or _PY2_MIN
if not _PY_MIN:
print(
'\n{"failed": true, '
'"msg": "Ansible requires a minimum of Python2 version 2.6 or Python3 version 3.5. Current version: %s"}' % ''.join(sys.version.splitlines())
)
sys.exit(1)
#
# Deprecated functions
#
def get_platform():
'''
**Deprecated** Use :py:func:`platform.system` directly.
:returns: Name of the platform the module is running on in a native string
Returns a native string that labels the platform ("Linux", "Solaris", etc). Currently, this is
the result of calling :py:func:`platform.system`.
'''
return platform.system()
# End deprecated functions
#
# Compat shims
#
def load_platform_subclass(cls, *args, **kwargs):
"""**Deprecated**: Use ansible.module_utils.common.sys_info.get_platform_subclass instead"""
platform_cls = get_platform_subclass(cls)
return super(cls, platform_cls).__new__(platform_cls)
def get_all_subclasses(cls):
"""**Deprecated**: Use ansible.module_utils.common._utils.get_all_subclasses instead"""
return list(_get_all_subclasses(cls))
# End compat shims
def _remove_values_conditions(value, no_log_strings, deferred_removals):
"""
Helper function for :meth:`remove_values`.
:arg value: The value to check for strings that need to be stripped
:arg no_log_strings: set of strings which must be stripped out of any values
:arg deferred_removals: List which holds information about nested
containers that have to be iterated for removals. It is passed into
this function so that more entries can be added to it if value is
a container type. The format of each entry is a 2-tuple where the first
element is the ``value`` parameter and the second value is a new
container to copy the elements of ``value`` into once iterated.
:returns: if ``value`` is a scalar, returns ``value`` with two exceptions:
1. :class:`~datetime.datetime` objects which are changed into a string representation.
2. objects which are in no_log_strings are replaced with a placeholder
so that no sensitive data is leaked.
If ``value`` is a container type, returns a new empty container.
``deferred_removals`` is added to as a side-effect of this function.
.. warning:: It is up to the caller to make sure the order in which value
is passed in is correct. For instance, higher level containers need
to be passed in before lower level containers. For example, given
        ``{'level1': {'level2': {'level3': [True]}}}`` first pass in the
dictionary for ``level1``, then the dict for ``level2``, and finally
the list for ``level3``.
"""
if isinstance(value, (text_type, binary_type)):
# Need native str type
native_str_value = value
if isinstance(value, text_type):
value_is_text = True
if PY2:
native_str_value = to_bytes(value, errors='surrogate_or_strict')
elif isinstance(value, binary_type):
value_is_text = False
if PY3:
native_str_value = to_text(value, errors='surrogate_or_strict')
if native_str_value in no_log_strings:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
for omit_me in no_log_strings:
native_str_value = native_str_value.replace(omit_me, '*' * 8)
if value_is_text and isinstance(native_str_value, binary_type):
value = to_text(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
elif not value_is_text and isinstance(native_str_value, text_type):
value = to_bytes(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
else:
value = native_str_value
elif isinstance(value, Sequence):
if isinstance(value, MutableSequence):
new_value = type(value)()
else:
new_value = [] # Need a mutable value
deferred_removals.append((value, new_value))
value = new_value
elif isinstance(value, Set):
if isinstance(value, MutableSet):
new_value = type(value)()
else:
new_value = set() # Need a mutable value
deferred_removals.append((value, new_value))
value = new_value
elif isinstance(value, Mapping):
if isinstance(value, MutableMapping):
new_value = type(value)()
else:
new_value = {} # Need a mutable value
deferred_removals.append((value, new_value))
value = new_value
elif isinstance(value, tuple(chain(integer_types, (float, bool, NoneType)))):
stringy_value = to_native(value, encoding='utf-8', errors='surrogate_or_strict')
if stringy_value in no_log_strings:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
for omit_me in no_log_strings:
if omit_me in stringy_value:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
elif isinstance(value, datetime.datetime):
value = value.isoformat()
else:
raise TypeError('Value of unknown type: %s, %s' % (type(value), value))
return value
def remove_values(value, no_log_strings):
""" Remove strings in no_log_strings from value. If value is a container
type, then remove a lot more"""
deferred_removals = deque()
no_log_strings = [to_native(s, errors='surrogate_or_strict') for s in no_log_strings]
new_value = _remove_values_conditions(value, no_log_strings, deferred_removals)
while deferred_removals:
old_data, new_data = deferred_removals.popleft()
if isinstance(new_data, Mapping):
for old_key, old_elem in old_data.items():
new_elem = _remove_values_conditions(old_elem, no_log_strings, deferred_removals)
new_data[old_key] = new_elem
else:
for elem in old_data:
new_elem = _remove_values_conditions(elem, no_log_strings, deferred_removals)
if isinstance(new_data, MutableSequence):
new_data.append(new_elem)
elif isinstance(new_data, MutableSet):
new_data.add(new_elem)
else:
raise TypeError('Unknown container type encountered when removing private values from output')
return new_value
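# A small hedged example of what remove_values() does (the values are made up):
#
#     remove_values('login with hunter2', {'hunter2'})
#     # -> 'login with ********'
#     remove_values({'password': 'hunter2', 'user': 'bob'}, {'hunter2'})
#     # -> {'password': 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER', 'user': 'bob'}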
def heuristic_log_sanitize(data, no_log_values=None):
''' Remove strings that look like passwords from log messages '''
# Currently filters:
# user:pass@foo/whatever and http://username:pass@wherever/foo
# This code has false positives and consumes parts of logs that are
# not passwds
# begin: start of a passwd containing string
# end: end of a passwd containing string
# sep: char between user and passwd
# prev_begin: where in the overall string to start a search for
# a passwd
# sep_search_end: where in the string to end a search for the sep
data = to_native(data)
output = []
begin = len(data)
prev_begin = begin
sep = 1
while sep:
# Find the potential end of a passwd
try:
end = data.rindex('@', 0, begin)
except ValueError:
# No passwd in the rest of the data
output.insert(0, data[0:begin])
break
# Search for the beginning of a passwd
sep = None
sep_search_end = end
while not sep:
# URL-style username+password
try:
begin = data.rindex('://', 0, sep_search_end)
except ValueError:
# No url style in the data, check for ssh style in the
# rest of the string
begin = 0
# Search for separator
try:
sep = data.index(':', begin + 3, end)
except ValueError:
# No separator; choices:
if begin == 0:
# Searched the whole string so there's no password
# here. Return the remaining data
output.insert(0, data[0:begin])
break
# Search for a different beginning of the password field.
sep_search_end = begin
continue
if sep:
# Password was found; remove it.
output.insert(0, data[end:prev_begin])
output.insert(0, '********')
output.insert(0, data[begin:sep + 1])
prev_begin = begin
output = ''.join(output)
if no_log_values:
output = remove_values(output, no_log_values)
return output
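# Hedged example of the heuristic above (the URL is made up):
#
#     heuristic_log_sanitize('fetching http://admin:s3cret@example.com/repo')
#     # -> 'fetching http://admin:********@example.com/repo'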
def _load_params():
''' read the modules parameters and store them globally.
This function may be needed for certain very dynamic custom modules which
    want to process the parameters that are being handed to the module. Since
this is so closely tied to the implementation of modules we cannot
guarantee API stability for it (it may change between versions) however we
will try not to break it gratuitously. It is certainly more future-proof
to call this function and consume its outputs than to implement the logic
inside it as a copy in your own code.
'''
global _ANSIBLE_ARGS
if _ANSIBLE_ARGS is not None:
buffer = _ANSIBLE_ARGS
else:
# debug overrides to read args from file or cmdline
# Avoid tracebacks when locale is non-utf8
# We control the args and we pass them as utf8
if len(sys.argv) > 1:
if os.path.isfile(sys.argv[1]):
fd = open(sys.argv[1], 'rb')
buffer = fd.read()
fd.close()
else:
buffer = sys.argv[1]
if PY3:
buffer = buffer.encode('utf-8', errors='surrogateescape')
# default case, read from stdin
else:
if PY2:
buffer = sys.stdin.read()
else:
buffer = sys.stdin.buffer.read()
_ANSIBLE_ARGS = buffer
try:
params = json.loads(buffer.decode('utf-8'))
except ValueError:
# This helper used too early for fail_json to work.
print('\n{"msg": "Error: Module unable to decode valid JSON on stdin. Unable to figure out what parameters were passed", "failed": true}')
sys.exit(1)
if PY2:
params = json_dict_unicode_to_bytes(params)
try:
return params['ANSIBLE_MODULE_ARGS']
except KeyError:
# This helper does not have access to fail_json so we have to print
# json output on our own.
print('\n{"msg": "Error: Module unable to locate ANSIBLE_MODULE_ARGS in json data from stdin. Unable to figure out what parameters were passed", '
'"failed": true}')
sys.exit(1)
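# Hedged sketch of the documented pattern for consuming _load_params() from a very
# dynamic module, before AnsibleModule is instantiated (build_spec is a hypothetical helper):
#
#     from ansible.module_utils.basic import AnsibleModule, _load_params
#     params = _load_params()                 # raw ANSIBLE_MODULE_ARGS dict
#     extra_spec = build_spec(params)         # derive an argument_spec dynamically
#     module = AnsibleModule(argument_spec=extra_spec)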
def env_fallback(*args, **kwargs):
''' Load value from environment '''
for arg in args:
if arg in os.environ:
return os.environ[arg]
raise AnsibleFallbackNotFound
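# Typical use is as a `fallback` in an argument_spec entry, e.g. (names are illustrative):
#
#     argument_spec = dict(
#         api_token=dict(type='str', no_log=True,
#                        fallback=(env_fallback, ['MYSERVICE_TOKEN', 'API_TOKEN'])),
#     )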
def missing_required_lib(library, reason=None, url=None):
hostname = platform.node()
msg = "Failed to import the required Python library (%s) on %s's Python %s." % (library, hostname, sys.executable)
if reason:
msg += " This is required %s." % reason
if url:
msg += " See %s for more info." % url
return msg + " Please read module documentation and install in the appropriate location"
class AnsibleFallbackNotFound(Exception):
pass
class AnsibleModule(object):
def __init__(self, argument_spec, bypass_checks=False, no_log=False,
check_invalid_arguments=None, mutually_exclusive=None, required_together=None,
required_one_of=None, add_file_common_args=False, supports_check_mode=False,
required_if=None, required_by=None):
'''
Common code for quickly building an ansible module in Python
(although you can write modules with anything that can return JSON).
See :ref:`developing_modules_general` for a general introduction
and :ref:`developing_program_flow_modules` for more detailed explanation.
'''
self._name = os.path.basename(__file__) # initialize name until we can parse from options
self.argument_spec = argument_spec
self.supports_check_mode = supports_check_mode
self.check_mode = False
self.bypass_checks = bypass_checks
self.no_log = no_log
# Check whether code set this explicitly for deprecation purposes
if check_invalid_arguments is None:
check_invalid_arguments = True
module_set_check_invalid_arguments = False
else:
module_set_check_invalid_arguments = True
self.check_invalid_arguments = check_invalid_arguments
self.mutually_exclusive = mutually_exclusive
self.required_together = required_together
self.required_one_of = required_one_of
self.required_if = required_if
self.required_by = required_by
self.cleanup_files = []
self._debug = False
self._diff = False
self._socket_path = None
self._shell = None
self._verbosity = 0
# May be used to set modifications to the environment for any
# run_command invocation
self.run_command_environ_update = {}
self._warnings = []
self._deprecations = []
self._clean = {}
self._string_conversion_action = ''
self.aliases = {}
self._legal_inputs = []
self._options_context = list()
self._tmpdir = None
if add_file_common_args:
for k, v in FILE_COMMON_ARGUMENTS.items():
if k not in self.argument_spec:
self.argument_spec[k] = v
self._load_params()
self._set_fallbacks()
# append to legal_inputs and then possibly check against them
try:
self.aliases = self._handle_aliases()
except (ValueError, TypeError) as e:
# Use exceptions here because it isn't safe to call fail_json until no_log is processed
print('\n{"failed": true, "msg": "Module alias error: %s"}' % to_native(e))
sys.exit(1)
# Save parameter values that should never be logged
self.no_log_values = set()
self._handle_no_log_values()
# check the locale as set by the current environment, and reset to
# a known valid (LANG=C) if it's an invalid/unavailable locale
self._check_locale()
self._check_arguments(check_invalid_arguments)
# check exclusive early
if not bypass_checks:
self._check_mutually_exclusive(mutually_exclusive)
self._set_defaults(pre=True)
self._CHECK_ARGUMENT_TYPES_DISPATCHER = {
'str': self._check_type_str,
'list': self._check_type_list,
'dict': self._check_type_dict,
'bool': self._check_type_bool,
'int': self._check_type_int,
'float': self._check_type_float,
'path': self._check_type_path,
'raw': self._check_type_raw,
'jsonarg': self._check_type_jsonarg,
'json': self._check_type_jsonarg,
'bytes': self._check_type_bytes,
'bits': self._check_type_bits,
}
if not bypass_checks:
self._check_required_arguments()
self._check_argument_types()
self._check_argument_values()
self._check_required_together(required_together)
self._check_required_one_of(required_one_of)
self._check_required_if(required_if)
self._check_required_by(required_by)
self._set_defaults(pre=False)
# deal with options sub-spec
self._handle_options()
if not self.no_log:
self._log_invocation()
# finally, make sure we're in a sane working dir
self._set_cwd()
# Do this at the end so that logging parameters have been set up
        # This is to warn third-party module authors that the functionality is going away.
# We exclude uri and zfs as they have their own deprecation warnings for users and we'll
# make sure to update their code to stop using check_invalid_arguments when 2.9 rolls around
if module_set_check_invalid_arguments and self._name not in ('uri', 'zfs'):
self.deprecate('Setting check_invalid_arguments is deprecated and will be removed.'
                           ' Update the code for this module. In the future, AnsibleModule will'
' always check for invalid arguments.', version='2.9')
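    # Minimal sketch of how a module typically instantiates this class (the option
    # names below are illustrative, not part of AnsibleModule itself):
    #
    #     module = AnsibleModule(
    #         argument_spec=dict(
    #             name=dict(type='str', required=True),
    #             state=dict(type='str', choices=['present', 'absent'], default='present'),
    #         ),
    #         supports_check_mode=True,
    #     )
    #     module.exit_json(changed=False, name=module.params['name'])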
@property
def tmpdir(self):
# if _ansible_tmpdir was not set and we have a remote_tmp,
# the module needs to create it and clean it up once finished.
# otherwise we create our own module tmp dir from the system defaults
if self._tmpdir is None:
basedir = None
if self._remote_tmp is not None:
basedir = os.path.expanduser(os.path.expandvars(self._remote_tmp))
if basedir is not None and not os.path.exists(basedir):
try:
os.makedirs(basedir, mode=0o700)
except (OSError, IOError) as e:
self.warn("Unable to use %s as temporary directory, "
"failing back to system: %s" % (basedir, to_native(e)))
basedir = None
else:
self.warn("Module remote_tmp %s did not exist and was "
"created with a mode of 0700, this may cause"
" issues when running as another user. To "
"avoid this, create the remote_tmp dir with "
"the correct permissions manually" % basedir)
basefile = "ansible-moduletmp-%s-" % time.time()
try:
tmpdir = tempfile.mkdtemp(prefix=basefile, dir=basedir)
except (OSError, IOError) as e:
self.fail_json(
msg="Failed to create remote module tmp path at dir %s "
"with prefix %s: %s" % (basedir, basefile, to_native(e))
)
if not self._keep_remote_files:
atexit.register(shutil.rmtree, tmpdir)
self._tmpdir = tmpdir
return self._tmpdir
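    # Example (inside a module): files created under module.tmpdir are removed at exit
    # unless keep_remote_files is set:
    #
    #     dest = os.path.join(module.tmpdir, 'download.tmp')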
def warn(self, warning):
if isinstance(warning, string_types):
self._warnings.append(warning)
self.log('[WARNING] %s' % warning)
else:
raise TypeError("warn requires a string not a %s" % type(warning))
def deprecate(self, msg, version=None):
if isinstance(msg, string_types):
self._deprecations.append({
'msg': msg,
'version': version
})
self.log('[DEPRECATION WARNING] %s %s' % (msg, version))
else:
raise TypeError("deprecate requires a string not a %s" % type(msg))
def load_file_common_arguments(self, params):
'''
many modules deal with files, this encapsulates common
options that the file module accepts such that it is directly
available to all modules and they can share code.
'''
path = params.get('path', params.get('dest', None))
if path is None:
return {}
else:
path = os.path.expanduser(os.path.expandvars(path))
b_path = to_bytes(path, errors='surrogate_or_strict')
# if the path is a symlink, and we're following links, get
# the target of the link instead for testing
if params.get('follow', False) and os.path.islink(b_path):
b_path = os.path.realpath(b_path)
path = to_native(b_path)
mode = params.get('mode', None)
owner = params.get('owner', None)
group = params.get('group', None)
# selinux related options
seuser = params.get('seuser', None)
serole = params.get('serole', None)
setype = params.get('setype', None)
selevel = params.get('selevel', None)
secontext = [seuser, serole, setype]
if self.selinux_mls_enabled():
secontext.append(selevel)
default_secontext = self.selinux_default_context(path)
for i in range(len(default_secontext)):
if i is not None and secontext[i] == '_default':
secontext[i] = default_secontext[i]
attributes = params.get('attributes', None)
return dict(
path=path, mode=mode, owner=owner, group=group,
seuser=seuser, serole=serole, setype=setype,
selevel=selevel, secontext=secontext, attributes=attributes,
)
# Detect whether using selinux that is MLS-aware.
# While this means you can set the level/range with
# selinux.lsetfilecon(), it may or may not mean that you
# will get the selevel as part of the context returned
# by selinux.lgetfilecon().
def selinux_mls_enabled(self):
if not HAVE_SELINUX:
return False
if selinux.is_selinux_mls_enabled() == 1:
return True
else:
return False
def selinux_enabled(self):
if not HAVE_SELINUX:
seenabled = self.get_bin_path('selinuxenabled')
if seenabled is not None:
(rc, out, err) = self.run_command(seenabled)
if rc == 0:
self.fail_json(msg="Aborting, target uses selinux but python bindings (libselinux-python) aren't installed!")
return False
if selinux.is_selinux_enabled() == 1:
return True
else:
return False
# Determine whether we need a placeholder for selevel/mls
def selinux_initial_context(self):
context = [None, None, None]
if self.selinux_mls_enabled():
context.append(None)
return context
# If selinux fails to find a default, return an array of None
def selinux_default_context(self, path, mode=0):
context = self.selinux_initial_context()
if not HAVE_SELINUX or not self.selinux_enabled():
return context
try:
ret = selinux.matchpathcon(to_native(path, errors='surrogate_or_strict'), mode)
except OSError:
return context
if ret[0] == -1:
return context
# Limit split to 4 because the selevel, the last in the list,
# may contain ':' characters
context = ret[1].split(':', 3)
return context
def selinux_context(self, path):
context = self.selinux_initial_context()
if not HAVE_SELINUX or not self.selinux_enabled():
return context
try:
ret = selinux.lgetfilecon_raw(to_native(path, errors='surrogate_or_strict'))
except OSError as e:
if e.errno == errno.ENOENT:
self.fail_json(path=path, msg='path %s does not exist' % path)
else:
self.fail_json(path=path, msg='failed to retrieve selinux context')
if ret[0] == -1:
return context
# Limit split to 4 because the selevel, the last in the list,
# may contain ':' characters
context = ret[1].split(':', 3)
return context
def user_and_group(self, path, expand=True):
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
st = os.lstat(b_path)
uid = st.st_uid
gid = st.st_gid
return (uid, gid)
def find_mount_point(self, path):
path_is_bytes = False
if isinstance(path, binary_type):
path_is_bytes = True
b_path = os.path.realpath(to_bytes(os.path.expanduser(os.path.expandvars(path)), errors='surrogate_or_strict'))
while not os.path.ismount(b_path):
b_path = os.path.dirname(b_path)
if path_is_bytes:
return b_path
return to_text(b_path, errors='surrogate_or_strict')
def is_special_selinux_path(self, path):
"""
Returns a tuple containing (True, selinux_context) if the given path is on a
NFS or other 'special' fs mount point, otherwise the return will be (False, None).
"""
try:
f = open('/proc/mounts', 'r')
mount_data = f.readlines()
f.close()
except Exception:
return (False, None)
path_mount_point = self.find_mount_point(path)
for line in mount_data:
(device, mount_point, fstype, options, rest) = line.split(' ', 4)
if path_mount_point == mount_point:
for fs in self._selinux_special_fs:
if fs in fstype:
special_context = self.selinux_context(path_mount_point)
return (True, special_context)
return (False, None)
def set_default_selinux_context(self, path, changed):
if not HAVE_SELINUX or not self.selinux_enabled():
return changed
context = self.selinux_default_context(path)
return self.set_context_if_different(path, context, False)
def set_context_if_different(self, path, context, changed, diff=None):
if not HAVE_SELINUX or not self.selinux_enabled():
return changed
if self.check_file_absent_if_check_mode(path):
return True
cur_context = self.selinux_context(path)
new_context = list(cur_context)
# Iterate over the current context instead of the
# argument context, which may have selevel.
(is_special_se, sp_context) = self.is_special_selinux_path(path)
if is_special_se:
new_context = sp_context
else:
for i in range(len(cur_context)):
if len(context) > i:
if context[i] is not None and context[i] != cur_context[i]:
new_context[i] = context[i]
elif context[i] is None:
new_context[i] = cur_context[i]
if cur_context != new_context:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['secontext'] = cur_context
if 'after' not in diff:
diff['after'] = {}
diff['after']['secontext'] = new_context
try:
if self.check_mode:
return True
rc = selinux.lsetfilecon(to_native(path), ':'.join(new_context))
except OSError as e:
self.fail_json(path=path, msg='invalid selinux context: %s' % to_native(e),
new_context=new_context, cur_context=cur_context, input_was=context)
if rc != 0:
self.fail_json(path=path, msg='set selinux context failed')
changed = True
return changed
def set_owner_if_different(self, path, owner, changed, diff=None, expand=True):
if owner is None:
return changed
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
if self.check_file_absent_if_check_mode(b_path):
return True
orig_uid, orig_gid = self.user_and_group(b_path, expand)
try:
uid = int(owner)
except ValueError:
try:
uid = pwd.getpwnam(owner).pw_uid
except KeyError:
path = to_text(b_path)
self.fail_json(path=path, msg='chown failed: failed to look up user %s' % owner)
if orig_uid != uid:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['owner'] = orig_uid
if 'after' not in diff:
diff['after'] = {}
diff['after']['owner'] = uid
if self.check_mode:
return True
try:
os.lchown(b_path, uid, -1)
except (IOError, OSError) as e:
path = to_text(b_path)
self.fail_json(path=path, msg='chown failed: %s' % (to_text(e)))
changed = True
return changed
def set_group_if_different(self, path, group, changed, diff=None, expand=True):
if group is None:
return changed
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
if self.check_file_absent_if_check_mode(b_path):
return True
orig_uid, orig_gid = self.user_and_group(b_path, expand)
try:
gid = int(group)
except ValueError:
try:
gid = grp.getgrnam(group).gr_gid
except KeyError:
path = to_text(b_path)
self.fail_json(path=path, msg='chgrp failed: failed to look up group %s' % group)
if orig_gid != gid:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['group'] = orig_gid
if 'after' not in diff:
diff['after'] = {}
diff['after']['group'] = gid
if self.check_mode:
return True
try:
os.lchown(b_path, -1, gid)
except OSError:
path = to_text(b_path)
self.fail_json(path=path, msg='chgrp failed')
changed = True
return changed
def set_mode_if_different(self, path, mode, changed, diff=None, expand=True):
if mode is None:
return changed
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
path_stat = os.lstat(b_path)
if self.check_file_absent_if_check_mode(b_path):
return True
if not isinstance(mode, int):
try:
mode = int(mode, 8)
except Exception:
try:
mode = self._symbolic_mode_to_octal(path_stat, mode)
except Exception as e:
path = to_text(b_path)
self.fail_json(path=path,
msg="mode must be in octal or symbolic form",
details=to_native(e))
if mode != stat.S_IMODE(mode):
                # prevent mode from having extra info or being an invalid long number
path = to_text(b_path)
self.fail_json(path=path, msg="Invalid mode supplied, only permission info is allowed", details=mode)
prev_mode = stat.S_IMODE(path_stat.st_mode)
if prev_mode != mode:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['mode'] = '0%03o' % prev_mode
if 'after' not in diff:
diff['after'] = {}
diff['after']['mode'] = '0%03o' % mode
if self.check_mode:
return True
# FIXME: comparison against string above will cause this to be executed
# every time
try:
if hasattr(os, 'lchmod'):
os.lchmod(b_path, mode)
else:
if not os.path.islink(b_path):
os.chmod(b_path, mode)
else:
# Attempt to set the perms of the symlink but be
# careful not to change the perms of the underlying
# file while trying
underlying_stat = os.stat(b_path)
os.chmod(b_path, mode)
new_underlying_stat = os.stat(b_path)
if underlying_stat.st_mode != new_underlying_stat.st_mode:
os.chmod(b_path, stat.S_IMODE(underlying_stat.st_mode))
except OSError as e:
if os.path.islink(b_path) and e.errno in (errno.EPERM, errno.EROFS): # Can't set mode on symbolic links
pass
elif e.errno in (errno.ENOENT, errno.ELOOP): # Can't set mode on broken symbolic links
pass
else:
raise
except Exception as e:
path = to_text(b_path)
self.fail_json(path=path, msg='chmod failed', details=to_native(e),
exception=traceback.format_exc())
path_stat = os.lstat(b_path)
new_mode = stat.S_IMODE(path_stat.st_mode)
if new_mode != prev_mode:
changed = True
return changed
def set_attributes_if_different(self, path, attributes, changed, diff=None, expand=True):
if attributes is None:
return changed
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
if self.check_file_absent_if_check_mode(b_path):
return True
existing = self.get_file_attributes(b_path)
attr_mod = '='
if attributes.startswith(('-', '+')):
attr_mod = attributes[0]
attributes = attributes[1:]
if existing.get('attr_flags', '') != attributes or attr_mod == '-':
attrcmd = self.get_bin_path('chattr')
if attrcmd:
attrcmd = [attrcmd, '%s%s' % (attr_mod, attributes), b_path]
changed = True
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['attributes'] = existing.get('attr_flags')
if 'after' not in diff:
diff['after'] = {}
diff['after']['attributes'] = '%s%s' % (attr_mod, attributes)
if not self.check_mode:
try:
rc, out, err = self.run_command(attrcmd)
if rc != 0 or err:
raise Exception("Error while setting attributes: %s" % (out + err))
except Exception as e:
self.fail_json(path=to_text(b_path), msg='chattr failed',
details=to_native(e), exception=traceback.format_exc())
return changed
def get_file_attributes(self, path):
output = {}
attrcmd = self.get_bin_path('lsattr', False)
if attrcmd:
attrcmd = [attrcmd, '-vd', path]
try:
rc, out, err = self.run_command(attrcmd)
if rc == 0:
res = out.split()
output['attr_flags'] = res[1].replace('-', '').strip()
output['version'] = res[0].strip()
output['attributes'] = format_attributes(output['attr_flags'])
except Exception:
pass
return output
@classmethod
def _symbolic_mode_to_octal(cls, path_stat, symbolic_mode):
"""
This enables symbolic chmod string parsing as stated in the chmod man-page
This includes things like: "u=rw-x+X,g=r-x+X,o=r-x+X"
"""
new_mode = stat.S_IMODE(path_stat.st_mode)
# Now parse all symbolic modes
for mode in symbolic_mode.split(','):
# Per single mode. This always contains a '+', '-' or '='
# Split it on that
permlist = MODE_OPERATOR_RE.split(mode)
# And find all the operators
opers = MODE_OPERATOR_RE.findall(mode)
# The user(s) where it's all about is the first element in the
# 'permlist' list. Take that and remove it from the list.
# An empty user or 'a' means 'all'.
users = permlist.pop(0)
use_umask = (users == '')
if users == 'a' or users == '':
users = 'ugo'
# Check if there are illegal characters in the user list
# They can end up in 'users' because they are not split
if USERS_RE.match(users):
raise ValueError("bad symbolic permission for mode: %s" % mode)
# Now we have two list of equal length, one contains the requested
# permissions and one with the corresponding operators.
for idx, perms in enumerate(permlist):
# Check if there are illegal characters in the permissions
if PERMS_RE.match(perms):
raise ValueError("bad symbolic permission for mode: %s" % mode)
for user in users:
mode_to_apply = cls._get_octal_mode_from_symbolic_perms(path_stat, user, perms, use_umask)
new_mode = cls._apply_operation_to_mode(user, opers[idx], mode_to_apply, new_mode)
return new_mode
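    # Hedged example of the conversion performed above (the path is arbitrary; any
    # existing regular file works):
    #
    #     st = os.lstat('/etc/hostname')
    #     AnsibleModule._symbolic_mode_to_octal(st, 'u=rw,g=r,o=r')  # -> 0o644
    #     AnsibleModule._symbolic_mode_to_octal(st, 'u+x')           # -> st's permission bits | 0o100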
@staticmethod
def _apply_operation_to_mode(user, operator, mode_to_apply, current_mode):
if operator == '=':
if user == 'u':
mask = stat.S_IRWXU | stat.S_ISUID
elif user == 'g':
mask = stat.S_IRWXG | stat.S_ISGID
elif user == 'o':
mask = stat.S_IRWXO | stat.S_ISVTX
# mask out u, g, or o permissions from current_mode and apply new permissions
inverse_mask = mask ^ PERM_BITS
new_mode = (current_mode & inverse_mask) | mode_to_apply
elif operator == '+':
new_mode = current_mode | mode_to_apply
elif operator == '-':
new_mode = current_mode - (current_mode & mode_to_apply)
return new_mode
@staticmethod
def _get_octal_mode_from_symbolic_perms(path_stat, user, perms, use_umask):
prev_mode = stat.S_IMODE(path_stat.st_mode)
is_directory = stat.S_ISDIR(path_stat.st_mode)
has_x_permissions = (prev_mode & EXEC_PERM_BITS) > 0
apply_X_permission = is_directory or has_x_permissions
# Get the umask, if the 'user' part is empty, the effect is as if (a) were
# given, but bits that are set in the umask are not affected.
# We also need the "reversed umask" for masking
umask = os.umask(0)
os.umask(umask)
rev_umask = umask ^ PERM_BITS
# Permission bits constants documented at:
# http://docs.python.org/2/library/stat.html#stat.S_ISUID
if apply_X_permission:
X_perms = {
'u': {'X': stat.S_IXUSR},
'g': {'X': stat.S_IXGRP},
'o': {'X': stat.S_IXOTH},
}
else:
X_perms = {
'u': {'X': 0},
'g': {'X': 0},
'o': {'X': 0},
}
user_perms_to_modes = {
'u': {
'r': rev_umask & stat.S_IRUSR if use_umask else stat.S_IRUSR,
'w': rev_umask & stat.S_IWUSR if use_umask else stat.S_IWUSR,
'x': rev_umask & stat.S_IXUSR if use_umask else stat.S_IXUSR,
's': stat.S_ISUID,
't': 0,
'u': prev_mode & stat.S_IRWXU,
'g': (prev_mode & stat.S_IRWXG) << 3,
'o': (prev_mode & stat.S_IRWXO) << 6},
'g': {
'r': rev_umask & stat.S_IRGRP if use_umask else stat.S_IRGRP,
'w': rev_umask & stat.S_IWGRP if use_umask else stat.S_IWGRP,
'x': rev_umask & stat.S_IXGRP if use_umask else stat.S_IXGRP,
's': stat.S_ISGID,
't': 0,
'u': (prev_mode & stat.S_IRWXU) >> 3,
'g': prev_mode & stat.S_IRWXG,
'o': (prev_mode & stat.S_IRWXO) << 3},
'o': {
'r': rev_umask & stat.S_IROTH if use_umask else stat.S_IROTH,
'w': rev_umask & stat.S_IWOTH if use_umask else stat.S_IWOTH,
'x': rev_umask & stat.S_IXOTH if use_umask else stat.S_IXOTH,
's': 0,
't': stat.S_ISVTX,
'u': (prev_mode & stat.S_IRWXU) >> 6,
'g': (prev_mode & stat.S_IRWXG) >> 3,
'o': prev_mode & stat.S_IRWXO},
}
# Insert X_perms into user_perms_to_modes
for key, value in X_perms.items():
user_perms_to_modes[key].update(value)
def or_reduce(mode, perm):
return mode | user_perms_to_modes[user][perm]
return reduce(or_reduce, perms, 0)
def set_fs_attributes_if_different(self, file_args, changed, diff=None, expand=True):
        # set modes, owners, and context as needed
changed = self.set_context_if_different(
file_args['path'], file_args['secontext'], changed, diff
)
changed = self.set_owner_if_different(
file_args['path'], file_args['owner'], changed, diff, expand
)
changed = self.set_group_if_different(
file_args['path'], file_args['group'], changed, diff, expand
)
changed = self.set_mode_if_different(
file_args['path'], file_args['mode'], changed, diff, expand
)
changed = self.set_attributes_if_different(
file_args['path'], file_args['attributes'], changed, diff, expand
)
return changed
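    # Typical call sequence from a file-handling module (hedged sketch; `module` and
    # `changed` come from the surrounding module code):
    #
    #     file_args = module.load_file_common_arguments(module.params)
    #     changed = module.set_fs_attributes_if_different(file_args, changed)
    #     module.exit_json(changed=changed, path=file_args['path'])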
def check_file_absent_if_check_mode(self, file_path):
return self.check_mode and not os.path.exists(file_path)
def set_directory_attributes_if_different(self, file_args, changed, diff=None, expand=True):
return self.set_fs_attributes_if_different(file_args, changed, diff, expand)
def set_file_attributes_if_different(self, file_args, changed, diff=None, expand=True):
return self.set_fs_attributes_if_different(file_args, changed, diff, expand)
def add_path_info(self, kwargs):
'''
for results that are files, supplement the info about the file
in the return path with stats about the file path.
'''
path = kwargs.get('path', kwargs.get('dest', None))
if path is None:
return kwargs
b_path = to_bytes(path, errors='surrogate_or_strict')
if os.path.exists(b_path):
(uid, gid) = self.user_and_group(path)
kwargs['uid'] = uid
kwargs['gid'] = gid
try:
user = pwd.getpwuid(uid)[0]
except KeyError:
user = str(uid)
try:
group = grp.getgrgid(gid)[0]
except KeyError:
group = str(gid)
kwargs['owner'] = user
kwargs['group'] = group
st = os.lstat(b_path)
kwargs['mode'] = '0%03o' % stat.S_IMODE(st[stat.ST_MODE])
# secontext not yet supported
if os.path.islink(b_path):
kwargs['state'] = 'link'
elif os.path.isdir(b_path):
kwargs['state'] = 'directory'
elif os.stat(b_path).st_nlink > 1:
kwargs['state'] = 'hard'
else:
kwargs['state'] = 'file'
if HAVE_SELINUX and self.selinux_enabled():
kwargs['secontext'] = ':'.join(self.selinux_context(path))
kwargs['size'] = st[stat.ST_SIZE]
return kwargs
def _check_locale(self):
'''
Uses the locale module to test the currently set locale
(per the LANG and LC_CTYPE environment settings)
'''
try:
# setting the locale to '' uses the default locale
# as it would be returned by locale.getdefaultlocale()
locale.setlocale(locale.LC_ALL, '')
except locale.Error:
# fallback to the 'C' locale, which may cause unicode
# issues but is preferable to simply failing because
# of an unknown locale
locale.setlocale(locale.LC_ALL, 'C')
os.environ['LANG'] = 'C'
os.environ['LC_ALL'] = 'C'
os.environ['LC_MESSAGES'] = 'C'
except Exception as e:
self.fail_json(msg="An unknown error was encountered while attempting to validate the locale: %s" %
to_native(e), exception=traceback.format_exc())
def _handle_aliases(self, spec=None, param=None):
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
# this uses exceptions as it happens before we can safely call fail_json
alias_results, self._legal_inputs = handle_aliases(spec, param)
return alias_results
def _handle_no_log_values(self, spec=None, param=None):
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
self.no_log_values.update(list_no_log_values(spec, param))
self._deprecations.extend(list_deprecations(spec, param))
def _check_arguments(self, check_invalid_arguments, spec=None, param=None, legal_inputs=None):
self._syslog_facility = 'LOG_USER'
unsupported_parameters = set()
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
if legal_inputs is None:
legal_inputs = self._legal_inputs
for k in list(param.keys()):
if check_invalid_arguments and k not in legal_inputs:
unsupported_parameters.add(k)
for k in PASS_VARS:
# handle setting internal properties from internal ansible vars
param_key = '_ansible_%s' % k
if param_key in param:
if k in PASS_BOOLS:
setattr(self, PASS_VARS[k][0], self.boolean(param[param_key]))
else:
setattr(self, PASS_VARS[k][0], param[param_key])
# clean up internal top level params:
if param_key in self.params:
del self.params[param_key]
else:
# use defaults if not already set
if not hasattr(self, PASS_VARS[k][0]):
setattr(self, PASS_VARS[k][0], PASS_VARS[k][1])
if unsupported_parameters:
msg = "Unsupported parameters for (%s) module: %s" % (self._name, ', '.join(sorted(list(unsupported_parameters))))
if self._options_context:
msg += " found in %s." % " -> ".join(self._options_context)
msg += " Supported parameters include: %s" % (', '.join(sorted(spec.keys())))
self.fail_json(msg=msg)
if self.check_mode and not self.supports_check_mode:
self.exit_json(skipped=True, msg="remote module (%s) does not support check mode" % self._name)
def _count_terms(self, check, param=None):
if param is None:
param = self.params
return count_terms(check, param)
def _check_mutually_exclusive(self, spec, param=None):
if param is None:
param = self.params
try:
check_mutually_exclusive(spec, param)
except TypeError as e:
msg = to_native(e)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_required_one_of(self, spec, param=None):
if spec is None:
return
if param is None:
param = self.params
try:
check_required_one_of(spec, param)
except TypeError as e:
msg = to_native(e)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_required_together(self, spec, param=None):
if spec is None:
return
if param is None:
param = self.params
try:
check_required_together(spec, param)
except TypeError as e:
msg = to_native(e)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_required_by(self, spec, param=None):
if spec is None:
return
if param is None:
param = self.params
try:
check_required_by(spec, param)
except TypeError as e:
self.fail_json(msg=to_native(e))
def _check_required_arguments(self, spec=None, param=None):
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
try:
check_required_arguments(spec, param)
except TypeError as e:
msg = to_native(e)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_required_if(self, spec, param=None):
        ''' ensure that parameters which are conditionally required are present '''
if spec is None:
return
if param is None:
param = self.params
try:
check_required_if(spec, param)
except TypeError as e:
msg = to_native(e)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_argument_values(self, spec=None, param=None):
''' ensure all arguments have the requested values, and there are no stray arguments '''
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
choices = v.get('choices', None)
if choices is None:
continue
if isinstance(choices, SEQUENCETYPE) and not isinstance(choices, (binary_type, text_type)):
if k in param:
# Allow one or more when type='list' param with choices
if isinstance(param[k], list):
diff_list = ", ".join([item for item in param[k] if item not in choices])
if diff_list:
choices_str = ", ".join([to_native(c) for c in choices])
msg = "value of %s must be one or more of: %s. Got no match for: %s" % (k, choices_str, diff_list)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
elif param[k] not in choices:
# PyYaml converts certain strings to bools. If we can unambiguously convert back, do so before checking
# the value. If we can't figure this out, module author is responsible.
lowered_choices = None
if param[k] == 'False':
lowered_choices = lenient_lowercase(choices)
overlap = BOOLEANS_FALSE.intersection(choices)
if len(overlap) == 1:
# Extract from a set
(param[k],) = overlap
if param[k] == 'True':
if lowered_choices is None:
lowered_choices = lenient_lowercase(choices)
overlap = BOOLEANS_TRUE.intersection(choices)
if len(overlap) == 1:
(param[k],) = overlap
if param[k] not in choices:
choices_str = ", ".join([to_native(c) for c in choices])
msg = "value of %s must be one of: %s, got: %s" % (k, choices_str, param[k])
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
else:
msg = "internal error: choices for argument %s are not iterable: %s" % (k, choices)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def safe_eval(self, value, locals=None, include_exceptions=False):
return safe_eval(value, locals, include_exceptions)
def _check_type_str(self, value):
opts = {
'error': False,
'warn': False,
'ignore': True
}
# Ignore, warn, or error when converting to a string.
allow_conversion = opts.get(self._string_conversion_action, True)
try:
return check_type_str(value, allow_conversion)
except TypeError:
common_msg = 'quote the entire value to ensure it does not change.'
if self._string_conversion_action == 'error':
msg = common_msg.capitalize()
raise TypeError(to_native(msg))
elif self._string_conversion_action == 'warn':
msg = ('The value {0!r} (type {0.__class__.__name__}) in a string field was converted to {1!r} (type string). '
'If this does not look like what you expect, {2}').format(value, to_text(value), common_msg)
self.warn(to_native(msg))
return to_native(value, errors='surrogate_or_strict')
def _check_type_list(self, value):
return check_type_list(value)
def _check_type_dict(self, value):
return check_type_dict(value)
def _check_type_bool(self, value):
return check_type_bool(value)
def _check_type_int(self, value):
return check_type_int(value)
def _check_type_float(self, value):
return check_type_float(value)
def _check_type_path(self, value):
return check_type_path(value)
def _check_type_jsonarg(self, value):
return check_type_jsonarg(value)
def _check_type_raw(self, value):
return check_type_raw(value)
def _check_type_bytes(self, value):
return check_type_bytes(value)
def _check_type_bits(self, value):
return check_type_bits(value)
def _handle_options(self, argument_spec=None, params=None):
''' deal with options to create sub spec '''
if argument_spec is None:
argument_spec = self.argument_spec
if params is None:
params = self.params
for (k, v) in argument_spec.items():
wanted = v.get('type', None)
if wanted == 'dict' or (wanted == 'list' and v.get('elements', '') == 'dict'):
spec = v.get('options', None)
if v.get('apply_defaults', False):
if spec is not None:
if params.get(k) is None:
params[k] = {}
else:
continue
elif spec is None or k not in params or params[k] is None:
continue
self._options_context.append(k)
if isinstance(params[k], dict):
elements = [params[k]]
else:
elements = params[k]
for param in elements:
if not isinstance(param, dict):
self.fail_json(msg="value of %s must be of type dict or list of dict" % k)
self._set_fallbacks(spec, param)
options_aliases = self._handle_aliases(spec, param)
self._handle_no_log_values(spec, param)
options_legal_inputs = list(spec.keys()) + list(options_aliases.keys())
self._check_arguments(self.check_invalid_arguments, spec, param, options_legal_inputs)
# check exclusive early
if not self.bypass_checks:
self._check_mutually_exclusive(v.get('mutually_exclusive', None), param)
self._set_defaults(pre=True, spec=spec, param=param)
if not self.bypass_checks:
self._check_required_arguments(spec, param)
self._check_argument_types(spec, param)
self._check_argument_values(spec, param)
self._check_required_together(v.get('required_together', None), param)
self._check_required_one_of(v.get('required_one_of', None), param)
self._check_required_if(v.get('required_if', None), param)
self._check_required_by(v.get('required_by', None), param)
self._set_defaults(pre=False, spec=spec, param=param)
# handle multi level options (sub argspec)
self._handle_options(spec, param)
self._options_context.pop()
def _get_wanted_type(self, wanted, k):
if not callable(wanted):
if wanted is None:
# Mostly we want to default to str.
# For values set to None explicitly, return None instead as
# that allows a user to unset a parameter
wanted = 'str'
try:
type_checker = self._CHECK_ARGUMENT_TYPES_DISPATCHER[wanted]
except KeyError:
self.fail_json(msg="implementation error: unknown type %s requested for %s" % (wanted, k))
else:
# set the type_checker to the callable, and reset wanted to the callable's name (or type if it doesn't have one, ala MagicMock)
type_checker = wanted
wanted = getattr(wanted, '__name__', to_native(type(wanted)))
return type_checker, wanted
def _handle_elements(self, wanted, param, values):
type_checker, wanted_name = self._get_wanted_type(wanted, param)
validated_params = []
for value in values:
try:
validated_params.append(type_checker(value))
except (TypeError, ValueError) as e:
msg = "Elements value for option %s" % param
if self._options_context:
msg += " found in '%s'" % " -> ".join(self._options_context)
msg += " is of type %s and we were unable to convert to %s: %s" % (type(value), wanted_name, to_native(e))
self.fail_json(msg=msg)
return validated_params
def _check_argument_types(self, spec=None, param=None):
''' ensure all arguments have the requested type '''
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
wanted = v.get('type', None)
if k not in param:
continue
value = param[k]
if value is None:
continue
type_checker, wanted_name = self._get_wanted_type(wanted, k)
try:
param[k] = type_checker(value)
wanted_elements = v.get('elements', None)
if wanted_elements:
if wanted != 'list' or not isinstance(param[k], list):
msg = "Invalid type %s for option '%s'" % (wanted_name, param)
if self._options_context:
msg += " found in '%s'." % " -> ".join(self._options_context)
msg += ", elements value check is supported only with 'list' type"
self.fail_json(msg=msg)
param[k] = self._handle_elements(wanted_elements, k, param[k])
except (TypeError, ValueError) as e:
msg = "argument %s is of type %s" % (k, type(value))
if self._options_context:
msg += " found in '%s'." % " -> ".join(self._options_context)
msg += " and we were unable to convert to %s: %s" % (wanted_name, to_native(e))
self.fail_json(msg=msg)
def _set_defaults(self, pre=True, spec=None, param=None):
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
default = v.get('default', None)
if pre is True:
# this prevents setting defaults on required items
if default is not None and k not in param:
param[k] = default
else:
# make sure things without a default still get set None
if k not in param:
param[k] = default
def _set_fallbacks(self, spec=None, param=None):
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
fallback = v.get('fallback', (None,))
fallback_strategy = fallback[0]
fallback_args = []
fallback_kwargs = {}
if k not in param and fallback_strategy is not None:
for item in fallback[1:]:
if isinstance(item, dict):
fallback_kwargs = item
else:
fallback_args = item
try:
param[k] = fallback_strategy(*fallback_args, **fallback_kwargs)
except AnsibleFallbackNotFound:
continue
def _load_params(self):
''' read the input and set the params attribute.
This method is for backwards compatibility. The guts of the function
were moved out in 2.1 so that custom modules could read the parameters.
'''
# debug overrides to read args from file or cmdline
self.params = _load_params()
def _log_to_syslog(self, msg):
if HAS_SYSLOG:
module = 'ansible-%s' % self._name
facility = getattr(syslog, self._syslog_facility, syslog.LOG_USER)
syslog.openlog(str(module), 0, facility)
syslog.syslog(syslog.LOG_INFO, msg)
def debug(self, msg):
if self._debug:
self.log('[debug] %s' % msg)
def log(self, msg, log_args=None):
if not self.no_log:
if log_args is None:
log_args = dict()
module = 'ansible-%s' % self._name
if isinstance(module, binary_type):
module = module.decode('utf-8', 'replace')
# 6655 - allow for accented characters
if not isinstance(msg, (binary_type, text_type)):
raise TypeError("msg should be a string (got %s)" % type(msg))
# We want journal to always take text type
# syslog takes bytes on py2, text type on py3
if isinstance(msg, binary_type):
journal_msg = remove_values(msg.decode('utf-8', 'replace'), self.no_log_values)
else:
# TODO: surrogateescape is a danger here on Py3
journal_msg = remove_values(msg, self.no_log_values)
if PY3:
syslog_msg = journal_msg
else:
syslog_msg = journal_msg.encode('utf-8', 'replace')
if has_journal:
journal_args = [("MODULE", os.path.basename(__file__))]
for arg in log_args:
journal_args.append((arg.upper(), str(log_args[arg])))
try:
if HAS_SYSLOG:
# If syslog_facility specified, it needs to convert
# from the facility name to the facility code, and
# set it as SYSLOG_FACILITY argument of journal.send()
facility = getattr(syslog,
self._syslog_facility,
syslog.LOG_USER) >> 3
journal.send(MESSAGE=u"%s %s" % (module, journal_msg),
SYSLOG_FACILITY=facility,
**dict(journal_args))
else:
journal.send(MESSAGE=u"%s %s" % (module, journal_msg),
**dict(journal_args))
except IOError:
# fall back to syslog since logging to journal failed
self._log_to_syslog(syslog_msg)
else:
self._log_to_syslog(syslog_msg)
def _log_invocation(self):
''' log that ansible ran the module '''
# TODO: generalize a separate log function and make log_invocation use it
# Sanitize possible password argument when logging.
log_args = dict()
for param in self.params:
canon = self.aliases.get(param, param)
arg_opts = self.argument_spec.get(canon, {})
no_log = arg_opts.get('no_log', False)
if self.boolean(no_log):
log_args[param] = 'NOT_LOGGING_PARAMETER'
# try to capture all passwords/passphrase named fields missed by no_log
elif PASSWORD_MATCH.search(param) and arg_opts.get('type', 'str') != 'bool' and not arg_opts.get('choices', False):
# skip boolean and enums as they are about 'password' state
log_args[param] = 'NOT_LOGGING_PASSWORD'
self.warn('Module did not set no_log for %s' % param)
else:
param_val = self.params[param]
if not isinstance(param_val, (text_type, binary_type)):
param_val = str(param_val)
elif isinstance(param_val, text_type):
param_val = param_val.encode('utf-8')
log_args[param] = heuristic_log_sanitize(param_val, self.no_log_values)
msg = ['%s=%s' % (to_native(arg), to_native(val)) for arg, val in log_args.items()]
if msg:
msg = 'Invoked with %s' % ' '.join(msg)
else:
msg = 'Invoked'
self.log(msg, log_args=log_args)
def _set_cwd(self):
try:
cwd = os.getcwd()
if not os.access(cwd, os.F_OK | os.R_OK):
raise Exception()
return cwd
except Exception:
# we don't have access to the cwd, probably because of sudo.
# Try and move to a neutral location to prevent errors
for cwd in [self.tmpdir, os.path.expandvars('$HOME'), tempfile.gettempdir()]:
try:
if os.access(cwd, os.F_OK | os.R_OK):
os.chdir(cwd)
return cwd
except Exception:
pass
# we won't error here, as it may *not* be a problem,
# and we don't want to break modules unnecessarily
return None
def get_bin_path(self, arg, required=False, opt_dirs=None):
'''
Find system executable in PATH.
:param arg: The executable to find.
:param required: if executable is not found and required is ``True``, fail_json
:param opt_dirs: optional list of directories to search in addition to ``PATH``
:returns: if found return full path; otherwise return None
'''
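        # Illustrative usage (sketch; 'git' and the extra directory are hypothetical, not from this source):
        #   git_path = module.get_bin_path('git', required=True, opt_dirs=['/usr/local/bin'])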
bin_path = None
try:
bin_path = get_bin_path(arg, required, opt_dirs)
except ValueError as e:
self.fail_json(msg=to_text(e))
return bin_path
def boolean(self, arg):
'''Convert the argument to a boolean'''
if arg is None:
return arg
try:
return boolean(arg)
except TypeError as e:
self.fail_json(msg=to_native(e))
def jsonify(self, data):
try:
return jsonify(data)
except UnicodeError as e:
self.fail_json(msg=to_text(e))
def from_json(self, data):
return json.loads(data)
def add_cleanup_file(self, path):
if path not in self.cleanup_files:
self.cleanup_files.append(path)
def do_cleanup_files(self):
for path in self.cleanup_files:
self.cleanup(path)
def _return_formatted(self, kwargs):
self.add_path_info(kwargs)
if 'invocation' not in kwargs:
kwargs['invocation'] = {'module_args': self.params}
if 'warnings' in kwargs:
if isinstance(kwargs['warnings'], list):
for w in kwargs['warnings']:
self.warn(w)
else:
self.warn(kwargs['warnings'])
if self._warnings:
kwargs['warnings'] = self._warnings
if 'deprecations' in kwargs:
if isinstance(kwargs['deprecations'], list):
for d in kwargs['deprecations']:
if isinstance(d, SEQUENCETYPE) and len(d) == 2:
self.deprecate(d[0], version=d[1])
elif isinstance(d, Mapping):
self.deprecate(d['msg'], version=d.get('version', None))
else:
self.deprecate(d)
else:
self.deprecate(kwargs['deprecations'])
if self._deprecations:
kwargs['deprecations'] = self._deprecations
kwargs = remove_values(kwargs, self.no_log_values)
print('\n%s' % self.jsonify(kwargs))
def exit_json(self, **kwargs):
''' return from the module, without error '''
self.do_cleanup_files()
self._return_formatted(kwargs)
sys.exit(0)
def fail_json(self, **kwargs):
''' return from the module, with an error message '''
if 'msg' not in kwargs:
raise AssertionError("implementation error -- msg to explain the error is required")
kwargs['failed'] = True
# Add traceback if debug or high verbosity and it is missing
# NOTE: Badly named as exception, it really always has been a traceback
if 'exception' not in kwargs and sys.exc_info()[2] and (self._debug or self._verbosity >= 3):
if PY2:
# On Python 2 this is the last (stack frame) exception and as such may be unrelated to the failure
kwargs['exception'] = 'WARNING: The below traceback may *not* be related to the actual failure.\n' +\
''.join(traceback.format_tb(sys.exc_info()[2]))
else:
kwargs['exception'] = ''.join(traceback.format_tb(sys.exc_info()[2]))
self.do_cleanup_files()
self._return_formatted(kwargs)
sys.exit(1)
def fail_on_missing_params(self, required_params=None):
if not required_params:
return
try:
check_missing_parameters(self.params, required_params)
except TypeError as e:
self.fail_json(msg=to_native(e))
def digest_from_file(self, filename, algorithm):
''' Return hex digest of local file for a digest_method specified by name, or None if file is not present. '''
b_filename = to_bytes(filename, errors='surrogate_or_strict')
if not os.path.exists(b_filename):
return None
if os.path.isdir(b_filename):
self.fail_json(msg="attempted to take checksum of directory: %s" % filename)
# preserve old behaviour where the third parameter was a hash algorithm object
if hasattr(algorithm, 'hexdigest'):
digest_method = algorithm
else:
try:
digest_method = AVAILABLE_HASH_ALGORITHMS[algorithm]()
except KeyError:
self.fail_json(msg="Could not hash file '%s' with algorithm '%s'. Available algorithms: %s" %
(filename, algorithm, ', '.join(AVAILABLE_HASH_ALGORITHMS)))
blocksize = 64 * 1024
infile = open(os.path.realpath(b_filename), 'rb')
block = infile.read(blocksize)
while block:
digest_method.update(block)
block = infile.read(blocksize)
infile.close()
return digest_method.hexdigest()
def md5(self, filename):
''' Return MD5 hex digest of local file using digest_from_file().
Do not use this function unless you have no other choice for:
1) Optional backwards compatibility
2) Compatibility with a third party protocol
This function will not work on systems complying with FIPS-140-2.
Most uses of this function can use the module.sha1 function instead.
'''
if 'md5' not in AVAILABLE_HASH_ALGORITHMS:
raise ValueError('MD5 not available. Possibly running in FIPS mode')
return self.digest_from_file(filename, 'md5')
def sha1(self, filename):
''' Return SHA1 hex digest of local file using digest_from_file(). '''
return self.digest_from_file(filename, 'sha1')
def sha256(self, filename):
''' Return SHA-256 hex digest of local file using digest_from_file(). '''
return self.digest_from_file(filename, 'sha256')
def backup_local(self, fn):
'''make a date-marked backup of the specified file, return True or False on success or failure'''
backupdest = ''
if os.path.exists(fn):
# backups named basename.PID.YYYY-MM-DD@HH:MM:SS~
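            # e.g. (illustrative) /etc/hosts -> /etc/hosts.4321.2019-01-31@12:00:00~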
ext = time.strftime("%Y-%m-%d@%H:%M:%S~", time.localtime(time.time()))
backupdest = '%s.%s.%s' % (fn, os.getpid(), ext)
try:
self.preserved_copy(fn, backupdest)
except (shutil.Error, IOError) as e:
self.fail_json(msg='Could not make backup of %s to %s: %s' % (fn, backupdest, to_native(e)))
return backupdest
def cleanup(self, tmpfile):
if os.path.exists(tmpfile):
try:
os.unlink(tmpfile)
except OSError as e:
sys.stderr.write("could not cleanup %s: %s" % (tmpfile, to_native(e)))
def preserved_copy(self, src, dest):
"""Copy a file with preserved ownership, permissions and context"""
# shutil.copy2(src, dst)
# Similar to shutil.copy(), but metadata is copied as well - in fact,
# this is just shutil.copy() followed by copystat(). This is similar
# to the Unix command cp -p.
#
# shutil.copystat(src, dst)
# Copy the permission bits, last access time, last modification time,
# and flags from src to dst. The file contents, owner, and group are
# unaffected. src and dst are path names given as strings.
shutil.copy2(src, dest)
# Set the context
if self.selinux_enabled():
context = self.selinux_context(src)
self.set_context_if_different(dest, context, False)
# chown it
try:
dest_stat = os.stat(src)
tmp_stat = os.stat(dest)
if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
os.chown(dest, dest_stat.st_uid, dest_stat.st_gid)
except OSError as e:
if e.errno != errno.EPERM:
raise
# Set the attributes
current_attribs = self.get_file_attributes(src)
current_attribs = current_attribs.get('attr_flags', '')
self.set_attributes_if_different(dest, current_attribs, True)
def atomic_move(self, src, dest, unsafe_writes=False):
'''atomically move src to dest, copying attributes from dest, returns true on success
it uses os.rename to ensure this as it is an atomic operation, rest of the function is
to work around limitations, corner cases and ensure selinux context is saved if possible'''
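        # Illustrative usage (sketch; the paths and the module instance are hypothetical):
        #   module.atomic_move('/tmp/new_hosts', '/etc/hosts', unsafe_writes=False)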
context = None
dest_stat = None
b_src = to_bytes(src, errors='surrogate_or_strict')
b_dest = to_bytes(dest, errors='surrogate_or_strict')
if os.path.exists(b_dest):
try:
dest_stat = os.stat(b_dest)
# copy mode and ownership
os.chmod(b_src, dest_stat.st_mode & PERM_BITS)
os.chown(b_src, dest_stat.st_uid, dest_stat.st_gid)
# try to copy flags if possible
if hasattr(os, 'chflags') and hasattr(dest_stat, 'st_flags'):
try:
os.chflags(b_src, dest_stat.st_flags)
except OSError as e:
for err in 'EOPNOTSUPP', 'ENOTSUP':
if hasattr(errno, err) and e.errno == getattr(errno, err):
break
else:
raise
except OSError as e:
if e.errno != errno.EPERM:
raise
if self.selinux_enabled():
context = self.selinux_context(dest)
else:
if self.selinux_enabled():
context = self.selinux_default_context(dest)
creating = not os.path.exists(b_dest)
try:
# Optimistically try a rename, solves some corner cases and can avoid useless work, throws exception if not atomic.
os.rename(b_src, b_dest)
except (IOError, OSError) as e:
if e.errno not in [errno.EPERM, errno.EXDEV, errno.EACCES, errno.ETXTBSY, errno.EBUSY]:
# only try workarounds for errno 18 (cross device), 1 (not permitted), 13 (permission denied)
# and 26 (text file busy) which happens on vagrant synced folders and other 'exotic' non posix file systems
self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, to_native(e)),
exception=traceback.format_exc())
else:
# Use bytes here. In the shippable CI, this fails with
# a UnicodeError with surrogateescape'd strings for an unknown
# reason (doesn't happen in a local Ubuntu16.04 VM)
b_dest_dir = os.path.dirname(b_dest)
b_suffix = os.path.basename(b_dest)
error_msg = None
tmp_dest_name = None
try:
tmp_dest_fd, tmp_dest_name = tempfile.mkstemp(prefix=b'.ansible_tmp',
dir=b_dest_dir, suffix=b_suffix)
except (OSError, IOError) as e:
error_msg = 'The destination directory (%s) is not writable by the current user. Error was: %s' % (os.path.dirname(dest), to_native(e))
except TypeError:
# We expect that this is happening because python3.4.x and
# below can't handle byte strings in mkstemp(). Traceback
# would end in something like:
# file = _os.path.join(dir, pre + name + suf)
# TypeError: can't concat bytes to str
error_msg = ('Failed creating tmp file for atomic move. This usually happens when using Python3 less than Python3.5. '
'Please use Python2.x or Python3.5 or greater.')
finally:
if error_msg:
if unsafe_writes:
self._unsafe_writes(b_src, b_dest)
else:
self.fail_json(msg=error_msg, exception=traceback.format_exc())
if tmp_dest_name:
b_tmp_dest_name = to_bytes(tmp_dest_name, errors='surrogate_or_strict')
try:
try:
# close tmp file handle before file operations to prevent text file busy errors on vboxfs synced folders (windows host)
os.close(tmp_dest_fd)
# leaves tmp file behind when sudo and not root
try:
shutil.move(b_src, b_tmp_dest_name)
except OSError:
# cleanup will happen by 'rm' of tmpdir
# copy2 will preserve some metadata
shutil.copy2(b_src, b_tmp_dest_name)
if self.selinux_enabled():
self.set_context_if_different(
b_tmp_dest_name, context, False)
try:
tmp_stat = os.stat(b_tmp_dest_name)
if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
os.chown(b_tmp_dest_name, dest_stat.st_uid, dest_stat.st_gid)
except OSError as e:
if e.errno != errno.EPERM:
raise
try:
os.rename(b_tmp_dest_name, b_dest)
except (shutil.Error, OSError, IOError) as e:
if unsafe_writes and e.errno == errno.EBUSY:
self._unsafe_writes(b_tmp_dest_name, b_dest)
else:
                                self.fail_json(msg='Unable to make %s into %s, failed final rename from %s: %s' %
(src, dest, b_tmp_dest_name, to_native(e)),
exception=traceback.format_exc())
except (shutil.Error, OSError, IOError) as e:
self.fail_json(msg='Failed to replace file: %s to %s: %s' % (src, dest, to_native(e)),
exception=traceback.format_exc())
finally:
self.cleanup(b_tmp_dest_name)
if creating:
# make sure the file has the correct permissions
# based on the current value of umask
umask = os.umask(0)
os.umask(umask)
os.chmod(b_dest, DEFAULT_PERM & ~umask)
try:
os.chown(b_dest, os.geteuid(), os.getegid())
except OSError:
# We're okay with trying our best here. If the user is not
# root (or old Unices) they won't be able to chown.
pass
if self.selinux_enabled():
# rename might not preserve context
self.set_context_if_different(dest, context, False)
def _unsafe_writes(self, src, dest):
# sadly there are some situations where we cannot ensure atomicity, but only if
# the user insists and we get the appropriate error we update the file unsafely
try:
out_dest = in_src = None
try:
out_dest = open(dest, 'wb')
in_src = open(src, 'rb')
shutil.copyfileobj(in_src, out_dest)
finally: # assuring closed files in 2.4 compatible way
if out_dest:
out_dest.close()
if in_src:
in_src.close()
except (shutil.Error, OSError, IOError) as e:
self.fail_json(msg='Could not write data to file (%s) from (%s): %s' % (dest, src, to_native(e)),
exception=traceback.format_exc())
def _read_from_pipes(self, rpipes, rfds, file_descriptor):
data = b('')
if file_descriptor in rfds:
data = os.read(file_descriptor.fileno(), self.get_buffer_size(file_descriptor))
if data == b(''):
rpipes.remove(file_descriptor)
return data
def _clean_args(self, args):
if not self._clean:
# create a printable version of the command for use in reporting later,
# which strips out things like passwords from the args list
to_clean_args = args
if PY2:
if isinstance(args, text_type):
to_clean_args = to_bytes(args)
else:
if isinstance(args, binary_type):
to_clean_args = to_text(args)
if isinstance(args, (text_type, binary_type)):
to_clean_args = shlex.split(to_clean_args)
clean_args = []
is_passwd = False
for arg in (to_native(a) for a in to_clean_args):
if is_passwd:
is_passwd = False
clean_args.append('********')
continue
if PASSWD_ARG_RE.match(arg):
sep_idx = arg.find('=')
if sep_idx > -1:
clean_args.append('%s=********' % arg[:sep_idx])
continue
else:
is_passwd = True
arg = heuristic_log_sanitize(arg, self.no_log_values)
clean_args.append(arg)
self._clean = ' '.join(shlex_quote(arg) for arg in clean_args)
return self._clean
def _restore_signal_handlers(self):
# Reset SIGPIPE to SIG_DFL, otherwise in Python2.7 it gets ignored in subprocesses.
if PY2 and sys.platform != 'win32':
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None,
use_unsafe_shell=False, prompt_regex=None, environ_update=None, umask=None, encoding='utf-8', errors='surrogate_or_strict',
expand_user_and_vars=True, pass_fds=None, before_communicate_callback=None):
'''
Execute a command, returns rc, stdout, and stderr.
:arg args: is the command to run
* If args is a list, the command will be run with shell=False.
* If args is a string and use_unsafe_shell=False it will split args to a list and run with shell=False
* If args is a string and use_unsafe_shell=True it runs with shell=True.
:kw check_rc: Whether to call fail_json in case of non zero RC.
Default False
:kw close_fds: See documentation for subprocess.Popen(). Default True
:kw executable: See documentation for subprocess.Popen(). Default None
:kw data: If given, information to write to the stdin of the command
:kw binary_data: If False, append a newline to the data. Default False
:kw path_prefix: If given, additional path to find the command in.
This adds to the PATH environment variable so helper commands in
the same directory can also be found
:kw cwd: If given, working directory to run the command inside
:kw use_unsafe_shell: See `args` parameter. Default False
:kw prompt_regex: Regex string (not a compiled regex) which can be
used to detect prompts in the stdout which would otherwise cause
the execution to hang (especially if no input data is specified)
:kw environ_update: dictionary to *update* os.environ with
:kw umask: Umask to be used when running the command. Default None
:kw encoding: Since we return native strings, on python3 we need to
know the encoding to use to transform from bytes to text. If you
want to always get bytes back, use encoding=None. The default is
"utf-8". This does not affect transformation of strings given as
args.
:kw errors: Since we return native strings, on python3 we need to
transform stdout and stderr from bytes to text. If the bytes are
undecodable in the ``encoding`` specified, then use this error
handler to deal with them. The default is ``surrogate_or_strict``
which means that the bytes will be decoded using the
surrogateescape error handler if available (available on all
python3 versions we support) otherwise a UnicodeError traceback
will be raised. This does not affect transformations of strings
given as args.
:kw expand_user_and_vars: When ``use_unsafe_shell=False`` this argument
dictates whether ``~`` is expanded in paths and environment variables
are expanded before running the command. When ``True`` a string such as
``$SHELL`` will be expanded regardless of escaping. When ``False`` and
``use_unsafe_shell=False`` no path or variable expansion will be done.
:kw pass_fds: When running on python3 this argument
dictates which file descriptors should be passed
to an underlying ``Popen`` constructor.
        :kw before_communicate_callback: This function will be called
            after the ``Popen`` object is created
            but before communicating with the process.
            (The ``Popen`` object will be passed to the callback as its first argument.)
:returns: A 3-tuple of return code (integer), stdout (native string),
and stderr (native string). On python2, stdout and stderr are both
byte strings. On python3, stdout and stderr are text strings converted
according to the encoding and errors parameters. If you want byte
strings on python3, use encoding=None to turn decoding to text off.
'''
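        # Illustrative usage (sketch; assumes a populated AnsibleModule instance named `module`,
        # with hypothetical commands and paths):
        #   rc, out, err = module.run_command(['ls', '-l', '/tmp'], check_rc=True)
        #   rc, out, err = module.run_command('echo $HOME', use_unsafe_shell=True)
        #   rc, out, err = module.run_command(['grep', 'root'], data='root:x:0:0\n')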
# used by clean args later on
self._clean = None
if not isinstance(args, (list, binary_type, text_type)):
msg = "Argument 'args' to run_command must be list or string"
self.fail_json(rc=257, cmd=args, msg=msg)
shell = False
if use_unsafe_shell:
# stringify args for unsafe/direct shell usage
if isinstance(args, list):
args = b" ".join([to_bytes(shlex_quote(x), errors='surrogate_or_strict') for x in args])
else:
args = to_bytes(args, errors='surrogate_or_strict')
# not set explicitly, check if set by controller
if executable:
executable = to_bytes(executable, errors='surrogate_or_strict')
args = [executable, b'-c', args]
elif self._shell not in (None, '/bin/sh'):
args = [to_bytes(self._shell, errors='surrogate_or_strict'), b'-c', args]
else:
shell = True
else:
# ensure args are a list
if isinstance(args, (binary_type, text_type)):
# On python2.6 and below, shlex has problems with text type
# On python3, shlex needs a text type.
if PY2:
args = to_bytes(args, errors='surrogate_or_strict')
elif PY3:
args = to_text(args, errors='surrogateescape')
args = shlex.split(args)
# expand ``~`` in paths, and all environment vars
if expand_user_and_vars:
args = [to_bytes(os.path.expanduser(os.path.expandvars(x)), errors='surrogate_or_strict') for x in args if x is not None]
else:
args = [to_bytes(x, errors='surrogate_or_strict') for x in args if x is not None]
prompt_re = None
if prompt_regex:
if isinstance(prompt_regex, text_type):
if PY3:
prompt_regex = to_bytes(prompt_regex, errors='surrogateescape')
elif PY2:
prompt_regex = to_bytes(prompt_regex, errors='surrogate_or_strict')
try:
prompt_re = re.compile(prompt_regex, re.MULTILINE)
except re.error:
self.fail_json(msg="invalid prompt regular expression given to run_command")
rc = 0
msg = None
st_in = None
# Manipulate the environ we'll send to the new process
old_env_vals = {}
# We can set this from both an attribute and per call
for key, val in self.run_command_environ_update.items():
old_env_vals[key] = os.environ.get(key, None)
os.environ[key] = val
if environ_update:
for key, val in environ_update.items():
old_env_vals[key] = os.environ.get(key, None)
os.environ[key] = val
if path_prefix:
old_env_vals['PATH'] = os.environ['PATH']
os.environ['PATH'] = "%s:%s" % (path_prefix, os.environ['PATH'])
# If using test-module.py and explode, the remote lib path will resemble:
# /tmp/test_module_scratch/debug_dir/ansible/module_utils/basic.py
# If using ansible or ansible-playbook with a remote system:
# /tmp/ansible_vmweLQ/ansible_modlib.zip/ansible/module_utils/basic.py
# Clean out python paths set by ansiballz
if 'PYTHONPATH' in os.environ:
pypaths = os.environ['PYTHONPATH'].split(':')
pypaths = [x for x in pypaths
if not x.endswith('/ansible_modlib.zip') and
not x.endswith('/debug_dir')]
os.environ['PYTHONPATH'] = ':'.join(pypaths)
if not os.environ['PYTHONPATH']:
del os.environ['PYTHONPATH']
if data:
st_in = subprocess.PIPE
kwargs = dict(
executable=executable,
shell=shell,
close_fds=close_fds,
stdin=st_in,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
preexec_fn=self._restore_signal_handlers,
)
if PY3 and pass_fds:
kwargs["pass_fds"] = pass_fds
# store the pwd
prev_dir = os.getcwd()
# make sure we're in the right working directory
if cwd and os.path.isdir(cwd):
cwd = to_bytes(os.path.abspath(os.path.expanduser(cwd)), errors='surrogate_or_strict')
kwargs['cwd'] = cwd
try:
os.chdir(cwd)
except (OSError, IOError) as e:
self.fail_json(rc=e.errno, msg="Could not open %s, %s" % (cwd, to_native(e)),
exception=traceback.format_exc())
old_umask = None
if umask:
old_umask = os.umask(umask)
try:
if self._debug:
self.log('Executing: ' + self._clean_args(args))
cmd = subprocess.Popen(args, **kwargs)
if before_communicate_callback:
before_communicate_callback(cmd)
# the communication logic here is essentially taken from that
# of the _communicate() function in ssh.py
stdout = b('')
stderr = b('')
rpipes = [cmd.stdout, cmd.stderr]
if data:
if not binary_data:
data += '\n'
if isinstance(data, text_type):
data = to_bytes(data)
cmd.stdin.write(data)
cmd.stdin.close()
while True:
rfds, wfds, efds = select.select(rpipes, [], rpipes, 1)
stdout += self._read_from_pipes(rpipes, rfds, cmd.stdout)
stderr += self._read_from_pipes(rpipes, rfds, cmd.stderr)
# if we're checking for prompts, do it now
if prompt_re:
if prompt_re.search(stdout) and not data:
if encoding:
stdout = to_native(stdout, encoding=encoding, errors=errors)
return (257, stdout, "A prompt was encountered while running a command, but no input data was specified")
# only break out if no pipes are left to read or
# the pipes are completely read and
# the process is terminated
if (not rpipes or not rfds) and cmd.poll() is not None:
break
# No pipes are left to read but process is not yet terminated
# Only then it is safe to wait for the process to be finished
# NOTE: Actually cmd.poll() is always None here if rpipes is empty
elif not rpipes and cmd.poll() is None:
cmd.wait()
# The process is terminated. Since no pipes to read from are
# left, there is no need to call select() again.
break
cmd.stdout.close()
cmd.stderr.close()
rc = cmd.returncode
except (OSError, IOError) as e:
self.log("Error Executing CMD:%s Exception:%s" % (self._clean_args(args), to_native(e)))
self.fail_json(rc=e.errno, msg=to_native(e), cmd=self._clean_args(args))
except Exception as e:
self.log("Error Executing CMD:%s Exception:%s" % (self._clean_args(args), to_native(traceback.format_exc())))
self.fail_json(rc=257, msg=to_native(e), exception=traceback.format_exc(), cmd=self._clean_args(args))
# Restore env settings
for key, val in old_env_vals.items():
if val is None:
del os.environ[key]
else:
os.environ[key] = val
if old_umask:
os.umask(old_umask)
if rc != 0 and check_rc:
msg = heuristic_log_sanitize(stderr.rstrip(), self.no_log_values)
self.fail_json(cmd=self._clean_args(args), rc=rc, stdout=stdout, stderr=stderr, msg=msg)
# reset the pwd
os.chdir(prev_dir)
if encoding is not None:
return (rc, to_native(stdout, encoding=encoding, errors=errors),
to_native(stderr, encoding=encoding, errors=errors))
return (rc, stdout, stderr)
def append_to_file(self, filename, str):
filename = os.path.expandvars(os.path.expanduser(filename))
fh = open(filename, 'a')
fh.write(str)
fh.close()
def bytes_to_human(self, size):
return bytes_to_human(size)
# for backwards compatibility
pretty_bytes = bytes_to_human
def human_to_bytes(self, number, isbits=False):
return human_to_bytes(number, isbits)
#
# Backwards compat
#
# In 2.0, moved from inside the module to the toplevel
is_executable = is_executable
@staticmethod
def get_buffer_size(fd):
try:
            # 1032 == F_GETPIPE_SZ (Linux fcntl constant for reading the pipe buffer size)
buffer_size = fcntl.fcntl(fd, 1032)
except Exception:
try:
# not as exact as above, but should be good enough for most platforms that fail the previous call
buffer_size = select.PIPE_BUF
except Exception:
buffer_size = 9000 # use sane default JIC
return buffer_size
def get_module_path():
return os.path.dirname(os.path.realpath(__file__))
|
gpl-3.0
| 8,027,137,637,342,028,000
| 39.065696
| 155
| 0.554055
| false
| 4.144727
| false
| false
| false
|
kcompher/abstract_rendering
|
examples/numpyDemo.py
|
1
|
3768
|
#!/usr/bin/env python
"""
Draws a colormapped image plot
- Left-drag pans the plot.
- Mousewheel up and down zooms the plot in and out.
- Pressing "z" brings up the Zoom Box, and you can click-drag a rectangular
region to zoom. If you use a sequence of zoom boxes, pressing alt-left-arrow
and alt-right-arrow moves you forwards and backwards through the "zoom
history".
"""
# Abstract rendering imports
from __future__ import print_function, division, absolute_import
import abstract_rendering.util as util
import abstract_rendering.core as core
import abstract_rendering.numeric as numeric
import abstract_rendering.categories as categories
import abstract_rendering.infos as infos
import abstract_rendering.numpyglyphs as npg
from timer import Timer
# Enthought library imports
from enable.api import Component, ComponentEditor
from traits.api import HasTraits, Instance
from traitsui.api import Item, Group, View
# Chaco imports
from chaco.api import ArrayPlotData, Plot
#===============================================================================
# # Create the Chaco plot.
#===============================================================================
def _create_plot_component():
red = util.Color(255,0,0,255)
green = util.Color(0,255,0,255)
blue = util.Color(0,0,255,255)
purple = util.Color(125,0,255,255)
white = util.Color(255,255,255,255)
black = util.Color(0,0,0,255)
clear = util.Color(0,0,0,0)
with Timer("Loeading") as arTimer:
#glyphs = npg.load_csv("../data/circlepoints.csv", 1, 2, 3, 4)
glyphs = npg.load_hdf("../data/CensusTracts.hdf5", "__data__", "LAT", "LON")
#glyphs = npg.load_hdf("../data/tweets-subset.hdf", "test",
# "longitude", "latitude", vc="lang_primary")
screen = (800,600)
ivt = util.zoom_fit(screen,glyphs.bounds())
with Timer("Abstract-Render") as arTimer:
image = core.render(glyphs,
infos.encode(["Arabic","English","Turkish","Russian"]),
npg.PointCountCategories(),
npg.Spread(2) + categories.HDAlpha([red, blue, green, purple, black], alphamin=.3, log=True),
screen,
ivt)
# image = core.render(glyphs,
# infos.valAt(4,0),
# npg.PointCount(),
# npg.Spread(1) + numeric.BinarySegment(white, black, 1),
# screen,
# ivt)
# Create a plot data object and give it this data
pd = ArrayPlotData()
pd.set_data("imagedata", image)
# Create the plot
plot = Plot(pd)
img_plot = plot.img_plot("imagedata")[0]
# Tweak some of the plot properties
plot.title = "Abstract Rendering"
plot.padding = 50
return plot
#===============================================================================
# Attributes to use for the plot view.
size=(800,600)
title="Basic Colormapped Image Plot"
#===============================================================================
# # Demo class that is used by the demo.py application.
#===============================================================================
class Demo(HasTraits):
plot = Instance(Component)
traits_view = View(
Group(
Item('plot', editor=ComponentEditor(size=size),
show_label=False),
orientation = "vertical"),
resizable=True, title=title
)
def _plot_default(self):
return _create_plot_component()
demo = Demo()
if __name__ == "__main__":
demo.configure_traits()
|
bsd-3-clause
| 3,856,248,107,765,127,700
| 35.230769
| 119
| 0.538482
| false
| 4.069114
| false
| false
| false
|
jmgilman/Neolib
|
neolib/inventory/UserInventory.py
|
1
|
2623
|
""":mod:`UserInventory` -- Provides an interface for a user inventory
.. module:: UserInventory
:synopsis: Provides an interface for a user inventory
.. moduleauthor:: Joshua Gilman <joshuagilman@gmail.com>
"""
from neolib.exceptions import parseException
from neolib.exceptions import invalidUser
from neolib.inventory.Inventory import Inventory
from neolib.item.Item import Item
import logging
class UserInventory(Inventory):
"""Represents a user's inventory
Sub-classes the Inventory class to provide an interface for a user's
    inventory. Populates itself with all items in the user's
    inventory once it is loaded.
Example
    >>> usr.loadInventory()
>>> for item in usr.inventory:
... print item.name
Blue Kougra Plushie
Lu Codestone
...
"""
usr = None
def __init__(self, usr):
if not usr:
raise invalidUser
self.usr = usr
def load(self):
"""Loads a user's inventory
Queries the user's inventory, parses each item, and adds
each item to the inventory. Note this class should not be
used directly, but rather usr.inventory should be used to
access a user's inventory.
Parameters
usr (User) - The user to load the inventory for
Raises
invalidUser
parseException
"""
self.items = {}
pg = self.usr.getPage("http://www.neopets.com/objects.phtml?type=inventory")
# Indicates an empty inventory
if "You aren't carrying anything" in pg.content:
return
try:
for row in pg.find_all("td", "contentModuleContent")[1].table.find_all("tr"):
for item in row.find_all("td"):
name = item.text
                    # Some item names contain extra information encapsulated in parentheses
if "(" in name:
name = name.split("(")[0]
tmpItem = Item(name)
tmpItem.id = item.a['onclick'].split("(")[1].replace(");", "")
tmpItem.img = item.img['src']
tmpItem.desc = item.img['alt']
tmpItem.usr = self.usr
self.items[name] = tmpItem
except Exception:
logging.getLogger("neolib.inventory").exception("Unable to parse user inventory.", {'pg': pg})
raise parseException
|
mit
| 2,876,575,541,733,346,300
| 31.7875
| 106
| 0.556615
| false
| 4.709156
| false
| false
| false
|
ddanier/django_price
|
django_price/models.py
|
1
|
1728
|
# coding: utf-8
from django.utils.translation import ugettext_lazy as _
from django.db import models
from django_deferred_polymorph.models import SubDeferredPolymorphBaseModel
import decimal
import datetime
from .manager import TaxManager
# TODO: Versionized Tax (Tax should NEVER get changed, as this may
# create an invalid state if you store net + gross for invoices)
class Tax(SubDeferredPolymorphBaseModel):
name = models.CharField(max_length=25)
created = models.DateTimeField(editable=False, default=datetime.datetime.now)
modified = models.DateTimeField(editable=False, auto_now=True)
objects = TaxManager()
def __unicode__(self):
return self.name
@property
def unique_id(self):
return self.get_tax().unique_id
def amount(self, net):
return self.get_tax().amount(net)
def apply(self, net):
return self.get_tax().apply(net)
def reverse(self, gross):
return self.get_tax().reverse(gross)
def get_tax(self):
raise RuntimeError('subclass must implement this')
class LinearTax(Tax):
# TODO: PercentField?
percent = models.DecimalField(max_digits=6, decimal_places=3)
def get_tax(self):
from . import LinearTax
tax = LinearTax(self.name, self.percent)
tax._unique_id = 'linear-pk-%d' % self.pk
tax._model_instance = self
return tax
class MultiTax(Tax):
taxes = models.ManyToManyField(Tax, related_name='+')
def get_tax(self):
from . import MultiTax
tax = MultiTax(list(self.taxes.all()), self.name)
tax._unique_id = 'multi-pk-%d' % self.pk
tax._model_instance = self
return tax
|
bsd-3-clause
| -4,973,895,535,541,663,000
| 27.327869
| 81
| 0.656829
| false
| 3.900677
| false
| false
| false
|
develersrl/rooms
|
editor/structdata/area.py
|
1
|
1839
|
#!/usr/bin/env python
from origin import OriginData
from structdata.project import g_project
class Area(OriginData):
"""
    Area handles physical coordinates and sizes. In the XML the information is
    expressed in logical coordinates; the conversion is performed when the
    project is loaded.
"""
tag_name = 'area'
def __init__(self, id, x, y, width, height, event):
super(Area, self).__init__()
self.id = id
self.x = str(float(x) * float(g_project.data['world'].width))
self.y = str(float(y) * float(g_project.data['world'].height))
self.height = str(float(height) * float(g_project.data['world'].height))
self.width = str(float(width) * float(g_project.data['world'].width))
self.event = event
def setName(self, name):
self.id = name
g_project.notify()
def valueForKey(self, key, value):
if key == "x" or key == "width":
return str(round(float(value) / float(g_project.data['world'].width), 2))
elif key == "y" or key == "height":
return str(round(float(value) / float(g_project.data['world'].height), 2))
else:
return value
@staticmethod
def create(room, x, y, width, height, event=""):
number_of_new_area = 0
for area in room.areas:
if area.id.startswith("new_area_"):
number_of_new_area += 1
area = Area("new_area_%d" % (number_of_new_area + 1),
str(x / float(g_project.data['world'].width)),
str(y / float(g_project.data['world'].height)),
str(width / float(g_project.data['world'].width)),
str(height / float(g_project.data['world'].height)),
event)
room.areas.append(area)
return area
|
mit
| 4,306,555,536,022,000,600
| 37.3125
| 86
| 0.567156
| false
| 3.55706
| false
| false
| false
|
abadger/Bento
|
doc/source/conf.py
|
1
|
6743
|
# -*- coding: utf-8 -*-
#
# Bento documentation build configuration file, created by
# sphinx-quickstart on Sun Jan 3 12:53:13 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
import bento
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.ifconfig']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'contents'
# General information about the project.
project = u'Bento'
copyright = u'2009-2011, David Cournapeau'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = bento.__version__
# The full version, including alpha/beta/rc tags.
release = "%s-git%s" % (version, bento.__git_revision__[:10])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Content template for the index page.
html_index = 'index.html'
# Custom sidebar templates, maps document names to template names.
html_sidebars = {'index': 'indexsidebar.html'}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {'index': 'index.html'}
# If false, no module index is generated.
html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Bentodoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Bento.tex', u'Bento Documentation',
u'David Cournapeau', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
|
bsd-3-clause
| -7,131,424,483,700,285,000
| 32.381188
| 148
| 0.71437
| false
| 3.721302
| true
| false
| false
|
jpinsonault/imdb_cast_matcher
|
match_cast.py
|
1
|
1055
|
#!/usr/bin/env python
import argparse
import sys
from imdb import IMDb
args = None
def parse_args():
global args
parser = argparse.ArgumentParser()
parser.add_argument('first_movie')
parser.add_argument('second_movie')
args = parser.parse_args()
def main():
imdb = IMDb()
# Get 2 movies
first_movie = confirm_movie(imdb, args.first_movie)
second_movie = confirm_movie(imdb, args.second_movie)
imdb.update(first_movie)
imdb.update(second_movie)
print("Comparing '{}' and '{}'".format(first_movie["title"], second_movie["title"]))
# Compare cast
in_both = []
for first_movie_person in first_movie["cast"]:
for second_movie_person in second_movie["cast"]:
if first_movie_person["name"] == second_movie_person["name"]:
in_both.append(first_movie_person)
for person in in_both:
print(person["name"])
def confirm_movie(imdb, movie_name):
return imdb.search_movie(movie_name)[0]
if __name__ == '__main__':
parse_args()
main()
|
mit
| -4,238,202,857,582,957,000
| 20.55102
| 88
| 0.629384
| false
| 3.552189
| false
| false
| false
|
wbphelps/ISSTracker
|
showGPS.py
|
1
|
4984
|
# show GPS page
from datetime import datetime, timedelta
import pygame
from pygame.locals import *
import math
from plotSky import plotSky
R90 = math.radians(90) # 90 degrees in radians
class showGPS():
def getxy(self, alt, azi): # alt, az in radians
# thanks to John at Wobbleworks for the algorithm
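        # Projection sketch: the zenith (alt = 90 deg) maps to the plot centre and the horizon
        # (alt = 0) to the outer circle of radius D; azimuth is measured from north (top of plot).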
r = (R90 - alt)/R90
x = r * math.sin(azi)
y = r * math.cos(azi)
if self.flip:
x = int(self.centerX - x * self.D) # flip E/W, scale to radius, center on plot
else:
            x = int(self.centerX + x * self.D) # no E/W flip, scale to radius, center on plot
y = int(self.centerY - y * self.D) # scale to radius, center on plot
return (x,y)
def __init__(self, Screen, Colors, gps, obs, sun, x=0, y=0, flip=False):
self.Screen = Screen
self.Colors = Colors
self.pos = (x,y)
self.flip = flip
self.window = Screen.copy()
rect = self.window.get_rect()
self.height = rect.height
self.width = rect.width
self.D = self.height/2 - 2
self.centerX = self.width/2 + 2
self.centerY = self.height/2 + 2
self.BG = Screen.copy() # make another copy for the background
self.BGupdate = datetime.now() - timedelta(seconds=61) # force BG update
self.drawBG(obs, sun) # fill in the background & draw it
def drawBG(self, obs, sun):
self.BGupdate = datetime.now()
self.Sky = plotSky(self.BG, self.Colors, obs, self.centerX, self.centerY, self.D, flip=False) # draw the sky background & compass points
self.Sky.plotStars(obs) # add stars
self.Sky.plotPlanets(obs) # add planets
def plot(self, gps, obs, sun):
# fName = 'Monospac821 BT'
# test = pygame.font.match_font(fName, bold=True) # check to see if it's installed
# if test == None:
fName = 'DejaVuSansMono' # use this one instead
if (datetime.now() - self.BGupdate).total_seconds() > 60:
self.drawBG(obs, sun) # update background image once a minute
self.window.blit(self.BG,(0,0)) # paint background image
line = 0
txtColor = self.Colors.Yellow
txtFont = pygame.font.SysFont(fName, 15, bold=True)
t1 = txtFont.render(gps.datetime.strftime('%H:%M:%S'), 1, txtColor) # time
t1r = t1.get_rect()
self.window.blit(t1, (0,0)) # time
line += t1r.height
t2 = txtFont.render(gps.datetime.strftime('%Y/%m/%d'), 1, txtColor) # date
t2r = t2.get_rect()
self.window.blit(t2, (self.width - t2r.width, 0))
e1 = txtFont.render('({})'.format(gps.error_count), 1, self.Colors.Red)
e1r = e1.get_rect()
self.window.blit(e1, (self.width - e1r.width, t2r.height))
# draw a circle for each satellite
satFont = pygame.font.SysFont(fName, 9, bold=True)
# TODO: detect collision and move label ?
ns = 0
nsa = 0
for sat in gps.satellites: # plot all GPS satellites on sky chart
            if (sat.alt,sat.azi) == (0,0): continue  # skip satellites with no position fix
xy = self.getxy(sat.alt,sat.azi)
ns += 1
sz = sat.snr
if sz>0: nsa += 1
if sz<5: color = self.Colors.Red # no signal
elif sz<20: color = self.Colors.Yellow
else: color = self.Colors.Green
if sz<9: sz = 9 # minimum circle size
pygame.draw.circle(self.window, color, xy, sz, 1)
# tsat = satFont.render(format(sat.svn), 1, self.Colors.White)
tsat = satFont.render(format(sat.svn), 1, self.Colors.White, self.Sky.bgColor)
tpos = tsat.get_rect()
tpos.centerx = xy[0]
tpos.centery = xy[1]
self.window.blit(tsat,tpos)
# txtFont = pygame.font.SysFont(fName, 15, bold=True)
s1 = txtFont.render('{}/{}'.format(gps.status,gps.quality), 1, txtColor)
s1r = s1.get_rect()
self.window.blit(s1,(1,line))
line += s1r.height
s2 = txtFont.render('{:0>2}/{:0>2}'.format(nsa, ns), 1, txtColor)
s2r = s2.get_rect()
self.window.blit(s2,(1,line))
line += s2r.height
tdil = txtFont.render('{:0.1f}m'.format(gps.hDilution), 1, txtColor)
tdilr = tdil.get_rect()
self.window.blit(tdil, (1, line))
# line += tdilr.height
line = self.height
if gps.quality == 2 or gps.hDilution < 2:
fmt = '{:7.5f}' # differential GPS - 1 meter accuracy!!!
else:
fmt = '{:6.4f}' # normal signal
tlon = txtFont.render(fmt.format(math.degrees(gps.avg_longitude)), 1, txtColor)
tlonr = tlon.get_rect()
line -= tlonr.height
self.window.blit(tlon, (self.width - tlonr.width, line))
tlat = txtFont.render(fmt.format(math.degrees(gps.avg_latitude)), 1, txtColor)
tlatr = tlat.get_rect()
line -= tlatr.height
self.window.blit(tlat, (self.width - tlatr.width, line))
alt = gps.altitude #+ gps.geodiff
if alt<100:
talt = '{:6.1f}m'.format(alt)
else:
talt = '{:6.0f}m'.format(alt)
talt = txtFont.render(talt, 1, txtColor)
taltr = talt.get_rect()
line -= taltr.height
self.window.blit(talt, (self.width - taltr.width, line))
self.Screen.blit(self.window,self.pos)
pygame.display.update() #flip()
|
gpl-2.0
| 6,263,011,145,181,974,000
| 31.789474
| 140
| 0.62179
| false
| 2.854525
| false
| false
| false
|
adist/drunken-sansa
|
openerp/addons/pln_dprr/hr_employee.py
|
1
|
2048
|
'''
Created on Feb 18, 2015
@author: adista@bizoft
'''
from openerp.osv import fields, osv
class hr_employee_category(osv.osv):
_inherit = 'hr.employee.category'
_columns = {
        'code' : fields.char(string='Code'),
        'type' : fields.selection([('area', 'Area'), ('unitup', 'Unit Up')], string='Type', required=True),
}
def name_get(self, cr, uid, ids, context=None):
res = super(hr_employee_category, self).name_get(cr, uid, ids, context=context)
res = dict(res)
for this in self.browse(cr, uid, res.keys()):
if not this.parent_id:
continue
res[this.id] = this.code and ' - '.join([this.code, res[this.id]]) or res[this.id]
return res.items()
def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=100):
if not args:
args = []
ids = self.search(cr, user, [('code', operator, name)] + args, limit=limit, context=context)
ids += self.search(cr, user, [('name', operator, name)] + args, limit=limit, context=context)
return self.name_get(cr, user, ids, context)
hr_employee_category()
class hr_employee(osv.osv):
_inherit = 'hr.employee'
def create(self, cr, uid, vals, context={}):
res = super(hr_employee, self).create(cr, uid, vals, context={})
if not res:
return res
category_ids = vals['category_ids'][0][-1]
if len(category_ids) != 1:
raise ValueError('employee must belong to exactly one category')
o_hr_categ = self.pool.get('hr.employee.category').browse(cr, uid, category_ids[-1])
user_vals = {
'name' : vals['name'],
'login' : '_'.join([str(o_hr_categ.code).lower(), vals['name'].lower()]),
'password' : ''.join([vals['name'].lower(),'123']),
'employee' : True,
}
o_user = self.pool.get('res.users').create(cr, uid, user_vals, context=context)
self.write(cr, uid, [res], {'user_id' : o_user}, context=context)
return res
hr_employee()
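# Illustrative sketch (not part of the original module), assuming a category
# with code 'AREA1' and an employee named 'Budi'; both names are hypothetical:
#
#   vals = {'name': 'Budi', 'category_ids': [(6, 0, [categ_id])]}
#   emp_id = self.pool.get('hr.employee').create(cr, uid, vals)
#   # -> also creates a res.users record with login 'area1_budi' and password 'budi123'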
|
agpl-3.0
| 7,014,781,787,241,007,000
| 38.403846
| 109
| 0.561523
| false
| 3.413333
| false
| false
| false
|
claudep/translate
|
translate/convert/xliff2odf.py
|
1
|
6253
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2004-2014 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Convert XLIFF translation files to OpenDocument (ODF) files.
See: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/odf2xliff.html
for examples and usage instructions.
"""
import six
import zipfile
from io import BytesIO
import lxml.etree as etree
from translate.convert import convert
from translate.storage import factory
from translate.storage.odf_io import copy_odf, open_odf
from translate.storage.odf_shared import (inline_elements,
no_translate_content_elements)
from translate.storage.xml_extract.extract import ParseState
from translate.storage.xml_extract.generate import (apply_translations,
replace_dom_text)
from translate.storage.xml_extract.unit_tree import XPathTree, build_unit_tree
def translate_odf(template, input_file):
def load_dom_trees(template):
"""Return a dict with translatable files in the template ODF package.
The keys are the filenames inside the ODF package, and the values are
the etrees for each of those translatable files.
"""
odf_data = open_odf(template)
return dict((filename, etree.parse(BytesIO(data)))
for filename, data in six.iteritems(odf_data))
def load_unit_tree(input_file):
"""Return a dict with the translations grouped by files ODF package.
The keys are the filenames inside the template ODF package, and the
values are XPathTree instances for each of those files.
"""
store = factory.getobject(input_file)
tree = build_unit_tree(store)
def extract_unit_tree(filename, root_dom_element_name):
"""Find the subtree in 'tree' which corresponds to the data in XML
file 'filename'.
"""
try:
file_tree = tree.children[root_dom_element_name, 0]
except KeyError:
file_tree = XPathTree()
return (filename, file_tree)
return dict([extract_unit_tree('content.xml', 'office:document-content'),
extract_unit_tree('meta.xml', 'office:document-meta'),
extract_unit_tree('styles.xml', 'office:document-styles')])
def translate_dom_trees(unit_trees, dom_trees):
"""Return a dict with the translated files for the ODF package.
The keys are the filenames for the translatable files inside the
template ODF package, and the values are etree ElementTree instances
for each of those files.
"""
make_parse_state = lambda: ParseState(no_translate_content_elements,
inline_elements)
for filename, dom_tree in six.iteritems(dom_trees):
file_unit_tree = unit_trees[filename]
apply_translations(dom_tree.getroot(), file_unit_tree,
replace_dom_text(make_parse_state))
return dom_trees
dom_trees = load_dom_trees(template)
unit_trees = load_unit_tree(input_file)
return translate_dom_trees(unit_trees, dom_trees)
def write_odf(template, output_file, dom_trees):
"""Write the translated ODF package.
The resulting ODF package is a copy of the template ODF package, with the
translatable files replaced by their translated versions.
"""
template_zip = zipfile.ZipFile(template, 'r')
output_zip = zipfile.ZipFile(output_file, 'w',
compression=zipfile.ZIP_DEFLATED)
# Copy the ODF package.
output_zip = copy_odf(template_zip, output_zip, dom_trees.keys())
# Overwrite the translated files to the ODF package.
for filename, dom_tree in six.iteritems(dom_trees):
output_zip.writestr(filename, etree.tostring(dom_tree,
encoding='UTF-8',
xml_declaration=True))
def convertxliff(input_file, output_file, template):
"""Create a translated ODF using an ODF template and a XLIFF file."""
xlf_data = input_file.read()
dom_trees = translate_odf(template, BytesIO(xlf_data))
write_odf(template, output_file, dom_trees)
output_file.close()
return True
def main(argv=None):
formats = {
('xlf', 'odt'): ("odt", convertxliff), # Text
('xlf', 'ods'): ("ods", convertxliff), # Spreadsheet
('xlf', 'odp'): ("odp", convertxliff), # Presentation
('xlf', 'odg'): ("odg", convertxliff), # Drawing
('xlf', 'odc'): ("odc", convertxliff), # Chart
('xlf', 'odf'): ("odf", convertxliff), # Formula
('xlf', 'odi'): ("odi", convertxliff), # Image
('xlf', 'odm'): ("odm", convertxliff), # Master Document
('xlf', 'ott'): ("ott", convertxliff), # Text template
('xlf', 'ots'): ("ots", convertxliff), # Spreadsheet template
('xlf', 'otp'): ("otp", convertxliff), # Presentation template
('xlf', 'otg'): ("otg", convertxliff), # Drawing template
('xlf', 'otc'): ("otc", convertxliff), # Chart template
('xlf', 'otf'): ("otf", convertxliff), # Formula template
('xlf', 'oti'): ("oti", convertxliff), # Image template
('xlf', 'oth'): ("oth", convertxliff), # Web page template
}
parser = convert.ConvertOptionParser(formats, usetemplates=True, description=__doc__)
parser.run(argv)
if __name__ == '__main__':
main()
|
gpl-2.0
| 5,758,427,300,691,318,000
| 39.869281
| 96
| 0.629778
| false
| 3.874226
| false
| false
| false
|
yarikoptic/Fail2Ban-Old-SVNGIT
|
server/filter.py
|
1
|
15235
|
# This file is part of Fail2Ban.
#
# Fail2Ban is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Fail2Ban is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Fail2Ban; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# Author: Cyril Jaquier
#
# $Revision$
__author__ = "Cyril Jaquier"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
__license__ = "GPL"
from failmanager import FailManager
from ticket import FailTicket
from jailthread import JailThread
from datedetector import DateDetector
from mytime import MyTime
from failregex import FailRegex, Regex, RegexException
import logging, re, os, fcntl, time
# Gets the instance of the logger.
logSys = logging.getLogger("fail2ban.filter")
##
# Log reader class.
#
# This class reads a log file and detects login failures or anything else
# that matches a given regular expression. This class is instantiated by
# a Jail object.
class Filter(JailThread):
##
# Constructor.
#
# Initialize the filter object with default values.
# @param jail the jail object
def __init__(self, jail):
JailThread.__init__(self)
## The jail which contains this filter.
self.jail = jail
## The failures manager.
self.failManager = FailManager()
## The regular expression list matching the failures.
self.__failRegex = list()
## The regular expression list with expressions to ignore.
self.__ignoreRegex = list()
## The amount of time to look back.
self.__findTime = 6000
## The ignore IP list.
self.__ignoreIpList = []
self.dateDetector = DateDetector()
self.dateDetector.addDefaultTemplate()
logSys.debug("Created Filter")
##
# Add a regular expression which matches the failure.
#
# The regular expression can also match patterns other than failures
# and thus can be used for many purposes.
# @param value the regular expression
def addFailRegex(self, value):
try:
regex = FailRegex(value)
self.__failRegex.append(regex)
except RegexException, e:
logSys.error(e)
def delFailRegex(self, index):
try:
del self.__failRegex[index]
except IndexError:
logSys.error("Cannot remove regular expression. Index %d is not "
"valid" % index)
##
# Get the regular expression which matches the failure.
#
# @return the regular expression
def getFailRegex(self):
failRegex = list()
for regex in self.__failRegex:
failRegex.append(regex.getRegex())
return failRegex
##
# Add a regular expression for lines which should be ignored.
#
# Lines matching this expression are skipped even if they also match
# a failure regular expression.
# @param value the regular expression
def addIgnoreRegex(self, value):
try:
regex = Regex(value)
self.__ignoreRegex.append(regex)
except RegexException, e:
logSys.error(e)
def delIgnoreRegex(self, index):
try:
del self.__ignoreRegex[index]
except IndexError:
logSys.error("Cannot remove regular expression. Index %d is not "
"valid" % index)
##
# Get the regular expressions for lines which should be ignored.
#
# @return the regular expression
def getIgnoreRegex(self):
ignoreRegex = list()
for regex in self.__ignoreRegex:
ignoreRegex.append(regex.getRegex())
return ignoreRegex
##
# Set the time needed to find a failure.
#
# This value tells the filter how long it has to take failures into
# account.
# @param value the time
def setFindTime(self, value):
self.__findTime = value
self.failManager.setMaxTime(value)
logSys.info("Set findtime = %s" % value)
##
# Get the time needed to find a failure.
#
# @return the time
def getFindTime(self):
return self.__findTime
##
# Set the maximum retry value.
#
# @param value the retry value
def setMaxRetry(self, value):
self.failManager.setMaxRetry(value)
logSys.info("Set maxRetry = %s" % value)
##
# Get the maximum retry value.
#
# @return the retry value
def getMaxRetry(self):
return self.failManager.getMaxRetry()
##
# Main loop.
#
# This function is the main loop of the thread. It checks if the
# file has been modified and looks for failures.
# @return True when the thread exits nicely
def run(self):
raise Exception("run() is abstract")
##
# Ban an IP - http://blogs.buanzo.com.ar/2009/04/fail2ban-patch-ban-ip-address-manually.html
# Arturo 'Buanzo' Busleiman <buanzo@buanzo.com.ar>
#
# to enable banip fail2ban-client BAN command
def addBannedIP(self, ip):
unixTime = time.time()
self.failManager.addFailure(FailTicket(ip, unixTime))
return ip
##
# Add an IP/DNS to the ignore list.
#
# IP addresses in the ignore list are not taken into account
# when finding failures. CIDR mask and DNS are also accepted.
# @param ip IP address to ignore
def addIgnoreIP(self, ip):
logSys.debug("Add " + ip + " to ignore list")
self.__ignoreIpList.append(ip)
def delIgnoreIP(self, ip):
logSys.debug("Remove " + ip + " from ignore list")
self.__ignoreIpList.remove(ip)
def getIgnoreIP(self):
return self.__ignoreIpList
##
# Check if IP address/DNS is in the ignore list.
#
# Check if the given IP address matches an IP address/DNS or a CIDR
# mask in the ignore list.
# @param ip IP address
# @return True if IP address is in ignore list
def inIgnoreIPList(self, ip):
for i in self.__ignoreIpList:
# An empty string is always false
if i == "":
continue
s = i.split('/', 1)
# IP address without CIDR mask
if len(s) == 1:
s.insert(1, '32')
s[1] = long(s[1])
try:
a = DNSUtils.cidr(s[0], s[1])
b = DNSUtils.cidr(ip, s[1])
except Exception:
# Check if IP in DNS
ips = DNSUtils.dnsToIp(i)
if ip in ips:
return True
else:
continue
if a == b:
return True
return False
def processLine(self, line):
try:
# Decode line to UTF-8
l = line.decode('utf-8')
except UnicodeDecodeError:
l = line
timeMatch = self.dateDetector.matchTime(l)
if timeMatch:
# Lets split into time part and log part of the line
timeLine = timeMatch.group()
# Let's leave the beginning in as well, so if there is no
# anchor at the beginning of the time regexp, we at least
# don't allow injection. Should be harmless otherwise
logLine = l[:timeMatch.start()] + l[timeMatch.end():]
else:
timeLine = l
logLine = l
return self.findFailure(timeLine, logLine)
def processLineAndAdd(self, line):
for element in self.processLine(line):
ip = element[0]
unixTime = element[1]
logSys.debug("Processing line with time:%s and ip:%s"
% (unixTime, ip))
if unixTime < MyTime.time() - self.getFindTime():
logSys.debug("Ignore line since time %s < %s - %s"
% (unixTime, MyTime.time(), self.getFindTime()))
break
if self.inIgnoreIPList(ip):
logSys.debug("Ignore %s" % ip)
continue
logSys.debug("Found %s" % ip)
self.failManager.addFailure(FailTicket(ip, unixTime))
##
# Returns true if the line should be ignored.
#
# Uses ignoreregex.
# @param line: the line
# @return: a boolean
def ignoreLine(self, line):
for ignoreRegex in self.__ignoreRegex:
ignoreRegex.search(line)
if ignoreRegex.hasMatched():
return True
return False
##
# Finds the failure in a line given split into time and log parts.
#
# Uses the failregex pattern to find it and timeregex in order
# to find the logging time.
# @return a dict with IP and timestamp.
def findFailure(self, timeLine, logLine):
failList = list()
# Checks if we must ignore this line.
if self.ignoreLine(logLine):
# The ignoreregex matched. Return.
return failList
# Iterates over all the regular expressions.
for failRegex in self.__failRegex:
failRegex.search(logLine)
if failRegex.hasMatched():
# The failregex matched.
date = self.dateDetector.getUnixTime(timeLine)
if date == None:
logSys.debug("Found a match for '" + logLine +"' but no "
+ "valid date/time found for '"
+ timeLine + "'. Please contact the "
+ "author in order to get support for this "
+ "format")
else:
try:
host = failRegex.getHost()
ipMatch = DNSUtils.textToIp(host)
if ipMatch:
for ip in ipMatch:
failList.append([ip, date])
# We matched a regex, it is enough to stop.
break
except RegexException, e:
logSys.error(e)
return failList
##
# Get the status of the filter.
#
# Get some information about the filter state, such as the total
# number of failures.
# @return a list with tuple
def status(self):
ret = [("Currently failed", self.failManager.size()),
("Total failed", self.failManager.getFailTotal())]
return ret
class FileFilter(Filter):
def __init__(self, jail):
Filter.__init__(self, jail)
## The log file path.
self.__logPath = []
##
# Add a log file path
#
# @param path log file path
def addLogPath(self, path, tail = False):
container = FileContainer(path, tail)
self.__logPath.append(container)
##
# Delete a log path
#
# @param path the log file to delete
def delLogPath(self, path):
for log in self.__logPath:
if log.getFileName() == path:
self.__logPath.remove(log)
return
##
# Get the log file path
#
# @return log file path
def getLogPath(self):
return self.__logPath
##
# Check whether path is already monitored.
#
# @param path The path
# @return True if the path is already monitored else False
def containsLogPath(self, path):
for log in self.__logPath:
if log.getFileName() == path:
return True
return False
def getFileContainer(self, path):
for log in self.__logPath:
if log.getFileName() == path:
return log
return None
##
# Gets all the failure in the log file.
#
# Gets all the failure in the log file which are newer than
# MyTime.time()-self.findTime. When a failure is detected, a FailTicket
# is created and is added to the FailManager.
def getFailures(self, filename):
container = self.getFileContainer(filename)
if container == None:
logSys.error("Unable to get failures in " + filename)
return False
# Try to open log file.
try:
container.open()
except Exception, e:
logSys.error("Unable to open %s" % filename)
logSys.exception(e)
return False
line = container.readline()
while not line == "":
if not self._isActive():
# The jail has been stopped
break
self.processLineAndAdd(line)
# Read a new line.
line = container.readline()
container.close()
return True
def status(self):
ret = Filter.status(self)
path = [m.getFileName() for m in self.getLogPath()]
ret.append(("File list", path))
return ret
##
# FileContainer class.
#
# This class manages a file handler and takes care of log rotation detection.
# In order to detect log rotation, the hash (MD5) of the first line of the file
# is computed and compared to the previous hash of this line.
import md5
class FileContainer:
def __init__(self, filename, tail = False):
self.__filename = filename
self.__tail = tail
self.__handler = None
# Try to open the file. Raises an exception if an error occurred.
handler = open(filename)
stats = os.fstat(handler.fileno())
self.__ino = stats.st_ino
try:
firstLine = handler.readline()
# Computes the MD5 of the first line.
self.__hash = md5.new(firstLine).digest()
# Start at the beginning of file if tail mode is off.
if tail:
handler.seek(0, 2)
self.__pos = handler.tell()
else:
self.__pos = 0
finally:
handler.close()
def getFileName(self):
return self.__filename
def open(self):
self.__handler = open(self.__filename)
# Set the file descriptor to be FD_CLOEXEC
fd = self.__handler.fileno()
fcntl.fcntl(fd, fcntl.F_SETFD, fd | fcntl.FD_CLOEXEC)
firstLine = self.__handler.readline()
# Computes the MD5 of the first line.
myHash = md5.new(firstLine).digest()
stats = os.fstat(self.__handler.fileno())
# Compare hash and inode
if self.__hash != myHash or self.__ino != stats.st_ino:
logSys.info("Log rotation detected for %s" % self.__filename)
self.__hash = myHash
self.__ino = stats.st_ino
self.__pos = 0
# Sets the file pointer to the last position.
self.__handler.seek(self.__pos)
def readline(self):
if self.__handler == None:
return ""
return self.__handler.readline()
def close(self):
if not self.__handler == None:
# Saves the last position.
self.__pos = self.__handler.tell()
# Closes the file.
self.__handler.close()
self.__handler = None
##
# Utils class for DNS and IP handling.
#
# This class contains only static methods used to handle DNS and IP
# addresses.
import socket, struct
class DNSUtils:
IP_CRE = re.compile("^(?:\d{1,3}\.){3}\d{1,3}$")
#@staticmethod
def dnsToIp(dns):
""" Convert a DNS into an IP address using the Python socket module.
Thanks to Kevin Drapel.
"""
try:
return socket.gethostbyname_ex(dns)[2]
except socket.gaierror:
logSys.warn("Unable to find a corresponding IP address for %s"
% dns)
return list()
dnsToIp = staticmethod(dnsToIp)
#@staticmethod
def searchIP(text):
""" Search if an IP address if directly available and return
it.
"""
match = DNSUtils.IP_CRE.match(text)
if match:
return match
else:
return None
searchIP = staticmethod(searchIP)
#@staticmethod
def isValidIP(string):
""" Return true if str is a valid IP
"""
s = string.split('/', 1)
try:
socket.inet_aton(s[0])
return True
except socket.error:
return False
isValidIP = staticmethod(isValidIP)
#@staticmethod
def textToIp(text):
""" Return the IP of DNS found in a given text.
"""
ipList = list()
# Search for plain IP
plainIP = DNSUtils.searchIP(text)
if not plainIP == None:
plainIPStr = plainIP.group(0)
if DNSUtils.isValidIP(plainIPStr):
ipList.append(plainIPStr)
if not ipList:
# Try to get IP from possible DNS
ip = DNSUtils.dnsToIp(text)
for e in ip:
ipList.append(e)
return ipList
textToIp = staticmethod(textToIp)
#@staticmethod
def cidr(i, n):
""" Convert an IP address string with a CIDR mask into a 32-bit
integer.
"""
# 32-bit IPv4 address mask
MASK = 0xFFFFFFFFL
return ~(MASK >> n) & MASK & DNSUtils.addr2bin(i)
cidr = staticmethod(cidr)
#@staticmethod
def addr2bin(string):
""" Convert a string IPv4 address into an unsigned integer.
"""
return struct.unpack("!L", socket.inet_aton(string))[0]
addr2bin = staticmethod(addr2bin)
#@staticmethod
def bin2addr(addr):
""" Convert a numeric IPv4 address into string n.n.n.n form.
"""
return socket.inet_ntoa(struct.pack("!L", addr))
bin2addr = staticmethod(bin2addr)
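# Minimal usage sketch (added for illustration, not part of the original file):
# applying a /24 CIDR mask keeps only the network part of the address.
#
#   >>> DNSUtils.cidr("192.168.1.17", 24) == DNSUtils.addr2bin("192.168.1.0")
#   True
#   >>> DNSUtils.bin2addr(DNSUtils.cidr("192.168.1.17", 24))
#   '192.168.1.0'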
|
gpl-2.0
| 3,093,289,645,956,499,000
| 24.519263
| 93
| 0.679685
| false
| 3.212102
| false
| false
| false
|
sumedh123/debatify
|
app/models.py
|
1
|
6706
|
import hashlib
from werkzeug.security import generate_password_hash, check_password_hash
from flask import request
#from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from flask_login import UserMixin,AnonymousUserMixin
from app import login_manager
from app import db
from datetime import datetime
class Follow(db.Model):
__tablename__ = 'follows'
follower_id = db.Column(db.Integer, db.ForeignKey('users.id'), primary_key = True)
followed_id = db.Column(db.Integer, db.ForeignKey('users.id'), primary_key = True)
timestamp = db.Column(db.DateTime, default = datetime.utcnow)
class User(UserMixin,db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer,primary_key = True)
firstname = db.Column(db.String(50),nullable = True)
lastname = db.Column(db.String(50),nullable = True)
email = db.Column(db.String(50),nullable = True)
username = db.Column(db.String(64),nullable = True)
password = db.Column(db.String(100),nullable = True)
password_hash = db.Column(db.String(128), nullable = True)
confirmed = db.Column(db.Boolean, default = False)
question = db.relationship("Question", backref = "owner", lazy = 'dynamic')
location = db.Column(db.String(64),nullable = True)
about_me = db.Column(db.Text(),nullable = True)
member_since = db.Column(db.DateTime(), default=datetime.utcnow,nullable = True)
last_seen = db.Column(db.DateTime(), default=datetime.utcnow,nullable = True)
posts = db.relationship('Posts', backref = 'author', lazy = 'dynamic')
fetchedChat = db.relationship('Chats', backref = 'messenger', lazy = 'dynamic')
followed = db.relationship('Follow', foreign_keys = [Follow.follower_id],backref=db.backref('follower', lazy='joined'),lazy='dynamic',cascade='all, delete-orphan')
followers = db.relationship('Follow', foreign_keys = [Follow.followed_id], backref = db.backref('followed', lazy = 'joined'),lazy='dynamic',cascade='all, delete-orphan')
#role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
def ping(self):
self.last_seen = datetime.utcnow()
db.session.add(self)
def gravatar(self, size=100, default='identicon', rating='g'):
if request.is_secure:
url = 'https://secure.gravatar.com/avatar'
else:
url = 'http://www.gravatar.com/avatar'
hash = hashlib.md5(self.email.encode('utf-8')).hexdigest()
return '{url}/{hash}?s={size}&d={default}&r={rating}'.format(
url=url, hash=hash, size=size, default=default, rating=rating)
# def __init__(self, **kwargs):
# super(User, self).__init__(**kwargs)
# if self.role is None:
# if self.email == current_app.config['FLASKY_ADMIN']:
# self.role = Role.query.filter_by(permissions=0xff).first()
# if self.role is None:
# self.role = Role.query.filter_by(default=True).first()
# def __repr__(self):
# return "<User %s>" % self.firstname
#Related to werkzeug security
@property
def password(self):
raise AttributeError('password is not a readable attribute')
#Used for generating hashes of passwords
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
#Verification of password in database
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def follow(self, user):
if not self.is_following(user):
f = Follow(follower = self, followed = user)
db.session.add(f)
db.session.commit()
def unfollow(self, user):
f = self.followed.filter_by(followed_id = user.id).first()
if f:
db.session.delete(f)
db.session.commit()
def is_following(self, user):
return self.followed.filter_by(followed_id = user.id).first() is not None
def is_followed_by(self, user):
return self.followers.filter_by(follower_id = user.id).first() is not None
# Another table containing questions of users
class Question(db.Model):
__tablename__ = "questions"
id = db.Column(db.Integer, primary_key = True)
questions = db.Column(db.String(500))
topic = db.Column(db.String(500))
link = db.Column(db.String)
user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
question = db.relationship("Chats", backref = "chat_no", lazy = 'dynamic')
upvotes = db.Column(db.Integer, nullable = True, default = 1)
class Chats(db.Model):
__tablename__ = "chats"
id = db.Column(db.Integer, primary_key = True)
messages = db.Column(db.String)
time = db.Column(db.String(100))
chat_id = db.Column(db.Integer, db.ForeignKey('questions.id'))
sender_name = db.Column(db.String, nullable = True)
messenger_id = db.Column(db.Integer, db.ForeignKey('users.id'))
class Posts(db.Model):
__tablename__ = "posts"
id = db.Column(db.Integer, primary_key = True)
body = db.Column(db.Text)
timestamp = db.Column(db.DateTime, index = True, default = datetime.utcnow)
author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
# class Role():
# __tablename__ = 'roles'
# id = db.Column(db.Integer,primary_key = True)
# name = db.Column(db.String(64), unique = True)
# default = db.Column(db.Boolean, default = False, index = True)
# permissions = db.Column(db.Integer)
# users = db.relationship('User', backref = 'role', lazy = 'dynamic')
# def can(self,permissions):
# return self.role is not None and (self.role.permissions & permissions) == permissions
# def is_administrator(self):
# return self.can(Permission.ADMINISTER)
# @staticmethod
# def insert_roles():
# roles = {
# 'User': (Permission.FOLLOW | Permission.COMMENT | Permission.WRITE_ARTICLES, True),
# 'Moderator': (Permission.FOLLOW | Permission.COMMENT | Permission.WRITE_ARTICLES | Permission.MODERATE_COMMENTS, False),
# 'Administrator': (0xff, False)
# }
# for r in roles:
# role = Role.query.filter_by(name = r).first()
# if role is None:
# role = Role(name = r)
# role.permissions = roles[r][0]
# role.default = roles[r][1]
# db.session.add(role)
# db.session.commit()
# class Permission:
# FOLLOW = 0x01
# COMMENT = 0x02
# WRITE_ARTICLES = 0x04
# MODERATE_COMMENTS = 0x08
# ADMINISTER = 0x80
# class AnonymousUser(AnonymousUserMixin):
# def can(self,permissions):
# return False
# def is_administrator(self):
# return False
# login_manager.anonymous_user = AnonymousUser
# def generate_confirmation_token(self, expiration = 120):
# s = Serializer(app.config['SERIAL_KEY'],expiration)
# return s.dumps({'confirm' : self.id})
# def confirm(self, token):
# s = Serializer(current_app.config['SECRET_KEY'])
# try:
# data = s.loads(token)
# except:
# return False
# if data.get('confirm') != self.id:
# return False
# self.confirmed = True
# db.session.add(self)
# return True
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
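# Illustrative sketch (not part of the original models): the self-referential
# Follow table above lets two User rows follow each other. The user names are
# hypothetical and an application/database context is assumed.
#
#   alice = User.query.filter_by(username='alice').first()
#   bob = User.query.filter_by(username='bob').first()
#   alice.follow(bob)
#   assert alice.is_following(bob) and bob.is_followed_by(alice)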
|
mit
| 5,551,581,122,385,706,000
| 33.932292
| 170
| 0.697435
| false
| 2.93865
| false
| false
| false
|
frustreated/VDiscover
|
vdiscover/Backtrace.py
|
1
|
2987
|
from ptrace.ctypes_tools import formatAddress, formatWordHex
from ptrace.cpu_info import CPU_WORD_SIZE, CPU_MAX_UINT
from ptrace import PtraceError
#from ptrace.six.moves import xrange
class BacktraceFrame(object):
"""
Backtrace frame.
Attributes:
- ip: instruction pointer
- name: name of the function
- arguments: value of the arguments
"""
def __init__(self, ip):
self.ip = ip
self.name = u"???"
self.arguments = []
def __str__(self):
arguments = (formatWordHex(arg) for arg in self.arguments)
return u"IP=%s: %s (%s)" % (formatAddress(self.ip), self.name, ", ".join(arguments))
class Backtrace(object):
"""
Backtrace: all process frames since the start function.
"""
def __init__(self):
self.frames = []
self.truncated = False
def append(self, frame):
self.frames.append(frame)
def __iter__(self):
return iter(self.frames)
def __len__(self):
return len(self.frames)
def getBacktrace(process, max_args=6, max_depth=20):
"""
Get the current backtrace of the specified process:
- max_args: maximum number of arguments in a frame
- max_depth: maximum number of frames
Return a Backtrace object.
"""
backtrace = Backtrace()
# Get current instruction and frame pointer
ip = process.getInstrPointer()
fp = process.getFramePointer()
depth = 0
while True:
# Hit maximum trace depth?
if max_depth <= depth:
backtrace.truncated = True
break
# Read next frame pointer
try:
nextfp = process.readWord(fp)
except PtraceError:
nextfp = None
# Guess the number of function arguments
if fp and nextfp:
nargs = ((nextfp - fp) // CPU_WORD_SIZE) - 2
nargs = min(nargs, max_args)
else:
nargs = 0
# Create frame
frame = getBacktraceFrame(process, ip, fp, nargs)
#print frame
#print hex(fp),hex(nextfp), hex(nargs)
backtrace.append(frame)
# End of the stack?
if not nextfp:
break
# Move to next instruction/frame pointer
ip = process.readWord(fp+CPU_WORD_SIZE)
if ip == CPU_MAX_UINT:
# Linux hack to detect end of the stack
break
fp = nextfp
depth += 1
return backtrace
def getBacktraceFrame(process, ip, fp, nargs):
"""
Get a backtrace frame:
- ip: instruction pointer
- fp: frame pointer
- nargs: number of arguments
Return a BacktraceFrame object.
"""
frame = BacktraceFrame(ip)
address = fp + CPU_WORD_SIZE
try:
for index in xrange(nargs):
address += CPU_WORD_SIZE
word = process.readWord(address)
frame.arguments.append(word)
except PtraceError:
# Ignore argument read error
pass
return frame
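# Usage sketch (illustrative only; assumes a python-ptrace debugger session
# with an attached PtraceProcess named `process`):
#
#   backtrace = getBacktrace(process, max_args=4, max_depth=10)
#   for frame in backtrace:
#       print frame          # e.g. "IP=0x...: ??? (0x1, 0x2, ...)"
#   if backtrace.truncated:
#       print "stack deeper than max_depth, backtrace truncated"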
|
gpl-3.0
| 5,460,198,075,991,017,000
| 24.75
| 92
| 0.585872
| false
| 4.036486
| false
| false
| false
|
belokop/indico_bare
|
indico/modules/events/contributions/util.py
|
1
|
15345
|
# This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from collections import defaultdict, OrderedDict
from datetime import timedelta
from io import BytesIO
from operator import attrgetter
from flask import flash, session
from pytz import timezone
from sqlalchemy.orm import load_only, contains_eager, noload, joinedload, subqueryload
from indico.core.db import db
from indico.modules.events.models.events import Event
from indico.modules.events.models.persons import EventPerson
from indico.modules.events.contributions.models.contributions import Contribution
from indico.modules.events.contributions.models.subcontributions import SubContribution
from indico.modules.events.contributions.models.persons import ContributionPersonLink, SubContributionPersonLink
from indico.modules.events.contributions.models.principals import ContributionPrincipal
from indico.modules.events.util import serialize_person_link, ReporterBase
from indico.modules.attachments.util import get_attached_items
from indico.util.caching import memoize_request
from indico.util.date_time import format_human_timedelta, format_datetime
from indico.util.i18n import _
from indico.util.string import to_unicode
from indico.util.user import iter_acl
from indico.web.flask.templating import get_template_module
from indico.web.flask.util import url_for
from indico.web.http_api.metadata.serializer import Serializer
from indico.web.util import jsonify_data
from MaKaC.common.timezoneUtils import DisplayTZ
def get_events_with_linked_contributions(user, from_dt=None, to_dt=None):
"""Returns a dict with keys representing event_id and the values containing
data about the user rights for contributions within the event
:param user: A `User`
:param from_dt: The earliest event start time to look for
:param to_dt: The latest event start time to look for
"""
def add_acl_data():
query = (user.in_contribution_acls
.options(load_only('contribution_id', 'roles', 'full_access', 'read_access'))
.options(noload('*'))
.options(contains_eager(ContributionPrincipal.contribution).load_only('event_id'))
.join(Contribution)
.join(Event, Event.id == Contribution.event_id)
.filter(~Contribution.is_deleted, ~Event.is_deleted, Event.starts_between(from_dt, to_dt)))
for principal in query:
roles = data[principal.contribution.event_id]
if 'submit' in principal.roles:
roles.add('contribution_submission')
if principal.full_access:
roles.add('contribution_manager')
if principal.read_access:
roles.add('contribution_access')
def add_contrib_data():
has_contrib = (EventPerson.contribution_links.any(
ContributionPersonLink.contribution.has(~Contribution.is_deleted)))
has_subcontrib = EventPerson.subcontribution_links.any(
SubContributionPersonLink.subcontribution.has(db.and_(
~SubContribution.is_deleted,
SubContribution.contribution.has(~Contribution.is_deleted))))
query = (Event.query
.options(load_only('id'))
.options(noload('*'))
.filter(~Event.is_deleted,
Event.starts_between(from_dt, to_dt),
Event.persons.any((EventPerson.user_id == user.id) & (has_contrib | has_subcontrib))))
for event in query:
data[event.id].add('contributor')
data = defaultdict(set)
add_acl_data()
add_contrib_data()
return data
def serialize_contribution_person_link(person_link, is_submitter=None):
"""Serialize ContributionPersonLink to JSON-like object"""
data = serialize_person_link(person_link)
data['isSpeaker'] = person_link.is_speaker
if not isinstance(person_link, SubContributionPersonLink):
data['authorType'] = person_link.author_type.value
data['isSubmitter'] = person_link.is_submitter if is_submitter is None else is_submitter
return data
class ContributionReporter(ReporterBase):
"""Reporting and filtering actions in the contribution report."""
endpoint = '.manage_contributions'
report_link_type = 'contribution'
def __init__(self, event):
super(ContributionReporter, self).__init__(event)
self.default_report_config = {'filters': {'items': {}}}
session_empty = {None: 'No session'}
track_empty = {None: 'No track'}
type_empty = {None: 'No type'}
session_choices = {unicode(s.id): s.title for s in self.report_event.sessions}
track_choices = {unicode(t.id): to_unicode(t.getTitle()) for t in self.report_event.as_legacy.getTrackList()}
type_choices = {unicode(t.id): t.name for t in self.report_event.contribution_types}
self.filterable_items = OrderedDict([
('session', {'title': _('Session'),
'filter_choices': OrderedDict(session_empty.items() + session_choices.items())}),
('track', {'title': _('Track'),
'filter_choices': OrderedDict(track_empty.items() + track_choices.items())}),
('type', {'title': _('Type'),
'filter_choices': OrderedDict(type_empty.items() + type_choices.items())}),
('status', {'title': _('Status'), 'filter_choices': {'scheduled': _('Scheduled'),
'unscheduled': _('Not scheduled')}})
])
self.report_config = self._get_config()
def build_query(self):
timetable_entry_strategy = joinedload('timetable_entry')
timetable_entry_strategy.lazyload('*')
return (Contribution.query.with_parent(self.report_event)
.order_by(Contribution.friendly_id)
.options(timetable_entry_strategy,
joinedload('session'),
subqueryload('person_links'),
db.undefer('subcontribution_count'),
db.undefer('attachment_count'),
db.undefer('is_scheduled')))
def filter_report_entries(self, query, filters):
if not filters.get('items'):
return query
criteria = []
if 'status' in filters['items']:
filtered_statuses = filters['items']['status']
status_criteria = []
if 'scheduled' in filtered_statuses:
status_criteria.append(Contribution.is_scheduled)
if 'unscheduled' in filtered_statuses:
status_criteria.append(~Contribution.is_scheduled)
if status_criteria:
criteria.append(db.or_(*status_criteria))
filter_cols = {'session': Contribution.session_id,
'track': Contribution.track_id,
'type': Contribution.type_id}
for key, column in filter_cols.iteritems():
ids = set(filters['items'].get(key, ()))
if not ids:
continue
column_criteria = []
if None in ids:
column_criteria.append(column.is_(None))
if ids - {None}:
column_criteria.append(column.in_(ids - {None}))
criteria.append(db.or_(*column_criteria))
return query.filter(*criteria)
def get_contrib_report_kwargs(self):
contributions_query = self.build_query()
total_entries = contributions_query.count()
contributions = self.filter_report_entries(contributions_query, self.report_config['filters']).all()
sessions = [{'id': s.id, 'title': s.title, 'colors': s.colors} for s in self.report_event.sessions]
tracks = [{'id': int(t.id), 'title': to_unicode(t.getTitle())}
for t in self.report_event.as_legacy.getTrackList()]
total_duration = (sum((c.duration for c in contributions), timedelta()),
sum((c.duration for c in contributions if c.timetable_entry), timedelta()))
return {'contribs': contributions, 'sessions': sessions, 'tracks': tracks, 'total_entries': total_entries,
'total_duration': total_duration}
def render_contrib_report(self, contrib=None):
"""Render the contribution report template components.
:param contrib: Used in RHs responsible for CRUD operations on a
contribution.
:return: dict containing the report's entries, the fragment of
displayed entries and whether the contrib passed is displayed
in the results.
"""
contrib_report_kwargs = self.get_contrib_report_kwargs()
total_entries = contrib_report_kwargs.pop('total_entries')
tpl_contrib = get_template_module('events/contributions/management/_contribution_report.html')
tpl_reports = get_template_module('events/management/_reports.html')
contribs = contrib_report_kwargs['contribs']
filter_statistics = tpl_reports.render_filter_statistics(len(contribs), total_entries,
contrib_report_kwargs.pop('total_duration'))
return {'html': tpl_contrib.render_contrib_report(self.report_event, total_entries, **contrib_report_kwargs),
'hide_contrib': contrib not in contribs if contrib else None,
'filter_statistics': filter_statistics}
def flash_info_message(self, contrib):
flash(_("The contribution '{}' is not displayed in the list due to the enabled filters")
.format(contrib.title), 'info')
def generate_spreadsheet_from_contributions(contributions):
"""Return a tuple consisting of spreadsheet columns and respective
contribution values"""
headers = ['Id', 'Title', 'Description', 'Date', 'Duration', 'Type', 'Session', 'Track', 'Presenters', 'Materials']
rows = []
for c in sorted(contributions, key=attrgetter('friendly_id')):
contrib_data = {'Id': c.friendly_id, 'Title': c.title, 'Description': c.description,
'Duration': format_human_timedelta(c.duration),
'Date': format_datetime(c.timetable_entry.start_dt) if c.timetable_entry else None,
'Type': c.type.name if c.type else None,
'Session': c.session.title if c.session else None,
'Track': c.track.title if c.track else None,
'Materials': None,
'Presenters': ', '.join(speaker.person.full_name for speaker in c.speakers)}
attachments = []
attached_items = get_attached_items(c)
for attachment in attached_items.get('files', []):
attachments.append(attachment.absolute_download_url)
for folder in attached_items.get('folders', []):
for attachment in folder.attachments:
attachments.append(attachment.absolute_download_url)
if attachments:
contrib_data['Materials'] = ', '.join(attachments)
rows.append(contrib_data)
return headers, rows
def make_contribution_form(event):
"""Extends the contribution WTForm to add the extra fields.
Each extra field will use a field named ``custom_ID``.
:param event: The `Event` for which to create the contribution form.
:return: A `ContributionForm` subclass.
"""
from indico.modules.events.contributions.forms import ContributionForm
form_class = type(b'_ContributionForm', (ContributionForm,), {})
for custom_field in event.contribution_fields:
field_impl = custom_field.mgmt_field
if field_impl is None:
# field definition is not available anymore
continue
name = 'custom_{}'.format(custom_field.id)
setattr(form_class, name, field_impl.create_wtf_field())
return form_class
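# Illustrative sketch (not part of Indico): the generated subclass exposes one
# ``custom_<id>`` field per custom contribution field of the event, so a
# request handler could do something along these lines (``event`` assumed):
#
#   form_class = make_contribution_form(event)
#   form = form_class()
#   # e.g. a field created from custom field id 42 is available as form.custom_42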
def contribution_type_row(contrib_type):
template = get_template_module('events/contributions/management/_types_table.html')
html = template.types_table_row(contrib_type=contrib_type)
return jsonify_data(html_row=html, flash=False)
@memoize_request
def get_contributions_with_user_as_submitter(event, user):
"""Get a list of contributions in which the `user` has submission rights"""
contribs = (Contribution.query.with_parent(event)
.options(joinedload('acl_entries'))
.filter(Contribution.acl_entries.any(ContributionPrincipal.has_management_role('submit')))
.all())
return {c for c in contribs if any(user in entry.principal for entry in iter_acl(c.acl_entries))}
def serialize_contribution_for_ical(contrib):
return {
'_fossil': 'contributionMetadata',
'id': contrib.id,
'startDate': contrib.timetable_entry.start_dt if contrib.timetable_entry else None,
'endDate': contrib.timetable_entry.end_dt if contrib.timetable_entry else None,
'url': url_for('contributions.display_contribution', contrib, _external=True),
'title': contrib.title,
'location': contrib.venue_name,
'roomFullname': contrib.room_name,
'speakers': [serialize_person_link(x) for x in contrib.speakers],
'description': contrib.description
}
def get_contribution_ical_file(contrib):
data = {'results': serialize_contribution_for_ical(contrib)}
serializer = Serializer.create('ics')
return BytesIO(serializer(data))
class ContributionDisplayReporter(ContributionReporter):
endpoint = '.contribution_list'
report_link_type = 'contribution_display'
def render_contribution_list(self):
"""Render the contribution report template components.
:return: dict containing the report's entries, the fragment of
displayed entries and whether the contrib passed is displayed
in the results.
"""
contrib_report_kwargs = self.get_contrib_report_kwargs()
total_entries = contrib_report_kwargs.pop('total_entries')
contribs = contrib_report_kwargs['contribs']
tpl = get_template_module('events/contributions/display/_contribution_list.html')
tpl_reports = get_template_module('events/management/_reports.html')
tz = timezone(DisplayTZ(session.user, self.report_event.as_legacy).getDisplayTZ())
return {'html': tpl.render_contribution_list(self.report_event, tz, contribs),
'counter': tpl_reports.render_displayed_entries_fragment(len(contribs), total_entries)}
|
gpl-3.0
| 8,352,198,904,003,203,000
| 46.953125
| 119
| 0.647703
| false
| 4.147297
| false
| false
| false
|
deathglitch/metarigging
|
python/rigging/rig_cog.py
|
1
|
1030
|
import pymel.core as pm
import grip
import metautil.miscutil as miscutil
import metautil.rigutil as rigutil
import metautil.shapeutil as shapeutil
def rig_cog_chain(start_joint, end_joint, scale):
start_joint = pm.PyNode(start_joint)
end_joint = pm.PyNode(end_joint)
chain = miscutil.get_nodes_between(start_joint, end_joint, lambda x: isinstance(x, pm.nt.Joint))
grips = []
parent_consts = []
for x, joint in enumerate(chain):
cog_shape_node = shapeutil.create_poly_shape_cog(scale)
grip_node = grip.Grip.create(joint, shape=cog_shape_node, name_root = 'cog')
grip_node.setAttr('rotateOrder', 2)
parent_const = pm.parentConstraint(grip_node, joint, w=1, mo=1)
grips.append(grip_node)
parent_consts.append(parent_const)
result = {}
result['chain'] = chain
result['start_joint'] = start_joint
result['end_joint'] = end_joint
result['grips'] = grips
result['parent_consts'] = parent_consts
return result
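# Usage sketch (illustrative; joint names are hypothetical and assume an open
# Maya scene with a joint chain from 'spine_01' down to 'spine_04'):
#
#   result = rig_cog_chain('spine_01', 'spine_04', scale=2.0)
#   cog_grips = result['grips']            # one COG-shaped grip per joint
#   constraints = result['parent_consts']  # the joints now follow their grips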
|
mit
| 4,577,479,774,416,454,000
| 33.366667
| 100
| 0.658252
| false
| 3.280255
| false
| false
| false
|
tgfjt/Sublime-clipboardRound
|
clipboardround.py
|
1
|
3963
|
import sublime, sublime_plugin
history = []
menuitems = []
history_index = 0
def getClipboardData():
try:# win32
import win32clipboard
win32clipboard.OpenClipboard()
data = win32clipboard.GetClipboardData()
win32clipboard.CloseClipboard()
except:
pass
try:# windows7
import ctypes
ctypes.windll.user32.OpenClipboard(None)
pc = ctypes.windll.user32.GetClipboardData(1)
data = ctypes.c_char_p(pc).value.decode()
ctypes.windll.user32.CloseClipboard()
except:
pass
try:# mac
import subprocess
p = subprocess.Popen(['pbpaste'], stdout=subprocess.PIPE)
retcode = p.wait()
clip = p.stdout.read()
data = clip.decode()
except:
pass
try:# xclip
import subprocess
p = subprocess.Popen(['xclip', '-o'], stdout=subprocess.PIPE)
retcode = p.wait()
clip = p.stdout.read()
data = clip.decode()
except:
pass
try:# xsel
import subprocess
p = subprocess.Popen(['xsel', '-b', '-o'], stdout=subprocess.PIPE)
retcode = p.wait()
clip = p.stdout.read()
data = clip.decode()
except:
pass
if not 'data' in locals():
return False
else:
return data
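# Illustrative note (not part of the original plugin): the helper simply tries
# each platform backend in turn and returns the first clipboard text it can
# read, or False when no backend worked, e.g.
#
#   data = getClipboardData()
#   if data is not False:
#       print('clipboard currently holds: ' + data)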
def setClipboardHistory():
global history_index, menuitems, history
data = getClipboardData()
if data == False:
return None
elif data in history:
return None
elif data == '':
return None
settings = sublime.load_settings('Sublime-clipboardRound.sublime-settings')
limit = settings.get('limit')
if not history or history[0] != data:
history.insert(0, data)
history_index = 0
menuitems = history
if limit < len(history):
for i in range(len(history) - limit):
history.pop()
menuitems.pop()
return None
def pasteClipboardHistory(self, text):
self.view.run_command('undo')
self.view.run_command('paste')
sublime.set_clipboard(text)
class Clip_round_showCommand(sublime_plugin.TextCommand):
def on_chosen(self, index):
global flag
if index == -1:
return
sublime.set_clipboard(menuitems[index])
self.view.run_command('paste')
flag = True
def run(self, edit):
global menuitems
if menuitems == []:
return None
self.view.window().show_quick_panel(menuitems, self.on_chosen, sublime.MONOSPACE_FONT)
class Clip_round_prevCommand(sublime_plugin.TextCommand):
def run(self, edit):
global history_index
if history:
clip = sublime.get_clipboard()
history_index = min(history_index + 1, len(history) - 1)
sublime.set_clipboard(history[history_index])
sublime.set_timeout(lambda:
pasteClipboardHistory(self, clip), 0)
class Clip_round_nextCommand(sublime_plugin.TextCommand):
def run(self, edit):
global history_index
if history:
clip = sublime.get_clipboard()
history_index = max(history_index - 1, 0)
sublime.set_clipboard(history[history_index])
sublime.set_timeout(lambda:
pasteClipboardHistory(self, clip), 0)
class Clip_round_clearCommand(sublime_plugin.TextCommand):
def run(self, edit):
global history, history_index, menuitems, data
del menuitems[:]
del history[:]
history_index = 0
sublime.set_clipboard('')
print('clipboardRound: clear Clipboard History.')
class ClipboardRoundListener(sublime_plugin.EventListener):
def on_query_context(self, view, *args):
sublime.set_timeout(lambda:
setClipboardHistory(), 0)
return None
def on_text_command(self, view, command, *args):
sublime.set_timeout(lambda:
setClipboardHistory(), 0)
|
mit
| -1,768,667,037,384,106,800
| 26.143836
| 94
| 0.604088
| false
| 3.943284
| false
| false
| false
|
wavelets/ThinkStats2
|
code/density.py
|
1
|
2742
|
"""This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import math
import random
import scipy.stats
import brfss
import first
import thinkstats2
import thinkplot
def Summarize(data):
mean = data.mean()
std = data.std()
median = thinkstats2.Median(data)
print('mean', mean)
print('std', std)
print('median', median)
print('skewness', thinkstats2.Skewness(data))
print('pearson skewness',
thinkstats2.PearsonMedianSkewness(data))
return mean, median
def ComputeSkewnesses():
def VertLine(x, y):
thinkplot.Plot([x, x], [0, y], color='0.6', linewidth=1)
live, firsts, others = first.MakeFrames()
data = live.totalwgt_lb.dropna()
print('Birth weight')
mean, median = Summarize(data)
y = 0.35
VertLine(mean, y)
thinkplot.Text(mean-0.15, 0.1*y, 'mean', horizontalalignment='right')
VertLine(median, y)
thinkplot.Text(median+0.1, 0.1*y, 'median', horizontalalignment='left')
pdf = thinkstats2.EstimatedPdf(data)
thinkplot.Pdf(pdf, label='birth weight')
thinkplot.Save(root='density_totalwgt_kde',
xlabel='lbs',
ylabel='PDF')
df = brfss.ReadBrfss(nrows=None)
data = df.wtkg2.dropna()
print('Adult weight')
mean, median = Summarize(data)
y = 0.02499
VertLine(mean, y)
thinkplot.Text(mean+1, 0.1*y, 'mean', horizontalalignment='left')
VertLine(median, y)
thinkplot.Text(median-1.5, 0.1*y, 'median', horizontalalignment='right')
pdf = thinkstats2.EstimatedPdf(data)
thinkplot.Pdf(pdf, label='adult weight')
thinkplot.Save(root='density_wtkg2_kde',
xlabel='kg',
ylabel='PDF',
xlim=[0, 200])
def MakePdfExample():
# mean and var of women's heights in cm, from the BRFSS
mean, var = 163, 52.8
std = math.sqrt(var)
# make a PDF and compute a density, FWIW
pdf = thinkstats2.GaussianPdf(mean, std)
print(pdf.Density(mean + std))
# make a PMF and plot it
thinkplot.PrePlot(2)
thinkplot.Pdf(pdf, label='Gaussian')
# make a sample, make an estimated PDF, and plot it
sample = [random.gauss(mean, std) for i in range(100)]
sample_pdf = thinkstats2.EstimatedPdf(sample)
thinkplot.Pdf(sample_pdf, label='sample KDE')
thinkplot.Save(root='pdf_example',
xlabel='Height (cm)',
ylabel='Density')
def main():
thinkstats2.RandomSeed(17)
MakePdfExample()
ComputeSkewnesses()
if __name__ == '__main__':
main()
|
gpl-3.0
| 8,302,440,263,507,806,000
| 25.114286
| 76
| 0.633844
| false
| 3.169942
| false
| false
| false
|
SitiBanc/1061_NCTU_IOMDS
|
Final/autoencoder_keras.py
|
1
|
4171
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 7 14:02:01 2018
@author: jie-yu
"""
import numpy as np
np.random.seed(1337) # for reproducibility
#from keras.datasets import mnist
from keras.models import Model
from keras.layers import Dense, Input
import matplotlib.pyplot as plt
from PIL import Image
import os
labels = np.load("img_labels.npy")
y_train = np.zeros((len(labels),1))#len(labels)
#def hot_to_num():
for i in range(len(labels)):#len(labels)
y_train[i] = np.where(labels[i]==1)[0][0]
#image = Image.open("hw3_img.jpg")
os.chdir('D:\\Jie-Yu\\碩一上\\智慧型\\期末project\\img\\img')
filelist = os.listdir()
x = np.zeros((len(filelist),150*150))
for i in range(len(filelist)):
IMG = Image.open(filelist[i])
x[i,:]=np.array(IMG.getdata())
x_train = x.copy()
x_test = x_train.copy()
y_test = y_train.copy()
# download the mnist to the path '~/.keras/datasets/' if it is the first time to be called
# X shape (60,000 28x28), y shape (10,000, )
#(x_train, y_train), (x_test, y_test) = mnist.load_data()
# data pre-processing
x_train = x_train.astype('float32') / 255. - 0.5 # minmax_normalized
x_test = x_test.astype('float32') / 255. - 0.5 # minmax_normalized
x_train = x_train.reshape((x_train.shape[0], -1))
x_test = x_test.reshape((x_test.shape[0], -1))
print(x_train.shape)
print(x_test.shape)
# in order to plot in a 2D figure
encoding_dim = 2
# this is our input placeholder
input_img = Input(shape=(150*150,))
# encoder layers
encoded = Dense(128, activation='relu')(input_img)
encoded = Dense(64, activation='relu')(encoded)
encoded = Dense(10, activation='relu')(encoded)
encoder_output = Dense(encoding_dim)(encoded)
# decoder layers
decoded = Dense(10, activation='relu')(encoder_output)
decoded = Dense(64, activation='relu')(decoded)
decoded = Dense(128, activation='relu')(decoded)
decoded = Dense(150*150, activation='tanh')(decoded)
# construct the autoencoder model
autoencoder = Model(input=input_img, output=decoded)
# construct the encoder model for plotting
encoder = Model(input=input_img, output=encoder_output)
# compile autoencoder
autoencoder.compile(optimizer='adam', loss='mse')
# training
autoencoder.fit(x_train, x_train,
nb_epoch=20,
batch_size=256,
shuffle=True)
"""
Epoch 20/20
60000/60000 [==============================] - 7s - loss: 0.0398
"""
# plotting
encoded_imgs = encoder.predict(x_test)
plt.scatter(encoded_imgs[:, 0], encoded_imgs[:, 1], c=y_test)
plt.colorbar()
plt.show()
def lda(X,L):
Classes = np.unique(np.array(L))#0,1,2
k = len(Classes)#k = 3
n = np.zeros((k,1))#3*1 array
C = [" "]*k #3*1 list
M = np.mean(X,axis = 0) # mean of X
S = [" "]*k #3*1 list
Sw = 0
Sb = 0
for j in range(k):#3
Xj = X[np.where(L==Classes[j])[0]]
n[j] = int(Xj.shape[0])
C[j] = np.mean(Xj,axis = 0)
S[j] = 0
for i in range(int(n[j])):
aaa = np.array([Xj[i,:]-C[j]])
S[j] = S[j]+np.dot(aaa.T,aaa)
Sw = Sw+S[j]
bbb = np.array([C[j]-M])
Sb = Sb+int(n[j])*np.dot(bbb.T,bbb)
tmp = np.dot(np.linalg.inv(Sw),Sb)
LAMBDA,W = np.linalg.eig(tmp)
SortOrder = np.argsort(-LAMBDA)
# print(W)
W = W[:,SortOrder[0:1]]
Y = np.dot(X,W)
Y = -Y
return Y,W
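# Note added for illustration: lda() above is a plain Fisher LDA. W is the
# leading eigenvector of inv(Sw) * Sb (within- vs. between-class scatter) and
# Y = -(X.W) is the data projected onto that single discriminant axis, so the
# (n x 2) encoder output below comes back as an (n x 1) array.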
Y,W = lda(encoded_imgs,np.array(y_test)) # project the features down to one dimension
Y_sort = np.squeeze(Y).argsort()
Y_list = []
for i in range(len(Y_sort)):
aaa = (x_test[Y_sort[i]]+0.5)*255
Y_list.append(aaa.reshape(150,150).T.astype('uint8'))
Y_list = np.array(Y_list)
def draw_func(a,b):
start = min(a,b)
end = max(a,b)
if end-start>10:
jump = (end-start)//10
draw = Y_list[range(start,end,jump)]
draw = draw.reshape((len(range(start,end,jump)))*150,150)
else:
draw = Y_list[start:end]
draw = draw.reshape((end-start)*150,150)
draw = draw.T
Image.fromarray(draw).show()
#draw = np.array(Y_list)
draw_func(500,510)
#draw_func(500,502)
#draw_func(502,503)
|
apache-2.0
| 8,473,934,033,202,787,000
| 28.659259
| 90
| 0.595554
| false
| 2.64304
| true
| false
| false
|
ckclark/leetcode
|
py/count-of-smaller-numbers-after-self.py
|
1
|
1182
|
class Solution(object):
def deduceRemain(self, segment_tree, n):
for l in segment_tree:
if n < len(l):
l[n] -= 1
n >>= 1
def countRemainFirstN(self, segment_tree, n):
ans = 0
for l in segment_tree:
if n == 0:
break
if n & 1:
ans += l[n - 1]
n >>= 1
return ans
def countSmaller(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
size = len(nums)
ans = [0] * size
segment_tree = []
segment_tree.append([1] * size)
t = size / 2
while t > 0:
prev = segment_tree[-1]
nxt = []
segment_tree.append(nxt)
for i in xrange(t):
nxt.append(prev[i * 2] + prev[i * 2 + 1])
t /= 2
order = list(enumerate(nums))
order.sort(key=lambda x:(-x[1], -x[0]))
for idx, _ in order:
ans[idx] = self.countRemainFirstN(segment_tree, size) - self.countRemainFirstN(segment_tree, idx + 1)
self.deduceRemain(segment_tree, idx)
return ans
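# Usage sketch (illustrative): for the classic example input, the counts of
# strictly smaller elements to the right of each position are
#
#   Solution().countSmaller([5, 2, 6, 1])   # -> [2, 1, 1, 0]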
|
apache-2.0
| 578,514,358,702,732,300
| 28.55
| 113
| 0.450085
| false
| 3.54955
| false
| false
| false
|
StephenLujan/Naith
|
game/plugins/pointlight/pointlight.py
|
1
|
1351
|
# Copyright Tom SF Haines, Reinier de Blois
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pandac.PandaModules import VBase4
from pandac.PandaModules import PointLight as PPointLight
class PointLight:
"""Creates a simple point light"""
def __init__(self,manager,xml):
self.light = PPointLight('plight')
self.lightNode = render.attachNewNode(self.light)
self.reload(manager,xml)
def reload(self,manager,xml):
color = xml.find('color')
if color!=None:
self.light.setColor(VBase4(float(color.get('r')), float(color.get('g')), float(color.get('b')), 1.0))
pos = xml.find('pos')
if pos!=None:
self.lightNode.setPos(render, float(pos.get('x')), float(pos.get('y')), float(pos.get('z')))
def start(self):
render.setLight(self.lightNode)
def stop(self):
render.clearLight(self.lightNode)
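# Illustrative XML snippet (not taken from the original plugin data files)
# showing the attributes reload() reads; the element name and values are
# assumptions, only the child tags and attributes follow the code above:
#
#   <pointlight>
#     <color r="1.0" g="0.9" b="0.8"/>
#     <pos x="0.0" y="0.0" z="5.0"/>
#   </pointlight>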
|
apache-2.0
| 7,548,093,377,534,899,000
| 31.166667
| 107
| 0.712065
| false
| 3.490956
| false
| false
| false
|
monikagrabowska/osf.io
|
kinto/kinto/core/authorization.py
|
1
|
9913
|
import functools
from pyramid.settings import aslist
from pyramid.security import IAuthorizationPolicy, Authenticated
from zope.interface import implementer
from kinto.core import utils
from kinto.core.storage import exceptions as storage_exceptions
from kinto.core.authentication import prefixed_userid
# A permission is called "dynamic" when it's computed at request time.
DYNAMIC = 'dynamic'
# When permission is set to "private", only the current user is allowed.
PRIVATE = 'private'
def groupfinder(userid, request):
"""Fetch principals from permission backend for the specified `userid`.
This is plugged by default using the ``multiauth.groupfinder`` setting.
"""
backend = getattr(request.registry, 'permission', None)
# Permission backend not configured. Ignore.
if not backend:
return []
# Safety check when Kinto-Core is used without pyramid_multiauth.
if request.prefixed_userid:
userid = request.prefixed_userid
# Query the permission backend only once per request (e.g. batch).
reify_key = userid + '_principals'
if reify_key not in request.bound_data:
principals = backend.get_user_principals(userid)
request.bound_data[reify_key] = principals
return request.bound_data[reify_key]
@implementer(IAuthorizationPolicy)
class AuthorizationPolicy(object):
"""Default authorization class, that leverages the permission backend
for shareable resources.
"""
get_bound_permissions = None
"""Callable that takes an object id and a permission and returns
a list of tuples (<object id>, <permission>). Useful when objects
permission depend on others."""
def permits(self, context, principals, permission):
if permission == PRIVATE:
return Authenticated in principals
# Add prefixed user id to principals.
prefixed_userid = context.get_prefixed_userid()
if prefixed_userid and ':' in prefixed_userid:
principals = principals + [prefixed_userid]
prefix, user_id = prefixed_userid.split(':', 1)
# Remove unprefixed user id to avoid conflicts.
# (it is added via Pyramid Authn policy effective principals)
if user_id in principals:
principals.remove(user_id)
# Retro-compatibility with cliquet 2.0 '_' user id prefixes.
# Just in case it was used in permissions definitions.
principals.append('%s_%s' % (prefix, user_id))
if permission == DYNAMIC:
permission = context.required_permission
if permission == 'create':
permission = '%s:%s' % (context.resource_name, permission)
if context.allowed_principals:
allowed = bool(set(context.allowed_principals) & set(principals))
else:
object_id = context.permission_object_id
if self.get_bound_permissions is None:
bound_perms = [(object_id, permission)]
else:
bound_perms = self.get_bound_permissions(object_id, permission)
allowed = context.check_permission(principals, bound_perms)
# If not allowed on this collection, but some records are shared with
# the current user, then authorize.
# The ShareableResource class will take care of the filtering.
is_list_operation = (context.on_collection and
not permission.endswith('create'))
if not allowed and is_list_operation:
shared = context.fetch_shared_records(permission,
principals,
self.get_bound_permissions)
allowed = shared is not None
return allowed
def principals_allowed_by_permission(self, context, permission):
raise NotImplementedError() # PRAGMA NOCOVER
class RouteFactory(object):
resource_name = None
on_collection = False
required_permission = None
allowed_principals = None
permission_object_id = None
current_record = None
shared_ids = None
method_permissions = {
"head": "read",
"get": "read",
"post": "create",
"delete": "write",
"patch": "write"
}
def __init__(self, request):
# Make it available for the authorization policy.
self.get_prefixed_userid = functools.partial(prefixed_userid, request)
# Store some shortcuts.
permission = request.registry.permission
self.check_permission = permission.check_permission
self._get_accessible_objects = permission.get_accessible_objects
# Store current resource and required permission.
service = utils.current_service(request)
is_on_resource = (service is not None and
hasattr(service, 'viewset') and
hasattr(service, 'resource'))
if is_on_resource:
self.resource_name = request.current_resource_name
self.on_collection = getattr(service, "type", None) == "collection"
self.permission_object_id, self.required_permission = (
self._find_required_permission(request, service))
# To obtain shared records on a collection endpoint, use a match:
self._object_id_match = self.get_permission_object_id(request, '*')
# Check if principals are allowed explicitly from settings.
settings = request.registry.settings
setting = '%s_%s_principals' % (self.resource_name,
self.required_permission)
self.allowed_principals = aslist(settings.get(setting, ''))
def fetch_shared_records(self, perm, principals, get_bound_permissions):
"""Fetch records that are readable or writable for the current
principals.
See :meth:`kinto.core.authorization.AuthorizationPolicy.permits`
If no record is shared, it returns None.
.. warning::
This sets the ``shared_ids`` attribute to the context with the
return value. The attribute is then read by
:class:`kinto.core.resource.ShareableResource`
"""
if get_bound_permissions:
bound_perms = get_bound_permissions(self._object_id_match, perm)
else:
bound_perms = [(self._object_id_match, perm)]
by_obj_id = self._get_accessible_objects(principals, bound_perms)
ids = by_obj_id.keys()
if len(ids) > 0:
# Store for later use in ``ShareableResource``.
self.shared_ids = [self._extract_object_id(id_) for id_ in ids]
else:
self.shared_ids = None
return self.shared_ids
def get_permission_object_id(self, request, object_id=None):
"""Returns the permission object id for the current request.
In the nominal case, it is just the current URI without version prefix.
For collections, it is the related record URI using the specified
`object_id`.
See :meth:`kinto.core.resource.model.SharableModel` and
:meth:`kinto.core.authorization.RouteFactory.__init__`
"""
object_uri = utils.strip_uri_prefix(request.path)
if self.on_collection and object_id is not None:
# With the current request on a collection, the record URI must
# be found out by inspecting the collection service and its sibling
# record service.
matchdict = request.matchdict.copy()
matchdict['id'] = object_id
try:
object_uri = utils.instance_uri(request,
self.resource_name,
**matchdict)
if object_id == '*':
object_uri = object_uri.replace('%2A', '*')
except KeyError:
# Maybe the resource has no single record endpoint.
# We consider that object URIs in permissions backend will
# be stored naively:
object_uri = object_uri + '/' + object_id
return object_uri
def _extract_object_id(self, object_uri):
# XXX: Rewrite using kinto.core.utils.view_lookup() and matchdict['id']
return object_uri.split('/')[-1]
def _find_required_permission(self, request, service):
"""Find out what is the permission object id and the required
permission.
.. note::
This method saves an attribute ``self.current_record`` used
in :class:`kinto.core.resource.UserResource`.
"""
        # By default, it's a URI and a permission associated with the method.
permission_object_id = self.get_permission_object_id(request)
method = request.method.lower()
required_permission = self.method_permissions.get(method)
# In the case of a "PUT", check if the targetted record already
# exists, return "write" if it does, "create" otherwise.
if request.method.lower() == "put":
resource = service.resource(request=request, context=self)
try:
record = resource.model.get_record(resource.record_id)
# Save a reference, to avoid refetching from storage in
# resource.
self.current_record = record
except storage_exceptions.RecordNotFoundError:
# The record does not exist, the permission to create on
# the related collection is required.
permission_object_id = service.collection_path.format(
**request.matchdict)
required_permission = "create"
else:
required_permission = "write"
return (permission_object_id, required_permission)
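A hedged wiring sketch: how a policy and route factory like the ones above are typically registered on a Pyramid Configurator. Kinto's real initialization does considerably more (authentication policies, includes, backend setup); only the two Pyramid calls shown are taken as given here.

# Sketch only; not Kinto's actual startup code.
from pyramid.config import Configurator

def make_app(settings=None):
    config = Configurator(settings=settings or {}, root_factory=RouteFactory)
    config.set_authorization_policy(AuthorizationPolicy())
    # ... authentication policy, kinto.core includes and storage/permission
    # backends would be configured here ...
    return config.make_wsgi_app()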
|
apache-2.0
| 5,708,944,877,151,772,000
| 39.96281
| 79
| 0.616261
| false
| 4.501817
| false
| false
| false
|
ducted/duct
|
duct/service.py
|
1
|
10144
|
"""
.. module:: service
:synopsis: Core service classes
.. moduleauthor:: Colin Alston <colin@imcol.in>
"""
import time
import sys
import os
import importlib
import re
import copy
from twisted.application import service
from twisted.internet import task, reactor, defer
from twisted.python import log
class DuctService(service.Service):
"""Duct service
    Runs timers, configures sources and manages the queue
"""
def __init__(self, config):
self.running = 0
self.sources = []
self.lastEvents = {}
self.outputs = {}
self.evCache = {}
self.critical = {}
self.warn = {}
self.hostConnectorCache = {}
self.eventCounter = 0
self.factory = None
self.protocol = None
self.watchdog = None
self.config = config
if os.path.exists('/var/lib/duct'):
sys.path.append('/var/lib/duct')
# Read some config stuff
self.debug = float(self.config.get('debug', False))
self.ttl = float(self.config.get('ttl', 60.0))
self.stagger = float(self.config.get('stagger', 0.2))
# Backward compatibility
self.server = self.config.get('server', None)
self.port = int(self.config.get('port', 5555))
self.proto = self.config.get('proto', 'tcp')
self.inter = self.config.get('interval', 60.0)
if self.debug:
print("config:", repr(config))
self.setupSources(self.config)
def setupOutputs(self, config):
"""Setup output processors"""
if self.server:
if self.proto == 'tcp':
defaultOutput = {
'output': 'duct.outputs.riemann.RiemannTCP',
'server': self.server,
'port': self.port
}
else:
defaultOutput = {
'output': 'duct.outputs.riemann.RiemannUDP',
'server': self.server,
'port': self.port
}
outputs = config.get('outputs', [defaultOutput])
else:
outputs = config.get('outputs', [])
for output in outputs:
if 'debug' not in output:
output['debug'] = self.debug
cl = output['output'].split('.')[-1] # class
path = '.'.join(output['output'].split('.')[:-1]) # import path
# Import the module and construct the output object
outputObj = getattr(
importlib.import_module(path), cl)(output, self)
name = output.get('name', None)
# Add the output to our routing hash
if name in self.outputs:
self.outputs[name].append(outputObj)
else:
self.outputs[name] = [outputObj]
# connect the output
reactor.callLater(0, outputObj.createClient)
def createSource(self, source):
"""Construct the source object as defined in the configuration
"""
if source.get('path'):
path = source['path']
if path not in sys.path:
sys.path.append(path)
# Resolve the source
cl = source['source'].split('.')[-1] # class
path = '.'.join(source['source'].split('.')[:-1]) # import path
# Import the module and get the object source we care about
sourceObj = getattr(importlib.import_module(path), cl)
if 'debug' not in source:
source['debug'] = self.debug
if 'ttl' not in source.keys():
source['ttl'] = self.ttl
if 'interval' not in source.keys():
source['interval'] = self.inter
return sourceObj(source, self.sendEvent, self)
def setupTriggers(self, source, sobj):
"""Setup trigger actions for a source
"""
if source.get('critical'):
self.critical[sobj] = [(re.compile(key), val)
for key, val in source['critical'].items()]
if source.get('warning'):
self.warn[sobj] = [(re.compile(key), val)
for key, val in source['warning'].items()]
def setupSources(self, config):
"""Sets up source objects from the given config"""
sources = config.get('sources', [])
for source in sources:
src = self.createSource(source)
self.setupTriggers(source, src)
self.sources.append(src)
def _aggregateQueue(self, events):
# Handle aggregation for each event
queue = []
for ev in events:
if ev.aggregation:
eid = ev.eid()
thisM = ev.metric
if eid in self.evCache:
lastM, lastTime = self.evCache[eid]
tDelta = ev.time - lastTime
metric = ev.aggregation(
lastM, ev.metric, tDelta)
if metric:
ev.metric = metric
queue.append(ev)
self.evCache[eid] = (thisM, ev.time)
else:
queue.append(ev)
return queue
def setStates(self, source, queue):
"""
Check Event triggers against the configured source and apply the
corresponding state
"""
for ev in queue:
if ev.state == 'ok':
for key, val in self.warn.get(source, []):
if key.match(ev.service):
state = eval("service %s" % val, {'service': ev.metric})
if state:
ev.state = 'warning'
for key, val in self.critical.get(source, []):
if key.match(ev.service):
state = eval("service %s" % val, {'service': ev.metric})
if state:
ev.state = 'critical'
def routeEvent(self, source, events):
"""Route event to the queue of the output configured for it
"""
routes = source.config.get('route', None)
if not isinstance(routes, list):
routes = [routes]
for route in routes:
if self.debug:
log.msg("Sending events %s to %s" % (events, route))
if not route in self.outputs:
# Non existant route
log.msg('Could not route %s -> %s.' % (
source.config['service'], route))
else:
for output in self.outputs[route]:
reactor.callLater(0, output.eventsReceived, events)
def sendEvent(self, source, events):
"""Callback that all event sources call when they have a new event
or list of events
"""
if isinstance(events, list):
self.eventCounter += len(events)
else:
self.eventCounter += 1
events = [events]
queue = self._aggregateQueue(events)
if queue:
if (source in self.critical) or (source in self.warn):
self.setStates(source, queue)
self.routeEvent(source, queue)
queue = []
self.lastEvents[source] = time.time()
@defer.inlineCallbacks
def _startSource(self, source):
yield defer.maybeDeferred(source.startTimer)
@defer.inlineCallbacks
def startService(self):
yield self.setupOutputs(self.config)
if self.debug:
log.msg("Starting service")
stagger = 0
# Start sources internal timers
for source in self.sources:
if self.debug:
log.msg("Starting source " + source.config['service'])
# Stagger source timers, or use per-source start_delay
start_delay = float(source.config.get('start_delay', stagger))
reactor.callLater(start_delay, self._startSource, source)
stagger += self.stagger
reactor.callLater(stagger, self.startWatchdog)
self.running = 1
def startWatchdog(self):
"""Start source watchdog
"""
self.watchdog = task.LoopingCall(self.sourceWatchdog)
self.watchdog.start(10)
@defer.inlineCallbacks
def sourceWatchdog(self):
"""Watchdog timer function.
Recreates sources which have not generated events in 10*interval if
they have watchdog set to true in their configuration
"""
for i, source in enumerate(self.sources):
if not source.config.get('watchdog', False):
continue
last = self.lastEvents.get(source, None)
if last:
sn = repr(source)
try:
if last < (time.time()-(source.inter*10)):
log.msg("Trying to restart stale source %s: %ss" % (
sn, int(time.time() - last)
))
source = self.sources.pop(i)
try:
yield source.stopTimer()
except Exception as ex:
log.msg("Could not stop timer for %s: %s" % (
sn, ex))
config = copy.deepcopy(source.config)
del self.lastEvents[source]
del source
source = self.createSource(config)
reactor.callLater(0, self._startSource, source)
except Exception as ex:
log.msg("Could not reset source %s: %s" % (
sn, ex))
@defer.inlineCallbacks
def stopService(self):
self.running = 0
if self.watchdog and self.watchdog.running:
self.watchdog.stop()
for source in self.sources:
yield defer.maybeDeferred(source.stopTimer)
for _, outputs in self.outputs.items():
for output in outputs:
yield defer.maybeDeferred(output.stop)
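A minimal start-up sketch for the service above. The config keys mirror what __init__() and setupOutputs() read; the Riemann output path is the default used in setupOutputs(), and the commented source entry is a placeholder rather than a real duct source path.

# Illustrative configuration and start-up; values are examples only.
from twisted.internet import reactor

config = {
    'interval': 30.0,
    'ttl': 60.0,
    'outputs': [
        {'output': 'duct.outputs.riemann.RiemannTCP',  # default output class above
         'server': 'localhost', 'port': 5555},
    ],
    # A real deployment lists sources here, e.g.
    # {'service': 'example.metric', 'source': '<duct source class path>', 'interval': 30.0},
    'sources': [],
}

svc = DuctService(config)
reactor.callWhenRunning(svc.startService)
reactor.run()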
|
mit
| 7,231,771,732,266,448,000
| 30.799373
| 80
| 0.51735
| false
| 4.534645
| true
| false
| false
|
interlegis/sigi
|
sigi/apps/parlamentares/views.py
|
1
|
4616
|
# coding: utf-8
import datetime
import csv
from django.template import Context, loader
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.conf import settings
from django.shortcuts import render, get_list_or_404
from django.http import HttpResponse, HttpResponseRedirect
from django.views.decorators.csrf import csrf_protect
from django.template import RequestContext
from sigi.apps.casas.models import Orgao
from sigi.apps.parlamentares.models import Parlamentar
from sigi.apps.parlamentares.reports import ParlamentaresLabels
from geraldo.generators import PDFGenerator
from django.contrib.auth.decorators import login_required
def adicionar_parlamentar_carrinho(request, queryset=None, id=None):
if request.method == 'POST':
ids_selecionados = request.POST.getlist('_selected_action')
        if 'carrinho_parlamentar' not in request.session:
request.session['carrinho_parlamentar'] = ids_selecionados
else:
lista = request.session['carrinho_parlamentar']
            # Check that this id has not already been added
for id in ids_selecionados:
if id not in lista:
lista.append(id)
request.session['carrinho_parlamentar'] = lista
@login_required
@csrf_protect
def visualizar_carrinho(request):
qs = carrinhoOrGet_for_qs(request)
paginator = Paginator(qs, 100)
    # Make sure the page request is an int. If not, deliver the first page.
try:
page = int(request.GET.get('page', '1'))
except ValueError:
page = 1
    # If the page request (e.g. 9999) is out of range, deliver the last page.
try:
paginas = paginator.page(page)
except (EmptyPage, InvalidPage):
paginas = paginator.page(paginator.num_pages)
    carrinhoIsEmpty = 'carrinho_parlamentar' not in request.session
return render(
request,
'parlamentares/carrinho.html',
{
'carIsEmpty': carrinhoIsEmpty,
'paginas': paginas,
'query_str': '?' + request.META['QUERY_STRING']
}
)
def carrinhoOrGet_for_qs(request):
"""
    Check whether there are parlamentares in the session; otherwise use GET and return the corresponding queryset.
"""
if 'carrinho_parlamentar' in request.session:
ids = request.session['carrinho_parlamentar']
qs = Parlamentar.objects.filter(pk__in=ids)
else:
qs = Parlamentar.objects.all()
if request.GET:
qs = get_for_qs(request.GET, qs)
return qs
def query_ordena(qs, o, ot):
list_display = ('nome_completo',)
aux = list_display[(int(o) - 1)]
if ot == 'asc':
qs = qs.order_by(aux)
else:
qs = qs.order_by("-" + aux)
return qs
def get_for_qs(get, qs):
"""
    Check the GET attributes and return the corresponding queryset
"""
kwargs = {}
for k, v in get.iteritems():
if not (k == 'page' or k == 'pop' or k == 'q'):
if not k == 'o':
if k == "ot":
qs = query_ordena(qs, get["o"], get["ot"])
else:
kwargs[str(k)] = v
qs = qs.filter(**kwargs)
return qs
@login_required
def deleta_itens_carrinho(request):
"""
    Delete the selected items from the cart
"""
if request.method == 'POST':
ids_selecionados = request.POST.getlist('_selected_action')
if 'carrinho_parlamentar' in request.session:
lista = request.session['carrinho_parlamentar']
for item in ids_selecionados:
lista.remove(item)
if lista:
request.session['carrinho_parlamentar'] = lista
else:
del lista
del request.session['carrinho_parlamentar']
return HttpResponseRedirect('.')
@login_required
def labels_report(request, id=None, formato='3x9_etiqueta'):
""" TODO: adicionar suporte para resultado de pesquisa do admin.
"""
if request.POST:
if 'tipo_etiqueta' in request.POST:
tipo = request.POST['tipo_etiqueta']
if id:
qs = Parlamentar.objects.filter(pk=id)
else:
qs = carrinhoOrGet_for_qs(request)
if not qs:
return HttpResponseRedirect('../')
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename=casas.pdf'
report = ParlamentaresLabels(queryset=qs, formato=formato)
report.generate_by(PDFGenerator, filename=response)
return response
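A hypothetical URLconf sketch for wiring the views above (old-style django.conf.urls, matching the era of the imports in this module); the patterns and route names are illustrative, not the project's real ones.

# Hypothetical urls.py fragment; not taken from the sigi project itself.
from django.conf.urls import url
from sigi.apps.parlamentares import views

urlpatterns = [
    url(r'^carrinho/$', views.visualizar_carrinho, name='visualizar_carrinho'),
    url(r'^carrinho/deleta/$', views.deleta_itens_carrinho, name='deleta_itens_carrinho'),
    url(r'^labels/(?P<id>\d+)/$', views.labels_report, name='labels_report'),
]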
|
gpl-2.0
| 6,510,522,160,181,261,000
| 29.496689
| 98
| 0.636699
| false
| 3.388521
| false
| false
| false
|
tfXYZ/tfXYZ
|
core/losses.py
|
1
|
1634
|
import tensorflow as tf
from .blocks import gather_nd
tf.app.flags.DEFINE_float('alpha', 1.0, '')
tf.app.flags.DEFINE_float('beta', 1.0, '')
FLAGS = tf.app.flags.FLAGS
def ce_loss(logits, labels, **kwargs):
"""
The standard classification loss. Applies softmax on the logits and computes the loss.
"""
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)
return tf.reduce_mean(cross_entropy)
def ce_loss_from_softmax(softmax_out, labels, avg=True, **kwargs):
"""
The standard classification loss. Takes the softmax output and computes the loss.
"""
indices = tf.transpose(tf.stack([tf.constant(range(0, softmax_out.get_shape()[0].value)), labels]), [1,0])
correct_probs = gather_nd(softmax_out, indices)
loss = -tf.reduce_mean(tf.log(correct_probs)) if avg else -tf.log(correct_probs)
return loss
def binary_ce_loss(logits, labels, n_classes, **kwargs):
"""
Binary CE loss, for multilabel classification and other applications.
"""
one_hot_labels = tf.one_hot(labels, n_classes)
loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=one_hot_labels)
return tf.reduce_mean(loss)
def MSE_loss(tensor, targets):
"""
Standard MSE loss.
"""
loss = tf.reduce_mean(tf.pow(tensor - targets, 2))
return loss
def mix_sigmoid_ce_loss(logits, labels, n_classes, **kwargs):
"""
A mix between the standard CE and binary CE loss, according to alpha and beta.
"""
print('alpha, beta:', FLAGS.alpha, FLAGS.beta)
loss = ce_loss(logits, labels) * FLAGS.alpha + binary_ce_loss(logits, labels, n_classes) * FLAGS.beta
return loss
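A short graph-mode sketch (TensorFlow 1.x assumed, consistent with the tf.app.flags usage above); the logits and labels below are random illustrative values.

# Illustrative only: evaluate ce_loss on random data in TF1 graph mode.
import numpy as np

logits = tf.constant(np.random.randn(4, 10).astype('float32'))
labels = tf.constant([1, 3, 5, 7])
loss = ce_loss(logits, labels)
with tf.Session() as sess:
  print(sess.run(loss))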
|
gpl-3.0
| 3,675,278,564,448,912,000
| 31.039216
| 108
| 0.70257
| false
| 3.25498
| false
| false
| false
|
klahnakoski/esReplicate
|
pyLibrary/sql/redshift.py
|
1
|
5408
|
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
# FOR WINDOWS INSTALL OF psycopg2
# http://stickpeople.com/projects/python/win-psycopg/2.6.0/psycopg2-2.6.0.win32-py2.7-pg9.4.1-release.exe
import psycopg2
from psycopg2.extensions import adapt
from pyLibrary import convert
from mo_logs.exceptions import suppress_exception
from mo_logs import Log
from mo_kwargs import override
from pyLibrary.queries import jx
from pyLibrary.sql import SQL
from mo_logs.strings import expand_template
from mo_threads import Lock
class Redshift(object):
@override
def __init__(self, host, user, password, database=None, port=5439, kwargs=None):
self.settings=kwargs
self.locker = Lock()
self.connection = None
def _connect(self):
self.connection=psycopg2.connect(
database=self.settings.database,
user=self.settings.user,
password=self.settings.password,
host=self.settings.host,
port=self.settings.port
)
def query(self, sql, param=None):
return self.execute(sql, param)
def execute(
self,
command,
param=None,
retry=True # IF command FAILS, JUST THROW ERROR
):
if param:
command = expand_template(command, self.quote_param(param))
output = None
done = False
while not done:
try:
with self.locker:
if not self.connection:
self._connect()
with Closer(self.connection.cursor()) as curs:
curs.execute(command)
if curs.rowcount >= 0:
output = curs.fetchall()
self.connection.commit()
done = True
except Exception as e:
with suppress_exception:
self.connection.rollback()
# TODO: FIGURE OUT WHY rollback() DOES NOT HELP
self.connection.close()
self.connection = None
self._connect()
if not retry:
Log.error("Problem with command:\n{{command|indent}}", command= command, cause=e)
return output
def insert(self, table_name, record):
keys = record.keys()
try:
command = "INSERT INTO " + self.quote_column(table_name) + "(" + \
",".join([self.quote_column(k) for k in keys]) + \
") VALUES (" + \
",".join([self.quote_value(record[k]) for k in keys]) + \
")"
self.execute(command)
except Exception as e:
Log.error("problem with record: {{record}}", record= record, cause=e)
def insert_list(self, table_name, records):
if not records:
return
columns = set()
for r in records:
columns |= set(r.keys())
columns = jx.sort(columns)
try:
self.execute(
"DELETE FROM " + self.quote_column(table_name) + " WHERE _id IN {{ids}}",
{"ids": self.quote_column([r["_id"] for r in records])}
)
command = \
"INSERT INTO " + self.quote_column(table_name) + "(" + \
",".join([self.quote_column(k) for k in columns]) + \
") VALUES " + ",\n".join([
"(" + ",".join([self.quote_value(r.get(k, None)) for k in columns]) + ")"
for r in records
])
self.execute(command)
except Exception as e:
Log.error("problem with insert", e)
def quote_param(self, param):
output={}
for k, v in param.items():
if isinstance(v, SQL):
output[k]=v.sql
else:
output[k]=self.quote_value(v)
return output
def quote_column(self, name):
if isinstance(name, basestring):
return SQL('"' + name.replace('"', '""') + '"')
return SQL("(" + (", ".join(self.quote_value(v) for v in name)) + ")")
def quote_value(self, value):
        if value is None:
return SQL("NULL")
if isinstance(value, list):
json = convert.value2json(value)
return self.quote_value(json)
if isinstance(value, basestring) and len(value) > 256:
value = value[:256]
return SQL(adapt(value))
def es_type2pg_type(self, es_type):
return PG_TYPES.get(es_type, "character varying")
PG_TYPES = {
"boolean": "boolean",
"double": "double precision",
"float": "double precision",
"string": "VARCHAR",
"long": "bigint"
}
class Closer(object):
def __init__(self, resource):
self.resource = resource
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
with suppress_exception:
self.resource.close()
def __getattr__(self, item):
return getattr(self.resource, item)
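A usage sketch for the Redshift wrapper above; every connection value is a placeholder, and the keyword-style construction simply mirrors the parameters declared on __init__.

# Placeholder credentials; illustration only.
db = Redshift(
    host='example-cluster.abc123.us-east-1.redshift.amazonaws.com',  # hypothetical
    user='analyst',
    password='secret',
    database='analytics',
)
db.insert('events', {'_id': '42', 'name': 'page_view'})
rows = db.query('SELECT name FROM events')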
|
mpl-2.0
| -9,127,470,044,056,634,000
| 29.382022
| 105
| 0.54253
| false
| 4.047904
| false
| false
| false
|
jimsize/PySolFC
|
pysollib/kivy/toolbar.py
|
1
|
10827
|
#!/usr/bin/env python
# -*- mode: python; coding: utf-8; -*-
# ---------------------------------------------------------------------------#
# Copyright (C) 2016-2017 LB
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------------#
# imports
import os
# PySol imports
from pysollib.util import IMAGE_EXTENSIONS
from pysollib.settings import TITLE
from pysollib.winsystems import TkSettings
from pysollib.mygettext import _, n_
# ************************************************************************
# *
# ************************************************************************
class AbstractToolbarButton:
def __init__(self, parent, toolbar, toolbar_name, position):
self.toolbar = toolbar
self.toolbar_name = toolbar_name
self.position = position
self.visible = False
def show(self, orient, force=False):
if self.visible and not force:
return
self.visible = True
padx, pady = 2, 2
if orient == 'horizontal':
self.grid(row=0,
column=self.position,
ipadx=padx, ipady=pady,
sticky='nsew')
else:
self.grid(row=self.position,
column=0,
ipadx=padx, ipady=pady,
sticky='nsew')
def hide(self):
if not self.visible:
return
self.visible = False
self.grid_forget()
# ************************************************************************
if True:
from pysollib.kivy.LApp import LImage
from pysollib.kivy.LApp import LBase
# from LApp import LMainWindow
from kivy.uix.boxlayout import BoxLayout
# from kivy.uix.button import Button
from kivy.uix.behaviors import ButtonBehavior
# from kivy.uix.behaviors import ToggleButtonBehavior
from kivy.uix.image import Image as KivyImage
# ************************************************************************
class MyButton(ButtonBehavior, KivyImage, LBase):
def __init__(self, **kwargs):
super(MyButton, self).__init__(**kwargs)
# super(MyButton, self).__init__()
self.src = None
if ('image' in kwargs):
self.src = kwargs['image'].source
self.command = None
if ('command' in kwargs):
self.command = kwargs['command']
self.source = self.src
self.allow_stretch = True
def on_press(self):
self.allow_stretch = False
def on_release(self):
self.allow_stretch = True
if (self.command is not None):
self.command()
class MyCheckButton(ButtonBehavior, KivyImage, LBase):
def __init__(self, **kwargs):
super(MyCheckButton, self).__init__(**kwargs)
# super(MyCheckButton, self).__init__()
self.src = None
if ('image' in kwargs):
self.src = kwargs['image'].source
self.command = None
if ('command' in kwargs):
self.command = kwargs['command']
self.variable = None
if ('variable' in kwargs):
self.variable = kwargs['variable']
self.win = None
if ('win' in kwargs):
self.win = kwargs['win']
self.source = self.src
self.allow_stretch = True
self.checked = False
# self.variable = self.win.app.menubar.tkopt.pause
if self.variable:
self.variable.bind(value=self.updateState)
def updateState(self, obj, val):
if (val):
self.allow_stretch = False
else:
self.allow_stretch = True
def isChecked(self):
return self.checked
def on_press(self):
if self.win is None:
return
if self.win.app is None:
return
if self.win.app.game is None:
return
game = self.win.app.game
if game.finished:
return
if game.demo:
return
# if self.win.app.menubar == None: return
# mb = self.win.app.menubar
if game.pause:
self.allow_stretch = True
self.checked = False
if (self.command is not None):
self.command()
else:
self.allow_stretch = False
self.checked = True
if (self.command is not None):
self.command()
def on_release(self):
pass
# ************************************************************************
# * Note: Applications should call show/hide after constructor.
# ************************************************************************
class PysolToolbarTk(BoxLayout):
def __init__(
self,
top,
menubar,
dir,
size=0,
relief='flat',
compound='none'):
super(PysolToolbarTk, self).__init__(orientation='vertical')
self.size_hint = (0.05, 1.0)
# self.size_hint=(None, 1.0)
# self.width = 50
self.win = top
self.menubar = menubar
self.dir = dir
self.win.setTool(self, 3)
for label, f, t in (
(n_("New"), self.mNewGame, _("New game")),
(n_("Restart"), self.mRestart, _("Restart the\ncurrent game")),
(None, None, None),
# (n_("Open"), self.mOpen, _("Open a\nsaved game")),
# (n_("Save"), self.mSave, _("Save game")),
(None, None, None),
(n_("Undo"), self.mUndo, _("Undo last move")),
(n_("Redo"), self.mRedo, _("Redo last move")),
(n_("Autodrop"), self.mDrop, _("Auto drop cards")),
(n_("Shuffle"), self.mShuffle, _("Shuffle tiles")),
(n_("Pause"), self.mPause, _("Pause game")),
(None, None, None),
# (n_("Statistics"), self.mPlayerStats, _("View statistics")),
(n_("Rules"), self.mHelpRules, _("Rules for this game")),
(None, None, None),
(n_("Quit"), self.mHoldAndQuit, _("Quit ") + TITLE),
):
if label is None:
# sep = self._createSeparator()
# sep.bind("<1>", self.clickHandler)
# sep.bind("<3>", self.rightclickHandler)
pass
elif label == 'Pause':
self._createButton(label, f, check=True, tooltip=t)
else:
self._createButton(label, f, tooltip=t)
        # there is also a 'player label' with a context menu here, where
        # the player name could be selected and the game statistics etc.
        # could be displayed (TBD):
'''
sep = self._createFlatSeparator()
sep.bind("<1>", self.clickHandler)
sep.bind("<3>", self.rightclickHandler)
self._createLabel("player", label=n_('Player'),
tooltip=_("Player options"))
#
self.player_label.bind("<1>", self.mOptPlayerOptions)
# self.player_label.bind("<3>", self.mOptPlayerOptions)
self.popup = MfxMenu(master=None, label=n_('Toolbar'), tearoff=0)
createToolbarMenu(menubar, self.popup)
self.frame.bind("<1>", self.clickHandler)
self.frame.bind("<3>", self.rightclickHandler)
#
self.setCompound(compound, force=True)
'''
def show(self, on, **kw):
side = self.menubar.tkopt.toolbar.get()
self.win.setTool(None, side)
return False
def mHoldAndQuit(self, *args):
if not self._busy():
self.menubar.mHoldAndQuit()
return 1
def getSize(self):
return 0
def updateText(self, **kw):
pass
def config(self, w, v):
print('PysolToolbarTk: config %s, %s' % (w, v))
# y = self.yy
pass
    # Local methods.
def _loadImage(self, name):
file = os.path.join(self.dir, name)
image = None
for ext in IMAGE_EXTENSIONS:
file = os.path.join(self.dir, name + ext)
if os.path.isfile(file):
image = LImage(source=file)
# print('_loadImage: file=%s' % file)
# image = Tkinter.PhotoImage(file=file)
break
return image
def _createButton(self, label, command, check=False, tooltip=None):
name = label.lower()
image = self._loadImage(name)
# position = len(self._widgets)
button_relief = TkSettings.toolbar_button_relief
bd = TkSettings.toolbar_button_borderwidth
padx, pady = TkSettings.toolbar_button_padding
kw = {
'toolbar': self,
'toolbar_name': name,
'command': command,
'takefocus': 0,
'text': _(label),
'bd': bd,
'relief': button_relief,
'padx': padx,
'pady': pady,
'overrelief': 'raised',
}
# print ('toolbar: print %s' % self.win)
# print ('toolbar: print %s' % self.win.app)
kw['win'] = self.win
if image:
kw['image'] = image
if check:
kw['offrelief'] = button_relief
kw['indicatoron'] = False
kw['selectcolor'] = ''
button = MyCheckButton(**kw)
else:
button = MyButton(**kw)
# button.show(orient=self.orient)
setattr(self, name + "_image", image)
setattr(self, name + "_button", button)
# self._widgets.append(button)
self.add_widget(button)
# TBD: tooltip ev. auf basis einer statuszeile implementieren
# if tooltip:
# b = MfxTooltip(button)
# self._tooltips.append(b)
# b.setText(tooltip)
return button
def _busy(self):
# if not self.side or not self.game or not self.menubar:
# return 1
if not self.game or not self.menubar:
return 1
print('_busy:')
self.game.stopDemo()
self.game.interruptSleep()
return self.game.busy
|
gpl-3.0
| 5,466,700,252,095,288,000
| 32.103976
| 78
| 0.506328
| false
| 4.117535
| false
| false
| false
|
treyhunner/django-simple-history
|
docs/conf.py
|
1
|
8369
|
# -*- coding: utf-8 -*-
#
# django-simple-history documentation build configuration file, created by
# sphinx-quickstart on Sun May 5 16:10:02 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
from pkg_resources import get_distribution
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ["sphinx.ext.autodoc"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "django-simple-history"
copyright = "2013, Corey Bertram"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
release = get_distribution("django-simple-history").version
# for example take major/minor
version = ".".join(release.split(".")[:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "django-simple-historydoc"
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
(
"index",
"django-simple-history.tex",
"django-simple-history Documentation",
"Corey Bertram",
"manual",
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
"index",
"django-simple-history",
"django-simple-history Documentation",
["Corey Bertram"],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"django-simple-history",
"django-simple-history Documentation",
"Corey Bertram",
"django-simple-history",
"One line description of project.",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
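A small illustration of the release/version derivation near the top of this file; the release string below is hypothetical.

# Mirrors the ".".join(release.split(".")[:2]) logic used above.
release = "3.1.1"                           # hypothetical full release string
version = ".".join(release.split(".")[:2])  # -> "3.1" (major.minor only)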
|
bsd-3-clause
| -8,592,172,093,522,146,000
| 30.700758
| 80
| 0.690644
| false
| 3.853131
| true
| false
| false
|
ecreall/lagendacommun
|
lac/views/services_processes/moderation_service/see_service.py
|
1
|
2407
|
# Copyright (c) 2014 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
from pyramid.view import view_config
from pyramid.httpexceptions import HTTPFound
from dace.util import getSite
from dace.processinstance.core import DEFAULTMAPPING_ACTIONS_VIEWS
from pontus.view import BasicView
from lac.content.processes.services_processes.behaviors import (
SeeModerationService, SeeModerationUnitService)
from lac.content.service import (
ModerationService, ModerationServiceUnit)
from lac.utilities.utils import (
ObjectRemovedException, generate_navbars)
@view_config(
name='seemoderationservice',
context=ModerationService,
renderer='pontus:templates/views_templates/grid.pt',
)
class SeeModerationServiceView(BasicView):
title = ''
name = 'seemoderationservice'
behaviors = [SeeModerationService]
template = 'lac:views/services_processes/moderation_service/templates/see_moderation_service.pt'
viewid = 'seemoderationservice'
def update(self):
self.execute(None)
result = {}
try:
navbars = generate_navbars(self, self.context, self.request)
except ObjectRemovedException:
return HTTPFound(self.request.resource_url(getSite(), ''))
values = {'object': self.context,
'navbar_body': navbars['navbar_body']}
body = self.content(args=values, template=self.template)['body']
item = self.adapt_item(body, self.viewid)
item['messages'] = navbars['messages']
item['isactive'] = navbars['isactive']
result.update(navbars['resources'])
result['coordinates'] = {self.coordinates: [item]}
return result
@view_config(
name='seemoderationserviceunit',
context=ModerationServiceUnit,
renderer='pontus:templates/views_templates/grid.pt',
)
class SeeModerationServiceUnitView(SeeModerationServiceView):
title = ''
name = 'seemoderationserviceunit'
behaviors = [SeeModerationUnitService]
template = 'lac:views/services_processes/moderation_service/templates/see_moderation_service.pt'
viewid = 'seemoderationserviceunit'
DEFAULTMAPPING_ACTIONS_VIEWS.update(
{SeeModerationService: SeeModerationServiceView})
DEFAULTMAPPING_ACTIONS_VIEWS.update(
{SeeModerationUnitService: SeeModerationServiceUnitView})
|
agpl-3.0
| -3,885,177,443,963,789,300
| 33.385714
| 100
| 0.726215
| false
| 3.714506
| false
| false
| false
|
rv816/serrano_night
|
serrano/resources/stats.py
|
1
|
3399
|
from django.core.urlresolvers import reverse
from django.conf.urls import patterns, url
from django.views.decorators.cache import never_cache
from restlib2.params import Parametizer, BoolParam, StrParam
from avocado.models import DataContext, DataField
from avocado.query import pipeline
from .base import BaseResource, ThrottledResource
class StatsResource(BaseResource):
def get(self, request):
uri = request.build_absolute_uri
return {
'title': 'Serrano Stats Endpoint',
'_links': {
'self': {
'href': uri(reverse('serrano:stats:root')),
},
'counts': {
'href': uri(reverse('serrano:stats:counts')),
},
}
}
class CountStatsParametizer(Parametizer):
aware = BoolParam(False)
processor = StrParam('default', choices=pipeline.query_processors)
class CountStatsResource(ThrottledResource):
parametizer = CountStatsParametizer
def get(self, request):
params = self.get_params(request)
if params['aware']:
context = self.get_context(request)
else:
context = DataContext()
# Get all published app/model pairs to produce counts for.
model_names = DataField.objects.published()\
.values_list('app_name', 'model_name')\
.order_by('model_name').distinct()
data = []
models = set()
QueryProcessor = pipeline.query_processors[params['processor']]
for app_name, model_name in model_names:
# DataField used here to resolve foreign key-based fields.
model = DataField(app_name=app_name, model_name=model_name).model
# Foreign-key based fields may resolve to models that are already
# accounted for.
if model in models:
continue
models.add(model)
# Build a queryset through the context which is toggled by
# the parameter.
processor = QueryProcessor(context=context, tree=model)
queryset = processor.get_queryset(request=request)
count = queryset.values('pk').distinct().count()
opts = model._meta
# Format is called to resolve Django's internal proxy wrapper.
verbose_name = opts.verbose_name.format()
verbose_name_plural = opts.verbose_name_plural.format()
            # Assume no custom verbose_name has been set in the Meta class, so
# apply a minimal title-case.
if verbose_name.islower():
verbose_name = verbose_name.title()
if verbose_name_plural.islower():
verbose_name_plural = verbose_name_plural.title()
data.append({
'count': count,
'app_name': app_name,
'model_name': model_name,
'verbose_name': verbose_name,
'verbose_name_plural': verbose_name_plural,
})
return data
# Same logic, but supports submitting context via a POST.
post = get
stats_resource = never_cache(StatsResource())
counts_resource = never_cache(CountStatsResource())
# Resource endpoints
urlpatterns = patterns(
'',
url(r'^$', stats_resource, name='root'),
url(r'^counts/$', counts_resource, name='counts'),
)
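A hedged mounting sketch: including the urlpatterns above under the namespaces that the reverse('serrano:stats:...') calls expect. How Serrano actually nests its 'serrano' and 'stats' namespaces is assumed here, not taken from its real URLconf.

# Hypothetical project-level URLconf fragment (old-style patterns(), as above).
from django.conf.urls import include, patterns, url

urlpatterns = patterns(
    '',
    url(r'^stats/', include('serrano.resources.stats', namespace='stats')),
)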
|
bsd-2-clause
| 6,553,799,984,254,183,000
| 30.472222
| 77
| 0.596646
| false
| 4.391473
| false
| false
| false
|
radjkarl/appBase
|
appbase/Launcher.py
|
1
|
22087
|
# coding=utf-8
from __future__ import print_function
from builtins import str
# -*- coding: utf-8 -*-
###############
# The launcher class is not updated any more
# I might remove it
###############
# own
import appbase
from fancytools.os.PathStr import PathStr
from fancywidgets.pyQtBased.Dialogs import Dialogs
# foreign
from qtpy import QtGui, QtWidgets, QtCore, QtSvg
# built-in
import os
from zipfile import ZipFile
import distutils
from distutils import spawn
import subprocess
import sys
import tempfile
CONFIG_FILE = PathStr.home().join(__name__)
class Launcher(QtWidgets.QMainWindow):
"""
A graphical starter for *.pyz files created by the save-method from
appbase.MainWindow
NEEDS AN OVERHAUL ... after that's done it will be able to:
* show all *.pyz-files in a filetree
* show the session specific ...
* icon
* description
* author etc.
* start, remove, rename, modify a session
* modify, start a certain state of a session
"""
def __init__(self,
title='PYZ-Launcher',
icon=None,
start_script=None,
left_header=None,
right_header=None,
file_type='pyz'
):
self.dialogs = Dialogs()
_path = PathStr.getcwd()
_default_text_color = '#3c3c3c'
if icon is None:
icon = os.path.join(_path, 'media', 'launcher_logo.svg')
if start_script is None:
start_script = os.path.join(_path, 'test_session.py')
if left_header is None:
_description = "<a href=%s style='color: %s'>%s</a>" % (
appbase.__url__, _default_text_color, appbase.__doc__)
left_header = """<b>%s</b><br>
version
<a href=%s style='color: %s'>%s</a><br>
autor
<a href=mailto:%s style='color: %s'>%s</a> """ % ( # text-decoration:underline
_description,
os.path.join(_path, 'media', 'recent_changes.txt'),
_default_text_color,
appbase.__version__,
appbase.__email__,
_default_text_color,
appbase.__author__
)
if right_header is None:
            # if no header is given, list all PDFs in the media folder as links
d = _path
right_header = ''
for f in os.listdir(os.path.join(d, 'media')):
if f.endswith('.pdf'):
_guidePath = os.path.join(d, 'media', f)
right_header += "<a href=%s style='color: %s'>%s</a><br>" % (
_guidePath, _default_text_color, f[:-4])
right_header = right_header[:-4]
QtWidgets.QMainWindow.__init__(self)
self._start_script = start_script
self.setWindowTitle(title)
self.setWindowIcon(QtGui.QIcon(icon))
self.resize(900, 500)
# BASE STRUTURE
area = QtWidgets.QWidget()
self.setCentralWidget(area)
layout = QtWidgets.QVBoxLayout()
area.setLayout(layout)
#header = QtWidgets.QHBoxLayout()
# layout.addLayout(header)
        # grab the default text color of a QLabel to recolor all links from blue to it:
# LEFT TEXT
info = QtWidgets.QLabel(left_header)
info.setOpenExternalLinks(True)
# LOGO
header = QtWidgets.QWidget()
header.setFixedHeight(70)
headerlayout = QtWidgets.QHBoxLayout()
header.setLayout(headerlayout)
logo = QtSvg.QSvgWidget(icon)
logo.setFixedWidth(50)
logo.setFixedHeight(50)
headerlayout.addWidget(logo)
headerlayout.addWidget(info)
layout.addWidget(header)
# RIGHT_HEADER
userGuide = QtWidgets.QLabel(right_header)
userGuide.setOpenExternalLinks(True)
userGuide.setAlignment(QtCore.Qt.AlignTop | QtCore.Qt.AlignRight)
headerlayout.addWidget(userGuide)
# ROOT-PATH OF THE SESSIONS
rootLayout = QtWidgets.QHBoxLayout()
rootFrame = QtWidgets.QFrame()
rootFrame.setFrameStyle(
QtWidgets.QFrame.StyledPanel | QtWidgets.QFrame.Plain)
rootFrame.setFixedHeight(45)
rootFrame.setLineWidth(0)
rootFrame.setLayout(rootLayout)
layout.addWidget(rootFrame)
self.rootDir = QtWidgets.QLabel()
self.rootDir.setAutoFillBackground(True)
self.rootDir.setStyleSheet("QLabel { background-color: white; }")
# FILE-BROWSER
self.treeView = _TreeView()
self.fileSystemModel = _FileSystemModel(self.treeView, file_type)
self.fileSystemModel.setNameFilters(['*.%s' % file_type])
self.fileSystemModel.setNameFilterDisables(False)
self.treeView.setModel(self.fileSystemModel)
treelayout = QtWidgets.QHBoxLayout()
splitter = QtWidgets.QSplitter(QtCore.Qt.Orientation(1))
self.fileInfo = _PyzInfo(splitter, self.fileSystemModel, self.treeView)
self.treeView.clicked.connect(self.fileInfo.update)
splitter.addWidget(self.treeView)
splitter.addWidget(self.fileInfo)
treelayout.addWidget(splitter)
layout.addLayout(treelayout)
# get last root-path
self._path = PathStr('')
if CONFIG_FILE:
try:
self._path = PathStr(
open(
CONFIG_FILE,
'r').read().decode('unicode-escape'))
except IOError:
pass # file not existant
if not self._path or not self._path.exists():
msgBox = QtWidgets.QMessageBox()
msgBox.setText("Please choose your projectDirectory.")
msgBox.exec_()
self._changeRootDir()
self.treeView.setPath(self._path)
abspath = os.path.abspath(self._path)
self.rootDir.setText(abspath)
rootLayout.addWidget(self.rootDir)
# GO UPWARDS ROOT-PATH BUTTON
btnUpRootDir = QtWidgets.QPushButton('up')
btnUpRootDir.clicked.connect(self._goUpRootDir)
rootLayout.addWidget(btnUpRootDir)
# DEFINE CURRENT DIR AS ROOT-PATH
btnDefineRootDir = QtWidgets.QPushButton('set')
btnDefineRootDir.clicked.connect(self._defineRootDir)
rootLayout.addWidget(btnDefineRootDir)
# SELECT ROOT-PATH BUTTON
buttonRootDir = QtWidgets.QPushButton('select')
buttonRootDir.clicked.connect(self._changeRootDir)
rootLayout.addWidget(buttonRootDir)
# NEW-BUTTON
if self._start_script:
newButton = QtWidgets.QPushButton('NEW')
newButton.clicked.connect(self._openNew)
layout.addWidget(newButton)
@staticmethod
def rootDir():
try:
return PathStr(
open(CONFIG_FILE, 'r').read().decode('unicode-escape'))
except IOError: # create starter
return PathStr.home()
def _goUpRootDir(self):
self._setRootDir(self._path.dirname())
def _defineRootDir(self):
i = self.treeView.selectedIndexes()
# if not self.treeView.isIndexHidden(i):
if i:
if self.fileSystemModel.isDir(i[0]):
self._setRootDir(PathStr(self.fileSystemModel.filePath(i[0])))
def _changeRootDir(self):
path = self.dialogs.getExistingDirectory()
if path:
self._setRootDir(path)
def _setRootDir(self, path):
self._path = path
self.rootDir.setText(self._path)
root = self.fileSystemModel.setRootPath(self._path)
self.treeView.setRootIndex(root)
# save last path to file
if CONFIG_FILE:
open(CONFIG_FILE, 'w').write(self._path.encode('unicode-escape'))
def _openNew(self):
p = spawn.find_executable("python")
os.spawnl(os.P_NOWAIT, p, 'python', '%s' % self._start_script)
class _FileEditMenu(QtWidgets.QWidget):
def __init__(self, treeView):
QtWidgets.QWidget.__init__(self)
self._treeView = treeView
self._menu = QtWidgets.QMenu(self)
d = PathStr.getcwd()
iconpath = os.path.join(d, 'media', 'icons', 'approve.svg')
self._actionStart = QtWidgets.QAction(QtGui.QIcon(iconpath),
'Start', self._treeView,
triggered=self._treeView.openProject)
iconpath = os.path.join(d, 'media', 'icons', 'delete.svg')
delete = QtWidgets.QAction(QtGui.QIcon(iconpath),
'Delete', self._treeView,
triggered=self._treeView.deleteSelected)
iconpath = os.path.join(d, 'media', 'icons', 'rename.svg')
rename = QtWidgets.QAction(QtGui.QIcon(iconpath),
'Rename', self._treeView,
triggered=self._treeView.editSelected)
iconpath = os.path.join(d, 'media', 'icons', 'new.svg')
newDir = QtWidgets.QAction(QtGui.QIcon(iconpath),
'New Directory', self._treeView,
triggered=self._treeView.newDirInSelected)
iconpath = os.path.join(d, 'media', 'icons', 'findReplace.svg')
self._editStartScript = QtWidgets.QAction(QtGui.QIcon(iconpath),
'Edit start script', self._treeView,
triggered=self._treeView.editStartScriptInSelected)
iconpath = os.path.join(d, 'media', 'icons', 'bug.png')
self._actionInDebugMode = QtWidgets.QAction(QtGui.QIcon(iconpath),
'Run in debug mode', self._treeView,
triggered=self._treeView.runInDebugMode)
self._menu.addAction(self._actionStart)
self._menu.addAction(rename)
self._menu.addAction(newDir)
self._menu.addAction(self._editStartScript)
self._menu.addAction(delete)
self._menu.addAction(self._actionInDebugMode)
# TODO: does not match signature
def show(self, evt):
isDir = self._treeView.selectedIsDir(evt.pos())
self._actionStart.setVisible(not isDir)
self._editStartScript.setVisible(not isDir)
self._actionInDebugMode.setVisible(not isDir)
self._menu.popup(evt.globalPos())
class _TreeView(QtWidgets.QTreeView):
def __init__(self):
super(_TreeView, self).__init__()
self.setHeaderHidden(False)
# connect own function for doubleclick
self.setExpandsOnDoubleClick(False)
self._menu = _FileEditMenu(self)
# no editing of the items when clicked, rightclicked, doubleclicked:
self.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.sortByColumn(0, QtCore.Qt.AscendingOrder) # sort by name
self.setSortingEnabled(True)
self.setAnimated(True) # expanding/collapsing animated
self.setIconSize(QtCore.QSize(60, 60))
# DRAG/DROP
self.setDragEnabled(True)
self.setAcceptDrops(True)
self.setDragDropMode(QtWidgets.QAbstractItemView.InternalMove)
self.doubleClicked.connect(self._doubleClicked)
def keyPressEvent(self, event):
if event.matches(QtGui.QKeySequence.Delete):
self.deleteSelected()
def selectionChanged(self, selected, deselected):
for index in deselected.indexes():
# print index
self.closePersistentEditor(index)
super(_TreeView, self).selectionChanged(selected, deselected)
def mousePressEvent(self, event):
mouseBtn = event.button()
if mouseBtn == QtCore.Qt.RightButton:
self._menu.show(event)
super(_TreeView, self).mousePressEvent(event)
def deleteSelected(self):
msgBox = QtWidgets.QMessageBox()
msgBox.setText("Are you sure?")
msgBox.addButton('Yes', QtWidgets.QMessageBox.YesRole)
msgBox.addButton('No', QtWidgets.QMessageBox.RejectRole)
ret = msgBox.exec_()
if ret == 0: # yes
self.fileSystemModel.remove(self.currentIndex())
def selectedIsDir(self, pos):
index = self.indexAt(pos)
return self.fileSystemModel.isDir(index)
def editSelected(self):
self.openPersistentEditor(self.currentIndex())
def newDirInSelected(self):
index = self.currentIndex()
if not self.fileSystemModel.isDir(index):
index = index.parent()
else:
self.setExpanded(index, True)
self.fileSystemModel.mkdir(index, 'NEW')
def editStartScriptInSelected(self):
index = self.currentIndex()
self.fileSystemModel.editStartScript(index)
def dropEvent(self, e):
index = self.indexAt(e.pos())
# only insert into directories
if self.fileSystemModel.isDir(index):
super(_TreeView, self).dropEvent(e)
def setModel(self, model):
self.fileSystemModel = model
super(_TreeView, self).setModel(model)
self.setColumnWidth(0, 300)
self.hideColumn(1) # type
self.hideColumn(2) # size
def setPath(self, path):
self._path = path
root = self.fileSystemModel.setRootPath(self._path)
self.setRootIndex(root)
def _doubleClicked(self, index):
# if folder->toggle expanding
if self.fileSystemModel.isDir(index):
self.setExpanded(index, not self.isExpanded(index))
else:
self.openProject(index)
def runInDebugMode(self):
index = self.currentIndex()
#term = os.environ.get('TERM')
self.fileSystemModel.updateStartStript(index)
if os.name == 'posix': # linux
term = 'xterm'
else:
sys.exit('debug mode not supported on windows yet')
subprocess.call([term, '-e',
'python %s -d' % self.fileSystemModel.filePath(index)])
def openProject(self, index=None):
if not index:
index = self.currentIndex()
self.fileSystemModel.updateStartStript(index)
p = distutils.spawn.find_executable("python")
# start an independent python process
os.spawnl(
os.P_NOWAIT, p, 'python', '%s' %
self.fileSystemModel.filePath(index))
class _PyzInfo(QtWidgets.QWidget):
def __init__(self, vsplitter, filesystemmodel, treeView):
QtWidgets.QWidget.__init__(self)
self.layout = QtWidgets.QVBoxLayout()
self._filesystemmodel = filesystemmodel
self._treeView = treeView
self.vsplitter = vsplitter
self.hsplitter = QtWidgets.QSplitter(QtCore.Qt.Orientation(0))
self.vsplitter.splitterMoved.connect(self.scaleImgV)
self.hsplitter.splitterMoved.connect(self.scaleImgH)
self.layout.addWidget(self.hsplitter)
self._sizeDefined = False
self.setLayout(self.layout)
self.img = QtWidgets.QLabel()
self.text = QtWidgets.QTextEdit()
self.text.setReadOnly(True)
self.hsplitter.addWidget(self.img)
self.hsplitter.addWidget(self.text)
btnStart = QtWidgets.QPushButton('start')
self._btnDebug = QtWidgets.QCheckBox('debug mode')
#labelOpen = QtWidgets.QLabel('open/edit')
openBox = QtWidgets.QGroupBox('open/edit')
openBox.setAlignment(QtCore.Qt.AlignHCenter)
btnCode = QtWidgets.QPushButton('startscript')
btnActivities = QtWidgets.QPushButton('activities')
btnLogs = QtWidgets.QPushButton('logs')
btnStart.clicked.connect(self._startPYZ)
btnCode.clicked.connect(self._treeView.editStartScriptInSelected)
lBtn = QtWidgets.QHBoxLayout()
lStart = QtWidgets.QVBoxLayout()
lOpen = QtWidgets.QHBoxLayout()
# lOpen.addWidget(openBox)
openBox.setLayout(lOpen)
lBtn.addLayout(lStart)
lBtn.addWidget(openBox)
lStart.addWidget(btnStart)
lStart.addWidget(self._btnDebug)
#lOpen.addWidget(labelOpen, alignment=QtCore.Qt.AlignCenter)
# lOpenBtn = QtWidgets.QHBoxLayout()
# lOpen.addLayout(lOpenBtn)
lOpen.addWidget(btnCode)
lOpen.addWidget(btnActivities)
lOpen.addWidget(btnLogs)
self.layout.addLayout(lBtn)
self.hide()
def _startPYZ(self):
if self._btnDebug.isChecked():
self._treeView.runInDebugMode()
else:
self._treeView.openProject()
def scaleImgV(self, sizeTreeView, pos):
width = self.vsplitter.sizes()[1] - 30
self.img.setPixmap(QtGui.QPixmap(self.imgpath).scaledToWidth(width))
def scaleImgH(self, sizeTreeView, pos):
height = self.hsplitter.sizes()[0] - 30
self.img.setPixmap(QtGui.QPixmap(self.imgpath).scaledToHeight(height))
def update(self, index):
if self._filesystemmodel.isPyz(index):
(self.imgpath, description_path) = self._filesystemmodel.extractFiles(
index, 'screenshot.png', 'description.html')
# if not self.imgpath:
# self.imgpath = self.filesystemmodel.extractFiles(index,'icon')[0]
# print self.imgpath
if self.imgpath:
if not self._sizeDefined:
self._sizeDefined = True
width = 400
# self.splitter.sizes()[0]*0.5,1)
self.vsplitter.moveSplitter(400, 1)
self.img.setPixmap(
QtGui.QPixmap(
self.imgpath).scaledToWidth(width))
self.img.show()
else:
self.img.hide()
if description_path:
self.text.setText(open(description_path).read())
else:
self.text.setText('<b>No Description found</b>')
self.show()
else:
self.hide()
class _FileSystemModel(QtWidgets.QFileSystemModel):
def __init__(self, view, file_type):
QtWidgets.QFileSystemModel.__init__(self, view)
self.view = view
self.file_type = file_type
self.setReadOnly(False)
self._editedSessions = {}
self._tmp_dir_work = tempfile.mkdtemp('PYZ-launcher')
def isPyz(self, index):
return str(self.fileName(index)).endswith('.%s' % self.file_type)
def extractFiles(self, index, *fnames):
extnames = []
with ZipFile(str(self.filePath(index)), 'r') as myzip:
for name in fnames:
try:
myzip.extract(name, self._tmp_dir_work)
extnames.append(os.path.join(self._tmp_dir_work, name))
except KeyError:
extnames.append(None)
return extnames
# TODO: does not match signature
def data(self, index, role):
"""use zipped icon.png as icon"""
if index.column() == 0 and role == QtCore.Qt.DecorationRole:
if self.isPyz(index):
with ZipFile(str(self.filePath(index)), 'r') as myzip:
# print myzip.namelist()
try:
myzip.extract('icon', self._tmp_dir_work)
p = os.path.join(self._tmp_dir_work, 'icon')
return QtGui.QIcon(p)
except KeyError:
pass
return super(_FileSystemModel, self).data(index, role)
def editStartScript(self, index):
"""open, edit, replace __main__.py"""
f = str(self.fileName(index))
if f.endswith('.%s' % self.file_type):
zipname = str(self.filePath(index))
with ZipFile(zipname, 'a') as myzip:
# extract+save script in tmp-dir:
myzip.extract('__main__.py', self._tmp_dir_work)
tempfilename = f[:-4]
tempfilepath = os.path.join(self._tmp_dir_work, tempfilename)
os.rename(
os.path.join(
self._tmp_dir_work,
'__main__.py'),
tempfilepath)
self.openTxt(tempfilepath)
self._editedSessions[index] = (
zipname, self._tmp_dir_work, tempfilename)
def openTxt(self, path):
# open and editor (depending on platform):
if sys.platform.startswith('darwin'):
subprocess.call(('open', path))
elif os.name == 'nt':
os.startfile(path)
elif os.name == 'posix':
subprocess.call(('xdg-open', path))
def updateStartStript(self, index):
if index in self._editedSessions:
zipname, dirname, tempfilename = self._editedSessions[index]
tempfilepath = os.path.join(dirname, tempfilename)
# print dirname, tempfilename
if os.path.exists(tempfilepath):
print("adopt changed startScript '%s'" % tempfilename)
with ZipFile(zipname, 'a') as myzip:
myzip.write(tempfilepath, '__main__.py')
os.remove(tempfilepath)
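# Note on the start-script editing round trip implemented above: __main__.py is
# extracted from the archive into a temp dir by editStartScript, renamed after
# the archive, opened with the platform editor via openTxt, and written back
# into the archive by updateStartStript the next time the project is launched.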
if __name__ == '__main__':
app = QtWidgets.QApplication([])
a = Launcher()
a.show()
sys.exit(app.exec_())
|
gpl-3.0
| -3,223,180,019,371,425,300
| 35.950172
| 101
| 0.568841
| false
| 4.123786
| true
| false
| false
|
CospanDesign/nysa
|
nysa/tools/nysa_cli.py
|
1
|
7013
|
#! /usr/bin/python
#Distributed under the MIT license.
#Copyright (c) 2014 Dave McCoy (dave.mccoy@cospandesign.com)
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
#of the Software, and to permit persons to whom the Software is furnished to do
#so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import os
import argparse
import collections
from nysa.common import status
import image_builder
import nysa_utils
import device_list
import generate_slave
import list_boards
import reset_board
import ping_board
import board_programmed
import program_board
import upload_board
import list_platforms
import sdb_viewer
import init
import install_platform
import install_verilog_modules
import install_examples
import nysa_status
import nysa_paths
from completer_extractor import completer_extractor as ce
__author__ = "dave.mccoy@cospandesign.com (Dave McCoy)"
SCRIPT_NAME = os.path.basename(__file__)
DESCRIPTION = "Nysa Tool"
COMPLETER_EXTRACTOR = False
TEMP_BASH_COMPLETER_FILEPATH = "nsya"
EPILOG = "Enter the toolname with a -h to find help about that specific tool\n"
TYPE_DICT = collections.OrderedDict()
TYPE_DICT["cbuilder"] = "Functions to help create code to go into platforms"
TYPE_DICT["ibuilder"] = "Functions to generate an entire image (or binary) to be downloaded into a platform"
TYPE_DICT["host"] = "Functions to view and control boards"
TYPE_DICT["utility"] = "Functions to update and/or upgrade the nysa tool including adding new platforms and verilog packages"
TOOL_DICT = collections.OrderedDict([
(generate_slave.NAME,{
"type": "cbuilder",
"module": generate_slave,
"tool": generate_slave.generate_slave
}),
(device_list.NAME,{
"type": "cbuilder",
"module": device_list,
"tool": device_list.device_list
}),
(image_builder.NAME,{
"type": "ibuilder",
"module": image_builder,
"tool": image_builder.image_builder
}),
(reset_board.NAME,{
"type": "host",
"module": reset_board,
"tool": reset_board.reset_board
}),
(ping_board.NAME,{
"type": "host",
"module": ping_board,
"tool": ping_board.ping_board
}),
(board_programmed.NAME,{
"type": "host",
"module": board_programmed,
"tool": board_programmed.board_programmed
}),
(program_board.NAME,{
"type": "host",
"module": program_board,
"tool": program_board.program_board
}),
(upload_board.NAME,{
"type": "host",
"module": upload_board,
"tool": upload_board.upload_board
}),
(sdb_viewer.NAME,{
"type": "host",
"module": sdb_viewer,
"tool": sdb_viewer.view_sdb
}),
(init.NAME,{
"type": "utility",
"module": init,
"tool": init.init
}),
(nysa_utils.NAME,{
"type": "utility",
"module": nysa_utils,
"tool": nysa_utils.nysa_utils
}),
(list_boards.NAME,{
"type": "utility",
"module": list_boards,
"tool": list_boards.list_boards
}),
(list_platforms.NAME,{
"type": "utility",
"module": list_platforms,
"tool": list_platforms.list_platforms
}),
(install_platform.NAME,{
"type": "utility",
"module": install_platform,
"tool": install_platform.install
}),
(install_verilog_modules.NAME,{
"type": "utility",
"module": install_verilog_modules,
"tool": install_verilog_modules.install
}),
(install_examples.NAME,{
"type": "utility",
"module": install_examples,
"tool": install_examples.install
}),
(nysa_status.NAME,{
"type": "utility",
"module": nysa_status,
"tool": nysa_status.nysa_status
}),
(nysa_paths.NAME,{
"type": "utility",
"module": nysa_paths,
"tool": nysa_paths.nysa_paths
})
])
def update_epilog():
global EPILOG
tool_type_dict = collections.OrderedDict()
for type_d in TYPE_DICT:
tool_type_dict[type_d] = {}
tool_type_dict[type_d]["description"] = TYPE_DICT[type_d]
tool_type_dict[type_d]["tools"] = []
for tool in TOOL_DICT:
tool_type_dict[TOOL_DICT[tool]["type"]]["tools"].append(tool)
EPILOG += "\n"
EPILOG += "Tools:\n\n"
for tool_type in tool_type_dict:
EPILOG += "{0:25}{1}\n\n".format(tool_type, tool_type_dict[tool_type]["description"])
for tool in tool_type_dict[tool_type]["tools"]:
EPILOG += "{0:5}{1:20}{2}\n".format("", tool, TOOL_DICT[tool]["module"].DESCRIPTION)
EPILOG += "\n"
EPILOG += "\n"
def main():
update_epilog()
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=DESCRIPTION,
epilog=EPILOG
)
#Setup the status message
s = status.Status()
s.set_level(status.StatusLevel.INFO)
#Global Flags
parser.add_argument("-v", "--verbose", action='store_true', help="Output verbose information")
parser.add_argument("-d", "--debug", action='store_true', help="Output test debug information")
subparsers = parser.add_subparsers( title = "Tools",
description = "Nysa Tools",
metavar = None,
dest = "tool")
for tool in TOOL_DICT:
p = subparsers.add_parser(tool,
description=TOOL_DICT[tool]["module"].DESCRIPTION,
formatter_class=argparse.RawDescriptionHelpFormatter)
TOOL_DICT[tool]["module"].setup_parser(p)
TOOL_DICT[tool]["parser"] = p
#Parse the arguments
if COMPLETER_EXTRACTOR:
ce(parser, TEMP_BASH_COMPLETER_FILEPATH)
return
args = parser.parse_args()
if args.debug:
s.set_level(status.StatusLevel.DEBUG)
if args.verbose:
s.set_level(status.StatusLevel.VERBOSE)
#print "args: %s" % str(args)
#print "args dict: %s" % str(dir(args))
TOOL_DICT[args.tool]["tool"](args, s)
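# Invocation sketch (assuming this module is wired up as the "nysa" console
# entry point; <tool> is any key of TOOL_DICT and -v/-d are the global flags
# defined above):
#   nysa -v <tool> [tool-specific options]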
|
mit
| 2,666,145,066,565,960,000
| 28.970085
| 125
| 0.62541
| false
| 3.631797
| false
| false
| false
|
smyrman/django-jquery-widgets
|
jquery_widgets/admin.py
|
1
|
3264
|
# -*- coding: utf-8 -*-
# Based on code from: Jannis Leidal, 2008 (http://jannisleidel.com/),
# Copyright (C) 2010: Sindre Røkenes Myren,
# This file is part of KiKrit which is distributed under GPLv3. See the file
# COPYING.txt for more details.
import operator
from django.db import models
from django.http import HttpResponse, HttpResponseNotFound
from django.contrib import admin
from django.contrib.admin.options import BaseModelAdmin
from django.utils.encoding import smart_str
from jquery_widgets.widgets import *
__all__ = ('JQWAdminMixin', 'ExtendedModelAdmin')
class JQWAdminMixin(object):
"""Enables you to configure jQury UI widgets in the admin.
jqw_autocomplete_fields
=======================
For fields of type 'ForeignKey' and 'ManyToMany', you can configure the
'jqw_autocomplete_fields' with entries of type::
'<field_name>' : ('<lookup_field1>', '<lookup_field2>'),
or::
'<field_name>' : JQWAdminMixin.LOOKUP_CHOICES,
For any other field type where you have configured 'choices', you may add
entries of the latter type only.
Example
-------
::
jqw_autocomplete_fields = {
'user': ('username', 'email'),
'group': JQWAdminMixin.LOOKUP_CHOICES,
}
jqw_radio_fields
================
WARNING: currently does not work well in the admin!
Any field with a choices attribute can be listed in 'jqw_radio_fields'
with entries of type::
'<field_name>': <alignment>,
Note that this syntax is identical to the existing ModelAdmin's
'radio_fields'. Also note that currently, the <alignment> parameter is
ignored.
Example
-------
::
jqw_radio_fields = {
'gender': JQWAdminMixin.HORIZONTAL
}
"""
LOOKUP_CHOICES = 1
HORIZONTAL = admin.HORIZONTAL
VERTICAL = admin.VERTICAL
def formfield_for_dbfield(self, db_field, **kwargs):
if db_field.name in self.jqw_autocomplete_fields:
lookup = self.jqw_autocomplete_fields[db_field.name]
if lookup == self.LOOKUP_CHOICES:
kwargs['widget'] = JQWAutocompleteSelect(
choices=db_field.get_choices(include_blank=False),
theme='ui-admin',
#theme=settings.JQUERY_UI_THEME['admin'],
use_admin_icons=True,
)
elif isinstance(db_field, models.ForeignKey):
kwargs['widget'] = JQWAutocompleteFKSelect(
rel=db_field.rel,
lookup_fields=self.jqw_autocomplete_fields[db_field.name],
theme='ui-admin',
#theme=settings.JQUERY_UI_THEME['admin'],
use_admin_icons=True,
)
elif isinstance(db_field, models.ManyToManyField):
# FIXME
pass
elif db_field.name in self.jqw_radio_fields:
align = self.jqw_radio_fields[db_field.name]
kwargs['widget'] = JQWRadioSelect(
theme='ui-admin',
#theme=settings.JQUERY_UI_THEME['admin'],
)
return BaseModelAdmin.formfield_for_dbfield(self, db_field, **kwargs)
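# A minimal usage sketch (illustrative only -- "Person", "owner" and "gender"
# are hypothetical model/field names, not part of this package):
#
#   class PersonAdmin(JQWAdminMixin, admin.ModelAdmin):
#       jqw_autocomplete_fields = {'owner': ('username', 'email')}
#       jqw_radio_fields = {'gender': JQWAdminMixin.HORIZONTAL}
#
#   admin.site.register(Person, PersonAdmin)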
#### Classes kept for backward compatibility only ###
class ExtendedModelAdmin(JQWAdminMixin, admin.ModelAdmin):
def formfield_for_dbfield(self, db_field, **kwargs):
# 'related_search_fields' has been deprecated in favour of
# 'jqw_autocomplete_fields'.
if hasattr(self, "related_search_fields"):
self.jqw_autocomplete_fields = self.related_search_fields
return super(ExtendedModelAdmin, self).formfield_for_dbfield(db_field,
**kwargs)
|
mit
| -4,657,023,323,136,276,000
| 28.133929
| 75
| 0.703953
| false
| 3.174125
| false
| false
| false
|
openstack/os-win
|
os_win/utils/network/networkutils.py
|
1
|
42102
|
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility class for network related operations.
Based on the "root/virtualization/v2" namespace available starting with
Hyper-V Server / Windows Server 2012.
"""
import functools
import re
from eventlet import patcher
from eventlet import tpool
from oslo_log import log as logging
from oslo_utils import units
import six
from os_win._i18n import _
from os_win import conf
from os_win import constants
from os_win import exceptions
from os_win.utils import _wqlutils
from os_win.utils import baseutils
from os_win.utils import jobutils
CONF = conf.CONF
LOG = logging.getLogger(__name__)
_PORT_PROFILE_ATTR_MAP = {
"profile_id": "ProfileId",
"profile_data": "ProfileData",
"profile_name": "ProfileName",
"net_cfg_instance_id": "NetCfgInstanceId",
"cdn_label_id": "CdnLabelId",
"cdn_label_string": "CdnLabelString",
"vendor_id": "VendorId",
"vendor_name": "VendorName",
}
class NetworkUtils(baseutils.BaseUtilsVirt):
EVENT_TYPE_CREATE = "__InstanceCreationEvent"
EVENT_TYPE_DELETE = "__InstanceDeletionEvent"
_VNIC_SET_DATA = 'Msvm_SyntheticEthernetPortSettingData'
_EXTERNAL_PORT = 'Msvm_ExternalEthernetPort'
_ETHERNET_SWITCH_PORT = 'Msvm_EthernetSwitchPort'
_PORT_ALLOC_SET_DATA = 'Msvm_EthernetPortAllocationSettingData'
_PORT_VLAN_SET_DATA = 'Msvm_EthernetSwitchPortVlanSettingData'
_PORT_PROFILE_SET_DATA = 'Msvm_EthernetSwitchPortProfileSettingData'
_PORT_SECURITY_SET_DATA = 'Msvm_EthernetSwitchPortSecuritySettingData'
_PORT_HW_OFFLOAD_SET_DATA = 'Msvm_EthernetSwitchPortOffloadSettingData'
_PORT_ALLOC_ACL_SET_DATA = 'Msvm_EthernetSwitchPortAclSettingData'
_PORT_BANDWIDTH_SET_DATA = 'Msvm_EthernetSwitchPortBandwidthSettingData'
_PORT_EXT_ACL_SET_DATA = _PORT_ALLOC_ACL_SET_DATA
_LAN_ENDPOINT = 'Msvm_LANEndpoint'
_STATE_DISABLED = 3
_VIRTUAL_SYSTEM_SETTING_DATA = 'Msvm_VirtualSystemSettingData'
_VM_SUMMARY_ENABLED_STATE = 100
_HYPERV_VM_STATE_ENABLED = 2
_OFFLOAD_ENABLED = 100
_OFFLOAD_DISABLED = 0
_ACL_DIR_IN = 1
_ACL_DIR_OUT = 2
_ACL_TYPE_IPV4 = 2
_ACL_TYPE_IPV6 = 3
_ACL_ACTION_ALLOW = 1
_ACL_ACTION_DENY = 2
_ACL_ACTION_METER = 3
_ACL_APPLICABILITY_LOCAL = 1
_ACL_APPLICABILITY_REMOTE = 2
_ACL_DEFAULT = 'ANY'
_IPV4_ANY = '0.0.0.0/0'
_IPV6_ANY = '::/0'
_TCP_PROTOCOL = 'tcp'
_UDP_PROTOCOL = 'udp'
_ICMP_PROTOCOL = '1'
_ICMPV6_PROTOCOL = '58'
_MAX_WEIGHT = 65500
# 2 directions x 2 address types = 4 ACLs
_REJECT_ACLS_COUNT = 4
_VNIC_LISTENER_TIMEOUT_MS = 2000
_switches = {}
_switch_ports = {}
_vlan_sds = {}
_profile_sds = {}
_hw_offload_sds = {}
_vsid_sds = {}
_sg_acl_sds = {}
_bandwidth_sds = {}
def __init__(self):
super(NetworkUtils, self).__init__()
self._jobutils = jobutils.JobUtils()
self._enable_cache = CONF.os_win.cache_temporary_wmi_objects
def init_caches(self):
if not self._enable_cache:
LOG.info('WMI caching is disabled.')
return
for vswitch in self._conn.Msvm_VirtualEthernetSwitch():
self._switches[vswitch.ElementName] = vswitch
# map between switch port ID and switch port WMI object.
for port in self._conn.Msvm_EthernetPortAllocationSettingData():
self._switch_ports[port.ElementName] = port
# VLAN and VSID setting data's InstanceID will contain the switch
# port's InstanceID.
switch_port_id_regex = re.compile(
"Microsoft:[0-9A-F-]*\\\\[0-9A-F-]*\\\\[0-9A-F-]",
flags=re.IGNORECASE)
# map between switch port's InstanceID and their Port Profile settings
# data WMI objects.
for profile in self._conn.Msvm_EthernetSwitchPortProfileSettingData():
match = switch_port_id_regex.match(profile.InstanceID)
if match:
self._profile_sds[match.group()] = profile
# map between switch port's InstanceID and their VLAN setting data WMI
# objects.
for vlan_sd in self._conn.Msvm_EthernetSwitchPortVlanSettingData():
match = switch_port_id_regex.match(vlan_sd.InstanceID)
if match:
self._vlan_sds[match.group()] = vlan_sd
# map between switch port's InstanceID and their VSID setting data WMI
# objects.
for vsid_sd in self._conn.Msvm_EthernetSwitchPortSecuritySettingData():
match = switch_port_id_regex.match(vsid_sd.InstanceID)
if match:
self._vsid_sds[match.group()] = vsid_sd
# map between switch port's InstanceID and their bandwidth setting
# data WMI objects.
bandwidths = self._conn.Msvm_EthernetSwitchPortBandwidthSettingData()
for bandwidth_sd in bandwidths:
match = switch_port_id_regex.match(bandwidth_sd.InstanceID)
if match:
self._bandwidth_sds[match.group()] = bandwidth_sd
# map between switch port's InstanceID and their HW offload setting
# data WMI objects.
hw_offloads = self._conn.Msvm_EthernetSwitchPortOffloadSettingData()
for hw_offload_sd in hw_offloads:
match = switch_port_id_regex.match(hw_offload_sd.InstanceID)
if match:
self._hw_offload_sds[match.group()] = hw_offload_sd
def update_cache(self):
if not self._enable_cache:
return
# map between switch port ID and switch port WMI object.
self._switch_ports.clear()
for port in self._conn.Msvm_EthernetPortAllocationSettingData():
self._switch_ports[port.ElementName] = port
def clear_port_sg_acls_cache(self, switch_port_name):
self._sg_acl_sds.pop(switch_port_name, None)
def get_vswitch_id(self, vswitch_name):
vswitch = self._get_vswitch(vswitch_name)
return vswitch.Name
def get_vswitch_extensions(self, vswitch_name):
vswitch = self._get_vswitch(vswitch_name)
extensions = self._conn.Msvm_EthernetSwitchExtension(
SystemName=vswitch.Name)
dict_ext_list = [
{'name': ext.ElementName,
'version': ext.Version,
'vendor': ext.Vendor,
'description': ext.Description,
'enabled_state': ext.EnabledState,
'extension_type': ext.ExtensionType}
for ext in extensions]
return dict_ext_list
def get_vswitch_external_network_name(self, vswitch_name):
ext_port = self._get_vswitch_external_port(vswitch_name)
if ext_port:
return ext_port.ElementName
def _get_vswitch(self, vswitch_name):
if vswitch_name in self._switches:
return self._switches[vswitch_name]
vswitch = self._conn.Msvm_VirtualEthernetSwitch(
ElementName=vswitch_name)
if not vswitch:
raise exceptions.HyperVvSwitchNotFound(vswitch_name=vswitch_name)
if self._enable_cache:
self._switches[vswitch_name] = vswitch[0]
return vswitch[0]
def _get_vswitch_external_port(self, vswitch_name):
vswitch = self._get_vswitch(vswitch_name)
ext_ports = self._conn.Msvm_ExternalEthernetPort()
for ext_port in ext_ports:
lan_endpoint_assoc_list = (
self._conn.Msvm_EthernetDeviceSAPImplementation(
Antecedent=ext_port.path_()))
if lan_endpoint_assoc_list:
lan_endpoint_assoc_list = self._conn.Msvm_ActiveConnection(
Dependent=lan_endpoint_assoc_list[0].Dependent.path_())
if lan_endpoint_assoc_list:
lan_endpoint = lan_endpoint_assoc_list[0].Antecedent
if lan_endpoint.SystemName == vswitch.Name:
return ext_port
def vswitch_port_needed(self):
return False
def get_switch_ports(self, vswitch_name):
vswitch = self._get_vswitch(vswitch_name)
vswitch_ports = self._conn.Msvm_EthernetSwitchPort(
SystemName=vswitch.Name)
return set(p.Name for p in vswitch_ports)
def get_port_by_id(self, port_id, vswitch_name):
vswitch = self._get_vswitch(vswitch_name)
switch_ports = self._conn.Msvm_EthernetSwitchPort(
SystemName=vswitch.Name)
for switch_port in switch_ports:
if (switch_port.ElementName == port_id):
return switch_port
def vnic_port_exists(self, port_id):
try:
self._get_vnic_settings(port_id)
except Exception:
return False
return True
def get_vnic_ids(self):
return set(
p.ElementName
for p in self._conn.Msvm_SyntheticEthernetPortSettingData()
if p.ElementName is not None)
def get_vnic_mac_address(self, switch_port_name):
vnic = self._get_vnic_settings(switch_port_name)
return vnic.Address
def _get_vnic_settings(self, vnic_name):
vnic_settings = self._conn.Msvm_SyntheticEthernetPortSettingData(
ElementName=vnic_name)
if not vnic_settings:
raise exceptions.HyperVvNicNotFound(vnic_name=vnic_name)
return vnic_settings[0]
def get_vnic_event_listener(self, event_type):
query = self._get_event_wql_query(cls=self._VNIC_SET_DATA,
event_type=event_type,
timeframe=2)
listener = self._conn.Msvm_SyntheticEthernetPortSettingData.watch_for(
query)
def _poll_events(callback):
if patcher.is_monkey_patched('thread'):
listen = functools.partial(tpool.execute, listener,
self._VNIC_LISTENER_TIMEOUT_MS)
else:
listen = functools.partial(listener,
self._VNIC_LISTENER_TIMEOUT_MS)
while True:
# Retrieve one by one all the events that occurred in
# the checked interval.
try:
event = listen()
if event.ElementName:
callback(event.ElementName)
else:
LOG.warning("Ignoring port event. "
"The port name is missing.")
except exceptions.x_wmi_timed_out:
# no new event published.
pass
return _poll_events
def _get_event_wql_query(self, cls, event_type, timeframe=2, **where):
"""Return a WQL query used for polling WMI events.
:param cls: the Hyper-V class polled for events.
:param event_type: the type of event expected.
:param timeframe: check for events that occurred in
the specified timeframe.
:param where: key-value arguments which are to be included in the
query. For example: like=dict(foo="bar").
"""
like = where.pop('like', {})
like_str = " AND ".join("TargetInstance.%s LIKE '%s%%'" % (k, v)
for k, v in like.items())
like_str = "AND " + like_str if like_str else ""
query = ("SELECT * FROM %(event_type)s WITHIN %(timeframe)s "
"WHERE TargetInstance ISA '%(class)s' %(like)s" % {
'class': cls,
'event_type': event_type,
'like': like_str,
'timeframe': timeframe})
return query
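# Illustrative only: with the class constants defined above, a call like
#   self._get_event_wql_query(cls=self._VNIC_SET_DATA,
#                             event_type=self.EVENT_TYPE_CREATE,
#                             timeframe=2)
# yields roughly:
#   SELECT * FROM __InstanceCreationEvent WITHIN 2
#   WHERE TargetInstance ISA 'Msvm_SyntheticEthernetPortSettingData'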
def connect_vnic_to_vswitch(self, vswitch_name, switch_port_name):
port, found = self._get_switch_port_allocation(
switch_port_name, create=True, expected=False)
if found and port.HostResource and port.HostResource[0]:
# vswitch port already exists and is connected to vswitch.
return
vswitch = self._get_vswitch(vswitch_name)
vnic = self._get_vnic_settings(switch_port_name)
port.HostResource = [vswitch.path_()]
port.Parent = vnic.path_()
if not found:
vm = self._get_vm_from_res_setting_data(vnic)
self._jobutils.add_virt_resource(port, vm)
else:
self._jobutils.modify_virt_resource(port)
def _get_vm_from_res_setting_data(self, res_setting_data):
vmsettings_instance_id = res_setting_data.InstanceID.split('\\')[0]
sd = self._conn.Msvm_VirtualSystemSettingData(
InstanceID=vmsettings_instance_id)
vm = self._conn.Msvm_ComputerSystem(Name=sd[0].ConfigurationID)
return vm[0]
def remove_switch_port(self, switch_port_name, vnic_deleted=False):
"""Removes the switch port."""
sw_port, found = self._get_switch_port_allocation(switch_port_name,
expected=False)
if not sw_port:
# Port not found. It happens when the VM was already deleted.
return
if not vnic_deleted:
try:
self._jobutils.remove_virt_resource(sw_port)
except exceptions.x_wmi:
# port may have already been destroyed by Hyper-V
pass
self._switch_ports.pop(switch_port_name, None)
self._profile_sds.pop(sw_port.InstanceID, None)
self._vlan_sds.pop(sw_port.InstanceID, None)
self._vsid_sds.pop(sw_port.InstanceID, None)
self._bandwidth_sds.pop(sw_port.InstanceID, None)
self._hw_offload_sds.pop(sw_port.InstanceID, None)
def set_vswitch_port_profile_id(self, switch_port_name, profile_id,
profile_data, profile_name, vendor_name,
**kwargs):
"""Sets up the port profile id.
:param switch_port_name: The ElementName of the vSwitch port.
:param profile_id: The profile id to be set for the given switch port.
:param profile_data: Additional data for the Port Profile.
:param profile_name: The name of the Port Profile.
:param net_cfg_instance_id: Unique device identifier of the
sub-interface.
:param cdn_label_id: The CDN Label Id.
:param cdn_label_string: The CDN label string.
:param vendor_id: The id of the Vendor defining the profile.
:param vendor_name: The name of the Vendor defining the profile.
"""
port_alloc = self._get_switch_port_allocation(switch_port_name)[0]
port_profile = self._get_profile_setting_data_from_port_alloc(
port_alloc)
new_port_profile = self._prepare_profile_sd(
profile_id=profile_id, profile_data=profile_data,
profile_name=profile_name, vendor_name=vendor_name, **kwargs)
if port_profile:
# Removing the feature because it cannot be modified
# due to a wmi exception.
self._jobutils.remove_virt_feature(port_profile)
# remove from cache.
self._profile_sds.pop(port_alloc.InstanceID, None)
try:
self._jobutils.add_virt_feature(new_port_profile, port_alloc)
except Exception as ex:
raise exceptions.HyperVException(
'Unable to set port profile settings %(port_profile)s '
'for port %(port)s. Error: %(error)s' %
dict(port_profile=new_port_profile, port=port_alloc, error=ex))
def set_vswitch_port_vlan_id(self, vlan_id=None, switch_port_name=None,
**kwargs):
"""Sets up operation mode, VLAN ID and VLAN trunk for the given port.
:param vlan_id: the VLAN ID to be set for the given switch port.
:param switch_port_name: the ElementName of the vSwitch port.
:param operation_mode: the VLAN operation mode. The acceptable values
are:
os_win.constants.VLAN_MODE_ACCESS, os_win.constants.VLAN_MODE_TRUNK
If not given, VLAN_MODE_ACCESS is used by default.
:param trunk_vlans: an array of VLAN IDs to be set in trunk mode.
:raises AttributeError: if an unsupported operation_mode is given, or
the given operation mode is VLAN_MODE_ACCESS and the given
trunk_vlans is not None.
"""
operation_mode = kwargs.get('operation_mode',
constants.VLAN_MODE_ACCESS)
trunk_vlans = kwargs.get('trunk_vlans')
if operation_mode not in [constants.VLAN_MODE_ACCESS,
constants.VLAN_MODE_TRUNK]:
msg = _('Unsupported VLAN operation mode: %s')
raise AttributeError(msg % operation_mode)
if (operation_mode == constants.VLAN_MODE_ACCESS and
trunk_vlans is not None):
raise AttributeError(_('The given operation mode is ACCESS, '
'cannot set given trunk_vlans.'))
port_alloc = self._get_switch_port_allocation(switch_port_name)[0]
vlan_settings = self._get_vlan_setting_data_from_port_alloc(port_alloc)
if operation_mode == constants.VLAN_MODE_ACCESS:
new_vlan_settings = self._prepare_vlan_sd_access_mode(
vlan_settings, vlan_id)
else:
new_vlan_settings = self._prepare_vlan_sd_trunk_mode(
vlan_settings, vlan_id, trunk_vlans)
if not new_vlan_settings:
# if no object was returned, it means that the VLAN Setting Data
# was already added with the desired attributes.
return
if vlan_settings:
# Removing the feature because it cannot be modified
# due to a wmi exception.
self._jobutils.remove_virt_feature(vlan_settings)
# remove from cache.
self._vlan_sds.pop(port_alloc.InstanceID, None)
self._jobutils.add_virt_feature(new_vlan_settings, port_alloc)
# TODO(claudiub): This will help solve the missing VLAN issue, but it
# comes with a performance cost. The root cause of the problem must
# be solved.
vlan_settings = self._get_vlan_setting_data_from_port_alloc(port_alloc)
if not vlan_settings:
raise exceptions.HyperVException(
_('Port VLAN not found: %s') % switch_port_name)
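# Illustrative call (the switch port name is hypothetical): putting a port
# into trunk mode with native VLAN 100 and tagged VLANs 200/201 would look
# roughly like:
#   netutils.set_vswitch_port_vlan_id(
#       vlan_id=100, switch_port_name='port-0',
#       operation_mode=constants.VLAN_MODE_TRUNK, trunk_vlans=[200, 201])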
def _prepare_profile_sd(self, **kwargs):
profile_id_settings = self._create_default_setting_data(
self._PORT_PROFILE_SET_DATA)
for argument_name, attr_name in _PORT_PROFILE_ATTR_MAP.items():
attribute = kwargs.pop(argument_name, None)
if attribute is None:
continue
setattr(profile_id_settings, attr_name, attribute)
if kwargs:
raise TypeError("Unrecognized attributes %r" % kwargs)
return profile_id_settings
def _prepare_vlan_sd_access_mode(self, vlan_settings, vlan_id):
if vlan_settings:
# the given vlan_id might be None.
vlan_id = vlan_id or vlan_settings.AccessVlanId
if (vlan_settings.OperationMode == constants.VLAN_MODE_ACCESS and
vlan_settings.AccessVlanId == vlan_id):
# VLAN already set to correct value, no need to change it.
return None
vlan_settings = self._create_default_setting_data(
self._PORT_VLAN_SET_DATA)
vlan_settings.AccessVlanId = vlan_id
vlan_settings.OperationMode = constants.VLAN_MODE_ACCESS
return vlan_settings
def _prepare_vlan_sd_trunk_mode(self, vlan_settings, vlan_id, trunk_vlans):
if vlan_settings:
# the given vlan_id might be None.
vlan_id = vlan_id or vlan_settings.NativeVlanId
trunk_vlans = trunk_vlans or vlan_settings.TrunkVlanIdArray or []
trunk_vlans = sorted(trunk_vlans)
if (vlan_settings.OperationMode == constants.VLAN_MODE_TRUNK and
vlan_settings.NativeVlanId == vlan_id and
sorted(vlan_settings.TrunkVlanIdArray) == trunk_vlans):
# VLAN already set to correct value, no need to change it.
return None
vlan_settings = self._create_default_setting_data(
self._PORT_VLAN_SET_DATA)
vlan_settings.NativeVlanId = vlan_id
vlan_settings.TrunkVlanIdArray = trunk_vlans
vlan_settings.OperationMode = constants.VLAN_MODE_TRUNK
return vlan_settings
def set_vswitch_port_vsid(self, vsid, switch_port_name):
self._set_switch_port_security_settings(switch_port_name,
VirtualSubnetId=vsid)
def set_vswitch_port_mac_spoofing(self, switch_port_name, state):
"""Sets the given port's MAC spoofing to the given state.
:param switch_port_name: the name of the port which will have MAC
spoofing set to the given state.
:param state: boolean, if MAC spoofing should be turned on or off.
"""
self._set_switch_port_security_settings(switch_port_name,
AllowMacSpoofing=state)
def _set_switch_port_security_settings(self, switch_port_name, **kwargs):
port_alloc = self._get_switch_port_allocation(switch_port_name)[0]
sec_settings = self._get_security_setting_data_from_port_alloc(
port_alloc)
exists = sec_settings is not None
if exists:
if all(getattr(sec_settings, k) == v for k, v in kwargs.items()):
# All desired properties already properly set. Nothing to do.
return
else:
sec_settings = self._create_default_setting_data(
self._PORT_SECURITY_SET_DATA)
for k, v in kwargs.items():
setattr(sec_settings, k, v)
if exists:
self._jobutils.modify_virt_feature(sec_settings)
else:
self._jobutils.add_virt_feature(sec_settings, port_alloc)
# TODO(claudiub): This will help solve the missing VSID issue, but it
# comes with a performance cost. The root cause of the problem must
# be solved.
sec_settings = self._get_security_setting_data_from_port_alloc(
port_alloc)
if not sec_settings:
raise exceptions.HyperVException(
_('Port Security Settings not found: %s') % switch_port_name)
def set_vswitch_port_sriov(self, switch_port_name, enabled):
"""Enables / Disables SR-IOV for the given port.
:param switch_port_name: the name of the port which will have SR-IOV
enabled or disabled.
:param enabled: boolean, if SR-IOV should be turned on or off.
"""
# TODO(claudiub): We have added a different method that sets all sorts
# of offloading options on a vswitch port, including SR-IOV.
# Remove this method in S.
self.set_vswitch_port_offload(switch_port_name, sriov_enabled=enabled)
def set_vswitch_port_offload(self, switch_port_name, sriov_enabled=None,
iov_queues_requested=None, vmq_enabled=None,
offloaded_sa=None):
"""Enables / Disables different offload options for the given port.
Optional parameters are ignored if they are None.
:param switch_port_name: the name of the port which will have its
offload options changed.
:param sriov_enabled: if SR-IOV should be turned on or off.
:param iov_queues_requested: the number of IOV queues to use. (> 1)
:param vmq_enabled: if VMQ should be turned on or off.
:param offloaded_sa: the number of IPsec SA offloads to use. (> 1)
:raises os_win.exceptions.InvalidParameterValue: if an invalid value
is passed for the iov_queues_requested or offloaded_sa parameters.
"""
if iov_queues_requested is not None and iov_queues_requested < 1:
raise exceptions.InvalidParameterValue(
param_name='iov_queues_requested',
param_value=iov_queues_requested)
if offloaded_sa is not None and offloaded_sa < 1:
raise exceptions.InvalidParameterValue(
param_name='offloaded_sa',
param_value=offloaded_sa)
port_alloc = self._get_switch_port_allocation(switch_port_name)[0]
# NOTE(claudiub): All ports have a HW offload SD.
hw_offload_sd = self._get_hw_offload_sd_from_port_alloc(port_alloc)
sd_changed = False
if sriov_enabled is not None:
desired_state = (self._OFFLOAD_ENABLED if sriov_enabled else
self._OFFLOAD_DISABLED)
if hw_offload_sd.IOVOffloadWeight != desired_state:
hw_offload_sd.IOVOffloadWeight = desired_state
sd_changed = True
if iov_queues_requested is not None:
if hw_offload_sd.IOVQueuePairsRequested != iov_queues_requested:
hw_offload_sd.IOVQueuePairsRequested = iov_queues_requested
sd_changed = True
if vmq_enabled is not None:
desired_state = (self._OFFLOAD_ENABLED if vmq_enabled else
self._OFFLOAD_DISABLED)
if hw_offload_sd.VMQOffloadWeight != desired_state:
hw_offload_sd.VMQOffloadWeight = desired_state
sd_changed = True
if offloaded_sa is not None:
if hw_offload_sd.IPSecOffloadLimit != offloaded_sa:
hw_offload_sd.IPSecOffloadLimit = offloaded_sa
sd_changed = True
# NOTE(claudiub): The HW offload SD can simply be modified. No need to
# remove it and create a new one.
if sd_changed:
self._jobutils.modify_virt_feature(hw_offload_sd)
def _get_profile_setting_data_from_port_alloc(self, port_alloc):
return self._get_setting_data_from_port_alloc(
port_alloc, self._profile_sds, self._PORT_PROFILE_SET_DATA)
def _get_vlan_setting_data_from_port_alloc(self, port_alloc):
return self._get_setting_data_from_port_alloc(
port_alloc, self._vlan_sds, self._PORT_VLAN_SET_DATA)
def _get_security_setting_data_from_port_alloc(self, port_alloc):
return self._get_setting_data_from_port_alloc(
port_alloc, self._vsid_sds, self._PORT_SECURITY_SET_DATA)
def _get_hw_offload_sd_from_port_alloc(self, port_alloc):
return self._get_setting_data_from_port_alloc(
port_alloc, self._hw_offload_sds, self._PORT_HW_OFFLOAD_SET_DATA)
def _get_bandwidth_setting_data_from_port_alloc(self, port_alloc):
return self._get_setting_data_from_port_alloc(
port_alloc, self._bandwidth_sds, self._PORT_BANDWIDTH_SET_DATA)
def _get_setting_data_from_port_alloc(self, port_alloc, cache, data_class):
if port_alloc.InstanceID in cache:
return cache[port_alloc.InstanceID]
setting_data = self._get_first_item(
_wqlutils.get_element_associated_class(
self._conn, data_class,
element_instance_id=port_alloc.InstanceID))
if setting_data and self._enable_cache:
cache[port_alloc.InstanceID] = setting_data
return setting_data
def _get_switch_port_allocation(self, switch_port_name, create=False,
expected=True):
if switch_port_name in self._switch_ports:
return self._switch_ports[switch_port_name], True
switch_port, found = self._get_setting_data(
self._PORT_ALLOC_SET_DATA,
switch_port_name, create)
if found:
# newly created setting data cannot be cached, they do not
# represent real objects yet.
# if it was found, it means that it was not created.
if self._enable_cache:
self._switch_ports[switch_port_name] = switch_port
elif expected:
raise exceptions.HyperVPortNotFoundException(
port_name=switch_port_name)
return switch_port, found
def _get_setting_data(self, class_name, element_name, create=True):
element_name = element_name.replace("'", '"')
q = self._compat_conn.query("SELECT * FROM %(class_name)s WHERE "
"ElementName = '%(element_name)s'" %
{"class_name": class_name,
"element_name": element_name})
data = self._get_first_item(q)
found = data is not None
if not data and create:
data = self._get_default_setting_data(class_name)
data.ElementName = element_name
return data, found
def _get_default_setting_data(self, class_name):
return self._compat_conn.query("SELECT * FROM %s WHERE InstanceID "
"LIKE '%%\\Default'" % class_name)[0]
def _create_default_setting_data(self, class_name):
return getattr(self._compat_conn, class_name).new()
def _get_first_item(self, obj):
if obj:
return obj[0]
def add_metrics_collection_acls(self, switch_port_name):
port = self._get_switch_port_allocation(switch_port_name)[0]
# Add the ACLs only if they don't already exist
acls = _wqlutils.get_element_associated_class(
self._conn, self._PORT_ALLOC_ACL_SET_DATA,
element_instance_id=port.InstanceID)
for acl_type in [self._ACL_TYPE_IPV4, self._ACL_TYPE_IPV6]:
for acl_dir in [self._ACL_DIR_IN, self._ACL_DIR_OUT]:
_acls = self._filter_acls(
acls, self._ACL_ACTION_METER, acl_dir, acl_type)
if not _acls:
acl = self._create_acl(
acl_dir, acl_type, self._ACL_ACTION_METER)
self._jobutils.add_virt_feature(acl, port)
def is_metrics_collection_allowed(self, switch_port_name):
port = self._get_switch_port_allocation(switch_port_name)[0]
if not self._is_port_vm_started(port):
return False
# all 4 meter ACLs must be existent first. (2 x direction)
acls = _wqlutils.get_element_associated_class(
self._conn, self._PORT_ALLOC_ACL_SET_DATA,
element_instance_id=port.InstanceID)
acls = [a for a in acls if a.Action == self._ACL_ACTION_METER]
if len(acls) < 2:
return False
return True
def _is_port_vm_started(self, port):
vmsettings_instance_id = port.InstanceID.split('\\')[0]
vmsettings = self._conn.Msvm_VirtualSystemSettingData(
InstanceID=vmsettings_instance_id)
# See http://msdn.microsoft.com/en-us/library/cc160706%28VS.85%29.aspx
(ret_val, summary_info) = self._vs_man_svc.GetSummaryInformation(
[self._VM_SUMMARY_ENABLED_STATE],
[v.path_() for v in vmsettings])
if ret_val or not summary_info:
raise exceptions.HyperVException(_('Cannot get VM summary data '
'for: %s') % port.ElementName)
return summary_info[0].EnabledState == self._HYPERV_VM_STATE_ENABLED
def create_security_rules(self, switch_port_name, sg_rules):
port = self._get_switch_port_allocation(switch_port_name)[0]
self._bind_security_rules(port, sg_rules)
def remove_security_rules(self, switch_port_name, sg_rules):
port = self._get_switch_port_allocation(switch_port_name)[0]
acls = _wqlutils.get_element_associated_class(
self._conn, self._PORT_EXT_ACL_SET_DATA,
element_instance_id=port.InstanceID)
remove_acls = []
for sg_rule in sg_rules:
filtered_acls = self._filter_security_acls(sg_rule, acls)
remove_acls.extend(filtered_acls)
if remove_acls:
self._jobutils.remove_multiple_virt_features(remove_acls)
# remove the old ACLs from the cache.
new_acls = [a for a in acls if a not in remove_acls]
self._sg_acl_sds[port.ElementName] = new_acls
def remove_all_security_rules(self, switch_port_name):
port = self._get_switch_port_allocation(switch_port_name)[0]
acls = _wqlutils.get_element_associated_class(
self._conn, self._PORT_EXT_ACL_SET_DATA,
element_instance_id=port.InstanceID)
filtered_acls = [a for a in acls if
a.Action != self._ACL_ACTION_METER]
if filtered_acls:
self._jobutils.remove_multiple_virt_features(filtered_acls)
# clear the cache.
self._sg_acl_sds[port.ElementName] = []
def _bind_security_rules(self, port, sg_rules):
acls = _wqlutils.get_element_associated_class(
self._conn, self._PORT_EXT_ACL_SET_DATA,
element_instance_id=port.InstanceID)
# Add each ACL only if it doesn't already exist.
add_acls = []
processed_sg_rules = []
weights = self._get_new_weights(sg_rules, acls)
index = 0
for sg_rule in sg_rules:
filtered_acls = self._filter_security_acls(sg_rule, acls)
if filtered_acls:
# ACL already exists.
continue
acl = self._create_security_acl(sg_rule, weights[index])
add_acls.append(acl)
index += 1
# append sg_rule to the acls list, to make sure that the same rule
# is not processed twice.
processed_sg_rules.append(sg_rule)
if add_acls:
self._jobutils.add_multiple_virt_features(add_acls, port)
# caching the Security Group Rules that have been processed and
# added to the port. The list should only be used to check the
# existence of rules, nothing else.
acls.extend(processed_sg_rules)
def _get_port_security_acls(self, port):
"""Returns a mutable list of Security Group Rule objects.
Returns the list of Security Group Rule objects from the cache,
otherwise it fetches and caches from the port's associated class.
"""
if port.ElementName in self._sg_acl_sds:
return self._sg_acl_sds[port.ElementName]
acls = _wqlutils.get_element_associated_class(
self._conn, self._PORT_EXT_ACL_SET_DATA,
element_instance_id=port.InstanceID)
if self._enable_cache:
self._sg_acl_sds[port.ElementName] = acls
return acls
def _create_acl(self, direction, acl_type, action):
acl = self._create_default_setting_data(self._PORT_ALLOC_ACL_SET_DATA)
acl.set(Direction=direction,
AclType=acl_type,
Action=action,
Applicability=self._ACL_APPLICABILITY_LOCAL)
return acl
def _create_security_acl(self, sg_rule, weight):
# Acl instance can be created new each time, the object should be
# of type ExtendedEthernetSettingsData.
acl = self._create_default_setting_data(self._PORT_EXT_ACL_SET_DATA)
acl.set(**sg_rule.to_dict())
return acl
def _filter_acls(self, acls, action, direction, acl_type, remote_addr=""):
return [v for v in acls
if v.Action == action and
v.Direction == direction and
v.AclType == acl_type and
v.RemoteAddress == remote_addr]
def _filter_security_acls(self, sg_rule, acls):
return [a for a in acls if sg_rule == a]
def _get_new_weights(self, sg_rules, existent_acls):
"""Computes the weights needed for given sg_rules.
:param sg_rules: ACLs to be added. They must have the same Action.
:param existent_acls: ACLs already bound to a switch port.
:return: list of weights which will be used to create ACLs. List will
have the recommended order for sg_rules' Action.
"""
return [0] * len(sg_rules)
def set_port_qos_rule(self, port_id, qos_rule):
"""Sets the QoS rule for the given port.
:param port_id: the port's ID to which the QoS rule will be applied to.
:param qos_rule: a dictionary containing the following keys:
min_kbps, max_kbps, max_burst_kbps, max_burst_size_kb.
:raises exceptions.HyperVInvalidException: if
- min_kbps is smaller than 10MB.
- max_kbps is smaller than min_kbps.
- max_burst_kbps is smaller than max_kbps.
:raises exceptions.HyperVException: if the QoS rule cannot be set.
"""
# Hyper-V stores bandwidth limits in bytes.
min_bps = qos_rule.get("min_kbps", 0) * units.Ki
max_bps = qos_rule.get("max_kbps", 0) * units.Ki
max_burst_bps = qos_rule.get("max_burst_kbps", 0) * units.Ki
max_burst_sz = qos_rule.get("max_burst_size_kb", 0) * units.Ki
if not (min_bps or max_bps or max_burst_bps or max_burst_sz):
# no limits need to be set
return
if min_bps and min_bps < 10 * units.Mi:
raise exceptions.InvalidParameterValue(
param_name="min_kbps", param_value=min_bps)
if max_bps and max_bps < min_bps:
raise exceptions.InvalidParameterValue(
param_name="max_kbps", param_value=max_bps)
if max_burst_bps and max_burst_bps < max_bps:
raise exceptions.InvalidParameterValue(
param_name="max_burst_kbps", param_value=max_burst_bps)
port_alloc = self._get_switch_port_allocation(port_id)[0]
bandwidth = self._get_bandwidth_setting_data_from_port_alloc(
port_alloc)
if bandwidth:
# Removing the feature because it cannot be modified
# due to a wmi exception.
self._jobutils.remove_virt_feature(bandwidth)
# remove from cache.
self._bandwidth_sds.pop(port_alloc.InstanceID, None)
bandwidth = self._get_default_setting_data(
self._PORT_BANDWIDTH_SET_DATA)
bandwidth.Reservation = min_bps
bandwidth.Limit = max_bps
bandwidth.BurstLimit = max_burst_bps
bandwidth.BurstSize = max_burst_sz
try:
self._jobutils.add_virt_feature(bandwidth, port_alloc)
except Exception as ex:
if '0x80070057' in six.text_type(ex):
raise exceptions.InvalidParameterValue(
param_name="qos_rule", param_value=qos_rule)
raise exceptions.HyperVException(
'Unable to set qos rule %(qos_rule)s for port %(port)s. '
'Error: %(error)s' %
dict(qos_rule=qos_rule, port=port_alloc, error=ex))
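# Illustrative call (the port id is hypothetical); values are in kbps/kB as
# documented above, and the reservation must be at least 10 MB/s to pass the
# validation in this method:
#   netutils.set_port_qos_rule(
#       'port-0', {'min_kbps': 10240, 'max_kbps': 20480,
#                  'max_burst_kbps': 20480, 'max_burst_size_kb': 1024})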
def remove_port_qos_rule(self, port_id):
"""Removes the QoS rule from the given port.
:param port_id: the port's ID from which the QoS rule will be removed.
"""
port_alloc = self._get_switch_port_allocation(port_id)[0]
bandwidth = self._get_bandwidth_setting_data_from_port_alloc(
port_alloc)
if bandwidth:
self._jobutils.remove_virt_feature(bandwidth)
# remove from cache.
self._bandwidth_sds.pop(port_alloc.InstanceID, None)
class NetworkUtilsR2(NetworkUtils):
_PORT_EXT_ACL_SET_DATA = 'Msvm_EthernetSwitchPortExtendedAclSettingData'
_MAX_WEIGHT = 65500
# 2 directions x 2 address types x 4 protocols = 16 ACLs
_REJECT_ACLS_COUNT = 16
def _create_security_acl(self, sg_rule, weight):
acl = super(NetworkUtilsR2, self)._create_security_acl(sg_rule,
weight)
acl.Weight = weight
sg_rule.Weight = weight
return acl
def _get_new_weights(self, sg_rules, existent_acls):
sg_rule = sg_rules[0]
num_rules = len(sg_rules)
existent_acls = [a for a in existent_acls
if a.Action == sg_rule.Action]
if not existent_acls:
if sg_rule.Action == self._ACL_ACTION_DENY:
return list(range(1, 1 + num_rules))
else:
return list(range(self._MAX_WEIGHT - 1,
self._MAX_WEIGHT - 1 - num_rules, - 1))
# there are existent ACLs.
weights = [a.Weight for a in existent_acls]
if sg_rule.Action == self._ACL_ACTION_DENY:
return [i for i in list(range(1, self._REJECT_ACLS_COUNT + 1))
if i not in weights][:num_rules]
min_weight = min(weights)
last_weight = min_weight - num_rules - 1
if last_weight > self._REJECT_ACLS_COUNT:
return list(range(min_weight - 1, last_weight, - 1))
# not enough weights. Must search for available weights.
# if this is the case, num_rules is a small number.
current_weight = self._MAX_WEIGHT - 1
new_weights = []
for i in list(range(num_rules)):
while current_weight in weights:
current_weight -= 1
new_weights.append(current_weight)
return new_weights
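# Worked example of the weight policy above, assuming no pre-existing ACLs:
#   3 DENY rules  -> weights [1, 2, 3] (low weights, counted upward)
#   3 ALLOW rules -> weights [65499, 65498, 65497] (just under _MAX_WEIGHT,
#                    counted downward so deny rules keep the low range)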
|
apache-2.0
| 3,369,183,737,832,306,000
| 40.155425
| 79
| 0.600898
| false
| 3.844932
| false
| false
| false
|
google-research/language
|
language/emql/cm_sketch.py
|
1
|
5160
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Implementation of Count-Min Sketch.
Implements a count-min sketch module that can create count-min sketches, check
membership of an element, and compute the intersection and union of the
sketches of two sets.
"""
from absl import app
from absl import flags
import numpy as np
from tqdm import tqdm
FLAGS = flags.FLAGS
np.random.seed(0)
class CountMinContext(object):
"""Definition of countmin sketch context.
A CountMinContext hold the information needed to construct a count-min
sketch. It caches the hash values of observed elements.
"""
def __init__(self, width, depth, n = -1):
"""Initialize the count-min sketch context.
Pre-compute the hashes of all elements if the number of elements is
known (n>0).
Args:
width: width of the cm-sketch
depth: depth of the cm-sketch
n: number of elements, -1 if it's unknown
"""
self.width = width
self.depth = depth
self.cache = dict() # cache of hash value to a list of ids
if n != -1:
for e in tqdm(range(n)):
e = str(e)
self.cache[e] = [self._hash(e, i) for i in range(self.depth)]
def _hash(self, x, i):
"""Get the i'th hash value of element x.
Args:
x: name or id in string
i: the i'th hash function
Returns:
hash result
"""
assert isinstance(x, str)
assert isinstance(i, int)
assert i >= 0 and i < self.depth
hash_val = hash((i, x))
return hash_val % self.width
def get_hashes(self, x):
"""Get the hash values of x.
Each element is hashed d times, where d is the depth of the count-min
sketch specified in the constructor of CountMinContext. This function
returns d hash values of element x.
Args:
x: name or id in string
Returns:
a list of hash values with the length of depth
"""
x = str(x)
if x not in self.cache:
self.cache[x] = [self._hash(x, i) for i in range(self.depth)]
return self.cache[x]
def get_sketch(self, xs = None):
"""Return a sketch for set xs (all zeros if xs not specified).
This function takes a set of elements xs, takes their hash values, and
sets the corresponding positions to 1.0. It returns a 2D numpy array
with the width and depth declared in the constructor of CountMinContext.
Values at unassigned positions remain 0.
Args:
xs: a set of name or id in string
Returns:
a sketch np.array()
"""
sketch = np.zeros((self.depth, self.width), dtype=np.float32)
if xs is not None:
self.add_set(sketch, xs)
return sketch
def add(self, sketch, x):
"""Add an element to the sketch.
Args:
sketch: sketch to add x to
x: name or id in string
"""
assert isinstance(x, str)
assert (self.depth, self.width) == sketch.shape
if x not in self.cache:
self.cache[x] = [self._hash(x, i) for i in range(self.depth)]
for i in range(self.depth):
sketch[i, self.cache[x][i]] += 1.0
def add_set(self, sketch, xs):
"""Add a set of elements to the sketch.
Args:
sketch: sketch to add xs to
xs: a set of name or id in string
"""
assert (self.depth, self.width) == sketch.shape
for x in xs:
x = str(x)
if not self.contain(sketch, x):
self.add(sketch, x)
def contain(self, sketch, x):
"""Check if the sketch contains x.
Args:
sketch: sketch to add xs to
x: name or id in string
Returns:
True or False
"""
assert (self.depth, self.width) == sketch.shape
x = str(x)
if x not in self.cache:
self.cache[x] = [self._hash(x, i) for i in range(self.depth)]
for i in range(self.depth):
if sketch[i, self.cache[x][i]] == 0.0:
return False
return True
def intersection(self, sk1, sk2):
"""Intersect two sketches.
Args:
sk1: first sketch
sk2: second sketch
Returns:
a countmin sketch for intersection
"""
assert sk1.shape == sk2.shape
assert (self.depth, self.width) == sk1.shape
sk_intersection = sk1 * sk2
return sk_intersection
def union(self, sk1, sk2):
"""Union two sketches.
Args:
sk1: first sketch
sk2: second sketch
Returns:
a countmin sketch for union
"""
assert sk1.shape == sk2.shape
assert (self.depth, self.width) == sk1.shape
sk_union = sk1 + sk2
return sk_union
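# A minimal usage sketch (values are illustrative):
#   ctx = CountMinContext(width=2000, depth=5)
#   sk_a = ctx.get_sketch({'1', '2', '3'})
#   sk_b = ctx.get_sketch({'2', '3', '4'})
#   both = ctx.intersection(sk_a, sk_b)
#   ctx.contain(both, '2')  # True
#   ctx.contain(both, '1')  # False, barring count-min collisions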
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
if __name__ == '__main__':
app.run(main)
|
apache-2.0
| 4,624,487,439,419,646,000
| 25.326531
| 76
| 0.642636
| false
| 3.519782
| false
| false
| false
|
AmericanResearchInstitute/ari-backup
|
ari_backup/__init__.py
|
1
|
16849
|
import os
import settings
import subprocess
import shlex
from logger import Logger
'''Wrapper around rdiff-backup
This module provides facilites for centrally managing a large set of
rdiff-backup backup jobs. Backup job management is built around common tools
like cron, run-parts, and xargs. The base features include:
* central configuration file
* backup jobs for local and remote hosts
* configurable job parallelization
* ability to run arbitrary commands locally or remotely before and after
backup jobs (something especially handy for preparing databases pre-backup)
* logging to syslog
The base features are designed to be extended and we include an extension to
manage the setup and tear down of LVM snapshots for backup.
'''
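# A minimal usage sketch (label, hostname and retention are hypothetical;
# settings.py must provide remote_user, ssh_path, rdiff_backup_path, etc.):
#
#   backup = ARIBackup(label='web1-etc', source_hostname='web1',
#                      remove_older_than_timespec='30D')
#   backup.include_dir_list.append('/etc')
#   backup.run_backup()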
class ARIBackup(object):
'''Base class includes core features and basic rdiff-backup functionality
This class can be used if all that is needed is to leverage the basic
rdiff-backup features. The pre and post hook functionality as well as
command execution is also part of this class.
'''
def __init__(self, label, source_hostname, remove_older_than_timespec=None):
# The name of the backup job (this will be the name of the directory in the backup store
# that has the data).
self.label = label
# This is the host that has the source data.
self.source_hostname = source_hostname
# We'll bring in the remote_user from our settings, but it is a var
# that the end-user is welcome to override.
self.remote_user = settings.remote_user
# setup logging
self.logger = Logger('ARIBackup ({label})'.format(label=label), settings.debug_logging)
# Include nothing by default
self.include_dir_list = []
self.include_file_list = []
# Exclude nothing by default
# We'll put the '**' exclude on the end of the arg_list later
self.exclude_dir_list = []
self.exclude_file_list = []
# initialize hook lists
self.pre_job_hook_list = []
self.post_job_hook_list = []
if remove_older_than_timespec != None:
self.post_job_hook_list.append((
self._remove_older_than,
{'timespec': remove_older_than_timespec}))
def _process_pre_job_hooks(self):
self.logger.info('processing pre-job hooks...')
for task in self.pre_job_hook_list:
# Let's do some assignments for readability
hook = task[0]
kwargs = task[1]
hook(**kwargs)
def _process_post_job_hooks(self, error_case):
if error_case:
self.logger.error('processing post-job hooks for error case...')
else:
self.logger.info('processing post-job hooks...')
for task in self.post_job_hook_list:
# Let's do some assignments for readability
hook = task[0]
kwargs = task[1]
kwargs.update({'error_case': error_case})
hook(**kwargs)
def _run_command(self, command, host='localhost'):
'''Runs an arbitrary command on host.
Given an input string or list, we attempt to execute it on the host via
SSH unless host is "localhost".
Returns a tuple with (stdout, stderr) if the exitcode is zero,
otherwise an Exception is raised.
'''
# make args a list if it's not already so
if isinstance(command, basestring):
args = shlex.split(command)
elif isinstance(command, list):
args = command
else:
raise Exception('_run_command: command arg must be str or list')
# add SSH arguments if this is a remote command
if host != 'localhost':
ssh_args = shlex.split('%s %s@%s' % (settings.ssh_path, self.remote_user, host))
args = ssh_args + args
try:
self.logger.debug('_run_command %r' % args)
p = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            # We really want to block until our subprocess exits or
# KeyboardInterrupt. If we don't, clean up tasks can likely fail.
try:
stdout, stderr = p.communicate()
except KeyboardInterrupt:
# TODO terminate() doesn't block, so we'll need to poll
p.terminate()
raise KeyboardInterrupt
if stdout:
self.logger.debug(stdout)
if stderr:
# Warning level should be fine here since we'll also look at
# the exitcode.
self.logger.warning(stderr)
exitcode = p.returncode
except IOError:
raise Exception('Unable to execute/find {args}'.format(args=args))
if exitcode > 0:
error_message = ('[{host}] A command terminated with errors and likely requires intervention. The '
'command attempted was "{command}".').format(
host=host, command=command)
raise Exception(error_message)
return (stdout, stderr)
def run_backup(self):
self.logger.info('started')
try:
error_case = False
self._process_pre_job_hooks()
self.logger.info('data backup started...')
self._run_backup()
self.logger.info('data backup complete')
except Exception, e:
error_case = True
self.logger.error((str(e)))
self.logger.info("let's try to clean up...")
except KeyboardInterrupt:
error_case = True
# using error level here so that these messages will
# print to the console
self.logger.error('backup job cancelled by user')
self.logger.error("let's try to clean up...")
finally:
self._process_post_job_hooks(error_case)
self.logger.info('stopped')
def _run_backup(self, top_level_src_dir='/'):
'''Run rdiff-backup job.
Builds an argument list for a full rdiff-backup command line based on
the settings in the instance and optionally the top_level_src_dir
parameter. Said parameter is used to define the context for the backup
        mirror. This is especially handy when backing up mounted snapshots so
that the mirror doesn't contain the directory where the snapshot is
mounted.
'''
self.logger.debug('_run_backup started')
# Init our arguments list with the path to rdiff-backup.
# This will be in the format we'd normally pass to the command-line
# e.g. [ '--include', '/dir/to/include', '--exclude', '/dir/to/exclude']
arg_list = [settings.rdiff_backup_path]
# setup some default rdiff-backup options
# TODO provide a way to override these
arg_list.append('--exclude-device-files')
arg_list.append('--exclude-fifos')
arg_list.append('--exclude-sockets')
# Bring the terminal verbosity down so that we only see errors
arg_list += ['--terminal-verbosity', '1']
# This conditional reads strangely, but that's because rdiff-backup
# not only defaults to having SSH compression enabled, it also doesn't
# have an option to explicitly enable it -- only one to disable it.
if not settings.ssh_compression:
arg_list.append('--ssh-no-compression')
# Populate self.argument list
for exclude_dir in self.exclude_dir_list:
arg_list.append('--exclude')
arg_list.append(exclude_dir)
for exclude_file in self.exclude_file_list:
arg_list.append('--exclude-filelist')
arg_list.append(exclude_file)
for include_dir in self.include_dir_list:
arg_list.append('--include')
arg_list.append(include_dir)
for include_file in self.include_file_list:
arg_list.append('--include-filelist')
arg_list.append(include_file)
# Exclude everything else
arg_list.append('--exclude')
arg_list.append('**')
# Add a source argument
if self.source_hostname == 'localhost':
arg_list.append(top_level_src_dir)
else:
arg_list.append(
'{remote_user}@{source_hostname}::{top_level_src_dir}'.format(
remote_user=self.remote_user,
source_hostname=self.source_hostname,
top_level_src_dir=top_level_src_dir
)
)
# Add a destination argument
arg_list.append(
'{backup_store_path}/{label}'.format(
backup_store_path=settings.backup_store_path,
label=self.label
)
)
# Rdiff-backup GO!
self._run_command(arg_list)
self.logger.debug('_run_backup completed')
def _remove_older_than(self, timespec, error_case):
'''Trims increments older than timespec
        Post-job hook that uses rdiff-backup's --remove-older-than feature to
trim old increments from the backup history
'''
if not error_case:
self.logger.info('remove_older_than %s started' % timespec)
arg_list = [settings.rdiff_backup_path]
arg_list.append('--force')
arg_list.append('--remove-older-than')
arg_list.append(timespec)
arg_list.append('%s/%s' % (settings.backup_store_path, self.label))
self._run_command(arg_list)
self.logger.info('remove_older_than %s completed' % timespec)
class LVMBackup(ARIBackup):
def __init__(self, label, source_hostname, remove_older_than_timespec=None):
super(LVMBackup, self).__init__(label, source_hostname, remove_older_than_timespec)
# This is a list of 2-tuples, where each inner 2-tuple expresses the LV
        # to back up, the mount point for that LV, and any mount options necessary.
# For example: [('hostname/root, '/', 'noatime'),]
# TODO I wonder if noatime being used all the time makes sense to
# improve read performance and reduce writes to the snapshots.
self.lv_list = []
# a list of dicts with the snapshot paths and where they should be
# mounted
self.lv_snapshots = []
# mount the snapshots in a directory named for this job's label
self.snapshot_mount_point_base_path = os.path.join(settings.snapshot_mount_root, self.label)
# setup pre and post job hooks to manage snapshot work flow
self.pre_job_hook_list.append((self._create_snapshots, {}))
self.pre_job_hook_list.append((self._mount_snapshots, {}))
self.post_job_hook_list.append((self._umount_snapshots, {}))
self.post_job_hook_list.append((self._delete_snapshots, {}))
def _create_snapshots(self):
        '''Creates snapshots of all the volumes listed in self.lv_list'''
self.logger.info('creating LVM snapshots...')
for volume in self.lv_list:
try:
lv_path, src_mount_path, mount_options = volume
except ValueError:
lv_path, src_mount_path = volume
mount_options = None
vg_name, lv_name = lv_path.split('/')
new_lv_name = lv_name + settings.snapshot_suffix
mount_path = '{snapshot_mount_point_base_path}{src_mount_path}'.format(
snapshot_mount_point_base_path=self.snapshot_mount_point_base_path,
src_mount_path=src_mount_path
)
# TODO Is it really OK to always make a 1GB exception table?
self._run_command('lvcreate -s -L 1G %s -n %s' % (lv_path, new_lv_name), self.source_hostname)
self.lv_snapshots.append({
'lv_path': vg_name + '/' + new_lv_name,
'mount_path': mount_path,
'mount_options': mount_options,
'created': True,
'mount_point_created': False,
'mounted': False,
})
def _delete_snapshots(self, error_case=None):
'''Deletes snapshots in self.lv_snapshots
This method behaves the same in the normal and error cases.
'''
self.logger.info('deleting LVM snapshots...')
for snapshot in self.lv_snapshots:
if snapshot['created']:
lv_path = snapshot['lv_path']
# -f makes lvremove not interactive
self._run_command('lvremove -f %s' % lv_path, self.source_hostname)
snapshot.update({'created': False})
def _mount_snapshots(self):
self.logger.info('mounting LVM snapshots...')
for snapshot in self.lv_snapshots:
lv_path = snapshot['lv_path']
device_path = '/dev/' + lv_path
mount_path = snapshot['mount_path']
mount_options = snapshot['mount_options']
# mkdir the mount point
self._run_command('mkdir -p %s' % mount_path, self.source_hostname)
snapshot.update({'mount_point_created': True})
# If where we want to mount our LV is already a mount point then
# let's back out.
if os.path.ismount(mount_path):
raise Exception("{mount_path} is already a mount point".format(mount_path=mount_path))
# mount the LV, possibly with mount options
if mount_options:
command = 'mount -o {mount_options} {device_path} {mount_path}'.format(
mount_options=mount_options,
device_path=device_path,
mount_path=mount_path
)
else:
command = 'mount {device_path} {mount_path}'.format(
device_path=device_path,
mount_path=mount_path
)
self._run_command(command, self.source_hostname)
snapshot.update({'mounted': True})
def _umount_snapshots(self, error_case=None):
'''Umounts mounted snapshots in self.lv_snapshots
This method behaves the same in the normal and error cases.
'''
# TODO If the user doesn't put '/' in their include_dir_list, then
# we'll end up with directories around where the snapshots are mounted
# that will not get cleaned up. We should probably add functionality
# to make sure the "label" directory is recursively removed.
# Check out shutil.rmtree() to help resolve this issue.
self.logger.info('umounting LVM snapshots...')
# We need a local copy of the lv_snapshots list to muck with in
# this method.
local_lv_snapshots = self.lv_snapshots
# We want to umount these LVs in reverse order as this should ensure
# that we umount the deepest paths first.
local_lv_snapshots.reverse()
for snapshot in local_lv_snapshots:
mount_path = snapshot['mount_path']
if snapshot['mounted']:
self._run_command('umount %s' % mount_path, self.source_hostname)
snapshot.update({'mounted': False})
if snapshot['mount_point_created']:
self._run_command('rmdir %s' % mount_path, self.source_hostname)
snapshot.update({'mount_point_created': False})
def _run_backup(self):
'''Run backup of LVM snapshots'''
self.logger.debug('LVMBackup._run_backup started')
# Cook the self.include_dir_list and self.exclude_dir_list so that the
# src paths include the mount path for the LV(s).
local_include_dir_list = []
for include_dir in self.include_dir_list:
local_include_dir_list.append('{snapshot_mount_point_base_path}{include_dir}'.format(
                snapshot_mount_point_base_path=self.snapshot_mount_point_base_path,
include_dir=include_dir
))
local_exclude_dir_list = []
for exclude_dir in self.exclude_dir_list:
local_exclude_dir_list.append('{snapshot_mount_point_base_path}{exclude_dir}'.format(
                snapshot_mount_point_base_path=self.snapshot_mount_point_base_path,
exclude_dir=exclude_dir
))
self.include_dir_list = local_include_dir_list
self.exclude_dir_list = local_exclude_dir_list
# We don't support include_file_list and exclude_file_list in this
# class as it would take extra effort and it's not likely to be used.
# Have the base class perform an rdiff-backup
super(LVMBackup, self)._run_backup(self.snapshot_mount_point_base_path)
self.logger.debug('LVMBackup._run_backup completed')
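# ---------------------------------------------------------------------------
# Editor's addition, not part of the original module: a minimal usage sketch
# showing how a per-job script (e.g. one dropped into a cron/run-parts
# directory) might drive LVMBackup. The label, hostname and LV names below
# are hypothetical, and the sketch assumes the settings module has been
# configured as described in the module docstring.
if __name__ == '__main__':
    job = LVMBackup(label='db1-nightly',
                    source_hostname='db1.example.com',
                    remove_older_than_timespec='30D')
    # snapshot the root LV and mirror everything under it
    job.lv_list.append(('vg0/root', '/', 'noatime'))
    job.include_dir_list.append('/')
    job.run_backup()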
|
bsd-3-clause
| 7,867,966,649,929,638,000
| 38.366822
| 111
| 0.599442
| false
| 4.233417
| false
| false
| false
|
stccenter/datadiscovery
|
ranking/evaluation.py
|
1
|
12251
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 30 13:13:44 2017
@author: larakamal
total evaluation evaluates the sorted documents
using precision and NDCG
change the directory of the sorted documents from lines 79-87
change the directory of the precision and NDCG graphs from
line 345 and 352
"""
from math import log10
import csv
import numpy as np
import pandas as pd
def getNDCG(list, k):
#convert to double
dcg = float(getDCG(list,k))
idcg = float(getIDCG(list,k))
ndcg = 0.0
if (idcg > 0.0):
ndcg = dcg/idcg
return ndcg
def getPrecision(list, k):
size = len(list)
if (size == 0 or k == 0):
return 0.0
if(k > size):
k = size
rel_doc_num = getRelevantDocNum(list,k)
#convert to double
precision = float(float(rel_doc_num)/float(k))
return precision
def getRelevantDocNum(list,k):
size = len(list)
if (size == 0 or k == 0):
return 0
if (k > size):
k = size
rel_num = 0
for i in range(k):
if list[i] > 5:
rel_num = rel_num + 1
return rel_num
def getDCG(list,k):
size = len(list)
if (size == 0 or k == 0):
return 0.0
if (k > size):
k = size
#convert to double
dcg = list[0]
dcg = float(dcg)
for i in range(1,k):
rel = list[i]
pos = i+1
rel_log = log10(pos)/log10(2)
rel_log = float(rel_log)
dcg = dcg + (rel/rel_log)
return dcg
def getIDCG(list, k):
# sort list
sortedList = list
sortedList = sorted(sortedList, key=int, reverse=True)
idcg = getDCG(sortedList, k)
return float(idcg)
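# Worked example (editor's addition, not part of the original script): for a
# hypothetical relevance list [7, 3, 6] and k = 3, the helpers above give
#   getDCG([7, 3, 6], 3)  = 7 + 3/log2(2) + 6/log2(3) ~= 13.79
#   getIDCG([7, 3, 6], 3) = 7 + 6/log2(2) + 3/log2(3) ~= 14.89   (sorted as [7, 6, 3])
#   getNDCG([7, 3, 6], 3) ~= 13.79 / 14.89 ~= 0.93
#   getPrecision([7, 3, 6], 3) = 2/3, since only labels above 5 count as relevant.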
def evaluate(algorithm):
path = "data/results/test/" + algorithm + "/"
#change directory of the ranked documents
dataframe1 = pd.read_csv(path + "gravity_sorted.csv")
dataframe2 = pd.read_csv(path + "ocean pressure_sorted.csv")
dataframe3 = pd.read_csv(path + "ocean temperature_sorted.csv")
dataframe4 = pd.read_csv(path + "ocean wind_sorted.csv")
dataframe5 = pd.read_csv(path + "pathfinder_sorted.csv")
dataframe6 = pd.read_csv(path + "quikscat_sorted.csv")
dataframe7 = pd.read_csv(path + "radar_sorted.csv")
dataframe8 = pd.read_csv(path + "saline density_sorted.csv")
dataframe9 = pd.read_csv(path + "sea ice_sorted.csv")
label1 = dataframe1.ix[:,10:11]
label2 = dataframe2.ix[:,10:11]
label3 = dataframe3.ix[:,10:11]
label4 = dataframe4.ix[:,10:11]
label5 = dataframe5.ix[:,10:11]
label6 = dataframe6.ix[:,10:11]
label7 = dataframe7.ix[:,10:11]
label8 = dataframe8.ix[:,10:11]
label9 = dataframe9.ix[:,10:11]
temp_list1 = label1['label'].tolist()
temp_list2 = label2['label'].tolist()
temp_list3 = label3['label'].tolist()
temp_list4 = label4['label'].tolist()
temp_list5 = label5['label'].tolist()
temp_list6 = label6['label'].tolist()
temp_list7 = label7['label'].tolist()
temp_list8 = label8['label'].tolist()
temp_list9 = label9['label'].tolist()
label_list1 = [];
label_list2 = [];
label_list3 = [];
label_list4 = [];
label_list5 = [];
label_list6 = [];
label_list7 = [];
label_list8 = [];
label_list9 = [];
for i in range(len(temp_list1)):
if temp_list1[i] == 'Excellent':
label_list1.append(7)
elif temp_list1[i] == 'Very good':
label_list1.append(6)
elif temp_list1[i] == 'Good':
label_list1.append(5)
elif temp_list1[i] == 'Ok':
label_list1.append(4)
elif temp_list1[i] == 'Bad':
label_list1.append(3)
elif temp_list1[i] == 'Very bad':
label_list1.append(2)
elif temp_list1[i] == 'Terrible':
label_list1.append(1)
else:
label_list1.append(0)
for i in range(len(temp_list2)):
if temp_list2[i] == 'Excellent':
label_list2.append(7)
elif temp_list2[i] == 'Very good':
label_list2.append(6)
elif temp_list2[i] == 'Good':
label_list2.append(5)
elif temp_list2[i] == 'Ok':
label_list2.append(4)
elif temp_list2[i] == 'Bad':
label_list2.append(3)
elif temp_list2[i] == 'Very bad':
label_list2.append(2)
elif temp_list2[i] == 'Terrible':
label_list2.append(1)
else:
label_list2.append(0)
for i in range(len(temp_list3)):
if temp_list3[i] == 'Excellent':
label_list3.append(7)
elif temp_list3[i] == 'Very good':
label_list3.append(6)
elif temp_list3[i] == 'Good':
label_list3.append(5)
elif temp_list3[i] == 'Ok':
label_list3.append(4)
elif temp_list3[i] == 'Bad':
label_list3.append(3)
elif temp_list3[i] == 'Very bad':
label_list3.append(2)
elif temp_list3[i] == 'Terrible':
label_list3.append(1)
else:
label_list3.append(0)
for i in range(len(temp_list4)):
if temp_list4[i] == 'Excellent':
label_list4.append(7)
elif temp_list4[i] == 'Very good':
label_list4.append(6)
elif temp_list4[i] == 'Good':
label_list4.append(5)
elif temp_list4[i] == 'Ok':
label_list4.append(4)
elif temp_list4[i] == 'Bad':
label_list4.append(3)
elif temp_list4[i] == 'Very bad':
label_list4.append(2)
elif temp_list4[i] == 'Terrible':
label_list4.append(1)
else:
label_list4.append(0)
for i in range(len(temp_list5)):
if temp_list5[i] == 'Excellent':
label_list5.append(7)
elif temp_list5[i] == 'Very good':
label_list5.append(6)
elif temp_list5[i] == 'Good':
label_list5.append(5)
elif temp_list5[i] == 'Ok':
label_list5.append(4)
elif temp_list5[i] == 'Bad':
label_list5.append(3)
elif temp_list5[i] == 'Very bad':
label_list5.append(2)
elif temp_list5[i] == 'Terrible':
label_list5.append(1)
else:
label_list5.append(0)
for i in range(len(temp_list6)):
if temp_list6[i] == 'Excellent':
label_list6.append(7)
elif temp_list6[i] == 'Very good':
label_list6.append(6)
elif temp_list6[i] == 'Good':
label_list6.append(5)
elif temp_list6[i] == 'Ok':
label_list6.append(4)
elif temp_list6[i] == 'Bad':
label_list6.append(3)
elif temp_list6[i] == 'Very bad':
label_list6.append(2)
elif temp_list6[i] == 'Terrible':
label_list6.append(1)
else:
label_list6.append(0)
for i in range(len(temp_list7)):
if temp_list7[i] == 'Excellent':
label_list7.append(7)
elif temp_list7[i] == 'Very good':
label_list7.append(6)
elif temp_list7[i] == 'Good':
label_list7.append(5)
elif temp_list7[i] == 'Ok':
label_list7.append(4)
elif temp_list7[i] == 'Bad':
label_list7.append(3)
elif temp_list7[i] == 'Very bad':
label_list7.append(2)
elif temp_list7[i] == 'Terrible':
label_list7.append(1)
else:
label_list7.append(0)
for i in range(len(temp_list8)):
if temp_list8[i] == 'Excellent':
label_list8.append(7)
elif temp_list8[i] == 'Very good':
label_list8.append(6)
elif temp_list8[i] == 'Good':
label_list8.append(5)
elif temp_list8[i] == 'Ok':
label_list8.append(4)
elif temp_list8[i] == 'Bad':
label_list8.append(3)
elif temp_list8[i] == 'Very bad':
label_list8.append(2)
elif temp_list8[i] == 'Terrible':
label_list8.append(1)
else:
label_list8.append(0)
for i in range(len(temp_list9)):
if temp_list9[i] == 'Excellent':
label_list9.append(7)
elif temp_list9[i] == 'Very good':
label_list9.append(6)
elif temp_list9[i] == 'Good':
label_list9.append(5)
elif temp_list9[i] == 'Ok':
label_list9.append(4)
elif temp_list9[i] == 'Bad':
label_list9.append(3)
elif temp_list9[i] == 'Very bad':
label_list9.append(2)
elif temp_list9[i] == 'Terrible':
label_list9.append(1)
else:
label_list9.append(0)
NDCG_list1 = []
NDCG_list2 = []
NDCG_list3 = []
NDCG_list4 = []
NDCG_list5 = []
NDCG_list6 = []
NDCG_list7 = []
NDCG_list8 = []
NDCG_list9 = []
for i in range(1,41):
k = i
NDCG_list1.append(getNDCG(label_list1,k))
NDCG_list2.append(getNDCG(label_list2,k))
NDCG_list3.append(getNDCG(label_list3,k))
NDCG_list4.append(getNDCG(label_list4,k))
NDCG_list5.append(getNDCG(label_list5,k))
NDCG_list6.append(getNDCG(label_list6,k))
NDCG_list7.append(getNDCG(label_list7,k))
NDCG_list8.append(getNDCG(label_list8,k))
NDCG_list9.append(getNDCG(label_list9,k))
precision_list1 = []
precision_list2 = []
precision_list3 = []
precision_list4 = []
precision_list5 = []
precision_list6 = []
precision_list7 = []
precision_list8 = []
precision_list9 = []
for i in range(1,41):
k = i
precision_list1.append(getPrecision(label_list1,k))
precision_list2.append(getPrecision(label_list2,k))
precision_list3.append(getPrecision(label_list3,k))
precision_list4.append(getPrecision(label_list4,k))
precision_list5.append(getPrecision(label_list5,k))
precision_list6.append(getPrecision(label_list6,k))
precision_list7.append(getPrecision(label_list7,k))
precision_list8.append(getPrecision(label_list8,k))
precision_list9.append(getPrecision(label_list9,k))
total_list_NDCG = []
for i in range(len(NDCG_list1)):
average = (NDCG_list1[i] + NDCG_list2[i]+ NDCG_list3[i] + NDCG_list4[i]+ NDCG_list5[i] + NDCG_list6[i] + NDCG_list7[i] + NDCG_list8[i] + NDCG_list9[i])/9
array = np.array([NDCG_list1[i],NDCG_list2[i], NDCG_list3[i], NDCG_list4[i], NDCG_list5[i], NDCG_list6[i], NDCG_list7[i], NDCG_list8[i], NDCG_list9[i], average])
total_list_NDCG.append(array)
total_list_precision = []
for i in range(len(precision_list1)):
average = (precision_list1[i] + precision_list2[i]+ precision_list3[i] + precision_list4[i]+ precision_list5[i] + precision_list6[i] + precision_list7[i] + precision_list8[i] + precision_list9[i])/9
array = np.array([precision_list1[i],precision_list2[i], precision_list3[i], precision_list4[i], precision_list5[i], precision_list6[i], precision_list7[i], precision_list8[i], precision_list9[i], average])
total_list_precision.append(array)
with open('data/results/rank/' + algorithm + 'NDCG_graph.csv', 'w', encoding = 'utf-8-sig') as outcsv:
writer = csv.writer(outcsv)
writer.writerow(['label'])
writer.writerow(['gravity', 'ocean_pressure', 'ocean_temperature', 'ocean_wind', 'pathfinder','quikscat', 'radar', 'saline_density','sea_ice', algorithm])
for i in total_list_NDCG:
writer.writerow(i)
with open('data/results/rank/' + algorithm + 'precision_graph.csv', 'w', encoding = 'utf-8-sig') as outcsv:
writer = csv.writer(outcsv)
writer.writerow(['label'])
writer.writerow(['gravity', 'ocean_pressure', 'ocean_temperature', 'ocean_wind', 'pathfinder','quikscat', 'radar', 'saline_density','sea_ice', algorithm])
for i in total_list_precision:
writer.writerow(i)
|
apache-2.0
| -4,196,124,341,994,484,000
| 32.939058
| 214
| 0.548119
| false
| 3.10545
| false
| false
| false
|
ufo2mstar/PersonalPys
|
GreyscriptNB15.py
|
1
|
2408
|
#-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: Kyu
#
# Created: 01/11/2012
# Copyright: (c) Kyu 2012
# Licence: <your licence>
#-------------------------------------------------------------------------------
# Combi Try!
import sys
import os
path=os.getcwd()
def makeit(allincd,l):
#combi='\"copy -b\"'+allmp3s+' '+path+'\\comb\\'+files[1][:-9]+'.mp3\"\"';
combi='\"copy /b '+allincd+' \"CD '+str(l)+'.mp3\"';
print combi
os.system(combi);
if __name__ == '__main__':
## cdname=[]
j=0
allincd=''
# Get files to unzip
for root, dirs, files in os.walk(path):
cdname=[]
for i in range(106,121):
## j=j+1
for fil in files:
if 'Track '+str(i).zfill(3) in fil:
print '>>> founddddd = track ',i
if '.mp3' in fil:
cdname.append('\"'+fil+'\"')
print cdname
print 'all in cd = ',allincd,'\n'
if len(cdname)==15:
j=j+1
allincd=' + '.join(cdname)
makeit(allincd,j)
cdname=[]
# New walk to get unziped mp3 files
##mp3files=[];
##for root, dirs, files in os.walk(path):
#### print files
## for fil in files:
## if fil[-4:]=='.mp3':
## mp3files.append('\"'+fil+'\"')
## allmp3s=''
#### for mp3f in mp3files:
## allmp3s=' + '.join(mp3files)
## print 'all mp3 cat = ',allmp3s,'\n'
## #combi='\"copy -b\"'+allmp3s+' '+path+'\\comb\\'+files[1][:-9]+'.mp3\"\"';
## combi='\"copy /b '+allmp3s+' '+'nice.mp3\"';
## print combi
## os.system(combi);
##
### Delete unziped and copied file
## dell='\"del '+allmp3s+'\"'
## os.system(dell)
##
# Garbage
## ofile=open(files,'w')
## ifile=open('a.mp3','r')
#### ofile=open('ans/'+name,'w')
## newlines=[]
## for line in ifile:
#### line0 = str.strip(line)
#### if line0 !='':
#### if line0[0] != '%': #and line0[0] != '%' and line0[-1] != ';':
#### if '%' in line0:
#### newlines.append(line[:line.find("%")]+'\n')
#### else:
## newlines.append(line)
## print line
##
|
gpl-2.0
| 4,172,745,439,984,703,000
| 28.012048
| 86
| 0.405316
| false
| 3.197875
| false
| false
| false
|
f0rki/cb-multios
|
original-challenges/A_Game_of_Chance/support/dealer.py
|
1
|
9666
|
#!/usr/bin/env python
#
# Copyright (C) 2014 Narf Industries <info@narfindustries.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from random import choice, randint
import support as sp
from common import DEBUG, CONFIG
from player import Player as p
from pov_deck import POV_DECK_BYTES
TRIGGER_BYTES = ['\x21', '\x5f', '\x3c', '\x42', '\x63', '\x51']
# 'NAME': (IDX, BYTE)
TRIGGER_BYTE_LOOKUP = {
'DECK': (0, TRIGGER_BYTES[0]),
'DEAL': (1, TRIGGER_BYTES[1]),
'DATA': (2, TRIGGER_BYTES[2]),
'BALANCE': (3, TRIGGER_BYTES[3]),
'COUNT': (4, TRIGGER_BYTES[4]),
'QUIT': (5, TRIGGER_BYTES[5]),
}
def get_non_trigger_byte():
b = chr(randint(0, 255))
while b in TRIGGER_BYTES:
b = chr(randint(0,255))
return b
class Dealer(object):
def __init__(self, magic_page):
self.players = []
self.magic_page = magic_page
self.deck = []
self.ok = ''
def add_players(self, p_count):
for i in range(1, p_count + 1):
noob = p(i, self.magic_page)
self.players.append(noob)
self.balances = [0 for _ in range(p_count + 1)]
self.wagers = [0 for _ in range(p_count + 1)]
self.results = [0 for _ in range(p_count + 1)]
self.deal1 = [0 for _ in range(p_count + 1)]
self.deal2 = [0 for _ in range(p_count + 1)]
if DEBUG:
print "added {0} players".format(p_count)
def make_trigger_buf(self, cmd_name):
cmd = ''
for cn in ['DECK', 'DEAL', 'DATA', 'BALANCE', 'COUNT', 'QUIT']:
if cn == cmd_name:
(idx, b) = TRIGGER_BYTE_LOOKUP[cmd_name]
cmd += b
else:
cmd += get_non_trigger_byte()
return cmd
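    # Editor's note, not part of the original source: as an illustration,
    # make_trigger_buf('DEAL') returns a 6-byte command whose byte at index 1
    # is exactly '\x5f', while every other position is random filler from
    # get_non_trigger_byte(), e.g. something like '\x07\x5f\xa1\x19\x88\x2c'.
    # Only the trigger byte's position and value are deterministic.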
def make_new_random_deck(self, count, bad_cards=False):
deck = [chr(randint(CONFIG['MIN_CARD_VALUE'], CONFIG['MAX_CARD_VALUE'])) for _ in range(count)]
if True == bad_cards:
deck[count/2] = chr(randint(CONFIG['MAX_CARD_VALUE'] + 1, 255))
deck[count-1] = chr(randint(CONFIG['MAX_CARD_VALUE'] + 1, 255))
if DEBUG:
print "making new deck with {0} cards with {1}bad cards".format(count, "" if bad_cards else "no ")
# if DEBUG:
# print "deck:"
# for c in deck:
# print " card 0x{0:02x},".format(ord(c))
return deck
def gen_ok(self):
ok = [0,0,0,0,0,0,0,0]
for idx in range(len(self.magic_page)):
ok[idx % 8] ^= ord(self.magic_page[idx])
self.ok = ''.join([chr(c) for c in ok])
def get_packed_balances(self):
p_bal = ''
if DEBUG:
print " balances were: {0}".format(self.balances)
for b in self.balances:
p_bal += sp.pack_single_int32(b)
return p_bal
def get_packed_wagers(self):
p_wag = ''
if DEBUG:
print " wagers were: {0}".format(self.wagers)
for w in self.wagers:
p_wag += sp.pack_single_uint32(w)
return p_wag
def get_packed_results(self):
p_res = ''
if DEBUG:
print " results were: {0}".format(self.results)
for r in self.results:
p_res += sp.pack_single_uint32(r)
return p_res
def enough_cards_for_round(self):
return 0 < len(self.deck) and len(self.deck) >= len(self.players)
def calculate_round_results(self):
max_sum = 0
if DEBUG:
print "calculating round results"
for idx in range(1, len(self.results)):
if max_sum < self.deal1[idx] + self.deal2[idx]:
max_sum = self.deal1[idx] + self.deal2[idx]
if DEBUG:
print " updated round max_sum {0}".format(max_sum)
for idx in range(1, len(self.results)):
if DEBUG:
print " calculating results for player {0}".format(idx)
my_sum = self.deal1[idx] + self.deal2[idx]
if DEBUG:
print " round sum {1}".format(idx, my_sum)
if my_sum == max_sum:
self.results[idx] = CONFIG['WIN']
if DEBUG:
" WIN".format(idx)
else:
self.results[idx] = CONFIG['LOSS']
if DEBUG:
" LOSS".format(idx)
def get_next_card(self):
c = self.deck.pop()
if DEBUG:
print "got next card 0x{0:02x}".format(ord(c))
return c
def play_one_round(self):
if DEBUG:
print "play one round with {0} players".format(len(self.players))
# deal card to each player
for p in self.players:
c = self.get_next_card()
self.deal1[p.id] = ord(c)
p.cards[0] = ord(c)
if DEBUG:
print " player {0} dealt card 0x{1:02x}".format(p.id, ord(c))
if DEBUG:
print " cards from deal 1: {0}".format(self.deal1)
# get wager from each player
for p in self.players:
self.wagers[p.id] = p.get_wager()
if DEBUG:
print " player {0} wagered {1}".format(p.id, self.wagers[p.id])
# deal card to each player
for p in self.players:
c = self.get_next_card()
self.deal2[p.id] = ord(c)
p.cards[1] = ord(c)
if DEBUG:
print " player {0} dealt card 0x{1:02x}".format(p.id, ord(c))
if DEBUG:
print " cards from deal 2: {0}".format(self.deal2)
self.calculate_round_results()
# pay each player
for p in self.players:
if CONFIG['WIN'] == self.results[p.id]:
p.exchange_money(self.wagers[p.id])
p.win()
self.balances[p.id] += self.wagers[p.id]
else:
p.exchange_money(-self.wagers[p.id])
p.loss()
self.balances[p.id] += -self.wagers[p.id]
## For testing ##
def total_magic_page_indices_used(self):
mpiu = set()
for p in self.players:
mpiu.update(p.magic_page_indices_used)
mpiu_l = list(mpiu)
mpiu_l.sort()
return mpiu_l
def check_magic_bytes_usage(self):
mpiu_list = self.total_magic_page_indices_used()
# print " total set of magic_page_indices_used: {0}".format(mpiu_list)
for idx in range(len(mpiu_list) - 3):
i0 = mpiu_list[idx]
i1 = mpiu_list[idx + 1]
i2 = mpiu_list[idx + 2]
i3 = mpiu_list[idx + 3]
if i0 == i1 - 1 and \
i1 == i2 - 1 and \
i2 == i3 - 1:
if DEBUG:
print "#### Warning 4 consecutive magic page indices: {0}, {1}, {2}, {3}.".format(i0, i1, i2, i3)
print "magic total_magic_page_indices_used: {0}".format(mpiu_list)
return [i0, i1, i2, i3]
if DEBUG:
print " Done checking for 4 consecutive magic page indices."
return []
def check_deck(deck_sz=1000):
if DEBUG:
print "find_trigger_deck size {0}".format(deck_sz)
# setup
d = Dealer('A'*4096)
d.add_players(CONFIG['PLAYER_COUNT'])
# make deck
deck = d.make_new_random_deck(deck_sz)
d.deck = list(deck)
mpiu_list = []
while d.enough_cards_for_round():
# play a round
d.play_one_round()
# break if found 4 consecutive fp indices
mpiu_list = d.check_magic_bytes_usage()
if [] != mpiu_list:
if DEBUG:
print "indices {0}".format(mpiu_list)
return deck
return []
def find_trigger_deck():
deck_sz = 2500
deck = check_deck(deck_sz)
while [] == deck:
deck_sz += 10
deck = check_deck(deck_sz)
if DEBUG:
print "trigger deck len {0}, deck {1}".format(len(deck), deck)
return deck
def test_trigger_deck(deck):
if DEBUG:
print "test_trigger_deck"
# setup
d = Dealer('A'*4096)
d.add_players(CONFIG['PLAYER_COUNT'])
d.deck = list(deck)
mpiu_list = []
while d.enough_cards_for_round():
# play a round
d.play_one_round()
# break if found 4 consecutive fp indices
mpiu_list = d.check_magic_bytes_usage()
if [] != mpiu_list:
# if DEBUG:
print "trigger indices {0}".format(mpiu_list)
return True
return False
if __name__ == '__main__':
# deck = find_trigger_deck()
deck = POV_DECK_BYTES
print test_trigger_deck(deck)
|
mit
| -238,666,694,014,413,470
| 31.22
| 117
| 0.542727
| false
| 3.403521
| true
| false
| false
|
bhrzslm/uncertainty-reasoning
|
my_engine/others/GrMPy/lib/GrMPy/Tests/test_BNET_sumproduct.py
|
1
|
3633
|
# Author: Almero Gouws <14366037@sun.ac.za>
"""
This is a tutorial on how to create a Bayesian network, and perform
exact SUM-PRODUCT inference on it.
"""
"""Import the required numerical modules"""
import numpy as np
from sprinkler_data import sprinkler_evidence, sprinkler_probs
"""Import the GrMPy modules"""
import models
import inference
import cpds
def test_bnet_sumproduct():
"""
Testing: SUM-PRODUCT on BNET
This example is based on the lawn sprinkler example, and the Bayesian
network has the following structure, with all edges directed downwards:
                Cloudy - 0
                /        \
               /          \
              /            \
      Sprinkler - 1      Rainy - 2
              \            /
               \          /
                \        /
                Wet Grass - 3
"""
"""Assign a unique numerical identifier to each node"""
C = 0
S = 1
R = 2
W = 3
"""Assign the number of nodes in the graph"""
nodes = 4
"""
    The graph structure is represented as an adjacency matrix, dag.
    If dag[i, j] = 1, then there exists a directed edge from node
    i to node j.
"""
dag = np.zeros((nodes, nodes))
dag[C, [R, S]] = 1
dag[R, W] = 1
dag[S, W] = 1
"""
Define the size of each node, which is the number of different values a
node could observed at. For example, if a node is either True of False,
it has only 2 possible values it could be, therefore its size is 2. All
the nodes in this graph has a size 2.
"""
node_sizes = 2 * np.ones(nodes)
"""
We now need to assign a conditional probability distribution to each
node.
"""
node_cpds = [[], [], [], []]
"""Define the CPD for node 0"""
CPT = np.array([0.5, 0.5])
node_cpds[C] = cpds.TabularCPD(CPT)
"""Define the CPD for node 1"""
CPT = np.array([[0.8, 0.2], [0.2, 0.8]])
node_cpds[R] = cpds.TabularCPD(CPT)
"""Define the CPD for node 2"""
CPT = np.array([[0.5, 0.5], [0.9, 0.1]])
node_cpds[S] = cpds.TabularCPD(CPT)
"""Define the CPD for node 3"""
CPT = np.array([[[1, 0], [0.1, 0.9]], [[0.1, 0.9], [0.01, 0.99]]])
node_cpds[W] = cpds.TabularCPD(CPT)
"""Create the Bayesian network"""
net = models.bnet(dag, node_sizes, node_cpds=node_cpds)
"""
    Initialize the BNET's inference engine to use EXACT inference
by setting exact=True.
"""
net.init_inference_engine(exact=True)
"""Create and enter evidence ([] means that node is unobserved)"""
all_ev = sprinkler_evidence();
all_prob = sprinkler_probs();
count = 0;
errors = 0;
for evidence in all_ev:
"""Execute the max-sum algorithm"""
net.sum_product(evidence)
ans = [1, 1, 1, 1]
marginal = net.marginal_nodes([C])
if evidence[C] is None:
ans[C] = marginal.T[1]
marginal = net.marginal_nodes([S])
if evidence[S] is None:
ans[S] = marginal.T[1]
marginal = net.marginal_nodes([R])
if evidence[R] is None:
ans[R] = marginal.T[1]
marginal = net.marginal_nodes([W])
if evidence[W] is None:
ans[W] = marginal.T[1]
errors = errors + \
np.round(np.sum(np.array(ans) - np.array(all_prob[count])), 3)
count = count + 1
assert errors == 0
|
mit
| -8,610,013,309,501,509,000
| 27.778689
| 79
| 0.517754
| false
| 3.466603
| false
| false
| false
|
kartikshah1/Test
|
courseware/serializers.py
|
1
|
2712
|
"""
Serializers for the courseware API
"""
from rest_framework import serializers
from courseware import models
from video.serializers import VideoSerializer
from quiz.serializers import QuizSerializer
from document.serializers import DocumentSerializer
class AddGroupSerializer(serializers.ModelSerializer):
class Meta:
model = models.Group
exclude = ('pages', 'course')
class GroupSerializer(serializers.ModelSerializer):
class Meta:
model = models.Group
exclude = ('pages',)
class ConceptSerializer(serializers.ModelSerializer):
"""
Serializer for Concept
"""
videos = VideoSerializer(many=True)
quizzes = QuizSerializer(many=True)
pages = DocumentSerializer(many=True)
class Meta:
"""
Defining model
"""
model = models.Concept
fields = ('id', 'title', 'description', 'image', 'playlist', 'is_published')
#fields = ('id', 'group', 'title', 'image', 'playlist')
class ConceptDataPlaylistSerializer(serializers.Serializer):
"""
Serializer to create the playlist to send to the concept page
"""
id = serializers.IntegerField()
title = serializers.CharField(default='title not specified')
seen_status = serializers.BooleanField(default=False)
toc = serializers.CharField()
url = serializers.CharField()
class GroupPlaylistSerializer(serializers.Serializer):
"""
Serializer for the playlist of a group_playlist
"""
id = serializers.IntegerField()
title = serializers.CharField()
class ConceptDataSerializer(serializers.Serializer):
"""
    Serializer to send the data required for the
concept page
"""
id = serializers.IntegerField()
title = serializers.CharField(default='title_not_specified')
description = serializers.CharField(default='description_not_provided')
group = serializers.IntegerField(default=0)
group_title = serializers.CharField(default='group_not_spefified')
course = serializers.IntegerField(default=0)
course_title = serializers.CharField(default='course_not_specified')
playlist = ConceptDataPlaylistSerializer(many=True)
current_video = serializers.IntegerField(default=-1)
group_playlist = GroupPlaylistSerializer(many=True)
course_playlist = GroupPlaylistSerializer(many=True)
title_document = DocumentSerializer()
class ConceptHistorySerializer(serializers.ModelSerializer):
"""
Serializer for ConceptHistory
"""
class Meta:
"""
Defining model
"""
model = models.ConceptHistory
class AddQuizSerializer(serializers.Serializer):
title = serializers.CharField(max_length=models.SHORT_TEXT)
|
mit
| -2,709,606,381,594,734,600
| 28.16129
| 84
| 0.707965
| false
| 4.535117
| false
| false
| false
|
Aloomaio/googleads-python-lib
|
examples/ad_manager/v201811/reconciliation_line_item_report_service/get_reconciliation_line_item_reports_for_reconciliation_report.py
|
1
|
3086
|
#!/usr/bin/env python
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all line item reports for a given reconciliation report.
To determine how many reconciliation reports exist,
run get_all_reconciliation_reports.py.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
# Set the ID of the reconciliation report to query.
RECONCILIATION_REPORT_ID = 'INSERT_RECONCILIATION_REPORT_ID_HERE'
def main(client):
# Initialize appropriate service.
reconciliation_line_item_report_service = (client.GetService(
'ReconciliationLineItemReportService', version='v201811'))
# Create a statement to select reconciliation line item reports.
statement = (ad_manager.StatementBuilder(version='v201811')
.Where(('reconciliationReportId = :reconciliationReportId AND '
'lineItemId != :lineItemId'))
.OrderBy('lineItemId', ascending=True)
.WithBindVariable('reconciliationReportId',
RECONCILIATION_REPORT_ID)
.WithBindVariable('lineItemId', 0))
# Retrieve a small amount of reconciliation line item reports at a time,
# paging through until all reconciliation line item reports have been
# retrieved.
result_set_size = 0
should_continue = True
while should_continue:
page = (reconciliation_line_item_report_service
.getReconciliationLineItemReportsByStatement(
statement.ToStatement()))
if 'results' in page and len(page['results']):
result_set_size += page['totalResultSetSize']
# Iterate over individual results in the page.
for line_item_report in page['results']:
print ('Reconciliation line item report with ID %d, line item ID %d, '
'reconciliation source "%s", and reconciled volume %d was '
'found.' % (line_item_report['id'],
line_item_report['lineItemId'],
line_item_report['reconciliationSource'],
(line_item_report['reconciledVolume']
if 'reconciledVolume' in line_item_report else 0)))
# Update statement for next page.
statement.offset += statement.limit
should_continue = statement.offset < result_set_size
print 'Number of results found: %d' % result_set_size
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client)
|
apache-2.0
| 3,541,868,690,910,024,000
| 40.146667
| 79
| 0.680817
| false
| 4.192935
| false
| false
| false
|
elewis33/doorstop
|
doorstop/core/test/test_builder.py
|
1
|
2041
|
"""Unit tests for the doorstop.core.builder module."""
import unittest
from unittest.mock import patch, Mock
from doorstop.core.tree import Tree
from doorstop.core.builder import build, find_document, find_item, _clear_tree
from doorstop.core.test import FILES, EMPTY
from doorstop.core.test import MockDocumentSkip, MockDocumentNoSkip
class TestModule(unittest.TestCase):
"""Unit tests for the doorstop.core.builder module."""
@patch('doorstop.core.vcs.find_root', Mock(return_value=EMPTY))
def test_run_empty(self):
"""Verify an empty directory is an empty hierarchy."""
tree = build(EMPTY)
self.assertEqual(0, len(tree))
@patch('doorstop.core.document.Document', MockDocumentNoSkip)
@patch('doorstop.core.vcs.find_root', Mock(return_value=FILES))
def test_build(self):
"""Verify a tree can be built."""
tree = build(FILES)
self.assertEqual(4, len(tree))
@patch('doorstop.core.document.Document', MockDocumentSkip)
@patch('doorstop.core.vcs.find_root', Mock(return_value=FILES))
def test_build_with_skips(self):
"""Verify documents can be skipped while building a tree."""
tree = build(FILES)
self.assertEqual(0, len(tree))
@patch('doorstop.core.builder.build', Mock(return_value=Tree(Mock())))
@patch('doorstop.core.tree.Tree.find_document')
def test_find_document(self, mock_find_document): # pylint: disable=R0201
"""Verify documents can be found using a convenience function."""
_clear_tree()
prefix = 'req'
find_document(prefix)
mock_find_document.assert_called_once_with(prefix)
@patch('doorstop.core.builder.build', Mock(return_value=Tree(Mock())))
@patch('doorstop.core.tree.Tree.find_item')
def test_find_item(self, mock_find_item): # pylint: disable=R0201
"""Verify items can be found using a convenience function."""
_clear_tree()
uid = 'req1'
find_item(uid)
mock_find_item.assert_called_once_with(uid)
|
lgpl-3.0
| -8,596,280,262,397,212,000
| 37.509434
| 78
| 0.677119
| false
| 3.580702
| true
| false
| false
|
juliakreutzer/bandit-neuralmonkey
|
neuralmonkey/model/stateful.py
|
1
|
3523
|
"""Module that provides classes that encapsulate model parts with states.
There are three classes: `Stateful`, `TemporalStateful`, and `SpatialStateful`.
Model parts that do not keep states in time but have a single tensor on the
output should be instances of `Stateful`. Model parts that keep their hidden
states in a time-oriented list (e.g. recurrent encoder) should be instances
of `TemporalStateful`. Model parts that keep the states in a 2D matrix (e.g.
image encoders) should be instances of `SpatialStateful`.
There are also classes that inherit from both stateful and temporal or spatial
stateful (e.g. `TemporalStatefulWithOutput`) that can be used for model parts
that satisfy more requirements (e.g. recurrent encoder).
"""
from abc import ABCMeta, abstractproperty
import tensorflow as tf
# pylint: disable=too-few-public-methods
# pydocstyle: disable=
class Stateful(metaclass=ABCMeta):
@abstractproperty
def output(self) -> tf.Tensor:
"""Return the object output.
A 2D `Tensor` of shape (batch, state_size) which contains the
resulting state of the object.
"""
raise NotImplementedError("Abstract property")
# pylint: enable=too-few-public-methods
class TemporalStateful(metaclass=ABCMeta):
@abstractproperty
def temporal_states(self) -> tf.Tensor:
"""Return object states in time.
A 3D `Tensor` of shape (batch, time, state_size) which contains the
states of the object in time (e.g. hidden states of a recurrent
        encoder).
"""
raise NotImplementedError("Abstract property")
@abstractproperty
def temporal_mask(self) -> tf.Tensor:
"""Return mask for the temporal_states.
A 2D `Tensor` of shape (batch, time) of type float32 which masks the
temporal states so each sequence can have a different length. It should
only contain ones or zeros.
"""
raise NotImplementedError("Abstract property")
@property
def lengths(self) -> tf.Tensor:
"""Return the sequence lengths.
A 1D `Tensor` of type `int32` that stores the lengths of the
state sequences in the batch.
"""
return tf.to_int32(tf.reduce_sum(self.temporal_mask, 1))
@property
def dimension(self) -> int:
"""Return the dimension of the states."""
return self.temporal_states.get_shape()[-1].value
class SpatialStateful(metaclass=ABCMeta):
@property
def spatial_states(self) -> tf.Tensor:
"""Return object states in space.
A 4D `Tensor` of shape (batch, width, height, state_size) which
contains the states of the object in space (e.g. final layer of a
        convolutional network processing an image).
"""
raise NotImplementedError("Abstract property")
@abstractproperty
def spatial_mask(self) -> tf.Tensor:
"""Return mask for the spatial_states.
A 3D `Tensor` of shape (batch, width, height) of type float32
which masks the spatial states that they can be of different shapes.
The mask should only contain ones or zeros.
"""
raise NotImplementedError("Abstract property")
@property
def dimension(self) -> int:
"""Return the dimension of the states."""
return self.spatial_states.get_shape()[-1].value
# pylint: disable=abstract-method
class TemporalStatefulWithOutput(Stateful, TemporalStateful):
pass
class SpatialStatefulWithOutput(Stateful, SpatialStateful):
pass
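# ---------------------------------------------------------------------------
# Editor's sketch, not part of the original module: a minimal concrete
# implementation of TemporalStatefulWithOutput, included only to illustrate
# which properties a model part has to provide. The constant zero/one tensors
# and the mean-pooled output are hypothetical stand-ins for what a real
# encoder would compute from its inputs.
class _ToyTemporalEncoder(TemporalStatefulWithOutput):
    """Illustrative encoder exposing fixed-shape dummy states."""

    def __init__(self, batch_size: int, max_time: int, state_size: int) -> None:
        self._states = tf.zeros([batch_size, max_time, state_size])
        self._mask = tf.ones([batch_size, max_time])

    @property
    def output(self) -> tf.Tensor:
        # Collapse the time dimension into a single vector per batch item.
        return tf.reduce_mean(self.temporal_states, axis=1)

    @property
    def temporal_states(self) -> tf.Tensor:
        return self._states

    @property
    def temporal_mask(self) -> tf.Tensor:
        return self._mask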
|
bsd-3-clause
| -9,093,537,448,114,825,000
| 33.539216
| 79
| 0.68805
| false
| 4.229292
| false
| false
| false
|
crevetor/vcrwrapper
|
vcrwrapper/vcrutils.py
|
1
|
2982
|
from contextlib import contextmanager
import inspect
import json
import vcr
def json_query_matcher(r1, r2):
"""
Match two queries by decoding json-encoded query args and comparing them
"""
if len(r1.query) != len(r2.query):
return False
for i,q in enumerate(r1.query):
if q[0] != r2.query[i][0]:
return False
try:
j1 = json.loads(q[1])
j2 = json.loads(r2.query[i][1])
if j1 != j2:
return False
except ValueError:
# If we were unable to decode json just compare the values normally
if q[1] != r2.query[i][1]:
return False
return True
def get_vcr(*args, **kwargs):
"""Return a VCR, with our custom matchers registered.
Params are passed to VCR init."""
v = vcr.VCR(*args, **kwargs)
# register custom matchers here
v.register_matcher('json_query', json_query_matcher)
return v
def get_filename_from_method(func, receiver):
"""Return an unambigious filename built from a test method invocation.
The method is assumed to be declared inside venmo_tests.
:attr func: the method's function object.
:attr receiver: the first argument to the method, i.e. self or cls.
"""
mod_name = func.__module__
if inspect.isclass(receiver):
class_name = receiver.__name__
else:
class_name = receiver.__class__.__name__
return "%s.%s.%s.yaml" % (mod_name, class_name, func.__name__)
def _get_subcassette_filename(name, parent_filename):
"""Return a cassette namespaced by a parent cassette filename.
For example::
>>> _get_subcassette_filename('foo', 'mytests.test_bar.yaml')
'mytests.test_bar.foo.yaml'
"""
parent_components = parent_filename.split('.')
parent_components.insert(len(parent_components) - 1, name)
return '.'.join(parent_components)
def get_namespace_cm(my_vcr, parent_filename, make_external_requests):
"""Return a context manager that uses a cassette namespaced under the parent.
The context manager takes two arguments:
* name: a string that names the cassette.
* match_on: (optional), passed to use_cassette to override the default.
"""
@contextmanager
def namespace_cm(name, match_on=None,
                     my_vcr=my_vcr, parent_filename=parent_filename,
make_external_requests=make_external_requests):
if make_external_requests:
yield
else:
kwargs = {
'path': _get_subcassette_filename(name, parent_filename),
'match_on': match_on
}
if match_on is None:
# vcr doesn't use a sentinel for match_on;
# it just shouldn't be present to default it.
del kwargs['match_on']
with my_vcr.use_cassette(**kwargs):
yield
return namespace_cm
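# ---------------------------------------------------------------------------
# Editor's sketch, not part of the original module: typical wiring inside a
# test method, shown under hypothetical names (client, test_payments,
# 'create_payment' and the cassette directory are illustrative only).
#
#     my_vcr = get_vcr(cassette_library_dir='cassettes')
#     parent = get_filename_from_method(self.test_payments.__func__, self)
#     namespace = get_namespace_cm(my_vcr, parent, make_external_requests=False)
#     with namespace('create_payment', match_on=['json_query']):
#         client.create_payment(...)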
|
gpl-2.0
| 6,299,087,452,579,828,000
| 28.524752
| 81
| 0.597586
| false
| 3.872727
| true
| false
| false
|
flavour/eden
|
modules/s3/codecs/xls.py
|
1
|
49164
|
# -*- coding: utf-8 -*-
"""
S3 Microsoft Excel codec
@copyright: 2011-2019 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3XLS",
)
from gluon import HTTP, current
from gluon.contenttype import contenttype
from gluon.storage import Storage
from s3compat import INTEGER_TYPES, BytesIO, xrange
from ..s3codec import S3Codec
from ..s3utils import s3_str, s3_strip_markup, s3_unicode, s3_get_foreign_key
# =============================================================================
class S3XLS(S3Codec):
"""
Simple Microsoft Excel format codec
"""
# The xlwt library supports a maximum of 182 characters in a single cell
MAX_CELL_SIZE = 182
# Customizable styles
COL_WIDTH_MULTIPLIER = 310
# Python xlwt Colours
# https://docs.google.com/spreadsheets/d/1ihNaZcUh7961yU7db1-Db0lbws4NT24B7koY8v8GHNQ/pubhtml?gid=1072579560&single=true
LARGE_HEADER_COLOUR = 0x2C # pale_blue
HEADER_COLOUR = 0x2C # pale_blue
SUB_HEADER_COLOUR = 0x18 # periwinkle
SUB_TOTALS_COLOUR = 0x96
TOTALS_COLOUR = 0x00
ROW_ALTERNATING_COLOURS = [0x2A, # light_green
0x2B, # light_yellow
]
ERROR = Storage(
XLRD_ERROR = "XLS export requires python-xlrd module to be installed on server",
XLWT_ERROR = "XLS export requires python-xlwt module to be installed on server",
)
# -------------------------------------------------------------------------
def extract(self, resource, list_fields):
"""
Extract the rows from the resource
@param resource: the resource
@param list_fields: fields to include in list views
"""
title = self.crud_string(resource.tablename, "title_list")
get_vars = dict(current.request.vars)
get_vars["iColumns"] = len(list_fields)
query, orderby, left = resource.datatable_filter(list_fields,
get_vars,
)
resource.add_filter(query)
if orderby is None:
orderby = resource.get_config("orderby")
# Hierarchical FK Expansion:
# setting = {field_selector: [LevelLabel, LevelLabel, ...]}
expand_hierarchy = resource.get_config("xls_expand_hierarchy")
data = resource.select(list_fields,
left = left,
limit = None,
count = True,
getids = True,
orderby = orderby,
represent = True,
show_links = False,
raw_data = True if expand_hierarchy else False,
)
rfields = data.rfields
rows = data.rows
types = []
lfields = []
heading = {}
for rfield in rfields:
if rfield.show:
if expand_hierarchy:
levels = expand_hierarchy.get(rfield.selector)
else:
levels = None
if levels:
num_levels = len(levels)
colnames = self.expand_hierarchy(rfield, num_levels, rows)
lfields.extend(colnames)
types.extend(["string"] * num_levels)
T = current.T
for i, colname in enumerate(colnames):
heading[colname] = T(levels[i])
else:
lfields.append(rfield.colname)
heading[rfield.colname] = rfield.label or \
rfield.field.name.capitalize().replace("_", " ")
if rfield.ftype == "virtual":
types.append("string")
else:
types.append(rfield.ftype)
return (title, types, lfields, heading, rows)
# -------------------------------------------------------------------------
def encode(self, resource, **attr):
"""
Export data as a Microsoft Excel spreadsheet
@param resource: the source of the data that is to be encoded
as a spreadsheet, can be either of:
1) an S3Resource
2) an array of value dicts (dict of
column labels as first item, list of
field types as second item)
3) a dict like:
{columns: [key, ...],
headers: {key: label},
types: {key: type},
rows: [{key:value}],
}
@param attr: keyword arguments (see below)
@keyword as_stream: return the buffer (BytesIO) rather than
its contents (str), useful when the output
is supposed to be stored locally
@keyword title: the main title of the report
@keyword list_fields: fields to include in list views
@keyword report_groupby: used to create a grouping of the result:
either a Field object of the resource
or a string which matches a value in
the heading
@keyword use_colour: True to add colour to the cells, default False
@keyword evenodd: render different background colours
for even/odd rows ("stripes")
"""
# Do not redirect from here!
# ...but raise proper status code, which can be caught by caller
try:
import xlwt
except ImportError:
error = self.ERROR.XLWT_ERROR
current.log.error(error)
raise HTTP(503, body=error)
try:
from xlrd.xldate import xldate_from_date_tuple, \
xldate_from_time_tuple, \
xldate_from_datetime_tuple
except ImportError:
error = self.ERROR.XLRD_ERROR
current.log.error(error)
raise HTTP(503, body=error)
import datetime
MAX_CELL_SIZE = self.MAX_CELL_SIZE
COL_WIDTH_MULTIPLIER = self.COL_WIDTH_MULTIPLIER
# Get the attributes
title = attr.get("title")
if title is None:
title = current.T("Report")
list_fields = attr.get("list_fields")
group = attr.get("dt_group")
use_colour = attr.get("use_colour", False)
evenodd = attr.get("evenodd", True)
# Extract the data from the resource
if isinstance(resource, dict):
headers = resource.get("headers", {})
lfields = resource.get("columns", list_fields)
column_types = resource.get("types")
types = [column_types[col] for col in lfields]
rows = resource.get("rows")
elif isinstance(resource, (list, tuple)):
headers = resource[0]
types = resource[1]
rows = resource[2:]
lfields = list_fields
else:
if not list_fields:
list_fields = resource.list_fields()
(title, types, lfields, headers, rows) = self.extract(resource,
list_fields,
)
# Verify columns in items
request = current.request
if len(rows) > 0 and len(lfields) > len(rows[0]):
msg = """modules/s3/codecs/xls: There is an error in the list items, a field doesn't exist
requesting url %s
Headers = %d, Data Items = %d
Headers %s
List Fields %s""" % (request.url, len(lfields), len(rows[0]), headers, lfields)
current.log.error(msg)
# Grouping
report_groupby = lfields[group] if group else None
groupby_label = headers[report_groupby] if report_groupby else None
# Date/Time formats from L10N deployment settings
settings = current.deployment_settings
date_format = settings.get_L10n_date_format()
date_format_str = str(date_format)
dt_format_translate = self.dt_format_translate
date_format = dt_format_translate(date_format)
time_format = dt_format_translate(settings.get_L10n_time_format())
datetime_format = dt_format_translate(settings.get_L10n_datetime_format())
title_row = settings.get_xls_title_row()
# Get styles
styles = self._styles(use_colour = use_colour,
evenodd = evenodd,
datetime_format = datetime_format,
)
# Create the workbook
book = xlwt.Workbook(encoding="utf-8")
# Add sheets
sheets = []
# XLS exports are limited to 65536 rows per sheet, we bypass
# this by creating multiple sheets
row_limit = 65536
sheetnum = len(rows) / row_limit
# Can't have a / in the sheet_name, so replace any with a space
sheet_name = str(title.replace("/", " "))
if len(sheet_name) > 31:
# Sheet name cannot be over 31 chars
# (take sheet number suffix into account)
sheet_name = sheet_name[:31] if sheetnum == 1 else sheet_name[:28]
count = 1
while len(sheets) <= sheetnum:
sheets.append(book.add_sheet("%s-%s" % (sheet_name, count)))
count += 1
if callable(title_row):
# Calling with sheet None to get the number of title rows
title_row_length = title_row(None)
else:
title_row_length = 2
# Add header row to all sheets, determine columns widths
header_style = styles["header"]
for sheet in sheets:
# Move this down if a title row will be added
if title_row:
header_row = sheet.row(title_row_length)
else:
header_row = sheet.row(0)
column_widths = []
has_id = False
col_index = 0
for selector in lfields:
if selector == report_groupby:
continue
label = headers[selector]
if label == "Id":
# Indicate to adjust col_index when writing out
has_id = True
column_widths.append(0)
col_index += 1
continue
if label == "Sort":
continue
if has_id:
# Adjust for the skipped column
write_col_index = col_index - 1
else:
write_col_index = col_index
header_row.write(write_col_index, str(label), header_style)
width = max(len(label) * COL_WIDTH_MULTIPLIER, 2000)
width = min(width, 65535) # USHRT_MAX
column_widths.append(width)
sheet.col(write_col_index).width = width
col_index += 1
title = s3_str(title)
# Title row (optional, deployment setting)
if title_row:
T = current.T
large_header_style = styles["large_header"]
notes_style = styles["notes"]
for sheet in sheets:
if callable(title_row):
# Custom title rows
title_row(sheet)
else:
# First row => Title (standard = "title_list" CRUD string)
current_row = sheet.row(0)
if col_index > 0:
sheet.write_merge(0, 0, 0, col_index,
title,
large_header_style,
)
current_row.height = 500
# Second row => Export date/time
current_row = sheet.row(1)
current_row.write(0, "%s:" % T("Date Exported"), notes_style)
current_row.write(1, request.now, notes_style)
# Fix the size of the last column to display the date
if 16 * COL_WIDTH_MULTIPLIER > width:
sheet.col(col_index).width = 16 * COL_WIDTH_MULTIPLIER
# Initialize counters
total_cols = col_index
# Move the rows down if a title row is included
if title_row:
row_index = title_row_length
else:
row_index = 0
# Helper function to get the current row
def get_current_row(row_count, row_limit):
sheet_count = int(row_count / row_limit)
row_number = row_count - (sheet_count * row_limit)
if sheet_count > 0:
row_number += 1
return sheets[sheet_count], sheets[sheet_count].row(row_number)
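        # Editor's note, not part of the original source: as a quick sanity
        # check, get_current_row(70000, 65536) returns (sheets[1], row 4465);
        # the overflowing row lands on the second sheet and is shifted down by
        # one, presumably so it does not collide with that sheet's header row.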
# Write the table contents
subheading = None
odd_style = styles["odd"]
even_style = styles["even"]
subheader_style = styles["subheader"]
for row in rows:
# Current row
row_index += 1
current_sheet, current_row = get_current_row(row_index, row_limit)
style = even_style if row_index % 2 == 0 else odd_style
# Group headers
if report_groupby:
represent = s3_strip_markup(s3_unicode(row[report_groupby]))
if subheading != represent:
# Start of new group - write group header
subheading = represent
current_sheet.write_merge(row_index, row_index, 0, total_cols,
subheading,
subheader_style,
)
# Move on to next row
row_index += 1
current_sheet, current_row = get_current_row(row_index, row_limit)
style = even_style if row_index % 2 == 0 else odd_style
col_index = 0
remaining_fields = lfields
# Custom row style?
row_style = None
if "_style" in row:
stylename = row["_style"]
if stylename in styles:
row_style = styles[stylename]
# Group header/footer row?
if "_group" in row:
group_info = row["_group"]
label = group_info.get("label")
totals = group_info.get("totals")
if label:
label = s3_strip_markup(s3_unicode(label))
style = row_style or subheader_style
span = group_info.get("span")
if span == 0:
current_sheet.write_merge(row_index,
row_index,
0,
total_cols - 1,
label,
style,
)
if totals:
# Write totals into the next row
row_index += 1
current_sheet, current_row = \
get_current_row(row_index, row_limit)
else:
current_sheet.write_merge(row_index,
row_index,
0,
span - 1,
label,
style,
)
col_index = span
remaining_fields = lfields[span:]
if not totals:
continue
for field in remaining_fields:
label = headers[field]
if label == groupby_label:
continue
if label == "Id":
# Skip the ID column from XLS exports
col_index += 1
continue
if field not in row:
represent = ""
else:
represent = s3_strip_markup(s3_unicode(row[field]))
coltype = types[col_index]
if coltype == "sort":
continue
if len(represent) > MAX_CELL_SIZE:
represent = represent[:MAX_CELL_SIZE]
value = represent
if coltype == "date":
try:
cell_datetime = datetime.datetime.strptime(value,
date_format_str)
date_tuple = (cell_datetime.year,
cell_datetime.month,
cell_datetime.day)
value = xldate_from_date_tuple(date_tuple, 0)
style.num_format_str = date_format
except:
pass
elif coltype == "datetime":
try:
cell_datetime = datetime.datetime.strptime(value,
date_format_str)
date_tuple = (cell_datetime.year,
cell_datetime.month,
cell_datetime.day,
cell_datetime.hour,
cell_datetime.minute,
cell_datetime.second)
value = xldate_from_datetime_tuple(date_tuple, 0)
style.num_format_str = datetime_format
except:
pass
elif coltype == "time":
try:
cell_datetime = datetime.datetime.strptime(value,
date_format_str)
date_tuple = (cell_datetime.hour,
cell_datetime.minute,
cell_datetime.second)
value = xldate_from_time_tuple(date_tuple)
style.num_format_str = time_format
except:
pass
elif coltype == "integer":
try:
value = int(value)
style.num_format_str = "0"
except:
pass
elif coltype == "double":
try:
value = float(value)
style.num_format_str = "0.00"
except:
pass
if has_id:
# Adjust for the skipped column
write_col_index = col_index - 1
else:
write_col_index = col_index
current_row.write(write_col_index, value, style)
width = len(represent) * COL_WIDTH_MULTIPLIER
if width > column_widths[col_index]:
column_widths[col_index] = width
current_sheet.col(write_col_index).width = width
col_index += 1
# Additional sheet settings
for sheet in sheets:
sheet.panes_frozen = True
sheet.horz_split_pos = 1
# Write output
output = BytesIO()
book.save(output)
output.seek(0)
if attr.get("as_stream", False):
return output
# Response headers
filename = "%s_%s.xls" % (request.env.server_name, title)
disposition = "attachment; filename=\"%s\"" % filename
response = current.response
response.headers["Content-Type"] = contenttype(".xls")
response.headers["Content-disposition"] = disposition
return output.read()
# -------------------------------------------------------------------------
@staticmethod
def expand_hierarchy(rfield, num_levels, rows):
"""
Expand a hierarchical foreign key column into one column
per hierarchy level
@param rfield: the column (S3ResourceField)
@param num_levels: the number of levels (from root)
@param rows: the Rows from S3ResourceData
@returns: list of keys (column names) for the inserted columns
"""
field = rfield.field
if not field or rfield.ftype[:9] != "reference":
return []
# Get the look-up table
ktablename = s3_get_foreign_key(field, m2m=False)[0]
if not ktablename:
return []
colname = rfield.colname
represent = field.represent
# Get the hierarchy
from ..s3hierarchy import S3Hierarchy
h = S3Hierarchy(ktablename)
if not h.config:
return []
# Collect the values from rows
values = set()
for row in rows:
value = row["_row"][colname]
if type(value) is list:
value = value[0]
values.add(value)
# Generate the expanded values
expanded = h.repr_expand(values,
levels = num_levels,
represent = represent,
)
# ...and add them into the rows
colnames = ["%s__%s" % (colname, l) for l in range(num_levels)]
for row in rows:
value = row["_row"][colname]
if type(value) is list:
value = value[0]
hcols = expanded.get(value)
for level in range(num_levels):
row[colnames[level]] = hcols[level] if hcols else None
return colnames
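    # Editor's note (illustrative): for a hypothetical foreign key column named
    # "org_office.organisation_id" expanded over num_levels=2, the keys returned
    # above are "org_office.organisation_id__0" and "org_office.organisation_id__1",
    # and every row gains one value per hierarchy level under those keys.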
# -------------------------------------------------------------------------
@staticmethod
def encode_pt(pt, title):
"""
Encode a S3PivotTable as XLS sheet
@param pt: the S3PivotTable
@param title: the title for the report
@returns: the XLS file as stream
"""
output = BytesIO()
book = S3PivotTableXLS(pt).encode(title)
book.save(output)
output.seek(0)
return output
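    # Illustrative usage sketch (editor's note, not part of the original module);
    # `pt` stands for an already-built S3PivotTable and the title is arbitrary:
    #
    #   xls_stream = S3XLS.encode_pt(pt, "Cases per District")
    #   response.headers["Content-Type"] = contenttype(".xls")
    #   return xls_stream.read()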
# -------------------------------------------------------------------------
@staticmethod
def dt_format_translate(pyfmt):
"""
Translate a Python datetime format string into an
Excel datetime format string
@param pyfmt: the Python format string
"""
translate = {"%a": "ddd",
"%A": "dddd",
"%b": "mmm",
"%B": "mmmm",
"%c": "",
"%d": "dd",
"%f": "",
"%H": "hh",
"%I": "hh",
"%j": "",
"%m": "mm",
"%M": "mm",
"%p": "AM/PM",
"%S": "ss",
"%U": "",
"%w": "",
"%W": "",
"%x": "",
"%X": "",
"%y": "yy",
"%Y": "yyyy",
"%z": "",
"%Z": "",
}
PERCENT = "__percent__"
xlfmt = str(pyfmt).replace("%%", PERCENT)
for tag, translation in translate.items():
xlfmt = xlfmt.replace(tag, translation)
return xlfmt.replace(PERCENT, "%")
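    # Editor's note (illustrative): sample translations produced by the mapping
    # above (input format strings chosen for illustration only):
    #
    #   dt_format_translate("%d-%b-%Y")    -> "dd-mmm-yyyy"
    #   dt_format_translate("%H:%M:%S")    -> "hh:mm:ss"
    #   dt_format_translate("%d %B %Y %%") -> "dd mmmm yyyy %"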
# -------------------------------------------------------------------------
@classmethod
def _styles(cls,
use_colour=False,
evenodd=True,
datetime_format=None,
):
"""
XLS encoder standard cell styles
@param use_colour: use background colour in cells
@param evenodd: render different background colours
for even/odd rows ("stripes")
@param datetime_format: the date/time format
"""
import xlwt
if datetime_format is None:
# Support easier usage from external functions
datetime_format = cls.dt_format_translate(current.deployment_settings.get_L10n_datetime_format())
# Styles
large_header = xlwt.XFStyle()
large_header.font.bold = True
large_header.font.height = 400
if use_colour:
SOLID_PATTERN = large_header.pattern.SOLID_PATTERN
large_header.alignment.horz = large_header.alignment.HORZ_CENTER
large_header.pattern.pattern = SOLID_PATTERN
large_header.pattern.pattern_fore_colour = cls.LARGE_HEADER_COLOUR
notes = xlwt.XFStyle()
notes.font.italic = True
notes.font.height = 160 # 160 Twips = 8 point
notes.num_format_str = datetime_format
header = xlwt.XFStyle()
header.font.bold = True
header.num_format_str = datetime_format
if use_colour:
header.pattern.pattern = SOLID_PATTERN
header.pattern.pattern_fore_colour = cls.HEADER_COLOUR
subheader = xlwt.XFStyle()
subheader.font.bold = True
if use_colour:
subheader.pattern.pattern = SOLID_PATTERN
subheader.pattern.pattern_fore_colour = cls.SUB_HEADER_COLOUR
subtotals = xlwt.XFStyle()
subtotals.font.bold = True
if use_colour:
subtotals.pattern.pattern = SOLID_PATTERN
subtotals.pattern.pattern_fore_colour = cls.SUB_TOTALS_COLOUR
totals = xlwt.XFStyle()
totals.font.bold = True
if use_colour:
totals.pattern.pattern = SOLID_PATTERN
totals.pattern.pattern_fore_colour = cls.TOTALS_COLOUR
odd = xlwt.XFStyle()
if use_colour and evenodd:
odd.pattern.pattern = SOLID_PATTERN
odd.pattern.pattern_fore_colour = cls.ROW_ALTERNATING_COLOURS[0]
even = xlwt.XFStyle()
if use_colour and evenodd:
even.pattern.pattern = SOLID_PATTERN
even.pattern.pattern_fore_colour = cls.ROW_ALTERNATING_COLOURS[1]
return {"large_header": large_header,
"notes": notes,
"header": header,
"subheader": subheader,
"subtotals": subtotals,
"totals": totals,
"odd": odd,
"even": even,
}
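    # Editor's note (illustrative): the encoder looks these styles up by name,
    # e.g.
    #
    #   styles = S3XLS._styles(use_colour=True)
    #   header_style = styles["header"]
    #   odd_row_style = styles["odd"]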
# =============================================================================
class S3PivotTableXLS(object):
"""
XLS encoder for S3PivotTables
@todo: merge+DRY with S3XLS?
@todo: support multiple layers (=write multiple sheets)
@todo: handle huge pivot tables (=exceeding XLS rows/cols limits)
"""
def __init__(self, pt):
"""
Constructor
@param pt: the S3PivotTable to encode
"""
self.pt = pt
# Initialize properties
self._styles = None
self._formats = None
self.lookup = {}
self.valuemap = {}
# -------------------------------------------------------------------------
def encode(self, title):
"""
Convert this pivot table into an XLS file
@param title: the title of the report
@returns: the XLS workbook
"""
try:
import xlwt
except ImportError:
error = S3XLS.ERROR.XLWT_ERROR
current.log.error(error)
raise HTTP(503, body=error)
T = current.T
TOTAL = s3_str(s3_unicode(T("Total")).upper())
pt = self.pt
# Get report options
report_options = pt.resource.get_config("report_options", {})
# Report dimensions
fact = pt.facts[0]
layer = fact.layer
rows_dim = pt.rows
cols_dim = pt.cols
numrows = pt.numrows
numcols = pt.numcols
# Resource fields for dimensions
rfields = pt.rfields
fact_rfield = rfields[fact.selector]
rows_rfield = rfields[rows_dim] if rows_dim else None
cols_rfield = rfields[cols_dim] if cols_dim else None
# Dimension labels
get_label = fact._get_field_label
if rows_dim:
# Get row axis label
rows_label = s3_str(get_label(rows_rfield,
report_options.get("rows"),
))
else:
rows_label = ""
if cols_dim:
cols_label = s3_str(get_label(cols_rfield,
report_options.get("cols"),
))
else:
cols_label = ""
fact_label = s3_str(fact.get_label(fact_rfield,
report_options.get("fact"),
))
# Index of the column for row totals
total_column = (numcols + 1) if cols_dim else 1
# Sort+represent rows and columns
rows, cols = self.sortrepr()
# Create workbook and sheet
book = xlwt.Workbook(encoding="utf-8")
sheet = book.add_sheet(s3_str(title))
write = self.write
# Write header
title_row = current.deployment_settings.get_xls_title_row()
if callable(title_row):
# Custom header (returns number of header rows)
title_length = title_row(sheet)
elif title_row:
# Default header
title_length = 2
# Report title
write(sheet, 0, 0, s3_str(title),
colspan = numcols + 2,
style = "title",
)
# Current date/time (in local timezone)
from ..s3datetime import S3DateTime
dt = S3DateTime.to_local(current.request.utcnow)
write(sheet, 1, 0, dt, style = "subheader", numfmt = "datetime")
else:
# No header
title_length = -1
rowindex = title_length + 1
# Fact label
if rows_dim and cols_dim:
write(sheet, rowindex, 0, fact_label, style="fact_label")
# Columns axis title
if cols_dim:
write(sheet, rowindex, 1, cols_label,
colspan = numcols,
style = "axis_title",
)
rowindex += 1
# Row axis title
write(sheet, rowindex, 0, rows_label, style="axis_title")
# Column labels
if cols_dim:
for idx, col in enumerate(cols):
write(sheet, rowindex, idx + 1, col[2]["text"],
style = "col_label",
)
total_label = TOTAL
else:
# Use fact title as row total label if there is no column axis
total_label = fact_label
# Row totals label
write(sheet, rowindex, total_column, total_label, style="total_right")
# Determine the number format for cell values
numfmt = self.number_format()
totfmt = "integer" if fact.method in ("count", "list") else numfmt
# Choose cell value style according to number format
fact_style = "numeric" if numfmt else None
# Get fact representation method
if fact.method == "list":
listrepr = self.listrepr
fk, fact_repr = pt._represents([layer])[fact.selector]
else:
listrepr = fk = fact_repr = None
# Write data rows (if any)
rowindex += 1
if rows_dim:
icell = pt.cell
for i in xrange(numrows):
row = rows[i]
# Row-label
write(sheet, rowindex + i, 0, row[2]["text"],
style = "row_label",
)
# Cell column values (if any)
if cols_dim:
for j in xrange(numcols):
cell = icell[row[0]][cols[j][0]]
if listrepr:
value = listrepr(cell, fact_rfield, fact_repr, fk=fk)
else:
value = cell[layer]
write(sheet, rowindex + i, j + 1, value,
numfmt = numfmt,
style = fact_style,
)
# Row-total
write(sheet, rowindex + i, total_column, row[1],
style = "total",
numfmt = totfmt,
)
rowindex += numrows
total_label = TOTAL
else:
# Use fact label as column totals label if
# there is no row dimension
total_label = fact_label
# Column totals label
write(sheet, rowindex, 0, total_label, style="total_left")
# Column totals
if cols_dim:
for i in xrange(numcols):
write(sheet, rowindex, i + 1, cols[i][1],
style = "total",
numfmt = totfmt,
)
# Grand total
total = pt.totals[layer]
write(sheet, rowindex, total_column, total,
style = "grand_total",
numfmt = totfmt,
)
return book
# -------------------------------------------------------------------------
def write(self,
sheet,
rowindex,
colindex,
value,
style=None,
numfmt=None,
rowspan=None,
colspan=None,
adjust=True
):
"""
Write a value to a spreadsheet cell
@param sheet: the work sheet
@param rowindex: the row index of the cell
@param colindex: the column index of the cell
@param value: the value to write
@param style: a style name (see styles property)
@param numfmt: a number format name (see formats property)
@param rowspan: number of rows to merge
@param colspan: number of columns to merge
@param adjust: True to adjust column width and row height,
False to suppress automatic adjustment
"""
styles = self.styles
if style:
style = styles.get(style)
if style is None:
style = styles["default"]
# Apply number format
if numfmt:
style.num_format_str = self.formats.get(numfmt, "")
# Get the row
row = sheet.row(rowindex)
if type(value) is list:
labels = [s3_str(v) for v in value]
contents = "\n".join(labels)
else:
labels = [s3_str(value)]
contents = value
# Apply rowspan and colspan
rowspan = 0 if not rowspan or rowspan < 1 else rowspan - 1
colspan = 0 if not colspan or colspan < 1 else colspan - 1
if rowspan > 1 or colspan > 1:
# Write-merge
sheet.write_merge(rowindex, rowindex + rowspan,
colindex, colindex + colspan,
contents,
style,
)
else:
# Just write
row.write(colindex, contents, style)
# Reset number format
style.num_format_str = ""
# Adjust column width and row height
# NB approximations, no exact science (not possible except by
# enforcing a particular fixed-width font, which we don't
# want), so manual adjustments after export may still be
# necessary. Better solutions welcome!
if adjust:
fontsize = float(style.font.height)
# Adjust column width
col = sheet.col(colindex)
if not colspan:
if labels:
width = int(min(max(len(l) for l in labels), 28) *
fontsize * 5.0 / 3.0)
else:
width = 0
if width > col.width:
col.width = width
# Adjust row height
if not rowspan:
lineheight = 1.2 if style.font.bold else 1.0
import math
numlines = 0
width = (col.width * 0.8 * (colspan + 1))
for label in labels:
numlines += math.ceil(len(label) * fontsize / width)
if numlines > 1:
lines = min(numlines, 10)
height = int((lines + 0.8 / lineheight) *
fontsize * lineheight)
else:
height = int(fontsize * lineheight)
if height > row.height:
row.height = height
row.height_mismatch = 1
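    # Illustrative usage sketch (editor's note): a merged, bold title cell spanning
    # three columns in the first row, assuming `sheet` is an xlwt work sheet
    # created by encode():
    #
    #   self.write(sheet, 0, 0, "Quarterly Report", style="title", colspan=3)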
# -------------------------------------------------------------------------
@property
def styles(self):
"""
Style definitions for pivot tables (lazy property)
@returns: dict of named XFStyle instances
"""
styles = self._styles
if styles is None:
from xlwt import Alignment, XFStyle
# Alignments
center = Alignment()
center.horz = Alignment.HORZ_CENTER
center.vert = Alignment.VERT_CENTER
center.wrap = 1
centerleft = Alignment()
centerleft.horz = Alignment.HORZ_LEFT
centerleft.vert = Alignment.VERT_CENTER
centerleft.wrap = 1
bottomcentered = Alignment()
bottomcentered.horz = Alignment.HORZ_CENTER
bottomcentered.vert = Alignment.VERT_BOTTOM
bottomcentered.wrap = 1
bottomleft = Alignment()
bottomleft.horz = Alignment.HORZ_LEFT
bottomleft.vert = Alignment.VERT_BOTTOM
bottomleft.wrap = 1
bottomright = Alignment()
bottomright.horz = Alignment.HORZ_RIGHT
bottomright.vert = Alignment.VERT_BOTTOM
bottomright.wrap = 1
topleft = Alignment()
topleft.horz = Alignment.HORZ_LEFT
topleft.vert = Alignment.VERT_TOP
topleft.wrap = 1
topright = Alignment()
topright.horz = Alignment.HORZ_RIGHT
topright.vert = Alignment.VERT_TOP
topright.wrap = 1
# Styles
twips = lambda pt: 20 * pt # Points to Twips
def style(fontsize=10, bold=False, italic=False, align=None):
""" XFStyle builder helper """
style = XFStyle()
style.font.height = twips(fontsize)
style.font.bold = bold
style.font.italic = italic
if align is not None:
style.alignment = align
return style
self._styles = styles = {
"default": style(align=topleft),
"numeric": style(align=bottomright),
"title": style(fontsize=14, bold=True, align=bottomleft),
"subheader": style(fontsize=8, italic=True, align=bottomleft),
"row_label": style(bold=True, align=topleft),
"col_label": style(bold=True, align=bottomcentered),
"fact_label": style(fontsize=13, bold=True, align=centerleft),
"axis_title": style(fontsize=11, bold=True, align=center),
"total": style(fontsize=11, bold=True, italic=True, align=topright),
"total_left": style(fontsize=11, bold=True, italic=True, align=topleft),
"total_right": style(fontsize=11, bold=True, italic=True, align=center),
"grand_total": style(fontsize=12, bold=True, italic=True, align=topright),
}
return styles
# -------------------------------------------------------------------------
@property
def formats(self):
"""
Number formats for pivot tables (lazy property)
@returns: dict of format strings
"""
formats = self._formats
if formats is None:
# Date/Time formats from L10N deployment settings
settings = current.deployment_settings
translate = S3XLS.dt_format_translate
date_format = translate(settings.get_L10n_date_format())
datetime_format = translate(settings.get_L10n_datetime_format())
time_format = translate(settings.get_L10n_time_format())
formats = {
"date": date_format,
"datetime": datetime_format,
"time": time_format,
"integer": "0",
"double": "0.00"
}
self._formats = formats
return formats
# -------------------------------------------------------------------------
def number_format(self):
"""
Determine the number format for this pivot table
@returns: the number format key (see formats property)
"""
numfmt = None
pt = self.pt
fact = pt.facts[0]
rfield = pt.rfields[fact.selector]
ftype = rfield.ftype
if fact.method == "count":
numfmt = "integer"
elif ftype == "integer":
if fact.method == "avg":
# Average value of ints is a float
numfmt = "double"
else:
numfmt = "integer"
elif ftype in ("date", "datetime", "time", "double"):
numfmt = ftype
elif ftype == "virtual":
# Probe the first value
value = pt.cell[0][0][fact.layer]
if isinstance(value, INTEGER_TYPES):
numfmt = "integer"
elif isinstance(value, float):
numfmt = "double"
else:
import datetime
if isinstance(value, datetime.datetime):
numfmt = "datetime"
elif isinstance(value, datetime.date):
numfmt = "date"
elif isinstance(value, datetime.time):
numfmt = "time"
return numfmt
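    # Editor's note (illustrative): a "count" aggregate therefore renders as
    # "integer", an "avg" over an integer field as "double", and date/time fields
    # keep the localized formats resolved via the formats property.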
# -------------------------------------------------------------------------
def sortrepr(self):
"""
Sort and represent pivot table axes
@returns: tuple (rows, cols), each a list of tuples:
(index, ...the index of the row/column in
the original cell array
total, ...total value of the row/column
{value: axis_value, ...group value of the row/column
text: axis_repr, ...representation of the group value
},
)
"""
pt = self.pt
rfields = pt.rfields
layer = pt.facts[0].layer
# Sort rows
rows_dim = pt.rows
rows_rfield = rfields[rows_dim] if rows_dim else None
row_repr = pt._represent_method(rows_dim)
irows = pt.row
rows = []
for i in xrange(pt.numrows):
irow = irows[i]
header = {"value": irow.value,
"text": irow.text if "text" in irow
else row_repr(irow.value),
}
rows.append((i, irow[layer], header))
pt._sortdim(rows, rows_rfield, index=2)
# Sort columns
cols_dim = pt.cols
cols_rfield = rfields[cols_dim] if cols_dim else None
col_repr = pt._represent_method(cols_dim)
icols = pt.col
cols = []
for i in xrange(pt.numcols):
icol = icols[i]
header = {"value": icol.value,
"text": icol.text if "text" in icol
else col_repr(icol.value),
}
cols.append((i, icol[layer], header))
pt._sortdim(cols, cols_rfield, index=2)
return rows, cols
# -------------------------------------------------------------------------
def listrepr(self, cell, rfield, represent, fk=True):
"""
Represent and sort a list of cell values (for "list" aggregation
method)
            @param cell: the cell data
            @param rfield: the fact S3ResourceField
            @param represent: representation method for the fact field
            @param fk: fact field is a foreign key
@returns: sorted list of represented cell values
"""
pt = self.pt
records = pt.records
colname = rfield.colname
lookup = self.lookup
valuemap = self.valuemap
keys = []
for record_id in cell["records"]:
record = records[record_id]
try:
fvalue = record[colname]
except AttributeError:
continue
if fvalue is None:
continue
if type(fvalue) is not list:
fvalue = [fvalue]
for v in fvalue:
if v is None:
continue
if fk:
if v not in keys:
keys.append(v)
if v not in lookup:
lookup[v] = represent(v)
else:
if v not in valuemap:
next_id = len(valuemap)
valuemap[v] = next_id
keys.append(next_id)
lookup[next_id] = represent(v)
else:
prev_id = valuemap[v]
if prev_id not in keys:
keys.append(prev_id)
keys.sort(key=lambda i: lookup[i])
items = [s3_str(lookup[key]) for key in keys if key in lookup]
return items
# =============================================================================
#class S3HTML2XLS(object):
# """
# Class that takes HTML in the form of web2py helper objects
# and converts it to XLS
#
# @ToDo: Complete this (e.g. start with a copy of S3html2pdf)
# See https://gist.github.com/JustOnce/2be3e4d951a66c22c5e0
# & http://pydoc.net/Python/Kiowa/0.2w.rc9/kiowa.utils.xls.html2xls/
#
# Places to use this:
# org_CapacityReport()
# """
#
# def __init__(self):
#
# pass
#
# # -------------------------------------------------------------------------
# def parse(self, html):
# """
# Entry point for class
# """
#
# return None
#
# END =========================================================================
|
mit
| -3,162,987,197,995,865,000
| 34.964887
| 124
| 0.467252
| false
| 4.751981
| false
| false
| false
|
denisenkom/django
|
tests/serializers_regress/tests.py
|
1
|
21745
|
"""
A test spanning all the capabilities of all the serializers.
This class defines sample data and a dynamically generated
test case that is capable of testing the capabilities of
the serializers. This includes all valid data values, plus
forward, backwards and self references.
"""
from __future__ import unicode_literals
import datetime
import decimal
from unittest import expectedFailure, skipUnless
try:
import yaml
except ImportError:
yaml = None
from django.core import serializers
from django.core.serializers import SerializerDoesNotExist
from django.core.serializers.base import DeserializationError
from django.core.serializers.xml_serializer import DTDForbidden
from django.db import connection, models
from django.http import HttpResponse
from django.test import TestCase
from django.utils import six
from django.utils.encoding import force_text
from django.utils.functional import curry
from .models import (BinaryData, BooleanData, CharData, DateData, DateTimeData, EmailData,
FileData, FilePathData, DecimalData, FloatData, IntegerData, IPAddressData,
GenericIPAddressData, NullBooleanData, PositiveIntegerData,
PositiveSmallIntegerData, SlugData, SmallData, TextData, TimeData,
GenericData, Anchor, UniqueAnchor, FKData, M2MData, O2OData,
FKSelfData, M2MSelfData, FKDataToField, FKDataToO2O, M2MIntermediateData,
Intermediate, BooleanPKData, CharPKData, EmailPKData, FilePathPKData,
DecimalPKData, FloatPKData, IntegerPKData, IPAddressPKData,
GenericIPAddressPKData, PositiveIntegerPKData,
PositiveSmallIntegerPKData, SlugPKData, SmallPKData,
AutoNowDateTimeData, ModifyingSaveData, InheritAbstractModel, BaseModel,
ExplicitInheritBaseModel, InheritBaseModel, ProxyBaseModel,
ProxyProxyBaseModel, BigIntegerData, LengthModel, Tag, ComplexModel,
NaturalKeyAnchor, FKDataNaturalKey)
# A set of functions that can be used to recreate
# test data objects of various kinds.
# The save method is a raw base model save, to make
# sure that the data in the database matches the
# exact test case.
def data_create(pk, klass, data):
instance = klass(id=pk)
instance.data = data
models.Model.save_base(instance, raw=True)
return [instance]
def generic_create(pk, klass, data):
instance = klass(id=pk)
instance.data = data[0]
models.Model.save_base(instance, raw=True)
for tag in data[1:]:
instance.tags.create(data=tag)
return [instance]
def fk_create(pk, klass, data):
instance = klass(id=pk)
setattr(instance, 'data_id', data)
models.Model.save_base(instance, raw=True)
return [instance]
def m2m_create(pk, klass, data):
instance = klass(id=pk)
models.Model.save_base(instance, raw=True)
instance.data = data
return [instance]
def im2m_create(pk, klass, data):
instance = klass(id=pk)
models.Model.save_base(instance, raw=True)
return [instance]
def im_create(pk, klass, data):
instance = klass(id=pk)
instance.right_id = data['right']
instance.left_id = data['left']
if 'extra' in data:
instance.extra = data['extra']
models.Model.save_base(instance, raw=True)
return [instance]
def o2o_create(pk, klass, data):
instance = klass()
instance.data_id = data
models.Model.save_base(instance, raw=True)
return [instance]
def pk_create(pk, klass, data):
instance = klass()
instance.data = data
models.Model.save_base(instance, raw=True)
return [instance]
def inherited_create(pk, klass, data):
instance = klass(id=pk,**data)
# This isn't a raw save because:
# 1) we're testing inheritance, not field behavior, so none
# of the field values need to be protected.
# 2) saving the child class and having the parent created
# automatically is easier than manually creating both.
models.Model.save(instance)
created = [instance]
for klass,field in instance._meta.parents.items():
created.append(klass.objects.get(id=pk))
return created
# A set of functions that can be used to compare
# test data objects of various kinds
def data_compare(testcase, pk, klass, data):
instance = klass.objects.get(id=pk)
if klass == BinaryData and data is not None:
testcase.assertEqual(bytes(data), bytes(instance.data),
"Objects with PK=%d not equal; expected '%s' (%s), got '%s' (%s)" % (
pk, repr(bytes(data)), type(data), repr(bytes(instance.data)),
type(instance.data))
)
else:
testcase.assertEqual(data, instance.data,
"Objects with PK=%d not equal; expected '%s' (%s), got '%s' (%s)" % (
pk, data, type(data), instance, type(instance.data))
)
def generic_compare(testcase, pk, klass, data):
instance = klass.objects.get(id=pk)
testcase.assertEqual(data[0], instance.data)
testcase.assertEqual(data[1:], [t.data for t in instance.tags.order_by('id')])
def fk_compare(testcase, pk, klass, data):
instance = klass.objects.get(id=pk)
testcase.assertEqual(data, instance.data_id)
def m2m_compare(testcase, pk, klass, data):
instance = klass.objects.get(id=pk)
testcase.assertEqual(data, [obj.id for obj in instance.data.order_by('id')])
def im2m_compare(testcase, pk, klass, data):
instance = klass.objects.get(id=pk)
    # actually nothing else to check; the instance just needs to exist
def im_compare(testcase, pk, klass, data):
instance = klass.objects.get(id=pk)
testcase.assertEqual(data['left'], instance.left_id)
testcase.assertEqual(data['right'], instance.right_id)
if 'extra' in data:
testcase.assertEqual(data['extra'], instance.extra)
else:
testcase.assertEqual("doesn't matter", instance.extra)
def o2o_compare(testcase, pk, klass, data):
instance = klass.objects.get(data=data)
testcase.assertEqual(data, instance.data_id)
def pk_compare(testcase, pk, klass, data):
instance = klass.objects.get(data=data)
testcase.assertEqual(data, instance.data)
def inherited_compare(testcase, pk, klass, data):
instance = klass.objects.get(id=pk)
for key,value in data.items():
testcase.assertEqual(value, getattr(instance,key))
# Define some data types. Each data type is
# actually a pair of functions; one to create
# and one to compare objects of that type
data_obj = (data_create, data_compare)
generic_obj = (generic_create, generic_compare)
fk_obj = (fk_create, fk_compare)
m2m_obj = (m2m_create, m2m_compare)
im2m_obj = (im2m_create, im2m_compare)
im_obj = (im_create, im_compare)
o2o_obj = (o2o_create, o2o_compare)
pk_obj = (pk_create, pk_compare)
inherited_obj = (inherited_create, inherited_compare)
test_data = [
# Format: (data type, PK value, Model Class, data)
(data_obj, 1, BinaryData, six.memoryview(b"\x05\xFD\x00")),
(data_obj, 2, BinaryData, None),
(data_obj, 5, BooleanData, True),
(data_obj, 6, BooleanData, False),
(data_obj, 10, CharData, "Test Char Data"),
(data_obj, 11, CharData, ""),
(data_obj, 12, CharData, "None"),
(data_obj, 13, CharData, "null"),
(data_obj, 14, CharData, "NULL"),
(data_obj, 15, CharData, None),
# (We use something that will fit into a latin1 database encoding here,
# because that is still the default used on many system setups.)
(data_obj, 16, CharData, '\xa5'),
(data_obj, 20, DateData, datetime.date(2006,6,16)),
(data_obj, 21, DateData, None),
(data_obj, 30, DateTimeData, datetime.datetime(2006,6,16,10,42,37)),
(data_obj, 31, DateTimeData, None),
(data_obj, 40, EmailData, "hovercraft@example.com"),
(data_obj, 41, EmailData, None),
(data_obj, 42, EmailData, ""),
(data_obj, 50, FileData, 'file:///foo/bar/whiz.txt'),
# (data_obj, 51, FileData, None),
(data_obj, 52, FileData, ""),
(data_obj, 60, FilePathData, "/foo/bar/whiz.txt"),
(data_obj, 61, FilePathData, None),
(data_obj, 62, FilePathData, ""),
(data_obj, 70, DecimalData, decimal.Decimal('12.345')),
(data_obj, 71, DecimalData, decimal.Decimal('-12.345')),
(data_obj, 72, DecimalData, decimal.Decimal('0.0')),
(data_obj, 73, DecimalData, None),
(data_obj, 74, FloatData, 12.345),
(data_obj, 75, FloatData, -12.345),
(data_obj, 76, FloatData, 0.0),
(data_obj, 77, FloatData, None),
(data_obj, 80, IntegerData, 123456789),
(data_obj, 81, IntegerData, -123456789),
(data_obj, 82, IntegerData, 0),
(data_obj, 83, IntegerData, None),
#(XX, ImageData
(data_obj, 90, IPAddressData, "127.0.0.1"),
(data_obj, 91, IPAddressData, None),
(data_obj, 95, GenericIPAddressData, "fe80:1424:2223:6cff:fe8a:2e8a:2151:abcd"),
(data_obj, 96, GenericIPAddressData, None),
(data_obj, 100, NullBooleanData, True),
(data_obj, 101, NullBooleanData, False),
(data_obj, 102, NullBooleanData, None),
(data_obj, 120, PositiveIntegerData, 123456789),
(data_obj, 121, PositiveIntegerData, None),
(data_obj, 130, PositiveSmallIntegerData, 12),
(data_obj, 131, PositiveSmallIntegerData, None),
(data_obj, 140, SlugData, "this-is-a-slug"),
(data_obj, 141, SlugData, None),
(data_obj, 142, SlugData, ""),
(data_obj, 150, SmallData, 12),
(data_obj, 151, SmallData, -12),
(data_obj, 152, SmallData, 0),
(data_obj, 153, SmallData, None),
(data_obj, 160, TextData, """This is a long piece of text.
It contains line breaks.
Several of them.
The end."""),
(data_obj, 161, TextData, ""),
(data_obj, 162, TextData, None),
(data_obj, 170, TimeData, datetime.time(10,42,37)),
(data_obj, 171, TimeData, None),
(generic_obj, 200, GenericData, ['Generic Object 1', 'tag1', 'tag2']),
(generic_obj, 201, GenericData, ['Generic Object 2', 'tag2', 'tag3']),
(data_obj, 300, Anchor, "Anchor 1"),
(data_obj, 301, Anchor, "Anchor 2"),
(data_obj, 302, UniqueAnchor, "UAnchor 1"),
(fk_obj, 400, FKData, 300), # Post reference
(fk_obj, 401, FKData, 500), # Pre reference
(fk_obj, 402, FKData, None), # Empty reference
(m2m_obj, 410, M2MData, []), # Empty set
(m2m_obj, 411, M2MData, [300,301]), # Post reference
(m2m_obj, 412, M2MData, [500,501]), # Pre reference
(m2m_obj, 413, M2MData, [300,301,500,501]), # Pre and Post reference
(o2o_obj, None, O2OData, 300), # Post reference
(o2o_obj, None, O2OData, 500), # Pre reference
(fk_obj, 430, FKSelfData, 431), # Pre reference
(fk_obj, 431, FKSelfData, 430), # Post reference
(fk_obj, 432, FKSelfData, None), # Empty reference
(m2m_obj, 440, M2MSelfData, []),
(m2m_obj, 441, M2MSelfData, []),
(m2m_obj, 442, M2MSelfData, [440, 441]),
(m2m_obj, 443, M2MSelfData, [445, 446]),
(m2m_obj, 444, M2MSelfData, [440, 441, 445, 446]),
(m2m_obj, 445, M2MSelfData, []),
(m2m_obj, 446, M2MSelfData, []),
(fk_obj, 450, FKDataToField, "UAnchor 1"),
(fk_obj, 451, FKDataToField, "UAnchor 2"),
(fk_obj, 452, FKDataToField, None),
(fk_obj, 460, FKDataToO2O, 300),
(im2m_obj, 470, M2MIntermediateData, None),
    # testing post- and pre-references and extra fields
(im_obj, 480, Intermediate, {'right': 300, 'left': 470}),
(im_obj, 481, Intermediate, {'right': 300, 'left': 490}),
(im_obj, 482, Intermediate, {'right': 500, 'left': 470}),
(im_obj, 483, Intermediate, {'right': 500, 'left': 490}),
(im_obj, 484, Intermediate, {'right': 300, 'left': 470, 'extra': "extra"}),
(im_obj, 485, Intermediate, {'right': 300, 'left': 490, 'extra': "extra"}),
(im_obj, 486, Intermediate, {'right': 500, 'left': 470, 'extra': "extra"}),
(im_obj, 487, Intermediate, {'right': 500, 'left': 490, 'extra': "extra"}),
(im2m_obj, 490, M2MIntermediateData, []),
(data_obj, 500, Anchor, "Anchor 3"),
(data_obj, 501, Anchor, "Anchor 4"),
(data_obj, 502, UniqueAnchor, "UAnchor 2"),
(pk_obj, 601, BooleanPKData, True),
(pk_obj, 602, BooleanPKData, False),
(pk_obj, 610, CharPKData, "Test Char PKData"),
# (pk_obj, 620, DatePKData, datetime.date(2006,6,16)),
# (pk_obj, 630, DateTimePKData, datetime.datetime(2006,6,16,10,42,37)),
(pk_obj, 640, EmailPKData, "hovercraft@example.com"),
# (pk_obj, 650, FilePKData, 'file:///foo/bar/whiz.txt'),
(pk_obj, 660, FilePathPKData, "/foo/bar/whiz.txt"),
(pk_obj, 670, DecimalPKData, decimal.Decimal('12.345')),
(pk_obj, 671, DecimalPKData, decimal.Decimal('-12.345')),
(pk_obj, 672, DecimalPKData, decimal.Decimal('0.0')),
(pk_obj, 673, FloatPKData, 12.345),
(pk_obj, 674, FloatPKData, -12.345),
(pk_obj, 675, FloatPKData, 0.0),
(pk_obj, 680, IntegerPKData, 123456789),
(pk_obj, 681, IntegerPKData, -123456789),
(pk_obj, 682, IntegerPKData, 0),
# (XX, ImagePKData
(pk_obj, 690, IPAddressPKData, "127.0.0.1"),
(pk_obj, 695, GenericIPAddressPKData, "fe80:1424:2223:6cff:fe8a:2e8a:2151:abcd"),
# (pk_obj, 700, NullBooleanPKData, True),
# (pk_obj, 701, NullBooleanPKData, False),
(pk_obj, 720, PositiveIntegerPKData, 123456789),
(pk_obj, 730, PositiveSmallIntegerPKData, 12),
(pk_obj, 740, SlugPKData, "this-is-a-slug"),
(pk_obj, 750, SmallPKData, 12),
(pk_obj, 751, SmallPKData, -12),
(pk_obj, 752, SmallPKData, 0),
# (pk_obj, 760, TextPKData, """This is a long piece of text.
# It contains line breaks.
# Several of them.
# The end."""),
# (pk_obj, 770, TimePKData, datetime.time(10,42,37)),
# (pk_obj, 790, XMLPKData, "<foo></foo>"),
(data_obj, 800, AutoNowDateTimeData, datetime.datetime(2006,6,16,10,42,37)),
(data_obj, 810, ModifyingSaveData, 42),
(inherited_obj, 900, InheritAbstractModel, {'child_data':37,'parent_data':42}),
(inherited_obj, 910, ExplicitInheritBaseModel, {'child_data':37,'parent_data':42}),
(inherited_obj, 920, InheritBaseModel, {'child_data':37,'parent_data':42}),
(data_obj, 1000, BigIntegerData, 9223372036854775807),
(data_obj, 1001, BigIntegerData, -9223372036854775808),
(data_obj, 1002, BigIntegerData, 0),
(data_obj, 1003, BigIntegerData, None),
(data_obj, 1004, LengthModel, 0),
(data_obj, 1005, LengthModel, 1),
]
natural_key_test_data = [
(data_obj, 1100, NaturalKeyAnchor, "Natural Key Anghor"),
(fk_obj, 1101, FKDataNaturalKey, 1100),
(fk_obj, 1102, FKDataNaturalKey, None),
]
# Because Oracle treats the empty string as NULL, Oracle is expected to fail
# when field.empty_strings_allowed is True and the value is None; skip these
# tests.
if connection.features.interprets_empty_strings_as_nulls:
test_data = [data for data in test_data
if not (data[0] == data_obj and
data[2]._meta.get_field('data').empty_strings_allowed and
data[3] is None)]
# Regression test for #8651 -- a FK to an object with PK of 0
# This won't work on MySQL since it won't let you create an object
# with a primary key of 0.
if connection.features.allows_primary_key_0:
test_data.extend([
(data_obj, 0, Anchor, "Anchor 0"),
(fk_obj, 465, FKData, 0),
])
# Dynamically create serializer tests to ensure that all
# registered serializers are automatically tested.
class SerializerTests(TestCase):
def test_get_unknown_serializer(self):
"""
#15889: get_serializer('nonsense') raises a SerializerDoesNotExist
"""
with self.assertRaises(SerializerDoesNotExist):
serializers.get_serializer("nonsense")
with self.assertRaises(KeyError):
serializers.get_serializer("nonsense")
# SerializerDoesNotExist is instantiated with the nonexistent format
with self.assertRaises(SerializerDoesNotExist) as cm:
serializers.get_serializer("nonsense")
self.assertEqual(cm.exception.args, ("nonsense",))
def test_unregister_unkown_serializer(self):
with self.assertRaises(SerializerDoesNotExist):
serializers.unregister_serializer("nonsense")
def test_get_unkown_deserializer(self):
with self.assertRaises(SerializerDoesNotExist):
serializers.get_deserializer("nonsense")
def test_json_deserializer_exception(self):
with self.assertRaises(DeserializationError):
for obj in serializers.deserialize("json", """[{"pk":1}"""):
pass
@skipUnless(yaml, "PyYAML not installed")
def test_yaml_deserializer_exception(self):
with self.assertRaises(DeserializationError):
for obj in serializers.deserialize("yaml", "{"):
pass
def test_serialize_proxy_model(self):
BaseModel.objects.create(parent_data=1)
base_objects = BaseModel.objects.all()
proxy_objects = ProxyBaseModel.objects.all()
proxy_proxy_objects = ProxyProxyBaseModel.objects.all()
base_data = serializers.serialize("json", base_objects)
proxy_data = serializers.serialize("json", proxy_objects)
proxy_proxy_data = serializers.serialize("json", proxy_proxy_objects)
self.assertEqual(base_data, proxy_data.replace('proxy', ''))
self.assertEqual(base_data, proxy_proxy_data.replace('proxy', ''))
def serializerTest(format, self):
# Create all the objects defined in the test data
objects = []
instance_count = {}
for (func, pk, klass, datum) in test_data:
with connection.constraint_checks_disabled():
objects.extend(func[0](pk, klass, datum))
# Get a count of the number of objects created for each class
for klass in instance_count:
instance_count[klass] = klass.objects.count()
# Add the generic tagged objects to the object list
objects.extend(Tag.objects.all())
# Serialize the test database
serialized_data = serializers.serialize(format, objects, indent=2)
for obj in serializers.deserialize(format, serialized_data):
obj.save()
# Assert that the deserialized data is the same
# as the original source
for (func, pk, klass, datum) in test_data:
func[1](self, pk, klass, datum)
# Assert that the number of objects deserialized is the
# same as the number that was serialized.
for klass, count in instance_count.items():
self.assertEqual(count, klass.objects.count())
if connection.vendor == 'mysql' and six.PY3:
# Existing MySQL DB-API drivers fail on binary data.
serializerTest = expectedFailure(serializerTest)
def naturalKeySerializerTest(format, self):
# Create all the objects defined in the test data
objects = []
instance_count = {}
for (func, pk, klass, datum) in natural_key_test_data:
with connection.constraint_checks_disabled():
objects.extend(func[0](pk, klass, datum))
# Get a count of the number of objects created for each class
for klass in instance_count:
instance_count[klass] = klass.objects.count()
# Serialize the test database
serialized_data = serializers.serialize(format, objects, indent=2,
use_natural_keys=True)
for obj in serializers.deserialize(format, serialized_data):
obj.save()
# Assert that the deserialized data is the same
# as the original source
for (func, pk, klass, datum) in natural_key_test_data:
func[1](self, pk, klass, datum)
# Assert that the number of objects deserialized is the
# same as the number that was serialized.
for klass, count in instance_count.items():
self.assertEqual(count, klass.objects.count())
def fieldsTest(format, self):
obj = ComplexModel(field1='first', field2='second', field3='third')
obj.save_base(raw=True)
# Serialize then deserialize the test database
serialized_data = serializers.serialize(format, [obj], indent=2, fields=('field1','field3'))
result = next(serializers.deserialize(format, serialized_data))
# Check that the deserialized object contains data in only the serialized fields.
self.assertEqual(result.object.field1, 'first')
self.assertEqual(result.object.field2, '')
self.assertEqual(result.object.field3, 'third')
def streamTest(format, self):
obj = ComplexModel(field1='first',field2='second',field3='third')
obj.save_base(raw=True)
# Serialize the test database to a stream
for stream in (six.StringIO(), HttpResponse()):
serializers.serialize(format, [obj], indent=2, stream=stream)
# Serialize normally for a comparison
string_data = serializers.serialize(format, [obj], indent=2)
# Check that the two are the same
if isinstance(stream, six.StringIO):
self.assertEqual(string_data, stream.getvalue())
else:
self.assertEqual(string_data, stream.content.decode('utf-8'))
for format in serializers.get_serializer_formats():
setattr(SerializerTests, 'test_' + format + '_serializer', curry(serializerTest, format))
setattr(SerializerTests, 'test_' + format + '_natural_key_serializer', curry(naturalKeySerializerTest, format))
setattr(SerializerTests, 'test_' + format + '_serializer_fields', curry(fieldsTest, format))
if format != 'python':
setattr(SerializerTests, 'test_' + format + '_serializer_stream', curry(streamTest, format))
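# Editor's note (illustrative): with the built-in formats this loop generates
# methods such as test_json_serializer, test_xml_serializer_fields and
# test_xml_serializer_stream on SerializerTests; 'python' is excluded from the
# stream test because that serializer yields Python objects rather than text.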
class XmlDeserializerSecurityTests(TestCase):
def test_no_dtd(self):
"""
The XML deserializer shouldn't allow a DTD.
This is the most straightforward way to prevent all entity definitions
and avoid both external entities and entity-expansion attacks.
"""
xml = '<?xml version="1.0" standalone="no"?><!DOCTYPE example SYSTEM "http://example.com/example.dtd">'
with self.assertRaises(DTDForbidden):
next(serializers.deserialize('xml', xml))
|
bsd-3-clause
| -7,877,821,534,166,548,000
| 38.826007
| 115
| 0.668935
| false
| 3.377077
| true
| false
| false
|
ErickMurillo/geodjango-example
|
world/migrations/0001_initial.py
|
1
|
1422
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.contrib.gis.db.models.fields
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='WorldBorder',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=50)),
('area', models.IntegerField()),
('pop2005', models.IntegerField(verbose_name=b'Population 2005')),
('fips', models.CharField(max_length=2, verbose_name=b'FIPS Code')),
('iso2', models.CharField(max_length=2, verbose_name=b'2 Digit ISO')),
('iso3', models.CharField(max_length=3, verbose_name=b'3 Digit ISO')),
('un', models.IntegerField(verbose_name=b'United Nations Code')),
('region', models.IntegerField(verbose_name=b'Region Code')),
('subregion', models.IntegerField(verbose_name=b'Sub-Region Code')),
('lon', models.FloatField()),
('lat', models.FloatField()),
('mpoly', django.contrib.gis.db.models.fields.MultiPolygonField(srid=4326)),
],
options={
},
bases=(models.Model,),
),
]
|
mit
| -1,354,117,714,438,932,500
| 39.628571
| 114
| 0.563291
| false
| 4.232143
| false
| false
| false
|
shiquanwang/numba
|
numba/support/cffi_support.py
|
1
|
2857
|
# -*- coding: utf-8 -*-
"""
Support for CFFI. Allows checking whether objects are CFFI functions and
obtaining the pointer and numba signature.
"""
from __future__ import print_function, division, absolute_import
from numba import *
from numba.minivect.minitypes import *
from numba.minivect import minitypes, minierror
try:
import cffi
ffi = cffi.FFI()
except ImportError:
ffi = None
def is_cffi_func(obj):
"Check whether the obj is a CFFI function"
try:
return type(obj) is cffi_func_type
# This is dangerous:
# >>> ffi.typeof("void (*)()")
# <ctype 'void(*)()'>
# return ffi.typeof(obj).kind == 'function'
except TypeError:
return False
def get_pointer(cffi_func):
"""
Get a pointer to the underlying function for a CFFI function as an
integer.
"""
return int(ffi.cast("uintptr_t", cffi_func))
def map_type(cffi_type):
"Map CFFI type to numba type"
if cffi_type.kind in ('struct', 'union'):
if cffi_type.kind == 'union':
result = None
else:
result = struct([(name, map_type(field_type))
for name, field_type in cffi_type.fields])
elif cffi_type.kind == 'function':
restype = map_type(cffi_type.result)
argtypes = [map_type(arg) for arg in cffi_type.args]
result = minitypes.FunctionType(restype, argtypes,
is_vararg=cffi_type.ellipsis).pointer()
else:
result = type_map.get(cffi_type)
if result is None:
raise minierror.UnmappableTypeError(cffi_type)
return result
def get_signature(cffi_func):
"Get the numba signature for a CFFI function"
return map_type(ffi.typeof(cffi_func)).base_type
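# Editor's note (illustrative sketch, not part of the original module): assuming a
# C function declared through CFFI, the helpers above combine roughly like this:
#
#   ffi.cdef("int abs(int);")
#   lib = ffi.dlopen(None)
#   if is_cffi_func(lib.abs):
#       signature = get_signature(lib.abs)  # numba signature, e.g. int_(int_)
#       address = get_pointer(lib.abs)      # integer address of the C function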
if ffi is None:
# Disable cffi support
is_cffi_func = lambda x: False
type_map = None
else:
type_map = {
ffi.typeof('char') : char,
ffi.typeof('short') : short,
ffi.typeof('int') : int_,
ffi.typeof('long') : long_,
ffi.typeof('long long') : longlong,
ffi.typeof('unsigned char') : uchar,
ffi.typeof('unsigned short') : ushort,
ffi.typeof('unsigned int') : uint,
ffi.typeof('unsigned long') : ulong,
ffi.typeof('unsigned long long') : ulonglong,
ffi.typeof('float') : float_,
ffi.typeof('double') : double,
ffi.typeof('long double') : longdouble,
ffi.typeof('char *') : c_string_type,
ffi.typeof('ssize_t') : Py_ssize_t,
ffi.typeof('size_t') : size_t,
}
ffi.cdef("int printf(char *, ...);")
lib = ffi.dlopen(None)
cffi_func_type = type(lib.printf)
|
bsd-2-clause
| 2,206,667,430,541,992,200
| 31.83908
| 79
| 0.559328
| false
| 3.653453
| false
| false
| false
|
CosmosID/cosmosid-cli
|
cosmosid/api/analysis.py
|
1
|
3803
|
"""Representation of Analysis."""
import logging
import requests
from cosmosid.api.files import Runs
from cosmosid.helpers.exceptions import (AuthenticationFailed,
CosmosidException,
NotFoundException)
LOGGER = logging.getLogger(__name__)
class Analysis(object):
"""Runs analysis interface."""
__resource_path = '/api/metagenid/v1/runs/{run_id}/analysis'
def __init__(self, base_url=None, api_key=None):
self.base_url = base_url
self.logger = LOGGER
self.header = {'X-Api-Key': api_key}
self.request_url = "{}{}".format(self.base_url, self.__resource_path)
self.runs = Runs(base_url=self.base_url,
api_key=self.header['X-Api-Key'])
def __is_runid_in_file(self, run_id, file_id):
"""Get given run meta and check is the run in sample."""
single_run = self.runs.get_single_run(run_id)
if single_run:
if single_run['status']:
if single_run['file']['id'] == file_id:
return True
return False
def __get_analysis_by_file_id(self, file_id):
last_run = self.runs.get_last_run_for_file(file_id)
result_data = None
if last_run:
result_data = self.__get_analysis_by_run_id(last_run['id'])
return result_data
def __get_analysis_by_run_id(self, run_id):
request_url = self.request_url.format(run_id=run_id)
try:
single_run_meta = self.runs.get_single_run(run_id)
if not single_run_meta:
raise CosmosidException('Response from service is empty for '
'run id %s' % run_id)
if not single_run_meta['status']:
raise NotFoundException(single_run_meta['message'])
results = requests.get(request_url, headers=self.header)
if results.status_code == 403:
raise AuthenticationFailed('Authentication Failed. '
'Wrong API Key.')
if results.status_code == 404:
result_data = results.json()
result_data.update({'status': 0})
result_data.update({'run_meta': single_run_meta})
return result_data
if requests.codes.ok:
result_data = results.json()
result_data.update({'status': 1})
result_data.update({'run_meta': single_run_meta})
return result_data
results.raise_for_status()
except AuthenticationFailed:
self.logger.error('Authentication Failed')
except NotFoundException:
self.logger.error('Not Found')
except CosmosidException:
self.logger.error('Got Analysis data exception.')
except requests.exceptions.RequestException:
self.logger.debug('Debug', exc_info=True)
            self.logger.error('Error occurred during request')
self.logger.error('Response Status Code: %s', results.status_code)
def get_list(self, file_id=None, run_id=None):
"""Get analysis data.
cli analysis --id ID
"""
if file_id and run_id:
if self.__is_runid_in_file(run_id, file_id):
return self.__get_analysis_by_run_id(run_id)
            msg = 'File %s does not contain Run %s' % (file_id, run_id)
return {'status': 0,
'message': msg}
elif run_id and not file_id:
return self.__get_analysis_by_run_id(run_id)
elif file_id and not run_id:
return self.__get_analysis_by_file_id(file_id)
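    # Illustrative usage sketch (editor's note): the URL, key and IDs below are
    # placeholders rather than real CosmosID values.
    #
    #   analysis = Analysis(base_url="https://app.cosmosid.com", api_key="<API-KEY>")
    #   result = analysis.get_list(file_id="<file-uuid>", run_id="<run-uuid>")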
|
mit
| 7,192,464,085,135,019,000
| 40.336957
| 78
| 0.546148
| false
| 4.045745
| false
| false
| false
|
dongweiming/flask_reveal
|
social/strategies/base.py
|
1
|
6213
|
import time
import random
import hashlib
from social.utils import setting_name
from social.store import OpenIdStore
class BaseTemplateStrategy(object):
def __init__(self, strategy):
self.strategy = strategy
def render(self, tpl=None, html=None, context=None):
if not tpl and not html:
raise ValueError('Missing template or html parameters')
context = context or {}
if tpl:
return self.render_template(tpl, context)
else:
return self.render_string(html, context)
def render_template(self, tpl, context):
raise NotImplementedError('Implement in subclass')
def render_string(self, html, context):
raise NotImplementedError('Implement in subclass')
class BaseStrategy(object):
ALLOWED_CHARS = 'abcdefghijklmnopqrstuvwxyz' \
'ABCDEFGHIJKLMNOPQRSTUVWXYZ' \
'0123456789'
def __init__(self, backend=None, storage=None, request=None, tpl=None,
backends=None, *args, **kwargs):
tpl = tpl or BaseTemplateStrategy
if not isinstance(tpl, BaseTemplateStrategy):
tpl = tpl(self)
self.tpl = tpl
self.request = request
self.storage = storage
self.backends = backends
if backend:
self.backend_name = backend.name
self.backend = backend(strategy=self, *args, **kwargs)
else:
self.backend_name = None
self.backend = backend
def setting(self, name, default=None):
names = (setting_name(self.backend_name, name),
setting_name(name),
name)
for name in names:
try:
return self.get_setting(name)
except (AttributeError, KeyError):
pass
return default
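    # Editor's note (illustrative, assuming the usual behaviour of
    # social.utils.setting_name): for a backend named "facebook",
    # setting("KEY") tries SOCIAL_AUTH_FACEBOOK_KEY, then SOCIAL_AUTH_KEY,
    # then plain KEY, and falls back to the given default.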
def start(self):
# Clean any partial pipeline info before starting the process
self.clean_partial_pipeline()
if self.backend.uses_redirect():
return self.redirect(self.backend.auth_url())
else:
return self.html(self.backend.auth_html())
def complete(self, *args, **kwargs):
return self.backend.auth_complete(*args, **kwargs)
def continue_pipeline(self, *args, **kwargs):
return self.backend.continue_pipeline(*args, **kwargs)
def disconnect(self, user, association_id=None):
self.storage.user.disconnect(name=self.backend.name, user=user,
association_id=association_id)
def authenticate(self, *args, **kwargs):
kwargs['strategy'] = self
kwargs['storage'] = self.storage
kwargs['backend'] = self.backend
return self.backend.authenticate(*args, **kwargs)
def create_user(self, *args, **kwargs):
return self.storage.user.create_user(*args, **kwargs)
def get_user(self, *args, **kwargs):
return self.storage.user.get_user(*args, **kwargs)
def session_setdefault(self, name, value):
self.session_set(name, value)
return self.session_get(name)
def to_session(self, next, backend, *args, **kwargs):
return {
'next': next,
'backend': backend.name,
'args': args,
'kwargs': kwargs
}
def from_session(self, session):
return session['next'], session['backend'], \
session['args'], session['kwargs']
def clean_partial_pipeline(self):
self.session_pop('partial_pipeline')
def openid_store(self):
return OpenIdStore(self)
def get_pipeline(self):
return self.setting('PIPELINE', (
'social.pipeline.social_auth.social_user',
'social.pipeline.user.get_username',
'social.pipeline.user.create_user',
'social.pipeline.social_auth.associate_user',
'social.pipeline.social_auth.load_extra_data',
'social.pipeline.user.user_details'
))
def random_string(self, length=12, chars=ALLOWED_CHARS):
# Implementation borrowed from django 1.4
try:
random.SystemRandom()
except NotImplementedError:
key = self.setting('SECRET_KEY', '')
seed = '%s%s%s' % (random.getstate(), time.time(), key)
random.seed(hashlib.sha256(seed.encode()).digest())
return ''.join([random.choice(chars) for i in range(length)])
def is_integrity_error(self, exception):
return self.storage.is_integrity_error(exception)
# Implement the following methods on strategies sub-classes
def redirect(self, url):
"""Return a response redirect to the given URL"""
raise NotImplementedError('Implement in subclass')
def get_setting(self, name):
"""Return value for given setting name"""
raise NotImplementedError('Implement in subclass')
def html(self, content):
"""Return HTTP response with given content"""
raise NotImplementedError('Implement in subclass')
def render_html(self, tpl=None, html=None, context=None):
"""Render given template or raw html with given context"""
return self.tpl.render(tpl, html, context)
def request_data(self, merge=True):
"""Return current request data (POST or GET)"""
raise NotImplementedError('Implement in subclass')
def request_host(self):
"""Return current host value"""
raise NotImplementedError('Implement in subclass')
def session_get(self, name, default=None):
"""Return session value for given key"""
raise NotImplementedError('Implement in subclass')
def session_set(self, name, value):
"""Set session value for given key"""
raise NotImplementedError('Implement in subclass')
def session_pop(self, name):
"""Pop session value for given key"""
raise NotImplementedError('Implement in subclass')
def build_absolute_uri(self, path=None):
"""Build absolute URI with given (optional) path"""
raise NotImplementedError('Implement in subclass')
def is_response(self, value):
raise NotImplementedError('Implement in subclass')
|
bsd-3-clause
| -2,883,652,518,667,524,000
| 33.709497
| 74
| 0.617576
| false
| 4.479452
| false
| false
| false
|
FEniCS/ufl
|
ufl/finiteelement/hdivcurl.py
|
1
|
4145
|
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2016 Andrew T. T. McRae
#
# This file is part of UFL (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
#
# Modified by Massimiliano Leoni, 2016
from ufl.finiteelement.finiteelementbase import FiniteElementBase
from ufl.sobolevspace import HDiv, HCurl
class HDivElement(FiniteElementBase):
"""A div-conforming version of an outer product element, assuming
this makes mathematical sense."""
__slots__ = ("_element",)
def __init__(self, element):
self._element = element
self._repr = "HDivElement(%s)" % repr(element)
family = "TensorProductElement"
cell = element.cell()
degree = element.degree()
quad_scheme = element.quadrature_scheme()
value_shape = (element.cell().geometric_dimension(),)
reference_value_shape = (element.cell().topological_dimension(),)
# Skipping TensorProductElement constructor! Bad code smell, refactor to avoid this non-inheritance somehow.
FiniteElementBase.__init__(self, family, cell, degree,
quad_scheme, value_shape, reference_value_shape)
def mapping(self):
return "contravariant Piola"
def sobolev_space(self):
"Return the underlying Sobolev space."
return HDiv
def reconstruct(self, **kwargs):
return HDivElement(self._element.reconstruct(**kwargs))
def __str__(self):
return "HDivElement(%s)" % str(self._element)
def shortstr(self):
"Format as string for pretty printing."
return "HDivElement(%s)" % str(self._element.shortstr())
class HCurlElement(FiniteElementBase):
"""A curl-conforming version of an outer product element, assuming
this makes mathematical sense."""
__slots__ = ("_element",)
def __init__(self, element):
self._element = element
self._repr = "HCurlElement(%s)" % repr(element)
family = "TensorProductElement"
cell = element.cell()
degree = element.degree()
quad_scheme = element.quadrature_scheme()
cell = element.cell()
value_shape = (cell.geometric_dimension(),)
reference_value_shape = (cell.topological_dimension(),) # TODO: Is this right?
# Skipping TensorProductElement constructor! Bad code smell,
# refactor to avoid this non-inheritance somehow.
FiniteElementBase.__init__(self, family, cell, degree, quad_scheme,
value_shape, reference_value_shape)
def mapping(self):
return "covariant Piola"
def sobolev_space(self):
"Return the underlying Sobolev space."
return HCurl
def reconstruct(self, **kwargs):
return HCurlElement(self._element.reconstruct(**kwargs))
def __str__(self):
return "HCurlElement(%s)" % str(self._element)
def shortstr(self):
"Format as string for pretty printing."
return "HCurlElement(%s)" % str(self._element.shortstr())
class WithMapping(FiniteElementBase):
"""Specify an alternative mapping for the wrappee. For example,
to use identity mapping instead of Piola map with an element E,
write
remapped = WithMapping(E, "identity")
"""
def __init__(self, wrapee, mapping):
self._repr = "WithMapping(%s, %s)" % (repr(wrapee), mapping)
self._mapping = mapping
self.wrapee = wrapee
def __getattr__(self, attr):
try:
return getattr(self.wrapee, attr)
except AttributeError:
raise AttributeError("'%s' object has no attribute '%s'" %
(type(self).__name__, attr))
def mapping(self):
return self._mapping
def reconstruct(self, **kwargs):
mapping = kwargs.pop("mapping", self._mapping)
wrapee = self.wrapee.reconstruct(**kwargs)
return type(self)(wrapee, mapping)
def __str__(self):
return "WithMapping(%s, mapping=%s)" % (self.wrapee, self._mapping)
def shortstr(self):
return "WithMapping(%s, %s)" % (self.wrapee.shortstr(), self._mapping)
|
lgpl-3.0
| 472,424,525,507,834,050
| 33.256198
| 116
| 0.626055
| false
| 3.892019
| false
| false
| false
|
python-control/python-control
|
control/tests/modelsimp_test.py
|
1
|
9062
|
"""modelsimp_array_test.py - test model reduction functions
RMM, 30 Mar 2011 (based on TestModelSimp from v0.4a)
"""
import numpy as np
import pytest
from control import StateSpace, forced_response, tf, rss, c2d
from control.exception import ControlMIMONotImplemented
from control.tests.conftest import slycotonly, matarrayin
from control.modelsimp import balred, hsvd, markov, modred
class TestModelsimp:
"""Test model reduction functions"""
@slycotonly
def testHSVD(self, matarrayout, matarrayin):
A = matarrayin([[1., -2.], [3., -4.]])
B = matarrayin([[5.], [7.]])
C = matarrayin([[6., 8.]])
D = matarrayin([[9.]])
sys = StateSpace(A, B, C, D)
hsv = hsvd(sys)
hsvtrue = np.array([24.42686, 0.5731395]) # from MATLAB
np.testing.assert_array_almost_equal(hsv, hsvtrue)
# test for correct return type: ALWAYS return ndarray, even when
# use_numpy_matrix(True) was used
assert isinstance(hsv, np.ndarray)
assert not isinstance(hsv, np.matrix)
def testMarkovSignature(self, matarrayout, matarrayin):
U = matarrayin([[1., 1., 1., 1., 1.]])
Y = U
m = 3
H = markov(Y, U, m, transpose=False)
Htrue = np.array([[1., 0., 0.]])
np.testing.assert_array_almost_equal(H, Htrue)
# Make sure that transposed data also works
H = markov(np.transpose(Y), np.transpose(U), m, transpose=True)
np.testing.assert_array_almost_equal(H, np.transpose(Htrue))
# Generate Markov parameters without any arguments
H = markov(Y, U, m)
np.testing.assert_array_almost_equal(H, Htrue)
# Test example from docstring
T = np.linspace(0, 10, 100)
U = np.ones((1, 100))
T, Y = forced_response(tf([1], [1, 0.5], True), T, U)
H = markov(Y, U, 3, transpose=False)
# Test example from issue #395
inp = np.array([1, 2])
outp = np.array([2, 4])
mrk = markov(outp, inp, 1, transpose=False)
# Make sure MIMO generates an error
U = np.ones((2, 100)) # 2 inputs (Y unchanged, with 1 output)
with pytest.raises(ControlMIMONotImplemented):
markov(Y, U, m)
# Make sure markov() returns the right answer
@pytest.mark.parametrize("k, m, n",
[(2, 2, 2),
(2, 5, 5),
(5, 2, 2),
(5, 5, 5),
(5, 10, 10)])
def testMarkovResults(self, k, m, n):
#
# Test over a range of parameters
#
# k = order of the system
# m = number of Markov parameters
# n = size of the data vector
#
        # Values *should* match exactly for n = m, otherwise you get a
# close match but errors due to the assumption that C A^k B =
# 0 for k > m-2 (see modelsimp.py).
#
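        # (Recall: for a discrete-time state-space model the Markov
        # parameters are M_0 = D and M_k = C A^(k-1) B for k >= 1; Mtrue
        # below stacks exactly these.)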
# Generate stable continuous time system
Hc = rss(k, 1, 1)
# Choose sampling time based on fastest time constant / 10
w, _ = np.linalg.eig(Hc.A)
Ts = np.min(-np.real(w)) / 10.
# Convert to a discrete time system via sampling
Hd = c2d(Hc, Ts, 'zoh')
# Compute the Markov parameters from state space
Mtrue = np.hstack([Hd.D] + [np.dot(
Hd.C, np.dot(np.linalg.matrix_power(Hd.A, i),
Hd.B)) for i in range(m-1)])
# Generate input/output data
T = np.array(range(n)) * Ts
U = np.cos(T) + np.sin(T/np.pi)
_, Y = forced_response(Hd, T, U, squeeze=True)
Mcomp = markov(Y, U, m)
# Compare to results from markov()
        # Experimentally determined probability of a mismatch with rtol=1e-6
        # and atol=1e-8 due to numerical errors:
        # for k=5, m=n=10: 0.015 %
np.testing.assert_allclose(Mtrue, Mcomp, rtol=1e-6, atol=1e-8)
def testModredMatchDC(self, matarrayin):
        # Balanced realization computed in MATLAB for the transfer function:
# num = [1 11 45 32], den = [1 15 60 200 60]
A = matarrayin(
[[-1.958, -1.194, 1.824, -1.464],
[-1.194, -0.8344, 2.563, -1.351],
[-1.824, -2.563, -1.124, 2.704],
[-1.464, -1.351, -2.704, -11.08]])
B = matarrayin([[-0.9057], [-0.4068], [-0.3263], [-0.3474]])
C = matarrayin([[-0.9057, -0.4068, 0.3263, -0.3474]])
D = matarrayin([[0.]])
sys = StateSpace(A, B, C, D)
        rsys = modred(sys, [2, 3], 'matchdc')
Artrue = np.array([[-4.431, -4.552], [-4.552, -5.361]])
Brtrue = np.array([[-1.362], [-1.031]])
Crtrue = np.array([[-1.362, -1.031]])
Drtrue = np.array([[-0.08384]])
np.testing.assert_array_almost_equal(rsys.A, Artrue, decimal=3)
np.testing.assert_array_almost_equal(rsys.B, Brtrue, decimal=3)
np.testing.assert_array_almost_equal(rsys.C, Crtrue, decimal=3)
np.testing.assert_array_almost_equal(rsys.D, Drtrue, decimal=2)
def testModredUnstable(self, matarrayin):
"""Check if an error is thrown when an unstable system is given"""
A = matarrayin(
[[4.5418, 3.3999, 5.0342, 4.3808],
[0.3890, 0.3599, 0.4195, 0.1760],
[-4.2117, -3.2395, -4.6760, -4.2180],
[0.0052, 0.0429, 0.0155, 0.2743]])
B = matarrayin([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]])
C = matarrayin([[1.0, 2.0, 3.0, 4.0], [1.0, 2.0, 3.0, 4.0]])
D = matarrayin([[0.0, 0.0], [0.0, 0.0]])
sys = StateSpace(A, B, C, D)
np.testing.assert_raises(ValueError, modred, sys, [2, 3])
def testModredTruncate(self, matarrayin):
        # Balanced realization computed in MATLAB for the transfer function:
# num = [1 11 45 32], den = [1 15 60 200 60]
A = matarrayin(
[[-1.958, -1.194, 1.824, -1.464],
[-1.194, -0.8344, 2.563, -1.351],
[-1.824, -2.563, -1.124, 2.704],
[-1.464, -1.351, -2.704, -11.08]])
B = matarrayin([[-0.9057], [-0.4068], [-0.3263], [-0.3474]])
C = matarrayin([[-0.9057, -0.4068, 0.3263, -0.3474]])
D = matarrayin([[0.]])
sys = StateSpace(A, B, C, D)
        rsys = modred(sys, [2, 3], 'truncate')
Artrue = np.array([[-1.958, -1.194], [-1.194, -0.8344]])
Brtrue = np.array([[-0.9057], [-0.4068]])
Crtrue = np.array([[-0.9057, -0.4068]])
Drtrue = np.array([[0.]])
np.testing.assert_array_almost_equal(rsys.A, Artrue)
np.testing.assert_array_almost_equal(rsys.B, Brtrue)
np.testing.assert_array_almost_equal(rsys.C, Crtrue)
np.testing.assert_array_almost_equal(rsys.D, Drtrue)
@slycotonly
def testBalredTruncate(self, matarrayin):
        # Controllable canonical realization computed in MATLAB for the transfer
# function:
# num = [1 11 45 32], den = [1 15 60 200 60]
A = matarrayin(
[[-15., -7.5, -6.25, -1.875],
[8., 0., 0., 0.],
[0., 4., 0., 0.],
[0., 0., 1., 0.]])
B = matarrayin([[2.], [0.], [0.], [0.]])
C = matarrayin([[0.5, 0.6875, 0.7031, 0.5]])
D = matarrayin([[0.]])
sys = StateSpace(A, B, C, D)
orders = 2
rsys = balred(sys, orders, method='truncate')
Artrue = np.array([[-1.958, -1.194], [-1.194, -0.8344]])
Brtrue = np.array([[0.9057], [0.4068]])
Crtrue = np.array([[0.9057, 0.4068]])
Drtrue = np.array([[0.]])
np.testing.assert_array_almost_equal(rsys.A, Artrue, decimal=2)
np.testing.assert_array_almost_equal(rsys.B, Brtrue, decimal=4)
np.testing.assert_array_almost_equal(rsys.C, Crtrue, decimal=4)
np.testing.assert_array_almost_equal(rsys.D, Drtrue, decimal=4)
@slycotonly
def testBalredMatchDC(self, matarrayin):
        # Controllable canonical realization computed in MATLAB for the transfer
# function:
# num = [1 11 45 32], den = [1 15 60 200 60]
A = matarrayin(
[[-15., -7.5, -6.25, -1.875],
[8., 0., 0., 0.],
[0., 4., 0., 0.],
[0., 0., 1., 0.]])
B = matarrayin([[2.], [0.], [0.], [0.]])
C = matarrayin([[0.5, 0.6875, 0.7031, 0.5]])
D = matarrayin([[0.]])
sys = StateSpace(A, B, C, D)
orders = 2
        rsys = balred(sys, orders, method='matchdc')
Artrue = np.array(
[[-4.43094773, -4.55232904],
[-4.55232904, -5.36195206]])
Brtrue = np.array([[1.36235673], [1.03114388]])
Crtrue = np.array([[1.36235673, 1.03114388]])
Drtrue = np.array([[-0.08383902]])
np.testing.assert_array_almost_equal(rsys.A, Artrue, decimal=2)
np.testing.assert_array_almost_equal(rsys.B, Brtrue, decimal=4)
np.testing.assert_array_almost_equal(rsys.C, Crtrue, decimal=4)
np.testing.assert_array_almost_equal(rsys.D, Drtrue, decimal=4)
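# Illustrative usage sketch outside the pytest harness (not part of the
# original test module; balred additionally requires the slycot package):
#
#     import control
#     sys = control.rss(6, 1, 1)                        # random stable model: 6 states, SISO
#     hsv = control.hsvd(sys)                           # Hankel singular values
#     rsys = control.balred(sys, 2, method='truncate')  # balanced truncation to 2 states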
|
bsd-3-clause
| -5,015,453,693,631,061,000
| 39.81982
| 79
| 0.537188
| false
| 2.980921
| true
| false
| false
|