| text (string, lengths 6–947k) | repo_name (string, lengths 5–100) | path (string, lengths 4–231) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6–947k) | score (float64, 0–0.34) |
|---|---|---|---|---|---|---|
{
'name': "Blockchain Waves Synchro",
'version': '1.0',
'depends': ['base',
'sale',
'sales_team',
'delivery',
'barcodes',
'mail',
'report',
'portal_sale',
'website_portal',
'website_payment',],
'author': "Sergey Stepanets",
'category': 'Application',
'description': """
Module for blockchain synchro
""",
'data': [
'views/setting.xml',
'data/cron.xml',
'views/clients.xml',
'views/sale_order.xml',
'views/journal_signature.xml',
# 'views/report.xml',
],
}
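An editorial aside, not part of the module: Odoo reads manifests like this one with ast.literal_eval, so the declared data files can be sanity-checked from the module directory with a small standalone sketch (the path handling below is illustrative only):

import ast
import os

# Parse the manifest; it is a single Python dict literal.
with open('__manifest__.py') as f:
    manifest = ast.literal_eval(f.read())

# Flag any declared data file that is missing on disk.
missing = [path for path in manifest['data'] if not os.path.exists(path)]
assert not missing, 'missing data files: %s' % missing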
| stanta/darfchain | darfchain/__manifest__.py | Python | gpl-3.0 | 668 | 0.004491 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-13 20:45
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('kriegspiel', '0003_auto_20170113_2035'),
]
operations = [
migrations.AddField(
model_name='move',
name='created_at',
field=models.DateTimeField(default=django.utils.timezone.now),
),
]
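For reference, this migration is what Django generates for a model field like the following (a sketch of the relevant part of the kriegspiel Move model; nothing beyond the added field is assumed):

from django.db import models
import django.utils.timezone

class Move(models.Model):
    # Matches the AddField above. Note that timezone.now is passed uncalled,
    # so each row gets its own creation time rather than a fixed
    # module-import time.
    created_at = models.DateTimeField(default=django.utils.timezone.now)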
| Kriegspiel/ks-python-api | kriegspiel_api_server/kriegspiel/migrations/0004_move_created_at.py | Python | mit | 507 | 0 |
# Copyright (c) 2017-present, Facebook, Inc.
# All Rights Reserved.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
from __future__ import absolute_import, division, print_function, unicode_literals
import pkgutil
# Indicate that hgext3rd is a namespace package, and other python path
# directories may still be searched for hgext3rd extensions.
__path__ = pkgutil.extend_path(__path__, __name__) # type: ignore # noqa: F821
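A brief illustration of what extend_path buys here, with hypothetical directories (sketch only): if two independent install prefixes each contain an hgext3rd/ directory, extending __path__ lets both contribute modules to the single hgext3rd package.

import sys

# Hypothetical: two install prefixes, each with its own hgext3rd/ directory
# (/opt/a/hgext3rd/foo.py and /opt/b/hgext3rd/bar.py).
sys.path.extend(['/opt/a', '/opt/b'])

import hgext3rd  # runs the pkgutil.extend_path line above
# hgext3rd.__path__ now includes both /opt/a/hgext3rd and /opt/b/hgext3rd,
# so hgext3rd.foo and hgext3rd.bar are both importable.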
| facebookexperimental/eden-hg | eden/hg/eden/hgext3rd_init.py | Python | gpl-2.0 | 497 | 0.002012 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Mixture distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import numpy as np
from tensorflow.contrib import distributions
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
distributions_py = distributions
def _swap_first_last_axes(array):
rank = len(array.shape)
transpose = [rank - 1] + list(range(0, rank - 1))
return array.transpose(transpose)
@contextlib.contextmanager
def _test_capture_mvndiag_sample_outputs():
"""Use monkey-patching to capture the output of an MVNDiag _sample_n."""
data_container = []
true_mvndiag_sample_n = distributions_py.MultivariateNormalDiag._sample_n
def _capturing_mvndiag_sample_n(self, n, seed=None):
samples = true_mvndiag_sample_n(self, n=n, seed=seed)
data_container.append(samples)
return samples
distributions_py.MultivariateNormalDiag._sample_n = (
_capturing_mvndiag_sample_n)
yield data_container
distributions_py.MultivariateNormalDiag._sample_n = true_mvndiag_sample_n
@contextlib.contextmanager
def _test_capture_normal_sample_outputs():
"""Use monkey-patching to capture the output of an Normal _sample_n."""
data_container = []
true_normal_sample_n = distributions_py.Normal._sample_n
def _capturing_normal_sample_n(self, n, seed=None):
samples = true_normal_sample_n(self, n=n, seed=seed)
data_container.append(samples)
return samples
distributions_py.Normal._sample_n = _capturing_normal_sample_n
yield data_container
distributions_py.Normal._sample_n = true_normal_sample_n
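# The two capture helpers above share one monkey-patching pattern; a generic
# version would look roughly like this (a sketch only, not used by the tests
# below; note the try/finally guarantees the patch is undone):
#
#   @contextlib.contextmanager
#   def _test_capture_sample_outputs(dist_cls):
#     data_container = []
#     true_sample_n = dist_cls._sample_n
#     def _capturing_sample_n(self, n, seed=None):
#       samples = true_sample_n(self, n=n, seed=seed)
#       data_container.append(samples)
#       return samples
#     dist_cls._sample_n = _capturing_sample_n
#     try:
#       yield data_container
#     finally:
#       dist_cls._sample_n = true_sample_n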
def make_univariate_mixture(batch_shape, num_components):
logits = random_ops.random_uniform(
list(batch_shape) + [num_components], -1, 1, dtype=dtypes.float32) - 50.
components = [
distributions_py.Normal(
loc=np.float32(np.random.randn(*list(batch_shape))),
scale=np.float32(10 * np.random.rand(*list(batch_shape))))
for _ in range(num_components)
]
cat = distributions_py.Categorical(logits, dtype=dtypes.int32)
return distributions_py.Mixture(cat, components)
def make_multivariate_mixture(batch_shape, num_components, event_shape):
logits = random_ops.random_uniform(
list(batch_shape) + [num_components], -1, 1, dtype=dtypes.float32) - 50.
components = [
distributions_py.MultivariateNormalDiag(
loc=np.float32(np.random.randn(*list(batch_shape + event_shape))),
scale_diag=np.float32(10 * np.random.rand(
*list(batch_shape + event_shape)))) for _ in range(num_components)
]
cat = distributions_py.Categorical(logits, dtype=dtypes.int32)
return distributions_py.Mixture(cat, components)
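# A note on the two factory helpers above: subtracting 50 from every logit
# leaves the categorical probabilities unchanged, because softmax is
# shift-invariant: softmax(v - c) == softmax(v) for any scalar c. The tests
# below repeatedly verify the mixture identities, batchwise:
#   p(x)    = sum_k softmax(logits)[k] * p_k(x)
#   mean(X) = sum_k softmax(logits)[k] * mean_k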
class MixtureTest(test.TestCase):
def testShapes(self):
with self.test_session():
for batch_shape in ([], [1], [2, 3, 4]):
dist = make_univariate_mixture(batch_shape, num_components=10)
self.assertAllEqual(batch_shape, dist.batch_shape)
self.assertAllEqual(batch_shape, dist.batch_shape_tensor().eval())
self.assertAllEqual([], dist.event_shape)
self.assertAllEqual([], dist.event_shape_tensor().eval())
for event_shape in ([1], [2]):
dist = make_multivariate_mixture(
batch_shape, num_components=10, event_shape=event_shape)
self.assertAllEqual(batch_shape, dist.batch_shape)
self.assertAllEqual(batch_shape, dist.batch_shape_tensor().eval())
self.assertAllEqual(event_shape, dist.event_shape)
self.assertAllEqual(event_shape, dist.event_shape_tensor().eval())
def testBrokenShapesStatic(self):
with self.assertRaisesWithPredicateMatch(ValueError,
r"cat.num_classes != len"):
distributions_py.Mixture(
distributions_py.Categorical([0.1, 0.5]), # 2 classes
[distributions_py.Normal(loc=1.0, scale=2.0)])
with self.assertRaisesWithPredicateMatch(
ValueError, r"\(\) and \(2,\) are not compatible"):
# The value error is raised because the batch shapes of the
# Normals are not equal. One is a scalar, the other is a
# vector of size (2,).
distributions_py.Mixture(
distributions_py.Categorical([-0.5, 0.5]), # scalar batch
[
distributions_py.Normal(
loc=1.0, scale=2.0), # scalar dist
distributions_py.Normal(
loc=[1.0, 1.0], scale=[2.0, 2.0])
])
with self.assertRaisesWithPredicateMatch(ValueError, r"Could not infer"):
cat_logits = array_ops.placeholder(shape=[1, None], dtype=dtypes.float32)
distributions_py.Mixture(
distributions_py.Categorical(cat_logits),
[distributions_py.Normal(
loc=[1.0], scale=[2.0])])
def testBrokenShapesDynamic(self):
with self.test_session():
d0_param = array_ops.placeholder(dtype=dtypes.float32)
d1_param = array_ops.placeholder(dtype=dtypes.float32)
d = distributions_py.Mixture(
distributions_py.Categorical([0.1, 0.2]), [
distributions_py.Normal(
loc=d0_param, scale=d0_param), distributions_py.Normal(
loc=d1_param, scale=d1_param)
],
validate_args=True)
with self.assertRaisesOpError(r"batch shape must match"):
d.sample().eval(feed_dict={d0_param: [2.0, 3.0], d1_param: [1.0]})
with self.assertRaisesOpError(r"batch shape must match"):
d.sample().eval(feed_dict={d0_param: [2.0, 3.0], d1_param: 1.0})
def testBrokenTypes(self):
with self.assertRaisesWithPredicateMatch(TypeError, "Categorical"):
distributions_py.Mixture(None, [])
cat = distributions_py.Categorical([0.3, 0.2])
# components must be a list of distributions
with self.assertRaisesWithPredicateMatch(
TypeError, "all .* must be Distribution instances"):
distributions_py.Mixture(cat, [None])
with self.assertRaisesWithPredicateMatch(TypeError, "same dtype"):
distributions_py.Mixture(
cat, [
distributions_py.Normal(loc=[1.0], scale=[2.0]),
distributions_py.Normal(loc=[np.float16(1.0)],
scale=[np.float16(2.0)]),
])
with self.assertRaisesWithPredicateMatch(ValueError, "non-empty list"):
distributions_py.Mixture(distributions_py.Categorical([0.3, 0.2]), None)
with self.assertRaisesWithPredicateMatch(TypeError,
"either be continuous or not"):
distributions_py.Mixture(
cat, [
distributions_py.Normal(loc=[1.0], scale=[2.0]),
distributions_py.Bernoulli(dtype=dtypes.float32, logits=[1.0]),
])
def testMeanUnivariate(self):
with self.test_session() as sess:
for batch_shape in ((), (2,), (2, 3)):
dist = make_univariate_mixture(
batch_shape=batch_shape, num_components=2)
mean = dist.mean()
self.assertEqual(batch_shape, mean.get_shape())
cat_probs = nn_ops.softmax(dist.cat.logits)
dist_means = [d.mean() for d in dist.components]
mean_value, cat_probs_value, dist_means_value = sess.run(
[mean, cat_probs, dist_means])
self.assertEqual(batch_shape, mean_value.shape)
cat_probs_value = _swap_first_last_axes(cat_probs_value)
true_mean = sum(
[c_p * m for (c_p, m) in zip(cat_probs_value, dist_means_value)])
self.assertAllClose(true_mean, mean_value)
def testMeanMultivariate(self):
with self.test_session() as sess:
for batch_shape in ((), (2,), (2, 3)):
dist = make_multivariate_mixture(
batch_shape=batch_shape, num_components=2, event_shape=(4,))
mean = dist.mean()
self.assertEqual(batch_shape + (4,), mean.get_shape())
cat_probs = nn_ops.softmax(dist.cat.logits)
dist_means = [d.mean() for d in dist.components]
mean_value, cat_probs_value, dist_means_value = sess.run(
[mean, cat_probs, dist_means])
self.assertEqual(batch_shape + (4,), mean_value.shape)
cat_probs_value = _swap_first_last_axes(cat_probs_value)
# Add a new innermost dimension for broadcasting to mvn vector shape
cat_probs_value = [np.expand_dims(c_p, -1) for c_p in cat_probs_value]
true_mean = sum(
[c_p * m for (c_p, m) in zip(cat_probs_value, dist_means_value)])
self.assertAllClose(true_mean, mean_value)
def testProbScalarUnivariate(self):
with self.test_session() as sess:
dist = make_univariate_mixture(batch_shape=[], num_components=2)
for x in [
np.array(
[1.0, 2.0], dtype=np.float32), np.array(
1.0, dtype=np.float32),
np.random.randn(3, 4).astype(np.float32)
]:
p_x = dist.prob(x)
self.assertEqual(x.shape, p_x.get_shape())
cat_probs = nn_ops.softmax([dist.cat.logits])[0]
dist_probs = [d.prob(x) for d in dist.components]
p_x_value, cat_probs_value, dist_probs_value = sess.run(
[p_x, cat_probs, dist_probs])
self.assertEqual(x.shape, p_x_value.shape)
total_prob = sum(c_p_value * d_p_value
for (c_p_value, d_p_value
) in zip(cat_probs_value, dist_probs_value))
self.assertAllClose(total_prob, p_x_value)
def testProbScalarMultivariate(self):
with self.test_session() as sess:
dist = make_multivariate_mixture(
batch_shape=[], num_components=2, event_shape=[3])
for x in [
np.array(
[[-1.0, 0.0, 1.0], [0.5, 1.0, -0.3]], dtype=np.float32), np.array(
[-1.0, 0.0, 1.0], dtype=np.float32),
np.random.randn(2, 2, 3).astype(np.float32)
]:
p_x = dist.prob(x)
self.assertEqual(x.shape[:-1], p_x.get_shape())
cat_probs = nn_ops.softmax([dist.cat.logits])[0]
dist_probs = [d.prob(x) for d in dist.components]
p_x_value, cat_probs_value, dist_probs_value = sess.run(
[p_x, cat_probs, dist_probs])
self.assertEqual(x.shape[:-1], p_x_value.shape)
total_prob = sum(c_p_value * d_p_value
for (c_p_value, d_p_value
) in zip(cat_probs_value, dist_probs_value))
self.assertAllClose(total_prob, p_x_value)
def testProbBatchUnivariate(self):
with self.test_session() as sess:
dist = make_univariate_mixture(batch_shape=[2, 3], num_components=2)
for x in [
np.random.randn(2, 3).astype(np.float32),
np.random.randn(4, 2, 3).astype(np.float32)
]:
p_x = dist.prob(x)
self.assertEqual(x.shape, p_x.get_shape())
cat_probs = nn_ops.softmax(dist.cat.logits)
dist_probs = [d.prob(x) for d in dist.components]
p_x_value, cat_probs_value, dist_probs_value = sess.run(
[p_x, cat_probs, dist_probs])
self.assertEqual(x.shape, p_x_value.shape)
cat_probs_value = _swap_first_last_axes(cat_probs_value)
total_prob = sum(c_p_value * d_p_value
for (c_p_value, d_p_value
) in zip(cat_probs_value, dist_probs_value))
self.assertAllClose(total_prob, p_x_value)
def testProbBatchMultivariate(self):
with self.test_session() as sess:
dist = make_multivariate_mixture(
batch_shape=[2, 3], num_components=2, event_shape=[4])
for x in [
np.random.randn(2, 3, 4).astype(np.float32),
np.random.randn(4, 2, 3, 4).astype(np.float32)
]:
p_x = dist.prob(x)
self.assertEqual(x.shape[:-1], p_x.get_shape())
cat_probs = nn_ops.softmax(dist.cat.logits)
dist_probs = [d.prob(x) for d in dist.components]
p_x_value, cat_probs_value, dist_probs_value = sess.run(
[p_x, cat_probs, dist_probs])
self.assertEqual(x.shape[:-1], p_x_value.shape)
cat_probs_value = _swap_first_last_axes(cat_probs_value)
total_prob = sum(c_p_value * d_p_value
for (c_p_value, d_p_value
) in zip(cat_probs_value, dist_probs_value))
self.assertAllClose(total_prob, p_x_value)
def testSampleScalarBatchUnivariate(self):
with self.test_session() as sess:
num_components = 3
dist = make_univariate_mixture(
batch_shape=[], num_components=num_components)
n = 4
with _test_capture_normal_sample_outputs() as component_samples:
samples = dist.sample(n, seed=123)
self.assertEqual(samples.dtype, dtypes.float32)
self.assertEqual((4,), samples.get_shape())
cat_samples = dist.cat.sample(n, seed=123)
sample_values, cat_sample_values, dist_sample_values = sess.run(
[samples, cat_samples, component_samples])
self.assertEqual((4,), sample_values.shape)
for c in range(num_components):
which_c = np.where(cat_sample_values == c)[0]
size_c = which_c.size
# Scalar Batch univariate case: batch_size == 1, rank 1
which_dist_samples = dist_sample_values[c][:size_c]
self.assertAllClose(which_dist_samples, sample_values[which_c])
# Test that sampling with the same seed twice gives the same results.
def testSampleMultipleTimes(self):
# 5 component mixture.
logits = [-10.0, -5.0, 0.0, 5.0, 10.0]
mus = [-5.0, 0.0, 5.0, 4.0, 20.0]
sigmas = [0.1, 5.0, 3.0, 0.2, 4.0]
with self.test_session():
n = 100
random_seed.set_random_seed(654321)
components = [
distributions_py.Normal(
loc=mu, scale=sigma) for mu, sigma in zip(mus, sigmas)
]
cat = distributions_py.Categorical(
logits, dtype=dtypes.int32, name="cat1")
dist1 = distributions_py.Mixture(cat, components, name="mixture1")
samples1 = dist1.sample(n, seed=123456).eval()
random_seed.set_random_seed(654321)
components2 = [
distributions_py.Normal(
loc=mu, scale=sigma) for mu, sigma in zip(mus, sigmas)
]
cat2 = distributions_py.Categorical(
logits, dtype=dtypes.int32, name="cat2")
dist2 = distributions_py.Mixture(cat2, components2, name="mixture2")
samples2 = dist2.sample(n, seed=123456).eval()
self.assertAllClose(samples1, samples2)
def testSampleScalarBatchMultivariate(self):
with self.test_session() as sess:
num_components = 3
dist = make_multivariate_mixture(
batch_shape=[], num_components=num_components, event_shape=[2])
n = 4
with _test_capture_mvndiag_sample_outputs() as component_samples:
samples = dist.sample(n, seed=123)
self.assertEqual(samples.dtype, dtypes.float32)
self.assertEqual((4, 2), samples.get_shape())
cat_samples = dist.cat.sample(n, seed=123)
sample_values, cat_sample_values, dist_sample_values = sess.run(
[samples, cat_samples, component_samples])
self.assertEqual((4, 2), sample_values.shape)
for c in range(num_components):
which_c = np.where(cat_sample_values == c)[0]
size_c = which_c.size
# Scalar Batch multivariate case: batch_size == 1, rank 2
which_dist_samples = dist_sample_values[c][:size_c, :]
self.assertAllClose(which_dist_samples, sample_values[which_c, :])
def testSampleBatchUnivariate(self):
with self.test_session() as sess:
num_components = 3
dist = make_univariate_mixture(
batch_shape=[2, 3], num_components=num_components)
n = 4
with _test_capture_normal_sample_outputs() as component_samples:
samples = dist.sample(n, seed=123)
self.assertEqual(samples.dtype, dtypes.float32)
self.assertEqual((4, 2, 3), samples.get_shape())
cat_samples = dist.cat.sample(n, seed=123)
sample_values, cat_sample_values, dist_sample_values = sess.run(
[samples, cat_samples, component_samples])
self.assertEqual((4, 2, 3), sample_values.shape)
for c in range(num_components):
which_c_s, which_c_b0, which_c_b1 = np.where(cat_sample_values == c)
size_c = which_c_s.size
# Batch univariate case: batch_size == [2, 3], rank 3
which_dist_samples = dist_sample_values[c][range(size_c), which_c_b0,
which_c_b1]
self.assertAllClose(which_dist_samples,
sample_values[which_c_s, which_c_b0, which_c_b1])
def testSampleBatchMultivariate(self):
with self.test_session() as sess:
num_components = 3
dist = make_multivariate_mixture(
batch_shape=[2, 3], num_components=num_components, event_shape=[4])
n = 5
with _test_capture_mvndiag_sample_outputs() as component_samples:
samples = dist.sample(n, seed=123)
self.assertEqual(samples.dtype, dtypes.float32)
self.assertEqual((5, 2, 3, 4), samples.get_shape())
cat_samples = dist.cat.sample(n, seed=123)
sample_values, cat_sample_values, dist_sample_values = sess.run(
[samples, cat_samples, component_samples])
self.assertEqual((5, 2, 3, 4), sample_values.shape)
for c in range(num_components):
which_c_s, which_c_b0, which_c_b1 = np.where(cat_sample_values == c)
size_c = which_c_s.size
        # Batch multivariate case: batch_size == [2, 3], rank 4 (multivariate)
which_dist_samples = dist_sample_values[c][range(size_c), which_c_b0,
which_c_b1, :]
self.assertAllClose(which_dist_samples,
sample_values[which_c_s, which_c_b0, which_c_b1, :])
def testEntropyLowerBoundMultivariate(self):
with self.test_session() as sess:
for batch_shape in ((), (2,), (2, 3)):
dist = make_multivariate_mixture(
batch_shape=batch_shape, num_components=2, event_shape=(4,))
entropy_lower_bound = dist.entropy_lower_bound()
self.assertEqual(batch_shape, entropy_lower_bound.get_shape())
cat_probs = nn_ops.softmax(dist.cat.logits)
dist_entropy = [d.entropy() for d in dist.components]
entropy_lower_bound_value, cat_probs_value, dist_entropy_value = (
sess.run([entropy_lower_bound, cat_probs, dist_entropy]))
self.assertEqual(batch_shape, entropy_lower_bound_value.shape)
cat_probs_value = _swap_first_last_axes(cat_probs_value)
# entropy_lower_bound = sum_i pi_i entropy_i
# for i in num_components, batchwise.
true_entropy_lower_bound = sum(
[c_p * m for (c_p, m) in zip(cat_probs_value, dist_entropy_value)])
self.assertAllClose(true_entropy_lower_bound, entropy_lower_bound_value)
class MixtureBenchmark(test.Benchmark):
def _runSamplingBenchmark(self, name, create_distribution, use_gpu,
num_components, batch_size, num_features,
sample_size):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
np.random.seed(127)
with session.Session(config=config, graph=ops.Graph()) as sess:
random_seed.set_random_seed(0)
with ops.device("/gpu:0" if use_gpu else "/cpu:0"):
mixture = create_distribution(
num_components=num_components,
batch_size=batch_size,
num_features=num_features)
sample_op = mixture.sample(sample_size).op
sess.run(variables.global_variables_initializer())
reported = self.run_op_benchmark(
sess,
sample_op,
min_iters=10,
name=("%s_%s_components_%d_batch_%d_features_%d_sample_%d" %
(name, use_gpu, num_components, batch_size, num_features,
sample_size)))
logging.vlog(2, "\t".join(["%s", "%d", "%d", "%d", "%d", "%g"]) % (
use_gpu, num_components, batch_size, num_features, sample_size,
reported["wall_time"]))
def benchmarkSamplingMVNDiag(self):
logging.vlog(
2, "mvn_diag\tuse_gpu\tcomponents\tbatch\tfeatures\tsample\twall_time")
def create_distribution(batch_size, num_components, num_features):
cat = distributions_py.Categorical(
logits=np.random.randn(batch_size, num_components))
mus = [
variables.Variable(np.random.randn(batch_size, num_features))
for _ in range(num_components)
]
sigmas = [
variables.Variable(np.random.rand(batch_size, num_features))
for _ in range(num_components)
]
components = list(
distributions_py.MultivariateNormalDiag(
loc=mu, scale_diag=sigma) for (mu, sigma) in zip(mus, sigmas))
return distributions_py.Mixture(cat, components)
for use_gpu in False, True:
if use_gpu and not test.is_gpu_available():
continue
for num_components in 1, 8, 16:
for batch_size in 1, 32:
for num_features in 1, 64, 512:
for sample_size in 1, 32, 128:
self._runSamplingBenchmark(
"mvn_diag",
create_distribution=create_distribution,
use_gpu=use_gpu,
num_components=num_components,
batch_size=batch_size,
num_features=num_features,
sample_size=sample_size)
def benchmarkSamplingMVNFull(self):
logging.vlog(
2, "mvn_full\tuse_gpu\tcomponents\tbatch\tfeatures\tsample\twall_time")
def psd(x):
"""Construct batch-wise PSD matrices."""
return np.stack([np.dot(np.transpose(z), z) for z in x])
def create_distribution(batch_size, num_components, num_features):
cat = distributions_py.Categorical(
logits=np.random.randn(batch_size, num_components))
mus = [
variables.Variable(np.random.randn(batch_size, num_features))
for _ in range(num_components)
]
sigmas = [
variables.Variable(
psd(np.random.rand(batch_size, num_features, num_features)))
for _ in range(num_components)
]
components = list(
distributions_py.MultivariateNormalTriL(
loc=mu, scale_tril=linalg_ops.cholesky(sigma))
for (mu, sigma) in zip(mus, sigmas))
return distributions_py.Mixture(cat, components)
for use_gpu in False, True:
if use_gpu and not test.is_gpu_available():
continue
for num_components in 1, 8, 16:
for batch_size in 1, 32:
for num_features in 1, 64, 512:
for sample_size in 1, 32, 128:
self._runSamplingBenchmark(
"mvn_full",
create_distribution=create_distribution,
use_gpu=use_gpu,
num_components=num_components,
batch_size=batch_size,
num_features=num_features,
sample_size=sample_size)
if __name__ == "__main__":
test.main()
| AsimmHirani/ISpyPi | tensorflow/contrib/tensorflow-master/tensorflow/contrib/distributions/python/kernel_tests/mixture_test.py | Python | apache-2.0 | 24,371 | 0.006729 |
__author__ = 'Dominic Miglar <dominic.miglar@bitmovin.net>'
import unittest
import bitcodin
from bitcodin.test.bitcodin_test_case import BitcodinTestCase
from bitcodin.rest import RestClient
class GetStatisticsCurrentMonthTestCase(BitcodinTestCase):
def setUp(self):
super(GetStatisticsCurrentMonthTestCase, self).setUp()
    def runTest(self):
        # The call should complete without raising; the response body is not
        # inspected by this smoke test.
        response = RestClient.get(url=bitcodin.get_api_base()+'/statistics', headers=bitcodin.create_headers())
def tearDown(self):
super(GetStatisticsCurrentMonthTestCase, self).tearDown()
if __name__ == '__main__':
unittest.main()
| bitmovin/bitcodin-python | bitcodin/test/statistics/testcase_get_statistics_current.py | Python | unlicense | 614 | 0.001629 |
"""Base integration test for provider implementations."""
import unittest
import json
import mock
from contextlib import contextmanager
from django import test
from django.contrib import auth
from django.contrib.auth import models as auth_models
from django.contrib.messages.storage import fallback
from django.contrib.sessions.backends import cache
from django.core.urlresolvers import reverse
from django.test import utils as django_utils
from django.conf import settings as django_settings
from social_core import actions, exceptions
from social_django import utils as social_utils
from social_django import views as social_views
from lms.djangoapps.commerce.tests import TEST_API_URL
from openedx.core.djangoapps.site_configuration.tests.factories import SiteFactory
from student import models as student_models
from student import views as student_views
from student.tests.factories import UserFactory
from student_account.views import account_settings_context
from third_party_auth import middleware, pipeline
from third_party_auth.tests import testutil
class IntegrationTestMixin(object):
"""
Mixin base class for third_party_auth integration tests.
This class is newer and simpler than the 'IntegrationTest' alternative below, but it is
currently less comprehensive. Some providers are tested with this, others with
IntegrationTest.
"""
# Provider information:
PROVIDER_NAME = "override"
PROVIDER_BACKEND = "override"
PROVIDER_ID = "override"
# Information about the user expected from the provider:
USER_EMAIL = "override"
USER_NAME = "override"
USER_USERNAME = "override"
def setUp(self):
super(IntegrationTestMixin, self).setUp()
self.login_page_url = reverse('signin_user')
self.register_page_url = reverse('register_user')
patcher = testutil.patch_mako_templates()
patcher.start()
self.addCleanup(patcher.stop)
# Override this method in a subclass and enable at least one provider.
def test_register(self, **extra_defaults):
# The user goes to the register page, and sees a button to register with the provider:
provider_register_url = self._check_register_page()
# The user clicks on the Dummy button:
try_login_response = self.client.get(provider_register_url)
# The user should be redirected to the provider's login page:
self.assertEqual(try_login_response.status_code, 302)
provider_response = self.do_provider_login(try_login_response['Location'])
# We should be redirected to the register screen since this account is not linked to an edX account:
self.assertEqual(provider_response.status_code, 302)
self.assertEqual(provider_response['Location'], self.url_prefix + self.register_page_url)
register_response = self.client.get(self.register_page_url)
tpa_context = register_response.context["data"]["third_party_auth"]
self.assertEqual(tpa_context["errorMessage"], None)
# Check that the "You've successfully signed into [PROVIDER_NAME]" message is shown.
self.assertEqual(tpa_context["currentProvider"], self.PROVIDER_NAME)
# Check that the data (e.g. email) from the provider is displayed in the form:
form_data = register_response.context['data']['registration_form_desc']
form_fields = {field['name']: field for field in form_data['fields']}
self.assertEqual(form_fields['email']['defaultValue'], self.USER_EMAIL)
self.assertEqual(form_fields['name']['defaultValue'], self.USER_NAME)
self.assertEqual(form_fields['username']['defaultValue'], self.USER_USERNAME)
for field_name, value in extra_defaults.items():
self.assertEqual(form_fields[field_name]['defaultValue'], value)
registration_values = {
'email': 'email-edited@tpa-test.none',
'name': 'My Customized Name',
'username': 'new_username',
'honor_code': True,
}
# Now complete the form:
ajax_register_response = self.client.post(
reverse('user_api_registration'),
registration_values
)
self.assertEqual(ajax_register_response.status_code, 200)
# Then the AJAX will finish the third party auth:
continue_response = self.client.get(tpa_context["finishAuthUrl"])
# And we should be redirected to the dashboard:
self.assertEqual(continue_response.status_code, 302)
self.assertEqual(continue_response['Location'], self.url_prefix + reverse('dashboard'))
# Now check that we can login again, whether or not we have yet verified the account:
self.client.logout()
self._test_return_login(user_is_activated=False)
self.client.logout()
self.verify_user_email('email-edited@tpa-test.none')
self._test_return_login(user_is_activated=True)
def test_login(self):
self.user = UserFactory.create() # pylint: disable=attribute-defined-outside-init
# The user goes to the login page, and sees a button to login with this provider:
provider_login_url = self._check_login_page()
# The user clicks on the provider's button:
try_login_response = self.client.get(provider_login_url)
# The user should be redirected to the provider's login page:
self.assertEqual(try_login_response.status_code, 302)
complete_response = self.do_provider_login(try_login_response['Location'])
# We should be redirected to the login screen since this account is not linked to an edX account:
self.assertEqual(complete_response.status_code, 302)
self.assertEqual(complete_response['Location'], self.url_prefix + self.login_page_url)
login_response = self.client.get(self.login_page_url)
tpa_context = login_response.context["data"]["third_party_auth"]
self.assertEqual(tpa_context["errorMessage"], None)
# Check that the "You've successfully signed into [PROVIDER_NAME]" message is shown.
self.assertEqual(tpa_context["currentProvider"], self.PROVIDER_NAME)
# Now the user enters their username and password.
# The AJAX on the page will log them in:
ajax_login_response = self.client.post(
reverse('user_api_login_session'),
{'email': self.user.email, 'password': 'test'}
)
self.assertEqual(ajax_login_response.status_code, 200)
# Then the AJAX will finish the third party auth:
continue_response = self.client.get(tpa_context["finishAuthUrl"])
# And we should be redirected to the dashboard:
self.assertEqual(continue_response.status_code, 302)
self.assertEqual(continue_response['Location'], self.url_prefix + reverse('dashboard'))
# Now check that we can login again:
self.client.logout()
self._test_return_login()
def do_provider_login(self, provider_redirect_url):
"""
        Mock logging in to the provider.
        Should end by loading self.complete_url and returning that response.
"""
raise NotImplementedError
def _test_return_login(self, user_is_activated=True, previous_session_timed_out=False):
""" Test logging in to an account that is already linked. """
# Make sure we're not logged in:
dashboard_response = self.client.get(reverse('dashboard'))
self.assertEqual(dashboard_response.status_code, 302)
# The user goes to the login page, and sees a button to login with this provider:
provider_login_url = self._check_login_page()
# The user clicks on the provider's login button:
try_login_response = self.client.get(provider_login_url)
# The user should be redirected to the provider:
self.assertEqual(try_login_response.status_code, 302)
login_response = self.do_provider_login(try_login_response['Location'])
# If the previous session was manually logged out, there will be one weird redirect
# required to set the login cookie (it sticks around if the main session times out):
if not previous_session_timed_out:
self.assertEqual(login_response.status_code, 302)
self.assertEqual(login_response['Location'], self.url_prefix + self.complete_url)
# And then we should be redirected to the dashboard:
login_response = self.client.get(login_response['Location'])
self.assertEqual(login_response.status_code, 302)
if user_is_activated:
url_expected = reverse('dashboard')
else:
url_expected = reverse('third_party_inactive_redirect') + '?next=' + reverse('dashboard')
self.assertEqual(login_response['Location'], self.url_prefix + url_expected)
# Now we are logged in:
dashboard_response = self.client.get(reverse('dashboard'))
self.assertEqual(dashboard_response.status_code, 200)
def _check_login_page(self):
"""
Load the login form and check that it contains a button for the provider.
Return the URL for logging into that provider.
"""
return self._check_login_or_register_page(self.login_page_url, "loginUrl")
def _check_register_page(self):
"""
Load the registration form and check that it contains a button for the provider.
Return the URL for registering with that provider.
"""
return self._check_login_or_register_page(self.register_page_url, "registerUrl")
def _check_login_or_register_page(self, url, url_to_return):
""" Shared logic for _check_login_page() and _check_register_page() """
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertIn(self.PROVIDER_NAME, response.content)
context_data = response.context['data']['third_party_auth']
provider_urls = {provider['id']: provider[url_to_return] for provider in context_data['providers']}
self.assertIn(self.PROVIDER_ID, provider_urls)
return provider_urls[self.PROVIDER_ID]
@property
def complete_url(self):
""" Get the auth completion URL for this provider """
return reverse('social:complete', kwargs={'backend': self.PROVIDER_BACKEND})
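# A hypothetical concrete subclass of the mixin, for orientation (sketch only;
# the provider values and login simulation are illustrative, not a real
# provider configuration):
#
#   class ExampleProviderIntegrationTest(IntegrationTestMixin, testutil.TestCase):
#       PROVIDER_NAME = "Example"
#       PROVIDER_BACKEND = "example-oauth2"
#       PROVIDER_ID = "oa2-example"
#       USER_EMAIL = "learner@example.com"
#       USER_NAME = "Example Learner"
#       USER_USERNAME = "learner"
#
#       def do_provider_login(self, provider_redirect_url):
#           # Follow the provider redirect until we land on complete_url.
#           return self.client.get(provider_redirect_url, follow=True)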
@unittest.skipUnless(
testutil.AUTH_FEATURES_KEY in django_settings.FEATURES, testutil.AUTH_FEATURES_KEY + ' not in settings.FEATURES')
@django_utils.override_settings() # For settings reversion on a method-by-method basis.
class IntegrationTest(testutil.TestCase, test.TestCase):
"""Abstract base class for provider integration tests."""
# Override setUp and set this:
provider = None
# Methods you must override in your children.
def get_response_data(self):
"""Gets a dict of response data of the form given by the provider.
To determine what the provider returns, drop into a debugger in your
provider's do_auth implementation. Providers may merge different kinds
of data (for example, data about the user and data about the user's
credentials).
"""
raise NotImplementedError
def get_username(self):
"""Gets username based on response data from a provider.
Each provider has different logic for username generation. Sadly,
this is not extracted into its own method in python-social-auth, so we
must provide a getter ourselves.
Note that this is the *initial* value the framework will attempt to use.
If it collides, the pipeline will generate a new username. We extract
it here so we can force collisions in a polymorphic way.
"""
raise NotImplementedError
# Asserts you can optionally override and make more specific.
def assert_redirect_to_provider_looks_correct(self, response):
"""Asserts the redirect to the provider's site looks correct.
When we hit /auth/login/<provider>, we should be redirected to the
provider's site. Here we check that we're redirected, but we don't know
enough about the provider to check what we're redirected to. Child test
implementations may optionally strengthen this assertion with, for
example, more details about the format of the Location header.
"""
self.assertEqual(302, response.status_code)
self.assertTrue(response.has_header('Location'))
def assert_register_response_in_pipeline_looks_correct(self, response, pipeline_kwargs, required_fields):
"""Performs spot checks of the rendered register.html page.
When we display the new account registration form after the user signs
in with a third party, we prepopulate the form with values sent back
from the provider. The exact set of values varies on a provider-by-
provider basis and is generated by
provider.BaseProvider.get_register_form_data. We provide some stock
assertions based on the provider's implementation; if you want more
assertions in your test, override this method.
"""
self.assertEqual(200, response.status_code)
# Check that the correct provider was selected.
self.assertIn('successfully signed in with <strong>%s</strong>' % self.provider.name, response.content)
# Expect that each truthy value we've prepopulated the register form
# with is actually present.
form_field_data = self.provider.get_register_form_data(pipeline_kwargs)
for prepopulated_form_data in form_field_data:
if prepopulated_form_data in required_fields:
self.assertIn(form_field_data[prepopulated_form_data], response.content.decode('utf-8'))
# Implementation details and actual tests past this point -- no more
# configuration needed.
def setUp(self):
super(IntegrationTest, self).setUp()
self.request_factory = test.RequestFactory()
@property
def backend_name(self):
""" Shortcut for the backend name """
return self.provider.backend_name
# pylint: disable=invalid-name
def assert_account_settings_context_looks_correct(self, context, duplicate=False, linked=None):
"""Asserts the user's account settings page context is in the expected state.
If duplicate is True, we expect context['duplicate_provider'] to contain
the duplicate provider backend name. If linked is passed, we conditionally
check that the provider is included in context['auth']['providers'] and
its connected state is correct.
"""
if duplicate:
self.assertEqual(context['duplicate_provider'], self.provider.backend_name)
else:
self.assertIsNone(context['duplicate_provider'])
if linked is not None:
expected_provider = [
provider for provider in context['auth']['providers'] if provider['name'] == self.provider.name
][0]
self.assertIsNotNone(expected_provider)
self.assertEqual(expected_provider['connected'], linked)
def assert_exception_redirect_looks_correct(self, expected_uri, auth_entry=None):
"""Tests middleware conditional redirection.
middleware.ExceptionMiddleware makes sure the user ends up in the right
place when they cancel authentication via the provider's UX.
"""
exception_middleware = middleware.ExceptionMiddleware()
request, _ = self.get_request_and_strategy(auth_entry=auth_entry)
response = exception_middleware.process_exception(
request, exceptions.AuthCanceled(request.backend))
location = response.get('Location')
self.assertEqual(302, response.status_code)
self.assertIn('canceled', location)
self.assertIn(self.backend_name, location)
self.assertTrue(location.startswith(expected_uri + '?'))
def assert_first_party_auth_trumps_third_party_auth(self, email=None, password=None, success=None):
"""Asserts first party auth was used in place of third party auth.
Args:
email: string. The user's email. If not None, will be set on POST.
password: string. The user's password. If not None, will be set on
POST.
success: None or bool. Whether we expect auth to be successful. Set
to None to indicate we expect the request to be invalid (meaning
one of username or password will be missing).
"""
_, strategy = self.get_request_and_strategy(
auth_entry=pipeline.AUTH_ENTRY_LOGIN, redirect_uri='social:complete')
strategy.request.backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))
self.create_user_models_for_existing_account(
strategy, email, password, self.get_username(), skip_social_auth=True)
strategy.request.POST = dict(strategy.request.POST)
if email:
strategy.request.POST['email'] = email
if password:
strategy.request.POST['password'] = 'bad_' + password if success is False else password
self.assert_pipeline_running(strategy.request)
payload = json.loads(student_views.login_user(strategy.request).content)
if success is None:
# Request malformed -- just one of email/password given.
self.assertFalse(payload.get('success'))
self.assertIn('There was an error receiving your login information', payload.get('value'))
elif success:
# Request well-formed and credentials good.
self.assertTrue(payload.get('success'))
else:
# Request well-formed but credentials bad.
self.assertFalse(payload.get('success'))
self.assertIn('incorrect', payload.get('value'))
def assert_json_failure_response_is_inactive_account(self, response):
"""Asserts failure on /login for inactive account looks right."""
self.assertEqual(200, response.status_code) # Yes, it's a 200 even though it's a failure.
payload = json.loads(response.content)
self.assertFalse(payload.get('success'))
self.assertIn('In order to sign in, you need to activate your account.', payload.get('value'))
def assert_json_failure_response_is_missing_social_auth(self, response):
"""Asserts failure on /login for missing social auth looks right."""
self.assertEqual(403, response.status_code)
self.assertIn(
"successfully logged into your %s account, but this account isn't linked" % self.provider.name,
response.content
)
def assert_json_failure_response_is_username_collision(self, response):
"""Asserts the json response indicates a username collision."""
self.assertEqual(400, response.status_code)
payload = json.loads(response.content)
self.assertFalse(payload.get('success'))
self.assertIn('belongs to an existing account', payload.get('value'))
def assert_json_success_response_looks_correct(self, response):
"""Asserts the json response indicates success and redirection."""
self.assertEqual(200, response.status_code)
payload = json.loads(response.content)
self.assertTrue(payload.get('success'))
self.assertEqual(pipeline.get_complete_url(self.provider.backend_name), payload.get('redirect_url'))
def assert_login_response_before_pipeline_looks_correct(self, response):
"""Asserts a GET of /login not in the pipeline looks correct."""
self.assertEqual(200, response.status_code)
# The combined login/registration page dynamically generates the login button,
# but we can still check that the provider name is passed in the data attribute
# for the container element.
self.assertIn(self.provider.name, response.content)
def assert_login_response_in_pipeline_looks_correct(self, response):
"""Asserts a GET of /login in the pipeline looks correct."""
self.assertEqual(200, response.status_code)
def assert_password_overridden_by_pipeline(self, username, password):
"""Verifies that the given password is not correct.
The pipeline overrides POST['password'], if any, with random data.
"""
self.assertIsNone(auth.authenticate(password=password, username=username))
def assert_pipeline_running(self, request):
"""Makes sure the given request is running an auth pipeline."""
self.assertTrue(pipeline.running(request))
def assert_redirect_to_dashboard_looks_correct(self, response):
"""Asserts a response would redirect to /dashboard."""
self.assertEqual(302, response.status_code)
# NOTE: Ideally we should use assertRedirects(), however it errors out due to the hostname, testserver,
# not being properly set. This may be an issue with the call made by PSA, but we are not certain.
# pylint: disable=protected-access
self.assertTrue(response.get('Location').endswith(django_settings.SOCIAL_AUTH_LOGIN_REDIRECT_URL))
def assert_redirect_to_login_looks_correct(self, response):
"""Asserts a response would redirect to /login."""
self.assertEqual(302, response.status_code)
self.assertEqual('/login', response.get('Location'))
def assert_redirect_to_register_looks_correct(self, response):
"""Asserts a response would redirect to /register."""
self.assertEqual(302, response.status_code)
self.assertEqual('/register', response.get('Location'))
def assert_register_response_before_pipeline_looks_correct(self, response):
"""Asserts a GET of /register not in the pipeline looks correct."""
self.assertEqual(200, response.status_code)
# The combined login/registration page dynamically generates the register button,
# but we can still check that the provider name is passed in the data attribute
# for the container element.
self.assertIn(self.provider.name, response.content)
def assert_social_auth_does_not_exist_for_user(self, user, strategy):
"""Asserts a user does not have an auth with the expected provider."""
social_auths = strategy.storage.user.get_social_auth_for_user(
user, provider=self.provider.backend_name)
self.assertEqual(0, len(social_auths))
def assert_social_auth_exists_for_user(self, user, strategy):
"""Asserts a user has a social auth with the expected provider."""
social_auths = strategy.storage.user.get_social_auth_for_user(
user, provider=self.provider.backend_name)
self.assertEqual(1, len(social_auths))
self.assertEqual(self.backend_name, social_auths[0].provider)
def create_user_models_for_existing_account(self, strategy, email, password, username, skip_social_auth=False):
"""Creates user, profile, registration, and (usually) social auth.
This synthesizes what happens during /register.
See student.views.register and student.views._do_create_account.
"""
response_data = self.get_response_data()
uid = strategy.request.backend.get_user_id(response_data, response_data)
user = social_utils.Storage.user.create_user(email=email, password=password, username=username)
profile = student_models.UserProfile(user=user)
profile.save()
registration = student_models.Registration()
registration.register(user)
registration.save()
if not skip_social_auth:
social_utils.Storage.user.create_social_auth(user, uid, self.provider.backend_name)
return user
def fake_auth_complete(self, strategy):
"""Fake implementation of social_core.backends.BaseAuth.auth_complete.
Unlike what the docs say, it does not need to return a user instance.
Sometimes (like when directing users to the /register form) it instead
returns a response that 302s to /register.
"""
args = ()
kwargs = {
'request': strategy.request,
'backend': strategy.request.backend,
'user': None,
'response': self.get_response_data(),
}
return strategy.authenticate(*args, **kwargs)
def get_registration_post_vars(self, overrides=None):
"""POST vars generated by the registration form."""
defaults = {
'username': 'username',
'name': 'First Last',
'gender': '',
'year_of_birth': '',
'level_of_education': '',
'goals': '',
'honor_code': 'true',
'terms_of_service': 'true',
'password': 'password',
'mailing_address': '',
'email': 'user@email.com',
}
if overrides:
defaults.update(overrides)
return defaults
def get_request_and_strategy(self, auth_entry=None, redirect_uri=None):
"""Gets a fully-configured request and strategy.
These two objects contain circular references, so we create them
together. The references themselves are a mixture of normal __init__
stuff and monkey-patching done by python-social-auth. See, for example,
social_django.utils.strategy().
"""
request = self.request_factory.get(
pipeline.get_complete_url(self.backend_name) +
'?redirect_state=redirect_state_value&code=code_value&state=state_value')
request.site = SiteFactory.create()
request.user = auth_models.AnonymousUser()
request.session = cache.SessionStore()
request.session[self.backend_name + '_state'] = 'state_value'
if auth_entry:
request.session[pipeline.AUTH_ENTRY_KEY] = auth_entry
strategy = social_utils.load_strategy(request=request)
request.social_strategy = strategy
request.backend = social_utils.load_backend(strategy, self.backend_name, redirect_uri)
return request, strategy
@contextmanager
def _patch_edxmako_current_request(self, request):
"""Make ``request`` be the current request for edxmako template rendering."""
with mock.patch('edxmako.request_context.get_current_request', return_value=request):
yield
def get_user_by_email(self, strategy, email):
"""Gets a user by email, using the given strategy."""
return strategy.storage.user.user_model().objects.get(email=email)
def assert_logged_in_cookie_redirect(self, response):
"""Verify that the user was redirected in order to set the logged in cookie. """
self.assertEqual(response.status_code, 302)
self.assertEqual(
response["Location"],
pipeline.get_complete_url(self.provider.backend_name)
)
self.assertEqual(response.cookies[django_settings.EDXMKTG_LOGGED_IN_COOKIE_NAME].value, 'true')
self.assertIn(django_settings.EDXMKTG_USER_INFO_COOKIE_NAME, response.cookies)
def set_logged_in_cookies(self, request):
"""Simulate setting the marketing site cookie on the request. """
request.COOKIES[django_settings.EDXMKTG_LOGGED_IN_COOKIE_NAME] = 'true'
request.COOKIES[django_settings.EDXMKTG_USER_INFO_COOKIE_NAME] = json.dumps({
'version': django_settings.EDXMKTG_USER_INFO_COOKIE_VERSION,
})
# Actual tests, executed once per child.
def test_canceling_authentication_redirects_to_login_when_auth_entry_login(self):
self.assert_exception_redirect_looks_correct('/login', auth_entry=pipeline.AUTH_ENTRY_LOGIN)
def test_canceling_authentication_redirects_to_register_when_auth_entry_register(self):
self.assert_exception_redirect_looks_correct('/register', auth_entry=pipeline.AUTH_ENTRY_REGISTER)
def test_canceling_authentication_redirects_to_account_settings_when_auth_entry_account_settings(self):
self.assert_exception_redirect_looks_correct(
'/account/settings', auth_entry=pipeline.AUTH_ENTRY_ACCOUNT_SETTINGS
)
def test_canceling_authentication_redirects_to_root_when_auth_entry_not_set(self):
self.assert_exception_redirect_looks_correct('/')
def test_full_pipeline_succeeds_for_linking_account(self):
        # First, create the request and strategy that store pipeline state,
# configure the backend, and mock out wire traffic.
request, strategy = self.get_request_and_strategy(
auth_entry=pipeline.AUTH_ENTRY_LOGIN, redirect_uri='social:complete')
request.backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))
pipeline.analytics.track = mock.MagicMock()
request.user = self.create_user_models_for_existing_account(
strategy, 'user@example.com', 'password', self.get_username(), skip_social_auth=True)
# Instrument the pipeline to get to the dashboard with the full
# expected state.
self.client.get(
pipeline.get_login_url(self.provider.provider_id, pipeline.AUTH_ENTRY_LOGIN))
actions.do_complete(request.backend, social_views._do_login) # pylint: disable=protected-access
student_views.signin_user(strategy.request)
student_views.login_user(strategy.request)
actions.do_complete(request.backend, social_views._do_login) # pylint: disable=protected-access
# First we expect that we're in the unlinked state, and that there
# really is no association in the backend.
self.assert_account_settings_context_looks_correct(account_settings_context(request), linked=False)
self.assert_social_auth_does_not_exist_for_user(request.user, strategy)
# We should be redirected back to the complete page, setting
# the "logged in" cookie for the marketing site.
self.assert_logged_in_cookie_redirect(actions.do_complete(
request.backend, social_views._do_login, request.user, None, # pylint: disable=protected-access
redirect_field_name=auth.REDIRECT_FIELD_NAME
))
# Set the cookie and try again
self.set_logged_in_cookies(request)
# Fire off the auth pipeline to link.
self.assert_redirect_to_dashboard_looks_correct( # pylint: disable=protected-access
actions.do_complete(
request.backend,
social_views._do_login,
request.user,
None,
redirect_field_name=auth.REDIRECT_FIELD_NAME
)
)
# Now we expect to be in the linked state, with a backend entry.
self.assert_social_auth_exists_for_user(request.user, strategy)
self.assert_account_settings_context_looks_correct(account_settings_context(request), linked=True)
def test_full_pipeline_succeeds_for_unlinking_account(self):
        # First, create the request and strategy that store pipeline state,
# configure the backend, and mock out wire traffic.
request, strategy = self.get_request_and_strategy(
auth_entry=pipeline.AUTH_ENTRY_LOGIN, redirect_uri='social:complete')
request.backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))
user = self.create_user_models_for_existing_account(
strategy, 'user@example.com', 'password', self.get_username())
self.assert_social_auth_exists_for_user(user, strategy)
# We're already logged in, so simulate that the cookie is set correctly
self.set_logged_in_cookies(request)
# Instrument the pipeline to get to the dashboard with the full
# expected state.
self.client.get(
pipeline.get_login_url(self.provider.provider_id, pipeline.AUTH_ENTRY_LOGIN))
actions.do_complete(request.backend, social_views._do_login) # pylint: disable=protected-access
with self._patch_edxmako_current_request(strategy.request):
student_views.signin_user(strategy.request)
student_views.login_user(strategy.request)
actions.do_complete(request.backend, social_views._do_login, user=user) # pylint: disable=protected-access
# First we expect that we're in the linked state, with a backend entry.
self.assert_account_settings_context_looks_correct(account_settings_context(request), linked=True)
self.assert_social_auth_exists_for_user(request.user, strategy)
# Fire off the disconnect pipeline to unlink.
self.assert_redirect_to_dashboard_looks_correct(
actions.do_disconnect(
request.backend,
request.user,
None,
redirect_field_name=auth.REDIRECT_FIELD_NAME
)
)
# Now we expect to be in the unlinked state, with no backend entry.
self.assert_account_settings_context_looks_correct(account_settings_context(request), linked=False)
self.assert_social_auth_does_not_exist_for_user(user, strategy)
def test_linking_already_associated_account_raises_auth_already_associated(self):
# This is of a piece with
# test_already_associated_exception_populates_dashboard_with_error. It
# verifies the exception gets raised when we expect; the latter test
# covers exception handling.
email = 'user@example.com'
password = 'password'
username = self.get_username()
_, strategy = self.get_request_and_strategy(
auth_entry=pipeline.AUTH_ENTRY_LOGIN, redirect_uri='social:complete')
backend = strategy.request.backend
backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))
linked_user = self.create_user_models_for_existing_account(strategy, email, password, username)
unlinked_user = social_utils.Storage.user.create_user(
email='other_' + email, password=password, username='other_' + username)
self.assert_social_auth_exists_for_user(linked_user, strategy)
self.assert_social_auth_does_not_exist_for_user(unlinked_user, strategy)
with self.assertRaises(exceptions.AuthAlreadyAssociated):
# pylint: disable=protected-access
actions.do_complete(backend, social_views._do_login, user=unlinked_user)
def test_already_associated_exception_populates_dashboard_with_error(self):
# Instrument the pipeline with an exception. We test that the
# exception is raised correctly separately, so it's ok that we're
# raising it artificially here. This makes the linked=True artificial
# in the final assert because in practice the account would be
# unlinked, but getting that behavior is cumbersome here and already
# covered in other tests. Using linked=True does, however, let us test
# that the duplicate error has no effect on the state of the controls.
request, strategy = self.get_request_and_strategy(
auth_entry=pipeline.AUTH_ENTRY_LOGIN, redirect_uri='social:complete')
strategy.request.backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))
user = self.create_user_models_for_existing_account(
strategy, 'user@example.com', 'password', self.get_username())
self.assert_social_auth_exists_for_user(user, strategy)
self.client.get('/login')
self.client.get(pipeline.get_login_url(self.provider.provider_id, pipeline.AUTH_ENTRY_LOGIN))
actions.do_complete(request.backend, social_views._do_login) # pylint: disable=protected-access
with self._patch_edxmako_current_request(strategy.request):
student_views.signin_user(strategy.request)
student_views.login_user(strategy.request)
actions.do_complete(request.backend, social_views._do_login, user=user) # pylint: disable=protected-access
# Monkey-patch storage for messaging; pylint: disable=protected-access
request._messages = fallback.FallbackStorage(request)
middleware.ExceptionMiddleware().process_exception(
request,
exceptions.AuthAlreadyAssociated(self.provider.backend_name, 'account is already in use.'))
self.assert_account_settings_context_looks_correct(
account_settings_context(request), duplicate=True, linked=True)
def test_full_pipeline_succeeds_for_signing_in_to_existing_active_account(self):
        # First, create the request and strategy that store pipeline state,
# configure the backend, and mock out wire traffic.
request, strategy = self.get_request_and_strategy(
auth_entry=pipeline.AUTH_ENTRY_LOGIN, redirect_uri='social:complete')
strategy.request.backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))
pipeline.analytics.track = mock.MagicMock()
user = self.create_user_models_for_existing_account(
strategy, 'user@example.com', 'password', self.get_username())
self.assert_social_auth_exists_for_user(user, strategy)
self.assertTrue(user.is_active)
# Begin! Ensure that the login form contains expected controls before
# the user starts the pipeline.
self.assert_login_response_before_pipeline_looks_correct(self.client.get('/login'))
# The pipeline starts by a user GETting /auth/login/<provider>.
# Synthesize that request and check that it redirects to the correct
# provider page.
self.assert_redirect_to_provider_looks_correct(self.client.get(
pipeline.get_login_url(self.provider.provider_id, pipeline.AUTH_ENTRY_LOGIN)))
# Next, the provider makes a request against /auth/complete/<provider>
# to resume the pipeline.
# pylint: disable=protected-access
self.assert_redirect_to_login_looks_correct(actions.do_complete(request.backend, social_views._do_login))
# At this point we know the pipeline has resumed correctly. Next we
# fire off the view that displays the login form and posts it via JS.
with self._patch_edxmako_current_request(strategy.request):
self.assert_login_response_in_pipeline_looks_correct(student_views.signin_user(strategy.request))
# Next, we invoke the view that handles the POST, and expect it
# redirects to /auth/complete. In the browser ajax handlers will
# redirect the user to the dashboard; we invoke it manually here.
self.assert_json_success_response_looks_correct(student_views.login_user(strategy.request))
# We should be redirected back to the complete page, setting
# the "logged in" cookie for the marketing site.
self.assert_logged_in_cookie_redirect(actions.do_complete(
request.backend, social_views._do_login, request.user, None, # pylint: disable=protected-access
redirect_field_name=auth.REDIRECT_FIELD_NAME
))
# Set the cookie and try again
self.set_logged_in_cookies(request)
self.assert_redirect_to_dashboard_looks_correct(
actions.do_complete(request.backend, social_views._do_login, user=user))
self.assert_account_settings_context_looks_correct(account_settings_context(request))
def test_signin_fails_if_account_not_active(self):
_, strategy = self.get_request_and_strategy(
auth_entry=pipeline.AUTH_ENTRY_LOGIN, redirect_uri='social:complete')
strategy.request.backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))
user = self.create_user_models_for_existing_account(strategy, 'user@example.com', 'password', self.get_username())
user.is_active = False
user.save()
with self._patch_edxmako_current_request(strategy.request):
self.assert_json_failure_response_is_inactive_account(student_views.login_user(strategy.request))
def test_signin_fails_if_no_account_associated(self):
_, strategy = self.get_request_and_strategy(
auth_entry=pipeline.AUTH_ENTRY_LOGIN, redirect_uri='social:complete')
strategy.request.backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))
self.create_user_models_for_existing_account(
strategy, 'user@example.com', 'password', self.get_username(), skip_social_auth=True)
self.assert_json_failure_response_is_missing_social_auth(student_views.login_user(strategy.request))
def test_first_party_auth_trumps_third_party_auth_but_is_invalid_when_only_email_in_request(self):
self.assert_first_party_auth_trumps_third_party_auth(email='user@example.com')
def test_first_party_auth_trumps_third_party_auth_but_is_invalid_when_only_password_in_request(self):
self.assert_first_party_auth_trumps_third_party_auth(password='password')
def test_first_party_auth_trumps_third_party_auth_and_fails_when_credentials_bad(self):
self.assert_first_party_auth_trumps_third_party_auth(
email='user@example.com', password='password', success=False)
def test_first_party_auth_trumps_third_party_auth_and_succeeds_when_credentials_good(self):
self.assert_first_party_auth_trumps_third_party_auth(
email='user@example.com', password='password', success=True)
def test_full_pipeline_succeeds_registering_new_account(self):
        # First, create the request and strategy that store pipeline state.
# Mock out wire traffic.
request, strategy = self.get_request_and_strategy(
auth_entry=pipeline.AUTH_ENTRY_REGISTER, redirect_uri='social:complete')
strategy.request.backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))
# Begin! Grab the registration page and check the login control on it.
self.assert_register_response_before_pipeline_looks_correct(self.client.get('/register'))
# The pipeline starts by a user GETting /auth/login/<provider>.
# Synthesize that request and check that it redirects to the correct
# provider page.
self.assert_redirect_to_provider_looks_correct(self.client.get(
pipeline.get_login_url(self.provider.provider_id, pipeline.AUTH_ENTRY_LOGIN)))
# Next, the provider makes a request against /auth/complete/<provider>.
# pylint: disable=protected-access
self.assert_redirect_to_register_looks_correct(actions.do_complete(request.backend, social_views._do_login))
# At this point we know the pipeline has resumed correctly. Next we
# fire off the view that displays the registration form.
with self._patch_edxmako_current_request(request):
self.assert_register_response_in_pipeline_looks_correct(
student_views.register_user(strategy.request),
pipeline.get(request)['kwargs'],
['name', 'username', 'email']
)
# Next, we invoke the view that handles the POST. Not all providers
# supply email. Manually add it as the user would have to; this
# also serves as a test of overriding provider values. Always provide a
# password for us to check that we override it properly.
overridden_password = strategy.request.POST.get('password')
email = 'new@example.com'
if not strategy.request.POST.get('email'):
strategy.request.POST = self.get_registration_post_vars({'email': email})
# The user must not exist yet...
with self.assertRaises(auth_models.User.DoesNotExist):
self.get_user_by_email(strategy, email)
# ...but when we invoke create_account the existing edX view will make
# it, but not social auths. The pipeline creates those later.
with self._patch_edxmako_current_request(strategy.request):
self.assert_json_success_response_looks_correct(student_views.create_account(strategy.request))
# We've overridden the user's password, so authenticate() with the old
# value won't work:
created_user = self.get_user_by_email(strategy, email)
self.assert_password_overridden_by_pipeline(overridden_password, created_user.username)
# At this point the user object exists, but there is no associated
# social auth.
self.assert_social_auth_does_not_exist_for_user(created_user, strategy)
# We should be redirected back to the complete page, setting
# the "logged in" cookie for the marketing site.
self.assert_logged_in_cookie_redirect(actions.do_complete(
request.backend, social_views._do_login, request.user, None, # pylint: disable=protected-access
redirect_field_name=auth.REDIRECT_FIELD_NAME
))
# Set the cookie and try again
self.set_logged_in_cookies(request)
self.assert_redirect_to_dashboard_looks_correct(
actions.do_complete(strategy.request.backend, social_views._do_login, user=created_user))
# Now the user has been redirected to the dashboard. Their third party account should now be linked.
self.assert_social_auth_exists_for_user(created_user, strategy)
self.assert_account_settings_context_looks_correct(account_settings_context(request), linked=True)
def test_new_account_registration_assigns_distinct_username_on_collision(self):
original_username = self.get_username()
request, strategy = self.get_request_and_strategy(
auth_entry=pipeline.AUTH_ENTRY_REGISTER, redirect_uri='social:complete')
# Create a colliding username in the backend, then proceed with
# assignment via pipeline to make sure a distinct username is created.
strategy.storage.user.create_user(username=self.get_username(), email='user@email.com', password='password')
backend = strategy.request.backend
backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))
# pylint: disable=protected-access
self.assert_redirect_to_register_looks_correct(actions.do_complete(backend, social_views._do_login))
distinct_username = pipeline.get(request)['kwargs']['username']
self.assertNotEqual(original_username, distinct_username)
def test_new_account_registration_fails_if_email_exists(self):
request, strategy = self.get_request_and_strategy(
auth_entry=pipeline.AUTH_ENTRY_REGISTER, redirect_uri='social:complete')
backend = strategy.request.backend
backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))
# pylint: disable=protected-access
self.assert_redirect_to_register_looks_correct(actions.do_complete(backend, social_views._do_login))
with self._patch_edxmako_current_request(request):
self.assert_register_response_in_pipeline_looks_correct(
student_views.register_user(strategy.request),
pipeline.get(request)['kwargs'],
['name', 'username', 'email']
)
with self._patch_edxmako_current_request(strategy.request):
strategy.request.POST = self.get_registration_post_vars()
# Create twice: once successfully, and once causing a collision.
student_views.create_account(strategy.request)
self.assert_json_failure_response_is_username_collision(student_views.create_account(strategy.request))
def test_pipeline_raises_auth_entry_error_if_auth_entry_invalid(self):
auth_entry = 'invalid'
self.assertNotIn(auth_entry, pipeline._AUTH_ENTRY_CHOICES) # pylint: disable=protected-access
_, strategy = self.get_request_and_strategy(auth_entry=auth_entry, redirect_uri='social:complete')
with self.assertRaises(pipeline.AuthEntryError):
strategy.request.backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))
def test_pipeline_assumes_login_if_auth_entry_missing(self):
_, strategy = self.get_request_and_strategy(auth_entry=None, redirect_uri='social:complete')
response = self.fake_auth_complete(strategy)
self.assertEqual(response.url, reverse('signin_user'))
# pylint: disable=test-inherits-tests, abstract-method
@django_utils.override_settings(ECOMMERCE_API_URL=TEST_API_URL)
class Oauth2IntegrationTest(IntegrationTest):
"""Base test case for integration tests of Oauth2 providers."""
# Dict of string -> object. Information about the token granted to the
# user. Override with test values in subclass; None to force a throw.
TOKEN_RESPONSE_DATA = None
    # Dict of string -> object. Information about the user themselves. Override
# with test values in subclass; None to force a throw.
USER_RESPONSE_DATA = None
def get_response_data(self):
"""Gets dict (string -> object) of merged data about the user."""
response_data = dict(self.TOKEN_RESPONSE_DATA)
response_data.update(self.USER_RESPONSE_DATA)
return response_data
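    # Illustration only (all values below are hypothetical): a concrete
    # provider test would override the two response dicts, e.g.
    #
    #     class ExampleOauth2IntegrationTest(Oauth2IntegrationTest):
    #         TOKEN_RESPONSE_DATA = {'access_token': 'fake_access_token', 'token_type': 'bearer'}
    #         USER_RESPONSE_DATA = {'email': 'user@example.com', 'name': 'A. User'}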
|
lduarte1991/edx-platform
|
common/djangoapps/third_party_auth/tests/specs/base.py
|
Python
|
agpl-3.0
| 49,331
| 0.003324
|
#!/usr/bin/env python
#(C) Mugfoundation 2014
#Available under MIT license
import click
import hashlib
c = hashlib.sha512()
@click.command()
@click.option('--setup', 'setup', help='Setup new project', type=str)
@click.option('-x', '--major', 'major', help='major version setter', type=int)
@click.option('-y', '--minor', 'minor', help='minor version setter', type=int)
@click.option('-z', '--patch', 'patch', help='patch version setter', type=int)
@click.option('-e', '--extras', 'extras', help='extras version setter', type=int)
@click.option('-h', '--hash', 'hash', help='file to extract the sha512 hash', type=str)
@click.option('-sr', '--sign', 'sign', help='sign the release using open pgp (available only on linux)', type=str)
def main(setup, major, minor, patch, extras, hash, sign):
    # Minimal sketch of a body (the original is truncated at the def line):
    # echo the version parts and, if requested, a file's sha512 digest.
    if None not in (major, minor, patch):
        click.echo('Version: {}.{}.{}'.format(major, minor, patch))
    if hash:
        c.update(open(hash, 'rb').read())
        click.echo(c.hexdigest())
if __name__ == '__main__':
    main()
|
MugFoundation/versioneer
|
cli/versioneer.py
|
Python
|
mit
| 743
| 0.012113
|
'''
Copyright 2017 Digital Hills, LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import logging
import threading
from time import sleep
from queue import Queue
from json import loads, dumps
from datetime import datetime
from datetime import timedelta
from datetime import timezone
from random import choice, SystemRandom
from tcplib import query
from settings import targeting
from settings import node_settings
from display import cout, ctxt
# How much propagation history to take. Enforce int type
__NEXUS__PROP_HISTORY_LENGTH__ = 2000
__NEXUS__PROP_HISTORY_CUT__ = int(__NEXUS__PROP_HISTORY_LENGTH__ / 2)
# Network variables
__NEXUS__PING_TIMEOUT__ = 1
__NEXUS__PING_CAP__ = 10
# Time Variables
__NEXUS__BASE_EPOCH_SECONDS__ = 40
# Console out peers dropping offline
__NEXUS__SHOW_PEER_DROPS__ = False
class PeerNexus(threading.Thread):
def __init__(self, memPool, chain):
threading.Thread.__init__(self)
self.signal = True
# A logger
self.logger = logging.getLogger(__name__)
        # Suppress console messages from Nexus
self.supressText = False
'''
Time Information
'''
# Initialization time
self.uptime = datetime.utcnow()
# Network time - Exists as None until on a network
self.utc_network = None
# Block epochs for PoT
self.next_epoch = None
self.prev_epoch = None
# Number of epochs we've been alive for
self.epochs_passed = 0
        # Get the seconds component (0-59) of a time difference; note this is
        # not the total number of seconds in the delta.
        # use: self.timeDeltaToSeconds(endTime - startTime)
self.timeDeltaToSeconds = lambda x: int(str(x).split(".")[0].split(":")[2])
'''
References to the DHNode Information
-memPool = Transaction pool (yet-to-be-mined)
-chain = The current block chain
'''
self.memPool = memPool
self.chain = chain
'''
Network/ Peer Information
        -propagated = A list that ensures we don't re-send
                      previously propagated requests
-peers = List of active (online) peers
'''
        # Things that we've already propagated
self.propagated = []
# All known peers - Load from whatever file
self.peers = []
# Know thyself - Ensure we list ourselves as a node.
# Ignored by the '_ALL_' propagation tag
self.addNewPeer("127.0.0.1", node_settings["port"], self.next_epoch, self.uptime)
# Eventual constant node
self.addNewPeer("digitalhills.space", node_settings["port"], None, None)
# Different outgoing types to talk with peers
self.peerQueueEvent = {
"sync_MemPool": "smp",
"sync_NextBlock": "snb",
"sync_KnownNodes": "skn",
"sync_BlockChain": "sbc",
"info_ReleasedBlock": "irb",
# Programmed into Handler or in-use elsewhere
"ping_peers": "pp",
"data_propagate": "dp"
}
# Queue of all things that need to be pushed to all peers
# - Updates to mempool, current block set aside (about to go out - to sync all blocks released)
# Messages will be sent every cycle of self.run
self.pushToPeers = Queue()
'''
Before we start running, load a list of expected peers, and attempt contact
The first ones we come in contact with will be our basis of synchronization
'''
# Load file
# Attempt pings
# If accept: Sync block chain - Sync epochs
cout("cyan", "\n> Initializing Peer Nexus\n> Attempting to join known peers...")
known_nodes = None
try:
known_nodes = open("dpeer/known_nodes.json", "rb").read().decode('utf-8')
except IOError:
cout("fail", "Unable to open known peers file - Assuming peer is alone")
if known_nodes is not None:
try:
known_nodes = loads(known_nodes)
except:
cout("fail", "Error loading known_nodes.json p Assuming peer is alone")
known_nodes = None
        # Add nodes from file (skipped if nothing could be loaded)
        if known_nodes is not None:
            for node in known_nodes:
                self.addNewPeer(node["address"], node["port"], node["epochTime"], node["personalTime"])
# Attempt to sync with all nodes
self.synchronizeEpoch()
'''
Push an event into the pushToPeers Queue
'''
def addItemToPeerQueue(self, _peerQueueEvent, _peer, data=None):
self.pushToPeers.put((_peer, _peerQueueEvent, data))
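    # e.g. self.addItemToPeerQueue(self.peerQueueEvent["ping_peers"], "_ALL_")
    # queues a ping to every known peer for the next run() cycle.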
'''
Add new peer
'''
def addNewPeer(self, address, port, epochTime, personalTime):
# To ensure nobody attempts changing what we think about ourselves
if address == "127.0.0.1":
port = node_settings["port"]
epochTime = str(self.next_epoch)
personalTime = str(self.uptime)
# Use address + str(port) to uniquely identify nodes behind same address
cleanse = None
for peer in self.peers:
if (peer["address"] + str(peer["port"])) == address + str(port):
cleanse = address + str(port)
# If the item exists, remove it to append with new information!
if cleanse is not None:
self.peers = [peer for peer in self.peers if (peer["address"] + str(peer["port"])) != cleanse]
# Append the peer as-if its a new peer
self.peers.append({
"address": str(address),
"port": int(port),
"lastConnect": str(datetime.now()),
"failedPings": 0,
"online": True,
"epochTime": epochTime,
"personalTime": personalTime
})
'''
Perform all pushes to peers
- Should only be called by this thread's run loop
'''
def __pushEventsToPeers(self):
while not self.pushToPeers.empty():
# Get an event from the queue, and break into its pieces
peerEvent = self.pushToPeers.get()
_peer = peerEvent[0]
_event = peerEvent[1]
_data = peerEvent[2]
'''
Build list of peers that will be receiving the information
'''
outgoing = []
if _peer == "_ALL_":
outgoing = [peer for peer in self.peers if peer["address"] != "127.0.0.1"]
else:
''' Find the peer to ensure it exists - Here we accept sending to local address '''
for _p in self.peers:
if _p["address"] == _peer:
outgoing.append(_p)
if len(outgoing) == 0:
self.logger.info("<Nexus unable to locate peer [{}] in peer list>".format(_peer))
if not self.supressText:
cout("fail", ("<Nexus unable to locate peer [{}] in peer list>".format(_peer)))
return
'''
Push the data to the selected peers
- Handle all self.peerQueueEvent
'''
for _p in outgoing:
'''
Ping Request
'''
if _event == self.peerQueueEvent["ping_peers"]:
query(_p["address"], _p["port"], "*", self.logger)
# Need to add all self.peerQueueEvent items here
'''
Data Propagation Request
'''
if _event == self.peerQueueEvent["data_propagate"]:
                    if _data is None:
self.logger.warning("Propagation of 'None' data triggered")
else:
self.logger.info("Propagating data")
if _p["address"] != "127.0.0.1":
query(_p["address"], _p["port"], _data, self.logger)
                        if _data not in self.propagated:
                            self.propagated.append(_data)
            # Indicate that the task from the queue is complete
self.pushToPeers.task_done()
'''
Trigger shutdown
'''
def triggerShutdown(self):
self.signal = False
# We are at the end of the run, write out active nodes
try:
open("dpeer/known_nodes.json", "w+").write(dumps(self.peers, indent=4))
except IOError:
cout("fail", "Unable to save TX to file.")
'''
Request handler
        - Handles all incoming requests from other peers
- Need to send back string-type even on failures
- The events handled here are listed in :
Documentation -> RequestProtocol.md
'''
def handle(self, data):
'''
Ping received - Send back a pong
'''
if data == b'*':
return "___PONG___"
'''
Split the data into chunks, each request should follow the
request protocol chunking defined in 'RequestProtocol.md'
'''
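        # Illustrative (hypothetical) wire format for a broadcast request,
        # following the newline chunking described above:
        #   b'broadcast\nnew_client\n{"address": "10.0.0.2", "port": 9000, ...}'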
original_data = data
data = data.decode()
data = data.split('\n')
if len(data) < 3:
return("_INVALID_DATA_CHUNK_")
# Ensure that we don't repeat ourselves
if original_data in self.propagated:
self.logger.info("Handler got request to propagate previously propageted data")
return("_PREVIOUSLY_PROPAGETED_")
'''
Information Pushed (IP) - Broadcast received
'''
if data[0] == "broadcast":
'''
IP.0
'''
if data[1] == "new_client":
# Ensure there isn't anything silly with \n happening
ensured = ''.join(data[2:])
try:
ensured = loads(ensured)
except:
return ("_INVALID_UNABLE_TO_LOADS_DATA_")
try:
addr = ensured["address"]
prt = ensured["port"]
except:
return("_INVALID_JSON_DATA_")
                # Attempt to ping the peer they asked us to add. If we can't
                # reach them, we won't try.
                res = query(addr, prt, "*", self.logger)
if res is None or res == -1:
return ("_CANT_REACH_GIVEN_PEER_")
try:
self.addNewPeer(ensured["address"], ensured["port"], ensured["epochTime"], ensured["personalTime"])
except:
return("_INVALID_JSON_DATA_")
# Propagate peer to network
self.addItemToPeerQueue(self.peerQueueEvent["data_propagate"], "_ALL_", original_data)
return("_REGISTERED_")
'''
IP.1
'''
if data[1] == "new_transaction":
# Add to mempool, and propagate
_memaddresult = self.memPool.insert_transaction(''.join(data[2:]))
if _memaddresult == "_NEW_TX_CREATED_":
self.addItemToPeerQueue(self.peerQueueEvent["data_propagate"], "_ALL_", original_data)
return _memaddresult
'''
Synchronization Request (SR)
'''
if data[0] == "synchronize":
print("\nA synchronization request was picked up!")
'''
Information Request (IR)
'''
if data[0] == "information":
''' IR.0 '''
if data[1] == "timestamp":
return str(datetime.now())
''' IR.1 '''
if data[1] == "numBlocks":
return str(self.chain.head)
''' IR.2 '''
if data[1] == "numPeers":
return str(len(self.peers))
''' IR.3 '''
if data[1] == "numPool":
                return str(self.memPool.request_pool_size())
''' IR.4 '''
if data[1] == "uptime":
return str(self.uptime)
cout("fail", "\tNEXUS->Handle->data:")
cout("lightgreen", data)
'''
If we reach the bottom, that means there was an issue with the
data, return something to indicate the issue
'''
return "_NULL_"
'''
Sync the EPOCH time and network time with peers
'''
def synchronizeEpoch(self):
cout("fail", "NEED TO SYNC WITH PEERS ON NEXUS LAUNCH - NYP")
if not self.supressText and self.prev_epoch is None and self.next_epoch is None:
cout("yellow", "...Attempting to sync from previously known nodes...")
        # Nodes to attempt to connect with
attempt = []
# Attempt ping
for peer in self.peers:
            res = query(peer["address"], peer["port"], "*", self.logger, timeout=2)
if res is None or res == -1:
attempt_sync = False
if not self.supressText:
print(
ctxt("fail", "Failed to ping ["),
ctxt("yellow", (peer["address"] + "@" + str(peer["port"]))),
ctxt("fail", "] - Wont attempt to sync")
)
else:
attempt_sync = True
if attempt_sync and peer["address"] != "127.0.0.1":
if not self.supressText:
cout("yellow", "Adding external peer to attempt sync with")
                    attempt.append(peer)
if not self.supressText:
cout("yellow", ("Ping-scan for sync complete, " + str(len(attempt)) + " nodes to attempt sync with"))
# We are either starting alone, or we were cut off from the network
if len(attempt) == 0:
self.next_epoch = None
return
# Grab random peer that is available to sync with
sysRand = SystemRandom()
peer_to_sync = sysRand.choice(attempt)
print("NOT YET DONE ------ ")
print("Sync with: ", peer_to_sync)
'''
Epoch triggered, perform mining process
'''
def performCurrentEpoch(self):
self.epochs_passed += 1
return
'''
Main running loop
'''
def run(self):
cout("lightgreen", "> Peer Nexus Online <")
# If we dont sync on launch, this will be true
if self.next_epoch is None and self.prev_epoch is None:
print(
ctxt("fail", "(No peers discovered)") +
ctxt("yellow", " . Assuming pillar status at [") +
ctxt("cyan", str(datetime.now())) +
ctxt("yellow", "]")
)
self.prev_epoch = datetime.utcnow()
self.next_epoch = self.prev_epoch + timedelta(seconds=__NEXUS__BASE_EPOCH_SECONDS__)
while self.signal:
'''
Ensure list of propagated data isn't getting out of hand
'''
if len(self.propagated) > __NEXUS__PROP_HISTORY_LENGTH__:
self.propagated = self.propagated[__NEXUS__PROP_HISTORY_CUT__:]
'''
Ping all online peers to ensure connectivity
'''
for peer in self.peers:
                res = query(peer["address"], peer["port"], "*", self.logger, timeout=__NEXUS__PING_TIMEOUT__)
if res is None or res == -1:
peer["failedPings"] += 1
else:
peer["online"] = True
if peer["failedPings"] > __NEXUS__PING_CAP__:
peer["online"] = False
# Indicate their change in status
if not self.supressText and __NEXUS__SHOW_PEER_DROPS__:
print(
ctxt("fail", "Peer ["), ctxt("yellow", peer["address"]),
ctxt("fail", "] has gone offline.")
)
self.logger.info(("Peer [" + peer["address"] + "] went offline"))
# Only keep online peers
self.peers = [peer for peer in self.peers if peer["online"]]
'''
Push events to all online peers
'''
if not self.pushToPeers.empty():
self.__pushEventsToPeers()
'''
Check for EPOCH
'''
if self.next_epoch <= datetime.utcnow():
                # Perform the current epoch
self.performCurrentEpoch()
# Once we have mined the block, and ensured changes set, set next epoch
self.prev_epoch = self.next_epoch
                self.next_epoch = datetime.utcnow() + timedelta(seconds=__NEXUS__BASE_EPOCH_SECONDS__)
# No, the CPU doesn't like grindcore
sleep(0.1)
|
DigitalHills/Esse
|
dpeer/nexus.py
|
Python
|
apache-2.0
| 17,520
| 0.00468
|
'''
Appresponse module contains ApplicationResponse and FileDescriptor
classes which are used to parse response.
Usage:
>>> response = ApplicationResponse(xml_message)
>>> response.is_accepted() # Checks was the request accepted
'''
import os
import base64
import gzip
import logging
from lxml import etree
try:
from bankws.signature import validate
except ImportError:
from signature import validate
class ApplicationResponse():
""" ApplicationResponse class is used to parse certificate responses
Public methods::
is_accepted: Checks if request was accepted (responsecode 00)
content: Returns content of message
references: Returns filereferences list.
@type _customerid: string
@ivar _customerid: Customer that send request.
@type _timestamp: string
@ivar _timestamp: Time and date when Application Response header was made.
@type _responsecode: string
@ivar _responsecode: Result of the request.
@type _responsetext: string
@ivar _responsetext: Human readable text telling meaning of response code.
@type _executionserial: string
@ivar _executionserial: Unique identifier for operation. [0..1]
@type _encrypted: boolean
@ivar _encrypted: Is content encrypted.
@type _encryptionmethod: string
@ivar _encryptionmethod: Name of the encryption algorithm.
@type _compressed: boolean
@ivar _compressed: Is content compressed.
@type _compressionmethod: string
@ivar _compressionmethod: Name of the compression algorithm.
@type _amounttotal: string
@ivar _amounttotal: Total sum of amounts in request.
@type _transactioncount: string
@ivar _transactioncount: Total number of transactions in the data.
@type _customerextension: Element
@ivar _customerextension: Extensions for schema.
@type _file_descriptors: List<FileDescriptor>
    @ivar _file_descriptors: List of files found in the bank.
@type _filetype: string
@ivar _filetype: Type of the file.
@type _content: string
@ivar _content: Content of response (Usually empty, used in downloadfile
and schema validation error responses.)
"""
def __init__(self, message):
"""
Initializes ApplicationResponse class.
@type message: string
@param message: ApplicationResponse xml-message.
@raise ValueError: If message doesn't follow xml schema or
signature is invalid.
"""
self.logger = logging.getLogger("bankws")
self._accepted = True
# validate using schema
if not self._validate_with_schema(message):
# Some errors return invalid xml.
self.logger.error("Message doesn't follow schema.")
self._accepted = False
# raise ValueError('Failed to validate against schema')
# Check signature
if not validate(message):
raise ValueError('Failed to verify signature')
descriptors = None
self._content = None
tree = etree.fromstring(message)
# Parse elements from tree to variables.
for element in tree.iter():
if element.tag == "{http://bxd.fi/xmldata/}CustomerId":
self.logger.debug("{0}: {1}".format(element.tag, element.text))
self._customerid = element.text
if element.tag == "{http://bxd.fi/xmldata/}Timestamp":
self.logger.debug("{0}: {1}".format(element.tag, element.text))
self._timestamp = element.text
if element.tag == "{http://bxd.fi/xmldata/}ResponseCode":
self.logger.debug("{0}: {1}".format(element.tag, element.text))
self._responsecode = element.text
if element.tag == "{http://bxd.fi/xmldata/}ResponseText":
self.logger.debug("{0}: {1}".format(element.tag, element.text))
self._responsetext = element.text
if element.tag == "{http://bxd.fi/xmldata/}ExecutionSerial":
self.logger.debug("{0}: {1}".format(element.tag, element.text))
self._executionserial = element.text
if element.tag == "{http://bxd.fi/xmldata/}Encrypted":
self.logger.debug("{0}: {1}".format(element.tag, element.text))
value = element.text.lower()
self._encrypted = True if value == 'true' else False
if element.tag == "{http://bxd.fi/xmldata/}EncryptionMethod":
self.logger.debug("{0}: {1}".format(element.tag, element.text))
self._encryptionmethod = element.text
if element.tag == "{http://bxd.fi/xmldata/}Compressed":
self.logger.debug("{0}: {1}".format(element.tag, element.text))
value = element.text.lower()
if value == '1':
value = 'true'
self._compressed = True if value == 'true' else False
if element.tag == "{http://bxd.fi/xmldata/}CompressionMethod":
self.logger.debug("{0}: {1}".format(element.tag, element.text))
self._compressionmethod = element.text
if element.tag == "{http://bxd.fi/xmldata/}AmountTotal":
self.logger.debug("{0}: {1}".format(element.tag, element.text))
self._amounttotal = element.text
if element.tag == "{http://bxd.fi/xmldata/}TransactionCount":
self.logger.debug("{0}: {1}".format(element.tag, element.text))
self._transactioncount = element.text
if element.tag == "{http://bxd.fi/xmldata/}CustomerExtension":
self.logger.debug("{0}: {1}".format(element.tag, element.text))
self._customerextension = element
if element.tag == "{http://bxd.fi/xmldata/}FileDescriptors":
descriptors = element
if element.tag == "{http://bxd.fi/xmldata/}FileType":
self.logger.debug("{0}: {1}".format(element.tag, element.text))
self._filetype = element.text
if element.tag == "{http://bxd.fi/xmldata/}Content":
self.logger.debug("{0}: {1}".format(element.tag, element.text))
bytestring = bytes(element.text, 'utf-8')
self._content = base64.b64decode(bytestring)
# Parse filedescriptors
if descriptors is not None:
self._file_descriptors = []
for descriptor in descriptors:
fd = FileDescriptor()
for element in descriptor.iter():
if element.tag == "{http://bxd.fi/xmldata/}FileReference":
self.logger.debug("{0}: {1}".format(element.tag,
element.text))
fd.reference = element.text
if element.tag == "{http://bxd.fi/xmldata/}TargetId":
self.logger.debug("{0}: {1}".format(element.tag,
element.text))
fd.target = element.text
if element.tag == "{http://bxd.fi/xmldata/}ServiceId":
self.logger.debug("{0}: {1}".format(element.tag,
element.text))
fd.serviceid = element.text
if element.tag == ("{http://bxd.fi/xmldata/}"
"ServiceIdOwnerName"):
self.logger.debug("{0}: {1}".format(element.tag,
element.text))
fd.serviceidownername = element.text
if element.tag == "{http://bxd.fi/xmldata/}UserFilename":
self.logger.debug("{0}: {1}".format(element.tag,
element.text))
fd.userfilename = element.text
if element.tag == ("{http://bxd.fi/xmldata/}"
"ParentFileReference"):
self.logger.debug("{0}: {1}".format(element.tag,
element.text))
fd.parentfile = element.text
if element.tag == ("{http://bxd.fi/xmldata/}FileType"):
self.logger.debug("{0}: {1}".format(element.tag,
element.text))
fd.filetype = element.text
if element.tag == "{http://bxd.fi/xmldata/}FileTimestamp":
self.logger.debug("{0}: {1}".format(element.tag,
element.text))
fd.timestamp = element.text
if element.tag == "{http://bxd.fi/xmldata/}Status":
self.logger.debug("{0}: {1}".format(element.tag,
element.text))
fd.status = element.text
self._file_descriptors.append(fd)
def is_accepted(self):
""" Was applicationrequest accepted or not.
@rtype: boolean
@return: True if response code was 00 (OK)
"""
try:
if self._responsecode != "00" or self._accepted == False:
self.logger.error(
"ApplicationResponse:{0}:{1}".format(self._responsecode,
self._responsetext)
)
return False
return True
except AttributeError as e:
self.logger.exception(e)
self.logger.error("Unable to find responsecode and response text.")
return False
def _get_content(self):
""" Returns content of xml string in clear text
@rtype: string or None
@return: Data saved to content field.
"""
data = ""
try:
if self._compressed is True:
                if self._get_compressionmethod() is not None:
if self._get_compressionmethod() == "RFC1952":
data = gzip.decompress(bytes(self._content))
else:
raise TypeError("Unsupported compression method")
else:
data = gzip.decompress(bytes(self._content))
else:
data = self._content
return str(data, 'utf-8')
except AttributeError:
return self._content
content = property(_get_content)
def _get_compressionmethod(self):
""" Returns compression method used
@rtype: string or None
@return: Compression algorithm.
"""
try:
if self._compressed is True:
return self._compressionmethod
except AttributeError:
return None
def _get_filedescriptors(self):
""" Returns list containing file descriptors.
@rtype: list<L{FileDescriptor}>
@return: FileDescriptors found from message.
"""
return self._file_descriptors
references = property(_get_filedescriptors)
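    # Minimal usage sketch (assumes a valid, signed response message string):
    #   response = ApplicationResponse(xml_message)
    #   if response.is_accepted():
    #       for fd in response.references:
    #           print(fd.reference, fd.userfilename, fd.status)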
def _validate_with_schema(self, xml_string):
""" Validates given xml string against xml schema.
@type xml_string: string
@param xml_string: Xml string to be validated.
@rtype: boolean
@return: Is string valid against schema or not.
"""
path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"resources/ApplicationResponse_20080918.xsd"
)
schema_doc = etree.parse(path)
xml_schema = etree.XMLSchema(schema_doc)
try:
doc = etree.fromstring(xml_string)
except etree.XMLSyntaxError:
self.logger.error("Invalid XML-data.")
return False
try:
xml_schema.assertValid(doc)
except etree.DocumentInvalid as e:
self.logger.error(e)
return xml_schema.validate(doc)
class FileDescriptor():
"""
FileDescriptor class holds data that can be found under Filedescriptor tag.
    @type _reference: string
    @ivar _reference: Unique identifier for file (given by the bank)
@type _target: string
@ivar _target: Name of the folder where file is stored in bank.
@type _serviceid: string
@ivar _serviceid: Additional identification information of the customer.
@type _serviceidownername: string
@ivar _serviceidownername: Owner of the service identified by ServiceId.
@type _userfilename: string
@ivar _userfilename: Filename that user gave in uploadfile operation.
@type _parentfile: string
@ivar _parentfile: File reference to file which this file is related.
@type _filetype: string
@ivar _filetype: Type of the file.
@type _timestamp: string (ISODateTime)
@ivar _timestamp: Moment of file creation in the banks system.
@type _status: string
@ivar _status: State of file processing.
"""
def __init__(self):
""" Initializes FileDescriptor class. """
self._reference = ""
self._target = ""
self._serviceid = ""
self._serviceidownername = ""
self._userfilename = ""
self._parentfile = ""
self._filetype = ""
self._timestamp = ""
self._status = ""
def __str__(self):
ret_val = "".join(["{}: {}\n".format(key[1:].title(), value)
for key, value in self.__dict__.items()])
return ret_val
def _set_reference(self, reference):
self._reference = reference
def _get_reference(self):
return self._reference
reference = property(_get_reference, _set_reference)
def _set_target(self, target_folder):
self._target = target_folder
def _get_target(self):
return self._target
target = property(_get_target, _set_target)
def _set_serviceid(self, serviceid):
self._serviceid = serviceid
def _get_serviceid(self):
return self._serviceid
serviceid = property(_get_serviceid, _set_serviceid)
def _set_serviceidowner(self, name):
self._serviceidownername = name
def _get_serviceidowner(self):
return self._serviceidownername
serviceidownername = property(_get_serviceidowner, _set_serviceidowner)
def _set_userfilename(self, filename):
self._userfilename = filename
def _get_userfilename(self):
return self._userfilename
userfilename = property(_get_userfilename, _set_userfilename)
def _set_parent(self, parent_id):
self._parentfile = parent_id
def _get_parent(self):
return self._parentfile
parentfile = property(_get_parent, _set_parent)
def _set_filetype(self, filetype):
self._filetype = filetype
def _get_filetype(self):
return self._filetype
filetype = property(_get_filetype, _set_filetype)
def _set_filetimestamp(self, timestamp):
self._timestamp = timestamp
def _get_filetimestamp(self):
return self._timestamp
timestamp = property(_get_filetimestamp, _set_filetimestamp)
def _set_status(self, status):
self._status = status
"""codes = ["WFP", "WFC", "FWD", "DLD", "DEL", "NEW", "KIN"]
if status in codes:
self._status = status
else:
raise ValueError("Unknown code")"""
def _get_status(self):
return self._status
status = property(_get_status, _set_status)
|
luojus/bankws
|
bankws/appresponse.py
|
Python
|
mit
| 15,773
| 0.000127
|
#!/usr/bin/env python
# **********************************************************************
#
# Copyright (c) 2003-2011 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import os, sys, traceback
import Ice, AllTests
def test(b):
if not b:
raise RuntimeError('test assertion failed')
def usage(n):
sys.stderr.write("Usage: " + n + " port...\n")
def run(args, communicator):
ports = []
for arg in args[1:]:
if arg[0] == '-':
sys.stderr.write(args[0] + ": unknown option `" + arg + "'\n")
usage(args[0])
return False
ports.append(int(arg))
if len(ports) == 0:
sys.stderr.write(args[0] + ": no ports specified\n")
usage(args[0])
return False
try:
AllTests.allTests(communicator, ports)
except:
traceback.print_exc()
test(False)
return True
communicator = None
try:
initData = Ice.InitializationData()
initData.properties = Ice.createProperties(sys.argv)
#
# This test aborts servers, so we don't want warnings.
#
initData.properties.setProperty('Ice.Warn.Connections', '0')
communicator = Ice.initialize(sys.argv, initData)
status = run(sys.argv, communicator)
except:
traceback.print_exc()
status = False
if communicator:
try:
communicator.destroy()
except:
traceback.print_exc()
status = False
sys.exit(not status)
|
joshmoore/zeroc-ice
|
py/test/Ice/faultTolerance/Client.py
|
Python
|
gpl-2.0
| 1,608
| 0.006219
|
input_name = '../examples/linear_elasticity/linear_viscoelastic.py'
output_name_trunk = 'test_linear_viscoelastic'
from tests_basic import TestInputEvolutionary
class Test(TestInputEvolutionary):
pass
|
RexFuzzle/sfepy
|
tests/test_input_linear_viscoelastic.py
|
Python
|
bsd-3-clause
| 207
| 0.009662
|
#!/usr/bin/env python3
"""
Item Related Objects.
"""
#-*- coding: utf-8 -*-
import re
from datetime import datetime
from db_related import DBConnect
class Item_Lookup(object):
"""
Returned Item Lookup Dictionary Structure:
item = {
upc: text
description: text
cost: decimal
price: decimal
taxable: True or False
on_hand_qty: decimal
stx: decimal
}
"""
def __init__(self, upc):
self.upc = upc
def GetBasics(self):
query = '''SELECT upc, description, cost, retail, taxable, onhandqty
FROM item_detailed
WHERE upc=(?)'''
data = [self.upc,]
        returned = DBConnect(query, data).ALL()
        return returned
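# Minimal usage sketch (assumes a populated item_detailed table; the UPC
# value here is hypothetical):
#   item = Item_Lookup('012345678905')
#   basics = item.GetBasics()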
|
fuspu/RHP-POS
|
item_utils.py
|
Python
|
mit
| 837
| 0.009558
|
# coding: utf-8
import sys
from collections import defaultdict
sys.path.append('/project/nakamura-lab01/Work/yusuke-o/python')
from data.reader import dlmread
def addfeature(fs, fid, name, mode):
if mode == 'dev' or name in fid:
fs.append(fid[name])
def main():
if len(sys.argv) != 6:
        print('USAGE: python3 makeliblin.py \\')
print(' <str: mode ("dev" or "test")>')
print(' <in-file: input sentence with POS> \\')
print(' <in-file: splitter table> \\')
print(' <(dev)out-file, (test)in-file: feature ID table> \\')
print(' <out-file: LIBLINEAR input data>')
return
mode = sys.argv[1]
fname_pos = sys.argv[2]
fname_splitter = sys.argv[3]
fname_fid = sys.argv[4]
fname_liblin = sys.argv[5]
if mode not in ['dev', 'test']:
sys.stderr.write('ERROR: unknown mode.\n')
return
# load word and pos
corpus_in_pos = [x for x in dlmread(fname_pos, ' ')]
for i in range(len(corpus_in_pos)):
corpus_in_pos[i] = [w.split('_') for w in corpus_in_pos[i]]
# load splitter
tab_sp = defaultdict(lambda: [])
with open(fname_splitter, 'r', encoding='utf-8') as fp:
for l in fp:
lineno, wordno = tuple(int(x) for x in l.strip().split(' '))
tab_sp[lineno].append(wordno)
# load or new feature id table
fid = defaultdict(lambda: len(fid)+1)
if mode == 'test':
with open(fname_fid, 'r', encoding='utf-8') as fp:
for l in fp:
ls = l.split()
k = ls[0]
v = int(ls[1])
fid[k] = v
# make/save training data
n = 0
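    # Each line written below follows the LIBLINEAR sparse format: a label
    # followed by sorted feature_id:value pairs, e.g. (hypothetical IDs)
    # "1 3:1 17:1 42:1". Label 1 marks a split point, label 2 a non-split.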
with open(fname_liblin, 'w', encoding='utf-8') as fp:
for i in range(len(corpus_in_pos)):
data = [['<s>', '<s>']] * 2 + corpus_in_pos[i] + [['</s>', '</s>']] * 2
for j in range(len(data)-5): # ignore end of sentence
jj = j+2
features = []
# unigram words
# addfeature(features, fid, 'WORD[-2]=%s' % data[jj-2][0], mode)
addfeature(features, fid, 'WORD[-1]=%s' % data[jj-1][0], mode)
addfeature(features, fid, 'WORD[0]=%s' % data[jj+0][0], mode)
addfeature(features, fid, 'WORD[+1]=%s' % data[jj+1][0], mode)
addfeature(features, fid, 'WORD[+2]=%s' % data[jj+2][0], mode)
# unigram POSes
# addfeature(features, fid, 'POS[-2]=%s' % data[jj-2][1], mode)
addfeature(features, fid, 'POS[-1]=%s' % data[jj-1][1], mode)
addfeature(features, fid, 'POS[0]=%s' % data[jj+0][1], mode)
addfeature(features, fid, 'POS[+1]=%s' % data[jj+1][1], mode)
addfeature(features, fid, 'POS[+2]=%s' % data[jj+2][1], mode)
# bigram words
# addfeature(features, fid, 'WORD[-2:-1]=%s_%s' % (data[jj-2][0], data[jj-1][0]), mode)
addfeature(features, fid, 'WORD[-1:0]=%s_%s' % (data[jj-1][0], data[jj+0][0]), mode)
addfeature(features, fid, 'WORD[0:+1]=%s_%s' % (data[jj+0][0], data[jj+1][0]), mode)
addfeature(features, fid, 'WORD[+1:+2]=%s_%s' % (data[jj+1][0], data[jj+2][0]), mode)
# bigram POSes
# addfeature(features, fid, 'POS[-2:-1]=%s_%s' % (data[jj-2][1], data[jj-1][1]), mode)
addfeature(features, fid, 'POS[-1:0]=%s_%s' % (data[jj-1][1], data[jj+0][1]), mode)
addfeature(features, fid, 'POS[0:+1]=%s_%s' % (data[jj+0][1], data[jj+1][1]), mode)
addfeature(features, fid, 'POS[+1:+2]=%s_%s' % (data[jj+1][1], data[jj+2][1]), mode)
# trigram words
# addfeature(features, fid, 'WORD[-2:0]=%s_%s_%s' % (data[jj-2][0], data[jj-1][0], data[jj+0][0]), mode)
addfeature(features, fid, 'WORD[-1:+1]=%s_%s_%s' % (data[jj-1][0], data[jj+0][0], data[jj+1][0]), mode)
addfeature(features, fid, 'WORD[0:+2]=%s_%s_%s' % (data[jj+0][0], data[jj+1][0], data[jj+2][0]), mode)
# trigram POSes
# addfeature(features, fid, 'POS[-2:0]=%s_%s_%s' % (data[jj-2][1], data[jj-1][1], data[jj+0][1]), mode)
addfeature(features, fid, 'POS[-1:+1]=%s_%s_%s' % (data[jj-1][1], data[jj+0][1], data[jj+1][1]), mode)
addfeature(features, fid, 'POS[0:+2]=%s_%s_%s' % (data[jj+0][1], data[jj+1][1], data[jj+2][1]), mode)
line = '1 ' if j in tab_sp[i] else '2 '
line += ' '.join('%d:1'%f for f in sorted(features))
fp.write(line+'\n')
n += 1
# save feature id table
if mode == 'dev':
with open(fname_fid, 'w', encoding='utf-8') as fp:
for k, v in fid.items():
fp.write('%s\t%d\n' % (k, v))
if __name__ == '__main__':
main()
|
ahclab/greedyseg
|
makeliblin.py
|
Python
|
gpl-2.0
| 4,282
| 0.02639
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import collections
import copy
import datetime
import decimal
import math
import uuid
import warnings
from base64 import b64decode, b64encode
from itertools import tee
from django.apps import apps
from django.db import connection
from django.db.models.lookups import default_lookups, RegisterLookupMixin
from django.db.models.query_utils import QueryWrapper
from django.conf import settings
from django import forms
from django.core import exceptions, validators, checks
from django.utils.datastructures import DictWrapper
from django.utils.dateparse import parse_date, parse_datetime, parse_time
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.functional import cached_property, curry, total_ordering, Promise
from django.utils.text import capfirst
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import (smart_text, force_text, force_bytes,
python_2_unicode_compatible)
from django.utils.ipv6 import clean_ipv6_address
from django.utils import six
from django.utils.itercompat import is_iterable
# Avoid "TypeError: Item in ``from list'' not a string" -- unicode_literals
# makes these strings unicode
__all__ = [str(x) for x in (
'AutoField', 'BLANK_CHOICE_DASH', 'BigIntegerField', 'BinaryField',
'BooleanField', 'CharField', 'CommaSeparatedIntegerField', 'DateField',
'DateTimeField', 'DecimalField', 'EmailField', 'Empty', 'Field',
'FieldDoesNotExist', 'FilePathField', 'FloatField',
'GenericIPAddressField', 'IPAddressField', 'IntegerField', 'NOT_PROVIDED',
'NullBooleanField', 'PositiveIntegerField', 'PositiveSmallIntegerField',
'SlugField', 'SmallIntegerField', 'TextField', 'TimeField', 'URLField',
'UUIDField',
)]
class Empty(object):
pass
class NOT_PROVIDED:
pass
# The values to use for "blank" in SelectFields. Will be appended to the start
# of most "choices" lists.
BLANK_CHOICE_DASH = [("", "---------")]
def _load_field(app_label, model_name, field_name):
return apps.get_model(app_label, model_name)._meta.get_field_by_name(field_name)[0]
class FieldDoesNotExist(Exception):
pass
# A guide to Field parameters:
#
# * name: The name of the field specified in the model.
# * attname: The attribute to use on the model object. This is the same as
# "name", except in the case of ForeignKeys, where "_id" is
# appended.
# * db_column: The db_column specified in the model (or None).
# * column: The database column for this field. This is the same as
# "attname", except if db_column is specified.
#
# Code that introspects values, or does other dynamic things, should use
# attname. For example, this gets the primary key value of object "obj":
#
# getattr(obj, opts.pk.attname)
def _empty(of_cls):
new = Empty()
new.__class__ = of_cls
return new
@total_ordering
@python_2_unicode_compatible
class Field(RegisterLookupMixin):
"""Base class for all field types"""
# Designates whether empty strings fundamentally are allowed at the
# database level.
empty_strings_allowed = True
empty_values = list(validators.EMPTY_VALUES)
# These track each time a Field instance is created. Used to retain order.
# The auto_creation_counter is used for fields that Django implicitly
# creates, creation_counter is used for all user-specified fields.
creation_counter = 0
auto_creation_counter = -1
default_validators = [] # Default set of validators
default_error_messages = {
'invalid_choice': _('Value %(value)r is not a valid choice.'),
'null': _('This field cannot be null.'),
'blank': _('This field cannot be blank.'),
'unique': _('%(model_name)s with this %(field_label)s '
'already exists.'),
# Translators: The 'lookup_type' is one of 'date', 'year' or 'month'.
# Eg: "Title must be unique for pub_date year"
'unique_for_date': _("%(field_label)s must be unique for "
"%(date_field_label)s %(lookup_type)s."),
}
class_lookups = default_lookups.copy()
# Generic field type description, usually overridden by subclasses
def _description(self):
return _('Field of type: %(field_type)s') % {
'field_type': self.__class__.__name__
}
description = property(_description)
def __init__(self, verbose_name=None, name=None, primary_key=False,
max_length=None, unique=False, blank=False, null=False,
db_index=False, rel=None, default=NOT_PROVIDED, editable=True,
serialize=True, unique_for_date=None, unique_for_month=None,
unique_for_year=None, choices=None, help_text='', db_column=None,
db_tablespace=None, auto_created=False, validators=[],
error_messages=None):
self.name = name
self.verbose_name = verbose_name # May be set by set_attributes_from_name
self._verbose_name = verbose_name # Store original for deconstruction
self.primary_key = primary_key
self.max_length, self._unique = max_length, unique
self.blank, self.null = blank, null
self.rel = rel
self.default = default
self.editable = editable
self.serialize = serialize
self.unique_for_date = unique_for_date
self.unique_for_month = unique_for_month
self.unique_for_year = unique_for_year
self._choices = choices or []
self.help_text = help_text
self.db_column = db_column
self.db_tablespace = db_tablespace or settings.DEFAULT_INDEX_TABLESPACE
self.auto_created = auto_created
# Set db_index to True if the field has a relationship and doesn't
# explicitly set db_index.
self.db_index = db_index
# Adjust the appropriate creation counter, and save our local copy.
if auto_created:
self.creation_counter = Field.auto_creation_counter
Field.auto_creation_counter -= 1
else:
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
self._validators = validators # Store for deconstruction later
messages = {}
for c in reversed(self.__class__.__mro__):
messages.update(getattr(c, 'default_error_messages', {}))
messages.update(error_messages or {})
self._error_messages = error_messages # Store for deconstruction later
self.error_messages = messages
def __str__(self):
""" Return "app_label.model_label.field_name". """
model = self.model
app = model._meta.app_label
return '%s.%s.%s' % (app, model._meta.object_name, self.name)
def __repr__(self):
"""
Displays the module, class and name of the field.
"""
path = '%s.%s' % (self.__class__.__module__, self.__class__.__name__)
name = getattr(self, 'name', None)
if name is not None:
return '<%s: %s>' % (path, name)
return '<%s>' % path
def check(self, **kwargs):
errors = []
errors.extend(self._check_field_name())
errors.extend(self._check_choices())
errors.extend(self._check_db_index())
errors.extend(self._check_null_allowed_for_primary_keys())
errors.extend(self._check_backend_specific_checks(**kwargs))
return errors
def _check_field_name(self):
""" Check if field name is valid, i.e. 1) does not end with an
underscore, 2) does not contain "__" and 3) is not "pk". """
if self.name.endswith('_'):
return [
checks.Error(
'Field names must not end with an underscore.',
hint=None,
obj=self,
id='fields.E001',
)
]
elif '__' in self.name:
return [
checks.Error(
'Field names must not contain "__".',
hint=None,
obj=self,
id='fields.E002',
)
]
elif self.name == 'pk':
return [
checks.Error(
"'pk' is a reserved word that cannot be used as a field name.",
hint=None,
obj=self,
id='fields.E003',
)
]
else:
return []
def _check_choices(self):
if self.choices:
if (isinstance(self.choices, six.string_types) or
not is_iterable(self.choices)):
return [
checks.Error(
"'choices' must be an iterable (e.g., a list or tuple).",
hint=None,
obj=self,
id='fields.E004',
)
]
elif any(isinstance(choice, six.string_types) or
not is_iterable(choice) or len(choice) != 2
for choice in self.choices):
return [
checks.Error(
("'choices' must be an iterable containing "
"(actual value, human readable name) tuples."),
hint=None,
obj=self,
id='fields.E005',
)
]
else:
return []
else:
return []
def _check_db_index(self):
if self.db_index not in (None, True, False):
return [
checks.Error(
"'db_index' must be None, True or False.",
hint=None,
obj=self,
id='fields.E006',
)
]
else:
return []
def _check_null_allowed_for_primary_keys(self):
if (self.primary_key and self.null and
not connection.features.interprets_empty_strings_as_nulls):
# We cannot reliably check this for backends like Oracle which
# consider NULL and '' to be equal (and thus set up
# character-based fields a little differently).
return [
checks.Error(
'Primary keys must not have null=True.',
hint=('Set null=False on the field, or '
'remove primary_key=True argument.'),
obj=self,
id='fields.E007',
)
]
else:
return []
def _check_backend_specific_checks(self, **kwargs):
return connection.validation.check_field(self, **kwargs)
def deconstruct(self):
"""
Returns enough information to recreate the field as a 4-tuple:
* The name of the field on the model, if contribute_to_class has been run
* The import path of the field, including the class: django.db.models.IntegerField
This should be the most portable version, so less specific may be better.
* A list of positional arguments
* A dict of keyword arguments
Note that the positional or keyword arguments must contain values of the
following types (including inner values of collection types):
* None, bool, str, unicode, int, long, float, complex, set, frozenset, list, tuple, dict
* UUID
* datetime.datetime (naive), datetime.date
* top-level classes, top-level functions - will be referenced by their full import path
* Storage instances - these have their own deconstruct() method
This is because the values here must be serialized into a text format
(possibly new Python code, possibly JSON) and these are the only types
with encoding handlers defined.
There's no need to return the exact way the field was instantiated this time,
just ensure that the resulting field is the same - prefer keyword arguments
over positional ones, and omit parameters with their default values.
"""
# Short-form way of fetching all the default parameters
keywords = {}
possibles = {
"verbose_name": None,
"primary_key": False,
"max_length": None,
"unique": False,
"blank": False,
"null": False,
"db_index": False,
"default": NOT_PROVIDED,
"editable": True,
"serialize": True,
"unique_for_date": None,
"unique_for_month": None,
"unique_for_year": None,
"choices": [],
"help_text": '',
"db_column": None,
"db_tablespace": settings.DEFAULT_INDEX_TABLESPACE,
"auto_created": False,
"validators": [],
"error_messages": None,
}
attr_overrides = {
"unique": "_unique",
"choices": "_choices",
"error_messages": "_error_messages",
"validators": "_validators",
"verbose_name": "_verbose_name",
}
equals_comparison = {"choices", "validators", "db_tablespace"}
for name, default in possibles.items():
value = getattr(self, attr_overrides.get(name, name))
# Unroll anything iterable for choices into a concrete list
if name == "choices" and isinstance(value, collections.Iterable):
value = list(value)
# Do correct kind of comparison
if name in equals_comparison:
if value != default:
keywords[name] = value
else:
if value is not default:
keywords[name] = value
# Work out path - we shorten it for known Django core fields
path = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
if path.startswith("django.db.models.fields.related"):
path = path.replace("django.db.models.fields.related", "django.db.models")
if path.startswith("django.db.models.fields.files"):
path = path.replace("django.db.models.fields.files", "django.db.models")
if path.startswith("django.db.models.fields.proxy"):
path = path.replace("django.db.models.fields.proxy", "django.db.models")
if path.startswith("django.db.models.fields"):
path = path.replace("django.db.models.fields", "django.db.models")
# Return basic info - other fields should override this.
return (
force_text(self.name, strings_only=True),
path,
[],
keywords,
)
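    # Illustration: a field declared as models.CharField(max_length=100) and
    # named 'title' deconstructs to roughly
    #   ('title', 'django.db.models.CharField', [], {'max_length': 100})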
def clone(self):
"""
Uses deconstruct() to clone a new copy of this Field.
Will not preserve any class attachments/attribute names.
"""
name, path, args, kwargs = self.deconstruct()
return self.__class__(*args, **kwargs)
def __eq__(self, other):
# Needed for @total_ordering
if isinstance(other, Field):
return self.creation_counter == other.creation_counter
return NotImplemented
def __lt__(self, other):
# This is needed because bisect does not take a comparison function.
if isinstance(other, Field):
return self.creation_counter < other.creation_counter
return NotImplemented
def __hash__(self):
return hash(self.creation_counter)
def __deepcopy__(self, memodict):
# We don't have to deepcopy very much here, since most things are not
# intended to be altered after initial creation.
obj = copy.copy(self)
if self.rel:
obj.rel = copy.copy(self.rel)
if hasattr(self.rel, 'field') and self.rel.field is self:
obj.rel.field = obj
memodict[id(self)] = obj
return obj
def __copy__(self):
# We need to avoid hitting __reduce__, so define this
# slightly weird copy construct.
obj = Empty()
obj.__class__ = self.__class__
obj.__dict__ = self.__dict__.copy()
return obj
def __reduce__(self):
"""
Pickling should return the model._meta.fields instance of the field,
not a new copy of that field. So, we use the app registry to load the
model and then the field back.
"""
if not hasattr(self, 'model'):
# Fields are sometimes used without attaching them to models (for
# example in aggregation). In this case give back a plain field
# instance. The code below will create a new empty instance of
# class self.__class__, then update its dict with self.__dict__
# values - so, this is very close to normal pickle.
return _empty, (self.__class__,), self.__dict__
if self.model._deferred:
# Deferred model will not be found from the app registry. This
# could be fixed by reconstructing the deferred model on unpickle.
raise RuntimeError("Fields of deferred models can't be reduced")
return _load_field, (self.model._meta.app_label, self.model._meta.object_name,
self.name)
def to_python(self, value):
"""
Converts the input value into the expected Python data type, raising
django.core.exceptions.ValidationError if the data can't be converted.
Returns the converted value. Subclasses should override this.
"""
return value
@cached_property
def validators(self):
# Some validators can't be created at field initialization time.
# This method provides a way to delay their creation until required.
return self.default_validators + self._validators
def run_validators(self, value):
if value in self.empty_values:
return
errors = []
for v in self.validators:
try:
v(value)
except exceptions.ValidationError as e:
if hasattr(e, 'code') and e.code in self.error_messages:
e.message = self.error_messages[e.code]
errors.extend(e.error_list)
if errors:
raise exceptions.ValidationError(errors)
def validate(self, value, model_instance):
"""
Validates value and throws ValidationError. Subclasses should override
this to provide validation logic.
"""
if not self.editable:
# Skip validation for non-editable fields.
return
if self._choices and value not in self.empty_values:
for option_key, option_value in self.choices:
if isinstance(option_value, (list, tuple)):
# This is an optgroup, so look inside the group for
# options.
for optgroup_key, optgroup_value in option_value:
if value == optgroup_key:
return
elif value == option_key:
return
raise exceptions.ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': value},
)
if value is None and not self.null:
raise exceptions.ValidationError(self.error_messages['null'], code='null')
if not self.blank and value in self.empty_values:
raise exceptions.ValidationError(self.error_messages['blank'], code='blank')
def clean(self, value, model_instance):
"""
Convert the value's type and run validation. Validation errors
from to_python and validate are propagated. The correct value is
returned if no error is raised.
"""
value = self.to_python(value)
self.validate(value, model_instance)
self.run_validators(value)
return value
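    # Sketch of the flow: on an IntegerField, clean('42', instance) runs
    # to_python() (converting to 42), then validate() and run_validators(),
    # and returns 42 if no ValidationError is raised.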
def db_type(self, connection):
"""
Returns the database column data type for this field, for the provided
connection.
"""
# The default implementation of this method looks at the
# backend-specific data_types dictionary, looking up the field by its
# "internal type".
#
# A Field class can implement the get_internal_type() method to specify
# which *preexisting* Django Field class it's most similar to -- i.e.,
# a custom field might be represented by a TEXT column type, which is
# the same as the TextField Django field type, which means the custom
# field's get_internal_type() returns 'TextField'.
#
# But the limitation of the get_internal_type() / data_types approach
# is that it cannot handle database column types that aren't already
# mapped to one of the built-in Django field types. In this case, you
# can implement db_type() instead of get_internal_type() to specify
# exactly which wacky database column type you want to use.
data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_")
try:
return connection.creation.data_types[self.get_internal_type()] % data
except KeyError:
return None
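    # Illustrative sketch (not part of the original module): a custom field
    # whose column type has no entry in the backend's data_types mapping and
    # therefore overrides db_type() directly; 'char(104)' is a hypothetical
    # column type:
    #
    #     class HandField(Field):
    #         def db_type(self, connection):
    #             return 'char(104)'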
def db_parameters(self, connection):
"""
Extension of db_type(), providing a range of different return
values (type, checks).
This will look at db_type(), allowing custom model fields to override it.
"""
data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_")
type_string = self.db_type(connection)
try:
check_string = connection.creation.data_type_check_constraints[self.get_internal_type()] % data
except KeyError:
check_string = None
return {
"type": type_string,
"check": check_string,
}
def db_type_suffix(self, connection):
return connection.creation.data_types_suffix.get(self.get_internal_type())
def get_db_converters(self, connection):
if hasattr(self, 'from_db_value'):
return [self.from_db_value]
return []
@property
def unique(self):
return self._unique or self.primary_key
def set_attributes_from_name(self, name):
if not self.name:
self.name = name
self.attname, self.column = self.get_attname_column()
if self.verbose_name is None and self.name:
self.verbose_name = self.name.replace('_', ' ')
def contribute_to_class(self, cls, name, virtual_only=False):
self.set_attributes_from_name(name)
self.model = cls
if virtual_only:
cls._meta.add_virtual_field(self)
else:
cls._meta.add_field(self)
if self.choices:
setattr(cls, 'get_%s_display' % self.name,
curry(cls._get_FIELD_display, field=self))
def get_attname(self):
return self.name
def get_attname_column(self):
attname = self.get_attname()
column = self.db_column or attname
return attname, column
def get_cache_name(self):
return '_%s_cache' % self.name
def get_internal_type(self):
return self.__class__.__name__
def pre_save(self, model_instance, add):
"""
Returns field's value just before saving.
"""
return getattr(model_instance, self.attname)
def get_prep_value(self, value):
"""
Perform preliminary non-db specific value checks and conversions.
"""
if isinstance(value, Promise):
value = value._proxy____cast()
return value
def get_db_prep_value(self, value, connection, prepared=False):
"""Returns field's value prepared for interacting with the database
backend.
        Used by the default implementations of ``get_db_prep_save`` and
        ``get_db_prep_lookup``.
"""
if not prepared:
value = self.get_prep_value(value)
return value
def get_db_prep_save(self, value, connection):
"""
Returns field's value prepared for saving into a database.
"""
return self.get_db_prep_value(value, connection=connection,
prepared=False)
def get_prep_lookup(self, lookup_type, value):
"""
        Perform preliminary non-db specific lookup checks and conversions.
"""
if hasattr(value, '_prepare'):
return value._prepare()
if lookup_type in {
'iexact', 'contains', 'icontains',
'startswith', 'istartswith', 'endswith', 'iendswith',
'month', 'day', 'week_day', 'hour', 'minute', 'second',
'isnull', 'search', 'regex', 'iregex',
}:
return value
elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
return self.get_prep_value(value)
elif lookup_type in ('range', 'in'):
return [self.get_prep_value(v) for v in value]
elif lookup_type == 'year':
try:
return int(value)
except ValueError:
raise ValueError("The __year lookup type requires an integer "
"argument")
return self.get_prep_value(value)
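    # For example (illustrative queryset; 'Entry' is a hypothetical model):
    # Entry.objects.filter(pub_date__year='2008') reaches this method with
    # lookup_type='year' and value='2008', which is coerced to the int 2008
    # before the backend computes the year's bounds.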
def get_db_prep_lookup(self, lookup_type, value, connection,
prepared=False):
"""
Returns field's value prepared for database lookup.
"""
if not prepared:
value = self.get_prep_lookup(lookup_type, value)
prepared = True
if hasattr(value, 'get_compiler'):
value = value.get_compiler(connection=connection)
if hasattr(value, 'as_sql') or hasattr(value, '_as_sql'):
# If the value has a relabeled_clone method it means the
# value will be handled later on.
if hasattr(value, 'relabeled_clone'):
return value
if hasattr(value, 'as_sql'):
sql, params = value.as_sql()
else:
sql, params = value._as_sql(connection=connection)
return QueryWrapper(('(%s)' % sql), params)
if lookup_type in ('month', 'day', 'week_day', 'hour', 'minute',
'second', 'search', 'regex', 'iregex'):
return [value]
elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
return [self.get_db_prep_value(value, connection=connection,
prepared=prepared)]
elif lookup_type in ('range', 'in'):
return [self.get_db_prep_value(v, connection=connection,
prepared=prepared) for v in value]
elif lookup_type in ('contains', 'icontains'):
return ["%%%s%%" % connection.ops.prep_for_like_query(value)]
elif lookup_type == 'iexact':
return [connection.ops.prep_for_iexact_query(value)]
elif lookup_type in ('startswith', 'istartswith'):
return ["%s%%" % connection.ops.prep_for_like_query(value)]
elif lookup_type in ('endswith', 'iendswith'):
return ["%%%s" % connection.ops.prep_for_like_query(value)]
elif lookup_type == 'isnull':
return []
elif lookup_type == 'year':
if isinstance(self, DateTimeField):
return connection.ops.year_lookup_bounds_for_datetime_field(value)
elif isinstance(self, DateField):
return connection.ops.year_lookup_bounds_for_date_field(value)
else:
return [value] # this isn't supposed to happen
else:
return [value]
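    # Illustrative parameter lists produced above (hypothetical values):
    #   name__contains='abc'   -> ['%abc%']
    #   name__startswith='abc' -> ['abc%']
    #   name__iendswith='abc'  -> ['%abc']
    #   name__isnull=True      -> [] (the generated SQL takes no parameters)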
def has_default(self):
"""
Returns a boolean of whether this field has a default value.
"""
return self.default is not NOT_PROVIDED
def get_default(self):
"""
Returns the default value for this field.
"""
if self.has_default():
if callable(self.default):
return self.default()
return force_text(self.default, strings_only=True)
if (not self.empty_strings_allowed or (self.null and
not connection.features.interprets_empty_strings_as_nulls)):
return None
return ""
def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH, limit_choices_to=None):
"""Returns choices with a default blank choices included, for use
as SelectField choices for this field."""
blank_defined = False
choices = list(self.choices) if self.choices else []
named_groups = choices and isinstance(choices[0][1], (list, tuple))
if not named_groups:
for choice, __ in choices:
if choice in ('', None):
blank_defined = True
break
first_choice = (blank_choice if include_blank and
not blank_defined else [])
if self.choices:
return first_choice + choices
rel_model = self.rel.to
limit_choices_to = limit_choices_to or self.get_limit_choices_to()
if hasattr(self.rel, 'get_related_field'):
lst = [(getattr(x, self.rel.get_related_field().attname),
smart_text(x))
for x in rel_model._default_manager.complex_filter(
limit_choices_to)]
else:
lst = [(x._get_pk_val(), smart_text(x))
for x in rel_model._default_manager.complex_filter(
limit_choices_to)]
return first_choice + lst
def get_choices_default(self):
return self.get_choices()
def get_flatchoices(self, include_blank=True,
blank_choice=BLANK_CHOICE_DASH):
"""
Returns flattened choices with a default blank choice included.
"""
first_choice = blank_choice if include_blank else []
return first_choice + list(self.flatchoices)
def _get_val_from_obj(self, obj):
if obj is not None:
return getattr(obj, self.attname)
else:
return self.get_default()
def value_to_string(self, obj):
"""
Returns a string value of this field from the passed obj.
This is used by the serialization framework.
"""
return smart_text(self._get_val_from_obj(obj))
def bind(self, fieldmapping, original, bound_field_class):
return bound_field_class(self, fieldmapping, original)
def _get_choices(self):
if isinstance(self._choices, collections.Iterator):
choices, self._choices = tee(self._choices)
return choices
else:
return self._choices
choices = property(_get_choices)
def _get_flatchoices(self):
"""Flattened version of choices tuple."""
flat = []
for choice, value in self.choices:
if isinstance(value, (list, tuple)):
flat.extend(value)
else:
flat.append((choice, value))
return flat
flatchoices = property(_get_flatchoices)
def save_form_data(self, instance, data):
setattr(instance, self.name, data)
def formfield(self, form_class=None, choices_form_class=None, **kwargs):
"""
Returns a django.forms.Field instance for this database Field.
"""
defaults = {'required': not self.blank,
'label': capfirst(self.verbose_name),
'help_text': self.help_text}
if self.has_default():
if callable(self.default):
defaults['initial'] = self.default
defaults['show_hidden_initial'] = True
else:
defaults['initial'] = self.get_default()
if self.choices:
# Fields with choices get special treatment.
include_blank = (self.blank or
not (self.has_default() or 'initial' in kwargs))
defaults['choices'] = self.get_choices(include_blank=include_blank)
defaults['coerce'] = self.to_python
if self.null:
defaults['empty_value'] = None
if choices_form_class is not None:
form_class = choices_form_class
else:
form_class = forms.TypedChoiceField
# Many of the subclass-specific formfield arguments (min_value,
# max_value) don't apply for choice fields, so be sure to only pass
# the values that TypedChoiceField will understand.
for k in list(kwargs):
if k not in ('coerce', 'empty_value', 'choices', 'required',
'widget', 'label', 'initial', 'help_text',
'error_messages', 'show_hidden_initial'):
del kwargs[k]
defaults.update(kwargs)
if form_class is None:
form_class = forms.CharField
return form_class(**defaults)
def value_from_object(self, obj):
"""
Returns the value of this field in the given model instance.
"""
return getattr(obj, self.attname)
class AutoField(Field):
description = _("Integer")
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be an integer."),
}
def __init__(self, *args, **kwargs):
kwargs['blank'] = True
super(AutoField, self).__init__(*args, **kwargs)
def check(self, **kwargs):
errors = super(AutoField, self).check(**kwargs)
errors.extend(self._check_primary_key())
return errors
def _check_primary_key(self):
if not self.primary_key:
return [
checks.Error(
'AutoFields must set primary_key=True.',
hint=None,
obj=self,
id='fields.E100',
),
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super(AutoField, self).deconstruct()
del kwargs['blank']
kwargs['primary_key'] = True
return name, path, args, kwargs
def get_internal_type(self):
return "AutoField"
def to_python(self, value):
if value is None:
return value
try:
return int(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def validate(self, value, model_instance):
pass
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
value = connection.ops.validate_autopk_value(value)
return value
def get_prep_value(self, value):
value = super(AutoField, self).get_prep_value(value)
if value is None:
return None
return int(value)
def contribute_to_class(self, cls, name, **kwargs):
assert not cls._meta.has_auto_field, \
"A model can't have more than one AutoField."
super(AutoField, self).contribute_to_class(cls, name, **kwargs)
cls._meta.has_auto_field = True
cls._meta.auto_field = self
def formfield(self, **kwargs):
return None
class BooleanField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be either True or False."),
}
description = _("Boolean (Either True or False)")
def __init__(self, *args, **kwargs):
kwargs['blank'] = True
super(BooleanField, self).__init__(*args, **kwargs)
def check(self, **kwargs):
errors = super(BooleanField, self).check(**kwargs)
errors.extend(self._check_null(**kwargs))
return errors
def _check_null(self, **kwargs):
if getattr(self, 'null', False):
return [
checks.Error(
'BooleanFields do not accept null values.',
hint='Use a NullBooleanField instead.',
obj=self,
id='fields.E110',
)
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super(BooleanField, self).deconstruct()
del kwargs['blank']
return name, path, args, kwargs
def get_internal_type(self):
return "BooleanField"
def to_python(self, value):
if value in (True, False):
            # if value is 1 or 0 then it's equal to True or False, but we want
# to return a true bool for semantic reasons.
return bool(value)
if value in ('t', 'True', '1'):
return True
if value in ('f', 'False', '0'):
return False
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
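    # Illustrative conversions (values chosen for this sketch):
    #   to_python(1)     -> True (since 1 == True)
    #   to_python('0')   -> False
    #   to_python('yes') -> ValidationError with code 'invalid'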
def get_prep_lookup(self, lookup_type, value):
# Special-case handling for filters coming from a Web request (e.g. the
# admin interface). Only works for scalar values (not lists). If you're
# passing in a list, you might as well make things the right type when
# constructing the list.
if value in ('1', '0'):
value = bool(int(value))
return super(BooleanField, self).get_prep_lookup(lookup_type, value)
def get_prep_value(self, value):
value = super(BooleanField, self).get_prep_value(value)
if value is None:
return None
return bool(value)
def formfield(self, **kwargs):
# Unlike most fields, BooleanField figures out include_blank from
# self.null instead of self.blank.
if self.choices:
include_blank = not (self.has_default() or 'initial' in kwargs)
defaults = {'choices': self.get_choices(include_blank=include_blank)}
else:
defaults = {'form_class': forms.BooleanField}
defaults.update(kwargs)
return super(BooleanField, self).formfield(**defaults)
class CharField(Field):
description = _("String (up to %(max_length)s)")
def __init__(self, *args, **kwargs):
super(CharField, self).__init__(*args, **kwargs)
self.validators.append(validators.MaxLengthValidator(self.max_length))
def check(self, **kwargs):
errors = super(CharField, self).check(**kwargs)
errors.extend(self._check_max_length_attribute(**kwargs))
return errors
def _check_max_length_attribute(self, **kwargs):
try:
max_length = int(self.max_length)
if max_length <= 0:
raise ValueError()
except TypeError:
return [
checks.Error(
"CharFields must define a 'max_length' attribute.",
hint=None,
obj=self,
id='fields.E120',
)
]
except ValueError:
return [
checks.Error(
"'max_length' must be a positive integer.",
hint=None,
obj=self,
id='fields.E121',
)
]
else:
return []
def get_internal_type(self):
return "CharField"
def to_python(self, value):
if isinstance(value, six.string_types) or value is None:
return value
return smart_text(value)
def get_prep_value(self, value):
value = super(CharField, self).get_prep_value(value)
return self.to_python(value)
def formfield(self, **kwargs):
        # Passing max_length to forms.CharField means that the value's length
        # will be validated twice. This is considered acceptable since we want
        # the value in the form field (to pass it into the widget, for example).
defaults = {'max_length': self.max_length}
defaults.update(kwargs)
return super(CharField, self).formfield(**defaults)
# TODO: Maybe move this into contrib, because it's specialized.
class CommaSeparatedIntegerField(CharField):
default_validators = [validators.validate_comma_separated_integer_list]
description = _("Comma-separated integers")
def formfield(self, **kwargs):
defaults = {
'error_messages': {
'invalid': _('Enter only digits separated by commas.'),
}
}
defaults.update(kwargs)
return super(CommaSeparatedIntegerField, self).formfield(**defaults)
class DateTimeCheckMixin(object):
def check(self, **kwargs):
errors = super(DateTimeCheckMixin, self).check(**kwargs)
errors.extend(self._check_mutually_exclusive_options())
errors.extend(self._check_fix_default_value())
return errors
def _check_mutually_exclusive_options(self):
# auto_now, auto_now_add, and default are mutually exclusive
# options. The use of more than one of these options together
# will trigger an Error
mutually_exclusive_options = [self.auto_now_add, self.auto_now,
self.has_default()]
enabled_options = [option not in (None, False)
for option in mutually_exclusive_options].count(True)
if enabled_options > 1:
return [
checks.Error(
"The options auto_now, auto_now_add, and default "
"are mutually exclusive. Only one of these options "
"may be present.",
hint=None,
obj=self,
id='fields.E160',
)
]
else:
return []
def _check_fix_default_value(self):
return []
class DateField(DateTimeCheckMixin, Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value has an invalid date format. It must be "
"in YYYY-MM-DD format."),
'invalid_date': _("'%(value)s' value has the correct format (YYYY-MM-DD) "
"but it is an invalid date."),
}
description = _("Date (without time)")
def __init__(self, verbose_name=None, name=None, auto_now=False,
auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
super(DateField, self).__init__(verbose_name, name, **kwargs)
def _check_fix_default_value(self):
"""
        Adds a warning to the checks framework stating that using an actual
date or datetime value is probably wrong; it's only being evaluated on
server start-up.
For details see ticket #21905
"""
if not self.has_default():
return []
now = timezone.now()
if not timezone.is_naive(now):
now = timezone.make_naive(now, timezone.utc)
value = self.default
if isinstance(value, datetime.datetime):
if not timezone.is_naive(value):
value = timezone.make_naive(value, timezone.utc)
value = value.date()
elif isinstance(value, datetime.date):
# Nothing to do, as dates don't have tz information
pass
else:
# No explicit date / datetime value -- no checks necessary
return []
offset = datetime.timedelta(days=1)
lower = (now - offset).date()
upper = (now + offset).date()
if lower <= value <= upper:
return [
checks.Warning(
'Fixed default value provided.',
hint='It seems you set a fixed date / time / datetime '
'value as default for this field. This may not be '
'what you want. If you want to have the current date '
'as default, use `django.utils.timezone.now`',
obj=self,
id='fields.W161',
)
]
return []
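    # Illustrative sketch of what triggers the warning above (hypothetical
    # model field declaration):
    #
    #     pub_date = DateField(default=datetime.date.today())  # evaluated once, at start-up
    #
    # versus the intended callable, evaluated on every save:
    #
    #     pub_date = DateField(default=datetime.date.today)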
def deconstruct(self):
name, path, args, kwargs = super(DateField, self).deconstruct()
if self.auto_now:
kwargs['auto_now'] = True
if self.auto_now_add:
kwargs['auto_now_add'] = True
if self.auto_now or self.auto_now_add:
del kwargs['editable']
del kwargs['blank']
return name, path, args, kwargs
def get_internal_type(self):
return "DateField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
if settings.USE_TZ and timezone.is_aware(value):
# Convert aware datetimes to the default time zone
# before casting them to dates (#17742).
default_timezone = timezone.get_default_timezone()
value = timezone.make_naive(value, default_timezone)
return value.date()
if isinstance(value, datetime.date):
return value
try:
parsed = parse_date(value)
if parsed is not None:
return parsed
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_date'],
code='invalid_date',
params={'value': value},
)
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.date.today()
setattr(model_instance, self.attname, value)
return value
else:
return super(DateField, self).pre_save(model_instance, add)
def contribute_to_class(self, cls, name, **kwargs):
super(DateField, self).contribute_to_class(cls, name, **kwargs)
if not self.null:
setattr(cls, 'get_next_by_%s' % self.name,
curry(cls._get_next_or_previous_by_FIELD, field=self,
is_next=True))
setattr(cls, 'get_previous_by_%s' % self.name,
curry(cls._get_next_or_previous_by_FIELD, field=self,
is_next=False))
def get_prep_lookup(self, lookup_type, value):
        # For date lookups, convert the value to an int
# so the database backend always sees a consistent type.
if lookup_type in ('month', 'day', 'week_day', 'hour', 'minute', 'second'):
return int(value)
return super(DateField, self).get_prep_lookup(lookup_type, value)
def get_prep_value(self, value):
value = super(DateField, self).get_prep_value(value)
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts dates into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_date(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.DateField}
defaults.update(kwargs)
return super(DateField, self).formfield(**defaults)
class DateTimeField(DateField):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value has an invalid format. It must be in "
"YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ] format."),
'invalid_date': _("'%(value)s' value has the correct format "
"(YYYY-MM-DD) but it is an invalid date."),
'invalid_datetime': _("'%(value)s' value has the correct format "
"(YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]) "
"but it is an invalid date/time."),
}
description = _("Date (with time)")
# __init__ is inherited from DateField
def _check_fix_default_value(self):
"""
        Adds a warning to the checks framework stating that using an actual
date or datetime value is probably wrong; it's only being evaluated on
server start-up.
For details see ticket #21905
"""
if not self.has_default():
return []
now = timezone.now()
if not timezone.is_naive(now):
now = timezone.make_naive(now, timezone.utc)
value = self.default
if isinstance(value, datetime.datetime):
second_offset = datetime.timedelta(seconds=10)
lower = now - second_offset
upper = now + second_offset
if timezone.is_aware(value):
value = timezone.make_naive(value, timezone.utc)
elif isinstance(value, datetime.date):
second_offset = datetime.timedelta(seconds=10)
lower = now - second_offset
lower = datetime.datetime(lower.year, lower.month, lower.day)
upper = now + second_offset
upper = datetime.datetime(upper.year, upper.month, upper.day)
value = datetime.datetime(value.year, value.month, value.day)
else:
# No explicit date / datetime value -- no checks necessary
return []
if lower <= value <= upper:
return [
checks.Warning(
'Fixed default value provided.',
hint='It seems you set a fixed date / time / datetime '
'value as default for this field. This may not be '
'what you want. If you want to have the current date '
'as default, use `django.utils.timezone.now`',
obj=self,
id='fields.W161',
)
]
return []
def get_internal_type(self):
return "DateTimeField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
return value
if isinstance(value, datetime.date):
value = datetime.datetime(value.year, value.month, value.day)
if settings.USE_TZ:
# For backwards compatibility, interpret naive datetimes in
# local time. This won't work during DST change, but we can't
# do much about it, so we let the exceptions percolate up the
# call stack.
warnings.warn("DateTimeField %s.%s received a naive datetime "
"(%s) while time zone support is active." %
(self.model.__name__, self.name, value),
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
return value
try:
parsed = parse_datetime(value)
if parsed is not None:
return parsed
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_datetime'],
code='invalid_datetime',
params={'value': value},
)
try:
parsed = parse_date(value)
if parsed is not None:
return datetime.datetime(parsed.year, parsed.month, parsed.day)
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_date'],
code='invalid_date',
params={'value': value},
)
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = timezone.now()
setattr(model_instance, self.attname, value)
return value
else:
return super(DateTimeField, self).pre_save(model_instance, add)
    # contribute_to_class is inherited from DateField; it registers
    # get_next_by_FOO and get_previous_by_FOO
# get_prep_lookup is inherited from DateField
def get_prep_value(self, value):
value = super(DateTimeField, self).get_prep_value(value)
value = self.to_python(value)
if value is not None and settings.USE_TZ and timezone.is_naive(value):
# For backwards compatibility, interpret naive datetimes in local
# time. This won't work during DST change, but we can't do much
# about it, so we let the exceptions percolate up the call stack.
try:
name = '%s.%s' % (self.model.__name__, self.name)
except AttributeError:
name = '(unbound)'
warnings.warn("DateTimeField %s received a naive datetime (%s)"
" while time zone support is active." %
(name, value),
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
return value
def get_db_prep_value(self, value, connection, prepared=False):
# Casts datetimes into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_datetime(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.DateTimeField}
defaults.update(kwargs)
return super(DateTimeField, self).formfield(**defaults)
class DecimalField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be a decimal number."),
}
description = _("Decimal number")
def __init__(self, verbose_name=None, name=None, max_digits=None,
decimal_places=None, **kwargs):
self.max_digits, self.decimal_places = max_digits, decimal_places
super(DecimalField, self).__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super(DecimalField, self).check(**kwargs)
digits_errors = self._check_decimal_places()
digits_errors.extend(self._check_max_digits())
if not digits_errors:
errors.extend(self._check_decimal_places_and_max_digits(**kwargs))
else:
errors.extend(digits_errors)
return errors
def _check_decimal_places(self):
try:
decimal_places = int(self.decimal_places)
if decimal_places < 0:
raise ValueError()
except TypeError:
return [
checks.Error(
"DecimalFields must define a 'decimal_places' attribute.",
hint=None,
obj=self,
id='fields.E130',
)
]
except ValueError:
return [
checks.Error(
"'decimal_places' must be a non-negative integer.",
hint=None,
obj=self,
id='fields.E131',
)
]
else:
return []
def _check_max_digits(self):
try:
max_digits = int(self.max_digits)
if max_digits <= 0:
raise ValueError()
except TypeError:
return [
checks.Error(
"DecimalFields must define a 'max_digits' attribute.",
hint=None,
obj=self,
id='fields.E132',
)
]
except ValueError:
return [
checks.Error(
"'max_digits' must be a positive integer.",
hint=None,
obj=self,
id='fields.E133',
)
]
else:
return []
def _check_decimal_places_and_max_digits(self, **kwargs):
if int(self.decimal_places) > int(self.max_digits):
return [
checks.Error(
"'max_digits' must be greater or equal to 'decimal_places'.",
hint=None,
obj=self,
id='fields.E134',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super(DecimalField, self).deconstruct()
if self.max_digits is not None:
kwargs['max_digits'] = self.max_digits
if self.decimal_places is not None:
kwargs['decimal_places'] = self.decimal_places
return name, path, args, kwargs
def get_internal_type(self):
return "DecimalField"
def to_python(self, value):
if value is None:
return value
try:
return decimal.Decimal(value)
except decimal.InvalidOperation:
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def _format(self, value):
if isinstance(value, six.string_types) or value is None:
return value
else:
return self.format_number(value)
def format_number(self, value):
"""
Formats a number into a string with the requisite number of digits and
decimal places.
"""
# Method moved to django.db.backends.utils.
#
# It is preserved because it is used by the oracle backend
# (django.db.backends.oracle.query), and also for
# backwards-compatibility with any external code which may have used
# this method.
from django.db.backends import utils
return utils.format_number(value, self.max_digits, self.decimal_places)
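    # For example (illustrative): on a DecimalField(max_digits=5,
    # decimal_places=2), format_number(decimal.Decimal('1.2345')) returns
    # '1.23', the value quantized to decimal_places digits.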
def get_db_prep_save(self, value, connection):
return connection.ops.value_to_db_decimal(self.to_python(value),
self.max_digits, self.decimal_places)
def get_prep_value(self, value):
value = super(DecimalField, self).get_prep_value(value)
return self.to_python(value)
def formfield(self, **kwargs):
defaults = {
'max_digits': self.max_digits,
'decimal_places': self.decimal_places,
'form_class': forms.DecimalField,
}
defaults.update(kwargs)
return super(DecimalField, self).formfield(**defaults)
class EmailField(CharField):
default_validators = [validators.validate_email]
description = _("Email address")
def __init__(self, *args, **kwargs):
# max_length=254 to be compliant with RFCs 3696 and 5321
kwargs['max_length'] = kwargs.get('max_length', 254)
super(EmailField, self).__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(EmailField, self).deconstruct()
        # We do not exclude max_length if it matches the default, as we want
        # to change the default in the future.
return name, path, args, kwargs
def formfield(self, **kwargs):
# As with CharField, this will cause email validation to be performed
# twice.
defaults = {
'form_class': forms.EmailField,
}
defaults.update(kwargs)
return super(EmailField, self).formfield(**defaults)
class FilePathField(Field):
description = _("File path")
def __init__(self, verbose_name=None, name=None, path='', match=None,
recursive=False, allow_files=True, allow_folders=False, **kwargs):
self.path, self.match, self.recursive = path, match, recursive
self.allow_files, self.allow_folders = allow_files, allow_folders
kwargs['max_length'] = kwargs.get('max_length', 100)
super(FilePathField, self).__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super(FilePathField, self).check(**kwargs)
errors.extend(self._check_allowing_files_or_folders(**kwargs))
return errors
def _check_allowing_files_or_folders(self, **kwargs):
if not self.allow_files and not self.allow_folders:
return [
checks.Error(
"FilePathFields must have either 'allow_files' or 'allow_folders' set to True.",
hint=None,
obj=self,
id='fields.E140',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super(FilePathField, self).deconstruct()
if self.path != '':
kwargs['path'] = self.path
if self.match is not None:
kwargs['match'] = self.match
if self.recursive is not False:
kwargs['recursive'] = self.recursive
if self.allow_files is not True:
kwargs['allow_files'] = self.allow_files
if self.allow_folders is not False:
kwargs['allow_folders'] = self.allow_folders
if kwargs.get("max_length", None) == 100:
del kwargs["max_length"]
return name, path, args, kwargs
def get_prep_value(self, value):
value = super(FilePathField, self).get_prep_value(value)
if value is None:
return None
return six.text_type(value)
def formfield(self, **kwargs):
defaults = {
'path': self.path,
'match': self.match,
'recursive': self.recursive,
'form_class': forms.FilePathField,
'allow_files': self.allow_files,
'allow_folders': self.allow_folders,
}
defaults.update(kwargs)
return super(FilePathField, self).formfield(**defaults)
def get_internal_type(self):
return "FilePathField"
class FloatField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be a float."),
}
description = _("Floating point number")
def get_prep_value(self, value):
value = super(FloatField, self).get_prep_value(value)
if value is None:
return None
return float(value)
def get_internal_type(self):
return "FloatField"
def to_python(self, value):
if value is None:
return value
try:
return float(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def formfield(self, **kwargs):
defaults = {'form_class': forms.FloatField}
defaults.update(kwargs)
return super(FloatField, self).formfield(**defaults)
class IntegerField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be an integer."),
}
description = _("Integer")
def check(self, **kwargs):
errors = super(IntegerField, self).check(**kwargs)
errors.extend(self._check_max_length_warning())
return errors
def _check_max_length_warning(self):
if self.max_length is not None:
return [
checks.Warning(
"'max_length' is ignored when used with IntegerField",
hint="Remove 'max_length' from field",
obj=self,
id='fields.W122',
)
]
return []
@cached_property
def validators(self):
# These validators can't be added at field initialization time since
# they're based on values retrieved from `connection`.
range_validators = []
internal_type = self.get_internal_type()
min_value, max_value = connection.ops.integer_field_range(internal_type)
if min_value is not None:
range_validators.append(validators.MinValueValidator(min_value))
if max_value is not None:
range_validators.append(validators.MaxValueValidator(max_value))
return super(IntegerField, self).validators + range_validators
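    # For example (illustrative backend): if the connection reports the
    # signed 32-bit range for 'IntegerField', this adds
    # MinValueValidator(-2147483648) and MaxValueValidator(2147483647).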
def get_prep_value(self, value):
value = super(IntegerField, self).get_prep_value(value)
if value is None:
return None
return int(value)
def get_prep_lookup(self, lookup_type, value):
if ((lookup_type == 'gte' or lookup_type == 'lt')
and isinstance(value, float)):
value = math.ceil(value)
return super(IntegerField, self).get_prep_lookup(lookup_type, value)
def get_internal_type(self):
return "IntegerField"
def to_python(self, value):
if value is None:
return value
try:
return int(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def formfield(self, **kwargs):
defaults = {'form_class': forms.IntegerField}
defaults.update(kwargs)
return super(IntegerField, self).formfield(**defaults)
class BigIntegerField(IntegerField):
empty_strings_allowed = False
description = _("Big (8 byte) integer")
MAX_BIGINT = 9223372036854775807
def get_internal_type(self):
return "BigIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': -BigIntegerField.MAX_BIGINT - 1,
'max_value': BigIntegerField.MAX_BIGINT}
defaults.update(kwargs)
return super(BigIntegerField, self).formfield(**defaults)
class IPAddressField(Field):
empty_strings_allowed = False
description = _("IPv4 address")
def __init__(self, *args, **kwargs):
warnings.warn("IPAddressField has been deprecated. Use GenericIPAddressField instead.",
RemovedInDjango19Warning)
kwargs['max_length'] = 15
super(IPAddressField, self).__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(IPAddressField, self).deconstruct()
del kwargs['max_length']
return name, path, args, kwargs
def get_prep_value(self, value):
value = super(IPAddressField, self).get_prep_value(value)
if value is None:
return None
return six.text_type(value)
def get_internal_type(self):
return "IPAddressField"
def formfield(self, **kwargs):
defaults = {'form_class': forms.IPAddressField}
defaults.update(kwargs)
return super(IPAddressField, self).formfield(**defaults)
class GenericIPAddressField(Field):
    empty_strings_allowed = False
description = _("IP address")
default_error_messages = {}
def __init__(self, verbose_name=None, name=None, protocol='both',
unpack_ipv4=False, *args, **kwargs):
self.unpack_ipv4 = unpack_ipv4
self.protocol = protocol
self.default_validators, invalid_error_message = \
validators.ip_address_validators(protocol, unpack_ipv4)
self.default_error_messages['invalid'] = invalid_error_message
kwargs['max_length'] = 39
super(GenericIPAddressField, self).__init__(verbose_name, name, *args,
**kwargs)
def check(self, **kwargs):
errors = super(GenericIPAddressField, self).check(**kwargs)
errors.extend(self._check_blank_and_null_values(**kwargs))
return errors
def _check_blank_and_null_values(self, **kwargs):
if not getattr(self, 'null', False) and getattr(self, 'blank', False):
return [
checks.Error(
('GenericIPAddressFields cannot have blank=True if null=False, '
'as blank values are stored as nulls.'),
hint=None,
obj=self,
id='fields.E150',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super(GenericIPAddressField, self).deconstruct()
if self.unpack_ipv4 is not False:
kwargs['unpack_ipv4'] = self.unpack_ipv4
if self.protocol != "both":
kwargs['protocol'] = self.protocol
if kwargs.get("max_length", None) == 39:
del kwargs['max_length']
return name, path, args, kwargs
def get_internal_type(self):
return "GenericIPAddressField"
def to_python(self, value):
if value and ':' in value:
return clean_ipv6_address(value,
self.unpack_ipv4, self.error_messages['invalid'])
return value
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
return value or None
def get_prep_value(self, value):
value = super(GenericIPAddressField, self).get_prep_value(value)
if value is None:
return None
if value and ':' in value:
try:
return clean_ipv6_address(value, self.unpack_ipv4)
except exceptions.ValidationError:
pass
return six.text_type(value)
def formfield(self, **kwargs):
defaults = {
'protocol': self.protocol,
'form_class': forms.GenericIPAddressField,
}
defaults.update(kwargs)
return super(GenericIPAddressField, self).formfield(**defaults)
class NullBooleanField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be either None, True or False."),
}
description = _("Boolean (Either True, False or None)")
def __init__(self, *args, **kwargs):
kwargs['null'] = True
kwargs['blank'] = True
super(NullBooleanField, self).__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(NullBooleanField, self).deconstruct()
del kwargs['null']
del kwargs['blank']
return name, path, args, kwargs
def get_internal_type(self):
return "NullBooleanField"
def to_python(self, value):
if value is None:
return None
if value in (True, False):
return bool(value)
if value in ('None',):
return None
if value in ('t', 'True', '1'):
return True
if value in ('f', 'False', '0'):
return False
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def get_prep_lookup(self, lookup_type, value):
# Special-case handling for filters coming from a Web request (e.g. the
# admin interface). Only works for scalar values (not lists). If you're
# passing in a list, you might as well make things the right type when
# constructing the list.
if value in ('1', '0'):
value = bool(int(value))
return super(NullBooleanField, self).get_prep_lookup(lookup_type,
value)
def get_prep_value(self, value):
value = super(NullBooleanField, self).get_prep_value(value)
if value is None:
return None
return bool(value)
def formfield(self, **kwargs):
defaults = {
'form_class': forms.NullBooleanField,
'required': not self.blank,
'label': capfirst(self.verbose_name),
'help_text': self.help_text}
defaults.update(kwargs)
return super(NullBooleanField, self).formfield(**defaults)
class PositiveIntegerField(IntegerField):
description = _("Positive integer")
def get_internal_type(self):
return "PositiveIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': 0}
defaults.update(kwargs)
return super(PositiveIntegerField, self).formfield(**defaults)
class PositiveSmallIntegerField(IntegerField):
description = _("Positive small integer")
def get_internal_type(self):
return "PositiveSmallIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': 0}
defaults.update(kwargs)
return super(PositiveSmallIntegerField, self).formfield(**defaults)
class SlugField(CharField):
default_validators = [validators.validate_slug]
description = _("Slug (up to %(max_length)s)")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 50)
# Set db_index=True unless it's been set manually.
if 'db_index' not in kwargs:
kwargs['db_index'] = True
super(SlugField, self).__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(SlugField, self).deconstruct()
if kwargs.get("max_length", None) == 50:
del kwargs['max_length']
if self.db_index is False:
kwargs['db_index'] = False
else:
del kwargs['db_index']
return name, path, args, kwargs
def get_internal_type(self):
return "SlugField"
def formfield(self, **kwargs):
defaults = {'form_class': forms.SlugField}
defaults.update(kwargs)
return super(SlugField, self).formfield(**defaults)
class SmallIntegerField(IntegerField):
description = _("Small integer")
def get_internal_type(self):
return "SmallIntegerField"
class TextField(Field):
description = _("Text")
def get_internal_type(self):
return "TextField"
def get_prep_value(self, value):
value = super(TextField, self).get_prep_value(value)
if isinstance(value, six.string_types) or value is None:
return value
return smart_text(value)
def formfield(self, **kwargs):
        # Passing max_length to forms.CharField means that the value's length
        # will be validated twice. This is considered acceptable since we want
        # the value in the form field (to pass it into the widget, for example).
defaults = {'max_length': self.max_length, 'widget': forms.Textarea}
defaults.update(kwargs)
return super(TextField, self).formfield(**defaults)
class TimeField(DateTimeCheckMixin, Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value has an invalid format. It must be in "
"HH:MM[:ss[.uuuuuu]] format."),
'invalid_time': _("'%(value)s' value has the correct format "
"(HH:MM[:ss[.uuuuuu]]) but it is an invalid time."),
}
description = _("Time")
def __init__(self, verbose_name=None, name=None, auto_now=False,
auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
super(TimeField, self).__init__(verbose_name, name, **kwargs)
def _check_fix_default_value(self):
"""
        Adds a warning to the checks framework stating that using an actual
time or datetime value is probably wrong; it's only being evaluated on
server start-up.
For details see ticket #21905
"""
if not self.has_default():
return []
now = timezone.now()
if not timezone.is_naive(now):
now = timezone.make_naive(now, timezone.utc)
value = self.default
if isinstance(value, datetime.datetime):
second_offset = datetime.timedelta(seconds=10)
lower = now - second_offset
upper = now + second_offset
if timezone.is_aware(value):
value = timezone.make_naive(value, timezone.utc)
elif isinstance(value, datetime.time):
second_offset = datetime.timedelta(seconds=10)
lower = now - second_offset
upper = now + second_offset
value = datetime.datetime.combine(now.date(), value)
if timezone.is_aware(value):
value = timezone.make_naive(value, timezone.utc).time()
else:
# No explicit time / datetime value -- no checks necessary
return []
if lower <= value <= upper:
return [
checks.Warning(
'Fixed default value provided.',
hint='It seems you set a fixed date / time / datetime '
'value as default for this field. This may not be '
'what you want. If you want to have the current date '
'as default, use `django.utils.timezone.now`',
obj=self,
id='fields.W161',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super(TimeField, self).deconstruct()
if self.auto_now is not False:
kwargs["auto_now"] = self.auto_now
if self.auto_now_add is not False:
kwargs["auto_now_add"] = self.auto_now_add
if self.auto_now or self.auto_now_add:
del kwargs['blank']
del kwargs['editable']
return name, path, args, kwargs
def get_internal_type(self):
return "TimeField"
def to_python(self, value):
if value is None:
return None
if isinstance(value, datetime.time):
return value
if isinstance(value, datetime.datetime):
# Not usually a good idea to pass in a datetime here (it loses
# information), but this can be a side-effect of interacting with a
# database backend (e.g. Oracle), so we'll be accommodating.
return value.time()
try:
parsed = parse_time(value)
if parsed is not None:
return parsed
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_time'],
code='invalid_time',
params={'value': value},
)
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.datetime.now().time()
setattr(model_instance, self.attname, value)
return value
else:
return super(TimeField, self).pre_save(model_instance, add)
def get_prep_value(self, value):
value = super(TimeField, self).get_prep_value(value)
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts times into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_time(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.TimeField}
defaults.update(kwargs)
return super(TimeField, self).formfield(**defaults)
class URLField(CharField):
default_validators = [validators.URLValidator()]
description = _("URL")
def __init__(self, verbose_name=None, name=None, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 200)
super(URLField, self).__init__(verbose_name, name, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(URLField, self).deconstruct()
if kwargs.get("max_length", None) == 200:
del kwargs['max_length']
return name, path, args, kwargs
def formfield(self, **kwargs):
# As with CharField, this will cause URL validation to be performed
# twice.
defaults = {
'form_class': forms.URLField,
}
defaults.update(kwargs)
return super(URLField, self).formfield(**defaults)
class BinaryField(Field):
description = _("Raw binary data")
empty_values = [None, b'']
def __init__(self, *args, **kwargs):
kwargs['editable'] = False
super(BinaryField, self).__init__(*args, **kwargs)
if self.max_length is not None:
self.validators.append(validators.MaxLengthValidator(self.max_length))
def deconstruct(self):
name, path, args, kwargs = super(BinaryField, self).deconstruct()
del kwargs['editable']
return name, path, args, kwargs
def get_internal_type(self):
return "BinaryField"
def get_default(self):
if self.has_default() and not callable(self.default):
return self.default
default = super(BinaryField, self).get_default()
if default == '':
return b''
return default
def get_db_prep_value(self, value, connection, prepared=False):
value = super(BinaryField, self).get_db_prep_value(value, connection, prepared)
if value is not None:
return connection.Database.Binary(value)
return value
def value_to_string(self, obj):
"""Binary data is serialized as base64"""
return b64encode(force_bytes(self._get_val_from_obj(obj))).decode('ascii')
def to_python(self, value):
# If it's a string, it should be base64-encoded data
if isinstance(value, six.text_type):
return six.memoryview(b64decode(force_bytes(value)))
return value
class UUIDField(Field):
default_error_messages = {
'invalid': _("'%(value)s' is not a valid UUID."),
}
description = 'Universally unique identifier'
empty_strings_allowed = False
def __init__(self, **kwargs):
kwargs['max_length'] = 32
super(UUIDField, self).__init__(**kwargs)
def get_internal_type(self):
return "UUIDField"
def get_prep_value(self, value):
if isinstance(value, uuid.UUID):
return value.hex
if isinstance(value, six.string_types):
return value.replace('-', '')
return value
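    # Illustrative (values chosen for this sketch): both
    # uuid.UUID('12345678-1234-5678-1234-567812345678') and its dashed
    # string form are prepared as the same 32-character hex value
    # '12345678123456781234567812345678'.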
def to_python(self, value):
if value and not isinstance(value, uuid.UUID):
try:
return uuid.UUID(value)
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
return value
def formfield(self, **kwargs):
defaults = {
'form_class': forms.UUIDField,
}
defaults.update(kwargs)
return super(UUIDField, self).formfield(**defaults)
| andyzsf/django | django/db/models/fields/__init__.py | Python | bsd-3-clause | 84,287 | 0.000641 |
import numpy as np
from numpy.testing import (run_module_suite,
assert_,
assert_equal,
assert_almost_equal,
assert_array_equal,
assert_array_almost_equal,
assert_raises)
from dipy.align.streamlinear import (compose_matrix44,
decompose_matrix44,
BundleSumDistanceMatrixMetric,
BundleMinDistanceMatrixMetric,
BundleMinDistanceMetric,
StreamlineLinearRegistration,
StreamlineDistanceMetric)
from dipy.tracking.streamline import (center_streamlines,
unlist_streamlines,
relist_streamlines,
transform_streamlines,
set_number_of_points)
from dipy.core.geometry import compose_matrix
from dipy.data import get_data, two_cingulum_bundles
from nibabel import trackvis as tv
from dipy.align.bundlemin import (_bundle_minimum_distance_matrix,
_bundle_minimum_distance,
distance_matrix_mdf)
def simulated_bundle(no_streamlines=10, waves=False, no_pts=12):
t = np.linspace(-10, 10, 200)
# parallel waves or parallel lines
bundle = []
for i in np.linspace(-5, 5, no_streamlines):
if waves:
pts = np.vstack((np.cos(t), t, i * np.ones(t.shape))).T
else:
pts = np.vstack((np.zeros(t.shape), t, i * np.ones(t.shape))).T
pts = set_number_of_points(pts, no_pts)
bundle.append(pts)
return bundle
def fornix_streamlines(no_pts=12):
fname = get_data('fornix')
streams, hdr = tv.read(fname)
streamlines = [set_number_of_points(i[0], no_pts) for i in streams]
return streamlines
def evaluate_convergence(bundle, new_bundle2):
pts_static = np.concatenate(bundle, axis=0)
pts_moved = np.concatenate(new_bundle2, axis=0)
assert_array_almost_equal(pts_static, pts_moved, 3)
def test_rigid_parallel_lines():
bundle_initial = simulated_bundle()
bundle, shift = center_streamlines(bundle_initial)
mat = compose_matrix44([20, 0, 10, 0, 40, 0])
bundle2 = transform_streamlines(bundle, mat)
bundle_sum_distance = BundleSumDistanceMatrixMetric()
options = {'maxcor': 100, 'ftol': 1e-9, 'gtol': 1e-16, 'eps': 1e-3}
srr = StreamlineLinearRegistration(metric=bundle_sum_distance,
x0=np.zeros(6),
method='L-BFGS-B',
bounds=None,
options=options)
new_bundle2 = srr.optimize(bundle, bundle2).transform(bundle2)
evaluate_convergence(bundle, new_bundle2)
def test_rigid_real_bundles():
bundle_initial = fornix_streamlines()[:20]
bundle, shift = center_streamlines(bundle_initial)
mat = compose_matrix44([0, 0, 20, 45., 0, 0])
bundle2 = transform_streamlines(bundle, mat)
bundle_sum_distance = BundleSumDistanceMatrixMetric()
srr = StreamlineLinearRegistration(bundle_sum_distance,
x0=np.zeros(6),
method='Powell')
new_bundle2 = srr.optimize(bundle, bundle2).transform(bundle2)
evaluate_convergence(bundle, new_bundle2)
bundle_min_distance = BundleMinDistanceMatrixMetric()
srr = StreamlineLinearRegistration(bundle_min_distance,
x0=np.zeros(6),
method='Powell')
new_bundle2 = srr.optimize(bundle, bundle2).transform(bundle2)
evaluate_convergence(bundle, new_bundle2)
assert_raises(ValueError, StreamlineLinearRegistration, method='Whatever')
def test_rigid_partial_real_bundles():
static = fornix_streamlines()[:20]
moving = fornix_streamlines()[20:40]
static_center, shift = center_streamlines(static)
moving_center, shift2 = center_streamlines(moving)
print(shift2)
mat = compose_matrix(translate=np.array([0, 0, 0.]),
angles=np.deg2rad([40, 0, 0.]))
moved = transform_streamlines(moving_center, mat)
srr = StreamlineLinearRegistration()
srm = srr.optimize(static_center, moved)
print(srm.fopt)
print(srm.iterations)
print(srm.funcs)
moving_back = srm.transform(moved)
print(srm.matrix)
static_center = set_number_of_points(static_center, 100)
moving_center = set_number_of_points(moving_back, 100)
vol = np.zeros((100, 100, 100))
spts = np.concatenate(static_center, axis=0)
    spts = np.round(spts).astype(int) + np.array([50, 50, 50])
mpts = np.concatenate(moving_center, axis=0)
    mpts = np.round(mpts).astype(int) + np.array([50, 50, 50])
for index in spts:
i, j, k = index
vol[i, j, k] = 1
vol2 = np.zeros((100, 100, 100))
for index in mpts:
i, j, k = index
vol2[i, j, k] = 1
overlap = np.sum(np.logical_and(vol, vol2)) / float(np.sum(vol2))
assert_equal(overlap * 100 > 40, True)
def test_stream_rigid():
static = fornix_streamlines()[:20]
moving = fornix_streamlines()[20:40]
static_center, shift = center_streamlines(static)
mat = compose_matrix44([0, 0, 0, 0, 40, 0])
moving = transform_streamlines(moving, mat)
srr = StreamlineLinearRegistration()
sr_params = srr.optimize(static, moving)
moved = transform_streamlines(moving, sr_params.matrix)
srr = StreamlineLinearRegistration(verbose=True)
srm = srr.optimize(static, moving)
moved2 = transform_streamlines(moving, srm.matrix)
moved3 = srm.transform(moving)
assert_array_almost_equal(moved[0], moved2[0], decimal=3)
assert_array_almost_equal(moved2[0], moved3[0], decimal=3)
def test_min_vs_min_fast_precision():
static = fornix_streamlines()[:20]
moving = fornix_streamlines()[:20]
static = [s.astype('f8') for s in static]
moving = [m.astype('f8') for m in moving]
bmd = BundleMinDistanceMatrixMetric()
bmd.setup(static, moving)
bmdf = BundleMinDistanceMetric()
bmdf.setup(static, moving)
x_test = [0.01, 0, 0, 0, 0, 0]
print(bmd.distance(x_test))
print(bmdf.distance(x_test))
assert_equal(bmd.distance(x_test), bmdf.distance(x_test))
def test_same_number_of_points():
A = [np.random.rand(10, 3), np.random.rand(20, 3)]
B = [np.random.rand(21, 3), np.random.rand(30, 3)]
C = [np.random.rand(10, 3), np.random.rand(10, 3)]
D = [np.random.rand(20, 3), np.random.rand(20, 3)]
slr = StreamlineLinearRegistration()
assert_raises(ValueError, slr.optimize, A, B)
assert_raises(ValueError, slr.optimize, C, D)
assert_raises(ValueError, slr.optimize, C, B)
def test_efficient_bmd():
a = np.array([[1, 1, 1],
[2, 2, 2],
[3, 3, 3]])
streamlines = [a, a + 2, a + 4]
points, offsets = unlist_streamlines(streamlines)
points = points.astype(np.double)
points2 = points.copy()
D = np.zeros((len(offsets), len(offsets)), dtype='f8')
_bundle_minimum_distance_matrix(points, points2,
len(offsets), len(offsets),
a.shape[0], D)
assert_equal(np.sum(np.diag(D)), 0)
points2 += 2
_bundle_minimum_distance_matrix(points, points2,
len(offsets), len(offsets),
a.shape[0], D)
streamlines2 = relist_streamlines(points2, offsets)
D2 = distance_matrix_mdf(streamlines, streamlines2)
assert_array_almost_equal(D, D2)
cols = D2.shape[1]
rows = D2.shape[0]
dist = 0.25 * (np.sum(np.min(D2, axis=0)) / float(cols) +
np.sum(np.min(D2, axis=1)) / float(rows)) ** 2
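    # This is the bundle-minimum-distance (BMD) value written out
    # (illustrative notation): BMD = 0.25 * (mean_i min_j D_ij +
    # mean_j min_i D_ij) ** 2; _bundle_minimum_distance below should agree.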
dist2 = _bundle_minimum_distance(points, points2,
len(offsets), len(offsets),
a.shape[0])
assert_almost_equal(dist, dist2)
def test_openmp_locks():
static = []
moving = []
pts = 20
for i in range(1000):
s = np.random.rand(pts, 3)
static.append(s)
moving.append(s + 2)
moving = moving[2:]
points, offsets = unlist_streamlines(static)
points2, offsets2 = unlist_streamlines(moving)
D = np.zeros((len(offsets), len(offsets2)), dtype='f8')
_bundle_minimum_distance_matrix(points, points2,
len(offsets), len(offsets2),
pts, D)
dist1 = 0.25 * (np.sum(np.min(D, axis=0)) / float(D.shape[1]) +
np.sum(np.min(D, axis=1)) / float(D.shape[0])) ** 2
dist2 = _bundle_minimum_distance(points, points2,
len(offsets), len(offsets2),
pts)
assert_almost_equal(dist1, dist2, 6)
def test_from_to_rigid():
t = np.array([10, 2, 3, 0.1, 20., 30.])
mat = compose_matrix44(t)
vec = decompose_matrix44(mat, 6)
assert_array_almost_equal(t, vec)
t = np.array([0, 0, 0, 180, 0., 0.])
mat = np.eye(4)
mat[0, 0] = -1
vec = decompose_matrix44(mat, 6)
assert_array_almost_equal(-t, vec)
def test_matrix44():
assert_raises(ValueError, compose_matrix44, np.ones(5))
assert_raises(ValueError, compose_matrix44, np.ones(9))
assert_raises(ValueError, compose_matrix44, np.ones(16))
def test_abstract_metric_class():
class DummyStreamlineMetric(StreamlineDistanceMetric):
def test():
pass
assert_raises(TypeError, DummyStreamlineMetric)
def test_evolution_of_previous_iterations():
static = fornix_streamlines()[:20]
moving = fornix_streamlines()[:20]
moving = [m + np.array([10., 0., 0.]) for m in moving]
slr = StreamlineLinearRegistration(evolution=True)
from dipy.core.optimize import SCIPY_LESS_0_12
if not SCIPY_LESS_0_12:
slm = slr.optimize(static, moving)
assert_equal(len(slm.matrix_history), slm.iterations)
def test_similarity_real_bundles():
bundle_initial = fornix_streamlines()
bundle_initial, shift = center_streamlines(bundle_initial)
bundle = bundle_initial[:20]
xgold = [0, 0, 10, 0, 0, 0, 1.5]
mat = compose_matrix44(xgold)
bundle2 = transform_streamlines(bundle_initial[:20], mat)
metric = BundleMinDistanceMatrixMetric()
x0 = np.array([0, 0, 0, 0, 0, 0, 1], 'f8')
slr = StreamlineLinearRegistration(metric=metric,
x0=x0,
method='Powell',
bounds=None,
verbose=False)
slm = slr.optimize(bundle, bundle2)
new_bundle2 = slm.transform(bundle2)
evaluate_convergence(bundle, new_bundle2)
def test_affine_real_bundles():
bundle_initial = fornix_streamlines()
bundle_initial, shift = center_streamlines(bundle_initial)
bundle = bundle_initial[:20]
xgold = [0, 4, 2, 0, 10, 10, 1.2, 1.1, 1., 0., 0.2, 0.]
mat = compose_matrix44(xgold)
bundle2 = transform_streamlines(bundle_initial[:20], mat)
x0 = np.array([0, 0, 0, 0, 0, 0, 1., 1., 1., 0, 0, 0])
x = 25
bounds = [(-x, x), (-x, x), (-x, x),
(-x, x), (-x, x), (-x, x),
(0.1, 1.5), (0.1, 1.5), (0.1, 1.5),
(-1, 1), (-1, 1), (-1, 1)]
options = {'maxcor': 10, 'ftol': 1e-7, 'gtol': 1e-5, 'eps': 1e-8}
metric = BundleMinDistanceMatrixMetric()
slr = StreamlineLinearRegistration(metric=metric,
x0=x0,
method='L-BFGS-B',
bounds=bounds,
verbose=True,
options=options)
slm = slr.optimize(bundle, bundle2)
new_bundle2 = slm.transform(bundle2)
slr2 = StreamlineLinearRegistration(metric=metric,
x0=x0,
method='Powell',
bounds=None,
verbose=True,
options=None)
slm2 = slr2.optimize(bundle, new_bundle2)
new_bundle2 = slm2.transform(new_bundle2)
evaluate_convergence(bundle, new_bundle2)
def test_vectorize_streamlines():
cingulum_bundles = two_cingulum_bundles()
cb_subj1 = cingulum_bundles[0]
cb_subj1 = set_number_of_points(cb_subj1, 10)
cb_subj1_pts_no = np.array([s.shape[0] for s in cb_subj1])
assert_equal(np.all(cb_subj1_pts_no == 10), True)
def test_x0_input():
for x0 in [6, 7, 12, "Rigid", 'rigid', "similarity", "Affine"]:
StreamlineLinearRegistration(x0=x0)
for x0 in [np.random.rand(6), np.random.rand(7), np.random.rand(12)]:
StreamlineLinearRegistration(x0=x0)
for x0 in [8, 20, "Whatever", np.random.rand(20), np.random.rand(20, 3)]:
assert_raises(ValueError, StreamlineLinearRegistration, x0=x0)
x0 = np.random.rand(4, 3)
assert_raises(ValueError, StreamlineLinearRegistration, x0=x0)
x0_6 = np.zeros(6)
x0_7 = np.array([0, 0, 0, 0, 0, 0, 1.])
x0_12 = np.array([0, 0, 0, 0, 0, 0, 1., 1., 1., 0, 0, 0])
x0_s = [x0_6, x0_7, x0_12, x0_6, x0_7, x0_12]
for i, x0 in enumerate([6, 7, 12, "Rigid", "similarity", "Affine"]):
slr = StreamlineLinearRegistration(x0=x0)
assert_equal(slr.x0, x0_s[i])
def test_compose_decompose_matrix44():
for i in range(20):
x0 = np.random.rand(12)
mat = compose_matrix44(x0[:6])
assert_array_almost_equal(x0[:6], decompose_matrix44(mat, size=6))
mat = compose_matrix44(x0[:7])
assert_array_almost_equal(x0[:7], decompose_matrix44(mat, size=7))
mat = compose_matrix44(x0[:12])
assert_array_almost_equal(x0[:12], decompose_matrix44(mat, size=12))
assert_raises(ValueError, decompose_matrix44, mat, 20)
def test_cascade_of_optimizations_and_threading():
cingulum_bundles = two_cingulum_bundles()
cb1 = cingulum_bundles[0]
cb1 = set_number_of_points(cb1, 20)
test_x0 = np.array([10, 4, 3, 0, 20, 10, 1.5, 1.5, 1.5, 0., 0.2, 0])
cb2 = transform_streamlines(cingulum_bundles[0],
compose_matrix44(test_x0))
cb2 = set_number_of_points(cb2, 20)
print('first rigid')
slr = StreamlineLinearRegistration(x0=6, num_threads=1)
slm = slr.optimize(cb1, cb2)
print('then similarity')
slr2 = StreamlineLinearRegistration(x0=7, num_threads=2)
slm2 = slr2.optimize(cb1, cb2, slm.matrix)
print('then affine')
slr3 = StreamlineLinearRegistration(x0=12, options={'maxiter': 50},
num_threads=None)
slm3 = slr3.optimize(cb1, cb2, slm2.matrix)
assert_(slm2.fopt < slm.fopt)
assert_(slm3.fopt < slm2.fopt)
if __name__ == '__main__':
run_module_suite()
|
StongeEtienne/dipy
|
dipy/align/tests/test_streamlinear.py
|
Python
|
bsd-3-clause
| 15,316
| 0
|
import torch
def rmsprop(opfunc, x, config, state=None):
""" An implementation of RMSprop
ARGS:
    - 'opfunc' : a function that takes a single input (X), the point
                of evaluation, and returns f(X) and df/dX
    - 'x' : the initial point
    - 'config' : a table with configuration parameters for the optimizer
    - 'config['learningRate']' : learning rate
    - 'config['alpha']' : smoothing constant
    - 'config['epsilon']' : small value added to the denominator for numerical stability
- 'config['weightDecay']' : weight decay
- 'state' : a table describing the state of the optimizer;
after each call the state is modified
- 'state['m']' : leaky sum of squares of parameter gradients,
- 'state['tmp']' : and the square root (with epsilon smoothing)
RETURN:
- `x` : the new x vector
- `f(x)` : the function, evaluated before the update
"""
# (0) get/update state
if config is None and state is None:
raise ValueError("rmsprop requires a dictionary to retain state between iterations")
state = state if state is not None else config
lr = config.get('learningRate', 1e-2)
alpha = config.get('alpha', 0.99)
epsilon = config.get('epsilon', 1e-8)
wd = config.get('weightDecay', 0)
# (1) evaluate f(x) and df/dx
fx, dfdx = opfunc(x)
# (2) weight decay
if wd != 0:
dfdx.add_(wd, x)
# (3) initialize mean square values and square gradient storage
if 'm' not in state:
state['m'] = x.new().resize_as_(dfdx).zero_()
state['tmp'] = x.new().resize_as_(dfdx)
# (4) calculate new (leaky) mean squared values
state['m'].mul_(alpha)
state['m'].addcmul_(1.0 - alpha, dfdx, dfdx)
# (5) perform update
torch.sqrt(state['m'], out=state['tmp']).add_(epsilon)
x.addcdiv_(-lr, dfdx, state['tmp'])
# return x*, f(x) before optimization
return x, fx
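# A minimal usage sketch (not part of the original module; relies on the same
# legacy tensor API used above, e.g. the scalar-first addcmul_/addcdiv_
# signatures): minimize f(x) = sum(x**2), whose gradient is 2*x. Note that
# `config` doubles as the state table and must be reused across calls.
if __name__ == '__main__':
    def _opfunc(x):
        return (x * x).sum(), x.mul(2.0)

    x0 = torch.ones(5)
    cfg = {'learningRate': 0.1}
    for _ in range(200):
        x0, fx = rmsprop(_opfunc, x0, cfg)
    print(x0)  # should be close to the zero vector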
|
RPGOne/Skynet
|
pytorch-master/torch/legacy/optim/rmsprop.py
|
Python
|
bsd-3-clause
| 2,014
| 0.001986
|
from .nuimo import ControllerManager, ControllerManagerListener, Controller, ControllerListener, GestureEvent, Gesture, LedMatrix
|
getsenic/nuimo-linux-python
|
nuimo/__init__.py
|
Python
|
mit
| 130
| 0.007692
|
# -*- coding: utf-8 -*-
from django.shortcuts import render
from django.http import HttpResponsePermanentRedirect, Http404, HttpResponseRedirect
from django.views.decorators.http import require_GET
from django.contrib.auth import login, authenticate
from django.core.urlresolvers import reverse_lazy
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.contrib import messages
from django.views.decorators.cache import cache_page
from .forms import UrlCreateForm
from .models import Url
@cache_page(60 * 60)
@require_GET
def redirect(request, short_code):
"""
Redirects Url
"""
    if not short_code:
        raise Http404()
    try:
        url = Url.objects.get(short_code=short_code)
    except Url.DoesNotExist:
        raise Http404()
    return HttpResponsePermanentRedirect(url.original_url)
def register_user(request):
if request.method == 'POST':
form = UserCreationForm(request.POST)
if form.is_valid():
user = form.save()
username = request.POST['username']
password = request.POST['password1']
user = authenticate(username=username, password=password)
login(request, user)
messages.success(request, 'User registered and logged in with success.')
return HttpResponseRedirect(reverse_lazy('index'))
else:
context = {'user_register_form': form}
else:
context = {'user_register_form': UserCreationForm()}
return render(request, 'register.html', context)
def user_url_list(user, page, limit=20):
"""
    Returns one page (a Paginator page) of the user's Url's.
"""
url_list = Url.objects.filter(user=user)
paginator = Paginator(url_list, limit)
try:
url_list = paginator.page(page)
except PageNotAnInteger:
url_list = paginator.page(1)
except EmptyPage:
url_list = paginator.page(paginator.num_pages)
return url_list
def index(request):
"""
Main View, show form and list Url`s of the authenticated user.
"""
if request.user.is_authenticated():
context = {
# Returns the users ``Url.objects`` QuerySet or None if Anonymous.
'url_list': user_url_list(request.user, request.GET.get('page')),
'absolute_uri': request.build_absolute_uri(),
'user': request.user
}
else:
context = {
'user_login_form': AuthenticationForm(),
'user_register_form': UserCreationForm()
}
if request.method == "POST":
form = UrlCreateForm(request.POST)
if form.is_valid():
form.instance.user = (
request.user if request.user.is_authenticated() else None
)
instance = form.save()
context['short_url'] = request.build_absolute_uri() + instance.short_code
else:
form = UrlCreateForm()
context['change_form'] = form
return render(request, 'index.html', context)
|
ricardodani/django-simple-url-shortner
|
simple_url_shortner/urlshortener/views.py
|
Python
|
gpl-2.0
| 3,074
| 0.001627
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Universidade de Aveiro, DETI/IEETA, Bioinformatics Group - http://bioinformatics.ua.pt/
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.http import HttpResponse, HttpResponseRedirect
from questionnaire.models import *
from django.shortcuts import render_to_response, get_object_or_404
import sys
from searchengine.models import *
rem = 1
qu = get_object_or_404(Questionnaire, id=rem)
qsets = qu.questionsets()
for qs in qsets:
expected = qs.questions()
for q in expected:
slugs = Slugs.objects.filter(description__exact=q.text)
if len(slugs)!=1:
print "Error (multiple slugs to the description): " + q.number
for s in slugs:
try:
print s.slug1 + "| " + s.description + "| " + str(s.question.pk)
except:
print s.slug1 + "| " + str(s.question.pk)
continue
s = slugs[0]
if (s.slug1 != q.slug):
print "Error (slug1!=slug): " + q.number
print s.slug1 + "| " + s.description + "| " + str(s.question.pk)
continue
if (s.question.pk!=q.pk):
print "Error (q.pk!=pk): " + q.number
continue
|
bioinformatics-ua/catalogue
|
emif/utils/validate_questionnaire.py
|
Python
|
gpl-3.0
| 1,852
| 0.00432
|
# Copyright 2012 NetApp
# Copyright 2015 Chuck Fouts
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import warnings
from manilaclient.v2 import shares
warnings.warn("Module manilaclient.v1.shares is deprecated (taken as "
"a basis for manilaclient.v2.shares). "
"The preferable way to get a client class or object is to use "
"the manilaclient.client module.")
class MovedModule(object):
def __init__(self, new_module):
self.new_module = new_module
def __getattr__(self, attr):
return getattr(self.new_module, attr)
sys.modules["manilaclient.v1.shares"] = MovedModule(shares)
|
sniperganso/python-manilaclient
|
manilaclient/v1/shares.py
|
Python
|
apache-2.0
| 1,210
| 0.000826
|
# Generated by Django 1.11.13 on 2018-08-14 17:35
from django.conf import settings
from django.db import migrations, models
import django.contrib.auth.models
import django.contrib.auth.validators
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
replaces = [
("user", "0001_initial"),
("user", "0002_rename_account_tables"),
("user", "0003_auto_20151226_1110"),
]
initial = True
dependencies = [
("auth", "0012_alter_user_first_name_max_length"),
]
operations = [
migrations.CreateModel(
name="User",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"password",
models.CharField(max_length=128, verbose_name="password"),
),
(
"last_login",
models.DateTimeField(
blank=True, null=True, verbose_name="last login"
),
),
(
"is_superuser",
models.BooleanField(
default=False,
help_text="Designates that this user has all permissions without explicitly assigning them.",
verbose_name="superuser status",
),
),
(
"username",
models.CharField(
error_messages={
"unique": "A user with that username already exists."
},
help_text="Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.",
max_length=150,
unique=True,
validators=[
django.contrib.auth.validators.UnicodeUsernameValidator()
],
verbose_name="username",
),
),
(
"first_name",
models.CharField(
blank=True, max_length=150, verbose_name="first name"
),
),
(
"last_name",
models.CharField(
blank=True, max_length=150, verbose_name="last name"
),
),
(
"email",
models.EmailField(
blank=True,
max_length=254,
verbose_name="email address",
),
),
(
"is_staff",
models.BooleanField(
default=False,
help_text="Designates whether the user can log into this admin site.",
verbose_name="staff status",
),
),
(
"is_active",
models.BooleanField(
default=True,
help_text="Designates whether this user should be treated as active. Unselect this instead of deleting accounts.",
verbose_name="active",
),
),
(
"date_joined",
models.DateTimeField(
default=django.utils.timezone.now,
verbose_name="date joined",
),
),
(
"groups",
models.ManyToManyField(
blank=True,
help_text="The groups this user belongs to. A user will get all permissions granted to each of their groups.",
related_name="user_set",
related_query_name="user",
to="auth.Group",
verbose_name="groups",
),
),
(
"user_permissions",
models.ManyToManyField(
blank=True,
help_text="Specific permissions for this user.",
related_name="user_set",
related_query_name="user",
to="auth.Permission",
verbose_name="user permissions",
),
),
],
options={
"db_table": "auth_user",
},
managers=[
("objects", django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name="TeamMember",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("roles", models.CharField(blank=True, max_length=100)),
(
"leader",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="leader",
to=settings.AUTH_USER_MODEL,
),
),
(
"member",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="member",
to=settings.AUTH_USER_MODEL,
),
),
],
),
migrations.CreateModel(
name="UserProfile",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("about", models.TextField(blank=True, max_length=500)),
(
"user",
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
],
),
migrations.AlterUniqueTogether(
name="teammember",
unique_together=set([("leader", "member")]),
),
]
|
fiduswriter/fiduswriter
|
fiduswriter/user/migrations/0001_squashed_0003_auto_20151226_1110.py
|
Python
|
agpl-3.0
| 6,929
| 0.00101
|
# -*- coding: utf-8 -*-
# © 2012 spirit <hiddenspirit@gmail.com>
# © 2013-2014 Steven Myint
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""LanguageTool through server mode."""
import atexit
import glob
import http.client
import locale
import os
import re
import socket
import sys
import urllib.parse
import urllib.request
from collections import OrderedDict
from functools import total_ordering
from weakref import WeakValueDictionary
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
from xml.etree import ElementTree
from .backports import subprocess
from .which import which
__version__ = '1.3.1'
__all__ = ['LanguageTool', 'Error', 'get_languages', 'correct', 'get_version',
'get_directory', 'set_directory']
JAR_NAMES = [
'languagetool-server.jar',
'languagetool-standalone*.jar', # 2.1
'LanguageTool.jar',
'LanguageTool.uno.jar'
]
FAILSAFE_LANGUAGE = 'en'
# https://mail.python.org/pipermail/python-dev/2011-July/112551.html
USE_URLOPEN_RESOURCE_WARNING_FIX = (3, 1) < sys.version_info < (3, 4)
if os.name == 'nt':
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
else:
startupinfo = None
cache = {}
class Error(Exception):
"""LanguageTool Error."""
class ServerError(Error):
pass
class JavaError(Error):
pass
class PathError(Error):
pass
def get_replacement_list(string, sep='#'):
if isinstance(string, list):
return string
return string.split(sep) if string else []
def auto_type(string):
try:
return int(string)
except ValueError:
try:
return float(string)
except ValueError:
return string
@total_ordering
class Match:
"""Hold information about where a rule matches text."""
_SLOTS = OrderedDict([
('fromy', int), ('fromx', int), ('toy', int), ('tox', int),
('ruleId', str), ('subId', str), ('msg', str),
('replacements', get_replacement_list),
('context', str), ('contextoffset', int),
('offset', int), ('errorlength', int),
('url', str), ('category', str), ('locqualityissuetype', str),
])
def __init__(self, attrib):
for k, v in attrib.items():
setattr(self, k, v)
def __repr__(self):
def _ordered_dict_repr():
slots = list(self._SLOTS)
slots += list(set(self.__dict__).difference(slots))
attrs = [slot for slot in slots
if slot in self.__dict__ and not slot.startswith('_')]
return '{{{}}}'.format(
', '.join([
'{!r}: {!r}'.format(attr, getattr(self, attr))
for attr in attrs
])
)
return '{}({})'.format(self.__class__.__name__, _ordered_dict_repr())
def __str__(self):
ruleId = self.ruleId
if self.subId is not None:
ruleId += '[{}]'.format(self.subId)
s = 'Line {}, column {}, Rule ID: {}'.format(
self.fromy + 1, self.fromx + 1, ruleId)
if self.msg:
s += '\nMessage: {}'.format(self.msg)
if self.replacements:
s += '\nSuggestion: {}'.format('; '.join(self.replacements))
s += '\n{}\n{}'.format(
self.context, ' ' * self.contextoffset + '^' * self.errorlength
)
return s
def __eq__(self, other):
return list(self) == list(other)
def __lt__(self, other):
return list(self) < list(other)
def __iter__(self):
return iter(getattr(self, attr) for attr in self._SLOTS)
def __setattr__(self, name, value):
try:
value = self._SLOTS[name](value)
except KeyError:
value = auto_type(value)
super().__setattr__(name, value)
def __getattr__(self, name):
if name not in self._SLOTS:
raise AttributeError('{!r} object has no attribute {!r}'
.format(self.__class__.__name__, name))
class LanguageTool:
"""Main class used for checking text against different rules."""
_HOST = socket.gethostbyname('localhost')
_MIN_PORT = 8081
_MAX_PORT = 8083
_TIMEOUT = 60
_port = _MIN_PORT
_server = None
_instances = WeakValueDictionary()
_PORT_RE = re.compile(r"(?:https?://.*:|port\s+)(\d+)", re.I)
def __init__(self, language=None, motherTongue=None):
if not self._server_is_alive():
self._start_server_on_free_port()
if language is None:
try:
language = get_locale_language()
except ValueError:
language = FAILSAFE_LANGUAGE
self._language = LanguageTag(language)
self.motherTongue = motherTongue
# spell check rules are disabled by default
self.disabled = {'HUNSPELL_RULE', 'HUNSPELL_NO_SUGGEST_RULE', 'YOUR_NN', 'TRY_AND', 'PRP_PAST_PART',
'MORFOLOGIK_RULE_' + self.language.replace('-', '_').upper()}
self.enabled = set()
self._instances[id(self)] = self
def __del__(self):
if not self._instances and self._server_is_alive():
#self._terminate_server()
pass
def __repr__(self):
return '{}(language={!r}, motherTongue={!r})'.format(
self.__class__.__name__, self.language, self.motherTongue)
@property
def language(self):
"""The language to be used."""
return self._language
@language.setter
def language(self, language):
self._language = LanguageTag(language)
self.disabled.clear()
self.enabled.clear()
@property
def motherTongue(self):
"""The user's mother tongue or None.
The mother tongue may also be used as a source language for
checking bilingual texts.
"""
return self._motherTongue
@motherTongue.setter
def motherTongue(self, motherTongue):
self._motherTongue = (None if motherTongue is None
else LanguageTag(motherTongue))
@property
def _spell_checking_rules(self):
return {'HUNSPELL_RULE', 'HUNSPELL_NO_SUGGEST_RULE',
'MORFOLOGIK_RULE_' + self.language.replace('-', '_').upper()}
def check(self, text: str, srctext=None) -> [Match]:
"""Match text against enabled rules."""
root = self._get_root(self._url, self._encode(text, srctext))
return [Match(e.attrib) for e in root if e.tag == 'error']
def _check_api(self, text: str, srctext=None) -> bytes:
"""Match text against enabled rules (result in XML format)."""
root = self._get_root(self._url, self._encode(text, srctext))
return (b'<?xml version="1.0" encoding="UTF-8"?>\n' +
ElementTree.tostring(root) + b"\n")
def _encode(self, text, srctext=None):
params = {'language': self.language, 'text': text.encode('utf-8')}
if srctext is not None:
params['srctext'] = srctext.encode('utf-8')
if self.motherTongue is not None:
params['motherTongue'] = self.motherTongue
if self.disabled:
params['disabled'] = ','.join(self.disabled)
if self.enabled:
params['enabled'] = ','.join(self.enabled)
return urllib.parse.urlencode(params).encode()
def correct(self, text: str, srctext=None) -> str:
"""Automatically apply suggestions to the text."""
return correct(text, self.check(text, srctext))
def enable_spellchecking(self):
"""Enable spell-checking rules."""
self.disabled.difference_update(self._spell_checking_rules)
def disable_spellchecking(self):
"""Disable spell-checking rules."""
self.disabled.update(self._spell_checking_rules)
@classmethod
def _get_languages(cls) -> set:
"""Get supported languages (by querying the server)."""
if not cls._server_is_alive():
cls._start_server_on_free_port()
url = urllib.parse.urljoin(cls._url, 'Languages')
languages = set()
for e in cls._get_root(url, num_tries=1):
languages.add(e.get('abbr'))
languages.add(e.get('abbrWithVariant'))
return languages
@classmethod
def _get_attrib(cls):
"""Get matches element attributes."""
if not cls._server_is_alive():
cls._start_server_on_free_port()
params = {'language': FAILSAFE_LANGUAGE, 'text': ''}
data = urllib.parse.urlencode(params).encode()
root = cls._get_root(cls._url, data, num_tries=1)
return root.attrib
@classmethod
def _get_root(cls, url, data=None, num_tries=2):
for n in range(num_tries):
try:
with urlopen(url, data, cls._TIMEOUT) as f:
return ElementTree.parse(f).getroot()
except (IOError, http.client.HTTPException) as e:
cls._terminate_server()
cls._start_server()
if n + 1 >= num_tries:
raise Error('{}: {}'.format(cls._url, e))
@classmethod
def _start_server_on_free_port(cls):
while True:
cls._url = 'http://{}:{}'.format(cls._HOST, cls._port)
try:
cls._start_server()
break
except ServerError:
if cls._MIN_PORT <= cls._port < cls._MAX_PORT:
cls._port += 1
else:
raise
@classmethod
def _start_server(cls):
err = None
try:
server_cmd = get_server_cmd(cls._port)
except PathError as e:
# Can't find path to LanguageTool.
err = e
else:
# Need to PIPE all handles: http://bugs.python.org/issue3905
cls._server = subprocess.Popen(
server_cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
startupinfo=startupinfo
)
# Python 2.7 compatibility
# for line in cls._server.stdout:
match = None
while True:
line = cls._server.stdout.readline()
if not line:
break
match = cls._PORT_RE.search(line)
if match:
port = int(match.group(1))
if port != cls._port:
raise Error('requested port {}, but got {}'
.format(cls._port, port))
break
if not match:
cls._terminate_server()
err_msg = cls._server.communicate()[1].strip()
cls._server = None
match = cls._PORT_RE.search(err_msg)
if not match:
raise Error(err_msg)
port = int(match.group(1))
if port != cls._port:
raise Error(err_msg)
if not cls._server:
# Couldn't start the server, so maybe there is already one running.
params = {'language': FAILSAFE_LANGUAGE, 'text': ''}
data = urllib.parse.urlencode(params).encode()
try:
with urlopen(cls._url, data, cls._TIMEOUT) as f:
tree = ElementTree.parse(f)
except (IOError, http.client.HTTPException) as e:
if err:
raise err
raise ServerError('{}: {}'.format(cls._url, e))
root = tree.getroot()
# LanguageTool 1.9+
if root.get('software') != 'LanguageTool':
raise ServerError('unexpected software from {}: {!r}'
.format(cls._url, root.get('software')))
@classmethod
def _server_is_alive(cls):
return cls._server and cls._server.poll() is None
@classmethod
def _terminate_server(cls):
try:
cls._server.terminate()
except OSError:
pass
@total_ordering
class LanguageTag(str):
"""Language tag supported by LanguageTool."""
_LANGUAGE_RE = re.compile(r"^([a-z]{2,3})(?:[_-]([a-z]{2}))?$", re.I)
def __new__(cls, tag):
# Can't use super() here because of 3to2.
return str.__new__(cls, cls._normalize(tag))
def __eq__(self, other):
try:
other = self._normalize(other)
except ValueError:
pass
return str(self) == other
def __lt__(self, other):
try:
other = self._normalize(other)
except ValueError:
pass
return str(self) < other
@classmethod
def _normalize(cls, tag):
if not tag:
raise ValueError('empty language tag')
languages = {language.lower().replace('-', '_'): language
for language in get_languages()}
try:
return languages[tag.lower().replace('-', '_')]
except KeyError:
try:
return languages[cls._LANGUAGE_RE.match(tag).group(1).lower()]
except (KeyError, AttributeError):
raise ValueError('unsupported language: {!r}'.format(tag))
def correct(text: str, matches: [Match]) -> str:
"""Automatically apply suggestions to the text."""
ltext = list(text)
matches = [match for match in matches if match.replacements]
errors = [ltext[match.offset:match.offset + match.errorlength]
for match in matches]
correct_offset = 0
for n, match in enumerate(matches):
frompos, topos = (correct_offset + match.offset,
correct_offset + match.offset + match.errorlength)
if ltext[frompos:topos] != errors[n]:
continue
repl = match.replacements[0]
ltext[frompos:topos] = list(repl)
correct_offset += len(repl) - len(errors[n])
return ''.join(ltext)
def _get_attrib():
try:
attrib = cache['attrib']
except KeyError:
attrib = LanguageTool._get_attrib()
cache['attrib'] = attrib
return attrib
def get_version():
"""Get LanguageTool version."""
version = _get_attrib().get('version')
if not version:
match = re.search(r"LanguageTool-?.*?(\S+)$", get_directory())
if match:
version = match.group(1)
return version
def get_build_date():
"""Get LanguageTool build date."""
return _get_attrib().get('buildDate')
def get_languages() -> set:
"""Get supported languages."""
try:
languages = cache['languages']
except KeyError:
languages = LanguageTool._get_languages()
cache['languages'] = languages
return languages
def get_directory():
"""Get LanguageTool directory."""
try:
language_check_dir = cache['language_check_dir']
except KeyError:
def version_key(string):
return [int(e) if e.isdigit() else e
for e in re.split(r"(\d+)", string)]
def get_lt_dir(base_dir):
paths = [
path for path in
glob.glob(os.path.join(base_dir, 'LanguageTool*'))
if os.path.isdir(path)
]
return max(paths, key=version_key) if paths else None
base_dir = os.path.dirname(sys.argv[0])
language_check_dir = get_lt_dir(base_dir)
if not language_check_dir:
try:
base_dir = os.path.dirname(os.path.abspath(__file__))
except NameError:
pass
else:
language_check_dir = get_lt_dir(base_dir)
if not language_check_dir:
raise PathError("can't find LanguageTool directory in {!r}"
.format(base_dir))
cache['language_check_dir'] = language_check_dir
return language_check_dir
def set_directory(path=None):
"""Set LanguageTool directory."""
old_path = get_directory()
terminate_server()
cache.clear()
if path:
cache['language_check_dir'] = path
try:
get_jar_info()
except Error:
cache['language_check_dir'] = old_path
raise
def get_server_cmd(port=None):
try:
cmd = cache['server_cmd']
except KeyError:
java_path, jar_path = get_jar_info()
cmd = [java_path, '-cp', jar_path,
'org.languagetool.server.HTTPServer']
cache['server_cmd'] = cmd
return cmd if port is None else cmd + ['-p', str(port)]
def get_jar_info():
try:
java_path, jar_path = cache['jar_info']
except KeyError:
java_path = which('java')
if not java_path:
raise JavaError("can't find Java")
dir_name = get_directory()
jar_path = None
for jar_name in JAR_NAMES:
for jar_path in glob.glob(os.path.join(dir_name, jar_name)):
if os.path.isfile(jar_path):
break
else:
jar_path = None
if jar_path:
break
else:
raise PathError("can't find languagetool-standalone in {!r}"
.format(dir_name))
cache['jar_info'] = java_path, jar_path
return java_path, jar_path
def get_locale_language():
"""Get the language code for the current locale setting."""
return locale.getlocale()[0] or locale.getdefaultlocale()[0]
@atexit.register
def terminate_server():
"""Terminate the server."""
if LanguageTool._server_is_alive():
LanguageTool._terminate_server()
if USE_URLOPEN_RESOURCE_WARNING_FIX:
class ClosingHTTPResponse(http.client.HTTPResponse):
def __init__(self, sock, *args, **kwargs):
super().__init__(sock, *args, **kwargs)
self._socket_close = sock.close
def close(self):
super().close()
self._socket_close()
class ClosingHTTPConnection(http.client.HTTPConnection):
response_class = ClosingHTTPResponse
class ClosingHTTPHandler(urllib.request.HTTPHandler):
def http_open(self, req):
return self.do_open(ClosingHTTPConnection, req)
urlopen = urllib.request.build_opener(ClosingHTTPHandler).open
else:
try:
urllib.response.addinfourl.__exit__
except AttributeError:
from contextlib import closing
def urlopen(*args, **kwargs):
return closing(urllib.request.urlopen(*args, **kwargs))
else:
urlopen = urllib.request.urlopen
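# A minimal usage sketch (illustrative; requires Java plus a LanguageTool
# distribution discoverable via get_directory()/set_directory()):
#
#     tool = LanguageTool('en-US')
#     text = 'This are a example.'
#     matches = tool.check(text)
#     print(correct(text, matches))  # applies the first suggestion per match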
|
viraja1/grammar-check
|
grammar_check/__init__.py
|
Python
|
lgpl-3.0
| 19,319
| 0.000207
|
def extractDanielyangNinja(item):
'''
Parser for 'danielyang.ninja'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
if "WATTT" in item['tags']:
return buildReleaseMessageWithType(item, "WATTT", vol, chp, frag=frag, postfix=postfix)
return False
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractDanielyangNinja.py
|
Python
|
bsd-3-clause
| 362
| 0.035912
|
#!/usr/bin/env python
# coding=utf-8
"""30. Digit fifth powers
https://projecteuler.net/problem=30
Surprisingly there are only three numbers that can be written as the sum of
fourth powers of their digits:
> 1634 = 1^4 + 6^4 + 3^4 + 4^4
> 8208 = 8^4 + 2^4 + 0^4 + 8^4
> 9474 = 9^4 + 4^4 + 7^4 + 4^4
As 1 = 1^4 is not a sum it is not included.
The sum of these numbers is 1634 + 8208 + 9474 = 19316.
Find the sum of all the numbers that can be written as the sum of fifth powers
of their digits.
"""
|
openqt/algorithms
|
projecteuler/pe030-digit-fifth-powers.py
|
Python
|
gpl-3.0
| 507
| 0.021696
|
import unittest
"""
Given an unordered array of integers, find the length of longest increasing subsequence.
Input: 0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15
Output: 6 (0, 2, 6, 9, 11, 15)
"""
"""
A great explanation of the approach appears here:
http://www.geeksforgeeks.org/longest-monotonically-increasing-subsequence-size-n-log-n/
"""
def find_ceil_index(list_of_numbers, ele):
"""
    Returns the index of the smallest element in list_of_numbers that is
    greater than or equal to ele, or -1 if there is none.
"""
low = 0
high = len(list_of_numbers)-1
ans = -1
while low <= high:
        mid = (low + high) // 2  # floor division keeps the index an int on Python 3
if list_of_numbers[mid] >= ele:
ans = mid
high = mid - 1
else:
low = mid + 1
return ans
def find_longest_increasing_subsequence_length(list_of_numbers):
    if not list_of_numbers:
        return 0
    # LCS[k] holds the smallest possible tail value of an increasing
    # subsequence of length k + 1 seen so far.
    LCS = [list_of_numbers[0]]
for i in range(1, len(list_of_numbers)):
cur_ele = list_of_numbers[i]
k = find_ceil_index(LCS, cur_ele)
if k == -1:
LCS.append(cur_ele)
else:
LCS[k] = cur_ele
return len(LCS)
class TestLIS(unittest.TestCase):
def test_longest_increasing_subsequence(self):
list_of_numbers = [0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15]
self.assertEqual(find_longest_increasing_subsequence_length(list_of_numbers), 6)
list_of_numbers = [2, 5, 3, 1, 2, 3, 4, 5, 6]
self.assertEqual(find_longest_increasing_subsequence_length(list_of_numbers), 6)
list_of_numbers = [5, 4, 3, 2, 1]
self.assertEqual(find_longest_increasing_subsequence_length(list_of_numbers), 1)
|
prathamtandon/g4gproblems
|
Arrays/longest_increasing_subsequence_nlogn.py
|
Python
|
mit
| 1,625
| 0.004308
|
import math
import inspect
import numpy as np
import numpy.linalg as linalg
import scipy as sp
import scipy.optimize
import scipy.io
from itertools import product
import trep
import _trep
from _trep import _System
from frame import Frame
from finput import Input
from config import Config
from force import Force
from constraint import Constraint
from potential import Potential
from util import dynamics_indexing_decorator
class System(_System):
"""
The System class represents a complete mechanical system
comprising coordinate frames, configuration variables, potential
energies, constraints, and forces.
"""
def __init__(self):
"""
Create a new mechanical system.
"""
_System.__init__(self)
# _System variables need to be initialized (cleaner here than in C w/ ref counting)
self._frames = tuple()
self._configs = tuple()
self._dyn_configs = tuple()
self._kin_configs = tuple()
self._potentials = tuple()
self._forces = tuple()
self._inputs = tuple()
self._constraints = tuple()
self._masses = tuple()
self._hold_structure_changes = 0
self._structure_changed_funcs = []
# Hold off the initial structure update until we have a world
# frame.
self._hold_structure_changes = 1
self._world_frame = Frame(self, trep.WORLD, None, name="World")
self._hold_structure_changes = 0
self._structure_changed()
def __repr__(self):
return '<System %d configs, %d frames, %d potentials, %d constraints, %d forces, %d inputs>' % (
len(self.configs),
len(self.frames),
len(self.potentials),
len(self.constraints),
len(self.forces),
len(self.inputs))
@property
def nQ(self):
"""Number of configuration variables in the system."""
return len(self.configs)
@property
def nQd(self):
"""Number of dynamic configuration variables in the system."""
return len(self.dyn_configs)
@property
def nQk(self):
"""Number of kinematic configuration variables in the system."""
return len(self.kin_configs)
@property
def nu(self):
"""Number of inputs in the system."""
return len(self.inputs)
@property
def nc(self):
"""Number of constraints in the system."""
return len(self.constraints)
@property
def t(self):
"""Current time of the system."""
return self._time
@t.setter
def t(self, value):
self._clear_cache()
self._time = value
def get_frame(self, identifier):
"""
get_frame(identifier) -> Frame,None
Return the first frame with the matching identifier. The
identifier can be the frame name, index, or the frame itself.
Raise an exception if no match is found.
"""
return self._get_object(identifier, Frame, self.frames)
def get_config(self, identifier):
"""
get_config(identifier) -> Config,None
Return the first configuration variable with the matching
identifier. The identifier can be the config name, index, or
the config itself. Raise an exception if no match is found.
"""
return self._get_object(identifier, Config, self.configs)
def get_potential(self, identifier):
"""
get_potential(identifier) -> Potential,None
Return the first potential with the matching identifier. The
identifier can be the constraint name, index, or the
constraint itself. Raise an exception if no match is found.
"""
return self._get_object(identifier, Potential, self.potentials)
def get_constraint(self, identifier):
"""
get_constraint(identifier) -> Constraint,None
Return the first constraint with the matching identifier. The
identifier can be the constraint name, index, or the
constraint itself. Raise an exception if no match is found.
"""
return self._get_object(identifier, Constraint, self.constraints)
def get_force(self, identifier):
"""
get_force(identifier) -> Force,None
Return the first force with the matching identifier. The
identifier can be the force name, index, or the
force itself. Raise an exception if no match is found.
"""
return self._get_object(identifier, Force, self.forces)
def get_input(self, identifier):
"""
get_input(identifier) -> Input,None
Return the first input with the matching identifier. The
identifier can be the input name, index, or the
input itself. Raise an exception if no match is found.
"""
return self._get_object(identifier, Input, self.inputs)
def satisfy_constraints(self, tolerance=1e-10, verbose=False,
keep_kinematic=False, constant_q_list=None):
"""
Modify the current configuration to satisfy the system
constraints.
The configuration velocity (ie, config.dq) is simply set to
zero. This should be fixed in the future.
Passing True to keep_kinematic will not allow method to modify
kinematic configuration variables.
Passing a list (or tuple) of configurations to constant_q_list
will keep all elements in list constant. The method uses
trep.System.get_config so the list may contain configuration
objects, indices in Q, or names. Passing anything for
constant_list_q will overwrite value for keep_kinematic.
"""
self.dq = 0
if keep_kinematic:
names = [q.name for q in self.dyn_configs]
q0 = self.qd
else:
names = [q.name for q in self.configs]
q0 = self.q
if constant_q_list:
connames = [self.get_config(q).name for q in constant_q_list]
names = []
for q in self.configs:
if q.name not in connames:
names.append(q.name)
q0 = np.array([self.q[self.get_config(name).index] for name in names])
def func(q):
v = (q - q0)
return np.dot(v,v)
def fprime(q):
return 2*(q-q0)
def f_eqcons(q):
self.q = dict(zip(names,q))
return np.array([c.h() for c in self.constraints])
def fprime_eqcons(q):
self.q = dict(zip(names,q))
return np.array([[c.h_dq(self.get_config(q)) for q in names] for c in self.constraints])
(q_opt, fx, its, imode, smode) = sp.optimize.fmin_slsqp(func, q0, f_eqcons=f_eqcons,
fprime=fprime, fprime_eqcons=fprime_eqcons,
acc=tolerance, iter=100*self.nQ,
iprint=0, full_output=True)
if imode != 0:
raise StandardError("Minimization failed: %s" % smode)
self.q = dict(zip(names,q_opt))
return self.q
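    # A minimal usage sketch (the config name 'theta' is hypothetical, not
    # from the original module): after perturbing a constrained system,
    #
    #     system.q = {'theta': 0.1}
    #     system.satisfy_constraints(tolerance=1e-12, keep_kinematic=True)
    #
    # projects q back onto the constraint manifold while leaving kinematic
    # configuration variables untouched.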
def minimize_potential_energy(self, tolerance=1e-10, verbose=False,
keep_kinematic=False, constant_q_list=None):
"""
Find a nearby configuration where the potential energy is
minimized. Useful for finding nearby equilibrium points.
If minimum is found, all constraints will be found as well
The configuration velocity (ie, config.dq) is set to
zero which ensures the kinetic energy is zero.
Passing True to keep_kinematic will not allow method to modify
kinematic configuration variables.
Passing a list (or tuple) of configurations to constant_q_list
will keep all elements in list constant. The method uses
trep.System.get_config so the list may contain configuration
objects, indices in Q, or names. Passing anything for
constant_list_q will overwrite value for keep_kinematic.
"""
self.dq = 0
if keep_kinematic:
names = [q.name for q in self.dyn_configs]
q0 = self.qd
else:
names = [q.name for q in self.configs]
q0 = self.q
if constant_q_list:
connames = [self.get_config(q).name for q in constant_q_list]
names = []
for q in self.configs:
if q.name not in connames:
names.append(q.name)
q0 = np.array([self.q[self.get_config(name).index] for name in names])
def func(q):
self.q = dict(zip(names,q))
return -self.L()
def fprime(q):
return [-self.L_dq(self.get_config(name)) for name in names]
def f_eqcons(q):
self.q = dict(zip(names,q))
return np.array([c.h() for c in self.constraints])
def fprime_eqcons(q):
self.q = dict(zip(names,q))
return np.array([[c.h_dq(self.get_config(q)) for q in names] for c in self.constraints])
(q_opt, fx, its, imode, smode) = sp.optimize.fmin_slsqp(func, q0, f_eqcons=f_eqcons,
fprime=fprime, fprime_eqcons=fprime_eqcons,
acc=tolerance, iter=100*self.nQ,
iprint=0, full_output=True)
if imode != 0:
raise StandardError("Minimization failed: %s" % smode)
self.q = dict(zip(names,q_opt))
return self.q
def set_state(self, q=None, dq=None, u=None, ddqk=None, t=None):
"""
Set the current state of the system, not including the "output" ddqd.
"""
if q is not None: self.q = q
if dq is not None: self.dq = dq
if u is not None: self.u = u
if ddqk is not None: self.ddqk = ddqk
if t is not None: self.t = t
def import_frames(self, children):
"""
Adds children to this system's world frame using a special
frame definition. See Frame.import_frames() for details.
"""
self.world_frame.import_frames(children)
def export_frames(self, system_name='system', frames_name='frames', tab_size=4):
"""
Create python source code to define this system's frames.
"""
txt = ''
txt += '#'*80 + '\n'
txt += '# Frame tree definition generated by System.%s()\n\n' % inspect.stack()[0][3]
txt += 'from trep import %s\n' % ', '.join(sorted(trep.frame.frame_def_mapping.values()))
txt += '%s = [\n' % frames_name
txt += ',\n'.join([child.export_frames(1, tab_size) for child in self.world_frame.children]) + '\n'
txt += ' '*tab_size + ']\n'
txt += '%s.import_frames(%s)\n' % (system_name, frames_name)
txt += '#'*80 + '\n'
return txt
@property
def q(self):
"""Current configuration of the system."""
return np.array([q.q for q in self.configs])
@q.setter
def q(self, value):
# Writing c.q will clear system cache
if isinstance(value, (int, float)):
for q in self.configs:
q.q = value
elif isinstance(value, dict):
for name, v in value.iteritems():
self.get_config(name).q = v
else:
for q,v in zip(self.configs, value):
q.q = v
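    # Accepted forms for the q/dq/ddq setters (illustrative; 'theta' is a
    # hypothetical config name):
    #     system.q = 0.0              # scalar broadcast to every config
    #     system.q = {'theta': 0.1}   # dict keyed by config name
    #     system.q = [0.1, 0.2, 0.3]  # sequence matched against system.configs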
@property
def dq(self):
""" Current configuration velocity of the system """
return np.array([q.dq for q in self.configs])
@dq.setter
def dq(self, value):
# Writing c.dq will clear system cache
if isinstance(value, (int, float)):
for q in self.configs:
q.dq = value
elif isinstance(value, dict):
for name, v in value.iteritems():
self.get_config(name).dq = v
else:
for q,v in zip(self.configs, value):
q.dq = v
@property
def ddq(self):
""" Current configuration acceleration of the system """
return np.array([q.ddq for q in self.configs])
@ddq.setter
def ddq(self, value):
# Writing c.ddq will clear system cache
if isinstance(value, (int, float)):
for q in self.configs:
q.ddq = value
elif isinstance(value, dict):
for name, v in value.iteritems():
self.get_config(name).ddq = v
else:
for q,v in zip(self.configs, value):
q.ddq = v
@property
def qd(self):
"""Dynamic part of the system's current configuration."""
return np.array([q.q for q in self.dyn_configs])
@qd.setter
def qd(self, value):
# Writing c.q will clear system cache
if isinstance(value, (int, float)):
for q in self.dyn_configs:
q.q = value
elif isinstance(value, dict):
for name, v in value.iteritems():
self.get_config(name).q = v
else:
for q,v in zip(self.dyn_configs, value):
q.q = v
@property
def dqd(self):
"""Dynamic part of the system's current configuration velocity."""
return np.array([q.dq for q in self.dyn_configs])
@dqd.setter
def dqd(self, value):
# Writing c.q will clear system cache
if isinstance(value, (int, float)):
for q in self.dyn_configs:
q.dq = value
elif isinstance(value, dict):
for name, v in value.iteritems():
self.get_config(name).dq = v
else:
for q,v in zip(self.dyn_configs, value):
q.dq = v
@property
def ddqd(self):
"""Dynamic part of the system's current configuration acceleration."""
return np.array([q.ddq for q in self.dyn_configs])
@ddqd.setter
def ddqd(self, value):
# Writing c.q will clear system cache
if isinstance(value, (int, float)):
for q in self.dyn_configs:
q.ddq = value
elif isinstance(value, dict):
for name, v in value.iteritems():
self.get_config(name).ddq = v
else:
for q,v in zip(self.dyn_configs, value):
q.ddq = v
@property
def qk(self):
"""Kinematic part of the system's current configuration."""
return np.array([q.q for q in self.kin_configs])
@qk.setter
def qk(self, value):
# Writing c.q will clear system cache
if isinstance(value, (int, float)):
for q in self.kin_configs:
q.q = value
elif isinstance(value, dict):
for name, v in value.iteritems():
self.get_config(name).q = v
else:
for q,v in zip(self.kin_configs, value):
q.q = v
@property
def dqk(self):
"""Kinematic part of the system's current configuration velocity."""
return np.array([q.dq for q in self.kin_configs])
@dqk.setter
def dqk(self, value):
# Writing c.q will clear system cache
if isinstance(value, (int, float)):
for q in self.kin_configs:
q.dq = value
elif isinstance(value, dict):
for name, v in value.iteritems():
self.get_config(name).dq = v
else:
for q,v in zip(self.kin_configs, value):
q.dq = v
@property
def ddqk(self):
"""Kinematic part of the system's current configuration acceleration."""
return np.array([q.ddq for q in self.kin_configs])
@ddqk.setter
def ddqk(self, value):
# Writing c.ddq will clear system cache
if isinstance(value, (int, float)):
for q in self.kin_configs:
q.ddq = value
elif isinstance(value, dict):
for name, v in value.iteritems():
self.get_config(name).ddq = v
else:
for q,v in zip(self.kin_configs, value):
q.ddq = v
@property
def u(self):
"""Current input vector of the system."""
return np.array([u.u for u in self.inputs])
@u.setter
def u(self, value):
# Writing u.u will clear system cache
if isinstance(value, (int, float)):
for u in self.inputs:
u.u = value
elif isinstance(value, dict):
for name, v in value.iteritems():
self.get_input(name).u = v
else:
for u,v in zip(self.inputs, value):
u.u = v
@property
def world_frame(self):
"The root spatial frame of the system."
return self._world_frame
@property
def frames(self):
"Tuple of all the frames in the system."
return self._frames
@property
def configs(self):
"""
Tuple of all the configuration variables in the system.
This is always equal to self.dyn_configs + self.kin_configs
"""
return self._configs
@property
def dyn_configs(self):
"""
Tuple of all the dynamic configuration variables in the system.
"""
return self._dyn_configs
@property
def kin_configs(self):
"""
Tuple of all the kinematic configuration variables in the
system.
"""
return self._kin_configs
@property
def potentials(self):
"Tuple of all the potentials in the system."
return self._potentials
@property
def forces(self):
"Tuple of all the forces in the system."
return self._forces
@property
def inputs(self):
"Tuple of all the input variables in the system."
return self._inputs
@property
def constraints(self):
"Tuple of all the constraints in the system."
return self._constraints
@property
def masses(self):
"Tuple of all the frames with non-zero inertias."
return self._masses
def _clear_cache(self):
"""Clear the system cache."""
self._cache = 0
self._state_counter += 1
def _get_object(self, identifier, objtype, array):
"""
_get_object(identifier, objtype, array) -> object,None
Return the first item in array with a matching identifier.
The type of 'identifier' defines how the object is identified.
type(identifier) -> how identifier is used
None -> return None
int -> return array[identifier]
name -> return item in array such that item.name == identifier
objtype -> return identifier
Raise an exception if 'identifier' is a different type or
there is an error/no match.
"""
if identifier == None:
return None
elif isinstance(identifier, objtype):
return identifier
elif isinstance(identifier, int):
return array[identifier]
elif isinstance(identifier, str):
for item in array:
if item.name == identifier:
return item
raise KeyError("%s with name '%s' not found" % (objtype, identifier))
else:
raise TypeError()
def _add_kin_config(self, config):
"""
_add_kin_config(config) -> Append config to the kin_configs
tuple.
"""
assert isinstance(config, trep.Config)
self._kin_configs += (config,)
def _add_dyn_config(self, config):
"""
_add_dyn_config(config) -> Append config to the dyn_configs
tuple.
"""
assert isinstance(config, trep.Config)
self._dyn_configs += (config,)
def _add_constraint(self, constraint):
"""
_add_constraint(constraint) -> Append constraint to the
constraint tuple.
"""
assert isinstance(constraint, trep.Constraint)
self._constraints += (constraint,)
def _add_potential(self, potential):
"""
_add_potential(potential) -> Append potential to the
potentials tuple.
"""
assert isinstance(potential, trep.Potential)
self._potentials += (potential,)
def _add_input(self, finput):
"""
_add_input(finput) -> Append input to the inputs tuple.
"""
assert isinstance(finput, trep.Input)
self._inputs += (finput,)
def _add_force(self, force):
"""
_add_force(force) -> Append force to the forces tuple.
"""
assert isinstance(force, trep.Force)
self._forces += (force,)
def add_structure_changed_func(self, function):
"""
Register a function to call whenever the system structure
changes. This includes adding and removing frames,
configuration variables, constraints, potentials, and forces.
"""
self._structure_changed_funcs.append(function)
def hold_structure_changes(self):
"""
Prevent the system from calling System._update_structure()
(mostly). Useful when building a large system to avoid
needlessly allocating and deallocating memory.
"""
self._hold_structure_changes += 1
def resume_structure_changes(self):
"""
Stop preventing the system from calling
System._update_structure(). The structure will only be
updated once every hold has been removed, so calling this does
        not guarantee that the structure will be immediately updated.
"""
if self._hold_structure_changes == 0:
raise StandardError("System.resume_structure_changes() called" \
" when _hold_structure_changes is 0")
self._hold_structure_changes -= 1
if self._hold_structure_changes == 0:
self._structure_changed()
def _structure_changed(self):
"""
Updates variables so that System is internally consistent.
There is a lot of duplicate information throughout a System,
for either convenience or performance reasons. For duplicate
information, one place is considered the 'master'. These are
places that other functions manipulate. The other duplicates
are created from the 'master'.
The variables controlled by this function include:
system.frames - This tuple is built by descending the frames
tree and collecting each frame.
system.configs - This tuple is built by concatenating
system.dyn_configs and system.kin_configs.
config.config_gen - config_gen is set by descending down the
tree while keeping track of how many configuration variables
have been seen.
config.index - 'index' is set using the config's index in
system.configs
config.k_index - 'k_index' is set using the config's index in
system.kin_configs or to -1 for dynamic configuration
variables.
system.masses - This tuple is set by running through
system.frames and collecting any frame that has non-zero
inertia properties.
frame.cache_index - Built for each frame by descending up the
tree and collecting every configuration variable that is
encountered. This is set in Frame._structure_changed()
config.masses - Built for each config by looking at each frame
in self.masses and collecting those that depend on the config.
Finally, we call all the registered structure update functions
for any external objects that need to update their own
structures.
"""
# When we build big systems, we waste a lot of time building
# the cache over and over again. Instead, we can turn off the
# updating for a bit, and then do it once when we're
# done.
if self._hold_structure_changes != 0:
return
# Cache value dependencies:
# system.frames :depends on: none
# system.configs :depends on: none
# config.config_gen :depends on: none
# config.index :depend on: system.configs
# system.masses :depends on: none
# frame.cache_index :depends on: config.config_gen
# config.masses :depends on: frame.cache_index, system.masses
self._frames = tuple(self.world_frame.flatten_tree())
self._configs = self.dyn_configs + self.kin_configs
# Initialize config_gens to be N+1. Configs that do not drive
# frame transformations will retain this value
for config in self.configs:
config._config_gen = len(self._configs)
def update_config_gen(frame, index):
            if frame.config is not None:
                frame.config._config_gen = index
index += 1
for child in frame.children:
update_config_gen(child, index)
update_config_gen(self.world_frame, 0)
for (i, config) in enumerate(self.configs):
config._index = i
config._k_index = -1
for (i, config) in enumerate(self.kin_configs):
config._k_index = i
for (i, constraint) in enumerate(self.constraints):
constraint._index = i
for (i, finput) in enumerate(self.inputs):
finput._index = i
# Find all frames with non-zero masses
self._masses = tuple([f for f in self.frames
if f.mass != 0.0
or f.Ixx != 0.0
or f.Iyy != 0.0
or f.Izz != 0.0])
self.world_frame._structure_changed()
for config in self.configs:
config._masses = tuple([f for f in self._masses
if config in f._cache_index])
# Create numpy arrays used for calculation and storage
self._f = np.zeros( (self.nQd,), np.double, 'C')
self._lambda = np.zeros( (self.nc,), np.double, 'C')
self._D = np.zeros( (self.nQd,), np.double, 'C')
self._Ad = np.zeros((self.nc, self.nQd), np.double, 'C')
self._AdT = np.zeros((self.nQd, self.nc), np.double, 'C')
self._M_lu = np.zeros((self.nQd, self.nQd), np.double, 'C')
self._M_lu_index = np.zeros((self.nQd,), np.int, 'C')
self._A_proj_lu = np.zeros((self.nc, self.nc), np.double, 'C')
self._A_proj_lu_index = np.zeros((self.nc, ), np.int, 'C')
self._Ak = np.zeros( (self.nc, self.nQk), np.double, 'C')
self._Adt = np.zeros( (self.nc, self.nQ), np.double, 'C')
self._Ad_dq = np.zeros( (self.nQ, self.nc, self.nQd), np.double, 'C')
self._Ak_dq = np.zeros( (self.nQ, self.nc, self.nQk), np.double, 'C')
self._Adt_dq = np.zeros( (self.nQ, self.nc, self.nQ), np.double, 'C')
self._D_dq = np.zeros( (self.nQ, self.nQd), np.double, 'C')
self._D_ddq = np.zeros( (self.nQ, self.nQd), np.double, 'C')
self._D_du = np.zeros( (self.nu, self.nQd), np.double, 'C')
self._D_dk = np.zeros( (self.nQk, self.nQd), np.double, 'C')
self._f_dq = np.zeros( (self.nQ, self.nQd), np.double, 'C')
self._f_ddq = np.zeros( (self.nQ, self.nQd), np.double, 'C')
self._f_du = np.zeros( (self.nu, self.nQd), np.double, 'C')
self._f_dk = np.zeros( (self.nQk, self.nQd), np.double, 'C')
self._lambda_dq = np.zeros( (self.nQ, self.nc), np.double, 'C')
self._lambda_ddq = np.zeros( (self.nQ, self.nc), np.double, 'C')
self._lambda_du = np.zeros( (self.nu, self.nc), np.double, 'C')
self._lambda_dk = np.zeros( (self.nQk, self.nc), np.double, 'C')
self._Ad_dqdq = np.zeros( (self.nQ, self.nQ, self.nc, self.nQd), np.double, 'C')
self._Ak_dqdq = np.zeros( (self.nQ, self.nQ, self.nc, self.nQk), np.double, 'C')
self._Adt_dqdq = np.zeros( (self.nQ, self.nQ, self.nc, self.nQ), np.double, 'C')
self._D_dqdq = np.zeros( (self.nQ, self.nQ, self.nQd), np.double, 'C')
self._D_ddqdq = np.zeros( (self.nQ, self.nQ, self.nQd), np.double, 'C')
self._D_ddqddq = np.zeros( (self.nQ, self.nQ, self.nQd), np.double, 'C')
self._D_dkdq = np.zeros( (self.nQk, self.nQ, self.nQd), np.double, 'C')
self._D_dudq = np.zeros( (self.nu, self.nQ, self.nQd), np.double, 'C')
self._D_duddq = np.zeros( (self.nu, self.nQ, self.nQd), np.double, 'C')
self._D_dudu = np.zeros( (self.nu, self.nu, self.nQd), np.double, 'C')
self._f_dqdq = np.zeros( (self.nQ, self.nQ, self.nQd), np.double, 'C')
self._f_ddqdq = np.zeros( (self.nQ, self.nQ, self.nQd), np.double, 'C')
self._f_ddqddq = np.zeros( (self.nQ, self.nQ, self.nQd), np.double, 'C')
self._f_dkdq = np.zeros( (self.nQk, self.nQ, self.nQd), np.double, 'C')
self._f_dudq = np.zeros( (self.nu, self.nQ, self.nQd), np.double, 'C')
self._f_duddq = np.zeros( (self.nu, self.nQ, self.nQd), np.double, 'C')
self._f_dudu = np.zeros( (self.nu, self.nu, self.nQd), np.double, 'C')
self._lambda_dqdq = np.zeros( (self.nQ, self.nQ, self.nc), np.double, 'C')
self._lambda_ddqdq = np.zeros( (self.nQ, self.nQ, self.nc), np.double, 'C')
self._lambda_ddqddq = np.zeros( (self.nQ, self.nQ, self.nc), np.double, 'C')
self._lambda_dkdq = np.zeros( (self.nQk, self.nQ, self.nc), np.double, 'C')
self._lambda_dudq = np.zeros( (self.nu, self.nQ, self.nc), np.double, 'C')
self._lambda_duddq = np.zeros( (self.nu, self.nQ, self.nc), np.double, 'C')
self._lambda_dudu = np.zeros( (self.nu, self.nu, self.nc), np.double, 'C')
self._temp_nd = np.zeros( (self.nQd,), np.double, 'C')
self._temp_ndnc = np.zeros( (self.nQd, self.nc), np.double, 'C')
self._M_dq = np.zeros( (self.nQ, self.nQ, self.nQ), np.double, 'C')
self._M_dqdq = np.zeros( (self.nQ, self.nQ, self.nQ, self.nQ), np.double, 'C')
for func in self._structure_changed_funcs:
func()
def total_energy(self):
"""Calculate the total energy in the current state."""
return self._total_energy()
def L(self):
"""Calculate the Lagrangian at the current state."""
return self._L()
def L_dq(self, q1):
"""
Calculate the derivative of the Lagrangian with respect to the
value of q1.
"""
assert isinstance(q1, _trep._Config)
return self._L_dq(q1)
def L_dqdq(self, q1, q2):
"""
Calculate the second derivative of the Lagrangian with respect
to the value of q1 and the value of q2.
"""
assert isinstance(q1, _trep._Config)
assert isinstance(q2, _trep._Config)
return self._L_dqdq(q1, q2)
def L_dqdqdq(self, q1, q2, q3):
"""
Calculate the third derivative of the Lagrangian with respect
to the value of q1, the value of q2, and the value of q3.
"""
assert isinstance(q1, _trep._Config)
assert isinstance(q2, _trep._Config)
assert isinstance(q3, _trep._Config)
return self._L_dqdqdq(q1, q2, q3)
def L_ddq(self, dq1):
"""
Calculate the derivative of the Lagrangian with respect
to the velocity of dq1.
"""
assert isinstance(dq1, _trep._Config)
return self._L_ddq(dq1)
def L_ddqdq(self, dq1, q2):
"""
Calculate the second derivative of the Lagrangian with respect
to the velocity of dq1 and the value of q2.
"""
assert isinstance(dq1, _trep._Config)
assert isinstance(q2, _trep._Config)
return self._L_ddqdq(dq1, q2)
def L_ddqdqdq(self, dq1, q2, q3):
"""
Calculate the third derivative of the Lagrangian with respect
to the velocity of dq1, the value of q2, and the value of q3.
"""
assert isinstance(dq1, _trep._Config)
assert isinstance(q2, _trep._Config)
assert isinstance(q3, _trep._Config)
return self._L_ddqdqdq(dq1, q2, q3)
def L_ddqdqdqdq(self, dq1, q2, q3, q4):
"""
Calculate the fourth derivative of the Lagrangian with respect
to the velocity of dq1, the value of q2, the value of q3, and
the value of q4.
"""
assert isinstance(dq1, _trep._Config)
assert isinstance(q2, _trep._Config)
assert isinstance(q3, _trep._Config)
assert isinstance(q4, _trep._Config)
return self._L_ddqdqdqdq(dq1, q2, q3, q4)
def L_ddqddq(self, dq1, dq2):
"""
Calculate the second derivative of the Lagrangian with respect
to the velocity of dq1 and the velocity of dq2.
"""
assert isinstance(dq1, _trep._Config)
assert isinstance(dq2, _trep._Config)
return self._L_ddqddq(dq1, dq2)
def L_ddqddqdq(self, dq1, dq2, q3):
"""
Calculate the third derivative of the Lagrangian with respect
to the velocity of dq1, the velocity of dq2, and the value of
q3.
"""
assert isinstance(dq1, _trep._Config)
assert isinstance(dq2, _trep._Config)
assert isinstance( q3, _trep._Config)
return self._L_ddqddqdq(dq1, dq2, q3)
def L_ddqddqdqdq(self, dq1, dq2, q3, q4):
"""
Calculate the fourth derivative of the Lagrangian with respect
to the velocity of dq1, the velocity of dq2, the value of q3,
and the value of q4.
"""
assert isinstance(dq1, _trep._Config)
assert isinstance(dq2, _trep._Config)
assert isinstance( q3, _trep._Config)
assert isinstance( q4, _trep._Config)
return self._L_ddqddqdqdq(dq1, dq2, q3, q4)
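# Hedged call sketch (not from the original source): the derivative
# accessors above take trep Config objects, e.g.
#   system.L_dqdq(system.configs[0], system.configs[1])
# evaluates d^2 L / (dq0 dq1) at the system's current state; any
# _trep._Config instances may be passed (the indices here are illustrative).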
@dynamics_indexing_decorator('d')
def f(self, q=None):
"""
Calculate the dynamics at the current state.
See documentation for details.
"""
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS)
return self._f[q].copy()
@dynamics_indexing_decorator('dq')
def f_dq(self, q=None, q1=None):
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS_DERIV1)
return self._f_dq[q1, q].T.copy()
@dynamics_indexing_decorator('dq')
def f_ddq(self, q=None, dq1=None):
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS_DERIV1)
return self._f_ddq[dq1, q].T.copy()
@dynamics_indexing_decorator('dk')
def f_dddk(self, q=None, k1=None):
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS_DERIV1)
return self._f_dk[k1, q].T.copy()
@dynamics_indexing_decorator('du')
def f_du(self, q=None, u1=None):
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS_DERIV1)
return self._f_du[u1, q].T.copy()
@dynamics_indexing_decorator('dqq')
def f_dqdq(self, q=None, q1=None, q2=None):
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS_DERIV2)
return self._f_dqdq[q1, q2, q].copy()
@dynamics_indexing_decorator('dqq')
def f_ddqdq(self, q=None, dq1=None, q2=None):
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS_DERIV2)
return self._f_ddqdq[dq1, q2, q].copy()
@dynamics_indexing_decorator('dqq')
def f_ddqddq(self, q=None, dq1=None, dq2=None):
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS_DERIV2)
return self._f_ddqddq[dq1, dq2, q].copy()
@dynamics_indexing_decorator('dkq')
def f_dddkdq(self, q=None, k1=None, q2=None):
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS_DERIV2)
return self._f_dkdq[k1, q2, q].copy()
@dynamics_indexing_decorator('duq')
def f_dudq(self, q=None, u1=None, q2=None):
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS_DERIV2)
return self._f_dudq[u1, q2, q].copy()
@dynamics_indexing_decorator('duq')
def f_duddq(self, q=None, u1=None, dq2=None):
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS_DERIV2)
return self._f_duddq[u1, dq2, q].copy()
@dynamics_indexing_decorator('duu')
def f_dudu(self, q=None, u1=None, u2=None):
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS_DERIV2)
return self._f_dudu[u1, u2, q].copy()
@dynamics_indexing_decorator('c')
def lambda_(self, constraint=None):
"""
Calculate the constraint forces at the current state.
"""
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS)
return self._lambda[constraint].copy()
@dynamics_indexing_decorator('cq')
def lambda_dq(self, constraint=None, q1=None):
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS_DERIV1)
return self._lambda_dq[q1, constraint].T.copy()
@dynamics_indexing_decorator('cq')
def lambda_ddq(self, constraint=None, dq1=None):
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS_DERIV1)
return self._lambda_ddq[dq1, constraint].T.copy()
@dynamics_indexing_decorator('ck')
def lambda_dddk(self, constraint=None, k1=None):
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS_DERIV1)
return self._lambda_dk[k1, constraint].T.copy()
@dynamics_indexing_decorator('cu')
def lambda_du(self, constraint=None, u1=None):
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS_DERIV1)
return self._lambda_du[u1, constraint].T.copy()
@dynamics_indexing_decorator('cqq')
def lambda_dqdq(self, constraint=None, q1=None, q2=None):
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS_DERIV2)
return self._lambda_dqdq[q1, q2, constraint].copy()
@dynamics_indexing_decorator('cqq')
def lambda_ddqdq(self, constraint=None, dq1=None, q2=None):
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS_DERIV2)
return self._lambda_ddqdq[dq1, q2, constraint].copy()
@dynamics_indexing_decorator('cqq')
def lambda_ddqddq(self, constraint=None, dq1=None, dq2=None):
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS_DERIV2)
return self._lambda_ddqddq[dq1, dq2, constraint].copy()
@dynamics_indexing_decorator('ckq')
def lambda_dddkdq(self, constraint=None, k1=None, q2=None):
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS_DERIV2)
return self._lambda_dkdq[k1, q2, constraint].copy()
@dynamics_indexing_decorator('cuq')
def lambda_dudq(self, constraint=None, u1=None, q2=None):
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS_DERIV2)
return self._lambda_dudq[u1, q2, constraint].copy()
@dynamics_indexing_decorator('cuq')
def lambda_duddq(self, constraint=None, u1=None, dq2=None):
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS_DERIV2)
return self._lambda_duddq[u1, dq2, constraint].copy()
@dynamics_indexing_decorator('cuu')
def lambda_dudu(self, constraint=None, u1=None, u2=None):
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS_DERIV2)
return self._lambda_dudu[u1, u2, constraint].copy()
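# Hedged call sketch for the cached dynamics accessors above (all calls
# assume the state has already been set; names match the methods defined
# in this class):
#   system.f()         # accelerations of all dynamic configs
#   system.f_dq()      # first derivative wrt every configuration value
#   system.lambda_()   # constraint forces
# Each accessor triggers _update_cache() at the derivative level it needs.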
def test_derivative_dq(self, func, func_dq, delta=1e-6, tolerance=1e-7,
verbose=False, test_name='<unnamed>'):
"""
Test the derivative of a function with respect to a
configuration variable value against its numerical
approximation.
func -> Callable taking no arguments and returning float or np.array
func_dq -> Callable taking one configuration variable argument
and returning a float or np.array.
delta -> perturbation to the current configuration to
   calculate the numeric approximation.
tolerance -> acceptable difference between the approximation
   and exact value. (|exact - approx| <= tolerance)
verbose -> Boolean indicating if a message should be printed for failures.
test_name -> String identifier to print out when reporting messages
   when verbose is true.
Returns False if any tests fail and True otherwise.
"""
q0 = self.q
tests_total = 0
tests_failed = 0
for q in self.configs:
self.q = q0
dy_exact = func_dq(q)
delta_q = q0.copy()
delta_q[q.index] -= delta
self.q = delta_q
y0 = func()
delta_q = q0.copy()
delta_q[q.index] += delta
self.q = delta_q
y1 = func()
dy_approx = (y1 - y0)/(2*delta)
error = np.linalg.norm(dy_exact - dy_approx)
tests_total += 1
if math.isnan(error) or error > tolerance:
tests_failed += 1
if verbose:
print "Test '%s' failed for dq derivative of '%s'." % (test_name, q)
print " Error: %f > %f" % (error, tolerance)
print " Approx dy: %s" % dy_approx
print " Exact dy: %s" % dy_exact
if verbose:
if tests_failed == 0:
print "%d tests passing." % tests_total
else:
print "%d/%d tests FAILED. <#######" % (tests_failed, tests_total)
# Reset configuration
self.q = q0
return not tests_failed
def test_derivative_ddq(self, func, func_ddq, delta=1e-6, tolerance=1e-7,
verbose=False, test_name='<unnamed>'):
"""
Test the derivative of a function with respect to a
configuration variable's time derivative against its numerical
approximation.
func -> Callable taking no arguments and returning float or np.array
func_ddq -> Callable taking one configuration variable argument
and returning a float or np.array.
delta -> perturbation to the current configuration to
calculate the numeric approximation.
tolerance -> acceptable difference between the approximation
and exact value. (|exact - approx| <= tolerance)
verbose -> Boolean indicating if a message should be printed for failures.
test_name -> String identifier to print out when reporting messages
when verbose is true.
Returns False if any tests fail and True otherwise.
"""
dq0 = self.dq
tests_total = 0
tests_failed = 0
for q in self.configs:
self.dq = dq0
dy_exact = func_ddq(q)
delta_dq = dq0.copy()
delta_dq[q.index] -= delta
self.dq = delta_dq
y0 = func()
delta_dq = dq0.copy()
delta_dq[q.index] += delta
self.dq = delta_dq
y1 = func()
dy_approx = (y1 - y0)/(2*delta)
error = np.linalg.norm(dy_exact - dy_approx)
tests_total += 1
if math.isnan(error) or error > tolerance:
tests_failed += 1
if verbose:
print "Test '%s' failed for dq derivative of '%s'." % (test_name, q)
print " Error: %f > %f" % (error, tolerance)
print " Approx dy: %f" % dy_approx
print " Exact dy: %f" % dy_exact
if verbose:
if tests_failed == 0:
print "%d tests passing." % tests_total
else:
print "%d/%d tests FAILED. <#######" % (tests_failed, tests_total)
# Reset velocity
self.dq = dq0
return not tests_failed
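# Hedged usage sketch (not part of the original API): run the
# central-difference checks above against the Lagrangian and its first
# derivatives; `system` is assumed to be a fully constructed System.
def _check_lagrangian_derivatives(system, verbose=True):
    ok_dq = system.test_derivative_dq(system.L, system.L_dq,
                                      verbose=verbose, test_name='L_dq')
    ok_ddq = system.test_derivative_ddq(system.L, system.L_ddq,
                                        verbose=verbose, test_name='L_ddq')
    return ok_dq and ok_ddq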
# Suppressing a scipy.io.savemat warning.
import warnings
warnings.simplefilter("ignore", FutureWarning)
def save_trajectory(filename, system, t, Q=None, p=None, v=None, u=None, rho=None):
# Save a trajectory to a MATLAB file. t is a 1D numpy array;
# Q, p, v, u, and rho are expected to be numpy arrays of the
# appropriate dimensions or None.
t = np.array(t)
data = { 'time' : np.array(t) }
if Q is not None: data['Q'] = np.array(Q)
if p is not None: data['p'] = np.array(p)
if v is not None: data['v'] = np.array(v)
if u is not None: data['u'] = np.array(u)
if rho is not None: data['rho'] = np.array(rho)
# Build indices - Convert to cells so they are well behaved in matlab
data['Q_index'] = np.array([q.name for q in system.configs], dtype=np.object)
data['p_index'] = np.array([q.name for q in system.dyn_configs], dtype=np.object)
data['v_index'] = np.array([q.name for q in system.kin_configs], dtype=np.object)
data['u_index'] = np.array([u.name for u in system.inputs], dtype=np.object)
data['rho_index'] = np.array([q.name for q in system.kin_configs], dtype=np.object)
sp.io.savemat(filename, data)
def load_trajectory(filename, system=None):
data = sp.io.loadmat(filename)
# Load time as a 1D array
t = data['time'].squeeze()
Q_in = data.get('Q', None)
p_in = data.get('p', None)
v_in = data.get('v', None)
u_in = data.get('u', None)
rho_in = data.get('rho', None)
Q_index = [str(s[0]).strip() for s in data['Q_index'].ravel()]
p_index = [str(s[0]).strip() for s in data['p_index'].ravel()]
v_index = [str(s[0]).strip() for s in data['v_index'].ravel()]
u_index = [str(s[0]).strip() for s in data['u_index'].ravel()]
rho_index = [str(s[0]).strip() for s in data['rho_index'].ravel()]
# If no system was given, just return the data as it was along
# with the indices.
if system is None:
return (t,
(Q_index, Q_in),
(p_index, p_in),
(v_index, v_in),
(u_index, u_in),
(rho_index, rho_in))
else:
# If a system was specified, reorganize the data to match the
# system's layout.
if Q_in is not None:
Q = np.zeros((len(t), system.nQ))
for config in system.configs:
if config.name in Q_index:
Q[:,config.index] = Q_in[:, Q_index.index(config.name)]
else:
Q = None
if p_in is not None:
p = np.zeros((len(t), system.nQd))
for config in system.dyn_configs:
if config.name in p_index:
p[:,config.index] = p_in[:, p_index.index(config.name)]
else:
p = None
if v_in is not None:
v = np.zeros((len(t), system.nQk))
for config in system.kin_configs:
if config.name in v_index:
v[:,config.k_index] = v_in[:, v_index.index(config.name)]
else:
v = None
if u_in is not None:
u = np.zeros((len(t)-1, system.nu))
for finput in system.inputs:
if finput.name in u_index:
u[:,finput.index] = u_in[:, u_index.index(finput.name)]
else:
u = None
if rho_in is not None:
rho = np.zeros((len(t)-1, system.nQk))
for config in system.kin_configs:
if config.name in rho_index:
rho[:,config.k_index] = rho_in[:, rho_index.index(config.name)]
else:
rho = None
return t, Q, p, v, u, rho
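# Hedged round-trip sketch (filename and arrays are illustrative): write a
# trajectory out in MATLAB format and read it straight back, reordered to
# match the given system's layout.
def _roundtrip_trajectory(system, t, Q=None, p=None, v=None, u=None, rho=None,
                          filename='trajectory.mat'):
    save_trajectory(filename, system, t, Q=Q, p=p, v=v, u=u, rho=rho)
    return load_trajectory(filename, system)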
|
hilario/trep
|
src/system.py
|
Python
|
gpl-3.0
| 46,931
| 0.00358
|
__all__ = (
'TokenList',
)
import collections.abc
from .errors import TokenTypeError
class TokenList(collections.abc.Sized, collections.abc.Iterable, collections.abc.Container):
def __init__(self, init=None, *, token_type=None):
if token_type is None:
token_type = object
self._token_type = token_type
self._tokens = collections.deque()
if init:
if not hasattr(init, '__iter__'):
raise TypeError("invalid value {!r}: not an iterable".format(init))
for token in init:
self.add(token)
@property
def token_type(self):
return self._token_type
def add(self, token, *, count=1):
if not isinstance(token, self._token_type):
raise TokenTypeError("invalid token {!r}: type is not {}".format(token, self._token_type.__name__))
for i in range(count):
self._tokens.append(token)
def pop(self):
return self._tokens.popleft()
def remove(self, token):
for c, t in enumerate(self._tokens):
if t is token:
break
else:
return
del self._tokens[c]
#self._tokens.remove(token)
def copy(self):
return self.__class__(init=self, token_type=self.token_type)
def __iter__(self):
yield from self._tokens
def __len__(self):
return len(self._tokens)
def clear(self):
self._tokens.clear()
def extend(self, values):
if self._token_type is object:
self._tokens.extend(values)
else:
for value in values:
self.add(value)
def __contains__(self, value):
return value in self._tokens
def __repr__(self):
args = []
if self:
args.append(repr(list(self._tokens)))
if self._token_type is not object:
args.append("token_type={}".format(self._token_type.__name__))
return "{}({})".format(self.__class__.__name__, ', '.join(args))
def __eq__(self, other):
if isinstance(other, TokenList):
if self._token_type != other.token_type:
return False
return self._tokens == other._tokens
else:
if len(self._tokens) != len(other):
return False
for a, b in zip(self._tokens, other):
if a != b:
return False
return True
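if __name__ == '__main__':
    # Minimal demonstration (illustrative; not part of the original module).
    tokens = TokenList(token_type=int)
    tokens.add(3, count=2)            # deque is now [3, 3]
    tokens.extend([5, 7])             # type-checked adds: [3, 3, 5, 7]
    assert tokens.pop() == 3          # FIFO semantics via popleft()
    tokens.remove(7)
    assert list(tokens) == [3, 5]
    print(tokens)                     # TokenList([3, 5], token_type=int)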
|
simone-campagna/petra
|
petra/token_list.py
|
Python
|
apache-2.0
| 2,481
| 0.003628
|
from sqlalchemy.testing import eq_, is_
from sqlalchemy.orm import backref, configure_mappers
from sqlalchemy import testing
from sqlalchemy import desc, select, func, exc
from sqlalchemy.orm import mapper, relationship, create_session, Query, \
attributes, exc as orm_exc, Session
from sqlalchemy.orm.dynamic import AppenderMixin
from sqlalchemy.testing import AssertsCompiledSQL, \
assert_raises_message, assert_raises
from test.orm import _fixtures
from sqlalchemy.testing.assertsql import CompiledSQL
class _DynamicFixture(object):
def _user_address_fixture(self, addresses_args={}):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses': relationship(Address, lazy="dynamic",
**addresses_args)
})
mapper(Address, addresses)
return User, Address
def _order_item_fixture(self, items_args={}):
items, Order, orders, order_items, Item = (self.tables.items,
self.classes.Order,
self.tables.orders,
self.tables.order_items,
self.classes.Item)
mapper(Order, orders, properties={
'items': relationship(Item,
secondary=order_items,
lazy="dynamic",
**items_args
)
})
mapper(Item, items)
return Order, Item
class DynamicTest(_DynamicFixture, _fixtures.FixtureTest, AssertsCompiledSQL):
def test_basic(self):
User, Address = self._user_address_fixture()
sess = create_session()
q = sess.query(User)
eq_([User(id=7,
addresses=[Address(id=1, email_address='jack@bean.com')])],
q.filter(User.id == 7).all())
eq_(self.static.user_address_result, q.all())
def test_statement(self):
"""test that the .statement accessor returns the actual statement that
would render, without any _clones called."""
User, Address = self._user_address_fixture()
sess = create_session()
q = sess.query(User)
u = q.filter(User.id == 7).first()
self.assert_compile(
u.addresses.statement,
"SELECT addresses.id, addresses.user_id, addresses.email_address "
"FROM "
"addresses WHERE :param_1 = addresses.user_id",
use_default_dialect=True
)
def test_detached_raise(self):
User, Address = self._user_address_fixture()
sess = create_session()
u = sess.query(User).get(8)
sess.expunge(u)
assert_raises(
orm_exc.DetachedInstanceError,
u.addresses.filter_by,
email_address='e'
)
def test_no_uselist_false(self):
User, Address = self._user_address_fixture(
addresses_args={"uselist": False})
assert_raises_message(
exc.InvalidRequestError,
"On relationship User.addresses, 'dynamic' loaders cannot be "
"used with many-to-one/one-to-one relationships and/or "
"uselist=False.",
configure_mappers
)
def test_no_m2o(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(Address, addresses, properties={
'user': relationship(User, lazy='dynamic')
})
mapper(User, users)
assert_raises_message(
exc.InvalidRequestError,
"On relationship Address.user, 'dynamic' loaders cannot be "
"used with many-to-one/one-to-one relationships and/or "
"uselist=False.",
configure_mappers
)
def test_order_by(self):
User, Address = self._user_address_fixture()
sess = create_session()
u = sess.query(User).get(8)
eq_(
list(u.addresses.order_by(desc(Address.email_address))),
[
Address(email_address='ed@wood.com'),
Address(email_address='ed@lala.com'),
Address(email_address='ed@bettyboop.com')
]
)
def test_configured_order_by(self):
addresses = self.tables.addresses
User, Address = self._user_address_fixture(
addresses_args={
"order_by":
addresses.c.email_address.desc()})
sess = create_session()
u = sess.query(User).get(8)
eq_(
list(u.addresses),
[
Address(email_address='ed@wood.com'),
Address(email_address='ed@lala.com'),
Address(email_address='ed@bettyboop.com')
]
)
# test cancellation of None, replacement with something else
eq_(
list(u.addresses.order_by(None).order_by(Address.email_address)),
[
Address(email_address='ed@bettyboop.com'),
Address(email_address='ed@lala.com'),
Address(email_address='ed@wood.com')
]
)
# test cancellation of None, replacement with nothing
eq_(
set(u.addresses.order_by(None)),
set([
Address(email_address='ed@bettyboop.com'),
Address(email_address='ed@lala.com'),
Address(email_address='ed@wood.com')
])
)
def test_count(self):
User, Address = self._user_address_fixture()
sess = create_session()
u = sess.query(User).first()
eq_(u.addresses.count(), 1)
def test_dynamic_on_backref(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(Address, addresses, properties={
'user': relationship(User,
backref=backref('addresses', lazy='dynamic'))
})
mapper(User, users)
sess = create_session()
ad = sess.query(Address).get(1)
def go():
ad.user = None
self.assert_sql_count(testing.db, go, 0)
sess.flush()
u = sess.query(User).get(7)
assert ad not in u.addresses
def test_no_count(self):
User, Address = self._user_address_fixture()
sess = create_session()
q = sess.query(User)
# dynamic collection cannot implement __len__() (at least one that
# returns a live database result), else additional count() queries are
# issued when evaluating in a list context
def go():
eq_(
q.filter(User.id == 7).all(),
[
User(id=7,
addresses=[
Address(id=1, email_address='jack@bean.com')
])
]
)
self.assert_sql_count(testing.db, go, 2)
def test_no_populate(self):
User, Address = self._user_address_fixture()
u1 = User()
assert_raises_message(
NotImplementedError,
"Dynamic attributes don't support collection population.",
attributes.set_committed_value, u1, 'addresses', []
)
def test_m2m(self):
Order, Item = self._order_item_fixture(items_args={
"backref": backref("orders", lazy="dynamic")
})
sess = create_session()
o1 = Order(id=15, description="order 10")
i1 = Item(id=10, description="item 8")
o1.items.append(i1)
sess.add(o1)
sess.flush()
assert o1 in i1.orders.all()
assert i1 in o1.items.all()
@testing.exclude('mysql', 'between',
((5, 1, 49), (5, 1, 52)),
'https://bugs.launchpad.net/ubuntu/+source/mysql-5.1/+bug/706988')
def test_association_nonaliased(self):
items, Order, orders, order_items, Item = (self.tables.items,
self.classes.Order,
self.tables.orders,
self.tables.order_items,
self.classes.Item)
mapper(Order, orders, properties={
'items': relationship(Item,
secondary=order_items,
lazy="dynamic",
order_by=order_items.c.item_id)
})
mapper(Item, items)
sess = create_session()
o = sess.query(Order).first()
self.assert_compile(
o.items,
"SELECT items.id AS items_id, items.description AS "
"items_description FROM items,"
" order_items WHERE :param_1 = order_items.order_id AND "
"items.id = order_items.item_id"
" ORDER BY order_items.item_id",
use_default_dialect=True
)
# filter criterion against the secondary table
# works
eq_(
o.items.filter(order_items.c.item_id == 2).all(),
[Item(id=2)]
)
def test_transient_count(self):
User, Address = self._user_address_fixture()
u1 = User()
u1.addresses.append(Address())
eq_(u1.addresses.count(), 1)
def test_transient_access(self):
User, Address = self._user_address_fixture()
u1 = User()
u1.addresses.append(Address())
eq_(u1.addresses[0], Address())
def test_custom_query(self):
class MyQuery(Query):
pass
User, Address = self._user_address_fixture(
addresses_args={"query_class": MyQuery})
sess = create_session()
u = User()
sess.add(u)
col = u.addresses
assert isinstance(col, Query)
assert isinstance(col, MyQuery)
assert hasattr(col, 'append')
eq_(type(col).__name__, 'AppenderMyQuery')
q = col.limit(1)
assert isinstance(q, Query)
assert isinstance(q, MyQuery)
assert not hasattr(q, 'append')
eq_(type(q).__name__, 'MyQuery')
def test_custom_query_with_custom_mixin(self):
class MyAppenderMixin(AppenderMixin):
def add(self, items):
if isinstance(items, list):
for item in items:
self.append(item)
else:
self.append(items)
class MyQuery(Query):
pass
class MyAppenderQuery(MyAppenderMixin, MyQuery):
query_class = MyQuery
User, Address = self._user_address_fixture(
addresses_args={
"query_class": MyAppenderQuery})
sess = create_session()
u = User()
sess.add(u)
col = u.addresses
assert isinstance(col, Query)
assert isinstance(col, MyQuery)
assert hasattr(col, 'append')
assert hasattr(col, 'add')
eq_(type(col).__name__, 'MyAppenderQuery')
q = col.limit(1)
assert isinstance(q, Query)
assert isinstance(q, MyQuery)
assert not hasattr(q, 'append')
assert not hasattr(q, 'add')
eq_(type(q).__name__, 'MyQuery')
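# Hedged sketch of the query_class customization exercised above
# (mappings are illustrative; see the tests for the real assertions):
#
#   class MyQuery(Query):
#       pass
#   relationship(Address, lazy='dynamic', query_class=MyQuery)
#   # -> collection access yields a generated 'AppenderMyQuery' mixing
#   #    AppenderMixin into MyQuery; .limit()/.filter() return plain MyQuery.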
class UOWTest(_DynamicFixture, _fixtures.FixtureTest,
testing.AssertsExecutionResults):
run_inserts = None
def test_persistence(self):
addresses = self.tables.addresses
User, Address = self._user_address_fixture()
sess = create_session()
u1 = User(name='jack')
a1 = Address(email_address='foo')
sess.add_all([u1, a1])
sess.flush()
eq_(
testing.db.scalar(
select([func.count(1)]).where(addresses.c.user_id != None)
),
0
)
u1 = sess.query(User).get(u1.id)
u1.addresses.append(a1)
sess.flush()
eq_(
testing.db.execute(
select([addresses]).where(addresses.c.user_id != None)
).fetchall(),
[(a1.id, u1.id, 'foo')]
)
u1.addresses.remove(a1)
sess.flush()
eq_(
testing.db.scalar(
select([func.count(1)]).where(addresses.c.user_id != None)
),
0
)
u1.addresses.append(a1)
sess.flush()
eq_(
testing.db.execute(
select([addresses]).where(addresses.c.user_id != None)
).fetchall(),
[(a1.id, u1.id, 'foo')]
)
a2 = Address(email_address='bar')
u1.addresses.remove(a1)
u1.addresses.append(a2)
sess.flush()
eq_(
testing.db.execute(
select([addresses]).where(addresses.c.user_id != None)
).fetchall(),
[(a2.id, u1.id, 'bar')]
)
def test_merge(self):
addresses = self.tables.addresses
User, Address = self._user_address_fixture(
addresses_args={
"order_by": addresses.c.email_address})
sess = create_session()
u1 = User(name='jack')
a1 = Address(email_address='a1')
a2 = Address(email_address='a2')
a3 = Address(email_address='a3')
u1.addresses.append(a2)
u1.addresses.append(a3)
sess.add_all([u1, a1])
sess.flush()
u1 = User(id=u1.id, name='jack')
u1.addresses.append(a1)
u1.addresses.append(a3)
u1 = sess.merge(u1)
eq_(attributes.get_history(u1, 'addresses'), (
[a1],
[a3],
[a2]
))
sess.flush()
eq_(
list(u1.addresses),
[a1, a3]
)
def test_hasattr(self):
User, Address = self._user_address_fixture()
u1 = User(name='jack')
assert 'addresses' not in u1.__dict__
u1.addresses = [Address(email_address='test')]
assert 'addresses' in u1.__dict__
def test_collection_set(self):
addresses = self.tables.addresses
User, Address = self._user_address_fixture(
addresses_args={
"order_by": addresses.c.email_address})
sess = create_session(autoflush=True, autocommit=False)
u1 = User(name='jack')
a1 = Address(email_address='a1')
a2 = Address(email_address='a2')
a3 = Address(email_address='a3')
a4 = Address(email_address='a4')
sess.add(u1)
u1.addresses = [a1, a3]
eq_(list(u1.addresses), [a1, a3])
u1.addresses = [a1, a2, a4]
eq_(list(u1.addresses), [a1, a2, a4])
u1.addresses = [a2, a3]
eq_(list(u1.addresses), [a2, a3])
u1.addresses = []
eq_(list(u1.addresses), [])
def test_noload_append(self):
# test that a load of User.addresses is not emitted
# when flushing an append
User, Address = self._user_address_fixture()
sess = Session()
u1 = User(name="jack", addresses=[Address(email_address="a1")])
sess.add(u1)
sess.commit()
u1_id = u1.id
sess.expire_all()
u1.addresses.append(Address(email_address='a2'))
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users WHERE users.id = :param_1",
lambda ctx: [{"param_1": u1_id}]),
CompiledSQL(
"INSERT INTO addresses (user_id, email_address) "
"VALUES (:user_id, :email_address)",
lambda ctx: [{'email_address': 'a2', 'user_id': u1_id}]
)
)
def test_noload_remove(self):
# test that a load of User.addresses is not emitted
# when flushing a remove
User, Address = self._user_address_fixture()
sess = Session()
u1 = User(name="jack", addresses=[Address(email_address="a1")])
a2 = Address(email_address='a2')
u1.addresses.append(a2)
sess.add(u1)
sess.commit()
u1_id = u1.id
a2_id = a2.id
sess.expire_all()
u1.addresses.remove(a2)
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users WHERE users.id = :param_1",
lambda ctx: [{"param_1": u1_id}]),
CompiledSQL(
"SELECT addresses.id AS addresses_id, addresses.email_address "
"AS addresses_email_address FROM addresses "
"WHERE addresses.id = :param_1",
lambda ctx: [{'param_1': a2_id}]
),
CompiledSQL(
"UPDATE addresses SET user_id=:user_id WHERE addresses.id = "
":addresses_id",
lambda ctx: [{'addresses_id': a2_id, 'user_id': None}]
)
)
def test_rollback(self):
User, Address = self._user_address_fixture()
sess = create_session(
expire_on_commit=False, autocommit=False, autoflush=True)
u1 = User(name='jack')
u1.addresses.append(Address(email_address='lala@hoho.com'))
sess.add(u1)
sess.flush()
sess.commit()
u1.addresses.append(Address(email_address='foo@bar.com'))
eq_(
u1.addresses.order_by(Address.id).all(),
[
Address(email_address='lala@hoho.com'),
Address(email_address='foo@bar.com')
]
)
sess.rollback()
eq_(
u1.addresses.all(),
[Address(email_address='lala@hoho.com')]
)
def _test_delete_cascade(self, expected):
addresses = self.tables.addresses
User, Address = self._user_address_fixture(addresses_args={
"order_by": addresses.c.id,
"backref": "user",
"cascade": "save-update" if expected \
else "all, delete"
})
sess = create_session(autoflush=True, autocommit=False)
u = User(name='ed')
u.addresses.extend(
[Address(email_address=letter) for letter in 'abcdef']
)
sess.add(u)
sess.commit()
eq_(testing.db.scalar(
addresses.count(addresses.c.user_id == None)), 0)
eq_(testing.db.scalar(
addresses.count(addresses.c.user_id != None)), 6)
sess.delete(u)
sess.commit()
if expected:
eq_(testing.db.scalar(
addresses.count(addresses.c.user_id == None)), 6)
eq_(testing.db.scalar(
addresses.count(addresses.c.user_id != None)), 0)
else:
eq_(testing.db.scalar(addresses.count()), 0)
def test_delete_nocascade(self):
self._test_delete_cascade(True)
def test_delete_cascade(self):
self._test_delete_cascade(False)
def test_remove_orphans(self):
addresses = self.tables.addresses
User, Address = self._user_address_fixture(addresses_args={
"order_by": addresses.c.id,
"backref": "user",
"cascade": "all, delete-orphan"
})
sess = create_session(autoflush=True, autocommit=False)
u = User(name='ed')
u.addresses.extend(
[Address(email_address=letter) for letter in 'abcdef']
)
sess.add(u)
for a in u.addresses.filter(
Address.email_address.in_(['c', 'e', 'f'])):
u.addresses.remove(a)
eq_(
set(ad for ad, in sess.query(Address.email_address)),
set(['a', 'b', 'd'])
)
def _backref_test(self, autoflush, saveuser):
User, Address = self._user_address_fixture(addresses_args={
"backref": "user",
})
sess = create_session(autoflush=autoflush, autocommit=False)
u = User(name='buffy')
a = Address(email_address='foo@bar.com')
a.user = u
if saveuser:
sess.add(u)
else:
sess.add(a)
if not autoflush:
sess.flush()
assert u in sess
assert a in sess
eq_(list(u.addresses), [a])
a.user = None
if not autoflush:
eq_(list(u.addresses), [a])
if not autoflush:
sess.flush()
eq_(list(u.addresses), [])
def test_backref_autoflush_saveuser(self):
self._backref_test(True, True)
def test_backref_autoflush_savead(self):
self._backref_test(True, False)
def test_backref_saveuser(self):
self._backref_test(False, True)
def test_backref_savead(self):
self._backref_test(False, False)
def test_backref_events(self):
User, Address = self._user_address_fixture(addresses_args={
"backref": "user",
})
u1 = User()
a1 = Address()
u1.addresses.append(a1)
is_(a1.user, u1)
def test_no_deref(self):
User, Address = self._user_address_fixture(addresses_args={
"backref": "user",
})
session = create_session()
user = User()
user.name = 'joe'
user.fullname = 'Joe User'
user.password = 'Joe\'s secret'
address = Address()
address.email_address = 'joe@joesdomain.example'
address.user = user
session.add(user)
session.flush()
session.expunge_all()
def query1():
session = create_session(testing.db)
user = session.query(User).first()
return user.addresses.all()
def query2():
session = create_session(testing.db)
return session.query(User).first().addresses.all()
def query3():
session = create_session(testing.db)
user = session.query(User).first()
return session.query(User).first().addresses.all()
eq_(query1(), [Address(email_address='joe@joesdomain.example')])
eq_(query2(), [Address(email_address='joe@joesdomain.example')])
eq_(query3(), [Address(email_address='joe@joesdomain.example')])
class HistoryTest(_DynamicFixture, _fixtures.FixtureTest):
run_inserts = None
def _transient_fixture(self, addresses_args={}):
User, Address = self._user_address_fixture(
addresses_args=addresses_args)
u1 = User()
a1 = Address()
return u1, a1
def _persistent_fixture(self, autoflush=True, addresses_args={}):
User, Address = self._user_address_fixture(
addresses_args=addresses_args)
u1 = User(name='u1')
a1 = Address(email_address='a1')
s = Session(autoflush=autoflush)
s.add(u1)
s.flush()
return u1, a1, s
def _persistent_m2m_fixture(self, autoflush=True, items_args={}):
Order, Item = self._order_item_fixture(items_args=items_args)
o1 = Order()
i1 = Item(description="i1")
s = Session(autoflush=autoflush)
s.add(o1)
s.flush()
return o1, i1, s
def _assert_history(self, obj, compare, compare_passive=None):
if isinstance(obj, self.classes.User):
attrname = "addresses"
elif isinstance(obj, self.classes.Order):
attrname = "items"
eq_(
attributes.get_history(obj, attrname),
compare
)
if compare_passive is None:
compare_passive = compare
eq_(
attributes.get_history(obj, attrname,
attributes.LOAD_AGAINST_COMMITTED),
compare_passive
)
def test_append_transient(self):
u1, a1 = self._transient_fixture()
u1.addresses.append(a1)
self._assert_history(u1,
([a1], [], [])
)
def test_append_persistent(self):
u1, a1, s = self._persistent_fixture()
u1.addresses.append(a1)
self._assert_history(u1,
([a1], [], [])
)
def test_remove_transient(self):
u1, a1 = self._transient_fixture()
u1.addresses.append(a1)
u1.addresses.remove(a1)
self._assert_history(u1,
([], [], [])
)
def test_backref_pop_transient(self):
u1, a1 = self._transient_fixture(addresses_args={"backref": "user"})
u1.addresses.append(a1)
self._assert_history(u1,
([a1], [], []),
)
a1.user = None
# removed from added
self._assert_history(u1,
([], [], []),
)
def test_remove_persistent(self):
u1, a1, s = self._persistent_fixture()
u1.addresses.append(a1)
s.flush()
s.expire_all()
u1.addresses.remove(a1)
self._assert_history(u1,
([], [], [a1])
)
def test_backref_pop_persistent_autoflush_o2m_active_hist(self):
u1, a1, s = self._persistent_fixture(
addresses_args={"backref":
backref("user", active_history=True)})
u1.addresses.append(a1)
s.flush()
s.expire_all()
a1.user = None
self._assert_history(u1,
([], [], [a1]),
)
def test_backref_pop_persistent_autoflush_m2m(self):
o1, i1, s = self._persistent_m2m_fixture(
items_args={"backref": "orders"})
o1.items.append(i1)
s.flush()
s.expire_all()
i1.orders.remove(o1)
self._assert_history(o1,
([], [], [i1]),
)
def test_backref_pop_persistent_noflush_m2m(self):
o1, i1, s = self._persistent_m2m_fixture(
items_args={"backref": "orders"}, autoflush=False)
o1.items.append(i1)
s.flush()
s.expire_all()
i1.orders.remove(o1)
self._assert_history(o1,
([], [], [i1]),
)
def test_unchanged_persistent(self):
Address = self.classes.Address
u1, a1, s = self._persistent_fixture()
a2, a3 = Address(email_address='a2'), Address(email_address='a3')
u1.addresses.append(a1)
u1.addresses.append(a2)
s.flush()
u1.addresses.append(a3)
u1.addresses.remove(a2)
self._assert_history(u1,
([a3], [a1], [a2]),
compare_passive=([a3], [], [a2])
)
def test_replace_transient(self):
Address = self.classes.Address
u1, a1 = self._transient_fixture()
a2, a3, a4, a5 = Address(email_address='a2'), \
Address(email_address='a3'), \
Address(email_address='a4'), \
Address(email_address='a5')
u1.addresses = [a1, a2]
u1.addresses = [a2, a3, a4, a5]
self._assert_history(u1,
([a2, a3, a4, a5], [], [])
)
def test_replace_persistent_noflush(self):
Address = self.classes.Address
u1, a1, s = self._persistent_fixture(autoflush=False)
a2, a3, a4, a5 = Address(email_address='a2'), \
Address(email_address='a3'), \
Address(email_address='a4'), \
Address(email_address='a5')
u1.addresses = [a1, a2]
u1.addresses = [a2, a3, a4, a5]
self._assert_history(u1,
([a2, a3, a4, a5], [], [])
)
def test_replace_persistent_autoflush(self):
Address = self.classes.Address
u1, a1, s = self._persistent_fixture(autoflush=True)
a2, a3, a4, a5 = Address(email_address='a2'), \
Address(email_address='a3'), \
Address(email_address='a4'), \
Address(email_address='a5')
u1.addresses = [a1, a2]
u1.addresses = [a2, a3, a4, a5]
self._assert_history(u1,
([a3, a4, a5], [a2], [a1]),
compare_passive=([a3, a4, a5], [], [a1])
)
def test_persistent_but_readded_noflush(self):
u1, a1, s = self._persistent_fixture(autoflush=False)
u1.addresses.append(a1)
s.flush()
u1.addresses.append(a1)
self._assert_history(u1,
([], [a1], []),
compare_passive=([a1], [], [])
)
def test_persistent_but_readded_autoflush(self):
u1, a1, s = self._persistent_fixture(autoflush=True)
u1.addresses.append(a1)
s.flush()
u1.addresses.append(a1)
self._assert_history(u1,
([], [a1], []),
compare_passive=([a1], [], [])
)
def test_missing_but_removed_noflush(self):
u1, a1, s = self._persistent_fixture(autoflush=False)
u1.addresses.remove(a1)
self._assert_history(u1,
([], [], []),
compare_passive=([], [], [a1])
)
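# Hedged recap of the lazy="dynamic" pattern exercised throughout this file
# (mappings and values are illustrative; see the fixtures for the real setup):
#
#   mapper(User, users, properties={
#       'addresses': relationship(Address, lazy="dynamic"),
#   })
#   u = create_session().query(User).get(8)
#   u.addresses                                      # AppenderQuery; no SQL yet
#   u.addresses.filter_by(email_address='e').all()   # criteria compose
#   u.addresses.append(Address(email_address='x'))   # mutation tracked in history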
|
mitsuhiko/sqlalchemy
|
test/orm/test_dynamic.py
|
Python
|
mit
| 30,088
| 0.002526
|
from django.shortcuts import render
from .models import Document, Corpus
from django.http import JsonResponse
from django.conf import settings
import json
import os
import re
import nltk
from nltk.corpus import *
from nltk.collocations import *
import string
import logging
logger = logging.getLogger('nltksite.nltkapp')
# Create your views here.
# this is horrible
def clearencoding(str):
try:
json.dumps(str)
if len(str) == 1 and ord(str) > 128:
logger.warn("Unicode Error on str='%s' code=%s Skipping" % (repr(str), ord(str)))
str = ""
except UnicodeDecodeError:
logger.warn("Unicode Error on str='%s' code=%s Skipping" % (str, repr(str)))
str = str.decode('utf8', 'ignore')
return str
def index(request):
logger.debug("index requested.")
corpora = Corpus.objects.all()
context = {'corpora': corpora}
return render(request, 'nltkapp/index.html', context)
def sayhello(request):
logger.debug("say hello.")
return JsonResponse({'message': 'Hello World'})
def getdocuments(request):
corpus_id = request.GET.get('corpus_id', None)
c = Corpus.objects.get(pk=corpus_id)
logger.debug("Getting list of documents for corpus %s (id=%s)" % (c.name,corpus_id))
documents = c.document_set.all()
documents_list = []
for d in documents:
documents_list.append({'id': d.id, 'name': d.file.name})
return JsonResponse({'documents': documents_list})
def get_sentences(request):
corpus_id = request.GET.get('corpus_id', None)
document_ids = json.loads(request.GET.get('document_ids', None))
word = request.GET.get('word', None)
logger.debug("corpus_id=%s, document_ids=%s, word=%s" % (corpus_id, str(document_ids), word))
finalResult = {}
corpus, internal_filter = open_corpus(corpus_id, document_ids)
# \b is a regex word boundary, so we get "government" but not "governmentally";
# re.escape guards against regex metacharacters in the user-supplied word.
pattern = "\\b" + re.escape(word) + "\\b"
# Chosen corpus is an nltk internal corpus (gutenberg, bible, inaugural addresses, etc...).
# We treat those slightly differently than user-mode corpora
fileids = []
if internal_filter:
fileids = [internal_filter]
else:
# Get array of fileids used by the NLTK corpus object from our own document ids
fileids = corpus.fileids()
logger.debug("fileids=%s", fileids)
for fileid in fileids:
if fileid in corpus.fileids():
sents = corpus.sents(fileid)
results = []
for sentence in sents:
combined = clearencoding(' '.join(sentence))
if re.search(pattern, combined):
results.append(combined)
if len(results) > 0:
finalResult[fileid] = results
# wdmatrix is a word-document matrix. finalResult['facebook.txt'] = [sentences]
return JsonResponse({'word': word, 'wdmatrix':finalResult})
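# Hedged note on the word-boundary pattern above: with word='government' the
# regex matches "the government said" but not "governmentally", so only
# exact-word hits are collected into the word-document matrix.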
def wordfreq(request):
corpus_id = request.GET.get('corpus_id', None)
document_ids = json.loads(request.GET.get('document_ids', None))
ngram = request.GET.get('ngram', None)
scoring_method = request.GET.get('scoring_method', None)
logger.debug("corpus_id=%s, document_ids=%s, ngram=%s, scoring_method=%s" % (corpus_id, str(document_ids), ngram, scoring_method))
corpus, internal_filter = open_corpus(corpus_id, document_ids)
if not internal_filter:
words = corpus.words()
else:
words = corpus.words(internal_filter)
logger.debug("PlaintextCorpusReader on files: %s" % corpus.fileids())
if ngram == "1":
return onegram_collocation(words)
elif ngram == "2":
first_word_list, fdist = bigram_collocation(words, scoring_method)
elif ngram == "3":
first_word_list, fdist = trigram_collocation(words, scoring_method)
else:
    logger.debug("Invalid ngram value specified. " + ngram)
    return JsonResponse({'error': 'invalid ngram value: %s' % ngram})
word_list = []
for b in first_word_list:
    # fdist is keyed by the same ngram tuples the finder returns, so a
    # direct membership lookup replaces the original linear scan and
    # avoids appending a stale entry when an ngram is missing.
    if b in fdist:
        word_list.append({'word': clearencoding(' '.join(b)),
                          'weight': fdist[b],
                          'exclude': 0, 'exclude_reason': ''})
return JsonResponse({'list':word_list})
def onegram_collocation(words):
fdist = nltk.FreqDist(words)
unusual_list = unusual_words(words)
word_list = []
for sample in fdist:
contains_punctuation = False
all_punctuation = True
for c in sample:
if c in string.punctuation:
contains_punctuation = True
else:
all_punctuation = False
# If word contains punctuation OR occurs less than 3 times OR is a stop word, SKIP IT
if (contains_punctuation or fdist[sample] < 3 or sample in stopwords.words('english')):
continue
if (clearencoding(sample.lower()) in unusual_list):
unusual = True
else:
unusual = False
if (len(clearencoding(sample)) > 0):
word_list.append({'word': clearencoding(sample), 'weight': fdist[sample], 'exclude': 0, 'exclude_reason': '', 'unusual': unusual})
return JsonResponse({'list':word_list})
def bigram_collocation(words, score):
ignored_words = stopwords.words('english')
bigrams = nltk.bigrams(words)
fdist = nltk.FreqDist(bigrams)
bigram_measures = nltk.collocations.BigramAssocMeasures()
finder = BigramCollocationFinder.from_words(words)
# Only select bigrams that appear at least 3 times
finder.apply_freq_filter(3)
finder.apply_word_filter(lambda w: len(w) < 3 or w.lower() in ignored_words)
# Default to PMI; finder.nbest() below returns the 100 highest-scoring
# bigrams for whichever measure is selected.
method = bigram_measures.pmi
if "student_t" in score:
method = bigram_measures.student_t
elif "chi_sq" in score:
method = bigram_measures.chi_sq
elif "pmi" in score:
method = bigram_measures.pmi
elif "likelihood_ratio" in score:
method = bigram_measures.likelihood_ratio
elif "poisson_stirling" in score:
method = bigram_measures.poisson_stirling
elif "jaccard" in score:
method = bigram_measures.jaccard
word_list = finder.nbest(method, 100)
return [word_list, fdist]
def trigram_collocation(words, score):
ignored_words = stopwords.words('english')
trigrams = nltk.trigrams(words)
fdist = nltk.FreqDist(trigrams)
trigram_measures = nltk.collocations.TrigramAssocMeasures()
finder = TrigramCollocationFinder.from_words(words)
#finder.apply_freq_filter(3)
finder.apply_word_filter(lambda w: len(w) < 3 or w.lower() in ignored_words)
method = trigram_measures.pmi
if "student_t" in score:
method = trigram_measures.student_t
elif "chi_sq" in score:
method = trigram_measures.chi_sq
elif "pmi" in score:
method = trigram_measures.pmi
elif "likelihood_ratio" in score:
method = trigram_measures.likelihood_ratio
elif "poisson_stirling" in score:
method = trigram_measures.poisson_stirling
elif "jaccard" in score:
method = trigram_measures.jaccard
word_list = finder.nbest(method, 100)
return [word_list, fdist]
# Given an array of words, connect to wordnet and return the part of speech, definition, etc...
def wordnet_data(request):
words = json.loads(request.GET.get('words', None))
logger.debug("wordnet_data words=%s" % str(words))
results = []
for w in words:
syns = wordnet.synsets(w)
if len(syns) > 0:
root_word = syns[0].lemmas()[0].name()
pos = syns[0].pos()
definition = syns[0].definition()
synonyms = ''
for syn in syns:
if (syn.lemmas()[0].name() != root_word):
synonyms += syn.lemmas()[0].name() + ', '
examples = syns[0].examples()
results.append({'word': w,
'root': root_word,
'pos': pos,
'definition': definition,
'synonyms': synonyms[:-2],
'examples': examples
})
else:
results.append({'word': w,
'root': 'undefined',
'pos': 'undefined',
'definition': 'undefined',
'synonyms': 'undefined',
'examples': 'undefined'
})
return JsonResponse({'results': results})
def unusual_words(text):
text_vocab = set(w.lower() for w in text if w.isalpha())
english_vocab = set(w.lower() for w in nltk.corpus.words.words())
unusual = text_vocab.difference(english_vocab)
return sorted(unusual)
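# Hedged example: unusual_words() flags alphabetic tokens missing from
# nltk.corpus.words, so invented terms (e.g. 'frobnicator') are returned;
# because that wordlist is uninflected, plurals and verb forms are often
# flagged as unusual as well.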
def open_corpus(corpus_id, document_ids):
c = Corpus.objects.get(pk=corpus_id)
if c.internal_nltk_name:
return eval(c.internal_nltk_name), c.internal_nltk_filter
fileids = []
for d in document_ids:
d = int(d)
# we want entire corpus
if (d == -1):
fileids = '.*\.txt'
break
document = Document.objects.get(pk=d)
fileids.append(os.path.basename(document.file.name))
# Kareem March 5, 2015: pass an explicit encoding so NLTK does not assume
# utf8, which raised UnicodeDecodeErrors. With "latin-1" every byte decodes;
# any remaining oddities are handled by clearencoding() above.
return PlaintextCorpusReader(c.get_path(), fileids, encoding="latin-1"), ""
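# Hedged request sketch for the views above (ids, URL prefix, and output are
# illustrative):
#
#   GET .../wordfreq?corpus_id=1&document_ids=[-1]&ngram=2&scoring_method=pmi
#   -> {"list": [{"word": "prime minister", "weight": 12,
#                 "exclude": 0, "exclude_reason": ""}, ...]}
#
# A document_ids list of [-1] selects the whole corpus via the '.*\.txt'
# fileids pattern in open_corpus().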
|
wtamu-cisresearch/nltksite
|
nltkapp/views.py
|
Python
|
mit
| 8,449
| 0.034797
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
class ReferenceContribution:
def __init__(self,src,dest,me,cont):
self.theSource = src
self.theDestination = dest
self.theMeansEnd = me
self.theContribution = cont
def source(self): return self.theSource
def destination(self): return self.theDestination
def meansEnd(self): return self.theMeansEnd
def contribution(self): return self.theContribution
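# Hedged usage sketch (argument values are illustrative):
#   rc = ReferenceContribution('persona reference', 'task reference',
#                              'means', 'Help')
#   rc.source()        # 'persona reference'
#   rc.contribution()  # 'Help'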
|
RobinQuetin/CAIRIS-web
|
cairis/cairis/ReferenceContribution.py
|
Python
|
apache-2.0
| 1,176
| 0.007653
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import math
import mock
import mox
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import units
from cinder.brick.initiator import connector
from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common import log as logging
from cinder import test
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume.drivers import coraid
from cinder.volume import volume_types
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def to_coraid_kb(gb):
return math.ceil(float(gb) * units.Gi / 1000)
def coraid_volume_size(gb):
return '{0}K'.format(to_coraid_kb(gb))
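# Hedged worked example: to_coraid_kb(10) = ceil(10 * 2**30 / 1000)
# = ceil(10737418.24) = 10737419.0, so coraid_volume_size(10) yields
# '10737419.0K' (GiB re-expressed in the ESM's 1000-byte "K" units;
# Python 2 math.ceil returns a float, hence the trailing '.0').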
fake_esm_ipaddress = "192.168.0.1"
fake_esm_username = "darmok"
fake_esm_group = "tanagra"
fake_esm_group_id = 1
fake_esm_password = "12345678"
fake_coraid_repository_key = 'repository_key'
fake_volume_name = "volume-12345678-1234-1234-1234-1234567890ab"
fake_clone_name = "volume-ffffffff-1234-1234-1234-1234567890ab"
fake_volume_size = 10
fake_repository_name = "A-B:C:D"
fake_pool_name = "FakePool"
fake_aoetarget = 4081
fake_shelf = 16
fake_lun = 241
fake_str_aoetarget = str(fake_aoetarget)
fake_lun_addr = {"shelf": fake_shelf, "lun": fake_lun}
fake_volume_type = {'id': 1}
fake_volume = {"id": fake_volume_name,
"name": fake_volume_name,
"size": fake_volume_size,
"volume_type": fake_volume_type}
fake_clone_volume = {"name": fake_clone_name,
"size": fake_volume_size,
"volume_type": fake_volume_type}
fake_big_clone_volume = {"name": fake_clone_name,
"size": fake_volume_size + 1,
"volume_type": fake_volume_type}
fake_volume_info = {"pool": fake_pool_name,
"repo": fake_repository_name,
"vsxidx": fake_aoetarget,
"index": fake_lun,
"shelf": fake_shelf}
fake_lun_info = {"shelf": fake_shelf, "lun": fake_lun}
fake_snapshot_name = "snapshot-12345678-8888-8888-1234-1234567890ab"
fake_snapshot_id = "12345678-8888-8888-1234-1234567890ab"
fake_volume_id = "12345678-1234-1234-1234-1234567890ab"
fake_snapshot = {"id": fake_snapshot_id,
"name": fake_snapshot_name,
"volume_id": fake_volume_id,
"volume_name": fake_volume_name,
"volume_size": int(fake_volume_size) - 1,
"volume": fake_volume}
fake_configure_data = [{"addr": "cms", "data": "FAKE"}]
fake_esm_fetch = [[
{"command": "super_fake_command"},
{"reply": [
{"lv":
{"containingPool": fake_pool_name,
"lunIndex": fake_aoetarget,
"name": fake_volume_name,
"lvStatus":
{"exportedLun":
{"lun": fake_lun,
"shelf": fake_shelf}}
},
"repoName": fake_repository_name}]}]]
fake_esm_fetch_no_volume = [[
{"command": "super_fake_command"},
{"reply": []}]]
fake_esm_success = {"category": "provider",
"tracking": False,
"configState": "completedSuccessfully",
"heldPending": False,
"metaCROp": "noAction",
"message": None}
fake_group_fullpath = "admin group:%s" % (fake_esm_group)
fake_group_id = 4
fake_login_reply = {"values": [
{"fullPath": fake_group_fullpath,
"groupId": fake_group_id}],
"message": "",
"state": "adminSucceed",
"metaCROp": "noAction"}
fake_group_fail_fullpath = "fail group:%s" % (fake_esm_group)
fake_group_fail_id = 5
fake_login_reply_group_fail = {"values": [
{"fullPath": fake_group_fail_fullpath,
"groupId": fake_group_fail_id}],
"message": "",
"state": "adminSucceed",
"metaCROp": "noAction"}
def compare(a, b):
if type(a) != type(b):
return False
if type(a) == list or type(a) == tuple:
if len(a) != len(b):
return False
return all(map(lambda t: compare(t[0], t[1]), zip(a, b)))
elif type(a) == dict:
if len(a) != len(b):
return False
for k, v in a.items():
if not compare(v, b[k]):
return False
return True
else:
return a == b
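# Hedged behaviour notes for compare() (values illustrative):
#   compare([1, {'a': 2}], [1, {'a': 2}])  -> True  (recursive, element-wise)
#   compare((1, 2), [1, 2])                -> False (types must match exactly)
#   compare({'a': 1}, {'a': 1, 'b': 2})    -> False (lengths differ)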
def pack_data(request):
request['data'] = jsonutils.dumps(request['data'])
class FakeRpcBadRequest(Exception):
pass
class FakeRpcIsNotCalled(Exception):
def __init__(self, handle, url_params, data):
self.handle = handle
self.url_params = url_params
self.data = data
def __str__(self):
return 'Fake Rpc handle for {0}/{1}/{2} not found'.format(
self.handle, self.url_params, self.data)
class FakeRpcHandle(object):
def __init__(self, handle, url_params, data, result):
self.handle = handle
self.url_params = url_params
self.data = data
self.result = result
self._is_called = False
def set_called(self):
self._is_called = True
def __call__(self, handle, url_params, data,
allow_empty_response=False):
if handle != self.handle:
raise FakeRpcBadRequest(
'Unexpected handle name {0}. Expected {1}.'
.format(handle, self.handle))
if not compare(url_params, self.url_params):
raise FakeRpcBadRequest('Unexpected url params: {0} / {1}'
.format(url_params, self.url_params))
if not compare(data, self.data):
raise FakeRpcBadRequest('Unexpected data: {0}/{1}'
.format(data, self.data))
if callable(self.result):
return self.result()
else:
return self.result
class FakeRpc(object):
def __init__(self):
self._handles = []
def handle(self, handle, url_params, data, result):
self._handles.append(FakeRpcHandle(handle, url_params, data, result))
def __call__(self, handle_name, url_params, data,
allow_empty_response=False):
for handle in self._handles:
if (handle.handle == handle_name and
compare(handle.url_params, url_params) and
compare(handle.data, data)):
handle.set_called()
return handle(handle_name, url_params, data,
allow_empty_response)
raise FakeRpcIsNotCalled(handle_name, url_params, data)
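# Hedged dispatch sketch for FakeRpc (handle name and params illustrative):
#
#   rpc = FakeRpc()
#   rpc.handle('fetch', {'shelf': 'cms'}, None, fake_esm_fetch)
#   rpc('fetch', {'shelf': 'cms'}, None)   # -> fake_esm_fetch
#   rpc('fetch', {'shelf': 'xxx'}, None)   # raises FakeRpcIsNotCalled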
class CoraidDriverTestCase(test.TestCase):
def setUp(self):
super(CoraidDriverTestCase, self).setUp()
configuration = mox.MockObject(conf.Configuration)
configuration.append_config_values(mox.IgnoreArg())
configuration.coraid_default_repository = 'default_repository'
configuration.coraid_esm_address = fake_esm_ipaddress
configuration.coraid_user = fake_esm_username
configuration.coraid_group = fake_esm_group
configuration.coraid_password = fake_esm_password
configuration.volume_name_template = "volume-%s"
configuration.snapshot_name_template = "snapshot-%s"
configuration.coraid_repository_key = fake_coraid_repository_key
configuration.use_multipath_for_image_xfer = False
configuration.enforce_multipath_for_image_xfer = False
configuration.num_volume_device_scan_tries = 3
configuration.volume_dd_blocksize = '1M'
self.fake_rpc = FakeRpc()
self.stubs.Set(coraid.CoraidRESTClient, 'rpc', self.fake_rpc)
self.driver = coraid.CoraidDriver(configuration=configuration)
self.driver.do_setup({})
def mock_volume_types(self, repositories=None):
if not repositories:
repositories = [fake_repository_name]
self.mox.StubOutWithMock(volume_types, 'get_volume_type_extra_specs')
for repository in repositories:
(volume_types
.get_volume_type_extra_specs(fake_volume_type['id'],
fake_coraid_repository_key)
.AndReturn('<in> {0}'.format(repository)))
class CoraidDriverLoginSuccessTestCase(CoraidDriverTestCase):
def setUp(self):
super(CoraidDriverLoginSuccessTestCase, self).setUp()
login_results = {'state': 'adminSucceed',
'values': [
{'fullPath':
'admin group:{0}'.format(fake_esm_group),
'groupId': fake_esm_group_id
}]}
self.fake_rpc.handle('admin', {'op': 'login',
'username': fake_esm_username,
'password': fake_esm_password},
'Login', login_results)
self.fake_rpc.handle('admin', {'op': 'setRbacGroup',
'groupId': fake_esm_group_id},
'Group', {'state': 'adminSucceed'})
class CoraidDriverApplianceTestCase(CoraidDriverLoginSuccessTestCase):
def test_resize_volume(self):
new_volume_size = int(fake_volume_size) + 1
fetch_request = {'shelf': 'cms',
'orchStrRepo': '',
'lv': fake_volume_name}
self.fake_rpc.handle('fetch', fetch_request, None,
fake_esm_fetch)
reply = {'configState': 'completedSuccessfully'}
resize_volume_request = {'addr': 'cms',
'data': {
'lvName': fake_volume_name,
'newLvName': fake_volume_name + '-resize',
'size':
coraid_volume_size(new_volume_size),
'repoName': fake_repository_name},
'op': 'orchStrLunMods',
'args': 'resize'}
pack_data(resize_volume_request)
self.fake_rpc.handle('configure', {}, [resize_volume_request],
reply)
real_reply = self.driver.appliance.resize_volume(fake_volume_name,
new_volume_size)
self.assertEqual(reply['configState'], real_reply['configState'])
class CoraidDriverIntegrationalTestCase(CoraidDriverLoginSuccessTestCase):
def setUp(self):
super(CoraidDriverIntegrationalTestCase, self).setUp()
self.appliance = self.driver.appliance
# NOTE(nsobolevsky) prevent re-creation esm appliance
self.stubs.Set(coraid.CoraidDriver, 'appliance', self.appliance)
def test_create_volume(self):
self.mock_volume_types()
create_volume_request = {'addr': 'cms',
'data': {
'servers': [],
'size':
coraid_volume_size(fake_volume_size),
'repoName': fake_repository_name,
'lvName': fake_volume_name},
'op': 'orchStrLun',
'args': 'add'}
pack_data(create_volume_request)
self.fake_rpc.handle('configure', {}, [create_volume_request],
{'configState': 'completedSuccessfully',
'firstParam': 'fake_first_param'})
self.mox.ReplayAll()
self.driver.create_volume(fake_volume)
self.mox.VerifyAll()
@mock.patch.object(volume_types, 'get_volume_type_extra_specs')
def test_create_volume_volume_type_no_repo_key(self, volume_specs_mock):
"""Test volume creation without repo specified in volume type."""
volume_specs_mock.return_value = None
create_volume_request = {'addr': 'cms',
'data': {
'servers': [],
'size':
coraid_volume_size(fake_volume_size),
'repoName': 'default_repository',
'lvName': fake_volume_name},
'op': 'orchStrLun',
'args': 'add'}
pack_data(create_volume_request)
self.fake_rpc.handle('configure', {}, [create_volume_request],
{'configState': 'completedSuccessfully',
'firstParam': 'fake_first_param'})
self.driver.create_volume(fake_volume)
@mock.patch.object(volume_types, 'get_volume_type_extra_specs')
def test_create_volume_volume_type_no_repo_data(self, volume_specs_mock):
"""Test volume creation w/o repo in volume type nor config."""
volume_specs_mock.return_value = None
self.driver.configuration.coraid_default_repository = None
create_volume_request = {'addr': 'cms',
'data': {
'servers': [],
'size':
coraid_volume_size(fake_volume_size),
'repoName': 'default_repository',
'lvName': fake_volume_name},
'op': 'orchStrLun',
'args': 'add'}
pack_data(create_volume_request)
self.fake_rpc.handle('configure', {}, [create_volume_request],
{'configState': 'completedSuccessfully',
'firstParam': 'fake_first_param'})
self.assertRaises(exception.CoraidException,
self.driver.create_volume, fake_volume)
def test_delete_volume(self):
delete_volume_request = {'addr': 'cms',
'data': {
'repoName': fake_repository_name,
'lvName': fake_volume_name},
'op': 'orchStrLun/verified',
'args': 'delete'}
pack_data(delete_volume_request)
self.fake_rpc.handle('configure', {}, [delete_volume_request],
{'configState': 'completedSuccessfully'})
self.fake_rpc.handle('fetch', {'orchStrRepo': '',
'shelf': 'cms',
'lv': fake_volume_name},
None,
fake_esm_fetch)
self.mox.ReplayAll()
self.driver.delete_volume(fake_volume)
self.mox.VerifyAll()
def test_ping_ok(self):
self.fake_rpc.handle('fetch', {}, None, '')
self.mox.ReplayAll()
self.driver.appliance.ping()
self.mox.VerifyAll()
def test_ping_failed(self):
def rpc(handle, url_params, data,
allow_empty_response=True):
raise test.TestingException("Some exception")
self.stubs.Set(self.driver.appliance, 'rpc', rpc)
self.mox.ReplayAll()
self.assertRaises(exception.CoraidESMNotAvailable,
self.driver.appliance.ping)
self.mox.VerifyAll()
def test_delete_not_existing_lun(self):
delete_volume_request = {'addr': 'cms',
'data': {
'repoName': fake_repository_name,
'lvName': fake_volume_name},
'op': 'orchStrLun/verified',
'args': 'delete'}
pack_data(delete_volume_request)
self.fake_rpc.handle('configure', {}, [delete_volume_request],
{'configState': 'completedSuccessfully'})
self.fake_rpc.handle('fetch', {'orchStrRepo': '',
'shelf': 'cms',
'lv': fake_volume_name},
None,
fake_esm_fetch_no_volume)
self.mox.ReplayAll()
self.assertRaises(
exception.VolumeNotFound,
self.driver.appliance.delete_lun,
fake_volume['name'])
self.mox.VerifyAll()
    def test_delete_not_existing_volume_appliance_is_ok(self):
def delete_lun(volume_name):
raise exception.VolumeNotFound(volume_id=fake_volume['name'])
self.stubs.Set(self.driver.appliance, 'delete_lun', delete_lun)
def ping():
pass
self.stubs.Set(self.driver.appliance, 'ping', ping)
self.mox.ReplayAll()
self.driver.delete_volume(fake_volume)
self.mox.VerifyAll()
    def test_delete_not_existing_volume_sleeping_appliance(self):
def delete_lun(volume_name):
raise exception.VolumeNotFound(volume_id=fake_volume['name'])
self.stubs.Set(self.driver.appliance, 'delete_lun', delete_lun)
def ping():
raise exception.CoraidESMNotAvailable(reason="Any reason")
self.stubs.Set(self.driver.appliance, 'ping', ping)
self.driver.appliance.ping = ping
self.mox.ReplayAll()
self.assertRaises(exception.CoraidESMNotAvailable,
self.driver.delete_volume,
fake_volume)
self.mox.VerifyAll()
def test_create_snapshot(self):
fetch_request = {'shelf': 'cms',
'orchStrRepo': '',
'lv': fake_volume_name}
self.fake_rpc.handle('fetch', fetch_request, None,
fake_esm_fetch)
create_snapshot_request = {'addr': 'cms',
'data': {
'repoName': fake_repository_name,
'lvName': fake_volume_name,
'newLvName': fake_snapshot_name},
'op': 'orchStrLunMods',
'args': 'addClSnap'}
pack_data(create_snapshot_request)
self.fake_rpc.handle('configure', {}, [create_snapshot_request],
{'configState': 'completedSuccessfully'})
self.mox.ReplayAll()
self.driver.create_snapshot(fake_snapshot)
self.mox.VerifyAll()
def test_delete_snapshot(self):
fetch_request = {'shelf': 'cms',
'orchStrRepo': '',
'lv': fake_snapshot_name}
self.fake_rpc.handle('fetch', fetch_request, None,
fake_esm_fetch)
delete_snapshot_request = {'addr': 'cms',
'data': {
'repoName': fake_repository_name,
'lvName': fake_snapshot_name,
'newLvName': 'noop'},
'op': 'orchStrLunMods',
'args': 'delClSnap'}
pack_data(delete_snapshot_request)
self.fake_rpc.handle('configure', {}, [delete_snapshot_request],
{'configState': 'completedSuccessfully'})
self.mox.ReplayAll()
self.driver.delete_snapshot(fake_snapshot)
self.mox.VerifyAll()
def test_create_volume_from_snapshot(self):
self.mock_volume_types()
self.mox.StubOutWithMock(self.driver.appliance, 'resize_volume')
self.driver.appliance.resize_volume(fake_volume_name,
fake_volume['size'])\
.AndReturn(None)
fetch_request = {'shelf': 'cms',
'orchStrRepo': '',
'lv': fake_snapshot_name}
self.fake_rpc.handle('fetch', fetch_request, None,
fake_esm_fetch)
create_clone_request = {'addr': 'cms',
'data': {
'lvName': fake_snapshot_name,
'repoName': fake_repository_name,
'newLvName': fake_volume_name,
'newRepoName': fake_repository_name},
'op': 'orchStrLunMods',
'args': 'addClone'}
pack_data(create_clone_request)
self.fake_rpc.handle('configure', {}, [create_clone_request],
{'configState': 'completedSuccessfully'})
self.mox.ReplayAll()
self.driver.create_volume_from_snapshot(fake_volume, fake_snapshot)
self.mox.VerifyAll()
def test_initialize_connection(self):
fetch_request = {'shelf': 'cms',
'orchStrRepo': '',
'lv': fake_volume_name}
self.fake_rpc.handle('fetch', fetch_request, None,
fake_esm_fetch)
self.mox.ReplayAll()
connection = self.driver.initialize_connection(fake_volume, {})
self.mox.VerifyAll()
self.assertEqual(connection['driver_volume_type'], 'aoe')
self.assertEqual(connection['data']['target_shelf'], fake_shelf)
self.assertEqual(connection['data']['target_lun'], fake_lun)
def test_get_repository_capabilities(self):
reply = [[{}, {'reply': [
{'name': 'repo1',
'profile':
{'fullName': 'Bronze-Bronze:Profile1'}},
{'name': 'repo2',
'profile':
{'fullName': 'Bronze-Bronze:Profile2'}}]}]]
self.fake_rpc.handle('fetch', {'orchStrRepo': ''}, None,
reply)
self.mox.ReplayAll()
capabilities = self.driver.get_volume_stats(refresh=True)
self.mox.VerifyAll()
self.assertEqual(
capabilities[fake_coraid_repository_key],
'Bronze-Bronze:Profile1:repo1 Bronze-Bronze:Profile2:repo2')
def test_create_cloned_volume(self):
self.mock_volume_types([fake_repository_name])
fetch_request = {'shelf': 'cms',
'orchStrRepo': '',
'lv': fake_volume_name}
self.fake_rpc.handle('fetch', fetch_request, None,
fake_esm_fetch)
shelf_lun = '{0}.{1}'.format(fake_shelf, fake_lun)
create_clone_request = {'addr': 'cms',
'data': {
'shelfLun': shelf_lun,
'lvName': fake_volume_name,
'repoName': fake_repository_name,
'newLvName': fake_clone_name,
'newRepoName': fake_repository_name},
'op': 'orchStrLunMods',
'args': 'addClone'}
pack_data(create_clone_request)
self.fake_rpc.handle('configure', {}, [create_clone_request],
{'configState': 'completedSuccessfully'})
self.mox.ReplayAll()
self.driver.create_cloned_volume(fake_clone_volume, fake_volume)
self.mox.VerifyAll()
def test_create_cloned_volume_with_resize(self):
self.mock_volume_types([fake_repository_name])
self.mox.StubOutWithMock(self.driver.appliance, 'resize_volume')
self.driver.appliance.resize_volume(fake_big_clone_volume['name'],
fake_big_clone_volume['size'])\
.AndReturn(None)
fetch_request = {'shelf': 'cms',
'orchStrRepo': '',
'lv': fake_volume_name}
self.fake_rpc.handle('fetch', fetch_request, None,
fake_esm_fetch)
shelf_lun = '{0}.{1}'.format(fake_shelf, fake_lun)
create_clone_request = {'addr': 'cms',
'data': {
'shelfLun': shelf_lun,
'lvName': fake_volume_name,
'repoName': fake_repository_name,
'newLvName': fake_clone_name,
'newRepoName': fake_repository_name},
'op': 'orchStrLunMods',
'args': 'addClone'}
pack_data(create_clone_request)
self.fake_rpc.handle('configure', {}, [create_clone_request],
{'configState': 'completedSuccessfully'})
self.mox.ReplayAll()
self.driver.create_cloned_volume(fake_big_clone_volume, fake_volume)
self.mox.VerifyAll()
def test_create_cloned_volume_in_different_repository(self):
self.mock_volume_types([fake_repository_name + '_another'])
fetch_request = {'shelf': 'cms',
'orchStrRepo': '',
'lv': fake_volume_name}
self.fake_rpc.handle('fetch', fetch_request, None,
fake_esm_fetch)
self.mox.ReplayAll()
self.assertRaises(
exception.CoraidException,
self.driver.create_cloned_volume,
fake_clone_volume,
fake_volume)
self.mox.VerifyAll()
def test_extend_volume(self):
self.mox.StubOutWithMock(self.driver.appliance, 'resize_volume')
self.driver.appliance.resize_volume(fake_volume_name, 10)\
.AndReturn(None)
self.mox.ReplayAll()
self.driver.extend_volume(fake_volume, 10)
self.mox.VerifyAll()
class AutoReloginCoraidTestCase(test.TestCase):
def setUp(self):
super(AutoReloginCoraidTestCase, self).setUp()
self.rest_client = coraid.CoraidRESTClient('https://fake')
self.appliance = coraid.CoraidAppliance(self.rest_client,
'fake_username',
'fake_password',
'fake_group')
def _test_auto_relogin_fail(self, state):
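        # Every rpc() attempt below returns an admin-failure state, so the
        # appliance is expected to re-login and retry a bounded number of
        # times, then give up with CoraidESMReloginFailed rather than loop.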
self.mox.StubOutWithMock(self.rest_client, 'rpc')
self.rest_client.rpc('fake_handle', {}, None, False).\
AndReturn({'state': state,
'metaCROp': 'reboot'})
self.rest_client.rpc('fake_handle', {}, None, False).\
AndReturn({'state': state,
'metaCROp': 'reboot'})
self.rest_client.rpc('fake_handle', {}, None, False).\
AndReturn({'state': state,
'metaCROp': 'reboot'})
self.mox.StubOutWithMock(self.appliance, '_ensure_session')
self.appliance._ensure_session().AndReturn(None)
self.mox.StubOutWithMock(self.appliance, '_relogin')
self.appliance._relogin().AndReturn(None)
self.appliance._relogin().AndReturn(None)
self.mox.ReplayAll()
self.assertRaises(exception.CoraidESMReloginFailed,
self.appliance.rpc,
'fake_handle', {}, None, False)
self.mox.VerifyAll()
def test_auto_relogin_fail_admin(self):
self._test_auto_relogin_fail('GeneralAdminFailure')
def test_auto_relogin_fail_inactivity(self):
self._test_auto_relogin_fail('passwordInactivityTimeout')
def test_auto_relogin_fail_absolute(self):
self._test_auto_relogin_fail('passwordAbsoluteTimeout')
def test_auto_relogin_success(self):
self.mox.StubOutWithMock(self.rest_client, 'rpc')
self.rest_client.rpc('fake_handle', {}, None, False).\
AndReturn({'state': 'GeneralAdminFailure',
'metaCROp': 'reboot'})
self.rest_client.rpc('fake_handle', {}, None, False).\
AndReturn({'state': 'ok'})
self.mox.StubOutWithMock(self.appliance, '_ensure_session')
self.appliance._ensure_session().AndReturn(None)
self.mox.StubOutWithMock(self.appliance, '_relogin')
self.appliance._relogin().AndReturn(None)
self.mox.ReplayAll()
reply = self.appliance.rpc('fake_handle', {}, None, False)
self.mox.VerifyAll()
self.assertEqual(reply['state'], 'ok')
class CoraidDriverImageTestCases(CoraidDriverTestCase):
def setUp(self):
super(CoraidDriverImageTestCases, self).setUp()
self.fake_dev_path = '/dev/ether/fake_dev'
self.fake_connection = {'driver_volume_type': 'aoe',
'data': {'target_shelf': fake_shelf,
'target_lun': fake_lun}}
self.fake_volume_info = {
'shelf': self.fake_connection['data']['target_shelf'],
'lun': self.fake_connection['data']['target_lun']}
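        # The stubs below fake the whole attach/detach cycle around an image
        # copy: the driver opens an AoE connection, brick attaches the volume
        # and exposes self.fake_dev_path, and the connection is torn down
        # once the copy finishes.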
self.mox.StubOutWithMock(self.driver, 'initialize_connection')
self.driver.initialize_connection(fake_volume, {})\
.AndReturn(self.fake_connection)
self.mox.StubOutWithMock(self.driver, 'terminate_connection')
self.driver.terminate_connection(fake_volume, mox.IgnoreArg(),
force=False).AndReturn(None)
root_helper = 'sudo cinder-rootwrap /etc/cinder/rootwrap.conf'
self.mox.StubOutWithMock(connector, 'get_connector_properties')
connector.get_connector_properties(root_helper,
CONF.my_ip, False, False).\
AndReturn({})
self.mox.StubOutWithMock(utils, 'brick_get_connector')
aoe_initiator = self.mox.CreateMockAnything()
utils.brick_get_connector('aoe',
device_scan_attempts=3,
use_multipath=False,
conn=mox.IgnoreArg()).\
AndReturn(aoe_initiator)
aoe_initiator\
.connect_volume(self.fake_connection['data'])\
.AndReturn({'path': self.fake_dev_path})
aoe_initiator.check_valid_device(self.fake_dev_path, mox.IgnoreArg())\
.AndReturn(True)
aoe_initiator.disconnect_volume(
{'target_shelf': self.fake_volume_info['shelf'],
'target_lun': self.fake_volume_info['lun']}, mox.IgnoreArg())
def test_copy_volume_to_image(self):
fake_image_service = 'fake-image-service'
fake_image_meta = 'fake-image-meta'
self.mox.StubOutWithMock(image_utils, 'upload_volume')
image_utils.upload_volume({},
fake_image_service,
fake_image_meta,
self.fake_dev_path)
self.mox.ReplayAll()
self.driver.copy_volume_to_image({},
fake_volume,
fake_image_service,
fake_image_meta)
self.mox.VerifyAll()
def test_copy_image_to_volume(self):
fake_image_service = 'fake-image-service'
        fake_image_id = 'fake-image-id'
self.mox.StubOutWithMock(image_utils, 'fetch_to_raw')
image_utils.fetch_to_raw({},
fake_image_service,
fake_image_id,
self.fake_dev_path,
mox.IgnoreArg(),
size=fake_volume_size)
self.mox.ReplayAll()
self.driver.copy_image_to_volume({},
fake_volume,
fake_image_service,
fake_image_id)
self.mox.VerifyAll()
class CoraidResetConnectionTestCase(CoraidDriverTestCase):
def test_create_new_appliance_for_every_request(self):
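        # The driver's appliance property is expected to build a fresh REST
        # client and CoraidAppliance on every access, hence the two distinct
        # fake instances expected below.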
self.mox.StubOutWithMock(coraid, 'CoraidRESTClient')
self.mox.StubOutWithMock(coraid, 'CoraidAppliance')
coraid.CoraidRESTClient(mox.IgnoreArg())
coraid.CoraidRESTClient(mox.IgnoreArg())
coraid.CoraidAppliance(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn('fake_app1')
coraid.CoraidAppliance(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn('fake_app2')
self.mox.ReplayAll()
self.assertEqual(self.driver.appliance, 'fake_app1')
self.assertEqual(self.driver.appliance, 'fake_app2')
self.mox.VerifyAll()
|
Akrog/cinder
|
cinder/tests/test_coraid.py
|
Python
|
apache-2.0
| 33,873
| 0
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2017 Tuukka Turto
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Module for main window related functionality
"""
import PyQt4.QtGui
from herculeum.ui.controllers import EndScreenController, StartGameController
from herculeum.ui.gui.endscreen import EndScreen
from herculeum.ui.gui.eventdisplay import EventMessageDockWidget
from herculeum.ui.gui.map import PlayMapWindow
from herculeum.ui.gui.menu import MenuDialog
from herculeum.ui.gui.startgame import StartGameWidget
from PyQt4.QtCore import QFile, Qt
from PyQt4.QtGui import (QAction, QApplication, QCursor, QDialog, QIcon,
QMainWindow, QPixmap, QSplashScreen)
class QtUserInterface():
"""
Class for Qt User Interface
.. versionadded:: 0.9
"""
def __init__(self, application):
"""
Default constructor
"""
super().__init__()
self.application = application
self.splash_screen = None
self.qt_app = QApplication([])
# self.qt_app.setOverrideCursor(QCursor(Qt.BlankCursor))
def show_splash_screen(self):
"""
Show splash screen
"""
file = QFile(':herculeum.qss')
file.open(QFile.ReadOnly)
styleSheet = str(file.readAll().data(), 'ascii')
self.qt_app.setStyleSheet(styleSheet)
pixmap = QPixmap(':splash.png')
self.splash_screen = QSplashScreen(pixmap)
self.splash_screen.show()
def show_main_window(self):
"""
Show main window
"""
main_window = MainWindow(self.application,
self.application.surface_manager,
self.qt_app,
None,
Qt.FramelessWindowHint,
StartGameController(self.application.level_generator_factory,
self.application.creature_generator,
self.application.item_generator,
self.application.config.start_level))
self.splash_screen.finish(main_window)
main_window.show_new_game()
self.qt_app.exec_()
class MainWindow(QMainWindow):
"""
Class for displaying main window
.. versionadded:: 0.5
"""
def __init__(self, application, surface_manager, qt_app, parent, flags,
controller):
"""
Default constructor
"""
super().__init__(parent, flags)
self.application = application
self.surface_manager = surface_manager
self.qt_app = qt_app
self.controller = controller
self.__set_layout()
def __set_layout(self):
exit_action = QAction(QIcon(':exit-game.png'),
'&Quit',
self)
exit_action.setShortcut('Ctrl+Q')
exit_action.setStatusTip('Quit game')
exit_action.triggered.connect(PyQt4.QtGui.qApp.quit)
inventory_action = QAction(QIcon(':inventory.png'),
'Inventory',
self)
inventory_action.setShortcut('Ctrl+I')
inventory_action.setStatusTip('Show inventory')
inventory_action.triggered.connect(self.__show_menu)
character_action = QAction(QIcon(':character.png'),
'Character',
self)
character_action.setShortcut('Ctrl+C')
character_action.setStatusTip('Show character')
self.map_window = PlayMapWindow(parent=None,
model=self.application.world,
surface_manager=self.surface_manager,
action_factory=self.application.action_factory,
rng=self.application.rng,
rules_engine=self.application.rules_engine,
configuration=self.application.config)
self.setCentralWidget(self.map_window)
self.map_window.MenuRequested.connect(self.__show_menu)
self.map_window.EndScreenRequested.connect(self.__show_end_screen)
self.setGeometry(50, 50, 800, 600)
self.setWindowTitle('Herculeum')
self.setWindowIcon(QIcon(':rune-stone.png'))
self.showMaximized()
def show_new_game(self):
"""
Show new game dialog
"""
app = self.application
start_dialog = StartGameWidget(generator=app.player_generator,
config=self.application.config.controls,
parent=self,
application=self.application,
surface_manager=self.surface_manager,
flags=Qt.Dialog | Qt.CustomizeWindowHint)
result = start_dialog.exec_()
if result == QDialog.Accepted:
player = start_dialog.player_character
intro_text = self.controller.setup_world(self.application.world,
player)
player.register_for_updates(self.map_window.hit_points_widget)
self.map_window.hit_points_widget.show_hit_points(player)
self.map_window.hit_points_widget.show_spirit_points(player)
self.map_window.message_widget.text_edit.setText(intro_text)
self.__show_map_window()
def __show_map_window(self):
"""
Show map window
"""
self.map_window.construct_scene()
def __show_message_window(self, character):
"""
Show message display
:param character: character which events to display
:type character: Character
"""
messages_display = EventMessageDockWidget(self, character)
self.addDockWidget(Qt.BottomDockWidgetArea,
messages_display)
def __show_menu(self):
"""
Show menu
"""
menu_dialog = MenuDialog(self.surface_manager,
self.application.world.player,
self.application.action_factory,
self.application.config.controls,
self,
Qt.Dialog | Qt.CustomizeWindowHint)
menu_dialog.exec_()
def __show_end_screen(self):
"""
Show end screen
.. versionadded:: 0.8
"""
end_screen = EndScreen(self.application.world,
self.application.config.controls,
self,
Qt.Dialog | Qt.CustomizeWindowHint,
controller=EndScreenController())
end_screen.exec_()
self.qt_app.quit()
|
tuturto/pyherc
|
src/herculeum/ui/gui/mainwindow.py
|
Python
|
mit
| 8,099
| 0.001235
|
LANG_TEXT = {'en_EN': {'tr_TR': 'Turkish',
'fr_FR': 'French',
'fi_FI': 'Finnish',
'pt_PT': 'Portuguese',
'fy_x-FY': 'Frisian',
'it_IT': 'Italian',
'et_EE': 'Estonian',
'no_NO': 'Norwegian',
'nl_NL': 'Dutch',
'lv_LV': 'Latvian',
'el_GR': 'Greek',
'pt_BR_BR': 'Brazilian Portuguese',
'hu_HU': 'Hungarian',
'lt_LT': 'Lithuanian',
'sl_SI': 'Slovenian',
'hr_HR': 'Croatian',
'en_EN': 'English',
'es_ES': 'Spanish',
'ca_AD': 'Catalan',
'ru_RU': 'Russian',
'is_IS': 'Icelandic',
'da_DK': 'Danish',
'ar_AE': 'Arabic',
'sk_SK': 'Slovakian',
'de_DE': 'German',
'sr_YU': 'Serbian',
'cs_CZ': 'Czech',
'pl_PL': 'Polish',
'uk_UA': 'Ukrainian',
'fa_IR': 'Persian',
'sv_SE': 'Swedish',
'he_IL': 'Hebrew',
'T1': 'Please use the UP and DOWN keys to select your language. Afterwards press the OK button.',
'T2': 'Language selection'},
'de_DE': {'tr_TR': 'T\xc3\xbcrkisch',
'fr_FR': 'Franz\xc3\xb6sisch',
'fi_FI': 'Finnisch',
'pt_PT': 'portugiesisch',
'fy_x-FY': 'Friesisch',
'it_IT': 'Italienisch',
'et_EE': 'Estnisch',
'no_NO': 'Norwegisch',
'nl_NL': 'Holl\xc3\xa4ndisch',
'lv_LV': 'Lettisch',
'el_GR': 'Griechisch',
'pt_BR_BR': 'Brazilian Portuguese',
'hu_HU': 'Ungarisch',
'lt_LT': 'litauisch',
'sl_SI': 'Slowenisch',
'hr_HR': 'Kroatisch',
'en_EN': 'Englisch',
'es_ES': 'Spanisch',
'ca_AD': 'Katalanisch',
'ru_RU': 'Russisch',
'is_IS': 'Isl\xc3\xa4ndisch',
'da_DK': 'D\xc3\xa4nisch',
'ar_AE': 'Arabisch',
'sk_SK': 'Slowakisch',
'de_DE': 'Deutsch',
'sr_YU': 'Serbisch',
'cs_CZ': 'Tschechisch',
'pl_PL': 'Polnisch',
'uk_UA': 'Ukrainisch',
'fa_IR': 'Persisch',
'sv_SE': 'Schwedisch',
'he_IL': 'Hebr\xc3\xa4isch',
'T1': 'Bitte benutzen Sie die Hoch/Runter-Tasten, um Ihre Sprache auszuw\xc3\xa4hlen. Danach dr\xc3\xbccken Sie bitte OK.',
'T2': 'Sprachauswahl'},
'ar_AE': {'tr_TR': '\xd8\xaa\xd8\xb1\xd9\x83\xd9\x89',
'fr_FR': '\xd9\x81\xd8\xb1\xd9\x86\xd8\xb3\xd9\x89',
'fi_FI': '\xd8\xa5\xd9\x86\xd8\xaa\xd9\x87\xd8\xa7\xd8\xa1',
'pt_PT': '\xd8\xa8\xd8\xb1\xd8\xaa\xd8\xba\xd8\xa7\xd9\x84\xd9\x89',
'fy_x-FY': 'Frisian',
'it_IT': '\xd8\xa5\xd9\x8a\xd8\xb7\xd8\xa7\xd9\x84\xd9\x89',
'et_EE': '\xd8\xa3\xd8\xb3\xd8\xaa\xd9\x88\xd9\x86\xd9\x89',
'no_NO': '\xd9\x86\xd8\xb1\xd9\x88\xd9\x8a\xd8\xac\xd9\x89',
'nl_NL': '\xd9\x87\xd9\x88\xd9\x84\xd9\x86\xd8\xaf\xd9\x89',
'lv_LV': 'Latvian',
'el_GR': '\xd8\xa7\xd9\x84\xd9\x8a\xd9\x88\xd9\x86\xd8\xa7\xd9\x86',
'pt_BR_BR': 'Brazilian Portuguese',
'hu_HU': '\xd9\x85\xd8\xac\xd8\xb1\xd9\x89',
'lt_LT': '\xd9\x84\xd9\x8a\xd8\xaa\xd9\x88\xd8\xa7\xd9\x86\xd9\x89',
'sl_SI': '\xd8\xb3\xd9\x84\xd9\x88\xd9\x81\xd8\xa7\xd9\x86\xd9\x89',
'hr_HR': '\xd9\x83\xd8\xb1\xd9\x88\xd8\xa7\xd8\xaa\xd9\x89',
'en_EN': '\xd8\xa5\xd9\x86\xd8\xac\xd9\x84\xd9\x8a\xd8\xb2\xd9\x89',
'es_ES': '\xd8\xa3\xd8\xb3\xd8\xa8\xd8\xa7\xd9\x86\xd9\x89',
'ca_AD': '\xd9\x83\xd8\xa7\xd8\xaa\xd8\xa7\xd9\x84\xd8\xa7\xd9\x86\xd9\x8a',
'ru_RU': '\xd8\xb1\xd9\x88\xd8\xb3\xd9\x89',
'is_IS': '\xd8\xa7\xd9\x8a\xd8\xb3\xd9\x84\xd9\x86\xd8\xaf\xd9\x89',
'da_DK': '\xd8\xaf\xd9\x86\xd9\x85\xd8\xa7\xd8\xb1\xd9\x83\xd9\x89',
'ar_AE': '\xd8\xb9\xd9\x80\xd8\xb1\xd8\xa8\xd9\x89',
'sk_SK': '\xd8\xb3\xd9\x84\xd9\x88\xd9\x81\xd8\xa7\xd9\x83\xd9\x89',
'de_DE': '\xd8\xa7\xd9\x84\xd9\x85\xd8\xa7\xd9\x86\xd9\x80\xd9\x89',
'sr_YU': '\xd8\xb5\xd8\xb1\xd9\x8a\xd9\x89',
'cs_CZ': '\xd8\xa7\xd9\x84\xd8\xaa\xd8\xb4\xd9\x8a\xd9\x83',
'pl_PL': '\xd8\xa8\xd9\x88\xd9\x84\xd9\x86\xd8\xaf\xd9\x89',
'uk_UA': '\xd8\xa3\xd9\x88\xd9\x83\xd8\xb1\xd8\xa7\xd9\x86\xd9\x89',
'fa_IR': 'Persian',
'sv_SE': '\xd8\xb3\xd9\x88\xd9\x8a\xd8\xaf\xd9\x89',
'he_IL': 'Hebrew',
'T1': '\xd9\x85\xd9\x86 \xd9\x81\xd8\xb6\xd9\x84\xd9\x83 \xd8\xa3\xd8\xb3\xd8\xaa\xd8\xae\xd8\xaf\xd9\x85 \xd8\xb0\xd8\xb1 \xd8\xa7\xd9\x84\xd8\xb3\xd9\x87\xd9\x85 \xd8\xa7\xd9\x84\xd8\xb9\xd9\x84\xd9\x88\xd9\x89 \xd8\xa3\xd9\x88 \xd8\xa7\xd9\x84\xd8\xb3\xd9\x81\xd9\x84\xd9\x89 \xd9\x84\xd8\xa5\xd8\xae\xd8\xaa\xd9\x8a\xd8\xa7\xd8\xb1 \xd8\xa7\xd9\x84\xd9\x84\xd8\xba\xd9\x87. \xd8\xab\xd9\x85 \xd8\xa3\xd8\xb6\xd8\xba\xd8\xb7 \xd9\x85\xd9\x88\xd8\xa7\xd9\x81\xd9\x82 .',
'T2': '\xd8\xa5\xd8\xae\xd8\xaa\xd9\x8a\xd8\xa7\xd8\xb1 \xd8\xa7\xd9\x84\xd9\x84\xd8\xba\xd9\x80\xd9\x87'},
'pt_BR_BR': {'tr_TR': 'Turco',
'fr_FR': 'Franc\xc3\xaas',
'fi_FI': 'Finland\xc3\xaas',
'pt_PT': 'Portugu\xc3\xaas brasileiro',
'fy_x-FY': 'Fr\xc3\xadsio',
'it_IT': 'Italiano',
'et_EE': 'Estoniano',
'no_NO': 'Noruegu\xc3\xaas',
'nl_NL': 'Holand\xc3\xaas',
'lv_LV': 'Let\xc3\xa3o',
'el_GR': 'Grego',
'pt_BR_BR': 'Brazilian Portuguese',
'hu_HU': 'H\xc3\xbangaro',
'lt_LT': 'Lituano',
'sl_SI': 'Esloveno',
'hr_HR': 'Croata',
'en_EN': 'Ingl\xc3\xaas',
'es_ES': 'Catal\xc3\xa3o',
'ca_AD': 'Catal\xc3\xa3o',
'ru_RU': 'Russo',
'is_IS': 'Island\xc3\xaas',
'da_DK': 'Dinamarqu\xc3\xaas',
'ar_AE': '\xc3\x81rabe',
'sk_SK': 'Eslovaco',
'de_DE': 'Alem\xc3\xa3o',
'sr_YU': 'S\xc3\xa9rvia',
'cs_CZ': 'Checo',
'pl_PL': 'Polaco',
'uk_UA': 'Ucraniano',
'fa_IR': 'Persa',
'sv_SE': 'Sueco',
'he_IL': 'Hebr\xc3\xa1ico',
'T1': 'Use a tecla de cima ou de baixo para selecionar seu idioma. Depois pressione OK.',
'T2': 'Sele\xc3\xa7\xc3\xa3o do idioma'},
'ca_AD': {'tr_TR': 'Turc',
'fr_FR': 'Franc\xc3\xa8s',
'fi_FI': 'Finland\xc3\xa8s',
'pt_PT': 'Portugu\xc3\xa8s',
'fy_x-FY': 'Frisian',
'it_IT': 'Itali\xc3\xa0',
'et_EE': 'Estonian',
'no_NO': 'Noruec',
'nl_NL': 'Holand\xc3\xa8s',
'lv_LV': 'Latvian',
'el_GR': 'Grec',
'pt_BR_BR': 'Brazilian Portuguese',
'hu_HU': 'Hongar\xc3\xa8s',
'lt_LT': 'Litu\xc3\xa0',
'sl_SI': 'Slovenian',
'hr_HR': 'Croat',
'en_EN': 'Angl\xc3\xa8s',
'es_ES': 'Espanyol',
'ca_AD': 'Catal\xc3\xa0',
'ru_RU': 'Rus',
'is_IS': 'Island\xc3\xa8s',
'da_DK': 'Dan\xc3\xa8s',
'ar_AE': '\xc3\x80rab',
'sk_SK': 'Slovakian',
'de_DE': 'Alemany',
'sr_YU': 'Serbian',
'cs_CZ': 'Txec',
'pl_PL': 'Polish',
'uk_UA': 'Ukrainian',
'fa_IR': 'Persian',
'sv_SE': 'Suec',
'he_IL': 'Hebrew',
'T1': 'Please use the UP and DOWN keys to select your language. Afterwards press the OK button.',
'T2': "Selecci\xc3\xb3 d'idioma"},
'hr_HR': {'tr_TR': 'Turski',
'fr_FR': 'Francuski',
'fi_FI': 'Finski',
'pt_PT': 'Portugalski',
'fy_x-FY': 'Frisian',
'it_IT': 'Talijanski',
'et_EE': 'Estonian',
'no_NO': 'Norve\xc5\xa1ki',
'nl_NL': 'Nizozemski',
'lv_LV': 'Latvian',
'el_GR': 'Gr\xc4\x8dki',
'pt_BR_BR': 'Brazilian Portuguese',
'hu_HU': 'Ma\xc4\x91arski',
'lt_LT': 'Litvanski',
'sl_SI': 'Slovenian',
'hr_HR': 'Hrvatski',
'en_EN': 'Engleski',
'es_ES': '\xc5\xa0panjolski',
'ca_AD': 'Katalonski',
'ru_RU': 'Ruski',
'is_IS': 'Islandski',
'da_DK': 'Danski',
'ar_AE': 'Arabski',
'sk_SK': 'Slovakian',
'de_DE': 'Njema\xc4\x8dki',
'sr_YU': 'Serbian',
'cs_CZ': '\xc4\x8ce\xc5\xa1ki',
'pl_PL': 'Poljski',
'uk_UA': 'Ukrainian',
'fa_IR': 'Persian',
'sv_SE': '\xc5\xa0vedski',
'he_IL': 'Hebrew',
'T1': 'Please use the UP and DOWN keys to select your language. Afterwards press the OK button.',
'T2': 'Odaberite Jezik'},
'cs_CZ': {'tr_TR': 'Turecky',
'fr_FR': 'Francouzsky',
'fi_FI': 'Finsky',
'pt_PT': 'Portugalsky',
'fy_x-FY': 'Frisian',
'it_IT': 'Italsky',
'et_EE': 'Estonian',
'no_NO': 'Norsky',
'nl_NL': 'Holandsky',
'lv_LV': 'Latvian',
'el_GR': '\xc5\x98ecky',
'pt_BR_BR': 'Brazilian Portuguese',
'hu_HU': 'Ma\xc4\x8farsky',
'lt_LT': 'Litevsky',
'sl_SI': 'Slovenian',
'hr_HR': 'Chorvatsky',
'en_EN': 'Anglicky',
'es_ES': '\xc5\xa0pan\xc4\x9blsky',
'ca_AD': 'Katal\xc3\xa1nsky',
'ru_RU': 'Rusky',
'is_IS': 'Islandsky',
'da_DK': 'D\xc3\xa1nsky',
'ar_AE': 'Arabsky',
'sk_SK': 'Slovakian',
'de_DE': 'N\xc4\x9bmecky',
'sr_YU': 'Serbian',
'cs_CZ': '\xc4\x8cesky',
'pl_PL': 'Polsky',
'uk_UA': 'Ukrainian',
'fa_IR': 'Persian',
'sv_SE': '\xc5\xa0v\xc3\xa9dsky',
'he_IL': 'Hebrew',
'T1': 'Please use the UP and DOWN keys to select your language. Afterwards press the OK button.',
'T2': 'V\xc3\xbdb\xc4\x9br jazyka'},
'da_DK': {'tr_TR': 'Tyrkisk',
'fr_FR': 'Fransk',
'fi_FI': 'Finsk',
'pt_PT': 'Portugisisk',
'fy_x-FY': 'Frisisk',
'it_IT': 'Italiensk',
'et_EE': 'Estisk',
'no_NO': 'Norsk',
'nl_NL': 'Hollandsk',
'lv_LV': 'Lettisk',
'el_GR': 'Gr\xc3\xa6sk',
'pt_BR_BR': 'Brazilian Portuguese',
'hu_HU': 'Ungarsk',
'lt_LT': 'Litauisk',
'sl_SI': 'Slovensk',
'hr_HR': 'Kroatisk',
'en_EN': 'Engelsk',
'es_ES': 'Spansk',
'ca_AD': 'Katalansk',
'ru_RU': 'Russisk',
'is_IS': 'Islandsk',
'da_DK': 'Dansk',
'ar_AE': 'Arabisk',
'sk_SK': 'Slovakisk',
'de_DE': 'Tysk',
'sr_YU': 'Serbisk',
'cs_CZ': 'Tjekkisk',
'pl_PL': 'Polsk',
'uk_UA': 'Ukrainsk',
'fa_IR': 'Persisk',
'sv_SE': 'Svensk',
'he_IL': 'Hebr\xc3\xa6isk',
'T1': 'Benyt venligst OP og NED tasten til at v\xc3\xa6lge sprog. Tryk bagefter p\xc3\xa5 OK knappen.',
'T2': 'Valg af sprog'},
'nl_NL': {'tr_TR': 'Turks',
'fr_FR': 'Frans',
'fi_FI': 'Fins',
'pt_PT': 'Portugees',
'fy_x-FY': 'Fries',
'it_IT': 'Italiaans',
'et_EE': 'Estlands',
'no_NO': 'Noors',
'nl_NL': 'Nederlands',
'lv_LV': 'Lets',
'el_GR': 'Grieks',
'pt_BR_BR': 'Brazilian Portuguese',
'hu_HU': 'Hongaars',
'lt_LT': 'Litouws',
'sl_SI': 'Sloveens',
'hr_HR': 'Kroatisch',
'en_EN': 'Engels',
'es_ES': 'Spaans',
'ca_AD': 'Catalaans',
'ru_RU': 'Russisch',
'is_IS': 'Ijslands',
'da_DK': 'Deens',
'ar_AE': 'Arabisch',
'sk_SK': 'Slowaaks',
'de_DE': 'Duits',
'sr_YU': 'Servisch',
'cs_CZ': 'Tsjechisch',
'pl_PL': 'Pools',
'uk_UA': 'Oekra\xc3\xafens',
'fa_IR': 'Perzisch',
'sv_SE': 'Zweeds',
'he_IL': 'Hebreeuws',
'T1': 'Gebruik de omhoog/omlaag toeten om de gewenste taal te selecteren. Druk daarna op OK.',
'T2': 'Taalkeuze'},
'et_EE': {'tr_TR': 'T\xc3\xbcrgi',
'fr_FR': 'Prantsuse',
'fi_FI': 'Soome',
'pt_PT': 'Portugali',
'fy_x-FY': 'Friisi',
'it_IT': 'Itaalia',
'et_EE': 'Eesti',
'no_NO': 'Norra',
'nl_NL': 'Holland',
'lv_LV': 'L\xc3\xa4ti',
'el_GR': 'Kreeka',
'pt_BR_BR': 'Brazilian Portuguese',
'hu_HU': 'Ungari',
'lt_LT': 'Leedu',
'sl_SI': 'Sloveenia',
'hr_HR': 'Horvaatia',
'en_EN': 'Inglise',
'es_ES': 'Hispaania',
'ca_AD': 'Katalaani',
'ru_RU': 'Vene',
'is_IS': 'Island',
'da_DK': 'Taani',
'ar_AE': 'Araabia',
'sk_SK': 'Slovakkia',
'de_DE': 'Saksa',
'sr_YU': 'Serbia',
'cs_CZ': 'T\xc5\xa1ehhi',
'pl_PL': 'Poola',
'uk_UA': 'Ukraina',
'fa_IR': 'P\xc3\xa4rsia',
'sv_SE': 'Rootsi',
'he_IL': 'Hebrew',
'T1': 'Palun kasuta UP ja DOWN nuppe oma keele valimiseks.Keele valimiseks vajuta OK.',
'T2': 'Keele valik'},
'fi_FI': {'tr_TR': 'Turkki',
'fr_FR': 'Ranska',
'fi_FI': 'Suomi',
'pt_PT': 'Portugali',
'fy_x-FY': 'Friisi',
'it_IT': 'Italia',
'et_EE': 'Viro',
'no_NO': 'Norja',
'nl_NL': 'Hollanti',
'lv_LV': 'Latvia',
'el_GR': 'Kreikka',
'pt_BR_BR': 'Brazilian Portuguese',
'hu_HU': 'Unkari',
'lt_LT': 'Liettua',
'sl_SI': 'Slovenia',
'hr_HR': 'Kroatia',
'en_EN': 'Englanti',
'es_ES': 'Espanja',
'ca_AD': 'Katalaani',
'ru_RU': 'Ven\xc3\xa4j\xc3\xa4',
'is_IS': 'Islanti',
'da_DK': 'Tanska',
'ar_AE': 'Arabia',
'sk_SK': 'Slovakia',
'de_DE': 'Saksa',
'sr_YU': 'Serbia',
'cs_CZ': 'T\xc5\xa1ekki',
'pl_PL': 'Puola',
'uk_UA': 'Ukraina',
'fa_IR': 'Persia',
'sv_SE': 'Ruotsi',
'he_IL': 'Hebrea',
'T1': 'Valitse kieli yl\xc3\xb6s/alas n\xc3\xa4pp\xc3\xa4imill\xc3\xa4 ja paina OK-n\xc3\xa4pp\xc3\xa4int\xc3\xa4.',
'T2': 'Kielivalinta'},
'fr_FR': {'tr_TR': 'Turke',
'fr_FR': 'Fran\xc3\xa7ais',
'fi_FI': 'Finlandais',
'pt_PT': 'Portugais',
'fy_x-FY': 'Frison',
'it_IT': 'Italien',
'et_EE': 'Estonien',
'no_NO': 'Norv\xc3\xa9gien',
'nl_NL': 'Hollandais',
'lv_LV': 'Letton',
'el_GR': 'Gr\xc3\xa8que',
'pt_BR_BR': 'Brazilian Portuguese',
'hu_HU': 'Hongrois',
'lt_LT': 'Lithuanien',
'sl_SI': 'Slov\xc3\xa8ne',
'hr_HR': 'Croate',
'en_EN': 'Anglais',
'es_ES': 'Espagnol',
'ca_AD': 'Catalan',
'ru_RU': 'Russe',
'is_IS': 'Islandais',
'da_DK': 'Danois',
'ar_AE': 'Arabe',
'sk_SK': 'Slovaque',
'de_DE': 'Allemand',
'sr_YU': 'Serbe',
'cs_CZ': 'Tch\xc3\xa8que',
'pl_PL': 'Polonais',
'uk_UA': 'Ukrainien',
'fa_IR': 'Iranien',
'sv_SE': 'Su\xc3\xa9dois',
'he_IL': 'Hebrew',
'T1': 'Veuillez utiliser les touches HAUT et BAS pour choisir votre langage. Ensuite presser le bouton OK.',
'T2': 'S\xc3\xa9lection de la langue'},
'el_GR': {'tr_TR': '\xce\xa4\xce\xbf\xcf\x8d\xcf\x81\xce\xba\xce\xb9\xce\xba\xce\xb1',
'fr_FR': '\xce\x93\xce\xb1\xce\xbb\xce\xbb\xce\xb9\xce\xba\xce\xac',
'fi_FI': '\xce\xa6\xce\xb9\xce\xbb\xce\xbb\xce\xb1\xce\xbd\xce\xb4\xce\xb9\xce\xba\xce\xac',
'pt_PT': '\xce\xa0\xce\xbf\xcf\x81\xcf\x84\xce\xbf\xce\xb3\xce\xb1\xce\xbb\xce\xbb\xce\xb9\xce\xba\xce\xac',
'fy_x-FY': 'Frisian',
'it_IT': '\xce\x99\xcf\x84\xce\xb1\xce\xbb\xce\xb9\xce\xba\xce\xac',
'et_EE': 'Estonian',
'no_NO': '\xce\x9d\xce\xbf\xcf\x81\xce\xb2\xce\xb7\xcf\x86\xce\xb9\xce\xba\xce\xac',
'nl_NL': '\xce\x9f\xce\xbb\xce\xbb\xce\xb1\xce\xbd\xce\xb4\xce\xb9\xce\xba\xce\xac',
'lv_LV': 'Latvian',
'el_GR': '\xce\x95\xce\xbb\xce\xbb\xce\xb7\xce\xbd\xce\xb9\xce\xba\xce\xac',
'pt_BR_BR': 'Brazilian Portuguese',
'hu_HU': '\xce\x9f\xcf\x85\xce\xb3\xce\xb3\xce\xb1\xcf\x81\xce\xb5\xce\xb6\xce\xb9\xce\xba\xce\xac',
'lt_LT': '\xce\x9b\xce\xb9\xce\xb8\xce\xbf\xcf\x85\xce\xb1\xce\xbd\xce\xb9\xce\xba\xce\xac',
'sl_SI': 'Slovenian',
'hr_HR': '\xce\x9a\xcf\x81\xce\xbf\xce\xac\xcf\x84\xce\xb9\xce\xba\xce\xb1',
'en_EN': '\xce\x91\xce\xb3\xce\xb3\xce\xbb\xce\xb9\xce\xba\xce\xac',
'es_ES': '\xce\x99\xcf\x83\xcf\x80\xce\xb1\xce\xbd\xce\xb9\xce\xba\xce\xac',
'ca_AD': 'Catalan',
'ru_RU': '\xce\xa1\xcf\x89\xcf\x83\xce\xb9\xce\xba\xce\xac',
'is_IS': '\xce\x99\xcf\x83\xce\xbb\xce\xb1\xce\xbd\xce\xb4\xce\xb9\xce\xba\xce\xac',
'da_DK': '\xce\x94\xce\xb1\xce\xbd\xce\xb9\xce\xba\xce\xac',
'ar_AE': '\xce\x91\xcf\x81\xce\xb1\xce\xb2\xce\xb9\xce\xba\xce\xac',
'sk_SK': 'Slovakian',
'de_DE': '\xce\x93\xce\xb5\xcf\x81\xce\xbc\xce\xb1\xce\xbd\xce\xb9\xce\xba\xce\xac',
'sr_YU': 'Serbian',
'cs_CZ': '\xce\xa4\xcf\x83\xce\xad\xcf\x87\xce\xb9\xce\xba\xce\xb1',
'pl_PL': '\xce\xa0\xce\xbf\xce\xbb\xcf\x89\xce\xbd\xce\xb9\xce\xba\xce\xac',
'uk_UA': 'Ukrainian',
'fa_IR': 'Persian',
'sv_SE': '\xce\xa3\xce\xbf\xcf\x85\xce\xb9\xce\xb4\xce\xb5\xce\xb6\xce\xb9\xce\xba\xce\xac',
'he_IL': 'Hebrew',
'T1': 'Please use the UP and DOWN keys to select your language. Afterwards press the OK button.',
'T2': '\xce\x95\xcf\x80\xce\xb9\xce\xbb\xce\xbf\xce\xb3\xce\xae \xce\xb3\xce\xbb\xcf\x8e\xcf\x83\xcf\x83\xce\xb1\xcf\x82'},
'he_IL': {'tr_TR': '\xd7\x98\xd7\x95\xd7\xa8\xd7\xa7\xd7\x99\xd7\xaa',
'fr_FR': '\xd7\xa6\xd7\xa8\xd7\xa4\xd7\xaa\xd7\x99\xd7\xaa',
'fi_FI': '\xd7\xa4\xd7\x99\xd7\xa0\xd7\x99\xd7\xaa',
'pt_PT': '\xd7\xa4\xd7\x95\xd7\xa8\xd7\x98\xd7\x95\xd7\x92\xd7\x96\xd7\x99\xd7\xaa',
'fy_x-FY': 'Frisian',
'it_IT': '\xd7\x90\xd7\x99\xd7\x98\xd7\x9c\xd7\xa7\xd7\x99\xd7\xaa',
'et_EE': '\xd7\x90\xd7\xa1\xd7\x98\xd7\x95\xd7\xa0\xd7\x99\xd7\x94',
'no_NO': '\xd7\xa0\xd7\x95\xd7\xa8\xd7\x95\xd7\x95\xd7\x92\xd7\x99\xd7\xaa',
'nl_NL': '\xd7\x94\xd7\x95\xd7\x9c\xd7\xa0\xd7\x93\xd7\x99\xd7\xaa',
'lv_LV': 'Latvian',
'el_GR': '\xd7\x99\xd7\x95\xd7\x95\xd7\xa0\xd7\x99\xd7\xaa',
'pt_BR_BR': 'Brazilian Portuguese',
'hu_HU': '\xd7\x94\xd7\x95\xd7\xa0\xd7\x92\xd7\xa8\xd7\x99\xd7\xaa',
'lt_LT': '\xd7\x9c\xd7\x99\xd7\x98\xd7\x90',
'sl_SI': '\xd7\xa1\xd7\x9c\xd7\x95\xd7\x91\xd7\xa0\xd7\x99\xd7\xaa',
'hr_HR': '\xd7\xa7\xd7\xa8\xd7\x95\xd7\x90\xd7\x98\xd7\x99\xd7\xaa',
'en_EN': '\xd7\x90\xd7\xa0\xd7\x92\xd7\x9c\xd7\x99\xd7\xaa',
'es_ES': '\xd7\xa1\xd7\xa4\xd7\xa8\xd7\x93\xd7\x99\xd7\xaa',
'ca_AD': '\xd7\xa7\xd7\x98\xd7\x9c\xd7\x95\xd7\xa0\xd7\x99',
'ru_RU': '\xd7\xa8\xd7\x95\xd7\xa1\xd7\x99\xd7\xaa',
'is_IS': '\xd7\x90\xd7\x99\xd7\xa1\xd7\x9c\xd7\xa0\xd7\x93\xd7\x99\xd7\xaa',
'da_DK': '\xd7\x93\xd7\xa0\xd7\x99\xd7\xaa',
'ar_AE': '\xd7\xa2\xd7\xa8\xd7\x91\xd7\x99\xd7\xaa',
'sk_SK': '\xd7\xa1\xd7\x9c\xd7\x95\xd7\x91\xd7\xa7\xd7\x99\xd7\x94',
'de_DE': '\xd7\x92\xd7\xa8\xd7\x9e\xd7\xa0\xd7\x99\xd7\xaa',
'sr_YU': '\xd7\xa1\xd7\xa8\xd7\x91\xd7\x99\xd7\xaa',
'cs_CZ': "\xd7\xa6'\xd7\x9b\xd7\x99\xd7\xaa",
'pl_PL': '\xd7\xa4\xd7\x95\xd7\x9c\xd7\xa0\xd7\x99\xd7\xaa',
'uk_UA': '\xd7\x90\xd7\x95\xd7\xa7\xd7\xa8\xd7\x90\xd7\x99\xd7\xa0\xd7\x99\xd7\xaa',
'fa_IR': 'Persian',
'sv_SE': '\xd7\xa9\xd7\x95\xd7\x95\xd7\x93\xd7\x99\xd7\xaa',
'he_IL': 'Hebrew',
'T1': '.\xd7\x90\xd7\xa0\xd7\x90 \xd7\x91\xd7\x97\xd7\xa8 \xd7\x90\xd7\xaa \xd7\x94\xd7\xa9\xd7\xa4\xd7\x94, \xd7\x9c\xd7\x90\xd7\x97\xd7\xa8 \xd7\x9e\xd7\x9b\xd7\x9f \xd7\x9c\xd7\x97\xd7\xa5 \xd7\xa2\xd7\x9c \xd7\x90\xd7\xa9\xd7\xa8',
'T2': '\xd7\x91\xd7\x97\xd7\x99\xd7\xa8\xd7\xaa \xd7\xa9\xd7\xa4\xd7\x94'},
'hu_HU': {'tr_TR': 'T\xc3\xb6r\xc3\xb6k',
'fr_FR': 'Francia',
'fi_FI': 'Finn',
'pt_PT': 'Portug\xc3\xa1l',
'fy_x-FY': 'Fr\xc3\xadz',
'it_IT': 'Olasz',
'et_EE': '\xc3\x89szt',
'no_NO': 'Norv\xc3\xa9g',
'nl_NL': 'Holland',
'lv_LV': 'Lett',
'el_GR': 'G\xc3\xb6r\xc3\xb6g',
'pt_BR_BR': 'Brazilian Portuguese',
'hu_HU': 'Magyar',
'lt_LT': 'Litv\xc3\xa1n',
'sl_SI': 'Szolv\xc3\xa9n',
'hr_HR': 'Horv\xc3\xa1t',
'en_EN': 'Angol',
'es_ES': 'Spanyol',
'ca_AD': 'Katal\xc3\xa1n',
'ru_RU': 'Orosz',
'is_IS': 'Izlandi',
'da_DK': 'D\xc3\xa1n',
'ar_AE': 'Arab',
'sk_SK': 'Szlov\xc3\xa1k',
'de_DE': 'N\xc3\xa9met',
'sr_YU': 'Szerb',
'cs_CZ': 'Cseh',
'pl_PL': 'Lengyel',
'uk_UA': 'Ukr\xc3\xa1n',
'fa_IR': 'Perzsa',
'sv_SE': 'Sv\xc3\xa9d',
'he_IL': 'Hebrew',
'T1': 'K\xc3\xa9rem, haszn\xc3\xa1lja a FEL \xc3\xa9s LE gombokat a nyelv kiv\xc3\xa1laszt\xc3\xa1s\xc3\xa1hoz. Ez ut\xc3\xa1n nyomja le az OK gombot.',
'T2': 'Nyelvv\xc3\xa1laszt\xc3\xa1s'},
'lt_LT': {'tr_TR': 'Turk\xc5\xb3',
'fr_FR': 'Pranc\xc5\xabz\xc5\xb3',
'fi_FI': 'Suomi\xc5\xb3',
'pt_PT': 'Portugal\xc5\xb3',
'fy_x-FY': 'Friz\xc5\xb3',
'it_IT': 'Ital\xc5\xb3',
'et_EE': 'Est\xc5\xb3',
'no_NO': 'Norveg\xc5\xb3',
'nl_NL': 'Oland\xc5\xb3',
'lv_LV': 'Latvi\xc5\xb3',
'el_GR': 'Graik\xc5\xb3',
'pt_BR_BR': 'Brazilian Portuguese',
'hu_HU': 'Vengr\xc5\xb3',
'lt_LT': 'Lietuvi\xc5\xb3',
'sl_SI': 'Slov\xc4\x97n\xc5\xb3',
'hr_HR': 'Kroat\xc5\xb3',
'en_EN': 'Angl\xc5\xb3',
'es_ES': 'Ispan\xc5\xb3',
'ca_AD': 'Katalon\xc5\xb3',
'ru_RU': 'Rus\xc5\xb3',
'is_IS': 'Island\xc5\xb3',
'da_DK': 'Dan\xc5\xb3',
'ar_AE': 'Arab\xc5\xb3',
'sk_SK': 'Slovak\xc5\xb3',
'de_DE': 'Vokie\xc4\x8di\xc5\xb3',
'sr_YU': 'Serb\xc5\xb3',
'cs_CZ': '\xc4\x8cek\xc5\xb3',
'pl_PL': 'Lenk\xc5\xb3',
'uk_UA': 'Ukrainie\xc4\x8di\xc5\xb3',
'fa_IR': 'Pers\xc5\xb3',
'sv_SE': '\xc5\xa0ved\xc5\xb3',
'he_IL': 'Hebraj\xc5\xb3',
'T1': 'Pra\xc5\xa1ome naudoti AUK\xc5\xa0TYN IR \xc5\xbdEMYN mygtukus, kad i\xc5\xa1sirinktum\xc4\x97te savo kalb\xc4\x85. Po to spauskite OK mygtuk\xc4\x85.',
'T2': 'Kalbos pasirinkimas'},
'lv_LV': {'tr_TR': 'Turku',
'fr_FR': 'Fran\xc4\x8du',
'fi_FI': 'Somu',
'pt_PT': 'Portug\xc4\x81\xc4\xbcu',
'fy_x-FY': 'Fr\xc4\xabzu',
'it_IT': 'It\xc4\x81\xc4\xbcu',
'et_EE': 'Estonian',
'no_NO': 'Norv\xc4\x93\xc4\xa3u',
'nl_NL': 'Holandie\xc5\xa1u',
'lv_LV': 'Latvie\xc5\xa1u',
'el_GR': 'Grie\xc4\xb7u',
'pt_BR_BR': 'Brazilian Portuguese',
'hu_HU': 'Ung\xc4\x81ru',
'lt_LT': 'Lietuvie\xc5\xa1u',
'sl_SI': 'Slovenian',
'hr_HR': 'Horv\xc4\x81tu',
'en_EN': 'Ang\xc4\xbcu',
'es_ES': 'Sp\xc4\x81\xc5\x86u',
'ca_AD': 'Kat\xc4\x81lie\xc5\xa1u',
'ru_RU': 'Krievu',
'is_IS': 'Islandie\xc5\xa1u',
'da_DK': 'D\xc4\x81\xc5\x86u',
'ar_AE': 'Ar\xc4\x81bu',
'sk_SK': 'Slovakian',
'de_DE': 'V\xc4\x81cu',
'sr_YU': 'Serbian',
'cs_CZ': '\xc4\x8cehu',
'pl_PL': 'Po\xc4\xbcu',
'uk_UA': 'Ukrai\xc5\x86u',
'fa_IR': 'Persian',
'sv_SE': 'Zviedru',
'he_IL': 'Hebrew',
'T1': 'L\xc5\xabdzu lietojiet UP un DOWN tausti\xc5\x86us, lai izv\xc4\x93l\xc4\x93tos valodu. P\xc4\x93c tam spiediet OK.',
'T2': 'Valodas izv\xc4\x93le'},
'is_IS': {'tr_TR': 'Tyrkneska',
'fr_FR': 'Franska',
'fi_FI': 'Finnska',
'pt_PT': 'Port\xc3\xbagalska',
'fy_x-FY': 'Fr\xc3\xadsneska',
'it_IT': '\xc3\x8dtalska',
'et_EE': 'Eistneska',
'no_NO': 'Norska',
'nl_NL': 'Hollenska',
'lv_LV': 'Lettneska',
'el_GR': 'Gr\xc3\xadska',
'pt_BR_BR': 'Brazilian Portuguese',
'hu_HU': 'Ungverska',
'lt_LT': 'Lith\xc3\xa1\xc3\xadska',
'sl_SI': 'Slovenian',
'hr_HR': 'Kr\xc3\xb3at\xc3\xadska',
'en_EN': 'Enska',
'es_ES': 'Sp\xc3\xa6nska',
'ca_AD': 'Katal\xc3\xb3nska',
'ru_RU': 'R\xc3\xbassneska',
'is_IS': '\xc3\x8dslenska',
'da_DK': 'Danska',
'ar_AE': 'Arab\xc3\xadska',
'sk_SK': 'Slovakian',
'de_DE': '\xc3\x9e\xc3\xbdska',
'sr_YU': 'Serneska',
'cs_CZ': 'T\xc3\xa9kkneska',
'pl_PL': 'P\xc3\xb3lska',
'uk_UA': 'Ukrainian',
'fa_IR': 'Persneska',
'sv_SE': 'S\xc3\xa6nskt',
'he_IL': 'Hebrew',
'T1': 'Vinsamlega noti\xc3\xb0 UP og NI\xc3\x90UR takka til a\xc3\xb0 velja tungum\xc3\xa1l. \xc3\x9dttu svo \xc3\xa1 OK til a\xc3\xb0 nota.',
'T2': 'Val tungum\xc3\xa1ls'},
'it_IT': {'tr_TR': 'Turco',
'fr_FR': 'Francese',
'fi_FI': 'Finlandese',
'pt_PT': 'Portoghese',
'fy_x-FY': 'Frisone',
'it_IT': 'Italiano',
'et_EE': 'Estone',
'no_NO': 'Norvegese',
'nl_NL': 'Olandese',
'lv_LV': 'Lettone',
'el_GR': 'Greco',
'pt_BR_BR': 'Brazilian Portuguese',
'hu_HU': 'Ungherese',
'lt_LT': 'Lituano',
'sl_SI': 'Sloveno',
'hr_HR': 'Croato',
'en_EN': 'Inglese',
'es_ES': 'Spagnolo',
'ca_AD': 'Catalano',
'ru_RU': 'Russo',
'is_IS': 'Islandese',
'da_DK': 'Danese',
'ar_AE': 'Arabo',
'sk_SK': 'Slovacco',
'de_DE': 'Tedesco',
'sr_YU': 'Serbo',
'cs_CZ': 'Ceco',
'pl_PL': 'Polacco',
'uk_UA': 'Ucraino',
'fa_IR': 'Persiano',
'sv_SE': 'Svedese',
'he_IL': 'Ebraico',
'T1': 'Selezionare la propria lingua utilizzando i tasti S\xc3\xb9/Gi\xc3\xb9. OK >> confermare.',
'T2': 'Selezione lingua'},
'no_NO': {'tr_TR': 'Tyrkisk',
'fr_FR': 'Fransk',
'fi_FI': 'Finsk',
'pt_PT': 'Portugisisk',
'fy_x-FY': 'Frisisk',
'it_IT': 'Italiensk',
'et_EE': 'Estlandsk',
'no_NO': 'Norsk',
'nl_NL': 'Nederlandsk',
'lv_LV': 'Latvisk',
'el_GR': 'Gresk',
'pt_BR_BR': 'Brazilian Portuguese',
'hu_HU': 'Ungarsk',
'lt_LT': 'Litauisk',
'sl_SI': 'Slovenisk',
'hr_HR': 'Kroatisk',
'en_EN': 'Engelsk',
'es_ES': 'Spansk',
'ca_AD': 'Katalansk',
'ru_RU': 'Russisk',
'is_IS': 'Islandsk',
'da_DK': 'Dansk',
'ar_AE': 'Arabisk',
'sk_SK': 'Slovakisk',
'de_DE': 'Tysk',
'sr_YU': 'Serbisk',
'cs_CZ': 'Tjekkisk',
'pl_PL': 'Polsk',
'uk_UA': 'Ukrainsk',
'fa_IR': 'Persisk',
'sv_SE': 'Svensk',
'he_IL': 'Hebraisk',
'T1': 'Vennligst bruk OPP og NED taster for \xc3\xa5 velge spr\xc3\xa5k. Etterp\xc3\xa5 trykker du OK tast for \xc3\xa5 fortsette.',
'T2': 'Spr\xc3\xa5kvalg'},
'fa_IR': {'tr_TR': '\xd8\xaa\xd8\xb1\xda\xa9\xdb\x8c',
'fr_FR': '\xd9\x81\xd8\xb1\xd8\xa7\xd9\x86\xd8\xb3\xd9\x88\xdb\x8c',
'fi_FI': '\xd9\xbe\xd8\xa7\xdb\x8c\xd8\xa7\xd9\x86',
'pt_PT': '\xd9\xbe\xd8\xb1\xd8\xaa\xd8\xba\xd8\xa7\xd9\x84\xdb\x8c',
'fy_x-FY': '\xd9\x81\xd8\xb1\xdb\x8c\xd8\xb2\xdb\x8c',
'it_IT': '\xd8\xa7\xdb\x8c\xd8\xaa\xd8\xa7\xd9\x84\xdb\x8c\xd8\xa7\xdb\x8c\xdb\x8c',
'et_EE': '\xd8\xa7\xd8\xb3\xd8\xaa\xd9\x88\xd9\x86\xdb\x8c\xd8\xa7\xdb\x8c\xdb\x8c',
'no_NO': '\xd9\x86\xd8\xb1\xd9\x88\xda\x98\xdb\x8c',
'nl_NL': '\xd9\x81\xd9\x84\xd9\x85\xd9\x86\xda\xa9\xdb\x8c',
'lv_LV': '\xd9\x84\xd8\xaa\xd9\x88\xd9\x86\xdb\x8c',
'el_GR': '\xdb\x8c\xd9\x88\xd9\x86\xd8\xa7\xd9\x86\xdb\x8c',
'pt_BR_BR': 'Brazilian Portuguese',
'hu_HU': '\xd9\x85\xd8\xac\xd8\xa7\xd8\xb1\xd8\xb3\xd8\xaa\xd8\xa7\xd9\x86\xdb\x8c',
'lt_LT': '\xd9\x84\xdb\x8c\xd8\xaa\xd9\x88\xd8\xa7\xd9\x86\xdb\x8c',
'sl_SI': '\xd8\xa7\xd8\xb3\xd9\x84\xd9\x88\xd9\x88\xd9\x86\xdb\x8c\xd8\xa7\xdb\x8c\xdb\x8c',
'hr_HR': '\xda\xa9\xd8\xb1\xd9\x88\xd8\xa7\xd8\xb3\xdb\x8c',
'en_EN': '\xd8\xa7\xd9\x86\xda\xaf\xd9\x84\xdb\x8c\xd8\xb3\xdb\x8c',
'es_ES': '\xd8\xa7\xd8\xb3\xd9\xbe\xd8\xa7\xd9\x86\xdb\x8c\xd8\xa7\xdb\x8c\xdb\x8c',
'ca_AD': '\xda\xa9\xd8\xa7\xd8\xaa\xd8\xa7\xd9\x84\xd8\xa7\xd9\x86',
'ru_RU': '\xd8\xb1\xd9\x88\xd8\xb3\xdb\x8c',
'is_IS': '\xd8\xa7\xdb\x8c\xd8\xb3\xd9\x84\xd9\x86\xd8\xaf\xdb\x8c',
'da_DK': '\xd8\xaf\xd8\xa7\xd9\x86\xd9\x85\xd8\xa7\xd8\xb1\xda\xa9\xdb\x8c',
'ar_AE': '\xd8\xb9\xd8\xb1\xd8\xa8\xdb\x8c',
'sk_SK': '\xd8\xa7\xd8\xb3\xd9\x84\xd9\x88\xd8\xa7\xda\xa9\xdb\x8c',
'de_DE': '\xd8\xa2\xd9\x84\xd9\x85\xd8\xa7\xd9\x86\xdb\x8c',
'sr_YU': '\xd8\xb5\xd8\xb1\xd8\xa8\xd8\xb3\xd8\xaa\xd8\xa7\xd9\x86\xdb\x8c',
'cs_CZ': '\xda\x86\xda\xa9\xd9\x88\xd8\xb3\xd9\x84\xd9\x88\xd8\xa7\xda\xa9\xdb\x8c',
'pl_PL': '\xd9\x84\xd9\x87\xd8\xb3\xd8\xaa\xd8\xa7\xd9\x86\xdb\x8c',
'uk_UA': '\xd8\xa7\xd9\x88\xda\xa9\xd8\xb1\xd8\xa7\xdb\x8c\xd9\x86\xdb\x8c',
'fa_IR': '\xd9\x81\xd8\xa7\xd8\xb1\xd8\xb3\xdb\x8c',
'sv_SE': '\xd8\xb3\xd9\x88\xd8\xa6\xd8\xaf\xdb\x8c',
'he_IL': '\xd8\xb9\xd8\xa8\xd8\xb1\xdb\x8c',
'T1': 'Please use the UP and DOWN keys to select your language. Afterwards press the OK button.',
'T2': '\xd8\xa7\xd9\x86\xd8\xaa\xd8\xae\xd8\xa7\xd8\xa8 \xd8\xb2\xd8\xa8\xd8\xa7\xd9\x86'},
'pl_PL': {'tr_TR': 'Turecki',
'fr_FR': 'Francuski',
'fi_FI': 'Fi\xc5\x84ski',
'pt_PT': 'Portugalski',
'fy_x-FY': 'Fryzyjski',
'it_IT': 'W\xc5\x82oski',
'et_EE': 'Esto\xc5\x84ski',
'no_NO': 'Norweski',
'nl_NL': 'Holenderski',
'lv_LV': '\xc5\x81otewski',
'el_GR': 'Grecki',
'pt_BR_BR': 'Brazilian Portuguese',
'hu_HU': 'W\xc4\x99gierski',
'lt_LT': 'Litewski',
'sl_SI': 'S\xc5\x82owe\xc5\x84ski',
'hr_HR': 'Chorwacki',
'en_EN': 'Angielski',
'es_ES': 'Hiszpa\xc5\x84ski',
'ca_AD': 'Hiszpa\xc5\x84ski',
'ru_RU': 'Rosyjski',
'is_IS': 'Islandzki',
'da_DK': 'Du\xc5\x84ski',
'ar_AE': 'Arabski',
'sk_SK': 'S\xc5\x82owacki',
'de_DE': 'Niemiecki',
'sr_YU': 'Serbski',
'cs_CZ': 'Czeski',
'pl_PL': 'Polski',
'uk_UA': 'Ukrai\xc5\x84ski',
'fa_IR': 'Perski',
'sv_SE': 'Szwedzki',
'he_IL': 'Hebrajski',
'T1': 'W celu wyboru j\xc4\x99zyka u\xc5\xbcyj klawiszy G\xc3\x93RA i D\xc3\x93\xc5\x81. Nastepnie nacisnij przycisk OK.',
'T2': 'Wyb\xc3\xb3r j\xc4\x99zyka'},
'pt_PT': {'tr_TR': 'Turco',
'fr_FR': 'Franc\xc3\xaas',
'fi_FI': 'Finland\xc3\xaas',
'pt_PT': 'Portugu\xc3\xaas',
'fy_x-FY': 'Frisian',
'it_IT': 'Italiano',
'et_EE': 'Estonian',
'no_NO': 'Noruegu\xc3\xaas',
'nl_NL': 'Holand\xc3\xaas',
'lv_LV': 'Latvian',
'el_GR': 'Grego',
'pt_BR_BR': 'Brazilian Portuguese',
'hu_HU': 'Hungaro',
'lt_LT': 'Lituano',
'sl_SI': 'Slovenian',
'hr_HR': 'Croata',
'en_EN': 'Ingl\xc3\xaas',
'es_ES': 'Catal\xc3\xa3o',
'ca_AD': 'Catal\xc3\xa3o',
'ru_RU': 'Russo',
'is_IS': 'Island\xc3\xaas',
'da_DK': 'Dinamarqu\xc3\xaas',
'ar_AE': 'Arabe',
'sk_SK': 'Slovakian',
'de_DE': 'Alem\xc3\xa3o',
'sr_YU': 'Serbian',
'cs_CZ': 'Checo',
'pl_PL': 'Polaco',
'uk_UA': 'Ukrainian',
'fa_IR': 'Persian',
'sv_SE': 'Sueco',
'he_IL': 'Hebrew',
'T1': 'Please use the UP and DOWN keys to select your language. Afterwards press the OK button.',
'T2': 'Selec\xc3\xa7\xc3\xa3o do Idioma'},
'ru_RU': {'tr_TR': '\xd0\xa2\xd1\x83\xd1\x80\xd0\xb5\xd1\x86\xd0\xba\xd0\xb8\xd0\xb9',
'fr_FR': '\xd0\xa4\xd1\x80\xd0\xb0\xd0\xbd\xd1\x86\xd1\x83\xd0\xb7\xd1\x81\xd0\xba\xd0\xb8\xd0\xb9',
'fi_FI': '\xd0\xa4\xd0\xb8\xd0\xbd\xd1\x81\xd0\xba\xd0\xb8\xd0\xb9',
'pt_PT': '\xd0\x9f\xd0\xbe\xd1\x80\xd1\x82\xd1\x83\xd0\xb3\xd0\xb0\xd0\xbb\xd1\x8c\xd1\x81\xd0\xba\xd0\xb8\xd0\xb9',
'fy_x-FY': '\xd0\xa4\xd1\x80\xd0\xb8\xd0\xb7\xd1\x81\xd0\xba\xd0\xb8\xd0\xb9',
'it_IT': '\xd0\x98\xd1\x82\xd0\xb0\xd0\xbb\xd1\x8c\xd1\x8f\xd0\xbd\xd1\x81\xd0\xba\xd0\xb8\xd0\xb9',
'et_EE': 'Estonian',
'no_NO': '\xd0\x9d\xd0\xbe\xd1\x80\xd0\xb2\xd0\xb5\xd0\xb6\xd1\x81\xd0\xba\xd0\xb8\xd0\xb9',
'nl_NL': '\xd0\x9d\xd0\xb8\xd0\xb4\xd0\xb5\xd1\x80\xd0\xbb\xd0\xb0\xd0\xbd\xd0\xb4\xd1\x81\xd0\xba\xd0\xb8\xd0\xb9',
'lv_LV': '\xd0\x9b\xd0\xb0\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb9\xd1\x81\xd0\xba\xd0\xb8\xd0\xb9',
'el_GR': '\xd0\x93\xd1\x80\xd0\xb5\xd1\x87\xd0\xb5\xd1\x81\xd0\xba\xd0\xb8\xd0\xb9',
'pt_BR_BR': 'Brazilian Portuguese',
'hu_HU': '\xd0\x92\xd0\xb5\xd0\xbd\xd0\xb3\xd0\xb5\xd1\x80\xd1\x81\xd0\xba\xd0\xb8\xd0\xb9',
'lt_LT': '\xd0\x9b\xd0\xb8\xd1\x82\xd0\xbe\xd0\xb2\xd1\x81\xd0\xba\xd0\xb8\xd0\xb9',
'sl_SI': 'Slovenian',
'hr_HR': '\xd0\xa5\xd0\xbe\xd1\x80\xd0\xb2\xd0\xb0\xd1\x82\xd1\x81\xd0\xba\xd0\xb8\xd0\xb9',
'en_EN': '\xd0\x90\xd0\xbd\xd0\xb3\xd0\xbb\xd0\xb8\xd0\xb9\xd1\x81\xd0\xba\xd0\xb8\xd0\xb9',
'es_ES': '\xd0\x98\xd1\x81\xd0\xbf\xd0\xb0\xd0\xbd\xd1\x81\xd0\xba\xd0\xb8\xd0\xb9',
'ca_AD': '\xd0\x9a\xd0\xb0\xd1\x82\xd0\xb0\xd0\xbb\xd0\xbe\xd0\xbd\xd1\x81\xd0\xba\xd0\xb8\xd0\xb9',
'ru_RU': '\xd0\xa0\xd1\x83\xd1\x81\xd1\x81\xd0\xba\xd0\xb8\xd0\xb9',
'is_IS': '\xd0\x98\xd1\x81\xd0\xbb\xd0\xb0\xd0\xbd\xd0\xb4\xd1\x81\xd0\xba\xd0\xb8\xd0\xb9',
'da_DK': '\xd0\x94\xd0\xb0\xd1\x82\xd1\x81\xd0\xba\xd0\xb8\xd0\xb9',
'ar_AE': '\xd0\x90\xd1\x80\xd0\xb0\xd0\xb1\xd1\x81\xd0\xba\xd0\xb8\xd0\xb9',
'sk_SK': 'Slovakian',
'de_DE': '\xd0\x9d\xd0\xb5\xd0\xbc\xd0\xb5\xd1\x86\xd0\xba\xd0\xb8\xd0\xb9',
'sr_YU': 'Serbian',
'cs_CZ': '\xd0\xa7\xd0\xb5\xd1\x88\xd1\x81\xd0\xba\xd0\xb8\xd0\xb9',
'pl_PL': '\xd0\x9f\xd0\xbe\xd0\xbb\xd1\x8c\xd1\x81\xd0\xba\xd0\xb8\xd0\xb9',
'uk_UA': '\xd0\xa3\xd0\xba\xd1\x80\xd0\xb0\xd0\xb8\xd0\xbd\xd1\x81\xd0\xba\xd0\xb8\xd0\xb9',
'fa_IR': 'Persian',
'sv_SE': '\xd0\xa8\xd0\xb2\xd0\xb5\xd0\xb4\xd1\x81\xd0\xba\xd0\xb8\xd0\xb9',
'he_IL': 'Hebrew',
'T1': '\xd0\x9f\xd0\xbe\xd0\xb6\xd0\xb0\xd0\xbb\xd1\x83\xd0\xb9\xd1\x81\xd1\x82\xd0\xb0, \xd0\xb8\xd1\x81\xd0\xbf\xd0\xbe\xd0\xbb\xd1\x8c\xd0\xb7\xd1\x83\xd0\xb9\xd1\x82\xd0\xb5 \xd0\xba\xd0\xbd\xd0\xbe\xd0\xbf\xd0\xba\xd0\xb8 \xd0\x92\xd0\x92\xd0\x95\xd0\xa0\xd0\xa5 \xd0\xb8 \xd0\x92\xd0\x9d\xd0\x98\xd0\x97 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xb1\xd0\xbe\xd1\x80\xd0\xb0 \xd1\x8f\xd0\xb7\xd1\x8b\xd0\xba\xd0\xb0. \xd0\x94\xd0\xb0\xd0\xbb\xd0\xb5\xd0\xb5 \xd0\xbd\xd0\xb0\xd0\xb6\xd0\xbc\xd0\xb8\xd1\x82\xd0\xb5 \xd0\x9e\xd0\x9a.',
'T2': '\xd0\x92\xd1\x8b\xd0\xb1\xd0\xbe\xd1\x80 \xd1\x8f\xd0\xb7\xd1\x8b\xd0\xba\xd0\xb0'},
'sr_YU': {'tr_TR': 'Turski',
'fr_FR': 'Francuski',
'fi_FI': 'Finski',
'pt_PT': 'Portugalski',
'fy_x-FY': 'Frizijski',
'it_IT': 'Italijanski',
'et_EE': 'Estonski',
'no_NO': 'Norve\xc5\xa1ki',
'nl_NL': 'Holandski',
'lv_LV': 'Letonski',
'el_GR': 'Gr\xc4\x8dki',
'pt_BR_BR': 'Brazilian Portuguese',
'hu_HU': 'Ma\xc4\x91arski',
'lt_LT': 'Litvanski',
'sl_SI': 'Slovena\xc4\x8dki',
'hr_HR': 'Hrvatski',
'en_EN': 'Engleski',
'es_ES': '\xc5\xa0panski',
'ca_AD': 'Katalonski',
'ru_RU': 'Ruski',
'is_IS': 'Islandski',
'da_DK': 'Danski',
'ar_AE': 'Arapski',
'sk_SK': 'Slova\xc4\x8dki',
'de_DE': 'Nema\xc4\x8dki',
'sr_YU': 'Srpski',
'cs_CZ': '\xc4\x8ce\xc5\xa1ki',
'pl_PL': 'Poljski',
'uk_UA': 'Ukrajinski',
'fa_IR': 'Persijski',
'sv_SE': '\xc5\xa0vedski',
'he_IL': 'Hebrew',
'T1': 'Molim koristite UP i DOWN tipke da izaberete jezik.Posle toga pritisnite OK dugme.',
'T2': 'Izbor jezika'},
'sk_SK': {'tr_TR': 'Ture\xc4\x8dtina',
'fr_FR': 'Franc\xc3\xbaz\xc5\xa1tina',
'fi_FI': 'F\xc3\xadn\xc4\x8dina',
'pt_PT': 'Portugal\xc4\x8dina',
'fy_x-FY': 'Fr\xc3\xadz\xc5\xa1tina',
'it_IT': 'Talian\xc4\x8dina',
'et_EE': 'Est\xc3\xb3n\xc4\x8dina',
'no_NO': 'N\xc3\xb3r\xc4\x8dina',
'nl_NL': 'Holand\xc4\x8dina',
'lv_LV': 'Loty\xc5\xa1\xc4\x8dina',
'el_GR': 'Gr\xc3\xa9\xc4\x8dtina',
'pt_BR_BR': 'Brazilian Portuguese',
'hu_HU': 'Ma\xc4\x8far\xc4\x8dina',
'lt_LT': 'Litov\xc4\x8dina',
'sl_SI': 'Slovin\xc4\x8dina',
'hr_HR': 'Chorv\xc3\xa1t\xc4\x8dina',
'en_EN': 'Angli\xc4\x8dtina',
'es_ES': '\xc5\xa0paniel\xc4\x8dina',
'ca_AD': 'Katal\xc3\xa1n\xc4\x8dina',
'ru_RU': 'Ru\xc5\xa1tina',
'is_IS': 'Island\xc4\x8dina',
'da_DK': 'D\xc3\xa1n\xc4\x8dina',
'ar_AE': 'Arab\xc4\x8dina',
'sk_SK': 'Sloven\xc4\x8dina',
'de_DE': 'Nem\xc4\x8dina',
'sr_YU': 'Srb\xc4\x8dina',
'cs_CZ': '\xc4\x8ce\xc5\xa1tina',
'pl_PL': 'Po\xc4\xbe\xc5\xa1tina',
'uk_UA': 'Ukrajin\xc4\x8dina',
'fa_IR': 'Perz\xc5\xa1tina',
'sv_SE': '\xc5\xa0v\xc3\xa9d\xc4\x8dina',
'he_IL': 'Hebrej\xc4\x8dina',
'T1': 'Tla\xc4\x8didlami NAHOR alebo NADOL zvo\xc4\xbete jazyk. Potom stla\xc4\x8dte tla\xc4\x8didlo OK.',
'T2': 'Vo\xc4\xbeba jazyka'},
'sl_SI': {'tr_TR': 'Tur\xc5\xa1ko',
'fr_FR': 'Francosko',
'fi_FI': 'Finsko',
'pt_PT': 'Portugalsko',
'fy_x-FY': 'Frisian',
'it_IT': 'Italjansko',
'et_EE': 'Estonian',
'no_NO': 'Norve\xc5\xa1ki',
'nl_NL': 'Nizozemsko',
'lv_LV': 'Latvian',
'el_GR': 'Gr\xc5\xa1ko',
'pt_BR_BR': 'Brazilian Portuguese',
'hu_HU': 'Mad\xc5\xbearsko',
'lt_LT': 'Litvansko',
'sl_SI': 'Slovensko',
'hr_HR': 'Hrva\xc5\xa1ko',
'en_EN': 'Angle\xc5\xa1ko',
'es_ES': '\xc5\xa0pansko',
'ca_AD': 'Katalonski',
'ru_RU': 'Rusko',
'is_IS': 'Islandsko',
'da_DK': 'Dansko',
'ar_AE': 'Arabsko',
'sk_SK': 'Slovakian',
'de_DE': 'Nem\xc5\xa1ko',
'sr_YU': 'Serbian',
'cs_CZ': '\xc4\x8ce\xc5\xa1ko',
'pl_PL': 'Poljsko',
'uk_UA': 'Ukrajinsko',
'fa_IR': 'Persian',
'sv_SE': '\xc5\xa0vedsko',
'he_IL': 'Hebrew',
'T1': 'Prosim uporabite tipke GOR in DOL za izbiro jezika in pritisnite OK tipko za potrditev.',
'T2': 'Izberite jezik'},
'es_ES': {'tr_TR': 'Turco',
'fr_FR': 'Franc\xc3\xa9s',
'fi_FI': 'Finland\xc3\xa9s',
'pt_PT': 'Portugu\xc3\xa9s',
'fy_x-FY': 'Fris\xc3\xb3n',
'it_IT': 'Italiano',
'et_EE': 'Estonio',
'no_NO': 'Noruego',
'nl_NL': 'Alem\xc3\xa1n',
'lv_LV': 'Let\xc3\xb3n',
'el_GR': 'Griego',
'pt_BR_BR': 'Brazilian Portuguese',
'hu_HU': 'H\xc3\xbangaro',
'lt_LT': 'Lituano',
'sl_SI': 'Esloveno',
'hr_HR': 'Croata',
'en_EN': 'Ingl\xc3\xa9s',
'es_ES': 'Espa\xc3\xb1ol',
'ca_AD': 'Catal\xc3\xa1n',
'ru_RU': 'Ruso',
'is_IS': 'Island\xc3\xa9s',
'da_DK': 'Dan\xc3\xa9s',
'ar_AE': 'Ar\xc3\xa1bigo',
'sk_SK': 'Eslovaco',
'de_DE': 'Alem\xc3\xa1n',
'sr_YU': 'Servio',
'cs_CZ': 'Checo',
'pl_PL': 'Polaco',
'uk_UA': 'Ucraniano',
'fa_IR': 'Persa',
'sv_SE': 'Sueco',
'he_IL': 'Hebrew',
'T1': 'Use las teclas ARRIBA y ABAJO para seleccionar su idioma. Despu\xc3\xa9s, pulse el bot\xc3\xb3n OK.',
'T2': 'Selecci\xc3\xb3n de idioma'},
'sv_SE': {'tr_TR': 'Turkiska',
'fr_FR': 'Franska',
'fi_FI': 'Finska',
'pt_PT': 'Portugisiska',
'fy_x-FY': 'Frisiska',
'it_IT': 'Italienska',
'et_EE': 'Estniska',
'no_NO': 'Norska',
'nl_NL': 'Holl\xc3\xa4ndska',
'lv_LV': 'Lettiska',
'el_GR': 'Grekiska',
'pt_BR_BR': 'Brazilian Portuguese',
'hu_HU': 'Ungerska',
'lt_LT': 'Litauiska',
'sl_SI': 'Slovenska',
'hr_HR': 'Kroatiska',
'en_EN': 'Engelska',
'es_ES': 'Spanska',
'ca_AD': 'Katalanska',
'ru_RU': 'Ryska',
'is_IS': 'Isl\xc3\xa4ndska',
'da_DK': 'Danska',
'ar_AE': 'Arabiska',
'sk_SK': 'Slovakisk',
'de_DE': 'Tyska',
'sr_YU': 'Serbiska',
'cs_CZ': 'Tjeckiska',
'pl_PL': 'Polska',
'uk_UA': 'Ukrainska',
'fa_IR': 'Persiska',
'sv_SE': 'Svenska',
'he_IL': 'Hebreiska',
'T1': 'V\xc3\xa4nligen anv\xc3\xa4nd UPP och NER pil f\xc3\xb6r att v\xc3\xa4lja spr\xc3\xa5k. Efter val tryck p\xc3\xa5 OK knappen.',
'T2': 'V\xc3\xa4lj spr\xc3\xa5k'},
'tr_TR': {'tr_TR': 'T\xc3\xbcrk\xc3\xa7e',
'fr_FR': 'Frans\xc4\xb1zca',
'fi_FI': 'Fince',
'pt_PT': 'Portekizce',
'fy_x-FY': 'Frizyece',
'it_IT': '\xc4\xb0talyanca',
'et_EE': 'Est\xc3\xa7e',
'no_NO': 'Norve\xc3\xa7\xc3\xa7e',
'nl_NL': 'Flemenk\xc3\xa7e',
'lv_LV': 'Letonca',
'el_GR': 'Yunanca',
'pt_BR_BR': 'Brazilian Portuguese',
'hu_HU': 'Macarca',
'lt_LT': 'Litvanyaca',
'sl_SI': 'Slovence',
'hr_HR': 'H\xc4\xb1rvat\xc3\xa7a',
'en_EN': '\xc4\xb0ngilizce',
'es_ES': '\xc4\xb0spanyolca',
'ca_AD': 'Katalanca',
'ru_RU': 'Rus\xc3\xa7a',
'is_IS': '\xc4\xb0zlandaca',
'da_DK': 'Danca',
'ar_AE': 'Arap\xc3\xa7a',
'sk_SK': 'Slovak\xc3\xa7a',
'de_DE': 'Almanca',
'sr_YU': 'S\xc4\xb1rp\xc3\xa7a',
'cs_CZ': '\xc3\x87ek\xc3\xa7e',
'pl_PL': 'Leh\xc3\xa7e',
'uk_UA': 'Ukraynaca',
'fa_IR': 'Fars\xc3\xa7a',
'sv_SE': '\xc4\xb0sve\xc3\xa7\xc3\xa7e',
'he_IL': 'Hebrew',
'T1': 'Dil se\xc3\xa7iminizi, yapmak i\xc3\xa7in YUKARI ve A\xc5\x9eA\xc4\x9eI tu\xc5\x9flar\xc4\xb1n\xc4\xb1, onaylamak i\xc3\xa7in OK tu\xc5\x9funu kullan\xc4\xb1n.',
'T2': 'Dil se\xc3\xa7imi'},
'uk_UA': {'tr_TR': '\xd0\xa2\xd1\x83\xd1\x80\xd0\xb5\xd1\x86\xd1\x8c\xd0\xba\xd0\xb0',
'fr_FR': '\xd0\xa4\xd1\x80\xd0\xb0\xd0\xbd\xd1\x86\xd1\x83\xd0\xb7\xd1\x8c\xd0\xba\xd0\xb0',
'fi_FI': '\xd0\xa4\xd1\x96\xd0\xbd\xd1\x81\xd1\x8c\xd0\xba\xd0\xb0',
'pt_PT': '\xd0\x9f\xd0\xbe\xd1\x80\xd1\x82\xd1\x83\xd0\xb3\xd0\xb0\xd0\xbb\xd1\x8c\xd1\x81\xd1\x8c\xd0\xba\xd0\xb0',
'fy_x-FY': '\xd0\xa4\xd1\x80\xd1\x96\xd0\xb7\xd1\x96\xd0\xb9\xd1\x81\xd1\x8c\xd0\xba\xd0\xb0',
'it_IT': '\xd0\x86\xd1\x82\xd0\xb0\xd0\xbb\xd1\x96\xd0\xb9\xd1\x81\xd1\x8c\xd0\xba\xd0\xb0',
'et_EE': 'Estonian',
'no_NO': '\xd0\x9d\xd0\xbe\xd1\x80\xd0\xb2\xd0\xb5\xd0\xb6\xd1\x81\xd1\x8c\xd0\xba\xd0\xb0',
'nl_NL': '\xd0\x93\xd0\xbe\xd0\xbb\xd0\xbb\xd0\xb0\xd0\xbd\xd0\xb4\xd1\x81\xd1\x8c\xd0\xba\xd0\xb0',
'lv_LV': '\xd0\x9b\xd0\xb0\xd1\x82\xd0\xb2\xd1\x96\xd0\xb9\xd1\x81\xd1\x8c\xd0\xba\xd0\xb0',
'el_GR': '\xd0\x93\xd1\x80\xd0\xb5\xd1\x86\xd1\x8c\xd0\xba\xd0\xb0',
'pt_BR_BR': 'Brazilian Portuguese',
'hu_HU': '\xd0\xa3\xd0\xb3\xd0\xbe\xd1\x80\xd1\x81\xd1\x8c\xd0\xba\xd0\xb0',
'lt_LT': '\xd0\x9b\xd0\xb8\xd1\x82\xd0\xbe\xd0\xb2\xd1\x81\xd1\x8c\xd0\xba\xd0\xb0',
'sl_SI': 'Slovenian',
'hr_HR': '\xd0\xa5\xd0\xbe\xd1\x80\xd0\xb2\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd0\xba\xd0\xb0',
'en_EN': '\xd0\x90\xd0\xbd\xd0\xb3\xd0\xbb\xd1\x96\xd0\xb9\xd1\x81\xd1\x8c\xd0\xba\xd0\xb0',
'es_ES': '\xd0\x86\xd1\x81\xd0\xbf\xd0\xb0\xd0\xbd\xd1\x81\xd1\x8c\xd0\xba\xd0\xb0',
'ca_AD': '\xd0\x9a\xd0\xb0\xd1\x82\xd0\xb0\xd0\xbb\xd0\xbe\xd0\xbd\xd1\x81\xd1\x8c\xd0\xba\xd0\xb0',
'ru_RU': '\xd0\xa0\xd0\xbe\xd1\x81\xd1\x96\xd0\xb9\xd1\x81\xd1\x8c\xd0\xba\xd0\xb0',
'is_IS': '\xd0\x86\xd1\x81\xd0\xbb\xd0\xb0\xd0\xbd\xd0\xb4\xd1\x81\xd1\x8c\xd0\xba\xd0\xb0',
'da_DK': '\xd0\x94\xd0\xb0\xd0\xbd\xd1\x81\xd1\x8c\xd0\xba\xd0\xb0',
'ar_AE': '\xd0\x90\xd1\x80\xd0\xb0\xd0\xb1\xd1\x81\xd1\x8c\xd0\xba\xd0\xb8\xd0\xb9',
'sk_SK': 'Slovakian',
'de_DE': '\xd0\x9d\xd1\x96\xd0\xbc\xd0\xb5\xd1\x86\xd1\x8c\xd0\xba\xd0\xb0',
'sr_YU': '\xd0\xa1\xd0\xb5\xd1\x80\xd0\xb1\xd1\x81\xd1\x8c\xd0\xba\xd0\xb0',
'cs_CZ': '\xd0\xa7\xd0\xb5\xd1\x88\xd1\x81\xd1\x8c\xd0\xba\xd0\xb0',
'pl_PL': '\xd0\x9f\xd0\xbe\xd0\xbb\xd1\x8c\xd1\x81\xd1\x8c\xd0\xba\xd0\xb0',
'uk_UA': '\xd0\xa3\xd0\xba\xd1\x80\xd0\xb0\xd1\x97\xd0\xbd\xd1\x81\xd1\x8c\xd0\xba\xd0\xb0',
'fa_IR': 'Persian',
'sv_SE': '\xd0\xa8\xd0\xb2\xd0\xb5\xd0\xb4\xd1\x81\xd1\x8c\xd0\xba\xd0\xb0',
'he_IL': 'Hebrew',
'T1': '\xd0\x92\xd0\xb8\xd0\xba\xd0\xbe\xd1\x80\xd0\xb8\xd1\x81\xd1\x82\xd0\xbe\xd0\xb2\xd1\x83\xd0\xb9\xd1\x82\xd0\xb5 \xd0\xba\xd0\xbd\xd0\xbe\xd0\xbf\xd0\xba\xd0\xb8 \xd0\x92\xd0\x92\xd0\x95\xd0\xa0\xd0\xa5 \xd1\x96 \xd0\x92\xd0\x9d\xd0\x98\xd0\x97, \xd1\x89\xd0\xbe\xd0\xb1 \xd0\xb2\xd0\xb8\xd0\xb1\xd1\x80\xd0\xb0\xd1\x82\xd0\xb8 \xd0\x92\xd0\xb0\xd1\x88\xd1\x83 \xd0\xbc\xd0\xbe\xd0\xb2\xd1\x83. \xd0\x9f\xd1\x96\xd1\x81\xd0\xbb\xd1\x8f \xd0\xb2\xd0\xb8\xd0\xb1\xd0\xbe\xd1\x80\xd1\x83 \xd0\xbd\xd0\xb0\xd1\x82\xd0\xb8\xd1\x81\xd0\xbd\xd1\x96\xd1\x82\xd1\x8c OK.',
'T2': '\xd0\x92\xd0\xb8\xd0\xb1\xd1\x96\xd1\x80 \xd0\xbc\xd0\xbe\xd0\xb2\xd0\xb8'},
'fy_x-FY': {'tr_TR': 'Turks',
'fr_FR': 'Frans',
'fi_FI': 'Finsk',
'pt_PT': 'Portugeesk',
'fy_x-FY': 'Frysk ',
'it_IT': 'Italiaansk',
'et_EE': 'Estonian',
'no_NO': 'Noarsk',
'nl_NL': 'Nederlansk',
'lv_LV': 'Latvian',
'el_GR': 'Gryks',
'pt_BR_BR': 'Brazilian Portuguese',
'hu_HU': 'Hongaarsk',
'lt_LT': 'Lithuaniansk',
'sl_SI': 'Slovenian',
'hr_HR': 'Kroatysk',
'en_EN': 'Engelsk',
'es_ES': 'Spaans',
'ca_AD': 'Catal\xc3\xa2nsk',
'ru_RU': 'Russysk',
'is_IS': 'Iislansk',
'da_DK': 'Deensk',
'ar_AE': 'Arabysk',
'sk_SK': 'Slovakian',
'de_DE': 'D\xc3\xbats',
'sr_YU': 'Serbian',
'cs_CZ': 'Tsjechysk',
'pl_PL': 'Poolsk',
'uk_UA': 'Oekra\xc3\xafne',
'fa_IR': 'Persian',
'sv_SE': 'Zweeds',
'he_IL': 'Hebrew',
'T1': 'Br\xc3\xbak de op en del toets om jo taal te kiezen. Dernei druk op OK',
'T2': 'Taal Kieze'}}
|
kingvuplus/ts-gui-3
|
lib/python/Components/language_cache.py
|
Python
|
gpl-2.0
| 50,254
| 0.002846
|
# -*- coding: utf-8 -*-
"""WebUI."""
from .websocket import WebsocketProxyHandler
def create_webapp(naumanni, **kwargs):
"""App factory.
    :param naumanni: Naumanni core object
    :param kwargs: extra keyword arguments passed through to NaumanniWebApp
    :return: WebUI App
    :rtype: NaumanniWebApp
"""
from .app import NaumanniWebApp
app = NaumanniWebApp(naumanni, **kwargs)
return app
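# Illustrative usage sketch (added; not part of the original module). It
# assumes a Naumanni core object named `naumanni_core`; keyword arguments
# are forwarded to NaumanniWebApp unchanged:
#
#     app = create_webapp(naumanni_core, debug=True)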
|
glucoseinc/naumanni-server
|
naumanni/web/__init__.py
|
Python
|
agpl-3.0
| 422
| 0
|
#
# Copyright (c) 2014, Jim Bosch
# All rights reserved.
#
# mcpib is distributed under a simple BSD-like license;
# see the LICENSE file that should be present in the root
# of the source distribution.
#
import unittest
import os
import sys
buildPythonPath = os.path.join(os.path.split(__file__)[0], "..", "python")
if os.path.exists(buildPythonPath): sys.path.insert(0, buildPythonPath)
import mcpib
import builtin_strings_mod as mod
class BuiltinStringsTestCase(unittest.TestCase):
def testString(self):
"""Test that to-Python and from-Python converters for std::string work as expected."""
self.assertEqual(mod.passthru_string("foo"), "foo")
self.assertRaises(mcpib.FromPythonError, mod.passthru_string, 5)
self.assertRaises(mcpib.FromPythonError, mod.passthru_string, ["bar"])
def testCString(self):
"""Test that to-Python and from-Python converters for char const * work as expected."""
self.assertEqual(mod.passthru_cstring("foo"), "foo")
self.assertRaises(mcpib.FromPythonError, mod.passthru_cstring, 5)
self.assertRaises(mcpib.FromPythonError, mod.passthru_cstring, ["bar"])
def testCharArgs(self):
"""Test that c-string converters are not used for char values, references, or non-const pointers."""
self.assertRaises(mcpib.FromPythonError, mod.accept_char, "foo")
self.assertRaises(mcpib.FromPythonError, mod.accept_char_const, "foo")
self.assertRaises(mcpib.FromPythonError, mod.accept_char_ptr, "foo")
self.assertRaises(mcpib.FromPythonError, mod.accept_char_ref, "foo")
self.assertRaises(mcpib.FromPythonError, mod.accept_char_const_ref, "foo")
self.assertRaises(mcpib.ToPythonError, mod.return_char)
self.assertRaises(mcpib.ToPythonError, mod.return_char_const)
self.assertRaises(mcpib.ToPythonError, mod.return_char_ptr)
self.assertRaises(mcpib.ToPythonError, mod.return_char_ref)
self.assertRaises(mcpib.ToPythonError, mod.return_char_const_ref)
if __name__ == "__main__":
unittest.main()
|
TallJimbo/mcpib
|
tests/builtin_strings_test.py
|
Python
|
bsd-2-clause
| 2,081
| 0.004325
|
#!/usr/bin/env python
"""FEM library
Demonstrates some simple finite element definitions, and computes a mass
matrix
$ python fem.py
[ 1/60, 0, -1/360, 0, -1/90, -1/360]
[ 0, 4/45, 0, 2/45, 2/45, -1/90]
[-1/360, 0, 1/60, -1/90, 0, -1/360]
[ 0, 2/45, -1/90, 4/45, 2/45, 0]
[ -1/90, 2/45, 0, 2/45, 4/45, 0]
[-1/360, -1/90, -1/360, 0, 0, 1/60]
"""
from sympy import symbols, Symbol, factorial, Rational, binomial, zeros, \
    div, eye, integrate, diff, pprint, reduced
x, y, z = symbols('x,y,z')
class ReferenceSimplex:
def __init__(self, nsd):
self.nsd = nsd
coords = []
if nsd <= 3:
coords = symbols('x,y,z')[:nsd]
else:
coords = []
for d in range(0,nsd):
coords.append(Symbol("x_%d" % d))
self.coords = coords
def integrate(self,f):
coords = self.coords
nsd = self.nsd
limit = 1
for p in coords:
limit -= p
intf = f
for d in range(0,nsd):
p = coords[d]
limit += p
intf = integrate(intf, (p, 0, limit))
return intf
def bernstein_space(order, nsd):
if nsd > 3:
raise RuntimeError("Bernstein only implemented in 1D, 2D, and 3D")
sum = 0
basis = []
coeff = []
if nsd == 1:
b1, b2 = x, 1-x
for o1 in range(0,order+1):
for o2 in range(0,order+1):
if o1 + o2 == order:
aij = Symbol("a_%d_%d" % (o1,o2))
sum += aij*binomial(order,o1)*pow(b1, o1)*pow(b2, o2)
basis.append(binomial(order,o1)*pow(b1, o1)*pow(b2, o2))
coeff.append(aij)
if nsd == 2:
b1, b2, b3 = x, y, 1-x-y
for o1 in range(0,order+1):
for o2 in range(0,order+1):
for o3 in range(0,order+1):
if o1 + o2 + o3 == order:
aij = Symbol("a_%d_%d_%d" % (o1,o2,o3))
fac = factorial(order) / (factorial(o1)*factorial(o2)*factorial(o3))
sum += aij*fac*pow(b1, o1)*pow(b2, o2)*pow(b3, o3)
basis.append(fac*pow(b1, o1)*pow(b2, o2)*pow(b3, o3))
coeff.append(aij)
if nsd == 3:
b1, b2, b3, b4 = x, y, z, 1-x-y-z
for o1 in range(0,order+1):
for o2 in range(0,order+1):
for o3 in range(0,order+1):
for o4 in range(0,order+1):
if o1 + o2 + o3 + o4 == order:
aij = Symbol("a_%d_%d_%d_%d" % (o1,o2,o3,o4))
fac = factorial(order)/ (factorial(o1)*factorial(o2)*factorial(o3)*factorial(o4))
sum += aij*fac*pow(b1, o1)*pow(b2, o2)*pow(b3, o3)*pow(b4, o4)
basis.append(fac*pow(b1, o1)*pow(b2, o2)*pow(b3, o3)*pow(b4, o4))
coeff.append(aij)
return sum, coeff, basis
def create_point_set(order, nsd):
h = Rational(1,order)
set = []
if nsd == 1:
for i in range(0, order+1):
x = i*h
if x <= 1:
                set.append((x,))
if nsd == 2:
for i in range(0, order+1):
x = i*h
for j in range(0, order+1):
y = j*h
if x + y <= 1:
set.append((x,y))
if nsd == 3:
for i in range(0, order+1):
x = i*h
for j in range(0, order+1):
y = j*h
for k in range(0, order+1):
                    z = k*h
if x + y + z <= 1:
set.append((x,y,z))
return set
def create_matrix(equations, coeffs):
A = zeros(len(equations))
i = 0; j = 0
for j in range(0, len(coeffs)):
c = coeffs[j]
for i in range(0, len(equations)):
e = equations[i]
d, _ = reduced(e, [c])
A[i,j] = d[0]
return A
class Lagrange:
def __init__(self,nsd, order):
self.nsd = nsd
self.order = order
self.compute_basis()
def nbf(self):
return len(self.N)
def compute_basis(self):
order = self.order
nsd = self.nsd
N = []
pol, coeffs, basis = bernstein_space(order, nsd)
points = create_point_set(order, nsd)
equations = []
for p in points:
ex = pol.subs(x, p[0])
if nsd > 1:
ex = ex.subs(y, p[1])
if nsd > 2:
ex = ex.subs(z, p[2])
equations.append(ex )
A = create_matrix(equations, coeffs)
Ainv = A.inv()
b = eye(len(equations))
xx = Ainv*b
for i in range(0,len(equations)):
Ni = pol
for j in range(0,len(coeffs)):
Ni = Ni.subs(coeffs[j], xx[j,i])
N.append(Ni)
self.N = N
def main():
t = ReferenceSimplex(2)
fe = Lagrange(2,2)
u = 0
#compute u = sum_i u_i N_i
us = []
for i in range(0, fe.nbf()):
ui = Symbol("u_%d" % i)
us.append(ui)
u += ui*fe.N[i]
J = zeros(fe.nbf())
for i in range(0, fe.nbf()):
Fi = u*fe.N[i]
print Fi
for j in range(0, fe.nbf()):
uj = us[j]
integrands = diff(Fi, uj)
print integrands
J[j,i] = t.integrate(integrands)
pprint(J)
if __name__ == "__main__":
main()
|
flacjacket/sympy
|
examples/advanced/fem.py
|
Python
|
bsd-3-clause
| 5,414
| 0.035648
|
# fMBT, free Model Based Testing tool
# Copyright (c) 2013-2016, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU Lesser General Public License,
# version 2.1, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
# more details.
#
# You should have received a copy of the GNU Lesser General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
"""
This library implements fmbt GUITestInterface for X.
Using Screen.refreshView() requires the pyatspi library and enabling
accessibility. For example:
gsettings set org.gnome.desktop.interface toolkit-accessibility true
"""
import fmbt_config
import fmbtgti
fmbtgti._OCRPREPROCESS = [
"",
"-sharpen 5 -level 90%%,100%%,3.0 -sharpen 5"
]
import ctypes
import os
import subprocess
import shutil
import zlib
import fmbtx11_conn
def _run(command):
exit_status = subprocess.call(command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=False,
close_fds=(os.name != "nt"))
return exit_status
sortItems = fmbtgti.sortItems
class ViewItem(fmbtgti.GUIItem):
def __init__(self, view, itemId, parentId, className, text, bbox,
dumpFilename, rawProperties=None):
self._view = view
self._itemId = itemId
self._parentId = parentId
self._className = className
self._text = text
if rawProperties:
self._properties = rawProperties
else:
self._properties = {}
fmbtgti.GUIItem.__init__(self, self._className, bbox, dumpFilename)
def branch(self):
"""Returns list of view items from the root down to this item"""
rv = []
itemId = self._itemId
while itemId:
rv.append(self._view._viewItems[itemId])
if itemId in self._view._viewItems:
itemId = self._view._viewItems[itemId]._parentId
else:
itemId = None
rv.reverse()
return rv
def children(self):
items = self._view._viewItems
return [items[itemId]
for itemId in items
if items[itemId]._parentId == self._itemId]
def parent(self):
return self._parentId
def parentItem(self):
try:
return self._view._viewItems[self._parentId]
except KeyError:
return None
def id(self):
return self._itemId
def properties(self):
return self._properties
def text(self):
return self._text
def dumpProperties(self):
rv = []
if self._properties:
for key in sorted(self._properties.keys()):
rv.append("%s=%s" % (key, self._properties[key]))
return "\n".join(rv)
def __str__(self):
return "ViewItem(%s)" % (self._view._dumpItem(self),)
class View(object):
def __init__(self, dumpFilename, itemTree, itemOnScreen=None):
self._dumpFilename = dumpFilename
self._itemTree = itemTree
self._rootItem = None
self._viewItems = {}
if itemOnScreen == None:
self._itemOnScreen = lambda item: True
else:
self._itemOnScreen = itemOnScreen
self._viewSource = "atspi"
for item in itemTree:
className = item.get("class", "")
text = item.get("text", "")
if text == "" or text == None:
text = item.get("name", "")
if text == "":
text = className
vi = ViewItem(
self, item["id"], item["parent"],
className,
text,
item["bbox"],
dumpFilename,
item)
self._viewItems[item["id"]] = vi
if vi.parent() == None:
self._rootItem = vi
if not self._rootItem:
raise ValueError("no root item in view data")
def _intCoords(self, *args):
# TODO: relative coordinates like (0.5, 0.9)
return [int(c) for c in args[0]]
def filename(self):
return self._dumpFilename
def rootItem(self):
return self._rootItem
def _dumpItem(self, viewItem):
return "id=%s cls=%s text=%s bbox=%s" % (
viewItem._itemId, repr(viewItem._className), repr(viewItem._text),
viewItem._bbox)
def _dumpTree(self, rootItem, depth=0):
l = ["%s%s" % (" " * (depth * 4), self._dumpItem(rootItem))]
for child in rootItem.children():
l.extend(self._dumpTree(child, depth+1))
return l
def dumpTree(self, rootItem=None):
"""
Returns item tree as a string
"""
if rootItem == None:
rootItem = self.rootItem()
return "\n".join(self._dumpTree(rootItem))
def __str__(self):
return "View(%s, %s items)" % (repr(self._dumpFilename), len(self._viewItems))
def findItems(self, comparator, count=-1, searchRootItem=None, searchItems=None, onScreen=False):
foundItems = []
if count == 0: return foundItems
if searchRootItem != None:
if comparator(searchRootItem) and (
not onScreen or (self._itemOnScreen(searchRootItem))):
foundItems.append(searchRootItem)
for c in searchRootItem.children():
foundItems.extend(self.findItems(comparator, count=count-len(foundItems), searchRootItem=c, onScreen=onScreen))
else:
if searchItems:
domain = iter(searchItems)
else:
domain = self._viewItems.itervalues
for i in domain():
if comparator(i) and (not onScreen or (self._itemOnScreen(i))):
foundItems.append(i)
if count > 0 and len(foundItems) >= count:
break
return foundItems
def findItemsByText(self, text, partial=False, count=-1, searchRootItem=None, searchItems=None, onScreen=False):
if partial:
c = lambda item: (text in item._text or text in item.properties()["name"])
else:
c = lambda item: (text == item._text)
return self.findItems(c, count=count, searchRootItem=searchRootItem, searchItems=searchItems, onScreen=onScreen)
def findItemsByClass(self, className, partial=False, count=-1, searchRootItem=None, searchItems=None, onScreen=False):
if partial:
c = lambda item: (className in item._className)
else:
c = lambda item: (className == item._className)
return self.findItems(c, count=count, searchRootItem=searchRootItem, searchItems=searchItems, onScreen=onScreen)
def findItemsById(self, itemId, count=-1, searchRootItem=None, searchItems=None, onScreen=False):
c = lambda item: (itemId == item._itemId or itemId == item.properties().get("AutomationId", None))
return self.findItems(c, count=count, searchRootItem=searchRootItem, searchItems=searchItems, onScreen=onScreen)
def findItemsByProperties(self, properties, count=-1, searchRootItem=None, searchItems=None, onScreen=False):
"""
Returns ViewItems where every property matches given properties
Parameters:
properties (dictionary):
names and required values of properties
Example:
view.findItemsByProperties({"Value": "HELLO", "Name": "File name:"})
See also:
viewitem.dumpProperties()
"""
c = lambda item: 0 == len([key for key in properties
if properties[key] != item.properties().get(key, None)])
return self.findItems(c, count=count, searchRootItem=searchRootItem, searchItems=searchItems, onScreen=onScreen)
def findItemsByPos(self, pos, count=-1, searchRootItem=None, searchItems=None, onScreen=False):
"""
Returns list of ViewItems whose bounding box contains the position.
Parameters:
pos (pair of floats (0.0..0.1) or integers (x, y)):
coordinates that fall in the bounding box of found items.
other parameters: refer to findItems documentation.
Items are listed in ascending order based on area. They may
or may not be from the same branch in the widget hierarchy.
"""
x, y = self._intCoords(pos)
c = lambda item: (item.bbox()[0] <= x <= item.bbox()[2] and item.bbox()[1] <= y <= item.bbox()[3])
items = self.findItems(c, count=count, searchRootItem=searchRootItem, searchItems=searchItems, onScreen=onScreen)
# sort from smallest to greatest area
area_items = [((i.bbox()[2] - i.bbox()[0]) * (i.bbox()[3] - i.bbox()[1]), i) for i in items]
return [i for _, i in sorted(area_items)]
def items(self):
"""
Returns list of all items in the view
"""
return fmbtgti.sortItems(self._viewItems.values(), "topleft")
def save(self, fileOrDirName):
"""
Save view dump to a file.
"""
shutil.copy(self._dumpFilename, fileOrDirName)
class Screen(fmbtgti.GUITestInterface):
def __init__(self, display="", **kwargs):
"""Parameters:
display (string, optional)
X display to connect to.
Example: display=":0". The default is "", that is,
the default X display in the DISPLAY environment
variable will be used.
rotateScreenshot (integer, optional)
rotate new screenshots by rotateScreenshot degrees.
Example: rotateScreenshot=-90. The default is 0 (no
rotation).
"""
fmbtgti.GUITestInterface.__init__(self, **kwargs)
self._lastView = None
self._refreshViewDefaults = {}
self.setConnection(X11Connection(display))
def atspiApplicationList(self):
"""Returns list of running ATSPI applications.
refreshView with view source "atspi" works for these
applications.
"""
return fmbtx11_conn.atspiApplicationList()
def existingView(self):
if self._lastView:
return self._lastView
else:
            raise FMBTX11Error("view is not available. Missing refreshView()?")
def itemOnScreen(self, guiItem):
maxX, maxY = self.screenSize()
return fmbtgti._boxOnRegion(guiItem.bbox(), (0, 0, maxX, maxY))
def keyNames(self):
"""Returns list of key names understood by pressKey"""
return _keyNames[:]
def refreshView(self, window=None, forcedView=None, viewSource=None):
"""Update toolkit data"""
self._lastView = None
if window == None:
window = self._refreshViewDefaults.get("window", None)
if viewSource == None:
viewSource = self._refreshViewDefaults.get("viewSource", "atspi")
if viewSource == "atspi":
foundItems = self.existingConnection().recvAtspiViewData(window)
if self.screenshotDir() == None:
self.setScreenshotDir(self._screenshotDirDefault)
if self.screenshotSubdir() == None:
self.setScreenshotSubdir(self._screenshotSubdirDefault)
viewFilename = self._newScreenshotFilepath()[:-3] + "view"
file(viewFilename, "w").write(repr(foundItems))
self._lastView = View(
viewFilename, foundItems,
itemOnScreen=lambda i: self.itemOnScreen(i))
else:
raise ValueError('viewSource "%s" not supported' % (viewSource,))
return self._lastView
def refreshViewDefaults(self):
return self._refreshViewDefaults
def setRefreshViewDefaults(self, **kwargs):
"""Set default arguments for refreshView() calls
Parameters:
**kwargs (keyword arguments)
new default values for optional refreshView() parameters.
"""
self._refreshViewDefaults = kwargs
def tapText(self, text, partial=False, **tapKwArgs):
"""
Find an item with given text from the latest view, and tap it.
Parameters:
partial (boolean, optional):
refer to verifyText documentation. The default is
False.
tapPos (pair of floats (x, y)):
refer to tapItem documentation.
button, long, hold, count, delayBetweenTaps (optional):
refer to tap documentation.
Returns True if successful, otherwise False.
"""
items = self.existingView().findItemsByText(text, partial=partial, count=1, onScreen=True)
if len(items) == 0: return False
return self.tapItem(items[0], **tapKwArgs)
def verifyText(self, text, partial=False):
"""
Verify that the last view has at least one item with given
text.
Parameters:
text (string):
text to be searched for in items.
partial (boolean, optional):
if True, match items if item text contains given
text, otherwise match only if item text is equal to
the given text. The default is False (exact match).
"""
assert self._lastView != None, "View required."
return self._lastView.findItemsByText(text, partial=partial, count=1, onScreen=True) != []
def view(self):
return self._lastView
def windowList(self):
"""
Return list of properties of windows (dictionaries)
Example: list window id's and names:
for props in screen.windowList():
print props["window"], props["name"]
"""
return self.existingConnection().recvChildWindows(recursive=True)
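# Illustrative use (added sketch; assumes an X session with accessibility
# enabled as described in the module docstring):
#
#     s = Screen()
#     s.refreshView()
#     s.tapText("OK")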
class X11Connection(fmbtx11_conn.Display):
def __init__(self, display):
fmbtx11_conn.Display.__init__(self, display)
def target(self):
return "X11"
def recvAtspiViewData(self, window):
return fmbtx11_conn.atspiViewData(window)
def recvScreenshot(self, filename):
# This is a hack to get this stack quickly testable,
# let's replace this with Xlib/libMagick functions, too...
data = fmbtx11_conn.Display.recvScreenshot(self, "PNG")
if data:
if data.startswith("FMBTRAWX11"):
try:
header, zdata = data.split('\n', 1)
width, height, depth, bpp = [int(n) for n in header.split()[1:]]
data = zlib.decompress(zdata)
except Exception, e:
raise FMBTX11Error("Corrupted screenshot data: %s" % (e,))
if len(data) != width * height * 4:
raise FMBTX11Error("Image data size mismatch.")
fmbtgti.eye4graphics.bgrx2rgb(data, width, height)
ppm_header = "P6\n%d %d\n%d\n" % (width, height, 255)
f = file(filename + ".ppm", "w").write(ppm_header + data[:width*height*3])
_run([fmbt_config.imagemagick_convert, filename + ".ppm", filename])
os.remove("%s.ppm" % (filename,))
elif fmbtx11_conn.fmbtpng and data.startswith(fmbtx11_conn.fmbtpng.PNG_MAGIC):
file(filename, "w").write(data)
else:
raise FMBTX11Error('Unsupported image format "%s"...' % (data[:4],))
else:
return False
return True
class FMBTX11Error(Exception): pass
X11ConnectionError = fmbtx11_conn.X11ConnectionError
_keyNames = [ "VoidSymbol", "BackSpace", "Tab", "Linefeed", "Clear",
"Return", "Pause", "Scroll_Lock", "Sys_Req", "Escape",
"Delete", "Multi_key", "Codeinput", "SingleCandidate",
"MultipleCandidate", "PreviousCandidate", "Kanji",
"Muhenkan", "Henkan_Mode", "Henkan", "Romaji",
"Hiragana", "Katakana", "Hiragana_Katakana", "Zenkaku",
"Hankaku", "Zenkaku_Hankaku", "Touroku", "Massyo",
"Kana_Lock", "Kana_Shift", "Eisu_Shift", "Eisu_toggle",
"Kanji_Bangou", "Zen_Koho", "Mae_Koho", "Home", "Left",
"Up", "Right", "Down", "Prior", "Page_Up", "Next",
"Page_Down", "End", "Begin", "Select", "Print",
"Execute", "Insert", "Undo", "Redo", "Menu", "Find",
"Cancel", "Help", "Break", "Mode_switch",
"script_switch", "Num_Lock", "KP_Space", "KP_Tab",
"KP_Enter", "KP_F1", "KP_F2", "KP_F3", "KP_F4",
"KP_Home", "KP_Left", "KP_Up", "KP_Right", "KP_Down",
"KP_Prior", "KP_Page_Up", "KP_Next", "KP_Page_Down",
"KP_End", "KP_Begin", "KP_Insert", "KP_Delete",
"KP_Equal", "KP_Multiply", "KP_Add", "KP_Separator",
"KP_Subtract", "KP_Decimal", "KP_Divide", "KP_0",
"KP_1", "KP_2", "KP_3", "KP_4", "KP_5", "KP_6", "KP_7",
"KP_8", "KP_9", "F1", "F2", "F3", "F4", "F5", "F6",
"F7", "F8", "F9", "F10", "F11", "L1", "F12", "L2",
"F13", "L3", "F14", "L4", "F15", "L5", "F16", "L6",
"F17", "L7", "F18", "L8", "F19", "L9", "F20", "L10",
"F21", "R1", "F22", "R2", "F23", "R3", "F24", "R4",
"F25", "R5", "F26", "R6", "F27", "R7", "F28", "R8",
"F29", "R9", "F30", "R10", "F31", "R11", "F32", "R12",
"F33", "R13", "F34", "R14", "F35", "R15", "Shift_L",
"Shift_R", "Control_L", "Control_R", "Caps_Lock",
"Shift_Lock", "Meta_L", "Meta_R", "Alt_L", "Alt_R",
"Super_L", "Super_R", "Hyper_L", "Hyper_R", "ISO_Lock",
"ISO_Level2_Latch", "ISO_Level3_Shift",
"ISO_Level3_Latch", "ISO_Level3_Lock",
"ISO_Level5_Shift", "ISO_Level5_Latch",
"ISO_Level5_Lock", "ISO_Group_Shift", "ISO_Group_Latch",
"ISO_Group_Lock", "ISO_Next_Group",
"ISO_Next_Group_Lock", "ISO_Prev_Group",
"ISO_Prev_Group_Lock", "ISO_First_Group",
"ISO_First_Group_Lock", "ISO_Last_Group",
"ISO_Last_Group_Lock", "ISO_Left_Tab",
"ISO_Move_Line_Up", "ISO_Move_Line_Down",
"ISO_Partial_Line_Up", "ISO_Partial_Line_Down",
"ISO_Partial_Space_Left", "ISO_Partial_Space_Right",
"ISO_Set_Margin_Left", "ISO_Set_Margin_Right",
"ISO_Release_Margin_Left", "ISO_Release_Margin_Right",
"ISO_Release_Both_Margins", "ISO_Fast_Cursor_Left",
"ISO_Fast_Cursor_Right", "ISO_Fast_Cursor_Up",
"ISO_Fast_Cursor_Down", "ISO_Continuous_Underline",
"ISO_Discontinuous_Underline", "ISO_Emphasize",
"ISO_Center_Object", "ISO_Enter", "dead_grave",
"dead_acute", "dead_circumflex", "dead_tilde",
"dead_perispomeni", "dead_macron", "dead_breve",
"dead_abovedot", "dead_diaeresis", "dead_abovering",
"dead_doubleacute", "dead_caron", "dead_cedilla",
"dead_ogonek", "dead_iota", "dead_voiced_sound",
"dead_semivoiced_sound", "dead_belowdot", "dead_hook",
"dead_horn", "dead_stroke", "dead_abovecomma",
"dead_psili", "dead_abovereversedcomma", "dead_dasia",
"dead_doublegrave", "dead_belowring",
"dead_belowmacron", "dead_belowcircumflex",
"dead_belowtilde", "dead_belowbreve",
"dead_belowdiaeresis", "dead_invertedbreve",
"dead_belowcomma", "dead_currency", "dead_a", "dead_A",
"dead_e", "dead_E", "dead_i", "dead_I", "dead_o",
"dead_O", "dead_u", "dead_U", "dead_small_schwa",
"dead_capital_schwa", "dead_greek",
"First_Virtual_Screen", "Prev_Virtual_Screen",
"Next_Virtual_Screen", "Last_Virtual_Screen",
"Terminate_Server", "AccessX_Enable",
"AccessX_Feedback_Enable", "RepeatKeys_Enable",
"SlowKeys_Enable", "BounceKeys_Enable",
"StickyKeys_Enable", "MouseKeys_Enable",
"MouseKeys_Accel_Enable", "Overlay1_Enable",
"Overlay2_Enable", "AudibleBell_Enable", "Pointer_Left",
"Pointer_Right", "Pointer_Up", "Pointer_Down",
"Pointer_UpLeft", "Pointer_UpRight", "Pointer_DownLeft",
"Pointer_DownRight", "Pointer_Button_Dflt",
"Pointer_Button1", "Pointer_Button2", "Pointer_Button3",
"Pointer_Button4", "Pointer_Button5",
"Pointer_DblClick_Dflt", "Pointer_DblClick1",
"Pointer_DblClick2", "Pointer_DblClick3",
"Pointer_DblClick4", "Pointer_DblClick5",
"Pointer_Drag_Dflt", "Pointer_Drag1", "Pointer_Drag2",
"Pointer_Drag3", "Pointer_Drag4", "Pointer_Drag5",
"Pointer_EnableKeys", "Pointer_Accelerate",
"Pointer_DfltBtnNext", "Pointer_DfltBtnPrev", "ch",
"Ch", "CH", "c_h", "C_h", "C_H", "space", "exclam",
"quotedbl", "numbersign", "dollar", "percent",
"ampersand", "apostrophe", "quoteright", "parenleft",
"parenright", "asterisk", "plus", "comma", "minus",
"period", "slash", "0", "1", "2", "3", "4", "5", "6",
"7", "8", "9", "colon", "semicolon", "less", "equal",
"greater", "question", "at", "A", "B", "C", "D", "E",
"F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P",
"Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z",
"bracketleft", "backslash", "bracketright",
"asciicircum", "underscore", "grave", "quoteleft", "a",
"b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l",
"m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w",
"x", "y", "z", "braceleft", "bar", "braceright",
"asciitilde", "nobreakspace", "exclamdown", "cent",
"sterling", "currency", "yen", "brokenbar", "section",
"diaeresis", "copyright", "ordfeminine",
"guillemotleft", "notsign", "hyphen", "registered",
"macron", "degree", "plusminus", "twosuperior",
"threesuperior", "acute", "mu", "paragraph",
"periodcentered", "cedilla", "onesuperior", "masculine",
"guillemotright", "onequarter", "onehalf",
"threequarters", "questiondown", "Agrave", "Aacute",
"Acircumflex", "Atilde", "Adiaeresis", "Aring", "AE",
"Ccedilla", "Egrave", "Eacute", "Ecircumflex",
"Ediaeresis", "Igrave", "Iacute", "Icircumflex",
"Idiaeresis", "ETH", "Eth", "Ntilde", "Ograve",
"Oacute", "Ocircumflex", "Otilde", "Odiaeresis",
"multiply", "Oslash", "Ooblique", "Ugrave", "Uacute",
"Ucircumflex", "Udiaeresis", "Yacute", "THORN", "Thorn",
"ssharp", "agrave", "aacute", "acircumflex", "atilde",
"adiaeresis", "aring", "ae", "ccedilla", "egrave",
"eacute", "ecircumflex", "ediaeresis", "igrave",
"iacute", "icircumflex", "idiaeresis", "eth", "ntilde",
"ograve", "oacute", "ocircumflex", "otilde",
"odiaeresis", "division", "oslash", "ooblique",
"ugrave", "uacute", "ucircumflex", "udiaeresis",
"yacute", "thorn", "ydiaeresis", "Aogonek", "breve",
"Lstroke", "Lcaron", "Sacute", "Scaron", "Scedilla",
"Tcaron", "Zacute", "Zcaron", "Zabovedot", "aogonek",
"ogonek", "lstroke", "lcaron", "sacute", "caron",
"scaron", "scedilla", "tcaron", "zacute", "doubleacute",
"zcaron", "zabovedot", "Racute", "Abreve", "Lacute",
"Cacute", "Ccaron", "Eogonek", "Ecaron", "Dcaron",
"Dstroke", "Nacute", "Ncaron", "Odoubleacute", "Rcaron",
"Uring", "Udoubleacute", "Tcedilla", "racute", "abreve",
"lacute", "cacute", "ccaron", "eogonek", "ecaron",
"dcaron", "dstroke", "nacute", "ncaron", "odoubleacute",
"rcaron", "uring", "udoubleacute", "tcedilla",
"abovedot", "Hstroke", "Hcircumflex", "Iabovedot",
"Gbreve", "Jcircumflex", "hstroke", "hcircumflex",
"idotless", "gbreve", "jcircumflex", "Cabovedot",
"Ccircumflex", "Gabovedot", "Gcircumflex", "Ubreve",
"Scircumflex", "cabovedot", "ccircumflex", "gabovedot",
"gcircumflex", "ubreve", "scircumflex", "kra", "kappa",
"Rcedilla", "Itilde", "Lcedilla", "Emacron", "Gcedilla",
"Tslash", "rcedilla", "itilde", "lcedilla", "emacron",
"gcedilla", "tslash", "ENG", "eng", "Amacron",
"Iogonek", "Eabovedot", "Imacron", "Ncedilla",
"Omacron", "Kcedilla", "Uogonek", "Utilde", "Umacron",
"amacron", "iogonek", "eabovedot", "imacron",
"ncedilla", "omacron", "kcedilla", "uogonek", "utilde",
"umacron", "Wcircumflex", "wcircumflex", "Ycircumflex",
"ycircumflex", "Babovedot", "babovedot", "Dabovedot",
"dabovedot", "Fabovedot", "fabovedot", "Mabovedot",
"mabovedot", "Pabovedot", "pabovedot", "Sabovedot",
"sabovedot", "Tabovedot", "tabovedot", "Wgrave",
"wgrave", "Wacute", "wacute", "Wdiaeresis",
"wdiaeresis", "Ygrave", "ygrave", "OE", "oe",
"Ydiaeresis", "overline"]
|
yoonkiss/fMBT
|
utils/fmbtx11.py
|
Python
|
lgpl-2.1
| 26,132
| 0.002334
|
# -*- coding: utf-8 -*-
"""
* Partial implementation of standard atmospheric model as described in
* GOST 4401-81 useful for processing of data from meteorological balloon
* sensors.
*
* Supports modelling of temperature and pressure over the altitude span from
* 0 up to 51 km.
*
* algorithm by Oleg Kochetov <ok@noiselab.ru>
"""
from math import log10
class GOST4401(object):
G = 9.80665
R = 287.05287
E = 6356766
MIN_PRESSURE = 6.69384
MAX_PRESSURE = 101325.00
MIN_GP_ALT = 0.00
MAX_GP_ALT = 51000.00
# Lookup table with averaged empirical parameters for
# lower layers of atmosphere in accordance with ГОСТ 4401-81
LUT_RECORDS = 6
tab = {
        'altitude' : 0, # Geopotential altitude
'temperature' : 1, # degrees K
'temp gradient' : 2, # degrees K per meter
'pressure' : 3, # pascals
}
ag_table = [
[0, 288.15, -0.0065, 101325.00],
[11000, 216.65, 0.0, 22632.04],
[20000, 216.65, 0.0010, 5474.87],
[32000, 228.65, 0.0028, 868.0146],
[47000, 270.65, 0.0, 110.9056],
[51000, 270.65, -0.0028, 6.69384]
]
    def geopotential_to_geometric(self, altitude):
        return altitude * self.E / (self.E - altitude)
    def geometric_to_geopotential(self, altitude):
        return altitude * self.E / (self.E + altitude)
def get_altitude(self, pressure):
"""
Returns geometric altitude value for the given pressure.
:param pressure: float pressure - pressure in pascals
:return: float geometric altitude in meters
"""
# Pressure in Pascals
if (pressure <= self.MIN_PRESSURE) or (pressure > self.MAX_PRESSURE):
return None
for idx in range(0, self.LUT_RECORDS - 1):
if ((pressure <= self.ag_table[idx][self.tab['pressure']]) and
(pressure > self.ag_table[idx + 1][self.tab['pressure']])):
break
Ps = float(self.ag_table[idx][self.tab['pressure']])
Bm = float(self.ag_table[idx][self.tab['temp gradient']])
Tm = float(self.ag_table[idx][self.tab['temperature']])
Hb = float(self.ag_table[idx][self.tab['altitude']])
if Bm != 0:
geopot_H = ((Tm * pow(Ps / pressure, Bm * self.R / self.G) - Tm) / Bm)
        else:
            geopot_H = log10(Ps / pressure) * (self.R * Tm) / self.G / 0.434294
        return self.geopotential_to_geometric(Hb + geopot_H)
def get_pressure(self, altitude):
"""
Returns pressure in pascals for the given geometric altitude
:param altitude: float altitude - geometric altitude in meters
:return: float - pressure in pascals
"""
        geopot_H = self.geometric_to_geopotential(altitude)
if (geopot_H < self.MIN_GP_ALT) or (geopot_H >= self.MAX_GP_ALT):
return None
for idx in range(0, self.LUT_RECORDS - 1):
if ((geopot_H >= self.ag_table[idx][self.tab['altitude']]) and
(geopot_H < self.ag_table[idx + 1][self.tab['altitude']])):
break
Ps = float(self.ag_table[idx][self.tab['pressure']])
Bm = float(self.ag_table[idx][self.tab['temp gradient']])
Tm = float(self.ag_table[idx][self.tab['temperature']])
Hb = float(self.ag_table[idx][self.tab['altitude']])
if Bm != 0:
lP = log10(Ps) - (self.G / (Bm * self.R)) * log10((Tm + Bm * (geopot_H - Hb)) / Tm)
else:
lP = log10(Ps) - 0.434294 * (self.G * (geopot_H - Hb)) / (self.R * Tm)
return pow(10, lP)
def get_temperature(self, altitude):
"""
Returns temperature value in K for the given geometric altitude.
:param altitude: float altitude - geometric altitude in meters
:return: float - temperature in degrees K
"""
        geopot_H = self.geometric_to_geopotential(altitude)
if (geopot_H < self.MIN_GP_ALT) or (geopot_H >= self.MAX_GP_ALT):
return None
for idx in range(0, self.LUT_RECORDS - 1):
if ((geopot_H >= self.ag_table[idx][self.tab['altitude']]) and
(geopot_H < self.ag_table[idx + 1][self.tab['altitude']])):
break
Bm = float(self.ag_table[idx][self.tab['temp gradient']])
Tm = float(self.ag_table[idx][self.tab['temperature']])
Hb = float(self.ag_table[idx][self.tab['altitude']])
temp = Tm
if Bm != 0:
temp += Bm * (geopot_H - Hb)
return temp
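# Minimal usage sketch (added for illustration; expected values are
# approximate and follow from the lookup table above). get_pressure() and
# get_altitude() are mutual inverses, so a round trip should reproduce the
# input altitude to within the table's rounding.
if __name__ == '__main__':
    atm = GOST4401()
    p = atm.get_pressure(11019.0)    # geometric altitude near the tropopause
    print(p)                         # roughly 2.26e4 Pa
    print(atm.get_altitude(p))       # roughly 11019 m again (within ~10 m)
    print(atm.get_temperature(0.0))  # 288.15 K at sea level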
|
Shatki/PyIMU
|
gost4401_81.py
|
Python
|
gpl-3.0
| 4,655
| 0.002365
|
"""SCons.Tool.sunf95
Tool-specific initialization for sunf95, the Sun Studio F95 compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sunf95.py 4720 2010/03/24 03:14:11 jars"
import SCons.Util
from FortranCommon import add_all_to_env
compilers = ['sunf95', 'f95']
def generate(env):
"""Add Builders and construction variables for sunf95 to an
Environment."""
add_all_to_env(env)
fcomp = env.Detect(compilers) or 'f95'
env['FORTRAN'] = fcomp
env['F95'] = fcomp
env['SHFORTRAN'] = '$FORTRAN'
env['SHF95'] = '$F95'
env['SHFORTRANFLAGS'] = SCons.Util.CLVar('$FORTRANFLAGS -KPIC')
env['SHF95FLAGS'] = SCons.Util.CLVar('$F95FLAGS -KPIC')
def exists(env):
return env.Detect(compilers)
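# Illustrative use (added sketch): the tool is normally pulled in through
# the generic selection mechanism, e.g.
#     env = Environment(tools=['sunf95'])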
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
qewerty/moto.old
|
tools/scons/engine/SCons/Tool/sunf95.py
|
Python
|
gpl-2.0
| 2,167
| 0.00323
|
"""
Django settings for untitled project.
Generated by 'django-admin startproject' using Django 1.10.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^8wdj$q^6mp6g7z1s7nwip_ffhof4r6g)nl88dy0-u(r)(o=_n'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
DEBUG_SFW = False
# Use this to blank most frontend NSFW stuff when developing in public spaces
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_q',
'kinksorter_app',
]
USE_ASYNC = False
Q_CLUSTER = {
'name': 'kinksorter-cluster',
'recycle': 10, # big tasks -> often recycle workers
'save_limit': 10, # try to minimize database_size
'catch_up': False, # try to minimize database_size
'orm': 'default',
}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'kinksorter.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'kinksorter.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'kinksorter.db'),
'OPTIONS': {'timeout': 20000},
},
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'simple': {
'format': '[%(asctime)s] %(message)s',
'datefmt': '%H:%M:%S',
}
},
'filters': {
'ignore_get_current_task': {
'()': 'django.utils.log.CallbackFilter',
'callback': lambda r: not (len(r.args) > 2 and r.args[1] == '200' and r.args[0] == 'GET /get_current_task HTTP/1.1'),
}
},
'handlers': {
'console': {
'filters': ['ignore_get_current_task'],
'class': 'logging.StreamHandler',
'formatter': 'simple',
},
},
'loggers': {
'django.server': {
'handlers': ['console'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
},
},
}
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'kinksorter', 'static')]
STATIC_ROOT = os.path.join(BASE_DIR, 'kinksorter', 'static_collected')
# User content (serves the videos via static/, as it needs to be under the static root)
STATIC_LINKED_DIRECTORIES = os.path.join(STATIC_URL, 'directory_links')
DIRECTORY_LINKS = os.path.join(STATIC_ROOT, 'directory_links')
os.makedirs(DIRECTORY_LINKS, exist_ok=True)
|
sistason/kinksorter2
|
src/kinksorter/settings.py
|
Python
|
gpl-3.0
| 4,818
| 0.001038
|
import rdtest
import struct
import renderdoc as rd
class VK_Indirect(rdtest.TestCase):
demos_test_name = 'VK_Indirect'
def check_overlay(self, eventId: int, out: rd.ReplayOutput, tex: rd.TextureDisplay, save_data: rd.TextureSave):
pipe: rd.PipeState = self.controller.GetPipelineState()
# Check that the highlight draw overlay is empty
tex.resourceId = pipe.GetOutputTargets()[0].resourceId
out.SetTextureDisplay(tex)
overlay_path = rdtest.get_tmp_path(str(eventId) + '_draw.png')
ref_path = self.get_ref_path(str(eventId) + '_draw.png')
save_data.resourceId = out.GetDebugOverlayTexID()
self.controller.SaveTexture(save_data, overlay_path)
if not rdtest.png_compare(overlay_path, ref_path):
raise rdtest.TestFailureException("Reference and output image differ @ EID {}".format(str(eventId)),
ref_path, overlay_path)
def check_capture(self):
self.check_final_backbuffer()
for level in ["Primary", "Secondary"]:
rdtest.log.print("Checking {} indirect calls".format(level))
dispatches = self.find_draw("{}: Dispatches".format(level))
# Set up a ReplayOutput and TextureSave for quickly testing the drawcall highlight overlay
out: rd.ReplayOutput = self.controller.CreateOutput(rd.CreateHeadlessWindowingData(100, 100),
rd.ReplayOutputType.Texture)
self.check(out is not None)
tex = rd.TextureDisplay()
tex.overlay = rd.DebugOverlay.Drawcall
save_data = rd.TextureSave()
save_data.destType = rd.FileType.PNG
# Rewind to the start of the capture
draw: rd.DrawcallDescription = dispatches.children[0]
while draw.previous is not None:
draw = draw.previous
# Ensure we can select all draws
while draw is not None:
self.controller.SetFrameEvent(draw.eventId, False)
draw = draw.next
rdtest.log.success("Selected all {} draws".format(level))
self.check(dispatches and len(dispatches.children) == 3)
self.check(dispatches.children[0].dispatchDimension == [0,0,0])
self.check(dispatches.children[1].dispatchDimension == [1,1,1])
self.check(dispatches.children[2].dispatchDimension == [3,4,5])
rdtest.log.success("{} Indirect dispatches are the correct dimensions".format(level))
self.controller.SetFrameEvent(dispatches.children[2].eventId, False)
pipe: rd.PipeState = self.controller.GetPipelineState()
ssbo: rd.BoundResource = pipe.GetReadWriteResources(rd.ShaderStage.Compute)[0].resources[0]
data: bytes = self.controller.GetBufferData(ssbo.resourceId, 0, 0)
rdtest.log.print("Got {} bytes of uints".format(len(data)))
uints = [struct.unpack_from('=4L', data, offs) for offs in range(0, len(data), 16)]
for x in range(0, 6): # 3 groups of 2 threads each
                for y in range(0, 8): # 4 groups of 2 threads each
for z in range(0, 5): # 5 groups of 1 thread each
idx = 100 + z*8*6 + y*6 + x
if not rdtest.value_compare(uints[idx], [x, y, z, 12345]):
raise rdtest.TestFailureException(
'expected thread index data @ {},{},{}: {} is not as expected: {}'
.format(x, y, z, uints[idx], [x, y, z, 12345]))
rdtest.log.success("Dispatched buffer contents are as expected for {}".format(level))
empties = self.find_draw("{}: Empty draws".format(level))
self.check(empties and len(empties.children) == 2)
draw: rd.DrawcallDescription
for draw in empties.children:
self.check(draw.numIndices == 0)
self.check(draw.numInstances == 0)
self.controller.SetFrameEvent(draw.eventId, False)
# Check that we have empty PostVS
postvs_data = self.get_postvs(rd.MeshDataStage.VSOut, 0, 1)
self.check(len(postvs_data) == 0)
self.check_overlay(draw.eventId, out, tex, save_data)
rdtest.log.success("{} empty draws are empty".format(level))
indirects = self.find_draw("{}: Indirect draws".format(level))
self.check('vkCmdDrawIndirect' in indirects.children[0].name)
self.check('vkCmdDrawIndexedIndirect' in indirects.children[1].name)
self.check(len(indirects.children[1].children) == 2)
rdtest.log.success("Correct number of {} indirect draws".format(level))
# vkCmdDrawIndirect(...)
draw = indirects.children[0]
self.check(draw.numIndices == 3)
self.check(draw.numInstances == 2)
self.controller.SetFrameEvent(draw.eventId, False)
# Check that we have PostVS as expected
postvs_data = self.get_postvs(rd.MeshDataStage.VSOut)
postvs_ref = {
0: {'vtx': 0, 'idx': 0, 'gl_PerVertex.gl_Position': [-0.8, -0.5, 0.0, 1.0]},
1: {'vtx': 1, 'idx': 1, 'gl_PerVertex.gl_Position': [-0.7, -0.8, 0.0, 1.0]},
2: {'vtx': 2, 'idx': 2, 'gl_PerVertex.gl_Position': [-0.6, -0.5, 0.0, 1.0]},
}
self.check_mesh_data(postvs_ref, postvs_data)
self.check(len(postvs_data) == len(postvs_ref)) # We shouldn't have any extra vertices
self.check_overlay(draw.eventId, out, tex, save_data)
rdtest.log.success("{} {} is as expected".format(level, draw.name))
# vkCmdDrawIndexedIndirect[0](...)
draw = indirects.children[1].children[0]
self.check(draw.numIndices == 3)
self.check(draw.numInstances == 3)
self.controller.SetFrameEvent(draw.eventId, False)
# Check that we have PostVS as expected
postvs_data = self.get_postvs(rd.MeshDataStage.VSOut)
# These indices are the *output* indices, which have been rebased/remapped, so are not the same as the input
# indices
postvs_ref = {
0: {'vtx': 0, 'idx': 0, 'gl_PerVertex.gl_Position': [-0.6, -0.5, 0.0, 1.0]},
1: {'vtx': 1, 'idx': 1, 'gl_PerVertex.gl_Position': [-0.5, -0.8, 0.0, 1.0]},
2: {'vtx': 2, 'idx': 2, 'gl_PerVertex.gl_Position': [-0.4, -0.5, 0.0, 1.0]},
}
self.check_mesh_data(postvs_ref, postvs_data)
self.check(len(postvs_data) == len(postvs_ref)) # We shouldn't have any extra vertices
self.check_overlay(draw.eventId, out, tex, save_data)
rdtest.log.success("{} {} is as expected".format(level, draw.name))
# vkCmdDrawIndexedIndirect[1](...)
draw = indirects.children[1].children[1]
self.check(draw.numIndices == 6)
self.check(draw.numInstances == 2)
self.controller.SetFrameEvent(draw.eventId, False)
# Check that we have PostVS as expected
postvs_data = self.get_postvs(rd.MeshDataStage.VSOut)
postvs_ref = {
0: {'vtx': 0, 'idx': 0, 'gl_PerVertex.gl_Position': [-0.4, -0.5, 0.0, 1.0]},
1: {'vtx': 1, 'idx': 1, 'gl_PerVertex.gl_Position': [-0.3, -0.8, 0.0, 1.0]},
2: {'vtx': 2, 'idx': 2, 'gl_PerVertex.gl_Position': [-0.2, -0.8, 0.0, 1.0]},
3: {'vtx': 3, 'idx': 3, 'gl_PerVertex.gl_Position': [-0.1, -0.5, 0.0, 1.0]},
4: {'vtx': 4, 'idx': 4, 'gl_PerVertex.gl_Position': [ 0.0, -0.8, 0.0, 1.0]},
5: {'vtx': 5, 'idx': 5, 'gl_PerVertex.gl_Position': [ 0.1, -0.8, 0.0, 1.0]},
}
self.check_mesh_data(postvs_ref, postvs_data)
self.check(len(postvs_data) == len(postvs_ref)) # We shouldn't have any extra vertices
self.check_overlay(draw.eventId, out, tex, save_data)
rdtest.log.success("{} {} is as expected".format(level, draw.name))
indirect_count_root = self.find_draw("{}: KHR_draw_indirect_count".format(level))
if indirect_count_root is not None:
self.check(indirect_count_root.children[0].name == '{}: Empty count draws'.format(level))
self.check(indirect_count_root.children[1].name == '{}: Indirect count draws'.format(level))
empties = indirect_count_root.children[0]
self.check(empties and len(empties.children) == 2)
draw: rd.DrawcallDescription
for draw in empties.children:
self.check(draw.numIndices == 0)
self.check(draw.numInstances == 0)
self.controller.SetFrameEvent(draw.eventId, False)
# Check that we have empty PostVS
postvs_data = self.get_postvs(rd.MeshDataStage.VSOut, 0, 1)
self.check(len(postvs_data) == 0)
self.check_overlay(draw.eventId, out, tex, save_data)
# vkCmdDrawIndirectCountKHR
draw_indirect = indirect_count_root.children[1].children[0]
self.check(draw_indirect and len(draw_indirect.children) == 1)
# vkCmdDrawIndirectCountKHR[0]
draw = draw_indirect.children[0]
self.check(draw.numIndices == 3)
self.check(draw.numInstances == 4)
self.controller.SetFrameEvent(draw.eventId, False)
# Check that we have PostVS as expected
postvs_data = self.get_postvs(rd.MeshDataStage.VSOut)
# These indices are the *output* indices, which have been rebased/remapped, so are not the same as the input
# indices
postvs_ref = {
0: {'vtx': 0, 'idx': 0, 'gl_PerVertex.gl_Position': [-0.8, 0.5, 0.0, 1.0]},
1: {'vtx': 1, 'idx': 1, 'gl_PerVertex.gl_Position': [-0.7, 0.2, 0.0, 1.0]},
2: {'vtx': 2, 'idx': 2, 'gl_PerVertex.gl_Position': [-0.6, 0.5, 0.0, 1.0]},
}
self.check_mesh_data(postvs_ref, postvs_data)
self.check(len(postvs_data) == len(postvs_ref)) # We shouldn't have any extra vertices
self.check_overlay(draw.eventId, out, tex, save_data)
rdtest.log.success("{} {} is as expected".format(level, draw.name))
# vkCmdDrawIndexedIndirectCountKHR
draw_indirect = indirect_count_root.children[1].children[1]
self.check(draw_indirect and len(draw_indirect.children) == 3)
# vkCmdDrawIndirectCountKHR[0]
draw = draw_indirect.children[0]
self.check(draw.numIndices == 3)
self.check(draw.numInstances == 1)
self.controller.SetFrameEvent(draw.eventId, False)
# Check that we have PostVS as expected
postvs_data = self.get_postvs(rd.MeshDataStage.VSOut)
# These indices are the *output* indices, which have been rebased/remapped, so are not the same as the input
# indices
postvs_ref = {
0: {'vtx': 0, 'idx': 0, 'gl_PerVertex.gl_Position': [-0.6, 0.5, 0.0, 1.0]},
1: {'vtx': 1, 'idx': 1, 'gl_PerVertex.gl_Position': [-0.5, 0.2, 0.0, 1.0]},
2: {'vtx': 2, 'idx': 2, 'gl_PerVertex.gl_Position': [-0.4, 0.5, 0.0, 1.0]},
}
self.check_mesh_data(postvs_ref, postvs_data)
self.check(len(postvs_data) == len(postvs_ref)) # We shouldn't have any extra vertices
self.check_overlay(draw.eventId, out, tex, save_data)
rdtest.log.success("{} {} is as expected".format(level, draw.name))
# vkCmdDrawIndirectCountKHR[1]
draw = draw_indirect.children[1]
self.check(draw.numIndices == 0)
self.check(draw.numInstances == 0)
self.controller.SetFrameEvent(draw.eventId, False)
postvs_data = self.get_postvs(rd.MeshDataStage.VSOut)
self.check(len(postvs_data) == 0)
self.check_overlay(draw.eventId, out, tex, save_data)
rdtest.log.success("{} {} is as expected".format(level, draw.name))
# vkCmdDrawIndirectCountKHR[2]
draw = draw_indirect.children[2]
self.check(draw.numIndices == 6)
self.check(draw.numInstances == 2)
self.controller.SetFrameEvent(draw.eventId, False)
# Check that we have PostVS as expected
postvs_data = self.get_postvs(rd.MeshDataStage.VSOut)
# These indices are the *output* indices, which have been rebased/remapped, so are not the same as the input
# indices
postvs_ref = {
0: {'vtx': 0, 'idx': 0, 'gl_PerVertex.gl_Position': [-0.4, 0.5, 0.0, 1.0]},
1: {'vtx': 1, 'idx': 1, 'gl_PerVertex.gl_Position': [-0.3, 0.2, 0.0, 1.0]},
2: {'vtx': 2, 'idx': 2, 'gl_PerVertex.gl_Position': [-0.2, 0.2, 0.0, 1.0]},
3: {'vtx': 3, 'idx': 3, 'gl_PerVertex.gl_Position': [-0.1, 0.5, 0.0, 1.0]},
4: {'vtx': 4, 'idx': 4, 'gl_PerVertex.gl_Position': [ 0.0, 0.2, 0.0, 1.0]},
5: {'vtx': 5, 'idx': 5, 'gl_PerVertex.gl_Position': [ 0.1, 0.2, 0.0, 1.0]},
}
self.check_mesh_data(postvs_ref, postvs_data)
self.check(len(postvs_data) == len(postvs_ref)) # We shouldn't have any extra vertices
self.check_overlay(draw.eventId, out, tex, save_data)
rdtest.log.success("{} {} is as expected".format(level, draw.name))
else:
rdtest.log.print("KHR_draw_indirect_count not tested")
|
TurtleRockStudios/renderdoc_public
|
util/test/tests/Vulkan/VK_Indirect.py
|
Python
|
mit
| 14,258
| 0.004699
|
# This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Provides version information of important supporting modules.
"""
from __future__ import unicode_literals
import functools
import appinfo
def _catch_unknown(f):
"""Decorate a function, returning "unknown" on import/attribute error."""
@functools.wraps(f)
def wrapper():
try:
return f()
except (ImportError, AttributeError):
return "unknown"
return wrapper
@_catch_unknown
def app_version():
import appinfo
return appinfo.version
@_catch_unknown
def sip_version():
import sip
return sip.SIP_VERSION_STR
@_catch_unknown
def pyqt_version():
import PyQt4.QtCore
return PyQt4.QtCore.PYQT_VERSION_STR
@_catch_unknown
def qt_version():
import PyQt4.QtCore
return PyQt4.QtCore.QT_VERSION_STR
@_catch_unknown
def python_version():
import platform
return platform.python_version()
@_catch_unknown
def operating_system():
import platform
return platform.platform()
@_catch_unknown
def ly_version():
import ly.pkginfo
return ly.pkginfo.version
@_catch_unknown
def poppler_version():
import popplerqt4
return '.'.join(format(n) for n in popplerqt4.poppler_version())
@_catch_unknown
def python_poppler_version():
import popplerqt4
return '.'.join(format(n) for n in popplerqt4.version())
def version_info_named():
"""Yield all the relevant names and their version string."""
yield appinfo.appname, appinfo.version
yield "Python", python_version()
yield "python-ly", ly_version()
yield "Qt", qt_version()
yield "PyQt", pyqt_version()
yield "sip", sip_version()
yield "poppler", poppler_version()
yield "python-poppler-qt", python_poppler_version()
yield "OS", operating_system()
def version_info_string(separator='\n'):
"""Return all version names as a string, joint with separator."""
return separator.join(map("{0[0]}: {0[1]}".format, version_info_named()))
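# Illustrative use (added sketch), e.g. for a one-line bug-report summary:
#
#     import debuginfo
#     print(debuginfo.version_info_string(separator=', '))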
|
shimpe/frescobaldi
|
frescobaldi_app/debuginfo.py
|
Python
|
gpl-2.0
| 2,835
| 0.003175
|
from .bhtsne import bh_tsne
__all__ = ["_bhtsne", "bh_tsne"]
|
CurryBoy/ProtoML-Deprecated
|
protoml/extras/tsne/_bhtsne/__init__.py
|
Python
|
bsd-3-clause
| 62
| 0
|
#!/usr/bin/env python
from array import *
import os
import struct
stats = os.stat('freqtest.dat')
file_size = stats.st_size
#print('file size ', +file_size, ' bytes')
entries = file_size/4
#print('file has ', +entries, +' entries')
freq_array = array('f', []) #create an array to hold the entries
with open('freqtest.dat', 'rb') as f: #open the file once and read the entries sequentially
    for a in range(0, file_size, 4):
        bytes = f.read(4)
        freq = struct.unpack('<f', bytes)
        print('Frequency: ' + str((a/4)+1) + ' ' + str(freq[0])) #print the entries as they are read
        freq_array.append(freq[0]) #and add them to the array
x = raw_input('continue? (y to modify freqs in the list, n to go to adding freqs)')
while x != "n":
# print(x)
fm = int(input('freq to modify: ')) #we want to modify a particular frequency
current_freq = freq_array[fm-1]
print('current freq is: ', + current_freq) #we want to replace it with a new value
new_freq = input('new frequency: ')
freq_array[fm-1] = new_freq
for indx in range(len(freq_array)): #print the modified list
print(indx+1, +freq_array[indx])
x = raw_input("do you want to change another frequency? ")
x = raw_input('continue? (y to add freqs to the list, n to save the list and exit)') #second part... we may want to add new frequencies to the list
while x != "n": #similar to the modify loop
new_freq = input('new frequency: ')
freq_array.append(new_freq) #except we append the frequency at the end
for indx in range(len(freq_array)): #and as before print the modified list
print(indx+1, +freq_array[indx])
x = raw_input("do you want to add another frequency? ")
print freq_array #this is here as a troubleshooting tool
f = open('freqtest.dat', 'wb') #everything done? dump the array to the file (overwrites
f.write(freq_array) #the old one)
f.close()
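# Note (added): array('f') stores raw 32-bit floats in native byte order,
# which matches the '<f' unpack format above only on little-endian
# machines; on big-endian platforms call freq_array.byteswap() before
# writing.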
|
unclejed613/gnuradio-projects-rtlsdr
|
scanner/frqedit.py
|
Python
|
gpl-2.0
| 2,267
| 0.013674
|
"""
====================================
Probabilistic Tracking on ODF fields
====================================
In this example we perform probabilistic fiber tracking on fields of ODF peaks.
This example requires importing example `reconst_csa.py`.
"""
import numpy as np
from reconst_csa import *
from dipy.reconst.interpolate import NearestNeighborInterpolator
from dipy.tracking.markov import (BoundaryStepper,
FixedSizeStepper,
ProbabilisticOdfWeightedTracker)
from dipy.tracking.utils import seeds_from_mask
stepper = FixedSizeStepper(1)
"""
Read the voxel size from the image header:
"""
zooms = img.get_header().get_zooms()[:3]
"""
Randomly select some seed points from the mask:
"""
seeds = seeds_from_mask(mask, [1, 1, 1], zooms)
seeds = seeds[:2000]
interpolator = NearestNeighborInterpolator(data, zooms)
pwt = ProbabilisticOdfWeightedTracker(csamodel, interpolator, mask,
stepper, 20, seeds, sphere)
csa_streamlines = list(pwt)
"""
Now that we have our streamlines in memory we can save the results to disk.
For this purpose we can use the TrackVis format (``*.trk``). First, we need to
create a header.
"""
import nibabel as nib
hdr = nib.trackvis.empty_header()
hdr['voxel_size'] = (2., 2., 2.)
hdr['voxel_order'] = 'LAS'
hdr['dim'] = csapeaks.gfa.shape[:3]
"""
Save the streamlines.
"""
csa_streamlines_trk = ((sl, None, None) for sl in csa_streamlines)
csa_sl_fname = 'csa_prob_streamline.trk'
nib.trackvis.write(csa_sl_fname, csa_streamlines_trk, hdr)
"""
Visualize the streamlines with fvtk (python vtk is required).
"""
from dipy.viz import fvtk
from dipy.viz.colormap import line_colors
r = fvtk.ren()
fvtk.add(r, fvtk.line(csa_streamlines, line_colors(csa_streamlines)))
print('Saving illustration as csa_prob_tracks.png')
fvtk.record(r, n_frames=1, out_path='csa_prob_tracks.png', size=(600, 600))
"""
.. figure:: csa_prob_tracks.png
:align: center
**Probabilistic streamlines applied on an ODF field modulated by GFA**.
"""
|
maurozucchelli/dipy
|
doc/examples/probabilistic_tracking_odfs.py
|
Python
|
bsd-3-clause
| 2,094
| 0.001433
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import click
from . import gradle
from . import ci_command
@click.argument('task', required=True, nargs=-1)
@click.option(
'--gradle-opts',
default='',
help='GRADLE_OPTS passed to the gradle invocation.')
@ci_command('gradle')
def gradle_command(task, gradle_opts):
"""Runs the specified gradle commands."""
gradle.run(*task, gradle_opts=gradle_opts)
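# Example invocation from a CI shell (the task name here is hypothetical):
#   fireci gradle --gradle-opts='-Xmx4g' :firebase-common:check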
|
firebase/firebase-android-sdk
|
ci/fireci/fireci/commands.py
|
Python
|
apache-2.0
| 946
| 0.002114
|
# Creates graph of restaurant reviews for yelp or trip advisor.
# writes graph to gml file for use in gephi
#
# Rob Churchill
#
# NOTE: I learned this technique in my data science class last semester, so a plagiarism check will
# almost certainly turn up similar clustering code. I did not copy it; I learned this specific way of
# doing it and referred to my own previous assignments while working on this project. I can provide
# those assignments on request, but adding extra files solely to prove originality seems unnecessary.
import networkx as nx
import numpy as np
import scipy as sp
import csv
folder = 'data/'
file_names = ['yelp_data.csv', 'trip_advisor_data.csv']
# EDIT this line to change which website you make the graph for. True=yelp, False=TripAdvisor
yelp = False
yelp_dataset = list()
file_name = file_names[1]
if yelp == True:
file_name = file_names[0]
# reads in appropriate file given yelp boolean variable
with open(folder+file_name, 'r') as f:
reader = csv.reader(f)
for line in reader:
yelp_dataset.append(line)
# removes headers
yelp_dataset.remove(yelp_dataset[0])
print len(yelp_dataset)
# create the graph
G = nx.Graph()
for y in yelp_dataset:
# add the nodes if they don't already exist
G.add_node(y[4], type='restaurant')
G.add_node(y[13], type='reviewer')
# add the edge between the reviewer and restaurant, weight is in different position in each file.
if yelp == True:
G.add_edge(y[13], y[4], weight=float(y[2]))
else:
G.add_edge(y[13], y[4], weight=float(y[1]))
print nx.number_of_nodes(G)
print nx.number_of_edges(G)
# write graph to gml file.
nx.write_gml(G, 'ta_graph.gml')
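# optional sanity check (a sketch): read the graph back from disk and confirm
# the node/edge counts survive the round trip
# H = nx.read_gml('ta_graph.gml')
# print nx.number_of_nodes(H), nx.number_of_edges(H)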
|
rchurch4/georgetown-data-science-fall-2015
|
analysis/graph/graph_creation.py
|
Python
|
mit
| 1,784
| 0.008408
|
# -*- coding: utf-8 -*-
"""Django URLconf file for ulm"""
from __future__ import unicode_literals
from django.conf import settings
try:
# pylint: disable=E0611
from django.conf.urls import patterns, include, url
except (ImportError): # Django 1.3 compatibility
from django.conf.urls.defaults import patterns, include, url
from django.conf.urls.static import static
from ulm.views import laptop, batteries, wifi
# The next two lines enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = \
patterns('',
url(r'^$', laptop),
url(r'^batter(?:y|ies)/$', batteries),
url(r'^(?:wifi|wlan)/$', wifi),
             # The next line enables the admin:
url(r'^admin/', include(admin.site.urls)),
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
dupuy/ulm
|
ulm/urls.py
|
Python
|
bsd-3-clause
| 911
| 0
|
#!/usr/bin/env python
import os
from os.path import join as pjoin
import sys
import subprocess
def get_output(cmd):
s = subprocess.Popen(cmd, stdout=subprocess.PIPE)
out = s.communicate()[0]
s.wait()
return out.strip()
# you could use os.path.walk to calculate this... or you could use du(1).
def duhack(path):
cmd = ['du', '-k', path]
out = get_output(cmd).split()
return int(out[0]) * 1024
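# a pure-Python alternative to duhack() (a sketch): walk the tree and sum the
# sizes of regular files; note that du(1) also counts directory blocks, so the
# two numbers will not match exactly
def walksize(path):
    total = 0
    for dirpath, dirnames, filenames in os.walk(path):
        for name in filenames:
            total += os.path.getsize(pjoin(dirpath, name))
    return total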
BASEPATH=sys.argv[1]
ROOT="/x1/mail-archives/mod_mbox"
HOSTNAME="http://mail-archives.apache.org/mod_mbox/"
PARTITION_SIZE=100 * 1024 * 1024
tlps={}
for files in os.listdir(ROOT):
path = files
tlp = path[0:path.find('-')]
list = path[path.find('-')+1:]
# print "%s - %s %s" % (tlp, list, path)
if not os.access("%s/%s/listinfo.db" % (ROOT, path), os.F_OK):
continue
if tlp == "www":
tlp = "asf"
if not tlps.has_key(tlp):
tlps[tlp] = {}
tlps[tlp][list] = [path, duhack(pjoin(ROOT, path))]
keys = tlps.keys()
keys.sort()
count = 0
fcount = 0
def write_sitemap_header(fp):
fp.write("""<?xml version="1.0" encoding="UTF-8"?>\n<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">\n""")
def write_sitemap_footer(fp):
fp.write("</sitemapindex>\n")
fp = open(BASEPATH % (fcount), 'w')
write_sitemap_header(fp)
for tlp in keys:
klist = tlps[tlp].keys()
klist.sort()
for list in klist:
name = tlps[tlp][list][0]
size = tlps[tlp][list][1]
        if size < PARTITION_SIZE:
count += 1
fp.write("<sitemap><loc>%s%s/?format=sitemap</loc></sitemap>\n" % (HOSTNAME, name))
else:
            part = (size / PARTITION_SIZE) + 1
for i in range(0, part):
count += 1
fp.write("<sitemap><loc>%s%s/?format=sitemap&pmax=%d&part=%d</loc></sitemap>\n" % (HOSTNAME, name, part, i))
if count > 500:
write_sitemap_footer(fp)
fp.close()
count = 0
fcount += 1
fp = open(BASEPATH % (fcount), 'w')
write_sitemap_header(fp)
write_sitemap_footer(fp)
|
sebbASF/infrastructure-puppet
|
modules/mail_archives/files/scripts/site-sitemap.py
|
Python
|
apache-2.0
| 2,114
| 0.008515
|
import os
import logging
import MySQLdb
import time
import sys
import Queue
import threading
import json
createUserSQL = "INSERT IGNORE INTO users (name) VALUES (%s);"
getUserByUsernameSQL = "SELECT * FROM users WHERE name=%s;"
getAuthorByNameSQL = "SELECT * FROM authors WHERE name=%s;"
createAuthorSQL = "INSERT IGNORE INTO authors (userid, name) VALUES (%s, %s);"
createBookSQL = "INSERT IGNORE INTO books (name, author, price) VALUES (%s, %s, %s);"
firstNames = sorted(["Kenia ", "Randal", "Shawnna ", "Rey ", "Cordia", "Kendal",
"Alina", "Dianna", "Misti", "Chelsie", "Gracia", "Teena", "Ronny", "Willy",
"Betsy", "Kenisha", "Elsy", "Cheryle", "Lurline ", "Karina", "Luba", "Vita",
"Lu", "Frances", "Lavenia", "Nereida", "Zetta", "Melony", "Eloise",
"Nickolas", "Ericka", "Cecilia", "Jenni", "Sofia", "Nobuko", "Trudy",
"Petronila", "Donnette", "Santos", "Viola", "Jessika", "Chere", "Azalee",
"Meggan", "Floyd", "Liberty", "Tabitha", "Juliana", "Pamila", "Blondell"])
lastNames = sorted(["Watterson", "Lawler", "Walt", "Birch", "Bryd", "Speight",
"Monroy", "Milledge", "Davilla", "Behrendt", "Mustain", "Blythe", "Gandhi",
"Brady", "Gooden", "Jellison", "Hager", "Selders", "Seaton", "Wind",
"Jelinek", "Reiser", "Lacour", "Maginnis", "Baggs", "Crossno", "Shadley",
"Bramer", "Mento", "Manigault", "Jacobi", "Deckman", "Spikes", "Duncan",
"Ackman", "Hornick", "Bourbeau", "Riehl", "Sena", "Rolon", "Pereira",
"Mikula", "Luk", "Albaugh", "Akin", "Bradburn", "Houlihan", "Frisina",
"Funnell", "Keister"])
def connect():
return MySQLdb.connect(host="mysql", # your host, usually localhost
user="root", # your username
passwd="password", # your password
db="bench") # name of the data base
createUserThreads = []
def createUsers(name):
logging.debug("Creating... "+name)
sys.stdout.flush()
db = connect();
cur = db.cursor()
for j in lastNames:
for k in range(0, 10):
myname = name + " " + j + "(" + str(k) + ")"
sys.stdout.flush()
cur.execute(createUserSQL, (myname,))
cur.execute(getUserByUsernameSQL, (myname, ))
row = cur.fetchone()
            if row is not None:
cur.execute(createAuthorSQL, [str(row[0]), ("Author "+myname)])
else:
print "Could not create ", myname
db.commit()
db.close()
logging.debug("Created! "+name)
sys.stdout.flush()
createBookThreads = []
def createBook(username):
logging.debug("Creating books... "+username)
sys.stdout.flush()
db = connect()
cur = db.cursor()
for j in lastNames:
for k in range(0, 3):
myname = "Author " + username + " " + j + "(" + str(k) + ")"
cur.execute(getAuthorByNameSQL, (myname, ))
row = cur.fetchone()
            if row is not None:
for i in range(0,2):
bookname = myname+"'s book "+str(i)
cur.execute(createBookSQL, [bookname, str(row[0]), i * 5])
else:
print "Could not find ", myname
db.commit()
db.close()
logging.debug("Created books! "+username)
sys.stdout.flush()
def initializeUsers():
    logging.debug("Initializing users...")
start = time.time();
for i in firstNames:
name = i + " " + hostname
t = threading.Thread(target=createUsers, args = (name, ))
t.daemon = True
createUserThreads.append(t)
# Start all the threads
for x in createUserThreads:
x.start()
# Wait for them to complete
for x in createUserThreads:
x.join()
# Return the time it took to run
logging.debug("Creating users took: "+str(time.time() - start))
return time.time() - start;
def initializeBooks():
    logging.debug("Initializing books...")
start = time.time();
for i in firstNames:
name = i + " " + hostname
t = threading.Thread(target=createBook, args = (name, ))
t.daemon = True
createBookThreads.append(t)
# Start all the threads
for x in createBookThreads:
x.start()
# Wait for them to complete
for x in createBookThreads:
x.join()
# Return the time it took to run
logging.debug("Creating books took: "+str(time.time() - start))
return time.time() - start;
def main():
logging.debug("Starting...")
db = connect();
    intUserTime = initializeUsers();
    intBookTime = initializeBooks();
# cur.execute("SELECT * FROM users")
# # print all the first cell of all the rows
# for row in cur.fetchall():
# logging.debug(row[1])
#
# cur.execute("SELECT * FROM authors")
# # print all the first cell of all the rows
# for row in cur.fetchall():
# logging.debug(row[2])
# db.close()
logging.info("Starting result save.")
with open('/tmp/bench_results/result.json', 'w') as fp:
results = {
"hostname": hostname,
"results": {
"Create": {
"Users": intUserTime,
"Books": intBookTime
}
}
}
logging.info(json.dumps(results))
json.dump(results, fp)
if __name__ == '__main__':
hostname = os.uname()[1]
logging.basicConfig(format=hostname + ' %(asctime)s %(levelname)s: %(message)s', level=logging.DEBUG)
main()
|
mellanox-senior-design/docker-volume-rdma
|
benchmarking/book_store/bench.py
|
Python
|
apache-2.0
| 5,515
| 0.008704
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from colour import Color
from django.contrib.auth.decorators import user_passes_test
from django.urls import reverse
from django.db.models import Sum
from django.http import HttpResponse, HttpResponseRedirect, StreamingHttpResponse
from django.shortcuts import get_object_or_404
from django.template import loader
from .models import Party, City, Senator, ContactList
from .forms import ChooseForm
from .getPopulations import getCityStatePopulations
import viewsenators.initialization as initialization
# This seems to be the most that Facebook will allow, though it varies over time
NUM_CITIES_PER_QUERY = 50
def index(request):
def colorToD3(color):
return "rgb(%d,%d,%d)" % (color.red*255, color.green*255, color.blue*255)
def substituteDesc(moc, desc):
if "{{number}}" not in desc:
desc += "\n\n%s's phone number is {{number}}" % moc.lastName
if moc.phoneNumber:
text = moc.phoneNumber
else:
text ="(unknown number)"
desc = desc.replace("{{name}}", moc.firstName + " " + moc.lastName)
return desc.replace("{{number}}", text)
template = loader.get_template('halcyonic/index.html')
if 'list' in request.GET:
clId = str(request.GET['list'])
contactList = get_object_or_404(ContactList, slug=clId)
else:
try:
contactList = ContactList.objects.get(slug='keep-children-with-their-families')
except ContactList.DoesNotExist:
contactList = ContactList.objects.get(title="Republican")
stateColor = colorToD3(Color(rgb=(125/255.0, 0/255.0, 16/255.0)))
senatorToURLsPopsAndDesc = {}
for senator in contactList.senators.all():
senatorToURLsPopsAndDesc[senator] = _stateToFbCode(senator.state)
senatorToURLsPopsAndDesc[senator]['callScript'] = substituteDesc(senator, contactList.description)
sortedDict = sorted(senatorToURLsPopsAndDesc.items(),
key = lambda x: x[0].state.name)
context = {
"stateColor": stateColor, # TODO eventually have meaningful colors?
"title": contactList.title,
"senatorToURLsPopsAndDesc": sortedDict
}
return HttpResponse(template.render(context, request))
def combineContactList(request):
template = loader.get_template('viewsenators/combine.html')
context = {'contactLists': ContactList.objects.all()}
return HttpResponse(template.render(context, request))
def createContactList(request):
# if this is a POST request we need to process the form data
if request.method == 'POST':
# create a form instance and populate it with data from the request:
form = ChooseForm(request.POST)
# check whether it's valid:
if form.is_valid():
data = form.cleaned_data
title = data['title']
description = data['description']
senators = data['senators']
public = data['public']
cl = _makeContactList(title, description, senators, public)
return HttpResponseRedirect(reverse('index')+'?list=' + cl.slug)
# if a GET (or any other method) we'll create a blank form
else:
form = ChooseForm()
so = Senator.objects
ids = {}
for party in Party.objects.all():
idList = ["input[value=\""+str(s.id)+"\"]"
for s in so.filter(party=party)]
idsSet = set(idList)
idsStr = ', '.join(idsSet)
ids[party.name] = idsStr
template = loader.get_template('viewsenators/choose.html')
context = {'form': form,
'ids': ids}
return HttpResponse(template.render(context, request))
def debugWriteAnything(text):
response = HttpResponse()
response.write(text)
return response
def _stateToFbCode(state):
""" :return: the URL and the percentage of the population of the
desired states which will be found via that URL """
# While there are many better URL constructions that ideally start with
# your friends, rather than start with all FB users in each city then
# intersect that with your friends list, this is the only way I could get it
# to work.
# In particular, facebook seems to limit the number of unions to six,
# whereas the number of intersections can be ten times that.
setOfCities = City.objects.filter(state=state).order_by('-population')[:NUM_CITIES_PER_QUERY]
url = "https://www.facebook.com/search/"
for city in setOfCities:
url += city.facebookId + "/residents/present/"
url += "union/me/friends/intersect/"
# % of population in this search
cityPop = setOfCities.aggregate(Sum('population'))['population__sum']
if cityPop is None: cityPop = 0 # TODO hack if a state has no cities
statePop = state.population
percentPopIncludedInURL = float(cityPop) / float(statePop)
percentPopIncludedInURL = int(100*percentPopIncludedInURL+0.5)
return {'url': url,
'percentPopIncludedInURL': percentPopIncludedInURL}
def _makeContactList(title, description, senatorList, public):
cl = ContactList.objects.create(
title = title,
description = description,
public = public)
cl.senators.set(senatorList)
cl.save()
return cl
@user_passes_test(lambda u: u.is_superuser)
def populateSenators(request):
def _createInitialLists():
if ContactList.objects.count() != 0:
return
assert Senator.objects.count() == 100
for party in Party.objects.all():
title = party.name
description = "Call {{name}} at {{number}}"
senators = Senator.objects.filter(party=party)
_makeContactList(title, description, senators, public=True)
initialization.populateAllData()
_createInitialLists()
senators = Senator.objects.all()
def s2t(s): return "%s: %s, %s" % (s.state.abbrev, s.firstName, s.lastName)
senText = '<br>'.join(sorted([s2t(s) for s in senators]))
return debugWriteAnything("The list of senators: <br>" + senText)
@user_passes_test(lambda u: u.is_superuser)
def clearDataForNewCongress(request):
initialization.clearDataForNewCongress()
return populateSenators(request)
@user_passes_test(lambda u: u.is_superuser)
def updateCitiesAndStatesWithLatestData(request):
# This can take more than 30 seconds, so we need a streaming response
# for Heroku to not shut it down
# This is only run once by the admin, so the decreased performance
# shouldn't matter.
def runner():
cityPopulations, statePopulations = getCityStatePopulations()
for x in initialization.updateCitiesWithCurrentData(cityPopulations):
yield x
yield initialization.addPopulationToStates(statePopulations)
return StreamingHttpResponse(runner())
|
artoonie/RedStatesBlueStates
|
redblue/viewsenators/views.py
|
Python
|
gpl-3.0
| 6,895
| 0.004206
|
#!/usr/bin/env python
import sys
import logging
import argparse
from gff3 import feature_lambda, feature_test_qual_value
from CPT_GFFParser import gffParse, gffWrite
from Bio.SeqFeature import FeatureLocation
log = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def __get_features(child, interpro=False):
child_features = {}
for rec in gffParse(child):
log.info("Parsing %s", rec.id)
# Only top level
for feature in rec.features:
# Get the record id as parent_feature_id (since this is how it will be during remapping)
parent_feature_id = rec.id
# If it's an interpro specific gff3 file
if interpro:
# Then we ignore polypeptide features as they're useless
if feature.type == "polypeptide":
continue
try:
child_features[parent_feature_id].append(feature)
except KeyError:
child_features[parent_feature_id] = [feature]
# Keep a list of feature objects keyed by parent record id
return child_features
def __update_feature_location(feature, parent, protein2dna):
start = feature.location.start
end = feature.location.end
if protein2dna:
start *= 3
end *= 3
if parent.location.strand >= 0:
ns = parent.location.start + start
ne = parent.location.start + end
st = +1
else:
ns = parent.location.end - end
ne = parent.location.end - start
st = -1
# Don't let start/stops be less than zero.
#
# Instead, we'll replace with %3 to try and keep it in the same reading
# frame that it should be in.
if ns < 0:
ns %= 3
if ne < 0:
ne %= 3
feature.location = FeatureLocation(ns, ne, strand=st)
if hasattr(feature, "sub_features"):
for subfeature in feature.sub_features:
__update_feature_location(subfeature, parent, protein2dna)
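# Worked example (hypothetical coordinates): a protein-space feature at
# [10, 20) under a minus-strand parent spanning [1000, 2000), with
# protein2dna=True:
#   start, end = 10 * 3, 20 * 3        -> 30, 60
#   ns = 2000 - 60 = 1940; ne = 2000 - 30 = 1970; strand = -1
# so the child feature is rebased to FeatureLocation(1940, 1970, strand=-1).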
def rebase(parent, child, interpro=False, protein2dna=False, map_by="ID"):
# get all of the features we will be re-mapping in a dictionary, keyed by parent feature ID
child_features = __get_features(child, interpro=interpro)
for rec in gffParse(parent):
replacement_features = []
# Horrifically slow I believe
for feature in feature_lambda(
rec.features,
# Filter features in the parent genome by those that are
# "interesting", i.e. have results in child_features array.
# Probably an unnecessary optimisation.
feature_test_qual_value,
{"qualifier": map_by, "attribute_list": child_features.keys()},
subfeatures=False,
):
# Features which will be re-mapped
to_remap = child_features[feature.id]
fixed_features = []
for x in to_remap:
# Then update the location of the actual feature
__update_feature_location(x, feature, protein2dna)
if interpro:
for y in ("status", "Target"):
try:
del x.qualifiers[y]
                        except KeyError:
pass
fixed_features.append(x)
replacement_features.extend(fixed_features)
# We do this so we don't include the original set of features that we
# were rebasing against in our result.
rec.features = replacement_features
rec.annotations = {}
gffWrite([rec], sys.stdout)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="rebase gff3 features against parent locations", epilog=""
)
parser.add_argument(
"parent", type=argparse.FileType("r"), help="Parent GFF3 annotations"
)
parser.add_argument(
"child",
type=argparse.FileType("r"),
help="Child GFF3 annotations to rebase against parent",
)
parser.add_argument(
"--interpro", action="store_true", help="Interpro specific modifications"
)
parser.add_argument(
"--protein2dna",
action="store_true",
help="Map protein translated results to original DNA data",
)
parser.add_argument("--map_by", help="Map by key", default="ID")
args = parser.parse_args()
rebase(**vars(args))
|
TAMU-CPT/galaxy-tools
|
tools/gff3/gff3_rebase.py
|
Python
|
gpl-3.0
| 4,407
| 0.000908
|
from __future__ import unicode_literals
import binascii
from pymacaroons import Caveat
from pymacaroons.utils import (
convert_to_bytes,
sign_first_party_caveat
)
from .base_first_party import (
BaseFirstPartyCaveatDelegate,
BaseFirstPartyCaveatVerifierDelegate
)
class FirstPartyCaveatDelegate(BaseFirstPartyCaveatDelegate):
def __init__(self, *args, **kwargs):
super(FirstPartyCaveatDelegate, self).__init__(*args, **kwargs)
def add_first_party_caveat(self, macaroon, predicate, **kwargs):
predicate = convert_to_bytes(predicate)
caveat = Caveat(caveat_id=convert_to_bytes(predicate))
macaroon.caveats.append(caveat)
encode_key = binascii.unhexlify(macaroon.signature_bytes)
macaroon.signature = sign_first_party_caveat(encode_key, predicate)
return macaroon
class FirstPartyCaveatVerifierDelegate(BaseFirstPartyCaveatVerifierDelegate):
def __init__(self, *args, **kwargs):
super(FirstPartyCaveatVerifierDelegate, self).__init__(*args, **kwargs)
def verify_first_party_caveat(self, verifier, caveat, signature):
predicate = caveat.caveat_id
        # count how many registered callbacks accept the predicate; the
        # verifier treats any nonzero count as "caveat satisfied"
        caveat_met = sum(callback(predicate)
                         for callback in verifier.callbacks)
        return caveat_met
def update_signature(self, signature, caveat):
return binascii.unhexlify(
sign_first_party_caveat(
signature,
caveat._caveat_id
)
)
|
illicitonion/pymacaroons
|
pymacaroons/caveat_delegates/first_party.py
|
Python
|
mit
| 1,498
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#HW3 for EECS 598 Motion Planning
import time
import openravepy
import userdefined as us
import kdtree
import transformationFunction as tf
from random import randrange
#### YOUR IMPORTS GO HERE ####
handles = [];
#### END OF YOUR IMPORTS ####
if not __openravepy_build_doc__:
from openravepy import *
from numpy import *
def waitrobot(robot):
"""busy wait for robot completion"""
while not robot.GetController().IsDone():
time.sleep(0.01)
def tuckarms(env,robot):
with env:
jointnames = ['l_shoulder_lift_joint','l_elbow_flex_joint','l_wrist_flex_joint','r_shoulder_lift_joint','r_elbow_flex_joint','r_wrist_flex_joint']
robot.SetActiveDOFs([robot.GetJoint(name).GetDOFIndex() for name in jointnames])
robot.SetActiveDOFValues([1.29023451,-2.32099996,-0.69800004,1.27843491,-2.32100002,-0.69799996]);
robot.GetController().SetDesired(robot.GetDOFValues());
waitrobot(robot)
def stringToFloatList(path):
path = path.split('\n')
for line in xrange(len(path)):
path[line] = path[line].split(',')
for i in xrange(len(path[line])):
path[line][i]=float(path[line][i])
return path
def drawPath(path,robot,color,size):
if type(path) is str: path = stringToFloatList(path)
for i in path:
robot.SetActiveDOFValues(i)
handles.append(env.plot3(points=robot.GetTransform()[0:3,3],pointsize=size,colors=color,drawstyle=1))
if __name__ == "__main__":
env = Environment()
env.SetViewer('qtcoin')
collisionChecker = RaveCreateCollisionChecker(env,'ode')
env.SetCollisionChecker(collisionChecker)
env.Reset()
# load a scene from ProjectRoom environment XML file
env.Load('env/bitreequad.env.xml')
time.sleep(0.1)
# 1) get the 1st robot that is inside the loaded scene
# 2) assign it to the variable named 'robot'
robot = env.GetRobots()[0]
robot.SetActiveDOFs([],DOFAffine.X|DOFAffine.Y|DOFAffine.Z|DOFAffine.RotationQuat)
# print robot.GetActiveDOFValues()
# raw_input("Press enter to move robot...")
# qt = tf.quaternion_from_euler(0.5,0.5,0.75,'rzxz')
# startconfig = [4.0,-1.5 ,0.2] + list(qt)
# print startconfig
startconfig = [ 4.0,-1.5 ,0.2 ,0.0, 0.0, 0.0 ];
robot.SetActiveDOFValues(us.E2Q(startconfig));
# robot.GetController().SetDesired(robot.GetDOFValues());
# waitrobot(robot);
waitrobot(robot)
print "test update state"
# s1 = [1,1,1,1,0,0,0,0.2,0.2,0.2,0.1,0.1,-0.1]
avf = 1.85*9.8/4
u = [-0.5*avf,2*avf,-0.5*avf,3*avf]
ts = 0.02
t = range(0,100)
while 1:
s2 = [0,0,0,0,0,0,1,0,0,0,0,0,0]
for tt in t:
s2 = us.updateState(s2,u,ts)
x1 = array(s2[0:3])
v1 = array(s2[3:6])
Q1 = array(s2[6:10])
W1 = array(s2[10:13])
E1 = tf.euler_from_quaternion(Q1)
C = list(x1)+list(Q1)
robot.SetActiveDOFValues(C);
time.sleep(0.02)
# traj = RaveCreateTrajectory(env,'');
# config = robot.GetActiveConfigurationSpecification('linear');
# config.AddDeltaTimeGroup();
# traj.Init(config);
# # myPath = [ [point.x, point.y,point.theta,i*0.01] for i,point in enumerate(path) ];
# num = 0
# for pathNode in path:
# num += 1
# traj.Insert(num,pathNode,config,True)
# # for i ,wayPoint in enumerate(myPath):
# # traj.Insert(i,wayPoint,config,True);
# robot.GetController().SetPath(traj);
# # robot.GetController().SetPath(traj)
### END OF YOUR CODE ###
raw_input("Press enter to exit...")
|
willsirius/DualTreeRRTStartMotionPlanning
|
pythonVision2/HW3_testUpdateFunction.py
|
Python
|
mit
| 3,774
| 0.023317
|
import sys
import os
import errno
import struct
import binascii
from time import sleep
from ctypes import (CDLL, get_errno)
from ctypes.util import find_library
from socket import (socket, AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI, SOL_HCI, HCI_FILTER,)
os.system("hciconfig hci0 down")
os.system("hciconfig hci0 up")
if not os.geteuid() == 0:
sys.exit("script only works as root")
btlib = find_library("bluetooth")
if not btlib:
raise Exception(
"Can't find required bluetooth libraries"
" (need to install bluez)"
)
bluez = CDLL(btlib, use_errno=True)
dev_id = bluez.hci_get_route(None)
sock = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI)
sock.bind((dev_id,))
err = bluez.hci_le_set_scan_parameters(sock.fileno(), 0, 0x10, 0x10, 0, 0, 1000);
if err < 0:
raise Exception("Set scan parameters failed")
# occurs when scanning is still enabled from previous call
# allows LE advertising events
hci_filter = struct.pack(
"<IQH",
0x00000010,
0x4000000000000000,
0
)
sock.setsockopt(SOL_HCI, HCI_FILTER, hci_filter)
err = bluez.hci_le_set_scan_enable(
sock.fileno(),
1, # 1 - turn on; 0 - turn off
0, # 0-filtering disabled, 1-filter out duplicates
1000 # timeout
)
if err < 0:
errnum = get_errno()
raise Exception("{} {}".format(
errno.errorcode[errnum],
os.strerror(errnum)
))
distanceAway = 1 # distance from the Estimote beacon, in meters
with open("RSSI_data" + str(distanceAway) + ".csv","w") as out_file:
for x in range (1,100):
data = sock.recv(1024)
        RSSI = int(binascii.b2a_hex(data[-1]),16)-255 # last byte of the event carries the RSSI; shift the unsigned byte into a negative dBm reading
out_string = ""
out_string += str(RSSI)
out_string += "\n"
out_file.write(out_string)
sock.close()
sys.exit()
|
jsantoso91/smartlighting
|
characterizeRSSI.py
|
Python
|
mit
| 1,721
| 0.012783
|
"""
Copyright 2014 Quentin Kaiser
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from nessusobject import NessusObject
class Scanner(NessusObject):
"""
    A Nessus scanner instance.
    Docstring style follows the Google Python Style Guide:
        http://google-styleguide.googlecode.com/svn/trunk/pyguide.html
"""
def __init__(self, server):
"""Constructor"""
super(Scanner, self).__init__(server)
self._id = None
self._uuid = None
self._name = None
self._type = None
self._status = None
self._scan_count = 0
self._engine_version = None
self._platform = None
self._loaded_plugin_set = None
self._registration_code = None
self._owner = None
@property
def id(self):
return self._id
@id.setter
def id(self, value):
self._id = int(value)
@property
def uuid(self):
return self._uuid
@uuid.setter
def uuid(self, value):
self._uuid = str(value)
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = str(value)
@property
def type(self):
return self._type
@type.setter
def type(self, value):
self._type = str(value)
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = str(value)
@property
def scan_count(self):
return self._scan_count
@scan_count.setter
def scan_count(self, value):
self._scan_count = int(value)
@property
def engine_version(self):
return self._engine_version
@engine_version.setter
def engine_version(self, value):
self._engine_version = str(value)
@property
def platform(self):
return self._platform
@platform.setter
def platform(self, value):
self._platform = str(value)
@property
def loaded_plugin_set(self):
return self._loaded_plugin_set
@loaded_plugin_set.setter
def loaded_plugin_set(self, value):
self._loaded_plugin_set = str(value)
@property
def registration_code(self):
return self._registration_code
@registration_code.setter
def registration_code(self, value):
self._registration_code = str(value)
@property
def owner(self):
return self._owner
@owner.setter
def owner(self, value):
self._owner = str(value)
|
QKaiser/pynessus
|
pynessus/models/scanner.py
|
Python
|
apache-2.0
| 2,984
| 0.001005
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
## @package workspace
# Module caffe2.python.workspace
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import contextlib
from google.protobuf.message import Message
from multiprocessing import Process
import os
from collections import defaultdict
import logging
import numpy as np
from past.builtins import basestring
import shutil
import socket
import tempfile
from caffe2.proto import caffe2_pb2
from caffe2.python import scope, utils
import caffe2.python._import_c_extension as C
logger = logging.getLogger(__name__)
Blobs = C.blobs
CreateBlob = C.create_blob
CurrentWorkspace = C.current_workspace
DeserializeBlob = C.deserialize_blob
GlobalInit = C.global_init
HasBlob = C.has_blob
RegisteredOperators = C.registered_operators
SerializeBlob = C.serialize_blob
SwitchWorkspace = C.switch_workspace
RootFolder = C.root_folder
Workspaces = C.workspaces
BenchmarkNet = C.benchmark_net
GetStats = C.get_stats
operator_tracebacks = defaultdict(dict)
is_asan = C.is_asan
has_gpu_support = C.has_gpu_support
if has_gpu_support:
NumCudaDevices = C.num_cuda_devices
SetDefaultGPUID = C.set_default_gpu_id
GetDefaultGPUID = C.get_default_gpu_id
GetCUDAVersion = C.get_cuda_version
GetCuDNNVersion = C.get_cudnn_version
def GetCudaPeerAccessPattern():
return np.asarray(C.get_cuda_peer_access_pattern())
GetDeviceProperties = C.get_device_properties
else:
NumCudaDevices = lambda: 0 # noqa
SetDefaultGPUID = lambda x: None # noqa
GetDefaultGPUID = lambda: 0 # noqa
    GetCUDAVersion = lambda: 0 # noqa
    GetCuDNNVersion = lambda: 0 # noqa
GetCudaPeerAccessPattern = lambda: np.array([]) # noqa
GetDeviceProperties = lambda x: None # noqa
def _GetFreeFlaskPort():
"""Get a free flask port."""
# We will prefer to use 5000. If not, we will then pick a random port.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex(('127.0.0.1', 5000))
if result == 0:
return 5000
else:
s = socket.socket()
s.bind(('', 0))
port = s.getsockname()[1]
s.close()
# Race condition: between the interval we close the socket and actually
# start a mint process, another process might have occupied the port. We
# don't do much here as this is mostly for convenience in research
# rather than 24x7 service.
return port
def StartMint(root_folder=None, port=None):
"""Start a mint instance.
TODO(Yangqing): this does not work well under ipython yet. According to
https://github.com/ipython/ipython/issues/5862
writing up some fix is a todo item.
"""
from caffe2.python.mint import app
if root_folder is None:
# Get the root folder from the current workspace
root_folder = C.root_folder()
if port is None:
port = _GetFreeFlaskPort()
process = Process(
target=app.main,
args=(
['-p', str(port), '-r', root_folder],
)
)
process.start()
print('Mint running at http://{}:{}'.format(socket.getfqdn(), port))
return process
def StringifyProto(obj):
"""Stringify a protocol buffer object.
Inputs:
obj: a protocol buffer object, or a Pycaffe2 object that has a Proto()
function.
Outputs:
string: the output protobuf string.
Raises:
    ValueError: if the passed in object is not a string, a protobuf message, or an object with a Proto() method.
"""
if isinstance(obj, basestring):
return obj
else:
if isinstance(obj, Message):
# First, see if this object is a protocol buffer, which we can
# simply serialize with the SerializeToString() call.
return obj.SerializeToString()
elif hasattr(obj, 'Proto'):
return obj.Proto().SerializeToString()
else:
raise ValueError("Unexpected argument to StringifyProto of type " +
type(obj).__name__)
def ResetWorkspace(root_folder=None):
if root_folder is None:
# Reset the workspace, but keep the current root folder setting.
return C.reset_workspace(C.root_folder())
else:
if not os.path.exists(root_folder):
os.makedirs(root_folder)
return C.reset_workspace(root_folder)
def CreateNet(net, overwrite=False, input_blobs=None):
if input_blobs is None:
input_blobs = []
for input_blob in input_blobs:
C.create_blob(input_blob)
return CallWithExceptionIntercept(
C.create_net,
C.Workspace.current._last_failed_op_net_position,
GetNetName(net),
StringifyProto(net), overwrite,
)
def Predictor(init_net, predict_net):
return C.Predictor(StringifyProto(init_net), StringifyProto(predict_net))
def GetOperatorCost(operator, blobs):
return C.get_operator_cost(StringifyProto(operator), blobs)
def RunOperatorOnce(operator):
return C.run_operator_once(StringifyProto(operator))
def RunOperatorsOnce(operators):
for op in operators:
success = RunOperatorOnce(op)
if not success:
return False
return True
def CallWithExceptionIntercept(func, op_id_fetcher, net_name, *args, **kwargs):
try:
return func(*args, **kwargs)
except Exception:
op_id = op_id_fetcher()
net_tracebacks = operator_tracebacks.get(net_name, None)
print("Traceback for operator {} in network {}".format(op_id, net_name))
if net_tracebacks and op_id in net_tracebacks:
tb = net_tracebacks[op_id]
for line in tb:
print(':'.join(map(str, line)))
raise
def RunNetOnce(net):
return CallWithExceptionIntercept(
C.run_net_once,
C.Workspace.current._last_failed_op_net_position,
GetNetName(net),
StringifyProto(net),
)
def RunNet(name, num_iter=1, allow_fail=False):
"""Runs a given net.
Inputs:
name: the name of the net, or a reference to the net.
num_iter: number of iterations to run
allow_fail: if True, does not assert on net exec failure but returns False
Returns:
True or an exception.
"""
return CallWithExceptionIntercept(
C.run_net,
C.Workspace.current._last_failed_op_net_position,
GetNetName(name),
StringifyNetName(name), num_iter, allow_fail,
)
def RunPlan(plan_or_step):
# TODO(jiayq): refactor core.py/workspace.py to avoid circular deps
import caffe2.python.core as core
if isinstance(plan_or_step, core.ExecutionStep):
plan_or_step = core.Plan(plan_or_step)
return C.run_plan(StringifyProto(plan_or_step))
def InferShapesAndTypes(nets, blob_dimensions=None):
"""Infers the shapes and types for the specified nets.
Inputs:
nets: the list of nets
blob_dimensions (optional): a dictionary of blobs and their dimensions.
If not specified, the workspace blobs are used.
Returns:
A tuple of (shapes, types) dictionaries keyed by blob name.
"""
net_protos = [StringifyProto(n.Proto()) for n in nets]
if blob_dimensions is None:
blobdesc_prototxt = C.infer_shapes_and_types_from_workspace(net_protos)
else:
blobdesc_prototxt = C.infer_shapes_and_types_from_map(
net_protos, blob_dimensions
)
blobdesc_proto = caffe2_pb2.TensorShapes()
blobdesc_proto.ParseFromString(blobdesc_prototxt)
shapes = {}
types = {}
for ts in blobdesc_proto.shapes:
if not ts.unknown_shape:
shapes[ts.name] = list(ts.dims)
types[ts.name] = ts.data_type
return (shapes, types)
def _StringifyName(name, expected_type):
if isinstance(name, basestring):
return name
assert type(name).__name__ == expected_type, \
"Expected a string or %s" % expected_type
return str(name)
def StringifyBlobName(name):
return _StringifyName(name, "BlobReference")
def StringifyNetName(name):
return _StringifyName(name, "Net")
def GetNetName(net):
if isinstance(net, basestring):
return net
if type(net).__name__ == "Net":
return net.Name()
if isinstance(net, caffe2_pb2.NetDef):
return net.name
raise Exception("Not a Net object: {}".format(str(net)))
def FeedBlob(name, arr, device_option=None):
"""Feeds a blob into the workspace.
Inputs:
name: the name of the blob.
arr: either a TensorProto object or a numpy array object to be fed into
the workspace.
device_option (optional): the device option to feed the data with.
Returns:
True or False, stating whether the feed is successful.
"""
if type(arr) is caffe2_pb2.TensorProto:
arr = utils.Caffe2TensorToNumpyArray(arr)
if type(arr) is np.ndarray and arr.dtype.kind in 'SU':
# Plain NumPy strings are weird, let's use objects instead
arr = arr.astype(np.object)
if device_option is None:
device_option = scope.CurrentDeviceScope()
if device_option and device_option.device_type == caffe2_pb2.CUDA:
if arr.dtype == np.dtype('float64'):
logger.warning(
"CUDA operators do not support 64-bit doubles, " +
"please use arr.astype(np.float32) or np.int32 for ints." +
" Blob: {}".format(name) +
" type: {}".format(str(arr.dtype))
)
name = StringifyBlobName(name)
if device_option is not None:
return C.feed_blob(name, arr, StringifyProto(device_option))
else:
return C.feed_blob(name, arr)
def FetchBlobs(names):
"""Fetches a list of blobs from the workspace.
Inputs:
names: list of names of blobs - strings or BlobReferences
Returns:
list of fetched blobs
"""
return [FetchBlob(name) for name in names]
def FetchBlob(name):
"""Fetches a blob from the workspace.
Inputs:
name: the name of the blob - a string or a BlobReference
Returns:
Fetched blob (numpy array or string) if successful
"""
return C.fetch_blob(StringifyBlobName(name))
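# A minimal feed/fetch round trip (a sketch; assumes the C extension has been
# initialized via GlobalInit and the blob name is arbitrary):
def _feed_fetch_example():
    x = np.random.rand(2, 3).astype(np.float32)
    FeedBlob('example_blob', x)  # copies the array into the current workspace
    y = FetchBlob('example_blob')  # reads it back as a numpy array
    assert np.array_equal(x, y)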
def ApplyTransform(transform_key, net):
"""Apply a Transform to a NetDef protobuf object, and returns the new
transformed NetDef.
Inputs:
transform_key: the name of the transform, as it is stored in the registry
net: a NetDef protobuf object
Returns:
Transformed NetDef protobuf object.
"""
transformed_net = caffe2_pb2.NetDef()
transformed_str = C.apply_transform(
str(transform_key).encode('utf-8'),
net.SerializeToString(),
)
transformed_net.ParseFromString(transformed_str)
return transformed_net
def ApplyTransformIfFaster(transform_key, net, init_net, **kwargs):
"""Apply a Transform to a NetDef protobuf object, and returns the new
transformed NetDef, only if it runs faster than the original.
The runs are performed on the current active workspace (gWorkspace).
You should initialize that workspace before making a call to this function.
Inputs:
transform_key: the name of the transform, as it is stored in the registry
net: a NetDef protobuf object
init_net: The net to initialize the workspace.
warmup_runs (optional):
Determines how many times the net is run before testing.
Will be 5 by default.
main_runs (optional):
Determines how many times the net is run during testing.
Will be 10 by default.
improvement_threshold (optional):
Determines the factor which the new net needs to be faster
in order to replace the old. Will be 1.01 by default.
Returns:
Either a Transformed NetDef protobuf object, or the original netdef.
"""
    warmup_runs = kwargs.get('warmup_runs', 5)
    main_runs = kwargs.get('main_runs', 10)
    improvement_threshold = kwargs.get('improvement_threshold', 1.01)
transformed_net = caffe2_pb2.NetDef()
transformed_str = C.apply_transform_if_faster(
str(transform_key).encode('utf-8'),
net.SerializeToString(),
init_net.SerializeToString(),
warmup_runs,
main_runs,
float(improvement_threshold),
)
transformed_net.ParseFromString(transformed_str)
return transformed_net
def GetNameScope():
"""Return the current namescope string. To be used to fetch blobs"""
return scope.CurrentNameScope()
class _BlobDict(object):
"""Provides python dict compatible way to do fetching and feeding"""
def __getitem__(self, key):
return FetchBlob(key)
def __setitem__(self, key, value):
return FeedBlob(key, value)
def __len__(self):
return len(C.blobs())
def __iter__(self):
return C.blobs().__iter__()
def __contains__(self, item):
return C.has_blob(item)
blobs = _BlobDict()
################################################################################
# Utilities for immediate mode
#
# Caffe2's immediate mode implements the following behavior: between the two
# function calls StartImmediate() and StopImmediate(), for any operator that is
# called through CreateOperator(), we will also run that operator in a workspace
# that is specific to the immediate mode. The user is explicitly expected to
# make sure that these ops have proper inputs and outputs, i.e. one should not
# run an op where an external input is not created or fed.
#
# Users can use FeedImmediate() and FetchImmediate() to interact with blobs
# in the immediate workspace.
#
# Once StopImmediate() is called, all contents in the immediate workspace is
# freed up so one can continue using normal runs.
#
# The immediate mode is solely for debugging purposes and support will be very
# sparse.
################################################################################
_immediate_mode = False
_immediate_workspace_name = "_CAFFE2_IMMEDIATE"
_immediate_root_folder = ''
def IsImmediate():
return _immediate_mode
@contextlib.contextmanager
def WorkspaceGuard(workspace_name):
current = CurrentWorkspace()
SwitchWorkspace(workspace_name, True)
yield
SwitchWorkspace(current)
def StartImmediate(i_know=False):
global _immediate_mode
global _immediate_root_folder
if IsImmediate():
# already in immediate mode. We will kill the previous one
# and start from fresh.
StopImmediate()
_immediate_mode = True
with WorkspaceGuard(_immediate_workspace_name):
_immediate_root_folder = tempfile.mkdtemp()
ResetWorkspace(_immediate_root_folder)
if i_know:
# if the user doesn't want to see the warning message, sure...
return
print("""
Enabling immediate mode in caffe2 python is an EXTREMELY EXPERIMENTAL
feature and may very easily go wrong. This is because Caffe2 uses a
declarative way of defining operators and models, which is essentially
not meant to run things in an interactive way. Read the following carefully
to make sure that you understand the caveats.
(1) You need to make sure that the sequences of operators you create are
actually runnable sequentially. For example, if you create an op that takes
an input X, somewhere earlier you should have already created X.
(2) Caffe2 immediate uses one single workspace, so if the set of operators
you run are intended to be under different workspaces, they will not run.
To create boundaries between such use cases, you can call FinishImmediate()
and StartImmediate() manually to flush out everything no longer needed.
(3) Underlying objects held by the immediate mode may interfere with your
normal run. For example, if there is a leveldb that you opened in immediate
mode and did not close, your main run will fail because leveldb does not
support double opening. Immediate mode may also occupy a lot of memory esp.
on GPUs. Call FinishImmediate() as soon as possible when you no longer
need it.
(4) Immediate is designed to be slow. Every immediate call implicitly
creates a temp operator object, runs it, and destroys the operator. This
slow-speed run is by design to discourage abuse. For most use cases other
than debugging, do NOT turn on immediate mode.
(5) If there is anything FATAL happening in the underlying C++ code, the
immediate mode will immediately (pun intended) cause the runtime to crash.
Thus you should use immediate mode with extra care. If you still would
like to, have fun [https://xkcd.com/149/].
""")
def StopImmediate():
"""Stops an immediate mode run."""
# Phew, that was a dangerous ride.
global _immediate_mode
global _immediate_root_folder
if not IsImmediate():
return
with WorkspaceGuard(_immediate_workspace_name):
ResetWorkspace()
shutil.rmtree(_immediate_root_folder)
_immediate_root_folder = ''
_immediate_mode = False
def ImmediateBlobs():
with WorkspaceGuard(_immediate_workspace_name):
return Blobs()
def RunOperatorImmediate(op):
with WorkspaceGuard(_immediate_workspace_name):
RunOperatorOnce(op)
def FetchImmediate(*args, **kwargs):
with WorkspaceGuard(_immediate_workspace_name):
return FetchBlob(*args, **kwargs)
def FeedImmediate(*args, **kwargs):
with WorkspaceGuard(_immediate_workspace_name):
return FeedBlob(*args, **kwargs)
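# A short immediate-mode sketch (debugging only; heed the caveats printed by
# StartImmediate). The Relu operator is illustrative, not prescriptive.
def _immediate_mode_example():
    import caffe2.python.core as core  # local import to avoid circular deps
    StartImmediate(i_know=True)
    FeedImmediate('in', np.ones((2, 2), dtype=np.float32))
    RunOperatorImmediate(core.CreateOperator('Relu', ['in'], ['out']))
    out = FetchImmediate('out')
    StopImmediate()
    return out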
# CWorkspace utilities
def _Workspace_create_net_with_exception_intercept(ws, net, overwrite=False):
return CallWithExceptionIntercept(
ws._create_net,
ws._last_failed_op_net_position,
GetNetName(net),
StringifyProto(net), overwrite,
)
C.Workspace.create_net = _Workspace_create_net_with_exception_intercept
def _Workspace_run(ws, obj):
if hasattr(obj, 'Proto'):
obj = obj.Proto()
if isinstance(obj, caffe2_pb2.PlanDef):
return ws._run_plan(obj.SerializeToString())
if isinstance(obj, caffe2_pb2.NetDef):
return CallWithExceptionIntercept(
ws._run_net,
ws._last_failed_op_net_position,
GetNetName(obj),
obj.SerializeToString(),
)
if isinstance(obj, caffe2_pb2.OperatorDef):
return ws._run_operator(obj.SerializeToString())
raise ValueError(
"Don't know how to do Workspace.run() on {}".format(type(obj)))
C.Workspace.run = _Workspace_run
def _Blob_feed(blob, arg, device_option=None):
if device_option is not None:
device_option = StringifyProto(device_option)
return blob._feed(arg, device_option)
C.Blob.feed = _Blob_feed
|
davinwang/caffe2
|
caffe2/python/workspace.py
|
Python
|
apache-2.0
| 19,553
| 0.001074
|
match: case = 42
|
smmribeiro/intellij-community
|
python/testData/psi/PatternMatchingAnnotatedAssignmentLooksLikeIncompleteMatchStatement.py
|
Python
|
apache-2.0
| 17
| 0
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: Red Hat Inc. 2013-2014
# Author: Lucas Meneghel Rodrigues <lmr@redhat.com>
import os
import glob
# pylint: disable=E0611
from setuptools import setup
VERSION = open('VERSION', 'r').read().strip()
VIRTUAL_ENV = 'VIRTUAL_ENV' in os.environ
def get_dir(system_path=None, virtual_path=None):
"""
Retrieve VIRTUAL_ENV friendly path
:param system_path: Relative system path
:param virtual_path: Overrides system_path for virtual_env only
:return: VIRTUAL_ENV friendly path
"""
if virtual_path is None:
virtual_path = system_path
if VIRTUAL_ENV:
if virtual_path is None:
virtual_path = []
return os.path.join(*virtual_path)
else:
if system_path is None:
system_path = []
return os.path.join(*(['/'] + system_path))
def get_data_files():
    def add_files(level=None):  # avoid the mutable-default-argument pitfall
        if level is None:
            level = []
installed_location = ['usr', 'share', 'avocado-plugins-vt']
installed_location += level
level_str = '/'.join(level)
if level_str:
level_str += '/'
file_glob = '%s*' % level_str
files_found = [path for path in glob.glob(file_glob) if
os.path.isfile(path)]
return [((get_dir(installed_location, level)), files_found)]
data_files = [(get_dir(['etc', 'avocado', 'conf.d']),
['etc/avocado/conf.d/vt.conf'])]
data_files += [(get_dir(['usr', 'share', 'avocado-plugins-vt',
'test-providers.d']),
glob.glob('test-providers.d/*'))]
data_files_dirs = ['backends', 'shared']
for data_file_dir in data_files_dirs:
for root, dirs, files in os.walk(data_file_dir):
for subdir in dirs:
rt = root.split('/')
rt.append(subdir)
data_files += add_files(rt)
return data_files
setup(name='avocado-plugins-vt',
version=VERSION,
description='Avocado Virt Test Compatibility Layer plugin',
author='Avocado Developers',
author_email='avocado-devel@redhat.com',
url='http://github.com/avocado-framework/avocado-vt',
packages=['avocado_vt',
'avocado_vt.plugins',
'virttest',
'virttest.libvirt_xml',
'virttest.libvirt_xml.devices',
'virttest.libvirt_xml.nwfilter_protocols',
'virttest.qemu_devices',
'virttest.remote_commander',
'virttest.staging',
'virttest.staging.backports',
'virttest.tests',
'virttest.unittest_utils',
'virttest.utils_test',
'virttest.utils_test.qemu'],
package_data={"virttest": ["*.*"]},
data_files=get_data_files(),
entry_points={
'avocado.plugins.cli': [
'vt-list = avocado_vt.plugins.vt_list:VTLister',
'vt = avocado_vt.plugins.vt:VTRun',
],
'avocado.plugins.cli.cmd': [
'vt-bootstrap = avocado_vt.plugins.vt_bootstrap:VTBootstrap',
],
'avocado.plugins.job.prepost': [
'vt-joblock = avocado_vt.plugins.vt_joblock:VTJobLock'
],
},
)
|
CongLi/avocado-vt
|
setup.py
|
Python
|
gpl-2.0
| 3,742
| 0
|
# -*- coding:utf-8 -*-
from django.urls import path
from article.views.post import PostListApiView, PostCreateApiView, PostDetailApiView
urlpatterns = [
    # URL prefix: /api/v1/article/post/
path('create', PostCreateApiView.as_view(), name="create"),
path('list', PostListApiView.as_view(), name="list"),
path('<int:pk>', PostDetailApiView.as_view(), name="detail"),
]
|
codelieche/codelieche.com
|
apps/article/urls/api/post.py
|
Python
|
mit
| 382
| 0.00266
|
# Copyright (c) 2008 David Aguilar
# Copyright (c) 2015 Daniel Harding
"""Provides an filesystem monitoring for Linux (via inotify) and for Windows
(via pywin32 and the ReadDirectoryChanges function)"""
from __future__ import division, absolute_import, unicode_literals
import errno
import os
import os.path
import select
from threading import Lock
from . import utils
from . import version
from .decorators import memoize
AVAILABLE = None
if utils.is_win32():
try:
import pywintypes
import win32con
import win32event
import win32file
except ImportError:
pass
else:
AVAILABLE = 'pywin32'
elif utils.is_linux():
try:
from . import inotify
except ImportError:
pass
else:
AVAILABLE = 'inotify'
from qtpy import QtCore
from qtpy.QtCore import Signal
from . import core
from . import gitcfg
from . import gitcmds
from .compat import bchr
from .git import git
from .i18n import N_
from .interaction import Interaction
class _Monitor(QtCore.QObject):
files_changed = Signal()
def __init__(self, thread_class):
QtCore.QObject.__init__(self)
self._thread_class = thread_class
self._thread = None
def start(self):
if self._thread_class is not None:
assert self._thread is None
self._thread = self._thread_class(self)
self._thread.start()
def stop(self):
if self._thread_class is not None:
assert self._thread is not None
self._thread.stop()
self._thread.wait()
self._thread = None
def refresh(self):
if self._thread is not None:
self._thread.refresh()
class _BaseThread(QtCore.QThread):
#: The delay, in milliseconds, between detecting file system modification
#: and triggering the 'files_changed' signal, to coalesce multiple
#: modifications into a single signal.
_NOTIFICATION_DELAY = 888
def __init__(self, monitor):
QtCore.QThread.__init__(self)
self._monitor = monitor
self._running = True
self._use_check_ignore = version.check('check-ignore',
version.git_version())
self._force_notify = False
self._file_paths = set()
@property
def _pending(self):
return self._force_notify or self._file_paths
def refresh(self):
"""Do any housekeeping necessary in response to repository changes."""
pass
def notify(self):
"""Notifies all observers"""
do_notify = False
if self._force_notify:
do_notify = True
elif self._file_paths:
proc = core.start_command(['git', 'check-ignore', '--verbose',
'--non-matching', '-z', '--stdin'])
path_list = bchr(0).join(core.encode(path)
for path in self._file_paths)
out, err = proc.communicate(path_list)
if proc.returncode:
do_notify = True
else:
# Each output record is four fields separated by NULL
# characters (records are also separated by NULL characters):
# <source> <NULL> <linenum> <NULL> <pattern> <NULL> <pathname>
# For paths which are not ignored, all fields will be empty
# except for <pathname>. So to see if we have any non-ignored
# files, we simply check every fourth field to see if any of
# them are empty.
source_fields = out.split(bchr(0))[0:-1:4]
do_notify = not all(source_fields)
self._force_notify = False
self._file_paths = set()
if do_notify:
self._monitor.files_changed.emit()
@staticmethod
def _log_enabled_message():
msg = N_('File system change monitoring: enabled.\n')
Interaction.safe_log(msg)
if AVAILABLE == 'inotify':
class _InotifyThread(_BaseThread):
_TRIGGER_MASK = (
inotify.IN_ATTRIB |
inotify.IN_CLOSE_WRITE |
inotify.IN_CREATE |
inotify.IN_DELETE |
inotify.IN_MODIFY |
inotify.IN_MOVED_FROM |
inotify.IN_MOVED_TO
)
_ADD_MASK = (
_TRIGGER_MASK |
inotify.IN_EXCL_UNLINK |
inotify.IN_ONLYDIR
)
def __init__(self, monitor):
_BaseThread.__init__(self, monitor)
worktree = git.worktree()
if worktree is not None:
worktree = core.abspath(worktree)
self._worktree = worktree
self._git_dir = git.git_path()
self._lock = Lock()
self._inotify_fd = None
self._pipe_r = None
self._pipe_w = None
self._worktree_wd_to_path_map = {}
self._worktree_path_to_wd_map = {}
self._git_dir_wd_to_path_map = {}
self._git_dir_path_to_wd_map = {}
self._git_dir_wd = None
@staticmethod
def _log_out_of_wds_message():
msg = N_('File system change monitoring: disabled because the'
' limit on the total number of inotify watches was'
' reached. You may be able to increase the limit on'
' the number of watches by running:\n'
'\n'
' echo fs.inotify.max_user_watches=100000 |'
' sudo tee -a /etc/sysctl.conf &&'
' sudo sysctl -p\n')
Interaction.safe_log(msg)
def run(self):
try:
with self._lock:
self._inotify_fd = inotify.init()
self._pipe_r, self._pipe_w = os.pipe()
poll_obj = select.poll()
poll_obj.register(self._inotify_fd, select.POLLIN)
poll_obj.register(self._pipe_r, select.POLLIN)
self.refresh()
self._log_enabled_message()
while self._running:
if self._pending:
timeout = self._NOTIFICATION_DELAY
else:
timeout = None
try:
events = poll_obj.poll(timeout)
except OSError as e:
if e.errno == errno.EINTR:
continue
else:
raise
except select.error:
continue
else:
if not self._running:
break
elif not events:
self.notify()
else:
for fd, event in events:
if fd == self._inotify_fd:
self._handle_events()
finally:
with self._lock:
if self._inotify_fd is not None:
os.close(self._inotify_fd)
self._inotify_fd = None
if self._pipe_r is not None:
os.close(self._pipe_r)
self._pipe_r = None
os.close(self._pipe_w)
self._pipe_w = None
def refresh(self):
with self._lock:
if self._inotify_fd is None:
return
try:
if self._worktree is not None:
tracked_dirs = set(
os.path.dirname(os.path.join(self._worktree,
path))
for path in gitcmds.tracked_files())
self._refresh_watches(tracked_dirs,
self._worktree_wd_to_path_map,
self._worktree_path_to_wd_map)
git_dirs = set()
git_dirs.add(self._git_dir)
for dirpath, dirnames, filenames in core.walk(
os.path.join(self._git_dir, 'refs')):
git_dirs.add(dirpath)
self._refresh_watches(git_dirs,
self._git_dir_wd_to_path_map,
self._git_dir_path_to_wd_map)
self._git_dir_wd = \
self._git_dir_path_to_wd_map[self._git_dir]
except OSError as e:
if e.errno == errno.ENOSPC:
self._log_out_of_wds_message()
self._running = False
else:
raise
def _refresh_watches(self, paths_to_watch, wd_to_path_map,
path_to_wd_map):
watched_paths = set(path_to_wd_map)
for path in watched_paths - paths_to_watch:
wd = path_to_wd_map.pop(path)
                wd_to_path_map.pop(wd)
try:
inotify.rm_watch(self._inotify_fd, wd)
except OSError as e:
if e.errno == errno.EINVAL:
# This error can occur if the target of the wd was
# removed on the filesystem before we call
# inotify.rm_watch() so ignore it.
pass
else:
raise
for path in paths_to_watch - watched_paths:
try:
wd = inotify.add_watch(self._inotify_fd, core.encode(path),
self._ADD_MASK)
except OSError as e:
if e.errno in (errno.ENOENT, errno.ENOTDIR):
# These two errors should only occur as a result of
# race conditions: the first if the directory
# referenced by path was removed or renamed before the
# call to inotify.add_watch(); the second if the
# directory referenced by path was replaced with a file
# before the call to inotify.add_watch(). Therefore we
# simply ignore them.
pass
else:
raise
else:
wd_to_path_map[wd] = path
path_to_wd_map[path] = wd
def _check_event(self, wd, mask, name):
if mask & inotify.IN_Q_OVERFLOW:
self._force_notify = True
elif not mask & self._TRIGGER_MASK:
pass
elif mask & inotify.IN_ISDIR:
pass
elif wd in self._worktree_wd_to_path_map:
if self._use_check_ignore:
self._file_paths.add(
os.path.join(self._worktree_wd_to_path_map[wd],
core.decode(name)))
else:
self._force_notify = True
elif wd == self._git_dir_wd:
name = core.decode(name)
if name == 'HEAD' or name == 'index':
self._force_notify = True
elif (wd in self._git_dir_wd_to_path_map
and not core.decode(name).endswith('.lock')):
self._force_notify = True
def _handle_events(self):
for wd, mask, cookie, name in \
inotify.read_events(self._inotify_fd):
if not self._force_notify:
self._check_event(wd, mask, name)
def stop(self):
self._running = False
with self._lock:
if self._pipe_w is not None:
os.write(self._pipe_w, bchr(0))
self.wait()
if AVAILABLE == 'pywin32':
class _Win32Watch(object):
def __init__(self, path, flags):
self.flags = flags
self.handle = None
self.event = None
try:
self.handle = win32file.CreateFileW(
path,
0x0001, # FILE_LIST_DIRECTORY
win32con.FILE_SHARE_READ | win32con.FILE_SHARE_WRITE,
None,
win32con.OPEN_EXISTING,
win32con.FILE_FLAG_BACKUP_SEMANTICS |
win32con.FILE_FLAG_OVERLAPPED,
None)
self.buffer = win32file.AllocateReadBuffer(8192)
self.event = win32event.CreateEvent(None, True, False, None)
self.overlapped = pywintypes.OVERLAPPED()
self.overlapped.hEvent = self.event
self._start()
except:
self.close()
raise
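        # Issue an asynchronous (overlapped) ReadDirectoryChangesW call;
        # results are collected via read() once self.event is signaled.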
def _start(self):
win32file.ReadDirectoryChangesW(self.handle, self.buffer, True,
self.flags, self.overlapped)
def read(self):
if win32event.WaitForSingleObject(self.event, 0) \
== win32event.WAIT_TIMEOUT:
result = []
else:
nbytes = win32file.GetOverlappedResult(self.handle,
self.overlapped, False)
result = win32file.FILE_NOTIFY_INFORMATION(self.buffer, nbytes)
self._start()
return result
def close(self):
if self.handle is not None:
win32file.CancelIo(self.handle)
win32file.CloseHandle(self.handle)
if self.event is not None:
win32file.CloseHandle(self.event)
class _Win32Thread(_BaseThread):
_FLAGS = (win32con.FILE_NOTIFY_CHANGE_FILE_NAME |
win32con.FILE_NOTIFY_CHANGE_DIR_NAME |
win32con.FILE_NOTIFY_CHANGE_ATTRIBUTES |
win32con.FILE_NOTIFY_CHANGE_SIZE |
win32con.FILE_NOTIFY_CHANGE_LAST_WRITE |
win32con.FILE_NOTIFY_CHANGE_SECURITY)
def __init__(self, monitor):
_BaseThread.__init__(self, monitor)
worktree = git.worktree()
if worktree is not None:
worktree = self._transform_path(core.abspath(worktree))
self._worktree = worktree
self._worktree_watch = None
self._git_dir = self._transform_path(core.abspath(git.git_path()))
self._git_dir_watch = None
self._stop_event_lock = Lock()
self._stop_event = None
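        # Normalize to forward slashes and lower case so path comparisons
        # are case-insensitive, matching Windows filesystem semantics.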
@staticmethod
def _transform_path(path):
return path.replace('\\', '/').lower()
def _read_watch(self, watch):
if win32event.WaitForSingleObject(watch.event, 0) \
== win32event.WAIT_TIMEOUT:
nbytes = 0
else:
nbytes = win32file.GetOverlappedResult(watch.handle,
watch.overlapped, False)
return win32file.FILE_NOTIFY_INFORMATION(watch.buffer, nbytes)
def run(self):
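            # Main loop: wait on the stop event and the directory watches,
            # coalescing changes for _NOTIFICATION_DELAY before notifying
            # the monitor.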
try:
with self._stop_event_lock:
self._stop_event = win32event.CreateEvent(None, True,
False, None)
events = [self._stop_event]
if self._worktree is not None:
self._worktree_watch = _Win32Watch(self._worktree,
self._FLAGS)
events.append(self._worktree_watch.event)
self._git_dir_watch = _Win32Watch(self._git_dir, self._FLAGS)
events.append(self._git_dir_watch.event)
self._log_enabled_message()
while self._running:
if self._pending:
timeout = self._NOTIFICATION_DELAY
else:
timeout = win32event.INFINITE
rc = win32event.WaitForMultipleObjects(events, False,
timeout)
if not self._running:
break
elif rc == win32event.WAIT_TIMEOUT:
self.notify()
else:
self._handle_results()
finally:
with self._stop_event_lock:
if self._stop_event is not None:
win32file.CloseHandle(self._stop_event)
self._stop_event = None
if self._worktree_watch is not None:
self._worktree_watch.close()
if self._git_dir_watch is not None:
self._git_dir_watch.close()
def _handle_results(self):
if self._worktree_watch is not None:
for action, path in self._worktree_watch.read():
if not self._running:
break
if self._force_notify:
continue
path = self._worktree + '/' + self._transform_path(path)
if (path != self._git_dir
and not path.startswith(self._git_dir + '/')
and not os.path.isdir(path)
):
if self._use_check_ignore:
self._file_paths.add(path)
else:
self._force_notify = True
for action, path in self._git_dir_watch.read():
if not self._running:
break
if self._force_notify:
continue
path = self._transform_path(path)
if path.endswith('.lock'):
continue
if (path == 'head'
or path == 'index'
or path.startswith('refs/')
):
self._force_notify = True
def stop(self):
self._running = False
with self._stop_event_lock:
if self._stop_event is not None:
win32event.SetEvent(self._stop_event)
self.wait()
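# Module-level accessor for the process-wide monitor singleton.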
@memoize
def current():
return _create_instance()
def _create_instance():
thread_class = None
cfg = gitcfg.current()
if not cfg.get('cola.inotify', True):
msg = N_('File system change monitoring: disabled because'
' "cola.inotify" is false.\n')
Interaction.log(msg)
elif AVAILABLE == 'inotify':
thread_class = _InotifyThread
elif AVAILABLE == 'pywin32':
thread_class = _Win32Thread
else:
if utils.is_win32():
msg = N_('File system change monitoring: disabled because pywin32'
' is not installed.\n')
Interaction.log(msg)
elif utils.is_linux():
msg = N_('File system change monitoring: disabled because libc'
' does not support the inotify system calls.\n')
Interaction.log(msg)
return _Monitor(thread_class)
|
dirtycold/git-cola
|
cola/fsmonitor.py
|
Python
|
gpl-2.0
| 19,637
| 0.000662
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""LSTM Block Cell ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.rnn.python.kernel_tests import benchmarking
from tensorflow.contrib.rnn.python.ops import lstm_ops
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
block_lstm = lstm_ops._block_lstm # pylint: disable=protected-access
def blocks_match(sess, use_peephole):
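  """Runs one random LSTM problem through basic, block, and fused cells.

  Builds an LSTMCell-based static RNN, the block_lstm op, and an
  LSTMBlockFusedCell over shared weights, then returns their outputs, final
  states, and gradients w.r.t. inputs and weights so tests can compare them.
  """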
batch_size = 2
input_size = 3
cell_size = 4
sequence_length = 4
inputs = []
for _ in range(sequence_length):
inp = ops.convert_to_tensor(
np.random.randn(batch_size, input_size), dtype=dtypes.float32)
inputs.append(inp)
stacked_inputs = array_ops.stack(inputs)
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=19890212)
with variable_scope.variable_scope("test", initializer=initializer):
# magic naming so that the cells pick up these variables and reuse them
if use_peephole:
wci = variable_scope.get_variable(
"rnn/lstm_cell/w_i_diag", shape=[cell_size], dtype=dtypes.float32)
wcf = variable_scope.get_variable(
"rnn/lstm_cell/w_f_diag", shape=[cell_size], dtype=dtypes.float32)
wco = variable_scope.get_variable(
"rnn/lstm_cell/w_o_diag", shape=[cell_size], dtype=dtypes.float32)
w = variable_scope.get_variable(
"rnn/lstm_cell/kernel",
shape=[input_size + cell_size, cell_size * 4],
dtype=dtypes.float32)
b = variable_scope.get_variable(
"rnn/lstm_cell/bias",
shape=[cell_size * 4],
dtype=dtypes.float32,
initializer=init_ops.zeros_initializer())
basic_cell = rnn_cell.LSTMCell(
cell_size, use_peepholes=use_peephole, state_is_tuple=True, reuse=True)
basic_outputs_op, basic_state_op = rnn.static_rnn(
basic_cell, inputs, dtype=dtypes.float32)
if use_peephole:
_, _, _, _, _, _, block_outputs_op = block_lstm(
ops.convert_to_tensor(sequence_length, dtype=dtypes.int64),
inputs,
w,
b,
wci=wci,
wcf=wcf,
wco=wco,
cell_clip=0,
use_peephole=True)
else:
_, _, _, _, _, _, block_outputs_op = block_lstm(
ops.convert_to_tensor(sequence_length, dtype=dtypes.int64),
inputs,
w,
b,
cell_clip=0)
fused_cell = lstm_ops.LSTMBlockFusedCell(
cell_size, cell_clip=0, use_peephole=use_peephole, reuse=True,
name="rnn/lstm_cell")
fused_outputs_op, fused_state_op = fused_cell(
stacked_inputs, dtype=dtypes.float32)
sess.run([variables.global_variables_initializer()])
basic_outputs, basic_state = sess.run([basic_outputs_op, basic_state_op[0]])
basic_grads = sess.run(gradients_impl.gradients(basic_outputs_op, inputs))
xs = [w, b]
if use_peephole:
xs += [wci, wcf, wco]
basic_wgrads = sess.run(gradients_impl.gradients(basic_outputs_op, xs))
block_outputs = sess.run(block_outputs_op)
block_grads = sess.run(gradients_impl.gradients(block_outputs_op, inputs))
block_wgrads = sess.run(gradients_impl.gradients(block_outputs_op, xs))
xs = [w, b]
if use_peephole:
xs += [wci, wcf, wco]
fused_outputs, fused_state = sess.run([fused_outputs_op, fused_state_op[0]])
fused_grads = sess.run(gradients_impl.gradients(fused_outputs_op, inputs))
fused_wgrads = sess.run(gradients_impl.gradients(fused_outputs_op, xs))
return (basic_state, fused_state, basic_outputs, block_outputs,
fused_outputs, basic_grads, block_grads, fused_grads, basic_wgrads,
block_wgrads, fused_wgrads)
class LSTMBlockCellTest(test.TestCase):
def testNoneDimsWithDynamicRNN(self):
with self.test_session(use_gpu=True, graph=ops.Graph()) as sess:
batch_size = 4
num_steps = 5
input_dim = 6
cell_size = 7
cell = lstm_ops.LSTMBlockCell(cell_size)
x = array_ops.placeholder(dtypes.float32, shape=(None, None, input_dim))
output, _ = rnn.dynamic_rnn(
cell, x, time_major=True, dtype=dtypes.float32)
sess.run(variables.global_variables_initializer())
feed = {}
feed[x] = np.random.randn(num_steps, batch_size, input_dim)
sess.run(output, feed)
def testLSTMBlockCell(self):
with self.test_session(use_gpu=True, graph=ops.Graph()) as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m0 = array_ops.zeros([1, 2])
m1 = array_ops.zeros([1, 2])
m2 = array_ops.zeros([1, 2])
m3 = array_ops.zeros([1, 2])
g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(
[lstm_ops.LSTMBlockCell(2)
for _ in range(2)], state_is_tuple=True)(x, ((m0, m1), (m2, m3)))
sess.run([variables.global_variables_initializer()])
res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {
x.name: np.array([[1., 1.]]),
m0.name: 0.1 * np.ones([1, 2]),
m1.name: 0.1 * np.ones([1, 2]),
m2.name: 0.1 * np.ones([1, 2]),
m3.name: 0.1 * np.ones([1, 2])
})
self.assertEqual(len(res), 5)
self.assertAllClose(res[0], [[0.24024698, 0.24024698]])
# These numbers are from testBasicLSTMCell and only test c/h.
self.assertAllClose(res[1], [[0.68967271, 0.68967271]])
self.assertAllClose(res[2], [[0.44848421, 0.44848421]])
self.assertAllClose(res[3], [[0.39897051, 0.39897051]])
self.assertAllClose(res[4], [[0.24024698, 0.24024698]])
def testCompatibleNames(self):
with self.test_session(use_gpu=True, graph=ops.Graph()):
cell = rnn_cell.LSTMCell(10)
pcell = rnn_cell.LSTMCell(10, use_peepholes=True)
inputs = [array_ops.zeros([4, 5])] * 6
rnn.static_rnn(cell, inputs, dtype=dtypes.float32, scope="basic")
rnn.static_rnn(pcell, inputs, dtype=dtypes.float32, scope="peephole")
basic_names = {
v.name: v.get_shape()
for v in variables.trainable_variables()
}
with self.test_session(use_gpu=True, graph=ops.Graph()):
cell = lstm_ops.LSTMBlockCell(10)
pcell = lstm_ops.LSTMBlockCell(10, use_peephole=True)
inputs = [array_ops.zeros([4, 5])] * 6
rnn.static_rnn(cell, inputs, dtype=dtypes.float32, scope="basic")
rnn.static_rnn(pcell, inputs, dtype=dtypes.float32, scope="peephole")
block_names = {
v.name: v.get_shape()
for v in variables.trainable_variables()
}
with self.test_session(use_gpu=True, graph=ops.Graph()):
cell = lstm_ops.LSTMBlockFusedCell(10)
pcell = lstm_ops.LSTMBlockFusedCell(10, use_peephole=True)
inputs = array_ops.stack([array_ops.zeros([4, 5])] * 6)
cell(inputs, dtype=dtypes.float32, scope="basic/lstm_cell")
pcell(inputs, dtype=dtypes.float32, scope="peephole/lstm_cell")
fused_names = {
v.name: v.get_shape()
for v in variables.trainable_variables()
}
self.assertEqual(basic_names, block_names)
self.assertEqual(basic_names, fused_names)
def testLSTMBasicToBlockCell(self):
with self.test_session(use_gpu=True) as sess:
x = array_ops.zeros([1, 2])
x_values = np.random.randn(1, 2)
m0_val = 0.1 * np.ones([1, 2])
m1_val = -0.1 * np.ones([1, 2])
m2_val = -0.2 * np.ones([1, 2])
m3_val = 0.2 * np.ones([1, 2])
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=19890212)
with variable_scope.variable_scope("basic", initializer=initializer):
m0 = array_ops.zeros([1, 2])
m1 = array_ops.zeros([1, 2])
m2 = array_ops.zeros([1, 2])
m3 = array_ops.zeros([1, 2])
g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(
[rnn_cell.BasicLSTMCell(2, state_is_tuple=True) for _ in range(2)],
state_is_tuple=True)(x, ((m0, m1), (m2, m3)))
sess.run([variables.global_variables_initializer()])
basic_res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {
x.name: x_values,
m0.name: m0_val,
m1.name: m1_val,
m2.name: m2_val,
m3.name: m3_val
})
with variable_scope.variable_scope("block", initializer=initializer):
m0 = array_ops.zeros([1, 2])
m1 = array_ops.zeros([1, 2])
m2 = array_ops.zeros([1, 2])
m3 = array_ops.zeros([1, 2])
g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(
[lstm_ops.LSTMBlockCell(2)
for _ in range(2)], state_is_tuple=True)(x, ((m0, m1), (m2, m3)))
sess.run([variables.global_variables_initializer()])
block_res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {
x.name: x_values,
m0.name: m0_val,
m1.name: m1_val,
m2.name: m2_val,
m3.name: m3_val
})
self.assertEqual(len(basic_res), len(block_res))
for basic, block in zip(basic_res, block_res):
self.assertAllClose(basic, block)
def testLSTMBasicToBlockCellPeeping(self):
with self.test_session(use_gpu=True) as sess:
x = array_ops.zeros([1, 2])
x_values = np.random.randn(1, 2)
m0_val = 0.1 * np.ones([1, 2])
m1_val = -0.1 * np.ones([1, 2])
m2_val = -0.2 * np.ones([1, 2])
m3_val = 0.2 * np.ones([1, 2])
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=19890212)
with variable_scope.variable_scope("basic", initializer=initializer):
m0 = array_ops.zeros([1, 2])
m1 = array_ops.zeros([1, 2])
m2 = array_ops.zeros([1, 2])
m3 = array_ops.zeros([1, 2])
g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(
[
rnn_cell.LSTMCell(2, use_peepholes=True, state_is_tuple=True)
for _ in range(2)
],
state_is_tuple=True)(x, ((m0, m1), (m2, m3)))
sess.run([variables.global_variables_initializer()])
basic_res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {
x.name: x_values,
m0.name: m0_val,
m1.name: m1_val,
m2.name: m2_val,
m3.name: m3_val
})
with variable_scope.variable_scope("block", initializer=initializer):
m0 = array_ops.zeros([1, 2])
m1 = array_ops.zeros([1, 2])
m2 = array_ops.zeros([1, 2])
m3 = array_ops.zeros([1, 2])
g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(
[lstm_ops.LSTMBlockCell(2, use_peephole=True) for _ in range(2)],
state_is_tuple=True)(x, ((m0, m1), (m2, m3)))
sess.run([variables.global_variables_initializer()])
block_res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {
x.name: x_values,
m0.name: m0_val,
m1.name: m1_val,
m2.name: m2_val,
m3.name: m3_val
})
self.assertEqual(len(basic_res), len(block_res))
for basic, block in zip(basic_res, block_res):
self.assertAllClose(basic, block)
def testLSTMBasicToBlock(self):
with self.test_session(use_gpu=True) as sess:
(basic_state, fused_state, basic_outputs, block_outputs, fused_outputs,
basic_grads, block_grads, fused_grads, basic_wgrads, block_wgrads,
fused_wgrads) = blocks_match(
sess, use_peephole=False)
self.assertAllClose(basic_outputs, block_outputs)
self.assertAllClose(basic_grads, block_grads)
for basic, block in zip(basic_wgrads, block_wgrads):
self.assertAllClose(basic, block, rtol=1e-6, atol=1e-6)
self.assertAllClose(basic_outputs, fused_outputs)
self.assertAllClose(basic_state, fused_state)
self.assertAllClose(basic_grads, fused_grads)
for basic, fused in zip(block_wgrads, fused_wgrads):
self.assertAllClose(basic, fused, rtol=1e-6, atol=1e-6)
def testLSTMBasicToBlockPeeping(self):
with self.test_session(use_gpu=True) as sess:
(basic_state, fused_state, basic_outputs, block_outputs, fused_outputs,
basic_grads, block_grads, fused_grads, basic_wgrads, block_wgrads,
fused_wgrads) = blocks_match(
sess, use_peephole=True)
self.assertAllClose(basic_outputs, block_outputs)
self.assertAllClose(basic_grads, block_grads)
for basic, block in zip(basic_wgrads, block_wgrads):
self.assertAllClose(basic, block, rtol=1e-6, atol=1e-6)
self.assertAllClose(basic_outputs, fused_outputs)
self.assertAllClose(basic_state, fused_state)
self.assertAllClose(basic_grads, fused_grads)
for basic, fused in zip(block_wgrads, fused_wgrads):
self.assertAllClose(basic, fused, rtol=1e-6, atol=1e-6)
def testLSTMFusedSequenceLengths(self):
"""Verify proper support for sequence lengths in LSTMBlockFusedCell."""
with self.test_session(use_gpu=True) as sess:
batch_size = 3
input_size = 4
cell_size = 5
max_sequence_length = 6
inputs = []
for _ in range(max_sequence_length):
inp = ops.convert_to_tensor(
np.random.randn(batch_size, input_size), dtype=dtypes.float32)
inputs.append(inp)
seq_lengths = constant_op.constant([3, 4, 5])
cell_inputs = array_ops.stack(inputs)
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=19890213)
with variable_scope.variable_scope("lstm_cell", initializer=initializer):
# magic naming so that the cells pick up these variables and reuse them
variable_scope.get_variable(
"kernel",
shape=[input_size + cell_size, cell_size * 4],
dtype=dtypes.float32)
variable_scope.get_variable(
"bias",
shape=[cell_size * 4],
dtype=dtypes.float32,
initializer=init_ops.zeros_initializer())
cell = lstm_ops.LSTMBlockFusedCell(
cell_size, cell_clip=0, use_peephole=False, reuse=True,
name="lstm_cell")
fused_outputs_op, fused_state_op = cell(
cell_inputs, dtype=dtypes.float32, sequence_length=seq_lengths)
cell_vars = [
v for v in variables.trainable_variables()
if v.name.endswith("kernel") or v.name.endswith("bias")
]
# Verify that state propagation works if we turn our sequence into
# tiny (single-time) subsequences, i.e. unfuse the cell
unfused_outputs_op = []
state = None
with variable_scope.variable_scope(
variable_scope.get_variable_scope(), reuse=True):
for i, inp in enumerate(inputs):
lengths = [int(i < l) for l in seq_lengths.eval()]
output, state = cell(
array_ops.expand_dims(inp, 0),
initial_state=state,
dtype=dtypes.float32,
sequence_length=lengths)
unfused_outputs_op.append(output[0])
unfused_outputs_op = array_ops.stack(unfused_outputs_op)
sess.run([variables.global_variables_initializer()])
unfused_outputs, unfused_state = sess.run([unfused_outputs_op, state[0]])
unfused_grads = sess.run(
gradients_impl.gradients(unfused_outputs_op, inputs))
unfused_wgrads = sess.run(
gradients_impl.gradients(unfused_outputs_op, cell_vars))
fused_outputs, fused_state = sess.run(
[fused_outputs_op, fused_state_op[0]])
fused_grads = sess.run(gradients_impl.gradients(fused_outputs_op, inputs))
fused_wgrads = sess.run(
gradients_impl.gradients(fused_outputs_op, cell_vars))
self.assertAllClose(fused_outputs, unfused_outputs)
self.assertAllClose(fused_state, unfused_state)
self.assertAllClose(fused_grads, unfused_grads)
for fused, unfused in zip(fused_wgrads, unfused_wgrads):
self.assertAllClose(fused, unfused, rtol=1e-6, atol=1e-6)
#### Benchmarking.
class BenchmarkLSTMBlock(test.Benchmark):
def benchmarkLSTMBlockCellFpropWithDynamicRNN(self):
print("BlockLSTMCell forward propagation via dynamic_rnn().")
print("--------------------------------------------------------------")
print("LSTMBlockCell Seconds per inference.")
print("batch_size,cell_size,input_size,time_steps,use_gpu,wall_time")
iters = 10
for config in benchmarking.dict_product({
"batch_size": [1, 8, 13, 32, 67, 128],
"cell_size": [128, 250, 512, 650, 1024, 1350],
"time_steps": [40],
"use_gpu": [True, False]
}):
with ops.Graph().as_default():
with benchmarking.device(use_gpu=config["use_gpu"]):
inputs = variable_scope.get_variable(
"x",
[config["time_steps"], config["batch_size"], config["cell_size"]])
cell = lstm_ops.LSTMBlockCell(config["cell_size"])
outputs = rnn.dynamic_rnn(
cell, inputs, time_major=True, dtype=dtypes.float32)
init_op = variables.global_variables_initializer()
with session.Session() as sess:
sess.run(init_op)
wall_time = benchmarking.seconds_per_run(outputs, sess, iters)
# Print to stdout. If the TEST_REPORT_FILE_PREFIX environment variable
# is set, this will produce a copy-paste-able CSV file.
print(",".join(
map(str, [
config["batch_size"], config["cell_size"], config["cell_size"],
config["time_steps"], config["use_gpu"], wall_time
])))
benchmark_name_template = "_".join([
"LSTMBlockCell_fprop", "BS%(batch_size)i", "CS%(cell_size)i",
"IS%(cell_size)i", "TS%(time_steps)i", "gpu_%(use_gpu)s"
])
self.report_benchmark(
name=benchmark_name_template % config,
iters=iters,
wall_time=wall_time,
extras=config)
def benchmarkLSTMBlockCellBpropWithDynamicRNN(self):
print("BlockLSTMCell backward propagation via dynamic_rnn().")
print("--------------------------------------------------------------")
print("LSTMBlockCell Seconds per inference.")
print("batch_size,cell_size,input_size,time_steps,use_gpu,wall_time")
iters = 10
for config in benchmarking.dict_product({
"batch_size": [1, 8, 13, 32, 67, 128],
"cell_size": [128, 250, 512, 650, 1024, 1350],
"time_steps": [40],
"use_gpu": [True, False]
}):
with ops.Graph().as_default():
with benchmarking.device(use_gpu=config["use_gpu"]):
time_steps = config["time_steps"]
batch_size = config["batch_size"]
cell_size = input_size = config["cell_size"]
inputs = variable_scope.get_variable(
"x", [time_steps, batch_size, cell_size],
trainable=False,
dtype=dtypes.float32)
with variable_scope.variable_scope(
"rnn", reuse=variable_scope.AUTO_REUSE):
w = variable_scope.get_variable(
"rnn/lstm_cell/kernel",
shape=[input_size + cell_size, cell_size * 4],
dtype=dtypes.float32)
b = variable_scope.get_variable(
"rnn/lstm_cell/bias",
shape=[cell_size * 4],
dtype=dtypes.float32,
initializer=init_ops.zeros_initializer())
cell = lstm_ops.LSTMBlockCell(cell_size)
outputs = rnn.dynamic_rnn(
cell, inputs, time_major=True, dtype=dtypes.float32)
grads = gradients_impl.gradients(outputs, [inputs, w, b])
init_op = variables.global_variables_initializer()
with session.Session() as sess:
sess.run(init_op)
wall_time = benchmarking.seconds_per_run(grads, sess, iters)
# Print to stdout. If the TEST_REPORT_FILE_PREFIX environment variable
# is set, this will produce a copy-paste-able CSV file.
print(",".join(
map(str, [
batch_size, cell_size, cell_size, time_steps, config["use_gpu"],
wall_time
])))
benchmark_name_template = "_".join([
"LSTMBlockCell_bprop", "BS%(batch_size)i", "CS%(cell_size)i",
"IS%(cell_size)i", "TS%(time_steps)i", "gpu_%(use_gpu)s"
])
self.report_benchmark(
name=benchmark_name_template % config,
iters=iters,
wall_time=wall_time,
extras=config)
if __name__ == "__main__":
test.main()
|
benoitsteiner/tensorflow-xsmm
|
tensorflow/contrib/rnn/python/kernel_tests/lstm_ops_test.py
|
Python
|
apache-2.0
| 21,832
| 0.0071
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from core import FeatureExtractorRegistry
from twinkle.connectors.core import ConnectorRegistry
class FeatureExtractorPipelineFactory(object):
"""
Factory object for creating a pipeline from a file
"""
def __init__(self):
"""
"""
pass
def buildInput(self, config_data):
"""
builds an input from the ConnectorRegistry
"""
input_name = config_data["name"]
input_config = config_data["config"]
return ConnectorRegistry.buildConnector(input_name, input_config)
def buildOutput(self, config_data):
"""
        builds an output from the ConnectorRegistry
"""
output_name = config_data["name"]
output_config = config_data["config"]
return ConnectorRegistry.buildConnector(output_name, output_config)
def buildExtractor(self, config_data):
"""
"""
extractor_name = config_data["name"]
extractor_config = config_data["config"]
return FeatureExtractorRegistry.buildExtractor(extractor_name, extractor_config)
def buildFromDictionary(self,config_data):
"""
"""
if "input" not in config_data:
raise Exception("No input source was specified in the configuration data")
if "output" not in config_data:
raise Exception("No output source was specified in the configuration data")
#build input
input_data = config_data["input"]
input = self.buildInput(input_data)
# build output
output_data = config_data["output"]
output = self.buildOutput(output_data)
# create the pipeline
pipeline = FeatureExtractorPipeline(input, output)
# get feature extractors
extractors = config_data["extractors"]
# add each extractor
for extractor_config in extractors:
extractor = self.buildExtractor(extractor_config)
pipeline.addExtractor(extractor)
return pipeline
class FeatureExtractorPipeline(object):
"""
Simple feature extractor pipeline.
    It still needs many features, such as dependency graphs to resolve some of
    the intermediates and the ability to do second passes over items that need
    to be normalized.
"""
def __init__(self, input, output):
self.feature_extractors = []
self.input = input
self.output = output
def addExtractor(self, extractor):
"""
add Extractor to the pipeline
"""
self.feature_extractors.append(extractor)
def run(self):
"""
runs the pipeline
"""
processed_items = []
# iterate through each item
for item in self.input:
item_cookie = { "tweet": item, "text": item.text}
output = {}
# first do preprossing
for extractor in self.feature_extractors:
extractor.extract(item, item_cookie, output)
print output
# write output
self.output.write(output)
|
emCOMP/twinkle
|
twinkle/feature_extraction/pipelines.py
|
Python
|
mit
| 2,688
| 0.034226
|
from setuptools import setup
setup(
# general meta
name='elasticity',
version='0.7',
author='Brian C. Dilley - Flipagram',
author_email='brian@flipagram.com',
description='Python based command line tool for managing ElasticSearch clusters.',
platforms='any',
url='https://github.com/Cheers-Dev/elasticity',
download_url='https://github.com/Cheers-Dev/elasticity',
# packages
packages=[
'elasticity'
],
# dependencies
install_requires=[
'elasticsearch>=1.4.0',
'pyyaml>=3.10'
],
# additional files to include
include_package_data=True,
# the scripts
scripts=['scripts/elasticity'],
# wut?
classifiers=['Intended Audience :: Developers']
)
|
flipagram/elasticity
|
setup.py
|
Python
|
mit
| 752
| 0.00133
|
"""
Adds crowdsourced hinting functionality to lon-capa numerical response problems.
Currently experimental - not for instructor use, yet.
"""
import logging
import json
import random
import copy
from pkg_resources import resource_string
from lxml import etree
from xmodule.x_module import XModule, STUDENT_VIEW
from xmodule.raw_module import RawDescriptor
from xblock.fields import Scope, String, Integer, Boolean, Dict, List
from capa.responsetypes import FormulaResponse
from django.utils.html import escape
log = logging.getLogger(__name__)
class CrowdsourceHinterFields(object):
"""Defines fields for the crowdsource hinter module."""
has_children = True
moderate = String(help='String "True"/"False" - activates moderation', scope=Scope.content,
default='False')
debug = String(help='String "True"/"False" - allows multiple voting', scope=Scope.content,
default='False')
# Usage: hints[answer] = {str(pk): [hint_text, #votes]}
# hints is a dictionary that takes answer keys.
# Each value is itself a dictionary, accepting hint_pk strings as keys,
# and returning [hint text, #votes] pairs as values
hints = Dict(help='A dictionary containing all the active hints.', scope=Scope.content, default={})
mod_queue = Dict(help='A dictionary containing hints still awaiting approval', scope=Scope.content,
default={})
hint_pk = Integer(help='Used to index hints.', scope=Scope.content, default=0)
# A list of previous hints that a student viewed.
# Of the form [answer, [hint_pk_1, ...]] for each problem.
# Sorry about the variable name - I know it's confusing.
previous_answers = List(help='A list of hints viewed.', scope=Scope.user_state, default=[])
# user_submissions actually contains a list of previous answers submitted.
    # (Originally, previous_answers did this job, hence the name confusion.)
user_submissions = List(help='A list of previous submissions', scope=Scope.user_state, default=[])
user_voted = Boolean(help='Specifies if the user has voted on this problem or not.',
scope=Scope.user_state, default=False)
class CrowdsourceHinterModule(CrowdsourceHinterFields, XModule):
"""
An Xmodule that makes crowdsourced hints.
Currently, only works on capa problems with exactly one numerical response,
and no other parts.
Example usage:
<crowdsource_hinter>
<problem blah blah />
</crowdsource_hinter>
XML attributes:
-moderate="True" will not display hints until staff approve them in the hint manager.
-debug="True" will let users vote as often as they want.
"""
icon_class = 'crowdsource_hinter'
css = {'scss': [resource_string(__name__, 'css/crowdsource_hinter/display.scss')]}
js = {'coffee': [resource_string(__name__, 'js/src/crowdsource_hinter/display.coffee')],
'js': []}
js_module_name = "Hinter"
def __init__(self, *args, **kwargs):
super(CrowdsourceHinterModule, self).__init__(*args, **kwargs)
# We need to know whether we are working with a FormulaResponse problem.
try:
responder = self.get_display_items()[0].lcp.responders.values()[0]
except (IndexError, AttributeError):
log.exception('Unable to find a capa problem child.')
return
        self.is_formula = isinstance(responder, FormulaResponse)
if self.is_formula:
self.answer_to_str = self.formula_answer_to_str
else:
self.answer_to_str = self.numerical_answer_to_str
# compare_answer is expected to return whether its two inputs are close enough
# to be equal, or raise a StudentInputError if one of the inputs is malformatted.
if hasattr(responder, 'compare_answer') and hasattr(responder, 'validate_answer'):
self.compare_answer = responder.compare_answer
self.validate_answer = responder.validate_answer
else:
# This response type is not supported!
log.exception('Response type not supported for hinting: ' + str(responder))
def get_html(self):
"""
Puts a wrapper around the problem html. This wrapper includes ajax urls of the
hinter and of the problem.
- Dependent on lon-capa problem.
"""
if self.debug == 'True':
# Reset the user vote, for debugging only!
self.user_voted = False
if self.hints == {}:
# Force self.hints to be written into the database. (When an xmodule is initialized,
# fields are not added to the db until explicitly changed at least once.)
self.hints = {}
try:
child = self.get_display_items()[0]
out = child.render(STUDENT_VIEW).content
# The event listener uses the ajax url to find the child.
child_id = child.id
except IndexError:
out = u"Error in loading crowdsourced hinter - can't find child problem."
child_id = ''
# Wrap the module in a <section>. This lets us pass data attributes to the javascript.
out += u'<section class="crowdsource-wrapper" data-url="{ajax_url}" data-child-id="{child_id}"> </section>'.format(
ajax_url=self.runtime.ajax_url,
child_id=child_id
)
return out
def numerical_answer_to_str(self, answer):
"""
Converts capa numerical answer format to a string representation
of the answer.
-Lon-capa dependent.
-Assumes that the problem only has one part.
"""
return str(answer.values()[0])
def formula_answer_to_str(self, answer):
"""
Converts capa formula answer into a string.
-Lon-capa dependent.
-Assumes that the problem only has one part.
"""
return str(answer.values()[0])
def get_matching_answers(self, answer):
"""
Look in self.hints, and find all answer keys that are "equal with tolerance"
to the input answer.
"""
return [key for key in self.hints if self.compare_answer(key, answer)]
def handle_ajax(self, dispatch, data):
"""
This is the landing method for AJAX calls.
"""
if dispatch == 'get_hint':
out = self.get_hint(data)
elif dispatch == 'get_feedback':
out = self.get_feedback(data)
elif dispatch == 'vote':
out = self.tally_vote(data)
elif dispatch == 'submit_hint':
out = self.submit_hint(data)
else:
return json.dumps({'contents': 'Error - invalid operation.'})
if out is None:
out = {'op': 'empty'}
elif 'error' in out:
# Error in processing.
out.update({'op': 'error'})
else:
out.update({'op': dispatch})
return json.dumps({'contents': self.runtime.render_template('hinter_display.html', out)})
def get_hint(self, data):
"""
The student got the incorrect answer found in data. Give him a hint.
Called by hinter javascript after a problem is graded as incorrect.
Args:
`data` -- must be interpretable by answer_to_str.
Output keys:
- 'hints' is a list of hint strings to show to the user.
- 'answer' is the parsed answer that was submitted.
Will record the user's wrong answer in user_submissions, and the hints shown
in previous_answers.
"""
# First, validate our inputs.
try:
answer = self.answer_to_str(data)
except (ValueError, AttributeError):
# Sometimes, we get an answer that's just not parsable. Do nothing.
log.exception('Answer not parsable: ' + str(data))
return
if not self.validate_answer(answer):
# Answer is not in the right form.
log.exception('Answer not valid: ' + str(answer))
return
if answer not in self.user_submissions:
self.user_submissions += [answer]
# For all answers similar enough to our own, accumulate all hints together.
# Also track the original answer of each hint.
matching_answers = self.get_matching_answers(answer)
matching_hints = {}
for matching_answer in matching_answers:
temp_dict = copy.deepcopy(self.hints[matching_answer])
for key, value in temp_dict.items():
# Each value now has hint, votes, matching_answer.
temp_dict[key] = value + [matching_answer]
matching_hints.update(temp_dict)
# matching_hints now maps pk's to lists of [hint, votes, matching_answer]
# Finally, randomly choose a subset of matching_hints to actually show.
if not matching_hints:
# No hints to give. Return.
return
# Get the top hint, plus two random hints.
n_hints = len(matching_hints)
hints = []
# max(dict) returns the maximum key in dict.
# The key function takes each pk, and returns the number of votes for the
# hint with that pk.
best_hint_index = max(matching_hints, key=lambda pk: matching_hints[pk][1])
hints.append(matching_hints[best_hint_index][0])
best_hint_answer = matching_hints[best_hint_index][2]
        # The brackets surrounding the index are for backwards compatibility purposes.
# (It used to be that each answer was paired with multiple hints in a list.)
self.previous_answers += [[best_hint_answer, [best_hint_index]]]
for _ in xrange(min(2, n_hints - 1)):
# Keep making random hints until we hit a target, or run out.
while True:
# random.choice randomly chooses an element from its input list.
# (We then unpack the item, in this case data for a hint.)
(hint_index, (rand_hint, _, hint_answer)) =\
random.choice(matching_hints.items())
if rand_hint not in hints:
break
hints.append(rand_hint)
self.previous_answers += [[hint_answer, [hint_index]]]
return {'hints': hints,
'answer': answer}
def get_feedback(self, data):
"""
The student got it correct. Ask him to vote on hints, or submit a hint.
Args:
`data` -- not actually used. (It is assumed that the answer is correct.)
Output keys:
- 'answer_to_hints': a nested dictionary.
answer_to_hints[answer][hint_pk] returns the text of the hint.
- 'user_submissions': the same thing as self.user_submissions. A list of
the answers that the user previously submitted.
"""
# The student got it right.
# Did he submit at least one wrong answer?
if len(self.user_submissions) == 0:
# No. Nothing to do here.
return
# Make a hint-voting interface for each wrong answer. The student will only
# be allowed to make one vote / submission, but he can choose which wrong answer
# he wants to look at.
answer_to_hints = {} # answer_to_hints[answer text][hint pk] -> hint text
        # Go through each previous answer, and populate answer_to_hints.
for i in xrange(len(self.previous_answers)):
answer, hints_offered = self.previous_answers[i]
if answer not in answer_to_hints:
answer_to_hints[answer] = {}
if answer in self.hints:
                # Go through each hint, and add it to answer_to_hints.
for hint_id in hints_offered:
if (hint_id is not None) and (hint_id not in answer_to_hints[answer]):
try:
answer_to_hints[answer][hint_id] = self.hints[answer][str(hint_id)][0]
except KeyError:
# Sometimes, the hint that a user saw will have been deleted by the instructor.
continue
return {'answer_to_hints': answer_to_hints,
'user_submissions': self.user_submissions}
def tally_vote(self, data):
"""
Tally a user's vote on his favorite hint.
Args:
`data` -- expected to have the following keys:
'answer': text of answer we're voting on
'hint': hint_pk
'pk_list': A list of [answer, pk] pairs, each of which representing a hint.
We will return a list of how many votes each hint in the list has so far.
It's up to the browser to specify which hints to return vote counts for.
Returns key 'hint_and_votes', a list of (hint_text, #votes) pairs.
"""
if self.user_voted:
return {'error': 'Sorry, but you have already voted!'}
ans = data['answer']
if not self.validate_answer(ans):
# Uh oh. Invalid answer.
log.exception('Failure in hinter tally_vote: Unable to parse answer: {ans}'.format(ans=ans))
return {'error': 'Failure in voting!'}
hint_pk = str(data['hint'])
# We use temp_dict because we need to do a direct write for the database to update.
temp_dict = self.hints
try:
temp_dict[ans][hint_pk][1] += 1
except KeyError:
            log.exception('''Failure in hinter tally_vote: User voted for non-existent hint:
Answer={ans} pk={hint_pk}'''.format(ans=ans, hint_pk=hint_pk))
return {'error': 'Failure in voting!'}
self.hints = temp_dict
# Don't let the user vote again!
self.user_voted = True
# Return a list of how many votes each hint got.
pk_list = json.loads(data['pk_list'])
hint_and_votes = []
for answer, vote_pk in pk_list:
if not self.validate_answer(answer):
log.exception('In hinter tally_vote, couldn\'t parse {ans}'.format(ans=answer))
continue
try:
hint_and_votes.append(temp_dict[answer][str(vote_pk)])
except KeyError:
log.exception('In hinter tally_vote, couldn\'t find: {ans}, {vote_pk}'.format(
ans=answer, vote_pk=str(vote_pk)))
hint_and_votes.sort(key=lambda pair: pair[1], reverse=True)
# Reset self.previous_answers and user_submissions.
self.previous_answers = []
self.user_submissions = []
return {'hint_and_votes': hint_and_votes}
def submit_hint(self, data):
"""
Take a hint submission and add it to the database.
Args:
`data` -- expected to have the following keys:
'answer': text of answer
'hint': text of the new hint that the user is adding
Returns a thank-you message.
"""
# Do html escaping. Perhaps in the future do profanity filtering, etc. as well.
hint = escape(data['hint'])
answer = data['answer']
if not self.validate_answer(answer):
log.exception('Failure in hinter submit_hint: Unable to parse answer: {ans}'.format(
ans=answer))
return {'error': 'Could not submit answer'}
# Only allow a student to vote or submit a hint once.
if self.user_voted:
return {'message': 'Sorry, but you have already voted!'}
# Add the new hint to self.hints or self.mod_queue. (Awkward because a direct write
# is necessary.)
if self.moderate == 'True':
temp_dict = self.mod_queue
else:
temp_dict = self.hints
if answer in temp_dict:
temp_dict[answer][str(self.hint_pk)] = [hint, 1] # With one vote (the user himself).
else:
temp_dict[answer] = {str(self.hint_pk): [hint, 1]}
self.hint_pk += 1
if self.moderate == 'True':
self.mod_queue = temp_dict
else:
self.hints = temp_dict
        # Mark the user as having voted; reset previous_answers
self.user_voted = True
self.previous_answers = []
self.user_submissions = []
return {'message': 'Thank you for your hint!'}
class CrowdsourceHinterDescriptor(CrowdsourceHinterFields, RawDescriptor):
module_class = CrowdsourceHinterModule
stores_state = True
@classmethod
def definition_from_xml(cls, xml_object, system):
children = []
for child in xml_object:
try:
child_block = system.process_xml(etree.tostring(child, encoding='unicode'))
children.append(child_block.scope_ids.usage_id)
except Exception as e:
log.exception("Unable to load child when parsing CrowdsourceHinter. Continuing...")
if system.error_tracker is not None:
system.error_tracker(u"ERROR: {0}".format(e))
continue
return {}, children
def definition_to_xml(self, resource_fs):
xml_object = etree.Element('crowdsource_hinter')
for child in self.get_children():
self.runtime.add_block_as_child_node(child, xml_object)
return xml_object
|
ahmadiga/min_edx
|
common/lib/xmodule/xmodule/crowdsource_hinter.py
|
Python
|
agpl-3.0
| 17,456
| 0.00338
|
from __future__ import print_function
import argparse
import yaml
from .bocca import make_project, ProjectExistsError
def main():
parser = argparse.ArgumentParser()
parser.add_argument('file', type=argparse.FileType('r'),
help='Project description file')
parser.add_argument('--clobber', action='store_true',
help='Clobber an existing project')
args = parser.parse_args()
try:
make_project(yaml.load(args.file), clobber=args.clobber)
except ProjectExistsError as error:
print('The specified project (%s) already exists. Exiting.' % error)
if __name__ == '__main__':
main()
|
csdms/wmt-exe
|
wmtexe/cmi/make.py
|
Python
|
mit
| 673
| 0
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from flask import jsonify
from flask_restplus import inputs
from flexget.api import api, APIResource
from flexget.api.app import NotFoundError, BadRequest, etag
from flexget.plugins.internal.api_tvmaze import APITVMaze as tvm
tvmaze_api = api.namespace('tvmaze', description='TVMaze Shows')
class ObjectsContainer(object):
actor_object = {
'type': 'object',
'properties': {
"last_update": {'type': 'string', 'format': 'date-time'},
"medium_image": {'type': 'string'},
"name": {'type': 'string'},
"original_image": {'type': 'string'},
"tvmaze_id": {'type': 'integer'},
"url": {'type': 'string'}
}
}
schedule_object = {
'type': 'object',
'properties': {
"days": {'type': 'array', 'items': {'type': 'string'}},
"time": {'type': 'string'}
}
}
tvmaze_series_object = {
'type': 'object',
'properties': {
'tvmaze_id': {'type': 'integer'},
'status': {'type': 'string'},
'rating': {'type': 'number'},
'genres': {'type': 'array', 'items': {'type': 'string'}},
'weight': {'type': 'integer'},
'updated': {'type': 'string', 'format': 'date-time'},
'name': {'type': 'string'},
'language': {'type': 'string'},
'schedule': schedule_object,
'url': {'type': 'string', 'format': 'url'},
'original_image': {'type': 'string'},
'medium_image': {'type': 'string'},
'tvdb_id': {'type': 'integer'},
'tvrage_id': {'type': 'integer'},
'premiered': {'type': 'string', 'format': 'date-time'},
'year': {'type': 'integer'},
'summary': {'type': 'string'},
'webchannel': {'type': ['string', 'null']},
'runtime': {'type': 'integer'},
'show_type': {'type': 'string'},
'network': {'type': ['string', 'null']},
'last_update': {'type': 'string', 'format': 'date-time'}
},
'required': ['tvmaze_id', 'status', 'rating', 'genres', 'weight', 'updated', 'name', 'language',
'schedule', 'url', 'original_image', 'medium_image', 'tvdb_id', 'tvrage_id', 'premiered', 'year',
'summary', 'webchannel', 'runtime', 'show_type', 'network', 'last_update'],
'additionalProperties': False
}
tvmaze_episode_object = {
'type': 'object',
'properties': {
'tvmaze_id': {'type': 'integer'},
'series_id': {'type': 'integer'},
'number': {'type': 'integer'},
'season_number': {'type': 'integer'},
'title': {'type': 'string'},
'airdate': {'type': 'string', 'format': 'date-time'},
'url': {'type': 'string'},
'original_image': {'type': ['string', 'null']},
'medium_image': {'type': ['string', 'null']},
'airstamp': {'type': 'string', 'format': 'date-time'},
'runtime': {'type': 'integer'},
'summary': {'type': 'string'},
'last_update': {'type': 'string', 'format': 'date-time'}
},
'required': ['tvmaze_id', 'series_id', 'number', 'season_number', 'title', 'airdate', 'url', 'original_image',
'medium_image', 'airstamp', 'runtime', 'summary', 'last_update'],
'additionalProperties': False
}
tvmaze_series_schema = api.schema_model('tvmaze_series_schema', ObjectsContainer.tvmaze_series_object)
tvmaze_episode_schema = api.schema_model('tvmaze_episode_schema', ObjectsContainer.tvmaze_episode_object)
@tvmaze_api.route('/series/<string:title>/')
@api.doc(params={'title': 'TV Show name or TVMaze ID'})
class TVDBSeriesSearchApi(APIResource):
@etag
@api.response(200, 'Successfully found show', model=tvmaze_series_schema)
@api.response(NotFoundError)
def get(self, title, session=None):
"""TVMaze series lookup"""
try:
tvmaze_id = int(title)
except ValueError:
tvmaze_id = None
try:
if tvmaze_id:
series = tvm.series_lookup(tvmaze_id=tvmaze_id, session=session)
else:
series = tvm.series_lookup(series_name=title, session=session)
except LookupError as e:
raise NotFoundError(e.args[0])
return jsonify(series.to_dict())
episode_parser = api.parser()
episode_parser.add_argument('season_num', type=int, help='Season number')
episode_parser.add_argument('ep_num', type=int, help='Episode number')
episode_parser.add_argument('air_date', type=inputs.date_from_iso8601, help="Air date in the format of '2012-01-01'")
@tvmaze_api.route('/episode/<int:tvmaze_id>/')
@api.doc(params={'tvmaze_id': 'TVMaze ID of show'})
@api.doc(parser=episode_parser)
class TVDBEpisodeSearchAPI(APIResource):
@etag
@api.response(200, 'Successfully found episode', tvmaze_episode_schema)
@api.response(NotFoundError)
@api.response(BadRequest)
def get(self, tvmaze_id, session=None):
"""TVMaze episode lookup"""
args = episode_parser.parse_args()
air_date = args.get('air_date')
season_num = args.get('season_num')
ep_num = args.get('ep_num')
kwargs = {'tvmaze_id': tvmaze_id,
'session': session}
if air_date:
kwargs['series_id_type'] = 'date'
kwargs['series_date'] = air_date
elif season_num and ep_num:
kwargs['series_id_type'] = 'ep'
kwargs['series_season'] = season_num
kwargs['series_episode'] = ep_num
else:
raise BadRequest('not enough parameters sent for lookup')
try:
episode = tvm.episode_lookup(**kwargs)
except LookupError as e:
raise NotFoundError(e.args[0])
return jsonify(episode.to_dict())
|
qk4l/Flexget
|
flexget/api/plugins/tvmaze_lookup.py
|
Python
|
mit
| 6,102
| 0.001475
|
import argparse
import sys, os
import numpy as np
from copy import copy
parser = argparse.ArgumentParser()
parser.add_argument('qubit', help='qubit name')
parser.add_argument('direction', help='direction (X or Y)')
parser.add_argument('numPulses', type=int, help='log2(n) of the longest sequence n')
parser.add_argument('amplitude', type=float, help='pulse amplitude')
args = parser.parse_args()
from QGL import *
q = QubitFactory(args.qubit)
if args.direction == 'X':
pPulse = Xtheta(q, amp=args.amplitude)
mPulse = X90m(q)
else:
pPulse = Ytheta(q, amp=args.amplitude)
mPulse = Y90m(q)
# Exponentially growing repetitions of the target pulse, e.g.
# (1, 2, 4, 8, 16, 32, 64, 128, ...) x X90
seqs = [[pPulse]*n for n in 2**np.arange(args.numPulses+1)]
# measure each along Z or X/Y
seqs = [s + m for s in seqs for m in [ [MEAS(q)], [mPulse, MEAS(q)] ]]
# tack on calibrations to the beginning
seqs = [[Id(q), MEAS(q)], [X(q), MEAS(q)]] + seqs
# repeat each
repeated_seqs = [copy(s) for s in seqs for _ in range(2)]
fileNames = compile_to_hardware(repeated_seqs, fileName='RepeatCal/RepeatCal')
# plot_pulse_files(fileNames)
|
BBN-Q/Qlab
|
common/@PulseCalibration/PhaseEstimationSequence.py
|
Python
|
apache-2.0
| 1,148
| 0.004355
|
import requests
import urllib2
import argparse
from bs4 import BeautifulSoup
def get_best_torrent(query):
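    """Scrape the first page of kat.cr search results for `query` and print
    the name, size, uploader status, seeds, leeches, seed-to-leech ratio, and
    magnet link of up to five torrents."""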
query = urllib2.quote(query)
r = requests.get('http://kat.cr/usearch/{}/'.format(query))
soup = BeautifulSoup(r.content)
torrents = soup.find('table', class_='data').find_all(has_class_odd_or_even, limit=5)
for torrent in torrents:
name = torrent.find('a', class_='cellMainLink').text.encode('utf-8')
print "Name: {}".format(name)
size = torrent.find(class_='nobr center').text
print "Size: {}".format(size)
        verified = bool(torrent.find('i', class_='ka ka-verify'))
        print "Verified Uploader: {}".format(verified)
seeds = torrent.find(class_='green center').text
print "Seeds: {}".format(seeds)
leeches = torrent.find(class_='red lasttd center').text
print "Leeches: {}".format(leeches)
try:
seed_to_leech = float(seeds) / float(leeches)
except ZeroDivisionError:
seed_to_leech = int(seeds)
print "Seed to leech ratio: {}".format(seed_to_leech)
magnet = torrent.find(class_='iaconbox').find('a', class_='imagnet')['href']
print "Magnet: \n{}\n".format(magnet)
def has_class_odd_or_even(tag):
if tag.has_attr('class'):
if 'odd' in tag.attrs['class'] or 'even' in tag.attrs['class']:
return True
return False
def command_line_runner():
parser = argparse.ArgumentParser(description='Get magnet links for torrents from the CLI')
parser.add_argument('name', type=str, nargs='*', help='Name of the torrent you are looking for')
args = parser.parse_args()
if not args.name:
parser.print_help()
else:
get_best_torrent(' '.join(args.name))
if __name__ == '__main__':
command_line_runner()
|
dhamaniasad/magnetor
|
magnetor.py
|
Python
|
unlicense
| 1,885
| 0.002122
|
from xml.dom import minidom
from object_classes import *
from helpers import timeToSeconds
class HindenburgInt(object):
def __init__(self, project_file, version="Hindenburg Journalist 1.26.1936", version_num="1.26.1936"):
self.projectFile = project_file
self.version = version
self.version_num = version_num
def get_session_name(self):
        # Take the final path component and strip the file extension.
        name = self.projectFile.split("/")[-1]
        return name.split(".")[0]
def read(self):
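        """Parse the Hindenburg session XML into a Session object with its
        audio files, markers, tracks, clips, fades, and plugins."""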
projectXML = minidom.parse(self.projectFile)
projectObj = Session(self.get_session_name())
projectXML = projectXML.getElementsByTagName("Session")
project = projectXML[0]
projectObj.samplerate = project.getAttribute('Samplerate')
fileSourceInfo = project.getElementsByTagName("AudioPool")[0]
fileSourcePath = fileSourceInfo.getAttribute("Location") + "/" + fileSourceInfo.getAttribute("Path")
projectObj.audio_folder = fileSourceInfo.getAttribute('Path')
projectObj.folder_path = fileSourceInfo.getAttribute('Location')
audioFiles = project.getElementsByTagName("File")
for file in audioFiles:
projectObj.addFile(fileSourcePath + "/" + file.getAttribute("Name"), int(file.getAttribute('Id')))
markers = project.getElementsByTagName("Marker")
for marker in markers:
projectObj.addMarker(marker.getAttribute('Id'), marker.getAttribute('Name'), float(marker.getAttribute('Time')))
tracks = project.getElementsByTagName("Track")
for track in tracks:
current_track = projectObj.addTrack(track.getAttribute('Name'))
try:
current_track.pan = self.interpretPan(track.getAttribute('Pan'))
except:
current_track.pan = 0
try:
current_track.volume = track.getAttribute('Volume')
except:
current_track.volume = 0
try:
if track.getAttribute('Solo') == "1":
current_track.solo = True
except:
current_track.solo = False
try:
if track.getAttribute('Mute') == "1":
                    current_track.mute = True
except:
current_track.mute = False
try:
if track.getAttribute('Rec') == "1":
current_track.rec = True
except:
current_track.rec = False
trackItems = track.getElementsByTagName("Region")
for item in trackItems:
new_item = current_track.addItem(projectObj.getFileByID(int(item.getAttribute('Ref'))))
try:
start = float(item.getAttribute('Start'))
except:
start = 0
new_item.startTime = start
try:
startAt = float(item.getAttribute('Offset'))
except:
startAt = 0
new_item.startAt = startAt
length = timeToSeconds(item.getAttribute('Length'))
new_item.length = length
try:
gain = float(item.getAttribute('Gain'))
except:
gain = 0
new_item.gain = gain
new_item.name = item.getAttribute('Name')
fades = item.getElementsByTagName('Fade')
if fades:
autoEnv = current_track.getEnvelope('Volume')
if autoEnv == "Envelope Not Found":
autoEnv = current_track.addEnvelope('Volume')
firstFade = True
for fade in fades:
startTime = new_item.startTime + float(fade.getAttribute('Start'))
if firstFade:
startValue = new_item.gain
else:
startValue = autoEnv.points[-1].value
firstFade = False
endTime = startTime + float(fade.getAttribute('Length'))
try:
endValue = float(fade.getAttribute('Gain'))
except:
endValue = 0
autoEnv.addPoint(startTime, startValue)
autoEnv.addPoint(endTime, endValue)
plugins = track.getElementsByTagName("Plugin")
for plugin in plugins:
if plugin.getAttribute('Name') == 'Compressor':
pluginType = "Native"
else:
pluginType = "Plugin"
new_plugin = current_track.addFX(plugin.getAttribute('Name'), pluginType, int(plugin.getAttribute('Id')))
if pluginType == "Native":
if plugin.getAttribute('Name') == 'Compressor':
new_plugin.addProperty('UID', plugin.getAttribute('UID'))
new_plugin.addProperty('Comp', plugin.getAttribute('Comp'))
return projectObj
    # Notes: Need to develop the section that reads the plugins; include
    # support for external plugins and the native EQ plugin.
def write(self, destinationFile):
print('This function still needs to be written')
def interpretPan(self, amount):
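        """Convert Hindenburg's pan attribute by negating it and scaling
        it by 90."""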
num = -float(amount)
num = num*90
return num
|
joshk105/daw-translator
|
hindenburg.py
|
Python
|
mit
| 5,531
| 0.004701
|
import numpy as np
import theano
import theano.tensor as T
import lasagne as nn
import data
import load
import nn_plankton
import dihedral
import tmp_dnn
import tta
features = [
# "hu",
# "tutorial",
"haralick",
# "aaronmoments",
# "lbp",
# "pftas",
# "zernike_moments",
# "image_size",
]
batch_size = 128
chunk_size = 32768
num_chunks_train = 240
momentum = 0.9
learning_rate_schedule = {
0: 0.001,
100: 0.0001,
200: 0.00001,
}
validate_every = 40
save_every = 40
sdir = "/mnt/storage/users/avdnoord/git/kaggle-plankton/predictions/"
train_pred_file = sdir+""
valid_pred_file = sdir+""
test_pred_file = sdir+"test--sharding_blend_pl_blend4_convroll4_doublescale_fs5_no_dropout_33_66.npy"
data_loader = load.PredictionsWithFeaturesDataLoader(
features = features,
train_pred_file=train_pred_file,
valid_pred_file=valid_pred_file,
test_pred_file=test_pred_file,
num_chunks_train=num_chunks_train,
chunk_size=chunk_size)
create_train_gen = lambda: data_loader.create_random_gen()
create_eval_train_gen = lambda: data_loader.create_fixed_gen("train")
create_eval_valid_gen = lambda: data_loader.create_fixed_gen("valid")
create_eval_test_gen = lambda: data_loader.create_fixed_gen("test")
def build_model():
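    # Combine the log of the blended class predictions with a small MLP over
    # the handcrafted features, then renormalize with a softmax.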
l0 = nn.layers.InputLayer((batch_size, data.num_classes))
l0_size = nn.layers.InputLayer((batch_size, 52))
l1_size = nn.layers.DenseLayer(l0_size, num_units=80, W=nn_plankton.Orthogonal('relu'), b=nn.init.Constant(0.1))
l2_size = nn.layers.DenseLayer(l1_size, num_units=80, W=nn_plankton.Orthogonal('relu'), b=nn.init.Constant(0.1))
l3_size = nn.layers.DenseLayer(l2_size, num_units=data.num_classes, W=nn_plankton.Orthogonal(), b=nn.init.Constant(0.1), nonlinearity=None)
l1 = nn_plankton.NonlinLayer(l0, T.log)
ltot = nn.layers.ElemwiseSumLayer([l1, l3_size])
# norm_by_sum = lambda x: x / x.sum(1).dimshuffle(0, "x")
lout = nn_plankton.NonlinLayer(ltot, nonlinearity=T.nnet.softmax)
return [l0, l0_size], lout
def build_objective(l_ins, l_out):
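    # Elastic-net style penalty on all non-bias parameters: alpha blends the
    # L1 and L2 terms (alpha=0 is pure L2, alpha=1 is pure L1).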
reg_param = 0.0002
alpha = 0. # 0 -> L2 1-> L1
print "regu", reg_param, alpha
# lambda_reg = 0.005
params = nn.layers.get_all_non_bias_params(l_out)
# reg_term = sum(T.sum(p**2) for p in params)
L2 = sum(T.sum(p**2) for p in params)
L1 = sum(T.sum(T.abs_(p)) for p in params)
def loss(y, t):
return nn_plankton.log_loss(y, t) + reg_param*(alpha * L1 + (1-alpha) * L2)
return nn.objectives.Objective(l_out, loss_function=loss)
|
yejingxin/kaggle-ndsb
|
configurations/featharalick_sharding_blend_pl_blend4_convroll4_doublescale_fs5_no_dropout_33_66.py
|
Python
|
mit
| 2,560
| 0.007813
|
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Web test-specific impl of the unexpected passes' queries module."""
import os
import posixpath
from blinkpy.web_tests.stale_expectation_removal import constants
from unexpected_passes_common import queries as queries_module
# The target number of results/rows per query when running in large query mode.
# Higher values = longer individual query times and higher chances of running
# out of memory in BigQuery. Lower values = more parallelization overhead and
# more issues with rate limit errors.
TARGET_RESULTS_PER_QUERY = 20000
# This query gets us all results for tests that have had results with a
# Failure, Timeout, or Crash expectation in the past |@num_samples| builds on
# |@builder_name|. Whether these are CI or try results depends on whether
# |builder_type| is "ci" or "try".
BQ_QUERY_TEMPLATE = """\
WITH
builds AS (
SELECT
DISTINCT exported.id build_inv_id,
partition_time
FROM `chrome-luci-data.chromium.blink_web_tests_{builder_type}_test_results` tr
WHERE
exported.realm = "chromium:{builder_type}"
AND STRUCT("builder", @builder_name) IN UNNEST(variant)
ORDER BY partition_time DESC
LIMIT @num_builds
),
results AS (
SELECT
exported.id,
test_id,
status,
duration,
(
SELECT value
FROM tr.tags
WHERE key = "step_name") as step_name,
(
SELECT value
FROM tr.tags
WHERE key = "web_tests_base_timeout") as timeout,
ARRAY(
SELECT value
FROM tr.tags
WHERE key = "typ_tag") as typ_tags,
ARRAY(
SELECT value
FROM tr.tags
WHERE key = "raw_typ_expectation") as typ_expectations,
ARRAY(
SELECT value
FROM tr.tags
WHERE key = "web_tests_used_expectations_file") as expectation_files
FROM
`chrome-luci-data.chromium.blink_web_tests_{builder_type}_test_results` tr,
builds b
WHERE
exported.id = build_inv_id
AND status != "SKIP"
{test_filter_clause}
)
SELECT *
FROM results
WHERE
"Failure" IN UNNEST(typ_expectations)
OR "Crash" IN UNNEST(typ_expectations)
OR "Timeout" IN UNNEST(typ_expectations)
"""
# Very similar to above, but used to get the names of tests that are of
# interest for use as a filter.
TEST_FILTER_QUERY_TEMPLATE = """\
WITH
builds AS (
SELECT
DISTINCT exported.id build_inv_id,
partition_time
FROM
`chrome-luci-data.chromium.blink_web_tests_{builder_type}_test_results` tr
WHERE
exported.realm = "chromium:{builder_type}"
AND STRUCT("builder", @builder_name) IN UNNEST(variant)
ORDER BY partition_time DESC
LIMIT 50
),
results AS (
SELECT
exported.id,
test_id,
ARRAY(
SELECT value
FROM tr.tags
WHERE key = "raw_typ_expectation") as typ_expectations
FROM
`chrome-luci-data.chromium.blink_web_tests_{builder_type}_test_results` tr,
builds b
WHERE
exported.id = build_inv_id
AND status != "SKIP"
)
SELECT DISTINCT r.test_id
FROM results r
WHERE
"Failure" IN UNNEST(typ_expectations)
OR "Crash" IN UNNEST(typ_expectations)
OR "Timeout" IN UNNEST(typ_expectations)
"""
ACTIVE_BUILDER_QUERY_TEMPLATE = """\
WITH
builders AS (
SELECT
(
SELECT value
FROM tr.variant
WHERE key = "builder") as builder_name
FROM
`chrome-luci-data.chromium.blink_web_tests_{builder_type}_test_results` tr
)
SELECT DISTINCT builder_name
FROM builders
"""
KNOWN_TEST_ID_PREFIXES = [
'ninja://:blink_web_tests/',
    'ninja://:webgpu_blink_web_tests/',
]
# The default timeout of most web tests is 6 seconds, so use that if we happen
# to get a result that doesn't report its own timeout.
DEFAULT_TIMEOUT = 6
class WebTestBigQueryQuerier(queries_module.BigQueryQuerier):
def _ConvertJsonResultToResultObject(self, json_result):
result = super(WebTestBigQueryQuerier,
self)._ConvertJsonResultToResultObject(json_result)
result.SetDuration(json_result['duration'], json_result['timeout']
or DEFAULT_TIMEOUT)
return result
def _GetRelevantExpectationFilesForQueryResult(self, query_result):
# Files in the query are either relative to the web tests directory or
# are an absolute path. The paths are always POSIX-style. We don't
# handle absolute paths since those typically point to temporary files
# which will not exist locally.
filepaths = []
for f in query_result.get('expectation_files', []):
if posixpath.isabs(f):
continue
f = f.replace('/', os.sep)
f = os.path.join(constants.WEB_TEST_ROOT_DIR, f)
filepaths.append(f)
return filepaths
def _ShouldSkipOverResult(self, result):
# WebGPU web tests are currently unsupported for various reasons.
return 'webgpu/cts.html' in result['test_id']
def _GetQueryGeneratorForBuilder(self, builder, builder_type):
# Look for all tests.
if not self._large_query_mode:
return WebTestFixedQueryGenerator(builder_type, '')
query = TEST_FILTER_QUERY_TEMPLATE.format(builder_type=builder_type)
query_results = self._RunBigQueryCommandsForJsonOutput(
query, {'': {
'builder_name': builder
}})
test_ids = ['"%s"' % r['test_id'] for r in query_results]
if not test_ids:
return None
# Only consider specific test cases that were found to have active
# expectations in the above query. Also perform any initial query
# splitting.
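        # Each test ID is expected to contribute roughly |self._num_samples|
        # result rows, so capping the IDs per query at
        # TARGET_RESULTS_PER_QUERY / num_samples keeps each split query near
        # the target row count (inference from the surrounding comments).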
        target_num_ids = TARGET_RESULTS_PER_QUERY // self._num_samples
return WebTestSplitQueryGenerator(builder_type, test_ids,
target_num_ids)
def _StripPrefixFromTestId(self, test_id):
# Web test IDs provided by ResultDB are the test name known by the test
# runner prefixed by one of the following:
# "ninja://:blink_web_tests/"
# "ninja://:webgpu_blink_web_tests/"
for prefix in KNOWN_TEST_ID_PREFIXES:
if test_id.startswith(prefix):
return test_id.replace(prefix, '')
raise RuntimeError('Unable to strip prefix from test ID %s' % test_id)
def _GetActiveBuilderQuery(self, builder_type):
return ACTIVE_BUILDER_QUERY_TEMPLATE.format(builder_type=builder_type)
class WebTestFixedQueryGenerator(queries_module.FixedQueryGenerator):
def GetQueries(self):
return QueryGeneratorImpl(self.GetClauses(), self._builder_type)
class WebTestSplitQueryGenerator(queries_module.SplitQueryGenerator):
def GetQueries(self):
return QueryGeneratorImpl(self.GetClauses(), self._builder_type)
def QueryGeneratorImpl(test_filter_clauses, builder_type):
queries = []
for tfc in test_filter_clauses:
queries.append(
BQ_QUERY_TEMPLATE.format(builder_type=builder_type,
test_filter_clause=tfc))
return queries
|
nwjs/chromium.src
|
third_party/blink/tools/blinkpy/web_tests/stale_expectation_removal/queries.py
|
Python
|
bsd-3-clause
| 7,303
| 0.000685
|
"""
Copyright 2017-present, Airbnb Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This file is the entry point for AWS Lambda.
"""
from streamalert.scheduled_queries.command.application import ScheduledQueries
def handler(event, _):
return ScheduledQueries().run(event)
|
airbnb/streamalert
|
streamalert/scheduled_queries/main.py
|
Python
|
apache-2.0
| 760
| 0
|
""" Query modules mapping functions to their query strings
structured:
module_name { query_string: function_for_query }
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
# Standard imports
from future import standard_library
standard_library.install_aliases()
from builtins import range
from builtins import *
import sys
import os
import math
import datetime
import logging
# logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s', level=logging.DEBUG)
import random
from uuid import UUID
# Our imports
from emission.core.get_database import get_section_db, get_trip_db, get_routeCluster_db, get_alternatives_db
from . import trip_old as trip
# 0763de67-f61e-3f5d-90e7-518e69793954
# 0763de67-f61e-3f5d-90e7-518e69793954_20150421T230304-0700_0
# helper for getCanonicalTrips
def get_clusters_info(uid):
c_db = get_routeCluster_db()
s_db = get_section_db()
clusterJson = c_db.find_one({"clusters":{"$exists":True}, "user": uid})
if clusterJson is None:
return []
c_info = []
clusterSectionLists= list(clusterJson["clusters"].values())
logging.debug( "Number of section lists for user %s is %s" % (uid, len(clusterSectionLists)))
for sectionList in clusterSectionLists:
first = True
logging.debug( "Number of sections in sectionList for user %s is %s" % (uid, len(sectionList)))
if (len(sectionList) == 0):
# There's no point in returning this cluster, let's move on
continue
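        # distributionArrays layout (consumed by getCanonicalTrips below):
        # [0] start datetimes, [1] end datetimes, [2] start points,
        # [3] end points, [4] confirmed modes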
distributionArrays = [[] for _ in range(5)]
for section in sectionList:
section_json = s_db.find_one({"_id":section})
if first:
representative_trip = section_json
first = False
appendIfPresent(distributionArrays[0], section_json, "section_start_datetime")
appendIfPresent(distributionArrays[1], section_json, "section_end_datetime")
appendIfPresent(distributionArrays[2], section_json, "section_start_point")
appendIfPresent(distributionArrays[3], section_json, "section_end_point")
appendIfPresent(distributionArrays[4], section_json, "confirmed_mode")
c_info.append((distributionArrays, representative_trip))
return c_info
def appendIfPresent(lst, element, key):
    if element is not None and key in element:
        lst.append(element[key])
    else:
        logging.debug("not appending element %s with key %s" % (element, key))
class AlternativesNotFound(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
#returns the top trips for the user, defaulting to the top 10 trips
def getCanonicalTrips(uid, get_representative=False): # number returned isn't used
"""
uid is a UUID object, not a string
"""
# canonical_trip_list = []
# x = 0
# if route clusters return nothing, then get common routes for user
#clusters = get_routeCluster_db().find_one({'$and':[{'user':uid},{'method':'lcs'}]})
# c = get_routeCluster_db().find_one({'$and':[{'user':uid},{'method':'lcs'}]})
logging.debug('UUID for canonical %s' % uid)
info = get_clusters_info(uid)
cluster_json_list = []
for (cluster, rt) in info:
json_dict = dict()
json_dict["representative_trip"] = rt
json_dict["start_point_distr"] = cluster[2]
json_dict["end_point_distr"] = cluster[3]
json_dict["start_time_distr"] = cluster[0]
json_dict["end_time_distr"] = cluster[1]
json_dict["confirmed_mode_list"] = cluster[4]
cluster_json_list.append(json_dict)
toRet = cluster_json_list
return toRet.__iter__()
#returns all trips to the user
def getAllTrips(uid):
#trips = list(get_trip_db().find({"user_id":uid, "type":"move"}))
query = {'user_id':uid, 'type':'move'}
return get_trip_db().find(query)
def getAllTrips_Date(uid, dys):
#trips = list(get_trip_db().find({"user_id":uid, "type":"move"}))
d = datetime.datetime.now() - datetime.timedelta(days=dys)
query = {'user_id':uid, 'type':'move','trip_start_datetime':{"$gt":d}}
return get_trip_db().find(query)
#returns all trips with no alternatives to the user
def getNoAlternatives(uid):
# If pipelineFlags exists then we have started alternatives, and so have
# already scheduled the query. No need to reschedule unless the query fails.
# TODO: If the query fails, then remove the pipelineFlags so that we will
# reschedule.
query = {'user_id':uid, 'type':'move', 'pipelineFlags': {'$exists': False}}
return get_trip_db().find(query)
def getNoAlternativesPastMonth(uid):
d = datetime.datetime.now() - datetime.timedelta(days=30)
query = {'user_id':uid, 'type':'move',
'trip_start_datetime':{"$gt":d},
'pipelineFlags': {'$exists': False}}
return get_trip_db().find(query)
# Returns the trips that are suitable for training
# Currently this is:
# - trips that have alternatives, and
# - have not yet been included in a training set
def getTrainingTrips(uid):
    return getTrainingTrips_Date(uid, 30)
def getTrainingTrips_Date(uid, dys):
d = datetime.datetime.now() - datetime.timedelta(days=dys)
query = {'user_id':uid, 'type':'move','trip_start_datetime':{"$gt":d}, "pipelineFlags":{"$exists":True}}
#query = {'user_id':uid, 'type':'move','trip_start_datetime':{"$gt":d}}
#print get_trip_db().count_documents(query)
return get_trip_db().find(query)
def getAlternativeTrips(trip_id):
#TODO: clean up datetime, and queries here
#d = datetime.datetime.now() - datetime.timedelta(days=6)
#query = {'trip_id':trip_id, 'trip_start_datetime':{"$gt":d}}
query = {'trip_id':trip_id}
    alternatives = get_alternatives_db().find(query)
    # count on the collection: estimated_document_count()/count_documents()
    # are collection-level methods in pymongo, not cursor methods
    num_alternatives = get_alternatives_db().count_documents(query)
    if num_alternatives > 0:
        logging.debug("Number of alternatives for trip %s is %d" % (trip_id, num_alternatives))
        return alternatives
raise AlternativesNotFound("No Alternatives Found")
def getRecentTrips(uid):
raise NotImplementedError()
def getTripsThroughMode(uid):
raise NotImplementedError()
modules = {
# Trip Module
'trips': {
'get_canonical': getCanonicalTrips,
'get_all': getAllTrips,
'get_no_alternatives': getNoAlternatives,
'get_no_alternatives_past_month': getNoAlternativesPastMonth,
'get_most_recent': getRecentTrips,
'get_trips_by_mode': getTripsThroughMode},
# Utility Module
'utility': {
'get_training': getTrainingTrips
},
# Recommender Module
'recommender': {
'get_improve': getCanonicalTrips
},
#Perturbation Module
'perturbation': {},
#Alternatives Module
# note: uses a different collection than section_db
'alternatives': {
'get_alternatives': getAlternativeTrips
}
}
|
e-mission/e-mission-server
|
emission/core/wrapper/filter_modules.py
|
Python
|
bsd-3-clause
| 7,166
| 0.016467
|
import argparse
import datetime
import imutils
import numpy as np
import time
import csv
import cv2
import os.path
#define variable
click_frame = False
divide_x = 0
divide_y = 0
channel_A = 0
channel_B = 0
area_A = 0
area_B = 0
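# channel_A / channel_B / area_A / area_B count, per frame, the detections
# whose centroid falls in each quadrant around the user-selected division
# point; dividing the counts by fps later converts them to seconds.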
#division function (divide_frame)
def divide_frame(event,x,y,flags,param):
global click_frame
global divide_x,divide_y
global shape
if click_frame == False and event == cv2.EVENT_LBUTTONDOWN:
click_frame = True
divide_x = x
divide_y = y
print("First frame selected")
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the video file")
ap.add_argument("-a", "--min-area", type=int, default=100, help="minimum area size")
#ap.add_argument("-s","--shape",type=str,default="rectangle",help="shape of test arena")
args = vars(ap.parse_args())
# if the video argument is None, then we are reading from webcam
if args.get("video", None) is None:
    camera = cv2.VideoCapture(0)
    time.sleep(0.25)
else:
    camera = cv2.VideoCapture(args.get("video", None))
# query the frame rate for either source; all timings below are frames/fps
fps = camera.get(cv2.cv.CV_CAP_PROP_FPS)
frame_count = 0
firstFrame = None
#Creating window and initializing mouse callback for division
cv2.namedWindow("Security Feed")
cv2.setMouseCallback("Security Feed",divide_frame)
# After selecting firstFrame no tracking should occur for 5s
#def relay(event,flags,param)
# while (frame_count/fps) < 5:
# break
while True:
    # grab the current frame and initialize the occupied/unoccupied text
(grabbed, frame) = camera.read()
text = "Unoccupied"
# if the frame could not be grabbed, then we have reached the end
# of the video
if not grabbed:
break
    # resize the frame, convert it to grayscale, and blur it
    frame = imutils.resize(frame, width=500)
    # frame dimensions, needed below to position the on-screen counters
    (height, width) = frame.shape[:2]
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)
if firstFrame is None:
firstFrame = gray
cv2.imshow("Security Feed", frame)
while click_frame == False:
print("Selected Image")
cv2.waitKey(25)
continue
frame_count += 1
# compute the absolute difference between the current frame and
# first frame
frameDelta = cv2.absdiff(firstFrame, gray)
thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
# dilate the thresholded image to fill in holes, then find contours
# on thresholded image
thresh = cv2.dilate(thresh, None, iterations=2)
(cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
# loop over the contours
for c in cnts:
# if the contour is too small, ignore it
if cv2.contourArea(c) < args["min_area"]:
continue
# compute the bounding box for the contour, draw it on the frame,
# and update the text
(x, y, w, h) = cv2.boundingRect(c)
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
text = "Occupied"
fish_x = x+w/2
fish_y = y+h/2
if fish_x < divide_x and fish_y < divide_y:
channel_A += 1
if fish_x > divide_x and fish_y < divide_y:
area_A += 1
if fish_x < divide_x and fish_y > divide_y:
channel_B += 1
if fish_x > divide_x and fish_y > divide_y:
area_B += 1
#division lines
#tags
fontsize = 1
thickness = 1
cv2.putText(frame,"{0:.2f}".format(fps)+" fps",(25,25),cv2.FONT_HERSHEY_SIMPLEX,0.5,255)
cv2.putText(frame,"{0:.2f}".format(channel_A/fps),(divide_x-width/4,divide_y-height/4),cv2.FONT_HERSHEY_SIMPLEX,fontsize,(255,255,255),thickness)
cv2.putText(frame,"{0:.2f}".format(channel_B/fps),(divide_x-width/4,divide_y+height/4),cv2.FONT_HERSHEY_SIMPLEX,fontsize,(255,255,255),thickness)
cv2.putText(frame,"{0:.2f}".format(area_A/fps),(divide_x+width/4,divide_y-height/4),cv2.FONT_HERSHEY_SIMPLEX,fontsize,(255,255,255),thickness)
cv2.putText(frame,"{0:.2f}".format(area_B/fps),(divide_x+width/4,divide_y+height/4),cv2.FONT_HERSHEY_SIMPLEX,fontsize,(255,255,255),thickness)
cv2.putText(frame,"{0:.2f}".format(frame_count/fps)+" time (s)",(divide_x+width/4,25),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,255,0))
# show the frame and record if the user presses a key
cv2.imshow("Security Feed", frame)
# cv2.imshow("Thresh", thresh)
# cv2.imshow("Frame Delta", frameDelta)
key = cv2.waitKey(1) & 0xFF
# if the `q` key is pressed, break from the loop
if key == ord("q"):
break
# cleanup the camera and close any open windows
camera.release()
cv2.destroyAllWindows()
#print data
print("Total Time [s]: "+"{0:.2f}".format(frame_count/fps))
print("Channel_A [s]: "+"{0:.2f}".format(channel_A/fps))
print("Channel_B [s]: "+"{0:.2f}".format(channel_B/fps))
print("Area_A [s]: "+"{0:.2f}".format(area_A/fps))
print("Area_B [s]: "+"{0:.2f}".format(area_B/fps))
# Print data to file (data.csv)
# Write file and header if file does not already exist
# If file exists data is inserted in a new row and no header is added
# lineterminator = '\n' to remove blank line between rows when program is restarted
file_exists=os.path.isfile("data.csv")
with open('data.csv','a') as csvfile:
dw=csv.DictWriter(csvfile,delimiter=',',fieldnames=["File","Total Time","Channel_A","Channel_B","Area_A","Area_B"],lineterminator='\n')
writer=csv.writer(csvfile)
if file_exists == True:
writer.writerow([args.get("video"),frame_count/fps,channel_A/fps,channel_B/fps,area_A/fps,area_B/fps])
else:
dw.writeheader()
writer.writerow([args.get("video"),frame_count/fps,channel_A/fps,channel_B/fps,area_A/fps,area_B/fps])
|
fritzfrancisco/flumeview
|
FlumeView1.2.py
|
Python
|
apache-2.0
| 5,412
| 0.035292
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005 onwards University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: Luis Rodriguez <luis.rodriguez@opendeusto.es>
#
class ConfigureError(Exception):
""" Configure error of any kind. """
pass
class PermanentConfigureError(ConfigureError):
""" Configure error that would most likely occur again should we retry """
def __str__(self):
return "PermanentConfigureError()"
class TemporaryConfigureError(ConfigureError):
""" Configure error that is likely to not be permanent. Server will retry whenever this is received. """
def __str__(self):
return "TemporaryConfigureError()"
class UserManager(object):
def __init__(self, cfg_manager):
"""
Creates the UserManager.
@param cfg_manager Config Manager which will be used to read configuration parameters
"""
self.cfg = cfg_manager
self.cancelled = False
def configure(self, sid):
"""
Configures the Virtual Machine for use.
@note This method may block for a long time. It might hence be advisable to account for this delay
and to call it from a worker thread.
@note Implementations might require additional information, which should generally be provided
through the configuration script and accessed through the UserManager's config reader.
@param sid Unique session id of the user.
@return None
@raise ConfigureError If the configure attempt failed. Failure and the ConfigureError should be either
a PermanentConfigureError or a TemporaryConfigureError. Should a different kind of exception be
raised however, it would be considered permanent.
"""
pass
def cancel(self):
self.cancelled = True
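# A minimal sketch of a concrete subclass (hypothetical, for illustration
# only; real implementations ship alongside this base class):
#
#     class NoopUserManager(UserManager):
#         def configure(self, sid):
#             if self.cancelled:
#                 raise TemporaryConfigureError("configure was cancelled")
#             # read deployment-specific settings through self.cfg here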
|
weblabdeusto/weblabdeusto
|
server/src/experiments/vm/user_manager/manager.py
|
Python
|
bsd-2-clause
| 2,115
| 0.008511
|
# rhn-client-tools
#
# Copyright (c) 2006--2012 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the
# OpenSSL library under certain conditions as described in each
# individual source file, and distribute linked combinations
# including the two.
# You must obey the GNU General Public License in all respects
# for all of the code used other than OpenSSL. If you modify
# file(s) with this exception, you may extend this exception to your
# version of the file(s), but you are not obligated to do so. If you
# do not wish to do so, delete this exception statement from your
# version. If you delete this exception statement from all source
# files in the program, then also delete it here.
import rpcServer
import up2dateErrors
import capabilities
import sys
import xmlrpclib
import OpenSSL
class _DoCallWrapper(object):
"""
A callable object that will handle multiple levels of attributes,
and catch exceptions.
"""
def __init__(self, server, method_name):
self._server = server
self._method_name = method_name
def __getattr__(self, method_name):
""" Recursively build up the method name to pass to the server. """
return _DoCallWrapper(self._server,
"%s.%s" % (self._method_name, method_name))
def __call__(self, *args, **kwargs):
""" Call the method. Catch faults and translate them. """
method = getattr(self._server, self._method_name)
try:
return rpcServer.doCall(method, *args, **kwargs)
        except xmlrpclib.Fault:
            raise self.__exception_from_fault(sys.exc_info()[1]), None, sys.exc_info()[2]
except OpenSSL.SSL.Error:
# TODO This should probably be moved to rhnlib and raise an
# exception that subclasses OpenSSL.SSL.Error
# TODO Is there a better way to detect cert failures?
error = str(sys.exc_info()[1])
error = error.strip("[()]")
pieces = error.split(',')
message = ""
if len(pieces) > 2:
message = pieces[2]
elif len(pieces) == 2:
message = pieces[1]
message = message.strip(" '")
            if message == 'certificate verify failed':
                raise up2dateErrors.SSLCertificateVerifyFailedError(), None, sys.exc_info()[2]
            else:
                raise up2dateErrors.NetworkError(message), None, sys.exc_info()[2]
def __exception_from_fault(self, fault):
if fault.faultCode == -3:
# This username is already taken, or the password is incorrect.
exception = up2dateErrors.AuthenticationOrAccountCreationError(fault.faultString)
elif fault.faultCode == -2:
# Invalid username and password combination.
exception = up2dateErrors.AuthenticationOrAccountCreationError(fault.faultString)
elif fault.faultCode == -110:
# Account is disabled
exception = up2dateErrors.AuthenticationOrAccountCreationError(fault.faultString)
elif fault.faultCode == -1:
exception = up2dateErrors.UnknownMethodException(fault.faultString)
elif fault.faultCode == -13:
# Username is too short.
exception = up2dateErrors.LoginMinLengthError(fault.faultString)
elif fault.faultCode == -14:
# too short password
exception = up2dateErrors.PasswordMinLengthError(
fault.faultString)
elif fault.faultCode == -15:
# bad chars in username
exception = up2dateErrors.ValidationError(fault.faultString)
elif fault.faultCode == -16:
# Invalid product registration code.
# TODO Should this really be a validation error?
exception = up2dateErrors.ValidationError(fault.faultString)
elif fault.faultCode == -19:
# invalid
exception = up2dateErrors.NoBaseChannelError(fault.faultString)
elif fault.faultCode == -31:
# No entitlement
exception = up2dateErrors.InsuffMgmntEntsError(fault.faultString)
elif fault.faultCode == -36:
# rhnException.py says this means "Invalid action."
# TODO find out which is right
exception = up2dateErrors.PasswordError(fault.faultString)
elif abs(fault.faultCode) == 49:
exception = up2dateErrors.AbuseError(fault.faultString)
elif abs(fault.faultCode) == 60:
exception = up2dateErrors.AuthenticationTicketError(fault.faultString)
elif abs(fault.faultCode) == 74:
exception = up2dateErrors.RegistrationDeniedError()
elif abs(fault.faultCode) == 105:
exception = up2dateErrors.RhnUuidUniquenessError(fault.faultString)
elif fault.faultCode == 99:
exception = up2dateErrors.DelayError(fault.faultString)
elif abs(fault.faultCode) == 91:
exception = up2dateErrors.InsuffMgmntEntsError(fault.faultString)
elif fault.faultCode == -106:
# Invalid username.
exception = up2dateErrors.ValidationError(fault.faultString)
elif fault.faultCode == -600:
            # Invalid registration number.
exception = up2dateErrors.InvalidRegistrationNumberError(fault.faultString)
elif fault.faultCode == -601:
# No entitlements associated with given hardware info
exception = up2dateErrors.NotEntitlingError(fault.faultString)
elif fault.faultCode == -602:
# No entitlements associated with reg num
exception = up2dateErrors.NotEntitlingError(fault.faultString)
elif fault.faultCode == -2001 or fault.faultCode == -700:
exception = up2dateErrors.AuthenticationOrAccountCreationError(
fault.faultString)
elif fault.faultCode == -701:
exception = up2dateErrors.PasswordMaxLengthError(
fault.faultString)
elif fault.faultCode == -61:
exception = up2dateErrors.ActivationKeyUsageLimitError(
fault.faultString)
elif fault.faultCode == -5:
exception = up2dateErrors.UnableToCreateUser(
fault.faultString)
else:
exception = up2dateErrors.CommunicationError(fault.faultString)
return exception
class RhnServer(object):
"""
An rpc server object that calls doCall for you, and catches lower
level exceptions
"""
def __init__(self, serverOverride=None, timeout=None):
self._server = rpcServer.getServer(serverOverride=serverOverride,
timeout=timeout)
self._capabilities = None
def __get_capabilities(self):
if self._capabilities is None:
headers = self._server.get_response_headers()
if headers is None:
self.registration.welcome_message()
headers = self._server.get_response_headers()
self._capabilities = capabilities.Capabilities()
self._capabilities.populate(headers)
return self._capabilities
capabilities = property(__get_capabilities)
def add_header(self, key, value):
self._server.add_header(key, value)
def __getattr__(self, method_name):
""" Return a callable object that will do the work for us. """
return _DoCallWrapper(self._server, method_name)
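# Usage sketch: attribute access on RhnServer builds dotted XML-RPC method
# names through _DoCallWrapper, so e.g.
#     s = RhnServer()
#     s.registration.welcome_message()
# invokes the remote method "registration.welcome_message" via
# rpcServer.doCall, with faults translated into up2dateErrors exceptions.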
|
davidhrbac/spacewalk
|
client/rhel/rhn-client-tools/src/up2date_client/rhnserver.py
|
Python
|
gpl-2.0
| 8,608
| 0.001859
|
from setuptools import setup
setup(
name="sgf",
version="0.5",
description="Python library for reading and writing Smart Game Format",
license="MIT",
url="http://github.com/jtauber/sgf",
author="James Tauber",
author_email="jtauber@jtauber.com",
py_modules=["sgf"],
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2.7",
"Topic :: Games/Entertainment :: Board Games",
"Topic :: Utilities",
],
)
|
jtauber/sgf
|
setup.py
|
Python
|
mit
| 552
| 0
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
# $Id: vboxshell.py $
"""
VirtualBox Python Shell.
This program is a simple interactive shell for VirtualBox. You can query
information and issue commands from a simple command line.
It also provides you with examples on how to use VirtualBox's Python API.
This shell is even somewhat documented, supports TAB-completion and
history if you have Python readline installed.
Finally, the shell allows arbitrary custom extensions: just create
.VirtualBox/shexts/ and drop your extensions there.
Enjoy.
P.S. Our apologies for the code quality.
"""
__copyright__ = \
"""
Copyright (C) 2009-2013 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
"""
__version__ = "$Revision: 92173 $"
import os, sys
import traceback
import shlex
import time
import re
import platform
from optparse import OptionParser
from pprint import pprint
#
# Global Variables
#
g_fBatchMode = False
g_sScriptFile = None
g_sCmd = None
g_fHasReadline = True
try:
import readline
import rlcompleter
except ImportError:
g_fHasReadline = False
g_sPrompt = "vbox> "
g_fHasColors = True
g_dTermColors = {
'red': '\033[31m',
'blue': '\033[94m',
'green': '\033[92m',
'yellow': '\033[93m',
'magenta': '\033[35m',
'cyan': '\033[36m'
}
def colored(strg, color):
"""
Translates a string to one including coloring settings, if enabled.
"""
if not g_fHasColors:
return strg
col = g_dTermColors.get(color, None)
if col:
return col+str(strg)+'\033[0m'
return strg
if g_fHasReadline:
class CompleterNG(rlcompleter.Completer):
def __init__(self, dic, ctx):
self.ctx = ctx
rlcompleter.Completer.__init__(self, dic)
def complete(self, text, state):
"""
taken from:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/496812
"""
if False and text == "":
return ['\t', None][state]
else:
return rlcompleter.Completer.complete(self, text, state)
def canBePath(self, _phrase, word):
return word.startswith('/')
def canBeCommand(self, phrase, _word):
spaceIdx = phrase.find(" ")
begIdx = readline.get_begidx()
firstWord = (spaceIdx == -1 or begIdx < spaceIdx)
if firstWord:
return True
if phrase.startswith('help'):
return True
return False
def canBeMachine(self, phrase, word):
return not self.canBePath(phrase, word) and not self.canBeCommand(phrase, word)
def global_matches(self, text):
"""
Compute matches when text is a simple name.
Return a list of all names currently defined
in self.namespace that match.
"""
matches = []
phrase = readline.get_line_buffer()
try:
if self.canBePath(phrase, text):
(directory, rest) = os.path.split(text)
c = len(rest)
for word in os.listdir(directory):
if c == 0 or word[:c] == rest:
matches.append(os.path.join(directory, word))
if self.canBeCommand(phrase, text):
c = len(text)
for lst in [ self.namespace ]:
for word in lst:
if word[:c] == text:
matches.append(word)
if self.canBeMachine(phrase, text):
c = len(text)
for mach in getMachines(self.ctx, False, True):
# although it has autoconversion, we need to cast
# explicitly for subscripts to work
word = re.sub("(?<!\\\\) ", "\\ ", str(mach.name))
if word[:c] == text:
matches.append(word)
word = str(mach.id)
if word[:c] == text:
matches.append(word)
except Exception, e:
printErr(self.ctx, e)
if g_fVerbose:
traceback.print_exc()
return matches
def autoCompletion(cmds, ctx):
if not g_fHasReadline:
return
comps = {}
for (key, _value) in cmds.items():
comps[key] = None
completer = CompleterNG(comps, ctx)
readline.set_completer(completer.complete)
delims = readline.get_completer_delims()
readline.set_completer_delims(re.sub("[\\./-]", "", delims)) # remove some of the delimiters
readline.parse_and_bind("set editing-mode emacs")
# OSX need it
if platform.system() == 'Darwin':
# see http://www.certif.com/spec_help/readline.html
readline.parse_and_bind ("bind ^I rl_complete")
readline.parse_and_bind ("bind ^W ed-delete-prev-word")
# Doesn't work well
# readline.parse_and_bind ("bind ^R em-inc-search-prev")
readline.parse_and_bind("tab: complete")
g_fVerbose = False
def split_no_quotes(s):
return shlex.split(s)
def progressBar(ctx, progress, wait=1000):
try:
while not progress.completed:
print "%s %%\r" % (colored(str(progress.percent), 'red')),
sys.stdout.flush()
progress.waitForCompletion(wait)
ctx['global'].waitForEvents(0)
if int(progress.resultCode) != 0:
reportError(ctx, progress)
return 1
except KeyboardInterrupt:
print "Interrupted."
ctx['interrupt'] = True
if progress.cancelable:
print "Canceling task..."
progress.cancel()
return 0
def printErr(_ctx, e):
oVBoxMgr = _ctx['global'];
if oVBoxMgr.errIsOurXcptKind(e):
print colored('%s: %s' % (oVBoxMgr.xcptToString(e), oVBoxMgr.xcptGetMessage(e)), 'red');
else:
print colored(str(e), 'red')
def reportError(_ctx, progress):
errorinfo = progress.errorInfo
if errorinfo:
print colored("Error in module '%s': %s" % (errorinfo.component, errorinfo.text), 'red')
def colCat(_ctx, strg):
return colored(strg, 'magenta')
def colVm(_ctx, vmname):
return colored(vmname, 'blue')
def colPath(_ctx, path):
return colored(path, 'green')
def colSize(_ctx, byte):
return colored(byte, 'red')
def colPci(_ctx, pcidev):
return colored(pcidev, 'green')
def colDev(_ctx, pcidev):
return colored(pcidev, 'cyan')
def colSizeM(_ctx, mbyte):
return colored(str(mbyte)+'M', 'red')
def createVm(ctx, name, kind):
vbox = ctx['vb']
mach = vbox.createMachine("", name, [], kind, "")
mach.saveSettings()
print "created machine with UUID", mach.id
vbox.registerMachine(mach)
# update cache
getMachines(ctx, True)
def removeVm(ctx, mach):
uuid = mach.id
print "removing machine ", mach.name, "with UUID", uuid
cmdClosedVm(ctx, mach, detachVmDevice, ["ALL"])
mach = mach.unregister(ctx['global'].constants.CleanupMode_Full)
if mach:
mach.deleteSettings()
# update cache
getMachines(ctx, True)
def startVm(ctx, mach, vmtype):
vbox = ctx['vb']
perf = ctx['perf']
session = ctx['global'].getSessionObject(vbox)
progress = mach.launchVMProcess(session, vmtype, "")
if progressBar(ctx, progress, 100) and int(progress.resultCode) == 0:
# we ignore exceptions to allow starting VM even if
# perf collector cannot be started
if perf:
try:
perf.setup(['*'], [mach], 10, 15)
except Exception, e:
printErr(ctx, e)
if g_fVerbose:
traceback.print_exc()
session.unlockMachine()
class CachedMach:
def __init__(self, mach):
if mach.accessible:
self.name = mach.name
else:
self.name = '<inaccessible>'
self.id = mach.id
def cacheMachines(_ctx, lst):
result = []
for mach in lst:
elem = CachedMach(mach)
result.append(elem)
return result
def getMachines(ctx, invalidate = False, simple=False):
if ctx['vb'] is not None:
if ctx['_machlist'] is None or invalidate:
ctx['_machlist'] = ctx['global'].getArray(ctx['vb'], 'machines')
ctx['_machlistsimple'] = cacheMachines(ctx, ctx['_machlist'])
if simple:
return ctx['_machlistsimple']
else:
return ctx['_machlist']
else:
return []
def asState(var):
if var:
return colored('on', 'green')
else:
return colored('off', 'green')
def asFlag(var):
if var:
return 'yes'
else:
return 'no'
def getFacilityStatus(ctx, guest, facilityType):
(status, _timestamp) = guest.getFacilityStatus(facilityType)
return asEnumElem(ctx, 'AdditionsFacilityStatus', status)
def perfStats(ctx, mach):
if not ctx['perf']:
return
for metric in ctx['perf'].query(["*"], [mach]):
print metric['name'], metric['values_as_string']
def guestExec(ctx, machine, console, cmds):
exec cmds
def printMouseEvent(_ctx, mev):
print "Mouse : mode=%d x=%d y=%d z=%d w=%d buttons=%x" % (mev.mode, mev.x, mev.y, mev.z, mev.w, mev.buttons)
def printKbdEvent(ctx, kev):
print "Kbd: ", ctx['global'].getArray(kev, 'scancodes')
def printMultiTouchEvent(ctx, mtev):
print "MultiTouch : contacts=%d time=%d" % (mtev.contactCount, mtev.scanTime)
xPositions = ctx['global'].getArray(mtev, 'xPositions')
yPositions = ctx['global'].getArray(mtev, 'yPositions')
contactIds = ctx['global'].getArray(mtev, 'contactIds')
contactFlags = ctx['global'].getArray(mtev, 'contactFlags')
for i in range(0, mtev.contactCount):
print " [%d] %d,%d %d %d" % (i, xPositions[i], yPositions[i], contactIds[i], contactFlags[i])
def monitorSource(ctx, eventSource, active, dur):
def handleEventImpl(event):
evtype = event.type
print "got event: %s %s" % (str(evtype), asEnumElem(ctx, 'VBoxEventType', evtype))
if evtype == ctx['global'].constants.VBoxEventType_OnMachineStateChanged:
scev = ctx['global'].queryInterface(event, 'IMachineStateChangedEvent')
if scev:
print "machine state event: mach=%s state=%s" % (scev.machineId, scev.state)
elif evtype == ctx['global'].constants.VBoxEventType_OnSnapshotTaken:
stev = ctx['global'].queryInterface(event, 'ISnapshotTakenEvent')
if stev:
print "snapshot taken event: mach=%s snap=%s" % (stev.machineId, stev.snapshotId)
elif evtype == ctx['global'].constants.VBoxEventType_OnGuestPropertyChanged:
gpcev = ctx['global'].queryInterface(event, 'IGuestPropertyChangedEvent')
if gpcev:
print "guest property change: name=%s value=%s" % (gpcev.name, gpcev.value)
elif evtype == ctx['global'].constants.VBoxEventType_OnMousePointerShapeChanged:
psev = ctx['global'].queryInterface(event, 'IMousePointerShapeChangedEvent')
if psev:
shape = ctx['global'].getArray(psev, 'shape')
if shape is None:
print "pointer shape event - empty shape"
else:
print "pointer shape event: w=%d h=%d shape len=%d" % (psev.width, psev.height, len(shape))
elif evtype == ctx['global'].constants.VBoxEventType_OnGuestMouse:
mev = ctx['global'].queryInterface(event, 'IGuestMouseEvent')
if mev:
printMouseEvent(ctx, mev)
elif evtype == ctx['global'].constants.VBoxEventType_OnGuestKeyboard:
kev = ctx['global'].queryInterface(event, 'IGuestKeyboardEvent')
if kev:
printKbdEvent(ctx, kev)
elif evtype == ctx['global'].constants.VBoxEventType_OnGuestMultiTouch:
mtev = ctx['global'].queryInterface(event, 'IGuestMultiTouchEvent')
if mtev:
printMultiTouchEvent(ctx, mtev)
class EventListener:
def __init__(self, arg):
pass
def handleEvent(self, event):
try:
# a bit convoluted QI to make it work with MS COM
handleEventImpl(ctx['global'].queryInterface(event, 'IEvent'))
except:
traceback.print_exc()
pass
if active:
listener = ctx['global'].createListener(EventListener)
else:
listener = eventSource.createListener()
registered = False
if dur == -1:
# not infinity, but close enough
dur = 100000
try:
eventSource.registerListener(listener, [ctx['global'].constants.VBoxEventType_Any], active)
registered = True
end = time.time() + dur
while time.time() < end:
if active:
ctx['global'].waitForEvents(500)
else:
event = eventSource.getEvent(listener, 500)
if event:
handleEventImpl(event)
# otherwise waitable events will leak (active listeners ACK automatically)
eventSource.eventProcessed(listener, event)
# We need to catch all exceptions here, otherwise listener will never be unregistered
except:
traceback.print_exc()
pass
if listener and registered:
eventSource.unregisterListener(listener)
g_tsLast = 0
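# Demo files written below are plain text: a "VM=<name>" header followed by
# one event per line, either "<ms-since-previous>: k [scancodes]" for
# keyboard events or "<ms-since-previous>: m <mode> <x> <y> <z> <w> <buttons>"
# for mouse events; playbackDemo() parses exactly this format.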
def recordDemo(ctx, console, filename, dur):
demo = open(filename, 'w')
header = "VM=" + console.machine.name + "\n"
demo.write(header)
global g_tsLast
g_tsLast = time.time()
def stamp():
global g_tsLast
tsCur = time.time()
timePassed = int((tsCur-g_tsLast)*1000)
g_tsLast = tsCur
return timePassed
def handleEventImpl(event):
evtype = event.type
#print "got event: %s %s" % (str(evtype), asEnumElem(ctx, 'VBoxEventType', evtype))
if evtype == ctx['global'].constants.VBoxEventType_OnGuestMouse:
mev = ctx['global'].queryInterface(event, 'IGuestMouseEvent')
if mev:
line = "%d: m %d %d %d %d %d %d\n" % (stamp(), mev.mode, mev.x, mev.y, mev.z, mev.w, mev.buttons)
demo.write(line)
elif evtype == ctx['global'].constants.VBoxEventType_OnGuestKeyboard:
kev = ctx['global'].queryInterface(event, 'IGuestKeyboardEvent')
if kev:
line = "%d: k %s\n" % (stamp(), str(ctx['global'].getArray(kev, 'scancodes')))
demo.write(line)
listener = console.eventSource.createListener()
registered = False
# we create an aggregated event source to listen for multiple event sources (keyboard and mouse in our case)
agg = console.eventSource.createAggregator([console.keyboard.eventSource, console.mouse.eventSource])
if dur == -1:
# not infinity, but close enough
dur = 100000
try:
agg.registerListener(listener, [ctx['global'].constants.VBoxEventType_Any], False)
registered = True
end = time.time() + dur
while time.time() < end:
event = agg.getEvent(listener, 1000)
if event:
handleEventImpl(event)
# keyboard/mouse events aren't waitable, so no need for eventProcessed
# We need to catch all exceptions here, otherwise listener will never be unregistered
except:
traceback.print_exc()
pass
demo.close()
if listener and registered:
agg.unregisterListener(listener)
def playbackDemo(ctx, console, filename, dur):
demo = open(filename, 'r')
if dur == -1:
# not infinity, but close enough
dur = 100000
header = demo.readline()
print "Header is", header
basere = re.compile(r'(?P<s>\d+): (?P<t>[km]) (?P<p>.*)')
mre = re.compile(r'(?P<a>\d+) (?P<x>-*\d+) (?P<y>-*\d+) (?P<z>-*\d+) (?P<w>-*\d+) (?P<b>-*\d+)')
kre = re.compile(r'\d+')
kbd = console.keyboard
mouse = console.mouse
try:
end = time.time() + dur
for line in demo:
if time.time() > end:
break
match = basere.search(line)
if match is None:
continue
rdict = match.groupdict()
stamp = rdict['s']
params = rdict['p']
rtype = rdict['t']
time.sleep(float(stamp)/1000)
if rtype == 'k':
codes = kre.findall(params)
#print "KBD:", codes
kbd.putScancodes(codes)
elif rtype == 'm':
mm = mre.search(params)
if mm is not None:
mdict = mm.groupdict()
if mdict['a'] == '1':
# absolute
#print "MA: ", mdict['x'], mdict['y'], mdict['z'], mdict['b']
mouse.putMouseEventAbsolute(int(mdict['x']), int(mdict['y']), int(mdict['z']), int(mdict['w']), int(mdict['b']))
else:
#print "MR: ", mdict['x'], mdict['y'], mdict['b']
mouse.putMouseEvent(int(mdict['x']), int(mdict['y']), int(mdict['z']), int(mdict['w']), int(mdict['b']))
# We need to catch all exceptions here, to close file
except KeyboardInterrupt:
ctx['interrupt'] = True
except:
traceback.print_exc()
pass
demo.close()
def takeScreenshotOld(_ctx, console, args):
from PIL import Image
display = console.display
if len(args) > 0:
f = args[0]
else:
f = "/tmp/screenshot.png"
if len(args) > 3:
screen = int(args[3])
else:
screen = 0
(fbw, fbh, _fbbpp, fbx, fby) = display.getScreenResolution(screen)
if len(args) > 1:
w = int(args[1])
else:
w = fbw
if len(args) > 2:
h = int(args[2])
else:
h = fbh
print "Saving screenshot (%d x %d) screen %d in %s..." % (w, h, screen, f)
data = display.takeScreenShotToArray(screen, w, h)
size = (w, h)
mode = "RGBA"
im = Image.frombuffer(mode, size, str(data), "raw", mode, 0, 1)
im.save(f, "PNG")
def takeScreenshot(_ctx, console, args):
display = console.display
if len(args) > 0:
f = args[0]
else:
f = "/tmp/screenshot.png"
if len(args) > 3:
screen = int(args[3])
else:
screen = 0
(fbw, fbh, _fbbpp, fbx, fby) = display.getScreenResolution(screen)
if len(args) > 1:
w = int(args[1])
else:
w = fbw
if len(args) > 2:
h = int(args[2])
else:
h = fbh
print "Saving screenshot (%d x %d) screen %d in %s..." % (w, h, screen, f)
data = display.takeScreenShotPNGToArray(screen, w, h)
pngfile = open(f, 'wb')
pngfile.write(data)
pngfile.close()
def teleport(ctx, _session, console, args):
if args[0].find(":") == -1:
print "Use host:port format for teleport target"
return
(host, port) = args[0].split(":")
if len(args) > 1:
passwd = args[1]
else:
passwd = ""
if len(args) > 2:
maxDowntime = int(args[2])
else:
maxDowntime = 250
port = int(port)
print "Teleporting to %s:%d..." % (host, port)
progress = console.teleport(host, port, passwd, maxDowntime)
if progressBar(ctx, progress, 100) and int(progress.resultCode) == 0:
print "Success!"
else:
reportError(ctx, progress)
def guestStats(ctx, console, args):
guest = console.guest
# we need to set up guest statistics
if len(args) > 0 :
update = args[0]
else:
update = 1
if guest.statisticsUpdateInterval != update:
guest.statisticsUpdateInterval = update
try:
time.sleep(float(update)+0.1)
except:
# to allow sleep interruption
pass
all_stats = ctx['const'].all_values('GuestStatisticType')
cpu = 0
for s in all_stats.keys():
try:
val = guest.getStatistic( cpu, all_stats[s])
print "%s: %d" % (s, val)
except:
# likely not implemented
pass
def plugCpu(_ctx, machine, _session, args):
cpu = int(args[0])
print "Adding CPU %d..." % (cpu)
machine.hotPlugCPU(cpu)
def unplugCpu(_ctx, machine, _session, args):
cpu = int(args[0])
print "Removing CPU %d..." % (cpu)
machine.hotUnplugCPU(cpu)
def mountIso(_ctx, machine, _session, args):
machine.mountMedium(args[0], args[1], args[2], args[3], args[4])
machine.saveSettings()
def cond(c, v1, v2):
if c:
return v1
else:
return v2
def printHostUsbDev(ctx, ud):
print " %s: %s (vendorId=%d productId=%d serial=%s) %s" % (ud.id, colored(ud.product, 'blue'), ud.vendorId, ud.productId, ud.serialNumber, asEnumElem(ctx, 'USBDeviceState', ud.state))
def printUsbDev(_ctx, ud):
print " %s: %s (vendorId=%d productId=%d serial=%s)" % (ud.id, colored(ud.product, 'blue'), ud.vendorId, ud.productId, ud.serialNumber)
def printSf(ctx, sf):
print " name=%s host=%s %s %s" % (sf.name, colPath(ctx, sf.hostPath), cond(sf.accessible, "accessible", "not accessible"), cond(sf.writable, "writable", "read-only"))
def ginfo(ctx, console, _args):
guest = console.guest
if guest.additionsRunLevel != ctx['const'].AdditionsRunLevelType_None:
print "Additions active, version %s" % (guest.additionsVersion)
print "Support seamless: %s" % (getFacilityStatus(ctx, guest, ctx['const'].AdditionsFacilityType_Seamless))
print "Support graphics: %s" % (getFacilityStatus(ctx, guest, ctx['const'].AdditionsFacilityType_Graphics))
print "Balloon size: %d" % (guest.memoryBalloonSize)
print "Statistic update interval: %d" % (guest.statisticsUpdateInterval)
else:
print "No additions"
usbs = ctx['global'].getArray(console, 'USBDevices')
print "Attached USB:"
for ud in usbs:
printUsbDev(ctx, ud)
rusbs = ctx['global'].getArray(console, 'remoteUSBDevices')
print "Remote USB:"
for ud in rusbs:
printHostUsbDev(ctx, ud)
print "Transient shared folders:"
    sfs = ctx['global'].getArray(console, 'sharedFolders')
for sf in sfs:
printSf(ctx, sf)
def cmdExistingVm(ctx, mach, cmd, args):
session = None
try:
vbox = ctx['vb']
session = ctx['global'].getSessionObject(vbox)
mach.lockMachine(session, ctx['global'].constants.LockType_Shared)
except Exception, e:
printErr(ctx, "Session to '%s' not open: %s" % (mach.name, str(e)))
if g_fVerbose:
traceback.print_exc()
return
if session.state != ctx['const'].SessionState_Locked:
print "Session to '%s' in wrong state: %s" % (mach.name, session.state)
session.unlockMachine()
return
# this could be an example how to handle local only (i.e. unavailable
# in Webservices) functionality
if ctx['remote'] and cmd == 'some_local_only_command':
print 'Trying to use local only functionality, ignored'
session.unlockMachine()
return
console = session.console
ops = {'pause': lambda: console.pause(),
'resume': lambda: console.resume(),
'powerdown': lambda: console.powerDown(),
'powerbutton': lambda: console.powerButton(),
'stats': lambda: perfStats(ctx, mach),
'guest': lambda: guestExec(ctx, mach, console, args),
'ginfo': lambda: ginfo(ctx, console, args),
'guestlambda': lambda: args[0](ctx, mach, console, args[1:]),
'save': lambda: progressBar(ctx, console.saveState()),
'screenshot': lambda: takeScreenshot(ctx, console, args),
'teleport': lambda: teleport(ctx, session, console, args),
'gueststats': lambda: guestStats(ctx, console, args),
'plugcpu': lambda: plugCpu(ctx, session.machine, session, args),
'unplugcpu': lambda: unplugCpu(ctx, session.machine, session, args),
'mountiso': lambda: mountIso(ctx, session.machine, session, args),
}
try:
ops[cmd]()
except KeyboardInterrupt:
ctx['interrupt'] = True
except Exception, e:
printErr(ctx, e)
if g_fVerbose:
traceback.print_exc()
session.unlockMachine()
def cmdClosedVm(ctx, mach, cmd, args=[], save=True):
session = ctx['global'].openMachineSession(mach, True)
mach = session.machine
try:
cmd(ctx, mach, args)
except Exception, e:
save = False
printErr(ctx, e)
if g_fVerbose:
traceback.print_exc()
if save:
try:
mach.saveSettings()
except Exception, e:
printErr(ctx, e)
if g_fVerbose:
traceback.print_exc()
ctx['global'].closeMachineSession(session)
def cmdAnyVm(ctx, mach, cmd, args=[], save=False):
session = ctx['global'].openMachineSession(mach)
mach = session.machine
try:
cmd(ctx, mach, session.console, args)
except Exception, e:
save = False
printErr(ctx, e)
if g_fVerbose:
traceback.print_exc()
if save:
mach.saveSettings()
ctx['global'].closeMachineSession(session)
def machById(ctx, uuid):
try:
mach = ctx['vb'].getMachine(uuid)
except:
mach = ctx['vb'].findMachine(uuid)
return mach
class XPathNode:
def __init__(self, parent, obj, ntype):
self.parent = parent
self.obj = obj
self.ntype = ntype
def lookup(self, subpath):
children = self.enum()
matches = []
for e in children:
if e.matches(subpath):
matches.append(e)
return matches
def enum(self):
return []
def matches(self, subexp):
if subexp == self.ntype:
return True
if not subexp.startswith(self.ntype):
return False
match = re.search(r"@(?P<a>\w+)=(?P<v>[^\'\[\]]+)", subexp)
matches = False
try:
if match is not None:
xdict = match.groupdict()
attr = xdict['a']
val = xdict['v']
matches = (str(getattr(self.obj, attr)) == val)
except:
pass
return matches
def apply(self, cmd):
exec(cmd, {'obj':self.obj, 'node':self, 'ctx':self.getCtx()}, {})
def getCtx(self):
if hasattr(self, 'ctx'):
return self.ctx
return self.parent.getCtx()
class XPathNodeHolder(XPathNode):
def __init__(self, parent, obj, attr, heldClass, xpathname):
XPathNode.__init__(self, parent, obj, 'hld '+xpathname)
self.attr = attr
self.heldClass = heldClass
self.xpathname = xpathname
def enum(self):
children = []
for node in self.getCtx()['global'].getArray(self.obj, self.attr):
nodexml = self.heldClass(self, node)
children.append(nodexml)
return children
def matches(self, subexp):
return subexp == self.xpathname
class XPathNodeValue(XPathNode):
def __init__(self, parent, obj, xpathname):
XPathNode.__init__(self, parent, obj, 'val '+xpathname)
self.xpathname = xpathname
def matches(self, subexp):
return subexp == self.xpathname
class XPathNodeHolderVM(XPathNodeHolder):
def __init__(self, parent, vbox):
XPathNodeHolder.__init__(self, parent, vbox, 'machines', XPathNodeVM, 'vms')
class XPathNodeVM(XPathNode):
def __init__(self, parent, obj):
XPathNode.__init__(self, parent, obj, 'vm')
#def matches(self, subexp):
# return subexp=='vm'
def enum(self):
return [XPathNodeHolderNIC(self, self.obj),
XPathNodeValue(self, self.obj.BIOSSettings, 'bios'), ]
class XPathNodeHolderNIC(XPathNodeHolder):
def __init__(self, parent, mach):
XPathNodeHolder.__init__(self, parent, mach, 'nics', XPathNodeVM, 'nics')
self.maxNic = self.getCtx()['vb'].systemProperties.getMaxNetworkAdapters(self.obj.chipsetType)
def enum(self):
children = []
for i in range(0, self.maxNic):
node = XPathNodeNIC(self, self.obj.getNetworkAdapter(i))
children.append(node)
return children
class XPathNodeNIC(XPathNode):
def __init__(self, parent, obj):
XPathNode.__init__(self, parent, obj, 'nic')
def matches(self, subexp):
return subexp == 'nic'
class XPathNodeRoot(XPathNode):
def __init__(self, ctx):
XPathNode.__init__(self, None, None, 'root')
self.ctx = ctx
def enum(self):
return [XPathNodeHolderVM(self, self.ctx['vb'])]
def matches(self, subexp):
return True
def eval_xpath(ctx, scope):
pathnames = scope.split("/")[2:]
nodes = [XPathNodeRoot(ctx)]
for path in pathnames:
seen = []
while len(nodes) > 0:
node = nodes.pop()
seen.append(node)
for s in seen:
matches = s.lookup(path)
for match in matches:
nodes.append(match)
if len(nodes) == 0:
break
return nodes
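# eval_xpath() walks a tiny XPath-like tree over the VirtualBox object
# model: a scope such as "//vms/vm[@name=myvm]" (illustrative example)
# selects root -> the 'vms' holder -> the VM nodes whose 'name' attribute
# matches.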
def argsToMach(ctx, args):
if len(args) < 2:
print "usage: %s [vmname|uuid]" % (args[0])
return None
uuid = args[1]
mach = machById(ctx, uuid)
if mach == None:
print "Machine '%s' is unknown, use list command to find available machines" % (uuid)
return mach
def helpSingleCmd(cmd, h, sp):
if sp != 0:
spec = " [ext from "+sp+"]"
else:
spec = ""
print " %s: %s%s" % (colored(cmd, 'blue'), h, spec)
def helpCmd(_ctx, args):
if len(args) == 1:
print "Help page:"
names = commands.keys()
names.sort()
for i in names:
helpSingleCmd(i, commands[i][0], commands[i][2])
else:
cmd = args[1]
c = commands.get(cmd)
if c == None:
print "Command '%s' not known" % (cmd)
else:
helpSingleCmd(cmd, c[0], c[2])
return 0
def asEnumElem(ctx, enum, elem):
enumVals = ctx['const'].all_values(enum)
for e in enumVals.keys():
if str(elem) == str(enumVals[e]):
return colored(e, 'green')
return colored("<unknown>", 'green')
def enumFromString(ctx, enum, strg):
enumVals = ctx['const'].all_values(enum)
return enumVals.get(strg, None)
def listCmd(ctx, _args):
for mach in getMachines(ctx, True):
try:
if mach.teleporterEnabled:
tele = "[T] "
else:
tele = " "
print "%sMachine '%s' [%s], machineState=%s, sessionState=%s" % (tele, colVm(ctx, mach.name), mach.id, asEnumElem(ctx, "MachineState", mach.state), asEnumElem(ctx, "SessionState", mach.sessionState))
except Exception, e:
printErr(ctx, e)
if g_fVerbose:
traceback.print_exc()
return 0
def infoCmd(ctx, args):
if (len(args) < 2):
print "usage: info [vmname|uuid]"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
vmos = ctx['vb'].getGuestOSType(mach.OSTypeId)
print " One can use setvar <mach> <var> <value> to change variable, using name in []."
print " Name [name]: %s" % (colVm(ctx, mach.name))
print " Description [description]: %s" % (mach.description)
print " ID [n/a]: %s" % (mach.id)
print " OS Type [via OSTypeId]: %s" % (vmos.description)
print " Firmware [firmwareType]: %s (%s)" % (asEnumElem(ctx, "FirmwareType", mach.firmwareType), mach.firmwareType)
print
print " CPUs [CPUCount]: %d" % (mach.CPUCount)
print " RAM [memorySize]: %dM" % (mach.memorySize)
print " VRAM [VRAMSize]: %dM" % (mach.VRAMSize)
print " Monitors [monitorCount]: %d" % (mach.monitorCount)
print " Chipset [chipsetType]: %s (%s)" % (asEnumElem(ctx, "ChipsetType", mach.chipsetType), mach.chipsetType)
print
print " Clipboard mode [clipboardMode]: %s (%s)" % (asEnumElem(ctx, "ClipboardMode", mach.clipboardMode), mach.clipboardMode)
print " Machine status [n/a]: %s (%s)" % (asEnumElem(ctx, "SessionState", mach.sessionState), mach.sessionState)
print
if mach.teleporterEnabled:
print " Teleport target on port %d (%s)" % (mach.teleporterPort, mach.teleporterPassword)
print
bios = mach.BIOSSettings
print " ACPI [BIOSSettings.ACPIEnabled]: %s" % (asState(bios.ACPIEnabled))
print " APIC [BIOSSettings.IOAPICEnabled]: %s" % (asState(bios.IOAPICEnabled))
hwVirtEnabled = mach.getHWVirtExProperty(ctx['global'].constants.HWVirtExPropertyType_Enabled)
print " Hardware virtualization [guest win machine.setHWVirtExProperty(ctx[\\'const\\'].HWVirtExPropertyType_Enabled, value)]: " + asState(hwVirtEnabled)
hwVirtVPID = mach.getHWVirtExProperty(ctx['const'].HWVirtExPropertyType_VPID)
print " VPID support [guest win machine.setHWVirtExProperty(ctx[\\'const\\'].HWVirtExPropertyType_VPID, value)]: " + asState(hwVirtVPID)
hwVirtNestedPaging = mach.getHWVirtExProperty(ctx['const'].HWVirtExPropertyType_NestedPaging)
print " Nested paging [guest win machine.setHWVirtExProperty(ctx[\\'const\\'].HWVirtExPropertyType_NestedPaging, value)]: " + asState(hwVirtNestedPaging)
print " Hardware 3d acceleration [accelerate3DEnabled]: " + asState(mach.accelerate3DEnabled)
print " Hardware 2d video acceleration [accelerate2DVideoEnabled]: " + asState(mach.accelerate2DVideoEnabled)
print " Use universal time [RTCUseUTC]: %s" % (asState(mach.RTCUseUTC))
print " HPET [HPETEnabled]: %s" % (asState(mach.HPETEnabled))
if mach.audioAdapter.enabled:
print " Audio [via audioAdapter]: chip %s; host driver %s" % (asEnumElem(ctx, "AudioControllerType", mach.audioAdapter.audioController), asEnumElem(ctx, "AudioDriverType", mach.audioAdapter.audioDriver))
print " CPU hotplugging [CPUHotPlugEnabled]: %s" % (asState(mach.CPUHotPlugEnabled))
print " Keyboard [keyboardHIDType]: %s (%s)" % (asEnumElem(ctx, "KeyboardHIDType", mach.keyboardHIDType), mach.keyboardHIDType)
print " Pointing device [pointingHIDType]: %s (%s)" % (asEnumElem(ctx, "PointingHIDType", mach.pointingHIDType), mach.pointingHIDType)
print " Last changed [n/a]: " + time.asctime(time.localtime(long(mach.lastStateChange)/1000))
# OSE has no VRDE
try:
print " VRDE server [VRDEServer.enabled]: %s" % (asState(mach.VRDEServer.enabled))
except:
pass
print
print colCat(ctx, " USB Controllers:")
for oUsbCtrl in ctx['global'].getArray(mach, 'USBControllers'):
print " '%s': type %s standard: %#x" \
% (oUsbCtrl.name, asEnumElem(ctx, "USBControllerType", oUsbCtrl.type), oUsbCtrl.USBStandard);
print
print colCat(ctx, " I/O subsystem info:")
print " Cache enabled [IOCacheEnabled]: %s" % (asState(mach.IOCacheEnabled))
print " Cache size [IOCacheSize]: %dM" % (mach.IOCacheSize)
controllers = ctx['global'].getArray(mach, 'storageControllers')
if controllers:
print
print colCat(ctx, " Storage Controllers:")
for controller in controllers:
print " '%s': bus %s type %s" % (controller.name, asEnumElem(ctx, "StorageBus", controller.bus), asEnumElem(ctx, "StorageControllerType", controller.controllerType))
attaches = ctx['global'].getArray(mach, 'mediumAttachments')
if attaches:
print
print colCat(ctx, " Media:")
for a in attaches:
print " Controller: '%s' port/device: %d:%d type: %s (%s):" % (a.controller, a.port, a.device, asEnumElem(ctx, "DeviceType", a.type), a.type)
medium = a.medium
if a.type == ctx['global'].constants.DeviceType_HardDisk:
print " HDD:"
print " Id: %s" % (medium.id)
print " Location: %s" % (colPath(ctx, medium.location))
print " Name: %s" % (medium.name)
print " Format: %s" % (medium.format)
if a.type == ctx['global'].constants.DeviceType_DVD:
print " DVD:"
if medium:
print " Id: %s" % (medium.id)
print " Name: %s" % (medium.name)
if medium.hostDrive:
print " Host DVD %s" % (colPath(ctx, medium.location))
if a.passthrough:
print " [passthrough mode]"
else:
print " Virtual image at %s" % (colPath(ctx, medium.location))
print " Size: %s" % (medium.size)
if a.type == ctx['global'].constants.DeviceType_Floppy:
print " Floppy:"
if medium:
print " Id: %s" % (medium.id)
print " Name: %s" % (medium.name)
if medium.hostDrive:
print " Host floppy %s" % (colPath(ctx, medium.location))
else:
print " Virtual image at %s" % (colPath(ctx, medium.location))
print " Size: %s" % (medium.size)
print
print colCat(ctx, " Shared folders:")
for sf in ctx['global'].getArray(mach, 'sharedFolders'):
printSf(ctx, sf)
return 0
def startCmd(ctx, args):
if len(args) < 2:
print "usage: start name <frontend>"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
if len(args) > 2:
vmtype = args[2]
else:
vmtype = "gui"
startVm(ctx, mach, vmtype)
return 0
def createVmCmd(ctx, args):
if (len(args) != 3):
print "usage: createvm name ostype"
return 0
name = args[1]
oskind = args[2]
try:
ctx['vb'].getGuestOSType(oskind)
except Exception:
print 'Unknown OS type:', oskind
return 0
createVm(ctx, name, oskind)
return 0
def ginfoCmd(ctx, args):
if (len(args) < 2):
print "usage: ginfo [vmname|uuid]"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
cmdExistingVm(ctx, mach, 'ginfo', '')
return 0
def execInGuest(ctx, console, args, env, user, passwd, tmo, inputPipe=None, outputPipe=None):
if len(args) < 1:
print "exec in guest needs at least program name"
return
guest = console.guest
guestSession = guest.createSession(user, passwd, "", "vboxshell guest exec")
# shall contain program name as argv[0]
gargs = args
print "executing %s with args %s as %s" % (args[0], gargs, user)
    # note: assumes the IGuestSession/IProcess API (VirtualBox 4.1+); request
    # the guest process stdout so the read loop below can drain it
    cflags = [ctx['global'].constants.ProcessCreateFlag_WaitForStdOut]
    process = guestSession.processCreate(args[0], gargs, env, cflags, tmo)
print "executed with pid %d" % (process.PID)
if pid != 0:
try:
while True:
if inputPipe is not None:
indata = inputPipe(ctx)
if indata is not None:
write = len(indata)
off = 0
while write > 0:
w = guest.setProcessInput(pid, 0, 10*1000, indata[off:])
off = off + w
write = write - w
else:
# EOF
try:
guest.setProcessInput(pid, 1, 10*1000, " ")
except:
pass
data = guest.getProcessOutput(pid, 0, 10000, 4096)
if data and len(data) > 0:
sys.stdout.write(data)
continue
progress.waitForCompletion(100)
ctx['global'].waitForEvents(0)
data = guest.getProcessOutput(pid, 0, 0, 4096)
if data and len(data) > 0:
if outputPipe is not None:
outputPipe(ctx, data)
else:
sys.stdout.write(data)
continue
if progress.completed:
break
except KeyboardInterrupt:
print "Interrupted."
ctx['interrupt'] = True
if progress.cancelable:
progress.cancel()
(_reason, code, _flags) = guest.getProcessStatus(pid)
print "Exit code: %d" % (code)
return 0
else:
reportError(ctx, progress)
def copyToGuest(ctx, console, args, user, passwd):
src = args[0]
dst = args[1]
flags = 0
print "Copying host %s to guest %s" % (src, dst)
progress = console.guest.copyToGuest(src, dst, user, passwd, flags)
progressBar(ctx, progress)
def nh_raw_input(prompt=""):
stream = sys.stdout
prompt = str(prompt)
if prompt:
stream.write(prompt)
line = sys.stdin.readline()
if not line:
raise EOFError
if line[-1] == '\n':
line = line[:-1]
return line
def getCred(_ctx):
import getpass
user = getpass.getuser()
user_inp = nh_raw_input("User (%s): " % (user))
if len (user_inp) > 0:
user = user_inp
passwd = getpass.getpass()
return (user, passwd)
def gexecCmd(ctx, args):
if (len(args) < 2):
print "usage: gexec [vmname|uuid] command args"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
gargs = args[2:]
env = [] # ["DISPLAY=:0"]
(user, passwd) = getCred(ctx)
gargs.insert(0, lambda ctx, mach, console, args: execInGuest(ctx, console, args, env, user, passwd, 10000))
cmdExistingVm(ctx, mach, 'guestlambda', gargs)
return 0
def gcopyCmd(ctx, args):
if (len(args) < 2):
print "usage: gcopy [vmname|uuid] host_path guest_path"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
gargs = args[2:]
(user, passwd) = getCred(ctx)
gargs.insert(0, lambda ctx, mach, console, args: copyToGuest(ctx, console, args, user, passwd))
cmdExistingVm(ctx, mach, 'guestlambda', gargs)
return 0
def readCmdPipe(ctx, _hcmd):
try:
return ctx['process'].communicate()[0]
except:
return None
def gpipeCmd(ctx, args):
if (len(args) < 4):
print "usage: gpipe [vmname|uuid] hostProgram guestProgram, such as gpipe linux '/bin/uname -a' '/bin/sh -c \"/usr/bin/tee; /bin/uname -a\"'"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
hcmd = args[2]
gcmd = args[3]
(user, passwd) = getCred(ctx)
import subprocess
ctx['process'] = subprocess.Popen(split_no_quotes(hcmd), stdout=subprocess.PIPE)
gargs = split_no_quotes(gcmd)
env = []
gargs.insert(0, lambda ctx, mach, console, args: execInGuest(ctx, console, args, env, user, passwd, 10000, lambda ctx:readCmdPipe(ctx, hcmd)))
cmdExistingVm(ctx, mach, 'guestlambda', gargs)
try:
ctx['process'].terminate()
except:
pass
ctx['process'] = None
return 0
def removeVmCmd(ctx, args):
mach = argsToMach(ctx, args)
if mach == None:
return 0
removeVm(ctx, mach)
return 0
def pauseCmd(ctx, args):
mach = argsToMach(ctx, args)
if mach == None:
return 0
cmdExistingVm(ctx, mach, 'pause', '')
return 0
def powerdownCmd(ctx, args):
mach = argsToMach(ctx, args)
if mach == None:
return 0
cmdExistingVm(ctx, mach, 'powerdown', '')
return 0
def powerbuttonCmd(ctx, args):
mach = argsToMach(ctx, args)
if mach == None:
return 0
cmdExistingVm(ctx, mach, 'powerbutton', '')
return 0
def resumeCmd(ctx, args):
mach = argsToMach(ctx, args)
if mach == None:
return 0
cmdExistingVm(ctx, mach, 'resume', '')
return 0
def saveCmd(ctx, args):
mach = argsToMach(ctx, args)
if mach == None:
return 0
cmdExistingVm(ctx, mach, 'save', '')
return 0
def statsCmd(ctx, args):
mach = argsToMach(ctx, args)
if mach == None:
return 0
cmdExistingVm(ctx, mach, 'stats', '')
return 0
def guestCmd(ctx, args):
if (len(args) < 3):
print "usage: guest name commands"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
if mach.state != ctx['const'].MachineState_Running:
cmdClosedVm(ctx, mach, lambda ctx, mach, a: guestExec (ctx, mach, None, ' '.join(args[2:])))
else:
cmdExistingVm(ctx, mach, 'guest', ' '.join(args[2:]))
return 0
def screenshotCmd(ctx, args):
if (len(args) < 2):
print "usage: screenshot vm <file> <width> <height> <monitor>"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
cmdExistingVm(ctx, mach, 'screenshot', args[2:])
return 0
def teleportCmd(ctx, args):
if (len(args) < 3):
print "usage: teleport name host:port <password>"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
cmdExistingVm(ctx, mach, 'teleport', args[2:])
return 0
def portalsettings(_ctx, mach, args):
enabled = args[0]
mach.teleporterEnabled = enabled
if enabled:
port = args[1]
passwd = args[2]
mach.teleporterPort = port
mach.teleporterPassword = passwd
def openportalCmd(ctx, args):
if (len(args) < 3):
print "usage: openportal name port <password>"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
port = int(args[2])
if (len(args) > 3):
passwd = args[3]
else:
passwd = ""
if not mach.teleporterEnabled or mach.teleporterPort != port or passwd:
cmdClosedVm(ctx, mach, portalsettings, [True, port, passwd])
startVm(ctx, mach, "gui")
return 0
def closeportalCmd(ctx, args):
if (len(args) < 2):
print "usage: closeportal name"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
if mach.teleporterEnabled:
cmdClosedVm(ctx, mach, portalsettings, [False])
return 0
def gueststatsCmd(ctx, args):
if (len(args) < 2):
print "usage: gueststats name <check interval>"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
cmdExistingVm(ctx, mach, 'gueststats', args[2:])
return 0
def plugcpu(_ctx, mach, args):
plug = args[0]
cpu = args[1]
if plug:
print "Adding CPU %d..." % (cpu)
mach.hotPlugCPU(cpu)
else:
print "Removing CPU %d..." % (cpu)
mach.hotUnplugCPU(cpu)
def plugcpuCmd(ctx, args):
if (len(args) < 2):
print "usage: plugcpu name cpuid"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
if str(mach.sessionState) != str(ctx['const'].SessionState_Locked):
if mach.CPUHotPlugEnabled:
cmdClosedVm(ctx, mach, plugcpu, [True, int(args[2])])
else:
cmdExistingVm(ctx, mach, 'plugcpu', args[2])
return 0
def unplugcpuCmd(ctx, args):
if (len(args) < 2):
print "usage: unplugcpu name cpuid"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
if str(mach.sessionState) != str(ctx['const'].SessionState_Locked):
if mach.CPUHotPlugEnabled:
cmdClosedVm(ctx, mach, plugcpu, [False, int(args[2])])
else:
cmdExistingVm(ctx, mach, 'unplugcpu', args[2])
return 0
def setvar(_ctx, _mach, args):
expr = 'mach.'+args[0]+' = '+args[1]
print "Executing", expr
exec expr
def setvarCmd(ctx, args):
if (len(args) < 4):
print "usage: setvar [vmname|uuid] expr value"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
cmdClosedVm(ctx, mach, setvar, args[2:])
return 0
def setvmextra(_ctx, mach, args):
key = args[0]
value = args[1]
print "%s: setting %s to %s" % (mach.name, key, value)
mach.setExtraData(key, value)
def setExtraDataCmd(ctx, args):
if (len(args) < 3):
print "usage: setextra [vmname|uuid|global] key <value>"
return 0
key = args[2]
if len(args) == 4:
value = args[3]
else:
value = None
if args[1] == 'global':
ctx['vb'].setExtraData(key, value)
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
cmdClosedVm(ctx, mach, setvmextra, [key, value])
return 0
def printExtraKey(obj, key, value):
print "%s: '%s' = '%s'" % (obj, key, value)
def getExtraDataCmd(ctx, args):
if (len(args) < 2):
print "usage: getextra [vmname|uuid|global] <key>"
return 0
if len(args) == 3:
key = args[2]
else:
key = None
if args[1] == 'global':
obj = ctx['vb']
else:
obj = argsToMach(ctx, args)
if obj == None:
return 0
if key == None:
keys = obj.getExtraDataKeys()
else:
keys = [ key ]
for k in keys:
printExtraKey(args[1], k, obj.getExtraData(k))
return 0
def quitCmd(_ctx, _args):
return 1
def aliasCmd(ctx, args):
if (len(args) == 3):
aliases[args[1]] = args[2]
return 0
for (key, value) in aliases.items():
print "'%s' is an alias for '%s'" % (key, value)
return 0
def verboseCmd(ctx, args):
global g_fVerbose
if (len(args) > 1):
g_fVerbose = (args[1]=='on')
else:
g_fVerbose = not g_fVerbose
return 0
def colorsCmd(ctx, args):
global g_fHasColors
if (len(args) > 1):
g_fHasColors = (args[1] == 'on')
else:
g_fHasColors = not g_fHasColors
return 0
def hostCmd(ctx, args):
vbox = ctx['vb']
try:
print "VirtualBox version %s" % (colored(vbox.version, 'blue'))
except Exception, e:
printErr(ctx, e)
if g_fVerbose:
traceback.print_exc()
props = vbox.systemProperties
print "Machines: %s" % (colPath(ctx, props.defaultMachineFolder))
#print "Global shared folders:"
    #for sf in ctx['global'].getArray(vbox, 'sharedFolders'):
    #    printSf(ctx, sf)
host = vbox.host
cnt = host.processorCount
print colCat(ctx, "Processors:")
print " available/online: %d/%d " % (cnt, host.processorOnlineCount)
for i in range(0, cnt):
print " processor #%d speed: %dMHz %s" % (i, host.getProcessorSpeed(i), host.getProcessorDescription(i))
print colCat(ctx, "RAM:")
print " %dM (free %dM)" % (host.memorySize, host.memoryAvailable)
print colCat(ctx, "OS:")
print " %s (%s)" % (host.operatingSystem, host.OSVersion)
if host.acceleration3DAvailable:
print colCat(ctx, "3D acceleration available")
else:
print colCat(ctx, "3D acceleration NOT available")
print colCat(ctx, "Network interfaces:")
for ni in ctx['global'].getArray(host, 'networkInterfaces'):
print " %s (%s)" % (ni.name, ni.IPAddress)
print colCat(ctx, "DVD drives:")
for dd in ctx['global'].getArray(host, 'DVDDrives'):
print " %s - %s" % (dd.name, dd.description)
print colCat(ctx, "Floppy drives:")
for dd in ctx['global'].getArray(host, 'floppyDrives'):
print " %s - %s" % (dd.name, dd.description)
print colCat(ctx, "USB devices:")
for ud in ctx['global'].getArray(host, 'USBDevices'):
printHostUsbDev(ctx, ud)
if ctx['perf']:
for metric in ctx['perf'].query(["*"], [host]):
print metric['name'], metric['values_as_string']
return 0
def monitorGuestCmd(ctx, args):
if (len(args) < 2):
print "usage: monitorGuest name (duration)"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
dur = 5
if len(args) > 2:
dur = float(args[2])
active = False
cmdExistingVm(ctx, mach, 'guestlambda', [lambda ctx, mach, console, args: monitorSource(ctx, console.eventSource, active, dur)])
return 0
def monitorGuestKbdCmd(ctx, args):
if (len(args) < 2):
print "usage: monitorGuestKbd name (duration)"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
dur = 5
if len(args) > 2:
dur = float(args[2])
active = False
cmdExistingVm(ctx, mach, 'guestlambda', [lambda ctx, mach, console, args: monitorSource(ctx, console.keyboard.eventSource, active, dur)])
return 0
def monitorGuestMouseCmd(ctx, args):
if (len(args) < 2):
print "usage: monitorGuestMouse name (duration)"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
dur = 5
if len(args) > 2:
dur = float(args[2])
active = False
cmdExistingVm(ctx, mach, 'guestlambda', [lambda ctx, mach, console, args: monitorSource(ctx, console.mouse.eventSource, active, dur)])
return 0
def monitorGuestMultiTouchCmd(ctx, args):
if (len(args) < 2):
print "usage: monitorGuestMultiTouch name (duration)"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
dur = 5
if len(args) > 2:
dur = float(args[2])
active = False
cmdExistingVm(ctx, mach, 'guestlambda', [lambda ctx, mach, console, args: monitorSource(ctx, console.mouse.eventSource, active, dur)])
return 0
def monitorVBoxCmd(ctx, args):
if (len(args) > 2):
print "usage: monitorVBox (duration)"
return 0
dur = 5
if len(args) > 1:
dur = float(args[1])
vbox = ctx['vb']
active = False
monitorSource(ctx, vbox.eventSource, active, dur)
return 0
def getAdapterType(ctx, natype):
if (natype == ctx['global'].constants.NetworkAdapterType_Am79C970A or
natype == ctx['global'].constants.NetworkAdapterType_Am79C973):
return "pcnet"
elif (natype == ctx['global'].constants.NetworkAdapterType_I82540EM or
natype == ctx['global'].constants.NetworkAdapterType_I82545EM or
natype == ctx['global'].constants.NetworkAdapterType_I82543GC):
return "e1000"
elif (natype == ctx['global'].constants.NetworkAdapterType_Virtio):
return "virtio"
elif (natype == ctx['global'].constants.NetworkAdapterType_Null):
return None
else:
raise Exception("Unknown adapter type: "+natype)
def portForwardCmd(ctx, args):
if (len(args) != 5):
print "usage: portForward <vm> <adapter> <hostPort> <guestPort>"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
adapterNum = int(args[2])
hostPort = int(args[3])
guestPort = int(args[4])
proto = "TCP"
session = ctx['global'].openMachineSession(mach)
mach = session.machine
adapter = mach.getNetworkAdapter(adapterNum)
adapterType = getAdapterType(ctx, adapter.adapterType)
profile_name = proto+"_"+str(hostPort)+"_"+str(guestPort)
config = "VBoxInternal/Devices/" + adapterType + "/"
config = config + str(adapter.slot) +"/LUN#0/Config/" + profile_name
mach.setExtraData(config + "/Protocol", proto)
mach.setExtraData(config + "/HostPort", str(hostPort))
mach.setExtraData(config + "/GuestPort", str(guestPort))
mach.saveSettings()
session.unlockMachine()
return 0
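# Illustrative result of "portForward Win32 0 8080 80" on an e1000 adapter in
# slot 0 (the keys follow directly from the string building above):
#   VBoxInternal/Devices/e1000/0/LUN#0/Config/TCP_8080_80/Protocol  = TCP
#   VBoxInternal/Devices/e1000/0/LUN#0/Config/TCP_8080_80/HostPort  = 8080
#   VBoxInternal/Devices/e1000/0/LUN#0/Config/TCP_8080_80/GuestPort = 80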
def showLogCmd(ctx, args):
if (len(args) < 2):
print "usage: showLog vm <num>"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
log = 0
if (len(args) > 2):
log = args[2]
uOffset = 0
while True:
data = mach.readLog(log, uOffset, 4096)
if (len(data) == 0):
break
# print adds either NL or space to chunks not ending with a NL
sys.stdout.write(str(data))
uOffset += len(data)
return 0
def findLogCmd(ctx, args):
if (len(args) < 3):
print "usage: findLog vm pattern <num>"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
log = 0
if (len(args) > 3):
log = args[3]
pattern = args[2]
uOffset = 0
while True:
# to reduce line splits on buffer boundary
data = mach.readLog(log, uOffset, 512*1024)
if (len(data) == 0):
break
d = str(data).split("\n")
for s in d:
match = re.findall(pattern, s)
if len(match) > 0:
for mt in match:
s = s.replace(mt, colored(mt, 'red'))
print s
uOffset += len(data)
return 0
def findAssertCmd(ctx, args):
if (len(args) < 2):
print "usage: findAssert vm <num>"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
log = 0
if (len(args) > 2):
log = args[2]
uOffset = 0
ere = re.compile(r'(Expression:|\!\!\!\!\!\!)')
active = False
context = 0
while True:
# to reduce line splits on buffer boundary
data = mach.readLog(log, uOffset, 512*1024)
if (len(data) == 0):
break
d = str(data).split("\n")
for s in d:
if active:
print s
if context == 0:
active = False
else:
context = context - 1
continue
match = ere.findall(s)
if len(match) > 0:
active = True
context = 50
print s
uOffset += len(data)
return 0
def evalCmd(ctx, args):
expr = ' '.join(args[1:])
try:
exec expr
except Exception, e:
printErr(ctx, e)
if g_fVerbose:
traceback.print_exc()
return 0
def reloadExtCmd(ctx, args):
# maybe will want more args smartness
checkUserExtensions(ctx, commands, getHomeFolder(ctx))
autoCompletion(commands, ctx)
return 0
def runScriptCmd(ctx, args):
if (len(args) != 2):
print "usage: runScript <script>"
return 0
try:
lf = open(args[1], 'r')
except IOError, e:
print "cannot open:", args[1], ":", e
return 0
try:
lines = lf.readlines()
ctx['scriptLine'] = 0
ctx['interrupt'] = False
while ctx['scriptLine'] < len(lines):
line = lines[ctx['scriptLine']]
ctx['scriptLine'] = ctx['scriptLine'] + 1
done = runCommand(ctx, line)
if done != 0 or ctx['interrupt']:
break
except Exception, e:
printErr(ctx, e)
if g_fVerbose:
traceback.print_exc()
lf.close()
return 0
def sleepCmd(ctx, args):
if (len(args) != 2):
print "usage: sleep <secs>"
return 0
try:
time.sleep(float(args[1]))
except:
# to allow sleep interrupt
pass
return 0
def shellCmd(ctx, args):
if (len(args) < 2):
print "usage: shell <commands>"
return 0
cmd = ' '.join(args[1:])
try:
os.system(cmd)
except KeyboardInterrupt:
# to allow shell command interruption
pass
return 0
def connectCmd(ctx, args):
if (len(args) > 4):
print "usage: connect url <username> <passwd>"
return 0
if ctx['vb'] is not None:
print "Already connected, disconnect first..."
return 0
if (len(args) > 1):
url = args[1]
else:
url = None
if (len(args) > 2):
user = args[2]
else:
user = ""
if (len(args) > 3):
passwd = args[3]
else:
passwd = ""
ctx['wsinfo'] = [url, user, passwd]
vbox = ctx['global'].platform.connect(url, user, passwd)
ctx['vb'] = vbox
try:
print "Running VirtualBox version %s" % (vbox.version)
except Exception, e:
printErr(ctx, e)
if g_fVerbose:
traceback.print_exc()
ctx['perf'] = ctx['global'].getPerfCollector(ctx['vb'])
return 0
def disconnectCmd(ctx, args):
if (len(args) != 1):
print "usage: disconnect"
return 0
if ctx['vb'] is None:
print "Not connected yet."
return 0
try:
ctx['global'].platform.disconnect()
except:
ctx['vb'] = None
raise
ctx['vb'] = None
return 0
def reconnectCmd(ctx, args):
if ctx['wsinfo'] is None:
print "Never connected..."
return 0
try:
ctx['global'].platform.disconnect()
except:
pass
[url, user, passwd] = ctx['wsinfo']
ctx['vb'] = ctx['global'].platform.connect(url, user, passwd)
try:
print "Running VirtualBox version %s" % (ctx['vb'].version)
except Exception, e:
printErr(ctx, e)
if g_fVerbose:
traceback.print_exc()
return 0
def exportVMCmd(ctx, args):
if len(args) < 3:
print "usage: exportVm <machine> <path> <format> <license>"
return 0
mach = argsToMach(ctx, args)
if mach is None:
return 0
path = args[2]
if (len(args) > 3):
fmt = args[3]
else:
fmt = "ovf-1.0"
if (len(args) > 4):
lic = args[4]
else:
lic = "GPL"
app = ctx['vb'].createAppliance()
desc = mach.export(app)
desc.addDescription(ctx['global'].constants.VirtualSystemDescriptionType_License, lic, "")
progress = app.write(fmt, path)
if (progressBar(ctx, progress) and int(progress.resultCode) == 0):
print "Exported to %s in format %s" % (path, fmt)
else:
reportError(ctx, progress)
return 0
# PC XT scancodes
scancodes = {
'a': 0x1e,
'b': 0x30,
'c': 0x2e,
'd': 0x20,
'e': 0x12,
'f': 0x21,
'g': 0x22,
'h': 0x23,
'i': 0x17,
'j': 0x24,
'k': 0x25,
'l': 0x26,
'm': 0x32,
'n': 0x31,
'o': 0x18,
'p': 0x19,
'q': 0x10,
'r': 0x13,
's': 0x1f,
't': 0x14,
'u': 0x16,
'v': 0x2f,
'w': 0x11,
'x': 0x2d,
'y': 0x15,
'z': 0x2c,
'0': 0x0b,
'1': 0x02,
'2': 0x03,
'3': 0x04,
'4': 0x05,
'5': 0x06,
'6': 0x07,
'7': 0x08,
'8': 0x09,
'9': 0x0a,
' ': 0x39,
'-': 0xc,
'=': 0xd,
'[': 0x1a,
']': 0x1b,
';': 0x27,
'\'': 0x28,
',': 0x33,
'.': 0x34,
'/': 0x35,
'\t': 0xf,
'\n': 0x1c,
'`': 0x29
}
extScancodes = {
'ESC' : [0x01],
'BKSP': [0xe],
'SPACE': [0x39],
'TAB': [0x0f],
'CAPS': [0x3a],
'ENTER': [0x1c],
'LSHIFT': [0x2a],
'RSHIFT': [0x36],
'INS': [0xe0, 0x52],
'DEL': [0xe0, 0x53],
'END': [0xe0, 0x4f],
'HOME': [0xe0, 0x47],
'PGUP': [0xe0, 0x49],
'PGDOWN': [0xe0, 0x51],
'LGUI': [0xe0, 0x5b], # GUI, aka Win, aka Apple key
'RGUI': [0xe0, 0x5c],
'LCTR': [0x1d],
'RCTR': [0xe0, 0x1d],
'LALT': [0x38],
'RALT': [0xe0, 0x38],
'APPS': [0xe0, 0x5d],
'F1': [0x3b],
'F2': [0x3c],
'F3': [0x3d],
'F4': [0x3e],
'F5': [0x3f],
'F6': [0x40],
'F7': [0x41],
'F8': [0x42],
'F9': [0x43],
'F10': [0x44 ],
'F11': [0x57],
'F12': [0x58],
'UP': [0xe0, 0x48],
'LEFT': [0xe0, 0x4b],
'DOWN': [0xe0, 0x50],
'RIGHT': [0xe0, 0x4d],
}
def keyDown(ch):
code = scancodes.get(ch, 0x0)
if code != 0:
return [code]
extCode = extScancodes.get(ch, [])
if len(extCode) == 0:
print "bad ext", ch
return extCode
def keyUp(ch):
codes = keyDown(ch)[:] # make a copy
if len(codes) > 0:
codes[len(codes)-1] += 0x80
return codes
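# e.g. keyDown('a') == [0x1e] and keyUp('a') == [0x9e]: a key's break (release)
# code is its make code with the high bit (0x80) set, per the PC XT scancode set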
def typeInGuest(console, text, delay):
pressed = []
group = False
modGroupEnd = True
i = 0
kbd = console.keyboard
while i < len(text):
ch = text[i]
i = i+1
if ch == '{':
# start group, all keys to be pressed at the same time
group = True
continue
if ch == '}':
# end group, release all keys
for c in pressed:
kbd.putScancodes(keyUp(c))
pressed = []
group = False
continue
if ch == 'W':
# just wait a bit
time.sleep(0.3)
continue
if ch == '^' or ch == '|' or ch == '$' or ch == '_':
if ch == '^':
ch = 'LCTR'
if ch == '|':
ch = 'LSHIFT'
if ch == '_':
ch = 'LALT'
if ch == '$':
ch = 'LGUI'
if not group:
modGroupEnd = False
else:
if ch == '\\':
if i < len(text):
ch = text[i]
i = i+1
if ch == 'n':
ch = '\n'
elif ch == '&':
combo = ""
while i < len(text):
ch = text[i]
i = i+1
if ch == ';':
break
combo += ch
ch = combo
modGroupEnd = True
kbd.putScancodes(keyDown(ch))
pressed.insert(0, ch)
if not group and modGroupEnd:
for c in pressed:
kbd.putScancodes(keyUp(c))
pressed = []
modGroupEnd = True
time.sleep(delay)
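# Illustrative inputs for the typeInGuest mini-language above:
#   "ls\n"       types "ls" then Enter ('\' escapes the next character)
#   "&F1;"       presses an extended key by its extScancodes name
#   "{^_&DEL;}"  holds Ctrl+Alt+Del as one chord (braces group keys)
#   "W"          inserts a 0.3 second pause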
def typeGuestCmd(ctx, args):
if len(args) < 3:
print "usage: typeGuest <machine> <text> <charDelay>"
return 0
mach = argsToMach(ctx, args)
if mach is None:
return 0
text = args[2]
if len(args) > 3:
delay = float(args[3])
else:
delay = 0.1
gargs = [lambda ctx, mach, console, args: typeInGuest(console, text, delay)]
cmdExistingVm(ctx, mach, 'guestlambda', gargs)
return 0
def optId(verbose, uuid):
if verbose:
return ": "+uuid
else:
return ""
def asSize(val, inBytes):
if inBytes:
return int(val)/(1024*1024)
else:
return int(val)
def listMediaCmd(ctx, args):
if len(args) > 1:
verbose = int(args[1])
else:
verbose = False
hdds = ctx['global'].getArray(ctx['vb'], 'hardDisks')
print colCat(ctx, "Hard disks:")
for hdd in hdds:
if hdd.state != ctx['global'].constants.MediumState_Created:
hdd.refreshState()
print " %s (%s)%s %s [logical %s]" % (colPath(ctx, hdd.location), hdd.format, optId(verbose, hdd.id), colSizeM(ctx, asSize(hdd.size, True)), colSizeM(ctx, asSize(hdd.logicalSize, True)))
dvds = ctx['global'].getArray(ctx['vb'], 'DVDImages')
print colCat(ctx, "CD/DVD disks:")
for dvd in dvds:
if dvd.state != ctx['global'].constants.MediumState_Created:
dvd.refreshState()
print " %s (%s)%s %s" % (colPath(ctx, dvd.location), dvd.format, optId(verbose, dvd.id), colSizeM(ctx, asSize(dvd.size, True)))
floppys = ctx['global'].getArray(ctx['vb'], 'floppyImages')
print colCat(ctx, "Floppy disks:")
for floppy in floppys:
if floppy.state != ctx['global'].constants.MediumState_Created:
floppy.refreshState()
print " %s (%s)%s %s" % (colPath(ctx, floppy.location), floppy.format, optId(verbose, floppy.id), colSizeM(ctx, asSize(floppy.size, True)))
return 0
def listUsbCmd(ctx, args):
if (len(args) > 1):
print "usage: listUsb"
return 0
host = ctx['vb'].host
for ud in ctx['global'].getArray(host, 'USBDevices'):
printHostUsbDev(ctx, ud)
return 0
def findDevOfType(ctx, mach, devtype):
atts = ctx['global'].getArray(mach, 'mediumAttachments')
for a in atts:
if a.type == devtype:
return [a.controller, a.port, a.device]
return [None, 0, 0]
def createHddCmd(ctx, args):
if (len(args) < 3):
print "usage: createHdd sizeM location type"
return 0
size = int(args[1])
loc = args[2]
if len(args) > 3:
fmt = args[3]
else:
fmt = "vdi"
    hdd = ctx['vb'].createHardDisk(fmt, loc)
progress = hdd.createBaseStorage(size, (ctx['global'].constants.MediumVariant_Standard, ))
if progressBar(ctx,progress) and hdd.id:
print "created HDD at %s as %s" % (colPath(ctx,hdd.location), hdd.id)
else:
print "cannot create disk (file %s exist?)" % (loc)
reportError(ctx,progress)
return 0
return 0
def registerHddCmd(ctx, args):
if (len(args) < 2):
print "usage: registerHdd location"
return 0
vbox = ctx['vb']
loc = args[1]
    hdd = vbox.openMedium(loc, ctx['global'].constants.DeviceType_HardDisk, ctx['global'].constants.AccessMode_ReadWrite, False)
print "registered HDD as %s" % (hdd.id)
return 0
def controldevice(ctx, mach, args):
[ctr, port, slot, devtype, uuid] = args
mach.attachDevice(ctr, port, slot, devtype, uuid)
def attachHddCmd(ctx, args):
if (len(args) < 3):
print "usage: attachHdd vm hdd controller port:slot"
return 0
mach = argsToMach(ctx, args)
if mach is None:
return 0
vbox = ctx['vb']
loc = args[2]
try:
        hdd = vbox.openMedium(loc, ctx['global'].constants.DeviceType_HardDisk, ctx['global'].constants.AccessMode_ReadWrite, False)
except:
print "no HDD with path %s registered" % (loc)
return 0
if len(args) > 3:
ctr = args[3]
(port, slot) = args[4].split(":")
else:
[ctr, port, slot] = findDevOfType(ctx, mach, ctx['global'].constants.DeviceType_HardDisk)
    cmdClosedVm(ctx, mach, lambda ctx, mach, args: mach.attachDevice(ctr, port, slot, ctx['global'].constants.DeviceType_HardDisk, hdd))
return 0
def detachVmDevice(ctx, mach, args):
atts = ctx['global'].getArray(mach, 'mediumAttachments')
hid = args[0]
for a in atts:
if a.medium:
if hid == "ALL" or a.medium.id == hid:
mach.detachDevice(a.controller, a.port, a.device)
def detachMedium(ctx, mid, medium):
cmdClosedVm(ctx, machById(ctx, mid), detachVmDevice, [medium])
def detachHddCmd(ctx, args):
if (len(args) < 3):
print "usage: detachHdd vm hdd"
return 0
mach = argsToMach(ctx, args)
if mach is None:
return 0
vbox = ctx['vb']
loc = args[2]
try:
        hdd = vbox.openMedium(loc, ctx['global'].constants.DeviceType_HardDisk, ctx['global'].constants.AccessMode_ReadWrite, False)
except:
print "no HDD with path %s registered" % (loc)
return 0
detachMedium(ctx, mach.id, hdd)
return 0
def unregisterHddCmd(ctx, args):
if (len(args) < 2):
print "usage: unregisterHdd path <vmunreg>"
return 0
vbox = ctx['vb']
loc = args[1]
if (len(args) > 2):
vmunreg = int(args[2])
else:
vmunreg = 0
try:
        hdd = vbox.openMedium(loc, ctx['global'].constants.DeviceType_HardDisk, ctx['global'].constants.AccessMode_ReadWrite, False)
except:
print "no HDD with path %s registered" % (loc)
return 0
if vmunreg != 0:
machs = ctx['global'].getArray(hdd, 'machineIds')
try:
for mach in machs:
print "Trying to detach from %s" % (mach)
detachMedium(ctx, mach, hdd)
except Exception, e:
print 'failed: ', e
return 0
hdd.close()
return 0
def removeHddCmd(ctx, args):
if (len(args) != 2):
print "usage: removeHdd path"
return 0
vbox = ctx['vb']
loc = args[1]
try:
        hdd = vbox.openMedium(loc, ctx['global'].constants.DeviceType_HardDisk, ctx['global'].constants.AccessMode_ReadWrite, False)
except:
print "no HDD with path %s registered" % (loc)
return 0
progress = hdd.deleteStorage()
progressBar(ctx, progress)
return 0
def registerIsoCmd(ctx, args):
if (len(args) < 2):
print "usage: registerIso location"
return 0
vbox = ctx['vb']
loc = args[1]
    iso = vbox.openMedium(loc, ctx['global'].constants.DeviceType_DVD, ctx['global'].constants.AccessMode_ReadOnly, False)
print "registered ISO as %s" % (iso.id)
return 0
def unregisterIsoCmd(ctx, args):
if (len(args) != 2):
print "usage: unregisterIso path"
return 0
vbox = ctx['vb']
loc = args[1]
try:
        dvd = vbox.openMedium(loc, ctx['global'].constants.DeviceType_DVD, ctx['global'].constants.AccessMode_ReadOnly, False)
except:
print "no DVD with path %s registered" % (loc)
return 0
    dvd.close()
print "Unregistered ISO at %s" % (colPath(ctx, loc))
return 0
def removeIsoCmd(ctx, args):
if (len(args) != 2):
print "usage: removeIso path"
return 0
vbox = ctx['vb']
loc = args[1]
try:
        dvd = vbox.openMedium(loc, ctx['global'].constants.DeviceType_DVD, ctx['global'].constants.AccessMode_ReadOnly, False)
except:
print "no DVD with path %s registered" % (loc)
return 0
progress = dvd.deleteStorage()
if progressBar(ctx, progress):
print "Removed ISO at %s" % (colPath(ctx, dvd.location))
else:
reportError(ctx, progress)
return 0
def attachIsoCmd(ctx, args):
if (len(args) < 3):
print "usage: attachIso vm iso controller port:slot"
return 0
mach = argsToMach(ctx, args)
if mach is None:
return 0
vbox = ctx['vb']
loc = args[2]
try:
        dvd = vbox.openMedium(loc, ctx['global'].constants.DeviceType_DVD, ctx['global'].constants.AccessMode_ReadOnly, False)
except:
print "no DVD with path %s registered" % (loc)
return 0
if len(args) > 3:
ctr = args[3]
(port, slot) = args[4].split(":")
else:
[ctr, port, slot] = findDevOfType(ctx, mach, ctx['global'].constants.DeviceType_DVD)
cmdClosedVm(ctx, mach, lambda ctx, mach, args: mach.attachDevice(ctr, port, slot, ctx['global'].constants.DeviceType_DVD, dvd))
return 0
def detachIsoCmd(ctx, args):
if (len(args) < 3):
print "usage: detachIso vm iso"
return 0
mach = argsToMach(ctx, args)
if mach is None:
return 0
vbox = ctx['vb']
loc = args[2]
try:
        dvd = vbox.openMedium(loc, ctx['global'].constants.DeviceType_DVD, ctx['global'].constants.AccessMode_ReadOnly, False)
except:
print "no DVD with path %s registered" % (loc)
return 0
detachMedium(ctx, mach.id, dvd)
return 0
def mountIsoCmd(ctx, args):
if (len(args) < 3):
print "usage: mountIso vm iso controller port:slot"
return 0
mach = argsToMach(ctx, args)
if mach is None:
return 0
vbox = ctx['vb']
loc = args[2]
try:
        dvd = vbox.openMedium(loc, ctx['global'].constants.DeviceType_DVD, ctx['global'].constants.AccessMode_ReadOnly, False)
except:
print "no DVD with path %s registered" % (loc)
return 0
if len(args) > 3:
ctr = args[3]
(port, slot) = args[4].split(":")
else:
# autodetect controller and location, just find first controller with media == DVD
[ctr, port, slot] = findDevOfType(ctx, mach, ctx['global'].constants.DeviceType_DVD)
cmdExistingVm(ctx, mach, 'mountiso', [ctr, port, slot, dvd, True])
return 0
def unmountIsoCmd(ctx, args):
if (len(args) < 2):
print "usage: unmountIso vm controller port:slot"
return 0
mach = argsToMach(ctx, args)
if mach is None:
return 0
vbox = ctx['vb']
if len(args) > 3:
ctr = args[2]
(port, slot) = args[3].split(":")
else:
# autodetect controller and location, just find first controller with media == DVD
[ctr, port, slot] = findDevOfType(ctx, mach, ctx['global'].constants.DeviceType_DVD)
cmdExistingVm(ctx, mach, 'mountiso', [ctr, port, slot, None, True])
return 0
def attachCtr(ctx, mach, args):
[name, bus, ctrltype] = args
ctr = mach.addStorageController(name, bus)
if ctrltype != None:
ctr.controllerType = ctrltype
def attachCtrCmd(ctx, args):
if (len(args) < 4):
print "usage: attachCtr vm cname bus <type>"
return 0
if len(args) > 4:
ctrltype = enumFromString(ctx, 'StorageControllerType', args[4])
if ctrltype == None:
print "Controller type %s unknown" % (args[4])
return 0
else:
ctrltype = None
mach = argsToMach(ctx, args)
if mach is None:
return 0
bus = enumFromString(ctx, 'StorageBus', args[3])
if bus is None:
print "Bus type %s unknown" % (args[3])
return 0
name = args[2]
cmdClosedVm(ctx, mach, attachCtr, [name, bus, ctrltype])
return 0
def detachCtrCmd(ctx, args):
if (len(args) < 3):
print "usage: detachCtr vm name"
return 0
mach = argsToMach(ctx, args)
if mach is None:
return 0
ctr = args[2]
cmdClosedVm(ctx, mach, lambda ctx, mach, args: mach.removeStorageController(ctr))
return 0
def usbctr(ctx, mach, console, args):
if (args[0]):
console.attachUSBDevice(args[1])
else:
console.detachUSBDevice(args[1])
def attachUsbCmd(ctx, args):
if (len(args) < 3):
print "usage: attachUsb vm deviceuid"
return 0
mach = argsToMach(ctx, args)
if mach is None:
return 0
dev = args[2]
cmdExistingVm(ctx, mach, 'guestlambda', [usbctr, True, dev])
return 0
def detachUsbCmd(ctx, args):
if (len(args) < 3):
print "usage: detachUsb vm deviceuid"
return 0
mach = argsToMach(ctx, args)
if mach is None:
return 0
dev = args[2]
cmdExistingVm(ctx, mach, 'guestlambda', [usbctr, False, dev])
return 0
def guiCmd(ctx, args):
if (len(args) > 1):
print "usage: gui"
return 0
binDir = ctx['global'].getBinDir()
vbox = os.path.join(binDir, 'VirtualBox')
try:
os.system(vbox)
except KeyboardInterrupt:
# to allow interruption
pass
return 0
def shareFolderCmd(ctx, args):
if (len(args) < 4):
print "usage: shareFolder vm path name <writable> <persistent>"
return 0
mach = argsToMach(ctx, args)
if mach is None:
return 0
path = args[2]
name = args[3]
writable = False
persistent = False
if len(args) > 4:
for a in args[4:]:
if a == 'writable':
writable = True
if a == 'persistent':
persistent = True
if persistent:
cmdClosedVm(ctx, mach, lambda ctx, mach, args: mach.createSharedFolder(name, path, writable), [])
else:
cmdExistingVm(ctx, mach, 'guestlambda', [lambda ctx, mach, console, args: console.createSharedFolder(name, path, writable)])
return 0
def unshareFolderCmd(ctx, args):
if (len(args) < 3):
print "usage: unshareFolder vm name"
return 0
mach = argsToMach(ctx, args)
if mach is None:
return 0
name = args[2]
found = False
for sf in ctx['global'].getArray(mach, 'sharedFolders'):
if sf.name == name:
cmdClosedVm(ctx, mach, lambda ctx, mach, args: mach.removeSharedFolder(name), [])
found = True
break
if not found:
cmdExistingVm(ctx, mach, 'guestlambda', [lambda ctx, mach, console, args: console.removeSharedFolder(name)])
return 0
def snapshotCmd(ctx, args):
if (len(args) < 2 or args[1] == 'help'):
print "Take snapshot: snapshot vm take name <description>"
print "Restore snapshot: snapshot vm restore name"
print "Merge snapshot: snapshot vm merge name"
return 0
mach = argsToMach(ctx, args)
if mach is None:
return 0
cmd = args[2]
if cmd == 'take':
if (len(args) < 4):
print "usage: snapshot vm take name <description>"
return 0
name = args[3]
if (len(args) > 4):
desc = args[4]
else:
desc = ""
cmdAnyVm(ctx, mach, lambda ctx, mach, console, args: progressBar(ctx, console.takeSnapshot(name, desc)))
return 0
if cmd == 'restore':
if (len(args) < 4):
print "usage: snapshot vm restore name"
return 0
name = args[3]
snap = mach.findSnapshot(name)
cmdAnyVm(ctx, mach, lambda ctx, mach, console, args: progressBar(ctx, console.restoreSnapshot(snap)))
return 0
    if cmd == 'restorecurrent':
        snap = mach.currentSnapshot
cmdAnyVm(ctx, mach, lambda ctx, mach, console, args: progressBar(ctx, console.restoreSnapshot(snap)))
return 0
if cmd == 'delete':
if (len(args) < 4):
print "usage: snapshot vm delete name"
return 0
name = args[3]
snap = mach.findSnapshot(name)
cmdAnyVm(ctx, mach, lambda ctx, mach, console, args: progressBar(ctx, console.deleteSnapshot(snap.id)))
return 0
print "Command '%s' is unknown" % (cmd)
return 0
def natAlias(ctx, mach, nicnum, nat, args=[]):
"""This command shows/alters NAT's alias settings.
usage: nat <vm> <nicnum> alias [default|[log] [proxyonly] [sameports]]
default - set settings to default values
log - switch on alias logging
proxyonly - switch proxyonly mode on
sameports - enforces NAT using the same ports
"""
alias = {
'log': 0x1,
'proxyonly': 0x2,
'sameports': 0x4
}
    if len(args) == 1:
        first = 0
        msg = ''
        for aliasmode, aliaskey in alias.iteritems():
            if first == 0:
                first = 1
            else:
                msg += ', '
            if int(nat.aliasMode) & aliaskey:
                msg += '%s: %s' % (aliasmode, 'on')
            else:
                msg += '%s: %s' % (aliasmode, 'off')
        return (0, [msg])
else:
nat.aliasMode = 0
if 'default' not in args:
for a in range(1, len(args)):
if not alias.has_key(args[a]):
print 'Invalid alias mode: ' + args[a]
print natAlias.__doc__
return (1, None)
nat.aliasMode = int(nat.aliasMode) | alias[args[a]]
return (0, None)
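# e.g. "nat Win32 0 alias log sameports" switches alias logging on and enforces
# identical host/guest ports; plain "nat Win32 0 alias" reports the current modes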
def natSettings(ctx, mach, nicnum, nat, args):
"""This command shows/alters NAT settings.
usage: nat <vm> <nicnum> settings [<mtu> [[<socsndbuf> <sockrcvbuf> [<tcpsndwnd> <tcprcvwnd>]]]]
mtu - set mtu <= 16000
socksndbuf/sockrcvbuf - sets amount of kb for socket sending/receiving buffer
tcpsndwnd/tcprcvwnd - sets size of initial tcp sending/receiving window
"""
if len(args) == 1:
(mtu, socksndbuf, sockrcvbuf, tcpsndwnd, tcprcvwnd) = nat.getNetworkSettings()
if mtu == 0: mtu = 1500
if socksndbuf == 0: socksndbuf = 64
if sockrcvbuf == 0: sockrcvbuf = 64
if tcpsndwnd == 0: tcpsndwnd = 64
if tcprcvwnd == 0: tcprcvwnd = 64
msg = 'mtu:%s socket(snd:%s, rcv:%s) tcpwnd(snd:%s, rcv:%s)' % (mtu, socksndbuf, sockrcvbuf, tcpsndwnd, tcprcvwnd)
return (0, [msg])
else:
        if not args[1].isdigit() or int(args[1]) < 65 or int(args[1]) > 16000:
            print 'invalid mtu value (%s not in range [65 - 16000])' % (args[1])
            return (1, None)
for i in range(2, len(args)):
if not args[i].isdigit() or int(args[i]) < 8 or int(args[i]) > 1024:
                print 'invalid parameter #%d (%s not in range [8-1024])' % (i, args[i])
return (1, None)
a = [args[1]]
if len(args) < 6:
for i in range(2, len(args)): a.append(args[i])
for i in range(len(args), 6): a.append(0)
else:
for i in range(2, len(args)): a.append(args[i])
#print a
nat.setNetworkSettings(int(a[0]), int(a[1]), int(a[2]), int(a[3]), int(a[4]))
return (0, None)
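# e.g. "nat Win32 0 settings 1500 128 128 64 64" sets a 1500-byte MTU, 128K
# socket buffers and 64K initial TCP windows; with no arguments the current
# (or default) values are printed instead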
def natDns(ctx, mach, nicnum, nat, args):
"""This command shows/alters DNS's NAT settings
usage: nat <vm> <nicnum> dns [passdomain] [proxy] [usehostresolver]
passdomain - enforces builtin DHCP server to pass domain
proxy - switch on builtin NAT DNS proxying mechanism
usehostresolver - proxies all DNS requests to Host Resolver interface
"""
yesno = {0: 'off', 1: 'on'}
if len(args) == 1:
msg = 'passdomain:%s, proxy:%s, usehostresolver:%s' % (yesno[int(nat.DNSPassDomain)], yesno[int(nat.DNSProxy)], yesno[int(nat.DNSUseHostResolver)])
return (0, [msg])
else:
nat.DNSPassDomain = 'passdomain' in args
nat.DNSProxy = 'proxy' in args
nat.DNSUseHostResolver = 'usehostresolver' in args
return (0, None)
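# e.g. "nat Win32 0 dns proxy" enables only the built-in DNS proxy; note the
# setter branch rewrites all three flags from the modes actually listed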
def natTftp(ctx, mach, nicnum, nat, args):
"""This command shows/alters TFTP settings
usage nat <vm> <nicnum> tftp [prefix <prefix>| bootfile <bootfile>| server <server>]
prefix - alters prefix TFTP settings
bootfile - alters bootfile TFTP settings
server - sets booting server
"""
if len(args) == 1:
server = nat.TFTPNextServer
if server is None:
server = nat.network
if server is None:
server = '10.0.%d/24' % (int(nicnum) + 2)
(server, mask) = server.split('/')
while server.count('.') != 3:
server += '.0'
            (a, b, c, _d) = server.split('.')
            server = '%s.%s.%s.4' % (a, b, c)
prefix = nat.TFTPPrefix
if prefix is None:
prefix = '%s/TFTP/' % (ctx['vb'].homeFolder)
bootfile = nat.TFTPBootFile
if bootfile is None:
bootfile = '%s.pxe' % (mach.name)
msg = 'server:%s, prefix:%s, bootfile:%s' % (server, prefix, bootfile)
return (0, [msg])
else:
cmd = args[1]
if len(args) != 3:
print 'invalid args:', args
print natTftp.__doc__
return (1, None)
if cmd == 'prefix': nat.TFTPPrefix = args[2]
elif cmd == 'bootfile': nat.TFTPBootFile = args[2]
elif cmd == 'server': nat.TFTPNextServer = args[2]
else:
print "invalid cmd:", cmd
return (1, None)
return (0, None)
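# e.g. "nat Win32 0 tftp server 10.0.2.5" points PXE booting at that server,
# while plain "nat Win32 0 tftp" prints the derived server/prefix/bootfile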
def natPortForwarding(ctx, mach, nicnum, nat, args):
"""This command shows/manages port-forwarding settings
usage:
nat <vm> <nicnum> <pf> [ simple tcp|udp <hostport> <guestport>]
|[no_name tcp|udp <hostip> <hostport> <guestip> <guestport>]
|[ex tcp|udp <pf-name> <hostip> <hostport> <guestip> <guestport>]
|[delete <pf-name>]
"""
if len(args) == 1:
        # note: this proto map is number->name; the setter branch below uses name->number
proto = {0: 'udp', 1: 'tcp'}
msg = []
pfs = ctx['global'].getArray(nat, 'redirects')
for pf in pfs:
(pfnme, pfp, pfhip, pfhp, pfgip, pfgp) = str(pf).split(', ')
msg.append('%s: %s %s:%s => %s:%s' % (pfnme, proto[int(pfp)], pfhip, pfhp, pfgip, pfgp))
return (0, msg) # msg is array
else:
proto = {'udp': 0, 'tcp': 1}
pfcmd = {
'simple': {
'validate': lambda: args[1] in pfcmd.keys() and args[2] in proto.keys() and len(args) == 5,
'func':lambda: nat.addRedirect('', proto[args[2]], '', int(args[3]), '', int(args[4]))
},
'no_name': {
'validate': lambda: args[1] in pfcmd.keys() and args[2] in proto.keys() and len(args) == 7,
'func': lambda: nat.addRedirect('', proto[args[2]], args[3], int(args[4]), args[5], int(args[6]))
},
'ex': {
'validate': lambda: args[1] in pfcmd.keys() and args[2] in proto.keys() and len(args) == 8,
'func': lambda: nat.addRedirect(args[3], proto[args[2]], args[4], int(args[5]), args[6], int(args[7]))
},
'delete': {
'validate': lambda: len(args) == 3,
'func': lambda: nat.removeRedirect(args[2])
}
}
        if args[1] not in pfcmd or not pfcmd[args[1]]['validate']():
            print 'invalid port-forwarding sub-command or arguments:', args[1]
print natPortForwarding.__doc__
return (1, None)
        pfcmd[args[1]]['func']()
return (0, None)
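# e.g. "nat Win32 0 pf simple tcp 8080 80" forwards host port 8080 to guest
# port 80; plain "nat Win32 0 pf" lists redirects, whose names feed
# "nat Win32 0 pf delete <pf-name>"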
def natNetwork(ctx, mach, nicnum, nat, args):
"""This command shows/alters NAT network settings
usage: nat <vm> <nicnum> network [<network>]
"""
if len(args) == 1:
if nat.network is not None and len(str(nat.network)) != 0:
msg = '\'%s\'' % (nat.network)
else:
msg = '10.0.%d.0/24' % (int(nicnum) + 2)
return (0, [msg])
else:
(addr, mask) = args[1].split('/')
if addr.count('.') > 3 or int(mask) < 0 or int(mask) > 32:
print 'Invalid arguments'
return (1, None)
nat.network = args[1]
return (0, None)
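# e.g. "nat Win32 0 network 192.168.2.0/24" moves the NAT network; with no
# argument the effective network (default 10.0.<nicnum+2>.0/24) is printed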
def natCmd(ctx, args):
"""This command is entry point to NAT settins management
usage: nat <vm> <nicnum> <cmd> <cmd-args>
cmd - [alias|settings|tftp|dns|pf|network]
for more information about commands:
nat help <cmd>
"""
natcommands = {
'alias' : natAlias,
'settings' : natSettings,
'tftp': natTftp,
'dns': natDns,
'pf': natPortForwarding,
'network': natNetwork
}
if len(args) < 2 or args[1] == 'help':
if len(args) > 2:
print natcommands[args[2]].__doc__
else:
print natCmd.__doc__
return 0
    if len(args) < 4 or args[3] not in natcommands:
print natCmd.__doc__
return 0
mach = ctx['argsToMach'](args)
if mach == None:
print "please specify vm"
return 0
    if not args[2].isdigit() or int(args[2]) not in range(0, ctx['vb'].systemProperties.getMaxNetworkAdapters(mach.chipsetType)):
        print 'invalid adapter number %s, not in range [0-%d]' % (args[2], ctx['vb'].systemProperties.getMaxNetworkAdapters(mach.chipsetType))
return 0
nicnum = int(args[2])
cmdargs = []
for i in range(3, len(args)):
cmdargs.append(args[i])
# @todo vvl if nicnum is missed but command is entered
# use NAT func for every adapter on machine.
func = args[3]
rosession = 1
session = None
if len(cmdargs) > 1:
rosession = 0
session = ctx['global'].openMachineSession(mach, False)
mach = session.machine
adapter = mach.getNetworkAdapter(nicnum)
natEngine = adapter.NATEngine
(rc, report) = natcommands[func](ctx, mach, nicnum, natEngine, cmdargs)
if rosession == 0:
if rc == 0:
mach.saveSettings()
session.unlockMachine()
elif report is not None:
for r in report:
msg ='%s nic%d %s: %s' % (mach.name, nicnum, func, r)
print msg
return 0
def nicSwitchOnOff(adapter, attr, args):
    if len(args) == 1:
        yesno = {0: 'off', 1: 'on'}
        r = yesno[int(getattr(adapter, attr))]
        return (0, r)
    else:
        yesno = {'off': 0, 'on': 1}
        if args[1] not in yesno:
            print '%s isn\'t acceptable, please choose %s' % (args[1], yesno.keys())
            return (1, None)
        setattr(adapter, attr, yesno[args[1]])
        return (0, None)
def nicTraceSubCmd(ctx, vm, nicnum, adapter, args):
    '''
    usage: nic <vm> <nicnum> trace [on|off [file]]
    '''
    (rc, r) = nicSwitchOnOff(adapter, 'traceEnabled', args)
    if len(args) == 1 and rc == 0:
        r = '%s file:%s' % (r, adapter.traceFile)
        return (0, r)
    elif len(args) == 3 and rc == 0:
        adapter.traceFile = args[2]
        return (0, None)
    return (rc, r)
def nicLineSpeedSubCmd(ctx, vm, nicnum, adapter, args):
    if len(args) == 1:
        r = '%d kbps' % (adapter.lineSpeed)
        return (0, r)
    else:
        if not args[1].isdigit():
            print '%s isn\'t a number' % (args[1])
            return (1, None)
        adapter.lineSpeed = int(args[1])
        return (0, None)
def nicCableSubCmd(ctx, vm, nicnum, adapter, args):
'''
usage: nic <vm> <nicnum> cable [on|off]
'''
return nicSwitchOnOff(adapter, 'cableConnected', args)
def nicEnableSubCmd(ctx, vm, nicnum, adapter, args):
'''
usage: nic <vm> <nicnum> enable [on|off]
'''
return nicSwitchOnOff(adapter, 'enabled', args)
def nicTypeSubCmd(ctx, vm, nicnum, adapter, args):
'''
    usage: nic <vm> <nicnum> type [Am79C970A|Am79C973|I82540EM|I82545EM|I82543GC|Virtio]
'''
if len(args) == 1:
nictypes = ctx['const'].all_values('NetworkAdapterType')
for key in nictypes.keys():
if str(adapter.adapterType) == str(nictypes[key]):
return (0, str(key))
return (1, None)
else:
nictypes = ctx['const'].all_values('NetworkAdapterType')
if args[1] not in nictypes.keys():
print '%s not in acceptable values (%s)' % (args[1], nictypes.keys())
return (1, None)
adapter.adapterType = nictypes[args[1]]
return (0, None)
def nicAttachmentSubCmd(ctx, vm, nicnum, adapter, args):
'''
    usage: nic <vm> <nicnum> attachment [Null|NAT|Bridged <interface>|Internal <name>|HostOnly <interface>]
'''
if len(args) == 1:
nicAttachmentType = {
ctx['global'].constants.NetworkAttachmentType_Null: ('Null', ''),
ctx['global'].constants.NetworkAttachmentType_NAT: ('NAT', ''),
ctx['global'].constants.NetworkAttachmentType_Bridged: ('Bridged', adapter.bridgedInterface),
ctx['global'].constants.NetworkAttachmentType_Internal: ('Internal', adapter.internalNetwork),
ctx['global'].constants.NetworkAttachmentType_HostOnly: ('HostOnly', adapter.hostOnlyInterface),
# @todo show details of the generic network attachment type
ctx['global'].constants.NetworkAttachmentType_Generic: ('Generic', ''),
}
import types
if type(adapter.attachmentType) != types.IntType:
t = str(adapter.attachmentType)
else:
t = adapter.attachmentType
(r, p) = nicAttachmentType[t]
return (0, 'attachment:%s, name:%s' % (r, p))
else:
nicAttachmentType = {
'Null': {
'v': lambda: len(args) == 2,
'p': lambda: 'do nothing',
'f': lambda: ctx['global'].constants.NetworkAttachmentType_Null},
'NAT': {
'v': lambda: len(args) == 2,
'p': lambda: 'do nothing',
'f': lambda: ctx['global'].constants.NetworkAttachmentType_NAT},
'Bridged': {
'v': lambda: len(args) == 3,
'p': lambda: adapter.__setattr__('bridgedInterface', args[2]),
'f': lambda: ctx['global'].constants.NetworkAttachmentType_Bridged},
'Internal': {
'v': lambda: len(args) == 3,
'p': lambda: adapter.__setattr__('internalNetwork', args[2]),
'f': lambda: ctx['global'].constants.NetworkAttachmentType_Internal},
            'HostOnly': {
                'v': lambda: len(args) == 3,
                'p': lambda: setattr(adapter, 'hostOnlyInterface', args[2]),
                'f': lambda: ctx['global'].constants.NetworkAttachmentType_HostOnly},
# @todo implement setting the properties of a generic attachment
'Generic': {
'v': lambda: len(args) == 3,
'p': lambda: 'do nothing',
'f': lambda: ctx['global'].constants.NetworkAttachmentType_Generic}
}
if args[1] not in nicAttachmentType.keys():
print '%s not in acceptable values (%s)' % (args[1], nicAttachmentType.keys())
return (1, None)
if not nicAttachmentType[args[1]]['v']():
            print nicAttachmentSubCmd.__doc__
return (1, None)
nicAttachmentType[args[1]]['p']()
adapter.attachmentType = nicAttachmentType[args[1]]['f']()
return (0, None)
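# e.g. "nic Win32 0 attachment Bridged eth0" bridges adapter 0 to the host's
# eth0, and "nic Win32 0 attachment NAT" switches it back to NAT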
def nicCmd(ctx, args):
    '''
    This command manages network adapters
    usage: nic <vm> <nicnum> <cmd> <cmd-args>
    where cmd : attachment, trace, linespeed, cable, enable, type
    '''
    # 'command name': handler function
niccomand = {
'attachment': nicAttachmentSubCmd,
'trace': nicTraceSubCmd,
'linespeed': nicLineSpeedSubCmd,
'cable': nicCableSubCmd,
'enable': nicEnableSubCmd,
'type': nicTypeSubCmd
}
    if len(args) < 4 \
        or args[1] == 'help' \
        or args[3] not in niccomand:
if len(args) == 3 \
and args[2] in niccomand:
print niccomand[args[2]].__doc__
else:
print nicCmd.__doc__
return 0
vm = ctx['argsToMach'](args)
if vm is None:
print 'please specify vm'
return 0
    if not args[2].isdigit() \
        or int(args[2]) not in range(0, ctx['vb'].systemProperties.getMaxNetworkAdapters(vm.chipsetType)):
        print 'invalid adapter number %s, not in range [0-%d]' % (args[2], ctx['vb'].systemProperties.getMaxNetworkAdapters(vm.chipsetType))
return 0
nicnum = int(args[2])
cmdargs = args[3:]
func = args[3]
    session = ctx['global'].openMachineSession(vm)
vm = session.machine
adapter = vm.getNetworkAdapter(nicnum)
(rc, report) = niccomand[func](ctx, vm, nicnum, adapter, cmdargs)
if rc == 0:
vm.saveSettings()
if report is not None:
print '%s nic %d %s: %s' % (vm.name, nicnum, args[3], report)
session.unlockMachine()
return 0
def promptCmd(ctx, args):
if len(args) < 2:
print "Current prompt: '%s'" % (ctx['prompt'])
return 0
ctx['prompt'] = args[1]
return 0
def foreachCmd(ctx, args):
if len(args) < 3:
print "usage: foreach scope command, where scope is XPath-like expression //vms/vm[@CPUCount='2']"
return 0
scope = args[1]
cmd = args[2]
elems = eval_xpath(ctx, scope)
try:
for e in elems:
e.apply(cmd)
except:
print "Error executing"
traceback.print_exc()
return 0
def foreachvmCmd(ctx, args):
if len(args) < 2:
print "foreachvm command <args>"
return 0
cmdargs = args[1:]
cmdargs.insert(1, '')
for mach in getMachines(ctx):
cmdargs[1] = mach.id
runCommandArgs(ctx, cmdargs)
return 0
def recordDemoCmd(ctx, args):
if (len(args) < 3):
print "usage: recordDemo vm filename (duration)"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
filename = args[2]
dur = 10000
if len(args) > 3:
dur = float(args[3])
cmdExistingVm(ctx, mach, 'guestlambda', [lambda ctx, mach, console, args: recordDemo(ctx, console, filename, dur)])
return 0
def playbackDemoCmd(ctx, args):
if (len(args) < 3):
print "usage: playbackDemo vm filename (duration)"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
filename = args[2]
dur = 10000
if len(args) > 3:
dur = float(args[3])
cmdExistingVm(ctx, mach, 'guestlambda', [lambda ctx, mach, console, args: playbackDemo(ctx, console, filename, dur)])
return 0
def pciAddr(ctx, addr):
strg = "%02x:%02x.%d" % (addr >> 8, (addr & 0xff) >> 3, addr & 7)
return colPci(ctx, strg)
def lspci(ctx, console):
assigned = ctx['global'].getArray(console.machine, 'PCIDeviceAssignments')
for a in assigned:
if a.isPhysicalDevice:
print "%s: assigned host device %s guest %s" % (colDev(ctx, a.name), pciAddr(ctx, a.hostAddress), pciAddr(ctx, a.guestAddress))
atts = ctx['global'].getArray(console, 'attachedPCIDevices')
for a in atts:
if a.isPhysicalDevice:
print "%s: physical, guest %s, host %s" % (colDev(ctx, a.name), pciAddr(ctx, a.guestAddress), pciAddr(ctx, a.hostAddress))
else:
print "%s: virtual, guest %s" % (colDev(ctx, a.name), pciAddr(ctx, a.guestAddress))
return
def parsePci(strg):
pcire = re.compile(r'(?P<b>[0-9a-fA-F]+):(?P<d>[0-9a-fA-F]+)\.(?P<f>\d)')
match = pcire.search(strg)
if match is None:
return -1
pdict = match.groupdict()
return ((int(pdict['b'], 16)) << 8) | ((int(pdict['d'], 16)) << 3) | int(pdict['f'])
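# Illustrative round trip for the two PCI helpers above:
#   parsePci("01:02.3") == (1 << 8) | (2 << 3) | 3 == 0x113
#   pciAddr(ctx, 0x113) renders back as "01:02.3"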
def lspciCmd(ctx, args):
if (len(args) < 2):
print "usage: lspci vm"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
cmdExistingVm(ctx, mach, 'guestlambda', [lambda ctx, mach, console, args: lspci(ctx, console)])
return 0
def attachpciCmd(ctx, args):
if (len(args) < 3):
print "usage: attachpci vm hostpci <guestpci>"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
hostaddr = parsePci(args[2])
if hostaddr == -1:
print "invalid host PCI %s, accepted format 01:02.3 for bus 1, device 2, function 3" % (args[2])
return 0
if (len(args) > 3):
guestaddr = parsePci(args[3])
if guestaddr == -1:
print "invalid guest PCI %s, accepted format 01:02.3 for bus 1, device 2, function 3" % (args[3])
return 0
else:
guestaddr = hostaddr
cmdClosedVm(ctx, mach, lambda ctx, mach, a: mach.attachHostPCIDevice(hostaddr, guestaddr, True))
return 0
def detachpciCmd(ctx, args):
if (len(args) < 3):
print "usage: detachpci vm hostpci"
return 0
mach = argsToMach(ctx, args)
if mach == None:
return 0
hostaddr = parsePci(args[2])
if hostaddr == -1:
print "invalid host PCI %s, accepted format 01:02.3 for bus 1, device 2, function 3" % (args[2])
return 0
cmdClosedVm(ctx, mach, lambda ctx, mach, a: mach.detachHostPCIDevice(hostaddr))
return 0
def gotoCmd(ctx, args):
if (len(args) < 2):
print "usage: goto line"
return 0
line = int(args[1])
ctx['scriptLine'] = line
return 0
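# goto rewinds runScript's line counter, so scripts can loop: "goto 0" makes
# the next iteration re-execute the script from its first line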
aliases = {'s':'start',
'i':'info',
'l':'list',
'h':'help',
'a':'alias',
'q':'quit', 'exit':'quit',
'tg': 'typeGuest',
'v':'verbose'}
commands = {'help':['Prints help information', helpCmd, 0],
'start':['Start virtual machine by name or uuid: start Linux headless', startCmd, 0],
'createVm':['Create virtual machine: createVm macvm MacOS', createVmCmd, 0],
'removeVm':['Remove virtual machine', removeVmCmd, 0],
'pause':['Pause virtual machine', pauseCmd, 0],
'resume':['Resume virtual machine', resumeCmd, 0],
'save':['Save execution state of virtual machine', saveCmd, 0],
'stats':['Stats for virtual machine', statsCmd, 0],
'powerdown':['Power down virtual machine', powerdownCmd, 0],
'powerbutton':['Effectively press power button', powerbuttonCmd, 0],
'list':['Shows known virtual machines', listCmd, 0],
'info':['Shows info on machine', infoCmd, 0],
'ginfo':['Shows info on guest', ginfoCmd, 0],
'gexec':['Executes program in the guest', gexecCmd, 0],
'gcopy':['Copy file to the guest', gcopyCmd, 0],
'gpipe':['Pipe between host and guest', gpipeCmd, 0],
'alias':['Control aliases', aliasCmd, 0],
'verbose':['Toggle verbosity', verboseCmd, 0],
            'setvar':['Set VM variable: setvar Fedora BIOSSettings.ACPIEnabled True', setvarCmd, 0],
'eval':['Evaluate arbitrary Python construction: eval \'for m in getMachines(ctx): print m.name, "has", m.memorySize, "M"\'', evalCmd, 0],
'quit':['Exits', quitCmd, 0],
'host':['Show host information', hostCmd, 0],
'guest':['Execute command for guest: guest Win32 \'console.mouse.putMouseEvent(20, 20, 0, 0, 0)\'', guestCmd, 0],
'monitorGuest':['Monitor what happens with the guest for some time: monitorGuest Win32 10', monitorGuestCmd, 0],
'monitorGuestKbd':['Monitor guest keyboard for some time: monitorGuestKbd Win32 10', monitorGuestKbdCmd, 0],
'monitorGuestMouse':['Monitor guest mouse for some time: monitorGuestMouse Win32 10', monitorGuestMouseCmd, 0],
'monitorGuestMultiTouch':['Monitor guest touch screen for some time: monitorGuestMultiTouch Win32 10', monitorGuestMultiTouchCmd, 0],
'monitorVBox':['Monitor what happens with Virtual Box for some time: monitorVBox 10', monitorVBoxCmd, 0],
'portForward':['Setup permanent port forwarding for a VM, takes adapter number host port and guest port: portForward Win32 0 8080 80', portForwardCmd, 0],
            'showLog':['Show log file of the VM: showLog Win32', showLogCmd, 0],
            'findLog':['Show entries matching pattern in log file of the VM: findLog Win32 PDM|CPUM', findLogCmd, 0],
            'findAssert':['Find assert in log file of the VM: findAssert Win32', findAssertCmd, 0],
'reloadExt':['Reload custom extensions: reloadExt', reloadExtCmd, 0],
'runScript':['Run VBox script: runScript script.vbox', runScriptCmd, 0],
'sleep':['Sleep for specified number of seconds: sleep 3.14159', sleepCmd, 0],
'shell':['Execute external shell command: shell "ls /etc/rc*"', shellCmd, 0],
'exportVm':['Export VM in OVF format: exportVm Win /tmp/win.ovf', exportVMCmd, 0],
'screenshot':['Take VM screenshot to a file: screenshot Win /tmp/win.png 1024 768 0', screenshotCmd, 0],
'teleport':['Teleport VM to another box (see openportal): teleport Win anotherhost:8000 <passwd> <maxDowntime>', teleportCmd, 0],
'typeGuest':['Type arbitrary text in guest: typeGuest Linux "^lls\\n&UP;&BKSP;ess /etc/hosts\\nq^c" 0.7', typeGuestCmd, 0],
'openportal':['Open portal for teleportation of VM from another box (see teleport): openportal Win 8000 <passwd>', openportalCmd, 0],
'closeportal':['Close teleportation portal (see openportal, teleport): closeportal Win', closeportalCmd, 0],
'getextra':['Get extra data, empty key lists all: getextra <vm|global> <key>', getExtraDataCmd, 0],
'setextra':['Set extra data, empty value removes key: setextra <vm|global> <key> <value>', setExtraDataCmd, 0],
'gueststats':['Print available guest stats (only Windows guests with additions so far): gueststats Win32', gueststatsCmd, 0],
'plugcpu':['Add a CPU to a running VM: plugcpu Win 1', plugcpuCmd, 0],
'unplugcpu':['Remove a CPU from a running VM (additions required, Windows cannot unplug): unplugcpu Linux 1', unplugcpuCmd, 0],
'createHdd': ['Create virtual HDD: createHdd 1000 /disk.vdi ', createHddCmd, 0],
'removeHdd': ['Permanently remove virtual HDD: removeHdd /disk.vdi', removeHddCmd, 0],
'registerHdd': ['Register HDD image with VirtualBox instance: registerHdd /disk.vdi', registerHddCmd, 0],
'unregisterHdd': ['Unregister HDD image with VirtualBox instance: unregisterHdd /disk.vdi', unregisterHddCmd, 0],
'attachHdd': ['Attach HDD to the VM: attachHdd win /disk.vdi "IDE Controller" 0:1', attachHddCmd, 0],
'detachHdd': ['Detach HDD from the VM: detachHdd win /disk.vdi', detachHddCmd, 0],
'registerIso': ['Register CD/DVD image with VirtualBox instance: registerIso /os.iso', registerIsoCmd, 0],
'unregisterIso': ['Unregister CD/DVD image with VirtualBox instance: unregisterIso /os.iso', unregisterIsoCmd, 0],
'removeIso': ['Permanently remove CD/DVD image: removeIso /os.iso', removeIsoCmd, 0],
'attachIso': ['Attach CD/DVD to the VM: attachIso win /os.iso "IDE Controller" 0:1', attachIsoCmd, 0],
'detachIso': ['Detach CD/DVD from the VM: detachIso win /os.iso', detachIsoCmd, 0],
'mountIso': ['Mount CD/DVD to the running VM: mountIso win /os.iso "IDE Controller" 0:1', mountIsoCmd, 0],
'unmountIso': ['Unmount CD/DVD from running VM: unmountIso win "IDE Controller" 0:1', unmountIsoCmd, 0],
'attachCtr': ['Attach storage controller to the VM: attachCtr win Ctr0 IDE ICH6', attachCtrCmd, 0],
            'detachCtr': ['Detach storage controller from the VM: detachCtr win Ctr0', detachCtrCmd, 0],
'attachUsb': ['Attach USB device to the VM (use listUsb to show available devices): attachUsb win uuid', attachUsbCmd, 0],
'detachUsb': ['Detach USB device from the VM: detachUsb win uuid', detachUsbCmd, 0],
'listMedia': ['List media known to this VBox instance', listMediaCmd, 0],
'listUsb': ['List known USB devices', listUsbCmd, 0],
'shareFolder': ['Make host\'s folder visible to guest: shareFolder win /share share writable', shareFolderCmd, 0],
'unshareFolder': ['Remove folder sharing', unshareFolderCmd, 0],
'gui': ['Start GUI frontend', guiCmd, 0],
'colors':['Toggle colors', colorsCmd, 0],
'snapshot':['VM snapshot manipulation, snapshot help for more info', snapshotCmd, 0],
'nat':['NAT (network address translation engine) manipulation, nat help for more info', natCmd, 0],
'nic' : ['Network adapter management', nicCmd, 0],
'prompt' : ['Control shell prompt', promptCmd, 0],
'foreachvm' : ['Perform command for each VM', foreachvmCmd, 0],
'foreach' : ['Generic "for each" construction, using XPath-like notation: foreach //vms/vm[@OSTypeId=\'MacOS\'] "print obj.name"', foreachCmd, 0],
'recordDemo':['Record demo: recordDemo Win32 file.dmo 10', recordDemoCmd, 0],
'playbackDemo':['Playback demo: playbackDemo Win32 file.dmo 10', playbackDemoCmd, 0],
'lspci': ['List PCI devices attached to the VM: lspci Win32', lspciCmd, 0],
'attachpci': ['Attach host PCI device to the VM: attachpci Win32 01:00.0', attachpciCmd, 0],
'detachpci': ['Detach host PCI device from the VM: detachpci Win32 01:00.0', detachpciCmd, 0],
'goto': ['Go to line in script (script-only)', gotoCmd, 0]
}
def runCommandArgs(ctx, args):
c = args[0]
    if aliases.get(c) is not None:
        c = aliases[c]
    ci = commands.get(c)
    if ci is None:
print "Unknown command: '%s', type 'help' for list of known commands" % (c)
return 0
if ctx['remote'] and ctx['vb'] is None:
if c not in ['connect', 'reconnect', 'help', 'quit']:
print "First connect to remote server with %s command." % (colored('connect', 'blue'))
return 0
return ci[1](ctx, args)
def runCommand(ctx, cmd):
if len(cmd) == 0: return 0
args = split_no_quotes(cmd)
if len(args) == 0: return 0
return runCommandArgs(ctx, args)
#
# To write your own custom commands to vboxshell, create
# file ~/.VirtualBox/shellext.py with content like
#
# def runTestCmd(ctx, args):
# print "Testy test", ctx['vb']
# return 0
#
# commands = {
# 'test': ['Test help', runTestCmd]
# }
# and issue reloadExt shell command.
# This file also will be read automatically on startup or 'reloadExt'.
#
# Also one can put shell extensions into ~/.VirtualBox/shexts and
# they will also be picked up, so this way one can exchange
# shell extensions easily.
def addExtsFromFile(ctx, cmds, filename):
if not os.path.isfile(filename):
return
d = {}
try:
execfile(filename, d, d)
for (k, v) in d['commands'].items():
if g_fVerbose:
print "customize: adding \"%s\" - %s" % (k, v[0])
cmds[k] = [v[0], v[1], filename]
except:
print "Error loading user extensions from %s" % (filename)
traceback.print_exc()
def checkUserExtensions(ctx, cmds, folder):
folder = str(folder)
name = os.path.join(folder, "shellext.py")
addExtsFromFile(ctx, cmds, name)
# also check 'exts' directory for all files
shextdir = os.path.join(folder, "shexts")
if not os.path.isdir(shextdir):
return
exts = os.listdir(shextdir)
for e in exts:
# not editor temporary files, please.
if e.endswith('.py'):
addExtsFromFile(ctx, cmds, os.path.join(shextdir, e))
def getHomeFolder(ctx):
if ctx['remote'] or ctx['vb'] is None:
if 'VBOX_USER_HOME' in os.environ:
return os.path.join(os.environ['VBOX_USER_HOME'])
return os.path.join(os.path.expanduser("~"), ".VirtualBox")
else:
return ctx['vb'].homeFolder
def interpret(ctx):
if ctx['remote']:
commands['connect'] = ["Connect to remote VBox instance: connect http://server:18083 user password", connectCmd, 0]
commands['disconnect'] = ["Disconnect from remote VBox instance", disconnectCmd, 0]
commands['reconnect'] = ["Reconnect to remote VBox instance", reconnectCmd, 0]
ctx['wsinfo'] = ["http://localhost:18083", "", ""]
vbox = ctx['vb']
if vbox is not None:
try:
print "Running VirtualBox version %s" % (vbox.version)
except Exception, e:
printErr(ctx, e)
if g_fVerbose:
traceback.print_exc()
ctx['perf'] = None # ctx['global'].getPerfCollector(vbox)
else:
ctx['perf'] = None
home = getHomeFolder(ctx)
checkUserExtensions(ctx, commands, home)
if platform.system() in ['Windows', 'Microsoft']:
global g_fHasColors
g_fHasColors = False
hist_file = os.path.join(home, ".vboxshellhistory")
autoCompletion(commands, ctx)
if g_fHasReadline and os.path.exists(hist_file):
readline.read_history_file(hist_file)
    # To allow printing actual host information, we collect info for the
    # last 150 secs at most (sample every 10 secs and keep up to 15 samples)
if ctx['perf']:
try:
ctx['perf'].setup(['*'], [vbox.host], 10, 15)
except:
pass
cmds = []
if g_sCmd is not None:
cmds = g_sCmd.split(';')
it = cmds.__iter__()
while True:
try:
if g_fBatchMode:
cmd = 'runScript %s'% (g_sScriptFile)
elif g_sCmd is not None:
cmd = it.next()
else:
cmd = raw_input(ctx['prompt'])
done = runCommand(ctx, cmd)
if done != 0: break
if g_fBatchMode:
break
except KeyboardInterrupt:
print '====== You can type quit or q to leave'
except StopIteration:
break
except EOFError:
break
except Exception, e:
printErr(ctx, e)
if g_fVerbose:
traceback.print_exc()
ctx['global'].waitForEvents(0)
try:
# There is no need to disable metric collection. This is just an example.
        if ctx['perf']:
ctx['perf'].disable(['*'], [vbox.host])
except:
pass
if g_fHasReadline:
readline.write_history_file(hist_file)
def runCommandCb(ctx, cmd, args):
args.insert(0, cmd)
return runCommandArgs(ctx, args)
def runGuestCommandCb(ctx, uuid, guestLambda, args):
mach = machById(ctx, uuid)
    if mach is None:
return 0
args.insert(0, guestLambda)
cmdExistingVm(ctx, mach, 'guestlambda', args)
return 0
def main(argv):
#
# Parse command line arguments.
#
parse = OptionParser()
parse.add_option("-v", "--verbose", dest="verbose", action="store_true", default=False, help = "switch on verbose")
parse.add_option("-a", "--autopath", dest="autopath", action="store_true", default=False, help = "switch on autopath")
parse.add_option("-w", "--webservice", dest="style", action="store_const", const="WEBSERVICE", help = "connect to webservice")
parse.add_option("-b", "--batch", dest="batch_file", help = "script file to execute")
parse.add_option("-c", dest="command_line", help = "command sequence to execute")
parse.add_option("-o", dest="opt_line", help = "option line")
global g_fVerbose, g_sScriptFile, g_fBatchMode, g_fHasColors, g_fHasReadline, g_sCmd
(options, args) = parse.parse_args()
g_fVerbose = options.verbose
style = options.style
if options.batch_file is not None:
g_fBatchMode = True
g_fHasColors = False
g_fHasReadline = False
g_sScriptFile = options.batch_file
if options.command_line is not None:
g_fHasColors = False
g_fHasReadline = False
g_sCmd = options.command_line
params = None
if options.opt_line is not None:
params = {}
strparams = options.opt_line
strparamlist = strparams.split(',')
for strparam in strparamlist:
(key, value) = strparam.split('=')
params[key] = value
if options.autopath:
asLocations = [ os.getcwd(), ];
try: sScriptDir = os.path.dirname(os.path.abspath(__file__));
except: pass; # In case __file__ isn't there.
else:
if platform.system() in [ 'SunOS', ]:
asLocations.append(os.path.join(sScriptDir, 'amd64'));
asLocations.append(sScriptDir);
sPath = os.environ.get("VBOX_PROGRAM_PATH")
if sPath is None:
for sCurLoc in asLocations:
if os.path.isfile(os.path.join(sCurLoc, "VirtualBox")) \
or os.path.isfile(os.path.join(sCurLoc, "VirtualBox.exe")):
print "Autodetected VBOX_PROGRAM_PATH as", sCurLoc
os.environ["VBOX_PROGRAM_PATH"] = sCurLoc
sPath = sCurLoc
break;
if sPath:
sys.path.append(os.path.join(sPath, "sdk", "installer"))
sPath = os.environ.get("VBOX_SDK_PATH")
if sPath is None:
for sCurLoc in asLocations:
if os.path.isfile(os.path.join(sCurLoc, "sdk", "bindings", "VirtualBox.xidl")):
print "Autodetected VBOX_SDK_PATH as", sCurLoc
os.environ["VBOX_SDK_PATH"] = sCurLoc
sPath = sCurLoc;
break;
if sPath:
            # Use sPath here: sCurLoc may be unset or stale when VBOX_SDK_PATH came from the environment.
            sTmp = os.path.join(sPath, 'sdk', 'bindings', 'xpcom', 'python');
if os.path.isdir(sTmp):
sys.path.append(sTmp);
del sTmp;
del sPath, asLocations;
#
    # Set up the shell interpreter context and enter the command loop.
#
from vboxapi import VirtualBoxManager
oVBoxMgr = VirtualBoxManager(style, params)
ctx = {
'global': oVBoxMgr,
'vb': oVBoxMgr.vbox,
'const': oVBoxMgr.constants,
'remote': oVBoxMgr.remote,
'type': oVBoxMgr.type,
'run': lambda cmd, args: runCommandCb(ctx, cmd, args),
'guestlambda': lambda uuid, guestLambda, args: runGuestCommandCb(ctx, uuid, guestLambda, args),
'machById': lambda uuid: machById(ctx, uuid),
'argsToMach': lambda args: argsToMach(ctx, args),
'progressBar': lambda p: progressBar(ctx, p),
'typeInGuest': typeInGuest,
'_machlist': None,
'prompt': g_sPrompt,
'scriptLine': 0,
'interrupt': False,
}
interpret(ctx)
oVBoxMgr.deinit()
del oVBoxMgr
if __name__ == '__main__':
main(sys.argv)
|
ruibarreira/linuxtrail
|
usr/lib/virtualbox/vboxshell.py
|
Python
|
gpl-3.0
| 120,819
| 0.00591
|
import glob
json_files = glob.glob("tests/**/output/**/*.json", recursive=True)
html_files = glob.glob("tests/**/output/**/*.html", recursive=True)
html_list = ""
for f_ in html_files:
    html_list += '\t<li><a href="{}">{}</a></li>\n'.format(
f_[6:],
f_.split(".")[-2],
)
json_list = ""
for f_ in json_files:
    json_list += '\t<li><a href="{}">{}</a></li>\n'.format(
f_[6:],
f_.split(".")[-2],
)
html_file = """
<html>
<body>
<h3>HTML</h3>
<ul>
{}
</ul>
<br/><br/>
<h3>JSON</h3>
<ul>
{}
</ul>
</body>
</html>
""".format(
html_list, json_list
)
print(html_file)
|
great-expectations/great_expectations
|
tests/build_index_page.py
|
Python
|
apache-2.0
| 628
| 0
|
from models import Comment
from ..user.serializers import UserProfileSerializer
from rest_framework import serializers
class CommentSerializer(serializers.ModelSerializer):
username = serializers.SerializerMethodField()
class Meta:
model = Comment
fields = ("id","user","username", "topic","comment","comment_html", "action", "date","is_removed","is_modified","ip_address",
"modified_count","likes_count")
read_only_fields = ("user","comment_html","action","date","is_removed","is_modified","modified_count","likes_count")
    def get_username(self, obj):
        return obj.user.username

    def create(self, **kwargs):
        comment = Comment.objects.create(user=kwargs['user'], **self.validated_data)
        return comment
|
shriyanka/daemo-forum
|
spirit/comment/serializers.py
|
Python
|
mit
| 715
| 0.051748
|
import os.path
import pkgutil
from unittest import TestCase, TestSuite
class TestImports(TestCase):
def __init__(self, mod_name, import_error):
name = f"test_{mod_name.replace('.', '_')}_import"
def run():
try:
__import__(mod_name)
except ImportError as e:
self.assertIsNotNone(import_error)
self.assertEqual(e.msg, import_error)
setattr(self, name, run)
super().__init__(name)
def load_tests(*_args):
expected_exceptions = {
"blueman.main.NetworkManager": "NM python bindings not found.",
"blueman.main.PulseAudioUtils": "Could not load pulseaudio shared library",
}
test_cases = TestSuite()
home, subpath = os.path.dirname(__file__).rsplit("/test/", 1)
for package in pkgutil.iter_modules([f"{home}/blueman/{subpath}"], f"blueman.{subpath.replace('/', '.')}."):
test_cases.addTest(TestImports(package.name, expected_exceptions.get(package.name)))
assert test_cases.countTestCases() > 0
return test_cases
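
# unittest's load_tests protocol invokes this module-level hook as
# load_tests(loader, tests, pattern) during discovery, so a plain
# `python -m unittest discover` picks up the generated import tests.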
|
blueman-project/blueman
|
test/main/test_imports.py
|
Python
|
gpl-3.0
| 1,075
| 0.002791
|
# coding=utf-8
#
# Copyright 2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
REST URI
``http://localhost/mgmt/cm/device/licensing/pool/regkey``
REST Kind
N/A -- HTTP GET returns an error
"""
from f5.bigiq.cm.device.licensing.pool import Pool
from f5.bigiq.resource import OrganizingCollection
class Licensing(OrganizingCollection):
def __init__(self, device):
super(Licensing, self).__init__(device)
self._meta_data['allowed_lazy_attributes'] = [
Pool
]
|
F5Networks/f5-common-python
|
f5/bigiq/cm/device/licensing/__init__.py
|
Python
|
apache-2.0
| 1,033
| 0
|
# -*- coding: utf-8 -*-
#
# Copyright 2018 BhaaL
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Convert flat XML files to Gettext PO localization files.
See: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/flatxml2po.html
for examples and usage instructions.
"""
from translate.convert import convert
from translate.storage import flatxml, po
class flatxml2po:
"""Convert a single XML file to a single PO file."""
SourceStoreClass = flatxml.FlatXMLFile
TargetStoreClass = po.pofile
TargetUnitClass = po.pounit
def __init__(self, inputfile, outputfile, templatefile=None,
root="root", value="str", key="key", ns=None):
"""Initialize the converter."""
self.inputfile = inputfile
self.outputfile = outputfile
self.source_store = self.SourceStoreClass(inputfile,
root_name=root,
value_name=value,
key_name=key,
namespace=ns)
self.target_store = self.TargetStoreClass()
def convert_unit(self, unit):
"""Convert a source format unit to a target format unit."""
target_unit = self.TargetUnitClass.buildfromunit(unit)
return target_unit
def convert_store(self):
"""Convert a single source file to a target format file."""
for source_unit in self.source_store.units:
self.target_store.addunit(self.convert_unit(source_unit))
def run(self):
"""Run the converter."""
self.convert_store()
if self.target_store.isempty():
return 0
self.target_store.serialize(self.outputfile)
return 1
def run_converter(inputfile, outputfile, templatefile=None,
root="root", value="str", key="key", ns=None):
"""Wrapper around the converter."""
return flatxml2po(inputfile, outputfile, templatefile,
root, value, key, ns).run()
formats = {
"xml": ("po", run_converter),
}
def main(argv=None):
parser = convert.ConvertOptionParser(formats,
description=__doc__)
parser.add_option("-r", "--root", action="store", dest="root",
default="root",
help='name of the XML root element (default: "root")')
parser.add_option("-v", "--value", action="store", dest="value",
default="str",
help='name of the XML value element (default: "str")')
parser.add_option("-k", "--key", action="store", dest="key",
default="key",
help='name of the XML key attribute (default: "key")')
parser.add_option("-n", "--namespace", action="store", dest="ns",
default=None,
help="XML namespace uri (default: None)")
parser.passthrough.append("root")
parser.passthrough.append("value")
parser.passthrough.append("key")
parser.passthrough.append("ns")
parser.run(argv)
if __name__ == "__main__":
main()
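
# Programmatic usage sketch (hypothetical file and element names; the convert
# framework normally drives this through formats/main() above):
#
#   with open("strings.xml", "rb") as fin, open("strings.po", "wb") as fout:
#       run_converter(fin, fout, root="resources", value="string", key="name")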
|
unho/translate
|
translate/convert/flatxml2po.py
|
Python
|
gpl-2.0
| 3,817
| 0.000262
|
# Made by mtrix
import sys
from com.l2scoria.gameserver.model.quest import State
from com.l2scoria.gameserver.model.quest import QuestState
from com.l2scoria.gameserver.model.quest.jython import QuestJython as JQuest
qn = "345_MethodToRaiseTheDead"
ADENA = 57
VICTIMS_ARM_BONE = 4274
VICTIMS_THIGH_BONE = 4275
VICTIMS_SKULL = 4276
VICTIMS_RIB_BONE = 4277
VICTIMS_SPINE = 4278
USELESS_BONE_PIECES = 4280
POWDER_TO_SUMMON_DEAD_SOULS = 4281
BILL_OF_IASON_HEINE = 4310
CHANCE = 15
CHANCE2 = 50
class Quest (JQuest) :
def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr)
def onEvent (self,event,st) :
htmltext = event
if event == "1" :
st.set("cond","1")
st.setState(STARTED)
htmltext = "30970-02.htm"
st.playSound("ItemSound.quest_accept")
elif event == "2" :
st.set("cond","2")
htmltext = "30970-06.htm"
elif event == "3" :
if st.getQuestItemsCount(ADENA)>=1000 :
st.takeItems(ADENA,1000)
st.giveItems(POWDER_TO_SUMMON_DEAD_SOULS,1)
st.set("cond","3")
htmltext = "30912-03.htm"
st.playSound("ItemSound.quest_itemget")
else :
htmltext = "<html><body>You dont have enough adena!</body></html>"
elif event == "4" :
htmltext = "30973-02.htm"
st.takeItems(POWDER_TO_SUMMON_DEAD_SOULS,-1)
st.takeItems(VICTIMS_ARM_BONE,-1)
st.takeItems(VICTIMS_THIGH_BONE,-1)
st.takeItems(VICTIMS_SKULL,-1)
st.takeItems(VICTIMS_RIB_BONE,-1)
st.takeItems(VICTIMS_SPINE,-1)
st.set("cond","6")
return htmltext
def onTalk (self,npc,player):
htmltext = "<html><body>You are either not carrying out your quest or don't meet the criteria.</body></html>"
st = player.getQuestState(qn)
if not st : return htmltext
npcId = npc.getNpcId()
id = st.getState()
if npcId != 30970 and id != STARTED : return htmltext
level = player.getLevel()
cond = st.getInt("cond")
amount = st.getQuestItemsCount(USELESS_BONE_PIECES)
if npcId==30970 :
if id == CREATED :
if level>=35 :
htmltext = "30970-01.htm"
else :
htmltext = "<html><body>(This is a quest that can only be performed by players of level 35 and above.)</body></html>"
st.exitQuest(1)
elif cond==1 and st.getQuestItemsCount(VICTIMS_ARM_BONE) and st.getQuestItemsCount(VICTIMS_THIGH_BONE) and st.getQuestItemsCount(VICTIMS_SKULL) and st.getQuestItemsCount(VICTIMS_RIB_BONE) and st.getQuestItemsCount(VICTIMS_SPINE) :
htmltext = "30970-05.htm"
elif cond==1 and (st.getQuestItemsCount(VICTIMS_ARM_BONE)+st.getQuestItemsCount(VICTIMS_THIGH_BONE)+st.getQuestItemsCount(VICTIMS_SKULL)+st.getQuestItemsCount(VICTIMS_RIB_BONE)+st.getQuestItemsCount(VICTIMS_SPINE)<5) :
htmltext = "30970-04.htm"
elif cond==7 :
htmltext = "30970-07.htm"
st.set("cond","1")
st.giveItems(ADENA,amount*238)
st.giveItems(BILL_OF_IASON_HEINE,st.getRandom(7)+1)
st.takeItems(USELESS_BONE_PIECES,-1)
if npcId==30912 :
if cond == 2 :
htmltext = "30912-01.htm"
st.playSound("ItemSound.quest_middle")
elif cond == 3 :
htmltext = "<html><body>What did the urn say?</body></html>"
elif cond == 6 :
htmltext = "30912-04.htm"
st.set("cond","7")
if npcId==30973 :
if cond==3 :
htmltext = "30973-01.htm"
return htmltext
def onKill(self,npc,player,isPet):
st = player.getQuestState(qn)
if not st : return
if st.getState() != STARTED : return
npcId = npc.getNpcId()
random = st.getRandom(100)
if random<=CHANCE :
if not st.getQuestItemsCount(VICTIMS_ARM_BONE) :
st.giveItems(VICTIMS_ARM_BONE,1)
elif not st.getQuestItemsCount(VICTIMS_THIGH_BONE) :
st.giveItems(VICTIMS_THIGH_BONE,1)
elif not st.getQuestItemsCount(VICTIMS_SKULL) :
st.giveItems(VICTIMS_SKULL,1)
elif not st.getQuestItemsCount(VICTIMS_RIB_BONE) :
st.giveItems(VICTIMS_RIB_BONE,1)
elif not st.getQuestItemsCount(VICTIMS_SPINE) :
st.giveItems(VICTIMS_SPINE,1)
if random<=CHANCE2 :
st.giveItems(USELESS_BONE_PIECES,st.getRandom(8)+1)
return
QUEST = Quest(345,qn,"Method To Raise The Dead")
CREATED = State('Start', QUEST)
STARTED = State('Started', QUEST)
QUEST.setInitialState(CREATED)
QUEST.addStartNpc(30970)
QUEST.addTalkId(30970)
QUEST.addTalkId(30912)
QUEST.addTalkId(30973)
STARTED.addQuestDrop(30970,VICTIMS_ARM_BONE,1)
STARTED.addQuestDrop(30970,VICTIMS_THIGH_BONE,1)
STARTED.addQuestDrop(30970,VICTIMS_SKULL,1)
STARTED.addQuestDrop(30970,VICTIMS_RIB_BONE,1)
STARTED.addQuestDrop(30970,VICTIMS_SPINE,1)
STARTED.addQuestDrop(30912,POWDER_TO_SUMMON_DEAD_SOULS,1)
QUEST.addKillId(20789)
QUEST.addKillId(20791)
|
zenn1989/scoria-interlude
|
L2Jscoria-Game/data/scripts/quests/345_MethodToRaiseTheDead/__init__.py
|
Python
|
gpl-3.0
| 5,090
| 0.0389
|
from django.db import models
from django.contrib.auth.models import User
from building.models import Building, Unit
# Create your models here.
class Listing(models.Model):
"""
An option to lease, rent, or sublease a specific Unit
"""
CYCLE_CHOICES = (
('year', 'Year'),
('month', 'Month'),
('week', 'Week'),
('day', 'Day'),
)
#who is listing the unit:
#person = models.ForeignKey(Person)
#might be better to just use a User account
#this should be required (setting blank and null to assist with migrations)
user = models.ForeignKey(User, blank=True, null=True)
#even though the building is available by way of the Unit
#it may be easier to look at building
#especially when limiting search results on a map
#
#also, it may be better to schedule a nightly task to update/cache
#the number of listings that are available in a building
#otherwise that could be an expensive search
#
#this should be required (setting blank and null to assist with migrations)
building = models.ForeignKey(Building, related_name="listings", blank=True, null=True)
#the unit available
#unit = models.ForeignKey(Unit, related_name="listings", blank=True, null=True)
unit = models.ForeignKey(Unit, related_name="listings")
#sublease, standard?
lease_type = models.CharField(max_length=200, default="Standard")
lease_term = models.CharField(max_length=200, default="12 Months")
active = models.BooleanField(default=True)
#duplicating available_start and rent on unit with current listing
#that will make database lookups simpler
#but it will require coordination when adding a new listing.
#optional
available_start = models.DateTimeField()
#might be useful for subleases:
available_end = models.DateTimeField()
#these may be duplicated at the unit level:
#aka rent? (previously cost)
rent = models.FloatField()
rent_cycle = models.CharField(max_length=10, choices=CYCLE_CHOICES, default="month")
deposit = models.FloatField()
description = models.TextField()
#are pets allowed? if so what kind?
#pets = models.CharField(max_length=200)
#what utilities are included: (to help estimate total cost)
#
#this is set at the building level
#should be consistent within a building,
#and that makes things easier to read if it's not duplicated here:
#TODO:
#application (to apply for lease)
#link to a default one for manager if available
#otherwise allow one to be attached?
#application = models.ForeignKey(BuildingDocument)
#TODO:
#allow photos *(more than 1)* to be submitted for the listing
#but associate them with the unit
added = models.DateTimeField('date published', auto_now_add=True)
updated = models.DateTimeField('date updated', auto_now=True)
|
City-of-Bloomington/green-rental
|
listing/models.py
|
Python
|
agpl-3.0
| 2,921
| 0.015063
|
import numpy as np
from scipy.io import netcdf_file
import bz2
import os
from fnmatch import fnmatch
from numba import jit
@jit
def binsum2D(data, i, j, Nx, Ny):
data_binned = np.zeros((Ny,Nx), dtype=data.dtype)
N = len(data)
for n in range(N):
data_binned[j[n],i[n]] += data[n]
return data_binned
class LatLonAggregator(object):
"""A class for aggregating L2 data into a gridded dataset."""
def __init__(self, dlon=1., dlat=1., lonlim=(-180,180), latlim=(-90,90)):
self.dlon = dlon
self.dlat = dlat
self.lonmin = lonlim[0]
self.lonmax = lonlim[1]
self.latmin = latlim[0]
self.latmax = latlim[1]
# define grids
self.lon = np.arange(self.lonmin, self.lonmax, dlon)
self.lat = np.arange(self.latmin, self.latmax, dlat)
self.Nx, self.Ny = len(self.lon), len(self.lat)
self.lonc = self.lon + self.dlon/2
self.latc = self.lat + self.dlat/2
    def binsum(self, data, lon, lat):
        """Bin the data into the lat-lon grid.

        Returns gridded dataset."""
        # np.digitize returns 1 for the first bin, so shift to zero-based cell
        # indices; points are assumed to fall within the grid limits.
        i = np.digitize(lon.ravel(), self.lon) - 1
        j = np.digitize(lat.ravel(), self.lat) - 1
        return binsum2D(data.ravel(), i, j, self.Nx, self.Ny)
def zeros(self, dtype=np.dtype('f4')):
return np.zeros((self.Ny, self.Nx), dtype=dtype)
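

if __name__ == "__main__":
    # Minimal usage sketch (synthetic points; not part of the original module):
    # sum unit weights per one-degree cell to obtain an observation-count map.
    agg = LatLonAggregator(dlon=1.0, dlat=1.0)
    lon = np.random.uniform(-179.0, 179.0, 10000)
    lat = np.random.uniform(-89.0, 89.0, 10000)
    counts = agg.binsum(np.ones(10000, dtype='f4'), lon, lat)
    print(counts.shape, counts.sum())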
|
rabernat/satdatatools
|
satdatatools/aggregator.py
|
Python
|
mit
| 1,349
| 0.005189
|
# -*- python -*-
# Copyright (C) 2009-2013 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import gdb
import os
import os.path
pythondir = '/usr/mips64-elf/share/gcc-4.8.4/python'
libdir = '/usr/mips64-elf/mips64-elf/lib/el'
# This file might be loaded when there is no current objfile. This
# can happen if the user loads it manually. In this case we don't
# update sys.path; instead we just hope the user managed to do that
# beforehand.
if gdb.current_objfile () is not None:
# Update module path. We want to find the relative path from libdir
# to pythondir, and then we want to apply that relative path to the
# directory holding the objfile with which this file is associated.
# This preserves relocatability of the gcc tree.
# Do a simple normalization that removes duplicate separators.
pythondir = os.path.normpath (pythondir)
libdir = os.path.normpath (libdir)
prefix = os.path.commonprefix ([libdir, pythondir])
# In some bizarre configuration we might have found a match in the
# middle of a directory name.
if prefix[-1] != '/':
prefix = os.path.dirname (prefix) + '/'
# Strip off the prefix.
pythondir = pythondir[len (prefix):]
libdir = libdir[len (prefix):]
# Compute the ".."s needed to get from libdir to the prefix.
dotdots = ('..' + os.sep) * len (libdir.split (os.sep))
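    # Worked example for the paths above: the common prefix is
    # '/usr/mips64-elf/', leaving libdir as 'mips64-elf/lib/el' (three
    # components), so dotdots becomes '../../../' and dir_ below resolves to
    # <objfile dir>/../../../share/gcc-4.8.4/python.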
objfile = gdb.current_objfile ().filename
dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir)
if not dir_ in sys.path:
sys.path.insert(0, dir_)
# Load the pretty-printers.
from libstdcxx.v6.printers import register_libstdcxx_printers
register_libstdcxx_printers (gdb.current_objfile ())
|
bryanperris/winN64dev
|
mips64-elf/mips64-elf/lib/el/libstdc++.a-gdb.py
|
Python
|
gpl-2.0
| 2,328
| 0.006873
|
def test_upgrade_atac_alignment_enrichment_quality_metric_1_2(
upgrader, atac_alignment_enrichment_quality_metric_1
):
value = upgrader.upgrade(
'atac_alignment_enrichment_quality_metric',
atac_alignment_enrichment_quality_metric_1,
current_version='1',
target_version='2',
)
assert value['schema_version'] == '2'
assert 'fri_blacklist' not in value
assert value['fri_exclusion_list'] == 0.0013046877081284722
|
ENCODE-DCC/encoded
|
src/encoded/tests/test_upgrade_atac_alignment_enrichment_quality_metric.py
|
Python
|
mit
| 467
| 0.002141
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
float_or_none,
unified_strdate,
)
class PornoVoisinesIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?pornovoisines\.com/videos/show/(?P<id>\d+)/(?P<display_id>[^/.]+)'
_TEST = {
'url': 'http://www.pornovoisines.com/videos/show/919/recherche-appartement.html',
'md5': '6f8aca6a058592ab49fe701c8ba8317b',
'info_dict': {
'id': '919',
'display_id': 'recherche-appartement',
'ext': 'mp4',
'title': 'Recherche appartement',
'description': 'md5:fe10cb92ae2dd3ed94bb4080d11ff493',
'thumbnail': r're:^https?://.*\.jpg$',
'upload_date': '20140925',
'duration': 120,
'view_count': int,
'average_rating': float,
'categories': ['Débutante', 'Débutantes', 'Scénario', 'Sodomie'],
'age_limit': 18,
'subtitles': {
'fr': [{
'ext': 'vtt',
}]
},
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
display_id = mobj.group('display_id')
settings_url = self._download_json(
'http://www.pornovoisines.com/api/video/%s/getsettingsurl/' % video_id,
video_id, note='Getting settings URL')['video_settings_url']
settings = self._download_json(settings_url, video_id)['data']
formats = []
for kind, data in settings['variants'].items():
if kind == 'HLS':
formats.extend(self._extract_m3u8_formats(
data, video_id, ext='mp4', entry_protocol='m3u8_native', m3u8_id='hls'))
elif kind == 'MP4':
for item in data:
formats.append({
'url': item['url'],
'height': item.get('height'),
'bitrate': item.get('bitrate'),
})
self._sort_formats(formats)
webpage = self._download_webpage(url, video_id)
title = self._og_search_title(webpage)
description = self._og_search_description(webpage)
# The webpage has a bug - there's no space between "thumb" and src=
thumbnail = self._html_search_regex(
r'<img[^>]+class=([\'"])thumb\1[^>]*src=([\'"])(?P<url>[^"]+)\2',
webpage, 'thumbnail', fatal=False, group='url')
upload_date = unified_strdate(self._search_regex(
r'Le\s*<b>([\d/]+)', webpage, 'upload date', fatal=False))
duration = settings.get('main', {}).get('duration')
view_count = int_or_none(self._search_regex(
r'(\d+) vues', webpage, 'view count', fatal=False))
average_rating = self._search_regex(
r'Note\s*:\s*(\d+(?:,\d+)?)', webpage, 'average rating', fatal=False)
if average_rating:
average_rating = float_or_none(average_rating.replace(',', '.'))
categories = self._html_search_regex(
r'(?s)Catégories\s*:\s*<b>(.+?)</b>', webpage, 'categories', fatal=False)
if categories:
categories = [category.strip() for category in categories.split(',')]
subtitles = {'fr': [{
'url': subtitle,
} for subtitle in settings.get('main', {}).get('vtt_tracks', {}).values()]}
return {
'id': video_id,
'display_id': display_id,
'formats': formats,
'title': title,
'description': description,
'thumbnail': thumbnail,
'upload_date': upload_date,
'duration': duration,
'view_count': view_count,
'average_rating': average_rating,
'categories': categories,
'age_limit': 18,
'subtitles': subtitles,
}
|
epitron/youtube-dl
|
youtube_dl/extractor/pornovoisines.py
|
Python
|
unlicense
| 4,003
| 0.002001
|
# -*- coding: utf-8 -*-
# Copyright 2015 AvanzOsc (http://www.avanzosc.es)
# Copyright 2015-2016 - Pedro M. Baeza <pedro.baeza@tecnativa.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl)
from odoo import api, models
class PurchaseOrder(models.Model):
_inherit = 'purchase.order'
@api.model
def search(self, args, offset=0, limit=None, order=None, count=False):
make_po_conditions = {
'partner_id', 'state', 'picking_type_id', 'company_id',
'dest_address_id',
}
# Restrict the empty return for these conditions
if (self.env.context and
self.env.context.get('grouping', 'standard') == 'order' and
make_po_conditions.issubset(set(x[0] for x in args))):
return self.browse()
return super(PurchaseOrder, self).search(
args, offset=offset, limit=limit, order=order, count=count)
class PurchaseOrderLine(models.Model):
_inherit = 'purchase.order.line'
@api.model
def search(self, args, offset=0, limit=None, order=None, count=False):
# Restrict the empty return for these conditions
if (self.env.context and
self.env.context.get('grouping', 'standard') == 'line' and
len(args) == 1 and args[0][0] == 'order_id' and
args[0][1] == 'in'):
return self.browse()
return super(PurchaseOrderLine, self).search(
args, offset=offset, limit=limit, order=order, count=count)
|
Eficent/purchase-workflow
|
procurement_purchase_no_grouping/models/purchase_order.py
|
Python
|
agpl-3.0
| 1,521
| 0
|
from abc import abstractmethod
from typing import Callable, TypeVar, Protocol
from typing_extensions import runtime_checkable
TSource = TypeVar('TSource')
TResult = TypeVar('TResult')
@runtime_checkable
class Applicative(Protocol[TSource, TResult]):
"""Applicative.
Applicative functors are functors with some extra properties.
Most importantly, they allow you to apply functions inside the
functor (hence the name).
To learn more about Applicative functors:
* http://www.davesquared.net/2012/05/fp-newbie-learns-applicatives.html
"""
@abstractmethod
def apply(self, something):
"""Apply wrapped callable.
Python: apply(self: Applicative, something: Applicative[Callable[[A], B]]) -> Applicative
Haskell: (<*>) :: f (a -> b) -> f a -> f b.
Apply (<*>) is a beefed up fmap. It takes a functor value that
has a function in it and another functor, and extracts that
function from the first functor and then maps it over the second
one.
"""
raise NotImplementedError
#def __mul__(self, something):
# """(<*>) :: f (a -> b) -> f a -> f b.
# Provide the * as an infix version of apply() since we cannot
# represent the Haskell's <*> operator in Python.
# """
# return self.apply(something)
#def lift_a2(self, func, b):
# """liftA2 :: (Applicative f) => (a -> b -> c) -> f a -> f b -> f c."""
# return func % self * b
@classmethod
@abstractmethod
def pure(cls, fn: Callable[[TSource], TResult]) -> 'Applicative[TSource, TResult]':
"""Applicative functor constructor.
Use pure if you're dealing with values in an applicative context
(using them with <*>); otherwise, stick to the default class
constructor.
"""
raise NotImplementedError
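

# A minimal illustration (a sketch, not part of the original module): a trivial
# identity container satisfying the Applicative protocol above. Note the
# argument order documented for apply(): `self` wraps the value and `something`
# wraps the callable.
class Identity:
    def __init__(self, value):
        self.value = value

    def apply(self, something: 'Identity') -> 'Identity':
        # Extract the wrapped callable and map it over this wrapped value.
        return Identity(something.value(self.value))

    @classmethod
    def pure(cls, fn: Callable[[TSource], TResult]) -> 'Identity':
        return cls(fn)


# Example: Identity(41).apply(Identity(lambda x: x + 1)).value == 42,
# and isinstance(Identity(41), Applicative) holds via runtime_checkable.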
|
dbrattli/OSlash
|
oslash/typing/applicative.py
|
Python
|
apache-2.0
| 1,869
| 0.00214
|
"""
ISO 1996-2:2007
ISO 1996-2:2007 describes how sound pressure levels can be determined by direct measurement,
by extrapolation of measurement results by means of calculation, or exclusively by calculation,
intended as a basis for assessing environmental noise.
"""
import numpy as np
import pandas as pd
from scipy.signal import welch
from scipy.stats import linregress
import matplotlib.pyplot as plt
from acoustics.decibel import dbsum
from acoustics.standards.iso_tr_25417_2007 import REFERENCE_PRESSURE
import weakref
from tabulate import tabulate
TONE_WITHIN_PAUSE_CRITERION_DB = 6.0
"""A tone may exist when the level of any line in the noise pause is 6 dB or more about...."""
TONE_BANDWIDTH_CRITERION_DB = 3.0
"""Bandwidth of the detected peak."""
TONE_LINES_CRITERION_DB = 6.0
"""All lines with levels within 6 dB of the maximum level are classified as tones."""
TONE_SEEK_CRITERION = 1.0
"""Tone seek criterion."""
REGRESSION_RANGE_FACTOR = 0.75
"""Range of regression is usually +/- 0.75 critical bandwidth."""
_WINDOW_CORRECTION = {
'hanning': -1.8,
}
def window_correction(window):
"""Correction to be applied to :math:`L_{pt}` due to use of window."""
try:
return _WINDOW_CORRECTION[window]
except KeyError:
raise ValueError("Window correction is not available for specified window.")
def critical_band(frequency):
"""Bandwidth of critical band of frequency.
:param frequency: Center frequency of tone.
    :returns: (center, lower, upper, bandwidth) of band.
"""
if isinstance(frequency, np.ndarray):
center = frequency.copy()
center[frequency < 50.0] = 50.0
else:
center = 50.0 if frequency < 50 else frequency
bandwidth = (center > 500.0) * (center * 0.20) + (center <= 500.0) * 100.0
upper = center + bandwidth / 2.0
lower = center - bandwidth / 2.0
return center, lower, upper, bandwidth
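# Example following the definition above: critical_band(1000.0) returns
# (1000.0, 900.0, 1100.0, 200.0), since above 500 Hz the critical bandwidth is
# 20 % of the center frequency, and at or below 500 Hz it is a fixed 100 Hz.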
def tones_level(tone_levels):
"""Total sound pressure level of the tones in a critical band given the level of each of the tones.
    .. math:: L_{pt} = 10 \log_{10}{\sum 10^{L_{pti}/10}}
See equation C.1 in section C.2.3.1.
"""
return dbsum(tone_levels)
def masking_noise_level(noise_lines, frequency_resolution, effective_analysis_bandwidth):
"""Masking noise level :math:`L_{pn}`
:param noise_lines: Masking noise lines. See :func:`masking_noise_lines`.
:param frequency_resolution: Frequency resolution :math:`\Delta f`.
:param effective_analysis_bandwidth: Effective analysis bandwidth :math:`B`.
.. math:: L_{pn} = 10 \log_{10}{\sum 10^{L_n/10}} + 10 \log_{10}{\frac{\Delta f}{B}}
See equation C.11 in section C.4.4.
"""
return dbsum(noise_lines) + 10.0 * np.log10(frequency_resolution / effective_analysis_bandwidth)
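# With the default Hanning window, B = 1.5 * df (see
# Tonality.effective_analysis_bandwidth), so the bandwidth term above
# contributes 10 * log10(1 / 1.5), roughly -1.76 dB, to L_pn.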
def masking_noise_lines(levels, line_classifier, center, bandwidth, regression_range_factor):
"""Determine masking noise level lines using regression line. Returns array of :math:`L_n`.
:param levels: Levels as function of frequency.
:type levels: :class:`pd.Series`.
:param lines_classifier: Categorical indicating what each line is.
:param center: Center frequency.
:param bandwidth: bandwidth of critical band.
:param regression_range_factor: Range factor.
:returns: (Array with masking noise lines, slope, intercept).
"""
slicer = slice(center - bandwidth * regression_range_factor, center + bandwidth * regression_range_factor)
levels = levels[slicer]
frequencies = levels.index
regression_levels = levels[line_classifier == 'noise']
slope, intercept = linregress(x=regression_levels.index, y=regression_levels)[0:2]
levels_from_regression = slope * frequencies + intercept
return levels_from_regression, slope, intercept
def tonal_audibility(tones_level, masking_noise_level, center):
"""Tonal audibility.
    :param tones_level: Total sound pressure level of the tones in the critical band :math:`L_{pt}`.
    :param masking_noise_level: Total sound pressure level of the masking noise in the critical band :math:`L_{pn}`.
:param center: Center frequency of the critical band :math:`f_c`.
:returns: Tonal audibility :math:`\Delta L_{ta}`
.. math:: \Delta L_{ta} = L_{pt} - L_{pn} + 2 + \log_{10}{1 + \left(\frac{f_c}{502}\right)^{2.5}}
See equation C.3. in section C.2.4.
"""
return tones_level - masking_noise_level + 2.0 + np.log10(1.0 + (center / 502.0)**(2.5))
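# Worked example: L_pt = 60 dB and L_pn = 50 dB at f_c = 502 Hz give
# delta L_ta = 60 - 50 + 2 + log10(1 + 1**2.5) = 12 + log10(2), about 12.3 dB.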
def tonal_adjustment(tonal_audibility):
"""Adjustment :math:`K`.
:param tonal_audibility: Tonal audibility :math:`L_{ta}`.
See equations C.4, C.5 and C.6 in section C.2.4.
"""
if tonal_audibility > 10.0:
return 6.0
elif tonal_audibility < 4.0:
return 0.0
else:
return tonal_audibility - 4.0
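# For example, tonal_adjustment(7.0) == 3.0 dB; audibilities above 10 dB clamp
# to the maximum adjustment of 6 dB, and below 4 dB no adjustment is applied.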
class Tonality:
"""Perform assessment of audibility of tones in noise.
Objective method for assessing the audibility of tones in noise.
"""
def __init__( # pylint: disable=too-many-instance-attributes
self,
signal,
sample_frequency,
window='hanning',
reference_pressure=REFERENCE_PRESSURE,
tsc=TONE_SEEK_CRITERION,
regression_range_factor=REGRESSION_RANGE_FACTOR,
nbins=None,
force_tone_without_pause=False,
force_bandwidth_criterion=False,
):
self.signal = signal
"""Samples in time-domain."""
self.sample_frequency = sample_frequency
"""Sample frequency."""
self.window = window
"""Window to be used."""
self.reference_pressure = reference_pressure
"""Reference sound pressure."""
self.tsc = tsc
"""Tone seeking criterium."""
self.regression_range_factor = regression_range_factor
"""Regression range factor."""
self.nbins = nbins
"""Amount of frequency nbins to use. See attribute `nperseg` of :func:`scipy.signal.welch`."""
self._noise_pauses = list()
"""Private list of noise pauses that were determined or assigned."""
self._spectrum = None
"""Power spectrum as function of frequency."""
self.force_tone_without_pause = force_tone_without_pause
self.force_bandwidth_criterion = force_bandwidth_criterion
@property
def noise_pauses(self):
"""Noise pauses that were determined."""
for noise_pause in self._noise_pauses:
yield noise_pause
@property
def tones(self):
"""Tones that were determined."""
for noise_pause in self.noise_pauses:
if noise_pause.tone is not None:
yield noise_pause.tone
@property
def critical_bands(self):
"""Critical bands that were determined. A critical band is determined for each tone."""
for tone in self.tones:
yield tone.critical_band
@property
def spectrum(self):
"""Power spectrum of the input signal.
"""
if self._spectrum is None:
nbins = self.nbins
if nbins is None:
nbins = self.sample_frequency
nbins //= 1 # Fix because of bug in welch with uneven nbins
f, p = welch(self.signal, fs=self.sample_frequency, nperseg=nbins, window=self.window, detrend=False,
scaling='spectrum')
self._spectrum = pd.Series(10.0 * np.log10(p / self.reference_pressure**2.0), index=f)
return self._spectrum
@property
def frequency_resolution(self):
"""Frequency resolution.
"""
df = np.diff(np.array(self.spectrum.index)).mean()
return df
#return 1.0 / self.sample_frequency
@property
def effective_analysis_bandwidth(self):
"""Effective analysis bandwidth.
In the case of the Hanning window
.. math:: B_{eff} = 1.5 \Delta f
        with :math:`\Delta f` the :attr:`frequency_resolution`.
C.2.2: Note 1.
"""
if self.window == 'hanning':
return 1.5 * self.frequency_resolution
else:
raise ValueError()
def _set_noise_pauses(self, noise_pauses):
"""Manually set noise pauses. Expects iterable of tuples."""
self._noise_pauses = [NoisePause(start, end) for start, end in noise_pauses]
return self
def determine_noise_pauses(self, end=None):
"""Determine noise pauses. The determined noise pauses are available in :attr:`noise_pause_ranges`.
Noise pauses are search for using :func:`noise_pause_seeker`.
"""
self._set_noise_pauses(noise_pause_seeker(np.array(self.spectrum[:end]), self.tsc))
return self
def _construct_line_classifier(self):
"""Set values of line classifier."""
# Build classifier.
levels = self.spectrum
categories = ['noise', 'start', 'end', 'neither', 'tone']
self.line_classifier = pd.Series(
pd.Categorical(['noise'] * len(levels), categories=categories), index=levels.index)
# Add noise pauses
for noise_pause in self.noise_pauses:
# Mark noise pause start and end.
self.line_classifier.iloc[noise_pause.start] = 'start'
self.line_classifier.iloc[noise_pause.end] = 'end'
# Mark all other lines within noise pause as neither tone nor noise.
self.line_classifier.iloc[noise_pause.start + 1:noise_pause.end] = 'neither' # Half-open interval
# Add tone lines
for tone in self.tones:
self.line_classifier.iloc[tone._tone_lines] = 'tone'
return self
def _determine_tones(self):
"""Analyse the noise pauses for tones. The determined tones are available via :attr:`tones`.
Per frequency line results are available via :attr:`line_classifier`.
"""
levels = self.spectrum
# First we need to check for the tones.
for noise_pause in self.noise_pauses:
# Determine the indices of the tones in a noise pause
tone_indices, bandwidth_for_tone_criterion = determine_tone_lines(
levels,
self.frequency_resolution,
noise_pause.start,
noise_pause.end,
self.force_tone_without_pause,
self.force_bandwidth_criterion,
)
# If we have indices, ...
if np.any(tone_indices):
# ...then we create a tone object.
noise_pause.tone = create_tone(levels, tone_indices, bandwidth_for_tone_criterion,
weakref.proxy(noise_pause))
return self
def _determine_critical_bands(self):
"""Put a critical band around each of the determined tones."""
for tone in self.tones:
critical_band = self.critical_band_at(tone.center)
tone.critical_band = critical_band
critical_band.tone = weakref.proxy(tone)
return self
def analyse(self):
"""Analyse the noise pauses for tones and put critical bands around each of these tones.
The tones are available via :attr:`tones` and the critical bands via :attr:`critical_bands`.
Per frequency line results are available via :attr:`line_classifier`.
"""
# Determine tones. Puts noise pause starts/ends in classier as well as tone lines
# and lines that are neither tone nor noise.
self._determine_tones()
# Construct line classifier
self._construct_line_classifier()
# Determine critical bands.
self._determine_critical_bands()
return self
def critical_band_at(self, frequency):
"""Put at a critical band at `frequency`.
In order to use this function :attr:`line_classifier` needs to be available,
which means :meth:`analyse` needs to be used first.
"""
return create_critical_band(self.spectrum, self.line_classifier, frequency, self.frequency_resolution,
self.effective_analysis_bandwidth, self.regression_range_factor, self.window)
def plot_spectrum(self):
"""Plot power spectrum."""
spectrum = self.spectrum
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(spectrum.index, spectrum)
ax.set_xlabel('f in Hz')
ax.set_ylabel('L in dB')
return fig
@property
def dominant_tone(self):
"""Most dominant_tone tone.
The most dominant_tone tone is the tone with the highest tonal audibility :math:`L_{ta}`.
"""
try:
return sorted(self.tones, key=lambda x: x.critical_band.tonal_audibility, reverse=True)[0]
except IndexError:
return None
def plot_results(self, noise_pauses=False, tones=True, critical_bands=True):
"""Plot overview of results."""
df = self.frequency_resolution
levels = self.spectrum
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(levels.index, levels)
ax.set_xlabel("$f$ in Hz")
ax.set_ylabel("$L$ in dB")
if noise_pauses:
for pause in self.noise_pauses:
ax.axvspan(pause.start * df, pause.end * df, color='green', alpha=0.05)
if tones:
for tone in self.tones:
ax.axvline(tone.center, color='black', alpha=0.05)
if critical_bands:
for band in self.critical_bands:
ax.axvspan(band.start, band.end, color='yellow', alpha=0.05)
band = self.dominant_tone.critical_band
ax.axvline(band.start, color='red', linewidth=0.1)
ax.axvline(band.end, color='red', linewidth=0.1)
# Limit xrange
if noise_pauses:
_items = list(self.noise_pauses)
elif critical_bands:
_items = list(self.critical_bands)
ax.set_xlim(min(item.start for item in _items), max(item.end for item in _items))
return fig
def overview(self):
"""Print overview of results."""
try:
cb = self.dominant_tone.critical_band
except AttributeError:
raise ValueError("Cannot show overview (yet). No tones have been determined.")
tones = [("Tone", "{:4.1f} Hz: {:4.1f} dB".format(tone.center, tone.tone_level)) for tone in self.tones]
table = [
("Critical band", "{:4.1f} to {:4.1f} Hz".format(cb.start, cb.end)),
("Masking noise level $L_{pn}$", "{:4.1f} dB".format(cb.masking_noise_level)),
("Tonal level $L_{pt}$", "{:4.1f} dB".format(cb.total_tone_level)),
("Dominant tone", "{:4.1f} Hz".format(cb.tone.center)),
("3 dB bandwidth of tone", "{:2.1f}% of {:4.1f}".format(cb.tone.bandwidth_3db / cb.bandwidth * 100.0,
cb.bandwidth)),
("Tonal audibility $L_{ta}$", "{:4.1f} dB".format(cb.tonal_audibility)),
("Adjustment $K_{t}$", "{:4.1f} dB".format(cb.adjustment)),
("Frequency resolution", "{:4.1f} Hz".format(self.frequency_resolution)),
("Effective analysis bandwidth", "{:4.1f} Hz".format(self.effective_analysis_bandwidth)),
]
table += tones
return tabulate(table)
def results_as_dataframe(self):
"""Return results in dataframe."""
data = ((tone.center, tone.tone_level, tone.bandwidth_3db, tone.critical_band.start, tone.critical_band.end,
tone.critical_band.bandwidth, tone.critical_band.regression_slope,
tone.critical_band.regression_intercept, tone.critical_band.masking_noise_level,
tone.critical_band.total_tone_level, tone.critical_band.tonal_audibility,
tone.critical_band.adjustment) for tone in self.tones)
columns = [
'center', 'tone_level', 'bandwidth_3db', 'critical_band_start', 'critical_band_end',
'critical_band_bandwidth', 'regression_slope', 'regression_intercept', 'masking_noise_level',
'total_tone_level', 'tonal_audibility', 'adjustment'
]
return pd.DataFrame(list(data), columns=columns)
class NoisePause:
def __init__(self, start, end, tone=None):
self.start = start
self.end = end
self.tone = tone
def __str__(self):
return "(start={},end={})".format(self.start, self.end)
def __repr__(self):
return "NoisePause{}".format(str(self))
def __iter__(self):
yield self.start
        yield self.end
def _repr_html_(self):
table = [("Start", self.start), ("End", self.end)]
return tabulate(table, tablefmt="html")
def create_tone(levels, tone_lines, bandwidth_for_tone_criterion, noise_pause):
"""Create an instance of Tone."""
center = levels.iloc[tone_lines].idxmax()
tone_level = tones_level(levels.iloc[tone_lines])
return Tone(center, tone_lines, tone_level, noise_pause, bandwidth_for_tone_criterion)
class Tone:
"""Tone."""
def __init__(self, center, tone_lines, tone_level, noise_pause, bandwidth_3db, critical_band=None):
self.center = center
self._tone_lines = tone_lines
self.tone_level = tone_level
self.noise_pause = noise_pause
self.bandwidth_3db = bandwidth_3db
self.critical_band = critical_band
def __str__(self):
return "(center={:4.1f}, levels={:4.1f})".format(self.center, self.tone_level)
def __repr__(self):
return "Tone{}".format(str(self))
def _repr_html_(self):
table = [("Center frequency", "{:4.1f} Hz".format(self.center)),
("Tone level", "{:4.1f} dB".format(self.tone_level))]
return tabulate(table, tablefmt='html')
def create_critical_band(
levels,
line_classifier,
frequency,
frequency_resolution,
effective_analysis_bandwidth,
regression_range_factor,
window,
tone=None,
):
"""Create an instance of CriticalBand."""
center, start, end, bandwidth = critical_band(frequency)
# Masking noise lines
noise_lines, regression_slope, regression_intercept = masking_noise_lines(levels, line_classifier, center,
bandwidth, regression_range_factor)
# Masking noise level
noise_level = masking_noise_level(noise_lines, frequency_resolution, effective_analysis_bandwidth)
# Total tone level
tone_lines = levels[line_classifier == 'tone'][start:end]
tone_level = tones_level(tone_lines) - window_correction(window)
# Tonal audibility
audibility = tonal_audibility(tone_level, noise_level, center)
# Adjustment Kt
adjustment = tonal_adjustment(audibility)
return CriticalBand(
center,
start,
end,
bandwidth,
regression_range_factor,
regression_slope,
regression_intercept,
noise_level,
tone_level,
audibility,
adjustment,
tone,
)
class CriticalBand:
def __init__( # pylint: disable=too-many-instance-attributes
self,
center,
start,
end,
bandwidth,
regression_range_factor,
regression_slope,
regression_intercept,
noise_level,
tone_level,
audibility,
adjustment,
tone=None,
):
self.center = center
"""Center frequency of the critical band."""
self.start = start
"""Lower band-edge frequency of the critical band."""
self.end = end
"""Upper band-edge frequency of the critical band."""
self.bandwidth = bandwidth
"""Bandwidth of the critical band."""
self.regression_range_factor = regression_range_factor
"""Range of regression factor. See also :attr:`REGRESSION_RANGE_FACTOR`."""
self.regression_slope = regression_slope
"""Linear regression slope."""
self.regression_intercept = regression_intercept
"""Linear regression intercept."""
self.masking_noise_level = noise_level
"""Masking noise level :math:`L_{pn}`."""
self.total_tone_level = tone_level
"""Total tone level :math:`L_{pt}`."""
self.tonal_audibility = audibility
"""Tonal audibility :math:`L_{ta}`."""
self.adjustment = adjustment
"""Adjustment :math:`K_{t}`."""
self.tone = tone
def __str__(self):
return "(center={:4.1f}, bandwidth={:4.1f}, tonal_audibility={:4.1f}, adjustment={:4.1f}".format(
self.center, self.bandwidth, self.tonal_audibility, self.adjustment)
def __repr__(self):
return "CriticalBand{}".format(str(self))
def _repr_html_(self):
table = [
("Center frequency", "{:4.1f} Hz".format(self.center)),
("Start frequency", "{:4.1f} Hz".format(self.start)),
("End frequency", "{:4.1f} Hz".format(self.end)),
("Bandwidth", "{:4.1f} Hz".format(self.bandwidth)),
("Regression factor", "{:4.1f}".format(self.regression_range_factor)),
("Regression slope", "{:4.1f}".format(self.regression_slope)),
("Regression intercept", "{:4.1f}".format(self.regression_intercept)),
("Masking noise level", "{:4.1f} dB".format(self.masking_noise_level)),
("Total tone level", "{:4.1f} dB".format(self.total_tone_level)),
("Tonal audibility $L_{ta}$", "{:4.1f} dB".format(self.tonal_audibility)),
("Adjustment $K_{t}$", "{:4.1f} dB".format(self.adjustment)),
]
return tabulate(table, tablefmt='html')
#----------Noise pauses----------------------------
def _search_noise_pauses(levels, tsc):
pauses = list()
possible_start = None
for i in range(2, len(levels) - 2):
if (levels[i] - levels[i - 1]) >= tsc and (levels[i - 1] - levels[i - 2]) < tsc:
possible_start = i
if (levels[i] - levels[i + 1]) >= tsc and (levels[i + 1] - levels[i + 2]) < tsc:
if possible_start:
pauses.append((possible_start, i))
possible_start = None
return pauses
def noise_pause_seeker(levels, tsc):
"""Given the levels of a spectrum and a tone seeking criterium this top level function seeks possible noise pauses.
:param levels: Spectral levels.
:param df: Frequency resolution.
:param tsc: Tone seeking criterium.
Possible start and end indices of noise pauses are determined using :func:`possible_noise_pauses.
Then, only those that correspond to the smallest intervals that do not overlap other intervals are kept.
"""
n = len(levels)
forward_pauses = _search_noise_pauses(levels, tsc)
backward_pauses = _search_noise_pauses(levels[::-1], tsc)
backward_pauses = [(n - 1 - start, n - 1 - end) for end, start in reversed(backward_pauses)]
possible_pauses = sorted(list(set(forward_pauses) & set(backward_pauses)))
return possible_pauses
#------------------- Tone seeking--------------------
def determine_tone_lines(levels, df, start, end, force_tone_without_pause=False, force_bandwidth_criterion=False):
"""Determine tone lines in noise pause.
:param levels: Series with levels as function of frequency.
:param df: Frequency resolution.
:param start: Index of noise pause start.
:param end: Index of noise pause end.
:returns: Array with indices of tone lines in noise pause.
"""
# Noise pause range object
npr = slice(start, end + 1)
# Return values
tone_indices = np.array([])
bandwidth_for_tone_criterion = None
    # Levels but with integers as indices instead of frequencies.
# Benefit over np.array is that the index is maintained when the object is sliced.
levels_int = levels.reset_index(drop=True)
    # If any of the lines is 6 dB or more above the pause edges. See section C.4.3.
if np.any((levels.iloc[npr] >= TONE_WITHIN_PAUSE_CRITERION_DB + levels.iloc[start - 1]) &
(levels.iloc[npr] >= TONE_WITHIN_PAUSE_CRITERION_DB + levels.iloc[end + 1])) or force_tone_without_pause:
# Indices of values that are within -3 dB point.
indices_3db = (levels.iloc[npr] >= levels.iloc[npr].max() - TONE_BANDWIDTH_CRITERION_DB).to_numpy().nonzero()[0]
# -3 dB bandwidth
bandwidth_for_tone_criterion = (indices_3db.max() - indices_3db.min()) * df
# Frequency of tone.
tone_center_frequency = levels.iloc[npr].idxmax()
#tone_center_index = levels.reset_index(drop=True).iloc[npr].idxmax()
# Critical band
_, _, _, critical_band_bandwidth = critical_band(tone_center_frequency)
        # Does it fulfil the bandwidth criterion? See section C.4.3
if (bandwidth_for_tone_criterion < 0.10 * critical_band_bandwidth) or force_bandwidth_criterion:
# All values within 6 decibel are designated as tones.
tone_indices = (levels_int.iloc[npr][
levels_int.iloc[npr] >= levels_int.iloc[npr].max() - TONE_LINES_CRITERION_DB]).index.values
return tone_indices, bandwidth_for_tone_criterion
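

if __name__ == "__main__":
    # Minimal end-to-end sketch (synthetic signal; parameters are illustrative,
    # not part of the standard): a 1 kHz sinusoid in weak white noise should
    # show up as an audible tone.
    fs = 44100
    t = np.arange(10 * fs) / float(fs)
    noisy_tone = np.sin(2.0 * np.pi * 1000.0 * t) + 0.02 * np.random.randn(t.size)
    tonality = Tonality(noisy_tone, fs, nbins=fs)
    tonality.determine_noise_pauses().analyse()
    print(tonality.results_as_dataframe())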
|
python-acoustics/python-acoustics
|
acoustics/standards/iso_1996_2_2007.py
|
Python
|
bsd-3-clause
| 25,331
| 0.003948
|
#=========================================================================
# visitors.py
#=========================================================================
from __future__ import print_function
import ast, _ast
import re
import warnings
from ..ast_helpers import get_closure_dict, print_simple_ast
from ...model.signals import Wire, Signal, InPort, OutPort, _SignalSlice
from ...model.Model import Model
from ...model.PortBundle import PortBundle
from ...model.signal_lists import PortList, WireList
from ...datatypes.Bits import Bits
from .exceptions import VerilogTranslationError
#-------------------------------------------------------------------------
# AnnotateWithObjects
#-------------------------------------------------------------------------
# Annotates AST Nodes with the live Python objects they reference.
# TODO: store objects in PyObj wrapper, or not?
class AnnotateWithObjects( ast.NodeTransformer ):
def __init__( self, model, func ):
self.model = model
self.func = func
self.closed_vars = get_closure_dict( func )
self.current_obj = None
def visit_Attribute( self, node ):
self.generic_visit( node )
# TODO: handle self.current_obj == None. These are temporary
# locals that we should check to ensure their types don't
# change!
if self.current_obj:
try :
x = self.current_obj.getattr( node.attr )
self.current_obj.update( node.attr, x )
except AttributeError:
if node.attr not in ['next', 'value', 'n', 'v']:
raise Exception('Unknown attribute "{}" in model "{}"'
.format( node.attr, self.model.__class__ ))
node._object = self.current_obj.inst if self.current_obj else None
return node
def visit_Name( self, node ):
# Check if the name is a global constant
if node.id in self.func.func_globals:
new_obj = PyObj( '', self.func.func_globals[ node.id ] )
# If the name is not in closed_vars or func_globals, it's a local temporary
elif node.id not in self.closed_vars:
new_obj = None
# If the name points to the model, this is a reference to self (or s)
elif self.closed_vars[ node.id ] is self.model:
new_obj = PyObj( '', self.closed_vars[ node.id ] )
# Otherwise, we have some other variable captured by the closure...
# TODO: should we allow this?
    else:
      new_obj = PyObj( node.id, self.closed_vars[ node.id ] )
# Store the new_obj
self.current_obj = new_obj
node._object = self.current_obj.inst if self.current_obj else None
    # Return the node
return node
def visit_Subscript( self, node ):
# Visit the object being sliced
new_value = self.visit( node.value )
# Visit the index of the slice; stash and restore the current_obj
stash, self.current_obj = self.current_obj, None
new_slice = self.visit( node.slice )
self.current_obj = stash
# Update the current_obj
# TODO: check that type of all elements in item are identical
# TODO: won't work for lists that are initially empty
# TODO: what about lists that initially contain None?
# TODO: do we want the array, or do we want element 0 of the array...
node._object = self.current_obj.inst if self.current_obj else None
if self.current_obj:
self.current_obj.update( '[]', self.current_obj.inst[0] )
return node
def visit_List( self, node ):
node._object = []
for item in node.elts:
self.visit( item )
node._object.append( item._object )
return node
#-------------------------------------------------------------------------
# AnnotateAssignments
#-------------------------------------------------------------------------
class AnnotateAssignments( ast.NodeTransformer ):
'Annotate assign nodes with ._is_blocking attribute'
def visit_Assign( self, node ):
# catch untranslatable constructs
if len(node.targets) != 1:
raise VerilogTranslationError(
'Chained assignments are not supported!\n'
'Please modify "x = y = ..." to be two separate lines.',
node.lineno
)
# annotate the assignment with _is_blocking if not sequential update
lhs = node.targets[0]
seq = isinstance( lhs, ast.Attribute ) and lhs.attr in ['next','n']
node._is_blocking = not seq
self.generic_visit( node )
return node
def visit_AugAssign( self, node ):
# annotate the assignment with _is_blocking if not sequential update
lhs = node.target
seq = isinstance( lhs, ast.Attribute ) and lhs.attr in ['next','n']
node._is_blocking = not seq
self.generic_visit( node )
return node
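# Hedged, comment-form demonstration (not part of the original file):
#
#   tree = ast.parse( "s.out.next = s.in_" )
#   tree = AnnotateAssignments().visit( tree )
#   assert tree.body[0]._is_blocking == False  # '.next' -> sequential (<=)
#
#   tree = ast.parse( "s.out.value = s.in_" )
#   tree = AnnotateAssignments().visit( tree )
#   assert tree.body[0]._is_blocking == True   # '.value' -> combinational (=)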
#-------------------------------------------------------------------------
# RemoveValueNext
#-------------------------------------------------------------------------
# Remove .value and .next.
class RemoveValueNext( ast.NodeTransformer ):
def visit_Attribute( self, node ):
if node.attr in ['next', 'value', 'n', 'v']:
# Update the Load/Store information
node.value.ctx = node.ctx
return ast.copy_location( node.value, node )
return node
#-------------------------------------------------------------------------
# RemoveSelf
#-------------------------------------------------------------------------
# Remove references to self.
# TODO: make Attribute attached to self a Name node?
class RemoveSelf( ast.NodeTransformer ):
def __init__( self, model ):
self.model = model
def visit_Name( self, node ):
if node._object == self.model:
return None
return node
#-------------------------------------------------------------------------
# FlattenSubmodAttrs
#-------------------------------------------------------------------------
# Transform AST branches for submodule signals. A PyMTL signal referenced
# as 's.submodule.port' would appear in the AST as:
#
# Attribute(port)
# |- Attribute(submodule)
#
# This visitor transforms the AST so the name is mangled to 'submodule$port':
#
# Attribute(submodule$port)
#
class FlattenSubmodAttrs( ast.NodeTransformer ):
def __init__( self ):
self.submodule = None
def visit_Attribute( self, node ):
# Visit children
self.generic_visit( node )
# If the direct child of this attribute was a submodule then the node
# will be removed by the visitor. We must update our name to include
# submodule name for proper mangling.
if self.submodule:
new_node = _ast.Name( id = '{}${}'.format(self.submodule, node.attr ),
ctx = node.ctx )
new_node._object = node._object
node = new_node
    # Attribute is a submodule: remove the node and set the submodule name
if hasattr( node._object, 'class_name' ):
self.submodule = node._object.name
return None
# Otherwise, clear the submodule name, return node unmodified
self.submodule = None
return ast.copy_location( node, node )
#-------------------------------------------------------------------------
# FlattenPortBundles
#-------------------------------------------------------------------------
# Transform AST branches for PortBundle signals. A PyMTL signal referenced
# as 's.portbundle.port' would appear in the AST as:
#
# Attribute(port)
# |- Attribute(portbundle)
#
# This visitor transforms the AST and name to 's.portbundle_port':
#
# Attribute(portbundle_port)
#
class FlattenPortBundles( ast.NodeTransformer ):
def __init__( self ):
self.portbundle = None
def visit_Attribute( self, node ):
# Visit children
self.generic_visit( node )
# If the direct child of this attribute was a portbundle then the node
# will be removed by the visitor. We must update our name to include
# portbundle name for proper mangling.
if self.portbundle:
new_node = _ast.Name( id = '{}_{}'.format(self.portbundle, node.attr ),
ctx = node.ctx )
new_node._object = node._object
node = new_node
    # Attribute is a PortBundle: remove the node and set the portbundle name
if isinstance( node._object, PortBundle ):
self.portbundle = node.attr
return None
    # Otherwise, clear the portbundle name, return node unmodified
self.portbundle = None
return ast.copy_location( node, node )
def visit_Name( self, node ):
    # Name is a PortBundle: remove the node and set the portbundle name
if isinstance( node._object, PortBundle ):
self.portbundle = node.id
return None
    # Otherwise, clear the portbundle name, return node unmodified
self.portbundle = None
return node
#-------------------------------------------------------------------------
# FlattenListAttrs
#-------------------------------------------------------------------------
# Transform AST branches for attribute accesses from indexed objects.
# Attributes referenced as 's.sig[i].attr' would appear in the AST as:
#
# Attribute(attr)
# |- Subscript()
# |- Attribute(sig)
# |- Index()
# |- Name(i)
#
# This visitor transforms the AST and name to 's.sig_attr[i]':
#
# Subscript()
# |- Attribute(sig_attr)
# |- Index()
# |- Name(i)
#
class FlattenListAttrs( ast.NodeTransformer ):
def __init__( self ):
self.attr = None
def visit_Attribute( self, node ):
# If a parent node is going to be removed, update this name
# SubModel List
if self.attr and isinstance( node._object[0], Model ):
node.attr = '{}${}'.format( node.attr, self.attr )
node._object = PortList([ getattr( x, self.attr ) for x in node._object ])
node._object.name = node.attr
# PortBundle List
elif self.attr and isinstance( node._object[0], PortBundle ):
node.attr = '{}_{}'.format( node.attr, self.attr )
node._object = PortList([ getattr( x, self.attr ) for x in node._object ])
node._object.name = node.attr
# Unknown!!!
elif self.attr:
raise Exception( "Don't know how to flatten this!" )
    # Exit early if there's no value attribute
if not hasattr( node, 'value' ):
return node
# If the child is a subscript node, this node will be removed
if isinstance( node.value, ast.Subscript ):
self.attr = node.attr
self.generic_visit( node )
self.attr = None
node = node.value
return ast.copy_location( node, node )
def visit_Subscript( self, node ):
# Update the _object in Subscript too!
if self.attr:
if isinstance( node._object[0], Model ):
name = '{}${}'.format( node._object[0].name.split('[')[0], self.attr )
node._object = PortList([ getattr( x, self.attr ) for x in node._object ])
node._object.name = name
elif isinstance( node._object[0], PortBundle ):
if isinstance( node.value, ast.Name ):
bundle_name = node.value.id
elif isinstance( node.value, ast.Attribute ):
bundle_name = node.value.attr
else:
raise Exception()
name = '{}_{}'.format( bundle_name, self.attr )
node._object = PortList([ getattr( x, self.attr ) for x in node._object ])
node._object.name = name
else:
return node
# Visit the slice
stash = self.attr
self.attr = None
node.slice = self.visit( node.slice )
self.attr = stash
# Visit the value
node.value = self.visit( node.value )
return node
def visit_Name( self, node ):
# SubModel List
if self.attr and isinstance( node._object[0], Model ):
node.id = '{}${}'.format( node.id, self.attr )
node._object = PortList([ getattr( x, self.attr ) for x in node._object ])
node._object.name = node.id
# PortBundle List
elif self.attr and isinstance( node._object[0], PortBundle ):
node.id = '{}_{}'.format( node.id, self.attr )
node._object = PortList([ getattr( x, self.attr ) for x in node._object ])
node._object.name = node.id
# Unknown!!!
elif self.attr:
raise Exception( "Don't know how to flatten this!" )
return node
#-------------------------------------------------------------------------
# RemoveModule
#-------------------------------------------------------------------------
# Remove the module node.
class RemoveModule( ast.NodeTransformer ):
def visit_Module( self, node ):
    #self.generic_visit( node ) # visit children, unneeded?
# copy the function body, delete module references
return ast.copy_location( node.body[0], node )
#-------------------------------------------------------------------------
# SimplifyDecorator
#-------------------------------------------------------------------------
# Make the decorator contain text strings, not AST Trees
class SimplifyDecorator( ast.NodeTransformer ):
def visit_FunctionDef( self, node ):
    #self.generic_visit( node ) # visit children, unneeded?
# TODO: currently assume decorator is of the form 'self.dec_name'
if len( node.decorator_list ) != 1:
def get_dec_name( dec ):
if hasattr( dec, 'id' ): return dec.id
elif hasattr( dec, 'attr' ): return dec.attr
else: return dec
raise VerilogTranslationError(
'Translated behavioral blocks should only have one decorator!\n'
'Current decorators: {}'.format(
[ get_dec_name( x ) for x in node.decorator_list ]
), node.lineno
)
dec = node.decorator_list[0].attr
# create a new FunctionDef node that deletes the decorators
new_node = ast.FunctionDef( name=node.name, args=node.args,
body=node.body, decorator_list=[dec])
return ast.copy_location( new_node, node )
#-------------------------------------------------------------------------
# ThreeExprLoops
#-------------------------------------------------------------------------
# Replace calls to range()/xrange() in a for loop with a Slice object
# describing the bounds (upper/lower/step) of the iteration.
class ThreeExprLoops( ast.NodeTransformer ):
def visit_For( self, node ):
self.generic_visit( node )
if not ( isinstance( node.iter, _ast.Call ) and
isinstance( node.iter.func, _ast.Name ) and
node.iter.func.id in ['range', 'xrange'] ):
raise VerilogTranslationError(
'For loops are only translatable when using range or xrange!\n'
'Please use "for i in range(...)/xrange(...)".',
node.lineno
)
call = node.iter
if len( call.args ) == 1:
start = _ast.Num( n=0 )
stop = call.args[0]
step = _ast.Num( n=1 )
elif len( call.args ) == 2:
start = call.args[0]
stop = call.args[1]
step = _ast.Num( n=1 ) # TODO: should be an expression
elif len( call.args ) == 3:
start = call.args[0]
stop = call.args[1]
step = call.args[2]
else:
raise VerilogTranslationError(
        'An invalid number of arguments was provided to the (x)range function!\n',
node.lineno
)
# Must know if the step is negative or positive in order to set the
# correct bound check. This is because of Python's range behavior.
try:
if hasattr( step, '_object' ): step_val = step._object
elif hasattr( step, 'n' ): step_val = step.n
assert step_val != 0
except (UnboundLocalError,AssertionError):
raise VerilogTranslationError(
'An error occurred when translating a "for loop"!\n'
'The "step" parameter to range must be a constant integer value != 0!',
node.lineno
)
node.iter = _ast.Slice( lower=start, upper=stop, step=step )
node.iter.lt_gt = '<' if step_val > 0 else '>'
return node
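# Hedged illustration (not part of the original file): after this pass,
# "for i in range(4)" carries
#   node.iter       == Slice( lower=Num(0), upper=Num(4), step=Num(1) )
#   node.iter.lt_gt == '<'
# so the generated Verilog loop bound check uses "i < 4"; a negative
# constant step would instead select '>'.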
#-------------------------------------------------------------------------
# ConstantToSlice
#-------------------------------------------------------------------------
class ConstantToSlice( ast.NodeTransformer ):
def visit_Attribute( self, node ):
self.generic_visit( node )
if isinstance( node._object, slice ):
if node._object.step:
raise VerilogTranslationError(
'Slices with steps ([start:stop:step]) are not translatable!\n',
node.lineno
)
new_node = ast.Slice( ast.Num( node._object.start ),
ast.Num( node._object.stop ),
None )
return ast.copy_location( new_node, node )
return node
def visit_Name( self, node ):
self.generic_visit( node )
if isinstance( node._object, slice ):
if node._object.step:
raise VerilogTranslationError(
'Slices with steps ([start:stop:step]) are not translatable!\n',
node.lineno
)
new_node = ast.Slice( ast.Num( node._object.start ),
ast.Num( node._object.stop ),
None )
return ast.copy_location( new_node, node )
return node
#-------------------------------------------------------------------------
# BitStructToSlice
#-------------------------------------------------------------------------
class BitStructToSlice( ast.NodeTransformer ):
def visit_Attribute( self, node ):
self.generic_visit( node )
if isinstance( node._object, _SignalSlice ):
if node._object.slice.step:
raise VerilogTranslationError(
'Slices with steps ([start:stop:step]) are not translatable!\n',
node.lineno
)
new_node = ast.Subscript( node.value,
ast.Slice( ast.Num( node._object.slice.start ),
ast.Num( node._object.slice.stop ),
None ),
None,
)
new_node._object = node._object
return ast.copy_location( new_node, node )
return node
#-------------------------------------------------------------------------
# InferTemporaryTypes
#-------------------------------------------------------------------------
import copy
class InferTemporaryTypes( ast.NodeTransformer ):
last_model = None
func_id = 0
def __init__( self, model ):
self.model = model
self.infer_dict = {}
# Create unique ids for each function we visit in a given model.
# This ensures we can assign unique names to temporaries to provide
# 'scoping' behavior similar to Python.
if id(self.model) != InferTemporaryTypes.last_model:
InferTemporaryTypes.last_model = id(self.model)
InferTemporaryTypes.func_id = 0
else:
InferTemporaryTypes.func_id += 1
def _insert( self, node, value ):
node.targets[0]._object = value
self.infer_dict[ node.targets[0].id ] = value
def _uniq_name( self, node_id ):
return node_id + '__{}'.format( self.func_id )
def visit_Assign( self, node ):
# Catch untranslatable constructs
if len(node.targets) != 1:
raise VerilogTranslationError(
'Chained assignments are not supported!\n'
'Please modify "x = y = ..." to be two separate lines.',
node.lineno
)
if isinstance(node.targets[0], ast.Tuple):
raise VerilogTranslationError(
'Multiple items on the left of an assignment are not supported!\n'
'Please modify "x,y = ..." to be two separate lines.',
node.lineno
)
# First visit the RHS to update Name nodes that have been inferred
self.visit( node.value )
# Need this to visit potential temporaries used in slice indices!
self.visit( node.targets[0] )
# The LHS doesn't have a type, we need to infer it
if node.targets[0]._object == None:
# The LHS should be a Name node!
if not isinstance(node.targets[0], _ast.Name):
raise VerilogTranslationError(
          'An internal error occurred when performing type inference!\n'
'Please contact the PyMTL developers!',
node.lineno
)
# Assign unique name to this temporary in case the same temporary
# name is used in another concurrent block.
node.targets[0].id = self._uniq_name( node.targets[0].id )
# Copy the object returned by the RHS, set the name appropriately
if isinstance( node.value, ast.Name ):
if isinstance( node.value._object, int ):
self._insert( node, (node.targets[0].id, node.value._object ) )
else:
obj = copy.copy( node.value._object )
obj.name = node.targets[0].id
obj.parent = None
self._insert( node, obj )
elif isinstance( node.value, ast.Attribute ):
if isinstance( node.value._object, int ):
self._insert( node, (node.targets[0].id, node.value._object ) )
else:
obj = copy.copy( node.value._object )
obj.name = node.targets[0].id
obj.parent = None
self._insert( node, obj )
elif isinstance( node.value, ast.Num ):
self._insert( node, (node.targets[0].id, int( node.value.n )) )
elif isinstance( node.value, ast.BoolOp ):
obj = Wire( 1 )
obj.name = node.targets[0].id
self._insert( node, obj )
elif isinstance( node.value, ast.Compare ):
obj = Wire( 1 )
obj.name = node.targets[0].id
self._insert( node, obj )
elif isinstance( node.value, ast.Subscript ):
# TODO: assumes ast.Index does NOT contain a slice object
if not isinstance( node.value.slice, ast.Index ):
raise VerilogTranslationError(
'Type inference from slices > 1-bit is not currently supported!'
'\nCannot infer type of temporary variable "{}".'
.format( node.targets[0].id ),
node.lineno
)
if isinstance( node.value._object, Signal ):
obj = Wire( 1 )
elif isinstance( node.value._object, list ) and \
isinstance( node.value._object[0], Signal ):
obj = Wire( node.value._object[0].nbits )
else:
raise VerilogTranslationError(
'Type inference from unsupported list construct!'
'\nCannot infer type of temporary variable "{}".'
.format( node.targets[0].id ),
node.lineno
)
obj.name = node.targets[0].id
self._insert( node, obj )
elif isinstance( node.value, ast.Call ):
func_name = node.value.func.id
if func_name in ['sext', 'zext']:
nbits_arg = node.value.args[1]
if isinstance( nbits_arg, ast.Num ): nbits = nbits_arg.n
else: nbits = nbits_arg._object
if not isinstance( nbits, int ):
raise VerilogTranslationError(
'The second argument to function "{}" must be an int!'
.format( func_name ),
node.lineno
)
obj = Wire( nbits )
elif func_name == 'concat':
nbits = sum( [x._object.nbits for x in node.value.args ] )
obj = Wire( nbits )
elif func_name in ['reduce_and', 'reduce_or', 'reduce_xor']:
obj = Wire( 1 )
elif func_name == 'Bits':
nbits_arg = node.value.args[0]
if isinstance( nbits_arg, ast.Num ): nbits = nbits_arg.n
else: nbits = nbits_arg._object
if not isinstance( nbits, int ):
raise VerilogTranslationError(
'The first argument to the Bits constructor must be an int!',
node.lineno
)
obj = Wire( nbits )
else:
print_simple_ast( node )
raise VerilogTranslationError(
'Type inference from the function "{}" is not currently supported!'
'\nCannot infer type of temporary variable "{}".'
.format( func_name, node.targets[0].id ),
node.lineno
)
obj.name = node.targets[0].id
self._insert( node, obj )
else:
print_simple_ast( node )
raise VerilogTranslationError(
'Type inference of "{}" AST nodes is not currently supported!'
'\nCannot infer type of temporary variable "{}".'
.format( type(node.value).__name__, node.targets[0].id ),
node.lineno
)
return node
def visit_Name( self, node ):
try:
if node._object == None:
# Update all other references to inferred temporaries to use the
# newly assigned uniquified name.
temp_name = self._uniq_name( node.id )
node._object = self.infer_dict[ temp_name ]
node.id = temp_name
except KeyError:
pass
return node
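# Hedged illustration (not part of the original file): inside a concurrent
# block,
#   tmp  = s.in_       # copies the signal type of s.in_
#   flag = (a == b)    # ast.Compare -> 1-bit Wire
#   n    = 4           # ast.Num     -> stored as an int constant
# and each temporary is renamed <name>__<func_id> so the same temporary name
# can safely be reused in another concurrent block of the model.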
#-------------------------------------------------------------------------
# PortListNameHack
#-------------------------------------------------------------------------
# Temporary hack to handle cases where port lists are named improperly.
class PortListNameHack( ast.NodeTransformer ):
def __init__( self, model ):
self.model = model
def visit_Subscript( self, node ):
self.generic_visit( node )
plist = node._object
# skip check if this isn't a PortList or WireList
if not isinstance( plist, (PortList, WireList) ):
return node
# if the PortList parent is not the same as the current modules parent, but
# there is no '$' in the name, it's been named improperly! fix it!
if plist.parent != self.model and not '$' in plist.name:
# this only works if all children of the list have the same parent, throw
# an error if we can detect that that is not the case
if len(plist) > 1 and plist[0].parent != plist[1].parent:
raise Exception( "Error during translation!" )
# generate the updated name, and also make a copy of the PortList to make
# sure we aren't impacting any other AST references to this object
name = '{}${}'.format( plist.parent.name, plist.name )
node._object = PortList( node._object )
node._object.name = name
return node
#-------------------------------------------------------------------------
# GetRegsIntsTempsArrays
#-------------------------------------------------------------------------
# TODO: for loop temporaries (ComplexBitSplit)
class GetRegsIntsParamsTempsArrays( ast.NodeVisitor ):
def get( self, tree ):
self._is_lhs = False
self.store = {}
self.loopvar = set()
self.params = set()
self.arrays = set()
self.arrayelms = set()
self.visit( tree )
return set( self.store.values() ), self.loopvar, self.params, self.arrays
def visit_Attribute( self, node ):
if isinstance( node._object, (int,Bits) ):
self.params.add( (node.attr, node._object) )
#self.generic_visit( node )
def visit_Name( self, node ):
if isinstance( node._object, (int,Bits) ):
self.params.add( (node.id, node._object) )
#self.generic_visit( node )
def visit_Assign( self, node ):
if len(node.targets) != 1:
raise VerilogTranslationError(
'Chained assignments are not supported!\n'
'Please modify "x = y = ..." to be two separate lines.',
node.lineno
)
self._is_lhs = True
self.visit( node.targets[0] )
self._is_lhs = False
self.visit( node.value )
obj = node.targets[0]._object
# NOTE:
# - currently possible to have inferences with different bitwidths
# - currently possible for a signal to be stored as both a reg and loopvar
# handle this in verilog_structural.create_declarations
if obj in self.arrayelms: return
elif isinstance( obj, Signal ): self.store[ obj.fullname ] = obj
elif isinstance( obj, tuple ): self.loopvar.add( obj[0] )
# FIXME:
# - if one field of a bitstruct is assigned in a behavioral block,
# the **entire** bitstruct is assumed to be a reg!
elif isinstance( obj, _SignalSlice ):
self.store[ obj._signal.fullname ] = obj._signal
def visit_For( self, node ):
if not (isinstance( node.iter, _ast.Slice ) and
isinstance( node.target, _ast.Name )):
raise VerilogTranslationError(
        'An internal error occurred when translating a for loop!\n'
'Please contact the PyMTL developers!',
node.lineno
)
self.loopvar.add( node.target.id )
self.generic_visit( node )
def visit_Subscript( self, node ):
# TODO: Check for PortList/WireList explicitly?
if isinstance( node._object, list ):
# Keep track if this array ever appears on the lhs
# (if so, should be declared reg)
try: node._object.is_lhs |= self._is_lhs
except AttributeError: node._object.is_lhs = self._is_lhs
# Add arrays to our tracking datastructures
self.arrays .add ( node._object )
self.arrayelms.update( node._object )
# visit value to find nested subscripts
self.visit( node.value )
if (isinstance( node._object, list ) and
isinstance( node._object[0], list )):
self.arrays.remove( node._object[0] )
# visit slice to find params
# _is_lhs is false because vars in index are only read, not written!
stash_is_lhs, self._is_lhs = self._is_lhs, False
self.visit( node.slice )
self._is_lhs = stash_is_lhs
def visit_Print( self, node ):
return node
#------------------------------------------------------------------------
# PyObj
#------------------------------------------------------------------------
class PyObj( object ):
def __init__( self, name, inst ):
self.name = name
self.inst = inst
def update( self, name, inst ):
self.name += name
self.inst = inst
def getattr( self, name ):
return getattr( self.inst, name )
def __repr__( self ):
return "PyObj( name={} inst={} )".format( self.name, type(self.inst) )
|
Abhinav117/pymtl
|
pymtl/tools/translation/visitors.py
|
Python
|
bsd-3-clause
| 29,868
| 0.028994
|
"""
Test the metropolis hastings algorithm.
"""
import numpy as np
import chronometer as gc
import matplotlib.pyplot as plt
import corner
import emcee
def model(par, x):
return par[0] + par[1]*x
def lnlike(par, x, y, yerr, par_inds):
y_mod = model(par, x)
return sum(-.5*((y_mod - y)/yerr)**2)
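# Hedged sanity note (not part of the original test): lnlike is the Gaussian
# log-likelihood up to an additive constant,
#   ln L = -1/2 * sum(((y_mod - y) / yerr)**2),
# so a perfect fit scores 0, e.g.
#   lnlike([0., 1.], np.array([2.]), np.array([2.]), np.array([1.]), None) == 0.0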
def test_metropolis_hastings():
# Straight line model
x = np.arange(0, 10, .1)
err = 2.
yerr = np.ones_like(x) * err
y = .7 + 2.5*x + np.random.randn(len(x))*err
# Plot the data.
plt.clf()
plt.errorbar(x, y, yerr=yerr, fmt="k.")
plt.savefig("data")
print("Running Metropolis Hastings")
N = 1000000 # N samples
pars = np.array([.5, 2.5]) # initialisation
t = np.array([.01, .01])
par_inds = np.arange(len(pars))
args = [x, y, yerr, par_inds]
samples, par, probs = gc.MH(pars, lnlike, N, t, *args)
results = [np.percentile(samples[:, i], 50) for i in range(2)]
    # 16th/84th percentiles bracket the median with a 1-sigma credible interval
    upper = [np.percentile(samples[:, i], 84) for i in range(2)]
    lower = [np.percentile(samples[:, i], 16) for i in range(2)]
print(lower, "lower")
print(results, "results")
print(upper, "upper")
assert lower < results
assert results < upper
plt.clf()
plt.errorbar(x, y, yerr=yerr, fmt="k.")
plt.plot(x, results[0] + results[1]*x)
plt.savefig("test")
fig = corner.corner(samples, truths=[.7, 2.5], labels=["m", "c"])
fig.savefig("corner_MH_test")
plt.clf()
plt.plot(probs)
plt.savefig("prob_test")
if __name__ == "__main__":
test_metropolis_hastings()
|
RuthAngus/chronometer
|
chronometer/test_MH.py
|
Python
|
mit
| 1,566
| 0.000639
|
# -*- coding: utf-8 -*-
import pytest
from boussole.exceptions import FinderException
def test_001(finder):
results = finder.get_relative_from_paths("/home/foo/plop", [
"/home/foo",
"/home/bar",
"/etc",
])
assert results == "plop"
def test_002(finder):
results = finder.get_relative_from_paths("/etc/plop.plip", [
"/home/foo",
"/home/bar",
"/etc",
])
assert results == "plop.plip"
def test_003(finder):
results = finder.get_relative_from_paths("/home/foo/plop", [
"/home",
"/home/foo",
"/etc",
])
assert results == "plop"
def test_004(finder):
results = finder.get_relative_from_paths("/home/foo/plop", [
"/home",
"/home/foo",
"/home/bar",
"/etc/ping",
])
assert results == "plop"
def test_005(finder):
results = finder.get_relative_from_paths("/home/foo/plop", [
"/home",
"/home/foo",
"/home/bar/pika",
"/etc/ping",
])
assert results == "plop"
def test_006(finder):
results = finder.get_relative_from_paths("/home/foo/pika/plop", [
"/home",
"/home/foo",
"/home/bar/pika",
"/home/bar",
])
assert results == "pika/plop"
def test_007(finder):
results = finder.get_relative_from_paths("/home/foo/pika/plop", [
"/etc",
"/home/foo/pika",
"/home/bar/pika",
"/home/bar",
])
assert results == "plop"
def test_008(finder):
results = finder.get_relative_from_paths("/home/foo/pika/bim/bam/plop", [
"/etc",
"/home/foo/pika/bim/bam",
"/home/foo/pika/bim/bom",
"/home/bar/pika",
"/home/bar",
])
assert results == "plop"
def test_009(finder):
"""
Unable to find relative path raise an exception
"""
with pytest.raises(FinderException):
finder.get_relative_from_paths("/home/foo/pika/bim/bam/plop", [
"/etc",
"/home/foo/pika/bim/bom",
"/home/bar/pika",
"/home/bar",
])
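# Hedged reference sketch (not boussole's actual implementation): the tests
# above are consistent with resolving the file against the longest base path
# that is a parent directory of it, raising FinderException when none matches.
import os
def _reference_get_relative_from_paths(filepath, paths):
    """Minimal reference model of finder.get_relative_from_paths."""
    parents = [p for p in paths if filepath.startswith(p.rstrip("/") + "/")]
    if not parents:
        raise FinderException("No parent path found for: {}".format(filepath))
    return os.path.relpath(filepath, max(parents, key=len))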
|
sveetch/boussole
|
tests/002_finder/005_relativefrompaths.py
|
Python
|
mit
| 2,097
| 0
|
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import inspect
import os
import re
import warnings
from collections import OrderedDict
from difflib import get_close_matches
from pathlib import Path
from transformers import is_flax_available, is_tf_available, is_torch_available
from transformers.file_utils import ENV_VARS_TRUE_VALUES
from transformers.models.auto import get_values
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_repo.py
PATH_TO_TRANSFORMERS = "src/transformers"
PATH_TO_TESTS = "tests"
PATH_TO_DOC = "docs/source"
# Update this list with models that are supposed to be private.
PRIVATE_MODELS = [
"DPRSpanPredictor",
"RealmBertModel",
"T5Stack",
"TFDPRSpanPredictor",
]
# Update this list for models that are not tested with a comment explaining the reason it should not be.
# Being in this list is an exception and should **not** be the rule.
IGNORE_NON_TESTED = PRIVATE_MODELS.copy() + [
# models to ignore for not tested
"SegformerDecodeHead", # Building part of bigger (tested) model.
"PLBartEncoder", # Building part of bigger (tested) model.
"PLBartDecoder", # Building part of bigger (tested) model.
"PLBartDecoderWrapper", # Building part of bigger (tested) model.
"BigBirdPegasusEncoder", # Building part of bigger (tested) model.
"BigBirdPegasusDecoder", # Building part of bigger (tested) model.
"BigBirdPegasusDecoderWrapper", # Building part of bigger (tested) model.
"DetrEncoder", # Building part of bigger (tested) model.
"DetrDecoder", # Building part of bigger (tested) model.
"DetrDecoderWrapper", # Building part of bigger (tested) model.
"M2M100Encoder", # Building part of bigger (tested) model.
"M2M100Decoder", # Building part of bigger (tested) model.
"Speech2TextEncoder", # Building part of bigger (tested) model.
"Speech2TextDecoder", # Building part of bigger (tested) model.
"LEDEncoder", # Building part of bigger (tested) model.
"LEDDecoder", # Building part of bigger (tested) model.
"BartDecoderWrapper", # Building part of bigger (tested) model.
"BartEncoder", # Building part of bigger (tested) model.
"BertLMHeadModel", # Needs to be setup as decoder.
"BlenderbotSmallEncoder", # Building part of bigger (tested) model.
"BlenderbotSmallDecoderWrapper", # Building part of bigger (tested) model.
"BlenderbotEncoder", # Building part of bigger (tested) model.
"BlenderbotDecoderWrapper", # Building part of bigger (tested) model.
"MBartEncoder", # Building part of bigger (tested) model.
"MBartDecoderWrapper", # Building part of bigger (tested) model.
"MegatronBertLMHeadModel", # Building part of bigger (tested) model.
"MegatronBertEncoder", # Building part of bigger (tested) model.
"MegatronBertDecoder", # Building part of bigger (tested) model.
"MegatronBertDecoderWrapper", # Building part of bigger (tested) model.
"PegasusEncoder", # Building part of bigger (tested) model.
"PegasusDecoderWrapper", # Building part of bigger (tested) model.
"DPREncoder", # Building part of bigger (tested) model.
"ProphetNetDecoderWrapper", # Building part of bigger (tested) model.
"RealmBertModel", # Building part of bigger (tested) model.
"RealmReader", # Not regular model.
"RealmScorer", # Not regular model.
"RealmForOpenQA", # Not regular model.
"ReformerForMaskedLM", # Needs to be setup as decoder.
"Speech2Text2DecoderWrapper", # Building part of bigger (tested) model.
"TFDPREncoder", # Building part of bigger (tested) model.
"TFElectraMainLayer", # Building part of bigger (tested) model (should it be a TFPreTrainedModel ?)
"TFRobertaForMultipleChoice", # TODO: fix
"TrOCRDecoderWrapper", # Building part of bigger (tested) model.
"SeparableConv1D", # Building part of bigger (tested) model.
"FlaxBartForCausalLM", # Building part of bigger (tested) model.
]
# Update this list with test files that don't have a tester with a `all_model_classes` variable and which don't
# trigger the common tests.
TEST_FILES_WITH_NO_COMMON_TESTS = [
"camembert/test_modeling_camembert.py",
"mt5/test_modeling_flax_mt5.py",
"mbart/test_modeling_mbart.py",
"mt5/test_modeling_mt5.py",
"pegasus/test_modeling_pegasus.py",
"camembert/test_modeling_tf_camembert.py",
"mt5/test_modeling_tf_mt5.py",
"xlm_roberta/test_modeling_tf_xlm_roberta.py",
"xlm_roberta/test_modeling_flax_xlm_roberta.py",
"xlm_prophetnet/test_modeling_xlm_prophetnet.py",
"xlm_roberta/test_modeling_xlm_roberta.py",
"vision_text_dual_encoder/test_modeling_vision_text_dual_encoder.py",
"vision_text_dual_encoder/test_modeling_flax_vision_text_dual_encoder.py",
]
# Update this list for models that are not in any of the auto MODEL_XXX_MAPPING. Being in this list is an exception and
# should **not** be the rule.
IGNORE_NON_AUTO_CONFIGURED = PRIVATE_MODELS.copy() + [
# models to ignore for model xxx mapping
"ViltForQuestionAnswering",
"ViltForImagesAndTextClassification",
"ViltForImageAndTextRetrieval",
"ViltForMaskedLM",
"XGLMEncoder",
"XGLMDecoder",
"XGLMDecoderWrapper",
"PerceiverForMultimodalAutoencoding",
"PerceiverForOpticalFlow",
"SegformerDecodeHead",
"FlaxBeitForMaskedImageModeling",
"PLBartEncoder",
"PLBartDecoder",
"PLBartDecoderWrapper",
"BeitForMaskedImageModeling",
"CLIPTextModel",
"CLIPVisionModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
"FlaxCLIPTextModel",
"FlaxCLIPVisionModel",
"FlaxWav2Vec2ForCTC",
"DetrForSegmentation",
"DPRReader",
"FlaubertForQuestionAnswering",
"GPT2DoubleHeadsModel",
"LukeForMaskedLM",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"OpenAIGPTDoubleHeadsModel",
"RagModel",
"RagSequenceForGeneration",
"RagTokenForGeneration",
"RealmEmbedder",
"RealmForOpenQA",
"RealmScorer",
"RealmReader",
"TFDPRReader",
"TFGPT2DoubleHeadsModel",
"TFOpenAIGPTDoubleHeadsModel",
"TFRagModel",
"TFRagSequenceForGeneration",
"TFRagTokenForGeneration",
"Wav2Vec2ForCTC",
"HubertForCTC",
"SEWForCTC",
"SEWDForCTC",
"XLMForQuestionAnswering",
"XLNetForQuestionAnswering",
"SeparableConv1D",
"VisualBertForRegionToPhraseAlignment",
"VisualBertForVisualReasoning",
"VisualBertForQuestionAnswering",
"VisualBertForMultipleChoice",
"TFWav2Vec2ForCTC",
"TFHubertForCTC",
"MaskFormerForInstanceSegmentation",
]
# Update this list for models that have multiple model types for the same
# model doc
MODEL_TYPE_TO_DOC_MAPPING = OrderedDict(
[
("data2vec-text", "data2vec"),
("data2vec-audio", "data2vec"),
]
)
# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()
def check_model_list():
"""Check the model list inside the transformers library."""
# Get the models from the directory structure of `src/transformers/models/`
models_dir = os.path.join(PATH_TO_TRANSFORMERS, "models")
_models = []
for model in os.listdir(models_dir):
model_dir = os.path.join(models_dir, model)
if os.path.isdir(model_dir) and "__init__.py" in os.listdir(model_dir):
_models.append(model)
    # Get the models exposed as attributes of the `transformers.models` module
models = [model for model in dir(transformers.models) if not model.startswith("__")]
missing_models = sorted(list(set(_models).difference(models)))
if missing_models:
raise Exception(
f"The following models should be included in {models_dir}/__init__.py: {','.join(missing_models)}."
)
# If some modeling modules should be ignored for all checks, they should be added in the nested list
# _ignore_modules of this function.
def get_model_modules():
"""Get the model modules inside the transformers library."""
_ignore_modules = [
"modeling_auto",
"modeling_encoder_decoder",
"modeling_marian",
"modeling_mmbt",
"modeling_outputs",
"modeling_retribert",
"modeling_utils",
"modeling_flax_auto",
"modeling_flax_encoder_decoder",
"modeling_flax_utils",
"modeling_speech_encoder_decoder",
"modeling_flax_speech_encoder_decoder",
"modeling_flax_vision_encoder_decoder",
"modeling_transfo_xl_utilities",
"modeling_tf_auto",
"modeling_tf_encoder_decoder",
"modeling_tf_outputs",
"modeling_tf_pytorch_utils",
"modeling_tf_utils",
"modeling_tf_transfo_xl_utilities",
"modeling_tf_vision_encoder_decoder",
"modeling_vision_encoder_decoder",
]
modules = []
for model in dir(transformers.models):
# There are some magic dunder attributes in the dir, we ignore them
if not model.startswith("__"):
model_module = getattr(transformers.models, model)
for submodule in dir(model_module):
if submodule.startswith("modeling") and submodule not in _ignore_modules:
modeling_module = getattr(model_module, submodule)
if inspect.ismodule(modeling_module):
modules.append(modeling_module)
return modules
def get_models(module, include_pretrained=False):
"""Get the objects in module that are models."""
models = []
model_classes = (transformers.PreTrainedModel, transformers.TFPreTrainedModel, transformers.FlaxPreTrainedModel)
for attr_name in dir(module):
if not include_pretrained and ("Pretrained" in attr_name or "PreTrained" in attr_name):
continue
attr = getattr(module, attr_name)
if isinstance(attr, type) and issubclass(attr, model_classes) and attr.__module__ == module.__name__:
models.append((attr_name, attr))
return models
def is_a_private_model(model):
"""Returns True if the model should not be in the main init."""
if model in PRIVATE_MODELS:
return True
# Wrapper, Encoder and Decoder are all privates
if model.endswith("Wrapper"):
return True
if model.endswith("Encoder"):
return True
if model.endswith("Decoder"):
return True
return False
def check_models_are_in_init():
"""Checks all models defined in the library are in the main init."""
models_not_in_init = []
dir_transformers = dir(transformers)
for module in get_model_modules():
models_not_in_init += [
model[0] for model in get_models(module, include_pretrained=True) if model[0] not in dir_transformers
]
# Remove private models
models_not_in_init = [model for model in models_not_in_init if not is_a_private_model(model)]
if len(models_not_in_init) > 0:
raise Exception(f"The following models should be in the main init: {','.join(models_not_in_init)}.")
# If some test_modeling files should be ignored when checking models are all tested, they should be added in the
# nested list _ignore_files of this function.
def get_model_test_files():
"""Get the model test files."""
_ignore_files = [
"test_modeling_common",
"test_modeling_encoder_decoder",
"test_modeling_flax_encoder_decoder",
"test_modeling_flax_speech_encoder_decoder",
"test_modeling_marian",
"test_modeling_tf_common",
"test_modeling_tf_encoder_decoder",
]
test_files = []
for file_or_dir in os.listdir(PATH_TO_TESTS):
path = os.path.join(PATH_TO_TESTS, file_or_dir)
if os.path.isdir(path):
filenames = [os.path.join(file_or_dir, file) for file in os.listdir(path)]
else:
filenames = [file_or_dir]
for filename in filenames:
if (
os.path.isfile(os.path.join(PATH_TO_TESTS, filename))
and "test_modeling" in filename
and not os.path.splitext(filename)[0] in _ignore_files
):
test_files.append(filename)
return test_files
# This is a bit hacky but I didn't find a way to import the test_file as a module and read inside the tester class
# for the all_model_classes variable.
def find_tested_models(test_file):
"""Parse the content of test_file to detect what's in all_model_classes"""
# This is a bit hacky but I didn't find a way to import the test_file as a module and read inside the class
with open(os.path.join(PATH_TO_TESTS, test_file), "r", encoding="utf-8", newline="\n") as f:
content = f.read()
all_models = re.findall(r"all_model_classes\s+=\s+\(\s*\(([^\)]*)\)", content)
# Check with one less parenthesis as well
all_models += re.findall(r"all_model_classes\s+=\s+\(([^\)]*)\)", content)
if len(all_models) > 0:
model_tested = []
for entry in all_models:
for line in entry.split(","):
name = line.strip()
if len(name) > 0:
model_tested.append(name)
return model_tested
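# Hedged illustration (not part of the original script): the two regexes
# above cover both common declaration styles, e.g.
#   all_model_classes = (BertModel, BertForMaskedLM)
#   all_model_classes = ((BertModel,) if is_torch_available() else ())
# and the comma-split loop turns the captured group into a clean list of
# class names such as ["BertModel", "BertForMaskedLM"].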
def check_models_are_tested(module, test_file):
"""Check models defined in module are tested in test_file."""
# XxxPreTrainedModel are not tested
defined_models = get_models(module)
tested_models = find_tested_models(test_file)
if tested_models is None:
if test_file in TEST_FILES_WITH_NO_COMMON_TESTS:
return
return [
f"{test_file} should define `all_model_classes` to apply common tests to the models it tests. "
+ "If this intentional, add the test filename to `TEST_FILES_WITH_NO_COMMON_TESTS` in the file "
+ "`utils/check_repo.py`."
]
failures = []
for model_name, _ in defined_models:
if model_name not in tested_models and model_name not in IGNORE_NON_TESTED:
failures.append(
f"{model_name} is defined in {module.__name__} but is not tested in "
+ f"{os.path.join(PATH_TO_TESTS, test_file)}. Add it to the all_model_classes in that file."
+ "If common tests should not applied to that model, add its name to `IGNORE_NON_TESTED`"
+ "in the file `utils/check_repo.py`."
)
return failures
def check_all_models_are_tested():
"""Check all models are properly tested."""
modules = get_model_modules()
test_files = get_model_test_files()
failures = []
for module in modules:
test_file = [file for file in test_files if f"test_{module.__name__.split('.')[-1]}.py" in file]
if len(test_file) == 0:
failures.append(f"{module.__name__} does not have its corresponding test file {test_file}.")
elif len(test_file) > 1:
failures.append(f"{module.__name__} has several test files: {test_file}.")
else:
test_file = test_file[0]
new_failures = check_models_are_tested(module, test_file)
if new_failures is not None:
failures += new_failures
if len(failures) > 0:
raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures))
def get_all_auto_configured_models():
"""Return the list of all models in at least one auto class."""
result = set() # To avoid duplicates we concatenate all model classes in a set.
if is_torch_available():
for attr_name in dir(transformers.models.auto.modeling_auto):
if attr_name.startswith("MODEL_") and attr_name.endswith("MAPPING_NAMES"):
result = result | set(get_values(getattr(transformers.models.auto.modeling_auto, attr_name)))
if is_tf_available():
for attr_name in dir(transformers.models.auto.modeling_tf_auto):
if attr_name.startswith("TF_MODEL_") and attr_name.endswith("MAPPING_NAMES"):
result = result | set(get_values(getattr(transformers.models.auto.modeling_tf_auto, attr_name)))
if is_flax_available():
for attr_name in dir(transformers.models.auto.modeling_flax_auto):
if attr_name.startswith("FLAX_MODEL_") and attr_name.endswith("MAPPING_NAMES"):
result = result | set(get_values(getattr(transformers.models.auto.modeling_flax_auto, attr_name)))
return [cls for cls in result]
def ignore_unautoclassed(model_name):
"""Rules to determine if `name` should be in an auto class."""
# Special white list
if model_name in IGNORE_NON_AUTO_CONFIGURED:
return True
# Encoder and Decoder should be ignored
if "Encoder" in model_name or "Decoder" in model_name:
return True
return False
def check_models_are_auto_configured(module, all_auto_models):
"""Check models defined in module are each in an auto class."""
defined_models = get_models(module)
failures = []
for model_name, _ in defined_models:
if model_name not in all_auto_models and not ignore_unautoclassed(model_name):
failures.append(
f"{model_name} is defined in {module.__name__} but is not present in any of the auto mapping. "
"If that is intended behavior, add its name to `IGNORE_NON_AUTO_CONFIGURED` in the file "
"`utils/check_repo.py`."
)
return failures
def check_all_models_are_auto_configured():
"""Check all models are each in an auto class."""
missing_backends = []
if not is_torch_available():
missing_backends.append("PyTorch")
if not is_tf_available():
missing_backends.append("TensorFlow")
if not is_flax_available():
missing_backends.append("Flax")
if len(missing_backends) > 0:
missing = ", ".join(missing_backends)
if os.getenv("TRANSFORMERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
raise Exception(
"Full quality checks require all backends to be installed (with `pip install -e .[dev]` in the "
f"Transformers repo, the following are missing: {missing}."
)
else:
warnings.warn(
"Full quality checks require all backends to be installed (with `pip install -e .[dev]` in the "
f"Transformers repo, the following are missing: {missing}. While it's probably fine as long as you "
"didn't make any change in one of those backends modeling files, you should probably execute the "
"command above to be on the safe side."
)
modules = get_model_modules()
all_auto_models = get_all_auto_configured_models()
failures = []
for module in modules:
new_failures = check_models_are_auto_configured(module, all_auto_models)
if new_failures is not None:
failures += new_failures
if len(failures) > 0:
raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures))
_re_decorator = re.compile(r"^\s*@(\S+)\s+$")
def check_decorator_order(filename):
"""Check that in the test file `filename` the slow decorator is always last."""
with open(filename, "r", encoding="utf-8", newline="\n") as f:
lines = f.readlines()
decorator_before = None
errors = []
for i, line in enumerate(lines):
search = _re_decorator.search(line)
if search is not None:
decorator_name = search.groups()[0]
if decorator_before is not None and decorator_name.startswith("parameterized"):
errors.append(i)
decorator_before = decorator_name
elif decorator_before is not None:
decorator_before = None
return errors
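# Hedged illustration (not part of the original script): a test written as
#   @slow
#   @parameterized.expand(...)
#   def test_foo(self): ...
# is flagged, since decorators placed above `parameterized` variants are lost
# when the test is expanded; writing @parameterized.expand(...) first avoids that.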
def check_all_decorator_order():
"""Check that in all test files, the slow decorator is always last."""
errors = []
for fname in os.listdir(PATH_TO_TESTS):
if fname.endswith(".py"):
filename = os.path.join(PATH_TO_TESTS, fname)
new_errors = check_decorator_order(filename)
errors += [f"- {filename}, line {i}" for i in new_errors]
if len(errors) > 0:
msg = "\n".join(errors)
raise ValueError(
f"The parameterized decorator (and its variants) should always be first, but this is not the case in the following files:\n{msg}"
)
def find_all_documented_objects():
"""Parse the content of all doc files to detect which classes and functions it documents"""
documented_obj = []
for doc_file in Path(PATH_TO_DOC).glob("**/*.rst"):
with open(doc_file, "r", encoding="utf-8", newline="\n") as f:
content = f.read()
raw_doc_objs = re.findall(r"(?:autoclass|autofunction):: transformers.(\S+)\s+", content)
documented_obj += [obj.split(".")[-1] for obj in raw_doc_objs]
for doc_file in Path(PATH_TO_DOC).glob("**/*.mdx"):
with open(doc_file, "r", encoding="utf-8", newline="\n") as f:
content = f.read()
raw_doc_objs = re.findall("\[\[autodoc\]\]\s+(\S+)\s+", content)
documented_obj += [obj.split(".")[-1] for obj in raw_doc_objs]
return documented_obj
# One good reason for not being documented is to be deprecated. Put in this list deprecated objects.
DEPRECATED_OBJECTS = [
"AutoModelWithLMHead",
"BartPretrainedModel",
"DataCollator",
"DataCollatorForSOP",
"GlueDataset",
"GlueDataTrainingArguments",
"LineByLineTextDataset",
"LineByLineWithRefDataset",
"LineByLineWithSOPTextDataset",
"PretrainedBartModel",
"PretrainedFSMTModel",
"SingleSentenceClassificationProcessor",
"SquadDataTrainingArguments",
"SquadDataset",
"SquadExample",
"SquadFeatures",
"SquadV1Processor",
"SquadV2Processor",
"TFAutoModelWithLMHead",
"TFBartPretrainedModel",
"TextDataset",
"TextDatasetForNextSentencePrediction",
"Wav2Vec2ForMaskedLM",
"Wav2Vec2Tokenizer",
"glue_compute_metrics",
"glue_convert_examples_to_features",
"glue_output_modes",
"glue_processors",
"glue_tasks_num_labels",
"squad_convert_examples_to_features",
"xnli_compute_metrics",
"xnli_output_modes",
"xnli_processors",
"xnli_tasks_num_labels",
"TFTrainer",
"TFTrainingArguments",
]
# Exceptionally, some objects should not be documented after all rules passed.
# ONLY PUT SOMETHING IN THIS LIST AS A LAST RESORT!
UNDOCUMENTED_OBJECTS = [
"AddedToken", # This is a tokenizers class.
"BasicTokenizer", # Internal, should never have been in the main init.
"CharacterTokenizer", # Internal, should never have been in the main init.
"DPRPretrainedReader", # Like an Encoder.
"DummyObject", # Just picked by mistake sometimes.
"MecabTokenizer", # Internal, should never have been in the main init.
"ModelCard", # Internal type.
"SqueezeBertModule", # Internal building block (should have been called SqueezeBertLayer)
"TFDPRPretrainedReader", # Like an Encoder.
"TransfoXLCorpus", # Internal type.
"WordpieceTokenizer", # Internal, should never have been in the main init.
"absl", # External module
"add_end_docstrings", # Internal, should never have been in the main init.
"add_start_docstrings", # Internal, should never have been in the main init.
"cached_path", # Internal used for downloading models.
"convert_tf_weight_name_to_pt_weight_name", # Internal used to convert model weights
"logger", # Internal logger
"logging", # External module
"requires_backends", # Internal function
]
# This list should be empty. Objects in it should get their own doc page.
SHOULD_HAVE_THEIR_OWN_PAGE = [
# Benchmarks
"PyTorchBenchmark",
"PyTorchBenchmarkArguments",
"TensorFlowBenchmark",
"TensorFlowBenchmarkArguments",
]
def ignore_undocumented(name):
"""Rules to determine if `name` should be undocumented."""
# NOT DOCUMENTED ON PURPOSE.
# Constants uppercase are not documented.
if name.isupper():
return True
# PreTrainedModels / Encoders / Decoders / Layers / Embeddings / Attention are not documented.
if (
name.endswith("PreTrainedModel")
or name.endswith("Decoder")
or name.endswith("Encoder")
or name.endswith("Layer")
or name.endswith("Embeddings")
or name.endswith("Attention")
):
return True
# Submodules are not documented.
if os.path.isdir(os.path.join(PATH_TO_TRANSFORMERS, name)) or os.path.isfile(
os.path.join(PATH_TO_TRANSFORMERS, f"{name}.py")
):
return True
# All load functions are not documented.
if name.startswith("load_tf") or name.startswith("load_pytorch"):
return True
# is_xxx_available functions are not documented.
if name.startswith("is_") and name.endswith("_available"):
return True
# Deprecated objects are not documented.
if name in DEPRECATED_OBJECTS or name in UNDOCUMENTED_OBJECTS:
return True
# MMBT model does not really work.
if name.startswith("MMBT"):
return True
if name in SHOULD_HAVE_THEIR_OWN_PAGE:
return True
return False
def check_all_objects_are_documented():
"""Check all models are properly documented."""
documented_objs = find_all_documented_objects()
modules = transformers._modules
objects = [c for c in dir(transformers) if c not in modules and not c.startswith("_")]
undocumented_objs = [c for c in objects if c not in documented_objs and not ignore_undocumented(c)]
if len(undocumented_objs) > 0:
raise Exception(
"The following objects are in the public init so should be documented:\n - "
+ "\n - ".join(undocumented_objs)
)
check_docstrings_are_in_md()
check_model_type_doc_match()
def check_model_type_doc_match():
"""Check all doc pages have a corresponding model type."""
model_doc_folder = Path(PATH_TO_DOC) / "model_doc"
model_docs = [m.stem for m in model_doc_folder.glob("*.mdx")]
model_types = list(transformers.models.auto.configuration_auto.MODEL_NAMES_MAPPING.keys())
model_types = [MODEL_TYPE_TO_DOC_MAPPING[m] if m in MODEL_TYPE_TO_DOC_MAPPING else m for m in model_types]
errors = []
for m in model_docs:
if m not in model_types and m != "auto":
close_matches = get_close_matches(m, model_types)
error_message = f"{m} is not a proper model identifier."
if len(close_matches) > 0:
close_matches = "/".join(close_matches)
error_message += f" Did you mean {close_matches}?"
errors.append(error_message)
if len(errors) > 0:
raise ValueError(
"Some model doc pages do not match any existing model type:\n"
+ "\n".join(errors)
+ "\nYou can add any missing model type to the `MODEL_NAMES_MAPPING` constant in "
"models/auto/configuration_auto.py."
)
# Re pattern to catch :obj:`xx`, :class:`xx`, :func:`xx` or :meth:`xx`.
_re_rst_special_words = re.compile(r":(?:obj|func|class|meth):`([^`]+)`")
# Re pattern to catch things between double backquotes.
_re_double_backquotes = re.compile(r"(^|[^`])``([^`]+)``([^`]|$)")
# Re pattern to catch example introduction.
_re_rst_example = re.compile(r"^\s*Example.*::\s*$", flags=re.MULTILINE)
def is_rst_docstring(docstring):
"""
Returns `True` if `docstring` is written in rst.
"""
if _re_rst_special_words.search(docstring) is not None:
return True
if _re_double_backquotes.search(docstring) is not None:
return True
if _re_rst_example.search(docstring) is not None:
return True
return False
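# Hedged illustration (not part of the original script):
#   is_rst_docstring("Returns :obj:`int`.")      -> True   (rst role)
#   is_rst_docstring("Uses ``foo`` internally.") -> True   (double backquotes)
#   is_rst_docstring("Returns `int`.")           -> False  (markdown style)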
def check_docstrings_are_in_md():
"""Check all docstrings are in md"""
files_with_rst = []
for file in Path(PATH_TO_TRANSFORMERS).glob("**/*.py"):
with open(file, "r") as f:
code = f.read()
docstrings = code.split('"""')
for idx, docstring in enumerate(docstrings):
if idx % 2 == 0 or not is_rst_docstring(docstring):
continue
files_with_rst.append(file)
break
if len(files_with_rst) > 0:
raise ValueError(
"The following files have docstrings written in rst:\n"
+ "\n".join([f"- {f}" for f in files_with_rst])
+ "\nTo fix this run `doc-builder convert path_to_py_file` after installing `doc-builder`\n"
"(`pip install git+https://github.com/huggingface/doc-builder`)"
)
def check_repo_quality():
"""Check all models are properly tested and documented."""
print("Checking all models are included.")
check_model_list()
print("Checking all models are public.")
check_models_are_in_init()
print("Checking all models are properly tested.")
check_all_decorator_order()
check_all_models_are_tested()
print("Checking all objects are properly documented.")
check_all_objects_are_documented()
print("Checking all models are in at least one auto class.")
check_all_models_are_auto_configured()
if __name__ == "__main__":
check_repo_quality()
|
huggingface/transformers
|
utils/check_repo.py
|
Python
|
apache-2.0
| 30,085
| 0.002393
|
from django.http import HttpResponse
from django.shortcuts import render
def video_calling(request):
return render(request,'video_calling.html')
|
ishandongol/voli-fix-vetnae
|
videocalling/views.py
|
Python
|
mit
| 150
| 0.013333
|
# -*- coding: utf-8 -*-
from django.conf import settings as settings
from django.contrib.auth.decorators import login_required
from django.core.context_processors import csrf
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.utils import simplejson
from django.utils.translation import ugettext_lazy as _
from mitxmako.shortcuts import render_to_response
from courseware.courses import get_opt_course_with_access
from courseware.access import has_access
from xmodule.course_module import CourseDescriptor
from xmodule.modulestore.django import modulestore
from .models import Revision, Article, Namespace, CreateArticleForm, RevisionFormWithTitle, RevisionForm
import wiki_settings
def wiki_reverse(wiki_page, article=None, course=None, namespace=None, args=[], kwargs={}):
kwargs = dict(kwargs) # TODO: Figure out why if I don't do this kwargs sometimes contains {'article_path'}
    if 'course_id' not in kwargs and course:
        kwargs['course_id'] = course.id
    if 'article_path' not in kwargs and article:
        kwargs['article_path'] = article.get_path()
    if 'namespace' not in kwargs and namespace:
        kwargs['namespace'] = namespace
return reverse(wiki_page, kwargs=kwargs, args=args)
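# Hedged usage sketch (not part of the original module): callers typically
# pass either an article or explicit kwargs, e.g.
#   wiki_reverse('wiki_view', article=article, course=course)
# which fills in course_id and article_path before delegating to Django's
# reverse().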
def update_template_dictionary(dictionary, request=None, course=None, article=None, revision=None):
if article:
dictionary['wiki_article'] = article
dictionary['wiki_title'] = article.title # TODO: What is the title when viewing the article in a course?
if not course and 'namespace' not in dictionary:
dictionary['namespace'] = article.namespace.name
if course:
dictionary['course'] = course
if 'namespace' not in dictionary:
dictionary['namespace'] = "edX"
else:
dictionary['course'] = None
if revision:
dictionary['wiki_article_revision'] = revision
        dictionary['wiki_current_revision_deleted'] = revision.deleted != 0
if request:
dictionary.update(csrf(request))
if request and course:
dictionary['staff_access'] = has_access(request.user, course, 'staff')
else:
dictionary['staff_access'] = False
def view(request, article_path, course_id=None):
course = get_opt_course_with_access(request.user, course_id, 'load')
(article, err) = get_article(request, article_path, course)
if err:
return err
perm_err = check_permissions(request, article, course, check_read=True, check_deleted=True)
if perm_err:
return perm_err
d = {}
update_template_dictionary(d, request, course, article, article.current_revision)
return render_to_response('simplewiki/simplewiki_view.html', d)
def view_revision(request, revision_number, article_path, course_id=None):
course = get_opt_course_with_access(request.user, course_id, 'load')
(article, err) = get_article(request, article_path, course)
if err:
return err
try:
revision = Revision.objects.get(counter=int(revision_number), article=article)
    except (Revision.DoesNotExist, ValueError):
d = {'wiki_err_norevision': revision_number}
update_template_dictionary(d, request, course, article)
return render_to_response('simplewiki/simplewiki_error.html', d)
perm_err = check_permissions(request, article, course, check_read=True, check_deleted=True, revision=revision)
if perm_err:
return perm_err
d = {}
update_template_dictionary(d, request, course, article, revision)
return render_to_response('simplewiki/simplewiki_view.html', d)
def root_redirect(request, course_id=None):
course = get_opt_course_with_access(request.user, course_id, 'load')
#TODO: Add a default namespace to settings.
namespace = "edX"
try:
root = Article.get_root(namespace)
return HttpResponseRedirect(reverse('wiki_view', kwargs={'course_id': course_id, 'article_path': root.get_path()}))
except:
# If the root is not found, we probably are loading this class for the first time
# We should make sure the namespace exists so the root article can be created.
Namespace.ensure_namespace(namespace)
err = not_found(request, namespace + '/', course)
return err
def create(request, article_path, course_id=None):
course = get_opt_course_with_access(request.user, course_id, 'load')
article_path_components = article_path.split('/')
# Ensure the namespace exists
    if len(article_path_components) < 1 or len(article_path_components[0]) == 0:
d = {'wiki_err_no_namespace': True}
update_template_dictionary(d, request, course)
return render_to_response('simplewiki/simplewiki_error.html', d)
namespace = None
try:
namespace = Namespace.objects.get(name__exact=article_path_components[0])
    except (Namespace.DoesNotExist, ValueError):
d = {'wiki_err_bad_namespace': True}
update_template_dictionary(d, request, course)
return render_to_response('simplewiki/simplewiki_error.html', d)
# See if the article already exists
article_slug = article_path_components[1] if len(article_path_components) >= 2 else ''
#TODO: Make sure the slug only contains legal characters (which is already done a bit by the url regex)
try:
existing_article = Article.objects.get(namespace=namespace, slug__exact=article_slug)
#It already exists, so we just redirect to view the article
return HttpResponseRedirect(wiki_reverse("wiki_view", existing_article, course))
except Article.DoesNotExist:
#This is good. The article doesn't exist
pass
#TODO: Once we have permissions for namespaces, we should check for create permissions
#check_permissions(request, #namespace#, check_locked=False, check_write=True, check_deleted=True)
if request.method == 'POST':
f = CreateArticleForm(request.POST)
if f.is_valid():
article = Article()
article.slug = article_slug
if not request.user.is_anonymous():
article.created_by = request.user
article.title = f.cleaned_data.get('title')
article.namespace = namespace
a = article.save()
new_revision = f.save(commit=False)
if not request.user.is_anonymous():
new_revision.revision_user = request.user
new_revision.article = article
new_revision.save()
return HttpResponseRedirect(wiki_reverse("wiki_view", article, course))
else:
f = CreateArticleForm(initial={'title': request.GET.get('wiki_article_name', article_slug),
'contents': _('Headline\n===\n\n')})
d = {'wiki_form': f, 'create_article': True, 'namespace': namespace.name}
update_template_dictionary(d, request, course)
return render_to_response('simplewiki/simplewiki_edit.html', d)
def edit(request, article_path, course_id=None):
course = get_opt_course_with_access(request.user, course_id, 'load')
(article, err) = get_article(request, article_path, course)
if err:
return err
# Check write permissions
perm_err = check_permissions(request, article, course, check_write=True, check_locked=True, check_deleted=False)
if perm_err:
return perm_err
if wiki_settings.WIKI_ALLOW_TITLE_EDIT:
EditForm = RevisionFormWithTitle
else:
EditForm = RevisionForm
if request.method == 'POST':
f = EditForm(request.POST)
if f.is_valid():
new_revision = f.save(commit=False)
new_revision.article = article
            if 'delete' in request.POST:
if (article.current_revision.deleted == 1): # This article has already been deleted. Redirect
return HttpResponseRedirect(wiki_reverse('wiki_view', article, course))
new_revision.contents = ""
new_revision.deleted = 1
elif not new_revision.get_diff():
return HttpResponseRedirect(wiki_reverse('wiki_view', article, course))
if not request.user.is_anonymous():
new_revision.revision_user = request.user
new_revision.save()
if wiki_settings.WIKI_ALLOW_TITLE_EDIT:
new_revision.article.title = f.cleaned_data['title']
new_revision.article.save()
return HttpResponseRedirect(wiki_reverse('wiki_view', article, course))
else:
startContents = article.current_revision.contents if (article.current_revision.deleted == 0) else 'Headline\n===\n\n'
f = EditForm({'contents': startContents, 'title': article.title})
d = {'wiki_form': f}
update_template_dictionary(d, request, course, article)
return render_to_response('simplewiki/simplewiki_edit.html', d)
def history(request, article_path, page=1, course_id=None):
course = get_opt_course_with_access(request.user, course_id, 'load')
(article, err) = get_article(request, article_path, course)
if err:
return err
perm_err = check_permissions(request, article, course, check_read=True, check_deleted=False)
if perm_err:
return perm_err
page_size = 10
if page is None:
page = 1
try:
p = int(page)
except ValueError:
p = 1
history = Revision.objects.filter(article__exact=article).order_by('-counter').select_related('previous_revision__counter', 'revision_user', 'wiki_article')
if request.method == 'POST':
        if 'revision' in request.POST:  # They selected a version, but they can be either deleting or changing the version
            perm_err = check_permissions(request, article, course, check_write=True, check_locked=True)
            if perm_err:
                return perm_err
            redirectURL = wiki_reverse('wiki_view', article, course)
            try:
                r = int(request.POST['revision'])
                revision = Revision.objects.get(id=r)
                if 'change' in request.POST:
                    article.current_revision = revision
                    article.save()
                elif 'view' in request.POST:
                    redirectURL = wiki_reverse('wiki_view_revision', course=course,
                                               kwargs={'revision_number': revision.counter, 'article_path': article.get_path()})
                # The rest of these are admin functions
                elif 'delete' in request.POST and request.user.is_superuser:
                    if revision.deleted == 0:
                        revision.adminSetDeleted(2)
                elif 'restore' in request.POST and request.user.is_superuser:
                    if revision.deleted == 2:
                        revision.adminSetDeleted(0)
                elif 'delete_all' in request.POST and request.user.is_superuser:
                    Revision.objects.filter(article__exact=article, deleted=0).update(deleted=2)
                elif 'lock_article' in request.POST:
                    article.locked = not article.locked
                    article.save()
            except Exception as e:
                print str(e)
            finally:
                return HttpResponseRedirect(redirectURL)
#
#
# <input type="submit" name="delete" value="Delete revision"/>
# <input type="submit" name="restore" value="Restore revision"/>
# <input type="submit" name="delete_all" value="Delete all revisions">
# %else:
# <input type="submit" name="delete_article" value="Delete all revisions">
#
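    # Ceiling division (integer '/' under Python 2): the number of pages
    # needed to show all revisions at page_size per page.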
page_count = (history.count() + (page_size - 1)) / page_size
if p > page_count:
p = 1
beginItem = (p - 1) * page_size
next_page = p + 1 if page_count > p else None
prev_page = p - 1 if p > 1 else None
d = {'wiki_page': p,
'wiki_next_page': next_page,
'wiki_prev_page': prev_page,
'wiki_history': history[beginItem:beginItem + page_size],
'show_delete_revision': request.user.is_superuser}
update_template_dictionary(d, request, course, article)
return render_to_response('simplewiki/simplewiki_history.html', d)
def revision_feed(request, page=1, namespace=None, course_id=None):
course = get_opt_course_with_access(request.user, course_id, 'load')
page_size = 10
if page is None:
page = 1
try:
p = int(page)
except ValueError:
p = 1
history = Revision.objects.order_by('-revision_date').select_related('revision_user', 'article', 'previous_revision')
page_count = (history.count() + (page_size - 1)) / page_size
if p > page_count:
p = 1
beginItem = (p - 1) * page_size
next_page = p + 1 if page_count > p else None
prev_page = p - 1 if p > 1 else None
d = {'wiki_page': p,
'wiki_next_page': next_page,
'wiki_prev_page': prev_page,
'wiki_history': history[beginItem:beginItem + page_size],
'show_delete_revision': request.user.is_superuser,
'namespace': namespace}
update_template_dictionary(d, request, course)
return render_to_response('simplewiki/simplewiki_revision_feed.html', d)
def search_articles(request, namespace=None, course_id=None):
course = get_opt_course_with_access(request.user, course_id, 'load')
# blampe: We should check for the presence of other popular django search
# apps and use those if possible. Only fall back on this as a last resort.
# Adding some context to results (eg where matches were) would also be nice.
# todo: maybe do some perm checking here
if request.method == 'GET':
querystring = request.GET.get('value', '').strip()
else:
querystring = ""
results = Article.objects.all()
if namespace:
results = results.filter(namespace__name__exact=namespace)
if request.user.is_superuser:
results = results.order_by('current_revision__deleted')
else:
results = results.filter(current_revision__deleted=0)
if querystring:
for queryword in querystring.split():
# Basic negation is as fancy as we get right now
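            # (e.g. a hypothetical query "linear -algebra" keeps articles
            # matching "linear" and excludes those matching "algebra")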
if queryword[0] == '-' and len(queryword) > 1:
results._search = lambda x: results.exclude(x)
queryword = queryword[1:]
else:
results._search = lambda x: results.filter(x)
results = results._search(Q(current_revision__contents__icontains=queryword) | \
Q(title__icontains=queryword))
results = results.select_related('current_revision__deleted', 'namespace')
results = sorted(results, key=lambda article: (article.current_revision.deleted, article.get_path().lower()))
if len(results) == 1 and querystring:
return HttpResponseRedirect(wiki_reverse('wiki_view', article=results[0], course=course))
else:
d = {'wiki_search_results': results,
'wiki_search_query': querystring,
'namespace': namespace}
update_template_dictionary(d, request, course)
return render_to_response('simplewiki/simplewiki_searchresults.html', d)
def search_add_related(request, course_id, slug, namespace):
course = get_opt_course_with_access(request.user, course_id, 'load')
(article, err) = get_article(request, slug, namespace if namespace else course_id)
if err:
return err
perm_err = check_permissions(request, article, course, check_read=True)
if perm_err:
return perm_err
search_string = request.GET.get('query', None)
self_pk = request.GET.get('self', None)
if search_string:
results = []
related = Article.objects.filter(title__istartswith=search_string)
others = article.related.all()
if self_pk:
related = related.exclude(pk=self_pk)
if others:
related = related.exclude(related__in=others)
related = related.order_by('title')[:10]
for item in related:
results.append({'id': str(item.id),
'value': item.title,
'info': item.get_url()})
else:
results = []
json = simplejson.dumps({'results': results})
return HttpResponse(json, mimetype='application/json')
def add_related(request, course_id, slug, namespace):
course = get_opt_course_with_access(request.user, course_id, 'load')
(article, err) = get_article(request, slug, namespace if namespace else course_id)
if err:
return err
perm_err = check_permissions(request, article, course, check_write=True, check_locked=True)
if perm_err:
return perm_err
try:
related_id = request.POST['id']
rel = Article.objects.get(id=related_id)
has_already = article.related.filter(id=related_id).count()
        if has_already == 0 and rel != article:
article.related.add(rel)
article.save()
    except (KeyError, ValueError, Article.DoesNotExist):
pass
finally:
return HttpResponseRedirect(reverse('wiki_view', args=(article.get_url(),)))
def remove_related(request, course_id, namespace, slug, related_id):
course = get_opt_course_with_access(request.user, course_id, 'load')
(article, err) = get_article(request, slug, namespace if namespace else course_id)
if err:
return err
perm_err = check_permissions(request, article, course, check_write=True, check_locked=True)
if perm_err:
return perm_err
try:
rel_id = int(related_id)
rel = Article.objects.get(id=rel_id)
article.related.remove(rel)
article.save()
    except (ValueError, Article.DoesNotExist):
pass
finally:
return HttpResponseRedirect(reverse('wiki_view', args=(article.get_url(),)))
def random_article(request, course_id=None):
course = get_opt_course_with_access(request.user, course_id, 'load')
from random import randint
num_arts = Article.objects.count()
article = Article.objects.all()[randint(0, num_arts - 1)]
return HttpResponseRedirect(wiki_reverse('wiki_view', article, course))
def not_found(request, article_path, course):
"""Generate a NOT FOUND message for some URL"""
d = {'wiki_err_notfound': True,
'article_path': article_path,
'namespace': "edX"}
update_template_dictionary(d, request, course)
return render_to_response('simplewiki/simplewiki_error.html', d)
def get_article(request, article_path, course):
err = None
article = None
try:
article = Article.get_article(article_path)
    except (Article.DoesNotExist, ValueError):
err = not_found(request, article_path, course)
return (article, err)
def check_permissions(request, article, course, check_read=False, check_write=False, check_locked=False, check_deleted=False, revision=None):
read_err = check_read and not article.can_read(request.user)
write_err = check_write and not article.can_write(request.user)
locked_err = check_locked and article.locked
if revision is None:
revision = article.current_revision
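    # 'deleted' semantics in this module: 0 = live, 1 = deleted via edit,
    # 2 = deleted by an admin (see history()).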
deleted_err = check_deleted and not (revision.deleted == 0)
    if request.user.is_superuser:
        deleted_err = False
        locked_err = False
if read_err or write_err or locked_err or deleted_err:
d = {'wiki_article': article,
'wiki_err_noread': read_err,
'wiki_err_nowrite': write_err,
'wiki_err_locked': locked_err,
'wiki_err_deleted': deleted_err, }
update_template_dictionary(d, request, course)
# TODO: Make this a little less jarring by just displaying an error
# on the current page? (no such redirect happens for an anon upload yet)
# benjaoming: I think this is the nicest way of displaying an error, but
# these errors shouldn't occur, but rather be prevented on the other pages.
return render_to_response('simplewiki/simplewiki_error.html', d)
else:
return None
####################
# LOGIN PROTECTION #
####################
if wiki_settings.WIKI_REQUIRE_LOGIN_VIEW:
view = login_required(view)
history = login_required(history)
search_articles = login_required(search_articles)
root_redirect = login_required(root_redirect)
revision_feed = login_required(revision_feed)
random_article = login_required(random_article)
search_add_related = login_required(search_add_related)
not_found = login_required(not_found)
view_revision = login_required(view_revision)
if wiki_settings.WIKI_REQUIRE_LOGIN_EDIT:
create = login_required(create)
edit = login_required(edit)
add_related = login_required(add_related)
remove_related = login_required(remove_related)
if wiki_settings.WIKI_CONTEXT_PREPROCESSORS:
settings.TEMPLATE_CONTEXT_PROCESSORS += wiki_settings.WIKI_CONTEXT_PREPROCESSORS
|
elimence/edx-platform
|
lms/djangoapps/simplewiki/views.py
|
Python
|
agpl-3.0
| 21,289
| 0.003852
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for documentation parser."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import inspect
import os
import sys
from tensorflow.python.platform import googletest
from tensorflow.tools.docs import parser
def test_function_for_markdown_reference(unused_arg):
"""Docstring with reference to @{test_function}."""
pass
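# @{symbol} above is the TF docs cross-reference marker that
# parser.replace_references resolves into a Markdown link (exercised in
# test_references_replaced_in_generated_markdown below).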
def test_function(unused_arg, unused_kwarg='default'):
"""Docstring for test function."""
pass
def test_function_with_args_kwargs(unused_arg, *unused_args, **unused_kwargs):
"""Docstring for second test function."""
pass
def test_function_with_fancy_docstring(arg):
"""Function with a fancy docstring.
Args:
arg: An argument.
Returns:
arg: the input, and
arg: the input, again.
"""
return arg, arg
class TestClass(object):
"""Docstring for TestClass itself."""
def a_method(self, arg='default'):
"""Docstring for a method."""
pass
class ChildClass(object):
"""Docstring for a child class."""
pass
@property
def a_property(self):
"""Docstring for a property."""
pass
CLASS_MEMBER = 'a class member'
class ParserTest(googletest.TestCase):
def test_documentation_path(self):
self.assertEqual('test.md', parser.documentation_path('test'))
self.assertEqual('test/module.md', parser.documentation_path('test.module'))
def test_documentation_path_empty(self):
self.assertEqual('index.md', parser.documentation_path(''))
def test_replace_references(self):
string = 'A @{reference}, another @{tf.reference}, and a @{third}.'
duplicate_of = {'third': 'fourth'}
result = parser.replace_references(string, '../..', duplicate_of)
self.assertEqual(
'A [`reference`](../../reference.md), another '
'[`tf.reference`](../../reference.md), '
'and a [`third`](../../fourth.md).',
result)
def test_generate_markdown_for_class(self):
index = {
'TestClass': TestClass,
'TestClass.a_method': TestClass.a_method,
'TestClass.a_property': TestClass.a_property,
'TestClass.ChildClass': TestClass.ChildClass,
'TestClass.CLASS_MEMBER': TestClass.CLASS_MEMBER
}
tree = {
'TestClass': ['a_method', 'a_property', 'ChildClass', 'CLASS_MEMBER']
}
docs = parser.generate_markdown(full_name='TestClass', py_object=TestClass,
duplicate_of={}, duplicates={},
index=index, tree=tree, base_dir='/')
# Make sure all required docstrings are present.
self.assertTrue(inspect.getdoc(TestClass) in docs)
self.assertTrue(inspect.getdoc(TestClass.a_method) in docs)
self.assertTrue(inspect.getdoc(TestClass.a_property) in docs)
# Make sure that the signature is extracted properly and omits self.
self.assertTrue('a_method(arg=\'default\')' in docs)
# Make sure there is a link to the child class and it points the right way.
self.assertTrue('[`class ChildClass`](./TestClass/ChildClass.md)' in docs)
# Make sure CLASS_MEMBER is mentioned.
self.assertTrue('CLASS_MEMBER' in docs)
# Make sure this file is contained as the definition location.
self.assertTrue(os.path.relpath(__file__, '/') in docs)
def test_generate_markdown_for_module(self):
module = sys.modules[__name__]
index = {
'TestModule': module,
'TestModule.test_function': test_function,
'TestModule.test_function_with_args_kwargs':
test_function_with_args_kwargs,
'TestModule.TestClass': TestClass,
}
tree = {
'TestModule': ['TestClass', 'test_function',
'test_function_with_args_kwargs']
}
docs = parser.generate_markdown(full_name='TestModule', py_object=module,
duplicate_of={}, duplicates={},
index=index, tree=tree, base_dir='/')
# Make sure all required docstrings are present.
self.assertTrue(inspect.getdoc(module) in docs)
# Make sure that links to the members are there (not asserting on exact link
# text for functions).
self.assertTrue('./TestModule/test_function.md' in docs)
self.assertTrue('./TestModule/test_function_with_args_kwargs.md' in docs)
# Make sure there is a link to the child class and it points the right way.
self.assertTrue('[`class TestClass`](./TestModule/TestClass.md)' in docs)
# Make sure this file is contained as the definition location.
self.assertTrue(os.path.relpath(__file__, '/') in docs)
def test_generate_markdown_for_function(self):
index = {
'test_function': test_function
}
tree = {
'': ['test_function']
}
docs = parser.generate_markdown(full_name='test_function',
py_object=test_function,
duplicate_of={}, duplicates={},
index=index, tree=tree, base_dir='/')
# Make sure docstring shows up.
self.assertTrue(inspect.getdoc(test_function) in docs)
# Make sure the extracted signature is good.
self.assertTrue(
'test_function(unused_arg, unused_kwarg=\'default\')' in docs)
# Make sure this file is contained as the definition location.
self.assertTrue(os.path.relpath(__file__, '/') in docs)
def test_generate_markdown_for_function_with_kwargs(self):
index = {
'test_function_with_args_kwargs': test_function_with_args_kwargs
}
tree = {
'': ['test_function_with_args_kwargs']
}
docs = parser.generate_markdown(full_name='test_function_with_args_kwargs',
py_object=test_function_with_args_kwargs,
duplicate_of={}, duplicates={},
index=index, tree=tree, base_dir='/')
# Make sure docstring shows up.
self.assertTrue(inspect.getdoc(test_function_with_args_kwargs) in docs)
# Make sure the extracted signature is good.
self.assertTrue(
'test_function_with_args_kwargs(unused_arg,'
' *unused_args, **unused_kwargs)' in docs)
def test_references_replaced_in_generated_markdown(self):
index = {
'test_function_for_markdown_reference':
test_function_for_markdown_reference
}
tree = {
'': ['test_function_for_markdown_reference']
}
docs = parser.generate_markdown(
full_name='test_function_for_markdown_reference',
py_object=test_function_for_markdown_reference,
duplicate_of={}, duplicates={},
index=index, tree=tree, base_dir='/')
# Make sure docstring shows up and is properly processed.
expected_docs = parser.replace_references(
inspect.getdoc(test_function_for_markdown_reference),
relative_path_to_root='.', duplicate_of={})
self.assertTrue(expected_docs in docs)
def test_docstring_special_section(self):
index = {
'test_function': test_function_with_fancy_docstring
}
tree = {
'': 'test_function'
}
docs = parser.generate_markdown(
full_name='test_function',
py_object=test_function_with_fancy_docstring,
duplicate_of={}, duplicates={},
index=index, tree=tree, base_dir='/')
expected = '\n'.join([
'Function with a fancy docstring.',
'',
'#### Args:',
'',
'* <b>`arg`</b>: An argument.',
'',
'',
'#### Returns:',
'',
'* <b>`arg`</b>: the input, and',
'* <b>`arg`</b>: the input, again.',
''])
self.assertTrue(expected in docs)
def test_generate_index(self):
module = sys.modules[__name__]
index = {
'TestModule': module,
'test_function': test_function,
'TestModule.test_function': test_function,
'TestModule.TestClass': TestClass,
'TestModule.TestClass.a_method': TestClass.a_method,
'TestModule.TestClass.a_property': TestClass.a_property,
'TestModule.TestClass.ChildClass': TestClass.ChildClass,
}
duplicate_of = {
'TestModule.test_function': 'test_function'
}
docs = parser.generate_global_index('TestLibrary', 'test',
index=index,
duplicate_of=duplicate_of)
# Make sure duplicates and non-top-level symbols are in the index, but
# methods and properties are not.
self.assertTrue('a_method' not in docs)
self.assertTrue('a_property' not in docs)
self.assertTrue('TestModule.TestClass' in docs)
self.assertTrue('TestModule.TestClass.ChildClass' in docs)
self.assertTrue('TestModule.test_function' in docs)
# Leading backtick to make sure it's included top-level.
# This depends on formatting, but should be stable.
self.assertTrue('`test_function' in docs)
  def test_argspec_for_functools_partial(self):
# pylint: disable=unused-argument
def test_function_for_partial1(arg1, arg2, kwarg1=1, kwarg2=2):
pass
def test_function_for_partial2(arg1, arg2, *my_args, **my_kwargs):
pass
# pylint: enable=unused-argument
# pylint: disable=protected-access
# Make sure everything works for regular functions.
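    # inspect.ArgSpec fields are (args, varargs, keywords, defaults).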
expected = inspect.ArgSpec(['arg1', 'arg2', 'kwarg1', 'kwarg2'], None, None,
(1, 2))
self.assertEqual(expected, parser._get_arg_spec(test_function_for_partial1))
# Make sure doing nothing works.
expected = inspect.ArgSpec(['arg1', 'arg2', 'kwarg1', 'kwarg2'], None, None,
(1, 2))
partial = functools.partial(test_function_for_partial1)
self.assertEqual(expected, parser._get_arg_spec(partial))
# Make sure setting args from the front works.
expected = inspect.ArgSpec(['arg2', 'kwarg1', 'kwarg2'], None, None, (1, 2))
partial = functools.partial(test_function_for_partial1, 1)
self.assertEqual(expected, parser._get_arg_spec(partial))
expected = inspect.ArgSpec(['kwarg2',], None, None, (2,))
partial = functools.partial(test_function_for_partial1, 1, 2, 3)
self.assertEqual(expected, parser._get_arg_spec(partial))
# Make sure setting kwargs works.
expected = inspect.ArgSpec(['arg1', 'arg2', 'kwarg2'], None, None, (2,))
partial = functools.partial(test_function_for_partial1, kwarg1=0)
self.assertEqual(expected, parser._get_arg_spec(partial))
expected = inspect.ArgSpec(['arg1', 'arg2', 'kwarg1'], None, None, (1,))
partial = functools.partial(test_function_for_partial1, kwarg2=0)
self.assertEqual(expected, parser._get_arg_spec(partial))
expected = inspect.ArgSpec(['arg1'], None, None, ())
partial = functools.partial(test_function_for_partial1,
arg2=0, kwarg1=0, kwarg2=0)
self.assertEqual(expected, parser._get_arg_spec(partial))
    # Make sure *args and **kwargs are accounted for.
expected = inspect.ArgSpec([], 'my_args', 'my_kwargs', ())
partial = functools.partial(test_function_for_partial2, 0, 1)
self.assertEqual(expected, parser._get_arg_spec(partial))
# pylint: enable=protected-access
if __name__ == '__main__':
googletest.main()
|
odejesush/tensorflow
|
tensorflow/tools/docs/parser_test.py
|
Python
|
apache-2.0
| 12,002
| 0.003
|