| repo_name | ref | path | copies | content |
|---|---|---|---|---|
Bismarrck/tensorflow
|
refs/heads/master
|
tensorflow/contrib/distributions/python/kernel_tests/mvn_diag_plus_low_rank_test.py
|
25
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MultivariateNormal."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import distributions
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
ds = distributions
class MultivariateNormalDiagPlusLowRankTest(test.TestCase):
"""Well tested because this is a simple override of the base class."""
def setUp(self):
self._rng = np.random.RandomState(42)
def testDiagBroadcastBothBatchAndEvent(self):
# batch_shape: [3], event_shape: [2]
diag = np.array([[1., 2], [3, 4], [5, 6]])
# batch_shape: [1], event_shape: []
identity_multiplier = np.array([5.])
with self.cached_session():
dist = ds.MultivariateNormalDiagPlusLowRank(
scale_diag=diag,
scale_identity_multiplier=identity_multiplier,
validate_args=True)
self.assertAllClose(
np.array([[[1. + 5, 0],
[0, 2 + 5]],
[[3 + 5, 0],
[0, 4 + 5]],
[[5 + 5, 0],
[0, 6 + 5]]]),
dist.scale.to_dense().eval())
def testDiagBroadcastBothBatchAndEvent2(self):
# This test differs from `testDiagBroadcastBothBatchAndEvent` in that it
# broadcasts batch_shape's from both the `scale_diag` and
# `scale_identity_multiplier` args.
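# Broadcasting batch_shape [3] (from `scale_diag`) with [3, 1] (from
# `scale_identity_multiplier`) yields batch_shape [3, 3], so the dense scale
# below has shape [3, 3, 2, 2].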
# batch_shape: [3], event_shape: [2]
diag = np.array([[1., 2], [3, 4], [5, 6]])
# batch_shape: [3, 1], event_shape: []
identity_multiplier = np.array([[5.], [4], [3]])
with self.cached_session():
dist = ds.MultivariateNormalDiagPlusLowRank(
scale_diag=diag,
scale_identity_multiplier=identity_multiplier,
validate_args=True)
self.assertAllEqual(
[3, 3, 2, 2],
dist.scale.to_dense().get_shape())
def testDiagBroadcastOnlyEvent(self):
# batch_shape: [3], event_shape: [2]
diag = np.array([[1., 2], [3, 4], [5, 6]])
# batch_shape: [3], event_shape: []
identity_multiplier = np.array([5., 4, 3])
with self.cached_session():
dist = ds.MultivariateNormalDiagPlusLowRank(
scale_diag=diag,
scale_identity_multiplier=identity_multiplier,
validate_args=True)
self.assertAllClose(
np.array([[[1. + 5, 0],
[0, 2 + 5]],
[[3 + 4, 0],
[0, 4 + 4]],
[[5 + 3, 0],
[0, 6 + 3]]]), # shape: [3, 2, 2]
dist.scale.to_dense().eval())
def testDiagBroadcastMultiplierAndLoc(self):
# batch_shape: [], event_shape: [3]
loc = np.array([1., 0, -1])
# batch_shape: [3], event_shape: []
identity_multiplier = np.array([5., 4, 3])
with self.cached_session():
dist = ds.MultivariateNormalDiagPlusLowRank(
loc=loc,
scale_identity_multiplier=identity_multiplier,
validate_args=True)
self.assertAllClose(
np.array([[[5, 0, 0],
[0, 5, 0],
[0, 0, 5]],
[[4, 0, 0],
[0, 4, 0],
[0, 0, 4]],
[[3, 0, 0],
[0, 3, 0],
[0, 0, 3]]]),
dist.scale.to_dense().eval())
def testMean(self):
mu = [-1.0, 1.0]
diag_large = [1.0, 5.0]
v = [[2.0], [3.0]]
diag_small = [3.0]
with self.cached_session():
dist = ds.MultivariateNormalDiagPlusLowRank(
loc=mu,
scale_diag=diag_large,
scale_perturb_factor=v,
scale_perturb_diag=diag_small,
validate_args=True)
self.assertAllEqual(mu, dist.mean().eval())
def testSample(self):
# TODO(jvdillon): This test should be the basis of a new test fixture which
# is applied to every distribution. When we make this fixture, we'll also
# separate the analytical- and sample-based tests as well as for each
# function tested. For now, we group things so we can recycle one batch of
# samples (thus saving resources).
mu = np.array([-1., 1, 0.5], dtype=np.float32)
diag_large = np.array([1., 0.5, 0.75], dtype=np.float32)
diag_small = np.array([-1.1, 1.2], dtype=np.float32)
v = np.array([[0.7, 0.8],
[0.9, 1],
[0.5, 0.6]], dtype=np.float32) # shape: [k, r] = [3, 2]
true_mean = mu
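# The perturbed scale is diag(diag_large) + v * diag(diag_small) * v^T, and
# the covariance is scale * scale^T.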
true_scale = np.diag(diag_large) + np.matmul(np.matmul(
v, np.diag(diag_small)), v.T)
true_covariance = np.matmul(true_scale, true_scale.T)
true_variance = np.diag(true_covariance)
true_stddev = np.sqrt(true_variance)
with self.cached_session() as sess:
dist = ds.MultivariateNormalDiagPlusLowRank(
loc=mu,
scale_diag=diag_large,
scale_perturb_factor=v,
scale_perturb_diag=diag_small,
validate_args=True)
# The following distributions will test the KL divergence calculation.
mvn_identity = ds.MultivariateNormalDiag(
loc=np.array([1., 2, 0.25], dtype=np.float32),
validate_args=True)
mvn_scaled = ds.MultivariateNormalDiag(
loc=mvn_identity.loc,
scale_identity_multiplier=2.2,
validate_args=True)
mvn_diag = ds.MultivariateNormalDiag(
loc=mvn_identity.loc,
scale_diag=np.array([0.5, 1.5, 1.], dtype=np.float32),
validate_args=True)
mvn_chol = ds.MultivariateNormalTriL(
loc=np.array([1., 2, -1], dtype=np.float32),
scale_tril=np.array([[6., 0, 0],
[2, 5, 0],
[1, 3, 4]], dtype=np.float32) / 10.,
validate_args=True)
scale = dist.scale.to_dense()
n = int(30e3)
samps = dist.sample(n, seed=0)
sample_mean = math_ops.reduce_mean(samps, 0)
x = samps - sample_mean
sample_covariance = math_ops.matmul(x, x, transpose_a=True) / n
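# Monte Carlo KL estimates: KL(p || q) is approximated by the sample mean of
# log p(x) - log q(x) for x ~ p, and compared against the analytical
# kl_divergence below.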
sample_kl_identity = math_ops.reduce_mean(
dist.log_prob(samps) - mvn_identity.log_prob(samps), 0)
analytical_kl_identity = ds.kl_divergence(dist, mvn_identity)
sample_kl_scaled = math_ops.reduce_mean(
dist.log_prob(samps) - mvn_scaled.log_prob(samps), 0)
analytical_kl_scaled = ds.kl_divergence(dist, mvn_scaled)
sample_kl_diag = math_ops.reduce_mean(
dist.log_prob(samps) - mvn_diag.log_prob(samps), 0)
analytical_kl_diag = ds.kl_divergence(dist, mvn_diag)
sample_kl_chol = math_ops.reduce_mean(
dist.log_prob(samps) - mvn_chol.log_prob(samps), 0)
analytical_kl_chol = ds.kl_divergence(dist, mvn_chol)
n = int(10e3)
baseline = ds.MultivariateNormalDiag(
loc=np.array([-1., 0.25, 1.25], dtype=np.float32),
scale_diag=np.array([1.5, 0.5, 1.], dtype=np.float32),
validate_args=True)
samps = baseline.sample(n, seed=0)
sample_kl_identity_diag_baseline = math_ops.reduce_mean(
baseline.log_prob(samps) - mvn_identity.log_prob(samps), 0)
analytical_kl_identity_diag_baseline = ds.kl_divergence(
baseline, mvn_identity)
sample_kl_scaled_diag_baseline = math_ops.reduce_mean(
baseline.log_prob(samps) - mvn_scaled.log_prob(samps), 0)
analytical_kl_scaled_diag_baseline = ds.kl_divergence(
baseline, mvn_scaled)
sample_kl_diag_diag_baseline = math_ops.reduce_mean(
baseline.log_prob(samps) - mvn_diag.log_prob(samps), 0)
analytical_kl_diag_diag_baseline = ds.kl_divergence(baseline, mvn_diag)
sample_kl_chol_diag_baseline = math_ops.reduce_mean(
baseline.log_prob(samps) - mvn_chol.log_prob(samps), 0)
analytical_kl_chol_diag_baseline = ds.kl_divergence(baseline, mvn_chol)
[
sample_mean_,
analytical_mean_,
sample_covariance_,
analytical_covariance_,
analytical_variance_,
analytical_stddev_,
scale_,
sample_kl_identity_, analytical_kl_identity_,
sample_kl_scaled_, analytical_kl_scaled_,
sample_kl_diag_, analytical_kl_diag_,
sample_kl_chol_, analytical_kl_chol_,
sample_kl_identity_diag_baseline_,
analytical_kl_identity_diag_baseline_,
sample_kl_scaled_diag_baseline_, analytical_kl_scaled_diag_baseline_,
sample_kl_diag_diag_baseline_, analytical_kl_diag_diag_baseline_,
sample_kl_chol_diag_baseline_, analytical_kl_chol_diag_baseline_,
] = sess.run([
sample_mean,
dist.mean(),
sample_covariance,
dist.covariance(),
dist.variance(),
dist.stddev(),
scale,
sample_kl_identity, analytical_kl_identity,
sample_kl_scaled, analytical_kl_scaled,
sample_kl_diag, analytical_kl_diag,
sample_kl_chol, analytical_kl_chol,
sample_kl_identity_diag_baseline,
analytical_kl_identity_diag_baseline,
sample_kl_scaled_diag_baseline, analytical_kl_scaled_diag_baseline,
sample_kl_diag_diag_baseline, analytical_kl_diag_diag_baseline,
sample_kl_chol_diag_baseline, analytical_kl_chol_diag_baseline,
])
sample_variance_ = np.diag(sample_covariance_)
sample_stddev_ = np.sqrt(sample_variance_)
logging.vlog(2, "true_mean:\n{} ".format(true_mean))
logging.vlog(2, "sample_mean:\n{}".format(sample_mean_))
logging.vlog(2, "analytical_mean:\n{}".format(analytical_mean_))
logging.vlog(2, "true_covariance:\n{}".format(true_covariance))
logging.vlog(2, "sample_covariance:\n{}".format(sample_covariance_))
logging.vlog(2, "analytical_covariance:\n{}".format(
analytical_covariance_))
logging.vlog(2, "true_variance:\n{}".format(true_variance))
logging.vlog(2, "sample_variance:\n{}".format(sample_variance_))
logging.vlog(2, "analytical_variance:\n{}".format(analytical_variance_))
logging.vlog(2, "true_stddev:\n{}".format(true_stddev))
logging.vlog(2, "sample_stddev:\n{}".format(sample_stddev_))
logging.vlog(2, "analytical_stddev:\n{}".format(analytical_stddev_))
logging.vlog(2, "true_scale:\n{}".format(true_scale))
logging.vlog(2, "scale:\n{}".format(scale_))
logging.vlog(2, "kl_identity: analytical:{} sample:{}".format(
analytical_kl_identity_, sample_kl_identity_))
logging.vlog(2, "kl_scaled: analytical:{} sample:{}".format(
analytical_kl_scaled_, sample_kl_scaled_))
logging.vlog(2, "kl_diag: analytical:{} sample:{}".format(
analytical_kl_diag_, sample_kl_diag_))
logging.vlog(2, "kl_chol: analytical:{} sample:{}".format(
analytical_kl_chol_, sample_kl_chol_))
logging.vlog(
2, "kl_identity_diag_baseline: analytical:{} sample:{}".format(
analytical_kl_identity_diag_baseline_,
sample_kl_identity_diag_baseline_))
logging.vlog(
2, "kl_scaled_diag_baseline: analytical:{} sample:{}".format(
analytical_kl_scaled_diag_baseline_,
sample_kl_scaled_diag_baseline_))
logging.vlog(2, "kl_diag_diag_baseline: analytical:{} sample:{}".format(
analytical_kl_diag_diag_baseline_,
sample_kl_diag_diag_baseline_))
logging.vlog(2, "kl_chol_diag_baseline: analytical:{} sample:{}".format(
analytical_kl_chol_diag_baseline_,
sample_kl_chol_diag_baseline_))
self.assertAllClose(true_mean, sample_mean_,
atol=0., rtol=0.02)
self.assertAllClose(true_mean, analytical_mean_,
atol=0., rtol=1e-6)
self.assertAllClose(true_covariance, sample_covariance_,
atol=0., rtol=0.02)
self.assertAllClose(true_covariance, analytical_covariance_,
atol=0., rtol=1e-6)
self.assertAllClose(true_variance, sample_variance_,
atol=0., rtol=0.02)
self.assertAllClose(true_variance, analytical_variance_,
atol=0., rtol=1e-6)
self.assertAllClose(true_stddev, sample_stddev_,
atol=0., rtol=0.02)
self.assertAllClose(true_stddev, analytical_stddev_,
atol=0., rtol=1e-6)
self.assertAllClose(true_scale, scale_,
atol=0., rtol=1e-6)
self.assertAllClose(sample_kl_identity_, analytical_kl_identity_,
atol=0., rtol=0.02)
self.assertAllClose(sample_kl_scaled_, analytical_kl_scaled_,
atol=0., rtol=0.02)
self.assertAllClose(sample_kl_diag_, analytical_kl_diag_,
atol=0., rtol=0.02)
self.assertAllClose(sample_kl_chol_, analytical_kl_chol_,
atol=0., rtol=0.02)
self.assertAllClose(
sample_kl_identity_diag_baseline_,
analytical_kl_identity_diag_baseline_,
atol=0., rtol=0.02)
self.assertAllClose(
sample_kl_scaled_diag_baseline_,
analytical_kl_scaled_diag_baseline_,
atol=0., rtol=0.02)
self.assertAllClose(
sample_kl_diag_diag_baseline_,
analytical_kl_diag_diag_baseline_,
atol=0., rtol=0.04)
self.assertAllClose(
sample_kl_chol_diag_baseline_,
analytical_kl_chol_diag_baseline_,
atol=0., rtol=0.02)
def testImplicitLargeDiag(self):
mu = np.array([[1., 2, 3],
[11, 22, 33]]) # shape: [b, k] = [2, 3]
u = np.array([[[1., 2],
[3, 4],
[5, 6]],
[[0.5, 0.75],
[1, 0.25],
[1.5, 1.25]]]) # shape: [b, k, r] = [2, 3, 2]
m = np.array([[0.1, 0.2],
[0.4, 0.5]]) # shape: [b, r] = [2, 2]
scale = np.stack([
np.eye(3) + np.matmul(np.matmul(u[0], np.diag(m[0])),
np.transpose(u[0])),
np.eye(3) + np.matmul(np.matmul(u[1], np.diag(m[1])),
np.transpose(u[1])),
])
cov = np.stack([np.matmul(scale[0], scale[0].T),
np.matmul(scale[1], scale[1].T)])
logging.vlog(2, "expected_cov:\n{}".format(cov))
with self.cached_session():
mvn = ds.MultivariateNormalDiagPlusLowRank(
loc=mu,
scale_perturb_factor=u,
scale_perturb_diag=m)
self.assertAllClose(cov, mvn.covariance().eval(), atol=0., rtol=1e-6)
if __name__ == "__main__":
test.main()
|
joyent/zookeeper
|
refs/heads/master
|
src/contrib/zkpython/src/test/callback_test.py
|
159
|
#!/usr/bin/python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import zookeeper, zktestbase, unittest, threading, gc
ZOO_OPEN_ACL_UNSAFE = {"perms":0x1f, "scheme":"world", "id" :"anyone"}
class CallbackTest(zktestbase.TestBase):
"""
Test whether callbacks (watchers/completions) are correctly invoked
"""
# to do: startup and teardown via scripts?
def setUp(self):
zktestbase.TestBase.setUp(self)
self.cv = threading.Condition()
def create_callback(self, callback):
"""
Returns a callable that invokes callback and then notifies cv
"""
def wrapper(*args, **kwargs):
self.cv.acquire()
callback(*args, **kwargs)
self.cv.notify()
self.cv.release()
return wrapper
def test_none_callback(self):
"""
Test that no errors are raised when None is passed as a callback.
"""
self.ensureCreated("/zk-python-none-callback-test","test")
# To do this we need to issue two operations, waiting on the second
# to ensure that the first completes
zookeeper.get(self.handle, "/zk-python-none-callback-test", None)
(d,s) = zookeeper.get(self.handle, "/zk-python-none-callback-test")
self.assertEqual(d, "test")
def callback_harness(self, trigger, test):
self.callback_flag = False
self.cv.acquire()
trigger()
self.cv.wait(15)
test()
def test_dispatch_types(self):
"""
Test all the various dispatch mechanisms internal to the module.
"""
def dispatch_callback(*args, **kwargs):
self.callback_flag = True
self.ensureCreated("/zk-python-dispatch-test")
self.callback_harness( lambda: zookeeper.adelete(self.handle,
"/zk-python-dispatch-test",
-1,
self.create_callback(dispatch_callback)),
lambda: self.assertEqual(True, self.callback_flag, "Void dispatch not fired"))
self.ensureCreated("/zk-python-dispatch-test")
self.callback_harness( lambda: zookeeper.aexists(self.handle,
"/zk-python-dispatch-test",
None,
self.create_callback(dispatch_callback)),
lambda: self.assertEqual(True, self.callback_flag, "Stat dispatch not fired"))
self.callback_harness( lambda: zookeeper.aget(self.handle,
"/zk-python-dispatch-test",
None,
self.create_callback(dispatch_callback)),
lambda: self.assertEqual(True, self.callback_flag, "Data dispatch not fired"))
self.callback_harness( lambda: zookeeper.aget_children(self.handle,
"/",
None,
self.create_callback( dispatch_callback )),
lambda: self.assertEqual(True, self.callback_flag, "Strings dispatch not fired"))
self.callback_harness( lambda: zookeeper.async(self.handle,
"/",
self.create_callback( dispatch_callback )),
lambda: self.assertEqual(True, self.callback_flag, "String dispatch not fired"))
self.callback_harness( lambda: zookeeper.aget_acl(self.handle,
"/",
self.create_callback( dispatch_callback )),
lambda: self.assertEqual(True, self.callback_flag, "ACL dispatch not fired"))
def test_multiple_watchers(self):
"""
Test whether multiple watchers are correctly called
"""
cv1, cv2 = threading.Condition(), threading.Condition()
def watcher1(*args, **kwargs):
cv1.acquire()
self.watcher1 = True
cv1.notify()
cv1.release()
def watcher2(*args, **kwargs):
cv2.acquire()
self.watcher2 = True
cv2.notify()
cv2.release()
nodename = "/zk-python-multiple-watcher-test"
self.ensureCreated(nodename, "test")
cv1.acquire()
cv2.acquire()
zookeeper.get(self.handle, nodename, watcher1)
zookeeper.get(self.handle, nodename, watcher2)
zookeeper.set(self.handle, nodename, "test")
cv1.wait(15)
cv2.wait(15)
self.assertTrue(self.watcher1 and self.watcher2, "One or more watchers failed to fire")
def test_lose_scope(self):
"""
The idea is to test that the reference counting doesn't
fail when we retain no references outside of the module
"""
self.ensureDeleted("/zk-python-lose-scope-test")
self.ensureCreated("/zk-python-lose-scope-test")
def set_watcher():
def fn(): self.callback_flag = True
self.callback_flag = False
zookeeper.exists(self.handle, "/zk-python-lose-scope-test",
self.create_callback( lambda handle, type, state, path: fn() )
)
set_watcher()
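# Force a garbage-collection pass; the watcher registered inside set_watcher()
# must still fire even though no Python references to it remain.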
gc.collect()
self.cv.acquire()
zookeeper.set(self.handle, "/zk-python-lose-scope-test", "test")
self.cv.wait(15)
self.assertEqual(self.callback_flag, True)
if __name__ == '__main__':
unittest.main()
|
kaedroho/django
|
refs/heads/master
|
tests/migrations/test_migrations_plan/0001_initial.py
|
73
|
from django.db import migrations, models
def grow_tail(x, y):
"""Grow salamander tail."""
pass
def shrink_tail(x, y):
"""Shrink salamander tail."""
pass
class Migration(migrations.Migration):
initial = True
operations = [
migrations.CreateModel(
'Salamander',
[
('id', models.AutoField(primary_key=True)),
('tail', models.IntegerField(default=0)),
('silly_field', models.BooleanField(default=False)),
],
),
migrations.RunPython(grow_tail, shrink_tail),
]
|
olymk2/maidstone-hackspace
|
refs/heads/master
|
website/pages/donate.py
|
2
|
from flask import Blueprint
from flask import request
from flask import redirect, abort
from scaffold import web
from scaffold.core.validate import validate
from pages import header, footer
from data import donate, site_user, badges, members
from libs.payments import payment
from config.settings import *
donate_pages = Blueprint('donate_pages', __name__, template_folder='templates')
@donate_pages.route("/donate", methods=['GET'])
@donate_pages.route("/donate/", methods=['GET'])
def index():
web.template.create('Maidstone Hackspace')
header('Maidstone Hackspace Donations')
web.page.create('Make a donation')
web.paragraph.create(
"""If you would like to donate to the space please type an amount and use the reference code for what ever your donating for, for example use #lair to donate to getting a space.
We may run pledges in the future for equipment in which case use the reference for the equipment your pledging towards.""")
web.page.section(web.paragraph.render())
#~ for item in donate.get_pledges({'environment':int(gocardless_environment=='production')}):
#~ web.paragraph.create(
#~ """Currently raised £%.2f towards %s target is £%.2f.""" % (
#~ item.get('total', 0) if item.get('total', 0) else 0.0,
#~ item.get('name'),
#~ item.get('target', 0)))
#~ web.page.section(web.paragraph.render())
web.form.create('Donate to Maidstone Hackspace', '/donate/submit')
web.form.append(name='provider', label='GoCardless', placeholder='gocardless', value='gocardless', input_type='radio')
web.form.append(name='provider', label='PayPal', placeholder='', value='paypal', input_type='radio')
web.form.append(name='reference', label='Reference', placeholder='#lair', value='#lair', input_type='select')
web.form.append(name='amount', label='Donation Amount', placeholder='50.00', value='50.00')
web.page.append(web.form.render())
web.template.body.append(web.page.set_classes('page col s10 offset-s1').render())
return web.render()
@donate_pages.route("/donate/populate", methods=['GET'])
def populate_by_name():
web.template.create('Maidstone Hackspace')
header('Maidstone Hackspace Donations')
pledge = donate.get_pledge({'name': '#lair'}).get()
import gocardless
gocardless.environment = gocardless_environment
gocardless.set_details(**gocardless_credentials)
merchant = gocardless.client.merchant()
web.template.body.append('Adding Badges')
badges.create_badge().execute({'name': 'member'})
badges.create_badge().execute({'name': 'backer'})
badges.create_badge().execute({'name': 'teacher'})
badges.create_badge().execute({'name': 'chairman'})
badges.create_badge().execute({'name': 'treasurer'})
badges.create_badge().execute({'name': 'secretary'})
web.template.body.append('Populating users')
user_list = {}
#make sure we have all users in the system
#~ users_emails = []
for user in merchant.users():
user_list[user.id] = user.email
#~ users_emails.append(user.email)
site_user.create_basic_user().execute({
'email': user.email,
'first_name': user.first_name,
'last_name': user.last_name
})
#get the users ids and emails
#~ users = {}
#~ for member in members.get_members():
#~ for key, value in user_list.items():
#~ if value == member.get('email'):
#~ user_list[key] = member.get('user_id')
badge_lookup = {badge.get('name'):badge.get('id') for badge in badges.fetch_badges()}
web.template.body.append('Setting Donation Badges')
environment = int(gocardless_environment=='production')
for bill in merchant.bills():
web.template.body.append(str(bill))
matched_user = None
for user_id, user_email in user_list.items():
if user_email == user.email:
matched_user = user_id
donate.add_payment().execute({'user_id': matched_user,'pledge_id': pledge.get('id') , 'reference': bill.id, 'amount': bill.amount_minus_fees, 'environment': environment})
if matched_user:
badges.assign_badge().execute({'badge_id': badge_lookup.get('backer'), 'user_id': matched_user})
return web.render()
@donate_pages.route("/donate/submit", methods=['POST'])
def submit_donation():
provider = payment(
provider='paypal',
style='payment')
# convert the donation amount to 2 decimal places; PayPal seems to require this, otherwise it errors
donation_amount = '{0:.2f}'.format(float(request.form.get('amount')))
url = provider.make_donation(
amount=donation_amount,
reference=request.form.get('reference', ''),
redirect_success='%s/donate/success' % app_domain,
redirect_failure='%s/donate/failure' % app_domain
)
return redirect(url)
@donate_pages.route("/donate/success", methods=['GET'])
def donation_successful():
provider = payment(
provider='paypal',
style='payment')
bill = provider.confirm(request.args)
if bill:
pledge = donate.get_pledge({'name': bill.get('name')}).get()
environment = int(provider.environment=='production')
donate.add_payment().execute({
'provider_id': provider.provider_id,
'pledge_id': pledge.get('id',''),
'reference': bill.get('reference'),
'amount': bill.get('amount'),
'environment': environment})
web.page.create('Thanks for your donation')
web.paragraph.create(
"""Thanks your payment has been recieved.""")
else:
web.page.create('Something went wrong')
web.paragraph.create(
"""We could not confirm the payment something may have gone terribly wrong.""")
web.template.create('Maidstone Hackspace')
header('Maidstone Hackspace Donations')
web.page.create('Thanks for your donation')
web.paragraph.create(
"""Thanks your payment has been recieved.""")
web.page.section(web.paragraph.render())
web.template.body.append(web.page.render())
return web.render()
@donate_pages.route("/donate/failure", methods=['GET'])
def donation_failed():
web.template.create('Maidstone Hackspace')
header('Maidstone Hackspace Donations')
web.page.create('Looks like something went wrong.')
web.paragraph.create(
"""Sorry looks like something went wrong while trying to take this payment.""")
web.page.section(web.paragraph.render())
web.template.body.append(web.page.render())
return web.render()
|
elias-winberg/PhantomJS
|
refs/heads/master
|
phantomjs/test_phantom.py
|
2
|
from .page import Page
from .phantom import Phantom
from .driver import Driver
import pytest
@pytest.fixture()
def phantom(request):
driver = Driver(engine='phantomjs', port=3000)
driver.start()
driver.wait_for_ready()
phantom = Phantom(driver=driver)
request.addfinalizer(driver.kill)
return phantom
def test_create_page(phantom):
value = phantom.create_page()
assert isinstance(value, Page)
def test_get_property(phantom):
value = phantom.get_property('cookiesEnabled')
assert value is True
def test_set_property(phantom):
value = phantom.set_property('cookiesEnabled', False)
assert value is False
def test_invoke_function(phantom):
value = phantom.deleteCookie('Nonexistent-Cookie')
assert value is False
|
Southpaw-TACTIC/TACTIC
|
refs/heads/4.7
|
src/tactic/ui/table/expression_element_wdg.py
|
1
|
###########################################################
#
# Copyright (c) 2005-2008, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ['ExpressionElementWdg', "ExpressionValueElementWdg"]
import types, re
from pyasm.common import TacticException, Container, FormatValue, jsonloads, jsondumps, SPTDate, Common
from pyasm.search import Search, SearchKey, SearchType
from pyasm.security import Sudo
from pyasm.web import DivWdg, Widget
from pyasm.widget import IconWdg, TextWdg, TextAreaWdg, SelectWdg, CheckboxWdg
from pyasm.biz import ExpressionParser
from tactic.ui.common import BaseTableElementWdg, SimpleTableElementWdg
from .table_element_wdg import TypeTableElementWdg
import datetime
import six
basestring = six.string_types
class ExpressionElementWdg(TypeTableElementWdg):
'''General purpose element widget for expressions'''
ARGS_KEYS = {
'expression': {
'description': 'Expression to evaluate the widget',
'type': 'TextAreaWdg',
'order': 1,
'category': 'Options'
},
'display_expression': {
'description': 'Expression for display purposes',
'type': 'TextAreaWdg',
'order': 2,
'category': 'Options'
},
'display_format': {
'description': 'Predefined format for display',
'type': 'TextWdg',
'order': 3,
'category': 'Options'
},
'link_expression': {
'description': 'Expression for linking to another sobject',
'type': 'TextAreaWdg',
'order': 4,
'category': 'Options',
},
'link_view': {
'description': 'View to link result to another view',
'type': 'TextWdg',
'order': 5,
'category': 'Options',
},
'inline_styles': 'Styles to add to the DIV generated that contains the result of the expression',
'return': {
'description' : 'Determines what the expression return type should be',
'type': 'SelectWdg',
'values': 'single|list'
},
'bottom': {
'description': 'Expression to calculate the bottom row of the table',
'type': 'TextAreaWdg',
},
'group_bottom': {
'description': 'Expression to calculate the bottom of a group',
'type': 'TextAreaWdg',
},
'mode': {
'description': 'Display mode for this widget',
'type': 'SelectWdg',
'values': 'value|check|boolean',
'order': 3
},
'expression_mode': {
'description': 'If absolute mode is selected, it does not relate to the current SObject',
'type': 'SelectWdg',
'values': 'default|absolute',
'order': 6
},
'calc_mode': {
'description': '(ALPHA) fast|slow - fast uses new calculation mode. Only @SUM, @COUNT, @SOBJECT and @GET are currently supported',
'type': 'SelectWdg',
'values': 'slow|fast',
'order': 7
},
'show_retired': {
'description': 'true|false - true shows all the retired entries during the expression evaluation',
'type': 'SelectWdg',
'values': 'true|false',
'category': 'Options',
'order': 8
},
'enable_eval_listener': {
'description': '''Currently javascript expression evaluation is not fully baked, so only use the client side evaluation listener when needed and NOT by default''',
'category': 'internal',
},
'use_cache': {
'description': 'Determines whether or not to use the cached value. Gets value from column with the same name as the element',
'type': 'SelectWdg',
'values': 'true|false',
'order': 0,
'category': 'Cache'
},
'order_by': {
'description': 'Turn on Order by',
'type': 'TextWdg',
'order': 8,
'category': 'Options'
},
'group_by': {
'description': 'Turn on Group by',
'type': 'SelectWdg',
'values': 'true|false',
'order': 9,
'category': 'Options'
},
'group_by_time': {
'description': 'Turn on Group by time',
'type': 'SelectWdg',
'values': 'true|false',
'order': 10,
'category': 'Options'
},
'justify': {
'description': 'Result justification',
'type': 'SelectWdg',
'values': 'default|left|right|center',
'order': 11,
'category': 'Options'
},
'filter_name': {
'description': 'Name of filter to use',
'type': 'TextWdg',
'order': 12,
'category': 'Options'
},
'empty': {
'description': "vAlue to display if empty"
}
}
def init(self):
self.td = None
self.expression = None
self.alt_expression = None
self.alt_result = None
self.report_value = None
self.cache_results = None
def preprocess(self):
order_by = self.get_option("order_by")
# for backward compatibility when order_by used to be true/false
if not order_by or order_by =='true':
expression = self.get_option("expression")
if expression.startswith("@GET(") and expression.endswith(")") and expression.count("@") == 1:
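# note: lstrip/rstrip strip a set of characters, not a literal prefix/suffix;
# this is adequate for simple "@GET(...)" expressions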
template = expression.lstrip("@GET(")
template = template.rstrip(")")
# remove white spaces
template= template.strip()
# if it's a simple local sType expression e.g. @GET(.id), strip the .
if template.startswith("."):
template = template.lstrip('.')
self.set_option("order_by", template)
self.init_kwargs()
def get_data(self, sobject):
# use current sobject
if not self.expression:
self.init_kwargs()
try:
use_cache = self.get_option("use_cache") in ['true', True]
# TEST TESt TEST
#use_cache = True
use_cache = False
if use_cache:
result = sobject.get_value(self.get_name())
else:
result = self._get_result(sobject, self.expression)
except Exception as e:
result = ""
return result
def get_onload_js(self):
name = self.get_name()
value_class = "spt_%s_expr" % name
return '''
var value = sobject[element_name]
var value_el = cell.getElement(".%s");
value_el.setAttribute("search_key", sobject.__search_key__);
value_el.innerHTML = value;
''' % value_class
def get_required_columns(self):
'''method to get the required columns for this'''
return []
def get_header_option_wdg(self):
return
if self.kwargs.get("use_cache2") not in ['true', True]:
return
div = DivWdg()
div.add("Last Calculated: 5 days ago<br/><hr/>")
div.add("Recalculate")
div.add_class("hand")
#from tactic.ui.widget import ActionButtonWdg
#button = ActionButtonWdg(title="Recalculate")
#div.add(button)
div.add_behavior( {
'type': 'click_up',
'cbjs_action': '''
var table = bvr.src_el.getParent(".spt_table");
//var search_keys = spt.dg_table.get_search_keys();
var search_keys = spt.dg_table.get_selected_search_keys(table);
if (search_keys.length == 0) {
spt.alert("No rows selected");
return;
}
var header = bvr.src_el.getParent(".spt_table_th");
var element_name = header.getAttribute("spt_element_name");
spt.app_busy.show("Recalculating ...");
var kwargs = {
element_name: element_name,
search_keys: search_keys,
}
spt.app_busy.show("Recalculating ...");
var server = TacticServerStub.get();
var class_name = 'tactic.ui.table.ExpressionRecalculateCmd';
server.execute_cmd(class_name, kwargs);
spt.app_busy.hide("Recalculating ...");
'''
} )
return div
def is_sortable(self):
use_cache = self.get_option("use_cache") in ['true', True]
if use_cache:
return True
order_by = self.get_option("order_by")
# 'false' explicitly prevents the auto-adoption (done in preprocess) of the expression as the order_by column
if order_by and order_by !='false':
parts = order_by.split(".")
if "connect" in parts:
return False
return True
else:
return False
def is_groupable(self):
use_cache = self.get_option("use_cache") in ['true', True]
if use_cache:
return True
group_by = self.get_option("group_by")
if group_by:
return True
else:
return False
def is_time_groupable(self):
group_by = self.get_option("group_by_time")
if group_by:
return True
else:
return False
def get_vars(self):
# create variables
element_name = self.get_name()
self.vars = {
'ELEMENT_NAME': element_name
}
# get info from search criteria
# FIXME: this should be formalized
search_vars = Container.get("Message:search_vars")
if search_vars:
for name, value in search_vars.items():
self.vars[name] = value
return self.vars
def get_input_by_arg_key(self, key):
if key == 'expression':
input = TextAreaWdg("option_expression")
else:
input = TextWdg("value")
return input
get_input_by_arg_key = classmethod(get_input_by_arg_key)
def handle_td(self, td):
if self.alt_result:
td.add_attr("spt_input_value", self.alt_result)
elif self.alt_result:
td.add_attr("spt_input_value", self.value)
if self.report_value:
td.add_attr("spt_report_value", self.report_value)
super(ExpressionElementWdg,self).handle_td(td)
def is_editable(self):
return 'optional'
def _get_result(self, sobject, expression):
'''get the result of the expression'''
element_name = self.get_name()
mode = self.get_option("data_mode")
if mode == "report":
value = sobject.get_value(element_name)
return value
use_cache = self.kwargs.get("use_cache")
if use_cache == "true":
try:
return sobject.get_value(element_name)
except Exception as e:
print("Error: ", e.message)
if isinstance(sobject, list):
if sobject.is_insert():
return ''
self.vars = {
'ELEMENT_NAME': element_name,
'ELEMENT': element_name,
'SOBJECT_ID': sobject.get_id(),
'SOBJECT_CODE': sobject.get_code(),
}
return_type = self.kwargs.get("return")
if return_type == 'single':
ret_single = True
ret_list = False
elif return_type == 'list':
ret_single = False
ret_list = True
else:
ret_single = True
ret_list = False
# if this expression is an absolute expression, then don't bother
# with the sobject
expression_mode = self.get_option('expression_mode')
if expression_mode == 'absolute':
sobject = None
calc_mode = self.get_option("calc_mode")
if not calc_mode:
calc_mode = 'slow'
#calc_mode = 'fast'
# parse the expression
parser = ExpressionParser()
# expression element widget definition should only be saved by admin, so it can run
# the expression in sudo mode
sudo = Sudo()
if calc_mode == 'fast':
if self.cache_results == None:
self.cache_results = parser.eval(expression, self.sobjects, vars=self.vars, dictionary=True, show_retired=self.show_retired)
if isinstance(self.cache_results, basestring):
if self.cache_results:
self.cache_results = eval(self.cache_results)
else:
self.cache_results = {}
search_key = sobject.get_search_key()
result = self.cache_results.get(search_key)
if ret_single:
if result and len(result):
result = result[0]
else:
result = ''
else:
result = parser.eval(expression, sobject, vars=self.vars, single=ret_single, list=ret_list, show_retired=self.show_retired)
# if the result has a get_display_value call, then use that.
try:
if not ret_list:
result = result.get_display_value()
except AttributeError as e:
pass
if ret_list and result:
# turn non basestring into string
encoded_result = []
for res in result:
if isinstance(res, datetime.datetime):
res = SPTDate.convert_to_local(res)
res = str(res)
elif not Common.IS_Pv3 and not isinstance(res, basestring):
res = unicode(res).encode('utf-8','ignore')
encoded_result.append(res)
#delimiter = ', '
#result = delimiter.join(encoded_result)
result = encoded_result
if result == None or result == []:
result = ''
if isinstance(result, datetime.datetime):
result = SPTDate.convert_to_local(result)
return result
def init_kwargs(self):
'''initialize kwargs'''
state = self.kwargs.get("state")
if state:
parent_key = state.get("parent_key")
if parent_key:
self.sobject = SearchKey.get_by_search_key(parent_key)
self.expression = self.get_option("expression")
if not self.expression:
self.expression = self.kwargs.get("expression")
self.alt_expression = self.get_option("alt_expression")
if not self.alt_expression:
self.alt_expression = self.kwargs.get("alt_expression")
self.mode = self.get_option("mode")
if not self.mode:
self.mode = self.kwargs.get("mode")
if not self.mode:
self.mode = 'value'
self.show_retired = self.get_option("show_retired")
if not self.show_retired:
self.show_retired = self.kwargs.get("show_retired")
# default to False
if self.show_retired == 'true':
self.show_retired = True
else:
self.show_retired = False
self.enable_eval_listener = False
if self.get_option("enable_eval_listener") in [ True, "true", "True", "TRUE" ]:
self.enable_eval_listener = True
def get_text_value(self):
'''for csv export'''
self.sobject = self.get_current_sobject()
# expressions won't work on virtual sobjects
if self.sobject.get_base_search_type() == "sthpw/virtual":
return self.sobject.get_value( self.get_name() )
if not self.expression and not self.alt_expression:
return super(ExpressionElementWdg, self).get_display()
if self.alt_expression:
result = self._get_result(self.sobject, self.alt_expression)
else:
result = self._get_result(self.sobject, self.expression)
if isinstance(result, list):
delimiter = ','
result = delimiter.join(result)
format_str = self.kwargs.get("display_format")
if format_str:
format_val = FormatValue()
format_value = format_val.get_format_value( result, format_str )
result = format_value
name = self.get_name()
self.sobject.set_value(name, result)
return result
def set_td(self, td):
self.td = td
def get_display(self):
#self.init_kwargs()
self.sobject = self.get_current_sobject()
if not self.sobject:
return ""
if self.sobject.is_insert():
pass
name = self.get_name()
if not self.expression:
div = DivWdg()
sobject_id = '000'
if self.sobject:
sobject_id = self.sobject.get_id()
div.add_class( "spt_%s_expr_id%s" % ( name, sobject_id ) )
div.add_class( "spt_%s_expr" % name )
raw_result = super(ExpressionElementWdg, self).get_display()
div.add( raw_result )
# Now check to see if there are inline CSS styles provided ...
inline_styles = self.kwargs.get('inline_styles')
if inline_styles:
style_list = inline_styles.split(";")
for style in style_list:
div.add_style( style )
return div
try:
use_cache = self.get_option("use_cache") in ['true', True]
if use_cache:
result = self.sobject.get_value(self.get_name())
else:
result = self._get_result(self.sobject, self.expression)
# calculate the alt expression if defined
# DEPRECATED: use format expression instead
if self.alt_expression:
self.alt_result = self._get_result(self.sobject, self.alt_expression)
else:
self.alt_result = result
except Exception as e:
print("Expression error: ", e)
print(" in column [%s] with [%s]" % (self.get_name(), self.expression))
#from pyasm.widget import ExceptionWdg
#widget = ExceptionWdg(e)
#return widget
widget = DivWdg()
widget.add("Expression error: %s" % e)
return widget
if isinstance(result, list):
delimiter = ', '
self.value = delimiter.join(result)
results = result
else:
self.value = result
results = [result]
if not results or (len(results) == 1 and results[0] == ''):
empty = self.get_option("empty")
if empty:
div = DivWdg()
div.add_style("white-space: nowrap")
div.add(empty)
div.add_style("opacity: 0.5")
return div
if self.sobject:
# only set if the value does not exist as a key. This widget should
# not be able to change existing data of an sobject
self.sobject.set_value(name, result, no_exception=True)
outer = DivWdg()
cbjs_action = self.kwargs.get("cbjs_action")
if cbjs_action:
outer.add_behavior( {
'type': 'click_up',
'cbjs_action': cbjs_action
})
for i, result in enumerate(results):
div = DivWdg()
outer.add(div)
if len(results) == 1:
div.add_style("display: inline-block")
if self.sobject:
div.add_class( "spt_%s_expr_id%s" % ( name, self.sobject.get_id() ) )
div.add_class( "spt_%s_expr" % name )
# by default, the value is added
if self.mode == 'value':
display_expr = self.kwargs.get("display_expression")
format_str = self.get_option('display_format')
if display_expr:
if not isinstance( result, basestring ):
display_result = str(result)
else:
display_result = result
return_type = self.kwargs.get("return")
if return_type == 'single':
_single = True
_list = False
elif return_type in ['list']:
_single = False
_list = True
else:
_single = True
_list = False
try:
display_result = Search.eval(display_expr, self.sobject, list=_list, single=_single, vars={'VALUE': display_result }, show_retired=self.show_retired)
except Exception as e:
print("WARNING in display expression [%s]: " % display_expr, e)
display_result = "ERROR: %s" % e
elif format_str:
# This import needs to be here because of a deep
# circular import
from tactic.ui.widget import FormatValueWdg
format_wdg = FormatValueWdg(format=format_str, value=result)
display_result = format_wdg
else:
display_result = result
return_type = self.kwargs.get("return")
if return_type in ['list']:
#div.add( "- " )
div.add_style("max-width: 400px")
div.add( display_result )
div.add_style("min-height: 15px")
outer.add_style("width: 100%")
self.report_value = display_result
# if a DG table td has been provided and if there is an alternate expression
# specified then use it for the 'spt_input_value' of the td ...
#if self.td and alt_result:
# self.td.set_attr("spt_input_value", str(alt_result))
justify = self.get_option("justify")
if justify and justify != 'default':
if justify != "left":
div.add_style("width: 100%")
if justify == "right":
div.add_style("margin-right: 10px")
div.add_style("text-align: %s" % justify)
elif isinstance(result, datetime.datetime):
div.add_style("text-align: left")
elif not isinstance(result, basestring):
div.add_style("text-align: right")
div.add_style("margin-right: 5px")
# Now check to see if there are inline CSS styles provided ...
inline_styles = self.kwargs.get('inline_styles')
if inline_styles:
style_list = inline_styles.split(";")
for style in style_list:
div.add_style( style )
# display a link if specified
link_expr = self.kwargs.get("link_expression")
if link_expr:
# using direct behavior because new_tab isn't working consistently
div.add_style("text-decoration", "underline")
div.add_attr("search_key", self.sobject.get_search_key())
div.add_attr("expression", link_expr)
div.add_class("hand")
search_type_sobj = self.sobject.get_search_type_obj()
sobj_title = search_type_sobj.get_title()
#div.add_attr("name", "%s: %s" % (sobj_title, name))
if display_result:
name = display_result
title = display_result
div.add_attr("name", name)
div.add_attr("title", title)
# click up blocks any other behavior
div.add_behavior( {
'type': 'click_up',
'cbjs_action': '''
spt.table.open_link(bvr);
'''
} )
link_view = self.kwargs.get("link_view")
if link_view:
# using direct behavior because new_tab isn't working consistently
div.add_style("text-decoration", "underline")
div.add_attr("search_key", self.sobject.get_search_key())
div.add_attr("view", link_view)
div.add_class("hand")
search_type_sobj = self.sobject.get_search_type_obj()
sobj_title = search_type_sobj.get_title()
#div.add_attr("name", "%s: %s" % (sobj_title, name))
div.add_attr("name", display_result)
# click up blocks any other behavior
div.add_behavior( {
'type': 'click_up',
'cbjs_action': '''
spt.table.open_link(bvr);
'''
} )
elif self.mode == 'boolean':
div.add_style("text-align: center")
if not result:
color = 'red'
elif result in [False, 'false']:
color = 'red'
elif result in [True, 'true']:
color = 'green'
else:
color = 'green'
if color == 'red':
div.add( IconWdg("None", IconWdg.DOT_RED) )
else:
div.add( IconWdg(str(result), IconWdg.DOT_GREEN) )
elif self.mode == 'check':
div.add_style("text-align: center")
try:
value = int(result)
except ValueError:
value = 0
if value > 0:
div.add( IconWdg(str(result), IconWdg.CHECK) )
else:
div.add( ' ' )
elif self.mode == 'icon':
if not result:
result = 0
vars = {
'VALUE': result
}
icon_expr = self.get_option("icon_expr")
icon = Search.eval(icon_expr, vars=vars)
icon = str(icon).upper()
div.add_style("text-align: center")
try:
icon_wdg = eval("IconWdg.%s" % icon)
except:
icon = "ERROR"
icon_wdg = eval("IconWdg.%s" % icon)
div.add( IconWdg(str(result), icon_wdg ) )
else:
raise TacticException("Unsupported expression display mode [%s] for column [%s]" % (self.mode, self.get_name() ))
if self.sobject and self.enable_eval_listener:
self.add_js_expression(div, self.sobject, self.expression)
# test link
#link = self.get_option("link")
#if link:
# div.add_behavior( {
# 'type': 'click_up',
# 'cbjs_action': 'document.location = "http://%s"' % link
# } )
# test behavior
behavior = self.get_option("behavior")
if behavior:
behavior = behavior.replace('\\\\', '\\')
behavior = jsonloads(behavior)
if behavior.get("type") in ['click_up', 'click']:
div.add_class('hand')
behavior['cbjs_action'] = '''
var search_key = bvr.src_el.getParent('.spt_table_tbody').getAttribute('spt_search_key');
bvr = {
script_code: '61MMS',
search_key: search_key
};
spt.CustomProject.custom_script(evt, bvr);
'''
div.add_behavior( behavior )
"""
# test dynamic updates on expressions
if self.get_name() == "customer":
outer.add_update( {
'search_key': self.sobject.get_search_key(),
'column': "customer_code",
'interval': 4,
'cbjs_action': "spt.panel.refresh_element(bvr.src_el)",
} )
self.set_as_panel(outer);
"""
return outer
def get_bottom_wdg(self):
self.init_kwargs()
sobjects = self.sobjects
# ignore the first 2 (edit and insert) if it's on the old TableLayoutWdg
if self.get_layout_wdg().get_layout_version() == '1':
sobjects = sobjects[2:]
if not sobjects:
return None
expression = self.get_option("bottom")
if not expression:
return None
# parse the expression
self.vars = self.get_vars()
parser = ExpressionParser()
result = parser.eval(expression, sobjects=sobjects, vars=self.vars)
format_str = self.kwargs.get("display_format")
if format_str:
from tactic.ui.widget import FormatValueWdg
format_wdg = FormatValueWdg(format=format_str, value=result)
result = format_wdg
else:
result = str(result)
div = DivWdg()
div.add(result)
div.add_style("text-align: right")
div.add_class( "spt_%s_expr_bottom" % (self.get_name()) )
# add a listener
for sobject in sobjects:
if sobject.is_insert():
continue
if self.enable_eval_listener:
self.add_js_expression(div, sobject, expression)
return div
def get_group_bottom_wdg(self, sobjects):
expression = self.get_option("group_bottom")
if not expression:
return None
# parse the expression
self.vars = self.get_vars()
parser = ExpressionParser()
raw_result = parser.eval(expression, sobjects=sobjects, vars=self.vars)
format_str = self.kwargs.get("display_format")
if format_str:
from tactic.ui.widget import FormatValueWdg
format_wdg = FormatValueWdg(format=format_str, value=raw_result)
result = format_wdg
else:
result = str(raw_result)
div = DivWdg()
div.add(result)
div.add_style("text-align: right")
#div.add_class( "spt_%s_expr_bottom" % (self.get_name()) )
# add a listener
#for sobject in sobjects:
# if sobject.is_insert():
# continue
#
# if self.enable_eval_listener:
# self.add_js_expression(div, sobject, expression)
return div, raw_result
def add_js_expression(self, widget, sobject, expression):
js_expression, columns = self.convert_to_js(sobject, expression)
element_name = self.get_name()
for column in columns:
# ignore itself
#if column == element_name:
# continue
search_key = SearchKey.get_by_sobject(sobject)
event_name = 'change|%s|%s' % (search_key, column)
behavior = {
'type': 'listen',
'event_name': event_name,
'expression': js_expression,
'cbjs_action': 'spt.expression.calculate_cbk(evt, bvr)'
}
widget.add_behavior(behavior)
def convert_to_js(self, sobject, expression):
# HACK: not a very robust implementation
pattern = re.compile(r'@(\w+)\((.*?)\)')
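# Matches expressions such as "@GET(sthpw/task.status)": group 1 is the method
# (e.g. GET) and group 2 is the argument.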
matches = pattern.findall(expression)
if not matches:
return '', expression
js_expression = expression
columns = []
for match in matches:
method = match[0]
item = match[1]
if method == 'GET':
search_key = SearchKey.build_by_sobject(sobject)
parts = item.split(".")
column = parts[-1]
replace = '"%s","%s"' % (search_key, parts[-1])
columns.append(column)
else:
parts = item.split(".")
column = parts[-1]
replace = '"%s"' % column
columns.append(column)
js_expression = js_expression.replace(item, replace)
return js_expression, columns
__all__.append("ExpressionRecalculateCmd")
from pyasm.command import Command
class ExpressionRecalculateCmd(Command):
def execute(self):
search_keys = self.kwargs.get("search_keys")
if not search_keys:
return
element_name = self.kwargs.get("element_name")
# get all of the sobjects
sobjects = Search.get_by_search_keys(search_keys)
if not sobjects:
return
from pyasm.widget import WidgetConfigView
search_type = sobjects[0].get_base_search_type()
view = "definition"
config = WidgetConfigView.get_by_search_type(search_type, view)
# TEST
widget = config.get_display_widget(element_name)
for sobject in sobjects:
widget.set_sobject(sobject)
value = widget.get_text_value()
sobject.set_value(element_name, value)
sobject.commit()
#for result, sobject in zip(results, sobjects):
# sobject.set_value(element_name, result)
# sobject.commit()
class ExpressionValueElementWdg(SimpleTableElementWdg):
ARGS_KEYS = {
}
def is_editable(self):
return True
def get_text_value(self):
sobject = self.get_current_sobject()
if sobject.is_insert():
return
value = self.get_value()
# assume the value is an expression
try:
value = Search.eval(value)
except Exception as e:
value = 0
return value
def get_display(self):
sobject = self.get_current_sobject()
if sobject.is_insert():
return
value = self.get_value()
# assume the value is an expression
try:
#value = Search.eval(value)
env_sobjects = {
'sobject': sobject
}
parser = ExpressionParser()
value = parser.eval(value, env_sobjects=env_sobjects, use_cache=False)
except Exception as e:
print(e)
value = "Error [%s]" % value
return "%s" % value
|
zyc9012/sdrl
|
refs/heads/master
|
sdrl/Gui/Domains/InfCartPoleBalance.py
|
1
|
#-*- coding: utf-8 -*-
import os
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from PyQt4 import uic
from sdrl.Gui import BaseFrame
from sdrl.Gui.Utils import *
from rlpy.Domains import InfCartPoleBalance
class InfCartPoleBalanceFrame( BaseFrame ):
title = 'InfCartPoleBalance'
def __init__( self, parent=None ):
super( InfCartPoleBalanceFrame, self ).__init__(parent,
uifile=os.path.join(os.path.dirname(__file__), 'InfCartPoleBalanceFrame.ui'))
def initConfig(self):
domain=InfCartPoleBalance()
kernel_width = (domain.statespace_limits[:, 1] - domain.statespace_limits[:, 0]) \
/ 45.016
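# Per-dimension Gaussian kernel width: the span of each state variable
# divided by the kernel resolution (45.016).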
self.agentConfig['QLearning'] = {'lambda':0.6596, 'gamma':0.9, 'alpha':0.993, 'alpha_decay_mode':'boyan', 'boyan_N0':235}
self.agentConfig['Sarsa'] = {'lambda':0.6596, 'gamma':0.9, 'alpha':0.993, 'alpha_decay_mode':'boyan', 'boyan_N0':235}
self.policyConfig['eGreedy'] = {'epsilon':0.1}
self.representationConfig['Tabular'] = {'discretization':6}
self.representationConfig['IncrementalTabular'] = {'discretization':6}
self.representationConfig['RBF'] = {'num_rbfs':206, 'resolution_max':25, 'resolution_min':25}
self.representationConfig['iFDD'] = {'discretization':6, 'discover_threshold':0.037282}
self.representationConfig['KernelizediFDD']={'sparsify':1,'kernel':gaussian_kernel,
'kernel_args':[kernel_width],
'active_threshold':0.01,
'discover_threshold':0.01356,
'max_active_base_feat':10,
'max_base_feat_sim':0.5,
'kernel_resolution':45.016}
@pyqtSlot()
def on_btnConfigAgent_clicked(self):
self.showDialogByName( str(self.lstAgent.currentItem().text()), self.agentConfig )
@pyqtSlot()
def on_btnConfigRepresentation_clicked(self):
self.showDialogByName( str(self.lstRepresentation.currentItem().text()), self.representationConfig )
@pyqtSlot()
def on_btnConfigPolicy_clicked(self):
self.showDialogByName( str(self.lstPolicy.currentItem().text()), self.policyConfig )
def makeComponents(self):
domain = InfCartPoleBalance()
representation = RepresentationFactory.get(config=self.representationConfig,
name=str(self.lstRepresentation.currentItem().text()),
domain=domain)
policy = PolicyFactory.get(config=self.policyConfig,
name=str(self.lstPolicy.currentItem().text()),
representation=representation)
agent = AgentFactory.get(config=self.agentConfig,
name=str(self.lstAgent.currentItem().text()),
representation=representation,
policy=policy)
return domain, agent
|
loongson-community/EFI-MIPS
|
refs/heads/master
|
ToolKit/cmds/python/Lib/test/good/test_call.py
|
90
|
import unittest
from test import test_support
# The test cases here cover several paths through the function calling
# code. They depend on the METH_XXX flag that is used to define a C
# function, which can't be verified from Python. If the METH_XXX decl
# for a C function changes, these tests may not cover the right paths.
class CFunctionCalls(unittest.TestCase):
def test_varargs0(self):
self.assertRaises(TypeError, {}.has_key)
def test_varargs1(self):
{}.has_key(0)
def test_varargs2(self):
self.assertRaises(TypeError, {}.has_key, 0, 1)
def test_varargs0_ext(self):
try:
{}.has_key(*())
except TypeError:
pass
def test_varargs1_ext(self):
{}.has_key(*(0,))
def test_varargs2_ext(self):
try:
{}.has_key(*(1, 2))
except TypeError:
pass
else:
raise RuntimeError
def test_varargs0_kw(self):
self.assertRaises(TypeError, {}.has_key, x=2)
def test_varargs1_kw(self):
self.assertRaises(TypeError, {}.has_key, x=2)
def test_varargs2_kw(self):
self.assertRaises(TypeError, {}.has_key, x=2, y=2)
def test_oldargs0_0(self):
{}.keys()
def test_oldargs0_1(self):
self.assertRaises(TypeError, {}.keys, 0)
def test_oldargs0_2(self):
self.assertRaises(TypeError, {}.keys, 0, 1)
def test_oldargs0_0_ext(self):
{}.keys(*())
def test_oldargs0_1_ext(self):
try:
{}.keys(*(0,))
except TypeError:
pass
else:
raise RuntimeError
def test_oldargs0_2_ext(self):
try:
{}.keys(*(1, 2))
except TypeError:
pass
else:
raise RuntimeError
def test_oldargs0_0_kw(self):
try:
{}.keys(x=2)
except TypeError:
pass
else:
raise RuntimeError
def test_oldargs0_1_kw(self):
self.assertRaises(TypeError, {}.keys, x=2)
def test_oldargs0_2_kw(self):
self.assertRaises(TypeError, {}.keys, x=2, y=2)
def test_oldargs1_0(self):
self.assertRaises(TypeError, [].count)
def test_oldargs1_1(self):
[].count(1)
def test_oldargs1_2(self):
self.assertRaises(TypeError, [].count, 1, 2)
def test_oldargs1_0_ext(self):
try:
[].count(*())
except TypeError:
pass
else:
raise RuntimeError
def test_oldargs1_1_ext(self):
[].count(*(1,))
def test_oldargs1_2_ext(self):
try:
[].count(*(1, 2))
except TypeError:
pass
else:
raise RuntimeError
def test_oldargs1_0_kw(self):
self.assertRaises(TypeError, [].count, x=2)
def test_oldargs1_1_kw(self):
self.assertRaises(TypeError, [].count, {}, x=2)
def test_oldargs1_2_kw(self):
self.assertRaises(TypeError, [].count, x=2, y=2)
def test_main():
test_support.run_unittest(CFunctionCalls)
if __name__ == "__main__":
test_main()
|
mindbody/API-Examples
|
refs/heads/master
|
SDKs/Python/test/test_size.py
|
1
|
# coding: utf-8
"""
MINDBODY Public API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.size import Size # noqa: E501
from swagger_client.rest import ApiException
class TestSize(unittest.TestCase):
"""Size unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testSize(self):
"""Test Size"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.size.Size() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
Tranzystorek/servo
|
refs/heads/master
|
components/script/dom/bindings/codegen/parser/tests/test_special_methods.py
|
32
|
import WebIDL
def WebIDLTest(parser, harness):
parser.parse("""
interface SpecialMethods {
getter long long (unsigned long index);
setter long long (unsigned long index, long long value);
creator long long (unsigned long index, long long value);
getter boolean (DOMString name);
setter boolean (DOMString name, boolean value);
creator boolean (DOMString name, boolean value);
deleter boolean (DOMString name);
readonly attribute unsigned long length;
};
interface SpecialMethodsCombination {
setter creator long long (unsigned long index, long long value);
getter deleter boolean (DOMString name);
setter creator boolean (DOMString name, boolean value);
};
""")
results = parser.finish()
def checkMethod(method, QName, name,
static=False, getter=False, setter=False, creator=False,
deleter=False, legacycaller=False, stringifier=False):
harness.ok(isinstance(method, WebIDL.IDLMethod),
"Should be an IDLMethod")
harness.check(method.identifier.QName(), QName, "Method has the right QName")
harness.check(method.identifier.name, name, "Method has the right name")
harness.check(method.isStatic(), static, "Method has the correct static value")
harness.check(method.isGetter(), getter, "Method has the correct getter value")
harness.check(method.isSetter(), setter, "Method has the correct setter value")
harness.check(method.isCreator(), creator, "Method has the correct creator value")
harness.check(method.isDeleter(), deleter, "Method has the correct deleter value")
harness.check(method.isLegacycaller(), legacycaller, "Method has the correct legacycaller value")
harness.check(method.isStringifier(), stringifier, "Method has the correct stringifier value")
harness.check(len(results), 2, "Expect 2 interfaces")
iface = results[0]
harness.check(len(iface.members), 8, "Expect 8 members")
checkMethod(iface.members[0], "::SpecialMethods::__indexedgetter", "__indexedgetter",
getter=True)
checkMethod(iface.members[1], "::SpecialMethods::__indexedsetter", "__indexedsetter",
setter=True)
checkMethod(iface.members[2], "::SpecialMethods::__indexedcreator", "__indexedcreator",
creator=True)
checkMethod(iface.members[3], "::SpecialMethods::__namedgetter", "__namedgetter",
getter=True)
checkMethod(iface.members[4], "::SpecialMethods::__namedsetter", "__namedsetter",
setter=True)
checkMethod(iface.members[5], "::SpecialMethods::__namedcreator", "__namedcreator",
creator=True)
checkMethod(iface.members[6], "::SpecialMethods::__nameddeleter", "__nameddeleter",
deleter=True)
iface = results[1]
harness.check(len(iface.members), 3, "Expect 3 members")
checkMethod(iface.members[0], "::SpecialMethodsCombination::__indexedsettercreator",
"__indexedsettercreator", setter=True, creator=True)
checkMethod(iface.members[1], "::SpecialMethodsCombination::__namedgetterdeleter",
"__namedgetterdeleter", getter=True, deleter=True)
checkMethod(iface.members[2], "::SpecialMethodsCombination::__namedsettercreator",
"__namedsettercreator", setter=True, creator=True)
parser = parser.reset();
threw = False
try:
parser.parse(
"""
interface IndexedDeleter {
deleter void(unsigned long index);
};
""")
parser.finish()
except:
threw = True
harness.ok(threw, "There are no indexed deleters")
|
ibc/MediaSoup
|
refs/heads/v3
|
worker/deps/gyp/test/ninja/empty-and-non-empty-duplicate-name/gyptest-empty-and-non-empty-duplicate-name.py
|
100
|
#!/usr/bin/env python
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that a phony target isn't output when a non-empty target with the same
name has already been output.
"""
import TestGyp
test = TestGyp.TestGyp(formats=['ninja'])
# Reset xcode_ninja_target_pattern to its default for this test.
test.run_gyp('test.gyp', '-G', 'xcode_ninja_target_pattern=^$')
# Check for both \r and \n to cover both windows and linux.
test.must_not_contain('out/Default/build.ninja', 'build empty_target: phony\r')
test.must_not_contain('out/Default/build.ninja', 'build empty_target: phony\n')
test.pass_test()
|
DeepVisionTeam/TensorFlowBook
|
refs/heads/master
|
Titanic/01_tensorflow_basic.py
|
2
|
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
################################
# Preparing Data
################################
# read data from file
data = pd.read_csv('data/train.csv')
# fill nan values with 0
data = data.fillna(0)
# convert ['male', 'female'] values of Sex to [1, 0]
data['Sex'] = data['Sex'].apply(lambda s: 1 if s == 'male' else 0)
# 'Survived' is the label of one class,
# add 'Deceased' as the other class
data['Deceased'] = data['Survived'].apply(lambda s: 1 - s)
# select features and labels for training
dataset_X = data[['Sex', 'Age', 'Pclass', 'SibSp', 'Parch', 'Fare']].as_matrix()
dataset_Y = data[['Deceased', 'Survived']].as_matrix()
# split training data and validation set data
X_train, X_val, y_train, y_val = train_test_split(dataset_X, dataset_Y,
test_size=0.2,
random_state=42)
################################
# Constructing Dataflow Graph
################################
# create symbolic variables
X = tf.placeholder(tf.float32, shape=[None, 6])
y = tf.placeholder(tf.float32, shape=[None, 2])
# weights and bias are the variables to be trained
weights = tf.Variable(tf.random_normal([6, 2]), name='weights')
bias = tf.Variable(tf.zeros([2]), name='bias')
y_pred = tf.nn.softmax(tf.matmul(X, weights) + bias)
# Minimise cost using cross entropy
# NOTE: add an epsilon (1e-10) when calculating log(y_pred),
# otherwise the result will be -inf
cross_entropy = - tf.reduce_sum(y * tf.log(y_pred + 1e-10),
reduction_indices=1)
cost = tf.reduce_mean(cross_entropy)
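# Illustrative note (not part of the original script): with one-hot labels a single
# confidently wrong prediction already breaks the loss, e.g. y = [1, 0] and
# y_pred = [0, 1] gives y * log(y_pred) = [1 * log(0), 0] = [-inf, 0], so the
# summed cost becomes inf and the gradients turn into NaN. The 1e-10 epsilon
# above merely bounds log() at about -23, keeping training numerically stable.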
# use gradient descent optimizer to minimize cost
train_op = tf.train.GradientDescentOptimizer(0.001).minimize(cost)
# calculate accuracy
correct_pred = tf.equal(tf.argmax(y, 1), tf.argmax(y_pred, 1))
acc_op = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
################################
# Training and Evaluating the model
################################
# use session to run the calculation
with tf.Session() as sess:
# variables have to be initialized at the first place
tf.global_variables_initializer().run()
# training loop
for epoch in range(10):
total_loss = 0.
for i in range(len(X_train)):
# prepare feed data and run
feed_dict = {X: [X_train[i]], y: [y_train[i]]}
_, loss = sess.run([train_op, cost], feed_dict=feed_dict)
total_loss += loss
# display loss per epoch
print('Epoch: %04d, total loss=%.9f' % (epoch + 1, total_loss))
# Accuracy calculated by TensorFlow
accuracy = sess.run(acc_op, feed_dict={X: X_val, y: y_val})
print("Accuracy on validation set: %.9f" % accuracy)
# Accuracy calculated by NumPy
pred = sess.run(y_pred, feed_dict={X: X_val})
correct = np.equal(np.argmax(pred, 1), np.argmax(y_val, 1))
numpy_accuracy = np.mean(correct.astype(np.float32))
print("Accuracy on validation set (numpy): %.9f" % numpy_accuracy)
# predict on test data
testdata = pd.read_csv('data/test.csv')
testdata = testdata.fillna(0)
# convert ['male', 'female'] values of Sex to [1, 0]
testdata['Sex'] = testdata['Sex'].apply(lambda s: 1 if s == 'male' else 0)
X_test = testdata[['Sex', 'Age', 'Pclass', 'SibSp', 'Parch', 'Fare']]
predictions = np.argmax(sess.run(y_pred, feed_dict={X: X_test}), 1)
submission = pd.DataFrame({
"PassengerId": testdata["PassengerId"],
"Survived": predictions
})
submission.to_csv("titanic-submission.csv", index=False)
|
jamesblunt/edx-platform
|
refs/heads/master
|
common/test/acceptance/pages/lms/course_wiki.py
|
146
|
"""
Wiki tab on courses
"""
from .course_page import CoursePage
from ...pages.studio.utils import type_in_codemirror
class CourseWikiPage(CoursePage):
"""
Course wiki navigation and objects.
"""
url_path = "wiki"
def is_browser_on_page(self):
"""
Browser is on the wiki page if the wiki breadcrumb is present
"""
return self.q(css='.breadcrumb').present
def open_editor(self):
"""
Open the editor for the wiki article
"""
edit_button = self.q(css='.fa-pencil')
edit_button.click()
@property
def article_name(self):
"""
Return the name of the article
"""
return str(self.q(css='.main-article h1').text[0])
class CourseWikiEditPage(CoursePage):
"""
Editor page
"""
def __init__(self, browser, course_id, course_info):
"""
Course ID is currently of the form "edx/999/2013_Spring"
but this format could change.
"""
super(CourseWikiEditPage, self).__init__(browser, course_id)
self.course_id = course_id
self.course_info = course_info
self.article_name = "{org}.{course_number}.{course_run}".format(
org=self.course_info['org'],
course_number=self.course_info['number'],
course_run=self.course_info['run']
)
@property
def url_path(self):
"""
Construct a URL to the page within the course.
"""
return "/wiki/" + self.article_name + "/_edit"
def is_browser_on_page(self):
"""
Browser is on the wiki editor page if the CodeMirror editor is present
"""
return self.q(css='.CodeMirror-scroll').present
def replace_wiki_content(self, content):
"""
Editor must be open already. This will replace any content in the editor
with new content
"""
type_in_codemirror(self, 0, content)
def save_wiki_content(self):
"""
When the editor is open, click save
"""
self.q(css='button[name="save"]').click()
self.wait_for_element_presence('.alert-success', 'wait for the article to be saved')
|
eblade/images5
|
refs/heads/porting
|
exifread/utils.py
|
16
|
"""
Misc utilities.
"""
def ord_(dta):
if isinstance(dta, str):
return ord(dta)
return dta
def make_string(seq):
"""
Don't throw an exception when given an out of range character.
"""
string = ''
for c in seq:
# Screen out non-printing characters
try:
if 32 <= c < 256:
string += chr(c)
except TypeError:
pass
# If no printing chars
if not string:
return str(seq)
return string
def make_string_uc(seq):
"""
Special version to deal with the code in the first 8 bytes of a user comment.
First 8 bytes gives coding system e.g. ASCII vs. JIS vs Unicode.
"""
seq = seq[8:]
# Of course, this is only correct if ASCII, and the standard explicitly
# allows JIS and Unicode.
return make_string(seq)
def s2n_motorola(string):
"""Extract multi-byte integer in Motorola format (little endian)."""
x = 0
for c in string:
x = (x << 8) | ord_(c)
return x
def s2n_intel(string):
"""Extract multi-byte integer in Intel format (big endian)."""
x = 0
y = 0
for c in string:
x = x | (ord_(c) << y)
y += 8
return x
class Ratio:
"""
Ratio object that reduces itself to lowest terms for printing.
"""
def __init__(self, num, den):
self.num = num
self.den = den
def __repr__(self):
self.reduce()
if self.den == 1:
return str(self.num)
return '%d/%d' % (self.num, self.den)
def _gcd(self, a, b):
if b == 0:
return a
else:
return self._gcd(b, a % b)
def reduce(self):
div = self._gcd(self.num, self.den)
if div > 1:
self.num = self.num // div
self.den = self.den // div
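# A minimal usage sketch (illustrative, not part of the original module); the
# byte values below are made up purely for demonstration.
if __name__ == '__main__':
    # Motorola (big-endian) order: most significant byte first.
    assert s2n_motorola(b'\x01\x02') == 0x0102
    # Intel (little-endian) order: least significant byte first.
    assert s2n_intel(b'\x01\x02') == 0x0201
    # Ratio reduces to lowest terms when printed.
    print(Ratio(6, 8))  # -> 3/4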
|
nouiz/ccw_tutorial_theano
|
refs/heads/master
|
04_debugging/02_compute_test_value.py
|
4
|
# Run
# python 02_compute_test_value.py
# It should raise an exception when it tries to execute the call to fn.
# The exception doesn't make it easy to tell which line of the python script
# first created an invalid expression though.
# Modify the script to use compute_test_value to find the first bad line.
#
# This shows you another way than using Theano flags to find the line
# in your code that builds a bad graph.
import numpy as np
from theano import function
from theano import tensor as T
a = T.vector()
b = T.log(a)
c = T.nnet.sigmoid(b)
d = T.sqrt(c)
e = T.concatenate((d, c), axis=0)
f = b * c * d
g = e + f
h = g / c
fn = function([a], h)
fn(np.ones((3,)).astype(a.dtype))
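# One possible modification (illustrative sketch, not the original solution):
# enabling test values before the graph is built makes the faulty expression
# raise at graph-construction time, e.g.
#
#   import theano
#   theano.config.compute_test_value = 'raise'
#   a = T.vector()
#   a.tag.test_value = np.ones((3,), dtype=a.dtype)
#   ...
#
# With the 3-element test value above, the first line that fails is
# `g = e + f`, since `e` has 6 elements (the concatenation) while `f` has 3.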
|
Matt-Deacalion/django
|
refs/heads/master
|
tests/template_tests/filter_tests/test_ljust.py
|
521
|
from django.template.defaultfilters import ljust
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class LjustTests(SimpleTestCase):
@setup({'ljust01': '{% autoescape off %}.{{ a|ljust:"5" }}. .{{ b|ljust:"5" }}.{% endautoescape %}'})
def test_ljust01(self):
output = self.engine.render_to_string('ljust01', {"a": "a&b", "b": mark_safe("a&b")})
self.assertEqual(output, ".a&b . .a&b .")
@setup({'ljust02': '.{{ a|ljust:"5" }}. .{{ b|ljust:"5" }}.'})
def test_ljust02(self):
output = self.engine.render_to_string('ljust02', {"a": "a&b", "b": mark_safe("a&b")})
self.assertEqual(output, ".a&b . .a&b .")
class FunctionTests(SimpleTestCase):
def test_ljust(self):
self.assertEqual(ljust('test', 10), 'test ')
self.assertEqual(ljust('test', 3), 'test')
def test_less_than_string_length(self):
self.assertEqual(ljust('test', 3), 'test')
def test_non_string_input(self):
self.assertEqual(ljust(123, 4), '123 ')
|
tacaswell/scikit-xray
|
refs/heads/master
|
skbeam/core/calibration.py
|
5
|
# ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
"""Automatically calibrate a diffraction beam line given a powder sample of a
known sample.
"""
from __future__ import absolute_import, division, print_function
from collections import deque
from string import Template
import numpy as np
import scipy.signal
from .constants import calibration_standards
from .feature import (filter_peak_height, peak_refinement,
refine_log_quadratic)
from .utils import (angle_grid, radial_grid,
pairwise, bin_edges_to_centers, bin_1D)
def estimate_d_blind(name, wavelength, bin_centers, ring_average,
window_size, max_peak_count, thresh):
"""
Estimate the sample-detector distance
Given a radially integrated calibration image return an estimate for
the sample-detector distance. This function does not require a
rough estimate of what d should be.
For the peaks found the detector-sample distance is estimated via
.. math ::
D = \\frac{r}{\\tan 2\\theta}
where :math:`r` is the distance in mm from the calibrated center
to the ring on the detector and :math:`D` is the distance from
the sample to the detector.
Parameters
----------
name : str
The name of the calibration standard. Used to look up the
expected peak location
Valid options: $name_ops
wavelength : float
The wavelength of scattered x-ray in nm
bin_centers : array
The distance from the calibrated center to the center of
the ring's annulus in mm
ring_average : array
The average intensity in the given ring of an azimuthally integrated
powder pattern. In counts [arb]
window_size : int
The number of elements on either side of a local maximum to
use for locating and refining peaks. Candidates are identified
as a relative maximum in a window sized (2*window_size + 1) and
the same window is used for fitting the peaks to refine the location.
max_peak_count : int
Use at most this many peaks
thresh : float
Fraction of maximum peak height
Returns
-------
dist_sample : float
The detector-sample distance in mm. This is the mean of the estimate
from all of the peaks used.
std_dist_sample : float
The standard deviation of d computed from the peaks used.
"""
# get the calibration standard
cal = calibration_standards[name]
# find the local maximums
cands = scipy.signal.argrelmax(ring_average, order=window_size)[0]
# filter local maximums by size
cands = filter_peak_height(ring_average, cands,
thresh*np.max(ring_average), window=window_size)
# TODO insert peak identification validation. This might be better than
# improving the threshold value.
# refine the locations of the peaks
peaks_x, peaks_y = peak_refinement(bin_centers, ring_average, cands,
window_size, refine_log_quadratic)
# compute tan(2theta) for the expected peaks
tan2theta = np.tan(cal.convert_2theta(wavelength))
# figure out how many peaks we can look at
slc = slice(0, np.min([len(tan2theta), len(peaks_x), max_peak_count]))
# estimate the sample-detector distance for each of the peaks
d_array = (peaks_x[slc] / tan2theta[slc])
return np.mean(d_array), np.std(d_array)
# Set an attribute for the calibration names that are valid options. This
# attribute also aids in autowrapping into VisTrails
estimate_d_blind.name = list(calibration_standards)
if estimate_d_blind.__doc__ is not None:
estimate_d_blind.__doc__ = Template(estimate_d_blind.__doc__).substitute(
name_ops=repr(sorted(estimate_d_blind.name)))
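# Worked example (illustrative numbers): a ring measured at r = 100 mm whose
# expected peak sits at 2*theta = 10 degrees gives
# D = r / tan(2*theta) = 100 / tan(0.1745) ~= 567 mm; estimate_d_blind averages
# this estimate over all matched peaks and also reports its standard deviation.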
def refine_center(image, calibrated_center, pixel_size, phi_steps, max_peaks,
thresh, window_size,
nx=None, min_x=None, max_x=None):
"""
Refines the location of the center of the beam.
This relies on being able to see the whole powder pattern.
Parameters
----------
image : ndarray
The image
calibrated_center : tuple
(row, column) the estimated center
pixel_size : tuple
(pixel_height, pixel_width)
phi_steps : int
How many regions to split the ring into, should be >10
max_peaks : int
Number of rings to look at
thresh : float
Fraction of maximum peak height
window_size : int, optional
The window size (in bins) to use when refining peaks
nx : int, optional
Number of bins to use for radial binning
min_x : float, optional
The minimum radius to use for radial binning
max_x : float, optional
The maximum radius to use for radial binning
Returns
-------
calibrated_center : tuple
The refined calibrated center.
"""
if nx is None:
nx = int(np.mean(image.shape) * 2)
phi = angle_grid(calibrated_center, image.shape, pixel_size).ravel()
r = radial_grid(calibrated_center, image.shape, pixel_size).ravel()
II = image.ravel()
phi_steps = np.linspace(-np.pi, np.pi, phi_steps, endpoint=True)
out = deque()
for phi_start, phi_end in pairwise(phi_steps):
mask = (phi <= phi_end) * (phi > phi_start)
out.append(bin_1D(r[mask], II[mask],
nx=nx, min_x=min_x, max_x=max_x))
out = list(out)
ring_trace = []
for bins, b_sum, b_count in out:
mask = b_sum > 10
avg = b_sum[mask] / b_count[mask]
bin_centers = bin_edges_to_centers(bins)[mask]
cands = scipy.signal.argrelmax(avg, order=window_size)[0]
# filter local maximums by size
cands = filter_peak_height(avg, cands, thresh*np.max(avg),
window=window_size)
ring_trace.append(bin_centers[cands[:max_peaks]])
tr_len = [len(rt) for rt in ring_trace]
mm = np.min(tr_len)
ring_trace = np.vstack([rt[:mm] for rt in ring_trace]).T
mean_dr = np.mean(ring_trace - np.mean(ring_trace, axis=1, keepdims=True),
axis=0)
phi_centers = bin_edges_to_centers(phi_steps)
delta = np.mean(np.diff(phi_centers))
# this is doing just one term of a Fourier series
# note that we have to convert _back_ to pixels from real units
# TODO do this with better integration/handle repeat better
col_shift = (np.sum(np.sin(phi_centers) * mean_dr) *
delta / (np.pi * pixel_size[1]))
row_shift = (np.sum(np.cos(phi_centers) * mean_dr) *
delta / (np.pi * pixel_size[0]))
return tuple(np.array(calibrated_center) +
np.array([row_shift, col_shift]))
|
dgjustice/ansible
|
refs/heads/devel
|
lib/ansible/cli/vault.py
|
5
|
# (c) 2014, James Tanner <tanner.jc@gmail.com>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# ansible-vault is a script that encrypts/decrypts YAML files. See
# http://docs.ansible.com/playbooks_vault.html for more details.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import sys
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.parsing.dataloader import DataLoader
from ansible.parsing.vault import VaultEditor
from ansible.cli import CLI
from ansible.module_utils._text import to_text, to_bytes
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class VaultCLI(CLI):
""" Vault command line class """
VALID_ACTIONS = ("create", "decrypt", "edit", "encrypt", "encrypt_string", "rekey", "view")
FROM_STDIN = "stdin"
FROM_ARGS = "the command line args"
FROM_PROMPT = "the interactive prompt"
def __init__(self, args):
self.vault_pass = None
self.new_vault_pass = None
self.encrypt_string_read_stdin = False
super(VaultCLI, self).__init__(args)
def parse(self):
self.parser = CLI.base_parser(
vault_opts=True,
usage = "usage: %%prog [%s] [--help] [options] vaultfile.yml" % "|".join(self.VALID_ACTIONS),
epilog = "\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
)
self.set_action()
# options specific to self.actions
if self.action == "create":
self.parser.set_usage("usage: %prog create [options] file_name")
elif self.action == "decrypt":
self.parser.set_usage("usage: %prog decrypt [options] file_name")
elif self.action == "edit":
self.parser.set_usage("usage: %prog edit [options] file_name")
elif self.action == "view":
self.parser.set_usage("usage: %prog view [options] file_name")
elif self.action == "encrypt":
self.parser.set_usage("usage: %prog encrypt [options] file_name")
# I have no preference for either dash or underscore
elif self.action == "encrypt_string":
self.parser.add_option('-p', '--prompt', dest='encrypt_string_prompt',
action='store_true',
help="Prompt for the string to encrypt")
self.parser.add_option('-n', '--name', dest='encrypt_string_names',
action='append',
help="Specify the variable name")
self.parser.add_option('--stdin-name', dest='encrypt_string_stdin_name',
default=None,
help="Specify the variable name for stdin")
self.parser.set_usage("usage: %prog encrypt-string [--prompt] [options] string_to_encrypt")
elif self.action == "rekey":
self.parser.set_usage("usage: %prog rekey [options] file_name")
super(VaultCLI, self).parse()
display.verbosity = self.options.verbosity
can_output = ['encrypt', 'decrypt', 'encrypt_string']
if self.action not in can_output:
if self.options.output_file:
raise AnsibleOptionsError("The --output option can be used only with ansible-vault %s" % '/'.join(can_output))
if len(self.args) == 0:
raise AnsibleOptionsError("Vault requires at least one filename as a parameter")
else:
# This restriction should remain in place until it's possible to
# load multiple YAML records from a single file, or it's too easy
# to create an encrypted file that can't be read back in. But in
# the meanwhile, "cat a b c|ansible-vault encrypt --output x" is
# a workaround.
if self.options.output_file and len(self.args) > 1:
raise AnsibleOptionsError("At most one input file may be used with the --output option")
if self.action == 'encrypt_string':
if '-' in self.args or len(self.args) == 0 or self.options.encrypt_string_stdin_name:
self.encrypt_string_read_stdin = True
# TODO: prompting from stdin and reading from stdin seem
# mutually exclusive, but verify that.
if self.options.encrypt_string_prompt and self.encrypt_string_read_stdin:
raise AnsibleOptionsError('The --prompt option is not supported if also reading input from stdin')
def run(self):
super(VaultCLI, self).run()
loader = DataLoader()
# set default restrictive umask
old_umask = os.umask(0o077)
if self.options.vault_password_file:
# read vault_pass from a file
self.vault_pass = CLI.read_vault_password_file(self.options.vault_password_file, loader)
if self.options.new_vault_password_file:
# for rekey only
self.new_vault_pass = CLI.read_vault_password_file(self.options.new_vault_password_file, loader)
if not self.vault_pass or self.options.ask_vault_pass:
self.vault_pass = self.ask_vault_passwords()
if not self.vault_pass:
raise AnsibleOptionsError("A password is required to use Ansible's Vault")
if self.action == 'rekey':
if not self.new_vault_pass:
self.new_vault_pass = self.ask_new_vault_passwords()
if not self.new_vault_pass:
raise AnsibleOptionsError("A password is required to rekey Ansible's Vault")
if self.action == 'encrypt_string':
if self.options.encrypt_string_prompt:
self.encrypt_string_prompt = True
self.editor = VaultEditor(self.vault_pass)
self.execute()
# and restore umask
os.umask(old_umask)
def execute_encrypt(self):
if len(self.args) == 0 and sys.stdin.isatty():
display.display("Reading plaintext input from stdin", stderr=True)
for f in self.args or ['-']:
self.editor.encrypt_file(f, output_file=self.options.output_file)
if sys.stdout.isatty():
display.display("Encryption successful", stderr=True)
def format_ciphertext_yaml(self, b_ciphertext, indent=None, name=None):
indent = indent or 10
block_format_var_name = ""
if name:
block_format_var_name = "%s: " % name
block_format_header = "%s!vault-encrypted |" % block_format_var_name
lines = []
vault_ciphertext = to_text(b_ciphertext)
lines.append(block_format_header)
for line in vault_ciphertext.splitlines():
lines.append('%s%s' % (' ' * indent, line))
yaml_ciphertext = '\n'.join(lines)
return yaml_ciphertext
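# For example (illustrative values), name='db_password' with the default indent
# renders roughly as:
#
#   db_password: !vault-encrypted |
#             $ANSIBLE_VAULT;1.1;AES256
#             3038613262...
#
# i.e. an optional "name: " prefix, the block header, and each ciphertext line
# indented by `indent` spaces.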
def execute_encrypt_string(self):
b_plaintext = None
# Holds tuples (the_text, the_source_of_the_string, the variable name if it's provided).
b_plaintext_list = []
# remove the non-option '-' arg (used to indicate 'read from stdin') from the candidate args so
# we don't add it to the plaintext list
args = [x for x in self.args if x != '-']
# We can prompt and read input, or read from stdin, but not both.
if self.options.encrypt_string_prompt:
msg = "String to encrypt: "
name = None
name_prompt_response = display.prompt('Variable name (enter for no name): ')
# TODO: enforce var naming rules?
if name_prompt_response != "":
name = name_prompt_response
# could use private=True for shadowed input if useful
prompt_response = display.prompt(msg)
if prompt_response == '':
raise AnsibleOptionsError('The plaintext provided from the prompt was empty, not encrypting')
b_plaintext = to_bytes(prompt_response)
b_plaintext_list.append((b_plaintext, self.FROM_PROMPT, name))
# read from stdin
if self.encrypt_string_read_stdin:
if sys.stdout.isatty():
display.display("Reading plaintext input from stdin. (ctrl-d to end input)", stderr=True)
stdin_text = sys.stdin.read()
if stdin_text == '':
raise AnsibleOptionsError('stdin was empty, not encrypting')
b_plaintext = to_bytes(stdin_text)
# defaults to None
name = self.options.encrypt_string_stdin_name
b_plaintext_list.append((b_plaintext, self.FROM_STDIN, name))
# use any leftover args as strings to encrypt
# Try to match args up to --name options
if hasattr(self.options, 'encrypt_string_names') and self.options.encrypt_string_names:
name_and_text_list = list(zip(self.options.encrypt_string_names, args))
# Some but not enough --name's to name each var
if len(args) > len(name_and_text_list):
# Trying to avoid ever showing the plaintext in the output, so this warning is vague to avoid that.
display.display('The number of --name options does not match the number of args.',
stderr=True)
display.display('The last named variable will be "%s". The rest will not have names.' % self.options.encrypt_string_names[-1],
stderr=True)
# Add the rest of the args without specifying a name
for extra_arg in args[len(name_and_text_list):]:
name_and_text_list.append((None, extra_arg))
# if no --names are provided, just use the args without a name.
else:
name_and_text_list = [(None, x) for x in args]
# Convert the plaintext text objects to bytestrings and collect
for name_and_text in name_and_text_list:
name, plaintext = name_and_text
if plaintext == '':
raise AnsibleOptionsError('The plaintext provided from the command line args was empty, not encrypting')
b_plaintext = to_bytes(plaintext)
b_plaintext_list.append((b_plaintext, self.FROM_ARGS, name))
# Format the encrypted strings and any corresponding stderr output
outputs = self._format_output_vault_strings(b_plaintext_list)
for output in outputs:
err = output.get('err', None)
out = output.get('out', '')
if err:
sys.stderr.write(err)
print(out)
if sys.stdout.isatty():
display.display("Encryption successful", stderr=True)
# TODO: offer block or string ala eyaml
def _format_output_vault_strings(self, b_plaintext_list):
# If we are only showing one item in the output, we don't need to include commented
# delimiters in the text
show_delimiter = False
if len(b_plaintext_list) > 1:
show_delimiter = True
# list of dicts {'out': '', 'err': ''}
output = []
# Encrypt the plaintext, and format it into a yaml block that can be pasted into a playbook.
# For more than one input, show some differentiating info in the stderr output so we can tell them
# apart. If we have a var name, we include that in the yaml
for index, b_plaintext_info in enumerate(b_plaintext_list):
# (the text itself, which input it came from, its name)
b_plaintext, src, name = b_plaintext_info
b_ciphertext = self.editor.encrypt_bytes(b_plaintext)
# block formatting
yaml_text = self.format_ciphertext_yaml(b_ciphertext, name=name)
err_msg = None
if show_delimiter:
human_index = index + 1
if name:
err_msg = '# The encrypted version of variable ("%s", the string #%d from %s).\n' % (name, human_index, src)
else:
err_msg = '# The encrypted version of the string #%d from %s.\n' % (human_index, src)
output.append({'out': yaml_text, 'err': err_msg})
return output
def execute_decrypt(self):
if len(self.args) == 0 and sys.stdin.isatty():
display.display("Reading ciphertext input from stdin", stderr=True)
for f in self.args or ['-']:
self.editor.decrypt_file(f, output_file=self.options.output_file)
if sys.stdout.isatty():
display.display("Decryption successful", stderr=True)
def execute_create(self):
if len(self.args) > 1:
raise AnsibleOptionsError("ansible-vault create can take only one filename argument")
self.editor.create_file(self.args[0])
def execute_edit(self):
for f in self.args:
self.editor.edit_file(f)
def execute_view(self):
for f in self.args:
# Note: vault should return byte strings because it could encrypt
# and decrypt binary files. We are responsible for changing it to
# unicode here because we are displaying it and therefore can make
# the decision that the display doesn't have to be precisely what
# the input was (leave that to decrypt instead)
self.pager(to_text(self.editor.plaintext(f)))
def execute_rekey(self):
for f in self.args:
if not (os.path.isfile(f)):
raise AnsibleError(f + " does not exist")
for f in self.args:
self.editor.rekey_file(f, self.new_vault_pass)
display.display("Rekey successful", stderr=True)
|
dmacd/FB-try1
|
refs/heads/master
|
pyramid/frameworkbenchmarks/models.py
|
4
|
"""
Benchmark models.
"""
import json
import psycopg2
from collections import Iterable
from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String
from sqlalchemy.orm import sessionmaker
from sqlalchemy.pool import QueuePool
from sqlalchemy.ext.declarative import declarative_base, DeclarativeMeta
DBHOSTNAME = 'localhost'
def get_conn():
return psycopg2.connect(
user = 'benchmarkdbuser',
password = 'benchmarkdbpass',
host = DBHOSTNAME,
port = '5432',
database = 'hello_world'
)
conn_pool = QueuePool(get_conn, pool_size=100, max_overflow=25, echo=False)
pg = create_engine('postgresql://', pool=conn_pool)
DBSession = sessionmaker(bind=pg)()
metadata = MetaData()
DatabaseBase = declarative_base()
def sqlalchemy_encoder_factory(system_values):
return SQLAlchemyEncoder()
class SQLAlchemyEncoder(json.JSONEncoder):
def __call__(self, obj, system_values):
if isinstance(obj, Iterable):
return json.dumps([self.default(x) for x in obj])
else:
return json.dumps(self.default(obj))
def default(self, obj):
if isinstance(obj.__class__, DeclarativeMeta):
return obj.__json__()
return super(SQLAlchemyEncoder, self).default(obj)
class World(DatabaseBase):
__tablename__ = 'World'
id = Column('id', Integer, primary_key=True)
randomNumber = Column('randomnumber', Integer, nullable=False, server_default='0')
def __json__(self):
return {'id': self.id, 'randomNumber': self.randomNumber}
class Fortune(DatabaseBase):
__tablename__ = 'Fortune'
id = Column('id', Integer, primary_key=True)
message = Column('message', String, nullable=False)
def __json__(self):
return {'id': self.id, 'message': self.message}
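# A minimal offline sketch (illustrative, not part of the original module): the
# declarative models can be built in memory and serialised through __json__
# without touching the database.
if __name__ == '__main__':
    rows = [World(id=1, randomNumber=42), World(id=2, randomNumber=7)]
    encoder = SQLAlchemyEncoder()
    print(encoder(rows, system_values=None))  # JSON array of the two rows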
|
pselle/calibre
|
refs/heads/master
|
src/calibre/ebooks/oeb/transforms/filenames.py
|
14
|
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import posixpath
from urlparse import urldefrag, urlparse
from lxml import etree
from calibre.ebooks.oeb.base import rewrite_links, urlnormalize
class RenameFiles(object): # {{{
'''
Rename files and adjust all links pointing to them. Note that the spine
and manifest are not touched by this transform.
'''
def __init__(self, rename_map, renamed_items_map = None):
self.rename_map = rename_map
self.renamed_items_map = renamed_items_map
def __call__(self, oeb, opts):
import cssutils
self.log = oeb.logger
self.opts = opts
self.oeb = oeb
for item in oeb.manifest.items:
self.current_item = item
if etree.iselement(item.data):
rewrite_links(self.current_item.data, self.url_replacer)
elif hasattr(item.data, 'cssText'):
cssutils.replaceUrls(item.data, self.url_replacer)
if self.oeb.guide:
for ref in self.oeb.guide.values():
href = urlnormalize(ref.href)
href, frag = urldefrag(href)
replacement = self.rename_map.get(href, None)
if replacement is not None:
nhref = replacement
if frag:
nhref += '#' + frag
ref.href = nhref
if self.oeb.toc:
self.fix_toc_entry(self.oeb.toc)
def fix_toc_entry(self, toc):
if toc.href:
href = urlnormalize(toc.href)
href, frag = urldefrag(href)
replacement = self.rename_map.get(href, None)
if replacement is not None:
nhref = replacement
if frag:
nhref = '#'.join((nhref, frag))
toc.href = nhref
for x in toc:
self.fix_toc_entry(x)
def url_replacer(self, orig_url):
url = urlnormalize(orig_url)
parts = urlparse(url)
if parts.scheme:
# Only rewrite local URLs
return orig_url
path, frag = urldefrag(url)
if self.renamed_items_map:
orig_item = self.renamed_items_map.get(self.current_item.href, self.current_item)
else:
orig_item = self.current_item
href = orig_item.abshref(path)
replacement = self.current_item.relhref(self.rename_map.get(href, href))
if frag:
replacement += '#' + frag
return replacement
# }}}
class UniqueFilenames(object): # {{{
'Ensure that every item in the manifest has a unique filename'
def __call__(self, oeb, opts):
self.log = oeb.logger
self.opts = opts
self.oeb = oeb
self.seen_filenames = set([])
self.rename_map = {}
for item in list(oeb.manifest.items):
fname = posixpath.basename(item.href)
if fname in self.seen_filenames:
suffix = self.unique_suffix(fname)
data = item.data
base, ext = posixpath.splitext(item.href)
nhref = base + suffix + ext
nhref = oeb.manifest.generate(href=nhref)[1]
spine_pos = item.spine_position
oeb.manifest.remove(item)
nitem = oeb.manifest.add(item.id, nhref, item.media_type, data=data,
fallback=item.fallback)
self.seen_filenames.add(posixpath.basename(nhref))
self.rename_map[item.href] = nhref
if spine_pos is not None:
oeb.spine.insert(spine_pos, nitem, item.linear)
else:
self.seen_filenames.add(fname)
if self.rename_map:
self.log('Found non-unique filenames, renaming to support broken'
' EPUB readers like FBReader, Aldiko and Stanza...')
from pprint import pformat
self.log.debug(pformat(self.rename_map))
renamer = RenameFiles(self.rename_map)
renamer(oeb, opts)
def unique_suffix(self, fname):
base, ext = posixpath.splitext(fname)
c = 0
while True:
c += 1
suffix = '_u%d'%c
candidate = base + suffix + ext
if candidate not in self.seen_filenames:
return suffix
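# For example (illustrative): if 'index.html' has already been seen, the first
# free suffix is '_u1' and the colliding item is renamed to 'index_u1.html'.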
# }}}
class FlatFilenames(object): # {{{
'Ensure that every item in the manifest has a unique filename without subdirectories.'
def __call__(self, oeb, opts):
self.log = oeb.logger
self.opts = opts
self.oeb = oeb
self.rename_map = {}
self.renamed_items_map = {}
for item in list(oeb.manifest.items):
# Flatten URL by removing directories.
# Example: a/b/c/index.html -> a_b_c_index.html
nhref = item.href.replace("/", "_")
if item.href == nhref:
# URL hasn't changed, skip item.
continue
data = item.data
isp = item.spine_position
nhref = oeb.manifest.generate(href=nhref)[1]
if isp is not None:
oeb.spine.remove(item)
oeb.manifest.remove(item)
nitem = oeb.manifest.add(item.id, nhref, item.media_type, data=data,
fallback=item.fallback)
self.rename_map[item.href] = nhref
self.renamed_items_map[nhref] = item
if isp is not None:
oeb.spine.insert(isp, nitem, item.linear)
if self.rename_map:
self.log('Found non-flat filenames, renaming to support broken'
' EPUB readers like FBReader...')
from pprint import pformat
self.log.debug(pformat(self.rename_map))
self.log.debug(pformat(self.renamed_items_map))
renamer = RenameFiles(self.rename_map, self.renamed_items_map)
renamer(oeb, opts)
# }}}
|
heke123/chromium-crosswalk
|
refs/heads/master
|
third_party/WebKit/Tools/Scripts/webkitpy/common/checkout/scm/scm_mock.py
|
4
|
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.executive_mock import MockExecutive
class MockSCM(object):
executable_name = "MockSCM"
def __init__(self, filesystem=None, executive=None):
self.checkout_root = "/mock-checkout"
self.added_paths = set()
self._filesystem = filesystem or MockFileSystem()
self._executive = executive or MockExecutive()
self._local_commits = []
def add(self, destination_path, return_exit_code=False):
self.add_list([destination_path], return_exit_code)
def add_list(self, destination_paths, return_exit_code=False):
self.added_paths.update(set(destination_paths))
if return_exit_code:
return 0
def has_working_directory_changes(self):
return False
def ensure_cleanly_tracking_remote_master(self):
pass
def current_branch(self):
return "mock-branch-name"
def checkout_branch(self, name):
pass
def create_clean_branch(self, name):
pass
def delete_branch(self, name):
pass
def supports_local_commits(self):
return True
def exists(self, path):
# TestRealMain.test_real_main (and several other rebaseline tests) are sensitive to this return value.
# We should make those tests more robust, but for now we just return True always (since no test needs otherwise).
return True
def absolute_path(self, *comps):
return self._filesystem.join(self.checkout_root, *comps)
def commit_position(self, path):
return 5678
def commit_position_from_git_commit(self, git_commit):
if git_commit == '6469e754a1':
return 1234
if git_commit == '624c3081c0':
return 5678
if git_commit == '624caaaaaa':
return 10000
return None
def timestamp_of_revision(self, path, revision):
return '2013-02-01 08:48:05 +0000'
def commit_locally_with_message(self, message):
self._local_commits.append([message])
pass
def local_commits(self):
"""For testing purposes, returns the internal recording of commits made via commit_locally_with_message.
Format as [ message, commit_all_working_directory_changes, author ]."""
return self._local_commits
def delete(self, path):
return self.delete_list([path])
def delete_list(self, paths):
if not self._filesystem:
return
for path in paths:
if self._filesystem.exists(path):
self._filesystem.remove(path)
def move(self, origin, destination):
if self._filesystem:
self._filesystem.move(self.absolute_path(origin), self.absolute_path(destination))
def changed_files(self):
return []
|
kawasaki2013/python-for-android-x86
|
refs/heads/master
|
python-build/python-libs/gdata/src/gdata/docs/service.py
|
133
|
#!/usr/bin/python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DocsService extends the GDataService to streamline Google Documents
operations.
DocsService: Provides methods to query feeds and manipulate items.
Extends GDataService.
DocumentQuery: Queries a Google Document list feed.
DocumentAclQuery: Queries a Google Document Acl feed.
"""
__author__ = ('api.jfisher (Jeff Fisher), '
'e.bidelman (Eric Bidelman)')
import re
import atom
import gdata.service
import gdata.docs
import urllib
# XML Namespaces used in Google Documents entities.
DATA_KIND_SCHEME = gdata.GDATA_NAMESPACE + '#kind'
DOCUMENT_LABEL = 'document'
SPREADSHEET_LABEL = 'spreadsheet'
PRESENTATION_LABEL = 'presentation'
FOLDER_LABEL = 'folder'
PDF_LABEL = 'pdf'
LABEL_SCHEME = gdata.GDATA_NAMESPACE + '/labels'
STARRED_LABEL_TERM = LABEL_SCHEME + '#starred'
TRASHED_LABEL_TERM = LABEL_SCHEME + '#trashed'
HIDDEN_LABEL_TERM = LABEL_SCHEME + '#hidden'
MINE_LABEL_TERM = LABEL_SCHEME + '#mine'
PRIVATE_LABEL_TERM = LABEL_SCHEME + '#private'
SHARED_WITH_DOMAIN_LABEL_TERM = LABEL_SCHEME + '#shared-with-domain'
VIEWED_LABEL_TERM = LABEL_SCHEME + '#viewed'
FOLDERS_SCHEME_PREFIX = gdata.docs.DOCUMENTS_NAMESPACE + '/folders/'
# File extensions of documents that are permitted to be uploaded or downloaded.
SUPPORTED_FILETYPES = {
'CSV': 'text/csv',
'TSV': 'text/tab-separated-values',
'TAB': 'text/tab-separated-values',
'DOC': 'application/msword',
'DOCX': ('application/vnd.openxmlformats-officedocument.'
'wordprocessingml.document'),
'ODS': 'application/x-vnd.oasis.opendocument.spreadsheet',
'ODT': 'application/vnd.oasis.opendocument.text',
'RTF': 'application/rtf',
'SXW': 'application/vnd.sun.xml.writer',
'TXT': 'text/plain',
'XLS': 'application/vnd.ms-excel',
'XLSX': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
'PDF': 'application/pdf',
'PNG': 'image/png',
'PPT': 'application/vnd.ms-powerpoint',
'PPS': 'application/vnd.ms-powerpoint',
'HTM': 'text/html',
'HTML': 'text/html',
'ZIP': 'application/zip',
'SWF': 'application/x-shockwave-flash'
}
class DocsService(gdata.service.GDataService):
"""Client extension for the Google Documents service Document List feed."""
__FILE_EXT_PATTERN = re.compile('.*\.([a-zA-Z]{3,}$)')
__RESOURCE_ID_PATTERN = re.compile('^([a-z]*)(:|%3A)(.*)$')
def __init__(self, email=None, password=None, source=None,
server='docs.google.com', additional_headers=None, **kwargs):
"""Creates a client for the Google Documents service.
Args:
email: string (optional) The user's email address, used for
authentication.
password: string (optional) The user's password.
source: string (optional) The name of the user's application.
server: string (optional) The name of the server to which a connection
will be opened. Default value: 'docs.google.com'.
**kwargs: The other parameters to pass to gdata.service.GDataService
constructor.
"""
gdata.service.GDataService.__init__(
self, email=email, password=password, service='writely', source=source,
server=server, additional_headers=additional_headers, **kwargs)
def _MakeKindCategory(self, label):
if label is None:
return None
return atom.Category(scheme=DATA_KIND_SCHEME,
term=gdata.docs.DOCUMENTS_NAMESPACE + '#' + label, label=label)
def _MakeContentLinkFromId(self, resource_id):
match = self.__RESOURCE_ID_PATTERN.match(resource_id)
label = match.group(1)
doc_id = match.group(3)
if label == DOCUMENT_LABEL:
return '/feeds/download/documents/Export?docId=%s' % doc_id
if label == PRESENTATION_LABEL:
return '/feeds/download/presentations/Export?docId=%s' % doc_id
if label == SPREADSHEET_LABEL:
return ('http://spreadsheets.google.com/feeds/download/spreadsheets/'
'Export?key=%s' % doc_id)
raise ValueError, 'Invalid resource id: %s' % resource_id
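# For example (illustrative id): 'document:12345' maps to
# '/feeds/download/documents/Export?docId=12345', while 'spreadsheet:12345' is
# routed to the spreadsheets.google.com export URL built above.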
def _UploadFile(self, media_source, title, category, folder_or_uri=None):
"""Uploads a file to the Document List feed.
Args:
media_source: A gdata.MediaSource object containing the file to be
uploaded.
title: string The title of the document on the server after being
uploaded.
category: An atom.Category object specifying the appropriate document
type.
folder_or_uri: DocumentListEntry or string (optional) An object with a
link to a folder or a uri to a folder to upload to.
Note: A valid uri for a folder is of the form:
/feeds/folders/private/full/folder%3Afolder_id
Returns:
A DocumentListEntry containing information about the document created on
the Google Documents service.
"""
if folder_or_uri:
try:
uri = folder_or_uri.content.src
except AttributeError:
uri = folder_or_uri
else:
uri = '/feeds/documents/private/full'
entry = gdata.docs.DocumentListEntry()
entry.title = atom.Title(text=title)
if category is not None:
entry.category.append(category)
entry = self.Post(entry, uri, media_source=media_source,
extra_headers={'Slug': media_source.file_name},
converter=gdata.docs.DocumentListEntryFromString)
return entry
def _DownloadFile(self, uri, file_path):
"""Downloads a file.
Args:
uri: string The full Export URL to download the file from.
file_path: string The full path to save the file to.
Raises:
RequestError: on error response from server.
"""
server_response = self.request('GET', uri)
response_body = server_response.read()
if server_response.status != 200:
raise gdata.service.RequestError, {'status': server_response.status,
'reason': server_response.reason,
'body': response_body}
f = open(file_path, 'wb')
f.write(response_body)
f.flush()
f.close()
def MoveIntoFolder(self, source_entry, folder_entry):
"""Moves a document into a folder in the Document List Feed.
Args:
source_entry: DocumentListEntry An object representing the source
document/folder.
folder_entry: DocumentListEntry An object with a link to the destination
folder.
Returns:
A DocumentListEntry containing information about the document created on
the Google Documents service.
"""
entry = gdata.docs.DocumentListEntry()
entry.id = source_entry.id
entry = self.Post(entry, folder_entry.content.src,
converter=gdata.docs.DocumentListEntryFromString)
return entry
def Query(self, uri, converter=gdata.docs.DocumentListFeedFromString):
"""Queries the Document List feed and returns the resulting feed of
entries.
Args:
uri: string The full URI to be queried. This can contain query
parameters, a hostname, or simply the relative path to a Document
List feed. The DocumentQuery object is useful when constructing
query parameters.
converter: func (optional) A function which will be executed on the
retrieved item, generally to render it into a Python object.
By default the DocumentListFeedFromString function is used to
return a DocumentListFeed object. This is because most feed
queries will result in a feed and not a single entry.
"""
return self.Get(uri, converter=converter)
def QueryDocumentListFeed(self, uri):
"""Retrieves a DocumentListFeed by retrieving a URI based off the Document
List feed, including any query parameters. A DocumentQuery object can
be used to construct these parameters.
Args:
uri: string The URI of the feed being retrieved possibly with query
parameters.
Returns:
A DocumentListFeed object representing the feed returned by the server.
"""
return self.Get(uri, converter=gdata.docs.DocumentListFeedFromString)
def GetDocumentListEntry(self, uri):
"""Retrieves a particular DocumentListEntry by its unique URI.
Args:
uri: string The unique URI of an entry in a Document List feed.
Returns:
A DocumentListEntry object representing the retrieved entry.
"""
return self.Get(uri, converter=gdata.docs.DocumentListEntryFromString)
def GetDocumentListFeed(self, uri=None):
"""Retrieves a feed containing all of a user's documents.
Args:
uri: string A full URI to query the Document List feed.
"""
if not uri:
uri = gdata.docs.service.DocumentQuery().ToUri()
return self.QueryDocumentListFeed(uri)
def GetDocumentListAclEntry(self, uri):
"""Retrieves a particular DocumentListAclEntry by its unique URI.
Args:
uri: string The unique URI of an entry in a Document List feed.
Returns:
A DocumentListAclEntry object representing the retrieved entry.
"""
return self.Get(uri, converter=gdata.docs.DocumentListAclEntryFromString)
def GetDocumentListAclFeed(self, uri):
"""Retrieves a feed containing all of a user's documents.
Args:
uri: string The URI of a document's Acl feed to retrieve.
Returns:
A DocumentListAclFeed object representing the ACL feed
returned by the server.
"""
return self.Get(uri, converter=gdata.docs.DocumentListAclFeedFromString)
def Upload(self, media_source, title, folder_or_uri=None, label=None):
"""Uploads a document inside of a MediaSource object to the Document List
feed with the given title.
Args:
media_source: MediaSource The gdata.MediaSource object containing a
document file to be uploaded.
title: string The title of the document on the server after being
uploaded.
folder_or_uri: DocumentListEntry or string (optional) An object with a
link to a folder or a uri to a folder to upload to.
Note: A valid uri for a folder is of the form:
/feeds/folders/private/full/folder%3Afolder_id
label: optional label describing the type of the document to be created.
Returns:
A DocumentListEntry containing information about the document created
on the Google Documents service.
"""
return self._UploadFile(media_source, title, self._MakeKindCategory(label),
folder_or_uri)
def Download(self, entry_or_id_or_url, file_path, export_format=None,
gid=None, extra_params=None):
"""Downloads a document from the Document List.
Args:
entry_or_id_or_url: a DocumentListEntry, or the resource id of an entry,
or a url to download from (such as the content src).
file_path: string The full path to save the file to.
export_format: the format to convert to, if conversion is required.
gid: grid id, for downloading a single grid of a spreadsheet
extra_params: a map of any further parameters to control how the document
is downloaded
Raises:
RequestError if the service does not respond with success
"""
if isinstance(entry_or_id_or_url, gdata.docs.DocumentListEntry):
url = entry_or_id_or_url.content.src
else:
if self.__RESOURCE_ID_PATTERN.match(entry_or_id_or_url):
url = self._MakeContentLinkFromId(entry_or_id_or_url)
else:
url = entry_or_id_or_url
if export_format is not None:
if url.find('/Export?') == -1:
raise gdata.service.Error, 'This entry cannot be exported as a different format'
url += '&exportFormat=%s' % export_format
if gid is not None:
if url.find('spreadsheets') == -1:
raise gdata.service.Error, 'grid id parameter is not valid for this entry'
url += '&gid=%s' % gid
if extra_params:
url += '&' + urllib.urlencode(extra_params)
self._DownloadFile(url, file_path)
def Export(self, entry_or_id_or_url, file_path, gid=None, extra_params=None):
"""Downloads a document from the Document List in a different format.
Args:
entry_or_id_or_url: a DocumentListEntry, or the resource id of an entry,
or a url to download from (such as the content src).
file_path: string The full path to save the file to. The export
format is inferred from the file extension.
gid: grid id, for downloading a single grid of a spreadsheet
extra_params: a map of any further parameters to control how the document
is downloaded
Raises:
RequestError if the service does not respond with success
"""
ext = None
match = self.__FILE_EXT_PATTERN.match(file_path)
if match:
ext = match.group(1)
self.Download(entry_or_id_or_url, file_path, ext, gid, extra_params)
def CreateFolder(self, title, folder_or_uri=None):
"""Creates a folder in the Document List feed.
Args:
title: string The title of the folder on the server after being created.
folder_or_uri: DocumentListEntry or string (optional) An object with a
link to a folder or a uri to a folder to upload to.
Note: A valid uri for a folder is of the form:
/feeds/folders/private/full/folder%3Afolder_id
Returns:
A DocumentListEntry containing information about the folder created on
the Google Documents service.
"""
if folder_or_uri:
try:
uri = folder_or_uri.content.src
except AttributeError:
uri = folder_or_uri
else:
uri = '/feeds/documents/private/full'
folder_entry = gdata.docs.DocumentListEntry()
folder_entry.title = atom.Title(text=title)
folder_entry.category.append(self._MakeKindCategory(FOLDER_LABEL))
folder_entry = self.Post(folder_entry, uri,
converter=gdata.docs.DocumentListEntryFromString)
return folder_entry
def MoveOutOfFolder(self, source_entry):
"""Moves a document into a folder in the Document List Feed.
Args:
source_entry: DocumentListEntry An object representing the source
document/folder.
Returns:
True if the entry was moved out.
"""
return self.Delete(source_entry.GetEditLink().href)
# Deprecated methods
@atom.deprecated('Please use Upload instead')
def UploadPresentation(self, media_source, title, folder_or_uri=None):
"""Uploads a presentation inside of a MediaSource object to the Document
List feed with the given title.
This method is deprecated, use Upload instead.
Args:
media_source: MediaSource The MediaSource object containing a
presentation file to be uploaded.
title: string The title of the presentation on the server after being
uploaded.
folder_or_uri: DocumentListEntry or string (optional) An object with a
link to a folder or a uri to a folder to upload to.
Note: A valid uri for a folder is of the form:
/feeds/folders/private/full/folder%3Afolder_id
Returns:
A DocumentListEntry containing information about the presentation created
on the Google Documents service.
"""
return self._UploadFile(
media_source, title, self._MakeKindCategory(PRESENTATION_LABEL),
folder_or_uri=folder_or_uri)
@atom.deprecated('Please use Upload instead')
def UploadSpreadsheet(self, media_source, title, folder_or_uri=None):
"""Uploads a spreadsheet inside of a MediaSource object to the Document
List feed with the given title.
This method is deprecated, use Upload instead.
Args:
media_source: MediaSource The MediaSource object containing a spreadsheet
file to be uploaded.
title: string The title of the spreadsheet on the server after being
uploaded.
folder_or_uri: DocumentListEntry or string (optional) An object with a
link to a folder or a uri to a folder to upload to.
Note: A valid uri for a folder is of the form:
/feeds/folders/private/full/folder%3Afolder_id
Returns:
A DocumentListEntry containing information about the spreadsheet created
on the Google Documents service.
"""
return self._UploadFile(
media_source, title, self._MakeKindCategory(SPREADSHEET_LABEL),
folder_or_uri=folder_or_uri)
@atom.deprecated('Please use Upload instead')
def UploadDocument(self, media_source, title, folder_or_uri=None):
"""Uploads a document inside of a MediaSource object to the Document List
feed with the given title.
This method is deprecated, use Upload instead.
Args:
media_source: MediaSource The gdata.MediaSource object containing a
document file to be uploaded.
title: string The title of the document on the server after being
uploaded.
folder_or_uri: DocumentListEntry or string (optional) An object with a
link to a folder or a uri to a folder to upload to.
Note: A valid uri for a folder is of the form:
/feeds/folders/private/full/folder%3Afolder_id
Returns:
A DocumentListEntry containing information about the document created
on the Google Documents service.
"""
return self._UploadFile(
media_source, title, self._MakeKindCategory(DOCUMENT_LABEL),
folder_or_uri=folder_or_uri)
"""Calling any of these functions is the same as calling Export"""
DownloadDocument = atom.deprecated('Please use Export instead')(Export)
DownloadPresentation = atom.deprecated('Please use Export instead')(Export)
DownloadSpreadsheet = atom.deprecated('Please use Export instead')(Export)
"""Calling any of these functions is the same as calling MoveIntoFolder"""
MoveDocumentIntoFolder = atom.deprecated(
'Please use MoveIntoFolder instead')(MoveIntoFolder)
MovePresentationIntoFolder = atom.deprecated(
'Please use MoveIntoFolder instead')(MoveIntoFolder)
MoveSpreadsheetIntoFolder = atom.deprecated(
'Please use MoveIntoFolder instead')(MoveIntoFolder)
MoveFolderIntoFolder = atom.deprecated(
'Please use MoveIntoFolder instead')(MoveIntoFolder)
class DocumentQuery(gdata.service.Query):
"""Object used to construct a URI to query the Google Document List feed"""
def __init__(self, feed='/feeds/documents', visibility='private',
projection='full', text_query=None, params=None,
categories=None):
"""Constructor for Document List Query
Args:
feed: string (optional) The path for the feed. (e.g. '/feeds/documents')
visibility: string (optional) The visibility chosen for the current feed.
projection: string (optional) The projection chosen for the current feed.
text_query: string (optional) The contents of the q query parameter. This
string is URL escaped upon conversion to a URI.
params: dict (optional) Parameter value string pairs which become URL
params when translated to a URI. These parameters are added to
the query's items.
categories: list (optional) List of category strings which should be
included as query categories. See gdata.service.Query for
additional documentation.
Yields:
A DocumentQuery object used to construct a URI based on the Document
List feed.
"""
self.visibility = visibility
self.projection = projection
gdata.service.Query.__init__(self, feed, text_query, params, categories)
def ToUri(self):
"""Generates a URI from the query parameters set in the object.
Returns:
A string containing the URI used to retrieve entries from the Document
List feed.
"""
old_feed = self.feed
self.feed = '/'.join([old_feed, self.visibility, self.projection])
new_feed = gdata.service.Query.ToUri(self)
self.feed = old_feed
return new_feed
def AddNamedFolder(self, email, folder_name):
"""Adds a named folder category, qualified by a schema.
This function lets you query for documents that are contained inside a
named folder without fear of collision with other categories.
Args:
email: string The email of the user who owns the folder.
folder_name: string The name of the folder.
Returns:
The string of the category that was added to the object.
"""
category = '{%s%s}%s' % (FOLDERS_SCHEME_PREFIX, email, folder_name)
self.categories.append(category)
return category
def RemoveNamedFolder(self, email, folder_name):
"""Removes a named folder category, qualified by a schema.
Args:
email: string The email of the user who owns the folder.
folder_name: string The name of the folder.
Returns:
The string of the category that was removed from the object.
"""
category = '{%s%s}%s' % (FOLDERS_SCHEME_PREFIX, email, folder_name)
self.categories.remove(category)
return category
class DocumentAclQuery(gdata.service.Query):
"""Object used to construct a URI to query a Document's ACL feed"""
def __init__(self, resource_id, feed='/feeds/acl/private/full'):
"""Constructor for Document ACL Query
Args:
resource_id: string The resource id. (e.g. 'document%3Adocument_id',
'spreadsheet%3Aspreadsheet_id', etc.)
feed: string (optional) The path for the feed.
(e.g. '/feeds/acl/private/full')
Yields:
A DocumentAclQuery object used to construct a URI based on the Document
ACL feed.
"""
self.resource_id = resource_id
gdata.service.Query.__init__(self, feed)
def ToUri(self):
"""Generates a URI from the query parameters set in the object.
Returns:
A string containing the URI used to retrieve entries from the Document
ACL feed.
"""
return '%s/%s' % (gdata.service.Query.ToUri(self), self.resource_id)
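# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module).
# A hedged example of building feed URIs with the query classes above; the
# e-mail address and folder name are hypothetical, and the resource id is the
# placeholder from the DocumentAclQuery docstring.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
  # Query the documents feed, restricted to a named folder.
  query = DocumentQuery(params={'max-results': '25'})
  query.AddNamedFolder('user@example.com', 'reports')
  doc_uri = query.ToUri()
  # doc_uri now addresses the private/full documents feed, restricted to the
  # named-folder category, with max-results applied.

  # Query the ACL feed of a single document.
  acl_query = DocumentAclQuery('document%3Adocument_id')
  acl_uri = acl_query.ToUri()
  # acl_uri should be '/feeds/acl/private/full/document%3Adocument_id'.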
|
earies/ncclient
|
refs/heads/master
|
ncclient/logging_.py
|
6
|
import logging
class SessionLoggerAdapter(logging.LoggerAdapter):
"""Logger adapter that automatically adds session information to logs."""
def process(self, msg, kwargs):
if 'session' not in self.extra or self.extra['session'] is None:
return msg, kwargs
session = self.extra['session']
prefix = ""
# All Session instances have an id. SSHSessions have a host as well.
if hasattr(session, 'host'):
prefix += "host %s " % session.host
if session.id is not None:
prefix += "session-id %s" % session.id
else:
prefix += "session 0x%x" % id(session)
# Pass the session information through to the LogRecord itself
if 'extra' not in kwargs:
kwargs['extra'] = self.extra
else:
kwargs['extra'].update(self.extra)
return "[%s] %s" % (prefix, msg), kwargs
|
janztec/empc-arpi-linux
|
refs/heads/rpi-3.18.y
|
tools/perf/scripts/python/netdev-times.py
|
1544
|
# Display the per-packet processing flow and the time spent at each stage.
# It helps us to investigate networking or network device behavior.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # insert all tracepoint events related to this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value includes time of NET_RX softirq-entry
# and a list which stacks receive events
receive_hunk_list = []; # a list which includes a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which are freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval (msec) from src (nsec) to dst (nsec)
def diff_msec(src, dst):
return (dst - src) / 1000000.0
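# For example (illustrative values, not from a real trace):
#   diff_msec(1000000, 3500000) == 2.5   # 2,500,000 nsec -> 2.5 msec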
# Display a process of transmitting a packet
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
# check if this hunk should be shown
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
global show_tx
global show_rx
global dev
global debug
for i in range(len(sys.argv)):
if i == 0:
continue
arg = sys.argv[i]
if arg == 'tx':
show_tx = 1
elif arg =='rx':
show_rx = 1
elif arg.find('dev=',0, 4) >= 0:
dev = arg[4:]
elif arg == 'debug':
debug = 1
if show_tx == 0 and show_rx == 0:
show_tx = 1
show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
callchain, irq, irq_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
irq, irq_name)
all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, callchain, irq, ret):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, callchain, napi, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name)
all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm, callchain,
skbaddr, skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm, callchain,
skbaddr, skblen, rc, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, rc ,dev_name)
all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, callchain,
skbaddr, protocol, location):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, protocol, location)
all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr)
all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm, callchain,
skbaddr, skblen):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen)
all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
if cpu not in irq_dic.keys():
irq_dic[cpu] = []
irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
(name, context, cpu, time, pid, comm, irq, ret) = event_info
if cpu not in irq_dic.keys():
return
irq_record = irq_dic[cpu].pop()
if irq != irq_record['irq']:
return
irq_record.update({'irq_ext_t':time})
# if an irq doesn't include NET_RX softirq, drop.
if 'event_list' in irq_record.keys():
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'sirq_raise'})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
irq_list = []
event_list = 0
if cpu in irq_dic.keys():
irq_list = irq_dic[cpu]
del irq_dic[cpu]
if cpu in net_rx_dic.keys():
sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
event_list = net_rx_dic[cpu]['event_list']
del net_rx_dic[cpu]
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
'irq_list':irq_list, 'event_list':event_list}
# merge information related to a NET_RX softirq
receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
if cpu in net_rx_dic.keys():
event_list = net_rx_dic[cpu]['event_list']
rec_data = {'event_name':'napi_poll',
'dev':dev_name, 'event_t':time}
event_list.append(rec_data)
def handle_netif_rx(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'netif_rx',
'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
global of_count_rx_skb_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
if len(rx_skb_list) > buffer_budget:
rx_skb_list.pop()
of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
global of_count_tx_queue_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
tx_queue_list.insert(0, skb)
if len(tx_queue_list) > buffer_budget:
tx_queue_list.pop()
of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
global of_count_tx_xmit_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, rc, dev_name) = event_info
if rc == 0: # NETDEV_TX_OK
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
skb['xmit_t'] = time
tx_xmit_list.insert(0, skb)
del tx_queue_list[i]
if len(tx_xmit_list) > buffer_budget:
tx_xmit_list.pop()
of_count_tx_xmit_list += 1
return
def handle_kfree_skb(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, protocol, location) = event_info
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
del tx_queue_list[i]
return
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if rec_data['skbaddr'] == skbaddr:
rec_data.update({'handle':"kfree_skb",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
def handle_consume_skb(event_info):
(name, context, cpu, time, pid, comm, skbaddr) = event_info
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
def handle_skb_copy_datagram_iovec(event_info):
(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if skbaddr == rec_data['skbaddr']:
rec_data.update({'handle':"skb_copy_datagram_iovec",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
|
farazrehman/aws-resources
|
refs/heads/master
|
CloudFormation/nash/lib/python2.7/site-packages/troposphere/autoscaling.py
|
1
|
# Copyright (c) 2012-2013, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSHelperFn, AWSObject, AWSProperty, If, FindInMap, Ref
from .validators import boolean, integer
from . import cloudformation
EC2_INSTANCE_LAUNCH = "autoscaling:EC2_INSTANCE_LAUNCH"
EC2_INSTANCE_LAUNCH_ERROR = "autoscaling:EC2_INSTANCE_LAUNCH_ERROR"
EC2_INSTANCE_TERMINATE = "autoscaling:EC2_INSTANCE_TERMINATE"
EC2_INSTANCE_TERMINATE_ERROR = "autoscaling:EC2_INSTANCE_TERMINATE_ERROR"
TEST_NOTIFICATION = "autoscaling:TEST_NOTIFICATION"
# Termination Policy constants
Default = 'Default'
OldestInstance = 'OldestInstance'
NewestInstance = 'NewestInstance'
OldestLaunchConfiguration = 'OldestLaunchConfiguration'
ClosestToNextInstanceHour = 'ClosestToNextInstanceHour'
class Tag(AWSHelperFn):
def __init__(self, key, value, propogate):
self.data = {
'Key': key,
'Value': value,
'PropagateAtLaunch': propogate,
}
def JSONrepr(self):
return self.data
class Tags(AWSHelperFn):
defaultPropagateAtLaunch = True
manyType = [type([]), type(())]
def __init__(self, **kwargs):
self.tags = []
for k, v in sorted(kwargs.iteritems()):
if type(v) in self.manyType:
propagate = str(v[1]).lower()
v = v[0]
else:
propagate = str(self.defaultPropagateAtLaunch).lower()
self.tags.append({
'Key': k,
'Value': v,
'PropagateAtLaunch': propagate,
})
# append tags to list
def __add__(self, newtags):
newtags.tags = self.tags + newtags.tags
return newtags
def JSONrepr(self):
return self.tags
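# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module).
# Keyword arguments become tag keys (sorted alphabetically); a
# (value, propagate) tuple overrides the default PropagateAtLaunch of 'true'.
#
#   tags = Tags(Name='web-asg', Environment=('staging', False))
#   tags.JSONrepr()
#   # -> [{'Key': 'Environment', 'Value': 'staging', 'PropagateAtLaunch': 'false'},
#   #     {'Key': 'Name', 'Value': 'web-asg', 'PropagateAtLaunch': 'true'}]
# ---------------------------------------------------------------------------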
class NotificationConfigurations(AWSProperty):
props = {
'TopicARN': (basestring, True),
'NotificationTypes': (list, True),
}
class MetricsCollection(AWSProperty):
props = {
'Granularity': (basestring, True),
'Metrics': (list, False),
}
class Metadata(AWSHelperFn):
def __init__(self, init, authentication=None):
self.validate(init, authentication)
# get keys and values from init and authentication
# if there's only one data point, then we know it's the default
# cfn-init, where the key is 'config'
if len(init.data) == 1:
initKey, initValue = init.data.popitem()
self.data = {initKey: initValue}
else:
self.data = init.data
if authentication:
authKey, authValue = authentication.data.popitem()
self.data[authKey] = authValue
def validate(self, init, authentication):
if not isinstance(init, cloudformation.Init):
raise ValueError(
'init must be of type cloudformation.Init'
)
is_instance = isinstance(authentication, cloudformation.Authentication)
if authentication and not is_instance:
raise ValueError(
'authentication must be of type cloudformation.Authentication'
)
def JSONrepr(self):
return self.data
class AutoScalingGroup(AWSObject):
resource_type = "AWS::AutoScaling::AutoScalingGroup"
props = {
'AvailabilityZones': (list, False),
'Cooldown': (integer, False),
'DesiredCapacity': (integer, False),
'HealthCheckGracePeriod': (integer, False),
'HealthCheckType': (basestring, False),
'InstanceId': (basestring, False),
'LaunchConfigurationName': (basestring, False),
'LoadBalancerNames': (list, False),
'MaxSize': (integer, True),
'MetricsCollection': ([MetricsCollection], False),
'MinSize': (integer, True),
'NotificationConfigurations': ([NotificationConfigurations], False),
'PlacementGroup': (basestring, False),
'Tags': (list, False),
'TargetGroupARNs': ([basestring], False),
'TerminationPolicies': ([basestring], False),
'VPCZoneIdentifier': (list, False),
}
def validate(self):
if 'UpdatePolicy' in self.resource:
update_policy = self.resource['UpdatePolicy']
if 'AutoScalingRollingUpdate' in update_policy.properties:
rolling_update = update_policy.AutoScalingRollingUpdate
isMinNoCheck = isinstance(
rolling_update.MinInstancesInService,
(FindInMap, Ref)
)
isMaxNoCheck = isinstance(self.MaxSize, (If, FindInMap, Ref))
if not (isMinNoCheck or isMaxNoCheck):
maxCount = int(self.MaxSize)
minCount = int(rolling_update.MinInstancesInService)
if minCount >= maxCount:
raise ValueError(
"The UpdatePolicy attribute "
"MinInstancesInService must be less than the "
"autoscaling group's MaxSize")
launch_config = self.properties.get('LaunchConfigurationName')
instance_id = self.properties.get('InstanceId')
if launch_config and instance_id:
raise ValueError("LaunchConfigurationName and InstanceId "
"are mutually exclusive.")
if not launch_config and not instance_id:
raise ValueError("Must specify either LaunchConfigurationName or "
"InstanceId: http://docs.aws.amazon.com/AWSCloud"
"Formation/latest/UserGuide/aws-properties-as-gr"
"oup.html#cfn-as-group-instanceid")
availability_zones = self.properties.get('AvailabilityZones')
vpc_zone_identifier = self.properties.get('VPCZoneIdentifier')
if not availability_zones and not vpc_zone_identifier:
raise ValueError("Must specify AvailabilityZones and/or "
"VPCZoneIdentifier: http://docs.aws.amazon.com/A"
"WSCloudFormation/latest/UserGuide/aws-propertie"
"s-as-group.html#cfn-as-group-vpczoneidentifier")
return True
class LaunchConfiguration(AWSObject):
resource_type = "AWS::AutoScaling::LaunchConfiguration"
props = {
'AssociatePublicIpAddress': (boolean, False),
'BlockDeviceMappings': (list, False),
'ClassicLinkVPCId': (basestring, False),
'ClassicLinkVPCSecurityGroups': ([basestring], False),
'EbsOptimized': (boolean, False),
'IamInstanceProfile': (basestring, False),
'ImageId': (basestring, True),
'InstanceId': (basestring, False),
'InstanceMonitoring': (boolean, False),
'InstanceType': (basestring, True),
'KernelId': (basestring, False),
'KeyName': (basestring, False),
'Metadata': (Metadata, False),
'PlacementTenancy': (basestring, False),
'RamDiskId': (basestring, False),
'SecurityGroups': (list, False),
'SpotPrice': (basestring, False),
'UserData': (basestring, False),
}
class StepAdjustments(AWSProperty):
props = {
'MetricIntervalLowerBound': (integer, False),
'MetricIntervalUpperBound': (integer, False),
'ScalingAdjustment': (integer, True),
}
class ScalingPolicy(AWSObject):
resource_type = "AWS::AutoScaling::ScalingPolicy"
props = {
'AdjustmentType': (basestring, True),
'AutoScalingGroupName': (basestring, True),
'Cooldown': (integer, False),
'EstimatedInstanceWarmup': (integer, False),
'MetricAggregationType': (basestring, False),
'MinAdjustmentMagnitude': (integer, False),
'PolicyType': (basestring, False),
'ScalingAdjustment': (integer, False),
'StepAdjustments': ([StepAdjustments], False),
}
class ScheduledAction(AWSObject):
resource_type = "AWS::AutoScaling::ScheduledAction"
props = {
'AutoScalingGroupName': (basestring, True),
'DesiredCapacity': (integer, False),
'EndTime': (basestring, False),
'MaxSize': (integer, False),
'MinSize': (integer, False),
'Recurrence': (basestring, False),
'StartTime': (basestring, False),
}
class LifecycleHook(AWSObject):
resource_type = "AWS::AutoScaling::LifecycleHook"
props = {
'AutoScalingGroupName': (basestring, True),
'DefaultResult': (basestring, False),
'HeartbeatTimeout': (integer, False),
'LifecycleHookName': (basestring, False),
'LifecycleTransition': (basestring, True),
'NotificationMetadata': (basestring, False),
'NotificationTargetARN': (basestring, True),
'RoleARN': (basestring, True),
}
class Trigger(AWSObject):
resource_type = "AWS::AutoScaling::Trigger"
props = {
'AutoScalingGroupName': (basestring, True),
'BreachDuration': (integer, True),
'Dimensions': (list, True),
'LowerBreachScaleIncrement': (integer, False),
'LowerThreshold': (integer, True),
'MetricName': (basestring, True),
'Namespace': (basestring, True),
'Period': (integer, True),
'Statistic': (basestring, True),
'Unit': (basestring, False),
'UpperBreachScaleIncrement': (integer, False),
'UpperThreshold': (integer, True),
}
class EBSBlockDevice(AWSProperty):
# http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig-blockdev-template.html
props = {
'DeleteOnTermination': (boolean, False),
'Encrypted': (boolean, False),
'Iops': (integer, False),
'SnapshotId': (basestring, False),
'VolumeSize': (integer, False),
'VolumeType': (basestring, False),
}
class BlockDeviceMapping(AWSProperty):
# http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig-blockdev-mapping.html
props = {
'DeviceName': (basestring, True),
'Ebs': (EBSBlockDevice, False),
'NoDevice': (boolean, False),
'VirtualName': (basestring, False),
}
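# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module).
# Builds a minimal LaunchConfiguration/AutoScalingGroup pair that satisfies
# the constraints enforced in AutoScalingGroup.validate(): exactly one of
# LaunchConfigurationName/InstanceId, and at least one of
# AvailabilityZones/VPCZoneIdentifier.  The AMI id, instance type and zone
# are placeholder values.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from troposphere import Template

    t = Template()
    lc = LaunchConfiguration(
        'WebLaunchConfig',
        ImageId='ami-12345678',
        InstanceType='t2.micro',
    )
    t.add_resource(lc)
    asg = AutoScalingGroup(
        'WebAsg',
        MinSize=1,
        MaxSize=3,
        LaunchConfigurationName=Ref(lc),
        AvailabilityZones=['us-east-1a'],
        Tags=[Tag('Name', 'web-asg', True)],
    )
    t.add_resource(asg)
    print(t.to_json())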
|
Shaps/ansible
|
refs/heads/devel
|
test/units/plugins/test_plugins.py
|
23
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from units.compat import unittest
from units.compat.builtins import BUILTINS
from units.compat.mock import patch, MagicMock
from ansible.plugins.loader import PluginLoader
class TestErrors(unittest.TestCase):
@patch.object(PluginLoader, '_get_paths')
def test_print_paths(self, mock_method):
mock_method.return_value = ['/path/one', '/path/two', '/path/three']
pl = PluginLoader('foo', 'foo', '', 'test_plugins')
paths = pl.print_paths()
expected_paths = os.pathsep.join(['/path/one', '/path/two', '/path/three'])
self.assertEqual(paths, expected_paths)
def test_plugins__get_package_paths_no_package(self):
pl = PluginLoader('test', '', 'test', 'test_plugin')
self.assertEqual(pl._get_package_paths(), [])
def test_plugins__get_package_paths_with_package(self):
# the _get_package_paths() call uses __import__ to load a
# python library, and then uses the __file__ attribute of
# the result to get the library path, so we mock that here
# and patch the builtin to use our mocked result
foo = MagicMock()
bar = MagicMock()
bam = MagicMock()
bam.__file__ = '/path/to/my/foo/bar/bam/__init__.py'
bar.bam = bam
foo.return_value.bar = bar
pl = PluginLoader('test', 'foo.bar.bam', 'test', 'test_plugin')
with patch('{0}.__import__'.format(BUILTINS), foo):
self.assertEqual(pl._get_package_paths(), ['/path/to/my/foo/bar/bam'])
def test_plugins__get_paths(self):
pl = PluginLoader('test', '', 'test', 'test_plugin')
pl._paths = ['/path/one', '/path/two']
self.assertEqual(pl._get_paths(), ['/path/one', '/path/two'])
# NOT YET WORKING
# def fake_glob(path):
# if path == 'test/*':
# return ['test/foo', 'test/bar', 'test/bam']
# elif path == 'test/*/*'
# m._paths = None
# mock_glob = MagicMock()
# mock_glob.return_value = []
# with patch('glob.glob', mock_glob):
# pass
def assertPluginLoaderConfigBecomes(self, arg, expected):
pl = PluginLoader('test', '', arg, 'test_plugin')
self.assertEqual(pl.config, expected)
def test_plugin__init_config_list(self):
config = ['/one', '/two']
self.assertPluginLoaderConfigBecomes(config, config)
def test_plugin__init_config_str(self):
self.assertPluginLoaderConfigBecomes('test', ['test'])
def test_plugin__init_config_none(self):
self.assertPluginLoaderConfigBecomes(None, [])
def test__load_module_source_no_duplicate_names(self):
'''
This test simulates importing 2 plugins with the same name,
and validates that the import is short-circuited if a file with the same name
has already been imported
'''
fixture_path = os.path.join(os.path.dirname(__file__), 'loader_fixtures')
pl = PluginLoader('test', '', 'test', 'test_plugin')
one = pl._load_module_source('import_fixture', os.path.join(fixture_path, 'import_fixture.py'))
# This line wouldn't even succeed if we didn't short circuit on finding a duplicate name
two = pl._load_module_source('import_fixture', '/path/to/import_fixture.py')
self.assertEqual(one, two)
@patch('ansible.plugins.loader.glob')
@patch.object(PluginLoader, '_get_paths')
def test_all_no_duplicate_names(self, gp_mock, glob_mock):
'''
This test goes along with ``test__load_module_source_no_duplicate_names``
and ensures that we ignore duplicate imports on multiple paths
'''
fixture_path = os.path.join(os.path.dirname(__file__), 'loader_fixtures')
gp_mock.return_value = [
fixture_path,
'/path/to'
]
glob_mock.glob.side_effect = [
[os.path.join(fixture_path, 'import_fixture.py')],
['/path/to/import_fixture.py']
]
pl = PluginLoader('test', '', 'test', 'test_plugin')
# Aside from needing ``list()`` so we can do a len, ``PluginLoader.all`` returns a generator
# so ``list()`` actually causes ``PluginLoader.all`` to run.
plugins = list(pl.all())
self.assertEqual(len(plugins), 1)
self.assertIn(os.path.join(fixture_path, 'import_fixture.py'), pl._module_cache)
self.assertNotIn('/path/to/import_fixture.py', pl._module_cache)
|
godfather1103/WeiboRobot
|
refs/heads/master
|
python27/1.0/lib/unittest/test/test_setups.py
|
153
|
import sys
from cStringIO import StringIO
import unittest
def resultFactory(*_):
return unittest.TestResult()
class TestSetups(unittest.TestCase):
def getRunner(self):
return unittest.TextTestRunner(resultclass=resultFactory,
stream=StringIO())
def runTests(self, *cases):
suite = unittest.TestSuite()
for case in cases:
tests = unittest.defaultTestLoader.loadTestsFromTestCase(case)
suite.addTests(tests)
runner = self.getRunner()
# creating a nested suite exposes some potential bugs
realSuite = unittest.TestSuite()
realSuite.addTest(suite)
# adding empty suites to the end exposes potential bugs
suite.addTest(unittest.TestSuite())
realSuite.addTest(unittest.TestSuite())
return runner.run(realSuite)
def test_setup_class(self):
class Test(unittest.TestCase):
setUpCalled = 0
@classmethod
def setUpClass(cls):
Test.setUpCalled += 1
unittest.TestCase.setUpClass()
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(Test)
self.assertEqual(Test.setUpCalled, 1)
self.assertEqual(result.testsRun, 2)
self.assertEqual(len(result.errors), 0)
def test_teardown_class(self):
class Test(unittest.TestCase):
tearDownCalled = 0
@classmethod
def tearDownClass(cls):
Test.tearDownCalled += 1
unittest.TestCase.tearDownClass()
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(Test)
self.assertEqual(Test.tearDownCalled, 1)
self.assertEqual(result.testsRun, 2)
self.assertEqual(len(result.errors), 0)
def test_teardown_class_two_classes(self):
class Test(unittest.TestCase):
tearDownCalled = 0
@classmethod
def tearDownClass(cls):
Test.tearDownCalled += 1
unittest.TestCase.tearDownClass()
def test_one(self):
pass
def test_two(self):
pass
class Test2(unittest.TestCase):
tearDownCalled = 0
@classmethod
def tearDownClass(cls):
Test2.tearDownCalled += 1
unittest.TestCase.tearDownClass()
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(Test, Test2)
self.assertEqual(Test.tearDownCalled, 1)
self.assertEqual(Test2.tearDownCalled, 1)
self.assertEqual(result.testsRun, 4)
self.assertEqual(len(result.errors), 0)
def test_error_in_setupclass(self):
class BrokenTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
raise TypeError('foo')
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(BrokenTest)
self.assertEqual(result.testsRun, 0)
self.assertEqual(len(result.errors), 1)
error, _ = result.errors[0]
self.assertEqual(str(error),
'setUpClass (%s.BrokenTest)' % __name__)
def test_error_in_teardown_class(self):
class Test(unittest.TestCase):
tornDown = 0
@classmethod
def tearDownClass(cls):
Test.tornDown += 1
raise TypeError('foo')
def test_one(self):
pass
def test_two(self):
pass
class Test2(unittest.TestCase):
tornDown = 0
@classmethod
def tearDownClass(cls):
Test2.tornDown += 1
raise TypeError('foo')
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(Test, Test2)
self.assertEqual(result.testsRun, 4)
self.assertEqual(len(result.errors), 2)
self.assertEqual(Test.tornDown, 1)
self.assertEqual(Test2.tornDown, 1)
error, _ = result.errors[0]
self.assertEqual(str(error),
'tearDownClass (%s.Test)' % __name__)
def test_class_not_torndown_when_setup_fails(self):
class Test(unittest.TestCase):
tornDown = False
@classmethod
def setUpClass(cls):
raise TypeError
@classmethod
def tearDownClass(cls):
Test.tornDown = True
raise TypeError('foo')
def test_one(self):
pass
self.runTests(Test)
self.assertFalse(Test.tornDown)
def test_class_not_setup_or_torndown_when_skipped(self):
class Test(unittest.TestCase):
classSetUp = False
tornDown = False
@classmethod
def setUpClass(cls):
Test.classSetUp = True
@classmethod
def tearDownClass(cls):
Test.tornDown = True
def test_one(self):
pass
Test = unittest.skip("hop")(Test)
self.runTests(Test)
self.assertFalse(Test.classSetUp)
self.assertFalse(Test.tornDown)
def test_setup_teardown_order_with_pathological_suite(self):
results = []
class Module1(object):
@staticmethod
def setUpModule():
results.append('Module1.setUpModule')
@staticmethod
def tearDownModule():
results.append('Module1.tearDownModule')
class Module2(object):
@staticmethod
def setUpModule():
results.append('Module2.setUpModule')
@staticmethod
def tearDownModule():
results.append('Module2.tearDownModule')
class Test1(unittest.TestCase):
@classmethod
def setUpClass(cls):
results.append('setup 1')
@classmethod
def tearDownClass(cls):
results.append('teardown 1')
def testOne(self):
results.append('Test1.testOne')
def testTwo(self):
results.append('Test1.testTwo')
class Test2(unittest.TestCase):
@classmethod
def setUpClass(cls):
results.append('setup 2')
@classmethod
def tearDownClass(cls):
results.append('teardown 2')
def testOne(self):
results.append('Test2.testOne')
def testTwo(self):
results.append('Test2.testTwo')
class Test3(unittest.TestCase):
@classmethod
def setUpClass(cls):
results.append('setup 3')
@classmethod
def tearDownClass(cls):
results.append('teardown 3')
def testOne(self):
results.append('Test3.testOne')
def testTwo(self):
results.append('Test3.testTwo')
Test1.__module__ = Test2.__module__ = 'Module'
Test3.__module__ = 'Module2'
sys.modules['Module'] = Module1
sys.modules['Module2'] = Module2
first = unittest.TestSuite((Test1('testOne'),))
second = unittest.TestSuite((Test1('testTwo'),))
third = unittest.TestSuite((Test2('testOne'),))
fourth = unittest.TestSuite((Test2('testTwo'),))
fifth = unittest.TestSuite((Test3('testOne'),))
sixth = unittest.TestSuite((Test3('testTwo'),))
suite = unittest.TestSuite((first, second, third, fourth, fifth, sixth))
runner = self.getRunner()
result = runner.run(suite)
self.assertEqual(result.testsRun, 6)
self.assertEqual(len(result.errors), 0)
self.assertEqual(results,
['Module1.setUpModule', 'setup 1',
'Test1.testOne', 'Test1.testTwo', 'teardown 1',
'setup 2', 'Test2.testOne', 'Test2.testTwo',
'teardown 2', 'Module1.tearDownModule',
'Module2.setUpModule', 'setup 3',
'Test3.testOne', 'Test3.testTwo',
'teardown 3', 'Module2.tearDownModule'])
def test_setup_module(self):
class Module(object):
moduleSetup = 0
@staticmethod
def setUpModule():
Module.moduleSetup += 1
class Test(unittest.TestCase):
def test_one(self):
pass
def test_two(self):
pass
Test.__module__ = 'Module'
sys.modules['Module'] = Module
result = self.runTests(Test)
self.assertEqual(Module.moduleSetup, 1)
self.assertEqual(result.testsRun, 2)
self.assertEqual(len(result.errors), 0)
def test_error_in_setup_module(self):
class Module(object):
moduleSetup = 0
moduleTornDown = 0
@staticmethod
def setUpModule():
Module.moduleSetup += 1
raise TypeError('foo')
@staticmethod
def tearDownModule():
Module.moduleTornDown += 1
class Test(unittest.TestCase):
classSetUp = False
classTornDown = False
@classmethod
def setUpClass(cls):
Test.classSetUp = True
@classmethod
def tearDownClass(cls):
Test.classTornDown = True
def test_one(self):
pass
def test_two(self):
pass
class Test2(unittest.TestCase):
def test_one(self):
pass
def test_two(self):
pass
Test.__module__ = 'Module'
Test2.__module__ = 'Module'
sys.modules['Module'] = Module
result = self.runTests(Test, Test2)
self.assertEqual(Module.moduleSetup, 1)
self.assertEqual(Module.moduleTornDown, 0)
self.assertEqual(result.testsRun, 0)
self.assertFalse(Test.classSetUp)
self.assertFalse(Test.classTornDown)
self.assertEqual(len(result.errors), 1)
error, _ = result.errors[0]
self.assertEqual(str(error), 'setUpModule (Module)')
def test_testcase_with_missing_module(self):
class Test(unittest.TestCase):
def test_one(self):
pass
def test_two(self):
pass
Test.__module__ = 'Module'
sys.modules.pop('Module', None)
result = self.runTests(Test)
self.assertEqual(result.testsRun, 2)
def test_teardown_module(self):
class Module(object):
moduleTornDown = 0
@staticmethod
def tearDownModule():
Module.moduleTornDown += 1
class Test(unittest.TestCase):
def test_one(self):
pass
def test_two(self):
pass
Test.__module__ = 'Module'
sys.modules['Module'] = Module
result = self.runTests(Test)
self.assertEqual(Module.moduleTornDown, 1)
self.assertEqual(result.testsRun, 2)
self.assertEqual(len(result.errors), 0)
def test_error_in_teardown_module(self):
class Module(object):
moduleTornDown = 0
@staticmethod
def tearDownModule():
Module.moduleTornDown += 1
raise TypeError('foo')
class Test(unittest.TestCase):
classSetUp = False
classTornDown = False
@classmethod
def setUpClass(cls):
Test.classSetUp = True
@classmethod
def tearDownClass(cls):
Test.classTornDown = True
def test_one(self):
pass
def test_two(self):
pass
class Test2(unittest.TestCase):
def test_one(self):
pass
def test_two(self):
pass
Test.__module__ = 'Module'
Test2.__module__ = 'Module'
sys.modules['Module'] = Module
result = self.runTests(Test, Test2)
self.assertEqual(Module.moduleTornDown, 1)
self.assertEqual(result.testsRun, 4)
self.assertTrue(Test.classSetUp)
self.assertTrue(Test.classTornDown)
self.assertEqual(len(result.errors), 1)
error, _ = result.errors[0]
self.assertEqual(str(error), 'tearDownModule (Module)')
def test_skiptest_in_setupclass(self):
class Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
raise unittest.SkipTest('foo')
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(Test)
self.assertEqual(result.testsRun, 0)
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.skipped), 1)
skipped = result.skipped[0][0]
self.assertEqual(str(skipped), 'setUpClass (%s.Test)' % __name__)
def test_skiptest_in_setupmodule(self):
class Test(unittest.TestCase):
def test_one(self):
pass
def test_two(self):
pass
class Module(object):
@staticmethod
def setUpModule():
raise unittest.SkipTest('foo')
Test.__module__ = 'Module'
sys.modules['Module'] = Module
result = self.runTests(Test)
self.assertEqual(result.testsRun, 0)
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.skipped), 1)
skipped = result.skipped[0][0]
self.assertEqual(str(skipped), 'setUpModule (Module)')
def test_suite_debug_executes_setups_and_teardowns(self):
ordering = []
class Module(object):
@staticmethod
def setUpModule():
ordering.append('setUpModule')
@staticmethod
def tearDownModule():
ordering.append('tearDownModule')
class Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
ordering.append('setUpClass')
@classmethod
def tearDownClass(cls):
ordering.append('tearDownClass')
def test_something(self):
ordering.append('test_something')
Test.__module__ = 'Module'
sys.modules['Module'] = Module
suite = unittest.defaultTestLoader.loadTestsFromTestCase(Test)
suite.debug()
expectedOrder = ['setUpModule', 'setUpClass', 'test_something', 'tearDownClass', 'tearDownModule']
self.assertEqual(ordering, expectedOrder)
def test_suite_debug_propagates_exceptions(self):
class Module(object):
@staticmethod
def setUpModule():
if phase == 0:
raise Exception('setUpModule')
@staticmethod
def tearDownModule():
if phase == 1:
raise Exception('tearDownModule')
class Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
if phase == 2:
raise Exception('setUpClass')
@classmethod
def tearDownClass(cls):
if phase == 3:
raise Exception('tearDownClass')
def test_something(self):
if phase == 4:
raise Exception('test_something')
Test.__module__ = 'Module'
sys.modules['Module'] = Module
_suite = unittest.defaultTestLoader.loadTestsFromTestCase(Test)
suite = unittest.TestSuite()
suite.addTest(_suite)
messages = ('setUpModule', 'tearDownModule', 'setUpClass', 'tearDownClass', 'test_something')
for phase, msg in enumerate(messages):
with self.assertRaisesRegexp(Exception, msg):
suite.debug()
if __name__ == '__main__':
unittest.main()
|
charbeljc/server-tools
|
refs/heads/8.0
|
users_ldap_populate/__init__.py
|
257
|
from . import model
|
ronakkhunt/kuma
|
refs/heads/master
|
kuma/authkeys/views.py
|
31
|
from django.core.exceptions import PermissionDenied
from django.shortcuts import get_object_or_404, render, redirect
from django.contrib.auth.decorators import login_required, permission_required
from kuma.core.utils import paginate
from .models import Key
from .forms import KeyForm
ITEMS_PER_PAGE = 15
@login_required
@permission_required('authkeys.add_key', raise_exception=True)
def new(request):
context = {"key": None}
if request.method != "POST":
context['form'] = KeyForm()
else:
context['form'] = KeyForm(request.POST)
if context['form'].is_valid():
new_key = context['form'].save(commit=False)
new_key.user = request.user
context['secret'] = new_key.generate_secret()
new_key.save()
context['key'] = new_key
return render(request, 'authkeys/new.html', context)
@login_required
def list(request):
keys = Key.objects.filter(user=request.user)
return render(request, 'authkeys/list.html', dict(keys=keys))
@login_required
def history(request, pk):
key = get_object_or_404(Key, pk=pk)
if key.user != request.user:
raise PermissionDenied
items = key.history.all().order_by('-pk')
items = paginate(request, items, per_page=ITEMS_PER_PAGE)
context = {
'key': key,
'items': items,
}
return render(request, 'authkeys/history.html', context)
@login_required
@permission_required('authkeys.delete_key', raise_exception=True)
def delete(request, pk):
key = get_object_or_404(Key, pk=pk)
if key.user != request.user:
raise PermissionDenied
if request.method == "POST":
key.delete()
return redirect('authkeys.list')
return render(request, 'authkeys/delete.html', {'key': key})
|
mlperf/training_results_v0.7
|
refs/heads/master
|
Google/benchmarks/transformer/implementations/transformer-research-TF-tpu-v3-8192/lingvo/core/generic_input.py
|
3
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generic input."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.compat as tf
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import ops
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import py_utils
from REDACTED.tensorflow.python.util import tf_inspect # pylint: disable=g-direct-tensorflow-import
def GenericInput(processor, **kwargs):
"""Builds a generic input pipeline.
Example usage::
def ParseRecord(record):
# Given a tf.string record, return a (NestedMap, bucketing key) pair.
feature_map = ...
features = tf.io.parse_single_example(record, feature_map)
# Each example is represented by a NestedMap of tensors (without a
# batch dimension).
example = py_utils.NestedMap(field1=..., field2=...)
# bucketing_key is a scalar convertible to tf.int32.
# Use 1 if all examples are of the same size.
bucketing_key = 1
return example, bucketing_key
input_batch, bucket_keys = GenericInput(ParseRecord, file_pattern=..., ...)
# input_batch is a NestedMap of tensors, where dim 0 of each tensor
# represents the batch dimension.
input_batch.field1 = ...
ParseRecord can also take both 'source_id' and 'record' as inputs (the arg
names must be exactly 'source_id' and 'record'):
def ParseRecord(source_id, record):
# Given a tf.int32 source_id and a tf.string record, return a (NestedMap,
# bucketing key) pair.
example = py_utils.NestedMap(source_id=source_id, ...)
...
return example, bucketing_key
input_batch, bucket_keys = GenericInput(ParseRecord, file_pattern=..., ...)
Args:
processor: a function that takes either a tf.string record or a
(source_id: tf.int32, record: tf.string) pair as input and returns a tuple
(output, bucketing_key). `output` must be a NestedMap or a list of tensors
representing an example. `bucketing_key` must be a scalar convertible to
a tf.int32 tensor that represents the bucketing key (e.g., sequence
length for sequence inputs). If `bucketing_key` is a negative number,
the record is dropped.
**kwargs: additional keyword args for x_ops.generic_input.
Returns:
A tuple of (outputs, bucket_keys):
- outputs: a NestedMap or a list of tensors, similar to `processor`'s
return, except every tensor will have an additional dimension 0 that
represents the batch dimension.
- bucket_keys: a tf.int32 vector.
"""
output_tmpl = py_utils.NestedMap()
@tf.function(autograph=False)
def _FlatOutputProcessor(source_id, record):
"""Returns a flattened list of 'processor(inputs)'."""
processor_spec = tf_inspect.getargspec(processor)
tf.logging.debug('GenericInput.processor.argspec=%s', processor_spec)
processor_args = set(processor_spec.args) - set(['self'])
if len(processor_args) == 1:
output, bucketing_key = processor(record)
elif processor_args == set(['source_id', 'record']):
output, bucketing_key = processor(source_id=source_id, record=record)
else:
raise ValueError(
'GenericInput: processor should take either a single arg '
'or two args named as "source_id" and "record". '
'Actual: %s' % processor_args)
if isinstance(output, list):
assert output
assert all(isinstance(x, tf.Tensor) for x in output), '{}'.format(output)
else:
assert isinstance(output, py_utils.NestedMap), '{}'.format(output)
assert output
assert all(
isinstance(x, tf.Tensor) for x in output.Flatten()), '{}'.format(
output.DebugString())
bucketing_key = tf.cast(bucketing_key, tf.int32)
tf.logging.debug('Processor outputs=%s bucketing_key=%s', output,
bucketing_key)
output_tmpl.out_values = output
flat_output_tmpl = output_tmpl.Flatten()
tf.logging.debug('Processor flat outputs=%s', flat_output_tmpl)
tf.logging.debug('extra_inputs=%s extra_args=%s extra_vars=%s',
py_utils.GetExtraInputs(), py_utils.GetExtraArgs(),
py_utils.GetExtraVars())
assert not py_utils.GetExtraArgs(), (
'fns {} is not pure: extra_args={}'.format(processor,
py_utils.GetExtraArgs()))
return flat_output_tmpl + [bucketing_key]
proc_fn = _FlatOutputProcessor.get_concrete_function(
tf.TensorSpec([], tf.int32), tf.TensorSpec([], tf.string))
out_types = [
tf.DType(a.type) for a in proc_fn.function_def.signature.output_arg
]
assert out_types[-1] == tf.int32, ('%s is not expected.' % out_types[-1])
flat_outputs, bucket_keys = ops.gen_x_ops.generic_input(
processor=proc_fn, out_types=out_types[:-1], **kwargs)
tf.logging.debug('x_ops.generic_input flat_outputs=%s', flat_outputs)
# Pack flat_outputs to outputs.
outputs = output_tmpl.Pack(flat_outputs).out_values
tf.logging.debug('x_ops.generic_input outputs=%s', outputs)
return outputs, bucket_keys
|
mjudsp/Tsallis
|
refs/heads/master
|
sklearn/preprocessing/__init__.py
|
268
|
"""
The :mod:`sklearn.preprocessing` module includes scaling, centering,
normalization, binarization and imputation methods.
"""
from ._function_transformer import FunctionTransformer
from .data import Binarizer
from .data import KernelCenterer
from .data import MinMaxScaler
from .data import MaxAbsScaler
from .data import Normalizer
from .data import RobustScaler
from .data import StandardScaler
from .data import add_dummy_feature
from .data import binarize
from .data import normalize
from .data import scale
from .data import robust_scale
from .data import maxabs_scale
from .data import minmax_scale
from .data import OneHotEncoder
from .data import PolynomialFeatures
from .label import label_binarize
from .label import LabelBinarizer
from .label import LabelEncoder
from .label import MultiLabelBinarizer
from .imputation import Imputer
__all__ = [
'Binarizer',
'FunctionTransformer',
'Imputer',
'KernelCenterer',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'PolynomialFeatures',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
'label_binarize',
]
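# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module).
# StandardScaler centers each column to zero mean and scales it to unit
# variance; the array below is a made-up two-feature sample.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import numpy as np

    X = np.array([[1.0, 10.0],
                  [2.0, 20.0],
                  [3.0, 30.0]])
    scaler = StandardScaler().fit(X)
    X_scaled = scaler.transform(X)
    # Each column of X_scaled now has mean ~0 and unit variance, e.g.
    # column 0 becomes approximately [-1.2247, 0.0, 1.2247].
    print(X_scaled.mean(axis=0), X_scaled.std(axis=0))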
|
kylelwm/ponus
|
refs/heads/master
|
venv/Lib/site-packages/pip/_vendor/requests/packages/chardet/utf8prober.py
|
2918
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .mbcssm import UTF8SMModel
ONE_CHAR_PROB = 0.5
class UTF8Prober(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(UTF8SMModel)
self.reset()
def reset(self):
CharSetProber.reset(self)
self._mCodingSM.reset()
self._mNumOfMBChar = 0
def get_charset_name(self):
return "utf-8"
def feed(self, aBuf):
for c in aBuf:
codingState = self._mCodingSM.next_state(c)
if codingState == constants.eError:
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
if self._mCodingSM.get_current_charlen() >= 2:
self._mNumOfMBChar += 1
if self.get_state() == constants.eDetecting:
if self.get_confidence() > constants.SHORTCUT_THRESHOLD:
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
unlike = 0.99
if self._mNumOfMBChar < 6:
for i in range(0, self._mNumOfMBChar):
unlike = unlike * ONE_CHAR_PROB
return 1.0 - unlike
else:
return unlike
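# ---------------------------------------------------------------------------
# Worked example of the confidence formula above (added for illustration).
# With n multi-byte sequences seen (n < 6), confidence = 1 - 0.99 * 0.5**n:
#   n = 0 -> 0.01        (no evidence yet)
#   n = 1 -> 0.505
#   n = 3 -> 0.87625
#   n = 5 -> 0.9690625
# Once n >= 6 the prober simply reports 0.99.
# ---------------------------------------------------------------------------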
|
jennyzhang0215/incubator-mxnet
|
refs/heads/master
|
example/bi-lstm-sort/rnn_model.py
|
19
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C0111,too-many-arguments,too-many-instance-attributes,too-many-locals,redefined-outer-name,fixme
# pylint: disable=superfluous-parens, no-member, invalid-name
import sys
import numpy as np
import mxnet as mx
from lstm import LSTMState, LSTMParam, lstm, bi_lstm_inference_symbol
class BiLSTMInferenceModel(object):
def __init__(self,
seq_len,
input_size,
num_hidden,
num_embed,
num_label,
arg_params,
ctx=mx.cpu(),
dropout=0.):
self.sym = bi_lstm_inference_symbol(input_size, seq_len,
num_hidden,
num_embed,
num_label,
dropout)
batch_size = 1
init_c = [('l%d_init_c'%l, (batch_size, num_hidden)) for l in range(2)]
init_h = [('l%d_init_h'%l, (batch_size, num_hidden)) for l in range(2)]
data_shape = [("data", (batch_size, seq_len, ))]
input_shapes = dict(init_c + init_h + data_shape)
self.executor = self.sym.simple_bind(ctx=mx.cpu(), **input_shapes)
for key in self.executor.arg_dict.keys():
if key in arg_params:
arg_params[key].copyto(self.executor.arg_dict[key])
state_name = []
for i in range(2):
state_name.append("l%d_init_c" % i)
state_name.append("l%d_init_h" % i)
self.states_dict = dict(zip(state_name, self.executor.outputs[1:]))
self.input_arr = mx.nd.zeros(data_shape[0][1])
def forward(self, input_data, new_seq=False):
        if new_seq:
for key in self.states_dict.keys():
self.executor.arg_dict[key][:] = 0.
input_data.copyto(self.executor.arg_dict["data"])
self.executor.forward()
for key in self.states_dict.keys():
self.states_dict[key].copyto(self.executor.arg_dict[key])
prob = self.executor.outputs[0].asnumpy()
return prob
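# --- Illustrative usage sketch (not part of the original example). The
# checkpoint prefix 'sort', the epoch number and the shapes below are
# placeholders; in the bi-lstm-sort example arg_params come from a trained
# checkpoint.
if __name__ == '__main__':
    _, arg_params, _ = mx.model.load_checkpoint('sort', 1)
    model = BiLSTMInferenceModel(seq_len=5, input_size=100, num_hidden=300,
                                 num_embed=512, num_label=100,
                                 arg_params=arg_params, ctx=mx.cpu(), dropout=0.)
    data = mx.nd.array(np.array([[3, 1, 4, 1, 5]]))
    prob = model.forward(data, new_seq=True)
    print(prob.shape)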
|
GitHublong/hue
|
refs/heads/master
|
desktop/core/ext-py/Django-1.6.10/django/conf/locale/uk/formats.py
|
236
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j E Y р.'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j E Y р. H:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'j M Y'
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = ' '
# NUMBER_GROUPING =
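# --- Illustrative usage sketch (not part of the original module). Django reads
# these values through django.utils.formats once the 'uk' locale is active and
# localization is enabled, e.g.:
#     import datetime
#     from django.utils import translation, formats
#     translation.activate('uk')
#     formats.date_format(datetime.date(2015, 3, 8), 'DATE_FORMAT')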
|
jtopjian/st2
|
refs/heads/master
|
st2api/tests/unit/controllers/v1/test_runnertypes.py
|
7
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tests import FunctionalTest
class TestRunnerTypesController(FunctionalTest):
def test_get_one(self):
resp = self.app.get('/v1/runnertypes')
self.assertEqual(resp.status_int, 200)
self.assertTrue(len(resp.json) > 0, '/v1/runnertypes did not return correct runnertypes.')
runnertype_id = TestRunnerTypesController.__get_runnertype_id(resp.json[0])
resp = self.app.get('/v1/runnertypes/%s' % runnertype_id)
retrieved_id = TestRunnerTypesController.__get_runnertype_id(resp.json)
self.assertEqual(resp.status_int, 200)
self.assertEqual(retrieved_id, runnertype_id,
'/v1/runnertypes returned incorrect runnertype.')
def test_get_all(self):
resp = self.app.get('/v1/runnertypes')
self.assertEqual(resp.status_int, 200)
self.assertTrue(len(resp.json) > 0, '/v1/runnertypes did not return correct runnertypes.')
def test_get_one_fail(self):
resp = self.app.get('/v1/runnertypes/1', expect_errors=True)
self.assertEqual(resp.status_int, 404)
@staticmethod
def __get_runnertype_id(resp_json):
return resp_json['id']
|
locationtech/geowave
|
refs/heads/master
|
python/src/main/python/pygw/base/writer.py
|
2
|
#
# Copyright (c) 2013-2020 Contributors to the Eclipse Foundation
#
# See the NOTICE file distributed with this work for additional information regarding copyright
# ownership. All rights reserved. This program and the accompanying materials are made available
# under the terms of the Apache License, Version 2.0 which accompanies this distribution and is
# available at http://www.apache.org/licenses/LICENSE-2.0.txt
# ===============================================================================================
from .geowave_object import GeoWaveObject
from .write_results import WriteResults
class Writer(GeoWaveObject):
"""
Writes data to a GeoWave data store.
"""
def __init__(self, java_ref):
super().__init__(java_ref)
self.is_open = True
def write(self, data):
"""
Write data into the associated data store.
Args:
data (any) : The data to be written.
Raises:
RuntimeError: If the writer is closed.
Returns:
A `pygw.base.write_results.WriteResults` with the results of the write operation.
"""
if not self.is_open:
raise RuntimeError("Writer is already closed!")
if isinstance(data, GeoWaveObject):
data = data._java_ref
return WriteResults(self._java_ref.write(data))
def close(self):
"""
Close the writer.
"""
if self.is_open:
self._java_ref.close()
self.is_open = False
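# --- Illustrative usage sketch (not part of the original module). A Writer is
# normally obtained from a pygw data store rather than built from a raw
# java_ref; `store` and `feature` below stand in for objects created elsewhere,
# and the type name is a placeholder.
#     writer = store.create_writer("my_feature_type")
#     results = writer.write(feature)
#     writer.close()  # further write() calls now raise RuntimeError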
|
ribeiro-ucl/viewflow
|
refs/heads/master
|
tests/examples/helloworld/admin.py
|
3
|
from django.contrib import admin
from viewflow.admin import ProcessAdmin, TaskAdmin
from . import models, flows
class HelloWorldProcessAdmin(ProcessAdmin):
list_display = ['pk', 'created', 'status', 'participants',
'text', 'approved']
list_display_links = ['pk', 'created']
class HelloWorldTaskAdmin(TaskAdmin):
list_display = ['pk', 'created', 'status',
'owner', 'owner_permission', 'token',
'started', 'finished']
list_display_links = ['pk', 'created']
def get_queryset(self, request):
qs = super(HelloWorldTaskAdmin, self).get_queryset(request)
return qs.filter(process__flow_cls=flows.HelloWorldFlow)
admin.site.register(models.HelloWorldProcess, HelloWorldProcessAdmin)
admin.site.register(models.HelloWorldTask, HelloWorldTaskAdmin)
|
wevoice/wesub
|
refs/heads/staging
|
apps/api/extra.py
|
1
|
# Amara, universalsubtitles.org
#
# Copyright (C) 2016 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along
# with this program. If not, see http://www.gnu.org/licenses/agpl-3.0.html.
"""
api.extra -- framework for extra fields on API endpoints
This module exists to handle the extra query parameter for API endpoints.
There are a couple of features here:
- API clients can optionally include an extra parameter to ask for extra
  data in the response.
- Clients can specify any number of extra fields using a comma-separated list.
- The extra fields can be implemented by components other than the API.  In
  particular, some are implemented in amara-enterprise.
To facilitate this, we create a dispatch-style system where components can
register to handle the extra fields.  Here's how it works:
- We define an ExtraDispatcher object to be used for an endpoint, or set of
endpoints.
- Components can register for particular extra parameters with the register()
method.
- The API code calls the add_data() method to potentially add extra data
to the response.
- ExtraDispatcher calculates which components should be called based on the
extra query param and calls them.
"""
class ExtraDispatcher(object):
"""
    Dispatcher for extra parameters
"""
def __init__(self):
self.callbacks = {}
def register(self, name, callback):
"""
Register an extra callback function
If name is present in the extra query param, callback will be called
to add extra data to the response.
Args:
name: name for the extra data. This is what needs to be present
in the extra query param to trigger the callback. It must
be unique.
callback: callback function to provide the extra data.
"""
self.callbacks[name] = callback
def handler(self, name):
"""
Function decorator to register a callback function
You can use this decorator like this:
@handler(name)
def function():
pass
It's a shortcut for defining the function, then calling
register(name, function).
"""
def decorator(func):
self.register(name, func)
return func
return decorator
def add_data(self, request, data, **kwargs):
"""
Add extra data to an API response.
This method figures out if any functions registered with register()
        should be called and calls them.  Each callback is called with:
- request.user
- data
- Any additional kwargs
"""
extra = request.query_params.get('extra')
if extra:
for name in extra.split(','):
callback = self.callbacks.get(name)
if callback:
callback(request.user, data, **kwargs)
video_language = ExtraDispatcher()
user = ExtraDispatcher()
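# --- Illustrative usage sketch (not part of the original module; the
# 'example_flag' extra name and the fake request object are made up).
if __name__ == '__main__':
    @video_language.handler('example_flag')
    def add_example_flag(user, data, **kwargs):
        data['example_flag'] = True
    class FakeRequest(object):
        user = None
        query_params = {'extra': 'example_flag'}
    payload = {}
    video_language.add_data(FakeRequest(), payload)
    print(payload)  # {'example_flag': True}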
|
unicefuganda/mics
|
refs/heads/master
|
survey/tests/utils/__init__.py
|
17
|
__author__ = 'mnandri'
|
reverland/scripts
|
refs/heads/master
|
python/missile.py
|
1
|
import matplotlib.pyplot as plt
import numpy as np
tolerance = 1e-1
radius = np.pi
# missile 1
x_m1, y_m1 = -np.pi, 0
v_m1 = 5
# missile 2
x_m2, y_m2 = 0, np.pi
v_m2 = v_m1
# missile 3
x_m3, y_m3 = np.pi, 0
v_m3 = v_m1
# missile 4
x_m4, y_m4 = 0, -np.pi
v_m4 = v_m1
plt.figure(figsize=(10, 10), dpi=80)
plt.title(" missile flight simulator ", fontsize=40)
plt.xlim(-4, 4)
plt.ylim(-4, 4)
#plt.xticks([])
#plt.yticks([])
# set spines
ax = plt.gca()
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.spines['bottom'].set_position(('data', 0))
ax.yaxis.set_ticks_position('left')
ax.spines['left'].set_position(('data', 0))
plt.xticks([-np.pi, -np.pi / 2, 0, np.pi / 2, np.pi], [r'$-\pi$', r'$-\pi/2$', r'$0$', r'$+\pi/2$', r'$+\pi$'])
plt.yticks([-np.pi, -np.pi / 2, 0, np.pi / 2, np.pi], [r'$-\pi$', r'$-\pi/2$', r'$0$', r'$+\pi/2$', r'$+\pi$'])
plt.annotate('missile start point', xy=(x_m1, y_m1), xycoords='data',
xytext=(+15, +15), textcoords='offset points', fontsize=12,
arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=.2"))
# alpha labels
for label in ax.get_xticklabels() + ax.get_yticklabels():
label.set_fontsize(16)
label.set_bbox(dict(facecolor='white', edgecolor='None', alpha=0.65))
class ob(object):
"""docstring for ob"""
def __init__(self, x, y):
self.x = x
self.y = y
class missile(ob):
"""docstring for missile"""
def __init__(self, x, y):
super(missile, self).__init__(x, y)
def forward(self, v, target):
"""docstring for forward"""
if self.x < target.x:
alpha = np.arctan((target.y - self.y) / (target.x - self.x))
elif self.x > target.x:
alpha = np.pi + np.arctan((target.y - self.y) / (target.x - self.x))
elif self.x == target.x and self.y < target.y:
alpha = np.pi / 2
else:
alpha = -np.pi / 2
self.x = self.x + v * 0.01 * np.cos(alpha)
self.y = self.y + v * 0.01 * np.sin(alpha)
return self.x, self.y
def distance(self, target):
"""docstring for distance"""
return np.sqrt((self.x - target.x) ** 2 + (self.y - target.y) ** 2)
class target(ob):
"""docstring for target"""
def __init__(self, x, y):
super(target, self).__init__(x, y)
def newposition(self, x, y):
"""docstring for newposition"""
self.x = x
self.y = y
m1 = missile(x_m1, y_m1)
m2 = missile(x_m2, y_m2)
m3 = missile(x_m3, y_m3)
m4 = missile(x_m4, y_m4)
while True:
if m1.distance(m2) < tolerance or m1.distance(m3) < tolerance or m1.distance(m4) < tolerance:
print "collision"
plt.plot(x_m1, y_m1, 'o')
plt.annotate('crash point', xy=(x_m1, y_m1), xycoords='data',
xytext=(+15, +15), textcoords='offset points', fontsize=12,
arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=.2"))
plt.pause(0.1)
plt.show()
break
elif m3.distance(m2) < tolerance or m3.distance(m4) < tolerance:
print "collision"
plt.plot(x_m3, y_m3, 'o')
plt.annotate('crash point', xy=(x_m3, y_m3), xycoords='data',
xytext=(+15, +15), textcoords='offset points', fontsize=12,
arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=.2"))
plt.pause(0.1)
        plt.show()
break
x_m1, y_m1 = m1.forward(v_m1, m2)
x_m2, y_m2 = m2.forward(v_m2, m3)
x_m3, y_m3 = m3.forward(v_m3, m4)
x_m4, y_m4 = m4.forward(v_m4, m1)
#print alpha, beta
plt.plot(x_m1, y_m1, 'bx', alpha=.5)
plt.plot(x_m2, y_m2, 'k*', alpha=.5)
plt.plot(x_m3, y_m3, 'r.', alpha=.5)
plt.plot(x_m4, y_m4, 'gp', alpha=.5)
plt.legend(("missile1", "missile2", "missile3", "missile4"), loc="upper left", prop={'size': 12})
plt.pause(0.1)
|
songmonit/CTTMSONLINE
|
refs/heads/master
|
addons/portal/tests/__init__.py
|
177
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_portal
checks = [
test_portal,
]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Pharas/PyTalk
|
refs/heads/master
|
PyTalk - Raspberry Pi/sr.py
|
1
|
"""Library for performing speech recognition with the Google Speech Recognition API."""
__author__ = 'Anthony Zhang (Uberi)'
__version__ = '1.1.0'
__license__ = 'BSD'
import io, os, subprocess, wave, sys
import math, audioop, collections
import json
try: # try to use python2 module
from urllib2 import Request, urlopen
except ImportError: # otherwise, use python3 module
from urllib.request import Request, urlopen
#wip: filter out clicks and other too short parts
class AudioSource(object):
def __init__(self):
raise NotImplementedError("this is an abstract class")
def __enter__(self):
raise NotImplementedError("this is an abstract class")
def __exit__(self, exc_type, exc_value, traceback):
raise NotImplementedError("this is an abstract class")
try:
import pyaudio
class Microphone(AudioSource):
def __init__(self, device_index = None):
self.device_index = device_index
self.format = pyaudio.paInt16 # 16-bit int sampling
self.SAMPLE_WIDTH = pyaudio.get_sample_size(self.format)
self.RATE = 44100 # sampling rate in Hertz
self.CHANNELS = 1 # mono audio
self.CHUNK = 512 # number of frames stored in each buffer
self.audio = None
self.stream = None
def __enter__(self):
self.audio = pyaudio.PyAudio()
self.stream = self.audio.open(input_device_index = self.device_index,
format = self.format, rate = self.RATE, channels = self.CHANNELS, frames_per_buffer = self.CHUNK,
input = True, # stream is an input stream
)
return self
def __exit__(self, exc_type, exc_value, traceback):
self.stream.stop_stream()
self.stream.close()
self.stream = None
self.audio.terminate()
except ImportError:
print("Error with importing PyAudio")
pass
class WavFile(AudioSource):
def __init__(self, filename_or_fileobject):
if isinstance(filename_or_fileobject, str):
self.filename = filename_or_fileobject
else:
self.filename = None
self.wav_file = filename_or_fileobject
self.stream = None
def __enter__(self):
if self.filename: self.wav_file = open(self.filename, "rb")
self.wav_reader = wave.open(self.wav_file, "rb")
self.SAMPLE_WIDTH = self.wav_reader.getsampwidth()
self.RATE = self.wav_reader.getframerate()
self.CHANNELS = self.wav_reader.getnchannels()
assert self.CHANNELS == 1 # audio must be mono
self.CHUNK = 4096
self.stream = WavFile.WavStream(self.wav_reader)
return self
def __exit__(self, exc_type, exc_value, traceback):
if self.filename: self.wav_file.close()
self.stream = None
class WavStream(object):
def __init__(self, wav_reader):
self.wav_reader = wav_reader
def read(self, size = -1):
if size == -1:
return self.wav_reader.readframes(self.wav_reader.getnframes())
return self.wav_reader.readframes(size)
class AudioData(object):
def __init__(self, rate, data):
self.rate = rate
self.data = data
class Recognizer(AudioSource):
def __init__(self, language = "en-US", key = "AIzaSyBOti4mM-6x9WDnZIjIeyEU21OpBXqWBgw"):
self.key = key
self.language = language
self.energy_threshold = 200 # minimum audio energy to consider for recording
self.pause_threshold = 1.5 # seconds of quiet time before a phrase is considered complete
self.quiet_duration = 0.5 # amount of quiet time to keep on both sides of the recording
def samples_to_flac(self, source, frame_data):
import platform, os
with io.BytesIO() as wav_file:
wav_writer = wave.open(wav_file, "wb")
try:
wav_writer.setsampwidth(source.SAMPLE_WIDTH)
wav_writer.setnchannels(source.CHANNELS)
wav_writer.setframerate(source.RATE)
wav_writer.writeframes(frame_data)
finally: # make sure resources are cleaned up
wav_writer.close()
wav_data = wav_file.getvalue()
# determine which converter executable to use
system = platform.system()
path = os.path.dirname(os.path.abspath(__file__)) # directory of the current module file, where all the FLAC bundled binaries are stored
flac_converter = shutil_which("flac") # check for installed version first
if flac_converter is None: # flac utility is not installed
if system == "Windows" and platform.machine() in {"i386", "x86", "x86_64", "AMD64"}: # Windows NT, use the bundled FLAC conversion utility
flac_converter = os.path.join(path, "flac-win32.exe")
elif system == "Linux" and platform.machine() in {"i386", "x86", "x86_64", "AMD64"}:
flac_converter = os.path.join(path, "flac-linux-i386")
else:
raise ChildProcessError("FLAC conversion utility not available - consider installing the FLAC command line application")
process = subprocess.Popen("\"%s\" --stdout --totally-silent --best -" % flac_converter, stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
flac_data, stderr = process.communicate(wav_data)
return flac_data
def record(self, source, duration = None):
assert isinstance(source, AudioSource) and source.stream
frames = io.BytesIO()
        seconds_per_buffer = float(source.CHUNK) / source.RATE
elapsed_time = 0
while True: # loop for the total number of chunks needed
elapsed_time += seconds_per_buffer
if duration and elapsed_time > duration:
break
buffer = source.stream.read(source.CHUNK)
if len(buffer) == 0:
break
frames.write(buffer)
frame_data = frames.getvalue()
frames.close()
return AudioData(source.RATE, self.samples_to_flac(source, frame_data))
def listen(self, source, timeout = None):
assert isinstance(source, AudioSource) and source.stream
# record audio data as raw samples
frames = collections.deque()
assert self.pause_threshold >= self.quiet_duration >= 0
        seconds_per_buffer = float(source.CHUNK) / source.RATE
if seconds_per_buffer != 0:
pause_buffer_count = math.ceil(self.pause_threshold / seconds_per_buffer) # number of buffers of quiet audio before the phrase is complete
quiet_buffer_count = math.ceil(self.quiet_duration / seconds_per_buffer) # maximum number of buffers of quiet audio to retain before and after
else:
pause_buffer_count = 0
quiet_buffer_count = 0
elapsed_time = 0
# store audio input until the phrase starts
while True:
elapsed_time += seconds_per_buffer
if timeout and elapsed_time > timeout: # handle timeout if specified
raise TimeoutError("listening timed out")
buffer = source.stream.read(source.CHUNK)
if len(buffer) == 0:
break # reached end of the stream
frames.append(buffer)
# check if the audio input has stopped being quiet
energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # energy of the audio signal
if energy > self.energy_threshold:
break
if len(frames) > quiet_buffer_count: # ensure we only keep the needed amount of quiet buffers
frames.popleft()
# read audio input until the phrase ends
pause_count = 0
while True:
buffer = source.stream.read(source.CHUNK)
if len(buffer) == 0:
break # reached end of the stream
frames.append(buffer)
# check if the audio input has gone quiet for longer than the pause threshold
energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # energy of the audio signal
if energy > self.energy_threshold:
pause_count = 0
else:
pause_count += 1
if pause_count > pause_buffer_count: # end of the phrase
break
# obtain frame data
for i in range(quiet_buffer_count, pause_buffer_count): frames.pop() # remove extra quiet frames at the end
frame_data = b"".join(list(frames))
return AudioData(source.RATE, self.samples_to_flac(source, frame_data))
def recognize(self, audio_data, show_all = False):
assert isinstance(audio_data, AudioData)
url = "http://www.google.com/speech-api/v2/recognize?client=chromium&lang=%s&key=%s" % (self.language, self.key)
self.request = Request(url, data = audio_data.data, headers = {"Content-Type": "audio/x-flac; rate=%s" % audio_data.rate})
# check for invalid key response from the server
try:
response = urlopen(self.request)
except:
raise KeyError("Server wouldn't respond (invalid key or quota has been maxed out)")
response_text = response.read().decode("utf-8")
# ignore any blank blocks
actual_result = []
for line in response_text.split("\n"):
if not line: continue
result = json.loads(line)["result"]
if len(result) != 0:
actual_result = result[0]
# make sure we have a list of transcriptions
if "alternative" not in actual_result:
raise LookupError("Speech is unintelligible")
# return the best guess unless told to do otherwise
if not show_all:
for prediction in actual_result["alternative"]:
if "confidence" in prediction:
return prediction["transcript"]
raise LookupError("Speech is unintelligible")
spoken_text = []
# check to see if Google thinks it's 100% correct
default_confidence = 0
if len(actual_result["alternative"])==1: default_confidence = 1
# return all the possibilities
for prediction in actual_result["alternative"]:
if "confidence" in prediction:
spoken_text.append({"text":prediction["transcript"],"confidence":prediction["confidence"]})
else:
spoken_text.append({"text":prediction["transcript"],"confidence":default_confidence})
return spoken_text
# helper functions
def shutil_which(pgm):
#python2 backport of python3's shutil.which()
path = os.getenv('PATH')
for p in path.split(os.path.pathsep):
p = os.path.join(p, pgm)
if os.path.exists(p) and os.access(p, os.X_OK):
return p
if __name__ == "__main__":
r = Recognizer()
m = Microphone()
while True:
print("Say something!")
with m as source:
audio = r.listen(source)
print("Got it! Now to recognize it...")
try:
print("You said " + r.recognize(audio))
except LookupError:
print("Oops! Didn't catch that")
|
rspavel/spack
|
refs/heads/develop
|
var/spack/repos/builtin/packages/r-suppdists/package.py
|
5
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RSuppdists(RPackage):
"""Ten distributions supplementing those built into R. Inverse Gauss,
Kruskal-Wallis, Kendall's Tau, Friedman's chi squared, Spearman's rho,
maximum F ratio, the Pearson product moment correlation coefficient,
Johnson distributions, normal scores and generalized hypergeometric
distributions."""
homepage = "https://cloud.r-project.org/package=SuppDists"
url = "https://cloud.r-project.org/src/contrib/SuppDists_1.1-9.5.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/SuppDists"
version('1.1-9.5', sha256='680b67145c07d44e200275e08e48602fe19cd99fb106c05422b3f4a244c071c4')
depends_on('r@3.3.0:', type=('build', 'run'))
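# --- Illustrative usage sketch (not part of the original recipe). With this
# recipe available on a Spack repo path, the package would typically be built
# and loaded from the command line:
#     spack install r-suppdists
#     spack load r-suppdists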
|
kikobr/projetoextintor
|
refs/heads/master
|
node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/lexers/templates.py
|
291
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.templates
~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for various template engines' markup.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexers.web import \
PhpLexer, HtmlLexer, XmlLexer, JavascriptLexer, CssLexer, LassoLexer
from pygments.lexers.agile import PythonLexer, PerlLexer
from pygments.lexers.compiled import JavaLexer
from pygments.lexers.jvm import TeaLangLexer
from pygments.lexer import Lexer, DelegatingLexer, RegexLexer, bygroups, \
include, using, this
from pygments.token import Error, Punctuation, \
Text, Comment, Operator, Keyword, Name, String, Number, Other, Token
from pygments.util import html_doctype_matches, looks_like_xml
__all__ = ['HtmlPhpLexer', 'XmlPhpLexer', 'CssPhpLexer',
'JavascriptPhpLexer', 'ErbLexer', 'RhtmlLexer',
'XmlErbLexer', 'CssErbLexer', 'JavascriptErbLexer',
'SmartyLexer', 'HtmlSmartyLexer', 'XmlSmartyLexer',
'CssSmartyLexer', 'JavascriptSmartyLexer', 'DjangoLexer',
'HtmlDjangoLexer', 'CssDjangoLexer', 'XmlDjangoLexer',
'JavascriptDjangoLexer', 'GenshiLexer', 'HtmlGenshiLexer',
'GenshiTextLexer', 'CssGenshiLexer', 'JavascriptGenshiLexer',
'MyghtyLexer', 'MyghtyHtmlLexer', 'MyghtyXmlLexer',
'MyghtyCssLexer', 'MyghtyJavascriptLexer', 'MasonLexer', 'MakoLexer',
'MakoHtmlLexer', 'MakoXmlLexer', 'MakoJavascriptLexer',
'MakoCssLexer', 'JspLexer', 'CheetahLexer', 'CheetahHtmlLexer',
'CheetahXmlLexer', 'CheetahJavascriptLexer', 'EvoqueLexer',
'EvoqueHtmlLexer', 'EvoqueXmlLexer', 'ColdfusionLexer',
'ColdfusionHtmlLexer', 'VelocityLexer', 'VelocityHtmlLexer',
'VelocityXmlLexer', 'SspLexer', 'TeaTemplateLexer', 'LassoHtmlLexer',
'LassoXmlLexer', 'LassoCssLexer', 'LassoJavascriptLexer']
class ErbLexer(Lexer):
"""
Generic `ERB <http://ruby-doc.org/core/classes/ERB.html>`_ (Ruby Templating)
lexer.
Just highlights ruby code between the preprocessor directives, other data
is left untouched by the lexer.
All options are also forwarded to the `RubyLexer`.
"""
name = 'ERB'
aliases = ['erb']
mimetypes = ['application/x-ruby-templating']
_block_re = re.compile(r'(<%%|%%>|<%=|<%#|<%-|<%|-%>|%>|^%[^%].*?$)', re.M)
def __init__(self, **options):
from pygments.lexers.agile import RubyLexer
self.ruby_lexer = RubyLexer(**options)
Lexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
"""
Since ERB doesn't allow "<%" and other tags inside of ruby
blocks we have to use a split approach here that fails for
that too.
"""
tokens = self._block_re.split(text)
tokens.reverse()
state = idx = 0
try:
while True:
# text
if state == 0:
val = tokens.pop()
yield idx, Other, val
idx += len(val)
state = 1
# block starts
elif state == 1:
tag = tokens.pop()
# literals
if tag in ('<%%', '%%>'):
yield idx, Other, tag
idx += 3
state = 0
# comment
elif tag == '<%#':
yield idx, Comment.Preproc, tag
val = tokens.pop()
yield idx + 3, Comment, val
idx += 3 + len(val)
state = 2
# blocks or output
elif tag in ('<%', '<%=', '<%-'):
yield idx, Comment.Preproc, tag
idx += len(tag)
data = tokens.pop()
r_idx = 0
for r_idx, r_token, r_value in \
self.ruby_lexer.get_tokens_unprocessed(data):
yield r_idx + idx, r_token, r_value
idx += len(data)
state = 2
elif tag in ('%>', '-%>'):
yield idx, Error, tag
idx += len(tag)
state = 0
# % raw ruby statements
else:
yield idx, Comment.Preproc, tag[0]
r_idx = 0
for r_idx, r_token, r_value in \
self.ruby_lexer.get_tokens_unprocessed(tag[1:]):
yield idx + 1 + r_idx, r_token, r_value
idx += len(tag)
state = 0
# block ends
elif state == 2:
tag = tokens.pop()
if tag not in ('%>', '-%>'):
yield idx, Other, tag
else:
yield idx, Comment.Preproc, tag
idx += len(tag)
state = 0
except IndexError:
return
def analyse_text(text):
if '<%' in text and '%>' in text:
return 0.4
class SmartyLexer(RegexLexer):
"""
Generic `Smarty <http://smarty.php.net/>`_ template lexer.
Just highlights smarty code between the preprocessor directives, other
data is left untouched by the lexer.
"""
name = 'Smarty'
aliases = ['smarty']
filenames = ['*.tpl']
mimetypes = ['application/x-smarty']
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
(r'[^{]+', Other),
(r'(\{)(\*.*?\*)(\})',
bygroups(Comment.Preproc, Comment, Comment.Preproc)),
(r'(\{php\})(.*?)(\{/php\})',
bygroups(Comment.Preproc, using(PhpLexer, startinline=True),
Comment.Preproc)),
(r'(\{)(/?[a-zA-Z_][a-zA-Z0-9_]*)(\s*)',
bygroups(Comment.Preproc, Name.Function, Text), 'smarty'),
(r'\{', Comment.Preproc, 'smarty')
],
'smarty': [
(r'\s+', Text),
(r'\}', Comment.Preproc, '#pop'),
(r'#[a-zA-Z_][a-zA-Z0-9_]*#', Name.Variable),
(r'\$[a-zA-Z_][a-zA-Z0-9_]*(\.[a-zA-Z0-9_]+)*', Name.Variable),
(r'[~!%^&*()+=|\[\]:;,.<>/?{}@-]', Operator),
(r'(true|false|null)\b', Keyword.Constant),
(r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
r"0[xX][0-9a-fA-F]+[Ll]?", Number),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Attribute)
]
}
def analyse_text(text):
rv = 0.0
        if re.search(r'\{if\s+.*?\}.*?\{/if\}', text):
            rv += 0.15
        if re.search(r'\{include\s+file=.*?\}', text):
            rv += 0.15
        if re.search(r'\{foreach\s+.*?\}.*?\{/foreach\}', text):
            rv += 0.15
        if re.search(r'\{\$.*?\}', text):
rv += 0.01
return rv
class VelocityLexer(RegexLexer):
"""
Generic `Velocity <http://velocity.apache.org/>`_ template lexer.
Just highlights velocity directives and variable references, other
data is left untouched by the lexer.
"""
name = 'Velocity'
aliases = ['velocity']
filenames = ['*.vm','*.fhtml']
flags = re.MULTILINE | re.DOTALL
identifier = r'[a-zA-Z_][a-zA-Z0-9_]*'
tokens = {
'root': [
(r'[^{#$]+', Other),
(r'(#)(\*.*?\*)(#)',
bygroups(Comment.Preproc, Comment, Comment.Preproc)),
(r'(##)(.*?$)',
bygroups(Comment.Preproc, Comment)),
(r'(#\{?)(' + identifier + r')(\}?)(\s?\()',
bygroups(Comment.Preproc, Name.Function, Comment.Preproc, Punctuation),
'directiveparams'),
(r'(#\{?)(' + identifier + r')(\}|\b)',
bygroups(Comment.Preproc, Name.Function, Comment.Preproc)),
(r'\$\{?', Punctuation, 'variable')
],
'variable': [
(identifier, Name.Variable),
(r'\(', Punctuation, 'funcparams'),
(r'(\.)(' + identifier + r')',
bygroups(Punctuation, Name.Variable), '#push'),
(r'\}', Punctuation, '#pop'),
(r'', Other, '#pop')
],
'directiveparams': [
(r'(&&|\|\||==?|!=?|[-<>+*%&\|\^/])|\b(eq|ne|gt|lt|ge|le|not|in)\b',
Operator),
(r'\[', Operator, 'rangeoperator'),
(r'\b' + identifier + r'\b', Name.Function),
include('funcparams')
],
'rangeoperator': [
(r'\.\.', Operator),
include('funcparams'),
(r'\]', Operator, '#pop')
],
'funcparams': [
(r'\$\{?', Punctuation, 'variable'),
(r'\s+', Text),
(r',', Punctuation),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
(r"0[xX][0-9a-fA-F]+[Ll]?", Number),
(r"\b[0-9]+\b", Number),
(r'(true|false|null)\b', Keyword.Constant),
(r'\(', Punctuation, '#push'),
(r'\)', Punctuation, '#pop')
]
}
def analyse_text(text):
rv = 0.0
if re.search(r'#\{?macro\}?\(.*?\).*?#\{?end\}?', text):
rv += 0.25
if re.search(r'#\{?if\}?\(.+?\).*?#\{?end\}?', text):
rv += 0.15
if re.search(r'#\{?foreach\}?\(.+?\).*?#\{?end\}?', text):
rv += 0.15
if re.search(r'\$\{?[a-zA-Z_][a-zA-Z0-9_]*(\([^)]*\))?'
r'(\.[a-zA-Z0-9_]+(\([^)]*\))?)*\}?', text):
rv += 0.01
return rv
class VelocityHtmlLexer(DelegatingLexer):
"""
    Subclass of the `VelocityLexer` that highlights unlexed data
with the `HtmlLexer`.
"""
name = 'HTML+Velocity'
aliases = ['html+velocity']
alias_filenames = ['*.html','*.fhtml']
mimetypes = ['text/html+velocity']
def __init__(self, **options):
super(VelocityHtmlLexer, self).__init__(HtmlLexer, VelocityLexer,
**options)
class VelocityXmlLexer(DelegatingLexer):
"""
    Subclass of the `VelocityLexer` that highlights unlexed data
with the `XmlLexer`.
"""
name = 'XML+Velocity'
aliases = ['xml+velocity']
alias_filenames = ['*.xml','*.vm']
mimetypes = ['application/xml+velocity']
def __init__(self, **options):
super(VelocityXmlLexer, self).__init__(XmlLexer, VelocityLexer,
**options)
def analyse_text(text):
rv = VelocityLexer.analyse_text(text) - 0.01
if looks_like_xml(text):
rv += 0.5
return rv
class DjangoLexer(RegexLexer):
"""
Generic `django <http://www.djangoproject.com/documentation/templates/>`_
and `jinja <http://wsgiarea.pocoo.org/jinja/>`_ template lexer.
It just highlights django/jinja code between the preprocessor directives,
other data is left untouched by the lexer.
"""
name = 'Django/Jinja'
aliases = ['django', 'jinja']
mimetypes = ['application/x-django-templating', 'application/x-jinja']
flags = re.M | re.S
tokens = {
'root': [
(r'[^{]+', Other),
(r'\{\{', Comment.Preproc, 'var'),
# jinja/django comments
(r'\{[*#].*?[*#]\}', Comment),
# django comments
(r'(\{%)(-?\s*)(comment)(\s*-?)(%\})(.*?)'
r'(\{%)(-?\s*)(endcomment)(\s*-?)(%\})',
bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
Comment, Comment.Preproc, Text, Keyword, Text,
Comment.Preproc)),
# raw jinja blocks
(r'(\{%)(-?\s*)(raw)(\s*-?)(%\})(.*?)'
r'(\{%)(-?\s*)(endraw)(\s*-?)(%\})',
bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
Text, Comment.Preproc, Text, Keyword, Text,
Comment.Preproc)),
# filter blocks
(r'(\{%)(-?\s*)(filter)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)',
bygroups(Comment.Preproc, Text, Keyword, Text, Name.Function),
'block'),
(r'(\{%)(-?\s*)([a-zA-Z_][a-zA-Z0-9_]*)',
bygroups(Comment.Preproc, Text, Keyword), 'block'),
(r'\{', Other)
],
'varnames': [
(r'(\|)(\s*)([a-zA-Z_][a-zA-Z0-9_]*)',
bygroups(Operator, Text, Name.Function)),
(r'(is)(\s+)(not)?(\s+)?([a-zA-Z_][a-zA-Z0-9_]*)',
bygroups(Keyword, Text, Keyword, Text, Name.Function)),
(r'(_|true|false|none|True|False|None)\b', Keyword.Pseudo),
(r'(in|as|reversed|recursive|not|and|or|is|if|else|import|'
r'with(?:(?:out)?\s*context)?|scoped|ignore\s+missing)\b',
Keyword),
(r'(loop|block|super|forloop)\b', Name.Builtin),
(r'[a-zA-Z][a-zA-Z0-9_-]*', Name.Variable),
(r'\.[a-zA-Z0-9_]+', Name.Variable),
(r':?"(\\\\|\\"|[^"])*"', String.Double),
(r":?'(\\\\|\\'|[^'])*'", String.Single),
(r'([{}()\[\]+\-*/,:~]|[><=]=?)', Operator),
(r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
r"0[xX][0-9a-fA-F]+[Ll]?", Number),
],
'var': [
(r'\s+', Text),
(r'(-?)(\}\})', bygroups(Text, Comment.Preproc), '#pop'),
include('varnames')
],
'block': [
(r'\s+', Text),
(r'(-?)(%\})', bygroups(Text, Comment.Preproc), '#pop'),
include('varnames'),
(r'.', Punctuation)
]
}
def analyse_text(text):
rv = 0.0
if re.search(r'\{%\s*(block|extends)', text) is not None:
rv += 0.4
if re.search(r'\{%\s*if\s*.*?%\}', text) is not None:
rv += 0.1
if re.search(r'\{\{.*?\}\}', text) is not None:
rv += 0.1
return rv
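# --- Illustrative usage sketch (not part of the original module). Tokenize a
# small Django/Jinja snippet with the lexer defined above; real callers would
# normally go through pygments.highlight() with a formatter instead.
if __name__ == '__main__':
    _sample = "{% for item in items %}{{ item.name|upper }}{% endfor %}"
    for _token, _value in DjangoLexer().get_tokens(_sample):
        print("%-25s %r" % (_token, _value))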
class MyghtyLexer(RegexLexer):
"""
Generic `myghty templates`_ lexer. Code that isn't Myghty
markup is yielded as `Token.Other`.
*New in Pygments 0.6.*
.. _myghty templates: http://www.myghty.org/
"""
name = 'Myghty'
aliases = ['myghty']
filenames = ['*.myt', 'autodelegate']
mimetypes = ['application/x-myghty']
tokens = {
'root': [
(r'\s+', Text),
(r'(<%(?:def|method))(\s*)(.*?)(>)(.*?)(</%\2\s*>)(?s)',
bygroups(Name.Tag, Text, Name.Function, Name.Tag,
using(this), Name.Tag)),
(r'(<%\w+)(.*?)(>)(.*?)(</%\2\s*>)(?s)',
bygroups(Name.Tag, Name.Function, Name.Tag,
using(PythonLexer), Name.Tag)),
(r'(<&[^|])(.*?)(,.*?)?(&>)',
bygroups(Name.Tag, Name.Function, using(PythonLexer), Name.Tag)),
(r'(<&\|)(.*?)(,.*?)?(&>)(?s)',
bygroups(Name.Tag, Name.Function, using(PythonLexer), Name.Tag)),
(r'</&>', Name.Tag),
(r'(<%!?)(.*?)(%>)(?s)',
bygroups(Name.Tag, using(PythonLexer), Name.Tag)),
(r'(?<=^)#[^\n]*(\n|\Z)', Comment),
(r'(?<=^)(%)([^\n]*)(\n|\Z)',
bygroups(Name.Tag, using(PythonLexer), Other)),
(r"""(?sx)
(.+?) # anything, followed by:
(?:
(?<=\n)(?=[%#]) | # an eval or comment line
(?=</?[%&]) | # a substitution or block or
# call start or end
# - don't consume
(\\\n) | # an escaped newline
\Z # end of string
)""", bygroups(Other, Operator)),
]
}
class MyghtyHtmlLexer(DelegatingLexer):
"""
    Subclass of the `MyghtyLexer` that highlights unlexed data
with the `HtmlLexer`.
*New in Pygments 0.6.*
"""
name = 'HTML+Myghty'
aliases = ['html+myghty']
mimetypes = ['text/html+myghty']
def __init__(self, **options):
super(MyghtyHtmlLexer, self).__init__(HtmlLexer, MyghtyLexer,
**options)
class MyghtyXmlLexer(DelegatingLexer):
"""
    Subclass of the `MyghtyLexer` that highlights unlexed data
with the `XmlLexer`.
*New in Pygments 0.6.*
"""
name = 'XML+Myghty'
aliases = ['xml+myghty']
mimetypes = ['application/xml+myghty']
def __init__(self, **options):
super(MyghtyXmlLexer, self).__init__(XmlLexer, MyghtyLexer,
**options)
class MyghtyJavascriptLexer(DelegatingLexer):
"""
    Subclass of the `MyghtyLexer` that highlights unlexed data
with the `JavascriptLexer`.
*New in Pygments 0.6.*
"""
name = 'JavaScript+Myghty'
aliases = ['js+myghty', 'javascript+myghty']
mimetypes = ['application/x-javascript+myghty',
'text/x-javascript+myghty',
                 'text/javascript+myghty']
def __init__(self, **options):
super(MyghtyJavascriptLexer, self).__init__(JavascriptLexer,
MyghtyLexer, **options)
class MyghtyCssLexer(DelegatingLexer):
"""
    Subclass of the `MyghtyLexer` that highlights unlexed data
with the `CssLexer`.
*New in Pygments 0.6.*
"""
name = 'CSS+Myghty'
aliases = ['css+myghty']
mimetypes = ['text/css+myghty']
def __init__(self, **options):
super(MyghtyCssLexer, self).__init__(CssLexer, MyghtyLexer,
**options)
class MasonLexer(RegexLexer):
"""
Generic `mason templates`_ lexer. Stolen from Myghty lexer. Code that isn't
Mason markup is HTML.
.. _mason templates: http://www.masonhq.com/
*New in Pygments 1.4.*
"""
name = 'Mason'
aliases = ['mason']
filenames = ['*.m', '*.mhtml', '*.mc', '*.mi', 'autohandler', 'dhandler']
mimetypes = ['application/x-mason']
tokens = {
'root': [
(r'\s+', Text),
(r'(<%doc>)(.*?)(</%doc>)(?s)',
bygroups(Name.Tag, Comment.Multiline, Name.Tag)),
(r'(<%(?:def|method))(\s*)(.*?)(>)(.*?)(</%\2\s*>)(?s)',
bygroups(Name.Tag, Text, Name.Function, Name.Tag,
using(this), Name.Tag)),
(r'(<%\w+)(.*?)(>)(.*?)(</%\2\s*>)(?s)',
bygroups(Name.Tag, Name.Function, Name.Tag,
using(PerlLexer), Name.Tag)),
(r'(<&[^|])(.*?)(,.*?)?(&>)(?s)',
bygroups(Name.Tag, Name.Function, using(PerlLexer), Name.Tag)),
(r'(<&\|)(.*?)(,.*?)?(&>)(?s)',
bygroups(Name.Tag, Name.Function, using(PerlLexer), Name.Tag)),
(r'</&>', Name.Tag),
(r'(<%!?)(.*?)(%>)(?s)',
bygroups(Name.Tag, using(PerlLexer), Name.Tag)),
(r'(?<=^)#[^\n]*(\n|\Z)', Comment),
(r'(?<=^)(%)([^\n]*)(\n|\Z)',
bygroups(Name.Tag, using(PerlLexer), Other)),
(r"""(?sx)
(.+?) # anything, followed by:
(?:
(?<=\n)(?=[%#]) | # an eval or comment line
(?=</?[%&]) | # a substitution or block or
# call start or end
# - don't consume
(\\\n) | # an escaped newline
\Z # end of string
)""", bygroups(using(HtmlLexer), Operator)),
]
}
def analyse_text(text):
rv = 0.0
if re.search('<&', text) is not None:
rv = 1.0
return rv
class MakoLexer(RegexLexer):
"""
Generic `mako templates`_ lexer. Code that isn't Mako
markup is yielded as `Token.Other`.
*New in Pygments 0.7.*
.. _mako templates: http://www.makotemplates.org/
"""
name = 'Mako'
aliases = ['mako']
filenames = ['*.mao']
mimetypes = ['application/x-mako']
tokens = {
'root': [
(r'(\s*)(%)(\s*end(?:\w+))(\n|\Z)',
bygroups(Text, Comment.Preproc, Keyword, Other)),
(r'(\s*)(%)([^\n]*)(\n|\Z)',
bygroups(Text, Comment.Preproc, using(PythonLexer), Other)),
(r'(\s*)(##[^\n]*)(\n|\Z)',
bygroups(Text, Comment.Preproc, Other)),
(r'(?s)<%doc>.*?</%doc>', Comment.Preproc),
(r'(<%)([\w\.\:]+)',
bygroups(Comment.Preproc, Name.Builtin), 'tag'),
(r'(</%)([\w\.\:]+)(>)',
bygroups(Comment.Preproc, Name.Builtin, Comment.Preproc)),
(r'<%(?=([\w\.\:]+))', Comment.Preproc, 'ondeftags'),
(r'(<%(?:!?))(.*?)(%>)(?s)',
bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
(r'(\$\{)(.*?)(\})',
bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
(r'''(?sx)
(.+?) # anything, followed by:
(?:
(?<=\n)(?=%|\#\#) | # an eval or comment line
(?=\#\*) | # multiline comment
(?=</?%) | # a python block
# call start or end
(?=\$\{) | # a substitution
(?<=\n)(?=\s*%) |
# - don't consume
(\\\n) | # an escaped newline
\Z # end of string
)
''', bygroups(Other, Operator)),
(r'\s+', Text),
],
'ondeftags': [
(r'<%', Comment.Preproc),
(r'(?<=<%)(include|inherit|namespace|page)', Name.Builtin),
include('tag'),
],
'tag': [
(r'((?:\w+)\s*=)(\s*)(".*?")',
bygroups(Name.Attribute, Text, String)),
(r'/?\s*>', Comment.Preproc, '#pop'),
(r'\s+', Text),
],
'attr': [
('".*?"', String, '#pop'),
("'.*?'", String, '#pop'),
(r'[^\s>]+', String, '#pop'),
],
}
class MakoHtmlLexer(DelegatingLexer):
"""
Subclass of the `MakoLexer` that highlights unlexed data
with the `HtmlLexer`.
*New in Pygments 0.7.*
"""
name = 'HTML+Mako'
aliases = ['html+mako']
mimetypes = ['text/html+mako']
def __init__(self, **options):
super(MakoHtmlLexer, self).__init__(HtmlLexer, MakoLexer,
**options)
class MakoXmlLexer(DelegatingLexer):
"""
    Subclass of the `MakoLexer` that highlights unlexed data
with the `XmlLexer`.
*New in Pygments 0.7.*
"""
name = 'XML+Mako'
aliases = ['xml+mako']
mimetypes = ['application/xml+mako']
def __init__(self, **options):
super(MakoXmlLexer, self).__init__(XmlLexer, MakoLexer,
**options)
class MakoJavascriptLexer(DelegatingLexer):
"""
    Subclass of the `MakoLexer` that highlights unlexed data
with the `JavascriptLexer`.
*New in Pygments 0.7.*
"""
name = 'JavaScript+Mako'
aliases = ['js+mako', 'javascript+mako']
mimetypes = ['application/x-javascript+mako',
'text/x-javascript+mako',
'text/javascript+mako']
def __init__(self, **options):
super(MakoJavascriptLexer, self).__init__(JavascriptLexer,
MakoLexer, **options)
class MakoCssLexer(DelegatingLexer):
"""
    Subclass of the `MakoLexer` that highlights unlexed data
with the `CssLexer`.
*New in Pygments 0.7.*
"""
name = 'CSS+Mako'
aliases = ['css+mako']
mimetypes = ['text/css+mako']
def __init__(self, **options):
super(MakoCssLexer, self).__init__(CssLexer, MakoLexer,
**options)
# Genshi and Cheetah lexers courtesy of Matt Good.
class CheetahPythonLexer(Lexer):
"""
Lexer for handling Cheetah's special $ tokens in Python syntax.
"""
def get_tokens_unprocessed(self, text):
pylexer = PythonLexer(**self.options)
for pos, type_, value in pylexer.get_tokens_unprocessed(text):
if type_ == Token.Error and value == '$':
type_ = Comment.Preproc
yield pos, type_, value
class CheetahLexer(RegexLexer):
"""
Generic `cheetah templates`_ lexer. Code that isn't Cheetah
markup is yielded as `Token.Other`. This also works for
`spitfire templates`_ which use the same syntax.
.. _cheetah templates: http://www.cheetahtemplate.org/
.. _spitfire templates: http://code.google.com/p/spitfire/
"""
name = 'Cheetah'
aliases = ['cheetah', 'spitfire']
filenames = ['*.tmpl', '*.spt']
mimetypes = ['application/x-cheetah', 'application/x-spitfire']
tokens = {
'root': [
(r'(##[^\n]*)$',
(bygroups(Comment))),
(r'#[*](.|\n)*?[*]#', Comment),
(r'#end[^#\n]*(?:#|$)', Comment.Preproc),
(r'#slurp$', Comment.Preproc),
(r'(#[a-zA-Z]+)([^#\n]*)(#|$)',
(bygroups(Comment.Preproc, using(CheetahPythonLexer),
Comment.Preproc))),
# TODO support other Python syntax like $foo['bar']
(r'(\$)([a-zA-Z_][a-zA-Z0-9_\.]*[a-zA-Z0-9_])',
bygroups(Comment.Preproc, using(CheetahPythonLexer))),
(r'(\$\{!?)(.*?)(\})(?s)',
bygroups(Comment.Preproc, using(CheetahPythonLexer),
Comment.Preproc)),
(r'''(?sx)
(.+?) # anything, followed by:
(?:
(?=[#][#a-zA-Z]*) | # an eval comment
(?=\$[a-zA-Z_{]) | # a substitution
\Z # end of string
)
''', Other),
(r'\s+', Text),
],
}
class CheetahHtmlLexer(DelegatingLexer):
"""
    Subclass of the `CheetahLexer` that highlights unlexed data
with the `HtmlLexer`.
"""
name = 'HTML+Cheetah'
aliases = ['html+cheetah', 'html+spitfire', 'htmlcheetah']
mimetypes = ['text/html+cheetah', 'text/html+spitfire']
def __init__(self, **options):
super(CheetahHtmlLexer, self).__init__(HtmlLexer, CheetahLexer,
**options)
class CheetahXmlLexer(DelegatingLexer):
"""
    Subclass of the `CheetahLexer` that highlights unlexed data
with the `XmlLexer`.
"""
name = 'XML+Cheetah'
aliases = ['xml+cheetah', 'xml+spitfire']
mimetypes = ['application/xml+cheetah', 'application/xml+spitfire']
def __init__(self, **options):
super(CheetahXmlLexer, self).__init__(XmlLexer, CheetahLexer,
**options)
class CheetahJavascriptLexer(DelegatingLexer):
"""
    Subclass of the `CheetahLexer` that highlights unlexed data
with the `JavascriptLexer`.
"""
name = 'JavaScript+Cheetah'
aliases = ['js+cheetah', 'javascript+cheetah',
'js+spitfire', 'javascript+spitfire']
mimetypes = ['application/x-javascript+cheetah',
'text/x-javascript+cheetah',
'text/javascript+cheetah',
'application/x-javascript+spitfire',
'text/x-javascript+spitfire',
'text/javascript+spitfire']
def __init__(self, **options):
super(CheetahJavascriptLexer, self).__init__(JavascriptLexer,
CheetahLexer, **options)
class GenshiTextLexer(RegexLexer):
"""
A lexer that highlights `genshi <http://genshi.edgewall.org/>`_ text
templates.
"""
name = 'Genshi Text'
aliases = ['genshitext']
mimetypes = ['application/x-genshi-text', 'text/x-genshi']
tokens = {
'root': [
(r'[^#\$\s]+', Other),
(r'^(\s*)(##.*)$', bygroups(Text, Comment)),
(r'^(\s*)(#)', bygroups(Text, Comment.Preproc), 'directive'),
include('variable'),
(r'[#\$\s]', Other),
],
'directive': [
(r'\n', Text, '#pop'),
(r'(?:def|for|if)\s+.*', using(PythonLexer), '#pop'),
(r'(choose|when|with)([^\S\n]+)(.*)',
bygroups(Keyword, Text, using(PythonLexer)), '#pop'),
(r'(choose|otherwise)\b', Keyword, '#pop'),
(r'(end\w*)([^\S\n]*)(.*)', bygroups(Keyword, Text, Comment), '#pop'),
],
'variable': [
(r'(?<!\$)(\$\{)(.+?)(\})',
bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
(r'(?<!\$)(\$)([a-zA-Z_][a-zA-Z0-9_\.]*)',
Name.Variable),
]
}
class GenshiMarkupLexer(RegexLexer):
"""
Base lexer for Genshi markup, used by `HtmlGenshiLexer` and
`GenshiLexer`.
"""
flags = re.DOTALL
tokens = {
'root': [
(r'[^<\$]+', Other),
(r'(<\?python)(.*?)(\?>)',
bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
# yield style and script blocks as Other
(r'<\s*(script|style)\s*.*?>.*?<\s*/\1\s*>', Other),
(r'<\s*py:[a-zA-Z0-9]+', Name.Tag, 'pytag'),
(r'<\s*[a-zA-Z0-9:]+', Name.Tag, 'tag'),
include('variable'),
(r'[<\$]', Other),
],
'pytag': [
(r'\s+', Text),
(r'[a-zA-Z0-9_:-]+\s*=', Name.Attribute, 'pyattr'),
(r'/?\s*>', Name.Tag, '#pop'),
],
'pyattr': [
('(")(.*?)(")', bygroups(String, using(PythonLexer), String), '#pop'),
("(')(.*?)(')", bygroups(String, using(PythonLexer), String), '#pop'),
(r'[^\s>]+', String, '#pop'),
],
'tag': [
(r'\s+', Text),
(r'py:[a-zA-Z0-9_-]+\s*=', Name.Attribute, 'pyattr'),
(r'[a-zA-Z0-9_:-]+\s*=', Name.Attribute, 'attr'),
(r'/?\s*>', Name.Tag, '#pop'),
],
'attr': [
('"', String, 'attr-dstring'),
("'", String, 'attr-sstring'),
(r'[^\s>]*', String, '#pop')
],
'attr-dstring': [
('"', String, '#pop'),
include('strings'),
("'", String)
],
'attr-sstring': [
("'", String, '#pop'),
include('strings'),
("'", String)
],
'strings': [
('[^"\'$]+', String),
include('variable')
],
'variable': [
(r'(?<!\$)(\$\{)(.+?)(\})',
bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
(r'(?<!\$)(\$)([a-zA-Z_][a-zA-Z0-9_\.]*)',
Name.Variable),
]
}
class HtmlGenshiLexer(DelegatingLexer):
"""
A lexer that highlights `genshi <http://genshi.edgewall.org/>`_ and
    `kid <http://kid-templating.org/>`_ HTML templates.
"""
name = 'HTML+Genshi'
aliases = ['html+genshi', 'html+kid']
alias_filenames = ['*.html', '*.htm', '*.xhtml']
mimetypes = ['text/html+genshi']
def __init__(self, **options):
super(HtmlGenshiLexer, self).__init__(HtmlLexer, GenshiMarkupLexer,
**options)
def analyse_text(text):
rv = 0.0
        if re.search(r'\$\{.*?\}', text) is not None:
rv += 0.2
if re.search('py:(.*?)=["\']', text) is not None:
rv += 0.2
return rv + HtmlLexer.analyse_text(text) - 0.01
class GenshiLexer(DelegatingLexer):
"""
A lexer that highlights `genshi <http://genshi.edgewall.org/>`_ and
    `kid <http://kid-templating.org/>`_ XML templates.
"""
name = 'Genshi'
aliases = ['genshi', 'kid', 'xml+genshi', 'xml+kid']
filenames = ['*.kid']
alias_filenames = ['*.xml']
mimetypes = ['application/x-genshi', 'application/x-kid']
def __init__(self, **options):
super(GenshiLexer, self).__init__(XmlLexer, GenshiMarkupLexer,
**options)
def analyse_text(text):
rv = 0.0
        if re.search(r'\$\{.*?\}', text) is not None:
rv += 0.2
if re.search('py:(.*?)=["\']', text) is not None:
rv += 0.2
return rv + XmlLexer.analyse_text(text) - 0.01
class JavascriptGenshiLexer(DelegatingLexer):
"""
A lexer that highlights javascript code in genshi text templates.
"""
name = 'JavaScript+Genshi Text'
aliases = ['js+genshitext', 'js+genshi', 'javascript+genshitext',
'javascript+genshi']
alias_filenames = ['*.js']
mimetypes = ['application/x-javascript+genshi',
'text/x-javascript+genshi',
'text/javascript+genshi']
def __init__(self, **options):
super(JavascriptGenshiLexer, self).__init__(JavascriptLexer,
GenshiTextLexer,
**options)
def analyse_text(text):
return GenshiLexer.analyse_text(text) - 0.05
class CssGenshiLexer(DelegatingLexer):
"""
A lexer that highlights CSS definitions in genshi text templates.
"""
name = 'CSS+Genshi Text'
aliases = ['css+genshitext', 'css+genshi']
alias_filenames = ['*.css']
mimetypes = ['text/css+genshi']
def __init__(self, **options):
super(CssGenshiLexer, self).__init__(CssLexer, GenshiTextLexer,
**options)
def analyse_text(text):
return GenshiLexer.analyse_text(text) - 0.05
class RhtmlLexer(DelegatingLexer):
"""
Subclass of the ERB lexer that highlights the unlexed data with the
html lexer.
    Nested Javascript and CSS are highlighted too.
"""
name = 'RHTML'
aliases = ['rhtml', 'html+erb', 'html+ruby']
filenames = ['*.rhtml']
alias_filenames = ['*.html', '*.htm', '*.xhtml']
mimetypes = ['text/html+ruby']
def __init__(self, **options):
super(RhtmlLexer, self).__init__(HtmlLexer, ErbLexer, **options)
def analyse_text(text):
rv = ErbLexer.analyse_text(text) - 0.01
if html_doctype_matches(text):
# one more than the XmlErbLexer returns
rv += 0.5
return rv
class XmlErbLexer(DelegatingLexer):
"""
Subclass of `ErbLexer` which highlights data outside preprocessor
directives with the `XmlLexer`.
"""
name = 'XML+Ruby'
aliases = ['xml+erb', 'xml+ruby']
alias_filenames = ['*.xml']
mimetypes = ['application/xml+ruby']
def __init__(self, **options):
super(XmlErbLexer, self).__init__(XmlLexer, ErbLexer, **options)
def analyse_text(text):
rv = ErbLexer.analyse_text(text) - 0.01
if looks_like_xml(text):
rv += 0.4
return rv
class CssErbLexer(DelegatingLexer):
"""
Subclass of `ErbLexer` which highlights unlexed data with the `CssLexer`.
"""
name = 'CSS+Ruby'
aliases = ['css+erb', 'css+ruby']
alias_filenames = ['*.css']
mimetypes = ['text/css+ruby']
def __init__(self, **options):
super(CssErbLexer, self).__init__(CssLexer, ErbLexer, **options)
def analyse_text(text):
return ErbLexer.analyse_text(text) - 0.05
class JavascriptErbLexer(DelegatingLexer):
"""
Subclass of `ErbLexer` which highlights unlexed data with the
`JavascriptLexer`.
"""
name = 'JavaScript+Ruby'
aliases = ['js+erb', 'javascript+erb', 'js+ruby', 'javascript+ruby']
alias_filenames = ['*.js']
mimetypes = ['application/x-javascript+ruby',
'text/x-javascript+ruby',
'text/javascript+ruby']
def __init__(self, **options):
super(JavascriptErbLexer, self).__init__(JavascriptLexer, ErbLexer,
**options)
def analyse_text(text):
return ErbLexer.analyse_text(text) - 0.05
class HtmlPhpLexer(DelegatingLexer):
"""
Subclass of `PhpLexer` that highlights unhandled data with the `HtmlLexer`.
    Nested Javascript and CSS are highlighted too.
"""
name = 'HTML+PHP'
aliases = ['html+php']
filenames = ['*.phtml']
alias_filenames = ['*.php', '*.html', '*.htm', '*.xhtml',
'*.php[345]']
mimetypes = ['application/x-php',
'application/x-httpd-php', 'application/x-httpd-php3',
'application/x-httpd-php4', 'application/x-httpd-php5']
def __init__(self, **options):
super(HtmlPhpLexer, self).__init__(HtmlLexer, PhpLexer, **options)
def analyse_text(text):
rv = PhpLexer.analyse_text(text) - 0.01
if html_doctype_matches(text):
rv += 0.5
return rv
class XmlPhpLexer(DelegatingLexer):
"""
    Subclass of `PhpLexer` that highlights unhandled data with the `XmlLexer`.
"""
name = 'XML+PHP'
aliases = ['xml+php']
alias_filenames = ['*.xml', '*.php', '*.php[345]']
mimetypes = ['application/xml+php']
def __init__(self, **options):
super(XmlPhpLexer, self).__init__(XmlLexer, PhpLexer, **options)
def analyse_text(text):
rv = PhpLexer.analyse_text(text) - 0.01
if looks_like_xml(text):
rv += 0.4
return rv
class CssPhpLexer(DelegatingLexer):
"""
Subclass of `PhpLexer` which highlights unmatched data with the `CssLexer`.
"""
name = 'CSS+PHP'
aliases = ['css+php']
alias_filenames = ['*.css']
mimetypes = ['text/css+php']
def __init__(self, **options):
super(CssPhpLexer, self).__init__(CssLexer, PhpLexer, **options)
def analyse_text(text):
return PhpLexer.analyse_text(text) - 0.05
class JavascriptPhpLexer(DelegatingLexer):
"""
Subclass of `PhpLexer` which highlights unmatched data with the
`JavascriptLexer`.
"""
name = 'JavaScript+PHP'
aliases = ['js+php', 'javascript+php']
alias_filenames = ['*.js']
mimetypes = ['application/x-javascript+php',
'text/x-javascript+php',
'text/javascript+php']
def __init__(self, **options):
super(JavascriptPhpLexer, self).__init__(JavascriptLexer, PhpLexer,
**options)
def analyse_text(text):
return PhpLexer.analyse_text(text)
class HtmlSmartyLexer(DelegatingLexer):
"""
    Subclass of the `SmartyLexer` that highlights unlexed data with the
    `HtmlLexer`.
    Nested Javascript and CSS are highlighted too.
"""
name = 'HTML+Smarty'
aliases = ['html+smarty']
alias_filenames = ['*.html', '*.htm', '*.xhtml', '*.tpl']
mimetypes = ['text/html+smarty']
def __init__(self, **options):
super(HtmlSmartyLexer, self).__init__(HtmlLexer, SmartyLexer, **options)
def analyse_text(text):
rv = SmartyLexer.analyse_text(text) - 0.01
if html_doctype_matches(text):
rv += 0.5
return rv
class XmlSmartyLexer(DelegatingLexer):
"""
Subclass of the `SmartyLexer` that highlights unlexed data with the
`XmlLexer`.
"""
name = 'XML+Smarty'
aliases = ['xml+smarty']
alias_filenames = ['*.xml', '*.tpl']
mimetypes = ['application/xml+smarty']
def __init__(self, **options):
super(XmlSmartyLexer, self).__init__(XmlLexer, SmartyLexer, **options)
def analyse_text(text):
rv = SmartyLexer.analyse_text(text) - 0.01
if looks_like_xml(text):
rv += 0.4
return rv
class CssSmartyLexer(DelegatingLexer):
"""
Subclass of the `SmartyLexer` that highlights unlexed data with the
`CssLexer`.
"""
name = 'CSS+Smarty'
aliases = ['css+smarty']
alias_filenames = ['*.css', '*.tpl']
mimetypes = ['text/css+smarty']
def __init__(self, **options):
super(CssSmartyLexer, self).__init__(CssLexer, SmartyLexer, **options)
def analyse_text(text):
return SmartyLexer.analyse_text(text) - 0.05
class JavascriptSmartyLexer(DelegatingLexer):
"""
Subclass of the `SmartyLexer` that highlights unlexed data with the
`JavascriptLexer`.
"""
name = 'JavaScript+Smarty'
aliases = ['js+smarty', 'javascript+smarty']
alias_filenames = ['*.js', '*.tpl']
mimetypes = ['application/x-javascript+smarty',
'text/x-javascript+smarty',
'text/javascript+smarty']
def __init__(self, **options):
super(JavascriptSmartyLexer, self).__init__(JavascriptLexer, SmartyLexer,
**options)
def analyse_text(text):
return SmartyLexer.analyse_text(text) - 0.05
class HtmlDjangoLexer(DelegatingLexer):
"""
    Subclass of the `DjangoLexer` that highlights unlexed data with the
    `HtmlLexer`.
    Nested Javascript and CSS are highlighted too.
"""
name = 'HTML+Django/Jinja'
aliases = ['html+django', 'html+jinja', 'htmldjango']
alias_filenames = ['*.html', '*.htm', '*.xhtml']
mimetypes = ['text/html+django', 'text/html+jinja']
def __init__(self, **options):
super(HtmlDjangoLexer, self).__init__(HtmlLexer, DjangoLexer, **options)
def analyse_text(text):
rv = DjangoLexer.analyse_text(text) - 0.01
if html_doctype_matches(text):
rv += 0.5
return rv
class XmlDjangoLexer(DelegatingLexer):
"""
Subclass of the `DjangoLexer` that highlights unlexed data with the
`XmlLexer`.
"""
name = 'XML+Django/Jinja'
aliases = ['xml+django', 'xml+jinja']
alias_filenames = ['*.xml']
mimetypes = ['application/xml+django', 'application/xml+jinja']
def __init__(self, **options):
super(XmlDjangoLexer, self).__init__(XmlLexer, DjangoLexer, **options)
def analyse_text(text):
rv = DjangoLexer.analyse_text(text) - 0.01
if looks_like_xml(text):
rv += 0.4
return rv
class CssDjangoLexer(DelegatingLexer):
"""
Subclass of the `DjangoLexer` that highlights unlexed data with the
`CssLexer`.
"""
name = 'CSS+Django/Jinja'
aliases = ['css+django', 'css+jinja']
alias_filenames = ['*.css']
mimetypes = ['text/css+django', 'text/css+jinja']
def __init__(self, **options):
super(CssDjangoLexer, self).__init__(CssLexer, DjangoLexer, **options)
def analyse_text(text):
return DjangoLexer.analyse_text(text) - 0.05
class JavascriptDjangoLexer(DelegatingLexer):
"""
Subclass of the `DjangoLexer` that highlights unlexed data with the
`JavascriptLexer`.
"""
name = 'JavaScript+Django/Jinja'
aliases = ['js+django', 'javascript+django',
'js+jinja', 'javascript+jinja']
alias_filenames = ['*.js']
mimetypes = ['application/x-javascript+django',
'application/x-javascript+jinja',
'text/x-javascript+django',
'text/x-javascript+jinja',
'text/javascript+django',
'text/javascript+jinja']
def __init__(self, **options):
super(JavascriptDjangoLexer, self).__init__(JavascriptLexer, DjangoLexer,
**options)
def analyse_text(text):
return DjangoLexer.analyse_text(text) - 0.05
class JspRootLexer(RegexLexer):
"""
Base for the `JspLexer`. Yields `Token.Other` for areas outside of
JSP tags.
*New in Pygments 0.7.*
"""
tokens = {
'root': [
(r'<%\S?', Keyword, 'sec'),
# FIXME: I want to make these keywords but still parse attributes.
(r'</?jsp:(forward|getProperty|include|plugin|setProperty|useBean).*?>',
Keyword),
(r'[^<]+', Other),
(r'<', Other),
],
'sec': [
(r'%>', Keyword, '#pop'),
# note: '\w\W' != '.' without DOTALL.
(r'[\w\W]+?(?=%>|\Z)', using(JavaLexer)),
],
}
class JspLexer(DelegatingLexer):
"""
Lexer for Java Server Pages.
*New in Pygments 0.7.*
"""
name = 'Java Server Page'
aliases = ['jsp']
filenames = ['*.jsp']
mimetypes = ['application/x-jsp']
def __init__(self, **options):
super(JspLexer, self).__init__(XmlLexer, JspRootLexer, **options)
def analyse_text(text):
rv = JavaLexer.analyse_text(text) - 0.01
if looks_like_xml(text):
rv += 0.4
if '<%' in text and '%>' in text:
rv += 0.1
return rv
class EvoqueLexer(RegexLexer):
"""
For files using the Evoque templating system.
*New in Pygments 1.1.*
"""
name = 'Evoque'
aliases = ['evoque']
filenames = ['*.evoque']
mimetypes = ['application/x-evoque']
flags = re.DOTALL
tokens = {
'root': [
(r'[^#$]+', Other),
(r'#\[', Comment.Multiline, 'comment'),
(r'\$\$', Other),
# svn keywords
(r'\$\w+:[^$\n]*\$', Comment.Multiline),
# directives: begin, end
(r'(\$)(begin|end)(\{(%)?)(.*?)((?(4)%)\})',
bygroups(Punctuation, Name.Builtin, Punctuation, None,
String, Punctuation)),
# directives: evoque, overlay
# see doc for handling first name arg: /directives/evoque/
#+ minor inconsistency: the "name" in e.g. $overlay{name=site_base}
# should be using(PythonLexer), not passed out as String
(r'(\$)(evoque|overlay)(\{(%)?)(\s*[#\w\-"\'.]+[^=,%}]+?)?'
r'(.*?)((?(4)%)\})',
bygroups(Punctuation, Name.Builtin, Punctuation, None,
String, using(PythonLexer), Punctuation)),
# directives: if, for, prefer, test
(r'(\$)(\w+)(\{(%)?)(.*?)((?(4)%)\})',
bygroups(Punctuation, Name.Builtin, Punctuation, None,
using(PythonLexer), Punctuation)),
# directive clauses (no {} expression)
(r'(\$)(else|rof|fi)', bygroups(Punctuation, Name.Builtin)),
# expressions
(r'(\$\{(%)?)(.*?)((!)(.*?))?((?(2)%)\})',
bygroups(Punctuation, None, using(PythonLexer),
Name.Builtin, None, None, Punctuation)),
(r'#', Other),
],
'comment': [
(r'[^\]#]', Comment.Multiline),
(r'#\[', Comment.Multiline, '#push'),
(r'\]#', Comment.Multiline, '#pop'),
(r'[\]#]', Comment.Multiline)
],
}
class EvoqueHtmlLexer(DelegatingLexer):
"""
Subclass of the `EvoqueLexer` that highlights unlexed data with the
`HtmlLexer`.
*New in Pygments 1.1.*
"""
name = 'HTML+Evoque'
aliases = ['html+evoque']
filenames = ['*.html']
mimetypes = ['text/html+evoque']
def __init__(self, **options):
super(EvoqueHtmlLexer, self).__init__(HtmlLexer, EvoqueLexer,
**options)
class EvoqueXmlLexer(DelegatingLexer):
"""
Subclass of the `EvoqueLexer` that highlights unlexed data with the
`XmlLexer`.
*New in Pygments 1.1.*
"""
name = 'XML+Evoque'
aliases = ['xml+evoque']
filenames = ['*.xml']
mimetypes = ['application/xml+evoque']
def __init__(self, **options):
super(EvoqueXmlLexer, self).__init__(XmlLexer, EvoqueLexer,
**options)
class ColdfusionLexer(RegexLexer):
"""
Coldfusion statements
"""
name = 'cfstatement'
aliases = ['cfs']
filenames = []
mimetypes = []
flags = re.IGNORECASE | re.MULTILINE
tokens = {
'root': [
(r'//.*', Comment),
(r'\+\+|--', Operator),
(r'[-+*/^&=!]', Operator),
(r'<=|>=|<|>', Operator),
(r'mod\b', Operator),
(r'(eq|lt|gt|lte|gte|not|is|and|or)\b', Operator),
(r'\|\||&&', Operator),
(r'"', String.Double, 'string'),
# There is a special rule for allowing html in single quoted
# strings, evidently.
(r"'.*?'", String.Single),
(r'\d+', Number),
(r'(if|else|len|var|case|default|break|switch)\b', Keyword),
(r'([A-Za-z_$][A-Za-z0-9_.]*)(\s*)(\()',
bygroups(Name.Function, Text, Punctuation)),
(r'[A-Za-z_$][A-Za-z0-9_.]*', Name.Variable),
(r'[()\[\]{};:,.\\]', Punctuation),
(r'\s+', Text),
],
'string': [
(r'""', String.Double),
(r'#.+?#', String.Interp),
(r'[^"#]+', String.Double),
(r'#', String.Double),
(r'"', String.Double, '#pop'),
],
}
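# Illustrative example (not from the Pygments docs): in the Coldfusion
# expression  msg = "Hello #name#, you said ""hi"""  the 'string' state above
# emits #name# as String.Interp and each doubled quote "" as an escaped
# String.Double before the final " pops back to 'root'.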
class ColdfusionMarkupLexer(RegexLexer):
"""
Coldfusion markup only
"""
name = 'Coldfusion'
aliases = ['cf']
filenames = []
mimetypes = []
tokens = {
'root': [
(r'[^<]+', Other),
include('tags'),
(r'<[^<>]*', Other),
],
'tags': [
(r'(?s)<!---.*?--->', Comment.Multiline),
(r'(?s)<!--.*?-->', Comment),
(r'<cfoutput.*?>', Name.Builtin, 'cfoutput'),
(r'(?s)(<cfscript.*?>)(.+?)(</cfscript.*?>)',
bygroups(Name.Builtin, using(ColdfusionLexer), Name.Builtin)),
# negative lookbehind is for strings with embedded >
(r'(?s)(</?cf(?:component|include|if|else|elseif|loop|return|'
r'dbinfo|dump|abort|location|invoke|throw|file|savecontent|'
r'mailpart|mail|header|content|zip|image|lock|argument|try|'
r'catch|break|directory|http|set|function|param)\b)(.*?)((?<!\\)>)',
bygroups(Name.Builtin, using(ColdfusionLexer), Name.Builtin)),
],
'cfoutput': [
(r'[^#<]+', Other),
(r'(#)(.*?)(#)', bygroups(Punctuation, using(ColdfusionLexer),
Punctuation)),
#(r'<cfoutput.*?>', Name.Builtin, '#push'),
(r'</cfoutput.*?>', Name.Builtin, '#pop'),
include('tags'),
(r'(?s)<[^<>]*', Other),
(r'#', Other),
],
}
class ColdfusionHtmlLexer(DelegatingLexer):
"""
Coldfusion markup in html
"""
name = 'Coldfusion HTML'
aliases = ['cfm']
filenames = ['*.cfm', '*.cfml', '*.cfc']
mimetypes = ['application/x-coldfusion']
def __init__(self, **options):
super(ColdfusionHtmlLexer, self).__init__(HtmlLexer, ColdfusionMarkupLexer,
**options)
class SspLexer(DelegatingLexer):
"""
Lexer for Scalate Server Pages.
*New in Pygments 1.4.*
"""
name = 'Scalate Server Page'
aliases = ['ssp']
filenames = ['*.ssp']
mimetypes = ['application/x-ssp']
def __init__(self, **options):
super(SspLexer, self).__init__(XmlLexer, JspRootLexer, **options)
def analyse_text(text):
rv = 0.0
if re.search(r'val \w+\s*:', text):
rv += 0.6
if looks_like_xml(text):
rv += 0.2
if '<%' in text and '%>' in text:
rv += 0.1
return rv
class TeaTemplateRootLexer(RegexLexer):
"""
Base for the `TeaTemplateLexer`. Yields `Token.Other` for areas outside of
code blocks.
*New in Pygments 1.5.*
"""
tokens = {
'root': [
(r'<%\S?', Keyword, 'sec'),
(r'[^<]+', Other),
(r'<', Other),
],
'sec': [
(r'%>', Keyword, '#pop'),
# note: '\w\W' != '.' without DOTALL.
(r'[\w\W]+?(?=%>|\Z)', using(TeaLangLexer)),
],
}
class TeaTemplateLexer(DelegatingLexer):
"""
Lexer for `Tea Templates <http://teatrove.org/>`_.
*New in Pygments 1.5.*
"""
name = 'Tea'
aliases = ['tea']
filenames = ['*.tea']
mimetypes = ['text/x-tea']
def __init__(self, **options):
super(TeaTemplateLexer, self).__init__(XmlLexer,
TeaTemplateRootLexer, **options)
def analyse_text(text):
rv = TeaLangLexer.analyse_text(text) - 0.01
if looks_like_xml(text):
rv += 0.4
if '<%' in text and '%>' in text:
rv += 0.1
return rv
class LassoHtmlLexer(DelegatingLexer):
"""
Subclass of the `LassoLexer` which highlights unhandled data with the
`HtmlLexer`.
Nested JavaScript and CSS is also highlighted.
*New in Pygments 1.6.*
"""
name = 'HTML+Lasso'
aliases = ['html+lasso']
alias_filenames = ['*.html', '*.htm', '*.xhtml', '*.lasso', '*.lasso[89]',
'*.incl', '*.inc', '*.las']
mimetypes = ['text/html+lasso',
'application/x-httpd-lasso',
'application/x-httpd-lasso[89]']
def __init__(self, **options):
super(LassoHtmlLexer, self).__init__(HtmlLexer, LassoLexer, **options)
def analyse_text(text):
rv = LassoLexer.analyse_text(text) - 0.01
if re.search(r'<\w+>', text, re.I):
rv += 0.2
if html_doctype_matches(text):
rv += 0.5
return rv
class LassoXmlLexer(DelegatingLexer):
"""
Subclass of the `LassoLexer` which highlights unhandled data with the
`XmlLexer`.
*New in Pygments 1.6.*
"""
name = 'XML+Lasso'
aliases = ['xml+lasso']
alias_filenames = ['*.xml', '*.lasso', '*.lasso[89]',
'*.incl', '*.inc', '*.las']
mimetypes = ['application/xml+lasso']
def __init__(self, **options):
super(LassoXmlLexer, self).__init__(XmlLexer, LassoLexer, **options)
def analyse_text(text):
rv = LassoLexer.analyse_text(text) - 0.01
if looks_like_xml(text):
rv += 0.4
return rv
class LassoCssLexer(DelegatingLexer):
"""
Subclass of the `LassoLexer` which highlights unhandled data with the
`CssLexer`.
*New in Pygments 1.6.*
"""
name = 'CSS+Lasso'
aliases = ['css+lasso']
alias_filenames = ['*.css']
mimetypes = ['text/css+lasso']
def __init__(self, **options):
options['requiredelimiters'] = True
super(LassoCssLexer, self).__init__(CssLexer, LassoLexer, **options)
def analyse_text(text):
rv = LassoLexer.analyse_text(text) - 0.05
if re.search(r'\w+:.+?;', text):
rv += 0.1
if 'padding:' in text:
rv += 0.1
return rv
class LassoJavascriptLexer(DelegatingLexer):
"""
Subclass of the `LassoLexer` which highlights unhandled data with the
`JavascriptLexer`.
*New in Pygments 1.6.*
"""
name = 'JavaScript+Lasso'
aliases = ['js+lasso', 'javascript+lasso']
alias_filenames = ['*.js']
mimetypes = ['application/x-javascript+lasso',
'text/x-javascript+lasso',
'text/javascript+lasso']
def __init__(self, **options):
options['requiredelimiters'] = True
super(LassoJavascriptLexer, self).__init__(JavascriptLexer, LassoLexer,
**options)
def analyse_text(text):
rv = LassoLexer.analyse_text(text) - 0.05
if 'function' in text:
rv += 0.2
return rv
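# Rough usage sketch (illustrative, not part of this module): the
# analyse_text() scores defined by the lexers above are consumed by
# pygments.lexers.guess_lexer(), which picks the highest-scoring candidate.
#
#     from pygments.lexers import guess_lexer
#     lexer = guess_lexer('<!DOCTYPE html>\n<p>{$title|escape}</p>')
#     # html_doctype_matches() adds 0.5 for the doctype, so an HTML-flavoured
#     # template lexer such as HtmlSmartyLexer tends to win here.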
|
RydrDojo/Ridr
|
refs/heads/master
|
pylotVenv/lib/python2.7/site-packages/mako/ext/autohandler.py
|
61
|
# ext/autohandler.py
# Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""adds autohandler functionality to Mako templates.
requires that the TemplateLookup class is used with templates.
usage:
<%!
from mako.ext.autohandler import autohandler
%>
<%inherit file="${autohandler(template, context)}"/>
or with custom autohandler filename:
<%!
from mako.ext.autohandler import autohandler
%>
<%inherit file="${autohandler(template, context, name='somefilename')}"/>
"""
import posixpath
import os
import re
def autohandler(template, context, name='autohandler'):
lookup = context.lookup
_template_uri = template.module._template_uri
if not lookup.filesystem_checks:
try:
return lookup._uri_cache[(autohandler, _template_uri, name)]
except KeyError:
pass
tokens = re.findall(r'([^/]+)', posixpath.dirname(_template_uri)) + [name]
while len(tokens):
path = '/' + '/'.join(tokens)
if path != _template_uri and _file_exists(lookup, path):
if not lookup.filesystem_checks:
return lookup._uri_cache.setdefault(
(autohandler, _template_uri, name), path)
else:
return path
if len(tokens) == 1:
break
tokens[-2:] = [name]
if not lookup.filesystem_checks:
return lookup._uri_cache.setdefault(
(autohandler, _template_uri, name), None)
else:
return None
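# A hypothetical walk-through of the loop above (not from the Mako docs): for
# a template at URI '/admin/users/index.html' with the default name, the
# candidate paths are tried in the order '/admin/users/autohandler',
# '/admin/autohandler', '/autohandler', and the first one that exists under
# lookup.directories is returned (and cached when filesystem_checks is off).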
def _file_exists(lookup, path):
psub = re.sub(r'^/', '', path)
for d in lookup.directories:
if os.path.exists(d + '/' + psub):
return True
else:
return False
|
ruanima/flasky-test
|
refs/heads/master
|
app/main/__init__.py
|
1
|
#coding=utf-8
from flask import Blueprint
main = Blueprint('main', __name__)
from . import views, errors
from ..models import Permission
@main.app_context_processor
def inject_permissions():
return dict(Permission=Permission)
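# With the context processor above, templates rendered under this blueprint
# can reference Permission directly; a purely illustrative Jinja2 snippet:
#
#     {% if current_user.can(Permission.COMMENT) %} ... {% endif %}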
|
eugene1g/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/commands/openbugs.py
|
124
|
# Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import re
import sys
from webkitpy.tool.multicommandtool import Command
_log = logging.getLogger(__name__)
class OpenBugs(Command):
name = "open-bugs"
help_text = "Finds all bug numbers passed in arguments (or stdin if no args provided) and opens them in a web browser"
bug_number_regexp = re.compile(r"\b\d{4,6}\b")
def _open_bugs(self, bug_ids):
for bug_id in bug_ids:
bug_url = self._tool.bugs.bug_url_for_bug_id(bug_id)
self._tool.user.open_url(bug_url)
# _find_bugs_in_string mostly exists for easy unit testing.
def _find_bugs_in_string(self, string):
return self.bug_number_regexp.findall(string)
def _find_bugs_in_iterable(self, iterable):
return sum([self._find_bugs_in_string(string) for string in iterable], [])
def execute(self, options, args, tool):
if args:
bug_ids = self._find_bugs_in_iterable(args)
else:
# This won't open bugs until stdin is closed but could be made to easily. That would just make unit testing slightly harder.
bug_ids = self._find_bugs_in_iterable(sys.stdin)
_log.info("%s bugs found in input." % len(bug_ids))
self._open_bugs(bug_ids)
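# Illustrative behaviour of the helpers above (not actual unit tests):
# _find_bugs_in_string("See bug 12345 and bug 99999") -> ['12345', '99999'],
# while ids shorter than 4 or longer than 6 digits (e.g. '123', '1234567')
# fall outside the \b\d{4,6}\b pattern and are ignored.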
|
SchrodingersGat/kicad-footprint-generator
|
refs/heads/master
|
scripts/Connector/Connector_Molex/conn_molex_slimstack-502430.py
|
1
|
#!/usr/bin/env python3
# KicadModTree is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# KicadModTree is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with kicad-footprint-generator. If not, see < http://www.gnu.org/licenses/ >.
#
# (C) 2016 by Thomas Pointhuber, <thomas.pointhuber@gmx.at>
"""
This family of parts is spread over 3 datasheets, depending on the 3rd number in the PN suffix:
502430-xx10 (14-80 pin):
http://www.molex.com/pdm_docs/sd/5024301410_sd.pdf
502430-0820 (8 pin)
http://www.molex.com/pdm_docs/sd/5024300820_sd.pdf
502430-xx30 (14-90 pin):
http://www.molex.com/pdm_docs/sd/5024307030_sd.pdf
"""
import sys
import os
#sys.path.append(os.path.join(sys.path[0],"..","..","kicad_mod")) # load kicad_mod path
# export PYTHONPATH="${PYTHONPATH}<path to kicad-footprint-generator directory>"
sys.path.append(os.path.join(sys.path[0], "..", "..", "..")) # load parent path of KicadModTree
from math import sqrt
import argparse
import yaml
from helpers import *
from KicadModTree import *
sys.path.append(os.path.join(sys.path[0], "..", "..", "tools")) # load parent path of tools
from footprint_text_fields import addTextFields
series = "SlimStack"
series_long = 'SlimStack Fine-Pitch SMT Board-to-Board Connectors'
manufacturer = 'Molex'
orientation = 'V'
number_of_rows = 2
# pins per row
valid_pns = [
"1410","2010","2210","2410","2610","3010","3210","3410","4010","4410","5010","6010","6410","8010",
"0820",
#"1430","2030","2230","2430","2630","3230","4030","5030","6030","7030","8030","9030"
]
#Molex part number
#n = number of circuits per row
part_code = "502430-{pn:s}"
pitch = 0.4
def generate_one_footprint(partnumber, configuration):
pincount = int(partnumber[:2])
if str(partnumber)[2:3] == "1":
datasheet = "http://www.molex.com/pdm_docs/sd/5024301410_sd.pdf"
elif str(partnumber)[2:3] == "2":
datasheet = "http://www.molex.com/pdm_docs/sd/5024300820_sd.pdf"
elif str(partnumber)[2:3] == "3":
datasheet = "http://www.molex.com/pdm_docs/sd/5024307030_sd.pdf"
mpn = part_code.format(pn=partnumber)
# handle arguments
orientation_str = configuration['orientation_options'][orientation]
footprint_name = configuration['fp_name_format_string'].format(man=manufacturer,
series=series,
mpn=mpn, num_rows=number_of_rows, pins_per_row=pincount//2, mounting_pad = "",
pitch=pitch, orientation=orientation_str)
kicad_mod = Footprint(footprint_name)
kicad_mod.setDescription("Molex {:s}, {:s}, {:d} Pins ({:s}), generated with kicad-footprint-generator".format(series_long, mpn, pincount, datasheet))
kicad_mod.setTags(configuration['keyword_fp_string'].format(series=series,
orientation=orientation_str, man=manufacturer,
entry=configuration['entry_direction'][orientation]))
kicad_mod.setAttribute('smd')
# calculate working values
pad_x_spacing = pitch
pad_y_spacing = 1.05 + 0.66
pad_width = 0.22
pad_height = 0.66
pad_x_span = pad_x_spacing * ((pincount / 2) - 1)
nail_x = pad_x_span / 2.0 + 0.95
half_body_width = 1.54 / 2.0
half_body_length = (pad_x_span / 2.0) + 1.33
fab_width = configuration['fab_line_width']
outline_x = half_body_length - (pad_x_span / 2.0) - pad_width/2 - (configuration['silk_pad_clearance'] + configuration['silk_line_width']/2)
marker_y = 0.35
silk_width = configuration['silk_line_width']
nudge = configuration['silk_fab_offset']
courtyard_width = configuration['courtyard_line_width']
courtyard_precision = configuration['courtyard_grid']
courtyard_clearance = configuration['courtyard_offset']['connector']
courtyard_x = roundToBase(half_body_length + courtyard_clearance, courtyard_precision)
courtyard_y = roundToBase((pad_y_spacing + pad_height) / 2.0 + courtyard_clearance, courtyard_precision)
# create pads
kicad_mod.append(PadArray(pincount=pincount//2, x_spacing=pad_x_spacing, y_spacing=0,center=[0,-pad_y_spacing/2.0],\
initial=1, increment=2, type=Pad.TYPE_SMT, shape=Pad.SHAPE_RECT, size=[pad_width, pad_height],layers=Pad.LAYERS_SMT))
kicad_mod.append(PadArray(pincount=pincount//2, x_spacing=pad_x_spacing, y_spacing=0,center=[0,pad_y_spacing/2.0],\
initial=2, increment=2, type=Pad.TYPE_SMT, shape=Pad.SHAPE_RECT, size=[pad_width, pad_height],layers=Pad.LAYERS_SMT))
# create "fitting nail" (npth mounting) holes
#kicad_mod.append(Pad(at=[-nail_x, 0], type=Pad.TYPE_NPTH, shape=Pad.SHAPE_RECT, size=[0.35, 0.44], drill=[0.35, 0.44], layers=['*.Cu', '*.Mask']))
#kicad_mod.append(Pad(at=[nail_x, 0], type=Pad.TYPE_NPTH, shape=Pad.SHAPE_RECT, size=[0.35, 0.44], drill=[0.35, 0.44], layers=['*.Cu', '*.Mask']))
kicad_mod.append(RectLine(start=[-nail_x - 0.35 / 2.0, -0.22], end=[-nail_x + 0.35 / 2.0, 0.22], layer='Edge.Cuts', width=fab_width))
kicad_mod.append(RectLine(start=[nail_x - 0.35 / 2.0, -0.22], end=[nail_x + 0.35 / 2.0, 0.22], layer='Edge.Cuts', width=fab_width))
# create fab outline and pin 1 marker
kicad_mod.append(RectLine(start=[-half_body_length, -half_body_width], end=[half_body_length, half_body_width], layer='F.Fab', width=fab_width))
body_edge={
'left':-half_body_length,
'top':-half_body_width
}
body_edge['right'] = -body_edge['left']
body_edge['bottom'] = -body_edge['top']
kicad_mod.append(Line(start=[-half_body_length+outline_x, -half_body_width], end=[-half_body_length+outline_x, -half_body_width-marker_y], layer='F.Fab', width=fab_width))
# create silkscreen outline and pin 1 marker
left_outline = [[-half_body_length+outline_x, half_body_width+nudge], [-half_body_length-nudge, half_body_width+nudge], [-half_body_length-nudge, -half_body_width-nudge],\
[-half_body_length+outline_x, -half_body_width-nudge], [-half_body_length+outline_x, -half_body_width-marker_y]]
right_outline = [[half_body_length-outline_x, half_body_width+nudge], [half_body_length+nudge, half_body_width+nudge], [half_body_length+nudge, -half_body_width-nudge],\
[half_body_length-outline_x, -half_body_width-nudge]]
kicad_mod.append(PolygoneLine(polygone=left_outline, layer='F.SilkS', width=silk_width))
kicad_mod.append(PolygoneLine(polygone=right_outline, layer='F.SilkS', width=silk_width))
# create courtyard
kicad_mod.append(RectLine(start=[-courtyard_x, -courtyard_y], end=[courtyard_x, courtyard_y], layer='F.CrtYd', width=courtyard_width))
######################### Text Fields ###############################
addTextFields(kicad_mod=kicad_mod, configuration=configuration, body_edges=body_edge,
courtyard={'top':-courtyard_y, 'bottom':+courtyard_y},
fp_name=footprint_name, text_y_inside_position='center')
##################### Output and 3d model ############################
model3d_path_prefix = configuration.get('3d_model_prefix','${KISYS3DMOD}/')
lib_name = configuration['lib_name_format_string'].format(series=series, man=manufacturer)
model_name = '{model3d_path_prefix:s}{lib_name:s}.3dshapes/{fp_name:s}.wrl'.format(
model3d_path_prefix=model3d_path_prefix, lib_name=lib_name, fp_name=footprint_name)
kicad_mod.append(Model(filename=model_name))
output_dir = '{lib_name:s}.pretty/'.format(lib_name=lib_name)
if not os.path.isdir(output_dir): #returns false if path does not yet exist!! (Does not check path validity)
os.makedirs(output_dir)
filename = '{outdir:s}{fp_name:s}.kicad_mod'.format(outdir=output_dir, fp_name=footprint_name)
file_handler = KicadFileHandler(kicad_mod)
file_handler.writeFile(filename)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='use config .yaml files to create footprints.')
parser.add_argument('--global_config', type=str, nargs='?', help='the config file defining how the footprint will look like. (KLC)', default='../../tools/global_config_files/config_KLCv3.0.yaml')
parser.add_argument('--series_config', type=str, nargs='?', help='the config file defining series parameters.', default='../conn_config_KLCv3.yaml')
args = parser.parse_args()
with open(args.global_config, 'r') as config_stream:
try:
configuration = yaml.load(config_stream)
except yaml.YAMLError as exc:
print(exc)
with open(args.series_config, 'r') as config_stream:
try:
configuration.update(yaml.load(config_stream))
except yaml.YAMLError as exc:
print(exc)
for partnumber in valid_pns:
generate_one_footprint(partnumber, configuration)
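# Usage sketch (illustrative): run the script with no arguments to pick up the
# default --global_config and --series_config paths above and generate one
# .kicad_mod file per entry in valid_pns:
#
#   python3 conn_molex_slimstack-502430.py
#
# Output lands in a '<lib_name>.pretty/' directory whose name is built from
# configuration['lib_name_format_string'], the series and the manufacturer.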
|
LeeKamentsky/CellProfiler
|
refs/heads/master
|
cellprofiler/modules/tests/test_makeprojection.py
|
2
|
'''test_makeprojection - Test the MakeProjection module
CellProfiler is distributed under the GNU General Public License.
See the accompanying file LICENSE for details.
Copyright (c) 2003-2009 Massachusetts Institute of Technology
Copyright (c) 2009-2015 Broad Institute
All rights reserved.
Please see the AUTHORS file for credits.
Website: http://www.cellprofiler.org
'''
import base64
from matplotlib.image import pil_to_array
import numpy as np
import os
import PIL.Image as PILImage
import scipy.ndimage
from StringIO import StringIO
import unittest
import zlib
from cellprofiler.preferences import set_headless
set_headless()
import cellprofiler.pipeline as cpp
import cellprofiler.cpmodule as cpm
import cellprofiler.cpimage as cpi
import cellprofiler.measurements as cpmeas
import cellprofiler.objects as cpo
import cellprofiler.workspace as cpw
import cellprofiler.modules.makeprojection as M
IMAGE_NAME = 'image'
PROJECTED_IMAGE_NAME = 'projectedimage'
class TestMakeProjection(unittest.TestCase):
def test_01_01_load_matlab(self):
data = ('eJzzdQzxcXRSMNUzUPB1DNFNy8xJ1VEIyEksScsvyrVSCHAO9/TTUXAuSk0s'
'SU1RyM+zUgjJKFXwKs1RMDRTMLC0MjSxMjZXMDIwsFQgGTAwevryMzAw/Gdk'
'YKiY8zbc2/+Qg4BcBuNThwmT2fltqz6K6ssdTFDqVBB0SnqU2sgaflvvZbNI'
'/Uz3+lm7L1zaxNT9dKEh38ycNOvPb88cP1c2m5lBn31C2mO5yyo+Kk9vd3YU'
'7W6UbZ9Ra82qJfPrs7xP9AmlFWsfi12KzTkc0vSz6+bisxcTfq3w2r2uL/tE'
'h5Xxyp1u0tHfavU5vshf72z/ylZ52EC78TaznNDsgMv93z8evfly1xXBa6ki'
'B6rVnigqflhgoOvybGe9oFN9KV/+z476e9fVvs2ZLM1fKnPWwe/5zMdzvAum'
'SMqwntoqlGPsN7czeGHMqvCKO1NV9JSvnH57SSB6Rb9iXo1o5ZGC3q2vdL0e'
'bTq066ZBPp/hNNNP+9NkBa37ja76vMpY13vYJk/VgpWx/Xa5SOnWroNem0yT'
'7zDfPnw7ZO6jH/27Y2Mi61mtDvoeeNr3efLby8yM028feTNJ8eUuj+snKraf'
'Oxi79d8TnjqhrBJjm3nHnhTGr5h+u5a79w0f1y3DsLpHlr9ORPz23Hek5oyx'
'iXi7tV51vfvPqPL9febB9xe9S/hs0e0m+W/Pb7eO9RvDDjTf79j8tip1z7+d'
'X4W6fzu8Wb7j97T9/7UnMpeKzpnTcPitVtXR0u59D/oOv3s5+2jnPO1MTn7P'
'NNEQ02s/axk/XvPWPDW9eqmO39faeX1Rb57Xbz/w/d/7x6r/Gt+c+i/ct++O'
'NwB/3SPw')
pipeline = cpp.Pipeline()
def callback(caller,event):
self.assertFalse(isinstance(event, cpp.LoadExceptionEvent))
pipeline.add_listener(callback)
pipeline.load(StringIO(zlib.decompress(base64.b64decode(data))))
self.assertEqual(len(pipeline.modules()), 3)
#
# Module 2 - image name = DNA, projection_image_name = AverageDNA
# projection_type = Average
#
# Module 3 - image name = DNA, projection_image_name = MaxBlue
# projection_type = Maximum
#
for i,projection_image_name, projection_type\
in ((1,"AverageDNA",M.P_AVERAGE),
(2,"MaxBlue", M.P_MAXIMUM)):
module = pipeline.modules()[i]
self.assertTrue(isinstance(module, M.MakeProjection))
self.assertEqual(module.image_name.value, "DNA")
self.assertEqual(module.projection_image_name.value,
projection_image_name)
self.assertEqual(module.projection_type, projection_type)
def test_01_02_load_v1(self):
data = ('eJztWd1OGkEUHn60WtuGXrVJb+ZSWtgsaBskjYLStLSiRImNMbYdYYBpd3fI'
'MGuljUkfq5d9lD5CH6EzuAvsCiwgupqwyQTP2fPNN+c7O7szYyFb2s5uwpeK'
'CgvZUrxKNAyLGuJVyvQ0NHgMbjGMOK5AaqRhqW7C96YGE6+gmkqvpNKrKkyq'
'6hqY7ArkC4/Ej/oMgHnxuyBa0Lo1Z9mBnibtfcw5MWrNORAGTy3/H9EOECPo'
'RMMHSDNxs0th+/NGlZZajc6tAq2YGt5Bem+wuHZM/QSz5m7VBlq3i+QMa/vk'
'B3alYIft4VPSJNSw8Fb/bm+Hl3IXr9Th71JXh4BLh5BokR6/jH8HuvHhPro9'
'7omPWDYxKuSUVEykQaKjWmcUsr+UR39zrv6kvUU1ykbE33Phpb3LSG0PV9r4'
'jAc+4sLLVsJnPP7mDJU51BEv1yfNY7+hEX6FPLKnmAk523jVAx9w4ANgZUTe'
'BeDkXbD0e8tQaxT9Hrrw0i4y+hWXuXhGZRXASHW47+pH2jkKDcqh2bQmyCg6'
'hBz9hEBCUS/h5l04+7Jxi2B0vqCDLwh2qD/j9Jq3T4BTX2nncBWZGod5OWlh'
'jjBRNMpavus8zfq4x3ko3k5+8F31PeanPpM+9zcxzzIeuEXg1FXaF+83jI0B'
'/NepLydVX/S97u/woO/Iplg6Xae+7u9eog/uNryHwo5xhkU9janpMg7ul8c4'
'PwBnHaX9aXmj+Fou4PG68iL6WVofsabt0e/rR9l48Thqe8QDY+rG+pEaXzv+'
'mYglzy+C94lAtp3RK49/UlzdI++UK29py7EfYsSshFbPo3HpKlCD1y1f0vLl'
'UKvr8SO/5THX+ZPyZDx07LeOa28Kaoyajen34+c8vwlcBgzXqd/+pasTFFsz'
'3JhmP3dFtxluhvNjnt2VfGf63k5cBkxHp2n1c1d0m+FmuLH2A4HB62X3vl3G'
'fwHD59Nz4JxP0i6LLVGDUfn/D6bo7UP6pqJRVLk4JVe2xZ/5ngPzUXhiLp7Y'
'IJ6y3LxzWmOopbQ38iUqz3Q7+XvwJF08yUE8OvqGG50DX6UgzO75b/86Lfbh'
'69U7KKzIg9DQ+gLgrGu33v82JuELBQKXzjmWPHDhnjHZef4G4z1Xy0Pi7Rxv'
'Kv4/DzlPxw==')
pipeline = cpp.Pipeline()
def callback(caller,event):
self.assertFalse(isinstance(event, cpp.LoadExceptionEvent))
pipeline.add_listener(callback)
pipeline.load(StringIO(zlib.decompress(base64.b64decode(data))))
module = pipeline.modules()[2]
self.assertTrue(isinstance(module, M.MakeProjection))
self.assertEqual(module.image_name.value, "OrigRed")
self.assertEqual(module.projection_image_name.value, "ProjectionRed")
self.assertEqual(module.projection_type, M.P_AVERAGE)
def test_01_03_load_v2(self):
data = r"""CellProfiler Pipeline: http://www.cellprofiler.org
Version:1
SVNRevision:10000
MakeProjection:[module_num:1|svn_version:\'9999\'|variable_revision_number:2|show_window:False|notes:\x5B\x5D]
Select the input image:ch02
Type of projection:Average
Name the output image:ProjectionCh00Scale6
Frequency\x3A:6
MakeProjection:[module_num:2|svn_version:\'9999\'|variable_revision_number:2|show_window:False|notes:\x5B\x5D]
Select the input image:ch02
Type of projection:Maximum
Name the output image:ProjectionCh00Scale6
Frequency\x3A:6
MakeProjection:[module_num:3|svn_version:\'9999\'|variable_revision_number:2|show_window:False|notes:\x5B\x5D]
Select the input image:ch02
Type of projection:Minimum
Name the output image:ProjectionCh00Scale6
Frequency\x3A:6
MakeProjection:[module_num:4|svn_version:\'9999\'|variable_revision_number:2|show_window:False|notes:\x5B\x5D]
Select the input image:ch02
Type of projection:Sum
Name the output image:ProjectionCh00Scale6
Frequency\x3A:6
MakeProjection:[module_num:5|svn_version:\'9999\'|variable_revision_number:2|show_window:False|notes:\x5B\x5D]
Select the input image:ch02
Type of projection:Variance
Name the output image:ProjectionCh00Scale6
Frequency\x3A:6
MakeProjection:[module_num:6|svn_version:\'9999\'|variable_revision_number:2|show_window:False|notes:\x5B\x5D]
Select the input image:ch02
Type of projection:Power
Name the output image:ProjectionCh00Scale6
Frequency\x3A:6
MakeProjection:[module_num:7|svn_version:\'9999\'|variable_revision_number:2|show_window:False|notes:\x5B\x5D]
Select the input image:ch02
Type of projection:Brightfield
Name the output image:ProjectionCh00Scale6
Frequency\x3A:6
"""
pipeline = cpp.Pipeline()
def callback(caller, event):
self.assertFalse(isinstance(event, cpp.LoadExceptionEvent))
pipeline.add_listener(callback)
pipeline.load(StringIO(data))
methods = (M.P_AVERAGE, M.P_MAXIMUM, M.P_MINIMUM, M.P_SUM, M.P_VARIANCE,
M.P_POWER, M.P_BRIGHTFIELD)
self.assertEqual(len(pipeline.modules()), len(methods))
for method, module in zip(methods, pipeline.modules()):
self.assertTrue(isinstance(module, M.MakeProjection))
self.assertEqual(module.image_name, "ch02")
self.assertEqual(module.projection_type, method)
self.assertEqual(module.projection_image_name, "ProjectionCh00Scale6")
self.assertEqual(module.frequency, 6)
def run_image_set(self, projection_type, images_and_masks,
frequency=9, run_last = True):
image_set_list = cpi.ImageSetList()
image_count = len(images_and_masks)
for i in range(image_count):
pixel_data, mask = images_and_masks[i]
if mask is None:
image = cpi.Image(pixel_data)
else:
image = cpi.Image(pixel_data, mask)
image_set_list.get_image_set(i).add(IMAGE_NAME, image)
#
# Add bogus image at end for 2nd group
#
bogus_image = cpi.Image(np.zeros((10,20)))
image_set_list.get_image_set(image_count).add(IMAGE_NAME, bogus_image)
pipeline = cpp.Pipeline()
module = M.MakeProjection()
module.module_num = 1
module.image_name.value = IMAGE_NAME
module.projection_image_name.value = PROJECTED_IMAGE_NAME
module.projection_type.value = projection_type
module.frequency.value = frequency
pipeline.add_module(module)
m = cpmeas.Measurements()
workspace = cpw.Workspace(pipeline, module, None, None, m, image_set_list)
module.prepare_run(workspace)
module.prepare_group(workspace, {}, range(1,len(images_and_masks)+1))
for i in range(image_count):
if i > 0:
image_set_list.purge_image_set(i-1)
w = cpw.Workspace(pipeline, module,
image_set_list.get_image_set(i),
cpo.ObjectSet(),
m,
image_set_list)
if i < image_count - 1 or run_last:
module.run(w)
module.post_group(w, {})
image = w.image_set.get_image(PROJECTED_IMAGE_NAME)
#
# Make sure that the image provider is reset after prepare_group
#
module.prepare_group(workspace, {}, [image_count+1])
image_set = image_set_list.get_image_set(image_count)
w = cpw.Workspace(pipeline, module,
image_set,
cpo.ObjectSet(),
m,
image_set_list)
module.run(w)
image_provider = image_set.get_image_provider(PROJECTED_IMAGE_NAME)
self.assertEqual(np.max(image_provider.count), 1)
return image
def test_02_01_average(self):
np.random.seed(0)
images_and_masks = [(np.random.uniform(size=(10,10)).astype(np.float32), None)
for i in range(3)]
expected = np.zeros((10,10), np.float32)
for image, mask in images_and_masks:
expected += image
expected = expected / len(images_and_masks)
image = self.run_image_set(M.P_AVERAGE, images_and_masks)
self.assertFalse(image.has_mask)
self.assertTrue(np.all(np.abs(image.pixel_data - expected) <
np.finfo(float).eps))
def test_02_02_average_mask(self):
np.random.seed(0)
images_and_masks = [(np.random.uniform(size=(100,100)).astype(np.float32),
np.random.uniform(size=(100,100)) > .3)
for i in range(3)]
expected = np.zeros((100,100), np.float32)
expected_count = np.zeros((100,100), np.float32)
expected_mask = np.zeros((100,100), bool)
for image, mask in images_and_masks:
expected[mask] += image[mask]
expected_count[mask] += 1
expected_mask = mask | expected_mask
expected = expected / expected_count
image = self.run_image_set(M.P_AVERAGE, images_and_masks)
self.assertTrue(image.has_mask)
self.assertTrue(np.all(expected_mask == image.mask))
np.testing.assert_almost_equal(image.pixel_data[image.mask],
expected[expected_mask])
def test_02_03_average_color(self):
np.random.seed(0)
images_and_masks = [(np.random.uniform(size=(10,10,3)).astype(np.float32), None)
for i in range(3)]
expected = np.zeros((10,10,3), np.float32)
for image, mask in images_and_masks:
expected += image
expected = expected / len(images_and_masks)
image = self.run_image_set(M.P_AVERAGE, images_and_masks)
self.assertFalse(image.has_mask)
self.assertTrue(np.all(np.abs(image.pixel_data - expected) <
np.finfo(float).eps))
def test_02_04_average_masked_color(self):
np.random.seed(0)
images_and_masks = [(np.random.uniform(size=(10,10,3)).astype(np.float32),
np.random.uniform(size=(10,10)) > .3)
for i in range(3)]
expected = np.zeros((10, 10, 3))
expected_count = np.zeros((10, 10), np.float32)
expected_mask = np.zeros((10, 10), bool)
for image, mask in images_and_masks:
expected[mask, :] += image[mask, :]
expected_count[mask] += 1
expected_mask = mask | expected_mask
expected = expected / expected_count[:, :, np.newaxis]
image = self.run_image_set(M.P_AVERAGE, images_and_masks)
self.assertTrue(image.has_mask)
np.testing.assert_equal(image.mask, expected_mask)
np.testing.assert_almost_equal(image.pixel_data[expected_mask],
expected[expected_mask])
def test_03_01_maximum(self):
np.random.seed(0)
images_and_masks = [(np.random.uniform(size=(10,10)).astype(np.float32), None)
for i in range(3)]
expected = np.zeros((10,10), np.float32)
for image, mask in images_and_masks:
expected = np.maximum(expected,image)
image = self.run_image_set(M.P_MAXIMUM, images_and_masks)
self.assertFalse(image.has_mask)
self.assertTrue(np.all(np.abs(image.pixel_data - expected) <
np.finfo(float).eps))
def test_03_02_maximum_mask(self):
np.random.seed(0)
images_and_masks = [(np.random.uniform(size=(100,100)).astype(np.float32),
np.random.uniform(size=(100,100)) > .3)
for i in range(3)]
expected = np.zeros((100,100), np.float32)
expected_mask = np.zeros((100,100), bool)
for image, mask in images_and_masks:
expected[mask] = np.maximum(expected[mask],image[mask])
expected_mask = mask | expected_mask
image = self.run_image_set(M.P_MAXIMUM, images_and_masks)
self.assertTrue(image.has_mask)
self.assertTrue(np.all(expected_mask == image.mask))
self.assertTrue(np.all(np.abs(image.pixel_data[image.mask] -
expected[expected_mask]) <
np.finfo(float).eps))
def test_03_03_maximum_color(self):
np.random.seed(0)
images_and_masks = [(np.random.uniform(size=(10,10,3)).astype(np.float32), None)
for i in range(3)]
expected = np.zeros((10,10,3), np.float32)
for image, mask in images_and_masks:
expected = np.maximum(expected, image)
image = self.run_image_set(M.P_MAXIMUM, images_and_masks)
self.assertFalse(image.has_mask)
self.assertTrue(np.all(np.abs(image.pixel_data - expected) <
np.finfo(float).eps))
def test_04_01_variance(self):
np.random.seed(41)
images_and_masks = [(np.random.uniform(size=(20,10)).astype(np.float32), None)
for i in range(10)]
image = self.run_image_set(M.P_VARIANCE, images_and_masks)
images = np.array([x[0] for x in images_and_masks])
x = np.sum(images, 0)
x2 = np.sum(images**2, 0)
expected = x2 / 10.0 - x**2 / 100.0
np.testing.assert_almost_equal(image.pixel_data, expected, 4)
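# Sanity note on the expected value above: it is the population variance
# E[x^2] - (E[x])^2 over the 10 images, i.e. x2/10 minus (x/10)**2 = x**2/100.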
def test_05_01_power(self):
image = np.ones((20,10))
images_and_masks = [(image.copy(), None) for i in range(9)]
for i, (img, _) in enumerate(images_and_masks):
img[5,5] *= np.sin(2*np.pi * float(i) / 9.0)
image_out = self.run_image_set(M.P_POWER, images_and_masks, frequency=9)
i,j=np.mgrid[:image.shape[0],:image.shape[1]]
np.testing.assert_almost_equal(image_out.pixel_data[(i != 5) & (j != 5)], 0)
self.assertTrue(image_out.pixel_data[5,5] > 1)
def test_06_01_brightfield(self):
image = np.ones((20,10))
images_and_masks = [(image.copy(), None) for i in range(9)]
for i, (img, _) in enumerate(images_and_masks):
if i < 5:
img[:5,:5] = 0
else:
img[:5,5:] = 0
image_out = self.run_image_set(M.P_BRIGHTFIELD, images_and_masks)
i,j=np.mgrid[:image.shape[0],:image.shape[1]]
np.testing.assert_almost_equal(image_out.pixel_data[(i > 5) | (j < 5)], 0)
np.testing.assert_almost_equal(image_out.pixel_data[(i < 5) & (j >= 5)], 1)
def test_07_01_minimum(self):
np.random.seed(0)
images_and_masks = [(np.random.uniform(size=(10,10)).astype(np.float32), None)
for i in range(3)]
expected = np.ones((10,10), np.float32)
for image, mask in images_and_masks:
expected = np.minimum(expected,image)
image = self.run_image_set(M.P_MINIMUM, images_and_masks)
self.assertFalse(image.has_mask)
self.assertTrue(np.all(np.abs(image.pixel_data - expected) <
np.finfo(float).eps))
def test_07_02_minimum_mask(self):
np.random.seed(72)
images_and_masks = [(np.random.uniform(size=(100,100)).astype(np.float32),
np.random.uniform(size=(100,100)) > .3)
for i in range(3)]
expected = np.ones((100,100), np.float32)
expected_mask = np.zeros((100,100), bool)
for image, mask in images_and_masks:
expected[mask] = np.minimum(expected[mask],image[mask])
expected_mask = mask | expected_mask
image = self.run_image_set(M.P_MINIMUM, images_and_masks)
self.assertTrue(image.has_mask)
self.assertTrue(np.any(image.mask == False))
self.assertTrue(np.all(expected_mask == image.mask))
self.assertTrue(np.all(np.abs(image.pixel_data[image.mask] -
expected[expected_mask]) <
np.finfo(float).eps))
self.assertTrue(np.all(image.pixel_data[~image.mask] == 0))
def test_07_03_minimum_color(self):
np.random.seed(0)
images_and_masks = [(np.random.uniform(size=(10,10,3)).astype(np.float32), None)
for i in range(3)]
expected = np.ones((10,10,3), np.float32)
for image, mask in images_and_masks:
expected = np.minimum(expected, image)
image = self.run_image_set(M.P_MINIMUM, images_and_masks)
self.assertFalse(image.has_mask)
self.assertTrue(np.all(np.abs(image.pixel_data - expected) <
np.finfo(float).eps))
def test_08_01_mask_unmasked(self):
np.random.seed (81)
images_and_masks = [ (np.random.uniform(size=(10,10)), None)
for i in range(3)]
image = self.run_image_set(M.P_MASK, images_and_masks)
self.assertEqual(tuple(image.pixel_data.shape), (10,10))
self.assertTrue(np.all(image.pixel_data == True))
self.assertFalse(image.has_mask)
def test_08_02_mask(self):
np.random.seed (81)
images_and_masks = [ (np.random.uniform(size=(10,10)),
np.random.uniform(size=(10,10)) > .3)
for i in range(3)]
expected = np.ones((10,10), bool)
for _, mask in images_and_masks:
expected = expected & mask
image = self.run_image_set(M.P_MASK, images_and_masks)
self.assertTrue(np.all(image.pixel_data == expected))
def test_09_02_filtered(self):
'''Make sure the image shows up in the image set even if filtered
This is similar to issue # 310 - the last image may be filtered before
the projection is done and the aggregate image is then missing
from the image set.
'''
np.random.seed (81)
images_and_masks = [ (np.random.uniform(size=(10,10)), None)
for i in range(3)]
image = self.run_image_set(M.P_AVERAGE, images_and_masks,
run_last=False)
np.testing.assert_array_almost_equal(
image.pixel_data,
(images_and_masks[0][0] + images_and_masks[1][0]) / 2)
|
h4r5h1t/django-hauthy
|
refs/heads/hauthy
|
django/contrib/gis/shortcuts.py
|
197
|
import zipfile
from io import BytesIO
from django.conf import settings
from django.http import HttpResponse
from django.template import loader
def compress_kml(kml):
"Returns compressed KMZ from the given KML string."
kmz = BytesIO()
zf = zipfile.ZipFile(kmz, 'a', zipfile.ZIP_DEFLATED)
zf.writestr('doc.kml', kml.encode(settings.DEFAULT_CHARSET))
zf.close()
kmz.seek(0)
return kmz.read()
def render_to_kml(*args, **kwargs):
"Renders the response as KML (using the correct MIME type)."
return HttpResponse(loader.render_to_string(*args, **kwargs),
content_type='application/vnd.google-earth.kml+xml')
def render_to_kmz(*args, **kwargs):
"""
Compresses the KML content and returns as KMZ (using the correct
MIME type).
"""
return HttpResponse(compress_kml(loader.render_to_string(*args, **kwargs)),
content_type='application/vnd.google-earth.kmz')
def render_to_text(*args, **kwargs):
"Renders the response using the MIME type for plain text."
return HttpResponse(loader.render_to_string(*args, **kwargs),
content_type='text/plain')
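# A minimal usage sketch; the view and template names below are hypothetical
# and not part of django.contrib.gis itself.
def example_kmz_view(request):
    # render_to_kmz() runs 'placemarks.kml' through the normal template loader
    # and returns it zipped as doc.kml with the KMZ content type.
    return render_to_kmz('placemarks.kml', {'places': []})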
|
samchrisinger/osf.io
|
refs/heads/develop
|
api/wikis/urls.py
|
23
|
from django.conf.urls import url
from api.wikis import views
urlpatterns = [
url(r'^(?P<wiki_id>\w+)/$', views.WikiDetail.as_view(), name=views.WikiDetail.view_name),
url(r'^(?P<wiki_id>\w+)/content/$', views.WikiContent.as_view(), name=views.WikiContent.view_name),
]
|
xubayer786/namebench
|
refs/heads/master
|
libnamebench/nameserver_list.py
|
173
|
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes to work with bunches of nameservers."""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import datetime
import operator
import Queue
import random
import sys
import threading
import time
# 3rd party libraries
import dns.resolver
import conn_quality
import addr_util
import nameserver
import util
NS_CACHE_SLACK = 2
CACHE_VER = 4
PREFERRED_HEALTH_TIMEOUT_MULTIPLIER = 1.5
SYSTEM_HEALTH_TIMEOUT_MULTIPLIER = 2
TOO_DISTANT_MULTIPLIER = 4.75
DEFAULT_MAX_SERVERS_TO_CHECK = 350
# If we can't ping more than this, go into slowmode.
MIN_PINGABLE_PERCENT = 5
MIN_HEALTHY_PERCENT = 10
SLOW_MODE_THREAD_COUNT = 6
# Windows behaves in unfortunate ways if too many threads are specified
DEFAULT_THREAD_COUNT = 35
MAX_INITIAL_HEALTH_THREAD_COUNT = 35
class OutgoingUdpInterception(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class TooFewNameservers(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class ThreadFailure(Exception):
def __init__(self):
pass
class QueryThreads(threading.Thread):
"""Quickly see which nameservers are awake."""
def __init__(self, input_queue, results_queue, action_type, checks=None):
threading.Thread.__init__(self)
self.input = input_queue
self.action_type = action_type
self.results = results_queue
self.checks = checks
self.halt = False
def stop(self):
self.halt = True
def run(self):
"""Iterate over the queue, processing each item."""
while not self.halt and not self.input.empty():
# check_wildcards is special: it has a tuple of two nameservers
if self.action_type == 'wildcard_check':
try:
(ns, other_ns) = self.input.get_nowait()
except Queue.Empty:
return
if ns.is_disabled or other_ns.is_disabled:
self.results.put(None)
continue
else:
self.results.put((ns, ns.TestSharedCache(other_ns)))
# everything else only has a single nameserver.
else:
try:
ns = self.input.get_nowait()
except Queue.Empty:
return
if ns.is_disabled:
self.results.put(None)
continue
if self.action_type == 'ping':
self.results.put(ns.CheckHealth(fast_check=True))
elif self.action_type == 'health':
self.results.put(ns.CheckHealth(sanity_checks=self.checks))
elif self.action_type == 'final':
self.results.put(ns.CheckHealth(sanity_checks=self.checks, final_check=True))
elif self.action_type == 'port_behavior':
self.results.put(ns.CheckHealth(sanity_checks=self.checks, port_check=True))
elif self.action_type == 'censorship':
self.results.put(ns.CheckCensorship(self.checks))
elif self.action_type == 'store_wildcards':
self.results.put(ns.StoreWildcardCache())
elif self.action_type == 'node_id':
self.results.put(ns.UpdateNodeIds())
elif self.action_type == 'update_hostname':
self.results.put(ns.UpdateHostname())
else:
raise ValueError('Invalid action type: %s' % self.action_type)
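# Rough sketch of how QueryThreads is driven (see _LaunchQueryThreads below;
# all names here are illustrative only):
#
#     input_queue, results_queue = Queue.Queue(), Queue.Queue()
#     for ns in servers:
#         input_queue.put(ns)
#     workers = [QueryThreads(input_queue, results_queue, 'ping')
#                for _ in range(thread_count)]
#     for w in workers:
#         w.start()
#     # each worker drains the queue, calling CheckHealth(fast_check=True)
#     # per nameserver and pushing the result (or None when disabled).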
class NameServers(list):
def __init__(self, thread_count=DEFAULT_THREAD_COUNT, max_servers_to_check=DEFAULT_MAX_SERVERS_TO_CHECK):
self._ips = set()
self.thread_count = thread_count
super(NameServers, self).__init__()
self.client_latitude = None
self.client_longitude = None
self.client_country = None
self.client_domain = None
self.client_asn = None
self.max_servers_to_check = max_servers_to_check
@property
def visible_servers(self):
return [x for x in self if not x.is_hidden]
@property
def enabled_servers(self):
return [x for x in self.visible_servers if not x.is_disabled]
@property
def disabled_servers(self):
return [x for x in self.visible_servers if x.is_disabled]
@property
def enabled_keepers(self):
return [x for x in self.enabled_servers if x.is_keeper]
@property
def enabled_supplemental(self):
return [x for x in self.enabled_servers if not x.is_keeper]
@property
def supplemental_servers(self):
return [x for x in self if not x.is_keeper]
@property
def country_servers(self):
return [x for x in self if x.country_code == self.client_country]
# Return a list of servers that match a particular tag
def HasTag(self, tag):
return [x for x in self if x.HasTag(tag)]
# Return a list of servers that match a particular tag
def HasVisibleTag(self, tag):
return [x for x in self.visible_servers if x.HasTag(tag)]
def SortEnabledByFastest(self):
"""Return a list of healthy servers in fastest-first order."""
return sorted(self.enabled_servers, key=operator.attrgetter('check_average'))
def SortEnabledByNearest(self):
"""Return a list of healthy servers in fastest-first order."""
return sorted(self.enabled_servers, key=operator.attrgetter('fastest_check_duration'))
def msg(self, msg, count=None, total=None, **kwargs):
if self.status_callback:
self.status_callback(msg, count=count, total=total, **kwargs)
else:
print '%s [%s/%s]' % (msg, count, total)
def _GetObjectForIP(self, ip):
return [x for x in self if x.ip == ip][0]
def _MergeNameServerData(self, ns):
existing = self._GetObjectForIP(ns.ip)
existing.tags.update(ns.tags)
if ns.system_position is not None:
existing.system_position = ns.system_position
elif ns.dhcp_position is not None:
existing.dhcp_position = ns.dhcp_position
def append(self, ns):
"""Add a nameserver to the list, guaranteeing uniqueness."""
if ns.ip in self._ips:
self._MergeNameServerData(ns)
else:
super(NameServers, self).append(ns)
self._ips.add(ns.ip)
def SetTimeouts(self, timeout, ping_timeout, health_timeout):
if len(self.enabled_servers) > 1:
cq = conn_quality.ConnectionQuality(status_callback=self.status_callback)
(intercepted, avg_latency, max_latency) = cq.CheckConnectionQuality()[0:3]
if intercepted:
raise OutgoingUdpInterception(
'Your router or Internet Service Provider appears to be intercepting '
'and redirecting all outgoing DNS requests. This means you cannot '
'benchmark or utilize alternate DNS servers. Please adjust your '
'router configuration or file a support request with your ISP.'
)
if (max_latency * 2) > health_timeout:
health_timeout = max_latency * 2
self.msg('Set health timeout to %.2fs' % health_timeout)
if (max_latency * 1.1) > ping_timeout:
ping_timeout = avg_latency * 1.4
self.msg('Set ping timeout to %.2fs' % ping_timeout)
for ns in self:
ns.timeout = timeout
ns.ping_timeout = ping_timeout
ns.health_timeout = health_timeout
def SetClientLocation(self, latitude, longitude, client_country):
self.client_latitude = latitude
self.client_longitude = longitude
self.client_country = client_country
def SetNetworkLocation(self, domain, asn):
self.client_domain = domain
self.client_asn = asn
def FilterByTag(self, include_tags=None, require_tags=None):
for ns in self:
if include_tags:
if not ns.MatchesTags(include_tags):
ns.tags.add('hidden')
if require_tags:
for tag in require_tags:
if not ns.HasTag(tag):
ns.tags.add('hidden')
if not self.enabled_servers:
raise TooFewNameservers('No nameservers specified matched tags %s %s' % (include_tags, require_tags))
if require_tags:
self.msg("%s of %s nameservers have tags: %s (%s required)" %
(len(self.visible_servers), len(self), ', '.join(include_tags),
', '.join(require_tags)))
else:
self.msg("%s of %s nameservers have tags: %s" %
(len(self.visible_servers), len(self), ', '.join(include_tags)))
def HasEnoughInCountryServers(self):
return len(self.country_servers) > self.max_servers_to_check
def NearbyServers(self, max_distance):
srv_by_dist = sorted([(x.DistanceFromCoordinates(self.client_latitude, self.client_longitude), x)
for x in self.HasVisibleTag('regional')], key=operator.itemgetter(0))
for distance, ns in srv_by_dist:
if distance < float(max_distance):
yield ns
def AddNetworkTags(self):
"""Add network tags for each nameserver."""
if self.client_domain:
provider = self.client_domain.split('.')[0]
else:
provider = None
for ns in self:
ns.AddNetworkTags(self.client_domain, provider, self.client_asn, self.client_country)
def AddLocalityTags(self, max_distance):
if self.client_latitude:
count = 0
for ns in self.NearbyServers(max_distance):
count += 1
if count > self.max_servers_to_check:
break
ns.tags.add('nearby')
def DisableSlowestSupplementalServers(self, multiplier=TOO_DISTANT_MULTIPLIER, max_servers=None,
prefer_asn=None):
"""Disable servers who's fastest duration is multiplier * average of best 10 servers."""
if not max_servers:
max_servers = self.max_servers_to_check
supplemental_servers = self.enabled_supplemental
fastest = [x for x in self.SortEnabledByFastest()][:10]
best_10 = util.CalculateListAverage([x.fastest_check_duration for x in fastest])
cutoff = best_10 * multiplier
self.msg("Removing secondary nameservers slower than %0.2fms (max=%s)" % (cutoff, max_servers))
for (idx, ns) in enumerate(self.SortEnabledByFastest()):
hide = False
if ns not in supplemental_servers:
continue
if ns.fastest_check_duration > cutoff:
hide = True
if idx > max_servers:
hide = True
if hide:
matches = ns.MatchesTags(nameserver.PROVIDER_TAGS)
if matches:
self.msg("%s seems slow, but has tag: %s" % (ns, matches))
else:
ns.tags.add('hidden')
def _FastestByLocalProvider(self):
"""Find the fastest DNS server by the client provider."""
fastest = self.SortEnabledByFastest()
# Give preference in tag order
for tag in nameserver.PROVIDER_TAGS:
for ns in fastest:
if ns.HasTag(tag):
return ns
def HideBrokenIPV6Servers(self):
"""Most people don't care about these."""
for ns in self.disabled_servers:
if ns.HasTag('ipv6') and not ns.is_hidden:
ns.tags.add('hidden')
def HideSlowSupplementalServers(self, target_count):
"""Given a target count, delete nameservers that we do not plan to test."""
# Magic secondary mixing algorithm:
# - Half of them should be the "nearest" nameservers
# - Half of them should be the "fastest average" nameservers
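# Illustrative split with invented numbers: for target_count=22 with 3 keepers
# plus one ISP favourite, 18 supplemental slots remain, so int(18 / 2.0) = 9
# come from SortEnabledByNearest() and the remaining 9 from
# SortEnabledByFastest(); everything else gets the 'hidden' tag.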
self.msg("Hiding all but %s servers" % target_count)
keepers = self.enabled_keepers
isp_keeper = self._FastestByLocalProvider()
if isp_keeper:
self.msg("%s is the fastest DNS server provided by your ISP." % isp_keeper)
keepers.append(isp_keeper)
supplemental_servers_needed = target_count - len(keepers)
if supplemental_servers_needed < 1 or not self.enabled_supplemental:
return
nearest_needed = int(supplemental_servers_needed / 2.0)
if supplemental_servers_needed < 50:
self.msg("Picking %s secondary servers to use (%s nearest, %s fastest)" %
(supplemental_servers_needed, nearest_needed, supplemental_servers_needed - nearest_needed))
# Phase two is picking the nearest secondary server
supplemental_servers_to_keep = []
for ns in self.SortEnabledByNearest():
if ns not in keepers:
if not supplemental_servers_to_keep and supplemental_servers_needed < 15:
self.msg('%s appears to be the nearest regional (%0.2fms)' % (ns, ns.fastest_check_duration))
supplemental_servers_to_keep.append(ns)
if len(supplemental_servers_to_keep) >= nearest_needed:
break
# Phase three is hiding the slower secondary servers
for ns in self.SortEnabledByFastest():
if ns not in keepers and ns not in supplemental_servers_to_keep:
supplemental_servers_to_keep.append(ns)
if len(supplemental_servers_to_keep) >= supplemental_servers_needed:
break
for ns in self.supplemental_servers:
if ns not in supplemental_servers_to_keep and ns not in keepers:
ns.tags.add('hidden')
def CheckHealth(self, sanity_checks=None, max_servers=11, prefer_asn=None):
"""Filter out unhealthy or slow replica servers."""
self.PingNameServers()
if len(self.enabled_servers) > max_servers:
self.DisableSlowestSupplementalServers(prefer_asn=prefer_asn)
self.RunHealthCheckThreads(sanity_checks['primary'])
if len(self.enabled_servers) > max_servers:
self._DemoteSecondaryGlobalNameServers()
self.HideSlowSupplementalServers(int(max_servers * NS_CACHE_SLACK))
if len(self.enabled_servers) > 1:
self.RunNodeIdThreads()
self.CheckCacheCollusion()
self.RunNodeIdThreads()
self.HideSlowSupplementalServers(max_servers)
self.RunFinalHealthCheckThreads(sanity_checks['secondary'])
self.RunNodeIdThreads()
self.HideBrokenIPV6Servers()
# One more time!
if len(self.enabled_servers) > 1:
self.RunNodeIdThreads()
self.CheckCacheCollusion()
self.RunHostnameThreads()
if not self.enabled_servers:
raise TooFewNameservers('None of the nameservers tested are healthy')
def CheckCensorship(self, sanity_checks):
pass
def _RemoveGlobalWarnings(self):
"""If all nameservers have the same warning, remove it. It's likely false."""
ns_count = len(self.enabled_servers)
seen_counts = {}
# No sense in checking for duplicate warnings if we only have one server.
if len(self.enabled_servers) == 1:
return
for ns in self.enabled_servers:
for warning in ns.warnings:
seen_counts[warning] = seen_counts.get(warning, 0) + 1
for warning in seen_counts:
if seen_counts[warning] == ns_count:
self.msg('All nameservers have warning: %s (likely a false positive)' % warning)
for ns in self.enabled_servers:
ns.warnings.remove(warning)
def _DemoteSecondaryGlobalNameServers(self):
"""For global nameservers, demote the slower IP to secondary status."""
seen = {}
for ns in self.SortEnabledByFastest():
if ns.MatchesTags(['preferred', 'global']):
if ns.provider in seen and not ns.MatchesTags(['system', 'specified']):
faster_ns = seen[ns.provider]
if ns.HasTag('preferred'):
self.msg('Making %s the primary anycast - faster than %s by %2.2fms' %
(faster_ns.name_and_node, ns.name_and_node, ns.check_average - faster_ns.check_average))
ns.tags.add('hidden')
else:
seen[ns.provider] = ns
def ResetTestResults(self):
"""Reset the testng status of all disabled hosts."""
return [ns.ResetTestStatus() for ns in self]
def CheckCacheCollusion(self):
"""Mark if any nameservers share cache, especially if they are slower."""
self.RunWildcardStoreThreads()
sleepy_time = 4
self.msg("Waiting %ss for TTL's to decrement." % sleepy_time)
time.sleep(sleepy_time)
test_combos = []
good_nameservers = [x for x in self.SortEnabledByFastest()]
for ns in good_nameservers:
for compare_ns in good_nameservers:
if ns != compare_ns:
test_combos.append((compare_ns, ns))
results = self.RunCacheCollusionThreads(test_combos)
while not results.empty():
(ns, shared_ns) = results.get()
if shared_ns:
ns.shared_with.add(shared_ns)
shared_ns.shared_with.add(ns)
if ns.is_disabled or shared_ns.is_disabled:
continue
if ns.check_average > shared_ns.check_average:
slower = ns
faster = shared_ns
else:
slower = shared_ns
faster = ns
if slower.system_position == 0:
faster.DisableWithMessage('Shares-cache with current primary DNS server')
slower.warnings.add('Replica of %s' % faster.ip)
elif slower.is_keeper and not faster.is_keeper:
faster.DisableWithMessage('Replica of %s [%s]' % (slower.name, slower.ip))
slower.warnings.add('Replica of %s [%s]' % (faster.name, faster.ip))
else:
diff = slower.check_average - faster.check_average
self.msg("Disabling %s - slower replica of %s by %0.1fms." % (slower.name_and_node, faster.name_and_node, diff))
slower.DisableWithMessage('Slower replica of %s [%s]' % (faster.name, faster.ip))
faster.warnings.add('Replica of %s [%s]' % (slower.name, slower.ip))
def _LaunchQueryThreads(self, action_type, status_message, items,
thread_count=None, **kwargs):
"""Launch query threads for a given action type.
Args:
action_type: a string describing an action type to pass
status_message: Status to show during updates.
items: A list of items to pass to the queue
thread_count: How many threads to use (int)
kwargs: Arguments to pass to QueryThreads()
Returns:
results_queue: Results from the query tests.
Raises:
TooFewNameservers: If no tested nameservers are healthy.
"""
threads = []
input_queue = Queue.Queue()
results_queue = Queue.Queue()
# items are usually nameservers
random.shuffle(items)
for item in items:
input_queue.put(item)
if not thread_count:
thread_count = self.thread_count
if thread_count > len(items):
thread_count = len(items)
status_message += ' (%s threads)' % thread_count
self.msg(status_message, count=0, total=len(items))
for _ in range(0, thread_count):
thread = QueryThreads(input_queue, results_queue, action_type, **kwargs)
try:
thread.start()
except:
self.msg("ThreadingError with %s threads: waiting for completion before retrying." % thread_count)
for thread in threads:
thread.stop()
thread.join()
raise ThreadFailure()
threads.append(thread)
while results_queue.qsize() != len(items):
self.msg(status_message, count=results_queue.qsize(), total=len(items))
time.sleep(0.5)
self.msg(status_message, count=results_queue.qsize(), total=len(items))
for thread in threads:
thread.join()
if not self.enabled_servers:
raise TooFewNameservers('None of the %s nameservers tested are healthy' % len(self.visible_servers))
return results_queue
def RunCacheCollusionThreads(self, test_combos):
"""Schedule and manage threading for cache collusion checks."""
return self._LaunchQueryThreads('wildcard_check', 'Running cache-sharing checks on %s servers' % len(self.enabled_servers), test_combos)
def PingNameServers(self):
"""Quickly ping nameservers to see which are available."""
start = datetime.datetime.now()
test_servers = list(self.enabled_servers)
try:
results = self._LaunchQueryThreads('ping', 'Checking nameserver availability', test_servers)
except ThreadFailure:
self.msg("It looks like you couldn't handle %s threads, trying again with %s (slow)" % (self.thread_count, SLOW_MODE_THREAD_COUNT))
self.thread_count = SLOW_MODE_THREAD_COUNT
self.ResetTestResults()
results = self._LaunchQueryThreads('ping', 'Checking nameserver availability', test_servers)
success_rate = self.GetHealthyPercentage(compare_to=test_servers)
if success_rate < MIN_PINGABLE_PERCENT:
self.msg('How odd! Only %0.1f percent of name servers were pingable. Trying again with %s threads (slow)'
% (success_rate, SLOW_MODE_THREAD_COUNT))
self.ResetTestResults()
self.thread_count = SLOW_MODE_THREAD_COUNT
results = self._LaunchQueryThreads('ping', 'Checking nameserver availability', test_servers)
if self.enabled_servers:
self.msg('%s of %s servers are available (duration: %s)' %
(len(self.enabled_servers), len(test_servers), datetime.datetime.now() - start))
return results
def GetHealthyPercentage(self, compare_to=None):
if not compare_to:
compare_to = self.visible_servers
return (float(len(self.enabled_servers)) / float(len(compare_to))) * 100
def RunHealthCheckThreads(self, checks, min_healthy_percent=MIN_HEALTHY_PERCENT):
"""Quickly ping nameservers to see which are healthy."""
test_servers = self.enabled_servers
status_msg = 'Running initial health checks on %s servers' % len(test_servers)
if self.thread_count > MAX_INITIAL_HEALTH_THREAD_COUNT:
thread_count = MAX_INITIAL_HEALTH_THREAD_COUNT
else:
thread_count = self.thread_count
try:
results = self._LaunchQueryThreads('health', status_msg, test_servers,
checks=checks, thread_count=thread_count)
except ThreadFailure:
self.msg("It looks like you couldn't handle %s threads, trying again with %s (slow)" % (thread_count, SLOW_MODE_THREAD_COUNT))
self.thread_count = SLOW_MODE_THREAD_COUNT
self.ResetTestResults()
results = self._LaunchQueryThreads('ping', 'Checking nameserver availability', list(self.visible_servers))
success_rate = self.GetHealthyPercentage(compare_to=test_servers)
if success_rate < min_healthy_percent:
self.msg('How odd! Only %0.1f percent of name servers are healthy. Trying again with %s threads (slow)'
% (success_rate, SLOW_MODE_THREAD_COUNT))
self.ResetTestResults()
self.thread_count = SLOW_MODE_THREAD_COUNT
time.sleep(5)
results = self._LaunchQueryThreads('health', status_msg, test_servers,
checks=checks, thread_count=thread_count)
self.msg('%s of %s tested name servers are healthy' %
(len(self.enabled_servers), len(test_servers)))
return results
def RunNodeIdThreads(self):
"""Update node id status on all servers."""
status_msg = 'Checking node ids on %s servers' % len(self.enabled_servers)
return self._LaunchQueryThreads('node_id', status_msg, list(self.enabled_servers))
def RunHostnameThreads(self):
"""Update node id status on all servers."""
status_msg = 'Updating hostnames on %s servers' % len(self.enabled_servers)
return self._LaunchQueryThreads('update_hostname', status_msg, list(self.enabled_servers))
def RunFinalHealthCheckThreads(self, checks):
"""Quickly ping nameservers to see which are healthy."""
status_msg = 'Running final health checks on %s servers' % len(self.enabled_servers)
return self._LaunchQueryThreads('final', status_msg, list(self.enabled_servers), checks=checks)
def RunCensorshipCheckThreads(self, checks):
"""Quickly ping nameservers to see which are healthy."""
status_msg = 'Running censorship checks on %s servers' % len(self.enabled_servers)
return self._LaunchQueryThreads('censorship', status_msg, list(self.enabled_servers), checks=checks)
def RunPortBehaviorThreads(self):
"""Get port behavior data."""
status_msg = 'Running port behavior checks on %s servers' % len(self.enabled_servers)
return self._LaunchQueryThreads('port_behavior', status_msg, list(self.enabled_servers))
def RunWildcardStoreThreads(self):
"""Store a wildcard cache value for all nameservers (using threads)."""
status_msg = 'Waiting for wildcard cache queries from %s servers' % len(self.enabled_servers)
return self._LaunchQueryThreads('store_wildcards', status_msg, list(self.enabled_servers))
|
rdipietro/tensorflow
|
refs/heads/master
|
tensorflow/contrib/metrics/python/ops/confusion_matrix_ops.py
|
12
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Confusion matrix related metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework import tensor_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
def confusion_matrix(predictions, labels, num_classes=None, dtype=dtypes.int32,
name=None, weights=None):
"""Computes the confusion matrix from predictions and labels.
Calculate the Confusion Matrix for a pair of prediction and
label 1-D int arrays.
The matrix rows represent the prediction labels and the columns
represent the real labels. The confusion matrix is always a 2-D array
of shape `[n, n]`, where `n` is the number of valid labels for a given
classification task. Both predictions and labels must be 1-D arrays of
the same shape in order for this function to work.
If `num_classes` is None, then `num_classes` will be set to one plus
the maximum value in either predictions or labels.
Class labels are expected to start at 0. E.g., if `num_classes` was
three, then the possible labels would be `[0, 1, 2]`.
If `weights` is not `None`, then each prediction contributes its
corresponding weight to the total value of the confusion matrix cell.
For example:
```python
tf.contrib.metrics.confusion_matrix([1, 2, 4], [2, 2, 4]) ==>
[[0 0 0 0 0]
[0 0 1 0 0]
[0 0 1 0 0]
[0 0 0 0 0]
[0 0 0 0 1]]
```
Note that the possible labels are assumed to be `[0, 1, 2, 3, 4]`,
resulting in a 5x5 confusion matrix.
Args:
predictions: A 1-D array representing the predictions for a given
classification.
labels: A 1-D array representing the real labels for the classification task.
num_classes: The possible number of labels the classification task can
have. If this value is not provided, it will be calculated
using both the predictions and labels arrays.
dtype: Data type of the confusion matrix.
name: Scope name.
weights: An optional `Tensor` whose shape matches `predictions`.
Returns:
A k X k matrix representing the confusion matrix, where k is the number of
possible labels in the classification task.
Raises:
ValueError: If both predictions and labels are not 1-D vectors and have
mismatched shapes, or if `weights` is not `None` and its shape doesn't
match `predictions`.
"""
with ops.name_scope(name, 'confusion_matrix',
[predictions, labels, num_classes]) as name:
predictions, labels = tensor_util.remove_squeezable_dimensions(
ops.convert_to_tensor(
predictions, name='predictions'),
ops.convert_to_tensor(labels, name='labels'))
predictions = math_ops.cast(predictions, dtypes.int64)
labels = math_ops.cast(labels, dtypes.int64)
if num_classes is None:
num_classes = math_ops.maximum(math_ops.reduce_max(predictions),
math_ops.reduce_max(labels)) + 1
if weights is not None:
predictions.get_shape().assert_is_compatible_with(weights.get_shape())
weights = math_ops.cast(weights, dtype)
shape = array_ops.pack([num_classes, num_classes])
indices = array_ops.transpose(array_ops.pack([predictions, labels]))
values = (array_ops.ones_like(predictions, dtype)
if weights is None else weights)
cm_sparse = sparse_tensor.SparseTensor(
indices=indices, values=values, shape=math_ops.to_int64(shape))
zero_matrix = array_ops.zeros(math_ops.to_int32(shape), dtype)
return sparse_ops.sparse_add(zero_matrix, cm_sparse)
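# A minimal usage sketch of the `weights` argument, assuming weights are passed as
# a Tensor (e.g. `tf.constant`): each prediction/label pair then contributes its
# weight instead of 1, so for the docstring example the cells become
#   confusion_matrix([1, 2, 4], [2, 2, 4], weights=tf.constant([2, 3, 5]))
#   -> cell [1, 2] == 2, cell [2, 2] == 3, cell [4, 4] == 5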
|
kalov/ShapePFCN
|
refs/heads/master
|
caffe-ours/scripts/copy_notebook.py
|
75
|
#!/usr/bin/env python
"""
Takes as arguments:
1. the path to a JSON file (such as an IPython notebook).
2. the path to output file
If 'metadata' dict in the JSON file contains 'include_in_docs': true,
then copies the file to output file, appending the 'metadata' property
as YAML front-matter, adding the field 'category' with value 'notebook'.
"""
import os
import sys
import json
filename = sys.argv[1]
output_filename = sys.argv[2]
content = json.load(open(filename))
if 'include_in_docs' in content['metadata'] and content['metadata']['include_in_docs']:
yaml_frontmatter = ['---']
for key, val in content['metadata'].iteritems():
if key == 'example_name':
key = 'title'
if val == '':
val = os.path.basename(filename)
yaml_frontmatter.append('{}: {}'.format(key, val))
yaml_frontmatter += ['category: notebook']
yaml_frontmatter += ['original_path: ' + filename]
with open(output_filename, 'w') as fo:
fo.write('\n'.join(yaml_frontmatter + ['---']) + '\n')
fo.write(open(filename).read())
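# A worked sketch of the transformation, assuming a hypothetical notebook whose
# metadata is {"include_in_docs": true, "example_name": "Net Surgery"}: the output
# file would begin with YAML front-matter roughly like
#   ---
#   include_in_docs: True
#   title: Net Surgery
#   category: notebook
#   original_path: <path passed as the first argument>
#   ---
# followed by the verbatim JSON of the notebook (key order depends on the dict).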
|
skynetera/skynet
|
refs/heads/master
|
skynet-server/conf/__init__.py
|
3
|
#!/usr/bin/env python
# coding: utf-8
__author__ = 'whoami'
"""
@version: 1.0
@author: whoami
@license: Apache Licence 2.0
@contact: skynetEye@gmail.com
@site: http://www.itweet.cn
@software: PyCharm Community Edition
@file: __init__.py.py
@time: 2015-11-26 下午8:33
"""
def run():
print("hello")
if __name__ == "__main__":
run()
|
tumbl3w33d/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/network/nxos/config/hsrp_interfaces/hsrp_interfaces.py
|
13
|
#
# -*- coding: utf-8 -*-
# Copyright 2019 Cisco and/or its affiliates.
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The nxos hsrp_interfaces class
This class creates a command set to bring the current device configuration
to a desired end-state. The command set is based on a comparison of the
current configuration (as dict) and the provided configuration (as dict).
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.module_utils.network.common.cfg.base import ConfigBase
from ansible.module_utils.network.common.utils import dict_diff, to_list, remove_empties
from ansible.module_utils.network.nxos.facts.facts import Facts
from ansible.module_utils.network.nxos.utils.utils import flatten_dict, get_interface_type, normalize_interface, search_obj_in_list, vlan_range_to_list
class Hsrp_interfaces(ConfigBase):
"""
The nxos_hsrp_interfaces class
"""
gather_subset = [
'!all',
'!min',
]
gather_network_resources = [
'hsrp_interfaces',
]
def __init__(self, module):
super(Hsrp_interfaces, self).__init__(module)
def get_hsrp_interfaces_facts(self):
""" Get the 'facts' (the current configuration)
:rtype: A dictionary
:returns: The current configuration as a dictionary
"""
facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources)
hsrp_interfaces_facts = facts['ansible_network_resources'].get('hsrp_interfaces', [])
return hsrp_interfaces_facts
def edit_config(self, commands):
return self._connection.edit_config(commands)
def execute_module(self):
""" Execute the module
:rtype: A dictionary
:returns: The result from module execution
"""
result = {'changed': False}
warnings = list()
cmds = list()
existing_hsrp_interfaces_facts = self.get_hsrp_interfaces_facts()
cmds.extend(self.set_config(existing_hsrp_interfaces_facts))
if cmds:
if not self._module.check_mode:
self.edit_config(cmds)
result['changed'] = True
result['commands'] = cmds
changed_hsrp_interfaces_facts = self.get_hsrp_interfaces_facts()
result['before'] = existing_hsrp_interfaces_facts
if result['changed']:
result['after'] = changed_hsrp_interfaces_facts
result['warnings'] = warnings
return result
def set_config(self, existing_hsrp_interfaces_facts):
""" Collect the configuration from the args passed to the module,
collect the current configuration (as a dict from facts)
:rtype: A list
:returns: the commands necessary to migrate the current configuration
to the desired configuration
"""
config = self._module.params['config']
want = []
if config:
for w in config:
w.update({'name': normalize_interface(w['name'])})
want.append(w)
have = existing_hsrp_interfaces_facts
resp = self.set_state(want, have)
return to_list(resp)
def set_state(self, want, have):
""" Select the appropriate function based on the state provided
:param want: the desired configuration as a dictionary
:param have: the current configuration as a dictionary
:rtype: A list
:returns: the commands necessary to migrate the current configuration
to the desired configuration
"""
state = self._module.params['state']
# check for 'config' keyword in play
if state in ('overridden', 'merged', 'replaced') and not want:
self._module.fail_json(msg='config is required for state {0}'.format(state))
cmds = list()
if state == 'overridden':
cmds.extend(self._state_overridden(want, have))
elif state == 'deleted':
cmds.extend(self._state_deleted(want, have))
else:
for w in want:
if state == 'merged':
cmds.extend(self._state_merged(flatten_dict(w), have))
elif state == 'replaced':
cmds.extend(self._state_replaced(flatten_dict(w), have))
return cmds
def _state_replaced(self, want, have):
""" The command generator when state is replaced
:rtype: A list
:returns: the commands necessary to migrate the current configuration
to the desired configuration
"""
cmds = []
obj_in_have = search_obj_in_list(want['name'], have, 'name')
if obj_in_have:
diff = dict_diff(want, obj_in_have)
else:
diff = want
merged_cmds = self.set_commands(want, have)
if 'name' not in diff:
diff['name'] = want['name']
replaced_cmds = []
if obj_in_have:
replaced_cmds = self.del_attribs(diff)
if replaced_cmds or merged_cmds:
for cmd in set(replaced_cmds).intersection(set(merged_cmds)):
merged_cmds.remove(cmd)
cmds.extend(replaced_cmds)
cmds.extend(merged_cmds)
return cmds
def _state_overridden(self, want, have):
""" The command generator when state is overridden
:rtype: A list
:returns: the commands necessary to migrate the current configuration
to the desired configuration
"""
cmds = []
for h in have:
# Check existing states, set to default if not in want or different than want
h = flatten_dict(h)
obj_in_want = search_obj_in_list(h['name'], want, 'name')
if obj_in_want:
# Let the 'want' loop handle all vals for this interface
continue
cmds.extend(self.del_attribs(h))
for w in want:
# Update any want attrs if needed. The overridden state considers
# the play as the source of truth for the entire device, therefore
# set any unspecified attrs to their default state.
w = self.set_none_vals_to_defaults(flatten_dict(w))
cmds.extend(self.set_commands(w, have))
return cmds
def _state_merged(self, want, have):
""" The command generator when state is merged
:rtype: A list
:returns: the commands necessary to merge the provided into
the current configuration
"""
return self.set_commands(want, have)
def _state_deleted(self, want, have):
""" The command generator when state is deleted
:rtype: A list
:returns: the commands necessary to remove the current configuration
of the provided objects
"""
if not (want or have):
return []
cmds = []
if want:
for w in want:
obj_in_have = flatten_dict(search_obj_in_list(w['name'], have, 'name'))
cmds.extend(self.del_attribs(obj_in_have))
else:
for h in have:
cmds.extend(self.del_attribs(flatten_dict(h)))
return cmds
def del_attribs(self, obj):
if not obj or len(obj.keys()) == 1:
return []
cmds = []
if 'bfd' in obj:
cmds.append('no hsrp bfd')
if cmds:
cmds.insert(0, 'interface ' + obj['name'])
return cmds
def set_none_vals_to_defaults(self, want):
# Set dict None values to default states
if 'bfd' in want and want['bfd'] is None:
want['bfd'] = 'disable'
return want
def diff_of_dicts(self, want, obj_in_have):
diff = set(want.items()) - set(obj_in_have.items())
diff = dict(diff)
if diff and want['name'] == obj_in_have['name']:
diff.update({'name': want['name']})
return diff
def add_commands(self, want, obj_in_have):
if not want:
return []
cmds = []
if 'bfd' in want and want['bfd'] is not None:
if want['bfd'] == 'enable':
cmd = 'hsrp bfd'
cmds.append(cmd)
elif want['bfd'] == 'disable' and obj_in_have and obj_in_have.get('bfd') == 'enable':
cmd = 'no hsrp bfd'
cmds.append(cmd)
if cmds:
cmds.insert(0, 'interface ' + want['name'])
return cmds
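# Worked examples of add_commands(), assuming a hypothetical interface name:
#   add_commands({'name': 'Ethernet1/1', 'bfd': 'enable'}, None)
#     -> ['interface Ethernet1/1', 'hsrp bfd']
#   add_commands({'name': 'Ethernet1/1', 'bfd': 'disable'},
#                {'name': 'Ethernet1/1', 'bfd': 'enable'})
#     -> ['interface Ethernet1/1', 'no hsrp bfd']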
def set_commands(self, want, have):
cmds = []
obj_in_have = search_obj_in_list(want['name'], have, 'name')
if not obj_in_have:
cmds = self.add_commands(want, obj_in_have)
else:
diff = self.diff_of_dicts(want, obj_in_have)
cmds = self.add_commands(diff, obj_in_have)
return cmds
|
puzan/ansible
|
refs/heads/devel
|
lib/ansible/modules/database/mysql/mysql_variables.py
|
25
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Ansible module to manage mysql variables
(c) 2013, Balazs Pocze <banyek@gawker.com>
Certain parts are taken from Mark Theunissen's mysqldb module
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: mysql_variables
short_description: Manage MySQL global variables
description:
- Query / Set MySQL variables
version_added: 1.3
author: "Balazs Pocze (@banyek)"
options:
variable:
description:
- Variable name to operate on
required: True
value:
description:
- If set, then sets the variable to this value
required: False
extends_documentation_fragment: mysql
'''
EXAMPLES = '''
# Check for sync_binlog setting
- mysql_variables:
variable: sync_binlog
# Set read_only variable to 1
- mysql_variables:
variable: read_only
value: 1
'''
import warnings
from re import match
try:
import MySQLdb
except ImportError:
mysqldb_found = False
else:
mysqldb_found = True
def typedvalue(value):
"""
Convert value to number whenever possible, return same value
otherwise.
>>> typedvalue('3')
3
>>> typedvalue('3.0')
3.0
>>> typedvalue('foobar')
'foobar'
"""
try:
return int(value)
except ValueError:
pass
try:
return float(value)
except ValueError:
pass
return value
def getvariable(cursor, mysqlvar):
cursor.execute("SHOW VARIABLES WHERE Variable_name = %s", (mysqlvar,))
mysqlvar_val = cursor.fetchall()
if len(mysqlvar_val) == 1:
return mysqlvar_val[0][1]
else:
return None
def setvariable(cursor, mysqlvar, value):
""" Set a global mysql variable to a given value
The DB driver will handle quoting of the given value based on its
type; numeric strings like '3.0' or '8' are illegal, and such values
should be passed as numeric literals instead.
"""
query = "SET GLOBAL %s = " % mysql_quote_identifier(mysqlvar, 'vars')
try:
cursor.execute(query + "%s", (value,))
cursor.fetchall()
result = True
except Exception:
e = get_exception()
result = str(e)
return result
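# A minimal usage sketch, assuming mysql_quote_identifier() backtick-quotes the
# variable name: setvariable(cursor, 'read_only', 1) executes roughly
# "SET GLOBAL `read_only` = %s" with the value bound as a query parameter, and
# returns True on success or the exception text on failure.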
def main():
module = AnsibleModule(
argument_spec = dict(
login_user=dict(default=None),
login_password=dict(default=None, no_log=True),
login_host=dict(default="localhost"),
login_port=dict(default=3306, type='int'),
login_unix_socket=dict(default=None),
variable=dict(default=None),
value=dict(default=None),
ssl_cert=dict(default=None),
ssl_key=dict(default=None),
ssl_ca=dict(default=None),
connect_timeout=dict(default=30, type='int'),
config_file=dict(default="~/.my.cnf", type="path")
)
)
user = module.params["login_user"]
password = module.params["login_password"]
ssl_cert = module.params["ssl_cert"]
ssl_key = module.params["ssl_key"]
ssl_ca = module.params["ssl_ca"]
connect_timeout = module.params['connect_timeout']
config_file = module.params['config_file']
db = 'mysql'
mysqlvar = module.params["variable"]
value = module.params["value"]
if mysqlvar is None:
module.fail_json(msg="Cannot run without variable to operate with")
if match('^[0-9a-z_]+$', mysqlvar) is None:
module.fail_json(msg="invalid variable name \"%s\"" % mysqlvar)
if not mysqldb_found:
module.fail_json(msg="the python mysqldb module is required")
else:
warnings.filterwarnings('error', category=MySQLdb.Warning)
try:
cursor = mysql_connect(module, user, password, config_file, ssl_cert, ssl_key, ssl_ca, db,
connect_timeout=connect_timeout)
except Exception:
e = get_exception()
if os.path.exists(config_file):
module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or %s has the credentials. Exception message: %s" % (config_file, e))
else:
module.fail_json(msg="unable to find %s. Exception message: %s" % (config_file, e))
mysqlvar_val = getvariable(cursor, mysqlvar)
if mysqlvar_val is None:
module.fail_json(msg="Variable not available \"%s\"" % mysqlvar, changed=False)
if value is None:
module.exit_json(msg=mysqlvar_val)
else:
# Type values before using them
value_wanted = typedvalue(value)
value_actual = typedvalue(mysqlvar_val)
if value_wanted == value_actual:
module.exit_json(msg="Variable already set to requested value", changed=False)
try:
result = setvariable(cursor, mysqlvar, value_wanted)
except SQLParseError:
e = get_exception()
result = str(e)
if result is True:
module.exit_json(msg="Variable change succeeded prev_value=%s" % value_actual, changed=True)
else:
module.fail_json(msg=result, changed=False)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.database import *
from ansible.module_utils.mysql import *
if __name__ == '__main__':
main()
|
MagicNews/ASMagic
|
refs/heads/master
|
config.py
|
1
|
# -*- coding: utf-8 -*-
redis_db = 0 # Set your redis db number 0 - 15
token = "SET_YOUR_TOKEN_HERE"
sudos = []
admins = []
errors_chat = None  # Set your errors chat (group/channel/user) id here!
log_chat = None  # Set your logs chat (group/supergroup) id here!
|
raccoongang/xblock-video
|
refs/heads/dev
|
video_xblock/tests/unit/mocks/brightcove.py
|
1
|
"""
Brightcove backend mocks.
"""
import json
from copy import copy, deepcopy
from video_xblock.backends import brightcove
from video_xblock.exceptions import VideoXBlockException, VideoXBlockMockException
from video_xblock.tests.unit.mocks.base import BaseMock, RequestsMock, ResponseStub
class BrightcoveAuthMock(BaseMock):
"""
Brightcove auth mock class.
"""
outcomes = (
(
'credentials_created',
{
'client_secret': 'brightcove_client_secret',
'client_id': 'brightcove_client_id',
'error_message': ''
}
),
(
'auth_failed',
{
'client_secret': '',
'client_id': '',
'error_message': 'Authentication to Brightcove API failed: no client credentials have been retrieved.'
}
)
)
def create_credentials(self):
"""
Mock `get_client_credentials` returned value.
"""
if self.event == 'auth_failed':
self.side_effect = VideoXBlockException(self.ordered_results[self.event]['error_message'])
self.return_value = (
self.ordered_results[self.event]['client_secret'],
self.ordered_results[self.event]['client_id'],
self.ordered_results[self.event]['error_message']
)
return self
@property
def expected_value(self):
"""
Return expected value of `authenticate_api` after mock is applied.
"""
ret = copy(self.ordered_results[self.event])
error = ret.pop('error_message')
return ret, error
def apply_mock(self, mocked_objects):
"""
Save state of auth related entities before mocks are applied.
"""
mocked_objects.append({
'obj': brightcove.BrightcoveApiClient,
'attrs': ['create_credentials', ],
'value': [brightcove.BrightcoveApiClient.create_credentials, ]
})
brightcove.BrightcoveApiClient.create_credentials = self.create_credentials()
return mocked_objects
class BrightcoveDefaultTranscriptsMock(BaseMock):
"""
Brightcove default transcripts mock class.
"""
_default_transcripts = [
{'label': u'English', 'lang': u'en', 'url': None, 'source': u'default'},
{'label': u'Ukrainian', 'lang': u'uk', 'url': None, 'source': u'default'}
]
_response = {
"master": {
"url": "http://host/master.mp4"
},
"poster": {
"url": "http://learning-services-media.brightcove.com/images/for_video/Water-In-Motion-poster.png",
"width": 640,
"height": 360
},
"thumbnail": {
"url": "http://learning-services-media.brightcove.com/images/for_video/Water-In-Motion-thumbnail.png",
"width": 160,
"height": 90
},
"capture-images": False,
"callbacks": ["http://solutions.brightcove.com/bcls/di-api/di-callbacks.php"]
}
transcripts = [
{
"url": "http://learning-services-media.brightcove.com/captions/for_video/Water-in-Motion.vtt",
"srclang": "en",
"kind": "captions",
"label": "EN",
"default": True
},
{
"url": "http://learning-services-media.brightcove.com/captions/for_video/Water-in-Motion.vtt",
"srclang": "uk",
"kind": "captions",
"label": "UK",
"default": False
}
]
outcomes = (
(
'no_credentials',
{
'default_transcripts': [],
'message': 'No API credentials provided'
}
),
(
'fetch_transcripts_exception',
{
'default_transcripts': [],
'message': 'No timed transcript may be fetched from a video platform.'
}
),
(
'no_captions_data',
{
'default_transcripts': [],
'message': 'For now, video platform doesn\'t have any timed transcript for this video.'
}
),
(
'success',
{
'default_transcripts': _default_transcripts,
'message': ''
}
)
)
to_return = ['default_transcripts', 'message']
def api_client_get(self):
"""
Mock for `api_client` method.
"""
if self.event == 'fetch_transcripts_exception':
self.side_effect = self.mock()
elif self.event == 'no_captions_data':
self.return_value = ResponseStub(status_code=200, body=json.dumps(self._response))
else:
ret = copy(self._response)
ret['text_tracks'] = self.transcripts
self.return_value = ResponseStub(status_code=200, body=json.dumps(ret))
return self
def no_credentials(self):
"""
Return xblock metadata.
"""
if self.event == 'no_credentials':
return {'client_id': '', 'client_secret': ''}
return self.mock
# @XBlock.register_temp_plugin(brightcove.BrightcovePlayer, 'wistia')
def apply_mock(self, mocked_objects):
"""
Save state of default transcripts related entities before mocks are applied.
"""
if not self.xblock:
raise VideoXBlockMockException("`xblock` parameter is required for %s." % self.__class__)
mocked_objects.append({
'obj': brightcove.BrightcoveApiClient,
'attrs': ['get', ],
'value': [deepcopy(brightcove.BrightcoveApiClient.get), ]
})
mocked_objects.append({
'obj': self.xblock,
'attrs': ['metadata', ],
'value': [deepcopy(self.xblock.metadata), ]
})
brightcove.BrightcoveApiClient.get = BrightcoveDefaultTranscriptsMock(
mock_magic=brightcove.BrightcoveApiClientError, event=self.event
).api_client_get()
self.xblock.metadata = self.no_credentials()
return mocked_objects
class BrightcoveDownloadTranscriptMock(RequestsMock):
"""
Brightcove download default transcript mock class.
"""
_vtt = """WEBVTT
00:06.047 --> 00:06.068
Hi.
00:06.070 --> 00:08.041
I'm Bob Bailey, a Learning Specialist with Brightcove.
00:09.041 --> 00:11.003
In this video, we'll learn about Brightcove Smart Players
00:21.052 --> 00:23.027
the next few years.
00:25.042 --> 00:27.094
accessed from mobile devices."""
outcomes = (
('wrong_arguments', {'transcript': [], 'message': '`url` parameter is required.'}),
('success', {'transcript': _vtt, 'message': ''})
)
to_return = ['transcript', 'message']
def get(self):
"""
Substitute requests.get method.
"""
self.return_value = ResponseStub(status_code=200, body=self._vtt)
return lambda x: self.return_value
|
willowrose/QubitCoin
|
refs/heads/master
|
qa/rpc-tests/getblocktemplate_proposals.py
|
145
|
#!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from binascii import a2b_hex, b2a_hex
from hashlib import sha256
from struct import pack
def check_array_result(object_array, to_match, expected):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
"""
num_matched = 0
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
for key,value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
num_matched = num_matched+1
if num_matched == 0:
raise AssertionError("No objects matched %s"%(str(to_match)))
def b2x(b):
return b2a_hex(b).decode('ascii')
# NOTE: This does not work for signed numbers (set the high bit) or zero (use b'\0')
def encodeUNum(n):
s = bytearray(b'\1')
while n > 127:
s[0] += 1
s.append(n % 256)
n //= 256
s.append(n)
return bytes(s)
def varlenEncode(n):
if n < 0xfd:
return pack('<B', n)
if n <= 0xffff:
return b'\xfd' + pack('<H', n)
if n <= 0xffffffff:
return b'\xfe' + pack('<L', n)
return b'\xff' + pack('<Q', n)
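# Worked examples of the two encoders above:
#   encodeUNum(1) == b'\x01\x01' (length prefix, then the value little-endian)
#   encodeUNum(300) == b'\x02\x2c\x01'
#   varlenEncode(100) == b'\x64' (CompactSize: a single byte below 0xfd)
#   varlenEncode(300) == b'\xfd\x2c\x01' (0xfd marker + 16-bit little-endian)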
def dblsha(b):
return sha256(sha256(b).digest()).digest()
def genmrklroot(leaflist):
cur = leaflist
while len(cur) > 1:
n = []
if len(cur) & 1:
cur.append(cur[-1])
for i in range(0, len(cur), 2):
n.append(dblsha(cur[i] + cur[i+1]))
cur = n
return cur[0]
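# template_to_bytes() below assembles the standard Bitcoin block layout: an 80-byte
# header (version, previous block hash, merkle root, timestamp, bits, nonce, with
# hashes stored byte-reversed) followed by a CompactSize transaction count and the
# raw transactions.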
def template_to_bytes(tmpl, txlist):
blkver = pack('<L', tmpl['version'])
mrklroot = genmrklroot(list(dblsha(a) for a in txlist))
timestamp = pack('<L', tmpl['curtime'])
nonce = b'\0\0\0\0'
blk = blkver + a2b_hex(tmpl['previousblockhash'])[::-1] + mrklroot + timestamp + a2b_hex(tmpl['bits'])[::-1] + nonce
blk += varlenEncode(len(txlist))
for tx in txlist:
blk += tx
return blk
def template_to_hex(tmpl, txlist):
return b2x(template_to_bytes(tmpl, txlist))
def assert_template(node, tmpl, txlist, expect):
rsp = node.getblocktemplate({'data':template_to_hex(tmpl, txlist),'mode':'proposal'})
if rsp != expect:
raise AssertionError('unexpected: %s' % (rsp,))
class GetBlockTemplateProposalTest(BitcoinTestFramework):
'''
Test block proposals with getblocktemplate.
'''
def run_test(self):
node = self.nodes[0]
node.generate(1) # Mine a block to leave initial block download
tmpl = node.getblocktemplate()
if 'coinbasetxn' not in tmpl:
rawcoinbase = encodeUNum(tmpl['height'])
rawcoinbase += b'\x01-'
hexcoinbase = b2x(rawcoinbase)
hexoutval = b2x(pack('<Q', tmpl['coinbasevalue']))
tmpl['coinbasetxn'] = {'data': '01000000' + '01' + '0000000000000000000000000000000000000000000000000000000000000000ffffffff' + ('%02x' % (len(rawcoinbase),)) + hexcoinbase + 'fffffffe' + '01' + hexoutval + '00' + '00000000'}
txlist = list(bytearray(a2b_hex(a['data'])) for a in (tmpl['coinbasetxn'],) + tuple(tmpl['transactions']))
# Test 0: Capability advertised
assert('proposal' in tmpl['capabilities'])
# NOTE: This test currently FAILS (regtest mode doesn't enforce block height in coinbase)
## Test 1: Bad height in coinbase
#txlist[0][4+1+36+1+1] += 1
#assert_template(node, tmpl, txlist, 'FIXME')
#txlist[0][4+1+36+1+1] -= 1
# Test 2: Bad input hash for gen tx
txlist[0][4+1] += 1
assert_template(node, tmpl, txlist, 'bad-cb-missing')
txlist[0][4+1] -= 1
# Test 3: Truncated final tx
lastbyte = txlist[-1].pop()
try:
assert_template(node, tmpl, txlist, 'n/a')
except JSONRPCException:
pass # Expected
txlist[-1].append(lastbyte)
# Test 4: Add an invalid tx to the end (duplicate of gen tx)
txlist.append(txlist[0])
assert_template(node, tmpl, txlist, 'bad-txns-duplicate')
txlist.pop()
# Test 5: Add an invalid tx to the end (non-duplicate)
txlist.append(bytearray(txlist[0]))
txlist[-1][4+1] = b'\xff'
assert_template(node, tmpl, txlist, 'bad-txns-inputs-missingorspent')
txlist.pop()
# Test 6: Future tx lock time
txlist[0][-4:] = b'\xff\xff\xff\xff'
assert_template(node, tmpl, txlist, 'bad-txns-nonfinal')
txlist[0][-4:] = b'\0\0\0\0'
# Test 7: Bad tx count
txlist.append(b'')
try:
assert_template(node, tmpl, txlist, 'n/a')
except JSONRPCException:
pass # Expected
txlist.pop()
# Test 8: Bad bits
realbits = tmpl['bits']
tmpl['bits'] = '1c0000ff' # impossible in the real world
assert_template(node, tmpl, txlist, 'bad-diffbits')
tmpl['bits'] = realbits
# Test 9: Bad merkle root
rawtmpl = template_to_bytes(tmpl, txlist)
rawtmpl[4+32] = (rawtmpl[4+32] + 1) % 0x100
rsp = node.getblocktemplate({'data':b2x(rawtmpl),'mode':'proposal'})
if rsp != 'bad-txnmrklroot':
raise AssertionError('unexpected: %s' % (rsp,))
# Test 10: Bad timestamps
realtime = tmpl['curtime']
tmpl['curtime'] = 0x7fffffff
assert_template(node, tmpl, txlist, 'time-too-new')
tmpl['curtime'] = 0
assert_template(node, tmpl, txlist, 'time-too-old')
tmpl['curtime'] = realtime
# Test 11: Valid block
assert_template(node, tmpl, txlist, None)
# Test 12: Orphan block
tmpl['previousblockhash'] = 'ff00' * 16
assert_template(node, tmpl, txlist, 'inconclusive-not-best-prevblk')
if __name__ == '__main__':
GetBlockTemplateProposalTest().main()
|
joyeshmishra/spark-tk
|
refs/heads/master
|
regression-tests/sparktkregtests/testcases/frames/frame_group_by_test.py
|
13
|
# vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Test functionality of group_by, including aggregation_arguments """
import unittest
import pandas as pd
import numpy as np
import math
from sparktkregtests.lib import sparktk_test
class GroupByTest(sparktk_test.SparkTKTestCase):
# Aggregates and names for non-numeric aggregates
# (some aggregates are not defined on integers)
# atk aggregates, then numpy aggregates
pd_cols_str = ['size', '<lambda>', 'max', 'min']
numpy_aggs_str = ['size',
lambda x: pd.Series.nunique(x, False),
'max',
'min']
atk_cols_str = ['_COUNT', '_COUNT_DISTINCT', '_MAX', '_MIN']
pd_cols = ['mean', 'size', '<lambda>', 'max',
'min', 'std', 'nansum', 'var']
numpy_aggs = ['mean',
'size',
lambda x: pd.Series.nunique(x, False),
'max',
'min',
'std',
np.nansum,
'var']
atk_cols = ['_AVG', '_COUNT', '_COUNT_DISTINCT', '_MAX',
'_MIN', '_STDEV', '_SUM', '_VAR']
def setUp(self):
"""Build test frame"""
super(GroupByTest, self).setUp()
# Aggregates to test on strings
self.aggs_str = [self.context.agg.count,
self.context.agg.count_distinct,
self.context.agg.max,
self.context.agg.min]
# Aggregates for numeric columns
self.aggs = [self.context.agg.avg,
self.context.agg.count,
self.context.agg.count_distinct,
self.context.agg.max,
self.context.agg.min,
self.context.agg.stdev,
self.context.agg.sum,
self.context.agg.var]
schema_colors = [("Int32_0_15", int),
("Int32_0_31", int),
("colors", str),
("Int64_0_15", int),
("Int64_0_31", int),
("Float32_0_15", float),
("Float32_0_31", float),
("Float64_0_15", float),
("Float64_0_31", float)]
dataset = self.get_file("colors_32_9cols_128rows.csv")
self.frame = self.context.frame.import_csv(
dataset, schema=schema_colors)
def test_stats_on_string_avg(self):
"""Non-numeric aggregates error on non-numeric column"""
with self.assertRaises(Exception):
self.frame.group_by('colors', {'colors': self.context.agg.avg})
def test_stats_on_string_stdev(self):
"""Non-numeric aggregates error on non-numeric column"""
with self.assertRaises(Exception):
self.frame.group_by('colors', {'colors': self.context.agg.stdev})
def test_stats_on_string_sum(self):
"""Non-numeric aggregates error on non-numeric column"""
with self.assertRaises(Exception):
self.frame.group_by('colors', {'colors': self.context.agg.sum})
def test_stats_on_string_var(self):
"""Non-numeric aggregates error on non-numeric column"""
with self.assertRaises(Exception):
self.frame.group_by('colors', {'colors': self.context.agg.var})
def test_invalid_column_name(self):
"""Aggregate on non-existant column errors"""
with self.assertRaises(Exception):
self.frame.group_by(
'InvalidColumnName', {'colors': self.context.agg.var})
def test_group_int32_standard(self):
"""Test groupby on 1 column, int32"""
stats = self.frame.group_by(['Int32_0_15'], {'Int32_0_31': self.aggs})
self._validate(stats, 'Int32_0_31', ['Int32_0_15'])
def test_group_float32_standard(self):
"""Test groupby on 1 column, float32"""
stats = self.frame.group_by(
['Float32_0_15'], {'Float32_0_31': self.aggs})
self._validate(stats, 'Float32_0_31', ['Float32_0_15'])
def test_group_float64_standard(self):
"""Test groupby on 1 column, float64"""
stats = self.frame.group_by(
['Float64_0_15'], {'Float64_0_31': self.aggs})
self._validate(stats, 'Float64_0_31', ['Float64_0_15'])
def test_group_int64_standard(self):
"""Test groupby on 1 column, int64"""
stats = self.frame.group_by(['Int64_0_15'], {'Int64_0_31': self.aggs})
self._validate(stats, 'Int64_0_31', ['Int64_0_15'])
def test_group_by_str_standard(self):
"""Test groupby on 1 column, string"""
stats = self.frame.group_by(['colors'], {'Int32_0_31': self.aggs})
self._validate_str(stats, 'Int32_0_31', ['colors'])
def test_group_by_str_agg_str(self):
"""Test groupby on 1 column, string, aggregate is string"""
stats = self.frame.group_by(['colors'], {'colors': self.aggs_str})
self._validate_str(stats, 'colors', ['colors'])
def test_group_int32_multiple_cols(self):
"""Test groupby on multiple columns, int32"""
stats = self.frame.group_by(
['Int32_0_15', 'Int32_0_31'], {'Int32_0_31': self.aggs})
self._validate(stats, 'Int32_0_31', ['Int32_0_15', 'Int32_0_31'])
def test_group_float32_multiple_cols(self):
"""Test groupby on multiple columns, float32"""
stats = self.frame.group_by(
['Float32_0_15', 'Float32_0_31'], {'Float32_0_31': self.aggs})
self._validate(stats, 'Float32_0_31', ['Float32_0_15', 'Float32_0_31'])
def test_group_float64_multiple_cols(self):
"""Test groupby on multiple columns, float64"""
stats = self.frame.group_by(
['Float64_0_15', 'Float64_0_31'], {'Float32_0_31': self.aggs})
self._validate(stats, 'Float32_0_31', ['Float64_0_15', 'Float64_0_31'])
def test_group_int64_multiple_cols(self):
"""Test groupby on multiple columns, int64"""
stats = self.frame.group_by(
['Int64_0_15', 'Int64_0_31'], {'Int64_0_31': self.aggs})
self._validate(stats, 'Int64_0_31', ['Int64_0_15', 'Int64_0_31'])
def test_groupby_str_multiple_cols(self):
"""Test groupby on multiple columns, string"""
stats = self.frame.group_by(
['colors', 'Int32_0_15'], {'colors': self.aggs_str})
self._validate_str(stats, 'colors', ['colors', 'Int32_0_15'])
def test_group_int32_none(self):
"""Test groupby none, int32 aggregate"""
stats = self.frame.group_by(None, {'Int32_0_31': self.aggs})
self._validate_single_group(stats, None, 'Int32_0_31')
def test_group_float32_none(self):
"""Test groupby none, float32 aggregate"""
stats = self.frame.group_by(None, {'Float32_0_31': self.aggs})
self._validate_single_group(stats, None, 'Float32_0_31')
def test_group_float64_none(self):
"""Test groupby none, float64 aggregate"""
stats = self.frame.group_by(None, {'Float64_0_31': self.aggs})
self._validate_single_group(stats, None, 'Float64_0_31')
def test_group_int64_none(self):
"""Test groupby none, int64 aggregate"""
stats = self.frame.group_by(None, {'Int64_0_31': self.aggs})
self._validate_single_group(stats, None, 'Int64_0_31')
def _validate_single_group(self, stats, groupby_cols, aggregator):
# Validate the result of atk groupby and pandas groupby are the same
# when there is single group (none)
pd_stats = stats.to_pandas(stats.count())
new_frame = self.frame.to_pandas(self.frame.count())
gb = new_frame.groupby(lambda x: 0)[aggregator].agg(self.numpy_aggs)
int_cols = map(lambda x: aggregator+x, self.atk_cols)
for k, l in zip(int_cols, self.pd_cols):
self.assertAlmostEqual(gb.loc[0][l], pd_stats.loc[0][k], places=4)
def _validate(self, stats, aggregator, groupby_cols):
# Validate that the atk and pandas groupby results are the same.
# Cast the index to integer and use all aggregates, as the column
# being aggregated is numeric.
self._validate_helper(
stats, aggregator, groupby_cols, self.numpy_aggs,
self.pd_cols, self.atk_cols, int)
def _validate_str(self, stats, aggregator, groupby_cols):
# Validate that the atk and pandas groupby results are the same.
# Keep the index values as-is and use string aggregates, as the column
# being aggregated is a string.
self._validate_helper(
stats, aggregator, groupby_cols, self.numpy_aggs_str,
self.pd_cols_str, self.atk_cols_str, lambda x: x)
def _validate_helper(self, stats, aggregator, groupby_cols,
aggs, pd_cols, atk_cols, mapper):
# Get and compare results of atk and pandas, cast as appropriate
pd_stats = stats.to_pandas(stats.count())
new_frame = self.frame.to_pandas(self.frame.count())
gb = new_frame.groupby(groupby_cols)[aggregator].agg(aggs)
int_cols = map(lambda x: aggregator+x, atk_cols)
for _, i in pd_stats.iterrows():
for k, l in zip(int_cols, pd_cols):
if ((type(i[k]) is np.float64 or type(i[k]) is float) and
math.isnan(i[k])):
self.assertTrue(
math.isnan(
gb.loc[tuple(
map(lambda x: mapper(i[x]),
groupby_cols))][l]))
else:
self.assertAlmostEqual(
gb.loc[tuple(
map(lambda x: mapper(i[x]), groupby_cols))][l],
i[k], places=4)
if __name__ == "__main__":
unittest.main()
|
rohit21122012/DCASE2013
|
refs/heads/master
|
runs/2016/dnn2016med_traps/traps3/src/ui.py
|
56
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import sys
spinner = itertools.cycle(["`", "*", ";", ","])
def title(text):
"""Prints title
Parameters
----------
text : str
Title
Returns
-------
Nothing
"""
print "--------------------------------"
print text
print "--------------------------------"
def section_header(text):
"""Prints section header
Parameters
----------
text : str
Section header
Returns
-------
Nothing
"""
print " "
print text
print "================================"
def foot():
"""Prints foot
Parameters
----------
Nothing
Returns
-------
Nothing
"""
print " [Done] "
def progress(title_text=None, fold=None, percentage=None, note=None, label=None):
"""Prints progress line
Parameters
----------
title_text : str or None
Title
fold : int > 0 [scalar] or None
Fold number
percentage : float [0-1] or None
Progress percentage.
note : str or None
Note
label : str or None
Label
Returns
-------
Nothing
"""
if title_text is not None and fold is not None and percentage is not None and note is not None and label is None:
print " {:2s} {:20s} fold[{:1d}] [{:3.0f}%] [{:20s}] \r".format(spinner.next(),
title_text, fold,
percentage * 100, note),
elif title_text is not None and fold is not None and percentage is None and note is not None and label is None:
print " {:2s} {:20s} fold[{:1d}] [{:20s}] \r".format(spinner.next(), title_text,
fold, note),
elif title_text is not None and fold is None and percentage is not None and note is not None and label is None:
print " {:2s} {:20s} [{:3.0f}%] [{:20s}] \r".format(spinner.next(), title_text,
percentage * 100, note),
elif title_text is not None and fold is None and percentage is not None and note is None and label is None:
print " {:2s} {:20s} [{:3.0f}%] \r".format(spinner.next(), title_text,
percentage * 100),
elif title_text is not None and fold is None and percentage is None and note is not None and label is None:
print " {:2s} {:20s} [{:20s}] \r".format(spinner.next(), title_text, note),
elif title_text is not None and fold is None and percentage is None and note is not None and label is not None:
print " {:2s} {:20s} [{:20s}] [{:20s}] \r".format(spinner.next(),
title_text, label, note),
elif title_text is not None and fold is None and percentage is not None and note is not None and label is not None:
print " {:2s} {:20s} [{:20s}] [{:3.0f}%] [{:20s}] \r".format(spinner.next(),
title_text, label,
percentage * 100, note),
elif title_text is not None and fold is not None and percentage is not None and note is not None and label is not None:
print " {:2s} {:20s} fold[{:1d}] [{:10s}] [{:3.0f}%] [{:20s}] \r".format(
spinner.next(), title_text, fold, label, percentage * 100, note),
elif title_text is not None and fold is not None and percentage is None and note is None and label is not None:
print " {:2s} {:20s} fold[{:1d}] [{:10s}] \r".format(
spinner.next(), title_text, fold, label),
sys.stdout.flush()
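# A minimal usage sketch (hypothetical arguments): the call
#   progress(title_text='Feature extraction', fold=1, percentage=0.25, note='file 32/128')
# matches the first branch above and prints a carriage-returned status line of the
# form " <spinner> Feature extraction fold[1] [ 25%] [file 32/128 ]".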
|
drgarcia1986/muffin-elasticsearch
|
refs/heads/master
|
tests.py
|
1
|
import muffin
import pytest
@pytest.fixture(scope='session')
def app(loop):
return muffin.Application(
'elasticsearch_app', loop=loop,
PLUGINS=('muffin_elasticsearch',)
)
def test_plugin_register(app):
assert 'elasticsearch' in app.ps
assert app.ps.elasticsearch.conn
@pytest.mark.async
def test_elasticsearch_create_get(app):
body = {'str': 'foo', 'int': 1}
result = yield from app.ps.elasticsearch.create(
index='test',
doc_type='test',
id=42,
body=body
)
assert 'created' in result, result.keys()
assert result['created'] is True, result['created']
result = yield from app.ps.elasticsearch.get(
index='test',
doc_type='test',
id=42
)
assert '_source' in result, result.keys()
assert result['_source'] == body, result
|
bokzor/guimovie
|
refs/heads/master
|
build/releases/Guimovie/mac/Guimovie.app/Contents/Resources/app.nw/node_modules/nw-gyp/gyp/pylib/gyp/MSVSUtil.py
|
566
|
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions shared amongst the Windows generators."""
import copy
import os
_TARGET_TYPE_EXT = {
'executable': '.exe',
'loadable_module': '.dll',
'shared_library': '.dll',
}
def _GetLargePdbShimCcPath():
"""Returns the path of the large_pdb_shim.cc file."""
this_dir = os.path.abspath(os.path.dirname(__file__))
src_dir = os.path.abspath(os.path.join(this_dir, '..', '..'))
win_data_dir = os.path.join(src_dir, 'data', 'win')
large_pdb_shim_cc = os.path.join(win_data_dir, 'large-pdb-shim.cc')
return large_pdb_shim_cc
def _DeepCopySomeKeys(in_dict, keys):
"""Performs a partial deep-copy on |in_dict|, only copying the keys in |keys|.
Arguments:
in_dict: The dictionary to copy.
keys: The keys to be copied. If a key is in this list and doesn't exist in
|in_dict| this is not an error.
Returns:
The partially deep-copied dictionary.
"""
d = {}
for key in keys:
if key not in in_dict:
continue
d[key] = copy.deepcopy(in_dict[key])
return d
def _SuffixName(name, suffix):
"""Add a suffix to the end of a target.
Arguments:
name: name of the target (foo#target)
suffix: the suffix to be added
Returns:
Target name with suffix added (foo_suffix#target)
"""
parts = name.rsplit('#', 1)
parts[0] = '%s_%s' % (parts[0], suffix)
return '#'.join(parts)
def _ShardName(name, number):
"""Add a shard number to the end of a target.
Arguments:
name: name of the target (foo#target)
number: shard number
Returns:
Target name with shard added (foo_1#target)
"""
return _SuffixName(name, str(number))
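# Worked examples of the helpers above:
#   _SuffixName('foo#target', 'copy') -> 'foo_copy#target'
#   _ShardName('foo#target', 2) -> 'foo_2#target'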
def ShardTargets(target_list, target_dicts):
"""Shard some targets apart to work around the linkers limits.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
Returns:
Tuple of the new sharded versions of the inputs.
"""
# Gather the targets to shard, and how many pieces.
targets_to_shard = {}
for t in target_dicts:
shards = int(target_dicts[t].get('msvs_shard', 0))
if shards:
targets_to_shard[t] = shards
# Shard target_list.
new_target_list = []
for t in target_list:
if t in targets_to_shard:
for i in range(targets_to_shard[t]):
new_target_list.append(_ShardName(t, i))
else:
new_target_list.append(t)
# Shard target_dict.
new_target_dicts = {}
for t in target_dicts:
if t in targets_to_shard:
for i in range(targets_to_shard[t]):
name = _ShardName(t, i)
new_target_dicts[name] = copy.copy(target_dicts[t])
new_target_dicts[name]['target_name'] = _ShardName(
new_target_dicts[name]['target_name'], i)
sources = new_target_dicts[name].get('sources', [])
new_sources = []
for pos in range(i, len(sources), targets_to_shard[t]):
new_sources.append(sources[pos])
new_target_dicts[name]['sources'] = new_sources
else:
new_target_dicts[t] = target_dicts[t]
# Shard dependencies.
for t in new_target_dicts:
dependencies = copy.copy(new_target_dicts[t].get('dependencies', []))
new_dependencies = []
for d in dependencies:
if d in targets_to_shard:
for i in range(targets_to_shard[d]):
new_dependencies.append(_ShardName(d, i))
else:
new_dependencies.append(d)
new_target_dicts[t]['dependencies'] = new_dependencies
return (new_target_list, new_target_dicts)
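# A worked sketch with a hypothetical target: an entry 'base/base.gyp:foo#target'
# whose dict sets 'msvs_shard': 2 is rewritten into 'base/base.gyp:foo_0#target'
# and 'base/base.gyp:foo_1#target', its sources are distributed round-robin between
# the two shards, and any dependencies on the original name are repointed at both
# shard names.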
def _GetPdbPath(target_dict, config_name, vars):
"""Returns the path to the PDB file that will be generated by a given
configuration.
The lookup proceeds as follows:
- Look for an explicit path in the VCLinkerTool configuration block.
- Look for an 'msvs_large_pdb_path' variable.
- Use '<(PRODUCT_DIR)/<(product_name).(exe|dll).pdb' if 'product_name' is
specified.
- Use '<(PRODUCT_DIR)/<(target_name).(exe|dll).pdb'.
Arguments:
target_dict: The target dictionary to be searched.
config_name: The name of the configuration of interest.
vars: A dictionary of common GYP variables with generator-specific values.
Returns:
The path of the corresponding PDB file.
"""
config = target_dict['configurations'][config_name]
msvs = config.setdefault('msvs_settings', {})
linker = msvs.get('VCLinkerTool', {})
pdb_path = linker.get('ProgramDatabaseFile')
if pdb_path:
return pdb_path
variables = target_dict.get('variables', {})
pdb_path = variables.get('msvs_large_pdb_path', None)
if pdb_path:
return pdb_path
pdb_base = target_dict.get('product_name', target_dict['target_name'])
pdb_base = '%s%s.pdb' % (pdb_base, _TARGET_TYPE_EXT[target_dict['type']])
pdb_path = vars['PRODUCT_DIR'] + '/' + pdb_base
return pdb_path
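# A worked example with hypothetical values: for an 'executable' target named 'base'
# with no explicit ProgramDatabaseFile or msvs_large_pdb_path, and
# vars['PRODUCT_DIR'] == 'out/Release', the fallback path is 'out/Release/base.exe.pdb'.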
def InsertLargePdbShims(target_list, target_dicts, vars):
"""Insert a shim target that forces the linker to use 4KB pagesize PDBs.
This is a workaround for targets with PDBs greater than 1GB in size, the
limit for the 1KB pagesize PDBs created by the linker by default.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
vars: A dictionary of common GYP variables with generator-specific values.
Returns:
Tuple of the shimmed version of the inputs.
"""
# Determine which targets need shimming.
targets_to_shim = []
for t in target_dicts:
target_dict = target_dicts[t]
# We only want to shim targets that have msvs_large_pdb enabled.
if not int(target_dict.get('msvs_large_pdb', 0)):
continue
# This is intended for executable, shared_library and loadable_module
# targets where every configuration is set up to produce a PDB output.
# If any of these conditions is not true then the shim logic will fail
# below.
targets_to_shim.append(t)
large_pdb_shim_cc = _GetLargePdbShimCcPath()
for t in targets_to_shim:
target_dict = target_dicts[t]
target_name = target_dict.get('target_name')
base_dict = _DeepCopySomeKeys(target_dict,
['configurations', 'default_configuration', 'toolset'])
# This is the dict for copying the source file (part of the GYP tree)
# to the intermediate directory of the project. This is necessary because
# we can't always build a relative path to the shim source file (on Windows
# GYP and the project may be on different drives), and Ninja hates absolute
# paths (it ends up generating the .obj and .obj.d alongside the source
# file, polluting GYPs tree).
copy_suffix = 'large_pdb_copy'
copy_target_name = target_name + '_' + copy_suffix
full_copy_target_name = _SuffixName(t, copy_suffix)
shim_cc_basename = os.path.basename(large_pdb_shim_cc)
shim_cc_dir = vars['SHARED_INTERMEDIATE_DIR'] + '/' + copy_target_name
shim_cc_path = shim_cc_dir + '/' + shim_cc_basename
copy_dict = copy.deepcopy(base_dict)
copy_dict['target_name'] = copy_target_name
copy_dict['type'] = 'none'
copy_dict['sources'] = [ large_pdb_shim_cc ]
copy_dict['copies'] = [{
'destination': shim_cc_dir,
'files': [ large_pdb_shim_cc ]
}]
# This is the dict for the PDB generating shim target. It depends on the
# copy target.
shim_suffix = 'large_pdb_shim'
shim_target_name = target_name + '_' + shim_suffix
full_shim_target_name = _SuffixName(t, shim_suffix)
shim_dict = copy.deepcopy(base_dict)
shim_dict['target_name'] = shim_target_name
shim_dict['type'] = 'static_library'
shim_dict['sources'] = [ shim_cc_path ]
shim_dict['dependencies'] = [ full_copy_target_name ]
# Set up the shim to output its PDB to the same location as the final linker
# target.
for config_name, config in shim_dict.get('configurations').iteritems():
pdb_path = _GetPdbPath(target_dict, config_name, vars)
# A few keys that we don't want to propagate.
for key in ['msvs_precompiled_header', 'msvs_precompiled_source', 'test']:
config.pop(key, None)
msvs = config.setdefault('msvs_settings', {})
# Update the compiler directives in the shim target.
compiler = msvs.setdefault('VCCLCompilerTool', {})
compiler['DebugInformationFormat'] = '3'
compiler['ProgramDataBaseFileName'] = pdb_path
# Set the explicit PDB path in the appropriate configuration of the
# original target.
config = target_dict['configurations'][config_name]
msvs = config.setdefault('msvs_settings', {})
linker = msvs.setdefault('VCLinkerTool', {})
linker['GenerateDebugInformation'] = 'true'
linker['ProgramDatabaseFile'] = pdb_path
# Add the new targets. They must go to the beginning of the list so that
# the dependency generation works as expected in ninja.
target_list.insert(0, full_copy_target_name)
target_list.insert(0, full_shim_target_name)
target_dicts[full_copy_target_name] = copy_dict
target_dicts[full_shim_target_name] = shim_dict
# Update the original target to depend on the shim target.
target_dict.setdefault('dependencies', []).append(full_shim_target_name)
return (target_list, target_dicts)
|
guiquanz/msaf
|
refs/heads/master
|
msaf/algorithms/interface.py
|
1
|
"""Interface for all the algorithms in MSAF."""
import numpy as np
import msaf.input_output as io
import msaf.utils as U
class SegmenterInterface:
"""This class is an interface for all the segmenter algorithms included
in MSAF. These segmenters must inherit from it and implement one of the
following methods:
processFlat()
processHierarchical()
Additionally, two private helper functions are provided:
- preprocess
- postprocess
These are meant to do common tasks for all the segmenters and they
should be called inside the process method if needed.
All segmenters must return estimated times for the boundaries (est_times),
and estimated labels (est_labels), **even if they can't compute them**.
The three types of algorithms and their behaviors are:
- Computes boundaries and labels:
If in_bound_times is None:
Compute the est_times
Else:
Do not compute est_times, simply use in_bound_times instead
If in_labels is None:
Compute the est_labels
Else:
Do not compute est_labels, simply use in_labels instead
- Computes boundaries only:
Compute boundaries and return est_labels as None.
- Computes labels only:
Use in_bound_times in order to compute the labels.
Return est_times as in_bound_times and the computed labels.
In these cases, est_times or est_labels will be empty (None).
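A minimal illustrative subclass is sketched at the end of this module.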
"""
def __init__(self, audio_file, in_bound_idxs=None, feature="hpcp",
annot_beats=False, framesync=False, features=None, **config):
"""Inits the Segmenter.
Parameters
----------
audio_file: str
Path to the audio file.
in_bound_idxs: np.array
Array containing the frame indices of the previously found
boundaries. `None` to compute them.
feature: str
Identifier of the features (e.g., hpcp, mfcc)
annot_beats: boolean
Whether to use annotated beats or estimated ones.
framesync: boolean
Whether to use frame-synchronous or beat-synchronous features.
features: dict
Previously computed features. `None` for reading them.
config: dict
Configuration for the given algorithm (see module's __config.py__).
"""
self.audio_file = audio_file
self.in_bound_idxs = in_bound_idxs
self.feature_str = feature
self.annot_beats = annot_beats
self.framesync = framesync
self.config = config
self.features = features
def processFlat(self):
"""Main process to obtain the flat segmentation of a given track."""
raise NotImplementedError("This method does not return flat "
"segmentations.")
def processHierarchical(self):
"""Main process to obtian the hierarchical segmentation of a given
track."""
raise NotImplementedError("This method does not return hierarchical "
"segmentations.")
def _preprocess(self, valid_features=["hpcp", "tonnetz", "mfcc", "cqt"],
normalize=True):
"""This method obtains the actual features."""
# Read features
self.hpcp, self.mfcc, self.tonnetz, self.cqt, beats, dur, self.anal = \
io.get_features(self.audio_file, annot_beats=self.annot_beats,
framesync=self.framesync,
pre_features=self.features)
# Use specific feature
if self.feature_str not in valid_features:
raise RuntimeError("Feature %s is not valid for algorithm: %s "
"(valid features are %s)." %
(self.feature_str, __name__, valid_features))
else:
try:
# getattr is safer and more idiomatic than eval for attribute lookup.
F = getattr(self, self.feature_str)
except AttributeError:
raise RuntimeError("Feature %s is not supported by MSAF" %
(self.feature_str))
# Normalize if needed
if normalize:
F = U.lognormalize_chroma(F)
return F
def _postprocess(self, est_idxs, est_labels):
"""Post processes the estimations from the algorithm, removing empty
segments and making sure the lenghts of the boundaries and labels
match."""
# Remove empty segments if needed
est_idxs, est_labels = U.remove_empty_segments(est_idxs, est_labels)
assert len(est_idxs) - 1 == len(est_labels), "Number of boundaries " \
"(%d) and number of labels(%d) don't match" % (len(est_idxs),
len(est_labels))
# Make sure the indices are integers
est_idxs = np.asarray(est_idxs, dtype=int)
return est_idxs, est_labels
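# ---------------------------------------------------------------------------
# Illustrative sketch (not part of MSAF): a minimal subclass following the
# interface contract documented above, covering the "computes boundaries
# only" case. The class name, the fixed 100-frame spacing, and the -1
# placeholder labels are all hypothetical choices for illustration.
# ---------------------------------------------------------------------------
class ExampleEvenSegmenter(SegmenterInterface):
    def processFlat(self):
        """Return evenly spaced boundary frame indices and placeholder labels."""
        # Obtain the (optionally normalized) feature matrix: frames x features.
        F = self._preprocess()
        # Hypothetical strategy: one boundary every 100 frames.
        est_idxs = np.arange(0, F.shape[0], 100)
        # One placeholder label (-1) per segment, so the lengths satisfy the
        # check in _postprocess (len(est_idxs) - 1 == len(est_labels)).
        est_labels = -np.ones(len(est_idxs) - 1)
        # Remove empty segments and sanitize the outputs.
        return self._postprocess(est_idxs, est_labels)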
|
rc/sfepy
|
refs/heads/master
|
sfepy/discrete/fem/fields_hierarchic.py
|
3
|
import numpy as nm
from sfepy.base.base import assert_
from sfepy.discrete.fem.utils import prepare_remap, prepare_translate
from sfepy.discrete.common.dof_info import expand_nodes_to_dofs
from sfepy.discrete.fem.fields_base import VolumeField, H1Mixin
class H1HierarchicVolumeField(H1Mixin, VolumeField):
family_name = 'volume_H1_lobatto'
def _init_econn(self):
"""
Initialize the extended DOF connectivity and facet orientation array.
"""
VolumeField._init_econn(self)
self.ori = nm.zeros_like(self.econn)
def _setup_facet_orientations(self):
self.node_desc = self.poly_space.describe_nodes()
def _setup_edge_dofs(self):
"""
Setup edge DOF connectivity.
"""
if self.node_desc.edge is None:
return 0, None, None
return self._setup_facet_dofs(1,
self.node_desc.edge,
self.n_vertex_dof)
def _setup_face_dofs(self):
"""
Setup face DOF connectivity.
"""
if self.node_desc.face is None:
return 0, None, None
return self._setup_facet_dofs(self.domain.shape.tdim - 1,
self.node_desc.face,
self.n_vertex_dof + self.n_edge_dof)
def _setup_facet_dofs(self, dim, facet_desc, offset):
"""
Helper function to setup facet DOF connectivity, works for both
edges and faces.
"""
facet_desc = nm.array(facet_desc)
n_dof_per_facet = facet_desc.shape[1]
cmesh = self.domain.cmesh
facets = self.region.entities[dim]
ii = nm.arange(facets.shape[0], dtype=nm.int32)
all_dofs = offset + expand_nodes_to_dofs(ii, n_dof_per_facet)
# Prepare global facet id remapping to field-local numbering.
remap = prepare_remap(facets, cmesh.num[dim])
cconn = self.region.domain.cmesh.get_conn(self.region.tdim, dim)
offs = cconn.offsets
n_f = self.gel.edges.shape[0] if dim == 1 else self.gel.faces.shape[0]
n_fp = 2 if dim == 1 else self.gel.surface_facet.n_vertex
oris = cmesh.get_orientations(dim)
gcells = self.region.get_cells()
n_el = gcells.shape[0]
# Elements of facets.
iel = nm.arange(n_el, dtype=nm.int32).repeat(n_f)
ies = nm.tile(nm.arange(n_f, dtype=nm.int32), n_el)
aux = offs[gcells][:, None] + ies.reshape((n_el, n_f))
indices = cconn.indices[aux]
facets_of_cells = remap[indices].ravel()
# Define global facet dof numbers.
gdofs = offset + expand_nodes_to_dofs(facets_of_cells,
n_dof_per_facet)
# DOF columns in econn for each facet (repeating the same values for
# each element).
iep = facet_desc[ies]
self.econn[iel[:, None], iep] = gdofs
ori = oris[aux].ravel()
if (n_fp == 2) and (self.gel.name in ['2_4', '3_8']):
tp_edges = self.gel.edges
ecs = self.gel.coors[tp_edges]
# True = positive, False = negative edge orientation w.r.t.
# reference tensor product axes.
tp_edge_ori = (nm.diff(ecs, axis=1).sum(axis=2) > 0).squeeze()
aux = nm.tile(tp_edge_ori, n_el)
ori = nm.where(aux, ori, 1 - ori)
if n_fp == 2: # Edges.
# ori == 1 means the basis has to be multiplied by -1.
ps = self.poly_space
orders = ps.node_orders
eori = nm.repeat(ori[:, None], n_dof_per_facet, 1)
eoo = orders[iep] % 2 # Odd orders.
self.ori[iel[:, None], iep] = eori * eoo
elif n_fp == 3: # Triangular faces.
raise NotImplementedError
else: # Quadrilateral faces.
# ori encoding in 3 bits:
# 0: axis swap, 1: axis 1 sign, 2: axis 2 sign
# 0 = + or False, 1 = - or True
# 63 -> 000 = 0
# 0 -> 001 = 1
# 30 -> 010 = 2
# 33 -> 011 = 3
# 11 -> 100 = 4
# 7 -> 101 = 5
# 52 -> 110 = 6
# 56 -> 111 = 7
# Special cases:
# Both orders same and even -> 000
# Both orders same and odd -> 0??
# Bits 1, 2 are multiplied by (swapped) axial order % 2.
new = nm.repeat(nm.arange(8, dtype=nm.int32), 3)
translate = prepare_translate([31, 59, 63,
0, 1, 4,
22, 30, 62,
32, 33, 41,
11, 15, 43,
3, 6, 7,
20, 52, 60,
48, 56, 57], new)
ori = translate[ori]
eori = nm.repeat(ori[:, None], n_dof_per_facet, 1)
ps = self.poly_space
orders = ps.face_axes_nodes[iep - ps.face_indx[0]]
eoo = orders % 2
eoo0, eoo1 = eoo[..., 0], eoo[..., 1]
i0 = nm.where(eori < 4)
i1 = nm.where(eori >= 4)
eori[i0] = nm.bitwise_and(eori[i0], 2*eoo0[i0] + 5)
eori[i0] = nm.bitwise_and(eori[i0], eoo1[i0] + 6)
eori[i1] = nm.bitwise_and(eori[i1], eoo0[i1] + 6)
eori[i1] = nm.bitwise_and(eori[i1], 2*eoo1[i1] + 5)
self.ori[iel[:, None], iep] = eori
n_dof = n_dof_per_facet * facets.shape[0]
assert_(n_dof == nm.prod(all_dofs.shape))
return n_dof, all_dofs, remap
def _setup_bubble_dofs(self):
"""
Setup bubble DOF connectivity.
"""
if self.node_desc.bubble is None:
return 0, None, None
offset = self.n_vertex_dof + self.n_edge_dof + self.n_face_dof
n_dof_per_cell = self.node_desc.bubble.shape[0]
ii = self.region.get_cells()
remap = prepare_remap(ii, self.domain.cmesh.n_el)
n_cell = ii.shape[0]
n_dof = n_dof_per_cell * n_cell
all_dofs = nm.arange(offset, offset + n_dof, dtype=nm.int32)
all_dofs.shape = (n_cell, n_dof_per_cell)
iep = self.node_desc.bubble[0]
self.econn[:,iep:] = all_dofs
return n_dof, all_dofs, remap
def set_dofs(self, fun=0.0, region=None, dpn=None, warn=None):
"""
Set the values of DOFs in a given `region` using a function of space
coordinates or value `fun`.
"""
if region is None:
region = self.region
if dpn is None:
dpn = self.n_components
# Hack - use only vertex DOFs.
gnods = self.get_dofs_in_region(region, merge=False)
nods = nm.concatenate(gnods)
n_dof = dpn * nods.shape[0]
if nm.isscalar(fun):
vals = nm.zeros(n_dof, dtype=nm.dtype(type(fun)))
vals[:gnods[0].shape[0] * dpn] = fun
elif callable(fun):
coors = self.get_coor(gnods[0])
vv = nm.asarray(fun(coors))
if (vv.ndim > 1) and (vv.shape != (len(coors), dpn)):
raise ValueError('The projected function return value should be'
' (n_point, dpn) == %s, instead of %s!'
% ((len(coors), dpn), vv.shape))
vals = nm.zeros(n_dof, dtype=vv.dtype)
vals[:gnods[0].shape[0] * dpn] = vv.ravel()
else:
raise ValueError('unknown function/value type! (%s)' % type(fun))
nods, indx = nm.unique(nods, return_index=True)
ii = (nm.tile(dpn * indx, dpn)
+ nm.tile(nm.arange(dpn, dtype=nm.int32), indx.shape[0]))
vals = vals[ii]
vals.shape = (len(nods), -1)
return nods, vals
def create_basis_context(self):
"""
Create the context required for evaluating the field basis.
"""
# Hack for tests to pass - the reference coordinates are determined
# from vertices only - we can use the Lagrange basis context for the
# moment. The true context for Field.evaluate_at() is not implemented.
gps = self.gel.poly_space
mesh = self.create_mesh(extra_nodes=False)
ctx = geo_ctx = gps.create_context(mesh.cmesh, 0, 1e-15, 100, 1e-8)
ctx.geo_ctx = geo_ctx
return ctx
|
elegion/djangodash2012
|
refs/heads/master
|
fortuitus/frunner/urls.py
|
1
|
from django.conf.urls import patterns, url
from fortuitus.frunner import views
urlpatterns = patterns('',
url(r'^(?P<company_slug>[\w\d_-]+)/projects/(?P<project_slug>[\w\d_-]+)/runs/$',
views.project_runs, name='frunner_project_runs'),
url(r'^(?P<company_slug>[\w\d_-]+)/projects/(?P<project_slug>[\w\d_-]+)/runs/(?P<testrun_number>\d+)/$',
views.testrun, name='frunner_testrun'),
url(r'^(?P<company_slug>[\w\d_-]+)/projects/(?P<project_slug>[\w\d_-]+)/runs/(?P<testrun_number>\d+)/(?P<testcase_slug>[\w\d_-]+)/$',
views.testrun, name='frunner_testrun'),
url(r'^(?P<company_slug>[\w\d_-]+)/projects/(?P<project_slug>[\w\d-]+)/run$',
views.run_project, name='frunner_run_project'),
)
|
sabi0/intellij-community
|
refs/heads/master
|
python/testData/console/indent1.py
|
83
|
for x in range(1, 10):
print x
|
django-nonrel/django
|
refs/heads/nonrel-1.6
|
tests/mail/__init__.py
|
45382
| |
plotly/python-api
|
refs/heads/master
|
packages/python/plotly/plotly/graph_objs/waterfall/decreasing/__init__.py
|
6
|
import sys
if sys.version_info < (3, 7):
from ._marker import Marker
from . import marker
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__, [".marker"], ["._marker.Marker"]
)
|
WhireCrow/openwrt-mt7620
|
refs/heads/master
|
staging_dir/target-mipsel_r2_uClibc-0.9.33.2/root-ralink/usr/lib/python2.7/distutils/command/build_clib.py
|
176
|
"""distutils.command.build_clib
Implements the Distutils 'build_clib' command, to build a C/C++ library
that is included in the module distribution and needed by an extension
module."""
__revision__ = "$Id$"
# XXX this module has *lots* of code ripped-off quite transparently from
# build_ext.py -- not surprisingly really, as the work required to build
# a static library from a collection of C source files is not really all
# that different from what's required to build a shared object file from
# a collection of C source files. Nevertheless, I haven't done the
# necessary refactoring to account for the overlap in code between the
# two modules, mainly because a number of subtle details changed in the
# cut 'n paste. Sigh.
import os
from distutils.core import Command
from distutils.errors import DistutilsSetupError
from distutils.sysconfig import customize_compiler
from distutils import log
def show_compilers():
from distutils.ccompiler import show_compilers
show_compilers()
class build_clib(Command):
description = "build C/C++ libraries used by Python extensions"
user_options = [
('build-clib=', 'b',
"directory to build C/C++ libraries to"),
('build-temp=', 't',
"directory to put temporary build by-products"),
('debug', 'g',
"compile with debugging information"),
('force', 'f',
"forcibly build everything (ignore file timestamps)"),
('compiler=', 'c',
"specify the compiler type"),
]
boolean_options = ['debug', 'force']
help_options = [
('help-compiler', None,
"list available compilers", show_compilers),
]
def initialize_options(self):
self.build_clib = None
self.build_temp = None
# List of libraries to build
self.libraries = None
# Compilation options for all libraries
self.include_dirs = None
self.define = None
self.undef = None
self.debug = None
self.force = 0
self.compiler = None
def finalize_options(self):
# This might be confusing: both build-clib and build-temp default
# to build-temp as defined by the "build" command. This is because
# I think that C libraries are really just temporary build
# by-products, at least from the point of view of building Python
# extensions -- but I want to keep my options open.
self.set_undefined_options('build',
('build_temp', 'build_clib'),
('build_temp', 'build_temp'),
('compiler', 'compiler'),
('debug', 'debug'),
('force', 'force'))
self.libraries = self.distribution.libraries
if self.libraries:
self.check_library_list(self.libraries)
if self.include_dirs is None:
self.include_dirs = self.distribution.include_dirs or []
if isinstance(self.include_dirs, str):
self.include_dirs = self.include_dirs.split(os.pathsep)
# XXX same as for build_ext -- what about 'self.define' and
# 'self.undef' ?
def run(self):
if not self.libraries:
return
# Yech -- this is cut 'n pasted from build_ext.py!
from distutils.ccompiler import new_compiler
self.compiler = new_compiler(compiler=self.compiler,
dry_run=self.dry_run,
force=self.force)
customize_compiler(self.compiler)
if self.include_dirs is not None:
self.compiler.set_include_dirs(self.include_dirs)
if self.define is not None:
# 'define' option is a list of (name,value) tuples
for (name,value) in self.define:
self.compiler.define_macro(name, value)
if self.undef is not None:
for macro in self.undef:
self.compiler.undefine_macro(macro)
self.build_libraries(self.libraries)
def check_library_list(self, libraries):
"""Ensure that the list of libraries is valid.
`libraries` is presumably provided as a command option 'libraries'.
This method checks that it is a list of 2-tuples, where the tuples
are (library_name, build_info_dict).
Raise DistutilsSetupError if the structure is invalid anywhere;
just returns otherwise.
"""
if not isinstance(libraries, list):
raise DistutilsSetupError, \
"'libraries' option must be a list of tuples"
for lib in libraries:
if not isinstance(lib, tuple) or len(lib) != 2:
raise DistutilsSetupError, \
"each element of 'libraries' must be a 2-tuple"
name, build_info = lib
if not isinstance(name, str):
raise DistutilsSetupError, \
"first element of each tuple in 'libraries' " + \
"must be a string (the library name)"
if '/' in name or (os.sep != '/' and os.sep in name):
raise DistutilsSetupError, \
("bad library name '%s': " +
"may not contain directory separators") % \
lib[0]
if not isinstance(build_info, dict):
raise DistutilsSetupError, \
"second element of each tuple in 'libraries' " + \
"must be a dictionary (build info)"
def get_library_names(self):
# Assume the library list is valid -- 'check_library_list()' is
# called from 'finalize_options()', so it should be!
if not self.libraries:
return None
lib_names = []
for (lib_name, build_info) in self.libraries:
lib_names.append(lib_name)
return lib_names
def get_source_files(self):
self.check_library_list(self.libraries)
filenames = []
for (lib_name, build_info) in self.libraries:
sources = build_info.get('sources')
if sources is None or not isinstance(sources, (list, tuple)):
raise DistutilsSetupError, \
("in 'libraries' option (library '%s'), "
"'sources' must be present and must be "
"a list of source filenames") % lib_name
filenames.extend(sources)
return filenames
def build_libraries(self, libraries):
for (lib_name, build_info) in libraries:
sources = build_info.get('sources')
if sources is None or not isinstance(sources, (list, tuple)):
raise DistutilsSetupError, \
("in 'libraries' option (library '%s'), " +
"'sources' must be present and must be " +
"a list of source filenames") % lib_name
sources = list(sources)
log.info("building '%s' library", lib_name)
# First, compile the source code to object files in the library
# directory. (This should probably change to putting object
# files in a temporary build directory.)
macros = build_info.get('macros')
include_dirs = build_info.get('include_dirs')
objects = self.compiler.compile(sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=include_dirs,
debug=self.debug)
# Now "link" the object files together into a static library.
# (On Unix at least, this isn't really linking -- it just
# builds an archive. Whatever.)
self.compiler.create_static_lib(objects, lib_name,
output_dir=self.build_clib,
debug=self.debug)
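# ---------------------------------------------------------------------------
# Illustrative only (not part of distutils): a 'libraries' value of the shape
# that check_library_list() accepts -- a list of (library_name, build_info)
# 2-tuples, where build_info must contain a 'sources' list and may supply
# 'macros' and 'include_dirs'. The names and paths below are hypothetical.
# ---------------------------------------------------------------------------
_EXAMPLE_LIBRARIES = [
    ('sample_helpers', {'sources': ['src/helpers.c', 'src/io.c'],
                        'macros': [('NDEBUG', '1')],
                        'include_dirs': ['include']}),
]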
|
ojengwa/sympy
|
refs/heads/master
|
sympy/functions/combinatorial/tests/test_comb_numbers.py
|
2
|
import string
from sympy import (bernoulli, Symbol, symbols, Dummy, Sum, harmonic, Rational, oo,
zoo, pi, I, bell, fibonacci, lucas, euler, catalan, binomial, gamma, sqrt,
hyper, log, digamma, trigamma, polygamma, diff, Expr, sympify, expand_func,
EulerGamma, factorial)
from sympy.utilities.pytest import XFAIL, raises
x = Symbol('x')
def test_bernoulli():
assert bernoulli(0) == 1
assert bernoulli(1) == Rational(-1, 2)
assert bernoulli(2) == Rational(1, 6)
assert bernoulli(3) == 0
assert bernoulli(4) == Rational(-1, 30)
assert bernoulli(5) == 0
assert bernoulli(6) == Rational(1, 42)
assert bernoulli(7) == 0
assert bernoulli(8) == Rational(-1, 30)
assert bernoulli(10) == Rational(5, 66)
assert bernoulli(1000001) == 0
assert bernoulli(0, x) == 1
assert bernoulli(1, x) == x - Rational(1, 2)
assert bernoulli(2, x) == x**2 - x + Rational(1, 6)
assert bernoulli(3, x) == x**3 - (3*x**2)/2 + x/2
# Should be fast; computed with mpmath
b = bernoulli(1000)
assert b.p % 10**10 == 7950421099
assert b.q == 342999030
b = bernoulli(10**6, evaluate=False).evalf()
assert str(b) == '-2.23799235765713e+4767529'
def test_fibonacci():
assert [fibonacci(n) for n in range(-3, 5)] == [2, -1, 1, 0, 1, 1, 2, 3]
assert fibonacci(100) == 354224848179261915075
assert [lucas(n) for n in range(-3, 5)] == [-4, 3, -1, 2, 1, 3, 4, 7]
assert lucas(100) == 792070839848372253127
assert fibonacci(1, x) == 1
assert fibonacci(2, x) == x
assert fibonacci(3, x) == x**2 + 1
assert fibonacci(4, x) == x**3 + 2*x
def test_bell():
assert [bell(n) for n in range(8)] == [1, 1, 2, 5, 15, 52, 203, 877]
assert bell(0, x) == 1
assert bell(1, x) == x
assert bell(2, x) == x**2 + x
assert bell(5, x) == x**5 + 10*x**4 + 25*x**3 + 15*x**2 + x
X = symbols('x:6')
# X = (x0, x1, .. x5)
# at the same time: X[1] = x1, X[2] = x2 for standard readability.
# but we must supply a zero-based indexed object X[1:] = (x1, .. x5)
assert bell(6, 2, X[1:]) == 6*X[5]*X[1] + 15*X[4]*X[2] + 10*X[3]**2
assert bell(
6, 3, X[1:]) == 15*X[4]*X[1]**2 + 60*X[3]*X[2]*X[1] + 15*X[2]**3
X = (1, 10, 100, 1000, 10000)
assert bell(6, 2, X) == (6 + 15 + 10)*10000
X = (1, 2, 3, 3, 5)
assert bell(6, 2, X) == 6*5 + 15*3*2 + 10*3**2
X = (1, 2, 3, 5)
assert bell(6, 3, X) == 15*5 + 60*3*2 + 15*2**3
def test_harmonic():
assert harmonic(1, 1) == 1
assert harmonic(2, 1) == Rational(3, 2)
assert harmonic(3, 1) == Rational(11, 6)
assert harmonic(4, 1) == Rational(25, 12)
assert harmonic(3, 1) == harmonic(3)
assert harmonic(3, 5) == 1 + Rational(1, 2**5) + Rational(1, 3**5)
assert harmonic(10, 0) == 10
assert harmonic(oo, 1) == zoo
assert harmonic(oo, 2) == (pi**2)/6
def replace_dummy(expr, sym):
dum = expr.atoms(Dummy)
if not dum:
return expr
assert len(dum) == 1
return expr.xreplace({dum.pop(): sym})
def test_harmonic_rewrite_sum():
n = Symbol("n")
m = Symbol("m")
_k = Dummy("k")
assert replace_dummy(harmonic(n).rewrite(Sum), _k) == Sum(1/_k, (_k, 1, n))
assert replace_dummy(harmonic(n, m).rewrite(Sum), _k) == Sum(_k**(-m), (_k, 1, n))
@XFAIL
def test_harmonic_rewrite_sum_fail():
n = Symbol("n")
m = Symbol("m")
assert harmonic(n).rewrite(digamma) == polygamma(0, n + 1) + EulerGamma
assert harmonic(n).rewrite(trigamma) == polygamma(0, n + 1) + EulerGamma
assert harmonic(n).rewrite(polygamma) == polygamma(0, n + 1) + EulerGamma
assert harmonic(n,3).rewrite(polygamma) == polygamma(2, n + 1)/2 - polygamma(2, 1)/2
assert harmonic(n,m).rewrite(polygamma) == (-1)**m*(polygamma(m - 1, 1) - polygamma(m - 1, n + 1))/factorial(m - 1)
assert expand_func(harmonic(n+4)) == harmonic(n) + 1/(n + 4) + 1/(n + 3) + 1/(n + 2) + 1/(n + 1)
assert expand_func(harmonic(n-4)) == harmonic(n) - 1/(n - 1) - 1/(n - 2) - 1/(n - 3) - 1/n
assert harmonic(n, m).rewrite("tractable") == harmonic(n, m).rewrite(polygamma)
_k = Dummy("k")
assert harmonic(n).rewrite(Sum) == Sum(1/_k, (_k, 1, n))
assert harmonic(n, m).rewrite(Sum) == Sum(_k**(-m), (_k, 1, n))
def test_euler():
assert euler(0) == 1
assert euler(1) == 0
assert euler(2) == -1
assert euler(3) == 0
assert euler(4) == 5
assert euler(6) == -61
assert euler(8) == 1385
assert euler(20, evaluate=False) != 370371188237525
n = Symbol('n', integer=True)
assert euler(n) != -1
assert euler(n).subs(n, 2) == -1
assert euler(20).evalf() == 370371188237525.0
assert euler(20, evaluate=False).evalf() == 370371188237525.0
assert euler(n).rewrite(Sum) == euler(n)
# XXX: Not sure what the guy who wrote this test was trying to do with the _j and _k stuff
assert euler(2*n + 1).rewrite(Sum) == 0
@XFAIL
def test_euler_failing():
# depends on dummy variables being implemented https://github.com/sympy/sympy/issues/5665
assert euler(2*n).rewrite(Sum) == I*Sum(Sum((-1)**_j*2**(-_k)*I**(-_k)*(-2*_j + _k)**(2*n + 1)*binomial(_k, _j)/_k, (_j, 0, _k)), (_k, 1, 2*n + 1))
def test_catalan():
assert catalan(1) == 1
assert catalan(2) == 2
assert catalan(3) == 5
assert catalan(4) == 14
assert catalan(x) == catalan(x)
assert catalan(2*x).rewrite(binomial) == binomial(4*x, 2*x)/(2*x + 1)
assert catalan(Rational(1, 2)).rewrite(gamma) == 8/(3*pi)
assert catalan(3*x).rewrite(gamma) == 4**(
3*x)*gamma(3*x + Rational(1, 2))/(sqrt(pi)*gamma(3*x + 2))
assert catalan(x).rewrite(hyper) == hyper((-x + 1, -x), (2,), 1)
assert diff(catalan(x), x) == (polygamma(
0, x + Rational(1, 2)) - polygamma(0, x + 2) + log(4))*catalan(x)
c = catalan(0.5).evalf()
assert str(c) == '0.848826363156775'
def test_nC_nP_nT():
from sympy.utilities.iterables import (
multiset_permutations, multiset_combinations, multiset_partitions,
partitions, subsets, permutations)
from sympy.functions.combinatorial.numbers import (
nP, nC, nT, stirling, _multiset_histogram, _AOP_product)
from sympy.combinatorics.permutations import Permutation
from sympy.core.numbers import oo
from random import choice
c = string.ascii_lowercase
for i in range(100):
s = ''.join(choice(c) for i in range(7))
u = len(s) == len(set(s))
try:
tot = 0
for i in range(8):
check = nP(s, i)
tot += check
assert len(list(multiset_permutations(s, i))) == check
if u:
assert nP(len(s), i) == check
assert nP(s) == tot
except AssertionError:
print(s, i, 'failed perm test')
raise ValueError()
for i in range(100):
s = ''.join(choice(c) for i in range(7))
u = len(s) == len(set(s))
try:
tot = 0
for i in range(8):
check = nC(s, i)
tot += check
assert len(list(multiset_combinations(s, i))) == check
if u:
assert nC(len(s), i) == check
assert nC(s) == tot
if u:
assert nC(len(s)) == tot
except AssertionError:
print(s, i, 'failed combo test')
raise ValueError()
for i in range(1, 10):
tot = 0
for j in range(1, i + 2):
check = nT(i, j)
tot += check
assert sum(1 for p in partitions(i, j, size=True) if p[0] == j) == check
assert nT(i) == tot
for i in range(1, 10):
tot = 0
for j in range(1, i + 2):
check = nT(range(i), j)
tot += check
assert len(list(multiset_partitions(range(i), j))) == check
assert nT(range(i)) == tot
for i in range(100):
s = ''.join(choice(c) for i in range(7))
u = len(s) == len(set(s))
try:
tot = 0
for i in range(1, 8):
check = nT(s, i)
tot += check
assert len(list(multiset_partitions(s, i))) == check
if u:
assert nT(range(len(s)), i) == check
if u:
assert nT(range(len(s))) == tot
assert nT(s) == tot
except AssertionError:
print(s, i, 'failed partition test')
raise ValueError()
# tests for Stirling numbers of the first kind that are not tested in the
# above
assert [stirling(9, i, kind=1) for i in range(11)] == [
0, 40320, 109584, 118124, 67284, 22449, 4536, 546, 36, 1, 0]
perms = list(permutations(range(4)))
assert [sum(1 for p in perms if Permutation(p).cycles == i)
for i in range(5)] == [0, 6, 11, 6, 1] == [
stirling(4, i, kind=1) for i in range(5)]
# http://oeis.org/A008275
assert [stirling(n, k, signed=1)
for n in range(10) for k in range(1, n + 1)] == [
1, -1,
1, 2, -3,
1, -6, 11, -6,
1, 24, -50, 35, -10,
1, -120, 274, -225, 85, -15,
1, 720, -1764, 1624, -735, 175, -21,
1, -5040, 13068, -13132, 6769, -1960, 322, -28,
1, 40320, -109584, 118124, -67284, 22449, -4536, 546, -36, 1]
# http://en.wikipedia.org/wiki/Stirling_numbers_of_the_first_kind
assert [stirling(n, k, kind=1)
for n in range(10) for k in range(n+1)] == [
1,
0, 1,
0, 1, 1,
0, 2, 3, 1,
0, 6, 11, 6, 1,
0, 24, 50, 35, 10, 1,
0, 120, 274, 225, 85, 15, 1,
0, 720, 1764, 1624, 735, 175, 21, 1,
0, 5040, 13068, 13132, 6769, 1960, 322, 28, 1,
0, 40320, 109584, 118124, 67284, 22449, 4536, 546, 36, 1]
# http://en.wikipedia.org/wiki/Stirling_numbers_of_the_second_kind
assert [stirling(n, k, kind=2)
for n in range(10) for k in range(n+1)] == [
1,
0, 1,
0, 1, 1,
0, 1, 3, 1,
0, 1, 7, 6, 1,
0, 1, 15, 25, 10, 1,
0, 1, 31, 90, 65, 15, 1,
0, 1, 63, 301, 350, 140, 21, 1,
0, 1, 127, 966, 1701, 1050, 266, 28, 1,
0, 1, 255, 3025, 7770, 6951, 2646, 462, 36, 1]
assert stirling(3, 4, kind=1) == stirling(3, 4, kind=1) == 0
raises(ValueError, lambda: stirling(-2, 2))
def delta(p):
if len(p) == 1:
return oo
return min(abs(i[0] - i[1]) for i in subsets(p, 2))
parts = multiset_partitions(range(5), 3)
d = 2
assert (sum(1 for p in parts if all(delta(i) >= d for i in p)) ==
stirling(5, 3, d=d) == 7)
# other coverage tests
assert nC('abb', 2) == nC('aab', 2) == 2
assert nP(3, 3, replacement=True) == nP('aabc', 3, replacement=True) == 27
assert nP(3, 4) == 0
assert nP('aabc', 5) == 0
assert nC(4, 2, replacement=True) == nC('abcdd', 2, replacement=True) == \
len(list(multiset_combinations('aabbccdd', 2))) == 10
assert nC('abcdd') == sum(nC('abcdd', i) for i in range(6)) == 24
assert nC(list('abcdd'), 4) == 4
assert nT('aaaa') == nT(4) == len(list(partitions(4))) == 5
assert nT('aaab') == len(list(multiset_partitions('aaab'))) == 7
assert nC('aabb'*3, 3) == 4 # aaa, bbb, abb, baa
assert dict(_AOP_product((4,1,1,1))) == {
0: 1, 1: 4, 2: 7, 3: 8, 4: 8, 5: 7, 6: 4, 7: 1}
# the following was the first t that showed a problem in a previous form of
# the function, so it's not as random as it may appear
t = (3, 9, 4, 6, 6, 5, 5, 2, 10, 4)
assert sum(_AOP_product(t)[i] for i in range(55)) == 58212000
raises(ValueError, lambda: _multiset_histogram({1:'a'}))
|
NeuralEnsemble/neuroConstruct
|
refs/heads/master
|
lib/jython/Lib/test/test_future1.py
|
432
|
"""This is a test"""
# Import the name nested_scopes twice to trigger SF bug #407394 (regression).
from __future__ import nested_scopes, nested_scopes
def f(x):
def g(y):
return x + y
return g
result = f(2)(4)
|
abessifi/pytd
|
refs/heads/master
|
pytd/compat.py
|
84
|
# -*- coding: utf-8 -*-
"""Python 2/3 compatibility module."""
import sys
PY2 = int(sys.version[0]) == 2
if PY2:
text_type = unicode
binary_type = str
string_types = (str, unicode)
unicode = unicode
basestring = basestring
else:
text_type = str
binary_type = bytes
string_types = (str,)
unicode = str
basestring = (str, bytes)
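# ---------------------------------------------------------------------------
# Illustrative sketch (not part of pytd): typical use of the aliases above to
# write version-agnostic string handling. The helper name is hypothetical.
# ---------------------------------------------------------------------------
def _ensure_text(value, encoding='utf-8'):
    """Decode bytes to text on both Python 2 and 3; pass text through."""
    if isinstance(value, binary_type):
        return value.decode(encoding)
    return text_type(value)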
|
jarzofjam/tenshi
|
refs/heads/master
|
tools/split_lbr.py
|
11
|
#!/usr/bin/env python
# Licensed to Pioneers in Engineering under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Pioneers in Engineering licenses
# this file to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License
import os
import sys
import shutil
import xml.dom.minidom
import re
def find_only_subnode(element, subnode_name):
ret = element.getElementsByTagName(subnode_name)
assert len(ret) == 1, "Found more or less than 1 %s node" % subnode_name
return ret[0]
def filter_filenames(in_name, outdir, extension):
new_name = re.sub("[^0-9A-Za-z_\-]", "-", in_name)
filename = "%s/%s.%s" % (outdir, new_name, extension)
if not os.path.exists(filename):
return filename
extra_num = 0
filename = "%s/%s.%s" % (outdir, new_name + str(extra_num), extension)
while os.path.exists(filename):
extra_num = extra_num + 1
filename = "%s/%s.%s" % (outdir, new_name + str(extra_num), extension)
return filename
def write_out_subnode(node, outdir, extension):
name = node.getAttribute("name")
filename = filter_filenames(name, outdir, extension)
assert not os.path.exists(filename), (
"The output file %s already exists!" % filename)
f = open(filename, "w")
f.write(node.toxml("utf-8"))
f.close()
def main():
if len(sys.argv) != 3:
print("Usage: %s file.lbr outdir" % (sys.argv[0]))
sys.exit(1)
lbrfile = sys.argv[1]
outdir = sys.argv[2]
# parse lbr
lbr = xml.dom.minidom.parse(lbrfile)
libraryNode = find_only_subnode(lbr, "library")
# find the main stuff
packagesNode = find_only_subnode(libraryNode, "packages")
symbolsNode = find_only_subnode(libraryNode, "symbols")
devicesetsNode = find_only_subnode(libraryNode, "devicesets")
# FIXME: may be dangerous
shutil.rmtree(outdir, ignore_errors=True)
os.mkdir(outdir)
# remove all the packages, symbols, devicesets
packagesNode.parentNode.removeChild(packagesNode)
symbolsNode.parentNode.removeChild(symbolsNode)
devicesetsNode.parentNode.removeChild(devicesetsNode)
# write out the remainder (the "metadata")
f = open("%s/meta.xml" % outdir, "w")
lbr.writexml(f)
f.close()
# write out all the "main stuff"
for n in packagesNode.childNodes:
if n.nodeType == xml.dom.Node.TEXT_NODE:
assert n.data.strip() == "", (
"Text node with some data not understood (%s)" % n.data)
else:
assert n.nodeType == xml.dom.Node.ELEMENT_NODE, (
"Unknown node type (%s)" % n.nodeType)
write_out_subnode(n, outdir, "pac")
for n in symbolsNode.childNodes:
if n.nodeType == xml.dom.Node.TEXT_NODE:
assert n.data.strip() == "", (
"Text node with some data not understood (%s)" % n.data)
else:
assert n.nodeType == xml.dom.Node.ELEMENT_NODE, (
"Unknown node type (%s)" % n.nodeType)
write_out_subnode(n, outdir, "sym")
for n in devicesetsNode.childNodes:
if n.nodeType == xml.dom.Node.TEXT_NODE:
assert n.data.strip() == "", (
"Text node with some data not understood (%s)" % n.data)
else:
assert n.nodeType == xml.dom.Node.ELEMENT_NODE, (
"Unknown node type (%s)" % n.nodeType)
write_out_subnode(n, outdir, "dev")
if __name__ == '__main__':
main()
|
skerit/romcollectionbrowser
|
refs/heads/master
|
resources/lib/configxmlwriter.py
|
10
|
import os
import util
from util import *
import config
from config import *
from xml.etree.ElementTree import *
class ConfigXmlWriter:
def __init__(self, createNew):
Logutil.log('init ConfigXmlWriter', util.LOG_LEVEL_INFO)
self.createNew = createNew
if(createNew):
configFile = os.path.join(util.getAddonInstallPath(), 'resources', 'database', 'config_template.xml')
else:
configFile = util.getConfigXmlPath()
if(not os.path.isfile(configFile)):
Logutil.log('File config.xml does not exist. Place a valid config file here: ' +str(configFile), util.LOG_LEVEL_ERROR)
return False, util.localize(32003)
self.tree = ElementTree().parse(configFile)
def writeRomCollections(self, romCollections, isEdit):
Logutil.log('write Rom Collections', util.LOG_LEVEL_INFO)
romCollectionsXml = self.tree.find('RomCollections')
#HACK: remove all Rom Collections and create new
if(isEdit):
for romCollectionXml in romCollectionsXml.findall('RomCollection'):
romCollectionsXml.remove(romCollectionXml)
for romCollection in romCollections.values():
Logutil.log('write Rom Collection: ' +str(romCollection.name), util.LOG_LEVEL_INFO)
romCollectionXml = SubElement(romCollectionsXml, 'RomCollection', {'id' : str(romCollection.id), 'name' : romCollection.name})
SubElement(romCollectionXml, 'useBuiltinEmulator').text = str(romCollection.useBuiltinEmulator)
SubElement(romCollectionXml, 'gameclient').text = romCollection.gameclient
SubElement(romCollectionXml, 'emulatorCmd').text = romCollection.emulatorCmd
SubElement(romCollectionXml, 'emulatorParams').text = romCollection.emulatorParams
for romPath in romCollection.romPaths:
SubElement(romCollectionXml, 'romPath').text = romPath
SubElement(romCollectionXml, 'saveStatePath').text = romCollection.saveStatePath
SubElement(romCollectionXml, 'saveStateParams').text = romCollection.saveStateParams
for mediaPath in romCollection.mediaPaths:
success, message = self.searchConfigObjects('FileTypes/FileType', mediaPath.fileType.name, 'FileType')
if(not success):
return False, message
SubElement(romCollectionXml, 'mediaPath', {'type' : mediaPath.fileType.name}).text = mediaPath.path
SubElement(romCollectionXml, 'preCmd').text = romCollection.preCmd
SubElement(romCollectionXml, 'postCmd').text = romCollection.postCmd
SubElement(romCollectionXml, 'useEmuSolo').text = str(romCollection.useEmuSolo)
SubElement(romCollectionXml, 'usePopen').text = str(romCollection.usePopen)
SubElement(romCollectionXml, 'ignoreOnScan').text = str(romCollection.ignoreOnScan)
SubElement(romCollectionXml, 'allowUpdate').text = str(romCollection.allowUpdate)
SubElement(romCollectionXml, 'autoplayVideoMain').text = str(romCollection.autoplayVideoMain)
SubElement(romCollectionXml, 'autoplayVideoInfo').text = str(romCollection.autoplayVideoInfo)
SubElement(romCollectionXml, 'useFoldernameAsGamename').text = str(romCollection.useFoldernameAsGamename)
SubElement(romCollectionXml, 'maxFolderDepth').text = str(romCollection.maxFolderDepth)
SubElement(romCollectionXml, 'doNotExtractZipFiles').text = str(romCollection.doNotExtractZipFiles)
SubElement(romCollectionXml, 'makeLocalCopy').text = str(romCollection.makeLocalCopy)
SubElement(romCollectionXml, 'diskPrefix').text = str(romCollection.diskPrefix)
if (os.environ.get( "OS", "xbox" ) == "xbox"):
SubElement(romCollectionXml, 'xboxCreateShortcut').text = str(romCollection.xboxCreateShortcut)
SubElement(romCollectionXml, 'xboxCreateShortcutAddRomfile').text = str(romCollection.xboxCreateShortcutAddRomfile)
SubElement(romCollectionXml, 'xboxCreateShortcutUseShortGamename').text = str(romCollection.xboxCreateShortcutUseShortGamename)
#image placing
if(not self.createNew):
#in case of an update we have to create new options
if(romCollection.name == 'MAME' and not self.createNew):
self.addFileTypesForMame()
self.addImagePlacingForMame()
if(romCollection.imagePlacingMain != None and romCollection.imagePlacingMain.name != ''):
success, message = self.searchConfigObjects('ImagePlacing/fileTypeFor', romCollection.imagePlacingMain.name, 'ImagePlacing')
if(not success):
return False, message
SubElement(romCollectionXml, 'imagePlacingMain').text = romCollection.imagePlacingMain.name
else:
SubElement(romCollectionXml, 'imagePlacingMain').text = 'gameinfobig'
if(romCollection.imagePlacingInfo != None and romCollection.imagePlacingInfo.name != ''):
success, message = self.searchConfigObjects('ImagePlacing/fileTypeFor', romCollection.imagePlacingInfo.name, 'ImagePlacing')
if(not success):
return False, message
SubElement(romCollectionXml, 'imagePlacingInfo').text = romCollection.imagePlacingInfo.name
else:
SubElement(romCollectionXml, 'imagePlacingInfo').text = 'gameinfosmall'
if(romCollection.scraperSites == None or len(romCollection.scraperSites) == 0):
SubElement(romCollectionXml, 'scraper', {'name' : 'thegamesdb.net', 'replaceKeyString' : '', 'replaceValueString' : ''})
SubElement(romCollectionXml, 'scraper', {'name' : 'archive.vg', 'replaceKeyString' : '', 'replaceValueString' : ''})
SubElement(romCollectionXml, 'scraper', {'name' : 'mobygames.com', 'replaceKeyString' : '', 'replaceValueString' : ''})
else:
for scraperSite in romCollection.scraperSites:
if(scraperSite == None):
continue
#HACK: use replaceKey and -Value only from first scraper
firstScraper = scraperSite.scrapers[0]
SubElement(romCollectionXml, 'scraper', {'name' : scraperSite.name, 'replaceKeyString' : firstScraper.replaceKeyString, 'replaceValueString' : firstScraper.replaceValueString})
#create Scraper element
scrapersXml = self.tree.find('Scrapers')
#check if the current scraper already exists
siteExists = False
sitesXml = scrapersXml.findall('Site')
for site in sitesXml:
name = site.attrib.get('name')
if name == scraperSite.name:
siteExists = True
break
if not siteExists:
#HACK: this only covers the first scraper (for offline scrapers)
site = SubElement(scrapersXml, 'Site',
{
'name' : scraperSite.name,
'descFilePerGame' : str(scraperSite.descFilePerGame),
'searchGameByCRC' : str(scraperSite.searchGameByCRC),
'useFoldernameAsCRC' : str(scraperSite.useFoldernameAsCRC),
'useFilenameAsCRC' : str(scraperSite.useFilenameAsCRC)
})
scraper = scraperSite.scrapers[0]
SubElement(site, 'Scraper',
{
'parseInstruction' : scraper.parseInstruction,
'source' : scraper.source,
'encoding' : scraper.encoding
})
success, message = self.writeFile()
return success, message
def writeScrapers(self, scrapers):
Logutil.log('write scraper sites', util.LOG_LEVEL_INFO)
scraperSitesXml = self.tree.find('Scrapers')
#HACK: remove all scrapers and create new
for scraperSiteXml in scraperSitesXml.findall('Site'):
scraperSitesXml.remove(scraperSiteXml)
for scraperSite in scrapers.values():
Logutil.log('write scraper site: ' +str(scraperSite.name), util.LOG_LEVEL_INFO)
#Don't write None-Scraper
if(scraperSite.name == util.localize(32854)):
Logutil.log('None scraper will be skipped', util.LOG_LEVEL_INFO)
continue
scraperSiteXml = SubElement(scraperSitesXml, 'Site',
{
'name' : scraperSite.name,
'descFilePerGame' : str(scraperSite.descFilePerGame),
'searchGameByCRC' : str(scraperSite.searchGameByCRC),
'useFoldernameAsCRC' : str(scraperSite.useFoldernameAsCRC),
'useFilenameAsCRC' : str(scraperSite.useFilenameAsCRC)
})
for scraper in scraperSite.scrapers:
#check if we can use a relative path to parseInstructions
rcbScraperPath = os.path.join(util.RCBHOME, 'resources', 'scraper')
pathParts = os.path.split(scraper.parseInstruction)
if(pathParts[0].upper() == rcbScraperPath.upper()):
scraper.parseInstruction = pathParts[1]
scraperXml = SubElement(scraperSiteXml, 'Scraper',
{
'parseInstruction' : scraper.parseInstruction,
'source' : scraper.source,
'encoding' : scraper.encoding,
'returnUrl' : str(scraper.returnUrl)
})
success, message = self.writeFile()
return success, message
def writeMissingFilter(self, showHideOption, artworkOrGroup, artworkAndGroup, infoOrGroup, infoAndGroup):
Logutil.log('write Missing Info Filter', util.LOG_LEVEL_INFO)
missingFilterXml = self.tree.find('MissingFilter')
#HACK: remove MissingFilter-element
if(missingFilterXml != None):
self.tree.remove(missingFilterXml)
missingFilterXml = SubElement(self.tree, 'MissingFilter')
SubElement(missingFilterXml, 'showHideOption').text = showHideOption
if(len(artworkOrGroup) > 0 or len(artworkAndGroup) > 0):
missingArtworkXml = SubElement(missingFilterXml, 'missingArtworkFilter')
self.addMissingFilterItems(missingArtworkXml, artworkOrGroup, 'orGroup')
self.addMissingFilterItems(missingArtworkXml, artworkAndGroup, 'andGroup')
if(len(infoOrGroup) > 0 or len(infoAndGroup) > 0):
missingInfoXml = SubElement(missingFilterXml, 'missingInfoFilter')
self.addMissingFilterItems(missingInfoXml, infoOrGroup, 'orGroup')
self.addMissingFilterItems(missingInfoXml, infoAndGroup, 'andGroup')
success, message = self.writeFile()
return success, message
def addMissingFilterItems(self, missingXml, group, groupName):
if(len(group) > 0):
groupXml = SubElement(missingXml, groupName)
for item in group:
SubElement(groupXml, 'item').text = item
def searchConfigObjects(self, xPath, nameToCompare, objectType):
objects = self.tree.findall(xPath)
objectFound = False
for obj in objects:
objectName = obj.attrib.get('name')
if(objectName == nameToCompare):
objectFound = True
break
if(not objectFound):
return False, util.localize(32009) %(objectType, nameToCompare)
return True, ''
def removeRomCollection(self, RCName):
Logutil.log('removeRomCollection', util.LOG_LEVEL_INFO)
configFile = util.getConfigXmlPath()
self.tree = ElementTree().parse(configFile)
romCollectionsXml = self.tree.find('RomCollections')
for romCollectionXml in romCollectionsXml.findall('RomCollection'):
name = romCollectionXml.attrib.get('name')
if(name == RCName):
romCollectionsXml.remove(romCollectionXml)
success, message = self.writeFile()
return success, message
def addFileTypesForMame(self):
Logutil.log('addFileTypesForMame', util.LOG_LEVEL_INFO)
fileTypesXml = self.tree.find('FileTypes')
#check if the MAME FileTypes already exist
cabinetExists = False
marqueeExists = False
actionExists = False
titleExists = False
highestId = 0
fileTypeXml = fileTypesXml.findall('FileType')
for fileType in fileTypeXml:
name = fileType.attrib.get('name')
if name == 'cabinet':
cabinetExists = True
elif name == 'marquee':
marqueeExists = True
elif name == 'action':
actionExists = True
elif name == 'title':
titleExists = True
id = fileType.attrib.get('id')
if int(id) > highestId:
highestId = int(id)
if not cabinetExists:
self.createFileType(fileTypesXml, str(highestId +1), 'cabinet', 'image', 'game')
if not marqueeExists:
self.createFileType(fileTypesXml, str(highestId +2), 'marquee', 'image', 'game')
if not actionExists:
self.createFileType(fileTypesXml, str(highestId +3), 'action', 'image', 'game')
if not titleExists:
self.createFileType(fileTypesXml, str(highestId +4), 'title', 'image', 'game')
def createFileType(self, fileTypesXml, id, name, type, parent):
fileType = SubElement(fileTypesXml, 'FileType', {'id' : str(id), 'name' : name})
SubElement(fileType, 'type').text = type
SubElement(fileType, 'parent').text = parent
def addImagePlacingForMame(self):
Logutil.log('addImagePlacingForMame', util.LOG_LEVEL_INFO)
imagePlacingXml = self.tree.find('ImagePlacing')
#check if the MAME ImagePlacing options already exist
cabinetExists = False
marqueeExists = False
fileTypeForXml = imagePlacingXml.findall('fileTypeFor')
for fileTypeFor in fileTypeForXml:
name = fileTypeFor.attrib.get('name')
if name == 'gameinfomamecabinet':
cabinetExists = True
elif name == 'gameinfomamemarquee':
marqueeExists = True
if not cabinetExists:
fileTypeFor = SubElement(imagePlacingXml, 'fileTypeFor', {'name' : 'gameinfomamecabinet'})
SubElement(fileTypeFor, 'fileTypeForGameList').text = 'cabinet'
SubElement(fileTypeFor, 'fileTypeForGameList').text = 'boxfront'
SubElement(fileTypeFor, 'fileTypeForGameList').text = 'title'
SubElement(fileTypeFor, 'fileTypeForGameListSelected').text = 'cabinet'
SubElement(fileTypeFor, 'fileTypeForGameListSelected').text = 'boxfront'
SubElement(fileTypeFor, 'fileTypeForGameListSelected').text = 'title'
SubElement(fileTypeFor, 'fileTypeForMainViewBackground').text = 'boxfront'
SubElement(fileTypeFor, 'fileTypeForMainViewBackground').text = 'title'
SubElement(fileTypeFor, 'fileTypeForMainViewBackground').text = 'action'
SubElement(fileTypeFor, 'fileTypeForMainViewGameInfoUpperLeft').text = 'title'
SubElement(fileTypeFor, 'fileTypeForMainViewGameInfoUpperRight').text = 'action'
SubElement(fileTypeFor, 'fileTypeForMainViewGameInfoLower').text = 'marquee'
if not marqueeExists:
fileTypeFor = SubElement(imagePlacingXml, 'fileTypeFor', {'name' : 'gameinfomamemarquee'})
SubElement(fileTypeFor, 'fileTypeForGameList').text = 'marquee'
SubElement(fileTypeFor, 'fileTypeForGameList').text = 'boxfront'
SubElement(fileTypeFor, 'fileTypeForGameList').text = 'title'
SubElement(fileTypeFor, 'fileTypeForGameListSelected').text = 'marquee'
SubElement(fileTypeFor, 'fileTypeForGameListSelected').text = 'boxfront'
SubElement(fileTypeFor, 'fileTypeForGameListSelected').text = 'title'
SubElement(fileTypeFor, 'fileTypeForMainViewBackground').text = 'boxfront'
SubElement(fileTypeFor, 'fileTypeForMainViewBackground').text = 'title'
SubElement(fileTypeFor, 'fileTypeForMainViewBackground').text = 'action'
SubElement(fileTypeFor, 'fileTypeForMainViewGameInfoLeft').text = 'cabinet'
SubElement(fileTypeFor, 'fileTypeForMainViewGameInfoUpperRight').text = 'action'
SubElement(fileTypeFor, 'fileTypeForMainViewGameInfoLowerRight').text = 'title'
def writeFile(self):
Logutil.log('writeFile', util.LOG_LEVEL_INFO)
#write file
try:
configFile = util.getConfigXmlPath()
util.indentXml(self.tree)
treeToWrite = ElementTree(self.tree)
treeToWrite.write(configFile)
return True, ""
except Exception, (exc):
print("Error: Cannot write config.xml: " +str(exc))
return False, util.localize(32008) +": " +str(exc)
|
teamtuga4/teamtuga4ever.repository
|
refs/heads/master
|
plugin.video.traquinas/resources/lib/sources/muchmovies_mv_tv.py
|
7
|
# -*- coding: utf-8 -*-
'''
Specto Add-on
Copyright (C) 2016 mrknow
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse, json
from resources.lib.libraries import cleantitle
from resources.lib.libraries import client
from resources.lib.libraries import client2
from resources.lib import resolvers
from resources.lib.libraries import control
class source:
def __init__(self):
self.base_link = 'http://123movies.to'
self.search_link = '/movie/search/%s'
def get_movie(self, imdb, title, year):
try:
query = self.search_link % urllib.quote(title)
query = urlparse.urljoin(self.base_link, query)
result = client2.http_get(query)
title = cleantitle.movie(title)
years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]
r = client.parseDOM(result, 'div', attrs = {'class': 'ml-item'})
r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
r = [(i[0][0], i[1][-1]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
r = [(re.sub('http.+?//.+?/','', i[0]), i[1]) for i in r]
r = [('/'.join(i[0].split('/')[:2]), i[1]) for i in r]
r = [x for y,x in enumerate(r) if x not in r[:y]]
r = [i for i in r if title == cleantitle.movie(i[1])]
u = [i[0] for i in r][0]
url = urlparse.urljoin(self.base_link, u)
url = urlparse.urlparse(url).path
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
control.log("@@@@@@@@@@@@@@@ URL %s" % url)
return url
except:
return
def get_show(self, imdb, tvdb, tvshowtitle, year):
try:
url = '%s (%s)' % (tvshowtitle, year)
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
try:
tvshowtitle, year = re.compile('(.+?) [(](\d{4})[)]$').findall(url)[0]
query = self.search_link % urllib.quote(tvshowtitle)
query = urlparse.urljoin(self.base_link, query)
#result = client.source(query)
result = client2.http_get(query)
tvshowtitle = cleantitle.tv(tvshowtitle)
season = '%01d' % int(season)
episode = '%01d' % int(episode)
years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]
result = client.parseDOM(result, 'div', attrs = {'class': 'ml-item'})
result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'h2'), re.compile('class *= *[\'|\"]jt-info[\'|\"]>(\d{4})<').findall(i)) for i in result]
result = [(i[0][0], i[1][0], i[2][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
result = [(i[0], re.compile('(.+?) - Season (\d*)$').findall(i[1]), i[2]) for i in result]
result = [(i[0], i[1][0][0], i[1][0][1], i[2]) for i in result if len(i[1]) > 0]
result = [i for i in result if tvshowtitle == cleantitle.tv(i[1])]
result = [i for i in result if season == i[2]]
result = [(i[0], i[1], str(int(i[3]) - int(i[2]) + 1)) for i in result]
result = [i[0] for i in result if any(x in i[2] for x in years)][0]
result += '?S%02dE%02d' % (int(season), int(episode))
try: url = re.compile('//.+?(/.+)').findall(result)[0]
except: url = result
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def get_sources(self, url, hosthdDict, hostDict, locDict):
try:
sources = []
if url == None: return sources
url = urlparse.urljoin(self.base_link, url)
content = re.compile('(.+?)\?episode=\d*$').findall(url)
content = 'movie' if len(content) == 0 else 'episode'
try: url, episode = re.compile('(.+?)\?episode=(\d*)$').findall(url)[0]
except: pass
url = urlparse.urljoin(self.base_link, url) + '/watching.html'
result = client2.http_get(url)
movie = client.parseDOM(result, 'div', ret='movie-id', attrs = {'id': 'media-player'})[0]
mtoken = client.parseDOM(result, 'div', ret='player-token', attrs = {'id': 'media-player'})[0]
try:
quality = client.parseDOM(result, 'span', attrs = {'class': 'quality'})[0].lower()
except: quality = 'hd'
if quality == 'cam' or quality == 'ts': quality = 'CAM'
elif quality == 'hd': quality = 'HD'
else: quality = 'SD'
url = '/ajax/get_episodes/%s/%s' % (movie, mtoken)
url = urlparse.urljoin(self.base_link, url)
result = client2.http_get(url)
result = client.parseDOM(result, 'div', attrs = {'class': 'les-content'})
result = zip(client.parseDOM(result, 'a', ret='onclick'), client.parseDOM(result, 'a', ret='episode-id'), client.parseDOM(result, 'a'))
result = [(re.sub('[^0-9]', '', i[0].split(',')[0]), re.sub('[^0-9a-fA-F]', '', i[0].split(',')[-1]), i[1], ''.join(re.findall('(\d+)', i[2])[:1])) for i in result]
result = [(i[0], i[1], i[2], i[3]) for i in result]
if content == 'episode': result = [i for i in result if i[3] == '%01d' % int(episode)]
links = [('/ajax/load_episode/%s/%s' % (i[2], i[1]), 'gvideo') for i in result if 2 <= int(i[0]) <= 11]
for i in links:
url1 = urlparse.urljoin(self.base_link, i[0])
sources.append({'source': i[1], 'quality': quality, 'provider': 'Muchmovies', 'url': i[0]})
links = []
links += [('/ajax/load_embed/%s/%s' % (i[2], i[1]), 'openload') for i in result if i[0] == '14']
links += [('/ajax/load_embed/%s/%s' % (i[2], i[1]), 'videomega') for i in result if i[0] == '13']
#links += [('movie/loadEmbed/%s/%s' % (i[2], i[1]), 'videowood.tv') for i in result if i[0] == '12']
for i in links:
url1 = urlparse.urljoin(self.base_link, i[0])
sources.append({'source': i[1], 'quality': quality, 'provider': 'Muchmovies', 'url': i[0]})
return sources
except:
return sources
def resolve(self, url):
try:
headers = {'Referer': url}
url = urlparse.urljoin(self.base_link, url)
result = client.source(url, headers=headers)
if 'load_embed' in url:
result = json.loads(result)
result = resolvers.request(result['embed_url'])
return result
except:
pass
try:
url = re.compile('"?file"?\s*=\s*"(.+?)"\s+"?label"?\s*=\s*"(\d+)p?"').findall(result)
url = [(int(i[1]), i[0]) for i in url]
url = sorted(url, key=lambda k: k[0])
url = url[-1][1]
url = client.request(url, output='geturl')
if 'requiressl=yes' in url: url = url.replace('http://', 'https://')
else: url = url.replace('https://', 'http://')
return url
except:
pass
|
hfp/tensorflow-xsmm
|
refs/heads/master
|
tensorflow/contrib/distributions/python/ops/autoregressive.py
|
1
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Autoregressive distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.ops.distributions import distribution as distribution_lib
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
class Autoregressive(distribution_lib.Distribution):
"""Autoregressive distributions.
The Autoregressive distribution enables learning (often) richer multivariate
distributions by repeatedly applying a [diffeomorphic](
https://en.wikipedia.org/wiki/Diffeomorphism) transformation (such as
implemented by `Bijector`s). Regarding terminology,
"Autoregressive models decompose the joint density as a product of
conditionals, and model each conditional in turn. Normalizing flows
transform a base density (e.g. a standard Gaussian) into the target density
by an invertible transformation with tractable Jacobian." [(Papamakarios et
al., 2016)][1]
In other words, the "autoregressive property" is equivalent to the
decomposition, `p(x) = prod{ p(x[i] | x[0:i]) : i=0, ..., d }`. The provided
`shift_and_log_scale_fn`, `masked_autoregressive_default_template`, achieves
this property by zeroing out weights in its `masked_dense` layers.
Practically speaking the autoregressive property means that there exists a
permutation of the event coordinates such that each coordinate is a
diffeomorphic function of only preceding coordinates
[(van den Oord et al., 2016)][2].
#### Mathematical Details
The probability function is
```none
prob(x; fn, n) = fn(x).prob(x)
```
And a sample is generated by
```none
x = fn(...fn(fn(x0).sample()).sample()).sample()
```
where the ellipses (`...`) represent `n-2` composed calls to `fn`, `fn`
constructs a `tfp.distributions.Distribution`-like instance, and `x0` is a
fixed initializing `Tensor`.
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
def normal_fn(event_size):
n = event_size * (event_size + 1) // 2
p = tf.Variable(tfd.Normal(loc=0., scale=1.).sample(n))
affine = tfp.bijectors.Affine(
scale_tril=tfp.math.fill_triangular(0.25 * p))
def _fn(samples):
scale = tf.exp(affine.forward(samples)).eval()
return tfd.Independent(
tfd.Normal(loc=0., scale=scale, validate_args=True),
reinterpreted_batch_ndims=1)
return _fn
batch_and_event_shape = [3, 2, 4]
sample0 = tf.zeros(batch_and_event_shape)
ar = tfd.Autoregressive(
normal_fn(batch_and_event_shape[-1]), sample0)
x = ar.sample([6, 5])
# ==> x.shape = [6, 5, 3, 2, 4]
prob_x = ar.prob(x)
# ==> prob_x.shape = [6, 5, 3, 2]
```
#### References
[1]: George Papamakarios, Theo Pavlakou, and Iain Murray. Masked
Autoregressive Flow for Density Estimation. In _Neural Information
Processing Systems_, 2017. https://arxiv.org/abs/1705.07057
[2]: Aaron van den Oord, Nal Kalchbrenner, Oriol Vinyals, Lasse Espeholt,
Alex Graves, and Koray Kavukcuoglu. Conditional Image Generation with
PixelCNN Decoders. In _Neural Information Processing Systems_, 2016.
https://arxiv.org/abs/1606.05328
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tfp.distributions`.",
warn_once=True)
def __init__(self,
distribution_fn,
sample0=None,
num_steps=None,
validate_args=False,
allow_nan_stats=True,
name="Autoregressive"):
"""Construct an `Autoregressive` distribution.
Args:
distribution_fn: Python `callable` which constructs a
`tfp.distributions.Distribution`-like instance from a `Tensor` (e.g.,
`sample0`). The function must respect the "autoregressive property",
i.e., there exists a permutation of the event coordinates such that each
coordinate is a diffeomorphic function of only preceding coordinates.
sample0: Initial input to `distribution_fn`; used to
build the distribution in `__init__` which in turn specifies this
distribution's properties, e.g., `event_shape`, `batch_shape`, `dtype`.
If unspecified, then `distribution_fn` should be default constructable.
num_steps: Number of times `distribution_fn` is composed from samples,
e.g., `num_steps=2` implies
`distribution_fn(distribution_fn(sample0).sample(n)).sample()`.
validate_args: Python `bool`. Whether to validate input with asserts.
If `validate_args` is `False`, and the inputs are invalid,
correct behavior is not guaranteed.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Default value: "Autoregressive".
Raises:
ValueError: if `num_steps` and
`distribution_fn(sample0).event_shape.num_elements()` are both `None`.
ValueError: if `num_steps < 1`.
"""
parameters = dict(locals())
with ops.name_scope(name) as name:
self._distribution_fn = distribution_fn
self._sample0 = sample0
self._distribution0 = (distribution_fn() if sample0 is None
else distribution_fn(sample0))
if num_steps is None:
num_steps = self._distribution0.event_shape.num_elements()
if num_steps is None:
raise ValueError("distribution_fn must generate a distribution "
"with fully known `event_shape`.")
if num_steps < 1:
raise ValueError("num_steps ({}) must be at least 1.".format(num_steps))
self._num_steps = num_steps
super(Autoregressive, self).__init__(
dtype=self._distribution0.dtype,
reparameterization_type=self._distribution0.reparameterization_type,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=self._distribution0._graph_parents, # pylint: disable=protected-access
name=name)
@property
def distribution_fn(self):
return self._distribution_fn
@property
def sample0(self):
return self._sample0
@property
def num_steps(self):
return self._num_steps
@property
def distribution0(self):
return self._distribution0
def _batch_shape(self):
return self.distribution0.batch_shape
def _batch_shape_tensor(self):
return self.distribution0.batch_shape_tensor()
def _event_shape(self):
return self.distribution0.event_shape
def _event_shape_tensor(self):
return self.distribution0.event_shape_tensor()
def _sample_n(self, n, seed=None):
if seed is None:
seed = distribution_util.gen_new_seed(
seed=np.random.randint(2**32 - 1),
salt="autoregressive")
samples = self.distribution0.sample(n, seed=seed)
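    # Each pass feeds the previous draw back into `distribution_fn`, so the
    # distribution is re-conditioned on the sample from the prior step; after
    # `num_steps` passes each coordinate has been conditioned on the
    # coordinates that precede it. Note the same `seed` is reused on every
    # step.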
for _ in range(self._num_steps):
samples = self.distribution_fn(samples).sample(seed=seed)
return samples
def _log_prob(self, value):
return self.distribution_fn(value).log_prob(value)
def _prob(self, value):
return self.distribution_fn(value).prob(value)
|
GSI/uzbl
|
refs/heads/next
|
tests/event-manager/testcookie.py
|
5
|
#!/usr/bin/env python
import sys
if '' not in sys.path:
sys.path.insert(0, '')
import unittest
from emtest import EventManagerMock
from uzbl.plugins.cookies import Cookies
from uzbl.plugins.config import Config
cookies = (
r'".nyan.cat" "/" "__utmb" "183192761.1.10.1313990640" "http" "1313992440"',
r'".twitter.com" "/" "guest_id" "v1%3A131399064036991891" "http" "1377104460"'
)
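# cookies[0] is a ".nyan.cat" cookie and cookies[1] is a ".twitter.com" cookie;
# the whitelist/blacklist tests below rely on that distinction to decide which
# cookie should be accepted and which should be deleted.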
config = {
'cookies': {
'session.type': 'memory',
'global.type': 'memory'
}
}
class CookieFilterTest(unittest.TestCase):
def setUp(self):
self.event_manager = EventManagerMock((), (Cookies,),
plugin_config=config)
self.uzbl = self.event_manager.add()
self.other = self.event_manager.add()
def test_add_cookie(self):
c = Cookies[self.uzbl]
c.add_cookie(cookies[0])
self.other.send.assert_called_once_with(
'cookie add ' + cookies[0])
def test_whitelist_block(self):
c = Cookies[self.uzbl]
c.whitelist_cookie(r'domain "nyan\.cat$"')
c.add_cookie(cookies[1])
self.uzbl.send.assert_called_once_with(
'cookie delete ' + cookies[1])
def test_whitelist_accept(self):
c = Cookies[self.uzbl]
c.whitelist_cookie(r'domain "nyan\.cat$"')
c.add_cookie(cookies[0])
self.other.send.assert_called_once_with(
'cookie add ' + cookies[0])
def test_blacklist_block(self):
c = Cookies[self.uzbl]
c.blacklist_cookie(r'domain "twitter\.com$"')
c.add_cookie(cookies[1])
self.uzbl.send.assert_called_once_with(
'cookie delete ' + cookies[1])
def test_blacklist_accept(self):
c = Cookies[self.uzbl]
c.blacklist_cookie(r'domain "twitter\.com$"')
c.add_cookie(cookies[0])
self.other.send.assert_called_once_with(
'cookie add ' + cookies[0])
def test_filter_numeric(self):
c = Cookies[self.uzbl]
c.blacklist_cookie(r'0 "twitter\.com$"')
c.add_cookie(cookies[1])
self.uzbl.send.assert_called_once_with(
'cookie delete ' + cookies[1])
class PrivateCookieTest(unittest.TestCase):
def setUp(self):
self.event_manager = EventManagerMock(
(), (Cookies,),
(), ((Config, dict),),
config
)
self.priv = self.event_manager.add()
self.uzbl_a = self.event_manager.add()
self.uzbl_b = self.event_manager.add()
Config[self.priv]['enable_private'] = 1
def test_does_not_send_from_private_uzbl(self):
c = Cookies[self.priv]
c.add_cookie(cookies[0])
self.uzbl_a.send.assert_not_called()
self.uzbl_b.send.assert_not_called()
def test_does_not_send_to_private_uzbl(self):
c = Cookies[self.uzbl_a]
c.add_cookie(cookies[0])
self.priv.send.assert_not_called()
if __name__ == '__main__':
unittest.main()
|
jimsimon/sky_engine
|
refs/heads/master
|
tools/git/for-all-touched-files.py
|
68
|
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Invokes the specified (quoted) command for all files modified
between the current git branch and the specified branch or commit.
The special token [[FILENAME]] (or whatever you choose using the -t
flag) is replaced with each of the filenames of new or modified files.
Deleted files are not included. Neither are untracked files.
Synopsis:
%prog [-b BRANCH] [-d] [-x EXTENSIONS|-c|-g] [-t TOKEN] QUOTED_COMMAND
Examples:
%prog -x gyp,gypi "tools/format_xml.py [[FILENAME]]"
%prog -c "tools/sort-headers.py [[FILENAME]]"
%prog -g "tools/sort_sources.py [[FILENAME]]"
%prog -t "~~BINGO~~" "echo I modified ~~BINGO~~"
"""
import optparse
import os
import subprocess
import sys
# List of C++-like source file extensions.
_CPP_EXTENSIONS = ('h', 'hh', 'hpp', 'c', 'cc', 'cpp', 'cxx', 'mm',)
# List of build file extensions.
_BUILD_EXTENSIONS = ('gyp', 'gypi', 'gn',)
def GitShell(args, ignore_return=False):
"""A shell invocation suitable for communicating with git. Returns
output as list of lines, raises exception on error.
"""
job = subprocess.Popen(args,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
(out, err) = job.communicate()
if job.returncode != 0 and not ignore_return:
print out
raise Exception("Error %d running command %s" % (
job.returncode, args))
return out.split('\n')
def FilenamesFromGit(branch_name, extensions):
"""Provides a list of all new and modified files listed by [git diff
branch_name] where branch_name can be blank to get a diff of the
workspace.
Excludes deleted files.
If extensions is not an empty list, include only files with one of
the extensions on the list.
"""
lines = GitShell('git diff --stat=600,500 %s' % branch_name)
filenames = []
for line in lines:
line = line.lstrip()
# Avoid summary line, and files that have been deleted (no plus).
if line.find('|') != -1 and line.find('+') != -1:
filename = line.split()[0]
if filename:
filename = filename.rstrip()
ext = filename.rsplit('.')[-1]
if not extensions or ext in extensions:
filenames.append(filename)
return filenames
def ForAllTouchedFiles(branch_name, extensions, token, command):
"""For each new or modified file output by [git diff branch_name],
run command with token replaced with the filename. If extensions is
not empty, do this only for files with one of the extensions in that
list.
"""
filenames = FilenamesFromGit(branch_name, extensions)
for filename in filenames:
os.system(command.replace(token, filename))
def main():
parser = optparse.OptionParser(usage=__doc__)
parser.add_option('-x', '--extensions', default='', dest='extensions',
help='Limits to files with given extensions '
'(comma-separated).')
parser.add_option('-c', '--cpp', default=False, action='store_true',
dest='cpp_only',
help='Runs your command only on C++-like source files.')
# -g stands for GYP and GN.
parser.add_option('-g', '--build', default=False, action='store_true',
dest='build_only',
help='Runs your command only on build files.')
parser.add_option('-t', '--token', default='[[FILENAME]]', dest='token',
help='Sets the token to be replaced for each file '
'in your command (default [[FILENAME]]).')
parser.add_option('-b', '--branch', default='origin/master', dest='branch',
help='Sets what to diff to (default origin/master). Set '
'to empty to diff workspace against HEAD.')
opts, args = parser.parse_args()
if not args:
parser.print_help()
sys.exit(1)
if opts.cpp_only and opts.build_only:
parser.error("--cpp and --build are mutually exclusive")
extensions = opts.extensions
if opts.cpp_only:
extensions = _CPP_EXTENSIONS
if opts.build_only:
extensions = _BUILD_EXTENSIONS
ForAllTouchedFiles(opts.branch, extensions, opts.token, args[0])
if __name__ == '__main__':
main()
|
mbohlool/client-python
|
refs/heads/master
|
kubernetes/test/test_v1beta1_ingress_tls.py
|
1
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1beta1_ingress_tls import V1beta1IngressTLS
class TestV1beta1IngressTLS(unittest.TestCase):
""" V1beta1IngressTLS unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1beta1IngressTLS(self):
"""
Test V1beta1IngressTLS
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v1beta1_ingress_tls.V1beta1IngressTLS()
pass
if __name__ == '__main__':
unittest.main()
|
google-code/android-scripting
|
refs/heads/master
|
python/src/Lib/test/test_cpickle.py
|
51
|
import cPickle, unittest
from cStringIO import StringIO
from test.pickletester import AbstractPickleTests, AbstractPickleModuleTests
from test import test_support
class cPickleTests(AbstractPickleTests, AbstractPickleModuleTests):
def setUp(self):
self.dumps = cPickle.dumps
self.loads = cPickle.loads
error = cPickle.BadPickleGet
module = cPickle
class cPicklePicklerTests(AbstractPickleTests):
def dumps(self, arg, proto=0):
f = StringIO()
p = cPickle.Pickler(f, proto)
p.dump(arg)
f.seek(0)
return f.read()
def loads(self, buf):
f = StringIO(buf)
p = cPickle.Unpickler(f)
return p.load()
error = cPickle.BadPickleGet
class cPickleListPicklerTests(AbstractPickleTests):
def dumps(self, arg, proto=0):
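        # cPickle-specific behaviour: constructing a Pickler with only a
        # protocol (no file object) buffers the pickled data in memory, to be
        # retrieved later via getvalue().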
p = cPickle.Pickler(proto)
p.dump(arg)
return p.getvalue()
def loads(self, *args):
f = StringIO(args[0])
p = cPickle.Unpickler(f)
return p.load()
error = cPickle.BadPickleGet
class cPickleFastPicklerTests(AbstractPickleTests):
def dumps(self, arg, proto=0):
f = StringIO()
p = cPickle.Pickler(f, proto)
p.fast = 1
p.dump(arg)
f.seek(0)
return f.read()
def loads(self, *args):
f = StringIO(args[0])
p = cPickle.Unpickler(f)
return p.load()
error = cPickle.BadPickleGet
def test_recursive_list(self):
self.assertRaises(ValueError,
AbstractPickleTests.test_recursive_list,
self)
def test_recursive_inst(self):
self.assertRaises(ValueError,
AbstractPickleTests.test_recursive_inst,
self)
def test_recursive_dict(self):
self.assertRaises(ValueError,
AbstractPickleTests.test_recursive_dict,
self)
def test_recursive_multi(self):
self.assertRaises(ValueError,
AbstractPickleTests.test_recursive_multi,
self)
def test_nonrecursive_deep(self):
# If it's not cyclic, it should pickle OK even if the nesting
# depth exceeds PY_CPICKLE_FAST_LIMIT. That happens to be
# 50 today. Jack Jansen reported stack overflow on Mac OS 9
# at 64.
a = []
for i in range(60):
a = [a]
b = self.loads(self.dumps(a))
self.assertEqual(a, b)
class Node(object):
pass
class cPickleDeepRecursive(unittest.TestCase):
def test_issue2702(self):
        # This should raise a RuntimeError (recursion limit exceeded), but on
        # some platforms (FreeBSD, win32) it sometimes raises KeyError instead,
        # or just silently terminates the interpreter (=crashes).
nodes = [Node() for i in range(500)]
for n in nodes:
n.connections = list(nodes)
n.connections.remove(n)
self.assertRaises(RuntimeError, cPickle.dumps, n)
def test_issue3179(self):
# Safe test, because I broke this case when fixing the
# behaviour for the previous test.
res=[]
for x in range(1,2000):
res.append(dict(doc=x, similar=[]))
cPickle.dumps(res)
def test_main():
test_support.run_unittest(
cPickleTests,
cPicklePicklerTests,
cPickleListPicklerTests,
cPickleFastPicklerTests,
cPickleDeepRecursive,
)
if __name__ == "__main__":
test_main()
|
MattDevo/edk2
|
refs/heads/master
|
AppPkg/Applications/Python/Python-2.7.2/Lib/json/tests/test_check_circular.py
|
17
|
from json.tests import PyTest, CTest
def default_iterable(obj):
return list(obj)
class TestCheckCircular(object):
def test_circular_dict(self):
dct = {}
dct['a'] = dct
self.assertRaises(ValueError, self.dumps, dct)
def test_circular_list(self):
lst = []
lst.append(lst)
self.assertRaises(ValueError, self.dumps, lst)
def test_circular_composite(self):
dct2 = {}
dct2['a'] = []
dct2['a'].append(dct2)
self.assertRaises(ValueError, self.dumps, dct2)
def test_circular_default(self):
self.dumps([set()], default=default_iterable)
self.assertRaises(TypeError, self.dumps, [set()])
def test_circular_off_default(self):
self.dumps([set()], default=default_iterable, check_circular=False)
self.assertRaises(TypeError, self.dumps, [set()], check_circular=False)
class TestPyCheckCircular(TestCheckCircular, PyTest): pass
class TestCCheckCircular(TestCheckCircular, CTest): pass
|
ghtmtt/QGIS
|
refs/heads/master
|
tests/src/python/test_qgslayoutaligner.py
|
41
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsLayoutAligner
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '3/10/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
import qgis # NOQA
from qgis.PyQt import sip
from qgis.core import (QgsUnitTypes,
QgsLayout,
QgsLayoutAligner,
QgsLayoutItemPage,
QgsLayoutGuide,
QgsLayoutObject,
QgsProject,
QgsProperty,
QgsLayoutPageCollection,
QgsLayoutMeasurement,
QgsLayoutItemPicture,
QgsLayoutSize,
QgsLayoutPoint,
QgsLayoutItem)
from qgis.PyQt.QtCore import Qt, QCoreApplication, QEvent, QPointF, QRectF
from qgis.PyQt.QtTest import QSignalSpy
from qgis.PyQt.QtXml import QDomDocument
from qgis.testing import start_app, unittest
start_app()
class TestQgsLayoutAligner(unittest.TestCase):
def testAlign(self):
p = QgsProject()
l = QgsLayout(p)
# add some items
item1 = QgsLayoutItemPicture(l)
item1.attemptMove(QgsLayoutPoint(4, 8, QgsUnitTypes.LayoutMillimeters))
item1.attemptResize(QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
l.addItem(item1)
item2 = QgsLayoutItemPicture(l)
item2.attemptMove(QgsLayoutPoint(6, 10, QgsUnitTypes.LayoutMillimeters))
item2.attemptResize(QgsLayoutSize(10, 9, QgsUnitTypes.LayoutMillimeters))
item2.setReferencePoint(QgsLayoutItem.LowerMiddle)
l.addItem(item2)
# NOTE: item3 has measurement units specified in Centimeters, see below!
item3 = QgsLayoutItemPicture(l)
item3.attemptMove(QgsLayoutPoint(0.8, 1.2, QgsUnitTypes.LayoutCentimeters))
item3.attemptResize(QgsLayoutSize(1.8, 1.6, QgsUnitTypes.LayoutCentimeters))
item3.setReferencePoint(QgsLayoutItem.UpperRight)
l.addItem(item3)
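        # item2 uses a LowerMiddle reference point and item3 an UpperRight one,
        # so positionWithUnits() below reports those anchor points rather than
        # the items' top-left corners.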
QgsLayoutAligner.alignItems(l, [item1, item2, item3], QgsLayoutAligner.AlignLeft)
self.assertEqual(item1.positionWithUnits(), QgsLayoutPoint(4, 8, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item1.sizeWithUnits(), QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item2.positionWithUnits(), QgsLayoutPoint(9, 19, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item2.sizeWithUnits(), QgsLayoutSize(10, 9, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item3.positionWithUnits(), QgsLayoutPoint(2.2, 1.2, QgsUnitTypes.LayoutCentimeters))
self.assertEqual(item3.sizeWithUnits(), QgsLayoutSize(1.8, 1.6, QgsUnitTypes.LayoutCentimeters))
QgsLayoutAligner.alignItems(l, [item1, item2, item3], QgsLayoutAligner.AlignHCenter)
self.assertEqual(item1.positionWithUnits(), QgsLayoutPoint(4, 8, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item1.sizeWithUnits(), QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item2.positionWithUnits(), QgsLayoutPoint(13, 19, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item2.sizeWithUnits(), QgsLayoutSize(10, 9, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item3.positionWithUnits(), QgsLayoutPoint(2.2, 1.2, QgsUnitTypes.LayoutCentimeters))
self.assertEqual(item3.sizeWithUnits(), QgsLayoutSize(1.8, 1.6, QgsUnitTypes.LayoutCentimeters))
QgsLayoutAligner.alignItems(l, [item1, item2, item3], QgsLayoutAligner.AlignRight)
self.assertEqual(item1.positionWithUnits(), QgsLayoutPoint(4, 8, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item1.sizeWithUnits(), QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item2.positionWithUnits(), QgsLayoutPoint(17, 19, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item2.sizeWithUnits(), QgsLayoutSize(10, 9, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item3.positionWithUnits(), QgsLayoutPoint(2.2, 1.2, QgsUnitTypes.LayoutCentimeters))
self.assertEqual(item3.sizeWithUnits(), QgsLayoutSize(1.8, 1.6, QgsUnitTypes.LayoutCentimeters))
QgsLayoutAligner.alignItems(l, [item1, item2, item3], QgsLayoutAligner.AlignTop)
self.assertEqual(item1.positionWithUnits(), QgsLayoutPoint(4, 8, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item1.sizeWithUnits(), QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item2.positionWithUnits(), QgsLayoutPoint(17, 17, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item2.sizeWithUnits(), QgsLayoutSize(10, 9, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item3.positionWithUnits(), QgsLayoutPoint(2.2, 0.8, QgsUnitTypes.LayoutCentimeters))
self.assertEqual(item3.sizeWithUnits(), QgsLayoutSize(1.8, 1.6, QgsUnitTypes.LayoutCentimeters))
QgsLayoutAligner.alignItems(l, [item1, item2, item3], QgsLayoutAligner.AlignVCenter)
self.assertEqual(item1.positionWithUnits(), QgsLayoutPoint(4, 10, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item1.sizeWithUnits(), QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item2.positionWithUnits(), QgsLayoutPoint(17, 20.5, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item2.sizeWithUnits(), QgsLayoutSize(10, 9, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item3.positionWithUnits(), QgsLayoutPoint(2.2, 0.8, QgsUnitTypes.LayoutCentimeters))
self.assertEqual(item3.sizeWithUnits(), QgsLayoutSize(1.8, 1.6, QgsUnitTypes.LayoutCentimeters))
QgsLayoutAligner.alignItems(l, [item1, item2, item3], QgsLayoutAligner.AlignBottom)
self.assertEqual(item1.positionWithUnits(), QgsLayoutPoint(4, 12, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item1.sizeWithUnits(), QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item2.positionWithUnits(), QgsLayoutPoint(17, 24, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item2.sizeWithUnits(), QgsLayoutSize(10, 9, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item3.positionWithUnits(), QgsLayoutPoint(2.2, 0.8, QgsUnitTypes.LayoutCentimeters))
self.assertEqual(item3.sizeWithUnits(), QgsLayoutSize(1.8, 1.6, QgsUnitTypes.LayoutCentimeters))
def testDistribute(self):
p = QgsProject()
l = QgsLayout(p)
# add some items
item1 = QgsLayoutItemPicture(l)
item1.attemptMove(QgsLayoutPoint(4, 8, QgsUnitTypes.LayoutMillimeters))
item1.attemptResize(QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
l.addItem(item1)
item2 = QgsLayoutItemPicture(l)
item2.attemptMove(QgsLayoutPoint(7, 10, QgsUnitTypes.LayoutMillimeters))
item2.attemptResize(QgsLayoutSize(10, 9, QgsUnitTypes.LayoutMillimeters))
item2.setReferencePoint(QgsLayoutItem.LowerMiddle)
l.addItem(item2)
# NOTE: item3 has measurement units specified in Centimeters, see below!
item3 = QgsLayoutItemPicture(l)
item3.attemptMove(QgsLayoutPoint(0.8, 1.2, QgsUnitTypes.LayoutCentimeters))
item3.attemptResize(QgsLayoutSize(1.8, 1.6, QgsUnitTypes.LayoutCentimeters))
item3.setReferencePoint(QgsLayoutItem.UpperRight)
l.addItem(item3)
QgsLayoutAligner.distributeItems(l, [item1, item2, item3], QgsLayoutAligner.DistributeLeft)
self.assertAlmostEqual(item1.positionWithUnits().x(), 4.0, 3)
self.assertEqual(item1.positionWithUnits(), QgsLayoutPoint(4, 8, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item1.sizeWithUnits(), QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
self.assertAlmostEqual(item2.positionWithUnits().x(), 11.0, 3)
self.assertEqual(item2.positionWithUnits().units(), QgsUnitTypes.LayoutMillimeters)
self.assertEqual(item2.sizeWithUnits(), QgsLayoutSize(10, 9, QgsUnitTypes.LayoutMillimeters))
self.assertAlmostEqual(item3.positionWithUnits().x(), 2.6, 3)
self.assertEqual(item3.positionWithUnits().units(), QgsUnitTypes.LayoutCentimeters)
self.assertEqual(item3.sizeWithUnits(), QgsLayoutSize(1.8, 1.6, QgsUnitTypes.LayoutCentimeters))
QgsLayoutAligner.distributeItems(l, [item1, item2, item3], QgsLayoutAligner.DistributeHCenter)
self.assertAlmostEqual(item1.positionWithUnits().x(), 5.0, 3)
self.assertEqual(item1.positionWithUnits().units(), QgsUnitTypes.LayoutMillimeters)
self.assertEqual(item1.sizeWithUnits(), QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
self.assertAlmostEqual(item2.positionWithUnits().x(), 11.0, 3)
self.assertEqual(item2.positionWithUnits().units(), QgsUnitTypes.LayoutMillimeters)
self.assertEqual(item2.sizeWithUnits(), QgsLayoutSize(10, 9, QgsUnitTypes.LayoutMillimeters))
self.assertAlmostEqual(item3.positionWithUnits().x(), 2.6, 3)
self.assertEqual(item3.positionWithUnits().units(), QgsUnitTypes.LayoutCentimeters)
self.assertEqual(item3.sizeWithUnits(), QgsLayoutSize(1.8, 1.6, QgsUnitTypes.LayoutCentimeters))
QgsLayoutAligner.distributeItems(l, [item1, item2, item3], QgsLayoutAligner.DistributeRight)
self.assertAlmostEqual(item1.positionWithUnits().x(), 3.0, 3)
self.assertEqual(item1.positionWithUnits().units(), QgsUnitTypes.LayoutMillimeters)
self.assertEqual(item1.sizeWithUnits(), QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
self.assertAlmostEqual(item2.positionWithUnits().x(), 11.0, 3)
self.assertEqual(item2.positionWithUnits().units(), QgsUnitTypes.LayoutMillimeters)
self.assertEqual(item2.sizeWithUnits(), QgsLayoutSize(10, 9, QgsUnitTypes.LayoutMillimeters))
self.assertAlmostEqual(item3.positionWithUnits().x(), 2.6, 3)
self.assertEqual(item3.positionWithUnits().units(), QgsUnitTypes.LayoutCentimeters)
self.assertEqual(item3.sizeWithUnits(), QgsLayoutSize(1.8, 1.6, QgsUnitTypes.LayoutCentimeters))
QgsLayoutAligner.distributeItems(l, [item1, item2, item3], QgsLayoutAligner.DistributeTop)
self.assertAlmostEqual(item1.positionWithUnits().y(), 8.0, 3)
self.assertEqual(item1.positionWithUnits().units(), QgsUnitTypes.LayoutMillimeters)
self.assertEqual(item1.sizeWithUnits(), QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
self.assertAlmostEqual(item2.positionWithUnits().y(), 19.0, 3)
self.assertEqual(item2.positionWithUnits().units(), QgsUnitTypes.LayoutMillimeters)
self.assertEqual(item2.sizeWithUnits(), QgsLayoutSize(10, 9, QgsUnitTypes.LayoutMillimeters))
self.assertAlmostEqual(item3.positionWithUnits().y(), 1.2, 3)
self.assertEqual(item3.positionWithUnits().units(), QgsUnitTypes.LayoutCentimeters)
self.assertEqual(item3.sizeWithUnits(), QgsLayoutSize(1.8, 1.6, QgsUnitTypes.LayoutCentimeters))
QgsLayoutAligner.distributeItems(l, [item1, item2, item3], QgsLayoutAligner.DistributeVCenter)
self.assertAlmostEqual(item1.positionWithUnits().y(), 8.0, 3)
self.assertEqual(item1.positionWithUnits().units(), QgsUnitTypes.LayoutMillimeters)
self.assertEqual(item1.sizeWithUnits(), QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
self.assertAlmostEqual(item2.positionWithUnits().y(), 21.5, 3)
self.assertEqual(item2.positionWithUnits().units(), QgsUnitTypes.LayoutMillimeters)
self.assertEqual(item2.sizeWithUnits(), QgsLayoutSize(10, 9, QgsUnitTypes.LayoutMillimeters))
self.assertAlmostEqual(item3.positionWithUnits().y(), 1.2, 3)
self.assertEqual(item3.positionWithUnits().units(), QgsUnitTypes.LayoutCentimeters)
self.assertEqual(item3.sizeWithUnits(), QgsLayoutSize(1.8, 1.6, QgsUnitTypes.LayoutCentimeters))
QgsLayoutAligner.distributeItems(l, [item1, item2, item3], QgsLayoutAligner.DistributeBottom)
self.assertAlmostEqual(item1.positionWithUnits().y(), 8.0, 3)
self.assertEqual(item1.positionWithUnits().units(), QgsUnitTypes.LayoutMillimeters)
self.assertEqual(item1.sizeWithUnits(), QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
self.assertAlmostEqual(item2.positionWithUnits().y(), 24.0, 3)
self.assertEqual(item2.positionWithUnits().units(), QgsUnitTypes.LayoutMillimeters)
self.assertEqual(item2.sizeWithUnits(), QgsLayoutSize(10, 9, QgsUnitTypes.LayoutMillimeters))
self.assertAlmostEqual(item3.positionWithUnits().y(), 1.2, 3)
self.assertEqual(item3.positionWithUnits().units(), QgsUnitTypes.LayoutCentimeters)
self.assertEqual(item3.sizeWithUnits(), QgsLayoutSize(1.8, 1.6, QgsUnitTypes.LayoutCentimeters))
QgsLayoutAligner.distributeItems(l, [item1, item2, item3], QgsLayoutAligner.DistributeHSpace)
self.assertAlmostEqual(item1.positionWithUnits().x(), 3.0, 3)
self.assertEqual(item1.positionWithUnits().units(), QgsUnitTypes.LayoutMillimeters)
self.assertEqual(item1.sizeWithUnits(), QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
self.assertAlmostEqual(item2.positionWithUnits().x(), 14.5, 3)
self.assertEqual(item2.positionWithUnits().units(), QgsUnitTypes.LayoutMillimeters)
self.assertEqual(item2.sizeWithUnits(), QgsLayoutSize(10, 9, QgsUnitTypes.LayoutMillimeters))
self.assertAlmostEqual(item3.positionWithUnits().x(), 2.6, 3)
self.assertEqual(item3.positionWithUnits().units(), QgsUnitTypes.LayoutCentimeters)
self.assertEqual(item3.sizeWithUnits(), QgsLayoutSize(1.8, 1.6, QgsUnitTypes.LayoutCentimeters))
QgsLayoutAligner.distributeItems(l, [item1, item2, item3], QgsLayoutAligner.DistributeVSpace)
self.assertAlmostEqual(item1.positionWithUnits().y(), 8.0, 3)
self.assertEqual(item1.positionWithUnits().units(), QgsUnitTypes.LayoutMillimeters)
self.assertEqual(item1.sizeWithUnits(), QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
self.assertAlmostEqual(item2.positionWithUnits().y(), 28.0, 3)
self.assertEqual(item2.positionWithUnits().units(), QgsUnitTypes.LayoutMillimeters)
self.assertEqual(item2.sizeWithUnits(), QgsLayoutSize(10, 9, QgsUnitTypes.LayoutMillimeters))
self.assertAlmostEqual(item3.positionWithUnits().y(), 1.15, 3)
self.assertEqual(item3.positionWithUnits().units(), QgsUnitTypes.LayoutCentimeters)
self.assertEqual(item3.sizeWithUnits(), QgsLayoutSize(1.8, 1.6, QgsUnitTypes.LayoutCentimeters))
def testResize(self):
p = QgsProject()
l = QgsLayout(p)
# add some items
item1 = QgsLayoutItemPicture(l)
item1.attemptMove(QgsLayoutPoint(4, 8, QgsUnitTypes.LayoutMillimeters))
item1.attemptResize(QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
l.addItem(item1)
item2 = QgsLayoutItemPicture(l)
item2.attemptMove(QgsLayoutPoint(7, 10, QgsUnitTypes.LayoutMillimeters))
item2.attemptResize(QgsLayoutSize(10, 9, QgsUnitTypes.LayoutMillimeters))
item2.setReferencePoint(QgsLayoutItem.LowerMiddle)
l.addItem(item2)
# NOTE: item3 has measurement units specified in Centimeters, see below!
item3 = QgsLayoutItemPicture(l)
item3.attemptMove(QgsLayoutPoint(0.8, 1.2, QgsUnitTypes.LayoutCentimeters))
item3.attemptResize(QgsLayoutSize(1.8, 1.6, QgsUnitTypes.LayoutCentimeters))
item3.setReferencePoint(QgsLayoutItem.UpperRight)
l.addItem(item3)
QgsLayoutAligner.resizeItems(l, [item1, item2, item3], QgsLayoutAligner.ResizeNarrowest)
self.assertEqual(item1.sizeWithUnits(), QgsLayoutSize(10, 12, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item2.sizeWithUnits(), QgsLayoutSize(10, 9, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item3.sizeWithUnits(), QgsLayoutSize(1.0, 1.6, QgsUnitTypes.LayoutCentimeters))
l.undoStack().stack().undo()
QgsLayoutAligner.resizeItems(l, [item1, item2, item3], QgsLayoutAligner.ResizeWidest)
self.assertEqual(item1.sizeWithUnits(), QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item2.sizeWithUnits(), QgsLayoutSize(18, 9, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item3.sizeWithUnits(), QgsLayoutSize(1.8, 1.6, QgsUnitTypes.LayoutCentimeters))
l.undoStack().stack().undo()
QgsLayoutAligner.resizeItems(l, [item1, item2, item3], QgsLayoutAligner.ResizeShortest)
self.assertEqual(item1.sizeWithUnits(), QgsLayoutSize(18, 9, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item2.sizeWithUnits(), QgsLayoutSize(10, 9, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item3.sizeWithUnits(), QgsLayoutSize(1.8, 0.9, QgsUnitTypes.LayoutCentimeters))
l.undoStack().stack().undo()
QgsLayoutAligner.resizeItems(l, [item1, item2, item3], QgsLayoutAligner.ResizeTallest)
self.assertEqual(item1.sizeWithUnits(), QgsLayoutSize(18, 16, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item2.sizeWithUnits(), QgsLayoutSize(10, 16, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item3.sizeWithUnits(), QgsLayoutSize(1.8, 1.6, QgsUnitTypes.LayoutCentimeters))
l.undoStack().stack().undo()
item2.attemptResize(QgsLayoutSize(10, 19, QgsUnitTypes.LayoutMillimeters))
QgsLayoutAligner.resizeItems(l, [item1, item2, item3], QgsLayoutAligner.ResizeToSquare)
self.assertEqual(item1.sizeWithUnits(), QgsLayoutSize(18, 18, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item2.sizeWithUnits(), QgsLayoutSize(19, 19, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item3.sizeWithUnits(), QgsLayoutSize(1.8, 1.8, QgsUnitTypes.LayoutCentimeters))
l.undoStack().stack().undo()
QgsLayoutAligner.resizeItems(l, [item1], QgsLayoutAligner.ResizeToSquare)
self.assertEqual(item1.sizeWithUnits(), QgsLayoutSize(18, 18, QgsUnitTypes.LayoutMillimeters))
if __name__ == '__main__':
unittest.main()
|
TathagataChakraborti/resource-conflicts
|
refs/heads/master
|
PLANROB-2015/py2.5/lib/python2.5/encodings/mac_cyrillic.py
|
593
|
""" Python Character Mapping Codec mac_cyrillic generated from 'MAPPINGS/VENDORS/APPLE/CYRILLIC.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-cyrillic',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\u0410' # 0x80 -> CYRILLIC CAPITAL LETTER A
u'\u0411' # 0x81 -> CYRILLIC CAPITAL LETTER BE
u'\u0412' # 0x82 -> CYRILLIC CAPITAL LETTER VE
u'\u0413' # 0x83 -> CYRILLIC CAPITAL LETTER GHE
u'\u0414' # 0x84 -> CYRILLIC CAPITAL LETTER DE
u'\u0415' # 0x85 -> CYRILLIC CAPITAL LETTER IE
u'\u0416' # 0x86 -> CYRILLIC CAPITAL LETTER ZHE
u'\u0417' # 0x87 -> CYRILLIC CAPITAL LETTER ZE
u'\u0418' # 0x88 -> CYRILLIC CAPITAL LETTER I
u'\u0419' # 0x89 -> CYRILLIC CAPITAL LETTER SHORT I
u'\u041a' # 0x8A -> CYRILLIC CAPITAL LETTER KA
u'\u041b' # 0x8B -> CYRILLIC CAPITAL LETTER EL
u'\u041c' # 0x8C -> CYRILLIC CAPITAL LETTER EM
u'\u041d' # 0x8D -> CYRILLIC CAPITAL LETTER EN
u'\u041e' # 0x8E -> CYRILLIC CAPITAL LETTER O
u'\u041f' # 0x8F -> CYRILLIC CAPITAL LETTER PE
u'\u0420' # 0x90 -> CYRILLIC CAPITAL LETTER ER
u'\u0421' # 0x91 -> CYRILLIC CAPITAL LETTER ES
u'\u0422' # 0x92 -> CYRILLIC CAPITAL LETTER TE
u'\u0423' # 0x93 -> CYRILLIC CAPITAL LETTER U
u'\u0424' # 0x94 -> CYRILLIC CAPITAL LETTER EF
u'\u0425' # 0x95 -> CYRILLIC CAPITAL LETTER HA
u'\u0426' # 0x96 -> CYRILLIC CAPITAL LETTER TSE
u'\u0427' # 0x97 -> CYRILLIC CAPITAL LETTER CHE
u'\u0428' # 0x98 -> CYRILLIC CAPITAL LETTER SHA
u'\u0429' # 0x99 -> CYRILLIC CAPITAL LETTER SHCHA
u'\u042a' # 0x9A -> CYRILLIC CAPITAL LETTER HARD SIGN
u'\u042b' # 0x9B -> CYRILLIC CAPITAL LETTER YERU
u'\u042c' # 0x9C -> CYRILLIC CAPITAL LETTER SOFT SIGN
u'\u042d' # 0x9D -> CYRILLIC CAPITAL LETTER E
u'\u042e' # 0x9E -> CYRILLIC CAPITAL LETTER YU
u'\u042f' # 0x9F -> CYRILLIC CAPITAL LETTER YA
u'\u2020' # 0xA0 -> DAGGER
u'\xb0' # 0xA1 -> DEGREE SIGN
u'\u0490' # 0xA2 -> CYRILLIC CAPITAL LETTER GHE WITH UPTURN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa7' # 0xA4 -> SECTION SIGN
u'\u2022' # 0xA5 -> BULLET
u'\xb6' # 0xA6 -> PILCROW SIGN
u'\u0406' # 0xA7 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u2122' # 0xAA -> TRADE MARK SIGN
u'\u0402' # 0xAB -> CYRILLIC CAPITAL LETTER DJE
u'\u0452' # 0xAC -> CYRILLIC SMALL LETTER DJE
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\u0403' # 0xAE -> CYRILLIC CAPITAL LETTER GJE
u'\u0453' # 0xAF -> CYRILLIC SMALL LETTER GJE
u'\u221e' # 0xB0 -> INFINITY
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\u0456' # 0xB4 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
u'\xb5' # 0xB5 -> MICRO SIGN
u'\u0491' # 0xB6 -> CYRILLIC SMALL LETTER GHE WITH UPTURN
u'\u0408' # 0xB7 -> CYRILLIC CAPITAL LETTER JE
u'\u0404' # 0xB8 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
u'\u0454' # 0xB9 -> CYRILLIC SMALL LETTER UKRAINIAN IE
u'\u0407' # 0xBA -> CYRILLIC CAPITAL LETTER YI
u'\u0457' # 0xBB -> CYRILLIC SMALL LETTER YI
u'\u0409' # 0xBC -> CYRILLIC CAPITAL LETTER LJE
u'\u0459' # 0xBD -> CYRILLIC SMALL LETTER LJE
u'\u040a' # 0xBE -> CYRILLIC CAPITAL LETTER NJE
u'\u045a' # 0xBF -> CYRILLIC SMALL LETTER NJE
u'\u0458' # 0xC0 -> CYRILLIC SMALL LETTER JE
u'\u0405' # 0xC1 -> CYRILLIC CAPITAL LETTER DZE
u'\xac' # 0xC2 -> NOT SIGN
u'\u221a' # 0xC3 -> SQUARE ROOT
u'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK
u'\u2248' # 0xC5 -> ALMOST EQUAL TO
u'\u2206' # 0xC6 -> INCREMENT
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\u040b' # 0xCB -> CYRILLIC CAPITAL LETTER TSHE
u'\u045b' # 0xCC -> CYRILLIC SMALL LETTER TSHE
u'\u040c' # 0xCD -> CYRILLIC CAPITAL LETTER KJE
u'\u045c' # 0xCE -> CYRILLIC SMALL LETTER KJE
u'\u0455' # 0xCF -> CYRILLIC SMALL LETTER DZE
u'\u2013' # 0xD0 -> EN DASH
u'\u2014' # 0xD1 -> EM DASH
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u201e' # 0xD7 -> DOUBLE LOW-9 QUOTATION MARK
u'\u040e' # 0xD8 -> CYRILLIC CAPITAL LETTER SHORT U
u'\u045e' # 0xD9 -> CYRILLIC SMALL LETTER SHORT U
u'\u040f' # 0xDA -> CYRILLIC CAPITAL LETTER DZHE
u'\u045f' # 0xDB -> CYRILLIC SMALL LETTER DZHE
u'\u2116' # 0xDC -> NUMERO SIGN
u'\u0401' # 0xDD -> CYRILLIC CAPITAL LETTER IO
u'\u0451' # 0xDE -> CYRILLIC SMALL LETTER IO
u'\u044f' # 0xDF -> CYRILLIC SMALL LETTER YA
u'\u0430' # 0xE0 -> CYRILLIC SMALL LETTER A
u'\u0431' # 0xE1 -> CYRILLIC SMALL LETTER BE
u'\u0432' # 0xE2 -> CYRILLIC SMALL LETTER VE
u'\u0433' # 0xE3 -> CYRILLIC SMALL LETTER GHE
u'\u0434' # 0xE4 -> CYRILLIC SMALL LETTER DE
u'\u0435' # 0xE5 -> CYRILLIC SMALL LETTER IE
u'\u0436' # 0xE6 -> CYRILLIC SMALL LETTER ZHE
u'\u0437' # 0xE7 -> CYRILLIC SMALL LETTER ZE
u'\u0438' # 0xE8 -> CYRILLIC SMALL LETTER I
u'\u0439' # 0xE9 -> CYRILLIC SMALL LETTER SHORT I
u'\u043a' # 0xEA -> CYRILLIC SMALL LETTER KA
u'\u043b' # 0xEB -> CYRILLIC SMALL LETTER EL
u'\u043c' # 0xEC -> CYRILLIC SMALL LETTER EM
u'\u043d' # 0xED -> CYRILLIC SMALL LETTER EN
u'\u043e' # 0xEE -> CYRILLIC SMALL LETTER O
u'\u043f' # 0xEF -> CYRILLIC SMALL LETTER PE
u'\u0440' # 0xF0 -> CYRILLIC SMALL LETTER ER
u'\u0441' # 0xF1 -> CYRILLIC SMALL LETTER ES
u'\u0442' # 0xF2 -> CYRILLIC SMALL LETTER TE
u'\u0443' # 0xF3 -> CYRILLIC SMALL LETTER U
u'\u0444' # 0xF4 -> CYRILLIC SMALL LETTER EF
u'\u0445' # 0xF5 -> CYRILLIC SMALL LETTER HA
u'\u0446' # 0xF6 -> CYRILLIC SMALL LETTER TSE
u'\u0447' # 0xF7 -> CYRILLIC SMALL LETTER CHE
u'\u0448' # 0xF8 -> CYRILLIC SMALL LETTER SHA
u'\u0449' # 0xF9 -> CYRILLIC SMALL LETTER SHCHA
u'\u044a' # 0xFA -> CYRILLIC SMALL LETTER HARD SIGN
u'\u044b' # 0xFB -> CYRILLIC SMALL LETTER YERU
u'\u044c' # 0xFC -> CYRILLIC SMALL LETTER SOFT SIGN
u'\u044d' # 0xFD -> CYRILLIC SMALL LETTER E
u'\u044e' # 0xFE -> CYRILLIC SMALL LETTER YU
u'\u20ac' # 0xFF -> EURO SIGN
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
|
tensorflow/autograph
|
refs/heads/master
|
reference_tests/assertion_test.py
|
1
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic assertions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import reference_test_base
import tensorflow.compat.v1 as tf
def simple_assertion(x):
assert x > 0
return x
class ReferenceTest(reference_test_base.TestCase):
def test_basic(self):
self.assertNativeMatchesCompiled(simple_assertion, 1)
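    # Under AutoGraph the Python `assert` is converted to a graph assertion
    # (tf.Assert), so a failing condition surfaces as
    # tf.errors.InvalidArgumentError rather than a Python AssertionError.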
with self.assertRaises(tf.errors.InvalidArgumentError):
self.try_execute_compiled(simple_assertion, 0)
if __name__ == '__main__':
tf.test.main()
|
suneeth51/neutron
|
refs/heads/master
|
neutron/tests/tempest/common/tempest_fixtures.py
|
102
|
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_concurrency.fixture import lockutils
class LockFixture(lockutils.LockFixture):
def __init__(self, name):
super(LockFixture, self).__init__(name, 'tempest-')
|
espressopp/espressopp
|
refs/heads/master
|
testsuite/cap_force/testCapForce.py
|
1
|
# Copyright (C) 2018
# Max Planck Institute for Polymer Research
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import time
import math
import espressopp
import mpi4py.MPI as MPI
import unittest
class TestCaseCapForce(unittest.TestCase):
def setUp(self):
# set up system
system = espressopp.System()
rng = espressopp.esutil.RNG()
rng.seed(1)
system.rng = rng
L = 10
box = (L, L, L)
system.bc = espressopp.bc.OrthorhombicBC(system.rng, box)
system.skin = 0.3
system.comm = MPI.COMM_WORLD
self.system = system
self.L = L
self.box = box
def test_cap_force(self):
# set up normal domain decomposition
nodeGrid = espressopp.tools.decomp.nodeGrid(espressopp.MPI.COMM_WORLD.size)
cellGrid = espressopp.tools.decomp.cellGrid(self.box, nodeGrid, 1.5, 0.3)
self.system.storage = espressopp.storage.DomainDecomposition(self.system, nodeGrid, cellGrid)
# add some particles (normal, coarse-grained particles only)
particle_list = [
(1, 0, 0, espressopp.Real3D(4.95, 5.0, 5.0), 1.0, 0, 1.),
(2, 0, 0, espressopp.Real3D(5.05, 5.0, 5.0), 1.0, 0, 1.),
]
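        # The two particles are only 0.1 apart (sigma = 1.0), deep inside the
        # repulsive core of the Lennard-Jones potential, so the uncapped force
        # would be far larger than the cap of 1.0 applied below.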
self.system.storage.addParticles(particle_list, 'id', 'type', 'q', 'pos', 'mass','adrat', 'radius')
self.system.storage.decompose()
# integrator
integrator = espressopp.integrator.VelocityVerlet(self.system)
integrator.dt = 0.005
# Lennard-Jones with Verlet list
rc_lj = pow(2.0, 1.0/6.0)
vl = espressopp.VerletList(self.system, cutoff = rc_lj)
potLJ = espressopp.interaction.LennardJones(epsilon=1., sigma=1., cutoff=rc_lj, shift=0)
interLJ = espressopp.interaction.VerletListLennardJones(vl)
interLJ.setPotential(type1=0, type2=0, potential=potLJ)
self.system.addInteraction(interLJ)
# create a CapForce instance
capforce = espressopp.integrator.CapForce(self.system, 1.0)
integrator.addExtension(capforce)
# run 1 step
integrator.run(1)
particle1 = self.system.storage.getParticle(1)
particle2 = self.system.storage.getParticle(2)
print(particle1.f, particle2.f)
# run checks
        self.assertTrue(math.fabs(particle1.f[0]) == 1.0, "The force of particle 1 is not capped.")
        self.assertTrue(math.fabs(particle2.f[0]) == 1.0, "The force of particle 2 is not capped.")
def test_cap_force_array(self):
# set up normal domain decomposition
nodeGrid = espressopp.tools.decomp.nodeGrid(espressopp.MPI.COMM_WORLD.size)
cellGrid = espressopp.tools.decomp.cellGrid(self.box, nodeGrid, 1.5, 0.3)
self.system.storage = espressopp.storage.DomainDecomposition(self.system, nodeGrid, cellGrid)
# add some particles (normal, coarse-grained particles only)
particle_list = [
(1, 0, 0, espressopp.Real3D(4.95, 5.0, 5.0), 1.0, 0, 1.),
(2, 0, 0, espressopp.Real3D(5.05, 5.0, 5.0), 1.0, 0, 1.),
]
self.system.storage.addParticles(particle_list, 'id', 'type', 'q', 'pos', 'mass','adrat', 'radius')
self.system.storage.decompose()
# integrator
integrator = espressopp.integrator.VelocityVerlet(self.system)
integrator.dt = 0.005
# Lennard-Jones with Verlet list
rc_lj = pow(2.0, 1.0/6.0)
vl = espressopp.VerletList(self.system, cutoff = rc_lj)
potLJ = espressopp.interaction.LennardJones(epsilon=1., sigma=1., cutoff=rc_lj, shift=0)
interLJ = espressopp.interaction.VerletListLennardJones(vl)
interLJ.setPotential(type1=0, type2=0, potential=potLJ)
self.system.addInteraction(interLJ)
# create a CapForce instance
capforce = espressopp.integrator.CapForce(self.system, espressopp.Real3D(1.0, 1.0, 1.0))
integrator.addExtension(capforce)
# run 1 step
integrator.run(1)
particle1 = self.system.storage.getParticle(1)
particle2 = self.system.storage.getParticle(2)
print(particle1.f, particle2.f)
# run checks
        self.assertTrue(math.fabs(particle1.f[0]) == 1.0, "The force of particle 1 is not capped.")
        self.assertTrue(math.fabs(particle2.f[0]) == 1.0, "The force of particle 2 is not capped.")
def test_cap_force_group(self):
# set up normal domain decomposition
nodeGrid = espressopp.tools.decomp.nodeGrid(espressopp.MPI.COMM_WORLD.size)
cellGrid = espressopp.tools.decomp.cellGrid(self.box, nodeGrid, 1.5, 0.3)
self.system.storage = espressopp.storage.DomainDecomposition(self.system, nodeGrid, cellGrid)
# add some particles (normal, coarse-grained particles only)
particle_list = [
(1, 0, 0, espressopp.Real3D(4.95, 5.0, 5.0), 1.0, 0, 1.),
(2, 0, 0, espressopp.Real3D(5.05, 5.0, 5.0), 1.0, 0, 1.),
]
self.system.storage.addParticles(particle_list, 'id', 'type', 'q', 'pos', 'mass','adrat', 'radius')
self.system.storage.decompose()
# integrator
integrator = espressopp.integrator.VelocityVerlet(self.system)
integrator.dt = 0.005
# Lennard-Jones with Verlet list
rc_lj = pow(2.0, 1.0/6.0)
vl = espressopp.VerletList(self.system, cutoff = rc_lj)
potLJ = espressopp.interaction.LennardJones(epsilon=1., sigma=1., cutoff=rc_lj, shift=0)
interLJ = espressopp.interaction.VerletListLennardJones(vl)
interLJ.setPotential(type1=0, type2=0, potential=potLJ)
self.system.addInteraction(interLJ)
# create a ParticleGroup instance
particle_group = espressopp.ParticleGroup(self.system.storage)
particle_group.add(1)
# create a CapForce instance
capforce = espressopp.integrator.CapForce(self.system, 1.0, particle_group)
integrator.addExtension(capforce)
# run 1 step
integrator.run(1)
particle1 = self.system.storage.getParticle(1)
particle2 = self.system.storage.getParticle(2)
print(particle1.f, particle2.f)
# run checks
self.assertTrue(math.fabs(particle1.f[0]) == 1.0, "The force of particle 1 is not capped.")
self.assertTrue(math.fabs(particle2.f[0]) > 1.0, "The force of particle 2 is capped.")
def test_cap_force_array_group(self):
# set up normal domain decomposition
nodeGrid = espressopp.tools.decomp.nodeGrid(espressopp.MPI.COMM_WORLD.size)
cellGrid = espressopp.tools.decomp.cellGrid(self.box, nodeGrid, 1.5, 0.3)
self.system.storage = espressopp.storage.DomainDecomposition(self.system, nodeGrid, cellGrid)
# add some particles (normal, coarse-grained particles only)
particle_list = [
(1, 0, 0, espressopp.Real3D(4.95, 5.0, 5.0), 1.0, 0, 1.),
(2, 0, 0, espressopp.Real3D(5.05, 5.0, 5.0), 1.0, 0, 1.),
]
self.system.storage.addParticles(particle_list, 'id', 'type', 'q', 'pos', 'mass','adrat', 'radius')
self.system.storage.decompose()
# integrator
integrator = espressopp.integrator.VelocityVerlet(self.system)
integrator.dt = 0.005
# Lennard-Jones with Verlet list
rc_lj = pow(2.0, 1.0/6.0)
vl = espressopp.VerletList(self.system, cutoff = rc_lj)
potLJ = espressopp.interaction.LennardJones(epsilon=1., sigma=1., cutoff=rc_lj, shift=0)
interLJ = espressopp.interaction.VerletListLennardJones(vl)
interLJ.setPotential(type1=0, type2=0, potential=potLJ)
self.system.addInteraction(interLJ)
# create a ParticleGroup instance
particle_group = espressopp.ParticleGroup(self.system.storage)
particle_group.add(1)
# create a CapForce instance
capforce = espressopp.integrator.CapForce(self.system, espressopp.Real3D(1.0, 1.0, 1.0), particle_group)
integrator.addExtension(capforce)
# run 1 step
integrator.run(1)
particle1 = self.system.storage.getParticle(1)
particle2 = self.system.storage.getParticle(2)
print(particle1.f, particle2.f)
# run checks
self.assertTrue(math.fabs(particle1.f[0]) == 1.0, "The force of particle 1 is not capped.")
self.assertTrue(math.fabs(particle2.f[0]) > 1.0, "The force of particle 2 is capped.")
if __name__ == '__main__':
unittest.main()
|
johankaito/fufuka
|
refs/heads/master
|
microblog/old-flask/lib/python2.7/site-packages/sqlalchemy/connectors/mxodbc.py
|
80
|
# connectors/mxodbc.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Provide an SQLAlchemy connector for the eGenix mxODBC commercial
Python adapter for ODBC. This is not a free product, but eGenix
provides SQLAlchemy with a license for use in continuous integration
testing.
This has been tested for use with mxODBC 3.1.2 on SQL Server 2005
and 2008, using the SQL Server Native driver. However, it is
possible for this to be used on other database platforms.
For more info on mxODBC, see http://www.egenix.com/
"""
import sys
import re
import warnings
from . import Connector
class MxODBCConnector(Connector):
driver = 'mxodbc'
supports_sane_multi_rowcount = False
supports_unicode_statements = True
supports_unicode_binds = True
supports_native_decimal = True
@classmethod
def dbapi(cls):
# this classmethod will normally be replaced by an instance
# attribute of the same name, so this is normally only called once.
cls._load_mx_exceptions()
platform = sys.platform
if platform == 'win32':
from mx.ODBC import Windows as module
# this can be the string "linux2", and possibly others
elif 'linux' in platform:
from mx.ODBC import unixODBC as module
elif platform == 'darwin':
from mx.ODBC import iODBC as module
else:
raise ImportError("Unrecognized platform for mxODBC import")
return module
@classmethod
def _load_mx_exceptions(cls):
""" Import mxODBC exception classes into the module namespace,
as if they had been imported normally. This is done here
to avoid requiring all SQLAlchemy users to install mxODBC.
"""
global InterfaceError, ProgrammingError
from mx.ODBC import InterfaceError
from mx.ODBC import ProgrammingError
def on_connect(self):
def connect(conn):
conn.stringformat = self.dbapi.MIXED_STRINGFORMAT
conn.datetimeformat = self.dbapi.PYDATETIME_DATETIMEFORMAT
conn.decimalformat = self.dbapi.DECIMAL_DECIMALFORMAT
conn.errorhandler = self._error_handler()
return connect
def _error_handler(self):
""" Return a handler that adjusts mxODBC's raised Warnings to
emit Python standard warnings.
"""
from mx.ODBC.Error import Warning as MxOdbcWarning
def error_handler(connection, cursor, errorclass, errorvalue):
if issubclass(errorclass, MxOdbcWarning):
errorclass.__bases__ = (Warning,)
warnings.warn(message=str(errorvalue),
category=errorclass,
stacklevel=2)
else:
raise errorclass(errorvalue)
return error_handler
def create_connect_args(self, url):
""" Return a tuple of *args,**kwargs for creating a connection.
The mxODBC 3.x connection constructor looks like this:
connect(dsn, user='', password='',
clear_auto_commit=1, errorhandler=None)
This method translates the values in the provided uri
into args and kwargs needed to instantiate an mxODBC Connection.
The arg 'errorhandler' is not used by SQLAlchemy and will
not be populated.
"""
opts = url.translate_connect_args(username='user')
opts.update(url.query)
args = opts.pop('host')
opts.pop('port', None)
opts.pop('database', None)
return (args,), opts
def is_disconnect(self, e, connection, cursor):
# TODO: eGenix recommends checking connection.closed here
# Does that detect dropped connections ?
if isinstance(e, self.dbapi.ProgrammingError):
return "connection already closed" in str(e)
elif isinstance(e, self.dbapi.Error):
return '[08S01]' in str(e)
else:
return False
def _get_server_version_info(self, connection):
# eGenix suggests using conn.dbms_version instead
# of what we're doing here
dbapi_con = connection.connection
version = []
r = re.compile('[.\-]')
# 18 == pyodbc.SQL_DBMS_VER
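        # e.g. a hypothetical version string "10.50.2500" would be split on
        # '.'/'-' and returned as (10, 50, 2500); non-numeric fragments are
        # kept as strings.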
for n in r.split(dbapi_con.getinfo(18)[1]):
try:
version.append(int(n))
except ValueError:
version.append(n)
return tuple(version)
def _get_direct(self, context):
if context:
native_odbc_execute = context.execution_options.\
get('native_odbc_execute', 'auto')
# default to direct=True in all cases, is more generally
# compatible especially with SQL Server
return False if native_odbc_execute is True else True
else:
return True
def do_executemany(self, cursor, statement, parameters, context=None):
cursor.executemany(
statement, parameters, direct=self._get_direct(context))
def do_execute(self, cursor, statement, parameters, context=None):
cursor.execute(statement, parameters, direct=self._get_direct(context))
|
DigitalGlobe/gbdxtools
|
refs/heads/master
|
gbdxtools/images/exceptions/driver.py
|
1
|
class DriverConfigurationError(AttributeError):
pass
class UnsupportedImageProduct(KeyError):
pass
|
CollabQ/CollabQ
|
refs/heads/master
|
.google_appengine/lib/django/django/db/backends/ado_mssql/creation.py
|
32
|
DATA_TYPES = {
'AutoField': 'int IDENTITY (1, 1)',
'BooleanField': 'bit',
'CharField': 'varchar(%(maxlength)s)',
'CommaSeparatedIntegerField': 'varchar(%(maxlength)s)',
'DateField': 'smalldatetime',
'DateTimeField': 'smalldatetime',
'FileField': 'varchar(100)',
'FilePathField': 'varchar(100)',
'FloatField': 'numeric(%(max_digits)s, %(decimal_places)s)',
'ImageField': 'varchar(100)',
'IntegerField': 'int',
'IPAddressField': 'char(15)',
'ManyToManyField': None,
'NullBooleanField': 'bit',
'OneToOneField': 'int',
'PhoneNumberField': 'varchar(20)',
'PositiveIntegerField': 'int CONSTRAINT [CK_int_pos_%(column)s] CHECK ([%(column)s] > 0)',
'PositiveSmallIntegerField': 'smallint CONSTRAINT [CK_smallint_pos_%(column)s] CHECK ([%(column)s] > 0)',
'SlugField': 'varchar(%(maxlength)s)',
'SmallIntegerField': 'smallint',
'TextField': 'text',
'TimeField': 'time',
'USStateField': 'varchar(2)',
}
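# Illustrative sketch (not part of the original backend): the schema creation
# code interpolates column metadata into these templates with standard Python
# %-formatting, e.g.
#     DATA_TYPES['CharField'] % {'maxlength': 50}
#     # -> 'varchar(50)'
#     DATA_TYPES['PositiveIntegerField'] % {'column': 'age'}
#     # -> 'int CONSTRAINT [CK_int_pos_age] CHECK ([age] > 0)'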
|
enthought/etsproxy
|
refs/heads/master
|
enthought/util/random_state.py
|
1
|
#------------------------------------------------------------------------------
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Enthought, Inc.
# Description: <Enthought util package component>
#------------------------------------------------------------------------------
""" The two classes here help keep up with the current "state" of the
random number generators in scipy.stats and random. They can be
used to save the current state of the random number generators to
be used in the future to ensure identical results between calls.
The RandomStateManager works as a stack with save_state, set_state
and restore_state methods that allow you to keep a stack of states
available. A common usage is as follows:
>>> # save the current state in a variable named "old_state"
>>> old_state = RandomState()
Perform some stochastic calculation...
>>> # Now if you want to have stochastic_calculation() return identical
>>> # results without disrupting other calculations that use random values
>>> # do the following:
>>> rsm = RandomStateManager()
>>> rsm.save_state()
>>> rsm.set_state(old_state)
Perform some stochastic calculation...
>>> rsm.restore_state()
Note that these routines currently only support the state of random and
scipy.stats. If you use other random number generators, their states
will not be managed correctly.
"""
import warnings
warnings.warn("Module is deprecated.", DeprecationWarning)
import random
from numpy.random import get_state as get_seed
from numpy.random import set_state as set_seed
class RandomState:
def __init__(self):
self.update()
def update(self):
self.stats_seed = get_seed()
self.random_state = random.getstate()
class RandomStateManager:
# todo - does it make any sense to use a stack structure?
# we currently store the seeds elsewhere anyway so the stack only
# ever has one element in it.
#
def __init__(self):
self.state_stack = []
def save_state(self):
current_state = RandomState()
self.state_stack.append(current_state)
def set_state(self, random_state):
seed = random_state.stats_seed
set_seed(seed)
state = random_state.random_state
random.setstate(state)
def restore_state(self):
try:
previous_state = self.state_stack.pop(-1)
self.set_state(previous_state)
        except IndexError:
raise IndexError("trying to call restore_state without matching"
" call to save_state")
if __name__ == '__main__':
import doctest
doctest.testmod()
|
marcoarruda/MissionPlanner
|
refs/heads/master
|
Lib/lib2to3/fixes/fix_paren.py
|
61
|
"""Fixer that addes parentheses where they are required
This converts ``[x for x in 1, 2]`` to ``[x for x in (1, 2)]``."""
# By Taek Joo Kim and Benjamin Peterson
# Local imports
from .. import fixer_base
from ..fixer_util import LParen, RParen
# XXX This doesn't support nested for loops like [x for x in 1, 2 for x in 1, 2]
class FixParen(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
atom< ('[' | '(')
(listmaker< any
comp_for<
'for' NAME 'in'
target=testlist_safe< any (',' any)+ [',']
>
[any]
>
>
|
testlist_gexp< any
comp_for<
'for' NAME 'in'
target=testlist_safe< any (',' any)+ [',']
>
[any]
>
>)
(']' | ')') >
"""
def transform(self, node, results):
target = results["target"]
lparen = LParen()
lparen.prefix = target.prefix
target.prefix = u"" # Make it hug the parentheses
target.insert_child(0, lparen)
target.append_child(RParen())
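# Illustrative before/after (editorial note, not part of the fixer): the
# transform wraps the bare tuple target in parentheses, e.g.
#     [x for x in 1, 2]   ->   [x for x in (1, 2)]
# The inserted LParen adopts the target's original prefix, so surrounding
# whitespace is preserved while the target itself hugs the new parentheses.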
|
twitterdev/twitter-python-ads-sdk
|
refs/heads/master
|
twitter_ads/__init__.py
|
1
|
# Copyright (C) 2015 Twitter, Inc.
VERSION = (9, 0, 0)
API_VERSION = '9'
from twitter_ads.utils import get_version
__version__ = get_version()
|
j-bennet/pgcli
|
refs/heads/master
|
release.py
|
7
|
#!/usr/bin/env python
from __future__ import print_function
import re
import ast
import subprocess
import sys
DEBUG = False
def version(version_file):
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open(version_file, 'rb') as f:
ver = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
return ver
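# Illustrative example (assumed file contents): if pgcli/__init__.py contains
# a line such as
#     __version__ = '1.0.0'
# then version('pgcli/__init__.py') returns the string '1.0.0'.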
def commit_for_release(version_file, ver):
cmd = ['git', 'reset']
print(' '.join(cmd))
subprocess.check_output(cmd)
cmd = ['git', 'add', version_file]
print(' '.join(cmd))
subprocess.check_output(cmd)
cmd = ['git', 'commit', '--message', 'Releasing version %s' % ver]
print(' '.join(cmd))
subprocess.check_output(cmd)
def create_git_tag(tag_name):
cmd = ['git', 'tag', tag_name]
print(' '.join(cmd))
subprocess.check_output(cmd)
def register_with_pypi():
cmd = ['python', 'setup.py', 'register']
print(' '.join(cmd))
subprocess.check_output(cmd)
def create_source_tarball():
cmd = ['python', 'setup.py', 'sdist']
print(' '.join(cmd))
subprocess.check_output(cmd)
def push_to_github():
cmd = ['git', 'push', 'origin']
print(' '.join(cmd))
subprocess.check_output(cmd)
def push_tags_to_github():
cmd = ['git', 'push', '--tags', 'origin']
print(' '.join(cmd))
subprocess.check_output(cmd)
if __name__ == '__main__':
if DEBUG:
subprocess.check_output = lambda x: x
ver = version('pgcli/__init__.py')
print('Releasing Version:', ver)
choice = raw_input('Are you sure? (y/N)')
if choice.lower() != 'y':
sys.exit(1)
commit_for_release('pgcli/__init__.py', ver)
create_git_tag('v%s' % ver)
register_with_pypi()
create_source_tarball()
push_to_github()
push_tags_to_github()
|
phobson/statsmodels
|
refs/heads/master
|
statsmodels/tsa/tests/test_stattools.py
|
1
|
from statsmodels.compat.python import lrange
from statsmodels.tsa.stattools import (adfuller, acf, pacf_ols, pacf_yw,
pacf, grangercausalitytests,
coint, acovf, kpss, ResultsStore,
arma_order_select_ic)
from statsmodels.tsa.base.datetools import dates_from_range
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal, assert_warns,
assert_raises, dec, assert_)
from numpy import genfromtxt
from statsmodels.datasets import macrodata, sunspots
from pandas import Series, Index, DataFrame
import os
import warnings
from statsmodels.tools.sm_exceptions import MissingDataError
DECIMAL_8 = 8
DECIMAL_6 = 6
DECIMAL_5 = 5
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
DECIMAL_1 = 1
class CheckADF(object):
"""
Test Augmented Dickey-Fuller
Test values taken from Stata.
"""
levels = ['1%', '5%', '10%']
data = macrodata.load()
x = data.data['realgdp']
y = data.data['infl']
def test_teststat(self):
assert_almost_equal(self.res1[0], self.teststat, DECIMAL_5)
def test_pvalue(self):
assert_almost_equal(self.res1[1], self.pvalue, DECIMAL_5)
def test_critvalues(self):
critvalues = [self.res1[4][lev] for lev in self.levels]
assert_almost_equal(critvalues, self.critvalues, DECIMAL_2)
class TestADFConstant(CheckADF):
"""
Dickey-Fuller test for unit root
"""
def __init__(self):
self.res1 = adfuller(self.x, regression="c", autolag=None,
maxlag=4)
self.teststat = .97505319
self.pvalue = .99399563
self.critvalues = [-3.476, -2.883, -2.573]
class TestADFConstantTrend(CheckADF):
"""
"""
def __init__(self):
self.res1 = adfuller(self.x, regression="ct", autolag=None,
maxlag=4)
self.teststat = -1.8566374
self.pvalue = .67682968
self.critvalues = [-4.007, -3.437, -3.137]
#class TestADFConstantTrendSquared(CheckADF):
# """
# """
# pass
#TODO: get test values from R?
class TestADFNoConstant(CheckADF):
"""
"""
def __init__(self):
self.res1 = adfuller(self.x, regression="nc", autolag=None,
maxlag=4)
self.teststat = 3.5227498
self.pvalue = .99999 # Stata does not return a p-value for noconstant.
# Tau^max in MacKinnon (1994) is missing, so it is
# assumed that its right-tail is well-behaved
self.critvalues = [-2.587, -1.950, -1.617]
# No Unit Root
class TestADFConstant2(CheckADF):
def __init__(self):
self.res1 = adfuller(self.y, regression="c", autolag=None,
maxlag=1)
self.teststat = -4.3346988
self.pvalue = .00038661
self.critvalues = [-3.476, -2.883, -2.573]
class TestADFConstantTrend2(CheckADF):
def __init__(self):
self.res1 = adfuller(self.y, regression="ct", autolag=None,
maxlag=1)
self.teststat = -4.425093
self.pvalue = .00199633
self.critvalues = [-4.006, -3.437, -3.137]
class TestADFNoConstant2(CheckADF):
def __init__(self):
self.res1 = adfuller(self.y, regression="nc", autolag=None,
maxlag=1)
self.teststat = -2.4511596
self.pvalue = 0.013747 # Stata does not return a p-value for noconstant
# this value is just taken from our results
self.critvalues = [-2.587,-1.950,-1.617]
_, _1, _2, self.store = adfuller(self.y, regression="nc", autolag=None,
maxlag=1, store=True)
def test_store_str(self):
assert_equal(self.store.__str__(), 'Augmented Dickey-Fuller Test Results')
class CheckCorrGram(object):
"""
Set up for ACF, PACF tests.
"""
data = macrodata.load()
x = data.data['realgdp']
filename = os.path.dirname(os.path.abspath(__file__))+\
"/results/results_corrgram.csv"
results = genfromtxt(open(filename, "rb"), delimiter=",", names=True,dtype=float)
#not needed: add 1. for lag zero
#self.results['acvar'] = np.concatenate(([1.], self.results['acvar']))
class TestACF(CheckCorrGram):
"""
Test Autocorrelation Function
"""
def __init__(self):
self.acf = self.results['acvar']
#self.acf = np.concatenate(([1.], self.acf))
self.qstat = self.results['Q1']
self.res1 = acf(self.x, nlags=40, qstat=True, alpha=.05)
self.confint_res = self.results[['acvar_lb','acvar_ub']].view((float,
2))
def test_acf(self):
assert_almost_equal(self.res1[0][1:41], self.acf, DECIMAL_8)
def test_confint(self):
centered = self.res1[1] - self.res1[1].mean(1)[:,None]
assert_almost_equal(centered[1:41], self.confint_res, DECIMAL_8)
def test_qstat(self):
assert_almost_equal(self.res1[2][:40], self.qstat, DECIMAL_3)
# 3 decimal places because of stata rounding
# def pvalue(self):
# pass
#NOTE: shouldn't need testing if Q stat is correct
class TestACF_FFT(CheckCorrGram):
# Test Autocorrelation Function using FFT
def __init__(self):
self.acf = self.results['acvarfft']
self.qstat = self.results['Q1']
self.res1 = acf(self.x, nlags=40, qstat=True, fft=True)
def test_acf(self):
assert_almost_equal(self.res1[0][1:], self.acf, DECIMAL_8)
def test_qstat(self):
#todo why is res1/qstat 1 short
assert_almost_equal(self.res1[1], self.qstat, DECIMAL_3)
class TestACFMissing(CheckCorrGram):
# Test Autocorrelation Function using Missing
def __init__(self):
self.x = np.concatenate((np.array([np.nan]),self.x))
self.acf = self.results['acvar'] # drop and conservative
self.qstat = self.results['Q1']
self.res_drop = acf(self.x, nlags=40, qstat=True, alpha=.05,
missing='drop')
self.res_conservative = acf(self.x, nlags=40, qstat=True, alpha=.05,
missing='conservative')
self.acf_none = np.empty(40) * np.nan # lags 1 to 40 inclusive
self.qstat_none = np.empty(40) * np.nan
self.res_none = acf(self.x, nlags=40, qstat=True, alpha=.05,
missing='none')
def test_raise(self):
assert_raises(MissingDataError, acf, self.x, nlags=40,
qstat=True, alpha=.05, missing='raise')
def test_acf_none(self):
assert_almost_equal(self.res_none[0][1:41], self.acf_none, DECIMAL_8)
def test_acf_drop(self):
assert_almost_equal(self.res_drop[0][1:41], self.acf, DECIMAL_8)
def test_acf_conservative(self):
assert_almost_equal(self.res_conservative[0][1:41], self.acf,
DECIMAL_8)
def test_qstat_none(self):
#todo why is res1/qstat 1 short
assert_almost_equal(self.res_none[2], self.qstat_none, DECIMAL_3)
# how to do this test? the correct q_stat depends on whether nobs=len(x) is
# used when x contains NaNs or whether nobs<len(x) when x contains NaNs
# def test_qstat_drop(self):
# assert_almost_equal(self.res_drop[2][:40], self.qstat, DECIMAL_3)
class TestPACF(CheckCorrGram):
def __init__(self):
self.pacfols = self.results['PACOLS']
self.pacfyw = self.results['PACYW']
def test_ols(self):
pacfols, confint = pacf(self.x, nlags=40, alpha=.05, method="ols")
assert_almost_equal(pacfols[1:], self.pacfols, DECIMAL_6)
centered = confint - confint.mean(1)[:,None]
# from edited Stata ado file
res = [[-.1375625, .1375625]] * 40
assert_almost_equal(centered[1:41], res, DECIMAL_6)
# check lag 0
assert_equal(centered[0], [0., 0.])
assert_equal(confint[0], [1, 1])
assert_equal(pacfols[0], 1)
def test_yw(self):
pacfyw = pacf_yw(self.x, nlags=40, method="mle")
assert_almost_equal(pacfyw[1:], self.pacfyw, DECIMAL_8)
def test_ld(self):
pacfyw = pacf_yw(self.x, nlags=40, method="mle")
pacfld = pacf(self.x, nlags=40, method="ldb")
assert_almost_equal(pacfyw, pacfld, DECIMAL_8)
pacfyw = pacf(self.x, nlags=40, method="yw")
pacfld = pacf(self.x, nlags=40, method="ldu")
assert_almost_equal(pacfyw, pacfld, DECIMAL_8)
class CheckCoint(object):
"""
Test Cointegration Test Results for 2-variable system
Test values taken from Stata
"""
levels = ['1%', '5%', '10%']
data = macrodata.load()
y1 = data.data['realcons']
y2 = data.data['realgdp']
def test_tstat(self):
assert_almost_equal(self.coint_t,self.teststat, DECIMAL_4)
class TestCoint_t(CheckCoint):
"""
Get AR(1) parameter on residuals
"""
def __init__(self):
self.coint_t = coint(self.y1, self.y2, regression ="c")[0]
self.teststat = -1.8208817
class TestGrangerCausality(object):
def test_grangercausality(self):
# some example data
mdata = macrodata.load().data
mdata = mdata[['realgdp', 'realcons']]
data = mdata.view((float, 2))
data = np.diff(np.log(data), axis=0)
#R: lmtest:grangertest
r_result = [0.243097, 0.7844328, 195, 2] # f_test
gr = grangercausalitytests(data[:, 1::-1], 2, verbose=False)
assert_almost_equal(r_result, gr[2][0]['ssr_ftest'], decimal=7)
assert_almost_equal(gr[2][0]['params_ftest'], gr[2][0]['ssr_ftest'], decimal=7)
def test_granger_fails_on_nobs_check(self):
# Test that if maxlag is too large, Granger Test raises a clear error.
X = np.random.rand(10, 2)
grangercausalitytests(X, 2, verbose=False) # This should pass.
assert_raises(ValueError, grangercausalitytests, X, 3, verbose=False)
class SetupKPSS(object):
data = macrodata.load()
x = data.data['realgdp']
class TestKPSS(SetupKPSS):
"""
R-code
------
library(tseries)
kpss.stat(x, "Level")
kpss.stat(x, "Trend")
In this context, x is the vector containing the
macrodata['realgdp'] series.
"""
def test_fail_nonvector_input(self):
with warnings.catch_warnings(record=True) as w:
kpss(self.x) # should be fine
x = np.random.rand(20, 2)
assert_raises(ValueError, kpss, x)
def test_fail_unclear_hypothesis(self):
# these should be fine,
with warnings.catch_warnings(record=True) as w:
kpss(self.x, 'c')
kpss(self.x, 'C')
kpss(self.x, 'ct')
kpss(self.x, 'CT')
assert_raises(ValueError, kpss, self.x, "unclear hypothesis")
def test_teststat(self):
with warnings.catch_warnings(record=True) as w:
kpss_stat, pval, lags, crits = kpss(self.x, 'c', 3)
assert_almost_equal(kpss_stat, 5.0169, DECIMAL_3)
with warnings.catch_warnings(record=True) as w:
kpss_stat, pval, lags, crits = kpss(self.x, 'ct', 3)
assert_almost_equal(kpss_stat, 1.1828, DECIMAL_3)
def test_pval(self):
with warnings.catch_warnings(record=True) as w:
kpss_stat, pval, lags, crits = kpss(self.x, 'c', 3)
assert_equal(pval, 0.01)
with warnings.catch_warnings(record=True) as w:
kpss_stat, pval, lags, crits = kpss(self.x, 'ct', 3)
assert_equal(pval, 0.01)
def test_store(self):
with warnings.catch_warnings(record=True) as w:
kpss_stat, pval, crit, store = kpss(self.x, 'c', 3, True)
# assert attributes, and make sure they're correct
assert_equal(store.nobs, len(self.x))
assert_equal(store.lags, 3)
def test_lags(self):
with warnings.catch_warnings(record=True) as w:
kpss_stat, pval, lags, crits = kpss(self.x, 'c')
assert_equal(lags, int(np.ceil(12. * np.power(len(self.x) / 100., 1 / 4.))))
# assert_warns(UserWarning, kpss, self.x)
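        # Worked example of the default lag rule checked above (illustrative
        # length): for a series of 200 observations,
        #     int(np.ceil(12. * np.power(200 / 100., 1 / 4.))) == 15
        # so kpss() picks 15 lags when none are specified.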
def test_pandasacovf():
s = Series(lrange(1, 11))
assert_almost_equal(acovf(s), acovf(s.values))
def test_acovf2d():
dta = sunspots.load_pandas().data
dta.index = Index(dates_from_range('1700', '2008'))
del dta["YEAR"]
res = acovf(dta)
assert_equal(res, acovf(dta.values))
X = np.random.random((10,2))
assert_raises(ValueError, acovf, X)
def test_acovf_fft_vs_convolution():
np.random.seed(1)
q = np.random.normal(size=100)
for demean in [True, False]:
for unbiased in [True, False]:
F1 = acovf(q, demean=demean, unbiased=unbiased, fft=True)
F2 = acovf(q, demean=demean, unbiased=unbiased, fft=False)
assert_almost_equal(F1, F2, decimal=7)
@dec.slow
def test_arma_order_select_ic():
# smoke test, assumes info-criteria are right
from statsmodels.tsa.arima_process import arma_generate_sample
import statsmodels.api as sm
arparams = np.array([.75, -.25])
maparams = np.array([.65, .35])
arparams = np.r_[1, -arparams]
    maparams = np.r_[1, maparams]
nobs = 250
np.random.seed(2014)
y = arma_generate_sample(arparams, maparams, nobs)
res = arma_order_select_ic(y, ic=['aic', 'bic'], trend='nc')
# regression tests in case we change algorithm to minic in sas
aic_x = np.array([[ np.nan, 552.7342255 , 484.29687843],
[ 562.10924262, 485.5197969 , 480.32858497],
[ 507.04581344, 482.91065829, 481.91926034],
[ 484.03995962, 482.14868032, 483.86378955],
[ 481.8849479 , 483.8377379 , 485.83756612]])
bic_x = np.array([[ np.nan, 559.77714733, 494.86126118],
[ 569.15216446, 496.08417966, 494.41442864],
[ 517.61019619, 496.99650196, 499.52656493],
[ 498.12580329, 499.75598491, 504.99255506],
[ 499.49225249, 504.96650341, 510.48779255]])
aic = DataFrame(aic_x , index=lrange(5), columns=lrange(3))
bic = DataFrame(bic_x , index=lrange(5), columns=lrange(3))
assert_almost_equal(res.aic.values, aic.values, 5)
assert_almost_equal(res.bic.values, bic.values, 5)
assert_equal(res.aic_min_order, (1, 2))
assert_equal(res.bic_min_order, (1, 2))
assert_(res.aic.index.equals(aic.index))
assert_(res.aic.columns.equals(aic.columns))
assert_(res.bic.index.equals(bic.index))
assert_(res.bic.columns.equals(bic.columns))
res = arma_order_select_ic(y, ic='aic', trend='nc')
assert_almost_equal(res.aic.values, aic.values, 5)
assert_(res.aic.index.equals(aic.index))
assert_(res.aic.columns.equals(aic.columns))
assert_equal(res.aic_min_order, (1, 2))
def test_arma_order_select_ic_failure():
# this should trigger an SVD convergence failure, smoke test that it
# returns, likely platform dependent failure...
# looks like AR roots may be cancelling out for 4, 1?
y = np.array([ 0.86074377817203640006, 0.85316549067906921611,
0.87104653774363305363, 0.60692382068987393851,
0.69225941967301307667, 0.73336177248909339976,
0.03661329261479619179, 0.15693067239962379955,
0.12777403512447857437, -0.27531446294481976 ,
-0.24198139631653581283, -0.23903317951236391359,
-0.26000241325906497947, -0.21282920015519238288,
-0.15943768324388354896, 0.25169301564268781179,
0.1762305709151877342 , 0.12678133368791388857,
0.89755829086753169399, 0.82667068795350151511])
import warnings
with warnings.catch_warnings():
# catch a hessian inversion and convergence failure warning
warnings.simplefilter("ignore")
res = arma_order_select_ic(y)
def test_acf_fft_dataframe():
# regression test #322
result = acf(sunspots.load_pandas().data[['SUNACTIVITY']], fft=True)
assert_equal(result.ndim, 1)
if __name__=="__main__":
import nose
# nose.runmodule(argv=[__file__, '-vvs','-x','-pdb'], exit=False)
import numpy as np
np.testing.run_module_suite()
|
jaredks/rumps
|
refs/heads/master
|
examples/example_timers.py
|
2
|
import rumps
import time
def timez():
return time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.localtime())
@rumps.timer(1)
def a(sender):
print('%r %r' % (sender, timez()))
@rumps.clicked('Change timer')
def changeit(_):
response = rumps.Window('Enter new interval').run()
if response.clicked:
global_namespace_timer.interval = int(response.text)
@rumps.clicked('All timers')
def activetimers(_):
print(rumps.timers())
@rumps.clicked('Start timer')
def start_timer(_):
global_namespace_timer.start()
@rumps.clicked('Stop timer')
def stop_timer(_):
global_namespace_timer.stop()
if __name__ == "__main__":
global_namespace_timer = rumps.Timer(a, 4)
rumps.App('fuuu', menu=('Change timer', 'All timers', 'Start timer', 'Stop timer')).run()
|
anduslim/codex
|
refs/heads/master
|
codex_project/actors/migrations/0004_auto__chg_field_reading_value.py
|
1
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Reading.value'
db.alter_column('actors_reading', 'value', self.gf('django.db.models.fields.CharField')(max_length=100))
def backwards(self, orm):
# Changing field 'Reading.value'
db.alter_column('actors_reading', 'value', self.gf('django.db.models.fields.CharField')(max_length=50))
models = {
'actors.appconfig': {
'Meta': {'object_name': 'AppConfig'},
'config_id': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'actors.node': {
'Meta': {'object_name': 'Node'},
'app_config': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'node'", 'to': "orm['actors.AppConfig']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'node_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'node_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'actors.reading': {
'Meta': {'object_name': 'Reading'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'node': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sensor_reading'", 'to': "orm['actors.Node']"}),
'sensor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sensor_reading'", 'to': "orm['actors.Sensor']"}),
'seq_no': ('django.db.models.fields.PositiveIntegerField', [], {}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 7, 18, 0, 0)'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'actors.sensor': {
'Meta': {'object_name': 'Sensor'},
'data_format': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modality': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'actors.sensormap': {
'Meta': {'object_name': 'SensorMap'},
'app_config': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sensor_map'", 'to': "orm['actors.AppConfig']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modality_bit': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'sensor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sensor_map'", 'to': "orm['actors.Sensor']"})
}
}
complete_apps = ['actors']
|
domainxz/cuda-convnet2
|
refs/heads/master
|
shownet.py
|
180
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from tarfile import TarFile, TarInfo
from matplotlib import pylab as pl
import numpy as n
import getopt as opt
from python_util.util import *
from math import sqrt, ceil, floor
from python_util.gpumodel import IGPUModel
import random as r
import numpy.random as nr
from convnet import ConvNet
from python_util.options import *
from PIL import Image
from time import sleep
class ShowNetError(Exception):
pass
class ShowConvNet(ConvNet):
def __init__(self, op, load_dic):
ConvNet.__init__(self, op, load_dic)
def init_data_providers(self):
self.need_gpu = self.op.get_value('show_preds')
class Dummy:
def advance_batch(self):
pass
if self.need_gpu:
ConvNet.init_data_providers(self)
else:
self.train_data_provider = self.test_data_provider = Dummy()
def import_model(self):
if self.need_gpu:
ConvNet.import_model(self)
def init_model_state(self):
if self.op.get_value('show_preds'):
self.softmax_name = self.op.get_value('show_preds')
def init_model_lib(self):
if self.need_gpu:
ConvNet.init_model_lib(self)
def plot_cost(self):
if self.show_cost not in self.train_outputs[0][0]:
raise ShowNetError("Cost function with name '%s' not defined by given convnet." % self.show_cost)
# print self.test_outputs
train_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.train_outputs]
test_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.test_outputs]
if self.smooth_test_errors:
test_errors = [sum(test_errors[max(0,i-len(self.test_batch_range)):i])/(i-max(0,i-len(self.test_batch_range))) for i in xrange(1,len(test_errors)+1)]
numbatches = len(self.train_batch_range)
test_errors = n.row_stack(test_errors)
test_errors = n.tile(test_errors, (1, self.testing_freq))
test_errors = list(test_errors.flatten())
test_errors += [test_errors[-1]] * max(0,len(train_errors) - len(test_errors))
test_errors = test_errors[:len(train_errors)]
numepochs = len(train_errors) / float(numbatches)
pl.figure(1)
x = range(0, len(train_errors))
pl.plot(x, train_errors, 'k-', label='Training set')
pl.plot(x, test_errors, 'r-', label='Test set')
pl.legend()
ticklocs = range(numbatches, len(train_errors) - len(train_errors) % numbatches + 1, numbatches)
epoch_label_gran = int(ceil(numepochs / 20.))
epoch_label_gran = int(ceil(float(epoch_label_gran) / 10) * 10) if numepochs >= 10 else epoch_label_gran
ticklabels = map(lambda x: str((x[1] / numbatches)) if x[0] % epoch_label_gran == epoch_label_gran-1 else '', enumerate(ticklocs))
pl.xticks(ticklocs, ticklabels)
pl.xlabel('Epoch')
# pl.ylabel(self.show_cost)
pl.title('%s[%d]' % (self.show_cost, self.cost_idx))
# print "plotted cost"
def make_filter_fig(self, filters, filter_start, fignum, _title, num_filters, combine_chans, FILTERS_PER_ROW=16):
MAX_ROWS = 24
MAX_FILTERS = FILTERS_PER_ROW * MAX_ROWS
num_colors = filters.shape[0]
f_per_row = int(ceil(FILTERS_PER_ROW / float(1 if combine_chans else num_colors)))
filter_end = min(filter_start+MAX_FILTERS, num_filters)
filter_rows = int(ceil(float(filter_end - filter_start) / f_per_row))
filter_pixels = filters.shape[1]
filter_size = int(sqrt(filters.shape[1]))
fig = pl.figure(fignum)
fig.text(.5, .95, '%s %dx%d filters %d-%d' % (_title, filter_size, filter_size, filter_start, filter_end-1), horizontalalignment='center')
num_filters = filter_end - filter_start
if not combine_chans:
bigpic = n.zeros((filter_size * filter_rows + filter_rows + 1, filter_size*num_colors * f_per_row + f_per_row + 1), dtype=n.single)
else:
bigpic = n.zeros((3, filter_size * filter_rows + filter_rows + 1, filter_size * f_per_row + f_per_row + 1), dtype=n.single)
for m in xrange(filter_start,filter_end ):
filter = filters[:,:,m]
y, x = (m - filter_start) / f_per_row, (m - filter_start) % f_per_row
if not combine_chans:
for c in xrange(num_colors):
filter_pic = filter[c,:].reshape((filter_size,filter_size))
bigpic[1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
1 + (1 + filter_size*num_colors) * x + filter_size*c:1 + (1 + filter_size*num_colors) * x + filter_size*(c+1)] = filter_pic
else:
filter_pic = filter.reshape((3, filter_size,filter_size))
bigpic[:,
1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
1 + (1 + filter_size) * x:1 + (1 + filter_size) * x + filter_size] = filter_pic
pl.xticks([])
pl.yticks([])
if not combine_chans:
pl.imshow(bigpic, cmap=pl.cm.gray, interpolation='nearest')
else:
bigpic = bigpic.swapaxes(0,2).swapaxes(0,1)
pl.imshow(bigpic, interpolation='nearest')
def plot_filters(self):
FILTERS_PER_ROW = 16
filter_start = 0 # First filter to show
if self.show_filters not in self.layers:
raise ShowNetError("Layer with name '%s' not defined by given convnet." % self.show_filters)
layer = self.layers[self.show_filters]
filters = layer['weights'][self.input_idx]
# filters = filters - filters.min()
# filters = filters / filters.max()
if layer['type'] == 'fc': # Fully-connected layer
num_filters = layer['outputs']
channels = self.channels
filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
elif layer['type'] in ('conv', 'local'): # Conv layer
num_filters = layer['filters']
channels = layer['filterChannels'][self.input_idx]
if layer['type'] == 'local':
filters = filters.reshape((layer['modules'], channels, layer['filterPixels'][self.input_idx], num_filters))
filters = filters[:, :, :, self.local_plane] # first map for now (modules, channels, pixels)
filters = filters.swapaxes(0,2).swapaxes(0,1)
num_filters = layer['modules']
# filters = filters.swapaxes(0,1).reshape(channels * layer['filterPixels'][self.input_idx], num_filters * layer['modules'])
# num_filters *= layer['modules']
FILTERS_PER_ROW = layer['modulesX']
else:
filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
# Convert YUV filters to RGB
if self.yuv_to_rgb and channels == 3:
R = filters[0,:,:] + 1.28033 * filters[2,:,:]
G = filters[0,:,:] + -0.21482 * filters[1,:,:] + -0.38059 * filters[2,:,:]
B = filters[0,:,:] + 2.12798 * filters[1,:,:]
filters[0,:,:], filters[1,:,:], filters[2,:,:] = R, G, B
combine_chans = not self.no_rgb and channels == 3
# Make sure you don't modify the backing array itself here -- so no -= or /=
if self.norm_filters:
#print filters.shape
filters = filters - n.tile(filters.reshape((filters.shape[0] * filters.shape[1], filters.shape[2])).mean(axis=0).reshape(1, 1, filters.shape[2]), (filters.shape[0], filters.shape[1], 1))
filters = filters / n.sqrt(n.tile(filters.reshape((filters.shape[0] * filters.shape[1], filters.shape[2])).var(axis=0).reshape(1, 1, filters.shape[2]), (filters.shape[0], filters.shape[1], 1)))
#filters = filters - n.tile(filters.min(axis=0).min(axis=0), (3, filters.shape[1], 1))
#filters = filters / n.tile(filters.max(axis=0).max(axis=0), (3, filters.shape[1], 1))
#else:
filters = filters - filters.min()
filters = filters / filters.max()
self.make_filter_fig(filters, filter_start, 2, 'Layer %s' % self.show_filters, num_filters, combine_chans, FILTERS_PER_ROW=FILTERS_PER_ROW)
def plot_predictions(self):
epoch, batch, data = self.get_next_batch(train=False) # get a test batch
num_classes = self.test_data_provider.get_num_classes()
NUM_ROWS = 2
NUM_COLS = 4
NUM_IMGS = NUM_ROWS * NUM_COLS if not self.save_preds else data[0].shape[1]
NUM_TOP_CLASSES = min(num_classes, 5) # show this many top labels
NUM_OUTPUTS = self.model_state['layers'][self.softmax_name]['outputs']
PRED_IDX = 1
label_names = [lab.split(',')[0] for lab in self.test_data_provider.batch_meta['label_names']]
if self.only_errors:
preds = n.zeros((data[0].shape[1], NUM_OUTPUTS), dtype=n.single)
else:
preds = n.zeros((NUM_IMGS, NUM_OUTPUTS), dtype=n.single)
#rand_idx = nr.permutation(n.r_[n.arange(1), n.where(data[1] == 552)[1], n.where(data[1] == 795)[1], n.where(data[1] == 449)[1], n.where(data[1] == 274)[1]])[:NUM_IMGS]
rand_idx = nr.randint(0, data[0].shape[1], NUM_IMGS)
if NUM_IMGS < data[0].shape[1]:
data = [n.require(d[:,rand_idx], requirements='C') for d in data]
# data += [preds]
# Run the model
print [d.shape for d in data], preds.shape
self.libmodel.startFeatureWriter(data, [preds], [self.softmax_name])
IGPUModel.finish_batch(self)
print preds
data[0] = self.test_data_provider.get_plottable_data(data[0])
if self.save_preds:
if not gfile.Exists(self.save_preds):
gfile.MakeDirs(self.save_preds)
preds_thresh = preds > 0.5 # Binarize predictions
data[0] = data[0] * 255.0
data[0][data[0]<0] = 0
data[0][data[0]>255] = 255
data[0] = n.require(data[0], dtype=n.uint8)
dir_name = '%s_predictions_batch_%d' % (os.path.basename(self.save_file), batch)
tar_name = os.path.join(self.save_preds, '%s.tar' % dir_name)
tfo = gfile.GFile(tar_name, "w")
tf = TarFile(fileobj=tfo, mode='w')
for img_idx in xrange(NUM_IMGS):
img = data[0][img_idx,:,:,:]
imsave = Image.fromarray(img)
prefix = "CORRECT" if data[1][0,img_idx] == preds_thresh[img_idx,PRED_IDX] else "FALSE_POS" if preds_thresh[img_idx,PRED_IDX] == 1 else "FALSE_NEG"
file_name = "%s_%.2f_%d_%05d_%d.png" % (prefix, preds[img_idx,PRED_IDX], batch, img_idx, data[1][0,img_idx])
# gf = gfile.GFile(file_name, "w")
file_string = StringIO()
imsave.save(file_string, "PNG")
tarinf = TarInfo(os.path.join(dir_name, file_name))
tarinf.size = file_string.tell()
file_string.seek(0)
tf.addfile(tarinf, file_string)
tf.close()
tfo.close()
# gf.close()
print "Wrote %d prediction PNGs to %s" % (preds.shape[0], tar_name)
else:
fig = pl.figure(3, figsize=(12,9))
fig.text(.4, .95, '%s test samples' % ('Mistaken' if self.only_errors else 'Random'))
if self.only_errors:
# what the net got wrong
if NUM_OUTPUTS > 1:
err_idx = [i for i,p in enumerate(preds.argmax(axis=1)) if p not in n.where(data[2][:,i] > 0)[0]]
else:
err_idx = n.where(data[1][0,:] != preds[:,0].T)[0]
print err_idx
err_idx = r.sample(err_idx, min(len(err_idx), NUM_IMGS))
data[0], data[1], preds = data[0][:,err_idx], data[1][:,err_idx], preds[err_idx,:]
import matplotlib.gridspec as gridspec
import matplotlib.colors as colors
cconv = colors.ColorConverter()
gs = gridspec.GridSpec(NUM_ROWS*2, NUM_COLS,
width_ratios=[1]*NUM_COLS, height_ratios=[2,1]*NUM_ROWS )
#print data[1]
for row in xrange(NUM_ROWS):
for col in xrange(NUM_COLS):
img_idx = row * NUM_COLS + col
if data[0].shape[0] <= img_idx:
break
pl.subplot(gs[(row * 2) * NUM_COLS + col])
#pl.subplot(NUM_ROWS*2, NUM_COLS, row * 2 * NUM_COLS + col + 1)
pl.xticks([])
pl.yticks([])
img = data[0][img_idx,:,:,:]
pl.imshow(img, interpolation='lanczos')
show_title = data[1].shape[0] == 1
true_label = [int(data[1][0,img_idx])] if show_title else n.where(data[1][:,img_idx]==1)[0]
#print true_label
#print preds[img_idx,:].shape
#print preds[img_idx,:].max()
true_label_names = [label_names[i] for i in true_label]
img_labels = sorted(zip(preds[img_idx,:], label_names), key=lambda x: x[0])[-NUM_TOP_CLASSES:]
#print img_labels
axes = pl.subplot(gs[(row * 2 + 1) * NUM_COLS + col])
height = 0.5
ylocs = n.array(range(NUM_TOP_CLASSES))*height
pl.barh(ylocs, [l[0] for l in img_labels], height=height, \
color=['#ffaaaa' if l[1] in true_label_names else '#aaaaff' for l in img_labels])
#pl.title(", ".join(true_labels))
if show_title:
pl.title(", ".join(true_label_names), fontsize=15, fontweight='bold')
else:
print true_label_names
pl.yticks(ylocs + height/2, [l[1] for l in img_labels], x=1, backgroundcolor=cconv.to_rgba('0.65', alpha=0.5), weight='bold')
for line in enumerate(axes.get_yticklines()):
line[1].set_visible(False)
#pl.xticks([width], [''])
#pl.yticks([])
pl.xticks([])
pl.ylim(0, ylocs[-1] + height)
pl.xlim(0, 1)
def start(self):
self.op.print_values()
# print self.show_cost
if self.show_cost:
self.plot_cost()
if self.show_filters:
self.plot_filters()
if self.show_preds:
self.plot_predictions()
if pl:
pl.show()
sys.exit(0)
@classmethod
def get_options_parser(cls):
op = ConvNet.get_options_parser()
for option in list(op.options):
if option not in ('gpu', 'load_file', 'inner_size', 'train_batch_range', 'test_batch_range', 'multiview_test', 'data_path', 'pca_noise', 'scalar_mean'):
op.delete_option(option)
op.add_option("show-cost", "show_cost", StringOptionParser, "Show specified objective function", default="")
op.add_option("show-filters", "show_filters", StringOptionParser, "Show learned filters in specified layer", default="")
op.add_option("norm-filters", "norm_filters", BooleanOptionParser, "Individually normalize filters shown with --show-filters", default=0)
op.add_option("input-idx", "input_idx", IntegerOptionParser, "Input index for layer given to --show-filters", default=0)
op.add_option("cost-idx", "cost_idx", IntegerOptionParser, "Cost function return value index for --show-cost", default=0)
op.add_option("no-rgb", "no_rgb", BooleanOptionParser, "Don't combine filter channels into RGB in layer given to --show-filters", default=False)
op.add_option("yuv-to-rgb", "yuv_to_rgb", BooleanOptionParser, "Convert RGB filters to YUV in layer given to --show-filters", default=False)
op.add_option("channels", "channels", IntegerOptionParser, "Number of channels in layer given to --show-filters (fully-connected layers only)", default=0)
op.add_option("show-preds", "show_preds", StringOptionParser, "Show predictions made by given softmax on test set", default="")
op.add_option("save-preds", "save_preds", StringOptionParser, "Save predictions to given path instead of showing them", default="")
op.add_option("only-errors", "only_errors", BooleanOptionParser, "Show only mistaken predictions (to be used with --show-preds)", default=False, requires=['show_preds'])
op.add_option("local-plane", "local_plane", IntegerOptionParser, "Local plane to show", default=0)
op.add_option("smooth-test-errors", "smooth_test_errors", BooleanOptionParser, "Use running average for test error plot?", default=1)
op.options['load_file'].default = None
return op
if __name__ == "__main__":
#nr.seed(6)
try:
op = ShowConvNet.get_options_parser()
op, load_dic = IGPUModel.parse_options(op)
model = ShowConvNet(op, load_dic)
model.start()
except (UnpickleError, ShowNetError, opt.GetoptError), e:
print "----------------"
print "Error:"
print e
|
NeCTAR-RC/heat
|
refs/heads/nectar/icehouse
|
heat/engine/resources/neutron/subnet.py
|
1
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.engine import clients
from heat.engine import constraints
from heat.engine import properties
from heat.engine.resources.neutron import neutron
from heat.openstack.common import log as logging
if clients.neutronclient is not None:
from neutronclient.common.exceptions import NeutronClientException
logger = logging.getLogger(__name__)
class Subnet(neutron.NeutronResource):
PROPERTIES = (
NETWORK_ID, CIDR, VALUE_SPECS, NAME, IP_VERSION,
DNS_NAMESERVERS, GATEWAY_IP, ENABLE_DHCP, ALLOCATION_POOLS,
TENANT_ID, HOST_ROUTES,
) = (
'network_id', 'cidr', 'value_specs', 'name', 'ip_version',
'dns_nameservers', 'gateway_ip', 'enable_dhcp', 'allocation_pools',
'tenant_id', 'host_routes',
)
_ALLOCATION_POOL_KEYS = (
ALLOCATION_POOL_START, ALLOCATION_POOL_END,
) = (
'start', 'end',
)
_HOST_ROUTES_KEYS = (
ROUTE_DESTINATION, ROUTE_NEXTHOP,
) = (
'destination', 'nexthop',
)
properties_schema = {
NETWORK_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the attached network.'),
required=True
),
CIDR: properties.Schema(
properties.Schema.STRING,
_('The CIDR.'),
required=True
),
VALUE_SPECS: properties.Schema(
properties.Schema.MAP,
_('Extra parameters to include in the creation request.'),
default={},
update_allowed=True
),
NAME: properties.Schema(
properties.Schema.STRING,
_('The name of the subnet.'),
update_allowed=True
),
IP_VERSION: properties.Schema(
properties.Schema.INTEGER,
_('The IP version, which is 4 or 6.'),
default=4,
constraints=[
constraints.AllowedValues([4, 6]),
]
),
DNS_NAMESERVERS: properties.Schema(
properties.Schema.LIST,
_('A specified set of DNS name servers to be used.'),
default=[],
update_allowed=True
),
GATEWAY_IP: properties.Schema(
properties.Schema.STRING,
_('The gateway IP address.'),
update_allowed=True
),
ENABLE_DHCP: properties.Schema(
properties.Schema.BOOLEAN,
_('Set to true if DHCP is enabled and false if DHCP is disabled.'),
default=True,
update_allowed=True
),
ALLOCATION_POOLS: properties.Schema(
properties.Schema.LIST,
_('The start and end addresses for the allocation pools.'),
schema=properties.Schema(
properties.Schema.MAP,
schema={
ALLOCATION_POOL_START: properties.Schema(
properties.Schema.STRING,
required=True
),
ALLOCATION_POOL_END: properties.Schema(
properties.Schema.STRING,
required=True
),
},
)
),
TENANT_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the tenant who owns the network. Only administrative'
' users can specify a tenant ID other than their own.')
),
HOST_ROUTES: properties.Schema(
properties.Schema.LIST,
schema=properties.Schema(
properties.Schema.MAP,
schema={
ROUTE_DESTINATION: properties.Schema(
properties.Schema.STRING,
required=True
),
ROUTE_NEXTHOP: properties.Schema(
properties.Schema.STRING,
required=True
),
},
)
),
}
attributes_schema = {
"name": _("Friendly name of the subnet."),
"network_id": _("Parent network of the subnet."),
"tenant_id": _("Tenant owning the subnet."),
"allocation_pools": _("Ip allocation pools and their ranges."),
"gateway_ip": _("Ip of the subnet's gateway."),
"host_routes": _("Additional routes for this subnet."),
"ip_version": _("Ip version for the subnet."),
"cidr": _("CIDR block notation for this subnet."),
"dns_nameservers": _("List of dns nameservers."),
"enable_dhcp": _("'true' if DHCP is enabled for this subnet; 'false' "
"otherwise."),
"show": _("All attributes."),
}
update_allowed_keys = ('Properties',)
@classmethod
def _null_gateway_ip(cls, props):
if cls.GATEWAY_IP not in props:
return
# Specifying null in the gateway_ip will result in
# a property containing an empty string.
# A null gateway_ip has special meaning in the API
# so this needs to be set back to None.
# See bug https://bugs.launchpad.net/heat/+bug/1226666
if props.get(cls.GATEWAY_IP) == '':
props[cls.GATEWAY_IP] = None
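    # Illustrative sketch (hypothetical template snippet, editorial note): a
    # resource that declares
    #     gateway_ip: ""
    # arrives here with props['gateway_ip'] == '' and is rewritten to None,
    # which the Neutron API treats as a request for a subnet with no gateway.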
def handle_create(self):
props = self.prepare_properties(
self.properties,
self.physical_resource_name())
self._null_gateway_ip(props)
subnet = self.neutron().create_subnet({'subnet': props})['subnet']
self.resource_id_set(subnet['id'])
def handle_delete(self):
client = self.neutron()
try:
client.delete_subnet(self.resource_id)
except NeutronClientException as ex:
self._handle_not_found_exception(ex)
else:
return self._delete_task()
def _show_resource(self):
return self.neutron().show_subnet(self.resource_id)['subnet']
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
props = self.prepare_update_properties(json_snippet)
self.neutron().update_subnet(
self.resource_id, {'subnet': props})
def resource_mapping():
if clients.neutronclient is None:
return {}
return {
'OS::Neutron::Subnet': Subnet,
}
|