code stringlengths 2 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2 1.05M |
|---|---|---|---|---|---|
from django.conf.urls import patterns, url
from django.views.generic import TemplateView
# URL routes for the gnowsys_ndf "file" app.
# NOTE: uses the pre-Django-1.8 ``patterns()`` prefix + string-view syntax;
# every view name below is resolved inside ``gnowsys_ndf.ndf.views.file``.
urlpatterns = patterns('gnowsys_ndf.ndf.views.file',
    url(r'^[/]$', 'file', name='file'),
    # url(r'^/(?P<file_id>[\w-]+)$', 'file', name='file'),
    url(r'^/uploadDoc/$', 'uploadDoc', name='uploadDoc'),  # Direct to html template
    url(r'^/submitDoc/', 'submitDoc', name='submitDoc'),
    url(r'^/submit/', 'submitDoc', name='submitDoc'),
    url(r'^/documentList/', 'GetDoc', name='documentList'),
    # The trailing file_name path segment is optional (non-capturing group).
    url(r'^/readDoc/(?P<_id>[\w-]+)/(?:(?P<file_name>[^/]+))?$', 'readDoc', name='read_file'),
    url(r'^/search/$', 'file_search', name='file_search'),
    url(r'^/details/(?P<_id>[\w-]+)$', 'file_detail', name='file_detail'),
    url(r'^/(?P<_id>[\w-]+)$', 'file_detail', name='file_detail'),
    url(r'^/thumbnail/(?P<_id>[\w-]+)$', 'getFileThumbnail', name='getFileThumbnail'),
    #url(r'^/delete_file/(?P<_id>[\w-]+)$', 'delete_file', name='delete_file'),
    url(r'^/delete/(?P<_id>[\w-]+)$', 'delete_file', name='delete_file'),
    url(r'^/edit_file/(?P<_id>[\w-]+)$', 'file_edit', name='file_edit'),
    # url(r'^/data-review/$', 'data_review', name='data_review'),
    # url(r'^/data-review/page-no=(?P<page_no>\d+)/$', 'data_review', name='data_review_page'),
    # url(r'^/data-review/save/$', 'data_review_save', name='data_review_save'),
    url(r'^/edit/(?P<_id>[\w-]+)$', 'file_edit', name='file_edit'),
    url(r'^/(?P<filetype>[\w-]+)/page-no=(?P<page_no>\d+)/$', 'paged_file_objs', name='paged_file_objs'),
)
| supriyasawant/gstudio | gnowsys-ndf/gnowsys_ndf/ndf/urls/file.py | Python | agpl-3.0 | 1,918 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RRvcheck(RPackage):
    """Check latest release version of R and R package (both in 'CRAN',
    'Bioconductor' or 'Github')."""

    homepage = "https://cloud.r-project.org/package=rvcheck"
    url = "https://cloud.r-project.org/src/contrib/rvcheck_0.0.9.tar.gz"
    list_url = "https://cloud.r-project.org/src/contrib/Archive/rvcheck"

    version('0.1.3', sha256='0b59986c1ccc5b89f8aca8fa7cf62d0b875719addb40e08dbda1791cfd334fc4')
    version('0.0.9', sha256='6e7be7b029d28181a1b57ebd4d25978f3459722ffdb45a3698157a7f943bea92')

    # The minimum required R version depends on which rvcheck release is built.
    depends_on('r@3.3.0:', when='@:0.1.1', type=('build', 'run'))
    depends_on('r@3.4.0:', when='@0.1.3:', type=('build', 'run'))
    # r-rlang is only required from rvcheck 0.1.1 onwards.
    depends_on('r-rlang', when='@0.1.1:', type=('build', 'run'))
| iulian787/spack | var/spack/repos/builtin/packages/r-rvcheck/package.py | Python | lgpl-2.1 | 958 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Prokka(Package):
    """Prokka is a software tool to annotate bacterial, archaeal and viral
    genomes quickly and produce standards-compliant output files."""

    homepage = "https://github.com/tseemann/prokka"
    url = "https://github.com/tseemann/prokka/archive/v1.14.5.tar.gz"

    version('1.14.6', sha256='f730b5400ea9e507bfe6c5f3d22ce61960a897195c11571c2e1308ce2533faf8')

    # Runtime-only dependencies: prokka invokes these tools when it runs,
    # nothing is compiled at install time.
    depends_on('perl', type='run')
    depends_on('perl-bioperl', type='run')
    depends_on('perl-xml-simple', type='run')
    depends_on('perl-bio-searchio-hmmer', type='run')
    depends_on('hmmer', type='run')
    depends_on('blast-plus', type='run')
    depends_on('prodigal', type='run')
    depends_on('tbl2asn', type='run')

    def install(self, spec, prefix):
        """Copy the pre-built release trees into the prefix (no build step)."""
        install_tree('bin', prefix.bin)
        install_tree('binaries', prefix.binaries)
        install_tree('db', prefix.db)
        install_tree('doc', prefix.doc)
| iulian787/spack | var/spack/repos/builtin/packages/prokka/package.py | Python | lgpl-2.1 | 1,155 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class BashCompletion(AutotoolsPackage):
    """Programmable completion functions for bash."""

    homepage = "https://github.com/scop/bash-completion"
    url = "https://github.com/scop/bash-completion/archive/2.3.tar.gz"
    git = "https://github.com/scop/bash-completion.git"

    version('develop', branch='master')
    version('2.7', sha256='dba2b88c363178622b61258f35d82df64dc8d279359f599e3b93eac0375a416c')
    version('2.3', sha256='d92fcef5f6e3bbc68a84f0a7b063a1cd07b4000cc6e275cd1ff83863ab3b322a')

    # Build dependencies
    depends_on('automake', type='build')
    depends_on('autoconf', type='build')
    depends_on('libtool', type='build')
    # Other dependencies
    depends_on('bash@4.1:', type='run')

    @run_before('install')
    def create_install_directory(self):
        """Ensure the completions directory exists before `make install`."""
        mkdirp(join_path(self.prefix.share, 'bash-completion', 'completions'))

    @run_after('install')
    def show_message_to_user(self):
        """Print ~/.bash_profile setup instructions after installation."""
        prefix = self.prefix
        # Guidelines for individual user as provided by the author at
        # https://github.com/scop/bash-completion
        print('=====================================================')
        print('Bash completion has been installed. To use it, please')
        print('include the following lines in your ~/.bash_profile :')
        print('')
        print('# Use bash-completion, if available')
        # BUG FIX: this line used to end in a backslash FOLLOWED BY A SPACE,
        # which silently breaks the shell line continuation when the snippet
        # is pasted into ~/.bash_profile. Emit a bare trailing backslash.
        print('[[ $PS1 && -f %s/share/bash-completion/bash_completion ]] && \\' % prefix)  # NOQA: ignore=E501
        print('    . %s/share/bash-completion/bash_completion' % prefix)
        print('')
        print('=====================================================')
| iulian787/spack | var/spack/repos/builtin/packages/bash-completion/package.py | Python | lgpl-2.1 | 1,869 |
# -*- coding: utf-8 -*-
from django.conf.urls.defaults import *
# URL routes for the Jenkins test application.
urlpatterns = patterns('',
    # Render a static template through Django's generic direct_to_template view.
    url(r'^test_click/$', 'django.views.generic.simple.direct_to_template',
        {'template': 'test_app/wm_test_click.html'}, name='wm_test_click')
)
| michaelBenin/django-jenkins | tests/test_app/urls.py | Python | lgpl-3.0 | 247 |
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from ggrc import db
from ggrc.models.mixins import BusinessObject, Timeboxed, CustomAttributable
from ggrc.models.object_document import Documentable
from ggrc.models.object_owner import Ownable
from ggrc.models.object_person import Personable
from ggrc.models.relationship import Relatable
from ggrc.models.track_object_state import HasObjectState, track_state_for_class
class AccessGroup(HasObjectState,
                  CustomAttributable, Personable, Documentable, Relatable,
                  Timeboxed, Ownable, BusinessObject, db.Model):
  """Access Group business object, persisted in the `access_groups` table."""

  __tablename__ = 'access_groups'

  # NOTE(review): presumably the display/column title used for the `url`
  # attribute during import/export -- confirm against the _aliases consumer.
  _aliases = {"url": "Access Group URL"}

# Register object-state tracking for this class (see track_object_state).
track_state_for_class(AccessGroup)
| edofic/ggrc-core | src/ggrc/models/access_group.py | Python | apache-2.0 | 777 |
"""Runs the Treadmill container cleanup job.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import click
from treadmill import appenv
from treadmill import cleanup
from treadmill import cli
from treadmill import utils
def init():
    """Top level command handler.

    Builds and returns the `cleanup` click command group with two
    subcommands: `watcher` (long-running) and `instance` (one-shot).
    """

    @click.group(name='cleanup')
    def cleanup_grp():
        """Cleanup click group."""

    @cleanup_grp.command('watcher')
    @click.option('--approot', type=click.Path(exists=True),
                  envvar='TREADMILL_APPROOT', required=True)
    def cleanup_watcher(approot):
        """Start cleanup watcher."""
        tm_env = appenv.AppEnvironment(root=approot)
        cleaner = cleanup.Cleanup(tm_env)
        cleaner.run()

    @cleanup_grp.command('instance')
    @click.option('--approot', type=click.Path(exists=True),
                  envvar='TREADMILL_APPROOT', required=True)
    @click.option('--runtime', envvar='TREADMILL_RUNTIME', required=True)
    @click.option('--runtime-param', type=cli.LIST, required=False)
    @click.argument('instance', nargs=1)
    def cleanup_instance(approot, runtime, instance, runtime_param):
        """Actually do the cleanup of the instance.
        """
        # --runtime-param arrives as a list of KEY=VALUE strings.
        param = utils.equals_list2dict(runtime_param or [])
        tm_env = appenv.AppEnvironment(root=approot)
        cleaner = cleanup.Cleanup(tm_env)
        cleaner.invoke(runtime, instance, param)

    # The decorators have already registered both commands on cleanup_grp;
    # the local names themselves are no longer needed.
    del cleanup_watcher
    del cleanup_instance

    return cleanup_grp
| Morgan-Stanley/treadmill | lib/python/treadmill/sproc/cleanup.py | Python | apache-2.0 | 1,571 |
"""Tests for AVM Fritz!Box switch component."""
from datetime import timedelta
from unittest.mock import Mock
from requests.exceptions import HTTPError
from homeassistant.components.fritzbox.const import (
ATTR_STATE_DEVICE_LOCKED,
ATTR_STATE_LOCKED,
DOMAIN as FB_DOMAIN,
)
from homeassistant.components.sensor import (
ATTR_STATE_CLASS,
DOMAIN as SENSOR_DOMAIN,
STATE_CLASS_MEASUREMENT,
STATE_CLASS_TOTAL_INCREASING,
)
from homeassistant.components.switch import DOMAIN
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_FRIENDLY_NAME,
ATTR_UNIT_OF_MEASUREMENT,
CONF_DEVICES,
ENERGY_KILO_WATT_HOUR,
POWER_WATT,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ON,
STATE_UNAVAILABLE,
TEMP_CELSIUS,
)
from homeassistant.core import HomeAssistant
import homeassistant.util.dt as dt_util
from . import FritzDeviceSwitchMock, setup_config_entry
from .const import CONF_FAKE_NAME, MOCK_CONFIG
from tests.common import async_fire_time_changed
ENTITY_ID = f"{DOMAIN}.{CONF_FAKE_NAME}"
async def test_setup(hass: HomeAssistant, fritz: Mock):
    """Test setup of platform."""
    device = FritzDeviceSwitchMock()
    assert await setup_config_entry(
        hass, MOCK_CONFIG[FB_DOMAIN][CONF_DEVICES][0], ENTITY_ID, device, fritz
    )

    # The switch entity itself (no state_class on a switch).
    state = hass.states.get(ENTITY_ID)
    assert state
    assert state.state == STATE_ON
    assert state.attributes[ATTR_FRIENDLY_NAME] == CONF_FAKE_NAME
    assert state.attributes[ATTR_STATE_DEVICE_LOCKED] == "fake_locked_device"
    assert state.attributes[ATTR_STATE_LOCKED] == "fake_locked"
    assert ATTR_STATE_CLASS not in state.attributes

    # Companion temperature sensor created for the switch device.
    state = hass.states.get(f"{SENSOR_DOMAIN}.{CONF_FAKE_NAME}_temperature")
    assert state
    assert state.state == "1.23"
    assert state.attributes[ATTR_FRIENDLY_NAME] == f"{CONF_FAKE_NAME} Temperature"
    assert state.attributes[ATTR_STATE_DEVICE_LOCKED] == "fake_locked_device"
    assert state.attributes[ATTR_STATE_LOCKED] == "fake_locked"
    assert state.attributes[ATTR_UNIT_OF_MEASUREMENT] == TEMP_CELSIUS
    assert state.attributes[ATTR_STATE_CLASS] == STATE_CLASS_MEASUREMENT

    # No humidity sensor is expected for this mock device.
    state = hass.states.get(f"{ENTITY_ID}_humidity")
    assert state is None

    # Power consumption sensor.
    state = hass.states.get(f"{SENSOR_DOMAIN}.{CONF_FAKE_NAME}_power_consumption")
    assert state
    assert state.state == "5.678"
    assert state.attributes[ATTR_FRIENDLY_NAME] == f"{CONF_FAKE_NAME} Power Consumption"
    assert state.attributes[ATTR_UNIT_OF_MEASUREMENT] == POWER_WATT
    assert state.attributes[ATTR_STATE_CLASS] == STATE_CLASS_MEASUREMENT

    # Total energy sensor (monotonically increasing meter).
    state = hass.states.get(f"{SENSOR_DOMAIN}.{CONF_FAKE_NAME}_total_energy")
    assert state
    assert state.state == "1.234"
    assert state.attributes[ATTR_FRIENDLY_NAME] == f"{CONF_FAKE_NAME} Total Energy"
    assert state.attributes[ATTR_UNIT_OF_MEASUREMENT] == ENERGY_KILO_WATT_HOUR
    assert state.attributes[ATTR_STATE_CLASS] == STATE_CLASS_TOTAL_INCREASING
async def test_turn_on(hass: HomeAssistant, fritz: Mock):
    """Test turn device on."""
    switch = FritzDeviceSwitchMock()
    assert await setup_config_entry(
        hass, MOCK_CONFIG[FB_DOMAIN][CONF_DEVICES][0], ENTITY_ID, switch, fritz
    )

    # Invoke the turn_on service and verify it reaches the device API once.
    turned_on = await hass.services.async_call(
        DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: ENTITY_ID}, True
    )
    assert turned_on
    assert switch.set_switch_state_on.call_count == 1
async def test_turn_off(hass: HomeAssistant, fritz: Mock):
    """Test turn device off."""
    switch = FritzDeviceSwitchMock()
    assert await setup_config_entry(
        hass, MOCK_CONFIG[FB_DOMAIN][CONF_DEVICES][0], ENTITY_ID, switch, fritz
    )

    # Invoke the turn_off service and verify it reaches the device API once.
    turned_off = await hass.services.async_call(
        DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: ENTITY_ID}, True
    )
    assert turned_off
    assert switch.set_switch_state_off.call_count == 1
async def test_update(hass: HomeAssistant, fritz: Mock):
    """Test update without error."""
    mock_device = FritzDeviceSwitchMock()
    assert await setup_config_entry(
        hass, MOCK_CONFIG[FB_DOMAIN][CONF_DEVICES][0], ENTITY_ID, mock_device, fritz
    )
    assert fritz().update_devices.call_count == 1
    assert fritz().login.call_count == 1

    # Advance time past the refresh interval and let pending work settle.
    async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=200))
    await hass.async_block_till_done()

    # One more device refresh, but no additional login.
    assert fritz().update_devices.call_count == 2
    assert fritz().login.call_count == 1
async def test_update_error(hass: HomeAssistant, fritz: Mock):
    """Test update with error."""
    mock_device = FritzDeviceSwitchMock()
    # Make every device refresh fail with an HTTP error.
    fritz().update_devices.side_effect = HTTPError("Boom")
    assert not await setup_config_entry(
        hass, MOCK_CONFIG[FB_DOMAIN][CONF_DEVICES][0], ENTITY_ID, mock_device, fritz
    )
    assert fritz().update_devices.call_count == 1
    assert fritz().login.call_count == 1

    # Advance time past the refresh interval and let pending work settle.
    async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=200))
    await hass.async_block_till_done()

    # The failed refresh is retried and a new login is attempted.
    assert fritz().update_devices.call_count == 2
    assert fritz().login.call_count == 2
async def test_assume_device_unavailable(hass: HomeAssistant, fritz: Mock):
    """Test assume device as unavailable."""
    mock_device = FritzDeviceSwitchMock()
    # With all electrical readings at zero the entity should become
    # unavailable.
    mock_device.voltage = 0
    mock_device.energy = 0
    mock_device.power = 0
    assert await setup_config_entry(
        hass, MOCK_CONFIG[FB_DOMAIN][CONF_DEVICES][0], ENTITY_ID, mock_device, fritz
    )

    switch_state = hass.states.get(ENTITY_ID)
    assert switch_state
    assert switch_state.state == STATE_UNAVAILABLE
| jawilson/home-assistant | tests/components/fritzbox/test_switch.py | Python | apache-2.0 | 5,502 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the ChooseFastestBranchDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class ChooseFastestBranchDatasetSerializationTest(
    dataset_serialization_test_base.DatasetSerializationTestBase):
  """Serialization tests for _ChooseFastestBranchDataset variants."""

  def testCore(self):

    def build_ds(size):
      dataset = dataset_ops.Dataset.range(size)

      # Two pipelines with identical results: map-then-batch vs batch-then-map.
      def branch_0(dataset):
        return dataset.map(lambda x: x).batch(10)

      def branch_1(dataset):
        return dataset.batch(10).map(lambda x: x)

      return optimization._ChooseFastestBranchDataset(  # pylint: disable=protected-access
          dataset, [branch_0, branch_1],
          ratio_numerator=10)

    # Both branches batch by 10, so the dataset yields size // 10 elements.
    for size in [100, 1000]:
      self.run_core_tests(lambda: build_ds(size), None, size // 10)  # pylint: disable=cell-var-from-loop

  def testWithCapture(self):

    def build_ds():
      dataset = dataset_ops.Dataset.range(10)
      # Constants captured by the branch functions, with differing dtypes.
      const_64 = constant_op.constant(1, dtypes.int64)
      const_32 = constant_op.constant(1, dtypes.int32)

      def branch_0(dataset):
        return dataset.map(lambda x: x + const_64)

      def branch_1(dataset):
        return dataset.map(lambda x: x + math_ops.cast(const_32, dtypes.int64))

      return optimization._ChooseFastestBranchDataset(
          dataset, [branch_0, branch_1], num_elements_per_branch=3)

    self.run_core_tests(build_ds, None, 10)

  def testWithPrefetch(self):

    def build_ds():
      dataset = dataset_ops.Dataset.range(10)
      const_64 = constant_op.constant(1, dtypes.int64)
      const_32 = constant_op.constant(1, dtypes.int32)

      def branch_0(dataset):
        return dataset.map(lambda x: x + const_64)

      def branch_1(dataset):
        return dataset.map(lambda x: x + math_ops.cast(const_32, dtypes.int64))

      return optimization._ChooseFastestBranchDataset(
          dataset, [branch_0, branch_1], num_elements_per_branch=3)

    self.run_core_tests(build_ds, None, 10)

  def testWithMoreOutputThanInput(self):

    def build_ds():
      dataset = dataset_ops.Dataset.from_tensors(0).repeat(1000).batch(100)

      # unbatch() produces 100 elements per input batch (more output than
      # input elements).
      def branch(dataset):
        return dataset.apply(batching.unbatch())

      return optimization._ChooseFastestBranchDataset(
          dataset, [branch, branch],
          ratio_denominator=10,
          num_elements_per_branch=100)

    self.run_core_tests(build_ds, None, 1000)


if __name__ == "__main__":
  test.main()
| kevin-coder/tensorflow-fork | tensorflow/python/data/experimental/kernel_tests/serialization/choose_fastest_branch_dataset_serialization_test.py | Python | apache-2.0 | 3,611 |
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layers common to multiple models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
import contextlib
import functools
from functools import partial
import math
import numpy as np
from six.moves import range # pylint: disable=redefined-builtin
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import inplace_ops
@function.Defun(
    python_grad_func=lambda x, dy: tf.convert_to_tensor(dy),
    shape_func=lambda op: [op.inputs[0].get_shape()])
def convert_gradient_to_tensor(x):
  """Identity operation whose gradient is converted to a `Tensor`.

  Currently, the gradient to `tf.concat` is particularly expensive to
  compute if dy is an `IndexedSlices` (a lack of GPU implementation
  forces the gradient operation onto CPU). This situation occurs when
  the output of the `tf.concat` is eventually passed to `tf.gather`.
  It is sometimes faster to convert the gradient to a `Tensor`, so as
  to get the cheaper gradient for `tf.concat`. To do this, replace
  `tf.concat(x)` with `convert_gradient_to_tensor(tf.concat(x))`.

  Args:
    x: A `Tensor`.

  Returns:
    The input `Tensor`.
  """
  # Forward pass is the identity; the Defun's python_grad_func densifies dy.
  return x
def is_xla_compiled():
  """Whether we are building graph that will be compiled by XLA.

  This checks whether the code is executing within an XLA context.

  If True, model authors should ensure the graph they build is compilable by
  XLA. Specifically, they should ensure that all ops have XLA implementations
  and that all shapes are statically known.

  Returns:
    bool, whether the current graph will be compiled for XLA.
  """
  ctxt = tf.get_default_graph()._get_control_flow_context()  # pylint: disable=protected-access
  # A non-None containing XLA context means this graph is being XLA-compiled.
  return control_flow_util.GetContainingXLAContext(ctxt) is not None
def dropout_with_broadcast_dims(x, keep_prob, broadcast_dims=None, **kwargs):
  """Like tf.nn.dropout but takes broadcast_dims instead of noise_shape.

  Instead of specifying noise_shape, this function takes broadcast_dims -
  a list of dimension numbers in which noise_shape should be 1. The random
  keep/drop tensor has dimensionality 1 along these dimensions.

  Args:
    x: a floating point tensor.
    keep_prob: A scalar Tensor with the same type as x.
      The probability that each element is kept.
    broadcast_dims: an optional list of integers
      the dimensions along which to broadcast the keep/drop flags.
    **kwargs: keyword arguments to tf.nn.dropout other than "noise_shape".

  Returns:
    Tensor of the same shape as x.
  """
  assert "noise_shape" not in kwargs
  if broadcast_dims:
    shape = tf.shape(x)
    ndims = len(x.get_shape())
    # Allow dimensions like "-1" as well.
    broadcast_dims = [dim + ndims if dim < 0 else dim for dim in broadcast_dims]
    # Broadcast dims get noise_shape 1; other dims keep their dynamic size.
    kwargs["noise_shape"] = [
        1 if i in broadcast_dims else shape[i] for i in range(ndims)
    ]
  return tf.nn.dropout(x, keep_prob, **kwargs)
def comma_separated_string_to_integer_list(s):
  """Parse a comma-separated string into a list of ints, skipping blanks."""
  pieces = s.split(",")
  return [int(piece) for piece in pieces if piece]
def saturating_sigmoid(x):
  """Saturating sigmoid: 1.2 * sigmoid(x) - 0.1 cut to [0, 1]."""
  with tf.name_scope("saturating_sigmoid", values=[x]):
    y = tf.sigmoid(x)
    # Stretch then clip so the function can actually reach 0 and 1.
    return tf.minimum(1.0, tf.maximum(0.0, 1.2 * y - 0.1))


def hard_sigmoid(x, saturation_limit=0.9):
  # Returns (clipped value in [0, 1], saturation penalty). The penalty is the
  # mean overshoot of |x| beyond saturation_limit.
  saturation_cost = tf.reduce_mean(tf.nn.relu(tf.abs(x) - saturation_limit))
  x_shifted = 0.5 * x + 0.5
  return tf.minimum(1.0, tf.nn.relu(x_shifted)), saturation_cost


def hard_tanh(x, saturation_limit=0.9):
  # Returns (x clipped to [-1, 1], saturation penalty) -- same penalty as
  # hard_sigmoid.
  saturation_cost = tf.reduce_mean(tf.nn.relu(tf.abs(x) - saturation_limit))
  return tf.minimum(1.0, tf.maximum(x, -1.0)), saturation_cost
def inverse_exp_decay(max_step, min_value=0.01, step=None):
  """Inverse-decay exponentially from 0.01 to 1.0 reached at max_step."""
  # inv_base**max_step == min_value, so the result rises from min_value at
  # step 0 to 1.0 at step >= max_step.
  inv_base = tf.exp(tf.log(min_value) / float(max_step))
  if step is None:
    step = tf.train.get_global_step()
  if step is None:
    # No global step available (e.g. outside a training loop): no decay.
    return 1.0
  step = tf.to_float(step)
  return inv_base**tf.maximum(float(max_step) - step, 0.0)


def inverse_lin_decay(max_step, min_value=0.01, step=None):
  """Inverse-decay linearly from 0.01 to 1.0 reached at max_step."""
  if step is None:
    step = tf.train.get_global_step()
  if step is None:
    # No global step available (e.g. outside a training loop): no decay.
    return 1.0
  step = tf.to_float(step)
  # Linear interpolation from min_value to 1.0, clamped after max_step.
  progress = tf.minimum(step / float(max_step), 1.0)
  return progress * (1.0 - min_value) + min_value
def shakeshake2_py(x, y, equal=False, individual=False):
  """The shake-shake sum of 2 tensors, python version.

  Args:
    x: a Tensor.
    y: a Tensor broadcast-compatible with x.
    equal: if True, use a fixed 50/50 mix instead of a random one.
    individual: if True, sample an independent mixing weight per batch
      element instead of a single scalar.

  Returns:
    alpha * x + (1 - alpha) * y, with alpha chosen as described above.
  """
  if equal:
    alpha = 0.5
  elif individual:
    # BUG FIX: `tf.get_shape` is not a TensorFlow API; the per-example path
    # needs the dynamic batch dimension, i.e. tf.shape(x)[:1].
    alpha = tf.random_uniform(tf.shape(x)[:1])
  else:
    alpha = tf.random_uniform([])
  return alpha * x + (1.0 - alpha) * y
@function.Defun()
def shakeshake2_grad(x1, x2, dy):
  """Overriding gradient for shake-shake of 2 tensors."""
  # A fresh alpha is sampled here, so the backward mix differs from forward.
  y = shakeshake2_py(x1, x2)
  dx = tf.gradients(ys=[y], xs=[x1, x2], grad_ys=[dy])
  return dx


@function.Defun()
def shakeshake2_indiv_grad(x1, x2, dy):
  """Overriding gradient for shake-shake of 2 tensors."""
  # Per-batch-element alpha in the backward pass.
  y = shakeshake2_py(x1, x2, individual=True)
  dx = tf.gradients(ys=[y], xs=[x1, x2], grad_ys=[dy])
  return dx


@function.Defun()
def shakeshake2_equal_grad(x1, x2, dy):
  """Overriding gradient for shake-shake of 2 tensors."""
  # Deterministic 50/50 mix in the backward pass.
  y = shakeshake2_py(x1, x2, equal=True)
  dx = tf.gradients(ys=[y], xs=[x1, x2], grad_ys=[dy])
  return dx


@function.Defun(grad_func=shakeshake2_grad)
def shakeshake2(x1, x2):
  """The shake-shake function with a different alpha for forward/backward."""
  return shakeshake2_py(x1, x2)


@function.Defun(grad_func=shakeshake2_indiv_grad)
def shakeshake2_indiv(x1, x2):
  # Per-batch-element shake-shake with its own backward alpha.
  return shakeshake2_py(x1, x2, individual=True)


@function.Defun(grad_func=shakeshake2_equal_grad)
def shakeshake2_eqgrad(x1, x2):
  """The shake-shake function with a different alpha for forward/backward."""
  return shakeshake2_py(x1, x2)


def shakeshake(xs, equal_grad=False):
  """Multi-argument shake-shake, currently approximated by sums of 2."""
  if len(xs) == 1:
    return xs[0]
  # Reduce the list as a balanced binary tree of pairwise shake-shake sums.
  div = (len(xs) + 1) // 2
  arg1 = shakeshake(xs[:div], equal_grad=equal_grad)
  arg2 = shakeshake(xs[div:], equal_grad=equal_grad)
  if equal_grad:
    return shakeshake2_eqgrad(arg1, arg2)
  return shakeshake2(arg1, arg2)
def convert_rgb_to_real(x):
  """Conversion of pixel values to real numbers."""
  with tf.name_scope("rgb_to_real", values=[x]):
    x = tf.to_float(x)
    # Scale [0, 255] pixel intensities into [0, 1].
    x /= 255.0
    return x


def convert_rgb_to_symmetric_real(x):
  """Conversion of pixel values to real numbers."""
  with tf.name_scope("rgb_to_real", values=[x]):
    x = tf.to_float(x)
    # Convert each pixel intensity in [0, 1, 2, ..., 255] into a real number in
    # the range [-1, 1].
    x = (x / 127.5) - 1
    return x


def convert_real_to_rgb(x):
  """Conversion of real numbers to pixel values."""
  with tf.name_scope("real_to_rgb", values=[x]):
    # Inverse of convert_rgb_to_real: scale [0, 1] back to [0, 255].
    x *= 255.0
    return x
def expand_squeeze_to_nd(x, n, squeeze_dim=2, expand_dim=-1):
  """Make x n-d with squeeze and expand_dims."""
  if len(x.shape) > n:
    # Too many dims: repeatedly squeeze out squeeze_dim (must be size 1).
    while len(x.shape) != n:
      x = tf.squeeze(x, [squeeze_dim])
  else:
    # Too few dims: repeatedly insert a size-1 axis at expand_dim.
    while len(x.shape) != n:
      x = tf.expand_dims(x, expand_dim)
  return x
def standardize_images(x):
  """Image standardization on batches and videos."""
  with tf.name_scope("standardize_images", [x]):
    x_shape = shape_list(x)
    # Collapse leading batch/time dims so each image is normalized on its own.
    x = tf.to_float(tf.reshape(x, [-1] + x_shape[-3:]))
    x_mean = tf.reduce_mean(x, axis=[1, 2], keepdims=True)
    x_variance = tf.reduce_mean(
        tf.square(x - x_mean), axis=[1, 2], keepdims=True)
    num_pixels = tf.to_float(x_shape[-2] * x_shape[-3])
    # Lower-bound the stddev by 1/sqrt(num_pixels) to avoid dividing by ~0
    # for (nearly) constant images.
    x = (x - x_mean) / tf.maximum(tf.sqrt(x_variance), tf.rsqrt(num_pixels))
    return tf.reshape(x, x_shape)


def flatten4d3d(x):
  """Flatten a 4d-tensor into a 3d-tensor by joining width and height."""
  xshape = shape_list(x)
  result = tf.reshape(x, [xshape[0], xshape[1] * xshape[2], xshape[3]])
  return result
# TODO(noam): remove this function after TPUs do gather faster.
def gather(params, indices, dtype=tf.float32):
  """Version of tf.gather that works faster on tpu."""
  if not is_xla_compiled():
    return tf.gather(params, indices)
  # Under XLA, express the gather as one_hot(indices) @ params (a matmul).
  vocab_size = params.get_shape().as_list()[0]
  indices_flat = tf.reshape(indices, [-1])
  out = tf.matmul(tf.one_hot(indices_flat, vocab_size, dtype=dtype), params)
  out = reshape_like(out, tf.expand_dims(indices, -1))
  return out


# TODO(noam): remove this function after TPUs do cumsum faster.
def cumsum(x, axis=0, exclusive=False):
  """TPU hack for tf.cumsum.

  This is equivalent to tf.cumsum and is faster on TPU as of 04/2018 unless
  the axis dimension is very large.

  Args:
    x: a Tensor
    axis: an integer
    exclusive: a boolean

  Returns:
    Tensor of the same shape as x.
  """
  if not is_xla_compiled():
    return tf.cumsum(x, axis=axis, exclusive=exclusive)
  x_shape = shape_list(x)
  rank = len(x_shape)
  length = x_shape[axis]
  my_range = tf.range(length)
  # Build a (strictly, if exclusive) lower-triangular 0/1 mask and contract
  # the chosen axis against it -- one matmul instead of a scan.
  comparator = tf.less if exclusive else tf.less_equal
  mask = tf.cast(
      comparator(tf.expand_dims(my_range, 1), tf.expand_dims(my_range, 0)),
      x.dtype)
  ret = tf.tensordot(x, mask, axes=[[axis], [0]])
  if axis != rank - 1:
    # tensordot moves the contracted axis to the end; rotate it back.
    ret = tf.transpose(
        ret,
        list(range(axis)) + [rank - 1] + list(range(axis, rank - 1)))
  return ret
def dropout_no_scaling(x, keep_prob):
  """Like tf.nn.dropout, but does not scale up. Works on integers also.

  Args:
    x: a Tensor
    keep_prob: a floating point number

  Returns:
    Tensor of the same shape as x.
  """
  if keep_prob == 1.0:
    return x
  # Multiply by a 0/1 mask (tf.nn.dropout would scale kept values by
  # 1/keep_prob, which is wrong for integer ids).
  mask = tf.less(tf.random_uniform(tf.shape(x)), keep_prob)
  return x * cast_like(mask, x)


def embedding(x,
              vocab_size,
              dense_size,
              name=None,
              reuse=None,
              multiplier=1.0,
              symbol_dropout_rate=0.0,
              embedding_var=None,
              dtype=tf.float32):
  """Embed x of type int64 into dense vectors, reducing to max 4 dimensions."""
  with tf.variable_scope(
      name, default_name="embedding", values=[x], reuse=reuse, dtype=dtype):
    if embedding_var is None:
      embedding_var = tf.get_variable("kernel", [vocab_size, dense_size])
    # On the backwards pass, we want to convert the gradient from
    # an indexed-slices to a regular tensor before sending it back to the
    # parameter server. This avoids excess computation on the parameter server.
    if not tf.contrib.eager.in_eager_mode():
      embedding_var = convert_gradient_to_tensor(embedding_var)
    # Symbol dropout zeroes whole input ids, without rescaling the rest.
    x = dropout_no_scaling(x, 1.0 - symbol_dropout_rate)
    emb_x = gather(embedding_var, x, dtype)
    if multiplier != 1.0:
      emb_x *= multiplier
    static_shape = emb_x.shape.as_list()
    if len(static_shape) < 5:
      return emb_x
    assert len(static_shape) == 5
    # If we had an extra channel dimension, assume it's 1, i.e. shape[3] == 1.
    return tf.squeeze(emb_x, 3)
def shift_right(x, pad_value=None):
  """Shift the second dimension of x right by one."""
  # Prepend zeros (or pad_value) along axis 1, then drop the last position.
  if pad_value is None:
    shifted_targets = tf.pad(x, [[0, 0], [1, 0], [0, 0], [0, 0]])[:, :-1, :, :]
  else:
    shifted_targets = tf.concat([pad_value, x], axis=1)[:, :-1, :, :]
  return shifted_targets


def shift_right_3d(x, pad_value=None):
  """Shift the second dimension of x right by one."""
  # Same as shift_right, for rank-3 inputs.
  if pad_value is None:
    shifted_targets = tf.pad(x, [[0, 0], [1, 0], [0, 0]])[:, :-1, :]
  else:
    shifted_targets = tf.concat([pad_value, x], axis=1)[:, :-1, :]
  return shifted_targets


def shift_right_2d(x, pad_value=None):
  """Shift the second dimension of x right by one."""
  # Same as shift_right, for rank-2 inputs.
  if pad_value is None:
    shifted_targets = tf.pad(x, [[0, 0], [1, 0]])[:, :-1]
  else:
    shifted_targets = tf.concat([pad_value, x], axis=1)[:, :-1]
  return shifted_targets
def conv_stride2_multistep(x, nbr_steps, output_filters, name=None, reuse=None):
  """Use a strided convolution to downsample x by 2, `nbr_steps` times.

  We use stride and filter size 2 to avoid the checkerboard problem of deconvs.
  As detailed in http://distill.pub/2016/deconv-checkerboard/.

  Args:
    x: a `Tensor` with shape `[batch, spatial, depth]` or
      `[batch, spatial_1, spatial_2, depth]`
    nbr_steps: number of halving downsample rounds to apply
    output_filters: an int specifying the filter count for the convolutions
    name: a string
    reuse: a boolean

  Returns:
    a `Tensor` with shape `[batch, spatial / (2**nbr_steps), output_filters]` or
      `[batch, spatial_1 / (2**nbr_steps), spatial_2 / (2**nbr_steps),
      output_filters]`
  """
  with tf.variable_scope(
      name, default_name="conv_stride2_multistep", values=[x], reuse=reuse):
    if nbr_steps == 0:
      # Degenerate case: no downsampling, just a 1x1 projection.
      out = conv(x, output_filters, (1, 1))
      return out, [out]
    # Keep every intermediate resolution; both the final layer and the full
    # list are returned.
    hidden_layers = [x]
    for i in range(nbr_steps):
      hidden_layers.append(
          conv(
              hidden_layers[-1],
              output_filters, (2, 2),
              strides=2,
              activation=tf.nn.relu,
              name="conv" + str(i)))
    return hidden_layers[-1], hidden_layers
def deconv_stride2_multistep(x,
                             nbr_steps,
                             output_filters,
                             name=None,
                             reuse=None):
  """Use a deconvolution to upsample x by 2**`nbr_steps`.

  Args:
    x: a `Tensor` with shape `[batch, spatial, depth]` or
      `[batch, spatial_1, spatial_2, depth]`
    nbr_steps: an int specifying the number of doubling upsample rounds to
      apply.
    output_filters: an int specifying the filter count for the deconvolutions
    name: a string
    reuse: a boolean

  Returns:
    a `Tensor` with shape `[batch, spatial * (2**nbr_steps), output_filters]` or
      `[batch, spatial_1 * (2**nbr_steps), spatial_2 * (2**nbr_steps),
      output_filters]`
  """
  with tf.variable_scope(
      name, default_name="deconv_stride2_multistep", values=[x], reuse=reuse):

    def deconv1d(cur, i):
      # 1-D doubling: produce 2x channels with a 1x1 conv, then fold the
      # channel factor into the spatial dimension via reshape.
      cur_shape = shape_list(cur)
      thicker = conv(
          cur,
          output_filters * 2, (1, 1),
          padding="SAME",
          activation=tf.nn.relu,
          name="deconv1d" + str(i))
      return tf.reshape(thicker,
                        [cur_shape[0], cur_shape[1] * 2, 1, output_filters])

    def deconv2d(cur, i):
      # 2-D doubling: produce 4x channels, then depth_to_space folds them
      # into a 2x2 spatial upsample.
      thicker = conv(
          cur,
          output_filters * 4, (1, 1),
          padding="SAME",
          activation=tf.nn.relu,
          name="deconv2d" + str(i))
      return tf.depth_to_space(thicker, 2)

    cur = x
    for i in range(nbr_steps):
      if cur.get_shape()[2] == 1:
        # Statically-known width of 1: this is effectively 1-D data.
        cur = deconv1d(cur, i)
      else:
        cur_dim = shape_list(cur)[2]
        if isinstance(cur_dim, int):
          if cur_dim == 1:
            cur = deconv1d(cur, i)
          else:
            cur = deconv2d(cur, i)
        else:
          # Width only known at runtime: decide 1-D vs 2-D with tf.cond.
          # The `idx=i` default binds the loop variable by value.
          cur = tf.cond(
              tf.equal(cur_dim, 1),
              lambda idx=i: deconv1d(cur, idx),
              lambda idx=i: deconv2d(cur, idx))
    return cur
def conv_internal(conv_fn, inputs, filters, kernel_size, **kwargs):
  """Conditional conv_fn making kernel 1d or 2d depending on inputs shape."""
  static_shape = inputs.get_shape()
  if not static_shape or len(static_shape) != 4:
    raise ValueError("Inputs to conv must have statically known rank 4. "
                     "Shape: " + str(static_shape))
  # Add support for left padding.
  if kwargs.get("padding") == "LEFT":
    dilation_rate = (1, 1)
    if "dilation_rate" in kwargs:
      dilation_rate = kwargs["dilation_rate"]
    # LEFT padding requires odd kernel sizes so the pad amount is well-defined.
    assert kernel_size[0] % 2 == 1 and kernel_size[1] % 2 == 1
    height_padding = 2 * (kernel_size[0] // 2) * dilation_rate[0]
    # Skip width padding when the (dynamic) width dimension is 1.
    cond_padding = tf.cond(
        tf.equal(shape_list(inputs)[2], 1), lambda: tf.constant(0),
        lambda: tf.constant(2 * (kernel_size[1] // 2) * dilation_rate[1]))
    width_padding = 0 if static_shape[2] == 1 else cond_padding
    padding = [[0, 0], [height_padding, 0], [width_padding, 0], [0, 0]]
    inputs = tf.pad(inputs, padding)
    # Set middle two dimensions to None to prevent convolution from complaining
    inputs.set_shape([static_shape[0], None, None, static_shape[3]])
    # The explicit pad above already implements LEFT padding; run the conv
    # itself with VALID.
    kwargs["padding"] = "VALID"

  def conv2d_kernel(kernel_size_arg, name_suffix):
    """Call conv2d but add suffix to name."""
    name = "{}_{}".format(kwargs.get("name", "conv"), name_suffix)
    # Pop name/force2d so they are not passed through to conv_fn, then
    # restore them so repeated calls see the original kwargs.
    original_name = kwargs.pop("name", None)
    original_force2d = kwargs.pop("force2d", None)
    result = conv_fn(inputs, filters, kernel_size_arg, name=name, **kwargs)
    if original_name is not None:
      kwargs["name"] = original_name  # Restore for other calls.
    if original_force2d is not None:
      kwargs["force2d"] = original_force2d
    return result

  return conv2d_kernel(kernel_size, "single")
def conv(inputs, filters, kernel_size, dilation_rate=(1, 1), **kwargs):
  """2-d convolution with LEFT-padding support (see conv_internal)."""
  kwargs["dilation_rate"] = dilation_rate
  return conv_internal(tf.layers.conv2d, inputs, filters, kernel_size,
                       **kwargs)
def conv1d(inputs, filters, kernel_size, dilation_rate=1, **kwargs):
  """1-d convolution, implemented as a width-1 2-d convolution."""
  expanded = tf.expand_dims(inputs, 2)
  convolved = conv(
      expanded,
      filters, (kernel_size, 1),
      dilation_rate=(dilation_rate, 1),
      **kwargs)
  return tf.squeeze(convolved, 2)
def separable_conv(inputs, filters, kernel_size, **kwargs):
  """Separable 2-d convolution sharing conv_internal's padding handling."""
  return conv_internal(
      tf.layers.separable_conv2d, inputs, filters, kernel_size, **kwargs)
def subseparable_conv(inputs, filters, kernel_size, **kwargs):
  """Sub-separable convolution. If separability == 0 it's a separable_conv.

  The "separability" value may arrive via kwargs:
    separability > 0: split channels into that many blocks, plain conv2d on
      each, then (if > 1) mix with a 1x1 conv.
    separability < 0: split into |separability| blocks, separable_conv2d on
      each, concatenated without the mixing 1x1 conv.
    separability falsy/absent: a single separable_conv2d over all channels.
  """
  def conv_fn(inputs, filters, kernel_size, **kwargs):
    """Sub-separable convolution, splits into separability-many blocks."""
    separability = None
    if "separability" in kwargs:
      separability = kwargs.pop("separability")
    if separability:
      parts = []
      abs_sep = separability if separability > 0 else -1 * separability
      for split_idx, split in enumerate(tf.split(inputs, abs_sep, axis=3)):
        with tf.variable_scope("part_%d" % split_idx):
          if separability > 0:
            # Positive separability: ordinary conv per channel block.
            parts.append(
                tf.layers.conv2d(split, filters // separability, kernel_size,
                                 **kwargs))
          else:
            # Negative separability: separable conv per channel block.
            parts.append(
                tf.layers.separable_conv2d(split, filters // abs_sep,
                                           kernel_size, **kwargs))
      if separability > 1:
        # Mix the blocks back together with a 1x1 convolution.
        result = tf.layers.conv2d(tf.concat(parts, axis=3), filters, (1, 1))
      elif abs_sep == 1:  # If we have just one block, return it.
        assert len(parts) == 1
        result = parts[0]
      else:
        result = tf.concat(parts, axis=3)
    else:
      result = tf.layers.separable_conv2d(inputs, filters, kernel_size,
                                          **kwargs)
    if separability is not None:
      # Restore the popped kwarg so repeated calls see the same kwargs.
      kwargs["separability"] = separability
    return result
  return conv_internal(conv_fn, inputs, filters, kernel_size, **kwargs)
def tpu_conv1d(inputs, filters, kernel_size, padding="SAME", name="tpu_conv1d"):
  """Version of conv1d that works on TPU (as of 11/2017).

  Expresses the convolution as a sum over kernel_size of dense layers applied
  to shifted copies of the input, avoiding ops that were unsupported on TPU.

  Args:
    inputs: a Tensor with shape [batch, length, input_depth].
    filters: an integer.
    kernel_size: an integer.
    padding: a string - "SAME" or "LEFT".
    name: a string.

  Returns:
    a Tensor with shape [batch, length, filters].
  """
  if kernel_size == 1:
    # A width-1 convolution is just a position-wise dense layer.
    return dense(inputs, filters, name=name, use_bias=True)
  if padding == "SAME":
    assert kernel_size % 2 == 1
    first_offset = -((kernel_size - 1) // 2)
  else:
    assert padding == "LEFT"
    first_offset = -(kernel_size - 1)
  last_offset = first_offset + kernel_size - 1
  results = []
  padded = tf.pad(inputs, [[0, 0], [-first_offset, last_offset], [0, 0]])
  for i in range(kernel_size):
    # i == -first_offset corresponds to the unshifted input; only tap 0 uses
    # inputs directly (i == 0) and only tap 0 carries the bias.
    shifted = tf.slice(padded, [0, i, 0], tf.shape(inputs)) if i else inputs
    shifted.set_shape(inputs.get_shape())
    results.append(
        dense(shifted, filters, use_bias=(i == 0), name=name + "_%d" % i))
  ret = tf.add_n(results)
  # Scale by 1/sqrt(kernel_size); presumably to keep output variance
  # comparable to a single dense layer -- TODO confirm intent.
  ret *= kernel_size**-0.5
  return ret
def layer_norm_vars(filters):
  """Create the trainable scale and bias Variables for layer norm."""
  gamma = tf.get_variable(
      "layer_norm_scale", [filters], initializer=tf.ones_initializer())
  beta = tf.get_variable(
      "layer_norm_bias", [filters], initializer=tf.zeros_initializer())
  return gamma, beta
def layer_norm_compute(x, epsilon, scale, bias):
  """Layer-normalize x over its last dimension with the given scale/bias."""
  # Match dtypes to x before any arithmetic.
  epsilon = cast_like(epsilon, x)
  scale = cast_like(scale, x)
  bias = cast_like(bias, x)
  mu = tf.reduce_mean(x, axis=[-1], keepdims=True)
  variance = tf.reduce_mean(tf.square(x - mu), axis=[-1], keepdims=True)
  normalized = (x - mu) * tf.rsqrt(variance + epsilon)
  return scale * normalized + bias
def layer_norm(x, filters=None, epsilon=1e-6, name=None, reuse=None):
  """Layer normalize the tensor x, averaging over the last dimension."""
  depth = shape_list(x)[-1] if filters is None else filters
  with tf.variable_scope(
      name, default_name="layer_norm", values=[x], reuse=reuse):
    scale, bias = layer_norm_vars(depth)
    return layer_norm_compute(x, epsilon, scale, bias)
def group_norm(x, filters=None, num_groups=8, epsilon=1e-5):
  """Group normalization as in https://arxiv.org/abs/1803.08494.

  Args:
    x: a 4-D Tensor [batch, height, width, channels].
    filters: an integer, the number of channels; inferred from x if None.
    num_groups: an integer, the number of channel groups; must divide filters.
    epsilon: a float, numerical-stability constant.

  Returns:
    a Tensor of the same shape as x.
  """
  x_shape = shape_list(x)
  if filters is None:
    filters = x_shape[-1]
  assert len(x_shape) == 4
  assert filters % num_groups == 0
  # Prepare variables.
  scale = tf.get_variable(
      "group_norm_scale", [filters], initializer=tf.ones_initializer())
  bias = tf.get_variable(
      "group_norm_bias", [filters], initializer=tf.zeros_initializer())
  epsilon, scale, bias = [cast_like(t, x) for t in [epsilon, scale, bias]]
  # Reshape and compute group norm.
  x = tf.reshape(x, x_shape[:-1] + [num_groups, filters // num_groups])
  # Calculate mean and variance on heights, width, channels (not groups).
  mean, variance = tf.nn.moments(x, [1, 2, 4], keep_dims=True)
  norm_x = (x - mean) * tf.rsqrt(variance + epsilon)
  return tf.reshape(norm_x, x_shape) * scale + bias
def noam_norm(x, epsilon=1.0, name=None):
  """One version of layer normalization: l2-normalize, scale by sqrt(depth)."""
  with tf.name_scope(name, default_name="noam_norm", values=[x]):
    shape = x.get_shape()
    last_axis = len(shape) - 1
    normalized = tf.nn.l2_normalize(x, last_axis, epsilon=epsilon)
    return normalized * tf.sqrt(tf.to_float(shape[-1]))
def l2_norm(x, filters=None, epsilon=1e-6, name=None, reuse=None):
  """Layer normalization variant using the centered l2 norm."""
  depth = shape_list(x)[-1] if filters is None else filters
  with tf.variable_scope(name, default_name="l2_norm", values=[x], reuse=reuse):
    gamma = tf.get_variable(
        "l2_norm_scale", [depth], initializer=tf.ones_initializer())
    beta = tf.get_variable(
        "l2_norm_bias", [depth], initializer=tf.zeros_initializer())
    eps, gamma, beta = [cast_like(t, x) for t in [epsilon, gamma, beta]]
    centered = x - tf.reduce_mean(x, axis=[-1], keepdims=True)
    sq_norm = tf.reduce_sum(tf.square(centered), axis=[-1], keepdims=True)
    return gamma * centered * tf.rsqrt(sq_norm + eps) + beta
def apply_spectral_norm(x):
  """Normalizes x using the spectral norm.

  The implementation follows Algorithm 1 of
  https://arxiv.org/abs/1802.05957. If x is not a 2-D Tensor, then it is
  reshaped such that the number of channels (last-dimension) is the same.

  Args:
    x: Tensor with the last dimension equal to the number of filters.

  Returns:
    x: Tensor with the same shape as x normalized by the spectral norm.
    assign_op: Op to be run after every step to update the vector "u".
  """
  weights_shape = shape_list(x)
  other, num_filters = tf.reduce_prod(weights_shape[:-1]), weights_shape[-1]
  # Reshape into a 2-D matrix with outer size num_filters.
  weights_2d = tf.reshape(x, (other, num_filters))
  # v = Wu / ||W u||
  with tf.variable_scope("u", reuse=tf.AUTO_REUSE):
    # "u" persists across calls: it carries the power-iteration state, and is
    # updated only via the returned assign_op (trainable=False).
    u = tf.get_variable(
        "u", [num_filters, 1],
        initializer=tf.truncated_normal_initializer(),
        trainable=False)
  v = tf.nn.l2_normalize(tf.matmul(weights_2d, u))
  # u_new = vW / ||v W||
  u_new = tf.nn.l2_normalize(tf.matmul(tf.transpose(v), weights_2d))
  # s = v*W*u
  spectral_norm = tf.squeeze(
      tf.matmul(tf.transpose(v), tf.matmul(weights_2d, tf.transpose(u_new))))
  # set u equal to u_new in the next iteration.
  assign_op = tf.assign(u, tf.transpose(u_new))
  return tf.divide(x, spectral_norm), assign_op
def apply_norm(x, norm_type, depth, epsilon):
  """Apply the requested normalization to x.

  Args:
    x: a Tensor.
    norm_type: one of "layer", "group", "batch", "noam", "l2" or "none".
    depth: an integer, the size of the last dimension of x; used as the
      filter count by the layer/group/l2 normalizers.
    epsilon: a float, numerical-stability constant for the normalizer.

  Returns:
    a Tensor of the same shape as x.

  Raises:
    ValueError: if norm_type is not one of the supported values.
  """
  if norm_type == "layer":
    return layer_norm(x, filters=depth, epsilon=epsilon)
  if norm_type == "group":
    return group_norm(x, filters=depth, epsilon=epsilon)
  if norm_type == "batch":
    return tf.layers.batch_normalization(x, epsilon=epsilon)
  if norm_type == "noam":
    return noam_norm(x, epsilon)
  if norm_type == "l2":
    return l2_norm(x, filters=depth, epsilon=epsilon)
  if norm_type == "none":
    return x
  # Bug fix: the previous message listed 'lr' (a typo for 'l2') and omitted
  # the supported 'group' and 'l2'/'none' options actually handled above.
  raise ValueError("Parameter normalizer_fn must be one of: 'layer', 'group',"
                   " 'batch', 'noam', 'l2', 'none'.")
def zero_add(previous_value, x, name=None, reuse=None):
  """Resnet connection with zero initialization.

  Returns previous_value + gamma * x, where gamma is a trainable scalar
  initialized to zero. Useful when plugging a new module into a trained
  model: at initialization the module is an exact identity.

  Args:
    previous_value: A tensor.
    x: A tensor.
    name: name of variable scope; defaults to zero_add.
    reuse: reuse scope.

  Returns:
    previous_value + gamma * x.
  """
  with tf.variable_scope(name, default_name="zero_add", reuse=reuse):
    gamma = tf.get_variable("gamma", (), initializer=tf.zeros_initializer())
    return gamma * x + previous_value
def layer_prepostprocess(previous_value,
                         x,
                         sequence,
                         dropout_rate,
                         norm_type,
                         depth,
                         epsilon,
                         default_name,
                         name=None,
                         dropout_broadcast_dims=None):
  """Apply a sequence of functions to the input or output of a layer.

  The sequence is specified as a string which may contain the following
  characters:
    a: add previous_value
    n: apply normalization
    d: apply dropout
    z: zero add

  For example, if sequence=="dna", then the output is
    previous_value + normalize(dropout(x))

  Args:
    previous_value: A Tensor, to be added as a residual connection ('a')
    x: A Tensor to be transformed.
    sequence: a string.
    dropout_rate: a float
    norm_type: a string (see apply_norm())
    depth: an integer (size of last dimension of x).
    epsilon: a float (parameter for normalization)
    default_name: a string
    name: a string
    dropout_broadcast_dims: an optional list of integers less than 3
      specifying in which dimensions to broadcast the dropout decisions.
      saves memory.

  Returns:
    a Tensor
  """
  with tf.variable_scope(name, default_name=default_name):
    if sequence == "none":
      return x
    # Interpret the sequence string left-to-right, threading x through each
    # step; order matters (e.g. "dn" drops out before normalizing).
    for c in sequence:
      if c == "a":
        x += previous_value
      elif c == "z":
        x = zero_add(previous_value, x)
      elif c == "n":
        x = apply_norm(x, norm_type, depth, epsilon)
      else:
        assert c == "d", ("Unknown sequence step %s" % c)
        x = dropout_with_broadcast_dims(
            x, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims)
    return x
def layer_preprocess(layer_input, hparams):
  """Apply layer preprocessing.

  See layer_prepostprocess() for details. Reads these hyperparameters:
    layer_preprocess_sequence, layer_prepostprocess_dropout, norm_type,
    hidden_size, norm_epsilon.

  Args:
    layer_input: a Tensor
    hparams: a hyperparameters object.

  Returns:
    a Tensor
  """
  # Residual steps make no sense before the layer has run.
  assert "a" not in hparams.layer_preprocess_sequence, (
      "No residual connections allowed in hparams.layer_preprocess_sequence")
  assert "z" not in hparams.layer_preprocess_sequence, (
      "No residual connections allowed in hparams.layer_preprocess_sequence")
  broadcast_dims = comma_separated_string_to_integer_list(
      getattr(hparams, "layer_prepostprocess_dropout_broadcast_dims", ""))
  return layer_prepostprocess(
      None,
      layer_input,
      sequence=hparams.layer_preprocess_sequence,
      dropout_rate=hparams.layer_prepostprocess_dropout,
      norm_type=hparams.norm_type,
      depth=None,
      epsilon=hparams.norm_epsilon,
      dropout_broadcast_dims=broadcast_dims,
      default_name="layer_prepostprocess")
def layer_postprocess(layer_input, layer_output, hparams):
  """Apply layer postprocessing.

  See layer_prepostprocess() for details. Reads these hyperparameters:
    layer_postprocess_sequence, layer_prepostprocess_dropout, norm_type,
    hidden_size, norm_epsilon.

  Args:
    layer_input: a Tensor
    layer_output: a Tensor
    hparams: a hyperparameters object.

  Returns:
    a Tensor
  """
  broadcast_dims = comma_separated_string_to_integer_list(
      getattr(hparams, "layer_prepostprocess_dropout_broadcast_dims", ""))
  return layer_prepostprocess(
      layer_input,
      layer_output,
      sequence=hparams.layer_postprocess_sequence,
      dropout_rate=hparams.layer_prepostprocess_dropout,
      norm_type=hparams.norm_type,
      depth=None,
      epsilon=hparams.norm_epsilon,
      dropout_broadcast_dims=broadcast_dims,
      default_name="layer_postprocess")
def conv_block_internal(conv_fn,
                        inputs,
                        filters,
                        dilation_rates_and_kernel_sizes,
                        first_relu=True,
                        use_elu=False,
                        separabilities=None,
                        **kwargs):
  """A block of convolutions.

  Args:
    conv_fn: convolution function, e.g. conv or separable_conv.
    inputs: a Tensor
    filters: an Integer
    dilation_rates_and_kernel_sizes: a list of tuples (dilation, (k_w, k_h))
    first_relu: whether to do a relu at start (defaults to True)
    use_elu: whether to use ELUs instead of ReLUs (defaults to False)
    separabilities: list of separability factors (per-layer).
    **kwargs: additional arguments (e.g., pooling). Recognized extras:
      "name" (scope name), "mask" (multiplied into activations per layer),
      and "normalizer_fn" (see comment below).

  Returns:
    a Tensor.
  """
  name = kwargs.pop("name") if "name" in kwargs else None
  mask = kwargs.pop("mask") if "mask" in kwargs else None
  # Usage for normalize_fn kwarg:
  # if not specified, use layer norm
  # if given normalize_fn=None, don't use any normalization
  # if given normalize_fn=norm, use the specified norm function
  use_layer_norm = "normalizer_fn" not in kwargs
  norm = kwargs.pop("normalizer_fn", None)
  use_normalizer_fn = use_layer_norm or norm
  if use_layer_norm:
    norm = lambda x, name: layer_norm(x, filters, name=name)
  with tf.variable_scope(name, "conv_block", [inputs]):
    cur, counter = inputs, -1
    for dilation_rate, kernel_size in dilation_rates_and_kernel_sizes:
      counter += 1
      # Activation precedes each conv except (optionally) the first.
      if first_relu or counter > 0:
        cur = tf.nn.elu(cur) if use_elu else tf.nn.relu(cur)
      if mask is not None:
        cur *= mask
      if separabilities:
        cur = conv_fn(
            cur,
            filters,
            kernel_size,
            dilation_rate=dilation_rate,
            name="conv_block_%d" % counter,
            use_bias=norm is None,
            separability=separabilities[counter],
            **kwargs)
      else:
        cur = conv_fn(
            cur,
            filters,
            kernel_size,
            dilation_rate=dilation_rate,
            name="conv_block_%d" % counter,
            # The bias is redundant when a normalizer adds its own bias.
            use_bias=norm is None,
            **kwargs)
      if use_normalizer_fn:
        cur = norm(cur, name="conv_block_norm_%d" % counter)
    return cur
def conv_block(inputs, filters, dilation_rates_and_kernel_sizes, **kwargs):
  """A block of standard 2d convolutions."""
  return conv_block_internal(
      conv, inputs, filters, dilation_rates_and_kernel_sizes, **kwargs)
def conv1d_block(inputs, filters, dilation_rates_and_kernel_sizes, **kwargs):
  """A block of standard 1d convolutions."""
  return conv_block_internal(
      conv1d, inputs, filters, dilation_rates_and_kernel_sizes, **kwargs)
def separable_conv_block(inputs, filters, dilation_rates_and_kernel_sizes,
                         **kwargs):
  """A block of separable convolutions."""
  return conv_block_internal(
      separable_conv, inputs, filters, dilation_rates_and_kernel_sizes,
      **kwargs)
def subseparable_conv_block(inputs, filters, dilation_rates_and_kernel_sizes,
                            **kwargs):
  """A block of sub-separable convolutions."""
  return conv_block_internal(
      subseparable_conv, inputs, filters, dilation_rates_and_kernel_sizes,
      **kwargs)
def pool(inputs, window_size, pooling_type, padding, strides=(1, 1)):
  """Pooling (supports "LEFT").

  Args:
    inputs: a 4-D Tensor [batch, height, width, channels].
    window_size: a pair (w_h, w_w); both must be odd when padding == "LEFT".
    pooling_type: "MAX" or "AVG" (forwarded to tf.nn.pool).
    padding: "SAME", "VALID" or "LEFT" (causal padding).
    strides: a pair of stride values.

  Returns:
    the pooled Tensor.

  Raises:
    ValueError: if inputs does not have a statically known rank of 4.
  """
  with tf.name_scope("pool", values=[inputs]):
    static_shape = inputs.get_shape()
    if not static_shape or len(static_shape) != 4:
      raise ValueError("Inputs to conv must have statically known rank 4.")
    # Add support for left padding.
    if padding == "LEFT":
      assert window_size[0] % 2 == 1 and window_size[1] % 2 == 1
      # Note: a dead `len(static_shape) == 3` branch was removed here; rank
      # is guaranteed to be 4 by the check above.
      height_padding = 2 * (window_size[0] // 2)
      # Skip width padding for the dummy size-1 width of the 1-d case;
      # decided at graph-run time when the static width is unknown.
      cond_padding = tf.cond(
          tf.equal(shape_list(inputs)[2], 1), lambda: tf.constant(0),
          lambda: tf.constant(2 * (window_size[1] // 2)))
      width_padding = 0 if static_shape[2] == 1 else cond_padding
      padding_ = [[0, 0], [height_padding, 0], [width_padding, 0], [0, 0]]
      inputs = tf.pad(inputs, padding_)
      inputs.set_shape([static_shape[0], None, None, static_shape[3]])
      padding = "VALID"
    return tf.nn.pool(
        inputs, window_size, pooling_type, padding, strides=strides)
def conv_block_downsample(x,
                          kernel,
                          strides,
                          padding,
                          separability=0,
                          name=None,
                          reuse=None):
  """Implements a downwards-striding conv block, like Xception exit flow.

  Args:
    x: a 4-D Tensor [batch, height, width, channels].
    kernel: a kernel-size pair passed to the conv blocks.
    strides: a stride pair used by the residual conv and the max-pool.
    padding: a string, padding mode for the convolutions and pooling.
    separability: an integer, passed to subseparable_conv_block.
    name: an optional variable-scope name.
    reuse: whether to reuse the variable scope.

  Returns:
    a Tensor with 2.5x the input channel count, spatially downsampled by
    `strides`.
  """
  with tf.variable_scope(
      name, default_name="conv_block_downsample", values=[x], reuse=reuse):
    hidden_size = int(x.get_shape()[-1])
    # Strided residual branch, widened to 1.25x channels.
    res = conv_block(
        x,
        int(1.25 * hidden_size), [((1, 1), kernel)],
        padding=padding,
        strides=strides,
        name="res_conv")
    # Main branch: two convs, then max-pool down to the residual's size.
    x = subseparable_conv_block(
        x,
        hidden_size, [((1, 1), kernel)],
        padding=padding,
        separability=separability,
        name="conv0")
    x = subseparable_conv_block(
        x,
        int(1.25 * hidden_size), [((1, 1), kernel)],
        padding=padding,
        separability=separability,
        name="conv1")
    x = pool(x, kernel, "MAX", padding, strides=strides)
    x += res
    # Post-merge expansion to 2x and then 2.5x channels.
    x = subseparable_conv_block(
        x,
        2 * hidden_size, [((1, 1), kernel)],
        first_relu=False,
        padding=padding,
        separability=separability,
        name="conv2")
    x = subseparable_conv_block(
        x,
        int(2.5 * hidden_size), [((1, 1), kernel)],
        padding=padding,
        separability=separability,
        name="conv3")
    return x
def get_timing_signal(length,
                      min_timescale=1,
                      max_timescale=1e4,
                      num_timescales=16):
  """Create Tensor of sinusoids of different frequencies.

  Args:
    length: Length of the Tensor to create, i.e. Number of steps.
    min_timescale: a float
    max_timescale: a float
    num_timescales: an int

  Returns:
    Tensor of shape (length, 2*num_timescales)
  """
  positions = tf.to_float(tf.range(length))
  # Bug fix: guard against division by zero when num_timescales == 1 (the
  # single timescale is then min_timescale itself).
  log_timescale_increment = (
      math.log(max_timescale / min_timescale) / max(num_timescales - 1, 1))
  inv_timescales = min_timescale * tf.exp(
      tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
  scaled_time = tf.expand_dims(positions, 1) * tf.expand_dims(inv_timescales, 0)
  return tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
def add_timing_signal(x, min_timescale=1, max_timescale=1e4, num_timescales=16):
  """Adds a bunch of sinusoids of different frequencies to a Tensor.

  This allows attention to learn to use absolute and relative positions.
  The timing signal should be added to some precursor of both the source
  and the target of the attention. Relative positions work because
  sin(x+y) and cos(x+y) can be expressed in terms of y, sin(x) and cos(x).

  We use a geometric sequence of timescales from min_timescale to
  max_timescale; for each we generate sin(timestep/timescale) and
  cos(timestep/timescale), concatenate along depth, zero-pad up to the
  input depth, and add the result to the input.

  Args:
    x: a Tensor with shape [?, length, ?, depth]
    min_timescale: a float
    max_timescale: a float
    num_timescales: an int <= depth/2

  Returns:
    a Tensor the same shape as x.
  """
  x_shape = shape_list(x)
  length, depth = x_shape[1], x_shape[3]
  signal = get_timing_signal(length, min_timescale, max_timescale,
                             num_timescales)
  # Zero-pad the 2*num_timescales channels up to the full input depth.
  signal = tf.pad(signal, [[0, 0], [0, depth - 2 * num_timescales]])
  return x + tf.reshape(signal, [1, length, 1, depth])
def mask_from_embedding(emb):
  """Input embeddings -> padding mask.

  symbol_modality is hacked to emit all-zero embeddings for padding, so a
  position is padding exactly when its absolute channel sum is zero.

  Args:
    emb: a Tensor with shape [batch, width, height, depth].

  Returns:
    a 0.0/1.0 Tensor with shape [batch, width, height, 1].
  """
  abs_sum = tf.reduce_sum(tf.abs(emb), axis=3, keepdims=True)
  return weights_nonzero(abs_sum)
def length_from_embedding(emb):
  """Compute the length of each sequence in the batch.

  Args:
    emb: a sequence embedding Tensor with shape [batch, max_time, 1, depth].

  Returns:
    a Tensor with shape [batch].
  """
  nonpadding = mask_from_embedding(emb)
  return tf.cast(tf.reduce_sum(nonpadding, [1, 2, 3]), tf.int32)
def mask_leq(target_length, source_length):
  """A mask with 1.0 wherever source_pos <= target_pos and 0.0 elsewhere.

  Args:
    target_length: an integer
    source_length: an integer

  Returns:
    a Tensor with shape [1, target_length, source_length]
  """
  out_shape = [1, target_length, source_length]
  # Lower-triangular band: keep everything below the diagonal, inclusive.
  return ones_matrix_band_part(
      target_length, source_length, -1, 0, out_shape=out_shape)
def relu_density_logit(x, reduce_dims):
  """logit(density(x)). Useful for histograms.

  Args:
    x: a Tensor, typically the output of tf.relu
    reduce_dims: a list of dimensions

  Returns:
    a Tensor
  """
  density = tf.reduce_mean(tf.to_float(x > 0.0), reduce_dims)
  # exp(-10) smooths the logit away from +/- infinity at density 0 or 1.
  smoothing = math.exp(-10)
  return tf.log(density + smoothing) - tf.log((1.0 - density) + smoothing)
def maybe_zero_out_padding(inputs, kernel_size, nonpadding_mask):
  """If necessary, zero out inputs to a conv for padding positions.

  Args:
    inputs: a Tensor with shape [batch, length, ...]
    kernel_size: an integer or pair of integers
    nonpadding_mask: a Tensor with shape [batch, length]

  Returns:
    Tensor of the same shape as inputs.
  """
  # A 1x1 kernel never mixes positions, so masking is unnecessary then.
  if kernel_size == 1 or kernel_size == (1, 1) or nonpadding_mask is None:
    return inputs
  while nonpadding_mask.get_shape().ndims < inputs.get_shape().ndims:
    nonpadding_mask = tf.expand_dims(nonpadding_mask, -1)
  return inputs * nonpadding_mask
def dense_relu_dense(inputs,
                     filter_size,
                     output_size,
                     output_activation=None,
                     dropout=0.0,
                     dropout_broadcast_dims=None,
                     name=None):
  """Hidden layer with RELU activation followed by linear projection."""
  scoped = "%s_{}" % name if name else "{}"
  hidden = dense(
      inputs,
      filter_size,
      use_bias=True,
      activation=tf.nn.relu,
      name=scoped.format("conv1"))
  if dropout != 0.0:
    hidden = dropout_with_broadcast_dims(
        hidden, 1.0 - dropout, broadcast_dims=dropout_broadcast_dims)
  return dense(
      hidden,
      output_size,
      activation=output_activation,
      use_bias=True,
      name=scoped.format("conv2"))
def dense_dropconnect(inputs,
                      output_size,
                      dropconnect_dropout=0.0,
                      name="dense_dropconnect",
                      **kwargs):
  """Dense layer with dropconnect (dropout applied to the kernel)."""
  if dropconnect_dropout != 0.0:
    tf.logging.info("Applying dropconnect as the kernel regularization.")
    keep_prob = 1.0 - dropconnect_dropout
    # Dropconnect is implemented by dropping out kernel weights through the
    # kernel_regularizer hook.
    kwargs["kernel_regularizer"] = partial(tf.nn.dropout, keep_prob=keep_prob)
  return dense(inputs, output_size, use_bias=True, name=name, **kwargs)
def conv_relu_conv(inputs,
                   filter_size,
                   output_size,
                   first_kernel_size=3,
                   second_kernel_size=3,
                   padding="SAME",
                   nonpadding_mask=None,
                   dropout=0.0,
                   name=None,
                   cache=None,
                   decode_loop_step=None):
  """Hidden layer with RELU activation followed by linear projection.

  Args:
    inputs: A tensor.
    filter_size: An integer.
    output_size: An integer.
    first_kernel_size: An integer.
    second_kernel_size: An integer.
    padding: A string.
    nonpadding_mask: A tensor.
    dropout: A float.
    name: A string.
    cache: A dict, containing Tensors which are the results of previous
        attentions, used for fast decoding.
    decode_loop_step: An integer, step number of the decoding loop.
        Only used for inference on TPU. If it is not None, the function
        will do inplace update for the cache instead of concatenating the
        current result to the cache.

  Returns:
    A Tensor.
  """
  with tf.variable_scope(name, "conv_relu_conv", [inputs]):
    inputs = maybe_zero_out_padding(inputs, first_kernel_size, nonpadding_mask)
    if cache:
      if decode_loop_step is None:
        # Regular incremental decoding: append the new step to the cache.
        inputs = cache["f"] = tf.concat([cache["f"], inputs], axis=1)
      else:
        # Inplace update is required for inference on TPU.
        # Inplace_ops only supports inplace_update on the first dimension.
        # The performance of current implementation is better than updating
        # the tensor by adding the result of matmul(one_hot,
        # update_in_current_step)
        tmp_f = tf.transpose(cache["f"], perm=[1, 0, 2])
        tmp_f = inplace_ops.alias_inplace_update(
            tmp_f,
            decode_loop_step * tf.shape(inputs)[1],
            tf.transpose(inputs, perm=[1, 0, 2]))
        inputs = cache["f"] = tf.transpose(tmp_f, perm=[1, 0, 2])
      # Only the last first_kernel_size positions can influence the newest
      # output step, so truncate the cache to that window.
      inputs = cache["f"] = inputs[:, -first_kernel_size:, :]
    h = tpu_conv1d(
        inputs, filter_size, first_kernel_size, padding=padding, name="conv1")
    if cache:
      # In incremental decoding only the newest position's output is needed.
      h = h[:, -1:, :]
    h = tf.nn.relu(h)
    if dropout != 0.0:
      h = tf.nn.dropout(h, 1.0 - dropout)
    h = maybe_zero_out_padding(h, second_kernel_size, nonpadding_mask)
    return tpu_conv1d(
        h, output_size, second_kernel_size, padding=padding, name="conv2")
def sepconv_relu_sepconv(inputs,
                         filter_size,
                         output_size,
                         first_kernel_size=(1, 1),
                         second_kernel_size=(1, 1),
                         padding="LEFT",
                         nonpadding_mask=None,
                         dropout=0.0,
                         name=None):
  """Hidden layer with RELU activation followed by linear projection."""
  with tf.variable_scope(name, "sepconv_relu_sepconv", [inputs]):
    inputs = maybe_zero_out_padding(inputs, first_kernel_size, nonpadding_mask)
    squeeze_back = inputs.get_shape().ndims == 3
    if squeeze_back:
      # separable_conv expects rank 4; add a dummy width dimension.
      inputs = tf.expand_dims(inputs, 2)
    hidden = separable_conv(
        inputs,
        filter_size,
        first_kernel_size,
        activation=tf.nn.relu,
        padding=padding,
        name="conv1")
    if dropout != 0.0:
      hidden = tf.nn.dropout(hidden, 1.0 - dropout)
    hidden = maybe_zero_out_padding(hidden, second_kernel_size,
                                    nonpadding_mask)
    out = separable_conv(
        hidden, output_size, second_kernel_size, padding=padding, name="conv2")
    return tf.squeeze(out, 2) if squeeze_back else out
# DEPRECATED - use dense_relu_dense, conv_relu_conv, sepconv_relu_sepconv
def conv_hidden_relu(inputs,
                     hidden_size,
                     output_size,
                     kernel_size=(1, 1),
                     second_kernel_size=(1, 1),
                     dropout=0.0,
                     **kwargs):
  """Hidden layer with RELU activation followed by linear projection."""
  name = kwargs.pop("name", None)
  with tf.variable_scope(name, "conv_hidden_relu", [inputs]):
    squeeze_back = inputs.get_shape().ndims == 3
    if squeeze_back:
      # conv/separable_conv expect rank 4; add a dummy width dimension.
      inputs = tf.expand_dims(inputs, 2)
    # 1x1 kernels use plain conv; larger kernels use separable conv.
    first_conv = conv if kernel_size == (1, 1) else separable_conv
    hidden = first_conv(
        inputs,
        hidden_size,
        kernel_size,
        activation=tf.nn.relu,
        name="conv1",
        **kwargs)
    if dropout != 0.0:
      hidden = tf.nn.dropout(hidden, 1.0 - dropout)
    second_conv = conv if second_kernel_size == (1, 1) else separable_conv
    out = second_conv(hidden, output_size, second_kernel_size, name="conv2",
                      **kwargs)
    return tf.squeeze(out, 2) if squeeze_back else out
def conv_gru(x,
             kernel_size,
             filters,
             padding="SAME",
             dilation_rate=(1, 1),
             name=None,
             reuse=None):
  """Convolutional GRU in 1 dimension."""
  def gate_conv(args, name, bias_start, padding):
    """Shorthand conv with a constant bias initializer."""
    return conv(
        args,
        filters,
        kernel_size,
        padding=padding,
        dilation_rate=dilation_rate,
        bias_initializer=tf.constant_initializer(bias_start),
        name=name)
  with tf.variable_scope(
      name, default_name="conv_gru", values=[x], reuse=reuse):
    # Gates are biased towards 1.0 so the cell initially passes x through.
    reset_gate = saturating_sigmoid(gate_conv(x, "reset", 1.0, padding))
    update_gate = saturating_sigmoid(gate_conv(x, "gate", 1.0, padding))
    candidate = tf.tanh(gate_conv(reset_gate * x, "candidate", 0.0, padding))
    return update_gate * x + (1 - update_gate) * candidate
def gru_feedfwd(a_t, h_prev, filters, name=None):
  """position-wise Feed-fwd GRU gates following the MPNN.

  Args:
    a_t: Tensor of shape [batch, length, depth] of current input
    h_prev: Tensor of shape [batch, length, depth] of prev input
    filters: an integer specifying number of dimensions of the filters
    name: A string

  Returns:
    h_t: [batch, length, filters] hidden state
  """
  with tf.variable_scope(name, default_name="GRU", values=[a_t, h_prev]):
    def proj(tensor, scope):
      # Right matrix multiplication (width-1 conv) handles batching.
      return tpu_conv1d(tensor, filters, 1, padding="SAME", name=scope)
    # W_z and W_r have shape 2d, d. U_z U_r have shape d,d
    z_t = tf.sigmoid(proj(a_t, "W_z") + proj(h_prev, "U_z"))
    r_t = tf.sigmoid(proj(a_t, "W_r") + proj(h_prev, "U_r"))
    h_tilde = tf.tanh(proj(a_t, "W") + proj(r_t * h_prev, "U"))
    return (1. - z_t) * h_prev + z_t * h_tilde
def conv_lstm(x,
              kernel_size,
              filters,
              padding="SAME",
              dilation_rate=(1, 1),
              name=None,
              reuse=None):
  """Convolutional LSTM in 1 dimension."""
  with tf.variable_scope(
      name, default_name="conv_lstm", values=[x], reuse=reuse):
    # One conv produces all four gates at once; layer-norm, then split.
    gates = conv(
        x,
        4 * filters,
        kernel_size,
        padding=padding,
        dilation_rate=dilation_rate)
    forget, in_gate, out_gate, candidate = tf.split(
        layer_norm(gates, 4 * filters), 4, axis=3)
    new_cell = (
        tf.sigmoid(forget) * x + tf.sigmoid(in_gate) * tf.tanh(candidate))
    return tf.sigmoid(out_gate) * tf.tanh(new_cell)
def diagonal_conv_gru(x,
                      kernel_size,
                      filters,
                      dropout=0.0,
                      name=None,
                      reuse=None):
  """Diagonal Convolutional GRU as in https://arxiv.org/abs/1702.08727.

  Args:
    x: a 4-D Tensor [batch, height, width, filters].
    kernel_size: a kernel-size pair for the gate convolutions.
    filters: an integer, number of channels (must match x's last dimension).
    dropout: a float, dropout rate applied to the candidate activations.
    name: an optional variable-scope name.
    reuse: whether to reuse the variable scope.

  Returns:
    A pair (output Tensor shaped like x, scalar saturation cost from the
    hard-sigmoid gates).
  """
  # Let's make a shorthand for conv call first.
  def do_conv(args, name, bias_start):
    return conv(
        args,
        filters,
        kernel_size,
        padding="SAME",
        bias_initializer=tf.constant_initializer(bias_start),
        name=name)
  # Here comes the GRU gate.
  with tf.variable_scope(
      name, default_name="diagonal_conv_gru", values=[x], reuse=reuse):
    # hard_sigmoid returns (activation, saturation cost).
    reset, reset_cost = hard_sigmoid(do_conv(x, "reset", 0.5))
    gate, gate_cost = hard_sigmoid(do_conv(x, "gate", 0.7))
    candidate = tf.tanh(do_conv(reset * x, "candidate", 0.0))
    if dropout > 0.0:
      candidate = tf.nn.dropout(candidate, 1.0 - dropout)
    # Diagonal shift.
    # Build a depthwise filter that leaves the first third of channels in
    # place and shifts the remaining thirds one step left/right respectively.
    shift_filters = filters // 3
    base_filter = ([[0, 1, 0]] * (filters - 2 * shift_filters) +
                   [[1, 0, 0]] * shift_filters + [[0, 0, 1]] * shift_filters)
    shift_filter = tf.constant(np.transpose(base_filter), dtype=tf.float32)
    shift_filter = tf.expand_dims(tf.expand_dims(shift_filter, 0), 3)
    x_shifted = tf.nn.depthwise_conv2d(
        x, shift_filter, [1, 1, 1, 1], padding="SAME")
    # Return the gated result and cost.
    total_cost_avg = 0.5 * (reset_cost + gate_cost)
    return gate * x_shifted + (1 - gate) * candidate, total_cost_avg
def pad_to_same_length(x, y, final_length_divisible_by=1, axis=1):
  """Pad tensors x and y on axis 1 so that they have the same length.

  Args:
    x: a Tensor.
    y: a Tensor.
    final_length_divisible_by: an integer; the common padded length is
      rounded up to a multiple of this value.
    axis: 1 or 2, the axis to equalize.

  Returns:
    The pair (padded_x, padded_y).

  Raises:
    ValueError: if axis is not 1 or 2.
  """
  if axis not in [1, 2]:
    raise ValueError("Only axis=1 and axis=2 supported for now.")
  with tf.name_scope("pad_to_same_length", values=[x, y]):
    x_length = shape_list(x)[axis]
    y_length = shape_list(y)[axis]
    # Fast path: statically equal lengths with no divisibility constraint.
    if (isinstance(x_length, int) and isinstance(y_length, int) and
        x_length == y_length and final_length_divisible_by == 1):
      return x, y
    max_length = tf.maximum(x_length, y_length)
    if final_length_divisible_by > 1:
      # Find the nearest larger-or-equal integer divisible by given number.
      max_length += final_length_divisible_by - 1
      max_length //= final_length_divisible_by
      max_length *= final_length_divisible_by
    length_diff1 = max_length - x_length
    length_diff2 = max_length - y_length
    def padding_list(length_diff, arg):
      # Pad only the chosen axis; zero-pad specs for all trailing dims are
      # generated dynamically since arg's rank may not be static.
      if axis == 1:
        return [[[0, 0], [0, length_diff]],
                tf.zeros([tf.rank(arg) - 2, 2], dtype=tf.int32)]
      return [[[0, 0], [0, 0], [0, length_diff]],
              tf.zeros([tf.rank(arg) - 3, 2], dtype=tf.int32)]
    paddings1 = tf.concat(padding_list(length_diff1, x), axis=0)
    paddings2 = tf.concat(padding_list(length_diff2, y), axis=0)
    res_x = tf.pad(x, paddings1)
    res_y = tf.pad(y, paddings2)
    # Static shapes are the same except for axis=1.
    x_shape = x.shape.as_list()
    x_shape[axis] = None
    res_x.set_shape(x_shape)
    y_shape = y.shape.as_list()
    y_shape[axis] = None
    res_y.set_shape(y_shape)
    return res_x, res_y
def pad_with_zeros(logits, labels):
  """Pad labels on the length dimension to match logits length."""
  with tf.name_scope("pad_with_zeros", values=[logits, labels]):
    logits, labels = pad_to_same_length(logits, labels)
    if len(labels.shape) == 3:
      # 2-d labels: equalize the second spatial axis as well.
      logits, labels = pad_to_same_length(logits, labels, axis=2)
    return logits, labels
def weights_nonzero(labels):
  """Assign weight 1.0 to all labels except for padding (id=0)."""
  nonpadding = tf.not_equal(labels, 0)
  return tf.to_float(nonpadding)
def weights_prepend_inputs_to_targets(labels):
  """Assign weight 1.0 to only the "targets" portion of the labels.

  Weight 1.0 is assigned to all nonzero labels past the first zero.
  See prepend_mode in common_hparams.py

  Args:
    labels: A Tensor of int32s.

  Returns:
    A Tensor of floats.
  """
  # Positive once at least one zero (the inputs/targets separator) has been
  # seen at or before this position.
  zeros_seen = tf.cumsum(tf.to_float(tf.equal(labels, 0)), axis=1)
  labels_float = tf.to_float(labels)
  return tf.to_float(tf.not_equal(zeros_seen * labels_float, 0))
def weights_multi_problem(labels, taskid=-1):
  """Assign weight 1.0 to only the "targets" portion of the labels.

  Weight 1.0 is assigned to all labels past the taskid.

  Args:
    labels: A Tensor of int32s.
    taskid: an int32 representing the task id for a problem.

  Returns:
    A Tensor of floats.

  Raises:
    ValueError: The Task ID must be valid.
  """
  # Bug fix: the docstring promises a ValueError for an invalid task id and
  # the sibling weights_multi_problem_all enforces it; mirror that check here
  # so a forgotten taskid does not silently produce meaningless weights.
  if taskid < 0:
    raise ValueError("Task ID must be non-negative.")
  past_taskid = tf.cumsum(tf.to_float(tf.equal(labels, taskid)), axis=1)
  # Additionally zero out the task id location
  past_taskid *= tf.to_float(tf.not_equal(labels, taskid))
  non_taskid = tf.to_float(labels)
  return tf.to_float(tf.not_equal(past_taskid * non_taskid, 0))
def weights_multi_problem_all(labels, taskid=-1):
  """Assign weight 1.0 to all tokens of examples belonging to a task.

  An example is considered to belong to the task if it contains the `taskid`
  token anywhere along its time axis; all its non-padding tokens then get
  weight 1.0, and every token of other examples gets 0.0.

  Args:
    labels: A Tensor of int32s.
    taskid: an int32 representing the task id for a problem.

  Returns:
    A Tensor of floats.

  Raises:
    ValueError: if taskid is negative.
  """
  # Validate before building any graph ops (fail fast; the original checked
  # only after constructing `weights`).
  if taskid < 0:
    raise ValueError("Task ID must be non-negative.")
  weights = tf.to_float(tf.not_equal(labels, 0))
  past_taskid = tf.cumsum(tf.to_float(tf.equal(labels, taskid)), axis=1)
  # Additionally zero out the task id location.
  past_taskid *= tf.to_float(tf.not_equal(labels, taskid))
  non_taskid = tf.to_float(labels)
  # Per-position target mask, then collapse to a per-example indicator.
  example_mask = tf.to_float(tf.not_equal(past_taskid * non_taskid, 0))
  example_mask = tf.reduce_sum(example_mask, axis=1)
  example_mask = tf.to_float(
      tf.greater(example_mask, tf.zeros_like(example_mask)))
  return weights * tf.expand_dims(example_mask, axis=-1)
def weights_multi_problem_input(labels, taskid=-1):
  """Assign weight 1.0 to only the inputs for the given task."""
  all_tokens = weights_multi_problem_all(labels, taskid)
  target_tokens = weights_multi_problem(labels, taskid)
  # Inputs are everything in a task's example that is not part of the targets.
  return all_tokens - target_tokens
def weights_all(labels):
  """Weight every label position equally with 1.0."""
  return tf.to_float(tf.ones_like(labels))
def weights_concatenated(labels):
  """Assign weight 1.0 to the "target" part of the concatenated labels.

  The labels look like:
    source English I love you . ID1 target French Je t'aime . ID1 source
      English the cat ID1 target French le chat ID1 source English ...

  We want to assign weight 1.0 to all words in the target text (including the
  ID1 end symbol), but not to the source text or the boilerplate.  In the
  above example, the target words that get positive weight are:
    Je t'aime . ID1 le chat ID1

  Args:
    labels: a Tensor

  Returns:
    a Tensor
  """
  # Token id 1 (ID1) delimits segments; count delimiters strictly before each
  # position so every token carries its segment index.
  eos_mask = tf.to_int32(tf.equal(labels, 1))
  sentence_num = tf.cumsum(eos_mask, axis=1, exclusive=True)
  # Segments alternate source/target, so odd segment indices are targets.
  in_target = tf.equal(tf.mod(sentence_num, 2), 1)
  # first two tokens of each sentence are boilerplate.
  # Shifting segment ids forward by 2 and comparing keeps only positions at
  # least two tokens into their segment (drops e.g. "target French").
  # NOTE(review): the 4-D paddings imply labels has shape
  # [batch, length, 1, 1] here — confirm against callers.
  sentence_num_plus_one = sentence_num + 1
  shifted = tf.pad(sentence_num_plus_one,
                   [[0, 0], [2, 0], [0, 0], [0, 0]])[:, :-2, :, :]
  nonboilerplate = tf.equal(sentence_num_plus_one, shifted)
  ret = tf.to_float(tf.logical_and(nonboilerplate, in_target))
  return ret
def padded_cross_entropy(logits,
                         labels,
                         label_smoothing,
                         weights_fn=weights_nonzero,
                         reduce_sum=True,
                         cutoff=0.0,
                         gaussian=False):
  """Compute cross-entropy assuming 0s are padding.

  Computes a loss numerator (the sum of losses), and loss denominator
  (the number of non-padding tokens).

  Args:
    logits: a `Tensor` with shape `[batch, timesteps, vocab_size]`.
      optionally a FactoredTensor.
    labels: an integer `Tensor` with shape `[batch, timesteps]`.
    label_smoothing: a floating point `Scalar`.
    weights_fn: A function from labels to weights.
    reduce_sum: a Boolean, whether to sum at the end or not.
    cutoff: a float, at which point to have no loss.
    gaussian: If true, use a Gaussian distribution for label smoothing

  Returns:
    loss_numerator: a `Scalar`.  Sum of losses.
    loss_denominator: a `Scalar.  The number of non-padding target tokens.

  Raises:
    ValueError: in case of unsupported argument types.
  """
  if isinstance(logits, FactoredTensor):
    # Factored logits cannot be realized as one full matrix; dispatch to the
    # memory-efficient implementation (which has no Gaussian-smoothing path).
    if gaussian:
      raise ValueError("Factored padded cross entropy with Gaussian smoothing "
                       "is not implemented yet.")
    return padded_cross_entropy_factored(
        logits,
        labels,
        label_smoothing,
        weights_fn=weights_fn,
        reduce_sum=reduce_sum)
  # Probability mass placed on the true label.
  confidence = 1.0 - label_smoothing
  logits_shape = shape_list(logits)
  vocab_size = logits_shape[-1]
  with tf.name_scope("padded_cross_entropy", values=[logits, labels]):
    if len(logits_shape) == 2:
      # Deal with the case where we did not insert extra dimensions due to
      # TPU issues.  No pad-to-same-length happens in this case.
      # TODO(noam): remove this logic once TPU can handle extra dimensions.
      labels = tf.reshape(labels, [-1])
    else:
      logits, labels = pad_with_zeros(logits, labels)
    # Reshape doubles as a runtime check that logits/labels sizes agree.
    logits = tf.reshape(
        logits,
        shape_list(labels) + [vocab_size],
        name="padded_cross_entropy_size_check")
    logits = tf.cast(logits, tf.float32)
    xent = smoothing_cross_entropy(
        logits, labels, vocab_size, confidence, gaussian=gaussian)
    weights = weights_fn(labels)
    if cutoff > 0.0:
      # Hinge at `cutoff`: positions already below it contribute zero loss.
      xent = tf.nn.relu(xent - cutoff)
    if not reduce_sum:
      return xent * weights, weights
    return tf.reduce_sum(xent * weights), tf.reduce_sum(weights)
def _weights_one_third(labels):
  """Returns Tensor of shape [batch, height, width]. Each element is 1/3."""
  ones = tf.ones(tf.shape(labels)[:-1])
  return ones / 3.
def dml_loss(pred, labels, weights_fn=_weights_one_third, reduce_sum=True):
  """Discretized mixture of logistics loss.

  Args:
    pred: A [batch, height, width, num_mixtures*10] tensor of floats
      comprising one unconstrained mixture probability, three means
      (one per channel), three standard deviations (one per channel),
      and three coefficients which linearly parameterize dependence across
      channels.
    labels: A [batch, height, width, channels] tensor of 8-bit pixel
      intensities. The computation assumes channels is 3.
    weights_fn: A function of labels, returning a Tensor of shape
      [batch, height, width] which weights each loss term. Default is to scale
      each loss term by 1/3 so that they capture the average across channels.
    reduce_sum: A boolean, to return scalar loss instead of per position.

  Returns:
    Tuple of loss tensors for numerator and denominator, each a scalar if
    reduce_sum else of shape [batch, height, width]. The sum of their divisions
    is the number of nats for each pixel in labels.
  """
  # The DML loss operates on intensities rescaled to [-1, 1].
  labels_real = convert_rgb_to_symmetric_real(labels)
  nats = discretized_mix_logistic_loss(pred=pred, labels=labels_real)
  weights = weights_fn(labels)
  loss_num = weights * nats
  loss_den = weights_nonzero(weights)
  if not reduce_sum:
    return loss_num, loss_den
  return tf.reduce_sum(loss_num), tf.reduce_sum(loss_den)
def split_to_discretized_mix_logistic_params(inputs):
  """Splits input tensor into parameters of discretized mixture logistic.

  Args:
    inputs: A [batch, height, width, num_mixtures*10] tensor of floats
      comprising one unconstrained mixture probability, three means
      (one per channel), three standard deviations (one per channel),
      and three coefficients which linearly parameterize dependence across
      channels.

  Returns:
    Tuple of unconstrained mixture probabilities, locations, scales, and
    coefficient parameters of the distribution. The mixture probability has
    shape [batch, height, width, num_mixtures]. Other parameters have shape
    [batch, height, width, num_mixtures, 3].
  """
  batch, height, width, output_dim = shape_list(inputs)
  num_mixtures = output_dim // 10
  # One logit per mixture, then 3 channels each of locs/log_scales/coeffs.
  split_sizes = [num_mixtures] + [num_mixtures * 3] * 3
  logits, locs, log_scales, coeffs = tf.split(
      inputs, num_or_size_splits=split_sizes, axis=-1)
  per_channel_shape = [batch, height, width, num_mixtures, 3]
  locs = tf.reshape(locs, per_channel_shape)
  # Clamp scales from below for numerical stability.
  log_scales = tf.maximum(tf.reshape(log_scales, per_channel_shape), -7.)
  # Squash the cross-channel coefficients into (-1, 1).
  coeffs = tf.tanh(tf.reshape(coeffs, per_channel_shape))
  return logits, locs, log_scales, coeffs
def discretized_mix_logistic_loss(pred, labels):
  """Computes negative log probability for the discretized mixture of logistics.

  The distribution of a whole pixel is a mixture of 3-dimensional discretized
  logistic distributions. The 3-D discretized logistic factorizes as 3 1-D
  discretized logistic distributions, one for each channel. It defines

  ```none
  P(X = x)
  = sum_{k=1}^K probs[k] * P(X = x | locs[k], scales[k])
  = sum_{k=1}^K probs[k] * [
      prod_{c=1}^3 DiscretizedLogistic(X[c] = x[c] | means[k][c], scales[k]) ]
  ```

  The means tensor is a linear combination of location parameters and previous
  channels. The discretized logistic distribution assigns probability mass to an
  event P(X=x) via logistic CDFs: P(X <= x + 0.5) - P(X > x - 0.5) for 1 < x <
  254; P(X <= 0.5) for x = 0; and 1 - P(X > 245.5) for x = 255. Instead of
  8-bit inputs, this implementation assumes the events are rescaled to [-1, 1].

  Args:
    pred: A [batch, height, width, num_mixtures*10] tensor of floats
      comprising one unconstrained mixture probability, three means
      (one per channel), three standard deviations (one per channel),
      and three coefficients which linearly parameterize dependence across
      channels.
    labels: A [batch, height, width, channels] tensor of true pixel intensities
      rescaled to [-1, 1]. The computation assumes channels is 3.

  Returns:
    A [batch, height, width] tensor of the negative log conditional probability
    of each pixel given all previous pixels.
  """
  logits, locs, log_scales, coeffs = split_to_discretized_mix_logistic_params(
      pred)

  # Tile labels to broadcast compute across the mixture dimension.
  batch, height, width, num_mixtures = shape_list(logits)
  labels = tf.tile(
      tf.reshape(labels, [batch, height, width, 1, 3]),
      [1, 1, 1, num_mixtures, 1])

  # p(x) = sigmoid((x - means_i + 1/255.)/scale_i) -
  #        sigmoid((x - means_i - 1/255.)/scale_i)
  # for each channel i. The means are linearly parameterized.
  # Channels 1 and 2 condition autoregressively on the true previous channels.
  means_0 = locs[..., 0]
  means_1 = locs[..., 1] + coeffs[..., 0] * labels[..., 0]
  means_2 = (
      locs[..., 2] + coeffs[..., 1] * labels[..., 0] +
      coeffs[..., 2] * labels[..., 1])
  means = tf.stack([means_0, means_1, means_2], axis=-1)
  centered_labels = labels - means
  inv_stdv = tf.exp(-log_scales)
  # 1/255 is half a bin width: the CDF is evaluated half a bin on each side
  # of the label value.
  plus_in = inv_stdv * (centered_labels + 1. / 255.)
  min_in = inv_stdv * (centered_labels - 1. / 255.)
  cdf_plus = tf.nn.sigmoid(plus_in)
  cdf_min = tf.nn.sigmoid(min_in)

  # Compute log probability for edge case of 0 (before scaling), 255 (before
  # scaling), and all other cases respectively.
  # Uses log sigmoid(x) = x - softplus(x) and log(1 - sigmoid(x)) =
  # -softplus(x) for numerically stable open-ended bins.
  log_prob_0 = plus_in - tf.nn.softplus(plus_in)
  log_prob_255 = -tf.nn.softplus(min_in)
  prob_event = tf.maximum(cdf_plus - cdf_min, 1e-12)
  log_prob_event = tf.log(prob_event)

  # Robustly select log-prob based on numerical edge-cases: (a) [-1, -1+eps);
  # (b) (1-eps, 1]; (c) NaNs during `tf.gradients` of `tf.select`, which may
  # cause `tf.log(0.)`; (d) p(x) < 1e-5.
  # The approximation is the continuous logistic log-density scaled by the
  # bin width (1/127.5 in [-1, 1] space).
  mid_in = inv_stdv * centered_labels
  log_prob_event_approx = (
      mid_in - log_scales - 2. * tf.nn.softplus(mid_in) - np.log(127.5))
  log_probs = tf.where(
      labels < -0.999, log_prob_0,
      tf.where(
          labels > 0.999, log_prob_255,
          tf.where(prob_event > 1e-5, log_prob_event, log_prob_event_approx)))

  # Sum over channels and compute log-probability of each mixture.
  log_probs = tf.reduce_sum(log_probs, -1) + tf.nn.log_softmax(logits, axis=-1)
  # Negative log-sum-exp over mixtures gives the per-pixel NLL in nats.
  output = -tf.reduce_logsumexp(log_probs, axis=-1)
  return output
def sample_from_discretized_mix_logistic(pred, seed=None):
  """Sampling from a discretized mixture of logistics.

  Args:
    pred: A [batch, height, width, num_mixtures*10] tensor of floats
      comprising one unconstrained mixture probability, three means
      (one per channel), three standard deviations (one per channel),
      and three coefficients which linearly parameterize dependence across
      channels.
    seed: Random seed.

  Returns:
    A tensor of shape [batch, height, width, 3] with real intensities scaled
    between -1 and 1.
  """
  logits, locs, log_scales, coeffs = split_to_discretized_mix_logistic_params(
      pred)

  # Sample mixture indicator given logits using the gumbel max trick:
  # argmax(logits + Gumbel noise) is a sample from softmax(logits).
  num_mixtures = shape_list(logits)[-1]
  gumbel_noise = -tf.log(-tf.log(
      tf.random_uniform(
          tf.shape(logits), minval=1e-5, maxval=1. - 1e-5, seed=seed)))
  sel = tf.one_hot(
      tf.argmax(logits + gumbel_noise, -1),
      depth=num_mixtures,
      dtype=tf.float32)

  # Select mixture component's parameters (one-hot sum reduces the mixture
  # axis to the chosen component).
  sel = tf.expand_dims(sel, -1)
  locs = tf.reduce_sum(locs * sel, 3)
  log_scales = tf.reduce_sum(log_scales * sel, 3)
  coeffs = tf.reduce_sum(coeffs * sel, 3)

  # Sample from 3-D logistic & clip to interval. Note we don't round to the
  # nearest 8-bit value when sampling.
  # Inverse-CDF sampling: log(u) - log(1-u) is standard-logistic noise.
  uniform_noise = tf.random_uniform(
      tf.shape(locs), minval=1e-5, maxval=1. - 1e-5, seed=seed)
  logistic_noise = tf.log(uniform_noise) - tf.log(1. - uniform_noise)
  x = locs + tf.exp(log_scales) * logistic_noise
  # Channels are generated autoregressively: each later channel is shifted
  # by a linear function of the already-sampled earlier channels.
  x0 = x[..., 0]
  x1 = x[..., 1] + coeffs[..., 0] * x0
  x2 = x[..., 2] + coeffs[..., 1] * x0 + coeffs[..., 2] * x1
  x = tf.stack([x0, x1, x2], axis=-1)
  x = tf.clip_by_value(x, -1., 1.)
  return x
def smoothing_cross_entropy(logits,
                            labels,
                            vocab_size,
                            confidence,
                            gaussian=False):
  """Cross entropy with label smoothing to limit over-confidence.

  Args:
    logits: Tensor of shape [batch_size, ?, ?, ?, vocab_size].
    labels: Tensor of shape [batch_size, ?, ?, ?].
    vocab_size: Tensor representing the size of the vocabulary.
    confidence: Used to determine on and off values for label smoothing.
      If `gaussian` is true, `confidence` is the variance to the Gaussian
      distribution.
    gaussian: Uses a Gaussian distribution for label smoothing

  Returns:
    Tensor of shape [batch_size, ?, ?, ?].
  """
  with tf.name_scope("smoothing_cross_entropy", values=[logits, labels]):
    # Low confidence is given to all non-true labels, uniformly.
    low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1)
    # Normalizing constant is the best cross-entropy value with soft targets.
    # We subtract it just for readability, makes no difference on learning.
    # The 1e-20 guards tf.log when low_confidence == 0 (no smoothing).
    normalizing = -(
        confidence * tf.log(confidence) + tf.to_float(vocab_size - 1) *
        low_confidence * tf.log(low_confidence + 1e-20))

    if gaussian and confidence > 0.0:
      labels = tf.cast(labels, tf.float32)
      normal_dist = tfp.distributions.Normal(loc=labels, scale=confidence)
      # Locations to evaluate the probability distributions.
      soft_targets = normal_dist.prob(
          tf.cast(tf.range(vocab_size), tf.float32)[:, None, None, None, None])
      # Reordering soft_targets from [vocab_size, batch_size, ?, ?, ?] to match
      # logits: [batch_size, ?, ?, ?, vocab_size]
      soft_targets = tf.transpose(soft_targets, perm=[1, 2, 3, 4, 0])
    else:
      # Standard label smoothing: true class gets `confidence`, every other
      # class gets `low_confidence`.
      soft_targets = tf.one_hot(
          tf.cast(labels, tf.int32),
          depth=vocab_size,
          on_value=confidence,
          off_value=low_confidence)
    xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(
        logits=logits, labels=soft_targets)
    return xentropy - normalizing
def global_pool_1d(inputs, pooling_type="MAX", mask=None):
  """Pool elements across the last dimension.

  Useful to convert a list of vectors into a single vector so as
  to get a representation of a set.

  Args:
    inputs: A tensor of shape [batch_size, sequence_length, input_dims]
      containing the sequences of input vectors.
    pooling_type: the pooling type to use, MAX or AVR
    mask: A tensor of shape [batch_size, sequence_length] containing a
      mask for the inputs with 1's for existing elements, and 0's elsewhere.

  Returns:
    A tensor of shape [batch_size, input_dims] containing the sequences of
    transformed vectors.
  """
  with tf.name_scope("global_pool", values=[inputs]):
    if mask is not None:
      # Zero out masked (non-existent) elements before pooling.
      # NOTE(review): with MAX pooling this maps masked slots to 0, which can
      # still dominate when all real values are negative — confirm callers
      # guarantee non-negative activations or non-degenerate masks.
      mask = tf.expand_dims(mask, axis=2)
      inputs = tf.multiply(inputs, mask)

    if pooling_type == "MAX":
      # A tf.pool can be used here, but reduce is cleaner
      output = tf.reduce_max(inputs, axis=1)
    elif pooling_type == "AVR":
      if mask is not None:
        # Some elems are dummy elems so we can't just reduce the average.
        # Divide the sum by the true element count instead (min 1 to avoid
        # division by zero on fully-masked rows).
        output = tf.reduce_sum(inputs, axis=1)
        num_elems = tf.reduce_sum(mask, axis=1, keepdims=True)
        output = tf.div(output, tf.maximum(num_elems, 1))
      else:
        output = tf.reduce_mean(inputs, axis=1)

  return output
def running_global_pool_1d(inputs, pooling_type="MAX"):
  """Same global pool, but only for the elements up to the current element.

  Useful for outputs where the state of future elements is not known.
  Takes no mask as all elements up to the current element are assumed to exist.
  Currently only supports maximum. Equivalent to using a lower triangle bias.

  Args:
    inputs: A tensor of shape [batch_size, sequence_length, input_dims]
      containing the sequences of input vectors.
    pooling_type: Pooling type to use. Currently only supports 'MAX'.

  Returns:
    A tensor of shape [batch_size, sequence_length, input_dims] containing the
    running 'totals'.
  """
  del pooling_type  # Only MAX is implemented.
  with tf.name_scope("running_global_pool", values=[inputs]):
    # tf.scan runs along axis 0, so move time to the front, scan a running
    # element-wise maximum, then restore batch-major order.
    time_major = tf.transpose(inputs, [1, 0, 2])
    running_max = tf.scan(tf.maximum, time_major, swap_memory=True)
    return tf.transpose(running_max, [1, 0, 2])
def gated_linear_unit_layer(x, name=None):
  """Gated linear unit layer.

  Paper: Language Modeling with Gated Convolutional Networks.
  Link: https://arxiv.org/abs/1612.08083
  x = Wx * sigmoid(W'x).

  Args:
    x: A tensor
    name: A string

  Returns:
    A tensor of the same shape as x.
  """
  with tf.variable_scope(name, default_name="glu_layer", values=[x]):
    depth = shape_list(x)[-1]
    # Project to twice the depth, then split into values and gates.
    projected = tf.layers.dense(x, depth * 2, activation=None)
    values, gates = tf.split(projected, 2, axis=-1)
    return values * tf.nn.sigmoid(gates)
def sru_with_scan(x,
                  num_layers=2,
                  activation=None,
                  initial_state=None,
                  name=None,
                  reuse=None):
  """SRU cell as in https://arxiv.org/abs/1709.02755.

  This implementation uses tf.scan and can incur overhead, see the full SRU
  function doc for details and an implementation that is sometimes faster.

  Args:
    x: A tensor of shape [batch, ..., channels] ; ... is treated as time.
    num_layers: How many SRU layers; default is 2 as results for 1 disappoint.
    activation: Optional activation function, try tf.nn.tanh or tf.nn.relu.
    initial_state: Optional initial c-state, set to zeros if None.
    name: Optional name, "sru" by default.
    reuse: Optional reuse.

  Returns:
    A tensor of the same shape as x.

  Raises:
    ValueError: if num_layers is not positive.
  """
  if num_layers < 1:
    raise ValueError("Number of layers must be positive: %d" % num_layers)
  with tf.variable_scope(name, default_name="sru", values=[x], reuse=reuse):
    # We assume x is [batch, ..., channels] and treat all ... as time.
    x_shape = shape_list(x)
    x = tf.reshape(x, [x_shape[0], -1, x_shape[-1]])
    x = tf.transpose(x, [1, 0, 2])  # Scan assumes time on axis 0.
    if initial_state is None:
      # Bug fix: `initial_state or tf.zeros(...)` would call
      # Tensor.__bool__, which raises in graph mode whenever a caller
      # actually supplies a state tensor; compare against None instead.
      initial_state = tf.zeros([x_shape[0], x_shape[-1]])

    # SRU state manipulation function: c_t = f_t * c_{t-1} + (1 - f_t) * x'_t.
    def next_state(cur_state, args_tup):
      cur_x_times_one_minus_f, cur_f = args_tup
      return cur_f * cur_state + cur_x_times_one_minus_f

    # Calculate SRU on each layer.
    for i in range(num_layers):
      # The parallel part of the SRU: one dense projection yields x', f, r.
      x_orig = x
      x, f, r = tf.split(
          tf.layers.dense(x, 3 * x_shape[-1], name="kernel_%d" % i), 3, axis=-1)
      f, r = tf.sigmoid(f), tf.sigmoid(r)
      x_times_one_minus_f = x * (1.0 - f)  # Compute in parallel for speed.
      # Calculate states sequentially along the time axis.
      c_states = tf.scan(
          next_state, (x_times_one_minus_f, f),
          initializer=initial_state,
          parallel_iterations=2,
          name="scan_%d" % i)
      # Final output: highway-style mix of the cell state and the layer input.
      if activation is not None:
        c_states = activation(c_states)
      h = c_states * r + (1.0 - r) * x_orig
      x = h  # Next layer.
    # Transpose back to batch-major.
    x = tf.transpose(x, [1, 0, 2])
    return tf.reshape(x, x_shape)
class CumsumprodCell(object):
  """Cumulative sum and product object for use with functional_rnn API."""

  def __init__(self, initializer):
    self._initializer = initializer

  @property
  def output_size(self):
    return int(shape_list(self._initializer)[-1])

  def zero_state(self, batch_size, dtype):
    return tf.zeros([batch_size, self.output_size], dtype=dtype or tf.float32)

  def __call__(self, inputs_t, state_t):
    # inputs_t packs [x * (1 - f), f]; the step computes
    # c_t = f_t * c_{t-1} + (1 - f_t) * x_t.
    x_times_one_minus_f, f = tf.split(inputs_t, 2, axis=-1)
    next_state = f * state_t + x_times_one_minus_f
    return next_state, next_state
def sru(x,
        num_layers=2,
        activation=None,
        initial_state=None,
        name=None,
        reuse=None):
  """SRU cell as in https://arxiv.org/abs/1709.02755.

  As defined in the paper:
  (1) x'_t = W x_t
  (2) f_t = sigmoid(Wf x_t + bf)
  (3) r_t = sigmoid(Wr x_t + br)
  (4) c_t = f_t * c_{t-1} + (1 - f_t) * x'_t
  (5) h_t = r_t * activation(c_t) + (1 - r_t) * x_t

  This version uses functional ops to be faster on GPUs with TF-1.9+.

  Args:
    x: A tensor of shape [batch, ..., channels] ; ... is treated as time.
    num_layers: How many SRU layers; default is 2 as results for 1 disappoint.
    activation: Optional activation function, try tf.nn.tanh or tf.nn.relu.
    initial_state: Optional initial c-state, set to zeros if None.
    name: Optional name, "sru" by default.
    reuse: Optional reuse.

  Returns:
    A tensor of the same shape as x.

  Raises:
    ValueError: if num_layers is not positive.
  """
  if num_layers < 1:
    raise ValueError("Number of layers must be positive: %d" % num_layers)
  if is_xla_compiled():  # On TPU the XLA does a good job with while.
    return sru_with_scan(x, num_layers, activation, initial_state, name, reuse)
  try:
    from tensorflow.contrib.recurrent.python.ops import functional_rnn  # pylint: disable=g-import-not-at-top
  except ImportError:
    tf.logging.info("functional_rnn not found, using sru_with_scan instead")
    return sru_with_scan(x, num_layers, activation, initial_state, name, reuse)

  with tf.variable_scope(name, default_name="sru", values=[x], reuse=reuse):
    # We assume x is [batch, ..., channels] and treat all ... as time.
    x_shape = shape_list(x)
    x = tf.reshape(x, [x_shape[0], -1, x_shape[-1]])
    if initial_state is None:
      # Bug fix: `initial_state or tf.zeros(...)` would call
      # Tensor.__bool__, which raises in graph mode whenever a caller
      # actually supplies a state tensor; compare against None instead.
      initial_state = tf.zeros([x_shape[0], x_shape[-1]])
    # NOTE(review): functional_rnn below is not passed `initial_state`
    # explicitly — presumably it starts from cell.zero_state, so a supplied
    # initial state only sets the cell's output size; confirm intent.
    cell = CumsumprodCell(initial_state)
    # Calculate SRU on each layer.
    for i in range(num_layers):
      # The parallel part of the SRU: one dense projection yields x', f, r.
      x_orig = x
      x, f, r = tf.split(
          tf.layers.dense(x, 3 * x_shape[-1], name="kernel_%d" % i), 3, axis=-1)
      f, r = tf.sigmoid(f), tf.sigmoid(r)
      x_times_one_minus_f = x * (1.0 - f)  # Compute in parallel for speed.
      # Calculate states via the functional RNN over the packed inputs.
      concat = tf.concat([x_times_one_minus_f, f], axis=-1)
      c_states, _ = functional_rnn.functional_rnn(
          cell, concat, time_major=False)
      # Final output: highway-style mix of the cell state and the layer input.
      if activation is not None:
        c_states = activation(c_states)
      h = c_states * r + (1.0 - r) * x_orig
      x = h  # Next layer.
    return tf.reshape(x, x_shape)
def linear_set_layer(layer_size,
                     inputs,
                     context=None,
                     activation_fn=tf.nn.relu,
                     dropout=0.0,
                     name=None):
  """Basic layer type for doing funky things with sets.

  Applies a linear transformation to each element in the input set.
  If a context is supplied, it is concatenated with the inputs.
    e.g. One can use global_pool_1d to get a representation of the set which
    can then be used as the context for the next layer.

  TODO: Add bias add (or control the biases used).

  Args:
    layer_size: Dimension to transform the input vectors to.
    inputs: A tensor of shape [batch_size, sequence_length, input_dims]
      containing the sequences of input vectors.
    context: A tensor of shape [batch_size, context_dims] containing a global
      statistic about the set.
    activation_fn: The activation function to use.
    dropout: Dropout probability.
    name: name.

  Returns:
    Tensor of shape [batch_size, sequence_length, output_dims] containing the
    sequences of transformed vectors.
  """
  with tf.variable_scope(
      name, default_name="linear_set_layer", values=[inputs]):
    # A kernel-size-1 conv applies the same linear map to every set element.
    outputs = conv1d(inputs, layer_size, 1, activation=None, name="set_conv")

    if context is not None:
      # Broadcasting via concat is unsupported in tf, but adding the
      # separately-transformed context is equivalent for a linear layer.
      if len(context.get_shape().as_list()) == 2:
        context = tf.expand_dims(context, axis=1)
      transformed_context = conv1d(
          context, layer_size, 1, activation=None, name="cont_conv")
      outputs += transformed_context

    if activation_fn is not None:
      outputs = activation_fn(outputs)

    if dropout != 0.0:
      outputs = tf.nn.dropout(outputs, 1.0 - dropout)

    return outputs
def ravanbakhsh_set_layer(layer_size,
                          inputs,
                          mask=None,
                          sequential=False,
                          activation_fn=tf.nn.tanh,
                          dropout=0.0,
                          name=None):
  """Layer from Deep Sets paper: https://arxiv.org/abs/1611.04500 .

  More parameter-efficient version of a linear-set-layer with context.

  Args:
    layer_size: Dimension to transform the input vectors to.
    inputs: A tensor of shape [batch_size, sequence_length, vector]
      containing the sequences of input vectors.
    mask: A tensor of shape [batch_size, sequence_length] containing a
      mask for the inputs with 1's for existing elements, and 0's elsewhere.
    sequential: If true, will use a running global pool so each element will
      only depend on those before it. Set true if this layer is being used in
      an output sequence.
    activation_fn: The activation function to use.
    dropout: dropout.
    name: name.

  Returns:
    Tensor of shape [batch_size, sequence_length, vector] containing the
    sequences of transformed vectors.
  """
  del dropout  # Unused.
  with tf.variable_scope(name, "ravanbakhsh_set_layer", [inputs]):
    # Subtract a pooled summary of the set from each element, then apply a
    # shared linear layer — equivalent to a linear-set-layer with context.
    if sequential:
      pooled = running_global_pool_1d(inputs)
    else:
      pooled = tf.expand_dims(global_pool_1d(inputs, mask=mask), axis=1)
    return linear_set_layer(
        layer_size, inputs - pooled, activation_fn=activation_fn, name=name)
def fn_device_dependency_dict():
  """State container for fn_device_dependency, stored on the default graph."""
  graph = tf.get_default_graph()
  if not hasattr(graph, "dependency_dict"):
    graph.dependency_dict = defaultdict(list)
  return graph.dependency_dict
@contextlib.contextmanager
def fn_device_dependency(name, device=""):
  """Add control deps for name and device.

  Yields a list `outs`; the caller must append its output tensor(s) to it
  before the block exits.  Those tensors become the control dependencies for
  the next use of the same (name, device) key.
  """
  key = name + "_" + device
  outs = []

  def body():
    with tf.control_dependencies(fn_device_dependency_dict()[key]):
      yield outs
      assert outs

      deps = outs
      if isinstance(outs[0], (list, tuple)):
        assert len(outs) == 1
        deps = outs[0]
      fn_device_dependency_dict()[key] = deps

  # `body` is a generator function; the outer function returns its generator,
  # which @contextmanager then drives.  This indirection exists so that graph
  # construction can optionally be wrapped in a tf.device scope.
  if device:
    with tf.device(device):
      return body()
  else:
    return body()
def underlying_variable_ref(t):
  """Find the underlying variable ref.

  Traverses through Identity, ReadVariableOp, and Enter ops.
  Stops when op type has Variable or VarHandle in name.

  Args:
    t: a Tensor

  Returns:
    a Tensor that is a variable ref, or None on error.
  """
  # Walk back through pass-through ops to the producing op.
  while t.op.type in ("Identity", "ReadVariableOp", "Enter"):
    t = t.op.inputs[0]
  op_type = t.op.type
  if "Variable" not in op_type and "VarHandle" not in op_type:
    return None
  return t
def underlying_variable(t):
  """Find the underlying tf.Variable object.

  Args:
    t: a Tensor

  Returns:
    tf.Variable.
  """
  t = underlying_variable_ref(t)
  assert t is not None
  # make sure that the graph has a variable index and that it is up-to-date
  if not hasattr(tf.get_default_graph(), "var_index"):
    tf.get_default_graph().var_index = {}
  var_index = tf.get_default_graph().var_index
  # Only variables created since the last call need to be added to the index.
  for v in tf.global_variables()[len(var_index):]:
    var_index[v.name] = v
  return var_index[t.name]
def approximate_split(x, num_splits, axis=0):
  """Split approximately equally into num_splits parts.

  Args:
    x: a Tensor
    num_splits: an integer
    axis: an integer.

  Returns:
    a list of num_splits Tensors.
  """
  size = shape_list(x)[axis]
  # floor((size + i) / num_splits) for i in [0, num_splits): part sizes differ
  # by at most one and sum to `size`, with later parts taking the remainder.
  part_sizes = [tf.div(size + i, num_splits) for i in range(num_splits)]
  return tf.split(x, part_sizes, axis=axis)
class FactoredTensor(object):
  """A concise factored representation of Tensor as two tensors.

  This class represents the tensor tf.matmul(a, b, transpose_b=True)
  by storing the values of Tensors a and b.

  The reason for this is that the product may be too big to fully realize at
  once, so it can be realized a part at a time.

  "a" may have extra leading dimensions, in which case they are flattened out
  before computing the matrix product, then re-expanded afterwards.
  """

  def __init__(self, a, b):
    self._a = a
    self._b = b

  @property
  def a(self):
    return self._a

  @property
  def b(self):
    return self._b

  def to_tensor(self):
    """Materialize as a single Tensor: tf.matmul(a, b, transpose_b=True)."""
    a_shape = shape_list(self.a)
    b_shape = shape_list(self.b)
    inner_dim = b_shape[1]
    result_dim = b_shape[0]
    # Flatten any extra leading dims of `a`, multiply, then restore them.
    flat_a = tf.reshape(self.a, [-1, inner_dim])
    product = tf.matmul(flat_a, self.b, transpose_b=True)
    product = tf.reshape(product, a_shape[:-1] + [result_dim])
    # Propagate whatever static shape information is available.
    product.set_shape(
        self.a.get_shape().as_list()[:-1] + [self.b.get_shape()[0]])
    return product
def _convert_factored_tensor_to_tensor(value, *args, **kwargs):
  """Materializes a FactoredTensor for use where a plain Tensor is expected."""
  # call ops.convert_to_tensor to handle optional arguments appropriately
  return ops.internal_convert_to_tensor(value.to_tensor(), *args, **kwargs)


# Registering lets TF ops accept a FactoredTensor anywhere a Tensor is
# expected; the factored product is materialized on conversion.
tf.register_tensor_conversion_function(FactoredTensor,
                                       _convert_factored_tensor_to_tensor)
def smoothing_cross_entropy_factored_grad(op, dy):
  """Gradient function for smoothing_cross_entropy_factored.

  Recomputes the forward logits one split at a time so the full
  [batch, vocab_size] logits matrix is never materialized.

  Args:
    op: the forward op; its inputs are (a, b, labels, confidence).
    dy: gradient with respect to the op's output.

  Returns:
    Gradients for (a, b, labels, confidence); the last two are None since
    labels and confidence are not differentiable.
  """
  a = op.inputs[0]
  b = op.inputs[1]
  labels = op.inputs[2]
  confidence = op.inputs[3]
  num_splits = 16
  vocab_size = shape_list(b)[0]
  labels = approximate_split(labels, num_splits)
  a = approximate_split(a, num_splits)
  dy = approximate_split(dy, num_splits)
  b_grad = None
  a_grad_parts = []
  deps = []
  for part in range(num_splits):
    # Control deps serialize the parts so only one split's logits (and their
    # gradients) are live at a time, bounding peak memory.
    with tf.control_dependencies(deps):
      logits = tf.matmul(a[part], b, transpose_b=True)
      output_part = smoothing_cross_entropy(logits, labels[part], vocab_size,
                                            confidence)
      a_grad_part, b_grad_part = tf.gradients(
          ys=[output_part], xs=[a[part], b], grad_ys=[dy[part]])
      a_grad_parts.append(a_grad_part)
      # b is shared across parts, so its gradient accumulates.
      if part > 0:
        b_grad += b_grad_part
      else:
        b_grad = b_grad_part
      deps = [b_grad, a_grad_part]
  a_grad = tf.concat(a_grad_parts, 0)
  return a_grad, b_grad, None, None
@function.Defun(
    noinline=True,
    python_grad_func=smoothing_cross_entropy_factored_grad,
    compiled=True,
    separate_compiled_gradients=True)
def smoothing_cross_entropy_factored(a, b, labels, confidence):
  """Memory-efficient computation of smoothing cross-entropy.

  Avoids realizing the entire logits matrix at once.

  Args:
    a: a Tensor with shape [batch, inner_dim]
    b: a Tensor with shape [vocab_size, inner_dim]
    labels: an integer Tensor with shape [batch]
    confidence: a float

  Returns:
    A Tensor with shape [batch]
  """
  num_splits = 16
  vocab_size = shape_list(b)[0]
  labels = approximate_split(labels, num_splits)
  a = approximate_split(a, num_splits)
  parts = []
  for part in range(num_splits):
    # Depending on the previous part serializes the splits, so only one
    # split's [batch/num_splits, vocab_size] logits block is live at a time.
    with tf.control_dependencies(parts[-1:]):
      logits = tf.matmul(a[part], b, transpose_b=True)
      parts.append(
          smoothing_cross_entropy(logits, labels[part], vocab_size, confidence))
  return tf.concat(parts, 0)
def padded_cross_entropy_factored(factored_logits,
                                  labels,
                                  label_smoothing,
                                  weights_fn=weights_nonzero,
                                  reduce_sum=True):
  """Memory-efficient computation of smoothing cross-entropy.

  Avoids realizing the entire logits matrix at once.

  Args:
    factored_logits: a `FactoredTensor` representing a Tensor
      with shape `[batch, timesteps, vocab_size]`.
    labels: an integer `Tensor` with shape `[batch, timesteps]`.
    label_smoothing: a floating point `Scalar`.
    weights_fn: A function from labels to weights.
    reduce_sum: a Boolean, whether to sum at the end or not.

  Returns:
    loss_numerator: a `Scalar`.  Sum of losses.
    loss_denominator: a `Scalar.  The number of non-padding target tokens.
  """
  a = factored_logits.a
  b = factored_logits.b
  confidence = 1.0 - label_smoothing
  with tf.name_scope("padded_cross_entropy_factored", values=[a, b, labels]):
    # Flatten the batch/time axes so the factored kernel sees [batch, dim].
    labels_flat = tf.reshape(labels, [-1])
    a_flat = tf.reshape(a, [-1, shape_list(b)[1]])
    xent = smoothing_cross_entropy_factored(a_flat, b, labels_flat,
                                            tf.convert_to_tensor(confidence))
    xent = tf.reshape(xent, shape_list(labels))
    weights = weights_fn(labels)
    if reduce_sum:
      return tf.reduce_sum(xent * weights), tf.reduce_sum(weights)
    return xent * weights, weights
def fn_with_custom_grad(grad_fn, use_global_vars=False):
  """Decorator to create a subgraph with a custom gradient function.

  The subgraph created by the decorated function is NOT put in a Defun and so
  does not suffer from the limitations of the Defun (all subgraph ops on the
  same device, no summaries).

  Args:
    grad_fn: function with signature
      (inputs, variables, outputs, output_grads) -> (grad_inputs, grad_vars),
      all of which are lists of Tensors.
    use_global_vars: if True, variables will be the global variables created.
      If False, will be the trainable variables.

  Returns:
    Decorator for function such that the gradient is defined by grad_fn.
  """

  def dec(fn):

    # functools.wraps preserves fn's name/docstring on the wrapper.
    @functools.wraps(fn)
    def wrapped(*args):
      return _fn_with_custom_grad(
          fn, args, grad_fn, use_global_vars=use_global_vars)

    return wrapped

  return dec
def _fn_with_custom_grad(fn, inputs, grad_fn, use_global_vars=False):
  """Create a subgraph with a custom gradient.

  Args:
    fn: function that takes inputs as arguments and produces 1 or more Tensors.
    inputs: list<Tensor>, will be passed as fn(*inputs).
    grad_fn: function with signature
      (inputs, vars, outputs, output_grads) -> (grad_inputs, grad_vars),
      all of which are lists of Tensors.
    use_global_vars: if True, variables will be the global variables created.
      If False, will be the trainable variables.

  Returns:
    fn(*inputs)
  """
  vs = tf.get_variable_scope()
  get_vars_fn = (
      vs.global_variables if use_global_vars else vs.trainable_variables)
  # Record the variable count before fn runs so we can identify exactly the
  # variables fn created.
  len_before_vars = len(get_vars_fn())
  inputs = list(inputs)
  outputs = fn(*inputs)
  train_vars = get_vars_fn()[len_before_vars:]

  if grad_fn is None:
    return outputs

  if not isinstance(outputs, (tuple, list)):
    outputs = [outputs]
  outputs = list(outputs)

  defun_inputs = [inputs, train_vars, outputs]

  def custom_grad_fn(op, *dys):
    """Custom grad fn applying grad_fn for identity Defun."""
    # Recover the (inputs, vars, outputs) structure from the flat op inputs.
    fn_inputs, fn_vars, fn_outputs = tf.contrib.framework.nest.pack_sequence_as(
        defun_inputs, list(op.inputs))
    dys = list(dys)
    assert len(fn_outputs) == len(outputs)
    assert len(fn_outputs) == len(dys)

    grad_inputs, grad_vars = grad_fn(fn_inputs, fn_vars, fn_outputs, dys)
    # The outputs passed through the identity get no gradient of their own.
    grad_outputs = [None] * len(fn_outputs)
    return tuple(grad_inputs + grad_vars + grad_outputs)

  # The Defun takes as input the original inputs, the trainable variables
  # created in fn, and the outputs. In the forward it passes through the
  # outputs. In the backwards, it produces gradients for the original inputs
  # and the trainable variables.
  in_types = [t.dtype for t in inputs]
  out_types = [t.dtype for t in outputs]
  var_types = [t.dtype for t in train_vars]

  # Each Defun needs a distinct name; ops.uid() guarantees uniqueness.
  @function.Defun(
      *(in_types + var_types + out_types),
      func_name="identity_custom_grad%d" % ops.uid(),
      python_grad_func=custom_grad_fn,
      shape_func=lambda _: [t.get_shape() for t in outputs])
  def identity(*args):
    _, _, outs = tf.contrib.framework.nest.pack_sequence_as(defun_inputs, args)
    return tuple([tf.identity(t) for t in outs])

  flat_inputs = tf.contrib.framework.nest.flatten(defun_inputs)
  id_out = identity(*flat_inputs)
  return id_out
_function_cache = {}
def conv_hidden_relu_memory_efficient(x,
                                      filter_size,
                                      epsilon=1e-6,
                                      forget=True,
                                      test_vars=None,
                                      name=None):
  """LayerNorm, Conv, ReLU, Conv.

  All convolutions have kernel size 1.

  returns conv(relu(conv(layer_norm(x))))

  Args:
    x: input Tensor with shape [batch, length, io_size]
    filter_size: an integer - size of the hidden layer.
    epsilon: a float (for layer norm)
    forget: a boolean - forget forwards activations and recompute on backprop
    test_vars: optional tuple of variables for testing purposes
    name: an optional string

  Returns:
    a Tensor with shape [batch, length, io_size]
  """
  io_size = x.get_shape().as_list()[-1]
  def forward_internal(x, f1, f2, scale, bias):
    """Forward function."""
    # Split batch-wise to avoid exhausting memory in case the batch is large
    # and the hidden layer is large.
    num_splits = 4
    x_flat = tf.reshape(x, [-1, 1, shape_list(x)[2]])
    xs = approximate_split(x_flat, num_splits)
    ys = []
    for i in range(num_splits):
      # Serialize the splits via control deps to bound peak memory.
      with tf.control_dependencies(ys[-1:]):
        n = layer_norm_compute(xs[i], epsilon, scale, bias)
        y = tf.nn.conv1d(n, f1, 1, "SAME")
        y = tf.nn.relu(y)
        y = tf.nn.conv1d(y, f2, 1, "SAME")
        ys.append(y)
    y = tf.concat(ys, 0)
    y = tf.reshape(y, shape_list(x))
    return y
  key = ("conv_hidden_relu_memory_efficient %s" % epsilon)
  if not forget:
    forward_fn = forward_internal
  elif key in _function_cache:
    forward_fn = _function_cache[key]
  else:
    @function.Defun(compiled=True)
    def grad_fn(x, f1, f2, scale, bias, dy):
      """Gradient for efficiency."""
      with tf.control_dependencies([dy]):
        num_splits = 4
        x_shape = shape_list(x)
        flat_shape = [-1, 1, x_shape[2]]
        x = tf.reshape(x, flat_shape)
        dy = tf.reshape(dy, flat_shape)
        xs = approximate_split(x, num_splits)
        dys = approximate_split(dy, num_splits)
        dxs = []
        df1 = 0
        df2 = 0
        dscale = 0
        dbias = 0
        deps = []
        for i in range(num_splits):
          # Recompute the forward activations for this split, then backprop
          # through them; control deps serialize the splits.
          with tf.control_dependencies(deps):
            n = layer_norm_compute(xs[i], epsilon, scale, bias)
            y = tf.nn.conv1d(n, f1, 1, "SAME")
            y = tf.nn.relu(y)
            y = tf.nn.conv1d(y, f2, 1, "SAME")
            dxi, pdf1, pdf2, pdscale, pdbias = tf.gradients(
                ys=[y], xs=[xs[i], f1, f2, scale, bias], grad_ys=[dys[i]])
            df1 += pdf1
            df2 += pdf2
            dscale += pdscale
            dbias += pdbias
            dxs.append(dxi)
            deps = [dxi, df1, df2, dscale, dbias]
        with tf.control_dependencies(deps):
          dx = tf.concat(dxs, 0)
          dx = tf.reshape(dx, x_shape)
          return dx, df1, df2, dscale, dbias
    @function.Defun(
        grad_func=grad_fn, compiled=True, separate_compiled_gradients=True)
    def forward_fn(x, f1, f2, scale, bias):
      return forward_internal(x, f1, f2, scale, bias)
    # BUG FIX: the cache was checked above but never populated, so every call
    # rebuilt a fresh Defun. Store the compiled forward function for reuse.
    _function_cache[key] = forward_fn
  with tf.variable_scope(name, default_name="ffn2", values=[x]):
    # TODO(noam): it would be nice to save memory by casting x to float16
    # here, but this causes problems with the gradients.  Figure out if there
    # is a way to leave the gradients as float32.
    if test_vars is not None:
      f1, f2, scale, bias = list(test_vars)
    else:
      f1 = tf.get_variable("f1", [1, io_size, filter_size])
      f2 = tf.get_variable("f2", [1, filter_size, io_size])
      scale, bias = layer_norm_vars(io_size)
    if forget:
      y = forward_fn(x, f1, f2, scale, bias)
    else:
      y = forward_internal(x, f1, f2, scale, bias)
    y.set_shape(x.get_shape())
    return y
def shape_list(x):
  """Return the shape of x as a list, using static dims where known.

  Dimensions whose static size is unknown are replaced by the corresponding
  entry of the dynamic tf.shape tensor.
  """
  x = tf.convert_to_tensor(x)
  if x.get_shape().dims is None:
    # The rank itself is unknown: only the fully dynamic shape is available.
    return tf.shape(x)
  dynamic = tf.shape(x)
  return [
      dynamic[axis] if size is None else size
      for axis, size in enumerate(x.get_shape().as_list())
  ]
def list_product(els):
  """Multiply all elements of a sequence together.

  Previously this raised IndexError on an empty sequence; it now returns the
  multiplicative identity (1), which is the conventional empty product and is
  backward-compatible for all non-empty inputs.

  Args:
    els: an iterable of values supporting multiplication (e.g. ints or
      Tensors).

  Returns:
    The product of all elements, or 1 if els is empty.
  """
  prod = 1
  for el in els:
    prod = prod * el
  return prod
def sample_with_temperature(logits, temperature):
  """Either argmax or random sampling.

  Args:
    logits: a Tensor.
    temperature: a float  0.0=argmax 1.0=random

  Returns:
    a Tensor with one fewer dimension than logits.
  """
  vocab_size = shape_list(logits)[-1]
  # TF argmax/multinomial don't handle >5 dimensions, so flatten first.
  flat_logits = tf.reshape(logits, [-1, vocab_size])
  if temperature == 0.0:
    best = tf.argmax(flat_logits, axis=1)
    return tf.reshape(best, shape_list(logits)[:-1])
  assert temperature > 0.0
  samples = tf.multinomial(flat_logits / temperature, 1)
  return tf.reshape(samples,
                    shape_list(logits)[:logits.get_shape().ndims - 1])
def ones_matrix_band_part(rows, cols, num_lower, num_upper, out_shape=None):
  """Matrix band part of ones.

  Args:
    rows: int determining number of rows in output
    cols: int
    num_lower: int, maximum distance backward. Negative values indicate
      unlimited.
    num_upper: int, maximum distance forward. Negative values indicate
      unlimited.
    out_shape: shape to reshape output by.

  Returns:
    Tensor of size rows * cols reshaped into shape out_shape.
  """
  all_static = all(
      isinstance(el, int) for el in [rows, cols, num_lower, num_upper])
  if all_static:
    # Everything needed is known at graph-construction time: build in numpy.
    lower = rows - 1 if num_lower < 0 else num_lower
    upper = cols - 1 if num_upper < 0 else num_upper
    band = (np.ones((rows, cols)) *
            np.tri(cols, rows, lower).T *
            np.tri(rows, cols, upper))
    if out_shape:
      band = band.reshape(out_shape)
    band = tf.constant(band, tf.float32)
  else:
    band = tf.matrix_band_part(
        tf.ones([rows, cols]), tf.cast(num_lower, tf.int64),
        tf.cast(num_upper, tf.int64))
    if out_shape:
      band = tf.reshape(band, out_shape)
  return band
def reshape_like_all_dims(a, b):
  """Reshape a so it has exactly the (dynamic and static) shape of b."""
  result = tf.reshape(a, tf.shape(b))
  in_graph_mode = not tf.contrib.eager.in_eager_mode()
  if in_graph_mode:
    # Propagate b's static shape info, which tf.reshape alone would lose.
    result.set_shape(b.get_shape())
  return result
def recompute_grad(fn):
  """Decorator that recomputes the function on the backwards pass.

  Args:
    fn: a function that takes Tensors (all as positional arguments) and returns
      a tuple of Tensors.

  Returns:
    A wrapped fn that is identical to fn when called, but its activations will
    be discarded and recomputed on the backwards pass (i.e. on a call to
    tf.gradients).
  """

  @functools.wraps(fn)
  def inner(*call_args):
    return _recompute_grad(fn, call_args)

  return inner
def _recompute_grad(fn, args):
  """See recompute_grad.

  Wires fn through fn_with_custom_grad with a grad_fn that re-runs fn in the
  backward pass (inside the original variable scope and arg scope) instead of
  keeping its forward activations alive.
  """
  # One-element lists act as mutable cells: the forward call records the
  # variable scope / arg scope it ran under so the backward recomputation can
  # reproduce them exactly.
  cached_vs = []
  cached_arg_scope = []
  def grad_fn(inputs, variables, outputs, output_grads):
    """Recompute outputs for gradient computation."""
    del outputs
    variables = [underlying_variable_ref(v) for v in variables]
    # Recompute outputs
    with tf.control_dependencies(output_grads):
      with tf.contrib.framework.arg_scope(cached_arg_scope[0]):
        # reuse=True so the recomputation reads the same variables.
        with tf.variable_scope(cached_vs[0], reuse=True):
          outputs = fn(*inputs)
    if not isinstance(outputs, (list, tuple)):
      outputs = [outputs]
    outputs = list(outputs)
    grads = tf.gradients(outputs, inputs + variables, output_grads)
    grad_inputs = grads[:len(inputs)]
    grad_vars = grads[len(inputs):]
    # TODO(rsepassi): Make fn_with_custom_grad work with bfloat16.
    # If the input gradients are bfloat16, it's assumed the variables are
    # bfloat16. This is a hack to ensure that grad_vars are the right type.
    if grad_inputs[0].dtype == tf.bfloat16:
      grad_vars = [tf.cast(grad_var, tf.bfloat16) for grad_var in grad_vars]
    return grad_inputs, grad_vars
  @fn_with_custom_grad(grad_fn)
  def fn_with_recompute(*args):
    cached_vs.append(tf.get_variable_scope())
    cached_arg_scope.append(tf.contrib.framework.current_arg_scope())
    return fn(*args)
  return fn_with_recompute(*args)
def dense(x, units, **kwargs):
  """Identical to tf.layers.dense.

  Thin pass-through wrapper — presumably kept so this codebase has a single
  indirection point for dense layers (TODO confirm intent with maintainers).

  Args:
    x: input Tensor.
    units: int, number of output units.
    **kwargs: forwarded verbatim to tf.layers.dense.

  Returns:
    The tf.layers.dense output Tensor.
  """
  return tf.layers.dense(x, units, **kwargs)
def batch_dense(inputs,
                units,
                activation=None,
                kernel_initializer=None,
                reuse=None,
                name=None):
  """Multiply a batch of input matrices by a batch of parameter matrices.

  Each input matrix is multiplied by the corresponding parameter matrix.
  This is useful in a mixture-of-experts where the batch represents different
  experts with different inputs.

  Args:
    inputs: a Tensor with shape [batch, length, input_units]
    units: an integer
    activation: an optional activation function to apply to the output
    kernel_initializer: an optional initializer
    reuse: whether to reuse the variable scope
    name: an optional string

  Returns:
    a Tensor with shape [batch, length, units]

  Raises:
    ValueError: if the "batch" or "input_units" dimensions of inputs are not
      statically known.
  """
  inputs_shape = shape_list(inputs)
  if len(inputs_shape) != 3:
    raise ValueError("inputs must have 3 dimensions")
  batch, _, input_units = inputs_shape
  # The per-expert kernel variable needs a fully static shape.
  if not isinstance(batch, int) or not isinstance(input_units, int):
    raise ValueError("inputs must have static dimensions 0 and 2")
  with tf.variable_scope(
      name,
      default_name="batch_dense",
      values=[inputs],
      reuse=reuse,
      dtype=inputs.dtype):
    init = kernel_initializer
    if init is None:
      init = tf.random_normal_initializer(stddev=input_units**-0.5)
    kernel = tf.get_variable(
        "w", [batch, input_units, units],
        initializer=init,
        dtype=inputs.dtype)
    output = tf.matmul(inputs, kernel)
    return output if activation is None else activation(output)
def mix(x1,
        x2,
        steps,
        is_training,
        min_prob=0.0,
        max_prob=1.0,
        mode="lin",
        simple=False,
        broadcast_last=False):
  """Mix x1 and x2, starting with x2 and moving towards x1 over `steps` steps.

  Args:
    x1: target Tensor (fully used once the schedule saturates).
    x2: starting Tensor, same shape as x1.
    steps: int, number of training steps over which the mixing probability
      ramps from min_prob to max_prob.
    is_training: bool, whether we are in training mode.
    min_prob: float, initial probability of choosing x1 elements.
    max_prob: float, final probability of choosing x1 elements.
    mode: "lin" for inverse_lin_decay schedule, anything else uses
      inverse_exp_decay.
    simple: if True, blend deterministically by the scalar probability instead
      of sampling a per-element Bernoulli mask.
    broadcast_last: if True, sample one mask value per position, broadcast
      across the last (feature) dimension.

  Returns:
    a Tensor with the same shape as x1/x2.
  """
  with tf.name_scope("mix"):
    if not is_training:
      if max_prob >= 1.0:
        return x1
      # At eval time with max_prob < 1, sample the mask at the final rate.
      alpha_shape = shape_list(x1)
      if broadcast_last:
        alpha_shape = alpha_shape[:-1] + [1]
      alpha = tf.random_uniform(alpha_shape)
      alpha = tf.to_float(tf.less(alpha, max_prob))
      return alpha * x1 + (1.0 - alpha) * x2
    def get_res():
      """Create the result.

      Separate function to speed it up later (see below).

      Returns:
        Tensor of mixed inputs.
      """
      if mode == "lin":
        alpha_p = inverse_lin_decay(steps)
      else:
        alpha_p = inverse_exp_decay(steps)
      # Rescale the [0, 1] schedule into [min_prob, max_prob].
      alpha_p = alpha_p * (max_prob - min_prob) + min_prob
      if simple:
        return alpha_p * x1 + (1.0 - alpha_p) * x2
      alpha_shape = shape_list(x1)
      if broadcast_last:
        alpha_shape = alpha_shape[:-1] + [1]
      alpha = tf.random_uniform(alpha_shape)
      alpha = tf.to_float(tf.less(alpha, alpha_p))
      return alpha * x1 + (1.0 - alpha) * x2
    if max_prob < 1.0:
      return get_res()
    # Prevent sampling after steps is passed to speed it up.
    if is_xla_compiled():
      # tf.cond is problematic under XLA; always take the sampled path.
      return get_res()
    else:
      cur_step = tf.train.get_global_step()
      if cur_step is None:
        return x1  # Step not available, probably eval mode, don't mix.
      return tf.cond(tf.less(cur_step, steps), get_res, lambda: x1)
def brelu(x):
  """Bipolar ReLU as in https://arxiv.org/abs/1709.04054."""
  orig_shape = shape_list(x)
  # Pair up the last dimension: one half gets ReLU, the other -ReLU(-x).
  pos, neg = tf.split(tf.reshape(x, orig_shape[:-1] + [-1, 2]), 2, axis=-1)
  activated = tf.concat([tf.nn.relu(pos), -tf.nn.relu(-neg)], axis=-1)
  return tf.reshape(activated, orig_shape)
def belu(x):
  """Bipolar ELU as in https://arxiv.org/abs/1709.04054."""
  orig_shape = shape_list(x)
  # Pair up the last dimension: one half gets ELU, the other -ELU(-x).
  pos, neg = tf.split(tf.reshape(x, orig_shape[:-1] + [-1, 2]), 2, axis=-1)
  activated = tf.concat([tf.nn.elu(pos), -tf.nn.elu(-neg)], axis=-1)
  return tf.reshape(activated, orig_shape)
def nac(x, depth, name=None, reuse=None):
  """NAC as in https://arxiv.org/abs/1808.00508."""
  with tf.variable_scope(name, default_name="nac", values=[x], reuse=reuse):
    x_shape = shape_list(x)
    w_hat = tf.get_variable("w", [x_shape[-1], depth])
    m_hat = tf.get_variable("m", [x_shape[-1], depth])
    # The tanh * sigmoid product biases effective weights towards {-1, 0, 1}.
    weights = tf.tanh(w_hat) * tf.nn.sigmoid(m_hat)
    flat_out = tf.matmul(tf.reshape(x, [-1, x_shape[-1]]), weights)
    return tf.reshape(flat_out, x_shape[:-1] + [depth])
def nalu(x, depth, epsilon=1e-30, name=None, reuse=None):
  """NALU as in https://arxiv.org/abs/1808.00508."""
  with tf.variable_scope(name, default_name="nalu", values=[x], reuse=reuse):
    x_shape = shape_list(x)
    flat_x = tf.reshape(x, [-1, x_shape[-1]])
    # Gate between the additive (NAC) path and the multiplicative path.
    gate_w = tf.get_variable("w", [x_shape[-1], depth])
    gate = tf.nn.sigmoid(tf.matmul(flat_x, gate_w))
    gate = tf.reshape(gate, x_shape[:-1] + [depth])
    add_path = nac(x, depth, name="nac_lin")
    # Multiplication realized as addition in log space; epsilon avoids log(0).
    log_x = tf.log(tf.abs(x) + epsilon)
    mul_path = tf.exp(nac(log_x, depth, name="nac_log"))
    return gate * add_path + (1 - gate) * mul_path
def argmax_with_score(logits, axis=None):
  """Argmax along with the value."""
  # NOTE: a falsy axis (None or 0) is replaced by the last axis.
  axis = axis or len(logits.get_shape()) - 1
  predictions = tf.argmax(logits, axis=axis)
  logits_shape = shape_list(logits)
  prefix_shape, vocab_size = logits_shape[:-1], logits_shape[-1]
  prefix_size = 1
  for dim in prefix_shape:
    prefix_size *= dim
  # Flatten so the selected logit can be gathered row by row.
  flat_logits = tf.reshape(logits, [prefix_size, vocab_size])
  flat_preds = tf.reshape(predictions, [prefix_size])
  row_ids = tf.range(tf.to_int64(prefix_size))
  gather_idx = tf.stack([row_ids, tf.to_int64(flat_preds)], axis=1)
  flat_scores = tf.gather_nd(flat_logits, gather_idx)
  # Restore the prefix shape for the scores.
  return predictions, tf.reshape(flat_scores, prefix_shape)
def log_prob_from_logits(logits, reduce_axis=-1):
  """Normalize logits into log-probabilities along reduce_axis."""
  log_z = tf.reduce_logsumexp(logits, axis=reduce_axis, keepdims=True)
  return logits - log_z
def top_1_tpu(inputs):
  """find max and argmax over the last dimension.

  Works well on TPU

  Args:
    inputs: A tensor with shape [..., depth]

  Returns:
    values: a Tensor with shape [...]
    indices: a Tensor with shape [...]
  """
  max_vals = tf.reduce_max(inputs, axis=-1, keepdims=True)
  # Integer mask of the positions attaining the max; the argmax is recovered
  # as the largest masked index, so ties resolve to the highest index.
  hit_mask = tf.to_int32(tf.equal(max_vals, inputs))
  masked_idx = tf.range(tf.shape(inputs)[-1]) * hit_mask
  return tf.squeeze(max_vals, -1), tf.reduce_max(masked_idx, axis=-1)
def index_last_dim_with_indices(x, indices):
  """Use indices to index into the last axis of x.

  This can be useful for recovering the actual probabilities of a sample from a
  probability distribution.

  Args:
    x: Tensor, n-d.
    indices: Tensor, (n-1)-d, where the dimension sizes match the first (n-1)
      dimensions of x. The values of indices will be used to index into the last
      axis of x.

  Returns:
    Tensor, (n-1)-d.
  """
  assert len(x.shape) == len(indices.shape) + 1
  x_shape = shape_list(x)
  vocab_size = x_shape[-1]
  prefix_size = list_product(x_shape[:-1])
  # Flatten everything but the vocab axis, gather one value per row.
  flat_x = tf.reshape(x, [prefix_size, vocab_size])
  flat_indices = tf.reshape(indices, [prefix_size])
  rows = tf.range(tf.to_int64(shape_list(flat_indices)[0]))
  gather_idx = tf.stack([rows, tf.to_int64(flat_indices)], axis=1)
  return tf.reshape(tf.gather_nd(flat_x, gather_idx), x_shape[:-1])
def should_generate_summaries():
  """Is this an appropriate context to generate summaries.

  Returns:
    a boolean
  """
  scope_name = tf.contrib.framework.get_name_scope()
  if scope_name and "while/" in scope_name:
    # Summaries don't work well within tf.while_loop()
    return False
  # Avoid generating separate summaries for different data shards
  return not tf.get_variable_scope().reuse
def reshape_like(a, b):
  """Reshapes a to match the shape of b in all but the last dimension."""
  target_shape = tf.concat([tf.shape(b)[:-1], tf.shape(a)[-1:]], 0)
  result = tf.reshape(a, target_shape)
  if not tf.contrib.eager.in_eager_mode():
    # Combine the static shape info of b (prefix) and a (last dim).
    static = b.get_shape().as_list()[:-1] + a.get_shape().as_list()[-1:]
    result.set_shape(static)
  return result
def summarize_video(video, prefix, max_outputs=1):
  """Summarize the video using image summaries starting with prefix."""
  video_shape = shape_list(video)
  if len(video_shape) != 5:
    raise ValueError("Assuming videos given as tensors in the format "
                     "[batch, time, height, width, channels] but got one "
                     "of shape: %s" % str(video_shape))
  if tf.contrib.eager.in_eager_mode():
    return
  def _frame_summary(name, frame):
    tf.summary.image(name, tf.cast(frame, tf.uint8), max_outputs=max_outputs)
  if video.get_shape().as_list()[1] is None:
    # Unknown number of frames: only summarize the last frame.
    _frame_summary("%s_last_frame" % prefix, video[:, -1, :, :, :])
  else:
    for frame_index in range(video_shape[1]):
      _frame_summary("%s_frame_%d" % (prefix, frame_index),
                     video[:, frame_index, :, :, :])
def cast_like(x, y):
  """Cast x to y's dtype, if necessary."""
  x = tf.convert_to_tensor(x)
  y = tf.convert_to_tensor(y)
  if x.dtype.base_dtype == y.dtype.base_dtype:
    return x
  result = tf.cast(x, y.dtype)
  if result.device != x.device:
    # A cross-device cast implies a copy; surface the cost in the logs.
    tf.logging.warning("Cast for %s may induce copy from '%s' to '%s'", x.name,
                       x.device, result.device)
  return result
def make_even_size(x):
  """Pad x to be even-sized on axis 1 and 2, but only if necessary."""
  x_shape = x.get_shape().as_list()
  assert len(x_shape) > 2, "Only 3+-dimensional tensors supported."
  # Unknown dims become -1; since (-1) % 2 == 1 in Python, unknown dims are
  # treated as possibly-odd and get padded below.
  shape = [dim if dim is not None else -1 for dim in x_shape]
  # NOTE: this aliases x_shape (no copy); the in-place updates below rewrite
  # the static dims to their rounded-up even values.
  new_shape = x_shape  # To make sure constant shapes remain constant.
  if x_shape[1] is not None:
    new_shape[1] = 2 * int(math.ceil(x_shape[1] * 0.5))
  if x_shape[2] is not None:
    new_shape[2] = 2 * int(math.ceil(x_shape[2] * 0.5))
  if shape[1] % 2 == 0 and shape[2] % 2 == 0:
    # Both axes already even: nothing to do.
    return x
  if shape[1] % 2 == 0:
    # Only axis 2 needs padding.
    x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=2)
    x.set_shape(new_shape)
    return x
  if shape[2] % 2 == 0:
    # Only axis 1 needs padding.
    x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=1)
    x.set_shape(new_shape)
    return x
  # Both axes need padding.
  x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=1)
  x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=2)
  x.set_shape(new_shape)
  return x
def sliced_gan_loss(input1,
                    input2,
                    discriminator,
                    num_vecs,
                    do_random_vecs=True,
                    do_tanh=True,
                    return_logits=False):
  """Loss inspired by the sliced WGAN paper: https://arxiv.org/abs/1804.01947.

  Puts input1 and input2 through the provided discriminator to get logits.
  Then, computes num_vecs random projections of the logits, sorts them on
  the batch dimension and returns the L2 loss between the sorted vectors.
  See the above-mentioned paper for the reasoning behind it.

  Args:
    input1: first discriminator inputs.
    input2: second discriminator inputs.
    discriminator: inputs -> logits function.
    num_vecs: how many random vectors to use for projections.
    do_random_vecs: whether to use random vectors or just tanh of the logits.
    do_tanh: if true (default) we'll also just use tanh of the logits.
    return_logits: Whether or not to return the logits.

  Returns:
    The generator loss, i.e., the sliced approximation of the distance between
    the projected distributions (warning: discriminator should maximize it).
  """
  with tf.variable_scope("sliced_gan"):
    # Both inputs go through the same discriminator variables (reuse=True on
    # the second call).
    with tf.variable_scope("discriminator"):
      logits1 = discriminator(input1)
    with tf.variable_scope("discriminator", reuse=True):
      logits2 = discriminator(input2)
    if do_random_vecs:
      # Unit-norm random projection directions, shared by both inputs.
      random_vecs = tf.nn.l2_normalize(
          tf.random_uniform([shape_list(logits1)[-1], num_vecs]), axis=0)
    def get_sorted_projections(x):
      """Make projections of x and sort them on the batch dimension."""
      x = tf.reshape(x, [-1, shape_list(x)[-1]])
      batch_size = shape_list(x)[0]
      if do_random_vecs and do_tanh:
        n = tf.nn.l2_normalize(x, axis=1)
        proj = tf.concat([tf.matmul(n, random_vecs), tf.tanh(n)], axis=1)
      elif do_random_vecs:
        n = tf.nn.l2_normalize(x, axis=1)
        proj = tf.matmul(n, random_vecs)
      else:
        proj = tf.tanh(x)
      proj = tf.transpose(proj, [1, 0])  # [num_vecs, batch] after this.
      if is_xla_compiled():
        proj_dtype = proj.dtype
        proj = tf.cast(proj, tf.bfloat16)
        # Currently TPU only supports 1-D top_k calls.
        map_fn = lambda x: tf.nn.top_k(x, k=batch_size, sorted=True)[0]
        values = tf.map_fn(map_fn, proj)
        values = tf.cast(values, proj_dtype)
      else:
        # top_k over the full batch == sort each projection descending.
        values, _ = tf.nn.top_k(proj, k=batch_size, sorted=True)
      return values
    proj1 = get_sorted_projections(logits1)
    proj2 = get_sorted_projections(logits2)
    # L2 distance between the sorted projections approximates the sliced
    # Wasserstein distance between the two logit distributions.
    dist = tf.reduce_mean(tf.square(proj1 - proj2))
    if return_logits:
      return dist, logits1, logits2
    return dist
def lrelu(input_, leak=0.2, name="lrelu"):
  """Leaky ReLU: elementwise max(x, leak * x)."""
  return tf.maximum(leak * input_, input_, name=name)
def deep_discriminator(x,
                       batch_norm,
                       is_training,
                       filters=64,
                       filter_size=4,
                       stride=2,
                       output_size=1024):
  """Discriminator architecture based on InfoGAN.

  Two strided convolutions (with optional batch norm) followed by a dense
  projection to output_size; leaky ReLU after each stage.

  Args:
    x: input Tensor [batch, height, width, channels].
    batch_norm: bool, whether to apply batch normalization.
    is_training: bool, passed to batch normalization.
    filters: int, filters of the first conv (second conv uses 2 * filters).
    filter_size: int, conv kernel size.
    stride: int, conv stride for both layers.
    output_size: int, size of the final dense layer.

  Returns:
    a Tensor of shape [batch, output_size].
  """
  with tf.variable_scope(
      "discriminator", initializer=tf.random_normal_initializer(stddev=0.02)):
    batch_size, height, width = shape_list(x)[:3]
    net = tf.layers.conv2d(
        x, filters, filter_size, strides=stride, padding="SAME", name="conv1")
    net = lrelu(net)
    net = tf.layers.conv2d(
        net,
        2 * filters,
        filter_size,
        strides=stride,
        padding="SAME",
        name="conv2")
    # [bs, h/4, w/4, 128]
    if batch_norm:
      net = tf.layers.batch_normalization(
          net, training=is_training, momentum=0.999, name="d_bn2")
    net = lrelu(net)
    size = height * width
    x_shape = x.get_shape().as_list()
    if x_shape[1] is None or x_shape[2] is None:
      # Dynamic spatial dims: fall back to global average pooling.
      net = tf.reduce_mean(net, axis=[1, 2])
    else:
      # NOTE(review): size * 8 == (h/4)*(w/4)*(2*filters) only holds for the
      # defaults filters=64 and stride=2 applied twice — confirm before
      # calling with other values, since the reshape would fail otherwise.
      net = tf.reshape(net, [batch_size, size * 8])
    net = tf.layers.dense(net, output_size, name="d_fc3")
    if batch_norm:
      net = tf.layers.batch_normalization(
          net, training=is_training, momentum=0.999, name="d_bn3")
    net = lrelu(net)
    return net
def instance_norm(x):
  """Instance normalization layer."""
  with tf.variable_scope("instance_norm"):
    epsilon = 1e-5
    channels = x.get_shape()[-1]
    # Per-sample, per-channel statistics over the spatial axes.
    mean, variance = tf.nn.moments(x, [1, 2], keep_dims=True)
    scale = tf.get_variable(
        "scale", [channels],
        initializer=tf.truncated_normal_initializer(mean=1.0, stddev=0.02))
    offset = tf.get_variable(
        "offset", [channels], initializer=tf.constant_initializer(0.0))
    normalized = tf.div(x - mean, tf.sqrt(variance + epsilon))
    return scale * normalized + offset
def general_conv(x,
                 num_filters=64,
                 filter_size=7,
                 stride=1,
                 stddev=0.02,
                 padding="VALID",
                 name="conv",
                 do_norm="instance",
                 do_relu=True,
                 relufactor=0):
  """Generalized convolution layer: conv + optional norm + optional (l)relu."""
  with tf.variable_scope(name):
    x = tf.layers.conv2d(
        x,
        num_filters,
        filter_size,
        stride,
        padding,
        activation=None,
        kernel_initializer=tf.truncated_normal_initializer(stddev=stddev),
        bias_initializer=tf.constant_initializer(0.0))
    # Any do_norm value other than "layer"/"instance" (e.g. False) skips
    # normalization entirely.
    if do_norm == "layer":
      x = tf.contrib.layers.layer_norm(x)
    elif do_norm == "instance":
      x = instance_norm(x)
    if do_relu:
      x = tf.nn.relu(x, "relu") if relufactor == 0 else lrelu(
          x, leak=relufactor)
    return x
def patch_discriminator(x, filters=64, filter_size=5, n=4,
                        name="patch_discrim"):
  """Patch descriminator."""
  with tf.variable_scope(name):
    x_shape = shape_list(x)
    spatial_dims = [x_shape[1] // 4, x_shape[2] // 4]
    # Discriminate a random quarter-size crop of the input.
    x = tf.random_crop(x, [x_shape[0]] + spatial_dims + [x_shape[3]])
    for layer in range(n):
      is_last = layer == n - 1
      x = general_conv(
          x=x,
          num_filters=filters * 2**layer,
          filter_size=filter_size,
          stride=1 if is_last else 2,
          stddev=0.02,
          padding="SAME",
          name="c%d" % layer,
          do_norm=False if layer == 0 else "instance",
          do_relu=not is_last,
          relufactor=0.2)
    # Global average pool over the remaining spatial dimensions.
    return tf.reduce_mean(x, [1, 2])
def mean_with_attention(x, name, num_heads=4):
  """Mean and attention to reduce spatial dimensions.

  Combines a plain spatial mean with num_heads attention-weighted spatial
  means, then projects the concatenation down with a dense layer.

  Args:
    x: Tensor [batch, height, width, channels].
    name: variable scope name.
    num_heads: int, number of attention heads.

  Returns:
    a Tensor of shape [batch, 2 * channels].
  """
  with tf.variable_scope(name):
    shape = shape_list(x)
    # Plain spatial mean: [batch, channels].
    m = tf.reduce_mean(x, [1, 2])
    # Per-position attention logits, one per head.
    a = tf.layers.dense(x, num_heads, name="mean_attn")
    # Softmax over all spatial positions: [batch, h*w, num_heads].
    s = tf.reshape(a, [shape[0], -1, num_heads])
    s = tf.nn.softmax(s, axis=1)
    # Back to [batch, h, w, 1, num_heads] to broadcast against channels.
    s = tf.reshape(s, shape[:-1] + [1, num_heads])
    # Attention-weighted means: [batch, channels, num_heads].
    am = tf.reduce_mean(tf.expand_dims(x, axis=-1) * s, [1, 2])
    # Concat the plain mean as one extra "head": [batch, channels, heads+1].
    l = tf.concat([am, tf.expand_dims(m, axis=-1)], axis=-1)
    return tf.layers.dense(tf.reshape(l, [shape[0], (num_heads+1) * shape[-1]]),
                           2 * shape[-1], name="mean_attn_final")
def single_discriminator(x, filters=128, kernel_size=8,
                         strides=4, pure_mean=False):
  """A simple single-layer convolutional discriminator."""
  with tf.variable_scope("discriminator"):
    net = tf.layers.conv2d(
        x, filters, kernel_size, strides=strides, padding="SAME", name="conv1")
    if pure_mean:
      # Plain global average pooling over space.
      return tf.reduce_mean(net, [1, 2])
    return mean_with_attention(net, "mean_with_attention")
def double_discriminator(x, filters1=128, filters2=None,
                         kernel_size=8, strides=4, pure_mean=False):
  """A convolutional discriminator with 2 layers and concatenated output.

  Args:
    x: input Tensor [batch, height, width, channels].
    filters1: int, filters of the first conv layer.
    filters2: int or None, filters of the second conv layer; defaults to
      4 * filters1.
    kernel_size: int, conv kernel size for both layers.
    strides: int, conv stride for both layers.
    pure_mean: bool, if True reduce spatially with a plain mean instead of
      mean_with_attention.

  Returns:
    a Tensor concatenating the two per-layer feature vectors on the last axis.
  """
  if filters2 is None:
    filters2 = 4 * filters1
  with tf.variable_scope("discriminator"):
    net = tf.layers.conv2d(
        x, filters1, kernel_size, strides=strides, padding="SAME", name="conv1")
    if pure_mean:
      net1 = tf.reduce_mean(net, [1, 2])
    else:
      net1 = mean_with_attention(net, "mean_with_attention1")
      # BUG FIX: removed a tf.reshape(net, [batch_size, -1]) here whose
      # result was discarded (pure no-op in the graph output); the then-unused
      # batch_size computation was dropped with it.
    net = tf.nn.relu(net)
    # NOTE(review): this second conv reads the original input `x`, not `net`,
    # so the relu above has no effect on this branch. Kept as-is since
    # changing it to `net` would alter behavior (and break existing
    # checkpoints); confirm the original intent before changing.
    net = tf.layers.conv2d(
        x, filters2, kernel_size, strides=strides, padding="SAME", name="conv2")
    if pure_mean:
      net2 = tf.reduce_mean(net, [1, 2])
    else:
      net2 = mean_with_attention(net, "mean_with_attention2")
    return tf.concat([net1, net2], axis=-1)
def upscale(inputs, f, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR):
  """Upscaling the image by a factor of f."""
  shape = shape_list(inputs)
  scaled_size = (shape[1] * f, shape[2] * f)
  return tf.image.resize_images(inputs, scaled_size, method)
def tpu_safe_image_summary(image):
  """Prepare an image tensor for summaries, TPU-safe."""
  if not is_xla_compiled():
    return tf.cast(image, tf.uint8)
  # We only support float32 images at the moment due to casting complications.
  if image.dtype != tf.float32:
    image = tf.to_float(image)
  return image
# This has been (shamefully) copied from
# GitHub tensorflow/models/blob/master/research/slim/nets/cyclegan.py
#
# tensorflow/models cannot be pip installed, and even if it were we don't want
# to depend on all the models in it.
#
# Therefore copying and forgoing any more bugfixes into it is the most
# expedient way to use this function.
def cyclegan_upsample(net, num_outputs, stride, method="conv2d_transpose"):
  """Upsamples the given inputs.

  Args:
    net: A Tensor of size [batch_size, height, width, filters].
    num_outputs: The number of output filters.
    stride: A list of 2 scalars or a 1x2 Tensor indicating the scale,
      relative to the inputs, of the output dimensions. For example, if kernel
      size is [2, 3], then the output height and width will be twice and three
      times the input size.
    method: The upsampling method: 'nn_upsample_conv',
      'bilinear_upsample_conv', or 'conv2d_transpose'.

  Returns:
    A Tensor which was upsampled using the specified method.

  Raises:
    ValueError: if `method` is not recognized.
  """
  with tf.variable_scope("upconv"):
    net_shape = tf.shape(net)
    height = net_shape[1]
    width = net_shape[2]
    # Reflection pad by 1 in spatial dimensions (axes 1, 2 = h, w) to make a
    # 3x3 "valid" convolution produce an output with the same dimension as the
    # input.
    spatial_pad_1 = np.array([[0, 0], [1, 1], [1, 1], [0, 0]])
    if method == "nn_upsample_conv":
      # Nearest-neighbor resize, then pad + 3x3 valid conv (same-size output).
      net = tf.image.resize_nearest_neighbor(
          net, [stride[0] * height, stride[1] * width])
      net = tf.pad(net, spatial_pad_1, "REFLECT")
      net = tf.contrib.layers.conv2d(
          net, num_outputs, kernel_size=[3, 3], padding="valid")
    elif method == "bilinear_upsample_conv":
      # Same as above but with bilinear interpolation.
      net = tf.image.resize_bilinear(net,
                                     [stride[0] * height, stride[1] * width])
      net = tf.pad(net, spatial_pad_1, "REFLECT")
      net = tf.contrib.layers.conv2d(
          net, num_outputs, kernel_size=[3, 3], padding="valid")
    elif method == "conv2d_transpose":
      # This corrects 1 pixel offset for images with even width and height.
      # conv2d is left aligned and conv2d_transpose is right aligned for even
      # sized images (while doing "SAME" padding).
      # Note: This doesn't reflect the actual model in the paper.
      net = tf.contrib.layers.conv2d_transpose(
          net, num_outputs, kernel_size=[3, 3], stride=stride, padding="valid")
      # Drop the first row/column to undo the right-alignment offset.
      net = net[:, 1:, 1:, :]
    else:
      raise ValueError("Unknown method: [%s]" % method)
    return net
def weight_targeting(w, k):
  """Weight-level magnitude pruning."""
  k = tf.to_int32(k)
  w_shape = shape_list(w)
  size = tf.to_int32(tf.reduce_prod(w_shape[:-1]))
  w = tf.reshape(w, [size, w_shape[-1]])
  # Per-column threshold: the k-th smallest |w| within each output column.
  sorted_mags = tf.contrib.framework.sort(
      tf.abs(tf.transpose(w)), axis=1)
  thres = sorted_mags[:, k]
  # Mark every weight whose magnitude is at or below its column's threshold.
  mask = tf.to_float(thres[None, :] >= tf.abs(w))
  return tf.reshape(mask, w_shape)
def unit_targeting(w, k):
  """Unit-level magnitude pruning."""
  k = tf.to_int32(k)
  w_shape = shape_list(w)
  size = tf.to_int32(tf.reduce_prod(w_shape[:-1]))
  w = tf.reshape(w, [size, w_shape[-1]])
  # Per-unit L2 norms over the flattened input axes.
  unit_norms = tf.norm(w, axis=0)
  thres = tf.contrib.framework.sort(unit_norms, axis=0)[k]
  # Mask whole units (columns) whose norm is at or below the threshold.
  unit_mask = tf.to_float(thres >= unit_norms)[None, :]
  return tf.reshape(tf.tile(unit_mask, [size, 1]), w_shape)
def td_conv(inputs,
            filters,
            kernel_size,
            targeting_count,
            targeting_fn,
            keep_prob,
            is_training,
            do_prune=True,
            strides=(1, 1),
            padding="valid",
            data_format="channels_last",
            dilation_rate=(1, 1),
            activation=None,
            use_bias=True,
            kernel_initializer=None,
            bias_initializer=tf.zeros_initializer(),
            name=None,
            reuse=None):
  """Apply targeted dropout to the weights of a convolution.

  Builds a conv2d whose kernel is passed through targeted_dropout (when
  keep_prob < 1.0) before convolving.

  Args:
    inputs: input Tensor, NHWC or NCHW depending on data_format.
    filters: int, number of output filters.
    kernel_size: int, square kernel size.
    targeting_count: passed to targeted_dropout as the number of weights to
      target.
    targeting_fn: callable (weights, k) -> boolean mask; see targeted_dropout.
    keep_prob: dropout keep probability for the targeted weights.
    is_training: bool, training vs. inference behavior of targeted_dropout.
    do_prune: bool, forwarded to targeted_dropout.
    strides: int or pair of ints, spatial strides.
    padding: conv padding.
      NOTE(review): tf.nn.conv2d expects "SAME"/"VALID" (uppercase); the
      default here is lowercase "valid" — confirm callers pass uppercase or
      that this default is never used as-is.
    data_format: "channels_last" (NHWC) or anything else for NCHW.
    dilation_rate: int or pair of ints, spatial dilations.
    activation: optional activation applied to the output.
    use_bias: bool, whether to add a bias term.
    kernel_initializer: initializer for the kernel variable.
    bias_initializer: initializer for the bias variable.
    name: optional variable scope name.
    reuse: whether to reuse the variable scope.

  Returns:
    the convolution output Tensor.
  """
  with tf.variable_scope(name, default_name="td_conv", reuse=reuse):
    nhwc = data_format == "channels_last"
    in_dim = shape_list(inputs)[-1] if nhwc else shape_list(inputs)[1]
    kernel_shape = [kernel_size, kernel_size, in_dim, filters]
    w = tf.get_variable(
        "DW", shape=kernel_shape, initializer=kernel_initializer)
    if use_bias:
      b = tf.get_variable("b", shape=[filters], initializer=bias_initializer)
    if keep_prob < 1.0:
      # Drop/prune targeted kernel weights before convolving.
      w = targeted_dropout(
          w,
          targeting_count,
          keep_prob,
          targeting_fn,
          is_training,
          do_prune=do_prune)
    # Normalize scalar strides/dilations to per-axis lists, then expand to
    # the 4-element layout tf.nn.conv2d expects for this data format.
    if isinstance(strides, int):
      strides = [strides, strides]
    if isinstance(dilation_rate, int):
      dilation_rate = [dilation_rate, dilation_rate]
    if nhwc:
      strides = [1, strides[0], strides[1], 1]
      dilation_rate = [1, dilation_rate[0], dilation_rate[1], 1]
    else:
      strides = [1, 1, strides[0], strides[1]]
      dilation_rate = [1, 1, dilation_rate[0], dilation_rate[1]]
    y = tf.nn.conv2d(
        inputs,
        w,
        strides,
        padding,
        data_format="NHWC" if nhwc else "NCHW",
        dilations=dilation_rate,
        name=None)
    if use_bias:
      y += b
    if activation:
      y = activation(y)
    return y
def targeted_dropout(inputs,
                     k,
                     keep_prob,
                     targeting_fn,
                     is_training,
                     do_prune=False):
  """Applies targeted dropout.

  Applies dropout at a rate of `1 - keep_prob` to only those elements of
  `inputs` marked by `targeting_fn`. See below and paper for more detail:

  "Targeted Dropout for Posthoc Pruning" Aidan N. Gomez, Ivan Zhang,
  Kevin Swersky, Yarin Gal, and Geoffrey E. Hinton.

  Args:
    inputs: Tensor, inputs to apply targeted dropout to.
    k: Scalar Tensor or python scalar, sets the number of elements to target in
      `inputs`. Must be within `[0, tf.shape(x)[-1]]` and compatible with
      second argument of `targeting_fn`.
    keep_prob: Scalar Tensor, passed as `tf.nn.dropout`'s `keep_prob` argument.
    targeting_fn: callable `fn(inputs, k) -> Boolean Tensor`, produces a
      boolean mask the same shape as `inputs` where True indicates an element
      will be dropped, and False not.
    is_training: bool, indicates whether currently training.
    do_prune: bool, indicates whether to prune the `k * (1 - keep_prob)`
      elements of `inputs` expected to be dropped each forwards pass.

  Returns:
    Tensor, same shape and dtype as `inputs`.
  """
  if not is_training and do_prune:
    # At inference, prune only the expected number of dropped elements:
    # k * (1 - keep_prob) rather than all k targeted ones.
    k = tf.round(tf.to_float(k) * tf.to_float(1. - keep_prob))
  mask = targeting_fn(inputs, k)
  mask = tf.cast(mask, inputs.dtype)
  if is_training:
    # Keep untargeted elements intact; apply dropout only where mask == 1.
    return inputs * (1 - mask) + tf.nn.dropout(inputs, keep_prob) * mask
  elif do_prune:
    # Zero out the targeted elements entirely.
    return inputs * (1 - mask)
  else:
    return inputs
def kl_divergence(mu, log_var, mu_p=0.0, log_var_p=0.0):
  """KL divergence of diagonal gaussian N(mu,exp(log_var)) and N(0,1).

  Args:
    mu: mu parameter of the distribution.
    log_var: log(var) parameter of the distribution.
    mu_p: optional mu from a learned prior distribution
    log_var_p: optional log(var) from a learned prior distribution

  Returns:
    the KL loss.
  """
  batch_size = shape_list(mu)[0]
  def _normal(mean, log_variance):
    # std = exp(log_var / 2)
    return tfp.distributions.Normal(
        mean, tf.exp(tf.multiply(0.5, log_variance)))
  kld = tfp.distributions.kl_divergence(
      _normal(mu, log_var), _normal(mu_p, log_var_p))
  # Sum over all dimensions, average over the batch.
  return tf.reduce_sum(kld) / tf.to_float(batch_size)
def sparse_equals_constant(constant, tensor):
  """Elementwise equality of a SparseTensor's stored values with a constant."""
  eq_values = tf.equal(tensor.values, constant)
  return tf.SparseTensor(
      indices=tensor.indices,
      values=eq_values,
      dense_shape=tensor.dense_shape)
def sparse_expand_dims(tensor, current_num_dims, axis=0):
  """Like tf.expand_dims for SparseTensor: insert a size-1 dim at axis."""
  if axis == -1:
    axis = current_num_dims
  num_entries = tf.shape(tensor.indices)[0]
  # Every existing entry gets index 0 along the new dimension.
  zero_col = tf.zeros([num_entries], dtype=tf.int64)
  index_cols = tf.unstack(tensor.indices, axis=1, num=current_num_dims)
  dims = tf.unstack(tensor.dense_shape, num=current_num_dims)
  return tf.SparseTensor(
      indices=tf.stack(index_cols[:axis] + [zero_col] + index_cols[axis:],
                       axis=1),
      values=tensor.values,
      dense_shape=tf.stack(dims[:axis] + [1] + dims[axis:]))
def sparse_add_constant(constant, tensor):
  """Adds a scalar to every stored value of a SparseTensor (zeros untouched)."""
  shifted_values = constant + tensor.values
  return tf.SparseTensor(indices=tensor.indices,
                         values=shifted_values,
                         dense_shape=tensor.dense_shape)
def sparse_eye(size):
  """Builds a `size` x `size` identity matrix as a tf.SparseTensor.

  Args:
    size: int or scalar int Tensor, the number of rows/columns.

  Returns:
    A tf.SparseTensor with ones on the main diagonal.
  """
  # SparseTensor indices must have shape [nnz, ndims]; stacking the two range
  # vectors along axis 1 yields one [i, i] row per diagonal entry. (Stacking
  # along axis 0, as before, produced a malformed [2, size] index matrix.)
  indices = tf.cast(
      tf.stack([tf.range(size), tf.range(size)], axis=1), tf.int64)
  values = tf.ones(size)
  dense_shape = [tf.cast(size, tf.int64), tf.cast(size, tf.int64)]
  return tf.SparseTensor(
      indices=indices, values=values, dense_shape=dense_shape)
# modification from https://github.com/tensorflow/tensorflow/pull/21276
# without special initialization for g
class WeightNorm(tf.keras.layers.Wrapper):
  """ This wrapper reparameterizes a layer by decoupling the weight's
  magnitude and direction. This speeds up convergence by improving the
  conditioning of the optimization problem.
  Weight Normalization: A Simple Reparameterization to Accelerate
  Training of Deep Neural Networks: https://arxiv.org/abs/1602.07868
  Tim Salimans, Diederik P. Kingma (2016)
  WeightNorm wrapper works for keras and tf layers.
  ```python
    net = WeightNorm(tf.keras.layers.Conv2D(2, 2, activation='relu'),
           input_shape=(32, 32, 3), data_init=True)(x)
    net = WeightNorm(tf.keras.layers.Conv2D(16, 5, activation='relu'),
                     data_init=True)
    net = WeightNorm(tf.keras.layers.Dense(120, activation='relu'),
                     data_init=True)(net)
    net = WeightNorm(tf.keras.layers.Dense(n_classes),
                     data_init=True)(net)
  ```
  Arguments:
    layer: a layer instance.
    data_init: If `True` use data dependent variable initialization
  Raises:
    ValueError: If not initialized with a `Layer` instance.
    ValueError: If `Layer` does not contain a `kernel` of weights
    NotImplementedError: If `data_init` is True and running graph execution
  """
  def __init__(self, layer, data_init=False, **kwargs):
    # Only a Keras/TF layer can be wrapped: we need access to its
    # `kernel`, `build` and `call`.
    if not isinstance(layer, tf.keras.layers.Layer):
      raise ValueError(
          "Please initialize `WeightNorm` layer with a "
          "`Layer` instance. You passed: {input}".format(input=layer))
    super(WeightNorm, self).__init__(layer, **kwargs)
    self._track_checkpointable(layer, name="layer")
  def _compute_weights(self):
    """Generate weights with normalization."""
    # Reparameterization: kernel = g * v / ||v||, where the norm is taken
    # over every axis except the output (filter/unit) dimension.
    with tf.variable_scope("compute_weights"):
      self.layer.kernel = tf.nn.l2_normalize(
          self.layer.v, axis=self.norm_axes) * self.layer.g
  def _init_norm(self, weights):
    """Set the norm of the weight vector."""
    with tf.variable_scope("init_norm"):
      # Collapse all non-output axes, then take per-output-channel norms.
      flat = tf.reshape(weights, [-1, self.layer_depth])
      return tf.reshape(tf.norm(flat, axis=0), (self.layer_depth,))
  def _data_dep_init(self, inputs):
    """Data dependent initialization for eager execution."""
    # NOTE(review): this path is currently disabled -- call() never invokes it
    # (see the commented-out block there) and `self.initialized` is assigned
    # nowhere else. Confirm before re-enabling eager/data-dependent init.
    with tf.variable_scope("data_dep_init"):
      # Generate data dependent init values
      activation = self.layer.activation
      self.layer.activation = None
      x_init = self.layer.call(inputs)
      m_init, v_init = tf.moments(x_init, self.norm_axes)
      scale_init = 1. / tf.sqrt(v_init + 1e-10)
      # Assign data dependent init values
      self.layer.g = self.layer.g * scale_init
      self.layer.bias = (-m_init * scale_init)
      self.layer.activation = activation
      self.initialized = True
  def build(self, input_shape=None):
    """Build `Layer`."""
    input_shape = tf.TensorShape(input_shape).as_list()
    self.input_spec = tf.layers.InputSpec(shape=input_shape)
    if not self.layer.built:
      self.layer.build(input_shape)
      # Temporarily mark the inner layer unbuilt while its kernel is swapped
      # for the (v, g) reparameterization; it is marked built again below.
      self.layer.built = False
      if not hasattr(self.layer, "kernel"):
        raise ValueError("`WeightNorm` must wrap a layer that"
                         " contains a `kernel` for weights")
      # The kernel's filter or unit dimension is -1
      self.layer_depth = int(self.layer.kernel.shape[-1])
      self.norm_axes = list(range(self.layer.kernel.shape.ndims - 1))
      # Keep the raw kernel as `v`; add the per-output-channel magnitude `g`.
      self.layer.v = self.layer.kernel
      self.layer.g = self.layer.add_variable(
          name="g",
          shape=(self.layer_depth,),
          initializer=tf.ones_initializer,
          dtype=self.layer.kernel.dtype,
          trainable=True)
      # with ops.control_dependencies([self.layer.g.assign(
      #     self._init_norm(self.layer.v))]):
      #   self._compute_weights()
      self._compute_weights()
      self.layer.built = True
    super(WeightNorm, self).build()
    self.built = True
  def call(self, inputs):
    """Call `Layer`."""
    # if context.executing_eagerly():
    #   if not self.initialized:
    #     self._data_dep_init(inputs)
    self._compute_weights()  # Recompute weights for each forward pass
    output = self.layer.call(inputs)
    return output
  def compute_output_shape(self, input_shape):
    # Output shape is whatever the wrapped layer produces.
    return tf.TensorShape(
        self.layer.compute_output_shape(input_shape).as_list())
| mlperf/training_results_v0.5 | v0.5.0/google/research_v3.32/gnmt-tpuv3-32/code/gnmt/model/t2t/tensor2tensor/layers/common_layers.py | Python | apache-2.0 | 132,971 |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from nose.tools import assert_true, assert_false, assert_equal
from desktop.lib.django_test_util import make_logged_in_client
from desktop.lib.test_utils import grant_access
from desktop.models import Settings
from oozie.tests import OozieBase
class TestAboutBase(object):
  """Shared fixtures: one regular user and one superuser, both with access
  to the 'about' app."""

  def setUp(self):
    self.client = make_logged_in_client(username="about", is_superuser=False)
    self.client_admin = make_logged_in_client(username="about_admin", is_superuser=True)
    grant_access("about", "about", "about")
    grant_access("about_admin", "about_admin", "about")
class TestAbout(TestAboutBase, OozieBase):
  """Permission checks that need a running cluster (via OozieBase)."""

  def test_admin_wizard_permissions(self):
    # Superusers see the configuration check on the landing page...
    content = self.client_admin.get(reverse('about:index')).content
    assert_true('Check Configuration' in content, content)
    # ...regular users do not.
    content = self.client.get(reverse('about:index')).content
    assert_false('Check Configuration' in content, content)
class TestAboutWithNoCluster(TestAboutBase):
  """Tests that do not require a cluster."""

  def test_dump_config(self):
    # Must not raise on non-ASCII locales (bad unicode would blow up here).
    self.client_admin.get(reverse('about:index'), HTTP_ACCEPT_LANGUAGE='fr-fr')

  def test_collect_usage(self):
    original = Settings.get_settings().collect_usage
    try:
      # Turn usage collection off.
      response = self.client.post(reverse('about:update_preferences'), {'collect_usage': False})
      data = json.loads(response.content)
      assert_equal(data['status'], 0)
      assert_false(data['collect_usage'] == True) # Weird but works
      # Turn it back on.
      response = self.client.post(reverse('about:update_preferences'), {'collect_usage': True})
      data = json.loads(response.content)
      assert_equal(data['status'], 0)
      assert_true(data['collect_usage'])
    finally:
      # Restore whatever the setting was before the test ran.
      settings = Settings.get_settings()
      settings.collect_usage = original
      settings.save()
| jayceyxc/hue | apps/about/src/about/tests.py | Python | apache-2.0 | 2,701 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Copyright 2015 NEC Corporation. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
from mininet.cli import CLI
from mininet.net import Mininet
from mininet.node import RemoteController, OVSSwitch
def start_of13_switches(controller, switches):
    """Attach each switch to `controller` and force it to speak OpenFlow 1.3.

    Args:
        controller: the mininet controller node the switches connect to.
        switches: iterable of OVS switch nodes.
    """
    for switch in switches:
        switch.start([controller])
        # ovs-vsctl identifies the bridge by the switch name.
        switch.sendCmd('ovs-vsctl set bridge %s protocols=OpenFlow13' % switch)
if '__main__' == __name__:
    # The controller runs externally and is reached on localhost:6653.
    net = Mininet(controller=RemoteController, autoStaticArp=True, switch=OVSSwitch)
    c1 = net.addController('c1', ip='127.0.0.1', port=6653)
    s1 = net.addSwitch('s1')
    s2 = net.addSwitch('s2')
    s3 = net.addSwitch('s3')
    s4 = net.addSwitch('s4')
    h1 = net.addHost('h1')
    h2 = net.addHost('h2')
    # Ring topology s1-s2-s3-s4-s1, with h1 on s1 and h2 on s3.
    s1.linkTo(s2)
    s2.linkTo(s3)
    s3.linkTo(s4)
    s4.linkTo(s1)
    s1.linkTo(h1)
    s3.linkTo(h2)
    net.build()
    # Start the controller, then attach switches and force OpenFlow 1.3.
    c1.start()
    start_of13_switches(c1, [s1, s2, s3, s4])
    # Drop into the interactive mininet prompt; tear down the net on exit.
    CLI(net)
    net.stop()
| machaharu/odenos | apps/mininet_examples/single_network_control/start_mininet.py | Python | apache-2.0 | 1,901 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import unittest
import marshmallow
from airflow.api_connexion.schemas.connection_schema import (
ConnectionCollection,
connection_collection_item_schema,
connection_collection_schema,
connection_schema,
)
from airflow.models import Connection
from airflow.utils.session import create_session, provide_session
from tests.test_utils.db import clear_db_connections
class TestConnectionCollectionItemSchema(unittest.TestCase):
    """Round-trip tests for the single-connection-item schema."""

    def setUp(self) -> None:
        with create_session() as session:
            session.query(Connection).delete()

    def tearDown(self) -> None:
        clear_db_connections()

    @provide_session
    def test_serialize(self, session):
        conn = Connection(
            conn_id='mysql_default',
            conn_type='mysql',
            host='mysql',
            login='login',
            schema='testschema',
            port=80,
        )
        session.add(conn)
        session.commit()
        # Dump the row as read back from the database.
        serialized = connection_collection_item_schema.dump(
            session.query(Connection).first())
        expected = {
            'connection_id': "mysql_default",
            'conn_type': 'mysql',
            'host': 'mysql',
            'login': 'login',
            'schema': 'testschema',
            'port': 80,
        }
        self.assertEqual(serialized, expected)

    def test_deserialize(self):
        # A full payload and a minimal one (only the required fields).
        full_dump = {
            'connection_id': "mysql_default_1",
            'conn_type': 'mysql',
            'host': 'mysql',
            'login': 'login',
            'schema': 'testschema',
            'port': 80,
        }
        minimal_dump = {
            'connection_id': "mysql_default_2",
            'conn_type': "postgres",
        }
        # 'connection_id' is loaded back into the model attribute 'conn_id'.
        self.assertEqual(
            connection_collection_item_schema.load(full_dump),
            {
                'conn_id': "mysql_default_1",
                'conn_type': 'mysql',
                'host': 'mysql',
                'login': 'login',
                'schema': 'testschema',
                'port': 80,
            },
        )
        self.assertEqual(
            connection_collection_item_schema.load(minimal_dump),
            {
                'conn_id': "mysql_default_2",
                'conn_type': "postgres",
            },
        )

    def test_deserialize_required_fields(self):
        # 'conn_type' is mandatory; loading without it must fail validation.
        with self.assertRaisesRegex(
            marshmallow.exceptions.ValidationError,
            re.escape("{'conn_type': ['Missing data for required field.']}"),
        ):
            connection_collection_item_schema.load({'connection_id': "mysql_default_2"})
class TestConnectionCollectionSchema(unittest.TestCase):
    """Serialization test for a collection of connections."""

    def setUp(self) -> None:
        with create_session() as session:
            session.query(Connection).delete()

    def tearDown(self) -> None:
        clear_db_connections()

    @provide_session
    def test_serialize(self, session):
        conns = [
            Connection(conn_id='mysql_default_1', conn_type='test-type'),
            Connection(conn_id='mysql_default_2', conn_type='test-type2'),
        ]
        session.add_all(conns)
        session.commit()
        collection = ConnectionCollection(connections=conns, total_entries=2)
        serialized = connection_collection_schema.dump(collection)
        # Unset model fields serialize as None.
        expected = {
            'connections': [
                {
                    "connection_id": "mysql_default_1",
                    "conn_type": "test-type",
                    "host": None,
                    "login": None,
                    'schema': None,
                    'port': None,
                },
                {
                    "connection_id": "mysql_default_2",
                    "conn_type": "test-type2",
                    "host": None,
                    "login": None,
                    'schema': None,
                    'port': None,
                },
            ],
            'total_entries': 2,
        }
        self.assertEqual(serialized, expected)
class TestConnectionSchema(unittest.TestCase):
    """Round-trip tests for the full connection schema (includes 'extra')."""

    def setUp(self) -> None:
        with create_session() as session:
            session.query(Connection).delete()

    def tearDown(self) -> None:
        clear_db_connections()

    @provide_session
    def test_serialize(self, session):
        conn = Connection(
            conn_id='mysql_default',
            conn_type='mysql',
            host='mysql',
            login='login',
            schema='testschema',
            port=80,
            password='test-password',
            extra="{'key':'string'}",
        )
        session.add(conn)
        session.commit()
        serialized = connection_schema.dump(session.query(Connection).first())
        # The password is not part of the dump; 'extra' is passed through.
        expected = {
            'connection_id': "mysql_default",
            'conn_type': 'mysql',
            'host': 'mysql',
            'login': 'login',
            'schema': 'testschema',
            'port': 80,
            'extra': "{'key':'string'}",
        }
        self.assertEqual(serialized, expected)

    def test_deserialize(self):
        payload = {
            'connection_id': "mysql_default",
            'conn_type': 'mysql',
            'host': 'mysql',
            'login': 'login',
            'schema': 'testschema',
            'port': 80,
            'extra': "{'key':'string'}",
        }
        # 'connection_id' is loaded back into the model attribute 'conn_id'.
        self.assertEqual(
            connection_schema.load(payload),
            {
                'conn_id': "mysql_default",
                'conn_type': 'mysql',
                'host': 'mysql',
                'login': 'login',
                'schema': 'testschema',
                'port': 80,
                'extra': "{'key':'string'}",
            },
        )
| DinoCow/airflow | tests/api_connexion/schemas/test_connection_schema.py | Python | apache-2.0 | 7,097 |
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model classes which are used to communicate between parts of implementation.
These model classes are describing mapreduce, its current state and
communication messages. They are either stored in the datastore or
serialized to/from json and passed around with other means.
"""
# Disable "Invalid method name"
# pylint: disable=g-bad-name
__all__ = ["MapreduceState",
"MapperSpec",
"MapreduceControl",
"MapreduceSpec",
"ShardState",
"CountersMap",
"TransientShardState",
"QuerySpec",
"HugeTask"]
import cgi
import datetime
import urllib
import zlib
from mapreduce.lib.graphy.backends import google_chart_api
from mapreduce.lib import simplejson
from google.appengine.api import memcache
from google.appengine.api import taskqueue
from google.appengine.datastore import datastore_rpc
from google.appengine.ext import db
from mapreduce import context
from mapreduce import hooks
from mapreduce import json_util
from mapreduce import util
# pylint: disable=protected-access
# Special datastore kinds for MR.
_MAP_REDUCE_KINDS = ("_AE_MR_MapreduceControl",
"_AE_MR_MapreduceState",
"_AE_MR_ShardState",
"_AE_MR_TaskPayload")
class _HugeTaskPayload(db.Model):
  """Datastore entity holding a task payload too large for the task itself."""

  # Compressed, urlencoded task parameters (see HugeTask).
  payload = db.BlobProperty()

  @classmethod
  def kind(cls):
    """Returns the datastore entity kind."""
    return "_AE_MR_TaskPayload"
class HugeTask(object):
  """HugeTask is a taskqueue.Task-like class that can store big payloads.

  Payloads are stored either in the task payload itself or in the datastore.
  Task handlers should inherit from base_handler.HugeTaskHandler class.
  """

  PAYLOAD_PARAM = "__payload"
  PAYLOAD_KEY_PARAM = "__payload_key"
  # Leave some wiggle room for headers and other fields.
  MAX_TASK_PAYLOAD = taskqueue.MAX_PUSH_TASK_SIZE_BYTES - 1024
  MAX_DB_PAYLOAD = datastore_rpc.BaseConnection.MAX_RPC_BYTES
  PAYLOAD_VERSION_HEADER = "AE-MR-Payload-Version"
  # Update version when payload handling is changed
  # in a backward incompatible way.
  PAYLOAD_VERSION = "1"

  def __init__(self,
               url,
               params,
               name=None,
               eta=None,
               countdown=None,
               parent=None,
               headers=None):
    """Init.

    Args:
      url: task url in str.
      params: a dict from str to str.
      name: task name.
      eta: task eta.
      countdown: task countdown.
      parent: parent entity of huge task's payload.
      headers: a dict of headers for the task.

    Raises:
      ValueError: when payload is too big even for datastore, or parent is
        not specified when payload is stored in datastore.
    """
    self.url = url
    self.name = name
    self.eta = eta
    self.countdown = countdown
    self._headers = {
        "Content-Type": "application/octet-stream",
        self.PAYLOAD_VERSION_HEADER: self.PAYLOAD_VERSION
    }
    if headers:
      self._headers.update(headers)
    # TODO(user): Find a more space efficient way than urlencoding.
    payload_str = urllib.urlencode(params)
    compressed_payload = ""
    if len(payload_str) > self.MAX_TASK_PAYLOAD:
      compressed_payload = zlib.compress(payload_str)
    # Payload is small. Don't bother with anything.
    if not compressed_payload:
      self._payload = payload_str
    # Compressed payload is small. Don't bother with datastore.
    elif len(compressed_payload) < self.MAX_TASK_PAYLOAD:
      self._payload = self.PAYLOAD_PARAM + compressed_payload
    elif len(compressed_payload) > self.MAX_DB_PAYLOAD:
      # Fixed error message: previously read "to big".
      raise ValueError(
          "Payload from %s too big to be stored in database: %s" %
          (self.name, len(compressed_payload)))
    # Store payload in the datastore.
    else:
      if not parent:
        raise ValueError("Huge tasks should specify parent entity.")
      payload_entity = _HugeTaskPayload(payload=compressed_payload,
                                        parent=parent)
      payload_key = payload_entity.put()
      self._payload = self.PAYLOAD_KEY_PARAM + str(payload_key)

  def add(self, queue_name, transactional=False):
    """Add task to the queue."""
    task = self.to_task()
    task.add(queue_name, transactional)

  def to_task(self):
    """Convert to a taskqueue task."""
    # Never pass params to taskqueue.Task. Use payload instead. Otherwise,
    # it's up to a particular taskqueue implementation to generate
    # payload from params. It could blow up payload size over limit.
    return taskqueue.Task(
        url=self.url,
        payload=self._payload,
        name=self.name,
        eta=self.eta,
        countdown=self.countdown,
        headers=self._headers)

  @classmethod
  def decode_payload(cls, request):
    """Decode task payload.

    HugeTask controls its own payload entirely including urlencoding.
    It doesn't depend on any particular web framework.

    Args:
      request: a webapp Request instance.

    Returns:
      A dict of str to str. The same as the params argument to __init__.

    Raises:
      DeprecationWarning: When task payload constructed from an older
        incompatible version of mapreduce.
    """
    # TODO(user): Pass mr_id into headers. Otherwise when payload decoding
    # failed, we can't abort a mr.
    if request.headers.get(cls.PAYLOAD_VERSION_HEADER) != cls.PAYLOAD_VERSION:
      raise DeprecationWarning(
          "Task is generated by an older incompatible version of mapreduce. "
          "Please kill this job manually")
    return cls._decode_payload(request.body)

  @classmethod
  def _decode_payload(cls, body):
    """Reverse of __init__'s encoding: body -> params dict.

    The body carries one of three prefixes chosen by __init__:
    a datastore key, an inline compressed payload, or a plain payload.
    """
    compressed_payload_str = None
    if body.startswith(cls.PAYLOAD_KEY_PARAM):
      payload_key = body[len(cls.PAYLOAD_KEY_PARAM):]
      payload_entity = _HugeTaskPayload.get(payload_key)
      compressed_payload_str = payload_entity.payload
    elif body.startswith(cls.PAYLOAD_PARAM):
      compressed_payload_str = body[len(cls.PAYLOAD_PARAM):]
    if compressed_payload_str:
      payload_str = zlib.decompress(compressed_payload_str)
    else:
      payload_str = body
    result = {}
    # Single-valued params collapse to their value; repeated params stay lists.
    for (name, value) in cgi.parse_qs(payload_str).items():
      if len(value) == 1:
        result[name] = value[0]
      else:
        result[name] = value
    return result
class CountersMap(json_util.JsonMixin):
  """Maintains map from counter name to counter value.

  Provides basic arithmetic over counter values (bulk add/subtract),
  incrementing of individual counters, and (de)serialization to/from json.
  """

  def __init__(self, initial_map=None):
    """Constructor.

    Args:
      initial_map: initial counter values map from counter name (string) to
        counter value (int).
    """
    self.counters = initial_map if initial_map else {}

  def __repr__(self):
    """Compute string representation."""
    return "mapreduce.model.CountersMap(%r)" % self.counters

  def get(self, counter_name):
    """Return the current value of `counter_name`, or 0 if it was never set."""
    return self.counters.get(counter_name, 0)

  def increment(self, counter_name, delta):
    """Add `delta` to `counter_name` and return the new value."""
    new_value = self.counters.get(counter_name, 0) + delta
    self.counters[counter_name] = new_value
    return new_value

  def add_map(self, counters_map):
    """Add every counter of another CountersMap into this one."""
    for name, value in counters_map.counters.items():
      self.increment(name, value)

  def sub_map(self, counters_map):
    """Subtract every counter of another CountersMap from this one."""
    for name, value in counters_map.counters.items():
      self.increment(name, -value)

  def clear(self):
    """Reset all counters."""
    self.counters = {}

  def to_json(self):
    """Serialize all counters into a json-compatible dict."""
    return {"counters": self.counters}

  @classmethod
  def from_json(cls, json):
    """Reconstruct a CountersMap from the structure produced by to_json.

    Args:
      json: json representation of CountersMap.

    Returns:
      an instance of CountersMap with all data deserialized from json.
    """
    counters_map = cls()
    counters_map.counters = json["counters"]
    return counters_map

  def to_dict(self):
    """Return the underlying {counter name: counter value} dictionary."""
    return self.counters
class MapperSpec(json_util.JsonMixin):
  """Contains a specification for the mapper phase of the mapreduce.
  A MapperSpec instance can be changed only during the mapreduce starting
  process, and it remains immutable for the rest of mapreduce execution.
  MapperSpec is passed as a payload to all mapreduce tasks in JSON encoding
  as part of MapreduceSpec.
  Specifying mapper handlers:
  * '<module_name>.<class_name>' - __call__ method of class instance will be
    called
  * '<module_name>.<function_name>' - function will be called.
  * '<module_name>.<class_name>.<method_name>' - class will be instantiated
    and method called.
  """
  def __init__(self,
               handler_spec,
               input_reader_spec,
               params,
               shard_count,
               output_writer_spec=None):
    """Creates a new MapperSpec.
    Args:
      handler_spec: handler specification as string (see class doc for
        details).
      input_reader_spec: The class name of the input reader to use.
      params: Dictionary of additional parameters for the mapper.
      shard_count: number of shards to process in parallel.
      output_writer_spec: The class name of the output writer to use.
    Properties:
      handler_spec: name of handler class/function to use.
      input_reader_spec: The class name of the input reader to use.
      params: Dictionary of additional parameters for the mapper.
      shard_count: number of shards to process in parallel.
      output_writer_spec: The class name of the output writer to use.
    """
    self.handler_spec = handler_spec
    self.input_reader_spec = input_reader_spec
    self.output_writer_spec = output_writer_spec
    # int() because the value may arrive JSON-decoded as a string/unicode.
    self.shard_count = int(shard_count)
    self.params = params
  def get_handler(self):
    """Get mapper handler instance.
    This always creates a new instance of the handler. If the handler is a
    callable instance, MR only wants to create a new instance at the
    beginning of a shard or shard retry. The pickled callable instance
    should be accessed from TransientShardState.
    Returns:
      handler instance as callable.
    """
    return util.handler_for_name(self.handler_spec)
  # NOTE: every access to .handler builds a fresh handler (see get_handler).
  handler = property(get_handler)
  def input_reader_class(self):
    """Get input reader class.
    Returns:
      input reader class object.
    """
    return util.for_name(self.input_reader_spec)
  def output_writer_class(self):
    """Get output writer class.
    Returns:
      output writer class object, or None when no output writer is configured.
    """
    # Short-circuits to None when output_writer_spec is unset.
    return self.output_writer_spec and util.for_name(self.output_writer_spec)
  def to_json(self):
    """Serializes this MapperSpec into a json-izable object."""
    result = {
        "mapper_handler_spec": self.handler_spec,
        "mapper_input_reader": self.input_reader_spec,
        "mapper_params": self.params,
        "mapper_shard_count": self.shard_count
    }
    # Key is only present when a writer is configured; from_json uses .get().
    if self.output_writer_spec:
      result["mapper_output_writer"] = self.output_writer_spec
    return result
  def __str__(self):
    return "MapperSpec(%s, %s, %s, %s)" % (
        self.handler_spec, self.input_reader_spec, self.params,
        self.shard_count)
  @classmethod
  def from_json(cls, json):
    """Creates MapperSpec from a dict-like object."""
    return cls(json["mapper_handler_spec"],
               json["mapper_input_reader"],
               json["mapper_params"],
               json["mapper_shard_count"],
               json.get("mapper_output_writer")
              )
  def __eq__(self, other):
    # Two specs are considered equal iff their serialized forms are equal.
    if not isinstance(other, self.__class__):
      return False
    return self.to_json() == other.to_json()
class MapreduceSpec(json_util.JsonMixin):
  """Contains a specification for the whole mapreduce.
  MapreduceSpec instance can be changed only during mapreduce starting process,
  and it remains immutable for the rest of mapreduce execution. MapreduceSpec is
  passed as a payload to all mapreduce tasks in json encoding.
  """
  # Url to call when mapreduce finishes its execution.
  PARAM_DONE_CALLBACK = "done_callback"
  # Queue to use to call done callback
  PARAM_DONE_CALLBACK_QUEUE = "done_callback_queue"
  def __init__(self,
               name,
               mapreduce_id,
               mapper_spec,
               params={},
               hooks_class_name=None):
    """Create new MapreduceSpec.
    Args:
      name: The name of this mapreduce job type.
      mapreduce_id: ID of the mapreduce.
      mapper_spec: JSON-encoded string containing a MapperSpec.
      params: dictionary of additional mapreduce parameters.
      hooks_class_name: The fully qualified name of the hooks class to use.
    Properties:
      name: The name of this mapreduce job type.
      mapreduce_id: unique id of this mapreduce as string.
      mapper: This MapreduceSpec's instance of MapperSpec.
      params: dictionary of additional mapreduce parameters.
      hooks_class_name: The fully qualified name of the hooks class to use.
    """
    # NOTE(review): mutable default for `params` is shared across calls; it is
    # stored but not mutated here -- confirm callers never mutate spec.params
    # in place on a spec built with the default.
    self.name = name
    self.mapreduce_id = mapreduce_id
    self.mapper = MapperSpec.from_json(mapper_spec)
    self.params = params
    self.hooks_class_name = hooks_class_name
    self.__hooks = None
    self.get_hooks() # Fail fast on an invalid hook class.
  def get_hooks(self):
    """Returns a hooks.Hooks class or None if no hooks class has been set."""
    # Lazily instantiate and cache the hooks object on first access.
    if self.__hooks is None and self.hooks_class_name is not None:
      hooks_class = util.for_name(self.hooks_class_name)
      if not isinstance(hooks_class, type):
        raise ValueError("hooks_class_name must refer to a class, got %s" %
                         type(hooks_class).__name__)
      if not issubclass(hooks_class, hooks.Hooks):
        raise ValueError(
            "hooks_class_name must refer to a hooks.Hooks subclass")
      self.__hooks = hooks_class(self)
    return self.__hooks
  def to_json(self):
    """Serializes all data in this mapreduce spec into json form.
    Returns:
      data in json format.
    """
    mapper_spec = self.mapper.to_json()
    return {
        "name": self.name,
        "mapreduce_id": self.mapreduce_id,
        "mapper_spec": mapper_spec,
        "params": self.params,
        "hooks_class_name": self.hooks_class_name,
    }
  @classmethod
  def from_json(cls, json):
    """Create new MapreduceSpec from the json, encoded by to_json.
    Args:
      json: json representation of MapreduceSpec.
    Returns:
      an instance of MapreduceSpec with all data deserialized from json.
    """
    mapreduce_spec = cls(json["name"],
                         json["mapreduce_id"],
                         json["mapper_spec"],
                         json.get("params"),
                         json.get("hooks_class_name"))
    return mapreduce_spec
  def __str__(self):
    return str(self.to_json())
  def __eq__(self, other):
    # Two specs are considered equal iff their serialized forms are equal.
    if not isinstance(other, self.__class__):
      return False
    return self.to_json() == other.to_json()
  @classmethod
  def _get_mapreduce_spec(cls, mr_id):
    """Get Mapreduce spec from mr id."""
    # Specs are immutable after start, so caching them in memcache is safe.
    key = 'GAE-MR-spec: %s' % mr_id
    spec_json = memcache.get(key)
    if spec_json:
      return cls.from_json(spec_json)
    # Cache miss: load the authoritative copy from datastore and cache it.
    state = MapreduceState.get_by_job_id(mr_id)
    spec = state.mapreduce_spec
    spec_json = spec.to_json()
    memcache.set(key, spec_json)
    return spec
class MapreduceState(db.Model):
"""Holds accumulated state of mapreduce execution.
MapreduceState is stored in datastore with a key name equal to the
mapreduce ID. Only controller tasks can write to MapreduceState.
Properties:
mapreduce_spec: cached deserialized MapreduceSpec instance. read-only
active: if this MR is still running.
last_poll_time: last time controller job has polled this mapreduce.
counters_map: shard's counters map as CountersMap. Mirrors
counters_map_json.
chart_url: last computed mapreduce status chart url. This chart displays the
progress of all the shards the best way it can.
sparkline_url: last computed mapreduce status chart url in small format.
result_status: If not None, the final status of the job.
active_shards: How many shards are still processing. This starts as 0,
then set by KickOffJob handler to be the actual number of input
readers after input splitting, and is updated by Controller task
as shards finish.
start_time: When the job started.
writer_state: Json property to be used by writer to store its state.
This is filled when single output per job. Will be deprecated.
Use OutputWriter.get_filenames instead.
"""
RESULT_SUCCESS = "success"
RESULT_FAILED = "failed"
RESULT_ABORTED = "aborted"
_RESULTS = frozenset([RESULT_SUCCESS, RESULT_FAILED, RESULT_ABORTED])
# Functional properties.
# TODO(user): Replace mapreduce_spec with job_config.
mapreduce_spec = json_util.JsonProperty(MapreduceSpec, indexed=False)
active = db.BooleanProperty(default=True, indexed=False)
last_poll_time = db.DateTimeProperty(required=True)
counters_map = json_util.JsonProperty(
CountersMap, default=CountersMap(), indexed=False)
app_id = db.StringProperty(required=False, indexed=True)
writer_state = json_util.JsonProperty(dict, indexed=False)
active_shards = db.IntegerProperty(default=0, indexed=False)
failed_shards = db.IntegerProperty(default=0, indexed=False)
aborted_shards = db.IntegerProperty(default=0, indexed=False)
result_status = db.StringProperty(required=False, choices=_RESULTS)
# For UI purposes only.
chart_url = db.TextProperty(default="")
chart_width = db.IntegerProperty(default=300, indexed=False)
sparkline_url = db.TextProperty(default="")
start_time = db.DateTimeProperty(auto_now_add=True)
@classmethod
def kind(cls):
"""Returns entity kind."""
return "_AE_MR_MapreduceState"
@classmethod
def get_key_by_job_id(cls, mapreduce_id):
"""Retrieves the Key for a Job.
Args:
mapreduce_id: The job to retrieve.
Returns:
Datastore Key that can be used to fetch the MapreduceState.
"""
return db.Key.from_path(cls.kind(), str(mapreduce_id))
@classmethod
def get_by_job_id(cls, mapreduce_id):
"""Retrieves the instance of state for a Job.
Args:
mapreduce_id: The mapreduce job to retrieve.
Returns:
instance of MapreduceState for passed id.
"""
return db.get(cls.get_key_by_job_id(mapreduce_id))
  def set_processed_counts(self, shards_processed):
    """Updates a chart url to display processed count for each shard.

    Args:
      shards_processed: list of integers with number of processed entities in
        each shard
    """
    chart = google_chart_api.BarChart(shards_processed)
    shard_count = len(shards_processed)

    if shards_processed:
      # Only 16 labels on the whole chart.
      # NOTE: relies on Python 2 integer division; under Python 3 this
      # stride would be a float.
      stride_length = max(1, shard_count / 16)
      chart.bottom.labels = []
      for x in xrange(shard_count):
        # Label every stride-th bar, plus always the last one.
        if (x % stride_length == 0 or
            x == shard_count - 1):
          chart.bottom.labels.append(x)
        else:
          chart.bottom.labels.append("")
      chart.left.labels = ["0", str(max(shards_processed))]
      chart.left.min = 0

    # Width scales with shard count but is clamped to [300, 700] pixels.
    self.chart_width = min(700, max(300, shard_count * 20))
    self.chart_url = chart.display.Url(self.chart_width, 200)

  def get_processed(self):
    """Number of processed entities.

    Returns:
      The total number of processed entities as int.
    """
    return self.counters_map.get(context.COUNTER_MAPPER_CALLS)

  # Read-only alias so callers can use `state.processed`.
  processed = property(get_processed)
  @staticmethod
  def create_new(mapreduce_id=None,
                 gettime=datetime.datetime.now):
    """Create a new MapreduceState.

    The returned entity is not written to the datastore; the caller is
    responsible for saving it.

    Args:
      mapreduce_id: Mapreduce id as string. Generated when not supplied.
      gettime: Used for testing.
    """
    if not mapreduce_id:
      mapreduce_id = MapreduceState.new_mapreduce_id()
    state = MapreduceState(key_name=mapreduce_id,
                           last_poll_time=gettime())
    # Start with empty chart data until shards report progress.
    state.set_processed_counts([])
    return state

  @staticmethod
  def new_mapreduce_id():
    """Generate new mapreduce id."""
    return util._get_descending_key()

  def __eq__(self, other):
    if not isinstance(other, self.__class__):
      return False
    # NOTE(review): db.Model.properties() returns the *class-level* property
    # definitions, not instance values, so two instances of the same class
    # always compare equal here. Value comparison looks intended — confirm
    # before relying on this equality.
    return self.properties() == other.properties()
class TransientShardState(object):
  """A shard's states that are kept in task payload.

  TransientShardState holds two types of states:
  1. Some states just don't need to be saved to datastore. e.g.
     serialized input reader and output writer instances.
  2. Some states are duplicated from datastore, e.g. slice_id, shard_id.
     These are used to validate the task.
  """

  def __init__(self,
               base_path,
               mapreduce_spec,
               shard_id,
               slice_id,
               input_reader,
               initial_input_reader,
               output_writer=None,
               retries=0,
               handler=None):
    """Init.

    Args:
      base_path: base path of this mapreduce job. Deprecated.
      mapreduce_spec: an instance of MapReduceSpec.
      shard_id: shard id.
      slice_id: slice id. When enqueuing task for the next slice, this number
        is incremented by 1.
      input_reader: input reader instance for this shard.
      initial_input_reader: the input reader instance before any iteration.
        Used by shard retry.
      output_writer: output writer instance for this shard, if exists.
      retries: the number of retries of the current shard. Used to drop
        tasks from old retries.
      handler: map/reduce handler.
    """
    self.base_path = base_path
    self.mapreduce_spec = mapreduce_spec
    self.shard_id = shard_id
    self.slice_id = slice_id
    self.input_reader = input_reader
    self.initial_input_reader = initial_input_reader
    self.output_writer = output_writer
    self.retries = retries
    self.handler = handler
    # Snapshot of the reader state at construction time; used by
    # advance_for_next_slice(recovery_slice=True) to rewind the reader
    # to the beginning of the slice.
    self._input_reader_json = self.input_reader.to_json()

  def reset_for_retry(self, output_writer):
    """Reset self for shard retry.

    Args:
      output_writer: new output writer that contains new output files.
    """
    # Rewind to the pre-iteration reader and restart slice numbering.
    self.input_reader = self.initial_input_reader
    self.slice_id = 0
    self.retries += 1
    self.output_writer = output_writer
    self.handler = self.mapreduce_spec.mapper.handler

  def advance_for_next_slice(self, recovery_slice=False):
    """Advance relevant states for next slice.

    Args:
      recovery_slice: True if this slice is running recovery logic.
        See handlers.MapperWorkerCallbackHandler._attempt_slice_recovery
        for more info.
    """
    if recovery_slice:
      # Skip the recovery slice's own number (hence +2) and restore the
      # input reader to the beginning of the slice.
      self.slice_id += 2
      self.input_reader = self.input_reader.from_json(self._input_reader_json)
    else:
      self.slice_id += 1

  def to_dict(self):
    """Convert state to dictionary to save in task payload."""
    result = {"mapreduce_spec": self.mapreduce_spec.to_json_str(),
              "shard_id": self.shard_id,
              "slice_id": str(self.slice_id),
              "input_reader_state": self.input_reader.to_json_str(),
              "initial_input_reader_state":
              self.initial_input_reader.to_json_str(),
              "retries": str(self.retries)}
    if self.output_writer:
      result["output_writer_state"] = self.output_writer.to_json_str()
    serialized_handler = util.try_serialize_handler(self.handler)
    if serialized_handler:
      result["serialized_handler"] = serialized_handler
    return result

  @classmethod
  def from_request(cls, request):
    """Create new TransientShardState from webapp request."""
    mapreduce_spec = MapreduceSpec.from_json_str(request.get("mapreduce_spec"))
    mapper_spec = mapreduce_spec.mapper
    # Deserialize both the current and the initial (pre-iteration) readers.
    input_reader_spec_dict = simplejson.loads(request.get("input_reader_state"),
                                              cls=json_util.JsonDecoder)
    input_reader = mapper_spec.input_reader_class().from_json(
        input_reader_spec_dict)
    initial_input_reader_spec_dict = simplejson.loads(
        request.get("initial_input_reader_state"), cls=json_util.JsonDecoder)
    initial_input_reader = mapper_spec.input_reader_class().from_json(
        initial_input_reader_spec_dict)

    output_writer = None
    if mapper_spec.output_writer_class():
      # Missing state defaults to "{}" so from_json sees an empty dict.
      output_writer = mapper_spec.output_writer_class().from_json(
          simplejson.loads(request.get("output_writer_state", "{}"),
                           cls=json_util.JsonDecoder))
      assert isinstance(output_writer, mapper_spec.output_writer_class()), (
          "%s.from_json returned an instance of wrong class: %s" % (
              mapper_spec.output_writer_class(),
              output_writer.__class__))

    handler = util.try_deserialize_handler(request.get("serialized_handler"))
    if not handler:
      handler = mapreduce_spec.mapper.handler

    return cls(mapreduce_spec.params["base_path"],
               mapreduce_spec,
               str(request.get("shard_id")),
               int(request.get("slice_id")),
               input_reader,
               initial_input_reader,
               output_writer=output_writer,
               retries=int(request.get("retries")),
               handler=handler)
class ShardState(db.Model):
  """Single shard execution state.

  The shard state is stored in the datastore and is later aggregated by
  controller task. ShardState key_name is equal to shard_id.

  Shard state contains critical state to ensure the correctness of
  shard execution. It is the single source of truth about a shard's
  progress. For example:
  1. A slice is allowed to run only if its payload matches shard state's
     expectation.
  2. A slice is considered running only if it has acquired the shard's lock.
  3. A slice is considered done only if it has successfully committed shard
     state to db.

  Properties about the shard:
    active: if we have this shard still running as boolean.
    counters_map: shard's counters map as CountersMap. All counters yielded
      within mapreduce are stored here.
    mapreduce_id: unique id of the mapreduce.
    shard_id: unique id of this shard as string.
    shard_number: ordered number for this shard.
    retries: the number of times this shard has been retried.
    result_status: If not None, the final status of this shard.
    update_time: The last time this shard state was updated.
    shard_description: A string description of the work this shard will do.
    last_work_item: A string description of the last work item processed.
    writer_state: writer state for this shard. The shard's output writer
      instance can save in-memory output references to this field in its
      "finalize" method.

  Properties about slice management:
    slice_id: slice id of current executing slice. A slice's task
      will not run unless its slice_id matches this. Initial
      value is 0. By the end of slice execution, this number is
      incremented by 1.
    slice_start_time: a slice updates this to now at the beginning of
      execution. If the transaction succeeds, the current task holds
      a lease of slice duration + some grace period. During this time, no
      other task with the same slice_id will execute. Upon slice failure,
      the task should try to unset this value to allow retries to carry on
      ASAP.
    slice_request_id: the request id that holds/held the lease. When lease has
      expired, new request needs to verify that said request has indeed
      ended according to logs API. Do this only when lease has expired
      because logs API is expensive. This field should always be set/unset
      with slice_start_time. It is possible Logs API doesn't log a request
      at all or doesn't log the end of a request. So a new request can
      proceed after a long conservative timeout.
    slice_retries: the number of times a slice has been retried due to
      processing data when lock is held. Taskqueue/datastore errors
      related to slice/shard management are not counted. This count is
      only a lower bound and is used to determined when to fail a slice
      completely.
    acquired_once: whether the lock for this slice has been acquired at
      least once. When this is True, duplicates in outputs are possible.
  """

  RESULT_SUCCESS = "success"
  RESULT_FAILED = "failed"
  # Shard can be in aborted state when user issued abort, or controller
  # issued abort because some other shard failed.
  RESULT_ABORTED = "aborted"

  _RESULTS = frozenset([RESULT_SUCCESS, RESULT_FAILED, RESULT_ABORTED])

  # Maximum number of shard states to hold in memory at any time.
  _MAX_STATES_IN_MEMORY = 10

  # Functional properties.
  mapreduce_id = db.StringProperty(required=True)
  active = db.BooleanProperty(default=True, indexed=False)
  counters_map = json_util.JsonProperty(
      CountersMap, default=CountersMap(), indexed=False)
  result_status = db.StringProperty(choices=_RESULTS, indexed=False)
  retries = db.IntegerProperty(default=0, indexed=False)
  writer_state = json_util.JsonProperty(dict, indexed=False)
  slice_id = db.IntegerProperty(default=0, indexed=False)
  slice_start_time = db.DateTimeProperty(indexed=False)
  slice_request_id = db.ByteStringProperty(indexed=False)
  slice_retries = db.IntegerProperty(default=0, indexed=False)
  acquired_once = db.BooleanProperty(default=False, indexed=False)

  # For UI purposes only.
  update_time = db.DateTimeProperty(auto_now=True, indexed=False)
  shard_description = db.TextProperty(default="")
  last_work_item = db.TextProperty(default="")

  def __str__(self):
    kv = {"active": self.active,
          "slice_id": self.slice_id,
          "last_work_item": self.last_work_item,
          "update_time": self.update_time}
    # Include optional fields only when set, to keep output compact.
    if self.result_status:
      kv["result_status"] = self.result_status
    if self.retries:
      kv["retries"] = self.retries
    if self.slice_start_time:
      kv["slice_start_time"] = self.slice_start_time
    if self.slice_retries:
      kv["slice_retries"] = self.slice_retries
    if self.slice_request_id:
      kv["slice_request_id"] = self.slice_request_id
    if self.acquired_once:
      kv["acquired_once"] = self.acquired_once
    # NOTE: dict.keys() returning a sortable list is Python 2 behavior.
    keys = kv.keys()
    keys.sort()

    result = "ShardState is {"
    for k in keys:
      result += k + ":" + str(kv[k]) + ","
    result += "}"
    return result

  def reset_for_retry(self):
    """Reset self for shard retry."""
    # Everything except mapreduce_id/shard identity goes back to defaults.
    self.retries += 1
    self.last_work_item = ""
    self.active = True
    self.result_status = None
    self.counters_map = CountersMap()
    self.slice_id = 0
    self.slice_start_time = None
    self.slice_request_id = None
    self.slice_retries = 0
    self.acquired_once = False

  def advance_for_next_slice(self, recovery_slice=False):
    """Advance self for next slice.

    Args:
      recovery_slice: True if this slice is running recovery logic.
        See handlers.MapperWorkerCallbackHandler._attempt_slice_recovery
        for more info.
    """
    # Release the slice lease and clear per-slice bookkeeping.
    self.slice_start_time = None
    self.slice_request_id = None
    self.slice_retries = 0
    self.acquired_once = False
    if recovery_slice:
      # Skip the recovery slice's own number, mirroring
      # TransientShardState.advance_for_next_slice.
      self.slice_id += 2
    else:
      self.slice_id += 1

  def set_for_failure(self):
    """Mark the shard finished with FAILED status."""
    self.active = False
    self.result_status = self.RESULT_FAILED

  def set_for_abort(self):
    """Mark the shard finished with ABORTED status."""
    self.active = False
    self.result_status = self.RESULT_ABORTED

  def set_for_success(self):
    """Mark the shard finished with SUCCESS status and release the lease."""
    self.active = False
    self.result_status = self.RESULT_SUCCESS
    self.slice_start_time = None
    self.slice_request_id = None
    self.slice_retries = 0
    self.acquired_once = False

  def copy_from(self, other_state):
    """Copy data from another shard state entity to self."""
    for prop in self.properties().values():
      setattr(self, prop.name, getattr(other_state, prop.name))

  def __eq__(self, other):
    if not isinstance(other, self.__class__):
      return False
    # NOTE(review): compares class-level property *definitions*, not values;
    # same-class instances always compare equal. See MapreduceState.__eq__.
    return self.properties() == other.properties()

  def get_shard_number(self):
    """Gets the shard number from the key name."""
    # shard_id format is "<mapreduce_id>-<shard_number>".
    return int(self.key().name().split("-")[-1])

  shard_number = property(get_shard_number)

  def get_shard_id(self):
    """Returns the shard ID."""
    return self.key().name()

  shard_id = property(get_shard_id)

  @classmethod
  def kind(cls):
    """Returns entity kind."""
    return "_AE_MR_ShardState"

  @classmethod
  def shard_id_from_number(cls, mapreduce_id, shard_number):
    """Get shard id by mapreduce id and shard number.

    Args:
      mapreduce_id: mapreduce id as string.
      shard_number: shard number to compute id for as int.

    Returns:
      shard id as string.
    """
    return "%s-%d" % (mapreduce_id, shard_number)

  @classmethod
  def get_key_by_shard_id(cls, shard_id):
    """Retrieves the Key for this ShardState.

    Args:
      shard_id: The shard ID to fetch.

    Returns:
      The Datatore key to use to retrieve this ShardState.
    """
    return db.Key.from_path(cls.kind(), shard_id)

  @classmethod
  def get_by_shard_id(cls, shard_id):
    """Get shard state from datastore by shard_id.

    Args:
      shard_id: shard id as string.

    Returns:
      ShardState for given shard id or None if it's not found.
    """
    return cls.get_by_key_name(shard_id)

  @classmethod
  def find_by_mapreduce_state(cls, mapreduce_state):
    """Find all shard states for given mapreduce.

    Deprecated. Use find_all_by_mapreduce_state.
    This will be removed after 1.8.9 release.

    Args:
      mapreduce_state: MapreduceState instance

    Returns:
      A list of ShardStates.
    """
    return list(cls.find_all_by_mapreduce_state(mapreduce_state))

  @classmethod
  def find_all_by_mapreduce_state(cls, mapreduce_state):
    """Find all shard states for given mapreduce.

    Never runs within a transaction since it may touch >5 entity groups (one
    for each shard).

    Args:
      mapreduce_state: MapreduceState instance

    Yields:
      shard states sorted by shard id.
    """
    keys = cls.calculate_keys_by_mapreduce_state(mapreduce_state)
    i = 0
    while i < len(keys):
      @db.non_transactional
      def no_tx_get(i):
        # Fetch at most _MAX_STATES_IN_MEMORY states per datastore call.
        return db.get(keys[i:i+cls._MAX_STATES_IN_MEMORY])
      # A separate function is needed so that the non-transactional fetch
      # can be mixed into this generator.
      states = no_tx_get(i)
      for s in states:
        i += 1
        if s is not None:
          yield s

  @classmethod
  def calculate_keys_by_mapreduce_state(cls, mapreduce_state):
    """Calculate all shard states keys for given mapreduce.

    Args:
      mapreduce_state: MapreduceState instance

    Returns:
      A list of keys for shard states, sorted by shard id.
      The corresponding shard states may not exist.
    """
    if mapreduce_state is None:
      return []

    keys = []
    for i in range(mapreduce_state.mapreduce_spec.mapper.shard_count):
      shard_id = cls.shard_id_from_number(mapreduce_state.key().name(), i)
      keys.append(cls.get_key_by_shard_id(shard_id))
    return keys

  @classmethod
  def create_new(cls, mapreduce_id, shard_number):
    """Create new shard state.

    The returned entity is not saved; callers must put() it.

    Args:
      mapreduce_id: unique mapreduce id as string.
      shard_number: shard number for which to create shard state.

    Returns:
      new instance of ShardState ready to put into datastore.
    """
    shard_id = cls.shard_id_from_number(mapreduce_id, shard_number)
    state = cls(key_name=shard_id,
                mapreduce_id=mapreduce_id)
    return state
class MapreduceControl(db.Model):
  """Datastore entity used to control mapreduce job execution.

  Only one command may be sent to jobs at a time.

  Properties:
    command: The command to send to the job.
  """

  ABORT = "abort"

  _COMMANDS = frozenset([ABORT])
  _KEY_NAME = "command"

  command = db.TextProperty(choices=_COMMANDS, required=True)

  @classmethod
  def kind(cls):
    """Returns entity kind."""
    return "_AE_MR_MapreduceControl"

  @classmethod
  def get_key_by_job_id(cls, mapreduce_id):
    """Retrieves the Key for a mapreduce ID.

    Args:
      mapreduce_id: The job to fetch.

    Returns:
      Datastore Key for the command for the given job ID.
    """
    return db.Key.from_path(cls.kind(), "%s:%s" % (mapreduce_id, cls._KEY_NAME))

  @classmethod
  def abort(cls, mapreduce_id, **kwargs):
    """Causes a job to abort.

    Args:
      mapreduce_id: The job to abort. Not verified as a valid job.
      **kwargs: passed through to the datastore put() call.
    """
    cls(key_name="%s:%s" % (mapreduce_id, cls._KEY_NAME),
        command=cls.ABORT).put(**kwargs)
class QuerySpec(object):
  """Encapsulates everything about a query needed by DatastoreInputReader."""

  # Batch size used when the caller does not supply one.
  DEFAULT_BATCH_SIZE = 50

  def __init__(self,
               entity_kind,
               keys_only=None,
               filters=None,
               batch_size=None,
               model_class_path=None,
               app=None,
               ns=None):
    """Initialize the query description.

    Falsy values for keys_only, filters and batch_size are normalized to
    False, None and DEFAULT_BATCH_SIZE respectively.
    """
    self.entity_kind = entity_kind
    self.keys_only = keys_only if keys_only else False
    self.filters = filters if filters else None
    self.batch_size = batch_size if batch_size else self.DEFAULT_BATCH_SIZE
    self.model_class_path = model_class_path
    self.app = app
    self.ns = ns

  def to_json(self):
    """Serialize this spec into a json-compatible dict."""
    return {
        "entity_kind": self.entity_kind,
        "keys_only": self.keys_only,
        "filters": self.filters,
        "batch_size": self.batch_size,
        "model_class_path": self.model_class_path,
        "app": self.app,
        "ns": self.ns,
    }

  @classmethod
  def from_json(cls, json):
    """Reconstruct a QuerySpec from a dict produced by to_json."""
    return cls(
        json["entity_kind"],
        keys_only=json["keys_only"],
        filters=json["filters"],
        batch_size=json["batch_size"],
        model_class_path=json["model_class_path"],
        app=json["app"],
        ns=json["ns"])
| Tong-Chen/genomics-tools | mapreduce-python/mapreduce/model.py | Python | apache-2.0 | 39,526 |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Read Minigo game examples from a Bigtable.
"""
import bisect
import collections
import datetime
import math
import multiprocessing
import operator
import re
import struct
import time
import numpy as np
from tqdm import tqdm
from absl import flags
from google.cloud import bigtable
from google.cloud.bigtable import row_filters as bigtable_row_filters
from google.cloud.bigtable import column_family as bigtable_column_family
import tensorflow as tf
from tensorflow.contrib import cloud as contrib_cloud
import utils
# Command-line flags identifying the Cloud Bigtable project/instance/table.
flags.DEFINE_string('cbt_project', None,
                    'The project used to connect to the cloud bigtable ')

# cbt_instance: identifier of Cloud Bigtable instance in cbt_project.
flags.DEFINE_string('cbt_instance', None,
                    'The identifier of the cloud bigtable instance in cbt_project')

# cbt_table: identifier of Cloud Bigtable table in cbt_instance.
# The cbt_table is expected to be accompanied by one with an "-nr"
# suffix, for "no-resign".
flags.DEFINE_string('cbt_table', None,
                    'The table within the cloud bigtable instance to use')

FLAGS = flags.FLAGS
# Constants

# Row key prefixes: g_<game#>_ for per-move rows, ct_<game#>_ for the
# per-game move-count rows. Game numbers are zero-padded to 10 digits so
# lexicographic key order matches numeric order.
ROW_PREFIX = 'g_{:0>10}_'
ROWCOUNT_PREFIX = 'ct_{:0>10}_'

# Model tables (models, models_for_eval) row key
MODEL_PREFIX = "m_{run}_{num:0>10}"

# Name of model
MODEL_NAME = b'model'

# Maximum number of concurrent processes to use when issuing requests against
# Bigtable. Value taken from default in the load-testing tool described here:
#
# https://github.com/googleapis/google-cloud-go/blob/master/bigtable/cmd/loadtest/loadtest.go
MAX_BT_CONCURRENCY = 100

# Column family and qualifier constants.
# Column Families
METADATA = 'metadata'
TFEXAMPLE = 'tfexample'

# Column Qualifiers
# Note that in CBT, families are strings and qualifiers are bytes.
TABLE_STATE = b'table_state'
WAIT_CELL = b'wait_for_game_number'
GAME_COUNTER = b'game_counter'
MOVE_COUNT = b'move_count'

# Patterns matching the two row key formats above.
_game_row_key = re.compile(r'g_(\d+)_m_(\d+)')
_game_from_counter = re.compile(r'ct_(\d+)_')

# The string information needed to construct a client of a Bigtable table.
BigtableSpec = collections.namedtuple(
    'BigtableSpec',
    ['project', 'instance', 'table'])

# Information needed to create a mix of two Game queues.
#   r = resign/regular; c = calibration (no-resign)
GameMix = collections.namedtuple(
    'GameMix',
    ['games_r', 'moves_r',
     'games_c', 'moves_c',
     'selection'])
def cbt_intvalue(value):
  """Decode a big-endian 64-bit integer stored by Cloud Bigtable.

  Cloud Bigtable stores integers as 8-byte big-endian values and performs
  this translation when integers are being set, but values read back are
  raw bytes that need to be decoded (here via struct format '>q').

  Args:
    value: the 8 raw bytes read from a cell.

  Returns:
    The decoded int.
  """
  (decoded,) = struct.unpack('>q', value)
  return int(decoded)
def make_single_array(ds, batch_size=8*1024):
  """Create a single numpy array from a dataset.

  The dataset must have only one dimension, that is,
  the length of its `output_shapes` and `output_types`
  is 1, and its output shape must be `[]`, that is,
  every tensor in the dataset must be a scalar.

  Args:
    ds: a TF Dataset.
    batch_size: how many elements to read per pass

  Returns:
    a single numpy array.

  Raises:
    ValueError: if the dataset has more than one type/shape or is not
      comprised of scalars.
  """
  if isinstance(ds.output_types, tuple) or isinstance(ds.output_shapes, tuple):
    raise ValueError('Dataset must have a single type and shape')
  nshapes = len(ds.output_shapes)
  if nshapes > 0:
    raise ValueError('Dataset must be comprised of scalars (TensorShape=[])')
  batches = []
  with tf.Session() as sess:
    ds = ds.batch(batch_size)
    iterator = ds.make_initializable_iterator()
    sess.run(iterator.initializer)
    get_next = iterator.get_next()
    with tqdm(desc='Elements', unit_scale=1) as pbar:
      try:
        # Drain the dataset batch by batch until exhaustion.
        while True:
          batches.append(sess.run(get_next))
          pbar.update(len(batches[-1]))
      except tf.errors.OutOfRangeError:
        pass
  if batches:
    return np.concatenate(batches)
  # Empty dataset: return a zero-length array of the matching dtype.
  return np.array([], dtype=ds.output_types.as_numpy_dtype)
def _histogram_move_keys_by_game(sess, ds, batch_size=8*1024):
  """Given dataset of key names, return histogram of moves/game.

  Move counts are written by the game players, so
  this is mostly useful for repair or backfill.

  Args:
    sess: TF session
    ds: TF dataset containing game move keys.
    batch_size: performance tuning parameter

  Returns:
    collections.Counter mapping game row prefix to its move count.
  """
  ds = ds.batch(batch_size)
  # Turns 'g_0000001234_m_133' into 'g_0000001234'
  ds = ds.map(lambda x: tf.strings.substr(x, 0, 12))
  iterator = ds.make_initializable_iterator()
  sess.run(iterator.initializer)
  get_next = iterator.get_next()
  h = collections.Counter()
  try:
    while True:
      h.update(sess.run(get_next))
  except tf.errors.OutOfRangeError:
    pass
  # NOTE: Cannot be truly sure the count is right till the end.
  return h
def _game_keys_as_array(ds):
  """Turn keys of a Bigtable dataset into an array.

  Take g_GGG_m_MMM and create GGG.MMM numbers.

  Valuable when visualizing the distribution of a given dataset in
  the game keyspace.
  """
  # Drop the cell value; only the row key is needed.
  ds = ds.map(lambda row_key, cell: row_key)
  # want 'g_0000001234_m_133' is '0000001234.133' and so forth
  ds = ds.map(lambda x:
              tf.strings.to_number(tf.strings.substr(x, 2, 10) +
                                   '.' +
                                   tf.strings.substr(x, 15, 3),
                                   out_type=tf.float64))
  return make_single_array(ds)
def _delete_rows(args):
  """Delete the given row keys from the given Bigtable.

  The args are (BigtableSpec, row_keys), but are passed
  as a single argument in order to work with
  multiprocessing.Pool.map. This is also the reason why this is a
  top-level function instead of a method.

  Returns:
    the row keys that were submitted for deletion.
  """
  btspec, row_keys = args
  bt_table = bigtable.Client(btspec.project).instance(
      btspec.instance).table(btspec.table)
  # Queue a delete mutation on each row, then send them in one batch.
  rows = [bt_table.row(k) for k in row_keys]
  for r in rows:
    r.delete()
  bt_table.mutate_rows(rows)
  return row_keys
class GameQueue:
"""Queue of games stored in a Cloud Bigtable.
The state of the table is stored in the `table_state`
row, which includes the columns `metadata:game_counter`.
"""
def __init__(self, project_name, instance_name, table_name):
"""Constructor.
Args:
project_name: string name of GCP project having table.
instance_name: string name of CBT instance in project.
table_name: string name of CBT table in instance.
"""
self.btspec = BigtableSpec(project_name, instance_name, table_name)
self.bt_table = bigtable.Client(
self.btspec.project, admin=True).instance(
self.btspec.instance).table(self.btspec.table)
self.tf_table = contrib_cloud.BigtableClient(
self.btspec.project,
self.btspec.instance).table(self.btspec.table)
def create(self):
"""Create the table underlying the queue.
Create the 'metadata' and 'tfexample' column families
and their properties.
"""
if self.bt_table.exists():
utils.dbg('Table already exists')
return
max_versions_rule = bigtable_column_family.MaxVersionsGCRule(1)
self.bt_table.create(column_families={
METADATA: max_versions_rule,
TFEXAMPLE: max_versions_rule})
@property
def latest_game_number(self):
"""Return the number of the next game to be written."""
table_state = self.bt_table.read_row(
TABLE_STATE,
filter_=bigtable_row_filters.ColumnRangeFilter(
METADATA, GAME_COUNTER, GAME_COUNTER))
if table_state is None:
return 0
return cbt_intvalue(table_state.cell_value(METADATA, GAME_COUNTER))
@latest_game_number.setter
def latest_game_number(self, latest):
table_state = self.bt_table.row(TABLE_STATE)
table_state.set_cell(METADATA, GAME_COUNTER, int(latest))
table_state.commit()
def games_by_time(self, start_game, end_game):
"""Given a range of games, return the games sorted by time.
Returns [(time, game_number), ...]
The time will be a `datetime.datetime` and the game
number is the integer used as the basis of the row ID.
Note that when a cluster of self-play nodes are writing
concurrently, the game numbers may be out of order.
"""
move_count = b'move_count'
rows = self.bt_table.read_rows(
ROWCOUNT_PREFIX.format(start_game),
ROWCOUNT_PREFIX.format(end_game),
filter_=bigtable_row_filters.ColumnRangeFilter(
METADATA, move_count, move_count))
def parse(r):
rk = str(r.row_key, 'utf-8')
game = _game_from_counter.match(rk).groups()[0]
return (r.cells[METADATA][move_count][0].timestamp, game)
return sorted([parse(r) for r in rows], key=operator.itemgetter(0))
def delete_row_range(self, format_str, start_game, end_game):
"""Delete rows related to the given game range.
Args:
format_str: a string to `.format()` by the game numbers
in order to create the row prefixes.
start_game: the starting game number of the deletion.
end_game: the ending game number of the deletion.
"""
row_keys = make_single_array(
self.tf_table.keys_by_range_dataset(
format_str.format(start_game),
format_str.format(end_game)))
row_keys = list(row_keys)
if not row_keys:
utils.dbg('No rows left for games %d..%d' % (
start_game, end_game))
return
utils.dbg('Deleting %d rows: %s..%s' % (
len(row_keys), row_keys[0], row_keys[-1]))
# Reverse the keys so that the queue is left in a more
# sensible end state if you change your mind (say, due to a
# mistake in the timestamp) and abort the process: there will
# be a bit trimmed from the end, rather than a bit
# trimmed out of the middle.
row_keys.reverse()
total_keys = len(row_keys)
utils.dbg('Deleting total of %d keys' % total_keys)
concurrency = min(MAX_BT_CONCURRENCY,
multiprocessing.cpu_count() * 2)
with multiprocessing.Pool(processes=concurrency) as pool:
batches = []
with tqdm(desc='Keys', unit_scale=2, total=total_keys) as pbar:
for b in utils.iter_chunks(bigtable.row.MAX_MUTATIONS,
row_keys):
pbar.update(len(b))
batches.append((self.btspec, b))
if len(batches) >= concurrency:
pool.map(_delete_rows, batches)
batches = []
pool.map(_delete_rows, batches)
batches = []
def trim_games_since(self, t, max_games=500000):
"""Trim off the games since the given time.
Search back no more than max_games for this time point, locate
the game there, and remove all games since that game,
resetting the latest game counter.
If `t` is a `datetime.timedelta`, then the target time will be
found by subtracting that delta from the time of the last
game. Otherwise, it will be the target time.
"""
latest = self.latest_game_number
earliest = int(latest - max_games)
gbt = self.games_by_time(earliest, latest)
if not gbt:
utils.dbg('No games between %d and %d' % (earliest, latest))
return
most_recent = gbt[-1]
if isinstance(t, datetime.timedelta):
target = most_recent[0] - t
else:
target = t
i = bisect.bisect_right(gbt, (target,))
if i >= len(gbt):
utils.dbg('Last game is already at %s' % gbt[-1][0])
return
when, which = gbt[i]
utils.dbg('Most recent: %s %s' % most_recent)
utils.dbg(' Target: %s %s' % (when, which))
which = int(which)
self.delete_row_range(ROW_PREFIX, which, latest)
self.delete_row_range(ROWCOUNT_PREFIX, which, latest)
self.latest_game_number = which
def bleakest_moves(self, start_game, end_game):
"""Given a range of games, return the bleakest moves.
Returns a list of (game, move, q) sorted by q.
"""
bleak = b'bleakest_q'
rows = self.bt_table.read_rows(
ROW_PREFIX.format(start_game),
ROW_PREFIX.format(end_game),
filter_=bigtable_row_filters.ColumnRangeFilter(
METADATA, bleak, bleak))
def parse(r):
rk = str(r.row_key, 'utf-8')
g, m = _game_row_key.match(rk).groups()
q = r.cell_value(METADATA, bleak)
return int(g), int(m), float(q)
return sorted([parse(r) for r in rows], key=operator.itemgetter(2))
def require_fresh_games(self, number_fresh):
"""Require a given number of fresh games to be played.
Args:
number_fresh: integer, number of new fresh games needed
Increments the cell `table_state=metadata:wait_for_game_number`
by the given number of games. This will cause
`self.wait_for_fresh_games()` to block until the game
counter has reached this number.
"""
latest = self.latest_game_number
table_state = self.bt_table.row(TABLE_STATE)
table_state.set_cell(METADATA, WAIT_CELL, int(latest + number_fresh))
table_state.commit()
print("== Setting wait cell to ", int(latest + number_fresh), flush=True)
def wait_for_fresh_games(self, poll_interval=15.0):
"""Block caller until required new games have been played.
Args:
poll_interval: number of seconds to wait between checks
If the cell `table_state=metadata:wait_for_game_number` exists,
then block the caller, checking every `poll_interval` seconds,
until `table_state=metadata:game_counter is at least the value
in that cell.
"""
wait_until_game = self.read_wait_cell()
if not wait_until_game:
return
latest_game = self.latest_game_number
last_latest = latest_game
while latest_game < wait_until_game:
utils.dbg('Latest game {} not yet at required game {} '
'(+{}, {:0.3f} games/sec)'.format(
latest_game,
wait_until_game,
latest_game - last_latest,
(latest_game - last_latest) / poll_interval
))
time.sleep(poll_interval)
last_latest = latest_game
latest_game = self.latest_game_number
def read_wait_cell(self):
"""Read the value of the cell holding the 'wait' value,
Returns the int value of whatever it has, or None if the cell doesn't
exist.
"""
table_state = self.bt_table.read_row(
TABLE_STATE,
filter_=bigtable_row_filters.ColumnRangeFilter(
METADATA, WAIT_CELL, WAIT_CELL))
if table_state is None:
utils.dbg('No waiting for new games needed; '
'wait_for_game_number column not in table_state')
return None
value = table_state.cell_value(METADATA, WAIT_CELL)
if not value:
utils.dbg('No waiting for new games needed; '
'no value in wait_for_game_number cell '
'in table_state')
return None
return cbt_intvalue(value)
def count_moves_in_game_range(self, game_begin, game_end):
"""Count the total moves in a game range.
Args:
game_begin: integer, starting game
game_end: integer, ending game
Uses the `ct_` keyspace for rapid move summary.
"""
rows = self.bt_table.read_rows(
ROWCOUNT_PREFIX.format(game_begin),
ROWCOUNT_PREFIX.format(game_end),
filter_=bigtable_row_filters.ColumnRangeFilter(
METADATA, MOVE_COUNT, MOVE_COUNT))
return sum([int(r.cell_value(METADATA, MOVE_COUNT)) for r in rows])
def moves_from_games(self, start_game, end_game, moves, shuffle,
column_family, column):
"""Dataset of samples and/or shuffled moves from game range.
Args:
n: an integer indicating how many past games should be sourced.
moves: an integer indicating how many moves should be sampled
from those N games.
column_family: name of the column family containing move examples.
column: name of the column containing move examples.
shuffle: if True, shuffle the selected move examples.
Returns:
A dataset containing no more than `moves` examples, sampled
randomly from the last `n` games in the table.
"""
start_row = ROW_PREFIX.format(start_game)
end_row = ROW_PREFIX.format(end_game)
# NOTE: Choose a probability high enough to guarantee at least the
# required number of moves, by using a slightly lower estimate
# of the total moves, then trimming the result.
total_moves = self.count_moves_in_game_range(start_game, end_game)
probability = moves / (total_moves * 0.99)
utils.dbg('Row range: %s - %s; total moves: %d; probability %.3f; moves %d' % (
start_row, end_row, total_moves, probability, moves))
ds = self.tf_table.parallel_scan_range(start_row, end_row,
probability=probability,
columns=[(column_family, column)])
if shuffle:
utils.dbg('Doing a complete shuffle of %d moves' % moves)
ds = ds.shuffle(moves)
ds = ds.take(moves)
return ds
def moves_from_last_n_games(self, n, moves, shuffle,
column_family, column):
"""Randomly choose a given number of moves from the last n games.
Args:
n: number of games at the end of this GameQueue to source.
moves: number of moves to be sampled from `n` games.
shuffle: if True, shuffle the selected moves.
column_family: name of the column family containing move examples.
column: name of the column containing move examples.
Returns:
a dataset containing the selected moves.
"""
self.wait_for_fresh_games()
latest_game = self.latest_game_number
utils.dbg('Latest game in %s: %s' % (self.btspec.table, latest_game))
if latest_game == 0:
raise ValueError('Cannot find a latest game in the table')
start = int(max(0, latest_game - n))
ds = self.moves_from_games(start, latest_game, moves, shuffle,
column_family, column)
return ds
    def _write_move_counts(self, sess, h):
        """Add move counts from the given histogram to the table.

        Used to update the move counts in an existing table.  Should
        not be needed except for backfill or repair.

        Args:
            sess: TF session to use for doing a Bigtable write.
            h: a dictionary keyed by game row prefix ("g_0023561") whose values
               are the move counts for each game.
        """
        def gen():
            # Yield (row_key, value) pairs consumed by the Bigtable write op.
            for k, v in h.items():
                # The keys in the histogram may be of type 'bytes'
                k = str(k, 'utf-8')
                vs = str(v)
                # One row keyed by count ("ct_<game>_<count>") ...
                yield (k.replace('g_', 'ct_') + '_%d' % v, vs)
                # ... and one on the game's first move row ("g_<game>_m_000").
                yield (k + '_m_000', vs)
        mc = tf.data.Dataset.from_generator(gen, (tf.string, tf.string))
        wr_op = self.tf_table.write(mc,
                                    column_families=[METADATA],
                                    columns=[MOVE_COUNT])
        sess.run(wr_op)
    def update_move_counts(self, start_game, end_game, interval=1000):
        """Used to update the move_count cell for older games.

        Should not be needed except for backfill or repair.

        move_count cells will be updated in both g_<game_id>_m_000 rows
        and ct_<game_id>_<move_count> rows.

        Args:
            start_game: first game number (inclusive) to repair.
            end_game: game number to stop before (exclusive).
            interval: how many games to histogram and write per TF session.
        """
        # Process the range in chunks so each tf.Session stays bounded.
        for g in range(start_game, end_game, interval):
            with tf.Session() as sess:
                start_row = ROW_PREFIX.format(g)
                end_row = ROW_PREFIX.format(g + interval)
                print('Range:', start_row, end_row)
                start_time = time.time()
                ds = self.tf_table.keys_by_range_dataset(start_row, end_row)
                h = _histogram_move_keys_by_game(sess, ds)
                self._write_move_counts(sess, h)
                end_time = time.time()
                elapsed = end_time - start_time
                print(' games/sec:', len(h)/elapsed)
def set_fresh_watermark(game_queue, count_from, window_size,
                        fresh_fraction=0.05, minimum_fresh=20000):
    """Sets the metadata cell used to block until some quantity of games have been played.

    This sets the 'freshness mark' on the `game_queue`, used to block training
    until enough new games have been played.  Games already played past
    `count_from` are credited against the requirement.

    Args:
        game_queue: A GameQueue object, on whose backing table will be modified.
        count_from: the index of the game to compute the increment from
        window_size: an integer indicating how many past games are considered
        fresh_fraction: a float in (0,1] indicating the fraction of games to wait for
        minimum_fresh: an integer indicating the lower bound on the number of new
        games.
    """
    already_played = game_queue.latest_game_number - count_from
    print("== already_played: ", already_played, flush=True)
    if window_size > count_from:
        # The window is not yet 'full': fall back to a discounted minimum.
        game_queue.require_fresh_games(int(minimum_fresh * .9))
        return
    still_needed = math.ceil(window_size * .9 * fresh_fraction) - already_played
    num_to_play = max(0, still_needed)
    print("== Num to play: ", num_to_play, flush=True)
    game_queue.require_fresh_games(num_to_play)
def mix_by_decile(games, moves, deciles=9):
    """Compute a mix of regular and calibration games by decile.

    deciles should be an integer between 0 and 10 inclusive.
    """
    assert 0 <= deciles <= 10
    # Naming convention:
    #   ct_: count (out of 10 slots)
    #   fr_: fraction of the total
    #   _r:  resign (ordinary) games, _nr/calibration: no-resign games
    ct_total = 10
    ct_r = math.floor(deciles)
    ct_nr = ct_total - ct_r
    fr_r = ct_r / ct_total
    fr_nr = ct_nr / ct_total
    regular_games = math.ceil(games * fr_r)
    regular_moves = math.ceil(moves * fr_r)
    calibration_games = math.floor(games * fr_nr)
    calibration_moves = math.floor(moves * fr_nr)
    # 0 selects the regular dataset, 1 the calibration dataset.
    selection = np.array([0] * ct_r + [1] * ct_nr, dtype=np.int64)
    return GameMix(regular_games, regular_moves,
                   calibration_games, calibration_moves,
                   selection)
def get_unparsed_moves_from_last_n_games(games, games_nr, n,
                                         moves=2**21,
                                         shuffle=True,
                                         column_family=TFEXAMPLE,
                                         column='example',
                                         values_only=True):
    """Get a dataset of serialized TFExamples from the last N games.

    Args:
        games, games_nr: GameQueues of the regular selfplay and calibration
            (aka 'no resign') games to sample from.
        n: an integer indicating how many past games should be sourced.
        moves: an integer indicating how many moves should be sampled
            from those N games.
        column_family: name of the column family containing move examples.
        column: name of the column containing move examples.
        shuffle: if True, shuffle the selected move examples.
        values_only: if True, return only column values, no row keys.

    Returns:
        A dataset containing no more than `moves` examples, sampled
        randomly from the last `n` games in the table.
    """
    mix = mix_by_decile(n, moves, 9)
    regular = games.moves_from_last_n_games(
        mix.games_r, mix.moves_r, shuffle, column_family, column)
    calibration = games_nr.moves_from_last_n_games(
        mix.games_c, mix.moves_c, shuffle, column_family, column)
    # Interleave the two sources according to the 0/1 selection pattern.
    choice = (tf.data.Dataset.from_tensor_slices(mix.selection)
              .repeat().take(moves))
    ds = tf.data.experimental.choose_from_datasets([regular, calibration],
                                                   choice)
    if shuffle:
        ds = ds.shuffle(len(mix.selection) * 2)
    if values_only:
        ds = ds.map(lambda row_name, s: s)
    return ds
def get_unparsed_moves_from_games(games_r, games_c,
                                  start_r, start_c,
                                  mix,
                                  shuffle=True,
                                  column_family=TFEXAMPLE,
                                  column='example',
                                  values_only=True):
    """Get a dataset of serialized TFExamples from a given start point.

    Args:
        games_r, games_c: GameQueues of the regular selfplay and calibration
            (aka 'no resign') games to sample from.
        start_r: an integer indicating the game number to start at in games_r.
        start_c: an integer indicating the game number to start at in games_c.
        mix: the result of mix_by_decile()
        shuffle: if True, shuffle the selected move examples.
        column_family: name of the column family containing move examples.
        column: name of the column containing move examples.
        values_only: if True, return only column values, no row keys.

    Returns:
        A dataset containing no more than the moves implied by `mix`,
        sampled randomly from the game ranges implied.
    """
    regular = games_r.moves_from_games(
        start_r, start_r + mix.games_r, mix.moves_r, shuffle,
        column_family, column)
    calibration = games_c.moves_from_games(
        start_c, start_c + mix.games_c, mix.moves_c, shuffle,
        column_family, column)
    total_moves = mix.moves_r + mix.moves_c
    # Interleave the two sources according to the 0/1 selection pattern.
    choice = (tf.data.Dataset.from_tensor_slices(mix.selection)
              .repeat().take(total_moves))
    ds = tf.data.experimental.choose_from_datasets([regular, calibration],
                                                   choice)
    if shuffle:
        ds = ds.shuffle(len(mix.selection) * 2)
    if values_only:
        ds = ds.map(lambda row_name, s: s)
    return ds
def count_elements_in_dataset(ds, batch_size=1*1024, parallel_batch=8):
    """Count and return all the elements in the given dataset.

    Debugging function.  The elements in a dataset cannot be counted
    without enumerating all of them.  By counting in batch and in
    parallel, this method allows rapid traversal of the dataset.

    Args:
        ds: The dataset whose elements should be counted.
        batch_size: the number of elements to count at a time.
        parallel_batch: how many batches to count in parallel.

    Returns:
        The number of elements in the dataset.
    """
    with tf.Session() as sess:
        # Attach an index to every element, then keep only the indices;
        # the maximum index seen gives the element count.
        dsc = ds.apply(tf.data.experimental.enumerate_dataset())
        dsc = dsc.apply(tf.data.experimental.map_and_batch(
            lambda c, v: c, batch_size, num_parallel_batches=parallel_batch))
        iterator = dsc.make_initializable_iterator()
        sess.run(iterator.initializer)
        get_next = iterator.get_next()
        counted = 0
        try:
            while True:
                # The numbers in the tensors are 0-based indicies,
                # so add 1 to get the number counted.
                counted = sess.run(tf.reduce_max(get_next)) + 1
                utils.dbg('Counted so far: %d' % counted)
        except tf.errors.OutOfRangeError:
            # Raised when the iterator is exhausted: counting is done.
            pass
        utils.dbg('Counted total: %d' % counted)
        return counted
| tensorflow/minigo | bigtable_input.py | Python | apache-2.0 | 29,247 |
"""
merged implementation of the cache provider
the name cache was not choosen to ensure pluggy automatically
ignores the external pytest-cache
"""
import py
import pytest
import json
from os.path import sep as _sep, altsep as _altsep
class Cache(object):
    """Key/value store persisted under ``<rootdir>/.cache``.

    Values are stored as JSON files under ``.cache/v``; plugin-managed
    directories live under ``.cache/d``.
    """
    def __init__(self, config):
        self.config = config
        self._cachedir = config.rootdir.join(".cache")
        self.trace = config.trace.root.get("cache")
        # --cache-clear: wipe and recreate the cache directory up front.
        if config.getvalue("cacheclear"):
            self.trace("clearing cachedir")
            if self._cachedir.check():
                self._cachedir.remove()
            self._cachedir.mkdir()
    def makedir(self, name):
        """ return a directory path object with the given name.  If the
        directory does not yet exist, it will be created.  You can use it
        to manage files likes e. g. store/retrieve database
        dumps across test sessions.

        :param name: must be a string not containing a ``/`` separator.
             Make sure the name contains your plugin or application
             identifiers to prevent clashes with other cache users.
        """
        # Reject both the primary and (on Windows) the alternate separator.
        if _sep in name or _altsep is not None and _altsep in name:
            raise ValueError("name is not allowed to contain path separators")
        return self._cachedir.ensure_dir("d", name)
    def _getvaluepath(self, key):
        # Each '/'-separated key segment becomes one path component under 'v'.
        return self._cachedir.join('v', *key.split('/'))
    def get(self, key, default):
        """ return cached value for the given key.  If no value
        was yet cached or the value cannot be read, the specified
        default is returned.

        :param key: must be a ``/`` separated value.  Usually the first
             name is the name of your plugin or your application.
        :param default: must be provided in case of a cache-miss or
             invalid cache values.
        """
        path = self._getvaluepath(key)
        if path.check():
            try:
                with path.open("r") as f:
                    return json.load(f)
            except ValueError:
                # Corrupt/non-JSON content: treat as a cache miss.
                self.trace("cache-invalid at %s" % (path,))
        return default
    def set(self, key, value):
        """ save value for the given key.

        :param key: must be a ``/`` separated value.  Usually the first
             name is the name of your plugin or your application.
        :param value: must be of any combination of basic
               python types, including nested types
               like e. g. lists of dictionaries.
        """
        path = self._getvaluepath(key)
        try:
            path.dirpath().ensure_dir()
        except (py.error.EEXIST, py.error.EACCES):
            # Cannot create the directory: warn instead of failing the run.
            self.config.warn(
                code='I9', message='could not create cache path %s' % (path,)
            )
            return
        try:
            f = path.open('w')
        except py.error.ENOTDIR:
            # A path component exists but is a file, not a directory.
            self.config.warn(
                code='I9', message='cache could not write path %s' % (path,))
        else:
            with f:
                self.trace("cache-write %s: %r" % (key, value,))
                json.dump(value, f, indent=2, sort_keys=True)
class LFPlugin:
    """ Plugin which implements the --lf (run last-failing) option """
    def __init__(self, config):
        self.config = config
        # Active when either --lf or --ff was given on the command line.
        active_keys = 'lf', 'failedfirst'
        self.active = any(config.getvalue(key) for key in active_keys)
        if self.active:
            # nodeid -> True for every test that failed in the previous run.
            self.lastfailed = config.cache.get("cache/lastfailed", {})
        else:
            self.lastfailed = {}
    def pytest_report_header(self):
        # Describe the rerun mode in the session header when active.
        if self.active:
            if not self.lastfailed:
                mode = "run all (no recorded failures)"
            else:
                mode = "rerun last %d failures%s" % (
                    len(self.lastfailed),
                    " first" if self.config.getvalue("failedfirst") else "")
            return "run-last-failure: %s" % mode
    def pytest_runtest_logreport(self, report):
        # Track failures; xfail is expected and not recorded as failed.
        if report.failed and "xfail" not in report.keywords:
            self.lastfailed[report.nodeid] = True
        elif not report.failed:
            if report.when == "call":
                # A passing call phase clears any recorded failure.
                self.lastfailed.pop(report.nodeid, None)
    def pytest_collectreport(self, report):
        passed = report.outcome in ('passed', 'skipped')
        if passed:
            if report.nodeid in self.lastfailed:
                # A previously-failing collector now collects: replace its
                # entry by entries for its collected children.
                self.lastfailed.pop(report.nodeid)
                self.lastfailed.update(
                    (item.nodeid, True)
                    for item in report.result)
        else:
            self.lastfailed[report.nodeid] = True
    def pytest_collection_modifyitems(self, session, config, items):
        if self.active and self.lastfailed:
            previously_failed = []
            previously_passed = []
            for item in items:
                if item.nodeid in self.lastfailed:
                    previously_failed.append(item)
                else:
                    previously_passed.append(item)
            if not previously_failed and previously_passed:
                # running a subset of all tests with recorded failures outside
                # of the set of tests currently executing
                pass
            elif self.config.getvalue("failedfirst"):
                # --ff: run everything, failures first.
                items[:] = previously_failed + previously_passed
            else:
                # --lf: run only the failures; deselect the rest.
                items[:] = previously_failed
                config.hook.pytest_deselected(items=previously_passed)
    def pytest_sessionfinish(self, session):
        config = self.config
        # Don't persist state from --cache-show runs or from xdist slaves
        # (the master aggregates their reports).
        if config.getvalue("cacheshow") or hasattr(config, "slaveinput"):
            return
        config.cache.set("cache/lastfailed", self.lastfailed)
def pytest_addoption(parser):
    """Register the cache plugin's command line options."""
    group = parser.getgroup("general")
    option_specs = [
        (("--lf", "--last-failed"),
         dict(action='store_true', dest="lf",
              help="rerun only the tests that failed "
                   "at the last run (or all if none failed)")),
        (("--ff", "--failed-first"),
         dict(action='store_true', dest="failedfirst",
              help="run all tests but run the last failures first. "
                   "This may re-order tests and thus lead to "
                   "repeated fixture setup/teardown")),
        (("--cache-show",),
         dict(action='store_true', dest="cacheshow",
              help="show cache contents, don't perform collection or tests")),
        (("--cache-clear",),
         dict(action='store_true', dest="cacheclear",
              help="remove all cache contents at start of test run.")),
    ]
    for flags, kwargs in option_specs:
        group.addoption(*flags, **kwargs)
def pytest_cmdline_main(config):
    """Short-circuit the normal test run when --cache-show was given."""
    if not config.option.cacheshow:
        return None
    # Imported lazily: only needed for the cache-show session.
    from _pytest.main import wrap_session
    return wrap_session(config, cacheshow)
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config):
    # tryfirst: make config.cache available to other plugins' configure hooks.
    config.cache = Cache(config)
    config.pluginmanager.register(LFPlugin(config), "lfplugin")
@pytest.fixture
def cache(request):
    """
    Return a cache object that can persist state between testing sessions.

    cache.get(key, default)
    cache.set(key, value)

    Keys must be a ``/`` separated value, where the first part is usually the
    name of your plugin or application to avoid clashes with other cache users.

    Values can be any object handled by the json stdlib module.
    """
    # The same Cache instance is attached to the config in pytest_configure.
    return request.config.cache
def pytest_report_header(config):
    """Show the cache directory in the session header when running verbose."""
    if not config.option.verbose:
        return None
    cachedir = config.cache._cachedir
    relpath = py.path.local().bestrelpath(cachedir)
    return "cachedir: %s" % relpath
def cacheshow(config, session):
    """Print all cache values and directories; session body for --cache-show."""
    from pprint import pprint
    tw = py.io.TerminalWriter()
    tw.line("cachedir: " + str(config.cache._cachedir))
    if not config.cache._cachedir.check():
        tw.line("cache is empty")
        return 0
    # Unique sentinel so a stored null/None value is distinguishable
    # from an unreadable entry.
    dummy = object()
    basedir = config.cache._cachedir
    vdir = basedir.join("v")
    tw.sep("-", "cache values")
    for valpath in vdir.visit(lambda x: x.isfile()):
        # Reconstruct the '/'-separated cache key from the file path.
        key = valpath.relto(vdir).replace(valpath.sep, "/")
        val = config.cache.get(key, dummy)
        if val is dummy:
            tw.line("%s contains unreadable content, "
                    "will be ignored" % key)
        else:
            tw.line("%s contains:" % key)
            stream = py.io.TextIO()
            pprint(val, stream=stream)
            for line in stream.getvalue().splitlines():
                tw.line(" " + line)
    ddir = basedir.join("d")
    if ddir.isdir() and ddir.listdir():
        tw.sep("-", "cache directories")
        for p in basedir.join("d").visit():
            #if p.check(dir=1):
            #    print("%s/" % p.relto(basedir))
            if p.isfile():
                key = p.relto(basedir)
                tw.line("%s is a file of length %d" % (
                    key, p.size()))
    return 0
| enriquesanchezb/practica_utad_2016 | venv/lib/python2.7/site-packages/_pytest/cacheprovider.py | Python | apache-2.0 | 8,786 |
from __future__ import unicode_literals
import locale
def locale_decode(bytestr):
    """Decode *bytestr* to unicode text (Python 2 only: uses ``unicode``).

    Tries the default ``unicode()`` conversion first and, when that fails
    with a UnicodeError, decodes using the system's preferred locale
    encoding instead.
    """
    try:
        return unicode(bytestr)
    except UnicodeError:
        return str(bytestr).decode(locale.getpreferredencoding())
| abarisain/mopidy | mopidy/utils/encoding.py | Python | apache-2.0 | 217 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2015,掌阅科技
All rights reserved.
摘 要: query.py
创 建 者: WangLichao
创建日期: 2015-03-06
"""
# pylint: disable=arguments-differ
from tornado.web import UIModule
# Mapping of SQL comparison operators to their (Chinese) display labels,
# shown in the query-condition UI module below.
OPERATOR = {
    'like': '包含',
    '=': '等于',
    '!=': '不等于',
    '>': '大于',
    '>=': '大于等于',
    '<': '小于',
    '<=': '小于等于',
}
class Query(UIModule):
    '''UI module rendering a query-condition (filter) widget.
    '''
    def render(self, column, default_column,
               default_operator, time_flag=False):
        '''Render the query-condition template.

        Args:
            column: mapping of database column names to display labels.
            default_column: the column selected by default.
            default_operator: the operator selected by default.
            time_flag: True when the input field is a time field.
        '''
        return self.render_string("uimodule/query.html",
                                  column=column,
                                  default_column=default_column,
                                  default_operator=default_operator,
                                  operator=OPERATOR,
                                  time_flag=time_flag)
| ireaderlab/zkdash | lib/uimodule/query.py | Python | apache-2.0 | 1,160 |
# coding=utf-8
# Copyright 2000-2021 JetBrains s.r.o. and contributors. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
import pytest
from distutils import version
import sys
from _pytest.config import get_plugin_manager
from pkg_resources import iter_entry_points
from _jb_runner_tools import jb_patch_separator, jb_doc_args, JB_DISABLE_BUFFERING, start_protocol, parse_arguments, \
set_parallel_mode
from teamcity import pytest_plugin
if __name__ == '__main__':
    # Translate IDE-provided arguments (test path, targets, extra args)
    # into a pytest command line.
    path, targets, additional_args = parse_arguments()
    sys.argv += additional_args
    joined_targets = jb_patch_separator(targets, fs_glue="/", python_glue="::", fs_to_python_glue=".py::")
    # When file is launched in pytest it should be file.py: you can't provide it as bare module
    joined_targets = [t + ".py" if ":" not in t else t for t in joined_targets]
    sys.argv += [path] if path else joined_targets
    # plugin is discovered automatically in 3, but not in 2
    # to prevent "plugin already registered" problem we check it first
    plugins_to_load = []
    if not get_plugin_manager().hasplugin("pytest-teamcity"):
        if "pytest-teamcity" not in map(lambda e: e.name, iter_entry_points(group='pytest11', name=None)):
            plugins_to_load.append(pytest_plugin)
    args = sys.argv[1:]
    if "--jb-show-summary" in args:
        args.remove("--jb-show-summary")
    elif version.LooseVersion(pytest.__version__) >= version.LooseVersion("6.0"):
        # pytest >= 6 can suppress header/summary for cleaner IDE output.
        args += ["--no-header", "--no-summary", "-q"]
    if JB_DISABLE_BUFFERING and "-s" not in args:
        # -s disables output capturing so the IDE sees output immediately.
        args += ["-s"]
    jb_doc_args("pytest", args)
    class Plugin:
        # Minimal plugin: detect xdist parallel runs and start the protocol.
        @staticmethod
        def pytest_configure(config):
            # numprocesses is set by pytest-xdist's -n option.
            if getattr(config.option, "numprocesses", None):
                set_parallel_mode()
            start_protocol()
    sys.exit(pytest.main(args, plugins_to_load + [Plugin]))
| siosio/intellij-community | python/helpers/pycharm/_jb_pytest_runner.py | Python | apache-2.0 | 1,933 |
import pytz
import dateutil.parser
from waterbutler.core import metadata
class BaseOsfStorageMetadata:
    """Mixin identifying metadata objects as belonging to osfstorage."""
    @property
    def provider(self):
        # Name of the storage provider this metadata describes.
        return 'osfstorage'
class BaseOsfStorageItemMetadata(BaseOsfStorageMetadata):
    """Shared accessors for osfstorage files and folders."""

    def __init__(self, raw, materialized):
        super().__init__(raw)
        # Human-readable path supplied by the caller, exposed as
        # materialized_path below.
        self._materialized = materialized

    @property
    def materialized_path(self):
        """Human-readable path of the item."""
        return self._materialized

    @property
    def name(self):
        """Item name, taken from the raw provider response."""
        return self.raw['name']

    @property
    def path(self):
        """Provider path, taken from the raw provider response."""
        return self.raw['path']
class OsfStorageFileMetadata(BaseOsfStorageItemMetadata, metadata.BaseFileMetadata):
    """File metadata for osfstorage."""

    @staticmethod
    def _assume_utc_isoformat(value):
        """Parse *value* and return it as an ISO-8601 string, assuming UTC.

        Kludge for OSF, whose timestamp attributes do not include tzinfo
        but are assumed to be UTC.  Returns None when *value* is None.
        (Shared by ``modified_utc`` and ``created_utc``, which previously
        duplicated this logic.)
        """
        if value is None:
            return None
        parsed_datetime = dateutil.parser.parse(value)
        if not parsed_datetime.tzinfo:
            parsed_datetime = parsed_datetime.replace(tzinfo=pytz.UTC)
        return parsed_datetime.isoformat()

    @property
    def modified(self):
        # Raw modification timestamp as reported by OSF (may lack tzinfo).
        return self.raw['modified']

    @property
    def modified_utc(self):
        """Modification time in UTC; derived from 'modified' when absent."""
        try:
            return self.raw['modified_utc']
        except KeyError:
            return self._assume_utc_isoformat(self.raw['modified'])

    @property
    def created_utc(self):
        """Creation time in UTC; derived from 'created' when absent."""
        try:
            return self.raw['created_utc']
        except KeyError:
            return self._assume_utc_isoformat(self.raw['created'])

    @property
    def size(self):
        return self.raw['size']

    @property
    def content_type(self):
        # Optional in the raw response; None when missing.
        return self.raw.get('contentType')

    @property
    def etag(self):
        # Version-qualified path uniquely identifies this file revision.
        return '{}::{}'.format(self.raw['version'], self.path)

    @property
    def extra(self):
        """osfstorage-specific metadata for files.

        * ``guid``: Always `None`.  Added in anticipation of OSF-side support, which was then
          abandoned after technical consideration.  Left in to avoid breaking clients that expect
          the key to be present.

        * ``version``: The version number of the *most recent* version, not the requested version.

        * ``downloads``: Number of times the file has been downloaded.

        * ``checkout``: Whether this file has been checked-out and is therefore read-only to all
          but the user who has checked it out.

        * ``latestVersionSeen``: Whether the requesting user has seen the most recent version of
          the file.  `True` if so.  `False` if a newer version exists that the user has not yet
          seen.  `None` if the user has not seen *any* version of the file.
        """
        return {
            'guid': self.raw.get('guid', None),
            'version': self.raw['version'],
            'downloads': self.raw['downloads'],
            'checkout': self.raw['checkout'],
            'latestVersionSeen': self.raw.get('latestVersionSeen', None),
            'hashes': {
                'md5': self.raw['md5'],
                'sha256': self.raw['sha256']
            },
        }
class OsfStorageFolderMetadata(BaseOsfStorageItemMetadata, metadata.BaseFolderMetadata):
    # Folders need nothing beyond the shared item accessors.
    pass
class OsfStorageRevisionMetadata(BaseOsfStorageMetadata, metadata.BaseFileRevisionMetadata):
    """Metadata for a single revision of an osfstorage file."""
    @property
    def modified(self):
        # Timestamp of this revision, from the raw response.
        return self.raw['date']
    @property
    def version_identifier(self):
        # Query-parameter name used to request a specific revision.
        return 'version'
    @property
    def version(self):
        # Revision index, exposed as a string.
        return str(self.raw['index'])
    @property
    def extra(self):
        # Revision-specific details: author, download count and hashes.
        return {
            'user': self.raw['user'],
            'downloads': self.raw['downloads'],
            'hashes': {
                'md5': self.raw['md5'],
                'sha256': self.raw['sha256']
            },
        }
| felliott/waterbutler | waterbutler/providers/osfstorage/metadata.py | Python | apache-2.0 | 4,099 |
# Copyright 2014-2015 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import logging
from collections import OrderedDict
# eclipse works with linux paths
from posixpath import normpath, join, basename
from .tool import Tool, Builder, Exporter
from .gccarm import MakefileGccArm
from ..util import SOURCE_KEYS
logger = logging.getLogger('progen.tools.eclipse')
class EclipseGnuARM(Tool, Exporter, Builder):
    """Eclipse CDT project exporter built on top of the GCC ARM makefile tool."""
    # Maps source-file extensions to Eclipse resource type codes.
    file_types = {'cpp': 1, 'c': 1, 's': 1, 'obj': 1, 'lib': 1, 'h': 1}
    # Template describing the artifacts this exporter produces.
    generated_project = {
        'path': '',
        'files': {
            'proj_file': '',
            'cproj': '',
            'makefile': '',
        }
    }
    def __init__(self, workspace, env_settings):
        self.definitions = 0
        # Makefile generation is delegated to the GCC ARM exporter.
        self.exporter = MakefileGccArm(workspace, env_settings)
        self.workspace = workspace
        self.env_settings = env_settings
    @staticmethod
    def get_toolnames():
        # Names under which this tool can be selected.
        return ['eclipse_make_gcc_arm', 'make_gcc_arm']
    @staticmethod
    def get_toolchain():
        return 'gcc_arm'
    def _expand_one_file(self, source, new_data, extension):
        # Eclipse links sources relative to the project location
        # (PARENT-<rel>-PROJECT_LOC), so each path is rebased accordingly.
        return {"path": join('PARENT-%s-PROJECT_LOC' % new_data['output_dir']['rel_path'], normpath(source)), "name": basename(
            source), "type": self.file_types[extension.lower()]}
    def _expand_sort_key(self, file) :
        # Sort files case-insensitively by name.
        return file['name'].lower()
    def export_workspace(self):
        # Workspace-level export is not supported by this tool.
        logger.debug("Current version of CoIDE does not support workspaces")
    def export_project(self):
        """ Processes groups and misc options specific for eclipse, and run generator """
        output = copy.deepcopy(self.generated_project)
        data_for_make = self.workspace.copy()
        self.exporter.process_data_for_makefile(data_for_make)
        # Generate the Makefile via the GCC ARM template.
        output['path'], output['files']['makefile'] = self.gen_file_jinja('makefile_gcc.tmpl', data_for_make, 'Makefile', data_for_make['output_dir']['path'])
        expanded_dic = self.workspace.copy()
        expanded_dic['rel_path'] = data_for_make['output_dir']['rel_path']
        groups = self._get_groups(expanded_dic)
        expanded_dic['groups'] = {}
        for group in groups:
            expanded_dic['groups'][group] = []
        self._iterate(self.workspace, expanded_dic)
        # Project file
        project_path, output['files']['cproj'] = self.gen_file_jinja(
            'eclipse_makefile.cproject.tmpl', expanded_dic, '.cproject', data_for_make['output_dir']['path'])
        project_path, output['files']['proj_file'] = self.gen_file_jinja(
            'eclipse.project.tmpl', expanded_dic, '.project', data_for_make['output_dir']['path'])
        return output
    def get_generated_project_files(self):
        # Paths of all artifacts produced by export_project().
        return {'path': self.workspace['path'], 'files': [self.workspace['files']['proj_file'], self.workspace['files']['cproj'],
                self.workspace['files']['makefile']]}
| 0xc0170/project_generator | project_generator/tools/eclipse.py | Python | apache-2.0 | 3,406 |
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import uuid
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from testtools import matchers
from keystone import auth
from keystone.common import authorization
from keystone.common import cache
from keystone import exception
from keystone import middleware
from keystone.policy.backends import rules
from keystone.tests import unit
from keystone.tests.unit import rest
CONF = cfg.CONF
DEFAULT_DOMAIN_ID = 'default'
TIME_FORMAT = unit.TIME_FORMAT
class AuthTestMixin(object):
    """To hold auth building helper functions."""

    def build_auth_scope(self, project_id=None, project_name=None,
                         project_domain_id=None, project_domain_name=None,
                         domain_id=None, domain_name=None, trust_id=None,
                         unscoped=None):
        """Build the 'scope' portion of a v3 authentication request."""
        scope_data = {}
        if unscoped:
            scope_data['unscoped'] = {}
        if project_id or project_name:
            if project_id:
                project = {'id': project_id}
            else:
                project = {'name': project_name}
            if project_domain_id:
                project['domain'] = {'id': project_domain_id}
            elif project_domain_name:
                project['domain'] = {'name': project_domain_name}
            scope_data['project'] = project
        if domain_id:
            scope_data['domain'] = {'id': domain_id}
        elif domain_name:
            scope_data['domain'] = {'name': domain_name}
        if trust_id:
            scope_data['OS-TRUST:trust'] = {'id': trust_id}
        return scope_data

    def build_password_auth(self, user_id=None, username=None,
                            user_domain_id=None, user_domain_name=None,
                            password=None):
        """Build the 'password' method payload of an auth request."""
        if user_id:
            user = {'id': user_id}
        else:
            user = {'name': username}
        if user_domain_id:
            user['domain'] = {'id': user_domain_id}
        elif user_domain_name:
            user['domain'] = {'name': user_domain_name}
        user['password'] = password
        return {'user': user}

    def build_token_auth(self, token):
        """Build the 'token' method payload of an auth request."""
        return {'id': token}

    def build_authentication_request(self, token=None, user_id=None,
                                     username=None, user_domain_id=None,
                                     user_domain_name=None, password=None,
                                     kerberos=False, **kwargs):
        """Build auth dictionary.

        It will create an auth dictionary based on all the arguments
        that it receives.  Any extra keyword arguments are forwarded to
        build_auth_scope() to construct the request's scope.
        """
        identity = {'methods': []}
        if kerberos:
            identity['methods'].append('kerberos')
            identity['kerberos'] = {}
        if token:
            identity['methods'].append('token')
            identity['token'] = self.build_token_auth(token)
        if user_id or username:
            identity['methods'].append('password')
            identity['password'] = self.build_password_auth(
                user_id, username, user_domain_id, user_domain_name, password)
        auth_data = {'identity': identity}
        if kwargs:
            auth_data['scope'] = self.build_auth_scope(**kwargs)
        return {'auth': auth_data}
class RestfulTestCase(unit.SQLDriverOverrides, rest.RestfulTestCase,
AuthTestMixin):
    def config_files(self):
        # Extend the base config list with the SQL backend configuration.
        config_files = super(RestfulTestCase, self).config_files()
        config_files.append(unit.dirs.tests_conf('backend_sql.conf'))
        return config_files
def get_extensions(self):
extensions = set(['revoke'])
if hasattr(self, 'EXTENSION_NAME'):
extensions.add(self.EXTENSION_NAME)
return extensions
def generate_paste_config(self):
new_paste_file = None
try:
new_paste_file = unit.generate_paste_config(self.EXTENSION_TO_ADD)
except AttributeError:
# no need to report this error here, as most tests will not have
# EXTENSION_TO_ADD defined.
pass
finally:
return new_paste_file
    def remove_generated_paste_config(self):
        # Clean up the paste file created in generate_paste_config().
        # Tests without EXTENSION_TO_ADD never created one, so the
        # AttributeError is expected and ignored.
        try:
            unit.remove_generated_paste_config(self.EXTENSION_TO_ADD)
        except AttributeError:
            pass
    def setUp(self, app_conf='keystone'):
        """Setup for v3 Restful Test Cases.

        When the test class defines EXTENSION_TO_ADD, a paste config
        including that extension is generated and used instead of the
        default application config.
        """
        new_paste_file = self.generate_paste_config()
        self.addCleanup(self.remove_generated_paste_config)
        if new_paste_file:
            app_conf = 'config:%s' % (new_paste_file)
        super(RestfulTestCase, self).setUp(app_conf=app_conf)
        self.empty_context = {'environment': {}}
        # Initialize the policy engine and allow us to write to a temp
        # file in each test to create the policies
        rules.reset()
        # drop the policy rules
        self.addCleanup(rules.reset)
    def load_backends(self):
        # ensure the cache region instance is setup
        cache.configure_cache_region(cache.REGION)
        super(RestfulTestCase, self).load_backends()
    def load_fixtures(self, fixtures):
        # The fixtures argument is unused here; the shared sample data
        # created below covers what the v3 tests need.
        self.load_sample_data()
    def _populate_default_domain(self):
        """Ensure the v2 default domain exists when using an in-memory DB."""
        if CONF.database.connection == unit.IN_MEM_DB_CONN_STRING:
            # NOTE(morganfainberg): If an in-memory db is being used, be sure
            # to populate the default domain, this is typically done by
            # a migration, but the in-mem db uses model definitions  to create
            # the schema (no migrations are run).
            try:
                self.resource_api.get_domain(DEFAULT_DOMAIN_ID)
            except exception.DomainNotFound:
                domain = {'description': (u'Owns users and tenants (i.e. '
                                          u'projects) available on Identity '
                                          u'API v2.'),
                          'enabled': True,
                          'id': DEFAULT_DOMAIN_ID,
                          'name': u'Default'}
                self.resource_api.create_domain(DEFAULT_DOMAIN_ID, domain)
    def load_sample_data(self):
        """Create the standard entities shared by the v3 REST tests."""
        self._populate_default_domain()
        # A dedicated domain with one project and one user.
        self.domain_id = uuid.uuid4().hex
        self.domain = self.new_domain_ref()
        self.domain['id'] = self.domain_id
        self.resource_api.create_domain(self.domain_id, self.domain)
        self.project_id = uuid.uuid4().hex
        self.project = self.new_project_ref(
            domain_id=self.domain_id)
        self.project['id'] = self.project_id
        self.resource_api.create_project(self.project_id, self.project)
        self.user = self.new_user_ref(domain_id=self.domain_id)
        password = self.user['password']
        self.user = self.identity_api.create_user(self.user)
        # create_user() strips the password; restore it so tests can
        # authenticate as this user.
        self.user['password'] = password
        self.user_id = self.user['id']
        # A parallel project and user in the default (v2) domain.
        self.default_domain_project_id = uuid.uuid4().hex
        self.default_domain_project = self.new_project_ref(
            domain_id=DEFAULT_DOMAIN_ID)
        self.default_domain_project['id'] = self.default_domain_project_id
        self.resource_api.create_project(self.default_domain_project_id,
                                         self.default_domain_project)
        self.default_domain_user = self.new_user_ref(
            domain_id=DEFAULT_DOMAIN_ID)
        password = self.default_domain_user['password']
        self.default_domain_user = (
            self.identity_api.create_user(self.default_domain_user))
        self.default_domain_user['password'] = password
        self.default_domain_user_id = self.default_domain_user['id']
        # create & grant policy.json's default role for admin_required
        self.role_id = uuid.uuid4().hex
        self.role = self.new_role_ref()
        self.role['id'] = self.role_id
        self.role['name'] = 'admin'
        self.role_api.create_role(self.role_id, self.role)
        self.assignment_api.add_role_to_user_and_project(
            self.user_id, self.project_id, self.role_id)
        self.assignment_api.add_role_to_user_and_project(
            self.default_domain_user_id, self.default_domain_project_id,
            self.role_id)
        self.assignment_api.add_role_to_user_and_project(
            self.default_domain_user_id, self.project_id,
            self.role_id)
        # A region, a service and one endpoint for catalog tests.
        self.region_id = uuid.uuid4().hex
        self.region = self.new_region_ref()
        self.region['id'] = self.region_id
        self.catalog_api.create_region(
            self.region.copy())
        self.service_id = uuid.uuid4().hex
        self.service = self.new_service_ref()
        self.service['id'] = self.service_id
        self.catalog_api.create_service(
            self.service_id,
            self.service.copy())
        self.endpoint_id = uuid.uuid4().hex
        self.endpoint = self.new_endpoint_ref(service_id=self.service_id)
        self.endpoint['id'] = self.endpoint_id
        self.endpoint['region_id'] = self.region['id']
        self.catalog_api.create_endpoint(
            self.endpoint_id,
            self.endpoint.copy())
        # The server adds 'enabled' and defaults to True.
        self.endpoint['enabled'] = True
    def new_ref(self):
        """Return a ref populated with attributes common to API entities."""
        return unit.new_ref()
    def new_region_ref(self):
        """Return a new region ref built by the shared unit helpers."""
        return unit.new_region_ref()
    def new_service_ref(self):
        """Return a new service ref built by the shared unit helpers."""
        return unit.new_service_ref()
    def new_endpoint_ref(self, service_id, interface='public', **kwargs):
        """Return a new endpoint ref defaulting to this test's region."""
        return unit.new_endpoint_ref(
            service_id, interface=interface, default_region_id=self.region_id,
            **kwargs)
    def new_domain_ref(self):
        """Return a new domain ref built by the shared unit helpers."""
        return unit.new_domain_ref()
    def new_project_ref(self, domain_id=None, parent_id=None, is_domain=False):
        """Return a new project ref, optionally parented / acting as domain."""
        return unit.new_project_ref(domain_id=domain_id, parent_id=parent_id,
                                    is_domain=is_domain)
    def new_user_ref(self, domain_id, project_id=None):
        """Return a new user ref in the given domain (and default project)."""
        return unit.new_user_ref(domain_id, project_id=project_id)
    def new_group_ref(self, domain_id):
        """Return a new group ref in the given domain."""
        return unit.new_group_ref(domain_id)
    def new_credential_ref(self, user_id, project_id=None, cred_type=None):
        """Return a new credential ref owned by the given user."""
        return unit.new_credential_ref(user_id, project_id=project_id,
                                       cred_type=cred_type)
    def new_role_ref(self):
        """Return a new role ref built by the shared unit helpers."""
        return unit.new_role_ref()
    def new_policy_ref(self):
        """Return a new policy ref built by the shared unit helpers."""
        return unit.new_policy_ref()
    def new_trust_ref(self, trustor_user_id, trustee_user_id, project_id=None,
                      impersonation=None, expires=None, role_ids=None,
                      role_names=None, remaining_uses=None,
                      allow_redelegation=False):
        """Return a new trust ref between the given trustor and trustee."""
        return unit.new_trust_ref(
            trustor_user_id, trustee_user_id, project_id=project_id,
            impersonation=impersonation, expires=expires, role_ids=role_ids,
            role_names=role_names, remaining_uses=remaining_uses,
            allow_redelegation=allow_redelegation)
def create_new_default_project_for_user(self, user_id, domain_id,
enable_project=True):
ref = self.new_project_ref(domain_id=domain_id)
ref['enabled'] = enable_project
r = self.post('/projects', body={'project': ref})
project = self.assertValidProjectResponse(r, ref)
# set the user's preferred project
body = {'user': {'default_project_id': project['id']}}
r = self.patch('/users/%(user_id)s' % {
'user_id': user_id},
body=body)
self.assertValidUserResponse(r)
return project
def get_unscoped_token(self):
"""Convenience method so that we can test authenticated requests."""
r = self.admin_request(
method='POST',
path='/v3/auth/tokens',
body={
'auth': {
'identity': {
'methods': ['password'],
'password': {
'user': {
'name': self.user['name'],
'password': self.user['password'],
'domain': {
'id': self.user['domain_id']
}
}
}
}
}
})
return r.headers.get('X-Subject-Token')
def get_scoped_token(self):
"""Convenience method so that we can test authenticated requests."""
r = self.admin_request(
method='POST',
path='/v3/auth/tokens',
body={
'auth': {
'identity': {
'methods': ['password'],
'password': {
'user': {
'name': self.user['name'],
'password': self.user['password'],
'domain': {
'id': self.user['domain_id']
}
}
}
},
'scope': {
'project': {
'id': self.project['id'],
}
}
}
})
return r.headers.get('X-Subject-Token')
def get_domain_scoped_token(self):
"""Convenience method for requesting domain scoped token."""
r = self.admin_request(
method='POST',
path='/v3/auth/tokens',
body={
'auth': {
'identity': {
'methods': ['password'],
'password': {
'user': {
'name': self.user['name'],
'password': self.user['password'],
'domain': {
'id': self.user['domain_id']
}
}
}
},
'scope': {
'domain': {
'id': self.domain['id'],
}
}
}
})
return r.headers.get('X-Subject-Token')
    def get_requested_token(self, auth):
        """Request the specific token we want and return its token id."""
        r = self.v3_authenticate_token(auth)
        return r.headers.get('X-Subject-Token')
    def v3_authenticate_token(self, auth, expected_status=201):
        """POST an auth payload to /v3/auth/tokens and return the response."""
        return self.admin_request(method='POST',
                                  path='/v3/auth/tokens',
                                  body=auth,
                                  expected_status=expected_status)
    def v3_noauth_request(self, path, **kwargs):
        """Issue a v3 API request without an auth token header."""
        # request does not require auth token header
        path = '/v3' + path
        return self.admin_request(path=path, **kwargs)
def v3_request(self, path, **kwargs):
# check to see if caller requires token for the API call.
if kwargs.pop('noauth', None):
return self.v3_noauth_request(path, **kwargs)
# Check if the caller has passed in auth details for
# use in requesting the token
auth_arg = kwargs.pop('auth', None)
if auth_arg:
token = self.get_requested_token(auth_arg)
else:
token = kwargs.pop('token', None)
if not token:
token = self.get_scoped_token()
path = '/v3' + path
return self.admin_request(path=path, token=token, **kwargs)
def get(self, path, **kwargs):
r = self.v3_request(method='GET', path=path, **kwargs)
if 'expected_status' not in kwargs:
self.assertResponseStatus(r, 200)
return r
def head(self, path, **kwargs):
r = self.v3_request(method='HEAD', path=path, **kwargs)
if 'expected_status' not in kwargs:
self.assertResponseStatus(r, 204)
self.assertEqual('', r.body)
return r
def post(self, path, **kwargs):
r = self.v3_request(method='POST', path=path, **kwargs)
if 'expected_status' not in kwargs:
self.assertResponseStatus(r, 201)
return r
def put(self, path, **kwargs):
r = self.v3_request(method='PUT', path=path, **kwargs)
if 'expected_status' not in kwargs:
self.assertResponseStatus(r, 204)
return r
def patch(self, path, **kwargs):
r = self.v3_request(method='PATCH', path=path, **kwargs)
if 'expected_status' not in kwargs:
self.assertResponseStatus(r, 200)
return r
def delete(self, path, **kwargs):
r = self.v3_request(method='DELETE', path=path, **kwargs)
if 'expected_status' not in kwargs:
self.assertResponseStatus(r, 204)
return r
def assertValidErrorResponse(self, r):
resp = r.result
self.assertIsNotNone(resp.get('error'))
self.assertIsNotNone(resp['error'].get('code'))
self.assertIsNotNone(resp['error'].get('title'))
self.assertIsNotNone(resp['error'].get('message'))
self.assertEqual(int(resp['error']['code']), r.status_code)
def assertValidListLinks(self, links, resource_url=None):
self.assertIsNotNone(links)
self.assertIsNotNone(links.get('self'))
self.assertThat(links['self'], matchers.StartsWith('http://localhost'))
if resource_url:
self.assertThat(links['self'], matchers.EndsWith(resource_url))
self.assertIn('next', links)
if links['next'] is not None:
self.assertThat(links['next'],
matchers.StartsWith('http://localhost'))
self.assertIn('previous', links)
if links['previous'] is not None:
self.assertThat(links['previous'],
matchers.StartsWith('http://localhost'))
    def assertValidListResponse(self, resp, key, entity_validator, ref=None,
                                expected_length=None, keys_to_check=None,
                                resource_url=None):
        """Make assertions common to all API list responses.

        If a reference is provided, its ID will be searched for in the
        response, and asserted to be equal.

        :param resp: the list response under test
        :param key: collection key in the response body (e.g. 'users')
        :param entity_validator: callable run against every entity
        :param ref: optional reference entity expected in the collection
        :param expected_length: if given, assert the exact collection size
        :param keys_to_check: keys each entity must carry (see
            assertValidEntity for the default set)
        :param resource_url: expected tail of the collection's self link
        :returns: the list of entities
        """
        entities = resp.result.get(key)
        self.assertIsNotNone(entities)
        if expected_length is not None:
            self.assertEqual(expected_length, len(entities))
        elif ref is not None:
            # we're at least expecting the ref
            self.assertNotEmpty(entities)
        # collections should have relational links
        self.assertValidListLinks(resp.result.get('links'),
                                  resource_url=resource_url)
        for entity in entities:
            self.assertIsNotNone(entity)
            self.assertValidEntity(entity, keys_to_check=keys_to_check)
            entity_validator(entity)
        if ref:
            entity = [x for x in entities if x['id'] == ref['id']][0]
            self.assertValidEntity(entity, ref=ref,
                                   keys_to_check=keys_to_check)
            entity_validator(entity, ref)
        return entities
    def assertValidResponse(self, resp, key, entity_validator, *args,
                            **kwargs):
        """Make assertions common to all API responses.

        :param resp: the response under test
        :param key: entity key in the response body (e.g. 'user')
        :param entity_validator: callable run against the entity
        :returns: the validated entity
        """
        entity = resp.result.get(key)
        self.assertIsNotNone(entity)
        keys = kwargs.pop('keys_to_check', None)
        self.assertValidEntity(entity, keys_to_check=keys, *args, **kwargs)
        entity_validator(entity, *args, **kwargs)
        return entity
def assertValidEntity(self, entity, ref=None, keys_to_check=None):
"""Make assertions common to all API entities.
If a reference is provided, the entity will also be compared against
the reference.
"""
if keys_to_check is not None:
keys = keys_to_check
else:
keys = ['name', 'description', 'enabled']
for k in ['id'] + keys:
msg = '%s unexpectedly None in %s' % (k, entity)
self.assertIsNotNone(entity.get(k), msg)
self.assertIsNotNone(entity.get('links'))
self.assertIsNotNone(entity['links'].get('self'))
self.assertThat(entity['links']['self'],
matchers.StartsWith('http://localhost'))
self.assertIn(entity['id'], entity['links']['self'])
if ref:
for k in keys:
msg = '%s not equal: %s != %s' % (k, ref[k], entity[k])
self.assertEqual(ref[k], entity[k])
return entity
# auth validation
def assertValidISO8601ExtendedFormatDatetime(self, dt):
try:
return timeutils.parse_strtime(dt, fmt=TIME_FORMAT)
except Exception:
msg = '%s is not a valid ISO 8601 extended format date time.' % dt
raise AssertionError(msg)
self.assertIsInstance(dt, datetime.datetime)
    def assertValidTokenResponse(self, r, user=None):
        """Assert validity common to every token response.

        Checks the X-Subject-Token header, that issued_at precedes
        expires_at, and the embedded user/domain data, optionally comparing
        against a reference user.

        :returns: the token body for further, more specific assertions
        """
        self.assertTrue(r.headers.get('X-Subject-Token'))
        token = r.result['token']
        self.assertIsNotNone(token.get('expires_at'))
        expires_at = self.assertValidISO8601ExtendedFormatDatetime(
            token['expires_at'])
        self.assertIsNotNone(token.get('issued_at'))
        issued_at = self.assertValidISO8601ExtendedFormatDatetime(
            token['issued_at'])
        # A token can never expire before it was issued.
        self.assertTrue(issued_at < expires_at)
        self.assertIn('user', token)
        self.assertIn('id', token['user'])
        self.assertIn('name', token['user'])
        self.assertIn('domain', token['user'])
        self.assertIn('id', token['user']['domain'])
        if user is not None:
            self.assertEqual(user['id'], token['user']['id'])
            self.assertEqual(user['name'], token['user']['name'])
            self.assertEqual(user['domain_id'], token['user']['domain']['id'])
        return token
def assertValidUnscopedTokenResponse(self, r, *args, **kwargs):
token = self.assertValidTokenResponse(r, *args, **kwargs)
self.assertNotIn('roles', token)
self.assertNotIn('catalog', token)
self.assertNotIn('project', token)
self.assertNotIn('domain', token)
return token
    def assertValidScopedTokenResponse(self, r, *args, **kwargs):
        """Assert validity common to project- and domain-scoped tokens.

        Keyword options:
          require_catalog: whether a service catalog must be present
          endpoint_filter / ep_filter_assoc: when the OS-EP-FILTER
            extension is enabled, assert the filtered endpoint count
        :returns: the token body
        """
        require_catalog = kwargs.pop('require_catalog', True)
        endpoint_filter = kwargs.pop('endpoint_filter', False)
        ep_filter_assoc = kwargs.pop('ep_filter_assoc', 0)
        token = self.assertValidTokenResponse(r, *args, **kwargs)
        if require_catalog:
            endpoint_num = 0
            self.assertIn('catalog', token)
            if isinstance(token['catalog'], list):
                # only test JSON
                for service in token['catalog']:
                    for endpoint in service['endpoints']:
                        self.assertNotIn('enabled', endpoint)
                        self.assertNotIn('legacy_endpoint_id', endpoint)
                        self.assertNotIn('service_id', endpoint)
                        endpoint_num += 1
            # sub test for the OS-EP-FILTER extension enabled
            if endpoint_filter:
                self.assertEqual(ep_filter_assoc, endpoint_num)
        else:
            self.assertNotIn('catalog', token)
        self.assertIn('roles', token)
        self.assertTrue(token['roles'])
        for role in token['roles']:
            self.assertIn('id', role)
            self.assertIn('name', role)
        return token
def assertValidProjectScopedTokenResponse(self, r, *args, **kwargs):
token = self.assertValidScopedTokenResponse(r, *args, **kwargs)
self.assertIn('project', token)
self.assertIn('id', token['project'])
self.assertIn('name', token['project'])
self.assertIn('domain', token['project'])
self.assertIn('id', token['project']['domain'])
self.assertIn('name', token['project']['domain'])
self.assertEqual(self.role_id, token['roles'][0]['id'])
return token
    def assertValidProjectTrustScopedTokenResponse(self, r, *args, **kwargs):
        """Assert a project-scoped token that was issued via a trust."""
        token = self.assertValidProjectScopedTokenResponse(r, *args, **kwargs)
        trust = token.get('OS-TRUST:trust')
        self.assertIsNotNone(trust)
        self.assertIsNotNone(trust.get('id'))
        # impersonation must be an actual boolean, not merely truthy/falsy
        self.assertIsInstance(trust.get('impersonation'), bool)
        self.assertIsNotNone(trust.get('trustor_user'))
        self.assertIsNotNone(trust.get('trustee_user'))
        self.assertIsNotNone(trust['trustor_user'].get('id'))
        self.assertIsNotNone(trust['trustee_user'].get('id'))
def assertValidDomainScopedTokenResponse(self, r, *args, **kwargs):
token = self.assertValidScopedTokenResponse(r, *args, **kwargs)
self.assertIn('domain', token)
self.assertIn('id', token['domain'])
self.assertIn('name', token['domain'])
return token
def assertEqualTokens(self, a, b):
"""Assert that two tokens are equal.
Compare two tokens except for their ids. This also truncates
the time in the comparison.
"""
def normalize(token):
del token['token']['expires_at']
del token['token']['issued_at']
return token
a_expires_at = self.assertValidISO8601ExtendedFormatDatetime(
a['token']['expires_at'])
b_expires_at = self.assertValidISO8601ExtendedFormatDatetime(
b['token']['expires_at'])
self.assertCloseEnoughForGovernmentWork(a_expires_at, b_expires_at)
a_issued_at = self.assertValidISO8601ExtendedFormatDatetime(
a['token']['issued_at'])
b_issued_at = self.assertValidISO8601ExtendedFormatDatetime(
b['token']['issued_at'])
self.assertCloseEnoughForGovernmentWork(a_issued_at, b_issued_at)
return self.assertDictEqual(normalize(a), normalize(b))
# catalog validation
    # catalog validation
    def assertValidCatalogResponse(self, resp, *args, **kwargs):
        """Assert a GET /v3/auth/catalog response is well formed."""
        self.assertEqual(set(['catalog', 'links']), set(resp.json.keys()))
        self.assertValidCatalog(resp.json['catalog'])
        self.assertIn('links', resp.json)
        self.assertIsInstance(resp.json['links'], dict)
        self.assertEqual(['self'], list(resp.json['links'].keys()))
        self.assertEqual(
            'http://localhost/v3/auth/catalog',
            resp.json['links']['self'])
    def assertValidCatalog(self, entity):
        """Assert a service catalog lists valid services and endpoints."""
        self.assertIsInstance(entity, list)
        self.assertTrue(len(entity) > 0)
        for service in entity:
            self.assertIsNotNone(service.get('id'))
            self.assertIsNotNone(service.get('name'))
            self.assertIsNotNone(service.get('type'))
            # internal flags must not be exposed in the catalog
            self.assertNotIn('enabled', service)
            self.assertTrue(len(service['endpoints']) > 0)
            for endpoint in service['endpoints']:
                self.assertIsNotNone(endpoint.get('id'))
                self.assertIsNotNone(endpoint.get('interface'))
                self.assertIsNotNone(endpoint.get('url'))
                self.assertNotIn('enabled', endpoint)
                self.assertNotIn('legacy_endpoint_id', endpoint)
                self.assertNotIn('service_id', endpoint)
# region validation
    # region validation
    def assertValidRegionListResponse(self, resp, *args, **kwargs):
        """Assert a region collection response is well formed."""
        # NOTE(jaypipes): I have to pass in a blank keys_to_check parameter
        #                 below otherwise the base assertValidEntity method
        #                 tries to find a "name" and an "enabled" key in the
        #                 returned ref dicts. The issue is, I don't understand
        #                 how the service and endpoint entity assertions below
        #                 actually work (they don't raise assertions), since
        #                 AFAICT, the service and endpoint tables don't have
        #                 a "name" column either... :(
        return self.assertValidListResponse(
            resp,
            'regions',
            self.assertValidRegion,
            keys_to_check=[],
            *args,
            **kwargs)
    def assertValidRegionResponse(self, resp, *args, **kwargs):
        """Assert a single-region response is well formed."""
        return self.assertValidResponse(
            resp,
            'region',
            self.assertValidRegion,
            keys_to_check=[],
            *args,
            **kwargs)
    def assertValidRegion(self, entity, ref=None):
        """Make assertions specific to region entities."""
        self.assertIsNotNone(entity.get('description'))
        if ref:
            self.assertEqual(ref['description'], entity['description'])
        return entity
# service validation
    # service validation
    def assertValidServiceListResponse(self, resp, *args, **kwargs):
        """Assert a service collection response is well formed."""
        return self.assertValidListResponse(
            resp,
            'services',
            self.assertValidService,
            *args,
            **kwargs)
    def assertValidServiceResponse(self, resp, *args, **kwargs):
        """Assert a single-service response is well formed."""
        return self.assertValidResponse(
            resp,
            'service',
            self.assertValidService,
            *args,
            **kwargs)
    def assertValidService(self, entity, ref=None):
        """Make assertions specific to service entities."""
        self.assertIsNotNone(entity.get('type'))
        # 'enabled' must be a real boolean, not just truthy/falsy
        self.assertIsInstance(entity.get('enabled'), bool)
        if ref:
            self.assertEqual(ref['type'], entity['type'])
        return entity
# endpoint validation
    # endpoint validation
    def assertValidEndpointListResponse(self, resp, *args, **kwargs):
        """Assert an endpoint collection response is well formed."""
        return self.assertValidListResponse(
            resp,
            'endpoints',
            self.assertValidEndpoint,
            *args,
            **kwargs)
    def assertValidEndpointResponse(self, resp, *args, **kwargs):
        """Assert a single-endpoint response is well formed."""
        return self.assertValidResponse(
            resp,
            'endpoint',
            self.assertValidEndpoint,
            *args,
            **kwargs)
    def assertValidEndpoint(self, entity, ref=None):
        """Make assertions specific to endpoint entities."""
        self.assertIsNotNone(entity.get('interface'))
        self.assertIsNotNone(entity.get('service_id'))
        self.assertIsInstance(entity['enabled'], bool)
        # this is intended to be an unexposed implementation detail
        self.assertNotIn('legacy_endpoint_id', entity)
        if ref:
            self.assertEqual(ref['interface'], entity['interface'])
            self.assertEqual(ref['service_id'], entity['service_id'])
            if ref.get('region') is not None:
                self.assertEqual(ref['region_id'], entity.get('region_id'))
        return entity
# domain validation
    # domain validation
    def assertValidDomainListResponse(self, resp, *args, **kwargs):
        """Assert a domain collection response is well formed."""
        return self.assertValidListResponse(
            resp,
            'domains',
            self.assertValidDomain,
            *args,
            **kwargs)
    def assertValidDomainResponse(self, resp, *args, **kwargs):
        """Assert a single-domain response is well formed."""
        return self.assertValidResponse(
            resp,
            'domain',
            self.assertValidDomain,
            *args,
            **kwargs)
    def assertValidDomain(self, entity, ref=None):
        """Make assertions specific to domain entities.

        Domains currently have no attributes beyond those checked by
        assertValidEntity; ref is accepted only for signature symmetry
        with the other entity validators.
        """
        if ref:
            pass
        return entity
# project validation
    # project validation
    def assertValidProjectListResponse(self, resp, *args, **kwargs):
        """Assert a project collection response is well formed."""
        return self.assertValidListResponse(
            resp,
            'projects',
            self.assertValidProject,
            *args,
            **kwargs)
    def assertValidProjectResponse(self, resp, *args, **kwargs):
        """Assert a single-project response is well formed."""
        return self.assertValidResponse(
            resp,
            'project',
            self.assertValidProject,
            *args,
            **kwargs)
    def assertValidProject(self, entity, ref=None):
        """Make assertions specific to project entities."""
        self.assertIsNotNone(entity.get('domain_id'))
        if ref:
            self.assertEqual(ref['domain_id'], entity['domain_id'])
        return entity
# user validation
    # user validation
    def assertValidUserListResponse(self, resp, *args, **kwargs):
        """Assert a user collection response is well formed."""
        return self.assertValidListResponse(
            resp,
            'users',
            self.assertValidUser,
            *args,
            **kwargs)
    def assertValidUserResponse(self, resp, *args, **kwargs):
        """Assert a single-user response is well formed."""
        return self.assertValidResponse(
            resp,
            'user',
            self.assertValidUser,
            *args,
            **kwargs)
    def assertValidUser(self, entity, ref=None):
        """Make assertions specific to user entities."""
        self.assertIsNotNone(entity.get('domain_id'))
        self.assertIsNotNone(entity.get('email'))
        # passwords must never be returned by the API
        self.assertIsNone(entity.get('password'))
        # 'tenantId' is a v2-only attribute and must not leak into v3
        self.assertNotIn('tenantId', entity)
        if ref:
            self.assertEqual(ref['domain_id'], entity['domain_id'])
            self.assertEqual(ref['email'], entity['email'])
            if 'default_project_id' in ref:
                self.assertIsNotNone(ref['default_project_id'])
                self.assertEqual(ref['default_project_id'],
                                 entity['default_project_id'])
        return entity
# group validation
    # group validation
    def assertValidGroupListResponse(self, resp, *args, **kwargs):
        """Assert a group collection response is well formed."""
        return self.assertValidListResponse(
            resp,
            'groups',
            self.assertValidGroup,
            *args,
            **kwargs)
    def assertValidGroupResponse(self, resp, *args, **kwargs):
        """Assert a single-group response is well formed."""
        return self.assertValidResponse(
            resp,
            'group',
            self.assertValidGroup,
            *args,
            **kwargs)
    def assertValidGroup(self, entity, ref=None):
        """Make assertions specific to group entities."""
        self.assertIsNotNone(entity.get('name'))
        if ref:
            self.assertEqual(ref['name'], entity['name'])
        return entity
# credential validation
    # credential validation
    def assertValidCredentialListResponse(self, resp, *args, **kwargs):
        """Assert a credential collection response is well formed."""
        return self.assertValidListResponse(
            resp,
            'credentials',
            self.assertValidCredential,
            keys_to_check=['blob', 'user_id', 'type'],
            *args,
            **kwargs)
    def assertValidCredentialResponse(self, resp, *args, **kwargs):
        """Assert a single-credential response is well formed."""
        return self.assertValidResponse(
            resp,
            'credential',
            self.assertValidCredential,
            keys_to_check=['blob', 'user_id', 'type'],
            *args,
            **kwargs)
    def assertValidCredential(self, entity, ref=None):
        """Make assertions specific to credential entities."""
        self.assertIsNotNone(entity.get('user_id'))
        self.assertIsNotNone(entity.get('blob'))
        self.assertIsNotNone(entity.get('type'))
        if ref:
            self.assertEqual(ref['user_id'], entity['user_id'])
            self.assertEqual(ref['blob'], entity['blob'])
            self.assertEqual(ref['type'], entity['type'])
            # project_id is optional on both sides; compare via .get()
            self.assertEqual(ref.get('project_id'), entity.get('project_id'))
        return entity
# role validation
    # role validation
    def assertValidRoleListResponse(self, resp, *args, **kwargs):
        """Assert a role collection response is well formed."""
        return self.assertValidListResponse(
            resp,
            'roles',
            self.assertValidRole,
            keys_to_check=['name'],
            *args,
            **kwargs)
    def assertValidRoleResponse(self, resp, *args, **kwargs):
        """Assert a single-role response is well formed."""
        return self.assertValidResponse(
            resp,
            'role',
            self.assertValidRole,
            keys_to_check=['name'],
            *args,
            **kwargs)
    def assertValidRole(self, entity, ref=None):
        """Make assertions specific to role entities."""
        self.assertIsNotNone(entity.get('name'))
        if ref:
            self.assertEqual(ref['name'], entity['name'])
        return entity
# role assignment validation
def assertValidRoleAssignmentListResponse(self, resp, expected_length=None,
resource_url=None):
entities = resp.result.get('role_assignments')
if expected_length:
self.assertEqual(expected_length, len(entities))
# Collections should have relational links
self.assertValidListLinks(resp.result.get('links'),
resource_url=resource_url)
for entity in entities:
self.assertIsNotNone(entity)
self.assertValidRoleAssignment(entity)
return entities
    def assertValidRoleAssignment(self, entity, ref=None):
        """Make assertions specific to role assignment entities.

        Asserts a role, exactly one actor (user xor group), exactly one
        target (project xor domain) and an assignment link, optionally
        comparing against a reference entity.
        """
        # A role should be present
        self.assertIsNotNone(entity.get('role'))
        self.assertIsNotNone(entity['role'].get('id'))
        # Only one of user or group should be present
        if entity.get('user'):
            self.assertIsNone(entity.get('group'))
            self.assertIsNotNone(entity['user'].get('id'))
        else:
            self.assertIsNotNone(entity.get('group'))
            self.assertIsNotNone(entity['group'].get('id'))
        # A scope should be present and have only one of domain or project
        self.assertIsNotNone(entity.get('scope'))
        if entity['scope'].get('project'):
            self.assertIsNone(entity['scope'].get('domain'))
            self.assertIsNotNone(entity['scope']['project'].get('id'))
        else:
            self.assertIsNotNone(entity['scope'].get('domain'))
            self.assertIsNotNone(entity['scope']['domain'].get('id'))
        # An assignment link should be present
        self.assertIsNotNone(entity.get('links'))
        self.assertIsNotNone(entity['links'].get('assignment'))
        if ref:
            # Temporarily strip the links from the ref for the subset
            # comparison; the finally block restores them so callers can
            # reuse the same ref for multiple assertions.
            links = ref.pop('links')
            try:
                self.assertDictContainsSubset(ref, entity)
                self.assertIn(links['assignment'],
                              entity['links']['assignment'])
            finally:
                if links:
                    ref['links'] = links
def assertRoleAssignmentInListResponse(self, resp, ref, expected=1):
found_count = 0
for entity in resp.result.get('role_assignments'):
try:
self.assertValidRoleAssignment(entity, ref=ref)
except Exception:
# It doesn't match, so let's go onto the next one
pass
else:
found_count += 1
self.assertEqual(expected, found_count)
    def assertRoleAssignmentNotInListResponse(self, resp, ref):
        """Assert the ref assignment does not appear in the response."""
        self.assertRoleAssignmentInListResponse(resp, ref=ref, expected=0)
# policy validation
    # policy validation
    def assertValidPolicyListResponse(self, resp, *args, **kwargs):
        """Assert a policy collection response is well formed."""
        return self.assertValidListResponse(
            resp,
            'policies',
            self.assertValidPolicy,
            *args,
            **kwargs)
    def assertValidPolicyResponse(self, resp, *args, **kwargs):
        """Assert a single-policy response is well formed."""
        return self.assertValidResponse(
            resp,
            'policy',
            self.assertValidPolicy,
            *args,
            **kwargs)
    def assertValidPolicy(self, entity, ref=None):
        """Make assertions specific to policy entities."""
        self.assertIsNotNone(entity.get('blob'))
        self.assertIsNotNone(entity.get('type'))
        if ref:
            self.assertEqual(ref['blob'], entity['blob'])
            self.assertEqual(ref['type'], entity['type'])
        return entity
# trust validation
    # trust validation
    def assertValidTrustListResponse(self, resp, *args, **kwargs):
        """Assert a trust collection response is well formed."""
        return self.assertValidListResponse(
            resp,
            'trusts',
            self.assertValidTrustSummary,
            keys_to_check=['trustor_user_id',
                           'trustee_user_id',
                           'impersonation'],
            *args,
            **kwargs)
    def assertValidTrustResponse(self, resp, *args, **kwargs):
        """Assert a single-trust response is well formed."""
        return self.assertValidResponse(
            resp,
            'trust',
            self.assertValidTrust,
            keys_to_check=['trustor_user_id',
                           'trustee_user_id',
                           'impersonation'],
            *args,
            **kwargs)
    def assertValidTrustSummary(self, entity, ref=None):
        """Validate a trust as it appears in list responses (no roles)."""
        return self.assertValidTrust(entity, ref, summary=True)
    def assertValidTrust(self, entity, ref=None, summary=False):
        """Make assertions specific to trust entities.

        :param entity: the trust entity under test
        :param ref: optional reference trust to compare against
        :param summary: True when validating a list-item form, which
            omits the role details present in the detailed form
        :returns: the validated entity
        """
        self.assertIsNotNone(entity.get('trustor_user_id'))
        self.assertIsNotNone(entity.get('trustee_user_id'))
        self.assertIsNotNone(entity.get('impersonation'))
        self.assertIn('expires_at', entity)
        if entity['expires_at'] is not None:
            self.assertValidISO8601ExtendedFormatDatetime(entity['expires_at'])
        if summary:
            # Trust list contains no roles, but getting a specific
            # trust by ID provides the detailed response containing roles
            self.assertNotIn('roles', entity)
            self.assertIn('project_id', entity)
        else:
            for role in entity['roles']:
                self.assertIsNotNone(role)
                self.assertValidEntity(role, keys_to_check=['name'])
                self.assertValidRole(role)
            self.assertValidListLinks(entity.get('roles_links'))
        # always disallow role xor project_id (neither or both is allowed)
        has_roles = bool(entity.get('roles'))
        has_project = bool(entity.get('project_id'))
        self.assertFalse(has_roles ^ has_project)
        if ref:
            self.assertEqual(ref['trustor_user_id'], entity['trustor_user_id'])
            self.assertEqual(ref['trustee_user_id'], entity['trustee_user_id'])
            self.assertEqual(ref['project_id'], entity['project_id'])
            if entity.get('expires_at') or ref.get('expires_at'):
                # expiry only needs to be close, not byte-equal
                entity_exp = self.assertValidISO8601ExtendedFormatDatetime(
                    entity['expires_at'])
                ref_exp = self.assertValidISO8601ExtendedFormatDatetime(
                    ref['expires_at'])
                self.assertCloseEnoughForGovernmentWork(entity_exp, ref_exp)
            else:
                self.assertEqual(ref.get('expires_at'),
                                 entity.get('expires_at'))
        return entity
def build_external_auth_request(self, remote_user,
remote_domain=None, auth_data=None,
kerberos=False):
context = {'environment': {'REMOTE_USER': remote_user,
'AUTH_TYPE': 'Negotiate'}}
if remote_domain:
context['environment']['REMOTE_DOMAIN'] = remote_domain
if not auth_data:
auth_data = self.build_authentication_request(
kerberos=kerberos)['auth']
no_context = None
auth_info = auth.controllers.AuthInfo.create(no_context, auth_data)
auth_context = {'extras': {}, 'method_names': []}
return context, auth_info, auth_context
class VersionTestCase(RestfulTestCase):
    """Placeholder for v3 version API tests."""

    def test_get_version(self):
        # Intentionally empty; no version assertions are made here yet.
        pass
# NOTE(gyee): test AuthContextMiddleware here instead of test_middleware.py
# because we need the token
# NOTE(gyee): test AuthContextMiddleware here instead of test_middleware.py
# because we need the token
class AuthContextMiddlewareTestCase(RestfulTestCase):
    """Tests for the auth context built by AuthContextMiddleware."""

    def _mock_request_object(self, token_id):
        # Minimal stand-in for a request object carrying the token header.
        class fake_req(object):
            headers = {middleware.AUTH_TOKEN_HEADER: token_id}
            environ = {}
        return fake_req()

    def test_auth_context_build_by_middleware(self):
        # test to make sure AuthContextMiddleware successfully builds the
        # auth context from the incoming auth token
        admin_token = self.get_scoped_token()
        req = self._mock_request_object(admin_token)
        application = None
        middleware.AuthContextMiddleware(application).process_request(req)
        self.assertEqual(
            self.user['id'],
            req.environ.get(authorization.AUTH_CONTEXT_ENV)['user_id'])

    def test_auth_context_override(self):
        overridden_context = 'OVERRIDDEN_CONTEXT'
        # this token should not be used
        token = uuid.uuid4().hex
        req = self._mock_request_object(token)
        req.environ[authorization.AUTH_CONTEXT_ENV] = overridden_context
        application = None
        middleware.AuthContextMiddleware(application).process_request(req)
        # make sure overridden context take precedence
        self.assertEqual(overridden_context,
                         req.environ.get(authorization.AUTH_CONTEXT_ENV))

    def test_admin_token_auth_context(self):
        # test to make sure AuthContextMiddleware does not attempt to build
        # auth context if the incoming auth token is the special admin token
        req = self._mock_request_object(CONF.admin_token)
        application = None
        middleware.AuthContextMiddleware(application).process_request(req)
        self.assertDictEqual(req.environ.get(authorization.AUTH_CONTEXT_ENV),
                             {})

    def test_unscoped_token_auth_context(self):
        unscoped_token = self.get_unscoped_token()
        req = self._mock_request_object(unscoped_token)
        application = None
        middleware.AuthContextMiddleware(application).process_request(req)
        # an unscoped token must not contribute any scope data
        for key in ['project_id', 'domain_id', 'domain_name']:
            self.assertNotIn(
                key,
                req.environ.get(authorization.AUTH_CONTEXT_ENV))

    def test_project_scoped_token_auth_context(self):
        project_scoped_token = self.get_scoped_token()
        req = self._mock_request_object(project_scoped_token)
        application = None
        middleware.AuthContextMiddleware(application).process_request(req)
        self.assertEqual(
            self.project['id'],
            req.environ.get(authorization.AUTH_CONTEXT_ENV)['project_id'])

    def test_domain_scoped_token_auth_context(self):
        # grant the domain role to user
        path = '/domains/%s/users/%s/roles/%s' % (
            self.domain['id'], self.user['id'], self.role['id'])
        self.put(path=path)
        domain_scoped_token = self.get_domain_scoped_token()
        req = self._mock_request_object(domain_scoped_token)
        application = None
        middleware.AuthContextMiddleware(application).process_request(req)
        self.assertEqual(
            self.domain['id'],
            req.environ.get(authorization.AUTH_CONTEXT_ENV)['domain_id'])
        self.assertEqual(
            self.domain['name'],
            req.environ.get(authorization.AUTH_CONTEXT_ENV)['domain_name'])
class JsonHomeTestMixin(object):
    """JSON Home test.

    Mixin this class to provide a test for the JSON-Home response for an
    extension.

    The base class must set JSON_HOME_DATA to a dict of relationship URLs
    (rels) to the JSON-Home data for the relationship. The rels and
    associated data must be in the response.
    """
    def test_get_json_home(self):
        # Request the JSON-Home document via content negotiation.
        resp = self.get('/', convert=False,
                        headers={'Accept': 'application/json-home'})
        self.assertThat(resp.headers['Content-Type'],
                        matchers.Equals('application/json-home'))
        resp_data = jsonutils.loads(resp.body)
        # Check that the example relationships are present.
        for rel in self.JSON_HOME_DATA:
            self.assertThat(resp_data['resources'][rel],
                            matchers.Equals(self.JSON_HOME_DATA[rel]))
class AssignmentTestMixin(object):
    """Helpers for building role-assignment URLs, links and entities."""

    def build_role_assignment_query_url(self, effective=False, **filters):
        """Build and return a role assignment query url with provided params.

        Available filters are: domain_id, project_id, user_id, group_id,
        role_id and inherited_to_projects.
        """
        parts = []
        for name, value in filters.items():
            if name == 'inherited_to_projects':
                parts.append('scope.OS-INHERIT:inherited_to=projects')
                continue
            if name in ('domain_id', 'project_id'):
                prefix = 'scope.'
            elif name in ('user_id', 'group_id', 'role_id'):
                prefix = ''
            else:
                raise ValueError(
                    'Invalid key \'%s\' in provided filters.' % name)
            parts.append('%s%s=%s' % (prefix, name.replace('_', '.'), value))

        query = '&'.join(parts)
        if effective:
            query = 'effective' + ('&' + query if query else '')
        if query:
            query = '?' + query
        return '/role_assignments%s' % query

    def build_role_assignment_link(self, **attribs):
        """Build and return a role assignment link with provided attributes.

        Provided attributes are expected to contain: domain_id or project_id,
        user_id or group_id, role_id and, optionally, inherited_to_projects.
        """
        if attribs.get('domain_id'):
            target = '/domains/%s' % attribs['domain_id']
        else:
            target = '/projects/%s' % attribs['project_id']

        if attribs.get('user_id'):
            actor = '/users/%s' % attribs['user_id']
        else:
            actor = '/groups/%s' % attribs['group_id']

        link = '%s%s/roles/%s' % (target, actor, attribs['role_id'])

        if attribs.get('inherited_to_projects'):
            link = '/OS-INHERIT%s/inherited_to_projects' % link
        return link

    def build_role_assignment_entity(self, link=None, **attribs):
        """Build and return a role assignment entity with provided attributes.

        Provided attributes are expected to contain: domain_id or project_id,
        user_id or group_id, role_id and, optionally, inherited_to_projects.
        """
        if attribs.get('domain_id'):
            scope = {'domain': {'id': attribs['domain_id']}}
        else:
            scope = {'project': {'id': attribs['project_id']}}

        entity = {
            'links': {
                'assignment': (link or
                               self.build_role_assignment_link(**attribs)),
            },
            'scope': scope,
            'role': {'id': attribs['role_id']},
        }

        if attribs.get('user_id'):
            entity['user'] = {'id': attribs['user_id']}
            if attribs.get('group_id'):
                # An assignment sourced from a group but expanded to a
                # specific user also carries a membership link.
                entity['links']['membership'] = ('/groups/%s/users/%s' %
                                                 (attribs['group_id'],
                                                  attribs['user_id']))
        else:
            entity['group'] = {'id': attribs['group_id']}

        if attribs.get('inherited_to_projects'):
            entity['scope']['OS-INHERIT:inherited_to'] = 'projects'
        return entity
| idjaw/keystone | keystone/tests/unit/test_v3.py | Python | apache-2.0 | 51,696 |
# Copyright 2011 OpenStack Foundation
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Volume drivers for libvirt."""
import glob
import os
import time
import urllib2
from oslo.config import cfg
from oslo.utils import strutils
import six
import six.moves.urllib.parse as urlparse
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LW
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova.openstack.common import processutils
from nova import paths
from nova.storage import linuxscsi
from nova import utils
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import utils as libvirt_utils
LOG = logging.getLogger(__name__)

# Configuration options registered under the [libvirt] group; they control
# how each volume driver below discovers, mounts and retries attachments.
volume_opts = [
    cfg.IntOpt('num_iscsi_scan_tries',
               default=5,
               help='Number of times to rescan iSCSI target to find volume'),
    cfg.IntOpt('num_iser_scan_tries',
               default=5,
               help='Number of times to rescan iSER target to find volume'),
    cfg.StrOpt('rbd_user',
               help='The RADOS client name for accessing rbd volumes'),
    # NOTE: the implicit string concatenation below previously lacked a
    # separating space and rendered as "rbd_uservolumes".
    cfg.StrOpt('rbd_secret_uuid',
               help='The libvirt UUID of the secret for the rbd_user '
                    'volumes'),
    cfg.StrOpt('nfs_mount_point_base',
               default=paths.state_path_def('mnt'),
               help='Directory where the NFS volume is mounted on the'
                    ' compute node'),
    # NOTE: fixed typo "passedf" -> "passed" in the help text.
    cfg.StrOpt('nfs_mount_options',
               help='Mount options passed to the NFS client. See section '
                    'of the nfs man page for details'),
    cfg.IntOpt('num_aoe_discover_tries',
               default=3,
               help='Number of times to rediscover AoE target to find volume'),
    cfg.StrOpt('glusterfs_mount_point_base',
               default=paths.state_path_def('mnt'),
               help='Directory where the glusterfs volume is mounted on the '
                    'compute node'),
    cfg.BoolOpt('iscsi_use_multipath',
                default=False,
                help='Use multipath connection of the iSCSI volume'),
    cfg.BoolOpt('iser_use_multipath',
                default=False,
                help='Use multipath connection of the iSER volume'),
    cfg.StrOpt('scality_sofs_config',
               help='Path or URL to Scality SOFS configuration file'),
    cfg.StrOpt('scality_sofs_mount_point',
               default='$state_path/scality',
               help='Base dir where Scality SOFS shall be mounted'),
    cfg.ListOpt('qemu_allowed_storage_drivers',
                default=[],
                help='Protocols listed here will be accessed directly '
                     'from QEMU. Currently supported protocols: [gluster]')
    ]
CONF = cfg.CONF
CONF.register_opts(volume_opts, 'libvirt')
class LibvirtBaseVolumeDriver(object):
    """Common base for the libvirt volume drivers.

    Subclasses override ``get_config``, ``connect_volume`` and
    ``disconnect_volume`` to describe how a particular volume type is
    exposed to the guest.
    """

    def __init__(self, connection, is_block_dev):
        # ``connection`` is the libvirt driver instance; ``is_block_dev``
        # influences which disk driver name is picked for the guest XML.
        self.connection = connection
        self.is_block_dev = is_block_dev

    def get_config(self, connection_info, disk_info):
        """Returns xml for libvirt."""
        conf = vconfig.LibvirtConfigGuestDisk()
        conf.driver_name = libvirt_utils.pick_disk_driver_name(
            self.connection._get_hypervisor_version(),
            self.is_block_dev
        )
        conf.source_device = disk_info['type']
        conf.driver_format = "raw"
        conf.driver_cache = "none"
        conf.target_dev = disk_info['dev']
        conf.target_bus = disk_info['bus']
        conf.serial = connection_info.get('serial')

        data = connection_info.get('data', {})

        # Optional block-size tuning hints from the volume backend.
        if 'logical_block_size' in data:
            conf.logical_block_size = data['logical_block_size']
        if 'physical_block_size' in data:
            conf.physical_block_size = data['physical_block_size']

        # Map any QoS rate limits onto the disk_* tuning attributes.
        specs = data.get('qos_specs')
        if specs:
            tune_opts = ('total_bytes_sec', 'read_bytes_sec',
                         'write_bytes_sec', 'total_iops_sec',
                         'read_iops_sec', 'write_iops_sec')
            if isinstance(specs, dict):
                for key, value in specs.iteritems():
                    if key in tune_opts:
                        setattr(conf, 'disk_' + key, value)
            else:
                LOG.warn(_LW('Unknown content in connection_info/'
                             'qos_specs: %s'), specs)

        # Honour a read-only / read-write access mode if one was supplied.
        access_mode = data.get('access_mode')
        if access_mode:
            if access_mode in ('ro', 'rw'):
                conf.readonly = access_mode == 'ro'
            else:
                LOG.error(_LE('Unknown content in '
                              'connection_info/access_mode: %s'),
                          access_mode)
                raise exception.InvalidVolumeAccessMode(
                    access_mode=access_mode)

        return conf

    def connect_volume(self, connection_info, disk_info):
        """Connect the volume. Returns xml for libvirt."""
        return self.get_config(connection_info, disk_info)

    def disconnect_volume(self, connection_info, disk_dev):
        """Disconnect the volume."""
        pass
class LibvirtVolumeDriver(LibvirtBaseVolumeDriver):
    """Driver for volumes already exposed as host block devices."""

    def __init__(self, connection):
        super(LibvirtVolumeDriver, self).__init__(connection,
                                                  is_block_dev=True)

    def get_config(self, connection_info, disk_info):
        """Returns xml for libvirt."""
        conf = super(LibvirtVolumeDriver, self).get_config(connection_info,
                                                           disk_info)
        # Point the guest disk straight at the host device node.
        conf.source_type = "block"
        conf.source_path = connection_info['data']['device_path']
        return conf
class LibvirtFakeVolumeDriver(LibvirtBaseVolumeDriver):
    """Driver to attach fake volumes to libvirt (testing only)."""

    def __init__(self, connection):
        super(LibvirtFakeVolumeDriver, self).__init__(connection,
                                                      is_block_dev=True)

    def get_config(self, connection_info, disk_info):
        """Returns xml for libvirt."""
        conf = super(LibvirtFakeVolumeDriver,
                     self).get_config(connection_info, disk_info)
        # Advertise a bogus network disk; nothing is actually attached.
        conf.source_type = "network"
        conf.source_protocol = "fake"
        conf.source_name = "fake"
        return conf
class LibvirtNetVolumeDriver(LibvirtBaseVolumeDriver):
    """Driver to attach network-protocol volumes (e.g. rbd) to libvirt.

    The guest accesses the volume directly over the network via the
    hypervisor's native network-disk support; nothing is mounted on the
    compute host.
    """
    def __init__(self, connection):
        super(LibvirtNetVolumeDriver,
              self).__init__(connection, is_block_dev=False)
    def get_config(self, connection_info, disk_info):
        """Returns xml for libvirt."""
        conf = super(LibvirtNetVolumeDriver,
                     self).get_config(connection_info, disk_info)
        netdisk_properties = connection_info['data']
        conf.source_type = "network"
        conf.source_protocol = connection_info['driver_volume_type']
        conf.source_name = netdisk_properties.get('name')
        conf.source_hosts = netdisk_properties.get('hosts', [])
        conf.source_ports = netdisk_properties.get('ports', [])
        auth_enabled = netdisk_properties.get('auth_enabled')
        # A locally configured rbd secret takes precedence over whatever
        # the volume service sent, and forces authentication on.
        if (conf.source_protocol == 'rbd' and
                CONF.libvirt.rbd_secret_uuid):
            conf.auth_secret_uuid = CONF.libvirt.rbd_secret_uuid
            auth_enabled = True  # Force authentication locally
            if CONF.libvirt.rbd_user:
                conf.auth_username = CONF.libvirt.rbd_user
        if auth_enabled:
            # Fall back to the credentials from the connection info for
            # anything not already set locally above.
            conf.auth_username = (conf.auth_username or
                                  netdisk_properties['auth_username'])
            conf.auth_secret_type = netdisk_properties['secret_type']
            conf.auth_secret_uuid = (conf.auth_secret_uuid or
                                     netdisk_properties['secret_uuid'])
        return conf
class LibvirtISCSIVolumeDriver(LibvirtBaseVolumeDriver):
    """Driver to attach iSCSI volumes to libvirt.

    Discovers and logs in to iSCSI targets with iscsiadm, optionally
    aggregating paths through multipathd, and exposes the resulting
    host block device to the guest.
    """
    def __init__(self, connection):
        super(LibvirtISCSIVolumeDriver, self).__init__(connection,
                                                       is_block_dev=True)
        self.num_scan_tries = CONF.libvirt.num_iscsi_scan_tries
        self.use_multipath = CONF.libvirt.iscsi_use_multipath
    def _run_iscsiadm(self, iscsi_properties, iscsi_command, **kwargs):
        """Run iscsiadm in node mode against this target's IQN/portal."""
        check_exit_code = kwargs.pop('check_exit_code', 0)
        (out, err) = utils.execute('iscsiadm', '-m', 'node', '-T',
                                   iscsi_properties['target_iqn'],
                                   '-p', iscsi_properties['target_portal'],
                                   *iscsi_command, run_as_root=True,
                                   check_exit_code=check_exit_code)
        msg = ('iscsiadm %(command)s: stdout=%(out)s stderr=%(err)s' %
               {'command': iscsi_command, 'out': out, 'err': err})
        # NOTE(bpokorny): iscsi_command can contain passwords so we need to
        # sanitize the password in the message.
        LOG.debug(strutils.mask_password(msg))
        return (out, err)
    def _iscsiadm_update(self, iscsi_properties, property_key, property_value,
                         **kwargs):
        """Update one property in the iscsiadm node DB ("--op update")."""
        iscsi_command = ('--op', 'update', '-n', property_key,
                         '-v', property_value)
        return self._run_iscsiadm(iscsi_properties, iscsi_command, **kwargs)
    def _get_target_portals_from_iscsiadm_output(self, output):
        # return both portals and iqns
        #
        # as we are parsing a command line utility, allow for the
        # possibility that additional debug data is spewed in the
        # stream, and only grab actual ip / iqn lines.
        targets = []
        for data in [line.split() for line in output.splitlines()]:
            if len(data) == 2 and data[1].startswith('iqn.'):
                targets.append(data)
        return targets
    def get_config(self, connection_info, disk_info):
        """Returns xml for libvirt."""
        conf = super(LibvirtISCSIVolumeDriver,
                     self).get_config(connection_info, disk_info)
        conf.source_type = "block"
        conf.source_path = connection_info['data']['host_device']
        return conf
    @utils.synchronized('connect_volume')
    def connect_volume(self, connection_info, disk_info):
        """Attach the volume to instance_name."""
        iscsi_properties = connection_info['data']
        if self.use_multipath:
            # multipath installed, discovering other targets if available
            # multipath should be configured on the nova-compute node,
            # in order to fit storage vendor
            out = self._run_iscsiadm_bare(['-m',
                                           'discovery',
                                           '-t',
                                           'sendtargets',
                                           '-p',
                                           iscsi_properties['target_portal']],
                                          check_exit_code=[0, 255])[0] \
                or ""
            # Log in to every portal/IQN pair discovered for this target.
            for ip, iqn in self._get_target_portals_from_iscsiadm_output(out):
                props = iscsi_properties.copy()
                props['target_portal'] = ip
                props['target_iqn'] = iqn
                self._connect_to_iscsi_portal(props)
            self._rescan_iscsi()
        else:
            self._connect_to_iscsi_portal(iscsi_properties)
            # Detect new/resized LUNs for existing sessions
            self._run_iscsiadm(iscsi_properties, ("--rescan",))
        host_device = self._get_host_device(iscsi_properties)
        # The /dev/disk/by-path/... node is not always present immediately
        # TODO(justinsb): This retry-with-delay is a pattern, move to utils?
        tries = 0
        disk_dev = disk_info['dev']
        # Retry with quadratic back-off until the device node shows up.
        while not os.path.exists(host_device):
            if tries >= self.num_scan_tries:
                raise exception.NovaException(_("iSCSI device not found at %s")
                                              % (host_device))
            LOG.warn(_LW("ISCSI volume not yet found at: %(disk_dev)s. "
                         "Will rescan & retry. Try number: %(tries)s"),
                     {'disk_dev': disk_dev, 'tries': tries})
            # The rescan isn't documented as being necessary(?), but it helps
            self._run_iscsiadm(iscsi_properties, ("--rescan",))
            tries = tries + 1
            if not os.path.exists(host_device):
                time.sleep(tries ** 2)
        if tries != 0:
            LOG.debug("Found iSCSI node %(disk_dev)s "
                      "(after %(tries)s rescans)",
                      {'disk_dev': disk_dev,
                       'tries': tries})
        if self.use_multipath:
            # we use the multipath device instead of the single path device
            self._rescan_multipath()
            multipath_device = self._get_multipath_device_name(host_device)
            if multipath_device is not None:
                host_device = multipath_device
        connection_info['data']['host_device'] = host_device
        return self.get_config(connection_info, disk_info)
    @utils.synchronized('connect_volume')
    def disconnect_volume(self, connection_info, disk_dev):
        """Detach the volume from instance_name."""
        iscsi_properties = connection_info['data']
        host_device = self._get_host_device(iscsi_properties)
        multipath_device = None
        if self.use_multipath:
            multipath_device = self._get_multipath_device_name(host_device)
        super(LibvirtISCSIVolumeDriver,
              self).disconnect_volume(connection_info, disk_dev)
        if self.use_multipath and multipath_device:
            return self._disconnect_volume_multipath_iscsi(iscsi_properties,
                                                           multipath_device)
        # NOTE(vish): Only disconnect from the target if no luns from the
        #             target are in use.
        device_prefix = ("/dev/disk/by-path/ip-%s-iscsi-%s-lun-" %
                         (iscsi_properties['target_portal'],
                          iscsi_properties['target_iqn']))
        devices = self.connection._get_all_block_devices()
        devices = [dev for dev in devices if dev.startswith(device_prefix)]
        if not devices:
            self._disconnect_from_iscsi_portal(iscsi_properties)
        elif host_device not in devices:
            # Delete device if LUN is not in use by another instance
            self._delete_device(host_device)
    def _delete_device(self, device_path):
        """Ask the kernel to drop a SCSI device via its sysfs delete node."""
        device_name = os.path.basename(os.path.realpath(device_path))
        delete_control = '/sys/block/' + device_name + '/device/delete'
        if os.path.exists(delete_control):
            # Copy '1' from stdin to the device delete control file
            utils.execute('cp', '/dev/stdin', delete_control,
                          process_input='1', run_as_root=True)
        else:
            LOG.warn(_LW("Unable to delete volume device %s"), device_name)
    def _remove_multipath_device_descriptor(self, disk_descriptor):
        """Flush an unused multipath map ("multipath -f <name>")."""
        disk_descriptor = disk_descriptor.replace('/dev/mapper/', '')
        try:
            self._run_multipath(['-f', disk_descriptor],
                                check_exit_code=[0, 1])
        except processutils.ProcessExecutionError as exc:
            # Because not all cinder drivers need to remove the dev mapper,
            # here just logs a warning to avoid affecting those drivers in
            # exceptional cases.
            LOG.warn(_LW('Failed to remove multipath device descriptor '
                         '%(dev_mapper)s. Exception message: %(msg)s')
                     % {'dev_mapper': disk_descriptor,
                        'msg': exc.message})
    def _disconnect_volume_multipath_iscsi(self, iscsi_properties,
                                           multipath_device):
        """Tear down a multipath volume without breaking shared targets."""
        self._rescan_iscsi()
        self._rescan_multipath()
        block_devices = self.connection._get_all_block_devices()
        # Normalize every in-use block device to its multipath device name.
        devices = []
        for dev in block_devices:
            if "/mapper/" in dev:
                devices.append(dev)
            else:
                mpdev = self._get_multipath_device_name(dev)
                if mpdev:
                    devices.append(mpdev)
        # Do a discovery to find all targets.
        # Targets for multiple paths for the same multipath device
        # may not be the same.
        out = self._run_iscsiadm_bare(['-m',
                                       'discovery',
                                       '-t',
                                       'sendtargets',
                                       '-p',
                                       iscsi_properties['target_portal']],
                                      check_exit_code=[0, 255])[0] \
            or ""
        ips_iqns = self._get_target_portals_from_iscsiadm_output(out)
        if not devices:
            # disconnect if no other multipath devices
            self._disconnect_mpath(iscsi_properties, ips_iqns)
            return
        # Get a target for all other multipath devices
        other_iqns = [self._get_multipath_iqn(device)
                      for device in devices]
        # Get all the targets for the current multipath device
        current_iqns = [iqn for ip, iqn in ips_iqns]
        in_use = False
        for current in current_iqns:
            if current in other_iqns:
                in_use = True
                break
        # If no other multipath device attached has the same iqn
        # as the current device
        if not in_use:
            # disconnect if no other multipath devices with same iqn
            self._disconnect_mpath(iscsi_properties, ips_iqns)
            return
        elif multipath_device not in devices:
            # delete the devices associated w/ the unused multipath
            self._delete_mpath(iscsi_properties, multipath_device, ips_iqns)
        # else do not disconnect iscsi portals,
        # as they are used for other luns,
        # just remove multipath mapping device descriptor
        self._remove_multipath_device_descriptor(multipath_device)
        return
    def _connect_to_iscsi_portal(self, iscsi_properties):
        # NOTE(vish): If we are on the same host as nova volume, the
        #             discovery makes the target so we don't need to
        #             run --op new. Therefore, we check to see if the
        #             target exists, and if we get 255 (Not Found), then
        #             we run --op new. This will also happen if another
        #             volume is using the same target.
        try:
            self._run_iscsiadm(iscsi_properties, ())
        except processutils.ProcessExecutionError as exc:
            # iscsiadm returns 21 for "No records found" after version 2.0-871
            if exc.exit_code in [21, 255]:
                self._reconnect(iscsi_properties)
            else:
                raise
        if iscsi_properties.get('auth_method'):
            self._iscsiadm_update(iscsi_properties,
                                  "node.session.auth.authmethod",
                                  iscsi_properties['auth_method'])
            self._iscsiadm_update(iscsi_properties,
                                  "node.session.auth.username",
                                  iscsi_properties['auth_username'])
            self._iscsiadm_update(iscsi_properties,
                                  "node.session.auth.password",
                                  iscsi_properties['auth_password'])
        # duplicate logins crash iscsiadm after load,
        # so we scan active sessions to see if the node is logged in.
        out = self._run_iscsiadm_bare(["-m", "session"],
                                      run_as_root=True,
                                      check_exit_code=[0, 1, 21])[0] or ""
        portals = [{'portal': p.split(" ")[2], 'iqn': p.split(" ")[3]}
                   for p in out.splitlines() if p.startswith("tcp:")]
        stripped_portal = iscsi_properties['target_portal'].split(",")[0]
        # Only log in if no existing session matches this portal + IQN.
        if len(portals) == 0 or len([s for s in portals
                                     if stripped_portal ==
                                     s['portal'].split(",")[0]
                                     and
                                     s['iqn'] ==
                                     iscsi_properties['target_iqn']]
                                    ) == 0:
            try:
                self._run_iscsiadm(iscsi_properties,
                                   ("--login",),
                                   check_exit_code=[0, 255])
            except processutils.ProcessExecutionError as err:
                # as this might be one of many paths,
                # only set successful logins to startup automatically
                if err.exit_code in [15]:
                    self._iscsiadm_update(iscsi_properties,
                                          "node.startup",
                                          "automatic")
                    return
            self._iscsiadm_update(iscsi_properties,
                                  "node.startup",
                                  "automatic")
    def _disconnect_from_iscsi_portal(self, iscsi_properties):
        """Log out of and delete the iscsiadm node record for a portal."""
        self._iscsiadm_update(iscsi_properties, "node.startup", "manual",
                              check_exit_code=[0, 21, 255])
        self._run_iscsiadm(iscsi_properties, ("--logout",),
                           check_exit_code=[0, 21, 255])
        self._run_iscsiadm(iscsi_properties, ('--op', 'delete'),
                           check_exit_code=[0, 21, 255])
    def _get_multipath_device_name(self, single_path_device):
        """Return /dev/mapper/<name> for a path device, or None."""
        device = os.path.realpath(single_path_device)
        out = self._run_multipath(['-ll',
                                   device],
                                  check_exit_code=[0, 1])[0]
        mpath_line = [line for line in out.splitlines()
                      if "scsi_id" not in line]  # ignore udev errors
        if len(mpath_line) > 0 and len(mpath_line[0]) > 0:
            return "/dev/mapper/%s" % mpath_line[0].split(" ")[0]
        return None
    def _get_iscsi_devices(self):
        """List the ip-* entries under /dev/disk/by-path."""
        try:
            devices = list(os.walk('/dev/disk/by-path'))[0][-1]
        except IndexError:
            # Directory missing or empty: no iSCSI devices present.
            return []
        return [entry for entry in devices if entry.startswith("ip-")]
    def _delete_mpath(self, iscsi_properties, multipath_device, ips_iqns):
        """Delete every path device belonging to a multipath volume."""
        entries = self._get_iscsi_devices()
        # Loop through ips_iqns to construct all paths
        iqn_luns = []
        for ip, iqn in ips_iqns:
            iqn_lun = '%s-lun-%s' % (iqn,
                                     iscsi_properties.get('target_lun', 0))
            iqn_luns.append(iqn_lun)
        for dev in ['/dev/disk/by-path/%s' % dev for dev in entries]:
            for iqn_lun in iqn_luns:
                if iqn_lun in dev:
                    self._delete_device(dev)
        self._rescan_multipath()
    def _disconnect_mpath(self, iscsi_properties, ips_iqns):
        """Log out of every portal/IQN pair of a multipath volume."""
        for ip, iqn in ips_iqns:
            props = iscsi_properties.copy()
            props['target_portal'] = ip
            props['target_iqn'] = iqn
            self._disconnect_from_iscsi_portal(props)
        self._rescan_multipath()
    def _get_multipath_iqn(self, multipath_device):
        """Return the IQN of the path backing a given multipath device."""
        entries = self._get_iscsi_devices()
        for entry in entries:
            entry_real_path = os.path.realpath("/dev/disk/by-path/%s" % entry)
            entry_multipath = self._get_multipath_device_name(entry_real_path)
            if entry_multipath == multipath_device:
                return entry.split("iscsi-")[1].split("-lun")[0]
        return None
    def _run_iscsiadm_bare(self, iscsi_command, **kwargs):
        """Run iscsiadm with arbitrary arguments (no node/target args)."""
        check_exit_code = kwargs.pop('check_exit_code', 0)
        (out, err) = utils.execute('iscsiadm',
                                   *iscsi_command,
                                   run_as_root=True,
                                   check_exit_code=check_exit_code)
        LOG.debug("iscsiadm %(command)s: stdout=%(out)s stderr=%(err)s",
                  {'command': iscsi_command, 'out': out, 'err': err})
        return (out, err)
    def _run_multipath(self, multipath_command, **kwargs):
        """Run the multipath CLI with the given arguments."""
        check_exit_code = kwargs.pop('check_exit_code', 0)
        (out, err) = utils.execute('multipath',
                                   *multipath_command,
                                   run_as_root=True,
                                   check_exit_code=check_exit_code)
        LOG.debug("multipath %(command)s: stdout=%(out)s stderr=%(err)s",
                  {'command': multipath_command, 'out': out, 'err': err})
        return (out, err)
    def _rescan_iscsi(self):
        """Rescan all iSCSI nodes and sessions for new LUNs."""
        self._run_iscsiadm_bare(('-m', 'node', '--rescan'),
                                check_exit_code=[0, 1, 21, 255])
        self._run_iscsiadm_bare(('-m', 'session', '--rescan'),
                                check_exit_code=[0, 1, 21, 255])
    def _rescan_multipath(self):
        """Force multipathd to re-evaluate its maps ("multipath -r")."""
        self._run_multipath(['-r'], check_exit_code=[0, 1, 21])
    def _get_host_device(self, iscsi_properties):
        """Return the expected /dev/disk/by-path node for this target."""
        return ("/dev/disk/by-path/ip-%s-iscsi-%s-lun-%s" %
                (iscsi_properties['target_portal'],
                 iscsi_properties['target_iqn'],
                 iscsi_properties.get('target_lun', 0)))
    def _reconnect(self, iscsi_properties):
        """Create a fresh iscsiadm node record for the target."""
        self._run_iscsiadm(iscsi_properties, ('--op', 'new'))
class LibvirtISERVolumeDriver(LibvirtISCSIVolumeDriver):
    """Driver to attach iSER (iSCSI over RDMA) volumes to libvirt."""

    def __init__(self, connection):
        super(LibvirtISERVolumeDriver, self).__init__(connection)
        # iSER gets its own retry/multipath knobs, distinct from iSCSI.
        self.num_scan_tries = CONF.libvirt.num_iser_scan_tries
        self.use_multipath = CONF.libvirt.iser_use_multipath

    def _get_multipath_iqn(self, multipath_device):
        # Walk every by-path entry and return the IQN of the one whose
        # multipath device matches; the split differs from the iSCSI
        # parent only in the "iser-" marker.
        for entry in self._get_iscsi_devices():
            real_path = os.path.realpath("/dev/disk/by-path/%s" % entry)
            if self._get_multipath_device_name(real_path) == multipath_device:
                return entry.split("iser-")[1].split("-lun")[0]
        return None

    def _get_host_device(self, iser_properties):
        # Give udev a moment to create the by-path symlink, then glob for
        # it: iSER by-path names embed transport details, so the exact
        # name is not predictable up front.
        time.sleep(1)
        suffix = ("ip-%s-iscsi-%s-lun-%s" %
                  (iser_properties['target_portal'],
                   iser_properties['target_iqn'],
                   iser_properties.get('target_lun', 0)))
        matches = glob.glob('/dev/disk/by-path/*%s' % suffix)
        return matches[0] if matches else None

    def _reconnect(self, iser_properties):
        # Same as the iSCSI parent, but pin the iser transport interface.
        self._run_iscsiadm(iser_properties,
                           ('--interface', 'iser', '--op', 'new'))
class LibvirtNFSVolumeDriver(LibvirtBaseVolumeDriver):
    """Class implements libvirt part of volume driver for NFS."""
    def __init__(self, connection):
        """Create back-end to nfs."""
        super(LibvirtNFSVolumeDriver,
              self).__init__(connection, is_block_dev=False)
    def get_config(self, connection_info, disk_info):
        """Returns xml for libvirt."""
        conf = super(LibvirtNFSVolumeDriver,
                     self).get_config(connection_info, disk_info)
        # Volume file lives at <nfs_mount_point_base>/<hash(export)>/<name>.
        path = os.path.join(CONF.libvirt.nfs_mount_point_base,
            utils.get_hash_str(connection_info['data']['export']))
        path = os.path.join(path, connection_info['data']['name'])
        conf.source_type = 'file'
        conf.source_path = path
        conf.driver_format = connection_info['data'].get('format', 'raw')
        return conf
    def connect_volume(self, connection_info, disk_info):
        """Connect the volume. Returns xml for libvirt."""
        options = connection_info['data'].get('options')
        self._ensure_mounted(connection_info['data']['export'], options)
        return self.get_config(connection_info, disk_info)
    def disconnect_volume(self, connection_info, disk_dev):
        """Disconnect the volume."""
        export = connection_info['data']['export']
        mount_path = os.path.join(CONF.libvirt.nfs_mount_point_base,
                                  utils.get_hash_str(export))
        try:
            utils.execute('umount', mount_path, run_as_root=True)
        except processutils.ProcessExecutionError as exc:
            # A busy mount just means another instance still uses the
            # share; the message differs between umount versions.
            if ('device is busy' in exc.message or
                'target is busy' in exc.message):
                LOG.debug("The NFS share %s is still in use.", export)
            else:
                LOG.exception(_LE("Couldn't unmount the NFS share %s"), export)
    def _ensure_mounted(self, nfs_export, options=None):
        """Mount the export if needed and return the local mount path.

        :param nfs_export: NFS export string, e.g. host:/path
        :param options: extra mount options, space separated (or None)
        """
        mount_path = os.path.join(CONF.libvirt.nfs_mount_point_base,
                                  utils.get_hash_str(nfs_export))
        if not libvirt_utils.is_mounted(mount_path, nfs_export):
            self._mount_nfs(mount_path, nfs_export, options, ensure=True)
        return mount_path
    def _mount_nfs(self, mount_path, nfs_share, options=None, ensure=False):
        """Mount nfs export to mount path."""
        utils.execute('mkdir', '-p', mount_path)
        # Construct the NFS mount command.
        nfs_cmd = ['mount', '-t', 'nfs']
        if CONF.libvirt.nfs_mount_options is not None:
            nfs_cmd.extend(['-o', CONF.libvirt.nfs_mount_options])
        if options is not None:
            nfs_cmd.extend(options.split(' '))
        nfs_cmd.extend([nfs_share, mount_path])
        try:
            utils.execute(*nfs_cmd, run_as_root=True)
        except processutils.ProcessExecutionError as exc:
            # With ensure=True an already-mounted share is not an error.
            if ensure and 'already mounted' in exc.message:
                LOG.warn(_LW("%s is already mounted"), nfs_share)
            else:
                raise
class LibvirtAOEVolumeDriver(LibvirtBaseVolumeDriver):
    """Driver to attach AoE volumes to libvirt."""
    def __init__(self, connection):
        super(LibvirtAOEVolumeDriver,
              self).__init__(connection, is_block_dev=True)
    def _aoe_discover(self):
        """Call aoe-discover (aoe-tools) AoE Discover."""
        (out, err) = utils.execute('aoe-discover',
                                   run_as_root=True, check_exit_code=0)
        return (out, err)
    def _aoe_revalidate(self, aoedev):
        """Revalidate the LUN Geometry (When an AoE ID is reused)."""
        (out, err) = utils.execute('aoe-revalidate', aoedev,
                                   run_as_root=True, check_exit_code=0)
        return (out, err)
    def get_config(self, connection_info, disk_info):
        """Returns xml for libvirt."""
        conf = super(LibvirtAOEVolumeDriver,
                     self).get_config(connection_info, disk_info)
        # AoE device nodes are named /dev/etherd/e<shelf>.<lun>.
        shelf = connection_info['data']['target_shelf']
        lun = connection_info['data']['target_lun']
        aoedev = 'e%s.%s' % (shelf, lun)
        aoedevpath = '/dev/etherd/%s' % (aoedev)
        conf.source_type = "block"
        conf.source_path = aoedevpath
        return conf
    def connect_volume(self, connection_info, mount_device):
        """Discover/revalidate the AoE device and return the disk config."""
        shelf = connection_info['data']['target_shelf']
        lun = connection_info['data']['target_lun']
        aoedev = 'e%s.%s' % (shelf, lun)
        aoedevpath = '/dev/etherd/%s' % (aoedev)
        if os.path.exists(aoedevpath):
            # NOTE(jbr_): If aoedevpath already exists, revalidate the LUN.
            self._aoe_revalidate(aoedev)
        else:
            # NOTE(jbr_): If aoedevpath does not exist, do a discover.
            self._aoe_discover()
        # NOTE(jbr_): Device path is not always present immediately
        def _wait_for_device_discovery(aoedevpath, mount_device):
            tries = self.tries
            if os.path.exists(aoedevpath):
                raise loopingcall.LoopingCallDone()
            if self.tries >= CONF.libvirt.num_aoe_discover_tries:
                raise exception.NovaException(_("AoE device not found at %s") %
                                              (aoedevpath))
            LOG.warn(_LW("AoE volume not yet found at: %(aoedevpath)s. "
                         "Try number: %(tries)s"),
                     {'aoedevpath': aoedevpath, 'tries': tries})
            self._aoe_discover()
            self.tries = self.tries + 1
        # NOTE(review): the retry counter lives on the driver instance
        # (self.tries), so concurrent attaches would share it — confirm
        # callers serialize connect_volume for this driver.
        self.tries = 0
        timer = loopingcall.FixedIntervalLoopingCall(
            _wait_for_device_discovery, aoedevpath, mount_device)
        timer.start(interval=2).wait()
        tries = self.tries
        if tries != 0:
            LOG.debug("Found AoE device %(aoedevpath)s "
                      "(after %(tries)s rediscover)",
                      {'aoedevpath': aoedevpath,
                       'tries': tries})
        return self.get_config(connection_info, mount_device)
class LibvirtGlusterfsVolumeDriver(LibvirtBaseVolumeDriver):
    """Class implements libvirt part of volume driver for GlusterFS."""

    def __init__(self, connection):
        """Create back-end to glusterfs."""
        super(LibvirtGlusterfsVolumeDriver,
              self).__init__(connection, is_block_dev=False)

    def get_config(self, connection_info, disk_info):
        """Returns xml for libvirt.

        When 'gluster' is in qemu_allowed_storage_drivers the volume is
        accessed natively over the gluster protocol by QEMU; otherwise it
        is reached through a FUSE mount on the compute host.
        """
        conf = super(LibvirtGlusterfsVolumeDriver,
                     self).get_config(connection_info, disk_info)
        data = connection_info['data']
        if 'gluster' in CONF.libvirt.qemu_allowed_storage_drivers:
            # export is "<host>:/<volume>"; split into host and volume name.
            vol_name = data['export'].split('/')[1]
            source_host = data['export'].split('/')[0][:-1]
            conf.source_ports = ['24007']
            conf.source_type = 'network'
            conf.source_protocol = 'gluster'
            conf.source_hosts = [source_host]
            conf.source_name = '%s/%s' % (vol_name, data['name'])
        else:
            path = os.path.join(CONF.libvirt.glusterfs_mount_point_base,
                                utils.get_hash_str(data['export']))
            path = os.path.join(path, data['name'])
            conf.source_type = 'file'
            conf.source_path = path
        conf.driver_format = connection_info['data'].get('format', 'raw')
        return conf

    def connect_volume(self, connection_info, mount_device):
        """Mount the share (FUSE mode only) and return the disk config."""
        data = connection_info['data']
        if 'gluster' not in CONF.libvirt.qemu_allowed_storage_drivers:
            self._ensure_mounted(data['export'], data.get('options'))
        return self.get_config(connection_info, mount_device)

    def disconnect_volume(self, connection_info, disk_dev):
        """Disconnect the volume."""
        if 'gluster' in CONF.libvirt.qemu_allowed_storage_drivers:
            # Nothing was mounted for the native-QEMU access path.
            return
        export = connection_info['data']['export']
        mount_path = os.path.join(CONF.libvirt.glusterfs_mount_point_base,
                                  utils.get_hash_str(export))
        try:
            utils.execute('umount', mount_path, run_as_root=True)
        except processutils.ProcessExecutionError as exc:
            # Match the NFS driver: depending on the umount version a busy
            # mount is reported as 'device is busy' or 'target is busy'.
            if ('device is busy' in exc.message or
                'target is busy' in exc.message):
                LOG.debug("The GlusterFS share %s is still in use.", export)
            else:
                LOG.exception(_LE("Couldn't unmount the GlusterFS share %s"),
                              export)

    def _ensure_mounted(self, glusterfs_export, options=None):
        """Mount the export if it is not already; return the mount path.

        :param glusterfs_export: GlusterFS export string (host:/volume)
        :param options: extra mount options, space separated (or None)
        """
        mount_path = os.path.join(CONF.libvirt.glusterfs_mount_point_base,
                                  utils.get_hash_str(glusterfs_export))
        if not libvirt_utils.is_mounted(mount_path, glusterfs_export):
            self._mount_glusterfs(mount_path, glusterfs_export,
                                  options, ensure=True)
        return mount_path

    def _mount_glusterfs(self, mount_path, glusterfs_share,
                         options=None, ensure=False):
        """Mount glusterfs export to mount path."""
        utils.execute('mkdir', '-p', mount_path)
        gluster_cmd = ['mount', '-t', 'glusterfs']
        if options is not None:
            gluster_cmd.extend(options.split(' '))
        gluster_cmd.extend([glusterfs_share, mount_path])
        try:
            utils.execute(*gluster_cmd, run_as_root=True)
        except processutils.ProcessExecutionError as exc:
            # With ensure=True an already-mounted share is not fatal.
            if ensure and 'already mounted' in exc.message:
                LOG.warn(_LW("%s is already mounted"), glusterfs_share)
            else:
                raise
class LibvirtFibreChannelVolumeDriver(LibvirtBaseVolumeDriver):
    """Driver to attach Fibre Channel volumes to libvirt."""
    def __init__(self, connection):
        super(LibvirtFibreChannelVolumeDriver,
              self).__init__(connection, is_block_dev=False)
    def _get_pci_num(self, hba):
        """Extract the PCI address component from an HBA's sysfs path."""
        # NOTE(walter-boring)
        # device path is in format of
        # /sys/devices/pci0000:00/0000:00:03.0/0000:05:00.3/host2/fc_host/host2
        # sometimes an extra entry exists before the host2 value
        # we always want the value prior to the host2 value
        pci_num = None
        if hba is not None:
            if "device_path" in hba:
                index = 0
                device_path = hba['device_path'].split('/')
                for value in device_path:
                    if value.startswith('host'):
                        break
                    index = index + 1
                if index > 0:
                    pci_num = device_path[index - 1]
        return pci_num
    def get_config(self, connection_info, disk_info):
        """Returns xml for libvirt."""
        conf = super(LibvirtFibreChannelVolumeDriver,
                     self).get_config(connection_info, disk_info)
        conf.source_type = "block"
        conf.source_path = connection_info['data']['device_path']
        return conf
    @utils.synchronized('connect_volume')
    def connect_volume(self, connection_info, disk_info):
        """Attach the volume to instance_name."""
        fc_properties = connection_info['data']
        mount_device = disk_info["dev"]
        ports = fc_properties['target_wwn']
        wwns = []
        # we support a list of wwns or a single wwn
        if isinstance(ports, list):
            for wwn in ports:
                wwns.append(str(wwn))
        elif isinstance(ports, six.string_types):
            wwns.append(str(ports))
        # We need to look for wwns on every hba
        # because we don't know ahead of time
        # where they will show up.
        hbas = libvirt_utils.get_fc_hbas_info()
        host_devices = []
        for hba in hbas:
            pci_num = self._get_pci_num(hba)
            if pci_num is not None:
                for wwn in wwns:
                    target_wwn = "0x%s" % wwn.lower()
                    host_device = ("/dev/disk/by-path/pci-%s-fc-%s-lun-%s" %
                                   (pci_num,
                                    target_wwn,
                                    fc_properties.get('target_lun', 0)))
                    host_devices.append(host_device)
        if len(host_devices) == 0:
            # this is empty because we don't have any FC HBAs
            msg = _("We are unable to locate any Fibre Channel devices")
            raise exception.NovaException(msg)
        # The /dev/disk/by-path/... node is not always present immediately
        # We only need to find the first device. Once we see the first device
        # multipath will have any others.
        def _wait_for_device_discovery(host_devices, mount_device):
            tries = self.tries
            for device in host_devices:
                LOG.debug("Looking for Fibre Channel dev %(device)s",
                          {'device': device})
                if os.path.exists(device):
                    self.host_device = device
                    # get the /dev/sdX device. This is used
                    # to find the multipath device.
                    self.device_name = os.path.realpath(device)
                    raise loopingcall.LoopingCallDone()
            # NOTE(review): this reuses the iSCSI scan-tries option for FC
            # rescans — confirm that is intended (no FC-specific option).
            if self.tries >= CONF.libvirt.num_iscsi_scan_tries:
                msg = _("Fibre Channel device not found.")
                raise exception.NovaException(msg)
            LOG.warn(_LW("Fibre volume not yet found at: %(mount_device)s. "
                         "Will rescan & retry. Try number: %(tries)s"),
                     {'mount_device': mount_device, 'tries': tries})
            linuxscsi.rescan_hosts(hbas)
            self.tries = self.tries + 1
        # NOTE(review): retry state is kept on the driver instance
        # (self.tries / self.host_device / self.device_name); the
        # @utils.synchronized decorator serializes connect_volume calls.
        self.host_device = None
        self.device_name = None
        self.tries = 0
        timer = loopingcall.FixedIntervalLoopingCall(
            _wait_for_device_discovery, host_devices, mount_device)
        timer.start(interval=2).wait()
        tries = self.tries
        if self.host_device is not None and self.device_name is not None:
            LOG.debug("Found Fibre Channel volume %(mount_device)s "
                      "(after %(tries)s rescans)",
                      {'mount_device': mount_device,
                       'tries': tries})
        # see if the new drive is part of a multipath
        # device. If so, we'll use the multipath device.
        mdev_info = linuxscsi.find_multipath_device(self.device_name)
        if mdev_info is not None:
            LOG.debug("Multipath device discovered %(device)s",
                      {'device': mdev_info['device']})
            device_path = mdev_info['device']
            connection_info['data']['device_path'] = device_path
            connection_info['data']['devices'] = mdev_info['devices']
            connection_info['data']['multipath_id'] = mdev_info['id']
        else:
            # we didn't find a multipath device.
            # so we assume the kernel only sees 1 device
            device_path = self.host_device
            device_info = linuxscsi.get_device_info(self.device_name)
            connection_info['data']['device_path'] = device_path
            connection_info['data']['devices'] = [device_info]
        return self.get_config(connection_info, disk_info)
    @utils.synchronized('connect_volume')
    def disconnect_volume(self, connection_info, mount_device):
        """Detach the volume from instance_name."""
        super(LibvirtFibreChannelVolumeDriver,
              self).disconnect_volume(connection_info, mount_device)
        # If this is a multipath device, we need to search again
        # and make sure we remove all the devices. Some of them
        # might not have shown up at attach time.
        if 'multipath_id' in connection_info['data']:
            multipath_id = connection_info['data']['multipath_id']
            mdev_info = linuxscsi.find_multipath_device(multipath_id)
            devices = mdev_info['devices']
            LOG.debug("devices to remove = %s", devices)
        else:
            # only needed when multipath-tools work improperly
            devices = connection_info['data'].get('devices', [])
            LOG.warn(_LW("multipath-tools probably work improperly. "
                         "devices to remove = %s.") % devices)
        # There may have been more than 1 device mounted
        # by the kernel for this volume. We have to remove
        # all of them
        for device in devices:
            linuxscsi.remove_device(device)
class LibvirtScalityVolumeDriver(LibvirtBaseVolumeDriver):
    """Scality SOFS Nova driver. Provide hypervisors with access
    to sparse files on SOFS.

    Volumes are plain files on a FUSE-mounted SOFS share; the mount is
    performed lazily on the first connect_volume() call.
    """
    def __init__(self, connection):
        """Create back-end to SOFS and check connection."""
        super(LibvirtScalityVolumeDriver,
              self).__init__(connection, is_block_dev=False)
    def get_config(self, connection_info, disk_info):
        """Returns xml for libvirt.

        The disk source is a file under the configured SOFS mount point,
        at the relative path supplied by the volume connection data.
        """
        conf = super(LibvirtScalityVolumeDriver,
                     self).get_config(connection_info, disk_info)
        path = os.path.join(CONF.libvirt.scality_sofs_mount_point,
                            connection_info['data']['sofs_path'])
        conf.source_type = 'file'
        conf.source_path = path
        # The default driver cache policy is 'none', and this causes
        # qemu/kvm to open the volume file with O_DIRECT, which is
        # rejected by FUSE (on kernels older than 3.3). Scality SOFS
        # is FUSE based, so we must provide a more sensible default.
        conf.driver_cache = 'writethrough'
        return conf
    def connect_volume(self, connection_info, disk_info):
        """Connect the volume. Returns xml for libvirt."""
        # Validate config and ensure the SOFS share is mounted before
        # handing libvirt a path into it.
        self._check_prerequisites()
        self._mount_sofs()
        return self.get_config(connection_info, disk_info)
    def _check_prerequisites(self):
        """Sanity checks before attempting to mount SOFS.

        Raises NovaException when the configuration is missing/unreachable
        or the mount helper is not installed.
        """
        # config is mandatory
        config = CONF.libvirt.scality_sofs_config
        if not config:
            msg = _LW("Value required for 'scality_sofs_config'")
            LOG.warn(msg)
            raise exception.NovaException(msg)
        # config can be a file path or a URL, check it
        if urlparse.urlparse(config).scheme == '':
            # turn local path into URL
            config = 'file://%s' % config
        try:
            # Probe reachability only; the content is read by mount.sofs.
            urllib2.urlopen(config, timeout=5).close()
        except urllib2.URLError as e:
            msg = _LW("Cannot access 'scality_sofs_config': %s") % e
            LOG.warn(msg)
            raise exception.NovaException(msg)
        # mount.sofs must be installed
        if not os.access('/sbin/mount.sofs', os.X_OK):
            msg = _LW("Cannot execute /sbin/mount.sofs")
            LOG.warn(msg)
            raise exception.NovaException(msg)
    def _mount_sofs(self):
        # Mount the SOFS share if it is not already mounted. The presence
        # of the 'sys' directory under the mount point is used as the
        # "already mounted" signal, and re-checked after mounting.
        config = CONF.libvirt.scality_sofs_config
        mount_path = CONF.libvirt.scality_sofs_mount_point
        sysdir = os.path.join(mount_path, 'sys')
        if not os.path.isdir(mount_path):
            utils.execute('mkdir', '-p', mount_path)
        if not os.path.isdir(sysdir):
            utils.execute('mount', '-t', 'sofs', config, mount_path,
                          run_as_root=True)
        if not os.path.isdir(sysdir):
            msg = _LW("Cannot mount Scality SOFS, check syslog for errors")
            LOG.warn(msg)
            raise exception.NovaException(msg)
| badock/nova | nova/virt/libvirt/volume.py | Python | apache-2.0 | 47,624 |
"""."""
def get_systeminfo(resource, config, interactive=False):
    """Return system information gathered for *resource*.

    Placeholder implementation: always returns a canned payload. The
    ``resource``, ``config`` and ``interactive`` arguments are accepted
    for plug-in interface compatibility but are not inspected.
    """
    payload = {'ohai': 'there!'}
    return payload
| lil-cain/satori | satori/sysinfo/ohai.py | Python | apache-2.0 | 109 |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class crvserver_crpolicy_binding(base_resource):
    """Binding class showing the crpolicy that can be bound to crvserver.

    Changes from the generated original:
    - ``ur"..."`` docstring prefixes removed (a syntax error on Python 3).
    - The pervasive ``try: ... except Exception as e: raise e`` wrappers
      were dropped: they re-raised the exception unchanged, adding noise
      and (on Python 2) truncating tracebacks.
    The public interface (properties, classmethods, behavior) is unchanged.
    """
    def __init__(self):
        # Backing fields for the properties below. ``self.___count`` is
        # name-mangled inside the class body (three leading underscores),
        # which is why count()/count_filtered() read the raw
        # ``__dict__['___count']`` entry set by the deserializer.
        self._policyname = ""
        self._priority = 0
        self._hits = 0
        self._name = ""
        self._targetvserver = ""
        self.___count = 0

    @property
    def priority(self):
        """The priority for the policy."""
        return self._priority

    @priority.setter
    def priority(self, priority):
        self._priority = priority

    @property
    def policyname(self):
        """Policies bound to this vserver."""
        return self._policyname

    @policyname.setter
    def policyname(self, policyname):
        self._policyname = policyname

    @property
    def name(self):
        """Name of the cache redirection virtual server to which to bind the
        cache redirection policy.<br/>Minimum length = 1.
        """
        return self._name

    @name.setter
    def name(self, name):
        self._name = name

    @property
    def targetvserver(self):
        """Name of the virtual server to which content is forwarded.
        Applicable only if the policy is a map policy and the cache
        redirection virtual server is of type REVERSE.
        """
        return self._targetvserver

    @targetvserver.setter
    def targetvserver(self, targetvserver):
        self._targetvserver = targetvserver

    @property
    def hits(self):
        """Number of hits (read-only; reported by the appliance)."""
        return self._hits

    def _get_nitro_response(self, service, response):
        """Convert a NITRO response into resource objects.

        Returns the object array for GET requests. Clears the session on
        error code 444 and raises nitro_exception for API-level errors.
        """
        result = service.payload_formatter.string_to_resource(crvserver_crpolicy_binding_response, response, self.__class__.__name__)
        if result.errorcode != 0:
            if result.errorcode == 444:
                service.clear_session(self)
            if result.severity:
                # Only severity "ERROR" is fatal; warnings pass through.
                if result.severity == "ERROR":
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            else:
                raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
        return result.crvserver_crpolicy_binding

    def _get_object_name(self):
        """Return the value of the object identifier argument (the vserver name)."""
        if self.name is not None:
            return str(self.name)
        return None

    @classmethod
    def add(cls, client, resource):
        """Bind the given crpolicy binding(s). Accepts one resource or a list."""
        if resource and type(resource) is not list:
            updateresource = crvserver_crpolicy_binding()
            updateresource.name = resource.name
            updateresource.policyname = resource.policyname
            updateresource.targetvserver = resource.targetvserver
            updateresource.priority = resource.priority
            return updateresource.update_resource(client)
        else:
            if resource and len(resource) > 0:
                updateresources = [crvserver_crpolicy_binding() for _ in range(len(resource))]
                for i in range(len(resource)):
                    updateresources[i].name = resource[i].name
                    updateresources[i].policyname = resource[i].policyname
                    updateresources[i].targetvserver = resource[i].targetvserver
                    updateresources[i].priority = resource[i].priority
                return cls.update_bulk_request(client, updateresources)

    @classmethod
    def delete(cls, client, resource):
        """Unbind the given crpolicy binding(s). Accepts one resource or a list."""
        if resource and type(resource) is not list:
            deleteresource = crvserver_crpolicy_binding()
            deleteresource.name = resource.name
            deleteresource.policyname = resource.policyname
            return deleteresource.delete_resource(client)
        else:
            if resource and len(resource) > 0:
                deleteresources = [crvserver_crpolicy_binding() for _ in range(len(resource))]
                for i in range(len(resource)):
                    deleteresources[i].name = resource[i].name
                    deleteresources[i].policyname = resource[i].policyname
                return cls.delete_bulk_request(client, deleteresources)

    @classmethod
    def get(cls, service, name):
        """Fetch all crvserver_crpolicy_binding resources bound to ``name``."""
        obj = crvserver_crpolicy_binding()
        obj.name = name
        response = obj.get_resources(service)
        return response

    @classmethod
    def get_filtered(cls, service, name, filter_):
        """Fetch a filtered set of crvserver_crpolicy_binding resources.

        ``filter_`` is a JSON-style filter string,
        e.g. "port:80,servicetype:HTTP".
        """
        obj = crvserver_crpolicy_binding()
        obj.name = name
        option_ = options()
        option_.filter = filter_
        response = obj.getfiltered(service, option_)
        return response

    @classmethod
    def count(cls, service, name):
        """Count crvserver_crpolicy_binding resources configured on NetScaler."""
        obj = crvserver_crpolicy_binding()
        obj.name = name
        option_ = options()
        option_.count = True
        response = obj.get_resources(service, option_)
        if response:
            # The count comes back in the raw, unmangled '___count' slot.
            return response[0].__dict__['___count']
        return 0

    @classmethod
    def count_filtered(cls, service, name, filter_):
        """Count the filtered set of crvserver_crpolicy_binding resources.

        ``filter_`` is a JSON-style filter string,
        e.g. "port:80,servicetype:HTTP".
        """
        obj = crvserver_crpolicy_binding()
        obj.name = name
        option_ = options()
        option_.count = True
        option_.filter = filter_
        response = obj.getfiltered(service, option_)
        if response:
            return response[0].__dict__['___count']
        return 0
class crvserver_crpolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.crvserver_crpolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.crvserver_crpolicy_binding = [crvserver_crpolicy_binding() for _ in range(length)]
| benfinke/ns_python | nssrc/com/citrix/netscaler/nitro/resource/config/cr/crvserver_crpolicy_binding.py | Python | apache-2.0 | 7,634 |
# Copyright 2013 Donald Stufft and individual contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import binascii
import pytest
from nacl.encoding import HexEncoder
from nacl.exceptions import CryptoError
from nacl.public import Box, PrivateKey, PublicKey
# Hex-encoded Box test vectors: two key pairs, a nonce, a plaintext and the
# expected ciphertext. NOTE(review): these appear to be the reference box
# vectors shipped with the NaCl distribution — confirm against upstream.
VECTORS = [
    # privalice, pubalice, privbob, pubbob, nonce, plaintext, ciphertext
    (
        b"77076d0a7318a57d3c16c17251b26645df4c2f87ebc0992ab177fba51db92c2a",
        b"8520f0098930a754748b7ddcb43ef75a0dbf3a0d26381af4eba4a98eaa9b4e6a",
        b"5dab087e624a8a4b79e17f8b83800ee66f3bb1292618b6fd1c2f8b27ff88e0eb",
        b"de9edb7d7b7dc1b4d35b61c2ece435373f8343c85b78674dadfc7e146f882b4f",
        b"69696ee955b62b73cd62bda875fc73d68219e0036b7a0b37",
        (b"be075fc53c81f2d5cf141316ebeb0c7b5228c52a4c62cbd44b66849b64244ffce5e"
         b"cbaaf33bd751a1ac728d45e6c61296cdc3c01233561f41db66cce314adb310e3be8"
         b"250c46f06dceea3a7fa1348057e2f6556ad6b1318a024a838f21af1fde048977eb4"
         b"8f59ffd4924ca1c60902e52f0a089bc76897040e082f937763848645e0705"),
        (b"f3ffc7703f9400e52a7dfb4b3d3305d98e993b9f48681273c29650ba32fc76ce483"
         b"32ea7164d96a4476fb8c531a1186ac0dfc17c98dce87b4da7f011ec48c97271d2c2"
         b"0f9b928fe2270d6fb863d51738b48eeee314a7cc8ab932164548e526ae902243685"
         b"17acfeabd6bb3732bc0e9da99832b61ca01b6de56244a9e88d5f9b37973f622a43d"
         b"14a6599b1f654cb45a74e355a5"),
    ),
]
def test_generate_private_key():
    # Smoke test: generating a fresh private key must not raise.
    PrivateKey.generate()
def test_box_creation():
    """A Box can be constructed from a valid (private, public) key pair."""
    public = PublicKey(
        b"ec2bee2d5be613ca82e377c96a0bf2220d823ce980cdff6279473edc52862798",
        encoder=HexEncoder,
    )
    secret = PrivateKey(
        b"5c2bee2d5be613ca82e377c96a0bf2220d823ce980cdff6279473edc52862798",
        encoder=HexEncoder,
    )
    Box(secret, public)
def test_box_decode():
    """Box.decode reconstructs a box with the same shared key."""
    public = PublicKey(
        b"ec2bee2d5be613ca82e377c96a0bf2220d823ce980cdff6279473edc52862798",
        encoder=HexEncoder,
    )
    secret = PrivateKey(
        b"5c2bee2d5be613ca82e377c96a0bf2220d823ce980cdff6279473edc52862798",
        encoder=HexEncoder,
    )
    original = Box(secret, public)
    restored = Box.decode(original._shared_key)
    assert original._shared_key == restored._shared_key
def test_box_bytes():
    """bytes(box) exposes the derived shared key."""
    public = PublicKey(
        b"ec2bee2d5be613ca82e377c96a0bf2220d823ce980cdff6279473edc52862798",
        encoder=HexEncoder,
    )
    secret = PrivateKey(
        b"5c2bee2d5be613ca82e377c96a0bf2220d823ce980cdff6279473edc52862798",
        encoder=HexEncoder,
    )
    box = Box(secret, public)
    assert bytes(box) == box._shared_key
@pytest.mark.parametrize(
    (
        "privalice", "pubalice", "privbob", "pubbob", "nonce", "plaintext",
        "ciphertext",
    ),
    VECTORS,
)
def test_box_encryption(
        privalice, pubalice, privbob, pubbob, nonce, plaintext, ciphertext):
    """Encrypting with (privbob, pubalice) reproduces the reference ciphertext."""
    alice_public = PublicKey(pubalice, encoder=HexEncoder)
    bob_secret = PrivateKey(privbob, encoder=HexEncoder)
    box = Box(bob_secret, alice_public)
    raw_nonce = binascii.unhexlify(nonce)
    encrypted = box.encrypt(
        binascii.unhexlify(plaintext),
        raw_nonce,
        encoder=HexEncoder,
    )
    expected = binascii.hexlify(raw_nonce + binascii.unhexlify(ciphertext))
    assert encrypted == expected
    assert encrypted.nonce == nonce
    assert encrypted.ciphertext == ciphertext
@pytest.mark.parametrize(
    (
        "privalice", "pubalice", "privbob", "pubbob", "nonce", "plaintext",
        "ciphertext",
    ),
    VECTORS,
)
def test_box_decryption(
        privalice, pubalice, privbob, pubbob, nonce, plaintext, ciphertext):
    """Decrypting with (privalice, pubbob) recovers the reference plaintext."""
    bob_public = PublicKey(pubbob, encoder=HexEncoder)
    alice_secret = PrivateKey(privalice, encoder=HexEncoder)
    box = Box(alice_secret, bob_public)
    raw_nonce = binascii.unhexlify(nonce)
    recovered = box.decrypt(ciphertext, raw_nonce, encoder=HexEncoder)
    assert binascii.hexlify(recovered) == plaintext
@pytest.mark.parametrize(
    (
        "privalice", "pubalice", "privbob", "pubbob", "nonce", "plaintext",
        "ciphertext",
    ),
    VECTORS,
)
def test_box_decryption_combined(
        privalice, pubalice, privbob, pubbob, nonce, plaintext, ciphertext):
    """Decryption also works on a combined nonce||ciphertext message."""
    bob_public = PublicKey(pubbob, encoder=HexEncoder)
    alice_secret = PrivateKey(privalice, encoder=HexEncoder)
    box = Box(alice_secret, bob_public)
    combined = binascii.hexlify(
        binascii.unhexlify(nonce) + binascii.unhexlify(ciphertext))
    recovered = box.decrypt(combined, encoder=HexEncoder)
    assert binascii.hexlify(recovered) == plaintext
@pytest.mark.parametrize(
    (
        "privalice", "pubalice", "privbob", "pubbob", "nonce", "plaintext",
        "ciphertext",
    ),
    VECTORS,
)
def test_box_failed_decryption(
        privalice, pubalice, privbob, pubbob, nonce, plaintext, ciphertext):
    """A box built from the wrong key pair must fail to decrypt."""
    bob_public = PublicKey(pubbob, encoder=HexEncoder)
    bob_secret = PrivateKey(privbob, encoder=HexEncoder)
    # this cannot decrypt the ciphertext! the ciphertext must be decrypted by
    # (privalice, pubbob) or (privbob, pubalice)
    box = Box(bob_secret, bob_public)
    with pytest.raises(CryptoError):
        box.decrypt(ciphertext, binascii.unhexlify(nonce), encoder=HexEncoder)
def test_box_wrong_length():
    """Empty byte strings are rejected by key constructors and Box ops."""
    with pytest.raises(ValueError):
        PublicKey(b"")
    with pytest.raises(ValueError):
        PrivateKey(b"")
    public = PublicKey(
        b"ec2bee2d5be613ca82e377c96a0bf2220d823ce980cdff6279473edc52862798",
        encoder=HexEncoder,
    )
    secret = PrivateKey(
        b"5c2bee2d5be613ca82e377c96a0bf2220d823ce980cdff6279473edc52862798",
        encoder=HexEncoder,
    )
    box = Box(secret, public)
    with pytest.raises(ValueError):
        box.encrypt(b"", b"")
    with pytest.raises(ValueError):
        box.decrypt(b"", b"")
def check_type_error(expected, f, *args):
    # Helper: call f(*args) expecting a TypeError whose rendered message
    # contains ``expected``.
    # NOTE(review): ``str(e)`` stringifies the pytest ExceptionInfo wrapper,
    # not the exception itself; newer pytest wants ``str(e.value)`` —
    # confirm before a pytest upgrade.
    with pytest.raises(TypeError) as e:
        f(*args)
    assert expected in str(e)
def test_wrong_types():
    """Key and Box constructors reject values of the wrong type."""
    priv = PrivateKey.generate()
    seed_msg = "PrivateKey must be created from a 32 byte seed"
    for bad_seed in (12, priv, priv.public_key):
        check_type_error(seed_msg, PrivateKey, bad_seed)
    pub_msg = "PublicKey must be created from 32 bytes"
    for bad_key in (13, priv, priv.public_key):
        check_type_error(pub_msg, PublicKey, bad_key)
    box_msg = "Box must be created from a PrivateKey and a PublicKey"
    check_type_error(box_msg, Box, priv, "not a public key")
    check_type_error(box_msg, Box, priv.encode(), priv.public_key.encode())
    check_type_error(box_msg, Box, priv, priv.public_key.encode())
    check_type_error(box_msg, Box, priv.encode(), priv.public_key)
| xueyumusic/pynacl | tests/test_box.py | Python | apache-2.0 | 7,660 |
from __future__ import print_function
from __future__ import division
from builtins import str
from past.utils import old_div
import copy
from datetime import datetime, timedelta
import dateutil.parser
from functools import wraps
import inspect
import json
import logging
import os
import socket
import sys
import time
from flask._compat import PY2
from flask import (
Flask, url_for, Markup, Blueprint, redirect,
flash, Response, render_template)
from flask.ext.admin import Admin, BaseView, expose, AdminIndexView
from flask.ext.admin.form import DateTimePickerWidget
from flask.ext.admin import base
from flask.ext.admin.contrib.sqla import ModelView
from flask.ext.cache import Cache
from flask import request
import sqlalchemy as sqla
from wtforms import (
widgets,
Form, DateTimeField, SelectField, TextAreaField, PasswordField, StringField)
from pygments import highlight, lexers
from pygments.formatters import HtmlFormatter
import chartkick
import jinja2
import markdown
from sqlalchemy import or_
import airflow
from airflow import jobs, login, models, settings, utils
from airflow.configuration import conf
from airflow.models import State
from airflow.settings import Session
from airflow.utils import AirflowException
from airflow.www import utils as wwwutils
# Bind auth helpers from the default login module first; they are rebound
# below after the environment-specific module (if any) is imported.
login_required = login.login_required
current_user = login.current_user
logout_user = login.logout_user
from airflow import default_login as login
if conf.getboolean('webserver', 'AUTHENTICATE'):
    try:
        # Environment specific login
        import airflow_login as login
    except ImportError:
        logging.error(
            "authenticate is set to True in airflow.cfg, "
            "but airflow_login failed to import")
# Re-bind against whichever login module won above.
login_required = login.login_required
current_user = login.current_user
logout_user = login.logout_user
AUTHENTICATE = conf.getboolean('webserver', 'AUTHENTICATE')
if AUTHENTICATE is False:
    # With auth disabled, login_required becomes a pass-through decorator.
    login_required = lambda x: x
FILTER_BY_OWNER = False
if conf.getboolean('webserver', 'FILTER_BY_OWNER'):
    # filter_by_owner if authentication is enabled and filter_by_owner is true
    FILTER_BY_OWNER = AUTHENTICATE
class VisiblePasswordInput(widgets.PasswordInput):
    # PasswordInput variant that renders the value visibly by default
    # (hide_value=False).
    # NOTE(review): __init__ does not call super(); presumably the wtforms
    # base only sets hide_value there — verify when upgrading wtforms.
    def __init__(self, hide_value=False):
        self.hide_value = hide_value
class VisiblePasswordField(PasswordField):
    # Password form field rendered with its value visible.
    widget = VisiblePasswordInput()
def superuser_required(f):
    """Decorator restricting a view to superusers.

    A no-op when authentication is disabled; otherwise non-superusers are
    flashed an error and redirected to the admin index.
    """
    @wraps(f)
    def decorated_function(*args, **kwargs):
        # Short-circuit: current_user is only consulted when auth is on.
        if not AUTHENTICATE or (
                not current_user.is_anonymous() and
                current_user.is_superuser()):
            return f(*args, **kwargs)
        flash("This page requires superuser privileges", "error")
        return redirect(url_for('admin.index'))
    return decorated_function
def data_profiling_required(f):
    """Decorator restricting a view to users with data-profiling access.

    A no-op when authentication is disabled; otherwise users without the
    privilege are flashed an error and redirected to the admin index.
    """
    @wraps(f)
    def decorated_function(*args, **kwargs):
        # Short-circuit: current_user is only consulted when auth is on.
        if not AUTHENTICATE or (
                not current_user.is_anonymous() and
                current_user.data_profiling()):
            return f(*args, **kwargs)
        flash("This page requires data profiling privileges", "error")
        return redirect(url_for('admin.index'))
    return decorated_function
# Hard caps on rows returned by ad-hoc queries and by chart data pulls.
QUERY_LIMIT = 100000
CHART_LIMIT = 200000
def pygment_html_render(s, lexer=lexers.TextLexer):
    """Render ``s`` as syntax-highlighted HTML with line numbers."""
    formatter = HtmlFormatter(linenos=True)
    return highlight(s, lexer(), formatter)
def wrapped_markdown(s):
    """Render markdown and wrap it in a rich-doc styled div."""
    return '<div class="rich_doc">{}</div>'.format(markdown.markdown(s))
# Maps task-attribute names to callables rendering that attribute as HTML
# (syntax highlighting for code-like fields, markdown for doc_md).
attr_renderer = {
    'bash_command': lambda x: pygment_html_render(x, lexers.BashLexer),
    'hql': lambda x: pygment_html_render(x, lexers.SqlLexer),
    'sql': lambda x: pygment_html_render(x, lexers.SqlLexer),
    'doc': lambda x: pygment_html_render(x, lexers.TextLexer),
    'doc_json': lambda x: pygment_html_render(x, lexers.JsonLexer),
    'doc_rst': lambda x: pygment_html_render(x, lexers.RstLexer),
    'doc_yaml': lambda x: pygment_html_render(x, lexers.YamlLexer),
    'doc_md': wrapped_markdown,
    'python_callable': lambda x: pygment_html_render(
        inspect.getsource(x), lexers.PythonLexer),
}
# Parse all DAG definition files once at import time.
dagbag = models.DagBag(os.path.expanduser(conf.get('core', 'DAGS_FOLDER')))
utils.pessimistic_connection_handling()
app = Flask(__name__)
app.config['SQLALCHEMY_POOL_RECYCLE'] = 3600
app.secret_key = conf.get('webserver', 'SECRET_KEY')
login.login_manager.init_app(app)
# Filesystem-backed response cache.
cache = Cache(
    app=app, config={'CACHE_TYPE': 'filesystem', 'CACHE_DIR': '/tmp'})
# Init for chartkick, the python wrapper for highcharts
ck = Blueprint(
    'ck_page', __name__,
    static_folder=chartkick.js(), static_url_path='/static')
app.register_blueprint(ck, url_prefix='/ck')
app.jinja_env.add_extension("chartkick.ext.charts")
@app.context_processor
def jinja_globals():
    # Variables injected into every Jinja template render.
    return {
        'hostname': socket.gethostname(),
    }
class DateTimeForm(Form):
    """Form with a single execution-date picker."""
    # Date filter form needed for gantt and graph view
    execution_date = DateTimeField(
        "Execution date", widget=DateTimePickerWidget())
class GraphForm(Form):
    """Form for the graph view: execution date plus graph layout direction."""
    execution_date = DateTimeField(
        "Execution date", widget=DateTimePickerWidget())
    arrange = SelectField("Layout", choices=(
        ('LR', "Left->Right"),
        ('RL', "Right->Left"),
        ('TB', "Top->Bottom"),
        ('BT', "Bottom->Top"),
    ))
class TreeForm(Form):
    """Form for the tree view: anchor date plus number of runs to show."""
    # Pass the callable, not its result: with ``default=datetime.now()`` the
    # default was evaluated once at import time, freezing the anchor date at
    # webserver start. WTForms invokes a callable default per form instance.
    base_date = DateTimeField(
        "Anchor date", widget=DateTimePickerWidget(), default=datetime.now)
    num_runs = SelectField("Number of runs", default=25, choices=(
        (5, "5"),
        (25, "25"),
        (50, "50"),
        (100, "100"),
        (365, "365"),
    ))
@app.route('/')
def index():
    # The root URL simply forwards to the admin dashboard (DAGs list).
    return redirect(url_for('admin.index'))
@app.route('/health')
def health():
    """Liveness endpoint; extend with real server checks as needed."""
    # Placeholder body, rendered through markdown for consistent styling.
    return Markup(markdown.markdown("The server is healthy!"))
@app.teardown_appcontext
def shutdown_session(exception=None):
    # Return the scoped SQLAlchemy session to the pool after each request.
    settings.Session.remove()
def dag_link(v, c, m, p):
    # flask-admin column formatter: render the dag_id as a link to the
    # graph view of that DAG.
    # NOTE(review): m.dag_id is interpolated into Markup without escaping;
    # dag_ids are defined server-side, but confirm they can never carry HTML.
    url = url_for(
        'airflow.graph',
        dag_id=m.dag_id)
    return Markup(
        '<a href="{url}">{m.dag_id}</a>'.format(**locals()))
class DagModelView(wwwutils.SuperUserMixin, ModelView):
    """Admin list/edit view over the DagModel table (superusers only)."""
    column_list = ('dag_id', 'owners')
    column_editable_list = ('is_paused',)
    form_excluded_columns = ('is_subdag', 'is_active')
    column_searchable_list = ('dag_id',)
    column_filters = (
        'dag_id', 'owners', 'is_paused', 'is_active', 'is_subdag',
        'last_scheduler_run', 'last_expired')
    # Scheduler-maintained bookkeeping columns are shown read-only.
    form_widget_args = {
        'last_scheduler_run': {'disabled': True},
        'fileloc': {'disabled': True},
        'is_paused': {'disabled': True},
        'last_pickled': {'disabled': True},
        'pickle_id': {'disabled': True},
        'last_loaded': {'disabled': True},
        'last_expired': {'disabled': True},
        'pickle_size': {'disabled': True},
        'scheduler_lock': {'disabled': True},
        'owners': {'disabled': True},
    }
    column_formatters = dict(
        dag_id=dag_link,
    )
    can_delete = False
    can_create = False
    page_size = 50
    list_template = 'airflow/list_dags.html'
    named_filter_urls = True
    def get_query(self):
        """
        Default filters for model
        """
        return (
            super(DagModelView, self)
            .get_query()
            .filter(or_(models.DagModel.is_active, models.DagModel.is_paused))
            .filter(~models.DagModel.is_subdag)
        )
    def get_count_query(self):
        """
        Default filters for model
        """
        # NOTE(review): unlike get_query this omits paused DAGs, so the row
        # count may disagree with the listing — confirm this is intentional.
        return (
            super(DagModelView, self)
            .get_count_query()
            .filter(models.DagModel.is_active)
            .filter(~models.DagModel.is_subdag)
        )
class HomeView(AdminIndexView):
    """Admin index page: the DAGs list, merging ORM state and parsed DAGs."""
    @expose("/")
    @login_required
    def index(self):
        session = Session()
        DM = models.DagModel
        qry = None
        # filter the dags if filter_by_owner and current user is not superuser
        do_filter = FILTER_BY_OWNER and (not current_user.is_superuser())
        if do_filter:
            qry = session.query(DM).filter(~DM.is_subdag, DM.is_active, DM.owners == current_user.username).all()
        else:
            qry = session.query(DM).filter(~DM.is_subdag, DM.is_active).all()
        orm_dags = {dag.dag_id: dag for dag in qry}
        # Surface parse failures to the user on every visit.
        import_errors = session.query(models.ImportError).all()
        for ie in import_errors:
            flash(
                "Broken DAG: [{ie.filename}] {ie.stacktrace}".format(ie=ie),
                "error")
        session.expunge_all()
        session.commit()
        session.close()
        # In-memory DAGs from the dagbag, subject to the same owner filter;
        # subdags are excluded from the top-level listing.
        dags = dagbag.dags.values()
        if do_filter:
            dags = {dag.dag_id: dag for dag in dags if (dag.owner == current_user.username and (not dag.parent_dag))}
        else:
            dags = {dag.dag_id: dag for dag in dags if not dag.parent_dag}
        all_dag_ids = sorted(set(orm_dags.keys()) | set(dags.keys()))
        return self.render(
            'airflow/dags.html',
            dags=dags,
            orm_dags=orm_dags,
            all_dag_ids=all_dag_ids)
# The flask-admin application; HomeView supplies the landing page.
admin = Admin(
    app,
    name="Airflow",
    index_view=HomeView(name="DAGs"),
    template_mode='bootstrap3')
class Airflow(BaseView):
    def is_visible(self):
        # Hide this view from the flask-admin menu; its endpoints are
        # linked to directly from templates.
        return False
    @expose('/')
    @login_required
    def index(self):
        # Renders the DAGs page directly (kept for URL compatibility).
        return self.render('airflow/dags.html')
    @expose('/chart_data')
    @data_profiling_required
    @wwwutils.gzipped
    # @cache.cached(timeout=3600, key_prefix=wwwutils.make_cache_key)
    def chart_data(self):
        """Run a saved chart's SQL and return Highcharts-ready JSON (or CSV).

        The payload always carries 'state' ('ERROR'/'SUCCESS') and an
        'error' string; on success it adds the Highcharts config ('hc'),
        the raw 'data', and the echoed request parameters.
        """
        session = settings.Session()
        chart_id = request.args.get('chart_id')
        csv = request.args.get('csv') == "true"
        chart = session.query(models.Chart).filter_by(id=chart_id).all()[0]
        db = session.query(
            models.Connection).filter_by(conn_id=chart.conn_id).all()[0]
        session.expunge_all()
        session.commit()
        session.close()
        payload = {}
        payload['state'] = 'ERROR'
        payload['error'] = ''
        # Processing templated fields
        # NOTE(review): eval() of stored chart params executes arbitrary
        # Python from the DB; acceptable only because chart authoring is
        # already a privileged operation — confirm.
        try:
            args = eval(chart.default_params)
            if type(args) is not type(dict()):
                raise AirflowException('Not a dict')
        except:
            args = {}
            payload['error'] += (
                "Default params is not valid, string has to evaluate as "
                "a Python dictionary. ")
        request_dict = {k: request.args.get(k) for k in request.args}
        from airflow import macros
        args.update(request_dict)
        args['macros'] = macros
        # Render the chart SQL and label as Jinja templates with the
        # merged default + request parameters.
        sql = jinja2.Template(chart.sql).render(**args)
        label = jinja2.Template(chart.label).render(**args)
        payload['sql_html'] = Markup(highlight(
            sql,
            lexers.SqlLexer(), # Lexer call
            HtmlFormatter(noclasses=True))
        )
        payload['label'] = label
        import pandas as pd
        pd.set_option('display.max_colwidth', 100)
        hook = db.get_hook()
        try:
            df = hook.get_pandas_df(wwwutils.limit_sql(sql, CHART_LIMIT, conn_type=db.conn_type))
            df = df.fillna(0)
        except Exception as e:
            payload['error'] += "SQL execution failed. Details: " + str(e)
        if csv:
            return Response(
                response=df.to_csv(index=False),
                status=200,
                mimetype="application/text")
        if not payload['error'] and len(df) == CHART_LIMIT:
            payload['warning'] = (
                "Data has been truncated to {0}"
                " rows. Expect incomplete results.").format(CHART_LIMIT)
        def date_handler(obj):
            # json.dumps default: serialize datetimes as ISO strings.
            return obj.isoformat() if hasattr(obj, 'isoformat') else obj
        # Validate column counts required by the requested layout.
        if not payload['error'] and len(df) == 0:
            payload['error'] += "Empty result set. "
        elif (
                not payload['error'] and
                chart.sql_layout == 'series' and
                chart.chart_type != "datatable" and
                len(df.columns) < 3):
            payload['error'] += "SQL needs to return at least 3 columns. "
        elif (
                not payload['error'] and
                chart.sql_layout == 'columns'and
                len(df.columns) < 2):
            payload['error'] += "SQL needs to return at least 2 columns. "
        elif not payload['error']:
            import numpy as np
            chart_type = chart.chart_type
            data = None
            if chart_type == "datatable":
                chart.show_datatable = True
            if chart.show_datatable:
                data = df.to_dict(orient="split")
                data['columns'] = [{'title': c} for c in data['columns']]
            # Trying to convert time to something Highcharts likes
            x_col = 1 if chart.sql_layout == 'series' else 0
            if chart.x_is_date:
                try:
                    # From string to datetime
                    df[df.columns[x_col]] = pd.to_datetime(
                        df[df.columns[x_col]])
                except Exception as e:
                    raise AirflowException(str(e))
                # Highcharts expects epoch milliseconds on the x axis.
                df[df.columns[x_col]] = df[df.columns[x_col]].apply(
                    lambda x: int(x.strftime("%s")) * 1000)
            series = []
            colorAxis = None
            if chart_type == 'datatable':
                payload['data'] = data
                payload['state'] = 'SUCCESS'
                return Response(
                    response=json.dumps(
                        payload, indent=4, default=date_handler),
                    status=200,
                    mimetype="application/json")
            elif chart_type == 'para':
                df.rename(columns={
                    df.columns[0]: 'name',
                    df.columns[1]: 'group',
                }, inplace=True)
                return Response(
                    response=df.to_csv(index=False),
                    status=200,
                    mimetype="application/text")
            elif chart_type == 'heatmap':
                color_perc_lbound = float(
                    request.args.get('color_perc_lbound', 0))
                color_perc_rbound = float(
                    request.args.get('color_perc_rbound', 1))
                color_scheme = request.args.get('color_scheme', 'blue_red')
                if color_scheme == 'blue_red':
                    stops = [
                        [color_perc_lbound, '#00D1C1'],
                        [
                            color_perc_lbound +
                            ((color_perc_rbound - color_perc_lbound)/2),
                            '#FFFFCC'
                        ],
                        [color_perc_rbound, '#FF5A5F']
                    ]
                elif color_scheme == 'blue_scale':
                    stops = [
                        [color_perc_lbound, '#FFFFFF'],
                        [color_perc_rbound, '#2222FF']
                    ]
                elif color_scheme == 'fire':
                    diff = float(color_perc_rbound - color_perc_lbound)
                    stops = [
                        [color_perc_lbound, '#FFFFFF'],
                        [color_perc_lbound + 0.33*diff, '#FFFF00'],
                        [color_perc_lbound + 0.66*diff, '#FF0000'],
                        [color_perc_rbound, '#000000']
                    ]
                else:
                    stops = [
                        [color_perc_lbound, '#FFFFFF'],
                        [
                            color_perc_lbound +
                            ((color_perc_rbound - color_perc_lbound)/2),
                            '#888888'
                        ],
                        [color_perc_rbound, '#000000'],
                    ]
                xaxis_label = df.columns[1]
                yaxis_label = df.columns[2]
                data = []
                for row in df.itertuples():
                    data.append({
                        'x': row[2],
                        'y': row[3],
                        'value': row[4],
                    })
                x_format = '{point.x:%Y-%m-%d}' \
                    if chart.x_is_date else '{point.x}'
                series.append({
                    'data': data,
                    'borderWidth': 0,
                    'colsize': 24 * 36e5,
                    'turboThreshold': sys.float_info.max,
                    'tooltip': {
                        'headerFormat': '',
                        'pointFormat': (
                            df.columns[1] + ': ' + x_format + '<br/>' +
                            df.columns[2] + ': {point.y}<br/>' +
                            df.columns[3] + ': <b>{point.value}</b>'
                        ),
                    },
                })
                colorAxis = {
                    'stops': stops,
                    'minColor': '#FFFFFF',
                    'maxColor': '#000000',
                    'min': 50,
                    'max': 2200,
                }
            else:
                if chart.sql_layout == 'series':
                    # User provides columns (series, x, y)
                    xaxis_label = df.columns[1]
                    yaxis_label = df.columns[2]
                    df[df.columns[2]] = df[df.columns[2]].astype(np.float)
                    df = df.pivot_table(
                        index=df.columns[1],
                        columns=df.columns[0],
                        values=df.columns[2], aggfunc=np.sum)
                else:
                    # User provides columns (x, y, metric1, metric2, ...)
                    xaxis_label = df.columns[0]
                    yaxis_label = 'y'
                    df.index = df[df.columns[0]]
                    # NOTE(review): DataFrame.sort / np.float are removed in
                    # modern pandas/numpy; fine for the versions pinned here.
                    df = df.sort(df.columns[0])
                    del df[df.columns[0]]
                    for col in df.columns:
                        df[col] = df[col].astype(np.float)
                for col in df.columns:
                    series.append({
                        'name': col,
                        'data': [
                            (k, df[col][k])
                            for k in df[col].keys()
                            if not np.isnan(df[col][k])]
                    })
                # Order series by their first y value, largest first.
                series = [serie for serie in sorted(
                    series, key=lambda s: s['data'][0][1], reverse=True)]
            if chart_type == "stacked_area":
                stacking = "normal"
                chart_type = 'area'
            elif chart_type == "percent_area":
                stacking = "percent"
                chart_type = 'area'
            else:
                stacking = None
            # Assemble the Highcharts configuration object.
            hc = {
                'chart': {
                    'type': chart_type
                },
                'plotOptions': {
                    'series': {
                        'marker': {
                            'enabled': False
                        }
                    },
                    'area': {'stacking': stacking},
                },
                'title': {'text': ''},
                'xAxis': {
                    'title': {'text': xaxis_label},
                    'type': 'datetime' if chart.x_is_date else None,
                },
                'yAxis': {
                    'title': {'text': yaxis_label},
                },
                'colorAxis': colorAxis,
                'tooltip': {
                    'useHTML': True,
                    'backgroundColor': None,
                    'borderWidth': 0,
                },
                'series': series,
            }
            if chart.y_log_scale:
                hc['yAxis']['type'] = 'logarithmic'
                hc['yAxis']['minorTickInterval'] = 0.1
                if 'min' in hc['yAxis']:
                    del hc['yAxis']['min']
            payload['state'] = 'SUCCESS'
            payload['hc'] = hc
            payload['data'] = data
            payload['request_dict'] = request_dict
        return Response(
            response=json.dumps(payload, indent=4, default=date_handler),
            status=200,
            mimetype="application/json")
    @expose('/chart')
    @data_profiling_required
    def chart(self):
        """Render a single chart's page (or the parallel-coordinates view)."""
        session = settings.Session()
        chart_id = request.args.get('chart_id')
        embed = request.args.get('embed')
        # .all()[0] raises IndexError for an unknown chart_id
        chart = session.query(models.Chart).filter_by(id=chart_id).all()[0]
        session.expunge_all()
        session.commit()
        session.close()
        if chart.chart_type == 'para':
            # Parallel-coordinates charts use a dedicated template
            return self.render('airflow/para/para.html', chart=chart)
        sql = ""
        if chart.show_sql:
            # Syntax-highlight the SQL for optional display on the page
            sql = Markup(highlight(
                chart.sql,
                lexers.SqlLexer(),  # Lexer call
                HtmlFormatter(noclasses=True))
            )
        return self.render(
            'airflow/highchart.html',
            chart=chart,
            title="Airflow - Chart",
            sql=sql,
            label=chart.label,
            embed=embed)
@expose('/dag_stats')
@login_required
def dag_stats(self):
states = [
State.SUCCESS,
State.RUNNING,
State.FAILED,
State.UPSTREAM_FAILED,
State.UP_FOR_RETRY,
State.QUEUED,
]
task_ids = []
for dag in dagbag.dags.values():
task_ids += dag.task_ids
TI = models.TaskInstance
session = Session()
qry = (
session.query(TI.dag_id, TI.state, sqla.func.count(TI.task_id))
.filter(TI.task_id.in_(task_ids))
.group_by(TI.dag_id, TI.state)
)
data = {}
for dag_id, state, count in qry:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
session.commit()
session.close()
payload = {}
for dag in dagbag.dags.values():
payload[dag.dag_id] = []
for state in states:
try:
count = data[dag.dag_id][state]
except:
count = 0
d = {
'state': state,
'count': count,
'dag_id': dag.dag_id,
'color': State.color(state)
}
payload[dag.dag_id].append(d)
return Response(
response=json.dumps(payload, indent=4),
status=200, mimetype="application/json")
@expose('/code')
@login_required
def code(self):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
code = "".join(open(dag.full_filepath, 'r').readlines())
title = dag.filepath
html_code = highlight(
code, lexers.PythonLexer(), HtmlFormatter(linenos=True))
return self.render(
'airflow/dag_code.html', html_code=html_code, dag=dag, title=title,
root=request.args.get('root'),
demo_mode=conf.getboolean('webserver', 'demo_mode'))
    @app.errorhandler(404)
    def circles(self):
        # 404 handler. Flask passes the error object as the first positional
        # argument; it lands in "self" here and is ignored.
        return render_template('airflow/circles.html'), 404
@expose('/sandbox')
@login_required
def sandbox(self):
from airflow import configuration
title = "Sandbox Suggested Configuration"
cfg_loc = configuration.AIRFLOW_CONFIG + '.sandbox'
f = open(cfg_loc, 'r')
config = f.read()
f.close()
code_html = Markup(highlight(
config,
lexers.IniLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/code.html',
code_html=code_html, title=title, subtitle=cfg_loc)
@expose('/noaccess')
def noaccess(self):
return self.render('airflow/noaccess.html')
    @expose('/headers')
    def headers(self):
        """Debug endpoint: echo request headers and current-user flags as JSON."""
        d = {k: v for k, v in request.headers}
        if hasattr(current_user, 'is_superuser'):
            # Only real (non-anonymous) user objects expose these methods
            d['is_superuser'] = current_user.is_superuser()
            d['data_profiling'] = current_user.data_profiling()
            d['is_anonymous'] = current_user.is_anonymous()
            d['is_authenticated'] = current_user.is_authenticated()
        return Response(
            response=json.dumps(d, indent=4),
            status=200, mimetype="application/json")
    @expose('/login')
    def login(self):
        """Delegate login to the pluggable auth backend's handler."""
        return login.login(self, request)
    @expose('/logout')
    def logout(self):
        """Log the current user out and return to the admin index page."""
        logout_user()
        return redirect(url_for('admin.index'))
    @expose('/rendered')
    @login_required
    def rendered(self):
        """Show a task's template fields rendered for one execution date."""
        dag_id = request.args.get('dag_id')
        task_id = request.args.get('task_id')
        execution_date = request.args.get('execution_date')
        dttm = dateutil.parser.parse(execution_date)
        form = DateTimeForm(data={'execution_date': dttm})
        dag = dagbag.get_dag(dag_id)
        # Copy the task so rendering doesn't mutate the shared DAG object
        task = copy.copy(dag.get_task(task_id))
        ti = models.TaskInstance(task=task, execution_date=dttm)
        try:
            ti.render_templates()
        except Exception as e:
            flash("Error rendering template: " + str(e), "error")
        title = "Rendered Template"
        html_dict = {}
        for template_field in task.__class__.template_fields:
            content = getattr(task, template_field)
            if template_field in attr_renderer:
                # Use the registered renderer (e.g. syntax highlighting)
                html_dict[template_field] = attr_renderer[template_field](content)
            else:
                html_dict[template_field] = (
                    "<pre><code>" + str(content) + "</pre></code>")
        return self.render(
            'airflow/ti_code.html',
            html_dict=html_dict,
            dag=dag,
            task_id=task_id,
            execution_date=execution_date,
            form=form,
            title=title,)
@expose('/log')
@login_required
def log(self):
BASE_LOG_FOLDER = os.path.expanduser(
conf.get('core', 'BASE_LOG_FOLDER'))
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dag = dagbag.get_dag(dag_id)
log_relative = "/{dag_id}/{task_id}/{execution_date}".format(
**locals())
loc = BASE_LOG_FOLDER + log_relative
loc = loc.format(**locals())
log = ""
TI = models.TaskInstance
session = Session()
dttm = dateutil.parser.parse(execution_date)
ti = session.query(TI).filter(
TI.dag_id == dag_id, TI.task_id == task_id,
TI.execution_date == dttm).first()
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
if ti:
host = ti.hostname
if socket.gethostname() == host:
try:
f = open(loc)
log += "".join(f.readlines())
f.close()
except:
log = "Log file isn't where expected.\n".format(loc)
else:
WORKER_LOG_SERVER_PORT = \
conf.get('celery', 'WORKER_LOG_SERVER_PORT')
url = (
"http://{host}:{WORKER_LOG_SERVER_PORT}/log"
"{log_relative}").format(**locals())
log += "Log file isn't local.\n"
log += "Fetching here: {url}\n".format(**locals())
try:
import requests
log += requests.get(url).text
except:
log += "Failed to fetch log file.".format(**locals())
session.commit()
session.close()
log = log.decode('utf-8') if PY2 else log
title = "Log"
return self.render(
'airflow/ti_code.html',
code=log, dag=dag, title=title, task_id=task_id,
execution_date=execution_date, form=form)
    @expose('/task')
    @login_required
    def task(self):
        """Show the details page for a single task definition."""
        dag_id = request.args.get('dag_id')
        task_id = request.args.get('task_id')
        # Carrying execution_date through, even though it's irrelevant for
        # this context
        execution_date = request.args.get('execution_date')
        dttm = dateutil.parser.parse(execution_date)
        form = DateTimeForm(data={'execution_date': dttm})
        dag = dagbag.get_dag(dag_id)
        if not dag or task_id not in dag.task_ids:
            flash(
                "Task [{}.{}] doesn't seem to exist"
                " at the moment".format(dag_id, task_id),
                "error")
            return redirect('/admin/')
        task = dag.get_task(task_id)
        # Copy so resolve_template_files doesn't mutate the shared DAG
        task = copy.copy(task)
        task.resolve_template_files()
        attributes = []
        for attr_name in dir(task):
            if not attr_name.startswith('_'):
                attr = getattr(task, attr_name)
                # type(self.task) is the bound-method type, so this filters
                # out methods; specially-rendered attrs are handled below
                if type(attr) != type(self.task) and \
                        attr_name not in attr_renderer:
                    attributes.append((attr_name, str(attr)))
        title = "Task Details"
        # Color coding the special attributes that are code
        special_attrs_rendered = {}
        for attr_name in attr_renderer:
            if hasattr(task, attr_name):
                source = getattr(task, attr_name)
                special_attrs_rendered[attr_name] = attr_renderer[attr_name](source)
        return self.render(
            'airflow/task.html',
            attributes=attributes,
            task_id=task_id,
            execution_date=execution_date,
            special_attrs_rendered=special_attrs_rendered,
            form=form,
            dag=dag, title=title)
    @expose('/action')
    @login_required
    def action(self):
        """Dispatch UI actions ('run', 'clear', 'success') on a task instance.

        Redirects back to ``origin`` on completion, or renders a
        confirmation page when ``confirmed`` is not set.
        """
        action = request.args.get('action')
        dag_id = request.args.get('dag_id')
        task_id = request.args.get('task_id')
        origin = request.args.get('origin')
        dag = dagbag.get_dag(dag_id)
        task = dag.get_task(task_id)
        execution_date = request.args.get('execution_date')
        execution_date = dateutil.parser.parse(execution_date)
        confirmed = request.args.get('confirmed') == "true"
        upstream = request.args.get('upstream') == "true"
        downstream = request.args.get('downstream') == "true"
        if action == "run":
            # Queue the task instance for immediate execution
            from airflow.executors import DEFAULT_EXECUTOR as executor
            from airflow.executors import CeleryExecutor
            if not isinstance(executor, CeleryExecutor):
                flash("Only works with the CeleryExecutor, sorry", "error")
                return redirect(origin)
            force = request.args.get('force') == "true"
            deps = request.args.get('deps') == "true"
            ti = models.TaskInstance(task=task, execution_date=execution_date)
            executor.start()
            executor.queue_task_instance(
                ti, force=force, ignore_dependencies=deps)
            executor.heartbeat()
            flash(
                "Sent {} to the message queue, "
                "it should start any moment now.".format(ti))
            return redirect(origin)
        elif action == 'clear':
            # Clear TI state, optionally for up/downstream and past/future
            future = request.args.get('future') == "true"
            past = request.args.get('past') == "true"
            dag = dag.sub_dag(
                task_regex=r"^{0}$".format(task_id),
                include_downstream=downstream,
                include_upstream=upstream)
            end_date = execution_date if not future else None
            start_date = execution_date if not past else None
            if confirmed:
                count = dag.clear(
                    start_date=start_date,
                    end_date=end_date)
                flash("{0} task instances have been cleared".format(count))
                return redirect(origin)
            else:
                # Dry run first: show the user what would be cleared
                tis = dag.clear(
                    start_date=start_date,
                    end_date=end_date,
                    dry_run=True)
                if not tis:
                    flash("No task instances to clear", 'error')
                    response = redirect(origin)
                else:
                    details = "\n".join([str(t) for t in tis])
                    response = self.render(
                        'airflow/confirm.html',
                        message=(
                            "Here's the list of task instances you are about "
                            "to clear:"),
                        details=details,)
                return response
        elif action == 'success':
            # Flagging tasks as successful
            session = settings.Session()
            task_ids = [task_id]
            if downstream:
                task_ids += [
                    t.task_id
                    for t in task.get_flat_relatives(upstream=False)]
            if upstream:
                task_ids += [
                    t.task_id
                    for t in task.get_flat_relatives(upstream=True)]
            TI = models.TaskInstance
            tis = session.query(TI).filter(
                TI.dag_id == dag_id,
                TI.execution_date == execution_date,
                TI.task_id.in_(task_ids)).all()
            if confirmed:
                updated_task_ids = []
                for ti in tis:
                    updated_task_ids.append(ti.task_id)
                    ti.state = State.SUCCESS
                session.commit()
                # Create SUCCESS rows for task ids with no existing TI record
                to_insert = list(set(task_ids) - set(updated_task_ids))
                for task_id in to_insert:
                    ti = TI(
                        task=dag.get_task(task_id),
                        execution_date=execution_date,
                        state=State.SUCCESS)
                    session.add(ti)
                    session.commit()
                session.commit()
                session.close()
                flash("Marked success on {} task instances".format(
                    len(task_ids)))
                return redirect(origin)
            else:
                if not task_ids:
                    flash("No task instances to mark as successful", 'error')
                    response = redirect(origin)
                else:
                    # Build transient TIs only to display the confirmation list
                    tis = []
                    for task_id in task_ids:
                        tis.append(TI(
                            task=dag.get_task(task_id),
                            execution_date=execution_date,
                            state=State.SUCCESS))
                    details = "\n".join([str(t) for t in tis])
                    response = self.render(
                        'airflow/confirm.html',
                        message=(
                            "Here's the list of task instances you are about "
                            "to mark as successful:"),
                        details=details,)
                return response
    @expose('/tree')
    @login_required
    @wwwutils.gzipped
    def tree(self):
        """Build the JSON payload behind the tree view for one DAG."""
        dag_id = request.args.get('dag_id')
        blur = conf.getboolean('webserver', 'demo_mode')
        dag = dagbag.get_dag(dag_id)
        root = request.args.get('root')
        if root:
            # Restrict the view to the root task and its upstream lineage
            dag = dag.sub_dag(
                task_regex=root,
                include_downstream=False,
                include_upstream=True)
        session = settings.Session()
        base_date = request.args.get('base_date')
        num_runs = request.args.get('num_runs')
        num_runs = int(num_runs) if num_runs else 25
        if not base_date:
            base_date = dag.latest_execution_date or datetime.now()
        else:
            base_date = dateutil.parser.parse(base_date)
        base_date = utils.round_time(base_date, dag.schedule_interval)
        form = TreeForm(data={'base_date': base_date, 'num_runs': num_runs})
        start_date = dag.start_date
        if not start_date and 'start_date' in dag.default_args:
            start_date = dag.default_args['start_date']
        if start_date:
            # Align base_date onto the schedule grid anchored at start_date
            difference = base_date - start_date
            offset = timedelta(seconds=int(difference.total_seconds() % dag.schedule_interval.total_seconds()))
            base_date -= offset
            base_date -= timedelta(microseconds=base_date.microsecond)
        from_date = (base_date - (num_runs * dag.schedule_interval))
        dates = utils.date_range(
            from_date, base_date, dag.schedule_interval)
        task_instances = {}
        for ti in dag.get_task_instances(session, from_date):
            task_instances[(ti.task_id, ti.execution_date)] = ti
        expanded = []
        # The default recursion traces every path so that tree view has full
        # expand/collapse functionality. After 5,000 nodes we stop and fall
        # back on a quick DFS search for performance. See PR #320.
        node_count = [0]
        node_limit = 5000 / len(dag.roots)
        def recurse_nodes(task, visited):
            # Recursively build the d3 node dict for a task and its upstreams
            visited.add(task)
            node_count[0] += 1
            children = [
                recurse_nodes(t, visited) for t in task.upstream_list
                if node_count[0] < node_limit or t not in visited]
            # D3 tree uses children vs _children to define what is
            # expanded or not. The following block makes it such that
            # repeated nodes are collapsed by default.
            children_key = 'children'
            if task.task_id not in expanded:
                expanded.append(task.task_id)
            elif children:
                children_key = "_children"
            return {
                'name': task.task_id,
                'instances': [
                    utils.alchemy_to_dict(
                        task_instances.get((task.task_id, d))) or {
                        'execution_date': d.isoformat(),
                        'task_id': task.task_id
                    }
                    for d in dates],
                children_key: children,
                'num_dep': len(task.upstream_list),
                'operator': task.task_type,
                'retries': task.retries,
                'owner': task.owner,
                'start_date': task.start_date,
                'end_date': task.end_date,
                'depends_on_past': task.depends_on_past,
                'ui_color': task.ui_color,
            }
        if len(dag.roots) > 1:
            # d3 likes a single root
            data = {
                'name': 'root',
                'instances': [],
                'children': [recurse_nodes(t, set()) for t in dag.roots]
            }
        elif len(dag.roots) == 1:
            data = recurse_nodes(dag.roots[0], set())
        else:
            flash("No tasks found.", "error")
            data = []
        data = json.dumps(data, indent=4, default=utils.json_ser)
        session.commit()
        session.close()
        return self.render(
            'airflow/tree.html',
            operators=sorted(
                list(set([op.__class__ for op in dag.tasks])),
                key=lambda x: x.__name__
            ),
            root=root,
            form=form,
            dag=dag, data=data, blur=blur)
    @expose('/graph')
    @login_required
    @wwwutils.gzipped
    def graph(self):
        """Render the DAG graph view: nodes/edges plus TI states for one date."""
        session = settings.Session()
        dag_id = request.args.get('dag_id')
        blur = conf.getboolean('webserver', 'demo_mode')
        arrange = request.args.get('arrange', "LR")
        dag = dagbag.get_dag(dag_id)
        if dag_id not in dagbag.dags:
            flash('DAG "{0}" seems to be missing.'.format(dag_id), "error")
            return redirect('/admin/')
        root = request.args.get('root')
        if root:
            # Restrict the view to the root task and its upstream lineage
            dag = dag.sub_dag(
                task_regex=root,
                include_upstream=True,
                include_downstream=False)
        nodes = []
        edges = []
        for task in dag.tasks:
            nodes.append({
                'id': task.task_id,
                'value': {
                    'label': task.task_id,
                    'labelStyle': "fill:{0};".format(task.ui_fgcolor),
                    'style': "fill:{0};".format(task.ui_color),
                }
            })
        def get_upstream(task):
            # Walk upstream recursively, de-duplicating edges as we go
            for t in task.upstream_list:
                edge = {
                    'u': t.task_id,
                    'v': task.task_id,
                }
                if edge not in edges:
                    edges.append(edge)
                    get_upstream(t)
        for t in dag.roots:
            get_upstream(t)
        dttm = request.args.get('execution_date')
        if dttm:
            dttm = dateutil.parser.parse(dttm)
        else:
            dttm = dag.latest_execution_date or datetime.now().date()
        form = GraphForm(data={'execution_date': dttm, 'arrange': arrange})
        task_instances = {
            ti.task_id: utils.alchemy_to_dict(ti)
            for ti in dag.get_task_instances(session, dttm, dttm)
        }
        tasks = {
            t.task_id: {
                'dag_id': t.dag_id,
                'task_type': t.task_type,
            }
            for t in dag.tasks
        }
        if not tasks:
            flash("No tasks found", "error")
        session.commit()
        session.close()
        doc_md = markdown.markdown(dag.doc_md) if hasattr(dag, 'doc_md') else ''
        return self.render(
            'airflow/graph.html',
            dag=dag,
            form=form,
            width=request.args.get('width', "100%"),
            height=request.args.get('height', "800"),
            execution_date=dttm.isoformat(),
            doc_md=doc_md,
            arrange=arrange,
            operators=sorted(
                list(set([op.__class__ for op in dag.tasks])),
                key=lambda x: x.__name__
            ),
            blur=blur,
            root=root or '',
            task_instances=json.dumps(task_instances, indent=2),
            tasks=json.dumps(tasks, indent=2),
            nodes=json.dumps(nodes, indent=2),
            edges=json.dumps(edges, indent=2),)
    @expose('/duration')
    @login_required
    def duration(self):
        """Chart per-task durations (in hours) over the last ``days`` days."""
        session = settings.Session()
        dag_id = request.args.get('dag_id')
        days = int(request.args.get('days', 30))
        dag = dagbag.get_dag(dag_id)
        # Truncate to midnight so the window starts at a day boundary
        from_date = (datetime.today()-timedelta(days)).date()
        from_date = datetime.combine(from_date, datetime.min.time())
        root = request.args.get('root')
        if root:
            dag = dag.sub_dag(
                task_regex=root,
                include_upstream=True,
                include_downstream=False)
        all_data = []
        for task in dag.tasks:
            data = []
            for ti in task.get_task_instances(session, from_date):
                if ti.duration:
                    # duration is stored in seconds; convert to hours
                    data.append([
                        ti.execution_date.isoformat(),
                        float(ti.duration) / (60*60)
                    ])
            if data:
                all_data.append({'data': data, 'name': task.task_id})
        session.commit()
        session.close()
        return self.render(
            'airflow/chart.html',
            dag=dag,
            data=all_data,
            chart_options={'yAxis': {'title': {'text': 'hours'}}},
            height="700px",
            demo_mode=conf.getboolean('webserver', 'demo_mode'),
            root=root,
        )
    @expose('/landing_times')
    @login_required
    def landing_times(self):
        """Chart how long after the scheduled period each task landed (hours)."""
        session = settings.Session()
        dag_id = request.args.get('dag_id')
        days = int(request.args.get('days', 30))
        dag = dagbag.get_dag(dag_id)
        from_date = (datetime.today()-timedelta(days)).date()
        from_date = datetime.combine(from_date, datetime.min.time())
        root = request.args.get('root')
        if root:
            dag = dag.sub_dag(
                task_regex=root,
                include_upstream=True,
                include_downstream=False)
        all_data = []
        for task in dag.tasks:
            data = []
            for ti in task.get_task_instances(session, from_date):
                if ti.end_date:
                    # Landing time = end_date minus the end of the scheduled
                    # period; old_div keeps Py2 integer-division semantics
                    data.append([
                        ti.execution_date.isoformat(), old_div((
                            ti.end_date - (
                                ti.execution_date + task.schedule_interval)
                        ).total_seconds(),(60*60))
                    ])
            all_data.append({'data': data, 'name': task.task_id})
        session.commit()
        session.close()
        return self.render(
            'airflow/chart.html',
            dag=dag,
            data=all_data,
            height="700px",
            chart_options={'yAxis': {'title': {'text': 'hours after 00:00'}}},
            demo_mode=conf.getboolean('webserver', 'demo_mode'),
            root=root,
        )
    @expose('/paused')
    @login_required
    def paused(self):
        """Toggle a DAG's paused flag.

        The 'is_paused' query arg carries the state as seen by the client
        BEFORE the toggle, so 'false' means "currently unpaused: pause it".
        """
        DagModel = models.DagModel
        dag_id = request.args.get('dag_id')
        session = settings.Session()
        orm_dag = session.query(
            DagModel).filter(DagModel.dag_id == dag_id).first()
        if request.args.get('is_paused') == 'false':
            orm_dag.is_paused = True
        else:
            orm_dag.is_paused = False
        session.merge(orm_dag)
        session.commit()
        session.close()
        # Touch the dagbag so the in-memory copy reflects the change
        dagbag.get_dag(dag_id)
        return "OK"
    @expose('/refresh')
    @login_required
    def refresh(self):
        """Mark one DAG as expired so its definition file gets reloaded."""
        DagModel = models.DagModel
        dag_id = request.args.get('dag_id')
        session = settings.Session()
        orm_dag = session.query(
            DagModel).filter(DagModel.dag_id == dag_id).first()
        if orm_dag:
            # Setting last_expired makes the dagbag re-read the file
            orm_dag.last_expired = datetime.now()
            session.merge(orm_dag)
            session.commit()
        session.close()
        dagbag.get_dag(dag_id)
        flash("DAG [{}] is now fresh as a daisy".format(dag_id))
        return redirect('/')
    @expose('/refresh_all')
    @login_required
    def refresh_all(self):
        """Force a re-collection of every DAG definition file."""
        dagbag.collect_dags(only_if_updated=False)
        flash("All DAGs are now up to date")
        return redirect('/')
    @expose('/gantt')
    @login_required
    def gantt(self):
        """Render a Gantt chart of TI start/end times for one execution date."""
        session = settings.Session()
        dag_id = request.args.get('dag_id')
        dag = dagbag.get_dag(dag_id)
        demo_mode = conf.getboolean('webserver', 'demo_mode')
        root = request.args.get('root')
        if root:
            dag = dag.sub_dag(
                task_regex=root,
                include_upstream=True,
                include_downstream=False)
        dttm = request.args.get('execution_date')
        if dttm:
            dttm = dateutil.parser.parse(dttm)
        else:
            dttm = dag.latest_execution_date or datetime.now().date()
        form = DateTimeForm(data={'execution_date': dttm})
        # Only instances that actually started can be drawn
        tis = [
            ti
            for ti in dag.get_task_instances(session, dttm, dttm)
            if ti.start_date]
        tis = sorted(tis, key=lambda ti: ti.start_date)
        tasks = []
        data = []
        for i, ti in enumerate(tis):
            # Still-running tasks get "now" as a provisional end
            end_date = ti.end_date or datetime.now()
            tasks += [ti.task_id]
            color = State.color(ti.state)
            data.append({
                'x': i,
                # NOTE(review): strftime('%s') is a non-portable epoch-seconds
                # extension (works on glibc); values are ms for Highcharts
                'low': int(ti.start_date.strftime('%s')) * 1000,
                'high': int(end_date.strftime('%s')) * 1000,
                'color': color,
            })
        height = (len(tis) * 25) + 50
        session.commit()
        session.close()
        hc = {
            'chart': {
                'type': 'columnrange',
                'inverted': True,
                'height': height,
            },
            'xAxis': {'categories': tasks},
            'yAxis': {'type': 'datetime'},
            'title': {
                'text': None
            },
            'plotOptions': {
                'series': {
                    'cursor': 'pointer',
                    'minPointLength': 4,
                },
            },
            'legend': {
                'enabled': False
            },
            'series': [{
                'data': data
            }]
        }
        return self.render(
            'airflow/gantt.html',
            dag=dag,
            execution_date=dttm.isoformat(),
            form=form,
            hc=json.dumps(hc, indent=4),
            height=height,
            demo_mode=demo_mode,
            root=root,
        )
@expose('/variables/<form>', methods=["GET", "POST"])
@login_required
def variables(self, form):
try:
if request.method == 'POST':
data = request.json
if data:
session = settings.Session()
var = models.Variable(key=form, val=json.dumps(data))
session.add(var)
session.commit()
return ""
else:
return self.render(
'airflow/variables/{}.html'.format(form)
)
except:
return ("Error: form airflow/variables/{}.html "
"not found.").format(form), 404
# Register the main DAG-centric view with flask-admin
admin.add_view(Airflow(name='DAGs'))
class QueryView(wwwutils.DataProfilingMixin, BaseView):
    """Ad-hoc SQL UI: run a query against a configured connection and show
    the result as an HTML table or CSV download."""
    @expose('/')
    @wwwutils.gzipped
    def query(self):
        session = settings.Session()
        dbs = session.query(models.Connection).order_by(
            models.Connection.conn_id).all()
        session.expunge_all()
        # Only offer connections whose type resolves to a usable hook
        db_choices = list(
            ((db.conn_id, db.conn_id) for db in dbs if db.get_hook()))
        conn_id_str = request.args.get('conn_id')
        csv = request.args.get('csv') == "true"
        sql = request.args.get('sql')
        class QueryForm(Form):
            # Built per-request so choices reflect the current connections
            conn_id = SelectField("Layout", choices=db_choices)
            sql = TextAreaField("SQL", widget=wwwutils.AceEditorWidget())
        data = {
            'conn_id': conn_id_str,
            'sql': sql,
        }
        results = None
        has_data = False
        error = False
        if conn_id_str:
            db = [db for db in dbs if db.conn_id == conn_id_str][0]
            hook = db.get_hook()
            try:
                # Results are capped at QUERY_LIMIT rows via limit_sql
                df = hook.get_pandas_df(wwwutils.limit_sql(sql, QUERY_LIMIT, conn_type=db.conn_type))
                # df = hook.get_pandas_df(sql)
                has_data = len(df) > 0
                df = df.fillna('')
                results = df.to_html(
                    classes="table table-bordered table-striped no-wrap",
                    index=False,
                    na_rep='',
                ) if has_data else ''
            except Exception as e:
                flash(str(e), 'error')
                error = True
            if has_data and len(df) == QUERY_LIMIT:
                flash(
                    "Query output truncated at " + str(QUERY_LIMIT) +
                    " rows", 'info')
            if not has_data and error:
                flash('No data', 'error')
            if csv:
                return Response(
                    response=df.to_csv(index=False),
                    status=200,
                    mimetype="application/text")
        form = QueryForm(request.form, data=data)
        session.commit()
        session.close()
        return self.render(
            'airflow/query.html', form=form,
            title="Ad Hoc Query",
            results=results or '',
            has_data=has_data)
# Data Profiling menu: ad-hoc query tool
admin.add_view(QueryView(name='Ad Hoc Query', category="Data Profiling"))
class AirflowModelView(ModelView):
    """Base flask-admin ModelView wired to the Airflow templates."""
    list_template = 'airflow/model_list.html'
    edit_template = 'airflow/model_edit.html'
    create_template = 'airflow/model_create.html'
    page_size = 500
class ModelViewOnly(wwwutils.LoginMixin, AirflowModelView):
    """
    Modifying the base ModelView class for non edit, browse only operations
    (create/edit/delete are disabled; primary keys are displayed).
    """
    named_filter_urls = True
    can_create = False
    can_edit = False
    can_delete = False
    column_display_pk = True
def log_link(v, c, m, p):
    """Column formatter: book icon linking to the row's log page."""
    target = url_for(
        'airflow.log',
        dag_id=m.dag_id,
        task_id=m.task_id,
        execution_date=m.execution_date.isoformat())
    template = Markup(
        '<a href="{url}">'
        ' <span class="glyphicon glyphicon-book" aria-hidden="true">'
        '</span></a>')
    # Markup.format escapes its arguments, same as the original call
    return template.format(url=target)
def task_instance_link(v, c, m, p):
    """Column formatter: link to the task page plus an upstream-filter icon."""
    url = url_for(
        'airflow.task',
        dag_id=m.dag_id,
        task_id=m.task_id,
        execution_date=m.execution_date.isoformat())
    # Second link opens the graph view filtered on this task + upstream
    url_root = url_for(
        'airflow.graph',
        dag_id=m.dag_id,
        root=m.task_id,
        execution_date=m.execution_date.isoformat())
    return Markup(
        """
        <span style="white-space: nowrap;">
        <a href="{url}">{m.task_id}</a>
        <a href="{url_root}" title="Filter on this task and upstream">
        <span class="glyphicon glyphicon-filter" style="margin-left: 0px;"
            aria-hidden="true"></span>
        </a>
        </span>
        """.format(**locals()))
def state_f(v, c, m, p):
    """Column formatter: colored label badge for a state value."""
    bg = State.color(m.state)
    html = (
        '<span class="label" style="background-color:%s;">'
        '%s</span>' % (bg, m.state))
    return Markup(html)
def duration_f(v, c, m, p):
    """Column formatter: render duration seconds as a timedelta, else None."""
    if not (m.end_date and m.duration):
        return None
    return timedelta(seconds=m.duration)
def datetime_f(v, c, m, p):
    """Column formatter: compact ISO datetime, year stripped when current."""
    value = getattr(m, p)
    text = value.isoformat() if value else ''
    # Drop the "YYYY-" prefix for values from the current year
    if datetime.now().isoformat()[:4] == text[:4]:
        text = text[5:]
    return Markup("<nobr>{}</nobr>".format(text))
def nobr_f(v, c, m, p):
    """Column formatter: wrap the attribute value in <nobr> tags."""
    value = getattr(m, p)
    return Markup("<nobr>{}</nobr>".format(value))
class JobModelView(ModelViewOnly):
    """Browse-only list of BaseJob rows (scheduler/backfill/worker jobs)."""
    verbose_name_plural = "jobs"
    verbose_name = "job"
    # Newest jobs first
    column_default_sort = ('start_date', True)
    column_filters = (
        'job_type', 'dag_id', 'state',
        'unixname', 'hostname', 'start_date', 'end_date', 'latest_heartbeat')
    column_formatters = dict(
        start_date=datetime_f,
        end_date=datetime_f,
        hostname=nobr_f,
        state=state_f,
        latest_heartbeat=datetime_f)
# Browse menu: job runs
mv = JobModelView(jobs.BaseJob, Session, name="Jobs", category="Browse")
admin.add_view(mv)
class LogModelView(ModelViewOnly):
    """Browse-only list of audit Log rows."""
    verbose_name_plural = "logs"
    verbose_name = "log"
    # Newest entries first
    column_default_sort = ('dttm', True)
    column_filters = ('dag_id', 'task_id', 'execution_date')
    column_formatters = dict(
        dttm=datetime_f, execution_date=datetime_f, dag_id=dag_link)
# Browse menu: audit log entries
mv = LogModelView(
    models.Log, Session, name="Logs", category="Browse")
admin.add_view(mv)
class TaskInstanceModelView(ModelViewOnly):
    """TaskInstance list with state badges and links to task/log pages."""
    verbose_name_plural = "task instances"
    verbose_name = "task instance"
    column_filters = (
        'state', 'dag_id', 'task_id', 'execution_date', 'hostname',
        'queue', 'pool', 'operator')
    named_filter_urls = True
    column_formatters = dict(
        log=log_link, task_id=task_instance_link,
        hostname=nobr_f,
        state=state_f,
        execution_date=datetime_f,
        start_date=datetime_f,
        end_date=datetime_f,
        dag_id=dag_link, duration=duration_f)
    column_searchable_list = ('dag_id', 'task_id', 'state')
    column_default_sort = ('start_date', True)
    column_list = (
        'state', 'dag_id', 'task_id', 'execution_date', 'operator',
        'start_date', 'end_date', 'duration', 'job_id', 'hostname',
        'unixname', 'priority_weight', 'queued_dttm', 'log')
    # Unlike the browse-only base class, deletion is allowed here
    can_delete = True
    page_size = 500
# Browse menu: task instances
mv = TaskInstanceModelView(
    models.TaskInstance, Session, name="Task Instances", category="Browse")
admin.add_view(mv)
mv = DagModelView(
    models.DagModel, Session, name=None)
admin.add_view(mv)
# Hack to not add this view to the menu
admin._menu = admin._menu[:-1]
class ConnectionModelView(wwwutils.SuperUserMixin, AirflowModelView):
    """CRUD view for Connection objects (superusers only)."""
    create_template = 'airflow/conn_create.html'
    edit_template = 'airflow/conn_edit.html'
    list_template = 'airflow/conn_list.html'
    form_columns = (
        'conn_id',
        'conn_type',
        'host',
        'schema',
        'login',
        'password',
        'port',
        'extra',
    )
    verbose_name = "Connection"
    verbose_name_plural = "Connections"
    column_default_sort = ('conn_id', False)
    column_list = ('conn_id', 'conn_type', 'host', 'port', 'is_encrypted',)
    form_overrides = dict(_password=VisiblePasswordField)
    form_widget_args = {
        'is_encrypted': {'disabled': True},
    }
    # Used to customized the form, the forms elements get rendered
    # and results are stored in the extra field as json. All of these
    # need to be prefixed with extra__ and then the conn_type ___ as in
    # extra__{conn_type}__name. You can also hide form elements and rename
    # others from the connection_form.js file
    form_extra_fields = {
        'extra__jdbc__drv_path' : StringField('Driver Path'),
        'extra__jdbc__drv_clsname': StringField('Driver Class'),
    }
    form_choices = {
        'conn_type': [
            ('ftp', 'FTP',),
            ('hdfs', 'HDFS',),
            ('http', 'HTTP',),
            ('hive_cli', 'Hive Client Wrapper',),
            ('hive_metastore', 'Hive Metastore Thrift',),
            ('hiveserver2', 'Hive Server 2 Thrift',),
            ('jdbc', 'Jdbc Connection',),
            ('mysql', 'MySQL',),
            ('postgres', 'Postgres',),
            ('oracle', 'Oracle',),
            ('presto', 'Presto',),
            ('s3', 'S3',),
            ('samba', 'Samba',),
            ('sqlite', 'Sqlite',),
            ('mssql', 'Microsoft SQL Server'),
        ]
    }

    def on_model_change(self, form, model, is_created):
        """Serialize the extra__ form fields into the JSON 'extra' column."""
        formdata = form.data
        if formdata['conn_type'] in ['jdbc']:
            extra = {
                key: formdata[key]
                for key in self.form_extra_fields.keys() if key in formdata}
            model.extra = json.dumps(extra)

    @classmethod
    def is_secure(cls):
        """
        Used to display a message in the Connection list view making it clear
        that the passwords can't be encrypted.
        """
        # Fixes: first parameter renamed self -> cls to match @classmethod,
        # and the bare "except:" narrowed so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        is_secure = False
        try:
            import cryptography
            conf.get('core', 'fernet_key')
            is_secure = True
        except Exception:
            pass
        return is_secure

    def on_form_prefill(self, form, id):
        """Populate the extra__ fields from the stored JSON 'extra' blob."""
        try:
            d = json.loads(form.data.get('extra', '{}'))
        except Exception:
            # Missing or malformed extra: fall back to empty defaults
            d = {}
        for field in list(self.form_extra_fields.keys()):
            value = d.get(field, '')
            if value:
                field = getattr(form, field)
                field.data = value
# Admin menu: connection management
mv = ConnectionModelView(
    models.Connection, Session,
    name="Connections", category="Admin")
admin.add_view(mv)
class UserModelView(wwwutils.SuperUserMixin, AirflowModelView):
    """CRUD view for User records (superusers only)."""
    verbose_name = "User"
    verbose_name_plural = "Users"
    column_default_sort = 'username'
# Admin menu: user management
mv = UserModelView(models.User, Session, name="Users", category="Admin")
admin.add_view(mv)
class ConfigurationView(wwwutils.SuperUserMixin, BaseView):
    """Expose the running airflow.cfg, raw or highlighted (superusers only)."""
    @expose('/')
    def conf(self):
        """Show the config file, or a placeholder if exposure is disabled."""
        from airflow import configuration
        raw = request.args.get('raw') == "true"
        title = "Airflow Configuration"
        subtitle = configuration.AIRFLOW_CONFIG
        if conf.getboolean("webserver", "expose_config"):
            with open(configuration.AIRFLOW_CONFIG, 'r') as f:
                config = f.read()
        else:
            config = (
                # Typo fixed: "You Airflow administrator" -> "Your ..."
                "# Your Airflow administrator chose not to expose the "
                "configuration, most likely for security reasons.")
        if raw:
            return Response(
                response=config,
                status=200,
                mimetype="application/text")
        else:
            code_html = Markup(highlight(
                config,
                lexers.IniLexer(),  # Lexer call
                HtmlFormatter(noclasses=True))
            )
            return self.render(
                'airflow/code.html',
                pre_subtitle=settings.HEADER + " v" + airflow.__version__,
                code_html=code_html, title=title, subtitle=subtitle)
# Admin menu: configuration viewer
admin.add_view(ConfigurationView(name='Configuration', category="Admin"))
def label_link(v, c, m, p):
    """Column formatter: chart label linking to its page with default params."""
    try:
        # SECURITY: eval() of a DB-stored string; only trusted admins should
        # be able to edit default_params
        default_params = eval(m.default_params)
    except:
        default_params = {}
    url = url_for(
        'airflow.chart', chart_id=m.id, iteration_no=m.iteration_no,
        **default_params)
    return Markup("<a href='{url}'>{m.label}</a>".format(**locals()))
class ChartModelView(wwwutils.DataProfilingMixin, AirflowModelView):
    """CRUD view for Chart definitions (Data Profiling menu)."""
    verbose_name = "chart"
    verbose_name_plural = "charts"
    form_columns = (
        'label',
        'owner',
        'conn_id',
        'chart_type',
        'show_datatable',
        'x_is_date',
        'y_log_scale',
        'show_sql',
        'height',
        'sql_layout',
        'sql',
        'default_params',)
    column_list = (
        'label', 'conn_id', 'chart_type', 'owner', 'last_modified',)
    column_formatters = dict(label=label_link, last_modified=datetime_f)
    column_default_sort = ('last_modified', True)
    create_template = 'airflow/chart/create.html'
    edit_template = 'airflow/chart/edit.html'
    column_filters = ('label', 'owner.username', 'conn_id')
    column_searchable_list = ('owner.username', 'label', 'sql')
    column_descriptions = {
        'label': "Can include {{ templated_fields }} and {{ macros }}",
        'chart_type': "The type of chart to be displayed",
        'sql': "Can include {{ templated_fields }} and {{ macros }}.",
        'height': "Height of the chart, in pixels.",
        'conn_id': "Source database to run the query against",
        'x_is_date': (
            "Whether the X axis should be casted as a date field. Expect most "
            "intelligible date formats to get casted properly."
        ),
        'owner': (
            "The chart's owner, mostly used for reference and filtering in "
            "the list view."
        ),
        'show_datatable':
            "Whether to display an interactive data table under the chart.",
        'default_params': (
            'A dictionary of {"key": "values",} that define what the '
            'templated fields (parameters) values should be by default. '
            'To be valid, it needs to "eval" as a Python dict. '
            'The key values will show up in the url\'s querystring '
            'and can be altered there.'
        ),
        'show_sql': "Whether to display the SQL statement as a collapsible "
            "section in the chart page.",
        'y_log_scale': "Whether to use a log scale for the Y axis.",
        'sql_layout': (
            "Defines the layout of the SQL that the application should "
            "expect. Depending on the tables you are sourcing from, it may "
            "make more sense to pivot / unpivot the metrics."
        ),
    }
    column_labels = {
        'sql': "SQL",
        'height': "Chart Height",
        'sql_layout': "SQL Layout",
        'show_sql': "Display the SQL Statement",
        'default_params': "Default Parameters",
    }
    form_choices = {
        'chart_type': [
            ('line', 'Line Chart'),
            ('spline', 'Spline Chart'),
            ('bar', 'Bar Chart'),
            ('para', 'Parallel Coordinates'),
            ('column', 'Column Chart'),
            ('area', 'Overlapping Area Chart'),
            ('stacked_area', 'Stacked Area Chart'),
            ('percent_area', 'Percent Area Chart'),
            ('heatmap', 'Heatmap'),
            ('datatable', 'No chart, data table only'),
        ],
        'sql_layout': [
            ('series', 'SELECT series, x, y FROM ...'),
            ('columns', 'SELECT x, y (series 1), y (series 2), ... FROM ...'),
        ],
        # NOTE: this query runs once at class-definition (import) time, so
        # connections added later won't show up until the process restarts
        'conn_id': [
            (c.conn_id, c.conn_id)
            for c in (
                Session().query(models.Connection.conn_id)
                .group_by(models.Connection.conn_id)
            )
        ]
    }
    def on_model_change(self, form, model, is_created=True):
        """Bump the iteration counter and stamp ownership/mtime on save."""
        if model.iteration_no is None:
            model.iteration_no = 0
        else:
            model.iteration_no += 1
        if AUTHENTICATE and not model.user_id and current_user:
            model.user_id = current_user.id
        model.last_modified = datetime.now()
# Register the chart editor under "Data Profiling" and add external
# documentation links to the "Docs" menu.
mv = ChartModelView(
    models.Chart, Session,
    name="Charts", category="Data Profiling")
admin.add_view(mv)
admin.add_link(
    base.MenuLink(
        category='Docs',
        name='Documentation',
        url='http://pythonhosted.org/airflow/'))
admin.add_link(
    base.MenuLink(
        category='Docs',
        name='Github',
        url='https://github.com/airbnb/airflow'))
class KnowEventView(wwwutils.DataProfilingMixin, AirflowModelView):
    """CRUD view for known events (outages, holidays, ...) used in analysis."""
    verbose_name = "known event"
    verbose_name_plural = "known events"
    # Fields exposed on the create/edit form.
    form_columns = (
        'label',
        'event_type',
        'start_date',
        'end_date',
        'reported_by',
        'description')
    # Columns shown in the list view.
    column_list = (
        'label', 'event_type', 'start_date', 'end_date', 'reported_by')
    # Sort by start_date, descending (True = reverse).
    column_default_sort = ("start_date", True)
# Register the known-events list under "Data Profiling".
mv = KnowEventView(
    models.KnownEvent, Session, name="Known Events", category="Data Profiling")
admin.add_view(mv)
class KnowEventTypeView(wwwutils.DataProfilingMixin, AirflowModelView):
    """Model view for known event types; inherits default ModelView behavior."""
    pass
'''
# For debugging / troubleshooting
mv = KnowEventTypeView(
models.KnownEventType,
Session, name="Known Event Types", category="Manage")
admin.add_view(mv)
class DagPickleView(SuperUserMixin, ModelView):
pass
mv = DagPickleView(
models.DagPickle,
Session, name="Pickles", category="Manage")
admin.add_view(mv)
'''
class VariableView(wwwutils.LoginMixin, AirflowModelView):
    """CRUD view for Airflow Variables (the key/value settings store)."""
    verbose_name = "Variable"
    verbose_name_plural = "Variables"
    # Only the key is listed; values can be large.
    column_list = ('key',)
    column_filters = ('key', 'val')
    column_searchable_list = ('key', 'val')
    # Render the value editor as a tall textarea.
    form_widget_args = {
        'val': {
            'rows': 20,
        }
    }
# Register the variables editor under the "Admin" menu.
mv = VariableView(
    models.Variable, Session, name="Variables", category="Admin")
admin.add_view(mv)
def pool_link(v, c, m, p):
    """Render a pool name as a link to its filtered task-instance list."""
    target = '/admin/taskinstance/?flt1_pool_equals=' + m.pool
    return Markup("<a href='{url}'>{m.pool}</a>".format(url=target, m=m))
def fused_slots(v, c, m, p):
    """Render the used-slot count as a link to the pool's running tasks."""
    target = (
        '/admin/taskinstance/' +
        '?flt1_pool_equals=' + m.pool +
        '&flt2_state_equals=running')
    return Markup("<a href='{0}'>{1}</a>".format(target, m.used_slots()))
def fqueued_slots(v, c, m, p):
    """Render the queued-slot count as a link to the pool's queued tasks."""
    target = (
        '/admin/taskinstance/' +
        '?flt1_pool_equals=' + m.pool +
        '&flt2_state_equals=queued&sort=10&desc=1')
    return Markup("<a href='{0}'>{1}</a>".format(target, m.queued_slots()))
class PoolModelView(wwwutils.SuperUserMixin, AirflowModelView):
    """Admin view of worker pools with linked slot-usage counts."""
    column_list = ('pool', 'slots', 'used_slots', 'queued_slots')
    # Render pool names and slot counts as links into filtered
    # task-instance lists (pool_link / fused_slots / fqueued_slots above).
    column_formatters = dict(
        pool=pool_link, used_slots=fused_slots, queued_slots=fqueued_slots)
    named_filter_urls = True
# Register the pool management view under the "Admin" menu.
mv = PoolModelView(models.Pool, Session, name="Pools", category="Admin")
admin.add_view(mv)
class SlaMissModelView(wwwutils.SuperUserMixin, ModelViewOnly):
    """Read-only browser of recorded SLA misses."""
    verbose_name_plural = "SLA misses"
    verbose_name = "SLA miss"
    column_list = (
        'dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp')
    # Link task/dag columns and format datetimes with the shared helpers.
    column_formatters = dict(
        task_id=task_instance_link,
        execution_date=datetime_f,
        timestamp=datetime_f,
        dag_id=dag_link)
    named_filter_urls = True
    column_searchable_list = ('dag_id', 'task_id',)
    column_filters = (
        'dag_id', 'task_id', 'email_sent', 'timestamp', 'execution_date')
    # These fields are informational only and must not be edited.
    form_widget_args = {
        'email_sent': {'disabled': True},
        'timestamp': {'disabled': True},
    }
# Register the SLA-miss browser under the "Browse" menu.
mv = SlaMissModelView(
    models.SlaMiss, Session, name="SLA Misses", category="Browse")
admin.add_view(mv)
def integrate_plugins():
    """Integrate plugins to the context.

    Registers plugin-contributed admin views, Flask blueprints and menu
    links on the running ``admin``/``app`` objects.
    """
    from airflow.plugins_manager import (
        admin_views, flask_blueprints, menu_links)
    for v in admin_views:
        admin.add_view(v)
    for bp in flask_blueprints:
        # Fixed: removed a leftover debug ``print(bp)`` that wrote every
        # blueprint object to stdout on webserver startup.
        app.register_blueprint(bp)
    for ml in menu_links:
        admin.add_link(ml)
integrate_plugins()
| nkhuyu/airflow | airflow/www/app.py | Python | apache-2.0 | 68,914 |
# Copyright 2015 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_log import log as logging
from neutron._i18n import _
LOG = logging.getLogger(__name__)
class ItemAllocator(object):
    """Manages allocation of items from a pool

    Some of the allocations such as link local addresses used for routing
    inside the fip namespaces need to persist across agent restarts to maintain
    consistency. Persisting such allocations in the neutron database is
    unnecessary and would degrade performance. ItemAllocator utilizes local
    file system to track allocations made for objects of a given class.

    The persistent datastore is a file. The records are one per line of
    the format: key<delimiter>value. For example if the delimiter is a ','
    (the default value) then the records will be: key,value (one per line)
    """

    def __init__(self, state_file, ItemClass, item_pool, delimiter=','):
        """Read the file with previous allocations recorded.

        See the note in the allocate method for more detail.
        """
        self.ItemClass = ItemClass
        self.state_file = state_file
        # key -> item handed out during this process' lifetime
        self.allocations = {}
        # key -> item restored from the state file, not yet re-claimed
        self.remembered = {}
        self.pool = item_pool
        read_error = False
        for line in self._read():
            try:
                key, saved_value = line.strip().split(delimiter)
                self.remembered[key] = self.ItemClass(saved_value)
            except ValueError:
                # Malformed record; skip it and rewrite a clean file below.
                read_error = True
                LOG.warning("Invalid line in %(file)s, "
                            "ignoring: %(line)s",
                            {'file': state_file, 'line': line})
        # Items already recorded on disk must not be handed out again.
        self.pool.difference_update(self.remembered.values())
        if read_error:
            LOG.debug("Re-writing file %s due to read error", state_file)
            self._write_allocations()

    def lookup(self, key):
        """Try to lookup an item of ItemClass type.

        See if there are any current or remembered allocations for the key.
        Returns None when the key is unknown.
        """
        if key in self.allocations:
            return self.allocations[key]

        if key in self.remembered:
            # Promote a remembered (on-disk) allocation to an active one.
            self.allocations[key] = self.remembered.pop(key)
            return self.allocations[key]

    def allocate(self, key):
        """Try to allocate an item of ItemClass type.

        I expect this to work in all cases because I expect the pool size to be
        large enough for any situation. Nonetheless, there is some defensive
        programming in here.

        Since the allocations are persisted, there is the chance to leak
        allocations which should have been released but were not. This leak
        could eventually exhaust the pool.

        So, if a new allocation is needed, the code first checks to see if
        there are any remembered allocations for the key. If not, it checks
        the free pool. If the free pool is empty then it dumps the remembered
        allocations to free the pool. This final desperate step will not
        happen often in practice.
        """
        entry = self.lookup(key)
        # Bug fix: compare against None rather than truthiness, otherwise a
        # legitimately falsy item (e.g. 0) would be re-allocated from the pool
        # instead of being reused.
        if entry is not None:
            return entry

        if not self.pool:
            # Desperate times. Try to get more in the pool.
            self.pool.update(self.remembered.values())
            self.remembered.clear()
            if not self.pool:
                # The number of address pairs allocated from the
                # pool depends upon the prefix length specified
                # in DVR_FIP_LL_CIDR
                raise RuntimeError(_("Cannot allocate item of type: "
                                     "%(class)s from pool using file %(file)s")
                                   % {'class': self.ItemClass,
                                      'file': self.state_file})

        self.allocations[key] = self.pool.pop()
        self._write_allocations()
        return self.allocations[key]

    def release(self, key):
        """Return the item allocated to ``key`` to the pool and persist."""
        # Bug fix: ``is not None`` so falsy items (e.g. 0) are also released.
        if self.lookup(key) is not None:
            self.pool.add(self.allocations.pop(key))
            self._write_allocations()

    def _write_allocations(self):
        """Persist both active and remembered allocations to the state file."""
        current = ["%s,%s\n" % (k, v) for k, v in self.allocations.items()]
        remembered = ["%s,%s\n" % (k, v) for k, v in self.remembered.items()]
        current.extend(remembered)
        self._write(current)

    def _write(self, lines):
        with open(self.state_file, "w") as f:
            f.writelines(lines)

    def _read(self):
        # Missing file simply means no prior allocations.
        if not os.path.exists(self.state_file):
            return []
        with open(self.state_file) as f:
            return f.readlines()
| noironetworks/neutron | neutron/agent/l3/item_allocator.py | Python | apache-2.0 | 5,147 |
"""
Form Widget classes specific to the Django admin site.
"""
from __future__ import unicode_literals
import copy
from django import forms
from django.contrib.admin.templatetags.admin_static import static
from django.core.urlresolvers import reverse
from django.forms.widgets import RadioFieldRenderer
from django.forms.utils import flatatt
from django.utils.html import escape, format_html, format_html_join, smart_urlquote
from django.utils.text import Truncator
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
from django.utils.encoding import force_text
from django.utils import six
class FilteredSelectMultiple(forms.SelectMultiple):
    """
    A SelectMultiple with a JavaScript filter interface.

    Note that the resulting JavaScript assumes that the jsi18n
    catalog has been loaded in the page
    """
    @property
    def media(self):
        # JS assets implementing the two-box filter widget.
        js = ["core.js", "SelectBox.js", "SelectFilter2.js"]
        return forms.Media(js=[static("admin/js/%s" % path) for path in js])

    def __init__(self, verbose_name, is_stacked, attrs=None, choices=()):
        # verbose_name: label shown by the JS widget header.
        # is_stacked: stack the two boxes vertically when True.
        self.verbose_name = verbose_name
        self.is_stacked = is_stacked
        super(FilteredSelectMultiple, self).__init__(attrs, choices)

    def render(self, name, value, attrs=None, choices=()):
        if attrs is None:
            attrs = {}
        # NOTE(review): the stacked variant concatenates to
        # "selectfilterstacked" (no separator); this looks intentional as a
        # CSS/JS hook name -- confirm against SelectFilter2.js before changing.
        attrs['class'] = 'selectfilter'
        if self.is_stacked:
            attrs['class'] += 'stacked'
        output = [super(FilteredSelectMultiple, self).render(name, value, attrs, choices)]
        output.append('<script type="text/javascript">addEvent(window, "load", function(e) {')
        # TODO: "id_" is hard-coded here. This should instead use the correct
        # API to determine the ID dynamically.
        output.append('SelectFilter.init("id_%s", "%s", %s, "%s"); });</script>\n'
            % (name, self.verbose_name.replace('"', '\\"'), int(self.is_stacked), static('admin/')))
        return mark_safe(''.join(output))
class AdminDateWidget(forms.DateInput):
    """Date input wired to the admin calendar JavaScript helpers."""

    @property
    def media(self):
        scripts = ["calendar.js", "admin/DateTimeShortcuts.js"]
        return forms.Media(js=[static("admin/js/%s" % s) for s in scripts])

    def __init__(self, attrs=None, format=None):
        # Merge caller attrs over the admin defaults.
        defaults = {'class': 'vDateField', 'size': '10'}
        defaults.update(attrs or {})
        super(AdminDateWidget, self).__init__(attrs=defaults, format=format)
class AdminTimeWidget(forms.TimeInput):
    """Time input wired to the admin clock JavaScript helpers."""

    @property
    def media(self):
        scripts = ["calendar.js", "admin/DateTimeShortcuts.js"]
        return forms.Media(js=[static("admin/js/%s" % s) for s in scripts])

    def __init__(self, attrs=None, format=None):
        # Merge caller attrs over the admin defaults.
        defaults = {'class': 'vTimeField', 'size': '8'}
        defaults.update(attrs or {})
        super(AdminTimeWidget, self).__init__(attrs=defaults, format=format)
class AdminSplitDateTime(forms.SplitDateTimeWidget):
    """
    A SplitDateTime Widget that has some admin-specific styling.
    """
    def __init__(self, attrs=None):
        # Use the admin-styled date/time sub-widgets.
        widgets = [AdminDateWidget, AdminTimeWidget]
        # Note that we're calling MultiWidget, not SplitDateTimeWidget, because
        # we want to define widgets.
        forms.MultiWidget.__init__(self, widgets, attrs)

    def format_output(self, rendered_widgets):
        # Lay out the two sub-widgets with localized "Date:"/"Time:" labels.
        return format_html('<p class="datetime">{0} {1}<br />{2} {3}</p>',
                           _('Date:'), rendered_widgets[0],
                           _('Time:'), rendered_widgets[1])
class AdminRadioFieldRenderer(RadioFieldRenderer):
    """Renderer that emits the radio choices as a styled <ul> list."""
    def render(self):
        """Outputs a <ul> for this set of radio fields."""
        return format_html('<ul{0}>\n{1}\n</ul>',
                           flatatt(self.attrs),
                           format_html_join('\n', '<li>{0}</li>',
                                            ((force_text(w),) for w in self)))
class AdminRadioSelect(forms.RadioSelect):
    # Swap in the admin renderer so options appear as a <ul> list.
    renderer = AdminRadioFieldRenderer
class AdminFileWidget(forms.ClearableFileInput):
    """ClearableFileInput wrapped in admin-specific markup hooks."""
    # Wrap the stock templates so admin CSS can target the upload controls.
    template_with_initial = ('<p class="file-upload">%s</p>'
                             % forms.ClearableFileInput.template_with_initial)
    template_with_clear = ('<span class="clearable-file-input">%s</span>'
                           % forms.ClearableFileInput.template_with_clear)
def url_params_from_lookup_dict(lookups):
    """
    Converts the type of lookups specified in a ForeignKey limit_choices_to
    attribute to a dictionary of query parameters
    """
    params = {}
    if lookups and hasattr(lookups, 'items'):
        for key, value in lookups.items():
            # Callables are evaluated first so their result is serialized.
            if callable(value):
                value = value()
            if isinstance(value, (tuple, list)):
                value = ','.join(str(x) for x in value)
            elif isinstance(value, bool):
                # See django.db.fields.BooleanField.get_prep_lookup
                value = ('0', '1')[value]
            else:
                value = six.text_type(value)
            params[key] = value
    return params
class ForeignKeyRawIdWidget(forms.TextInput):
    """
    A Widget for displaying ForeignKeys in the "raw_id" interface rather than
    in a <select> box.
    """
    def __init__(self, rel, admin_site, attrs=None, using=None):
        # rel: the ForeignKey's relation descriptor.
        # using: optional database alias for label lookups.
        self.rel = rel
        self.admin_site = admin_site
        self.db = using
        super(ForeignKeyRawIdWidget, self).__init__(attrs)

    def render(self, name, value, attrs=None):
        rel_to = self.rel.to
        if attrs is None:
            attrs = {}
        extra = []
        if rel_to in self.admin_site._registry:
            # The related object is registered with the same AdminSite
            related_url = reverse('admin:%s_%s_changelist' %
                                  (rel_to._meta.app_label,
                                   rel_to._meta.model_name),
                                  current_app=self.admin_site.name)

            params = self.url_parameters()
            if params:
                url = '?' + '&'.join('%s=%s' % (k, v) for k, v in params.items())
            else:
                url = ''
            if "class" not in attrs:
                attrs['class'] = 'vForeignKeyRawIdAdminField'  # The JavaScript code looks for this hook.
            # TODO: "lookup_id_" is hard-coded here. This should instead use
            # the correct API to determine the ID dynamically.
            extra.append('<a href="%s%s" class="related-lookup" id="lookup_id_%s" onclick="return showRelatedObjectLookupPopup(this);"> '
                         % (related_url, url, name))
            extra.append('<img src="%s" width="16" height="16" alt="%s" /></a>'
                         % (static('admin/img/selector-search.gif'), _('Lookup')))
        output = [super(ForeignKeyRawIdWidget, self).render(name, value, attrs)] + extra
        if value:
            # Show a human-readable label for the currently selected pk.
            output.append(self.label_for_value(value))
        return mark_safe(''.join(output))

    def base_url_parameters(self):
        # Translate limit_choices_to into changelist query parameters.
        return url_params_from_lookup_dict(self.rel.limit_choices_to)

    def url_parameters(self):
        from django.contrib.admin.views.main import TO_FIELD_VAR
        params = self.base_url_parameters()
        params.update({TO_FIELD_VAR: self.rel.get_related_field().name})
        return params

    def label_for_value(self, value):
        key = self.rel.get_related_field().name
        try:
            obj = self.rel.to._default_manager.using(self.db).get(**{key: value})
            return ' <strong>%s</strong>' % escape(Truncator(obj).words(14, truncate='...'))
        except (ValueError, self.rel.to.DoesNotExist):
            # Invalid pk format or nonexistent object: show no label.
            return ''
class ManyToManyRawIdWidget(ForeignKeyRawIdWidget):
    """
    A Widget for displaying ManyToMany ids in the "raw_id" interface rather than
    in a <select multiple> box.
    """
    def render(self, name, value, attrs=None):
        if attrs is None:
            attrs = {}
        if self.rel.to in self.admin_site._registry:
            # The related object is registered with the same AdminSite
            attrs['class'] = 'vManyToManyRawIdAdminField'
        if value:
            # Serialize the list of pks as a comma-separated string.
            value = ','.join(force_text(v) for v in value)
        else:
            value = ''
        return super(ManyToManyRawIdWidget, self).render(name, value, attrs)

    def url_parameters(self):
        # Unlike the FK widget, no TO_FIELD_VAR is added for m2m lookups.
        return self.base_url_parameters()

    def label_for_value(self, value):
        # No inline label is rendered for multiple selections.
        return ''

    def value_from_datadict(self, data, files, name):
        value = data.get(name)
        if value:
            return value.split(',')
        # NOTE(review): implicitly returns None when the field is absent or
        # empty -- callers appear to rely on that; confirm before changing.
class RelatedFieldWidgetWrapper(forms.Widget):
    """
    This class is a wrapper to a given widget to add the add icon for the
    admin interface.
    """
    def __init__(self, widget, rel, admin_site, can_add_related=None):
        # Mirror the wrapped widget's rendering-relevant attributes.
        self.is_hidden = widget.is_hidden
        self.needs_multipart_form = widget.needs_multipart_form
        self.attrs = widget.attrs
        self.choices = widget.choices
        self.widget = widget
        self.rel = rel
        # Backwards compatible check for whether a user can add related
        # objects.
        if can_add_related is None:
            can_add_related = rel.to in admin_site._registry
        self.can_add_related = can_add_related
        # so we can check if the related object is registered with this AdminSite
        self.admin_site = admin_site

    def __deepcopy__(self, memo):
        # Deep-copy only the wrapped widget; share everything else.
        obj = copy.copy(self)
        obj.widget = copy.deepcopy(self.widget, memo)
        obj.attrs = self.widget.attrs
        memo[id(self)] = obj
        return obj

    @property
    def media(self):
        return self.widget.media

    def render(self, name, value, *args, **kwargs):
        from django.contrib.admin.views.main import TO_FIELD_VAR
        rel_to = self.rel.to
        info = (rel_to._meta.app_label, rel_to._meta.model_name)
        self.widget.choices = self.choices
        output = [self.widget.render(name, value, *args, **kwargs)]
        if self.can_add_related:
            # Append the green "+" popup link next to the wrapped widget.
            related_url = reverse('admin:%s_%s_add' % info, current_app=self.admin_site.name)
            url_params = '?%s=%s' % (TO_FIELD_VAR, self.rel.get_related_field().name)
            # TODO: "add_id_" is hard-coded here. This should instead use the
            # correct API to determine the ID dynamically.
            output.append('<a href="%s%s" class="add-another" id="add_id_%s" onclick="return showAddAnotherPopup(this);"> '
                          % (related_url, url_params, name))
            output.append('<img src="%s" width="10" height="10" alt="%s"/></a>'
                          % (static('admin/img/icon_addlink.gif'), _('Add Another')))
        return mark_safe(''.join(output))

    def build_attrs(self, extra_attrs=None, **kwargs):
        "Helper function for building an attribute dictionary."
        # Bug fix: forward ``extra_attrs`` to the wrapped widget. Previously a
        # hard-coded ``None`` was passed, silently discarding any extra
        # attributes callers supplied.
        self.attrs = self.widget.build_attrs(extra_attrs=extra_attrs, **kwargs)
        return self.attrs

    def value_from_datadict(self, data, files, name):
        return self.widget.value_from_datadict(data, files, name)

    def id_for_label(self, id_):
        return self.widget.id_for_label(id_)
class AdminTextareaWidget(forms.Textarea):
    """Textarea carrying the admin ``vLargeTextField`` CSS class."""

    def __init__(self, attrs=None):
        defaults = {'class': 'vLargeTextField'}
        defaults.update(attrs or {})
        super(AdminTextareaWidget, self).__init__(attrs=defaults)
class AdminTextInputWidget(forms.TextInput):
    """Single-line text input carrying the admin ``vTextField`` CSS class."""

    def __init__(self, attrs=None):
        defaults = {'class': 'vTextField'}
        defaults.update(attrs or {})
        super(AdminTextInputWidget, self).__init__(attrs=defaults)
class AdminEmailInputWidget(forms.EmailInput):
    """Email input styled with the same ``vTextField`` class as text inputs."""

    def __init__(self, attrs=None):
        defaults = {'class': 'vTextField'}
        defaults.update(attrs or {})
        super(AdminEmailInputWidget, self).__init__(attrs=defaults)
class AdminURLFieldWidget(forms.URLInput):
    """URL input that also shows the current value as a clickable link."""
    def __init__(self, attrs=None):
        final_attrs = {'class': 'vURLField'}
        if attrs is not None:
            final_attrs.update(attrs)
        super(AdminURLFieldWidget, self).__init__(attrs=final_attrs)

    def render(self, name, value, attrs=None):
        html = super(AdminURLFieldWidget, self).render(name, value, attrs)
        if value:
            value = force_text(self._format_value(value))
            # Quote the URL for safe use in the href attribute.
            final_attrs = {'href': smart_urlquote(value)}
            html = format_html(
                '<p class="url">{0} <a{1}>{2}</a><br />{3} {4}</p>',
                _('Currently:'), flatatt(final_attrs), value,
                _('Change:'), html
            )
        return html
class AdminIntegerFieldWidget(forms.TextInput):
    """Text input for integers; subclasses override ``class_name``."""
    class_name = 'vIntegerField'

    def __init__(self, attrs=None):
        defaults = {'class': self.class_name}
        defaults.update(attrs or {})
        super(AdminIntegerFieldWidget, self).__init__(attrs=defaults)
class AdminBigIntegerFieldWidget(AdminIntegerFieldWidget):
    # Identical widget; only the CSS class differs for big integers.
    class_name = 'vBigIntegerField'
class AdminCommaSeparatedIntegerFieldWidget(forms.TextInput):
    """Text input styled for comma-separated integer values."""

    def __init__(self, attrs=None):
        defaults = {'class': 'vCommaSeparatedIntegerField'}
        defaults.update(attrs or {})
        super(AdminCommaSeparatedIntegerFieldWidget, self).__init__(attrs=defaults)
| ZhaoCJ/django | django/contrib/admin/widgets.py | Python | bsd-3-clause | 13,529 |
"""=============================
Subclassing ndarray in python
=============================
Introduction
------------
Subclassing ndarray is relatively simple, but it has some complications
compared to other Python objects. On this page we explain the machinery
that allows you to subclass ndarray, and the implications for
implementing a subclass.
ndarrays and object creation
============================
Subclassing ndarray is complicated by the fact that new instances of
ndarray classes can come about in three different ways. These are:
#. Explicit constructor call - as in ``MySubClass(params)``. This is
the usual route to Python instance creation.
#. View casting - casting an existing ndarray as a given subclass
#. New from template - creating a new instance from a template
instance. Examples include returning slices from a subclassed array,
creating return types from ufuncs, and copying arrays. See
:ref:`new-from-template` for more details
The last two are characteristics of ndarrays - in order to support
things like array slicing. The complications of subclassing ndarray are
due to the mechanisms numpy has to support these latter two routes of
instance creation.
.. _view-casting:
View casting
------------
*View casting* is the standard ndarray mechanism by which you take an
ndarray of any subclass, and return a view of the array as another
(specified) subclass:
>>> import numpy as np
>>> # create a completely useless ndarray subclass
>>> class C(np.ndarray): pass
>>> # create a standard ndarray
>>> arr = np.zeros((3,))
>>> # take a view of it, as our useless subclass
>>> c_arr = arr.view(C)
>>> type(c_arr)
<class 'C'>
.. _new-from-template:
Creating new from template
--------------------------
New instances of an ndarray subclass can also come about by a very
similar mechanism to :ref:`view-casting`, when numpy finds it needs to
create a new instance from a template instance. The most obvious place
this has to happen is when you are taking slices of subclassed arrays.
For example:
>>> v = c_arr[1:]
>>> type(v) # the view is of type 'C'
<class 'C'>
>>> v is c_arr # but it's a new instance
False
The slice is a *view* onto the original ``c_arr`` data. So, when we
take a view from the ndarray, we return a new ndarray, of the same
class, that points to the data in the original.
There are other points in the use of ndarrays where we need such views,
such as copying arrays (``c_arr.copy()``), creating ufunc output arrays
(see also :ref:`array-wrap`), and reducing methods (like
``c_arr.mean()``).
Relationship of view casting and new-from-template
--------------------------------------------------
These paths both use the same machinery. We make the distinction here,
because they result in different input to your methods. Specifically,
:ref:`view-casting` means you have created a new instance of your array
type from any potential subclass of ndarray. :ref:`new-from-template`
means you have created a new instance of your class from a pre-existing
instance, allowing you - for example - to copy across attributes that
are particular to your subclass.
Implications for subclassing
----------------------------
If we subclass ndarray, we need to deal not only with explicit
construction of our array type, but also :ref:`view-casting` or
:ref:`new-from-template`. NumPy has the machinery to do this, and this
machinery that makes subclassing slightly non-standard.
There are two aspects to the machinery that ndarray uses to support
views and new-from-template in subclasses.
The first is the use of the ``ndarray.__new__`` method for the main work
of object initialization, rather than the more usual ``__init__``
method. The second is the use of the ``__array_finalize__`` method to
allow subclasses to clean up after the creation of views and new
instances from templates.
A brief Python primer on ``__new__`` and ``__init__``
=====================================================
``__new__`` is a standard Python method, and, if present, is called
before ``__init__`` when we create a class instance. See the `python
__new__ documentation
<https://docs.python.org/reference/datamodel.html#object.__new__>`_ for more detail.
For example, consider the following Python code:
.. testcode::
class C(object):
def __new__(cls, *args):
print('Cls in __new__:', cls)
print('Args in __new__:', args)
# The `object` type __new__ method takes a single argument.
return object.__new__(cls)
def __init__(self, *args):
print('type(self) in __init__:', type(self))
print('Args in __init__:', args)
meaning that we get:
>>> c = C('hello')
Cls in __new__: <class 'C'>
Args in __new__: ('hello',)
type(self) in __init__: <class 'C'>
Args in __init__: ('hello',)
When we call ``C('hello')``, the ``__new__`` method gets its own class
as first argument, and the passed argument, which is the string
``'hello'``. After python calls ``__new__``, it usually (see below)
calls our ``__init__`` method, with the output of ``__new__`` as the
first argument (now a class instance), and the passed arguments
following.
As you can see, the object can be initialized in the ``__new__``
method or the ``__init__`` method, or both, and in fact ndarray does
not have an ``__init__`` method, because all the initialization is
done in the ``__new__`` method.
Why use ``__new__`` rather than just the usual ``__init__``? Because
in some cases, as for ndarray, we want to be able to return an object
of some other class. Consider the following:
.. testcode::
class D(C):
def __new__(cls, *args):
print('D cls is:', cls)
print('D args in __new__:', args)
return C.__new__(C, *args)
def __init__(self, *args):
# we never get here
print('In D __init__')
meaning that:
>>> obj = D('hello')
D cls is: <class 'D'>
D args in __new__: ('hello',)
Cls in __new__: <class 'C'>
Args in __new__: ('hello',)
>>> type(obj)
<class 'C'>
The definition of ``C`` is the same as before, but for ``D``, the
``__new__`` method returns an instance of class ``C`` rather than
``D``. Note that the ``__init__`` method of ``D`` does not get
called. In general, when the ``__new__`` method returns an object of
class other than the class in which it is defined, the ``__init__``
method of that class is not called.
This is how subclasses of the ndarray class are able to return views
that preserve the class type. When taking a view, the standard
ndarray machinery creates the new ndarray object with something
like::
obj = ndarray.__new__(subtype, shape, ...
where ``subtype`` is the subclass. Thus the returned view is of the
same class as the subclass, rather than being of class ``ndarray``.
That solves the problem of returning views of the same type, but now
we have a new problem. The machinery of ndarray can set the class
this way, in its standard methods for taking views, but the ndarray
``__new__`` method knows nothing of what we have done in our own
``__new__`` method in order to set attributes, and so on. (Aside -
why not call ``obj = subtype.__new__(...`` then? Because we may not
have a ``__new__`` method with the same call signature).
The role of ``__array_finalize__``
==================================
``__array_finalize__`` is the mechanism that numpy provides to allow
subclasses to handle the various ways that new instances get created.
Remember that subclass instances can come about in these three ways:
#. explicit constructor call (``obj = MySubClass(params)``). This will
call the usual sequence of ``MySubClass.__new__`` then (if it exists)
``MySubClass.__init__``.
#. :ref:`view-casting`
#. :ref:`new-from-template`
Our ``MySubClass.__new__`` method only gets called in the case of the
explicit constructor call, so we can't rely on ``MySubClass.__new__`` or
``MySubClass.__init__`` to deal with the view casting and
new-from-template. It turns out that ``MySubClass.__array_finalize__``
*does* get called for all three methods of object creation, so this is
where our object creation housekeeping usually goes.
* For the explicit constructor call, our subclass will need to create a
new ndarray instance of its own class. In practice this means that
we, the authors of the code, will need to make a call to
``ndarray.__new__(MySubClass,...)``, a class-hierarchy prepared call to
``super(MySubClass, cls).__new__(cls, ...)``, or do view casting of an
existing array (see below)
* For view casting and new-from-template, the equivalent of
``ndarray.__new__(MySubClass,...`` is called, at the C level.
The arguments that ``__array_finalize__`` receives differ for the three
methods of instance creation above.
The following code allows us to look at the call sequences and arguments:
.. testcode::
import numpy as np
class C(np.ndarray):
def __new__(cls, *args, **kwargs):
print('In __new__ with class %s' % cls)
return super(C, cls).__new__(cls, *args, **kwargs)
def __init__(self, *args, **kwargs):
# in practice you probably will not need or want an __init__
# method for your subclass
print('In __init__ with class %s' % self.__class__)
def __array_finalize__(self, obj):
print('In array_finalize:')
print(' self type is %s' % type(self))
print(' obj type is %s' % type(obj))
Now:
>>> # Explicit constructor
>>> c = C((10,))
In __new__ with class <class 'C'>
In array_finalize:
self type is <class 'C'>
obj type is <type 'NoneType'>
In __init__ with class <class 'C'>
>>> # View casting
>>> a = np.arange(10)
>>> cast_a = a.view(C)
In array_finalize:
self type is <class 'C'>
obj type is <type 'numpy.ndarray'>
>>> # Slicing (example of new-from-template)
>>> cv = c[:1]
In array_finalize:
self type is <class 'C'>
obj type is <class 'C'>
The signature of ``__array_finalize__`` is::
def __array_finalize__(self, obj):
One sees that the ``super`` call, which goes to
``ndarray.__new__``, passes ``__array_finalize__`` the new object, of our
own class (``self``) as well as the object from which the view has been
taken (``obj``). As you can see from the output above, the ``self`` is
always a newly created instance of our subclass, and the type of ``obj``
differs for the three instance creation methods:
* When called from the explicit constructor, ``obj`` is ``None``
* When called from view casting, ``obj`` can be an instance of any
subclass of ndarray, including our own.
* When called in new-from-template, ``obj`` is another instance of our
own subclass, that we might use to update the new ``self`` instance.
Because ``__array_finalize__`` is the only method that always sees new
instances being created, it is the sensible place to fill in instance
defaults for new object attributes, among other tasks.
This may be clearer with an example.
Simple example - adding an extra attribute to ndarray
-----------------------------------------------------
.. testcode::
import numpy as np
class InfoArray(np.ndarray):
def __new__(subtype, shape, dtype=float, buffer=None, offset=0,
strides=None, order=None, info=None):
# Create the ndarray instance of our type, given the usual
# ndarray input arguments. This will call the standard
# ndarray constructor, but return an object of our type.
# It also triggers a call to InfoArray.__array_finalize__
obj = super(InfoArray, subtype).__new__(subtype, shape, dtype,
buffer, offset, strides,
order)
# set the new 'info' attribute to the value passed
obj.info = info
# Finally, we must return the newly created object:
return obj
def __array_finalize__(self, obj):
# ``self`` is a new object resulting from
# ndarray.__new__(InfoArray, ...), therefore it only has
# attributes that the ndarray.__new__ constructor gave it -
# i.e. those of a standard ndarray.
#
# We could have got to the ndarray.__new__ call in 3 ways:
# From an explicit constructor - e.g. InfoArray():
# obj is None
# (we're in the middle of the InfoArray.__new__
# constructor, and self.info will be set when we return to
# InfoArray.__new__)
if obj is None: return
# From view casting - e.g arr.view(InfoArray):
# obj is arr
# (type(obj) can be InfoArray)
# From new-from-template - e.g infoarr[:3]
# type(obj) is InfoArray
#
# Note that it is here, rather than in the __new__ method,
# that we set the default value for 'info', because this
# method sees all creation of default objects - with the
# InfoArray.__new__ constructor, but also with
# arr.view(InfoArray).
self.info = getattr(obj, 'info', None)
# We do not need to return anything
Using the object looks like this:
>>> obj = InfoArray(shape=(3,)) # explicit constructor
>>> type(obj)
<class 'InfoArray'>
>>> obj.info is None
True
>>> obj = InfoArray(shape=(3,), info='information')
>>> obj.info
'information'
>>> v = obj[1:] # new-from-template - here - slicing
>>> type(v)
<class 'InfoArray'>
>>> v.info
'information'
>>> arr = np.arange(10)
>>> cast_arr = arr.view(InfoArray) # view casting
>>> type(cast_arr)
<class 'InfoArray'>
>>> cast_arr.info is None
True
This class isn't very useful, because it has the same constructor as the
bare ndarray object, including passing in buffers and shapes and so on.
We would probably prefer the constructor to be able to take an already
formed ndarray from the usual numpy calls to ``np.array`` and return an
object.
Slightly more realistic example - attribute added to existing array
-------------------------------------------------------------------
Here is a class that takes a standard ndarray that already exists, casts
as our type, and adds an extra attribute.
.. testcode::
import numpy as np
class RealisticInfoArray(np.ndarray):
def __new__(cls, input_array, info=None):
# Input array is an already formed ndarray instance
# We first cast to be our class type
obj = np.asarray(input_array).view(cls)
# add the new attribute to the created instance
obj.info = info
# Finally, we must return the newly created object:
return obj
def __array_finalize__(self, obj):
# see InfoArray.__array_finalize__ for comments
if obj is None: return
self.info = getattr(obj, 'info', None)
So:
>>> arr = np.arange(5)
>>> obj = RealisticInfoArray(arr, info='information')
>>> type(obj)
<class 'RealisticInfoArray'>
>>> obj.info
'information'
>>> v = obj[1:]
>>> type(v)
<class 'RealisticInfoArray'>
>>> v.info
'information'
.. _array-ufunc:
``__array_ufunc__`` for ufuncs
------------------------------
.. versionadded:: 1.13
A subclass can override what happens when executing numpy ufuncs on it by
overriding the default ``ndarray.__array_ufunc__`` method. This method is
executed *instead* of the ufunc and should return either the result of the
operation, or :obj:`NotImplemented` if the operation requested is not
implemented.
The signature of ``__array_ufunc__`` is::
def __array_ufunc__(ufunc, method, *inputs, **kwargs):
- *ufunc* is the ufunc object that was called.
- *method* is a string indicating how the Ufunc was called, either
``"__call__"`` to indicate it was called directly, or one of its
:ref:`methods<ufuncs.methods>`: ``"reduce"``, ``"accumulate"``,
``"reduceat"``, ``"outer"``, or ``"at"``.
- *inputs* is a tuple of the input arguments to the ``ufunc``
- *kwargs* contains any optional or keyword arguments passed to the
function. This includes any ``out`` arguments, which are always
contained in a tuple.
A typical implementation would convert any inputs or outputs that are
instances of one's own class, pass everything on to a superclass using
``super()``, and finally return the results after possible
back-conversion. An example, taken from the test case
``test_ufunc_override_with_super`` in ``core/tests/test_umath.py``, is the
following.
.. testcode::
import numpy as np
class A(np.ndarray):
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
args = []
in_no = []
for i, input_ in enumerate(inputs):
if isinstance(input_, A):
in_no.append(i)
args.append(input_.view(np.ndarray))
else:
args.append(input_)
outputs = kwargs.pop('out', None)
out_no = []
if outputs:
out_args = []
for j, output in enumerate(outputs):
if isinstance(output, A):
out_no.append(j)
out_args.append(output.view(np.ndarray))
else:
out_args.append(output)
kwargs['out'] = tuple(out_args)
else:
outputs = (None,) * ufunc.nout
info = {}
if in_no:
info['inputs'] = in_no
if out_no:
info['outputs'] = out_no
results = super(A, self).__array_ufunc__(ufunc, method,
*args, **kwargs)
if results is NotImplemented:
return NotImplemented
if method == 'at':
if isinstance(inputs[0], A):
inputs[0].info = info
return
if ufunc.nout == 1:
results = (results,)
results = tuple((np.asarray(result).view(A)
if output is None else output)
for result, output in zip(results, outputs))
if results and isinstance(results[0], A):
results[0].info = info
return results[0] if len(results) == 1 else results
So, this class does not actually do anything interesting: it just
converts any instances of its own to regular ndarray (otherwise, we'd
get infinite recursion!), and adds an ``info`` dictionary that tells
which inputs and outputs it converted. Hence, e.g.,
>>> a = np.arange(5.).view(A)
>>> b = np.sin(a)
>>> b.info
{'inputs': [0]}
>>> b = np.sin(np.arange(5.), out=(a,))
>>> b.info
{'outputs': [0]}
>>> a = np.arange(5.).view(A)
>>> b = np.ones(1).view(A)
>>> c = a + b
>>> c.info
{'inputs': [0, 1]}
>>> a += b
>>> a.info
{'inputs': [0, 1], 'outputs': [0]}
Note that another approach would be to use ``getattr(ufunc,
method)(*inputs, **kwargs)`` instead of the ``super`` call. For this example,
the result would be identical, but there is a difference if another operand
also defines ``__array_ufunc__``. E.g., let's assume that we evaluate
``np.add(a, b)``, where ``b`` is an instance of another class ``B`` that has
an override. If you use ``super`` as in the example,
``ndarray.__array_ufunc__`` will notice that ``b`` has an override, which
means it cannot evaluate the result itself. Thus, it will return
`NotImplemented` and so will our class ``A``. Then, control will be passed
over to ``b``, which either knows how to deal with us and produces a result,
or does not and returns `NotImplemented`, raising a ``TypeError``.
If instead, we replace our ``super`` call with ``getattr(ufunc, method)``, we
effectively do ``np.add(a.view(np.ndarray), b)``. Again, ``B.__array_ufunc__``
will be called, but now it sees an ``ndarray`` as the other argument. Likely,
it will know how to handle this, and return a new instance of the ``B`` class
to us. Our example class is not set up to handle this, but it might well be
the best approach if, e.g., one were to re-implement ``MaskedArray`` using
``__array_ufunc__``.
As a final note: if the ``super`` route is suited to a given class, an
advantage of using it is that it helps in constructing class hierarchies.
E.g., suppose that our other class ``B`` also used the ``super`` in its
``__array_ufunc__`` implementation, and we created a class ``C`` that depended
on both, i.e., ``class C(A, B)`` (with, for simplicity, not another
``__array_ufunc__`` override). Then any ufunc on an instance of ``C`` would
pass on to ``A.__array_ufunc__``, the ``super`` call in ``A`` would go to
``B.__array_ufunc__``, and the ``super`` call in ``B`` would go to
``ndarray.__array_ufunc__``, thus allowing ``A`` and ``B`` to collaborate.
.. _array-wrap:
``__array_wrap__`` for ufuncs and other functions
-------------------------------------------------
Prior to numpy 1.13, the behaviour of ufuncs could only be tuned using
``__array_wrap__`` and ``__array_prepare__``. These two allowed one to
change the output type of a ufunc, but, in contrast to
``__array_ufunc__``, did not allow one to make any changes to the inputs.
It is hoped to eventually deprecate these, but ``__array_wrap__`` is also
used by other numpy functions and methods, such as ``squeeze``, so at the
present time is still needed for full functionality.
Conceptually, ``__array_wrap__`` "wraps up the action" in the sense of
allowing a subclass to set the type of the return value and update
attributes and metadata. Let's show how this works with an example. First
we return to the simpler example subclass, but with a different name and
some print statements:
.. testcode::
import numpy as np
class MySubClass(np.ndarray):
def __new__(cls, input_array, info=None):
obj = np.asarray(input_array).view(cls)
obj.info = info
return obj
def __array_finalize__(self, obj):
print('In __array_finalize__:')
print(' self is %s' % repr(self))
print(' obj is %s' % repr(obj))
if obj is None: return
self.info = getattr(obj, 'info', None)
def __array_wrap__(self, out_arr, context=None):
print('In __array_wrap__:')
print(' self is %s' % repr(self))
print(' arr is %s' % repr(out_arr))
# then just call the parent
return super(MySubClass, self).__array_wrap__(out_arr, context)
We run a ufunc on an instance of our new array:
>>> obj = MySubClass(np.arange(5), info='spam')
In __array_finalize__:
self is MySubClass([0, 1, 2, 3, 4])
obj is array([0, 1, 2, 3, 4])
>>> arr2 = np.arange(5)+1
>>> ret = np.add(arr2, obj)
In __array_wrap__:
self is MySubClass([0, 1, 2, 3, 4])
arr is array([1, 3, 5, 7, 9])
In __array_finalize__:
self is MySubClass([1, 3, 5, 7, 9])
obj is MySubClass([0, 1, 2, 3, 4])
>>> ret
MySubClass([1, 3, 5, 7, 9])
>>> ret.info
'spam'
Note that the ufunc (``np.add``) has called the ``__array_wrap__`` method
with arguments ``self`` as ``obj``, and ``out_arr`` as the (ndarray) result
of the addition. In turn, the default ``__array_wrap__``
(``ndarray.__array_wrap__``) has cast the result to class ``MySubClass``,
and called ``__array_finalize__`` - hence the copying of the ``info``
attribute. This has all happened at the C level.
But, we could do anything we wanted:
.. testcode::
class SillySubClass(np.ndarray):
def __array_wrap__(self, arr, context=None):
return 'I lost your data'
>>> arr1 = np.arange(5)
>>> obj = arr1.view(SillySubClass)
>>> arr2 = np.arange(5)
>>> ret = np.multiply(obj, arr2)
>>> ret
'I lost your data'
So, by defining a specific ``__array_wrap__`` method for our subclass,
we can tweak the output from ufuncs. The ``__array_wrap__`` method
requires ``self``, then an argument - which is the result of the ufunc -
and an optional parameter *context*. This parameter is returned by
ufuncs as a 3-element tuple: (name of the ufunc, arguments of the ufunc,
domain of the ufunc), but is not set by other numpy functions. Though,
as seen above, it is possible to do otherwise, ``__array_wrap__`` should
return an instance of its containing class. See the masked array
subclass for an implementation.
In addition to ``__array_wrap__``, which is called on the way out of the
ufunc, there is also an ``__array_prepare__`` method which is called on
the way into the ufunc, after the output arrays are created but before any
computation has been performed. The default implementation does nothing
but pass through the array. ``__array_prepare__`` should not attempt to
access the array data or resize the array, it is intended for setting the
output array type, updating attributes and metadata, and performing any
checks based on the input that may be desired before computation begins.
Like ``__array_wrap__``, ``__array_prepare__`` must return an ndarray or
subclass thereof or raise an error.
Extra gotchas - custom ``__del__`` methods and ndarray.base
-----------------------------------------------------------
One of the problems that ndarray solves is keeping track of memory
ownership of ndarrays and their views. Consider the case where we have
created an ndarray, ``arr`` and have taken a slice with ``v = arr[1:]``.
The two objects are looking at the same memory. NumPy keeps track of
where the data came from for a particular array or view, with the
``base`` attribute:
>>> # A normal ndarray, that owns its own data
>>> arr = np.zeros((4,))
>>> # In this case, base is None
>>> arr.base is None
True
>>> # We take a view
>>> v1 = arr[1:]
>>> # base now points to the array that it derived from
>>> v1.base is arr
True
>>> # Take a view of a view
>>> v2 = v1[1:]
>>> # base points to the view it derived from
>>> v2.base is v1
True
In general, if the array owns its own memory, as for ``arr`` in this
case, then ``arr.base`` will be None - there are some exceptions to this
- see the numpy book for more details.
The ``base`` attribute is useful in being able to tell whether we have
a view or the original array. This in turn can be useful if we need
to know whether or not to do some specific cleanup when the subclassed
array is deleted. For example, we may only want to do the cleanup if
the original array is deleted, but not the views. For an example of
how this can work, have a look at the ``memmap`` class in
``numpy.core``.
Subclassing and Downstream Compatibility
----------------------------------------
When sub-classing ``ndarray`` or creating duck-types that mimic the ``ndarray``
interface, it is your responsibility to decide how aligned your APIs will be
with those of numpy. For convenience, many numpy functions that have a corresponding
``ndarray`` method (e.g., ``sum``, ``mean``, ``take``, ``reshape``) work by checking
if the first argument to a function has a method of the same name. If it exists, the
method is called instead of coercing the arguments to a numpy array.
For example, if you want your sub-class or duck-type to be compatible with
numpy's ``sum`` function, the method signature for this object's ``sum`` method
should be the following:
.. testcode::
def sum(self, axis=None, dtype=None, out=None, keepdims=False):
...
This is the exact same method signature for ``np.sum``, so now if a user calls
``np.sum`` on this object, numpy will call the object's own ``sum`` method and
pass in these arguments enumerated above in the signature, and no errors will
be raised because the signatures are completely compatible with each other.
If, however, you decide to deviate from this signature and do something like this:
.. testcode::
def sum(self, axis=None, dtype=None):
...
This object is no longer compatible with ``np.sum`` because if you call ``np.sum``,
it will pass in unexpected arguments ``out`` and ``keepdims``, causing a TypeError
to be raised.
If you wish to maintain compatibility with numpy and its subsequent versions (which
might add new keyword arguments) but do not want to surface all of numpy's arguments,
your function's signature should accept ``**kwargs``. For example:
.. testcode::
def sum(self, axis=None, dtype=None, **unused_kwargs):
...
This object is now compatible with ``np.sum`` again because any extraneous arguments
(i.e. keywords that are not ``axis`` or ``dtype``) will be hidden away in the
``**unused_kwargs`` parameter.
"""
from __future__ import division, absolute_import, print_function
| MSeifert04/numpy | numpy/doc/subclassing.py | Python | bsd-3-clause | 28,624 |
# Created On: 2010-06-02
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtWidgets import QWidget, QHBoxLayout, QRadioButton
from .util import horizontalSpacer
class RadioBox(QWidget):
    """A horizontal row of radio buttons built from a list of string labels.

    The widget keeps ``items`` (the labels) and ``selected_index`` in sync
    with the underlying ``QRadioButton`` instances, and emits
    ``itemSelected(int)`` with the index of the button that becomes checked.
    """
    def __init__(self, parent=None, items=None, spread=True, **kwargs):
        # If spread is False, insert a spacer in the layout so that the items don't use all the
        # space they're given but rather align left.
        if items is None:
            items = []
        super().__init__(parent, **kwargs)
        self._buttons = []
        self._labels = items
        self._selected_index = 0
        self._spacer = horizontalSpacer() if not spread else None
        self._layout = QHBoxLayout(self)
        self._update_buttons()

    #--- Private
    def _update_buttons(self):
        # Grow or shrink the button row so it matches self._labels, then
        # relabel every button and re-apply the selection.
        if self._spacer is not None:
            self._layout.removeItem(self._spacer)
        # Remove surplus buttons (labels list shrank).
        to_remove = self._buttons[len(self._labels):]
        for button in to_remove:
            self._layout.removeWidget(button)
            button.setParent(None)
        del self._buttons[len(self._labels):]
        # Create missing buttons (labels list grew).
        to_add = self._labels[len(self._buttons):]
        for _ in to_add:
            button = QRadioButton(self)
            self._buttons.append(button)
            self._layout.addWidget(button)
            button.toggled.connect(self.buttonToggled)
        if self._spacer is not None:
            self._layout.addItem(self._spacer)
        if not self._buttons:
            return
        for button, label in zip(self._buttons, self._labels):
            button.setText(label)
        self._update_selection()

    def _update_selection(self):
        # Clamp the selected index into the valid range and check that button.
        if not self._buttons:
            # No buttons yet (empty items). Bail out instead of raising
            # IndexError when selected_index is assigned before items is set.
            return
        self._selected_index = max(0, min(self._selected_index, len(self._buttons)-1))
        selected = self._buttons[self._selected_index]
        selected.setChecked(True)

    #--- Event Handlers
    def buttonToggled(self):
        # toggled fires for both the unchecked and the newly checked button;
        # only the checked one updates the selection and emits the signal.
        for i, button in enumerate(self._buttons):
            if button.isChecked():
                self._selected_index = i
                self.itemSelected.emit(i)
                break

    #--- Signals
    itemSelected = pyqtSignal(int)

    #--- Properties
    @property
    def buttons(self):
        """Copy of the underlying QRadioButton list."""
        return self._buttons[:]

    @property
    def items(self):
        """Copy of the current label list."""
        return self._labels[:]

    @items.setter
    def items(self, value):
        self._labels = value
        self._update_buttons()

    @property
    def selected_index(self):
        """Index of the currently checked button."""
        return self._selected_index

    @selected_index.setter
    def selected_index(self, value):
        self._selected_index = value
        self._update_selection()
| hsoft/qtlib | radio_box.py | Python | bsd-3-clause | 2,903 |
import numpy as np
import warnings
import operator
from heapq import merge
class intervals(object):
    r"""
    A finite union of real intervals (possibly unbounded), supporting
    membership tests, complement, union, intersection and translation.

    The set is stored in ``self._U`` as a sorted list of disjoint
    ``(inf, sup)`` tuples.
    """
    def __init__(self, I=None):
        """
        Create an intervals object from at most one interval.

        Parameters
        ----------
        I : tuple, optional
            A pair ``(inf, sup)``; when omitted, the empty set is created.
            If ``sup == inf`` the interval degenerates to a single point.

        Raises
        ------
        ValueError
            If ``sup < inf``.

        >>> I = intervals()
        >>> I2 = intervals((-1, 1))
        """
        if I is None:
            self._U = []
        else:
            ## Check that the interval is correct
            (inf, sup) = I
            if sup < inf:
                raise ValueError("The given tuple " + \
                        "does not represent an interval : " + repr(I))
            self._U = [I]

    def __call__(self, x):
        """
        Check if x is in the union of intervals.

        Parameters
        ----------
        x : float
            The point to test for membership.

        Returns
        -------
        is_in : bool
            True if x belongs to some interval, False otherwise.

        Examples
        --------
        >>> I = intervals()
        >>> I(2)
        False
        >>> I = intervals.intersection(intervals((-1, 6)), \
                                       intervals(( 0, 7)), \
                                       ~intervals((1, 4)))
        >>> x1, x2, x3, x4, x5 = 0.5, 1.5, 5, 6.5, 8
        >>> I(x1), I(x2), I(x3), I(x4), I(x5)
        (True, False, True, False, False)
        """
        return any(a <= x <= b for (a, b) in self)

    def __len__(self):
        """
        Return the number of connected intervals composing this instance.

        >>> I = intervals.intersection(intervals((-1, 6)), \
                                       intervals(( 0, 7)), \
                                       ~intervals((1, 4)))
        >>> len(I)
        2
        """
        return len(self._U)

    def __invert__(self):
        """
        Return the complement of the interval in the reals.

        >>> I = intervals.intersection(intervals((-1, 6)), \
                                       intervals(( 0, 7)), \
                                       ~intervals((1, 4)))
        >>> print(~I)
        [(-inf, 0), (1, 4), (6, inf)]
        """
        if len(self) == 0:
            return intervals((-np.inf, np.inf))
        inverse = intervals()
        # Left tail, gaps between consecutive components, right tail.
        a, _ = self._U[0]
        if a > -np.inf:
            inverse._U.append((-np.inf, a))
        for (a1, b1), (a2, b2) in zip(self._U[:-1], self._U[1:]):
            inverse._U.append((b1, a2))
        _, b = self._U[-1]
        if b < np.inf:
            inverse._U.append((b, np.inf))
        return inverse

    def __repr__(self):
        return repr(self._U)

    def __iter__(self):
        return iter(self._U)

    def __getitem__(self, index):
        return self._U[index]

    @staticmethod
    def union(*interv):
        """
        Return the union of all the given intervals.

        Parameters
        ----------
        interv1, ... : interv
            intervals instance

        Returns
        -------
        union, a new intervals instance, representing the union of interv1, ...

        >>> I = intervals.union(intervals((-np.inf, 0)), \
                                intervals((-1, 1)), \
                                intervals((3, 6)))
        >>> print(I)
        [(-inf, 1), (3, 6)]
        """
        ## Define the union of an empty family as an empty set
        union = intervals()
        if len(interv) == 0:
            return union
        # Sweep components in increasing order of left endpoint, merging
        # overlapping ones into a single component.
        old_a, old_b = None, None
        for new_a, new_b in merge(*interv):
            if old_b is not None and new_a < old_b:
                # Overlaps the component being built: extend it.
                old_b = max(old_b, new_b)
            elif old_b is None:
                # First component.
                old_a, old_b = new_a, new_b
            else:
                union._U.append((old_a, old_b))
                old_a, old_b = new_a, new_b
        # Guard: every argument may have been empty, leaving nothing to append.
        if old_b is not None:
            union._U.append((old_a, old_b))
        return union

    @staticmethod
    def intersection(*interv):
        """
        Return the intersection of all the given intervals.

        Parameters
        ----------
        interv1, ... : interv
            intervals instance

        Returns
        -------
        intersection, a new intervals instance, representing the intersection
        of interv1, ...

        >>> I = intervals.intersection(intervals((-1, 6)), \
                                       intervals(( 0, 7)), \
                                       ~intervals((1, 4)))
        >>> print(I)
        [(0, 1), (4, 6)]
        """
        if len(interv) == 0:
            # Intersection of an empty family: the whole real line.
            return ~intervals()
        # De Morgan: complement of the union of the complements.
        return ~(intervals.union(*(~I for I in interv)))

    def __add__(self, offset):
        """
        Add an offset to the intervals.

        Parameters
        ----------
        offset : float
            The offset added

        Returns
        -------
        interv : intervals
            a new instance, self + offset

        Examples
        --------
        >>> I = intervals.intersection(intervals((-1, 6)), \
                                       intervals(( 0, 7)), \
                                       ~intervals((1, 4)))
        >>> J = I+2
        >>> print(J)
        [(2, 3), (6, 8)]
        """
        interv = intervals()
        interv._U = [(a+offset, b+offset) for (a, b) in self._U]
        return interv
if __name__ == "__main__":
    # Run the doctests embedded in the docstrings above.
    import doctest
    doctest.testmod()
| selective-inference/selective-inference | selectinf/constraints/intervals.py | Python | bsd-3-clause | 6,279 |
import copy
import json
import os
import asyncio
import pytest
import webdriver
from urllib.parse import urlunsplit
from tests.support import defaults
from tests.support.helpers import cleanup_session, deep_update
from tests.support.inline import build_inline
from tests.support.http_request import HTTPRequest
# The webdriver session can outlive a pytest session
_current_session = None

# The event loop needs to outlive the webdriver session
_event_loop = None

# NOTE(review): not modified anywhere in this chunk; presumably toggled by
# tests that create their own session -- confirm before relying on it.
_custom_session = False
def pytest_configure(config):
    """Register the custom ``capabilities`` marker with pytest."""
    marker_description = "capabilities: mark test to use capabilities"
    config.addinivalue_line("markers", marker_description)
@pytest.fixture
def capabilities():
    """Capabilities requested for a new WebDriver session (none by default)."""
    return dict()
def pytest_generate_tests(metafunc):
    """Parametrize the ``capabilities`` fixture from a test's marker, if any."""
    if "capabilities" not in metafunc.fixturenames:
        return
    marker = metafunc.definition.get_closest_marker(name="capabilities")
    if marker is not None:
        metafunc.parametrize("capabilities", marker.args, ids=None)
@pytest.fixture(scope="session")
def event_loop():
    """Change event_loop fixture to global.

    Lazily creates a single loop and caches it in the module-level
    ``_event_loop`` so it can outlive any one webdriver session.
    """
    global _event_loop
    if _event_loop is None:
        _event_loop = asyncio.get_event_loop_policy().new_event_loop()
    return _event_loop
@pytest.fixture
def http(configuration):
    """HTTP client bound to the remote end's host and port."""
    host = configuration["host"]
    port = configuration["port"]
    return HTTPRequest(host, port)
@pytest.fixture
def server_config():
    """Parsed wptserve configuration read from WD_SERVER_CONFIG_FILE."""
    config_path = os.environ.get("WD_SERVER_CONFIG_FILE")
    with open(config_path, "r") as config_file:
        return json.load(config_file)
@pytest.fixture(scope="session")
def configuration():
    """Remote-end host, port and base capabilities, from the environment."""
    return {
        "host": os.environ.get("WD_HOST", defaults.DRIVER_HOST),
        "port": int(os.environ.get("WD_PORT", str(defaults.DRIVER_PORT))),
        "capabilities": json.loads(os.environ.get("WD_CAPABILITIES", "{}")),
    }
async def reset_current_session_if_necessary(caps):
    """End the cached global session if its capabilities don't match ``caps``."""
    global _current_session

    # If there is a session with different requested capabilities active than
    # the one we would like to create, end it now.
    if _current_session is not None:
        if not _current_session.match(caps):
            is_bidi = isinstance(_current_session, webdriver.BidiSession)
            if is_bidi:
                # BidiSession.end is a coroutine and must be awaited.
                await _current_session.end()
            else:
                _current_session.end()
            _current_session = None
@pytest.fixture(scope="function")
async def session(capabilities, configuration):
    """Create and start a session for a test that does not itself test session creation.

    By default the session will stay open after each test, but we always try to start a
    new one and assume that if that fails there is already a valid session. This makes it
    possible to recover from some errors that might leave the session in a bad state, but
    does not demand that we start a new session per test.
    """
    global _current_session

    # Update configuration capabilities with custom ones from the
    # capabilities fixture, which can be set by tests
    caps = copy.deepcopy(configuration["capabilities"])
    deep_update(caps, capabilities)
    caps = {"alwaysMatch": caps}

    # Ends any cached session whose capabilities differ from `caps`.
    await reset_current_session_if_necessary(caps)

    if _current_session is None:
        _current_session = webdriver.Session(
            configuration["host"],
            configuration["port"],
            capabilities=caps)
    # NOTE(review): start() is called even when the session was reused --
    # presumably a no-op on an already-started session; confirm.
    _current_session.start()

    # Enforce a fixed default window size and position
    if _current_session.capabilities.get("setWindowRect"):
        _current_session.window.size = defaults.WINDOW_SIZE
        _current_session.window.position = defaults.WINDOW_POSITION

    yield _current_session

    # Per-test cleanup; the session itself stays open for reuse.
    cleanup_session(_current_session)
@pytest.fixture(scope="function")
async def bidi_session(capabilities, configuration):
    """Create and start a bidi session.

    Can be used for a test that does not itself test bidi session creation.

    By default the session will stay open after each test, but we always try to start a
    new one and assume that if that fails there is already a valid session. This makes it
    possible to recover from some errors that might leave the session in a bad state, but
    does not demand that we start a new session per test.
    """
    global _current_session

    # Update configuration capabilities with custom ones from the
    # capabilities fixture, which can be set by tests
    caps = copy.deepcopy(configuration["capabilities"])
    # Request the websocket endpoint needed for BiDi (webSocketUrl capability).
    caps.update({"webSocketUrl": True})
    deep_update(caps, capabilities)
    caps = {"alwaysMatch": caps}

    # Ends any cached session whose capabilities differ from `caps`.
    await reset_current_session_if_necessary(caps)

    if _current_session is None:
        _current_session = webdriver.Session(
            configuration["host"],
            configuration["port"],
            capabilities=caps,
            enable_bidi=True)

    _current_session.start()
    await _current_session.bidi_session.start()

    # Enforce a fixed default window size and position
    if _current_session.capabilities.get("setWindowRect"):
        _current_session.window.size = defaults.WINDOW_SIZE
        _current_session.window.position = defaults.WINDOW_POSITION

    yield _current_session.bidi_session

    # The BiDi transport is ended per test; the HTTP session is reused.
    await _current_session.bidi_session.end()
    cleanup_session(_current_session)
@pytest.fixture(scope="function")
def current_session():
    """Return the module-level cached session (may be None)."""
    return _current_session
@pytest.fixture
def url(server_config):
    """Return a URL builder bound to the active wptserve configuration."""
    def build(path, protocol="http", domain="", subdomain="", query="", fragment=""):
        # Resolve the fully-qualified host for the requested (sub)domain and
        # the first port registered for the protocol.
        fqdn = server_config["domains"][domain][subdomain]
        port = server_config["ports"][protocol][0]
        netloc = "%s:%s" % (fqdn, port)
        return urlunsplit((protocol, netloc, path, query, fragment))
    return build
@pytest.fixture
def inline(url):
    """Return a factory that serves a source extract as a well-formed document.

    The extract is embedded in boilerplate by the wptserve handler behind
    ``build_inline``; the returned value is a URL to that handler. Keyword
    arguments (document type, media type, charset, URL options) are passed
    through to ``build_inline``.
    """
    def build(src, **kwargs):
        return build_inline(url, src, **kwargs)

    return build
@pytest.fixture
def iframe(inline):
    """Wrap an inline document extract as the source of an <iframe>."""
    def build(src, **kwargs):
        document_url = inline(src, **kwargs)
        return "<iframe src='%s'></iframe>" % document_url

    return build
| nwjs/chromium.src | third_party/blink/web_tests/external/wpt/webdriver/tests/support/fixtures.py | Python | bsd-3-clause | 7,196 |
#Autogenerated by ReportLab guiedit do not edit
from reportlab.graphics.charts.legends import Legend
from reportlab.graphics.charts.lineplots import ScatterPlot
from reportlab.graphics.shapes import Drawing, _DrawingEditorMixin, String
from reportlab.graphics.charts.textlabels import Label
from excelcolors import *
class ScatterLinesMarkers(_DrawingEditorMixin,Drawing):
    """A 200x150 sample scatter plot with joined lines, a title and a legend.

    Uses the Excel-style palette (color01..color10) from excelcolors and
    exposes the plot, title and legend as the ``chart``, ``Title`` and
    ``Legend`` attributes.
    """
    def __init__(self,width=200,height=150,*args,**kw):
        # apply() is Python-2-only; extended call syntax is equivalent and
        # also works on Python 3.
        Drawing.__init__(self, width, height, *args, **kw)
        # The scatter plot itself.
        self._add(self,ScatterPlot(),name='chart',validate=None,desc="The main chart")
        self.chart.width = 115
        self.chart.height = 80
        self.chart.x = 30
        self.chart.y = 40
        self.chart.lines[0].strokeColor = color01
        self.chart.lines[1].strokeColor = color02
        self.chart.lines[2].strokeColor = color03
        self.chart.lines[3].strokeColor = color04
        self.chart.lines[4].strokeColor = color05
        self.chart.lines[5].strokeColor = color06
        self.chart.lines[6].strokeColor = color07
        self.chart.lines[7].strokeColor = color08
        self.chart.lines[8].strokeColor = color09
        self.chart.lines[9].strokeColor = color10
        self.chart.fillColor = backgroundGrey
        self.chart.lineLabels.fontName = 'Helvetica'
        self.chart.xValueAxis.labels.fontName = 'Helvetica'
        self.chart.xValueAxis.labels.fontSize = 7
        self.chart.xValueAxis.forceZero = 0
        # Two sample series of (x, y) points.
        self.chart.data = [((100,100), (200,200), (250,210), (300,300), (400,500)), ((100,200), (200,300), (250,200), (300,400), (400, 600))]
        self.chart.xValueAxis.avoidBoundFrac = 1
        self.chart.xValueAxis.gridEnd = 115
        self.chart.xValueAxis.tickDown = 3
        self.chart.xValueAxis.visibleGrid = 1
        self.chart.yValueAxis.tickLeft = 3
        self.chart.yValueAxis.labels.fontName = 'Helvetica'
        self.chart.yValueAxis.labels.fontSize = 7
        # Chart title, centered above the plot.
        self._add(self,Label(),name='Title',validate=None,desc="The title at the top of the chart")
        self.Title.fontName = 'Helvetica-Bold'
        self.Title.fontSize = 7
        self.Title.x = 100
        self.Title.y = 135
        self.Title._text = 'Chart Title'
        self.Title.maxWidth = 180
        self.Title.height = 20
        self.Title.textAnchor ='middle'
        # Legend to the right of the plot.
        self._add(self,Legend(),name='Legend',validate=None,desc="The legend or key for the chart")
        self.Legend.colorNamePairs = [(color01, 'Widgets'), (color02, 'Sprockets')]
        self.Legend.fontName = 'Helvetica'
        self.Legend.fontSize = 7
        self.Legend.x = 153
        self.Legend.y = 85
        self.Legend.dxTextSpace = 5
        self.Legend.dy = 5
        self.Legend.dx = 5
        self.Legend.deltay = 5
        self.Legend.alignment ='right'
        self.chart.lineLabelFormat = None
        self.chart.xLabel = 'X Axis'
        self.chart.y = 30
        self.chart.yLabel = 'Y Axis'
        self.chart.yValueAxis.gridEnd = 115
        self.chart.yValueAxis.visibleGrid = 1
        self.chart.yValueAxis.labelTextFormat = '%d'
        self.chart.yValueAxis.forceZero = 1
        self.chart.xValueAxis.forceZero = 1
        # Join the scatter points with line segments.
        self.chart.joinedLines = 1
        self._add(self,0,name='preview',validate=None,desc=None)
if __name__=="__main__": #NORUNTESTS
    # Render the sample chart to scatter_lines_markers.pdf in the CWD.
    ScatterLinesMarkers().save(formats=['pdf'],outDir=None,fnRoot='scatter_lines_markers')
| tschalch/pyTray | src/lib/reportlab/graphics/samples/scatter_lines_markers.py | Python | bsd-3-clause | 3,824 |
import os
import sys
import time
'''
@author: msune,omoya,CarolinaFernandez
@@organization: i2CAT, OFELIA FP7
Persistence engine
Implementes driver-based persistence backend selection
'''
class PersistenceEngine():
    """Static dispatcher that routes persistence calls to a backend driver.

    Drivers are looked up by name ("Django" or "RAWFile") and imported
    lazily from the package named by PATH_TO_DRIVERS. The class is never
    instantiated; all public methods are static. (Python 2 code: uses the
    print statement.)
    """

    # Default Class Attributes
    _defaultParser = "RegexParser"
    _defaultPersistence = "Django"

    # Names of the supported backend drivers
    _drivers = ["Django","RAWFile"]

    # Package that contains the driver modules
    PATH_TO_DRIVERS="backends"

    def __init__(self):
        # Enforce the static-class contract.
        raise Exception("Static class cannot be instanciated")

    @staticmethod
    def _getDriver(driverName):
        # Resolve a driver name to its class, importing the module on demand.
        print "driver name: %s" %driverName

        if driverName == "Django":
            PATH = PersistenceEngine.PATH_TO_DRIVERS + '.django.Django'
            try:
                # exec of a dynamic import binds the driver class locally.
                exec('from ' + PATH + ' import Django')
                return Django
            except:
                # NOTE(review): the bare except also masks errors raised
                # inside the driver module itself and reports them as
                # "driver not found"; consider narrowing to ImportError.
                raise Exception(driverName + ' persistence driver not found in ' + PersistenceEngine.PATH_TO_DRIVERS)
        elif driverName == "RAWFile":
            PATH = PersistenceEngine.PATH_TO_DRIVERS + '.rawfile.RAWFile'
            try:
                exec('from ' + PATH + ' import RAWFile')
                return RAWFile
            except:
                # Same caveat as above: any failure is reported as not-found.
                raise Exception(driverName + ' persistence driver not found in ' + PersistenceEngine.PATH_TO_DRIVERS)
        else:
            raise Exception(driverName + ' not supported')

    @staticmethod
    def save(obj, pBackend, parser=None, **kwargs):
        # Persist obj through the selected backend driver.
        return PersistenceEngine._getDriver(pBackend).save(obj, parser, **kwargs)

    @staticmethod
    def load(tableName, pBackend, resolverMappings, parser=None, **kwargs):
        # Load a named table through the selected backend driver.
        return PersistenceEngine._getDriver(pBackend).load(tableName, resolverMappings, parser, **kwargs)

    '''
    Retrieves every Driver's PolicyRuleTable object for a given name.
    This method should be seldom used.
    '''
    @staticmethod
    def loadAll(tableName, pBackend):
        return PersistenceEngine._getDriver(pBackend).loadAll(tableName)

    '''
    Deletes a Driver's PolicyRuleTable object for a given ID.
    This method should be seldom used.
    '''
    @staticmethod
    def delete(tableID, pBackend):
        return PersistenceEngine._getDriver(pBackend).delete(tableID)
| jpardobl/naman | naman/core/pypelib/persistence/PersistenceEngine.py | Python | bsd-3-clause | 2,078 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import legacy_page_test
from telemetry.timeline.model import TimelineModel
from telemetry.timeline import tracing_config
from telemetry.util import statistics
from telemetry.value import scalar
class TaskExecutionTime(legacy_page_test.LegacyPageTest):
    """Traces a page load and reports the slowest tasks per thread.

    For each browser/renderer thread of interest this measurement reports:
    - the median self-duration of the top _NUMBER_OF_RESULTS_TO_DISPLAY
      tasks (idle-section tasks are excluded), and
    - the percentage of total time spent in each section (IDLE vs NORMAL).
    """

    # Slice name that marks entry into idle-task execution.
    IDLE_SECTION_TRIGGER = 'SingleThreadIdleTaskRunner::RunTask'
    IDLE_SECTION = 'IDLE'
    NORMAL_SECTION = 'NORMAL'

    _TIME_OUT_IN_SECONDS = 60
    _NUMBER_OF_RESULTS_TO_DISPLAY = 10
    _BROWSER_THREADS = ['Chrome_ChildIOThread',
                        'Chrome_IOThread']
    _RENDERER_THREADS = ['Chrome_ChildIOThread',
                         'Chrome_IOThread',
                         'CrRendererMain']
    # Trace categories enabled for the recording.
    _CATEGORIES = ['benchmark',
                   'blink',
                   'blink.console',
                   'blink_gc',
                   'cc',
                   'gpu',
                   'ipc',
                   'renderer.scheduler',
                   'toplevel',
                   'v8',
                   'webkit.console']

    def __init__(self):
        super(TaskExecutionTime, self).__init__()
        self._renderer_process = None
        self._browser_process = None
        self._results = None

    def WillNavigateToPage(self, page, tab):
        # Start tracing before navigation so the whole load is captured.
        config = tracing_config.TracingConfig()
        for category in self._CATEGORIES:
            config.tracing_category_filter.AddIncludedCategory(category)
        config.enable_chrome_trace = True
        tab.browser.platform.tracing_controller.StartTracing(
            config, self._TIME_OUT_IN_SECONDS)

    def ValidateAndMeasurePage(self, page, tab, results):
        # Stop tracing, model the timeline, and record values for this page.
        trace_data = tab.browser.platform.tracing_controller.StopTracing()
        timeline_model = TimelineModel(trace_data)
        self._renderer_process = timeline_model.GetRendererProcessFromTabId(tab.id)
        self._browser_process = timeline_model.browser_process
        self._AddResults(results)

    def _AddResults(self, results):
        # Walk the configured thread lists of both processes.
        self._results = results
        for thread in self._BROWSER_THREADS:
            self._AddTasksFromThreadToResults(self._browser_process, thread)
        for thread in self._RENDERER_THREADS:
            self._AddTasksFromThreadToResults(self._renderer_process, thread)

    def _AddTasksFromThreadToResults(self, process, thread_name):
        if process is None:
            return
        sections = TaskExecutionTime._GetSectionsForThread(process, thread_name)
        self._ReportSectionPercentages(sections.values(),
                                       '%s:%s' % (process.name, thread_name))
        # Create list with top |_NUMBER_OF_RESULTS_TO_DISPLAY| for each section.
        for section in sections.itervalues():
            if section.name == TaskExecutionTime.IDLE_SECTION:
                # Skip sections we don't report.
                continue
            self._AddSlowestTasksToResults(section.tasks.values())

    def _AddSlowestTasksToResults(self, tasks):
        # Sort descending by median self-duration and report the top N.
        # (The lambda parameter 'slice' shadows the builtin; local only.)
        sorted_tasks = sorted(
            tasks,
            key=lambda slice: slice.median_self_duration,
            reverse=True)
        for task in sorted_tasks[:self.GetExpectedResultCount()]:
            self._results.AddValue(scalar.ScalarValue(
                self._results.current_page,
                task.name,
                'ms',
                task.median_self_duration,
                description='Slowest tasks'))

    def _ReportSectionPercentages(self, section_values, metric_prefix):
        all_sectionstotal_duration = sum(
            section.total_duration for section in section_values)
        if not all_sectionstotal_duration:
            # Nothing was recorded, so early out.
            return
        for section in section_values:
            # Unnamed (None) sections are reported as NORMAL.
            section_name = section.name or TaskExecutionTime.NORMAL_SECTION
            section_percentage_of_total = (
                (section.total_duration * 100.0) / all_sectionstotal_duration)
            # NOTE(review): the description reads 'Idle task percentage' even
            # for non-idle sections.
            self._results.AddValue(scalar.ScalarValue(
                self._results.current_page,
                '%s:Section_%s' % (metric_prefix, section_name),
                '%',
                section_percentage_of_total,
                description='Idle task percentage'))

    @staticmethod
    def _GetSectionsForThread(process, target_thread):
        # Bucket every slice of the named thread into sections
        # (see _ProcessTasksForThread for the bucketing rules).
        sections = {}
        for thread in process.threads.itervalues():
            if thread.name != target_thread:
                continue
            for task_slice in thread.IterAllSlices():
                _ProcessTasksForThread(
                    sections,
                    '%s:%s' % (process.name, thread.name),
                    task_slice)
        return sections

    @staticmethod
    def GetExpectedResultCount():
        return TaskExecutionTime._NUMBER_OF_RESULTS_TO_DISPLAY
def _ProcessTasksForThread(
        sections,
        thread_name,
        task_slice,
        section_name=None):
    """Recursively record |task_slice| and its sub-slices into |sections|.

    Tasks are keyed by a human-readable name built from the thread name,
    section, and the slice's src_func / IPC args; '.' is mapped to '_'.
    """
    if task_slice.self_thread_time is None:
        # Early out if this slice is a TRACE_EVENT_INSTANT, as it has no duration.
        return

    # Note: By setting a different section below we split off this task into
    # a different sorting bucket. To add extra granularity (e.g. tasks executed
    # during page loading) add logic to set a different section name here. The
    # section name is set before the slice's data is recorded so the triggering
    # event will be included in its own section (i.e. the idle trigger will be
    # recorded as an idle event).
    if task_slice.name == TaskExecutionTime.IDLE_SECTION_TRIGGER:
        section_name = TaskExecutionTime.IDLE_SECTION

    # Add the thread name and section (e.g. 'Idle') to the test name
    # so it is human-readable.
    reported_name = thread_name + ':'
    if section_name:
        reported_name += section_name + ':'

    if 'src_func' in task_slice.args:
        # Data contains the name of the timed function, use it as the name.
        reported_name += task_slice.args['src_func']
    elif 'line' in task_slice.args:
        # Data contains IPC class and line numbers, use these as the name.
        reported_name += 'IPC_Class_' + str(task_slice.args['class'])
        reported_name += ':Line_' + str(task_slice.args['line'])
    else:
        # Fall back to use the name of the task slice.
        reported_name += task_slice.name.lower()

    # Replace any '.'s with '_'s as V8 uses them and it confuses the dashboard.
    reported_name = reported_name.replace('.', '_')

    # If this task is in a new section create a section object and add it to the
    # section dictionary.
    if section_name not in sections:
        sections[section_name] = Section(section_name)

    sections[section_name].AddTask(reported_name, task_slice.self_thread_time)

    # Process sub slices recursively, passing the current section down.
    for sub_slice in task_slice.sub_slices:
        _ProcessTasksForThread(
            sections,
            thread_name,
            sub_slice,
            section_name)
class NameAndDurations(object):
    """Collects every observed self-duration for one named task."""

    def __init__(self, name, self_duration):
        self.name = name
        # All recorded self-durations; the median is derived on demand.
        self.self_durations = [self_duration]

    def Update(self, self_duration):
        # Record one more observation of this task's self-duration.
        self.self_durations.append(self_duration)

    @property
    def median_self_duration(self):
        # Median over every duration recorded so far (telemetry helper).
        return statistics.Median(self.self_durations)
class Section(object):
    """Groups tasks under one section name and tracks their total runtime."""

    def __init__(self, name):
        self.name = name
        self.tasks = {}          # task name -> NameAndDurations
        self.total_duration = 0  # running sum of every duration added

    def AddTask(self, name, duration):
        """Record |duration| for task |name|, creating the entry on demand."""
        existing = self.tasks.get(name)
        if existing is None:
            # First sighting of this task: start a fresh duration record.
            self.tasks[name] = NameAndDurations(name, duration)
        else:
            # Seen before (e.g. from an earlier slice): extend the duration
            # list so a median can be computed later.
            existing.Update(duration)
        # Keep the section total for percentage reporting.
        self.total_duration += duration
| axinging/chromium-crosswalk | tools/perf/measurements/task_execution_time.py | Python | bsd-3-clause | 7,687 |
from __future__ import print_function
import sys
def main():
    """Echo '#STRUCT_SIZER' payload lines from stdin.

    Non-matching lines are ignored. For matching lines the sentinel prefix
    (marker plus one separator character, 14 chars total) is dropped and any
    remaining '#' characters are removed before printing.
    """
    marker = "#STRUCT_SIZER"
    for raw in sys.stdin:
        text = raw.strip()
        if text.startswith(marker):
            payload = text[len(marker) + 1:].replace("#", "")
            print(payload)
if __name__ == "__main__":
main()
| JLLeitschuh/allwpilib | wpilibj/src/athena/cpp/nivision/get_struct_size.py | Python | bsd-3-clause | 308 |
from __future__ import absolute_import
__all__ = ("DebugMeta",)
from sentry.interfaces.base import Interface
from sentry.utils.json import prune_empty_keys
class DebugMeta(Interface):
    """
    Holds debug meta information for processing stacktraces
    and similar things. This information is deleted after event processing.

    Currently two attributes exist:

    ``sdk_info``:
        sets the SDK that is used for the system. This affects the lookup
        for system symbols. If not defined, system symbols are not looked up.
    ``images``:
        a list of debug images and their mappings.
    """

    ephemeral = False
    path = "debug_meta"
    external_type = "debugmeta"

    @classmethod
    def to_python(cls, data):
        # Normalise a missing/None image list to []; the other fields pass
        # through unchanged (None when absent).
        images = data.get("images") or []
        return cls(
            images=images,
            sdk_info=data.get("sdk_info"),
            is_debug_build=data.get("is_debug_build"),
        )

    def to_json(self):
        # Empty image lists / sdk_info collapse to None before pruning
        # (prune_empty_keys presumably drops None entries — project helper).
        payload = {
            "images": self.images or None,
            "sdk_info": self.sdk_info or None,
            "is_debug_build": self.is_debug_build,
        }
        return prune_empty_keys(payload)
| mvaled/sentry | src/sentry/interfaces/debug_meta.py | Python | bsd-3-clause | 1,172 |
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Recipe for uploading DM results.
import calendar
PYTHON_VERSION_COMPATIBILITY = "PY3"
DEPS = [
'recipe_engine/file',
'recipe_engine/json',
'recipe_engine/path',
'recipe_engine/properties',
'recipe_engine/step',
'recipe_engine/time',
'gsutil',
'vars',
]
DM_JSON = 'dm.json'
VERBOSE_LOG = 'verbose.log'
def RunSteps(api):
    """Upload DM results: images first, then dm.json / optional verbose.log.

    Step names here are pinned by recipe expectation files; do not rename
    them casually.
    """
    api.vars.setup()
    revision = api.properties['revision']
    results_dir = api.path['start_dir'].join('test')

    # Upload the images. It is *vital* that the images are uploaded first
    # so they exist whenever the json is processed.
    image_dest_path = 'gs://%s/dm-images-v1' % api.properties['gs_bucket']
    for ext in ['.png', '.pdf']:
        files_to_upload = api.file.glob_paths(
            'find %s images' % ext,
            results_dir,
            '*%s' % ext,
            test_data=['someimage.png'])
        # For some reason, glob returns results_dir when it should return nothing.
        files_to_upload = [f for f in files_to_upload if str(f).endswith(ext)]
        if len(files_to_upload) > 0:
            api.gsutil.cp('%s images' % ext, results_dir.join('*%s' % ext),
                          image_dest_path, multithread=True)

    # Compute the directory to upload results to: bucketed by UTC date/hour,
    # revision, builder, and the upload timestamp.
    now = api.time.utcnow()
    summary_dest_path = '/'.join([
        'dm-json-v1',
        str(now.year ).zfill(4),
        str(now.month).zfill(2),
        str(now.day  ).zfill(2),
        str(now.hour ).zfill(2),
        revision,
        api.vars.builder_name,
        str(int(calendar.timegm(now.utctimetuple())))])

    # Trybot results are further siloed by issue/patchset.
    if api.vars.is_trybot:
        summary_dest_path = '/'.join(('trybot', summary_dest_path,
                                      str(api.vars.issue), str(api.vars.patchset)))

    summary_dest_path = 'gs://%s/%s' % (api.properties['gs_bucket'],
                                        summary_dest_path)

    # Directly upload dm.json and verbose.log if it exists.
    json_file = results_dir.join(DM_JSON)
    log_file = results_dir.join(VERBOSE_LOG)

    # -Z asks gsutil to gzip the upload.
    api.gsutil.cp('dm.json', json_file,
                  summary_dest_path + '/' + DM_JSON, extra_args=['-Z'])

    files = api.file.listdir('check for optional verbose.log file',
                             results_dir, test_data=['dm.json', 'verbose.log'])
    if log_file in files:
        api.gsutil.cp('verbose.log', log_file,
                      summary_dest_path + '/' + VERBOSE_LOG, extra_args=['-Z'])
def GenTests(api):
    """Recipe simulation tests: normal run, alternate bucket, upload
    retries (single and exhausted), and a trybot run."""
    builder = 'Upload-Test-Debian10-Clang-GCE-CPU-AVX2-x86_64-Debug-All'
    yield (
        api.test('normal_bot') +
        api.properties(buildername=builder,
                       gs_bucket='skia-infra-gm',
                       revision='abc123',
                       path_config='kitchen')
    )
    yield (
        api.test('alternate_bucket') +
        api.properties(buildername=builder,
                       gs_bucket='skia-infra-gm-alt',
                       revision='abc123',
                       path_config='kitchen')
    )
    # One failed attempt: the gsutil module retries and succeeds.
    yield (
        api.test('failed_once') +
        api.properties(buildername=builder,
                       gs_bucket='skia-infra-gm',
                       revision='abc123',
                       path_config='kitchen') +
        api.step_data('upload .png images', retcode=1)
    )
    # All five attempts fail: the recipe itself fails.
    yield (
        api.test('failed_all') +
        api.properties(buildername=builder,
                       gs_bucket='skia-infra-gm',
                       revision='abc123',
                       path_config='kitchen') +
        api.step_data('upload .png images', retcode=1) +
        api.step_data('upload .png images (attempt 2)', retcode=1) +
        api.step_data('upload .png images (attempt 3)', retcode=1) +
        api.step_data('upload .png images (attempt 4)', retcode=1) +
        api.step_data('upload .png images (attempt 5)', retcode=1)
    )
    yield (
        api.test('trybot') +
        api.properties.tryserver(
            gerrit_project='skia',
            gerrit_url='https://skia-review.googlesource.com/',
        ) +
        api.properties(
            buildername=builder,
            gs_bucket='skia-infra-gm',
            revision='abc123',
            path_config='kitchen')
    )
| youtube/cobalt | third_party/skia_next/third_party/skia/infra/bots/recipes/upload_dm_results.py | Python | bsd-3-clause | 4,195 |
from django.conf.urls.defaults import patterns
from django.conf import settings
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
# Legacy Django URLconf (pre-1.8 `patterns()` + dotted-string views):
# stats browsing root, node-registration endpoint, and direct media serving.
urlpatterns = patterns('',
    ( r'^$', 'statserver.stats.views.browse' ),
    ( r'^stats/addnode$', 'statserver.stats.views.addnode' ),
    # NOTE(review): django.views.static.serve is not meant for production use.
    ( r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
)
| MiltosD/CEF-ELRC | misc/tools/statserver/urls.py | Python | bsd-3-clause | 427 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.db import models, migrations
from social.utils import setting_name
EMAIL_LENGTH = getattr(settings, setting_name('EMAIL_LENGTH'), 254)
class Migration(migrations.Migration):
    """Widen ``Code.email`` to the configurable EMAIL_LENGTH (default 254)."""

    dependencies = [
        ('default', '0002_add_related_name'),
    ]

    operations = [
        migrations.AlterField(
            model_name='code',
            name='email',
            # max_length comes from the SOCIAL_AUTH_EMAIL_LENGTH setting,
            # resolved via EMAIL_LENGTH at module import time.
            field=models.EmailField(max_length=EMAIL_LENGTH),
        ),
    ]
| webjunkie/python-social-auth | social/apps/django_app/default/migrations/0003_alter_email_max_length.py | Python | bsd-3-clause | 548 |
from __future__ import absolute_import
import bokeh.util.string as bus
class Test_escape(object):
    """Tests for bokeh.util.string.escape (HTML entity escaping).

    The expected strings contain literal HTML entities; an earlier
    extraction of this file had decoded them (e.g. ``&#x27;`` back to
    ``'``), which made these asserts claim escape() is the identity.
    """

    def test_default_quote(self):
        # Both quote characters are escaped by default.
        assert bus.escape("foo'bar") == "foo&#x27;bar"
        assert bus.escape('foo"bar') == "foo&quot;bar"

    def test_quote_False(self):
        # quote=False leaves both quote characters untouched.
        assert bus.escape("foo'bar", quote=False) == "foo'bar"
        assert bus.escape('foo"bar', quote=False) == 'foo"bar'

    def test_quote_custom(self):
        # Only the characters listed in |quote| are escaped.
        assert bus.escape("foo'bar", quote=('"'),) == "foo'bar"
        assert bus.escape("foo'bar", quote=("'"),) == "foo&#x27;bar"
        assert bus.escape('foo"bar', quote=("'"),) == 'foo"bar'
        assert bus.escape('foo"bar', quote=('"'),) == "foo&quot;bar"

    def test_amp(self):
        assert bus.escape("foo&bar") == "foo&amp;bar"

    def test_lt(self):
        assert bus.escape("foo<bar") == "foo&lt;bar"

    def test_gt(self):
        assert bus.escape("foo>bar") == "foo&gt;bar"
def test_amp(self):
assert bus.escape("foo&bar") == "foo&bar"
def test_lt(self):
assert bus.escape("foo<bar") == "foo<bar"
def test_gt(self):
assert bus.escape("foo>bar") == "foo>bar"
class Test_format_doctring(object):
    """Tests for bokeh.util.string.format_docstring."""
    # NOTE(review): class name misspells "docstring"; kept as-is because only
    # the "Test" prefix matters for pytest discovery.

    def test_no_argument(self):
        # With no format arguments the docstring passes through unchanged;
        # a None docstring stays None.
        doc__ = "hello world"
        assert bus.format_docstring(doc__) == doc__
        doc__ = None
        assert bus.format_docstring(doc__) == None

    def test_arguments_unused(self):
        # Extra args/kwargs not referenced by the template are ignored.
        doc__ = "hello world"
        assert bus.format_docstring(doc__, 'hello ', not_used='world') == doc__
        doc__ = None
        assert bus.format_docstring(doc__, 'hello ', not_used='world') == None

    def test_arguments(self):
        # Positional and keyword placeholders are substituted.
        doc__ = "-- {}{as_parameter} --"
        assert bus.format_docstring(doc__, 'hello ', as_parameter='world') == "-- hello world --"
        doc__ = None
        assert bus.format_docstring(doc__, 'hello ', as_parameter='world') == None
class Test_indent(object):
    """Tests for bokeh.util.string.indent.

    NOTE(review): an earlier extraction of this file collapsed runs of
    spaces inside the string literals, which made the expected values
    inconsistent with indent()'s defaults (n=2, ch=' '); the literals
    below restore the intended whitespace.
    """

    # Third line is pre-indented by two spaces to verify existing
    # indentation is preserved beneath the added prefix.
    TEXT = "some text\nto indent\n  goes here"

    def test_default_args(self):
        # Default prefix is two spaces per line.
        assert bus.indent(self.TEXT) == "  some text\n  to indent\n    goes here"

    def test_with_n(self):
        assert bus.indent(self.TEXT, n=3) == "   some text\n   to indent\n     goes here"

    def test_with_ch(self):
        assert bus.indent(self.TEXT, ch="-") == "--some text\n--to indent\n--  goes here"
class Test_nice_join(object):
    """Tests for bokeh.util.string.nice_join."""
    # NOTE(review): the keyword 'conjuction' [sic] matches the spelling of
    # nice_join's actual parameter as exercised by these passing tests.

    def test_default(self):
        # Default separator ', ' and conjunction 'or'.
        assert bus.nice_join(["one"]) == "one"
        assert bus.nice_join(["one", "two"]) == "one or two"
        assert bus.nice_join(["one", "two", "three"]) == "one, two or three"
        assert bus.nice_join(["one", "two", "three", "four"]) == "one, two, three or four"

    def test_string_conjunction(self):
        assert bus.nice_join(["one"], conjuction="and") == "one"
        assert bus.nice_join(["one", "two"], conjuction="and") == "one and two"
        assert bus.nice_join(["one", "two", "three"], conjuction="and") == "one, two and three"
        assert bus.nice_join(["one", "two", "three", "four"], conjuction="and") == "one, two, three and four"

    def test_None_conjunction(self):
        # No conjunction: plain separator join.
        assert bus.nice_join(["one"], conjuction=None) == "one"
        assert bus.nice_join(["one", "two"], conjuction=None) == "one, two"
        assert bus.nice_join(["one", "two", "three"], conjuction=None) == "one, two, three"
        assert bus.nice_join(["one", "two", "three", "four"], conjuction=None) == "one, two, three, four"

    def test_sep(self):
        assert bus.nice_join(["one"], sep='; ') == "one"
        assert bus.nice_join(["one", "two"], sep='; ') == "one or two"
        assert bus.nice_join(["one", "two", "three"], sep='; ') == "one; two or three"
        assert bus.nice_join(["one", "two", "three", "four"], sep="; ") == "one; two; three or four"
def test_snakify():
    """snakify converts CamelCase names to snake_case, keeping digit runs."""
    assert bus.snakify("MyClassName") == "my_class_name"
    assert bus.snakify("My1Class23Name456") == "my1_class23_name456"
    assert bus.snakify("MySUPERClassName") == "my_super_class_name"
| mindriot101/bokeh | bokeh/util/tests/test_string.py | Python | bsd-3-clause | 3,770 |
#!/usr/bin/python
# Copyright 2016 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# gen_dxgi_format_table.py:
# Code generation for DXGI format map.
from datetime import date
import sys
sys.path.append('../..')
import angle_format
template_cpp = """// GENERATED FILE - DO NOT EDIT.
// Generated by {script_name} using data from {data_source_name}.
//
// Copyright {copyright_year} The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// DXGI format info:
// Determining metadata about a DXGI format.
#include "libANGLE/renderer/Format.h"
using namespace angle;
namespace rx
{{
namespace d3d11
{{
GLenum GetComponentType(DXGI_FORMAT dxgiFormat)
{{
switch (dxgiFormat)
{{
{component_type_cases} default:
break;
}}
UNREACHABLE();
return GL_NONE;
}}
}} // namespace d3d11
namespace d3d11_angle
{{
const Format &GetFormat(DXGI_FORMAT dxgiFormat)
{{
switch (dxgiFormat)
{{
{format_cases} default:
break;
}}
UNREACHABLE();
return Format::Get(Format::ID::NONE);
}}
}} // namespace d3d11_angle
}} // namespace rx
"""
template_format_case = """ case DXGI_FORMAT_{dxgi_format}:
return {result};
"""
template_undefined_case = """ case DXGI_FORMAT_{dxgi_format}:
break;
"""
def format_case(dxgi_format, result):
    # Render one C++ "case DXGI_FORMAT_<x>: return <result>;" stanza.
    return template_format_case.format(
        dxgi_format = dxgi_format,
        result = result)
def undefined_case(dxgi_format):
    # Render a "case ...: break;" stanza for formats with no mapping,
    # which falls through to the UNREACHABLE() default.
    return template_undefined_case.format(dxgi_format = dxgi_format)
# Accumulated switch-case bodies for the two generated functions.
component_cases = ""
format_cases = ""

input_data = 'dxgi_format_data.json'
dxgi_map = angle_format.load_json(input_data)

# DXGI name suffix -> GL component type (SHAREDEXP, e.g. R9G9B9E5, is float).
types = {
    'SNORM': 'GL_SIGNED_NORMALIZED',
    'UNORM': 'GL_UNSIGNED_NORMALIZED',
    'SINT': 'GL_INT',
    'UINT': 'GL_UNSIGNED_INT',
    'FLOAT': 'GL_FLOAT',
    'SHAREDEXP': 'GL_FLOAT'
}

angle_to_gl = angle_format.load_inverse_table('../../angle_format_map.json')
all_angle = angle_to_gl.keys()

# NOTE(review): the loop variable below shadows the imported `angle_format`
# module; the module is not used again afterwards, so this is harmless today.
for dxgi_format, angle_format in sorted(dxgi_map.iteritems()):
    # Count how many known type suffixes appear in the DXGI name.
    found = [ctype in dxgi_format for ctype in types.keys()]
    count = reduce((lambda a, b: int(a) + int(b)), found)
    # NOTE(review): component_type is assigned but never used.
    component_type = 'GL_NONE'
    if count == 1:
        # Exactly one matching suffix: emit a case returning its GL type.
        gltype = next(gltype for ctype, gltype in types.iteritems() if ctype in dxgi_format)
        component_cases += format_case(dxgi_format, gltype)
    else:
        # Zero or ambiguous matches fall through to UNREACHABLE().
        component_cases += undefined_case(dxgi_format)
    if angle_format == "":
        # Empty mapping means the ANGLE format shares the DXGI name.
        angle_format = dxgi_format
    if angle_format in all_angle:
        angle_format = "Format::Get(Format::ID::" + angle_format + ")"
        format_cases += format_case(dxgi_format, angle_format)
    else:
        format_cases += undefined_case(dxgi_format)

with open('dxgi_format_map_autogen.cpp', 'wt') as out_file:
    output_cpp = template_cpp.format(
        script_name = sys.argv[0],
        data_source_name = input_data,
        copyright_year = date.today().year,
        component_type_cases = component_cases,
        format_cases = format_cases)
    out_file.write(output_cpp)
    # NOTE(review): redundant inside the with-block, kept as in original.
    out_file.close()
| ecoal95/angle | src/libANGLE/renderer/d3d/d3d11/gen_dxgi_format_table.py | Python | bsd-3-clause | 3,280 |
#!/usr/bin/env python
import json
import codecs
from csvkit import CSVKitReader
from csvkit.cli import CSVKitUtility, match_column_identifier
from csvkit.exceptions import NonUniqueKeyColumnException
class CSVJSON(CSVKitUtility):
    """csvkit command-line utility converting CSV to JSON or GeoJSON.

    Output modes (chosen in main()): GeoJSON FeatureCollection when both
    --lat/--lon are given, an object keyed by --key, or a plain list of
    row objects.
    """

    description = 'Convert a CSV file into JSON (or GeoJSON).'
    override_flags = ['H']

    def add_arguments(self):
        """Register the CLI flags that control JSON/GeoJSON output."""
        self.argparser.add_argument('-i', '--indent', dest='indent', type=int, default=None,
            help='Indent the output JSON this many spaces. Disabled by default.')
        self.argparser.add_argument('-k', '--key', dest='key', type=str, default=None,
            help='Output JSON as an array of objects keyed by a given column, KEY, rather than as a list. All values in the column must be unique. If --lat and --lon are also specified, this column will be used as GeoJSON Feature ID.')
        self.argparser.add_argument('--lat', dest='lat', type=str, default=None,
            help='A column index or name containing a latitude. Output will be GeoJSON instead of JSON. Only valid if --lon is also specified.')
        self.argparser.add_argument('--lon', dest='lon', type=str, default=None,
            help='A column index or name containing a longitude. Output will be GeoJSON instead of JSON. Only valid if --lat is also specified.')
        self.argparser.add_argument('--crs', dest='crs', type=str, default=None,
            help='A coordinate reference system string to be included with GeoJSON output. Only valid if --lat and --lon are also specified.')

    def main(self):
        """
        Convert CSV to JSON.
        """
        # --lat/--lon must appear together; --crs requires both.
        if self.args.lat and not self.args.lon:
            self.argparser.error('--lon is required whenever --lat is specified.')
        if self.args.lon and not self.args.lat:
            self.argparser.error('--lat is required whenever --lon is specified.')
        if self.args.crs and not self.args.lat:
            self.argparser.error('--crs is only allowed when --lat and --lon are also specified.')

        rows = CSVKitReader(self.args.file, **self.reader_kwargs)
        # First row is the header (Python 2 iterator protocol).
        column_names = rows.next()
        stream = codecs.getwriter('utf-8')(self.output_file)

        # GeoJSON
        if self.args.lat and self.args.lon:
            features = []
            # Track the bounding box over all features.
            min_lon = None
            min_lat = None
            max_lon = None
            max_lat = None
            lat_column = match_column_identifier(column_names, self.args.lat, self.args.zero_based)
            lon_column = match_column_identifier(column_names, self.args.lon, self.args.zero_based)
            if self.args.key:
                id_column = match_column_identifier(column_names, self.args.key, self.args.zero_based)
            else:
                id_column = None
            for row in rows:
                feature = { 'type': 'Feature' }
                properties = {}
                geoid = None
                lat = None
                lon = None
                # lat/lon/id cells become geometry/ID; everything else is a property.
                for i, c in enumerate(row):
                    if i == lat_column:
                        lat = float(c)
                        if min_lat is None or lat < min_lat:
                            min_lat = lat
                        if max_lat is None or lat > max_lat:
                            max_lat = lat
                    elif i == lon_column:
                        lon = float(c)
                        if min_lon is None or lon < min_lon:
                            min_lon = lon
                        if max_lon is None or lon > max_lon:
                            max_lon = lon
                    elif id_column is not None and i == id_column:
                        geoid = c
                    else:
                        properties[column_names[i]] = c
                if id_column is not None:
                    feature['id'] = geoid
                # GeoJSON order is [longitude, latitude].
                feature['geometry'] = {
                    'type': 'Point',
                    'coordinates': [lon, lat]
                }
                feature['properties'] = properties
                features.append(feature)
            output = {
                'type': 'FeatureCollection',
                'bbox': [min_lon, min_lat, max_lon, max_lat],
                'features': features
            }
            if self.args.crs:
                output['crs'] = {
                    'type': 'name',
                    'properties': {
                        'name': self.args.crs
                    }
                }
        # Keyed JSON
        elif self.args.key:
            output = {}
            for row in rows:
                row_dict = dict(zip(column_names, row))
                k = row_dict[self.args.key]
                # Key values must be unique across all rows.
                if k in output:
                    raise NonUniqueKeyColumnException('Value %s is not unique in the key column.' % unicode(k))
                output[k] = row_dict
        # Boring JSON
        else:
            output = [dict(zip(column_names, row)) for row in rows]

        json.dump(output, stream, ensure_ascii=False, indent=self.args.indent, encoding='utf-8')
def launch_new_instance():
    # Console-script entry point: construct the utility and run it.
    utility = CSVJSON()
    utility.main()
if __name__ == "__main__":
launch_new_instance()
| danpalmer/open-data-quality-dashboard | tools/csvkit/csvkit/utilities/csvjson.py | Python | mit | 5,216 |
import asposebarcodecloud
from asposebarcodecloud.BarcodeApi import BarcodeApi
from asposebarcodecloud.BarcodeApi import ApiException
import ConfigParser
# Read API credentials and the output folder from the shared config file.
config = ConfigParser.ConfigParser()
config.readfp(open(r'../../data/config.properties'))
apiKey = config.get('AppConfig', 'api_key')
appSid = config.get('AppConfig', 'app_sid')
out_folder = config.get('AppConfig', 'out_folder')
data_folder = "../../data/"  # resource data folder

#ExStart:1
# Instantiate Aspose.Barcode Cloud SDK client (third arg True: debug flag —
# TODO(review): confirm against the SDK's ApiClient signature).
api_client = asposebarcodecloud.ApiClient.ApiClient(apiKey, appSid, True)
barcodeApi = BarcodeApi(api_client);

# Set the barcode file name created on server
name = "sample-barcode"
# Set Text to encode inside barcode
text = "Aspose.BarCode"
# Set Barcode Symbology
# NOTE(review): 'type' and 'format' shadow builtins; harmless in this script.
type = "datamatrix"
# Set Generated Barcode Image Format
format = "PNG"

try:
    # Invoke the cloud API to generate the barcode and return an image stream.
    response = barcodeApi.GetBarcodeGenerate(text=text, type=type, format=format)

    if response.Status == "OK":
        # Stream the generated barcode image to the configured output folder.
        outfilename = out_folder + name + "." + format
        with open(outfilename, 'wb') as f:
            for chunk in response.InputStream:
                f.write(chunk)
except ApiException as ex:
    print "ApiException:"
    print "Code:" + str(ex.code)
    print "Message:" + ex.message
#ExEnd:1
#ExEnd:1 | asposebarcode/Aspose_BarCode_Cloud | Examples/Python/generating-saving/without-cloud-storage/generate-barcode-and-get-image-as-stream.py | Python | mit | 1,515 |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2011 OpenStack Foundation
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import os
import re
from lxml import etree
from oslo_concurrency import processutils
from oslo_log import log as logging
import nova.conf
from nova.i18n import _
from nova.i18n import _LI
from nova.i18n import _LW
from nova.objects import fields as obj_fields
from nova import utils
from nova.virt.disk import api as disk
from nova.virt import images
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt.volume import remotefs
from nova.virt import volumeutils
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
RESIZE_SNAPSHOT_NAME = 'nova-resize'
def execute(*args, **kwargs):
    # Module-local indirection over nova.utils.execute: keeps call sites
    # short and makes stubbing in unit tests easy.
    return utils.execute(*args, **kwargs)
def get_iscsi_initiator():
    # Thin pass-through to the shared helper in nova.virt.volumeutils.
    return volumeutils.get_iscsi_initiator()
def create_image(disk_format, path, size):
    """Create a disk image

    :param disk_format: Disk image format (as known by qemu-img)
    :param path: Desired location of the disk image
    :param size: Desired size of disk image. May be given as an int or
                 a string. If given as an int, it will be interpreted
                 as bytes. If it's a string, it should consist of a number
                 with an optional suffix ('K' for Kibibytes,
                 M for Mebibytes, 'G' for Gibibytes, 'T' for Tebibytes).
                 If no suffix is given, it will be interpreted as bytes.
    """
    # Delegates entirely to qemu-img; no preallocation options are passed.
    execute('qemu-img', 'create', '-f', disk_format, path, size)
def create_cow_image(backing_file, path, size=None):
    """Create COW image

    Creates a COW (qcow2) image with the given backing file.

    :param backing_file: Existing image on which to base the COW image
    :param path: Desired location of the COW image
    :param size: Optional virtual size; omitted from the qemu-img options
                 when None.
    """
    base_cmd = ['qemu-img', 'create', '-f', 'qcow2']
    cow_opts = []
    if backing_file:
        cow_opts += ['backing_file=%s' % backing_file]
        base_details = images.qemu_img_info(backing_file)
    else:
        base_details = None
    # Explicitly inherit the value of 'cluster_size' property of a qcow2
    # overlay image from its backing file. This can be useful in cases
    # when people create a base image with a non-default 'cluster_size'
    # value or cases when images were created with very old QEMU
    # versions which had a different default 'cluster_size'.
    if base_details and base_details.cluster_size is not None:
        cow_opts += ['cluster_size=%s' % base_details.cluster_size]
    if size is not None:
        cow_opts += ['size=%s' % size]
    if cow_opts:
        # Format as a comma separated list
        csv_opts = ",".join(cow_opts)
        cow_opts = ['-o', csv_opts]
    cmd = base_cmd + cow_opts + [path]
    execute(*cmd)
def create_ploop_image(disk_format, path, size, fs_type):
    """Create ploop image

    :param disk_format: Disk image format (as known by ploop)
    :param path: Desired location of the ploop image (a directory;
                 the disk itself is created as <path>/root.hds)
    :param size: Desired size of ploop image. May be given as an int or
                 a string. If given as an int, it will be interpreted
                 as bytes. If it's a string, it should consist of a number
                 with an optional suffix ('K' for Kibibytes,
                 M for Mebibytes, 'G' for Gibibytes, 'T' for Tebibytes).
                 If no suffix is given, it will be interpreted as bytes.
    :param fs_type: Filesystem type; falls back to the configured default
                    ephemeral format, then ext4.
    """
    if not fs_type:
        fs_type = CONF.default_ephemeral_format or \
                  disk.FS_FORMAT_EXT4
    execute('mkdir', '-p', path)
    disk_path = os.path.join(path, 'root.hds')
    execute('ploop', 'init', '-s', size, '-f', disk_format, '-t', fs_type,
            disk_path, run_as_root=True, check_exit_code=True)
    # Add read access for all users, because "ploop init" creates
    # disk with rw rights only for root. OpenStack user should have access
    # to the disk to request info via "qemu-img info"
    execute('chmod', '-R', 'a+r', path,
            run_as_root=True, check_exit_code=True)
def pick_disk_driver_name(hypervisor_version, is_block_dev=False):
    """Pick the libvirt primary backend driver name

    If the hypervisor supports multiple backend drivers we have to tell libvirt
    which one should be used.

    Xen supports the following drivers: "tap", "tap2", "phy", "file", or
    "qemu", being "qemu" the preferred one. Qemu only supports "qemu".

    :param hypervisor_version: Numeric libvirt hypervisor version
                               (e.g. 4002000 == 4.2.0)
    :param is_block_dev: True when the disk is a block device (Xen: "phy")
    :returns: driver_name or None
    """
    if CONF.libvirt.virt_type == "xen":
        if is_block_dev:
            return "phy"
        else:
            # 4002000 == 4.2.0
            if hypervisor_version >= 4002000:
                try:
                    # Probe for the legacy xend toolstack; if absent or
                    # stopped, libvirt uses the libxl toolstack -> "qemu".
                    execute('xend', 'status',
                            run_as_root=True, check_exit_code=True)
                except OSError as exc:
                    if exc.errno == errno.ENOENT:
                        LOG.debug("xend is not found")
                        # libvirt will try to use libxl toolstack
                        return 'qemu'
                    else:
                        raise
                except processutils.ProcessExecutionError:
                    LOG.debug("xend is not started")
                    # libvirt will try to use libxl toolstack
                    return 'qemu'
            # libvirt will use xend/xm toolstack
            try:
                # blktap2 availability decides between tap2/tap.
                out, err = execute('tap-ctl', 'check', check_exit_code=False)
                if out == 'ok\n':
                    # 4000000 == 4.0.0
                    if hypervisor_version > 4000000:
                        return "tap2"
                    else:
                        return "tap"
                else:
                    LOG.info(_LI("tap-ctl check: %s"), out)
            except OSError as exc:
                if exc.errno == errno.ENOENT:
                    LOG.debug("tap-ctl tool is not installed")
                else:
                    raise
            return "file"
    elif CONF.libvirt.virt_type in ('kvm', 'qemu'):
        return "qemu"
    else:
        # UML doesn't want a driver_name set
        return None
def get_disk_size(path, format=None):
    """Get the (virtual) size of a disk image

    :param path: Path to the disk image
    :param format: the on-disk format of path

    :returns: Size (in bytes) of the given disk image as it would be seen
              by a virtual machine.
    """
    # qemu-img reports the guest-visible (virtual) size, not on-disk usage.
    return int(images.qemu_img_info(path, format).virtual_size)
def get_disk_backing_file(path, basename=True, format=None):
    """Get the backing file of a disk image

    :param path: Path to the disk image
    :param basename: When True (default), return only the final path
                     component of the backing file.
    :param format: the on-disk format of path

    :returns: a path to the image's backing store (None if it has none)
    """
    backing = images.qemu_img_info(path, format).backing_file
    if not backing or not basename:
        # No backing file, or caller wants the full path untouched.
        return backing
    return os.path.basename(backing)
def copy_image(src, dest, host=None, receive=False,
               on_execute=None, on_completion=None,
               compression=True):
    """Copy a disk image to an existing directory

    :param src: Source image
    :param dest: Destination path
    :param host: Remote host (local sparse-aware 'cp' is used when None)
    :param receive: Reverse the rsync direction (pull from host)
    :param on_execute: Callback method to store pid of process in cache
    :param on_completion: Callback method to remove pid of process from cache
    :param compression: Allows to use rsync operation with or without
                        compression
    """
    if not host:
        # We shell out to cp because that will intelligently copy
        # sparse files. I.E. holes will not be written to DEST,
        # rather recreated efficiently. In addition, since
        # coreutils 8.11, holes can be read efficiently too.
        # we add '-r' argument because ploop disks are directories
        execute('cp', '-r', src, dest)
    else:
        # Remote copy: prefix the remote side with "host:" (IPv6-safe).
        if receive:
            src = "%s:%s" % (utils.safe_ip_format(host), src)
        else:
            dest = "%s:%s" % (utils.safe_ip_format(host), dest)
        remote_filesystem_driver = remotefs.RemoteFilesystem()
        remote_filesystem_driver.copy_file(src, dest,
            on_execute=on_execute, on_completion=on_completion,
            compression=compression)
def write_to_file(path, contents, umask=None):
    """Write the given contents to a file.

    :param path: Destination file
    :param contents: Desired contents of the file
    :param umask: Umask to set while creating this file (will be reset
                  afterwards); ``None`` leaves the process umask untouched
    """
    # NOTE: compare against None rather than truthiness so that a umask of
    # 0 (a valid, fully-permissive umask) is honoured instead of ignored.
    if umask is not None:
        saved_umask = os.umask(umask)
    try:
        with open(path, 'w') as f:
            f.write(contents)
    finally:
        if umask is not None:
            os.umask(saved_umask)
def chown(path, owner):
    """Change ownership of file or directory.

    :param path: File or directory whose ownership to change
    :param owner: Desired new owner (given as uid or username)
    """
    # Runs via rootwrap: chown'ing files owned by other users requires
    # elevated privileges.
    execute('chown', owner, path, run_as_root=True)
def update_mtime(path):
    """Touch a file without being the owner.

    :param path: File to bump the mtime on
    """
    try:
        # 'touch -c' updates the timestamp without creating the file if it
        # is missing; run as root so files owned by other users (e.g. the
        # qemu user) can be touched.
        execute('touch', '-c', path, run_as_root=True)
    except processutils.ProcessExecutionError as exc:
        # touch can intermittently fail when launching several instances with
        # the same base image and using shared storage, so log the exception
        # but don't fail. Ideally we'd know if we were on shared storage and
        # would re-raise the error if we are not on shared storage.
        LOG.warning(_LW("Failed to update mtime on path %(path)s. "
                        "Error: %(error)s"),
                    {'path': path, "error": exc})
def _id_map_to_config(id_map):
return "%s:%s:%s" % (id_map.start, id_map.target, id_map.count)
def chown_for_id_maps(path, id_maps):
    """Change ownership of file or directory for an id mapped environment.

    :param path: File or directory whose ownership to change
    :param id_maps: List of type LibvirtConfigGuestIDMap
    """
    uid_maps = [m for m in id_maps
                if isinstance(m, vconfig.LibvirtConfigGuestUIDMap)]
    gid_maps = [m for m in id_maps
                if isinstance(m, vconfig.LibvirtConfigGuestGIDMap)]
    uid_maps_str = ','.join(_id_map_to_config(m) for m in uid_maps)
    gid_maps_str = ','.join(_id_map_to_config(m) for m in gid_maps)
    execute('nova-idmapshift', '-i', '-u', uid_maps_str,
            '-g', gid_maps_str, path, run_as_root=True)
def extract_snapshot(disk_path, source_fmt, out_path, dest_fmt):
    """Extract a snapshot from a disk image.

    Note that nobody should write to the disk image during this operation.

    :param disk_path: Path to disk image
    :param out_path: Desired path of extracted snapshot
    """
    # NOTE(markmc): ISO is just raw to qemu-img; ploop is called
    # 'parallels' there.
    rename = {'iso': 'raw', 'ploop': 'parallels'}
    dest_fmt = rename.get(dest_fmt, dest_fmt)

    cmd = ['qemu-img', 'convert', '-f', source_fmt, '-O', dest_fmt]
    # Conditionally enable compression of snapshots.
    if CONF.libvirt.snapshot_compression and dest_fmt == "qcow2":
        cmd.append('-c')
    cmd.extend([disk_path, out_path])
    execute(*cmd)
def load_file(path):
    """Read and return the entire contents of a file.

    :param path: File to read
    """
    with open(path, 'r') as handle:
        return handle.read()
def file_open(*args, **kwargs):
    """Open file.

    see built-in open() documentation for more details

    Note: The reason this is kept in a separate module is to easily
    be able to provide a stub module that doesn't alter system
    state at all (for unit tests)

    :returns: an open file object
    """
    return open(*args, **kwargs)
def file_delete(path):
    """Delete (unlink) file.

    Note: The reason this is kept in a separate module is to easily
    be able to provide a stub module that doesn't alter system
    state at all (for unit tests)
    """
    # os.remove is an alias of os.unlink.
    return os.remove(path)
def path_exists(path):
    """Returns if path exists.

    Note: The reason this is kept in a separate module is to easily
    be able to provide a stub module that doesn't alter system
    state at all (for unit tests)

    :returns: True if the path exists, False otherwise
    """
    return os.path.exists(path)
def find_disk(virt_dom):
    """Find root device path for instance

    May be file or device.

    :param virt_dom: libvirt domain object for the instance
    :returns: tuple (disk_path, format); format is the on-disk image format
              reported by the domain's <driver> element, or None
    :raises RuntimeError: if no root device path can be determined
    """
    xml_desc = virt_dom.XMLDesc(0)
    domain = etree.fromstring(xml_desc)
    os_type = domain.find('os/type').text
    driver = None
    if CONF.libvirt.virt_type == 'lxc':
        # LXC exposes the rootfs through a <filesystem> element whose
        # source dir ends in '.../rootfs'; the backing disk file lives
        # beside it as 'disk'.
        filesystem = domain.find('devices/filesystem')
        driver = filesystem.find('driver')
        source = filesystem.find('source')
        disk_path = source.get('dir')
        disk_path = disk_path[0:disk_path.rfind('rootfs')]
        disk_path = os.path.join(disk_path, 'disk')
    elif (CONF.libvirt.virt_type == 'parallels' and
          os_type == obj_fields.VMMode.EXE):
        # Parallels containers also use <filesystem>, but point directly
        # at a file.
        filesystem = domain.find('devices/filesystem')
        driver = filesystem.find('driver')
        source = filesystem.find('source')
        disk_path = source.get('file')
    else:
        # Regular VM: the first <disk> element is treated as the root
        # device; it may be a file or a block device.
        disk = domain.find('devices/disk')
        driver = disk.find('driver')
        source = disk.find('source')
        disk_path = source.get('file') or source.get('dev')
        if not disk_path and CONF.libvirt.images_type == 'rbd':
            # RBD-backed disks carry the volume name in 'name' rather
            # than a local path.
            disk_path = source.get('name')
            if disk_path:
                disk_path = 'rbd:' + disk_path
    if not disk_path:
        raise RuntimeError(_("Can't retrieve root device path "
                             "from instance libvirt configuration"))
    if driver is not None:
        format = driver.get('type')
        # This is a legacy quirk of libvirt/xen. Everything else should
        # report the on-disk format in type.
        if format == 'aio':
            format = 'raw'
    else:
        format = None
    return (disk_path, format)
def get_disk_type_from_path(path):
    """Retrieve disk type (raw, qcow2, lvm, ploop) for given file."""
    if path.startswith('/dev'):
        return 'lvm'
    if path.startswith('rbd:'):
        return 'rbd'
    descriptor = os.path.join(path, "DiskDescriptor.xml")
    if os.path.isdir(path) and os.path.exists(descriptor):
        return 'ploop'

    # We can't reliably determine the type from this path
    return None
def get_fs_info(path):
    """Get free/used/total space info for a filesystem.

    :param path: Any dirent on the filesystem
    :returns: A dict containing:

              :free: How much space is free (in bytes)
              :used: How much space is used (in bytes)
              :total: How big the filesystem is (in bytes)
    """
    stats = os.statvfs(path)
    frsize = stats.f_frsize
    return {
        'total': frsize * stats.f_blocks,
        'free': frsize * stats.f_bavail,
        'used': frsize * (stats.f_blocks - stats.f_bfree),
    }
def fetch_image(context, target, image_id):
    """Grab image.

    Fetches *image_id* to the local file *target*, delegating to
    images.fetch_to_raw (which converts the download to raw format).
    """
    images.fetch_to_raw(context, image_id, target)
def fetch_raw_image(context, target, image_id):
    """Grab initrd or kernel image.

    This function does not attempt raw conversion, as these images will
    already be in raw format.

    :param context: request context
    :param target: local path to write the image to
    :param image_id: id of the image to fetch
    """
    images.fetch(context, image_id, target)
def get_instance_path(instance, relative=False):
    """Determine the correct path for instance storage.

    This method determines the directory name for instance storage.

    :param instance: the instance we want a path for
    :param relative: if True, just the relative path is returned
    :returns: a path to store information about that instance
    """
    relative_path = instance.uuid
    if relative:
        return relative_path
    return os.path.join(CONF.instances_path, relative_path)
def get_instance_path_at_destination(instance, migrate_data=None):
    """Get the instance path on destination node while live migration.

    This method determines the directory name for instance storage on
    destination node, while live migration.

    :param instance: the instance we want a path for
    :param migrate_data: if not None, it is a dict which holds data
                         required for live migration without shared
                         storage.
    :returns: a path to store information about that instance
    """
    # NOTE(mikal): this doesn't use libvirt_utils.get_instance_path
    # because we are ensuring that the same instance directory name
    # is used as was at the source.
    relative_path = (migrate_data.instance_relative_path
                     if migrate_data else None)
    if relative_path:
        return os.path.join(CONF.instances_path, relative_path)
    return get_instance_path(instance)
def get_arch(image_meta):
    """Determine the architecture of the guest (or host).

    This method determines the CPU architecture that must be supported by
    the hypervisor. It gets the (guest) arch info from image_meta properties,
    and it will fallback to the nova-compute (host) arch if no architecture
    info is provided in image_meta.

    :param image_meta: the metadata associated with the instance image
    :returns: guest (or host) architecture
    """
    if image_meta:
        guest_arch = image_meta.properties.get('hw_architecture')
        if guest_arch is not None:
            return guest_arch
    # No image hint: fall back to the compute host's architecture.
    return obj_fields.Architecture.from_host()
def is_mounted(mount_path, source=None):
    """Check if the given source is mounted at given destination point."""
    if not os.path.ismount(mount_path):
        return False
    if source is None:
        return True

    # Scan /proc/mounts for an entry whose device and mount point both
    # match what the caller asked for.
    with open('/proc/mounts', 'r') as proc_mounts:
        entries = (line.split() for line in proc_mounts)
        return any(fields[0] == source and fields[1] == mount_path
                   for fields in entries)
def is_valid_hostname(hostname):
    """Return a truthy match object when hostname contains only word
    characters, '-', '.' and ':'; None otherwise."""
    # re.match already anchors at the start, so no leading '^' is needed.
    return re.match(r"[\w\-\.:]+$", hostname)
| OpenSciViz/cloudstack | openstack/src/python/nova-libvirt/backup/utils.py | Python | mit | 18,922 |
import logging
from django.shortcuts import get_object_or_404
from django.views.generic import ListView, DetailView
from django.http import HttpResponsePermanentRedirect
from django.conf import settings
from django.core.urlresolvers import reverse
from readthedocs.builds.models import Build, Version
from readthedocs.builds.filters import BuildFilter
from readthedocs.projects.models import Project
from redis import Redis, ConnectionError
log = logging.getLogger(__name__)
class BuildBase(object):
    """Mixin providing the project-scoped build queryset for build views."""

    model = Build

    def get_queryset(self):
        # Resolve the project from the URL slug, limited to projects the
        # requesting user is allowed to see; 404 otherwise.
        self.project_slug = self.kwargs.get('project_slug', None)
        self.project = get_object_or_404(
            Project.objects.protected(self.request.user),
            slug=self.project_slug
        )
        # Only builds visible to this user for this project.
        queryset = Build.objects.public(user=self.request.user, project=self.project)
        return queryset
class BuildList(BuildBase, ListView):
    """List builds for a project, with filter, active builds and queue size."""

    def get_context_data(self, **kwargs):
        context = super(BuildList, self).get_context_data(**kwargs)
        # NOTE: renamed from ``filter`` so the local variable no longer
        # shadows the ``filter`` builtin; the template context key is
        # unchanged.
        build_filter = BuildFilter(self.request.GET, queryset=self.get_queryset())
        active_builds = self.get_queryset().exclude(state="finished").values('id')
        context['project'] = self.project
        context['filter'] = build_filter
        context['active_builds'] = active_builds
        context['versions'] = Version.objects.public(user=self.request.user, project=self.project)
        try:
            # The celery queue length is a hint about build wait time; a
            # Redis outage must not break the page.
            redis = Redis.from_url(settings.BROKER_URL)
            context['queue_length'] = redis.llen('celery')
        except ConnectionError:
            context['queue_length'] = None
        return context
class BuildDetail(BuildBase, DetailView):
    """Detail view for a single build, looked up via the 'build_pk' URL kwarg."""

    pk_url_kwarg = 'build_pk'

    def get_context_data(self, **kwargs):
        context = super(BuildDetail, self).get_context_data(**kwargs)
        context['project'] = self.project
        return context
# Old build view redirects
def builds_redirect_list(request, project_slug):
    """Permanently (301) redirect the legacy build-list URL to 'builds_project_list'."""
    return HttpResponsePermanentRedirect(reverse('builds_project_list', args=[project_slug]))
def builds_redirect_detail(request, project_slug, pk):
    """Permanently (301) redirect the legacy build-detail URL to 'builds_detail'."""
    return HttpResponsePermanentRedirect(reverse('builds_detail', args=[project_slug, pk]))
| espdev/readthedocs.org | readthedocs/builds/views.py | Python | mit | 2,216 |
#!/usr/bin/env python3
# Copyright (c) 2016-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test NULLDUMMY softfork.
Connect to a single node.
Generate 2 blocks (save the coinbases for later).
Generate COINBASE_MATURITY (CB) more blocks to ensure the coinbases are mature.
[Policy/Consensus] Check that NULLDUMMY compliant transactions are accepted in block CB + 3.
[Policy] Check that non-NULLDUMMY transactions are rejected before activation.
[Consensus] Check that the new NULLDUMMY rules are not enforced on block CB + 4.
[Policy/Consensus] Check that the new NULLDUMMY rules are enforced on block CB + 5.
"""
import time
from test_framework.blocktools import (
COINBASE_MATURITY,
NORMAL_GBT_REQUEST_PARAMS,
add_witness_commitment,
create_block,
create_transaction,
)
from test_framework.messages import CTransaction
from test_framework.script import CScript
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
NULLDUMMY_ERROR = "non-mandatory-script-verify-flag (Dummy CHECKMULTISIG argument must be zero)"
def trueDummy(tx):
    """Replace the (empty) NULLDUMMY element of tx's first scriptSig with
    OP_TRUE (0x51), making the transaction violate NULLDUMMY."""
    rebuilt = []
    for element in CScript(tx.vin[0].scriptSig):
        if rebuilt:
            rebuilt.append(element)
        else:
            # The first element must be the dummy, which is empty.
            assert len(element) == 0
            rebuilt.append(b'\x51')
    tx.vin[0].scriptSig = CScript(rebuilt)
    tx.rehash()
class NULLDUMMYTest(BitcoinTestFramework):
    """Exercise NULLDUMMY policy/consensus around segwit activation.

    Tracks the chain tip manually (lastblockhash/height/time) so that
    block_submit() can build blocks deterministically.
    """

    def set_test_params(self):
        self.num_nodes = 1
        self.setup_clean_chain = True
        # This script tests NULLDUMMY activation, which is part of the 'segwit' deployment, so we go through
        # normal segwit activation here (and don't use the default always-on behaviour).
        self.extra_args = [[
            f'-segwitheight={COINBASE_MATURITY + 5}',
            '-addresstype=legacy',
        ]]

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def run_test(self):
        # Set up a watch-only multisig wallet plus the addresses used below.
        self.nodes[0].createwallet(wallet_name='wmulti', disable_private_keys=True)
        wmulti = self.nodes[0].get_wallet_rpc('wmulti')
        w0 = self.nodes[0].get_wallet_rpc(self.default_wallet_name)
        self.address = w0.getnewaddress()
        self.pubkey = w0.getaddressinfo(self.address)['pubkey']
        self.ms_address = wmulti.addmultisigaddress(1, [self.pubkey])['address']
        self.wit_address = w0.getnewaddress(address_type='p2sh-segwit')
        self.wit_ms_address = wmulti.addmultisigaddress(1, [self.pubkey], '', 'p2sh-segwit')['address']
        if not self.options.descriptors:
            # Legacy wallets need to import these so that they are watched by the wallet. This is unnecessary (and does not need to be tested) for descriptor wallets
            wmulti.importaddress(self.ms_address)
            wmulti.importaddress(self.wit_ms_address)

        self.coinbase_blocks = self.nodes[0].generate(2)  # block height = 2
        coinbase_txid = []
        for i in self.coinbase_blocks:
            coinbase_txid.append(self.nodes[0].getblock(i)['tx'][0])
        self.nodes[0].generate(COINBASE_MATURITY)  # block height = COINBASE_MATURITY + 2
        self.lastblockhash = self.nodes[0].getbestblockhash()
        self.lastblockheight = COINBASE_MATURITY + 2
        self.lastblocktime = int(time.time()) + self.lastblockheight

        self.log.info(f"Test 1: NULLDUMMY compliant base transactions should be accepted to mempool and mined before activation [{COINBASE_MATURITY + 3}]")
        test1txs = [create_transaction(self.nodes[0], coinbase_txid[0], self.ms_address, amount=49)]
        txid1 = self.nodes[0].sendrawtransaction(test1txs[0].serialize_with_witness().hex(), 0)
        test1txs.append(create_transaction(self.nodes[0], txid1, self.ms_address, amount=48))
        txid2 = self.nodes[0].sendrawtransaction(test1txs[1].serialize_with_witness().hex(), 0)
        test1txs.append(create_transaction(self.nodes[0], coinbase_txid[1], self.wit_ms_address, amount=49))
        txid3 = self.nodes[0].sendrawtransaction(test1txs[2].serialize_with_witness().hex(), 0)
        self.block_submit(self.nodes[0], test1txs, False, True)

        self.log.info("Test 2: Non-NULLDUMMY base multisig transaction should not be accepted to mempool before activation")
        test2tx = create_transaction(self.nodes[0], txid2, self.ms_address, amount=47)
        trueDummy(test2tx)
        assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, test2tx.serialize_with_witness().hex(), 0)

        self.log.info(f"Test 3: Non-NULLDUMMY base transactions should be accepted in a block before activation [{COINBASE_MATURITY + 4}]")
        self.block_submit(self.nodes[0], [test2tx], False, True)

        self.log.info("Test 4: Non-NULLDUMMY base multisig transaction is invalid after activation")
        test4tx = create_transaction(self.nodes[0], test2tx.hash, self.address, amount=46)
        # Keep a NULLDUMMY-compliant copy for Test 6 before mutating.
        test6txs = [CTransaction(test4tx)]
        trueDummy(test4tx)
        assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, test4tx.serialize_with_witness().hex(), 0)
        self.block_submit(self.nodes[0], [test4tx])

        self.log.info("Test 5: Non-NULLDUMMY P2WSH multisig transaction invalid after activation")
        test5tx = create_transaction(self.nodes[0], txid3, self.wit_address, amount=48)
        test6txs.append(CTransaction(test5tx))
        # Corrupt the witness dummy element instead of the scriptSig.
        test5tx.wit.vtxinwit[0].scriptWitness.stack[0] = b'\x01'
        assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, test5tx.serialize_with_witness().hex(), 0)
        self.block_submit(self.nodes[0], [test5tx], True)

        self.log.info(f"Test 6: NULLDUMMY compliant base/witness transactions should be accepted to mempool and in block after activation [{COINBASE_MATURITY + 5}]")
        for i in test6txs:
            self.nodes[0].sendrawtransaction(i.serialize_with_witness().hex(), 0)
        self.block_submit(self.nodes[0], test6txs, True, True)

    def block_submit(self, node, txs, witness=False, accept=False):
        """Build and submit a block containing *txs* on the tracked tip.

        :param witness: add a witness commitment to the block
        :param accept: whether the node is expected to accept the block;
                       the tracked tip is advanced only on acceptance
        """
        tmpl = node.getblocktemplate(NORMAL_GBT_REQUEST_PARAMS)
        assert_equal(tmpl['previousblockhash'], self.lastblockhash)
        assert_equal(tmpl['height'], self.lastblockheight + 1)
        block = create_block(tmpl=tmpl, ntime=self.lastblocktime + 1)
        for tx in txs:
            tx.rehash()
            block.vtx.append(tx)
        block.hashMerkleRoot = block.calc_merkle_root()
        witness and add_witness_commitment(block)
        block.rehash()
        block.solve()
        assert_equal(None if accept else 'block-validation-failed', node.submitblock(block.serialize().hex()))
        if (accept):
            assert_equal(node.getbestblockhash(), block.hash)
            self.lastblockhash = block.hash
            self.lastblocktime += 1
            self.lastblockheight += 1
        else:
            assert_equal(node.getbestblockhash(), self.lastblockhash)
| JeremyRubin/bitcoin | test/functional/feature_nulldummy.py | Python | mit | 7,178 |
from ... import types as sqltypes
class JSON(sqltypes.JSON):
    """SQLite JSON type.

    SQLite supports JSON as of version 3.9 through its JSON1_ extension. Note
    that JSON1_ is a
    `loadable extension <https://www.sqlite.org/loadext.html>`_ and as such
    may not be available, or may require run-time loading.

    The :class:`_sqlite.JSON` type supports persistence of JSON values
    as well as the core index operations provided by :class:`_types.JSON`
    datatype, by adapting the operations to render the ``JSON_EXTRACT``
    function wrapped in the ``JSON_QUOTE`` function at the database level.
    Extracted values are quoted in order to ensure that the results are
    always JSON string values.

    .. versionadded:: 1.3

    .. seealso::

        JSON1_

    .. _JSON1: https://www.sqlite.org/json1.html

    """
# Note: these objects currently match exactly those of MySQL, however since
# these are not generalizable to all JSON implementations, remain separately
# implemented for each dialect.
class _FormatTypeMixin(object):
def _format_value(self, value):
raise NotImplementedError()
def bind_processor(self, dialect):
super_proc = self.string_bind_processor(dialect)
def process(value):
value = self._format_value(value)
if super_proc:
value = super_proc(value)
return value
return process
def literal_processor(self, dialect):
super_proc = self.string_literal_processor(dialect)
def process(value):
value = self._format_value(value)
if super_proc:
value = super_proc(value)
return value
return process
class JSONIndexType(_FormatTypeMixin, sqltypes.JSON.JSONIndexType):
    """Single-element JSON index: ints become array paths, everything
    else a quoted object key."""

    def _format_value(self, value):
        if isinstance(value, int):
            return "$[%s]" % value
        return '$."%s"' % value
class JSONPathType(_FormatTypeMixin, sqltypes.JSON.JSONPathType):
    """Multi-element JSON path: joins int (array) and string (key)
    components into a single '$...' path expression."""

    def _format_value(self, value):
        tokens = ["$"]
        for elem in value:
            if isinstance(elem, int):
                tokens.append("[%s]" % elem)
            else:
                tokens.append('."%s"' % elem)
        return "".join(tokens)
| gltn/stdm | stdm/third_party/sqlalchemy/dialects/sqlite/json.py | Python | gpl-2.0 | 2,292 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007 Johann Prieur <johann.prieur@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
from papyon.service.AddressBook.scenario.base import BaseScenario
from papyon.service.AddressBook.scenario.base import Scenario
from papyon.service.description.AB.constants import ContactEmailType
from papyon.profile import ContactType, NetworkID
__all__ = ['MessengerContactAddScenario']
class MessengerContactAddScenario(BaseScenario):
    def __init__(self, ab, callback, errback,
                 account='',
                 network_id=NetworkID.MSN,
                 contact_type=ContactType.REGULAR,
                 contact_info=None,
                 invite_display_name='',
                 invite_message=''):
        """Adds a messenger contact and updates the address book.

            @param ab: the address book service
            @param callback: tuple(callable, *args)
            @param errback: tuple(callable, *args)"""
        BaseScenario.__init__(self, Scenario.CONTACT_SAVE, callback, errback)
        self._ab = ab
        self.account = account
        self.network_id = network_id
        self.contact_type = contact_type
        # NOTE: the default used to be a shared mutable dict ({}). Since
        # execute() writes into self.contact_info, every scenario created
        # with the default would have shared (and polluted) one dict object
        # across instances; use a fresh dict per instance instead.
        self.contact_info = {} if contact_info is None else contact_info
        self.invite_display_name = invite_display_name
        self.invite_message = invite_message
        self.auto_manage_allow_list = True

    def execute(self):
        """Build the contact/invite payloads and issue the ContactAdd call."""
        invite_info = { 'display_name' : self.invite_display_name,
                        'invite_message' : self.invite_message }

        if self.network_id == NetworkID.MSN:
            self.contact_info['passport_name'] = self.account
            self.contact_info['contact_type'] = self.contact_type
            self.contact_info['is_messenger_user'] = True
        elif self.network_id == NetworkID.EXTERNAL:
            self.contact_info.setdefault('email', {})[ContactEmailType.EXTERNAL] = self.account
            self.contact_info['capability'] = self.network_id
        else:
            raise NotImplementedError("Network ID '%s' is not implemented" %
                    self.network_id)

        self._ab.ContactAdd(self._callback,
                            self._errback,
                            self._scenario,
                            self.contact_info,
                            invite_info,
                            self.auto_manage_allow_list)
| freedesktop-unofficial-mirror/papyon | papyon/service/AddressBook/scenario/contacts/messenger_contact_add.py | Python | gpl-2.0 | 3,036 |
from mpl_toolkits.basemap import Basemap, shiftgrid, maskoceans, interp
import numpy as np
import matplotlib.pyplot as plt
# example showing how to mask out 'wet' areas on a contour or pcolor plot.
topodatin = np.loadtxt('etopo20data.gz')
lonsin = np.loadtxt('etopo20lons.gz')
latsin = np.loadtxt('etopo20lats.gz')
# shift data so lons go from -180 to 180 instead of 20 to 380.
topoin,lons1 = shiftgrid(180.,topodatin,lonsin,start=False)
lats1 = latsin
fig=plt.figure()
# setup basemap: Lambert conformal over North America.
m=Basemap(resolution='l',projection='lcc',lon_0=-100,lat_0=40,width=8.e6,height=6.e6)
lons, lats = np.meshgrid(lons1,lats1)
# project lon/lat grid to map coordinates.
x, y = m(lons, lats)
# interpolate land/sea mask to topo grid, mask ocean values.
# output may look 'blocky' near coastlines, since data is at much
# lower resolution than land/sea mask.
topo = maskoceans(lons, lats, topoin)
# make contour plot (ocean values will be masked)
CS=m.contourf(x,y,topo,np.arange(-300,3001,50),cmap=plt.cm.jet,extend='both')
#im=m.pcolormesh(x,y,topo,cmap=plt.cm.jet,vmin=-300,vmax=3000)
# draw coastlines.
m.drawcoastlines()
plt.title('ETOPO data with marine areas masked (original grid)')
# Second figure: same plot on a finer grid so the ocean mask follows the
# coastlines more closely.
fig=plt.figure()
# interpolate topo data to higher resolution grid (to better match
# the land/sea mask). Output looks less 'blocky' near coastlines.
nlats = 3*topoin.shape[0]
nlons = 3*topoin.shape[1]
lons = np.linspace(-180,180,nlons)
lats = np.linspace(-90,90,nlats)
lons, lats = np.meshgrid(lons, lats)
x, y = m(lons, lats)
topo = interp(topoin,lons1,lats1,lons,lats,order=1)
# interpolate land/sea mask to topo grid, mask ocean values.
topo = maskoceans(lons, lats, topo)
# make contour plot (ocean values will be masked)
CS=m.contourf(x,y,topo,np.arange(-300,3001,50),cmap=plt.cm.jet,extend='both')
#im=m.pcolormesh(x,y,topo,cmap=plt.cm.jet,vmin=-300,vmax=3000)
# draw coastlines.
m.drawcoastlines()
plt.title('ETOPO data with marine areas masked (data on finer grid)')
plt.show()
| jenshnielsen/basemap | examples/maskoceans.py | Python | gpl-2.0 | 1,922 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
***************************************************************************
generate_test_mask_image.py
---------------------
Date : February 2015
Copyright : (C) 2015 by Nyall Dawson
Email : nyall dot dawson at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Nyall Dawson'
__date__ = 'February 2015'
__copyright__ = '(C) 2015, Nyall Dawson'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
# Generates (or updates) a unit test image mask, which is used to specify whether
# a pixel in the control image should be checked (black pixel in mask) or not (white
# pixel in mask). For non black or white pixels, the pixels lightness is used to
# specify a maximum delta for each color component
import os
import sys
import argparse
from PyQt5.QtGui import QImage, QColor, qRed, qBlue, qGreen, qAlpha, qRgb
import struct
import urllib.request, urllib.error, urllib.parse
import glob
def error(msg):
    """Print *msg* and abort the script with exit status 1."""
    print(msg)
    # sys.exit(1) is equivalent to raising SystemExit(1).
    raise SystemExit(1)
def colorDiff(c1, c2):
    """Return the largest per-channel (R, G, B, A) difference between two
    packed QRgb values."""
    channel_diffs = (
        abs(qRed(c1) - qRed(c2)),
        abs(qGreen(c1) - qGreen(c2)),
        abs(qBlue(c1) - qBlue(c2)),
        abs(qAlpha(c1) - qAlpha(c2)),
    )
    return max(channel_diffs)
def imageFromPath(path):
    """Load a QImage from either a remote URL (http/https/file) or a local
    filesystem path.

    Returns a null QImage if the path could not be loaded.
    """
    if (path[:7] == 'http://' or path[:7] == 'file://' or path[:8] == 'https://'):
        #fetch remote image
        data = urllib.request.urlopen(path).read()
        image = QImage()
        image.loadFromData(data)
    else:
        image = QImage(path)
    return image
def getControlImagePath(path):
    """Resolve *path* to a concrete control image file.

    If *path* is already a file it is returned unchanged; otherwise the
    control_images tree is searched for a unique folder whose name contains
    *path*, which must hold exactly one non-mask png.
    """
    if os.path.isfile(path):
        return path

    # Not a direct file: search the control image tree for a folder match.
    script_folder = os.path.dirname(os.path.realpath(sys.argv[0]))
    control_images_folder = os.path.join(script_folder, '../tests/testdata/control_images')
    matches = [folder for folder, _dirs, _files in os.walk(control_images_folder)
               if path in folder]
    if len(matches) > 1:
        error('Found multiple matching control images for {}'.format(path))
    elif not matches:
        error('No matching control images found for {}'.format(path))

    # Exactly one candidate folder: it must contain a single non-mask png.
    candidates = [i for i in glob.glob(os.path.join(matches[0], '*.png'))
                  if not i[-9:] == '_mask.png']
    if len(candidates) > 1:
        error('Found multiple matching control images for {}'.format(path))
    elif not candidates:
        error('No matching control images found for {}'.format(path))

    print('Found matching control image: {}'.format(candidates[0]))
    return candidates[0]
def updateMask(control_image_path, rendered_image_path, mask_image_path):
    """Compare a rendered image to its control image and update the mask.

    For every pixel whose per-channel difference exceeds the mask's current
    tolerance (stored as the pixel's red value), the mask pixel is raised to
    that difference. A tolerance of 255 means "ignore this pixel". The mask
    is only written back when at least one pixel changed.
    """
    control_image = imageFromPath(control_image_path)
    if not control_image:
        error('Could not read control image {}'.format(control_image_path))

    rendered_image = imageFromPath(rendered_image_path)
    if not rendered_image:
        error('Could not read rendered image {}'.format(rendered_image_path))

    # Size mismatches are reported but tolerated: only the overlapping
    # region is compared below.
    if not rendered_image.width() == control_image.width() or not rendered_image.height() == control_image.height():
        print(('Size mismatch - control image is {}x{}, rendered image is {}x{}'.format(control_image.width(),
                                                                                        control_image.height(),
                                                                                        rendered_image.width(),
                                                                                        rendered_image.height())))

    max_width = min(rendered_image.width(), control_image.width())
    max_height = min(rendered_image.height(), control_image.height())

    #read current mask, if it exist
    mask_image = imageFromPath(mask_image_path)
    if mask_image.isNull():
        print('Mask image does not exist, creating {}'.format(mask_image_path))
        mask_image = QImage(control_image.width(), control_image.height(), QImage.Format_ARGB32)
        mask_image.fill(QColor(0, 0, 0))

    #loop through pixels in rendered image and compare
    mismatch_count = 0
    linebytes = max_width * 4
    for y in range(max_height):
        # Raw ARGB32 scanlines, 4 bytes per pixel, unpacked with struct.
        control_scanline = control_image.constScanLine(y).asstring(linebytes)
        rendered_scanline = rendered_image.constScanLine(y).asstring(linebytes)
        mask_scanline = mask_image.scanLine(y).asstring(linebytes)
        for x in range(max_width):
            currentTolerance = qRed(struct.unpack('I', mask_scanline[x * 4:x * 4 + 4])[0])
            if currentTolerance == 255:
                #ignore pixel
                continue
            expected_rgb = struct.unpack('I', control_scanline[x * 4:x * 4 + 4])[0]
            rendered_rgb = struct.unpack('I', rendered_scanline[x * 4:x * 4 + 4])[0]
            difference = colorDiff(expected_rgb, rendered_rgb)
            if difference > currentTolerance:
                #update mask image
                mask_image.setPixel(x, y, qRgb(difference, difference, difference))
                mismatch_count += 1

    if mismatch_count:
        #update mask
        mask_image.save(mask_image_path, "png")
        print('Updated {} pixels in {}'.format(mismatch_count, mask_image_path))
    else:
        print('No mismatches in {}'.format(mask_image_path))
# Command-line interface: control image (or a test-name fragment to search
# for), rendered image, and an optional mask path.
parser = argparse.ArgumentParser()  # OptionParser("usage: %prog control_image rendered_image mask_image")
parser.add_argument('control_image')
parser.add_argument('rendered_image')
parser.add_argument('mask_image', nargs='?', default=None)
args = parser.parse_args()

# 'control_image' may be a bare test name; resolve it to a real file first.
args.control_image = getControlImagePath(args.control_image)

# Default the mask path to '<control image>_mask.png'.
if not args.mask_image:
    args.mask_image = args.control_image[:-4] + '_mask.png'

updateMask(args.control_image, args.rendered_image, args.mask_image)
| wbyne/QGIS | scripts/generate_test_mask_image.py | Python | gpl-2.0 | 6,625 |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import bpy
from bpy.types import Menu, Panel, UIList
from rna_prop_ui import PropertyPanel
class MESH_MT_vertex_group_specials(Menu):
    """Specials dropdown menu for the mesh vertex-group list."""
    bl_label = "Vertex Group Specials"
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}

    def draw(self, context):
        layout = self.layout

        # Sorting / duplication operators.
        layout.operator("object.vertex_group_sort", icon='SORTALPHA', text="Sort by Name").sort_type = 'NAME'
        layout.operator("object.vertex_group_sort", icon='ARMATURE_DATA', text="Sort by Bone Hierarchy").sort_type = 'BONE_HIERARCHY'
        layout.operator("object.vertex_group_copy", icon='COPY_ID')
        layout.operator("object.vertex_group_copy_to_linked", icon='LINK_AREA')
        layout.operator("object.vertex_group_copy_to_selected", icon='LINK_AREA')
        layout.operator("object.vertex_group_mirror", icon='ARROW_LEFTRIGHT').use_topology = False
        layout.operator("object.vertex_group_mirror", text="Mirror Vertex Group (Topology)", icon='ARROW_LEFTRIGHT').use_topology = True
        # Bulk removal operators.
        layout.operator("object.vertex_group_remove_from", icon='X', text="Remove from All Groups").use_all_groups = True
        layout.operator("object.vertex_group_remove_from", icon='X', text="Clear Active Group").use_all_verts = True
        layout.operator("object.vertex_group_remove", icon='X', text="Delete All Groups").all = True

        layout.separator()

        # Weight-lock operators.
        layout.operator("object.vertex_group_lock", icon='LOCKED', text="Lock All").action = 'LOCK'
        layout.operator("object.vertex_group_lock", icon='UNLOCKED', text="UnLock All").action = 'UNLOCK'
        layout.operator("object.vertex_group_lock", icon='LOCKED', text="Lock Invert All").action = 'INVERT'
class MESH_MT_shape_key_specials(Menu):
    """Specials dropdown menu for the shape-key list."""
    bl_label = "Shape Key Specials"
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}

    def draw(self, context):
        layout = self.layout

        layout.operator("object.shape_key_transfer", icon='COPY_ID')  # icon is not ideal
        layout.operator("object.join_shapes", icon='COPY_ID')  # icon is not ideal
        layout.operator("object.shape_key_mirror", icon='ARROW_LEFTRIGHT').use_topology = False
        layout.operator("object.shape_key_mirror", text="Mirror Shape Key (Topology)", icon='ARROW_LEFTRIGHT').use_topology = True
        layout.operator("object.shape_key_add", icon='ZOOMIN', text="New Shape From Mix").from_mix = True
        layout.operator("object.shape_key_remove", icon='X', text="Delete All Shapes").all = True
        layout.operator("object.shape_key_move", icon='TRIA_UP_BAR', text="Move To Top").type = 'TOP'
        layout.operator("object.shape_key_move", icon='TRIA_DOWN_BAR', text="Move To Bottom").type = 'BOTTOM'
class MESH_UL_vgroups(UIList):
    """List template drawing one row per vertex group."""

    def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):
        # assert(isinstance(item, bpy.types.VertexGroup))
        vgroup = item
        if self.layout_type in {'DEFAULT', 'COMPACT'}:
            # Editable group name plus a per-group weight-lock toggle.
            layout.prop(vgroup, "name", text="", emboss=False, icon_value=icon)
            icon = 'LOCKED' if vgroup.lock_weight else 'UNLOCKED'
            layout.prop(vgroup, "lock_weight", text="", icon=icon, emboss=False)
        elif self.layout_type == 'GRID':
            layout.alignment = 'CENTER'
            layout.label(text="", icon_value=icon)
class MESH_UL_shape_keys(UIList):
    """List template drawing one row per shape key block."""

    def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):
        # assert(isinstance(item, bpy.types.ShapeKey))
        obj = active_data
        # key = data
        key_block = item
        if self.layout_type in {'DEFAULT', 'COMPACT'}:
            split = layout.split(0.66, False)
            split.prop(key_block, "name", text="", emboss=False, icon_value=icon)
            row = split.row(align=True)
            # Grey out the value column when the key is muted, or while in
            # edit mode without shape-key edit mode enabled on a mesh.
            if key_block.mute or (obj.mode == 'EDIT' and not (obj.use_shape_key_edit_mode and obj.type == 'MESH')):
                row.active = False
            # Absolute keys expose a frame; relative keys expose a value
            # (except the basis key at index 0, which gets an empty label).
            if not item.id_data.use_relative:
                row.prop(key_block, "frame", text="", emboss=False)
            elif index > 0:
                row.prop(key_block, "value", text="", emboss=False)
            else:
                row.label(text="")
            row.prop(key_block, "mute", text="", emboss=False)
        elif self.layout_type == 'GRID':
            layout.alignment = 'CENTER'
            layout.label(text="", icon_value=icon)
class MESH_UL_uvmaps_vcols(UIList):
    """Shared list template for UV maps and vertex color layers."""

    def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):
        # assert(isinstance(item, (bpy.types.MeshTexturePolyLayer, bpy.types.MeshLoopColorLayer)))
        if self.layout_type in {'DEFAULT', 'COMPACT'}:
            layout.prop(item, "name", text="", emboss=False, icon_value=icon)
            # Toggle which layer is used at render time.
            icon = 'RESTRICT_RENDER_OFF' if item.active_render else 'RESTRICT_RENDER_ON'
            layout.prop(item, "active_render", text="", icon=icon, emboss=False)
        elif self.layout_type == 'GRID':
            layout.alignment = 'CENTER'
            layout.label(text="", icon_value=icon)
class MeshButtonsPanel:
    """Mixin placing mesh-data panels in the Properties editor's Data tab."""

    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = "data"

    @classmethod
    def poll(cls, context):
        """Only draw the panel for mesh data under a compatible engine."""
        engine_ok = context.scene.render.engine in cls.COMPAT_ENGINES
        return context.mesh and engine_ok
class DATA_PT_context_mesh(MeshButtonsPanel, Panel):
    """Header-less panel exposing the mesh datablock selector."""

    bl_label = ""
    bl_options = {'HIDE_HEADER'}
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}

    def draw(self, context):
        """Show the datablock chooser for the active object, or the
        pinned mesh when no object is active."""
        layout = self.layout
        obj = context.object
        if obj:
            layout.template_ID(obj, "data")
            return
        if context.mesh:
            layout.template_ID(context.space_data, "pin_id")
class DATA_PT_normals(MeshButtonsPanel, Panel):
    """Panel with normal-related mesh settings."""
    bl_label = "Normals"
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}

    def draw(self, context):
        layout = self.layout
        mesh = context.mesh
        split = layout.split()
        col = split.column()
        col.prop(mesh, "use_auto_smooth")
        sub = col.column()
        # The angle is only meaningful when auto smooth is enabled and no
        # custom split normals override it.
        sub.active = mesh.use_auto_smooth and not mesh.has_custom_normals
        sub.prop(mesh, "auto_smooth_angle", text="Angle")
        split.prop(mesh, "show_double_sided")
class DATA_PT_texture_space(MeshButtonsPanel, Panel):
    """Panel with generated texture-space settings for the mesh."""
    bl_label = "Texture Space"
    bl_options = {'DEFAULT_CLOSED'}
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}

    def draw(self, context):
        layout = self.layout
        mesh = context.mesh
        layout.prop(mesh, "texture_mesh")
        layout.separator()
        # Manual location/size only apply when auto texture space is off.
        layout.prop(mesh, "use_auto_texspace")
        row = layout.row()
        row.column().prop(mesh, "texspace_location", text="Location")
        row.column().prop(mesh, "texspace_size", text="Size")
class DATA_PT_vertex_groups(MeshButtonsPanel, Panel):
    """Panel listing and editing the object's vertex groups."""
    bl_label = "Vertex Groups"
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}

    @classmethod
    def poll(cls, context):
        # Unlike the mixin poll, vertex groups also exist on lattices.
        engine = context.scene.render.engine
        obj = context.object
        return (obj and obj.type in {'MESH', 'LATTICE'} and (engine in cls.COMPAT_ENGINES))

    def draw(self, context):
        layout = self.layout
        ob = context.object
        group = ob.vertex_groups.active
        # Taller list (with move buttons) only when a group is active.
        rows = 2
        if group:
            rows = 4
        row = layout.row()
        row.template_list("MESH_UL_vgroups", "", ob, "vertex_groups", ob.vertex_groups, "active_index", rows=rows)
        col = row.column(align=True)
        col.operator("object.vertex_group_add", icon='ZOOMIN', text="")
        col.operator("object.vertex_group_remove", icon='ZOOMOUT', text="").all = False
        col.menu("MESH_MT_vertex_group_specials", icon='DOWNARROW_HLT', text="")
        if group:
            col.separator()
            col.operator("object.vertex_group_move", icon='TRIA_UP', text="").direction = 'UP'
            col.operator("object.vertex_group_move", icon='TRIA_DOWN', text="").direction = 'DOWN'
        # Assign/select controls only make sense in edit mode, or in weight
        # paint mode with vertex selection masking enabled on a mesh.
        if ob.vertex_groups and (ob.mode == 'EDIT' or (ob.mode == 'WEIGHT_PAINT' and ob.type == 'MESH' and ob.data.use_paint_mask_vertex)):
            row = layout.row()
            sub = row.row(align=True)
            sub.operator("object.vertex_group_assign", text="Assign")
            sub.operator("object.vertex_group_remove_from", text="Remove")
            sub = row.row(align=True)
            sub.operator("object.vertex_group_select", text="Select")
            sub.operator("object.vertex_group_deselect", text="Deselect")
            layout.prop(context.tool_settings, "vertex_group_weight", text="Weight")
class DATA_PT_shape_keys(MeshButtonsPanel, Panel):
    """Panel listing and editing the object's shape keys."""
    bl_label = "Shape Keys"
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}

    @classmethod
    def poll(cls, context):
        # Shape keys exist on more object types than plain meshes.
        engine = context.scene.render.engine
        obj = context.object
        return (obj and obj.type in {'MESH', 'LATTICE', 'CURVE', 'SURFACE'} and (engine in cls.COMPAT_ENGINES))

    def draw(self, context):
        layout = self.layout
        ob = context.object
        key = ob.data.shape_keys
        kb = ob.active_shape_key
        # Values are editable outside edit mode, or in edit mode on meshes
        # with shape-key edit mode enabled (unless "show only" pins them).
        enable_edit = ob.mode != 'EDIT'
        enable_edit_value = False
        if ob.show_only_shape_key is False:
            if enable_edit or (ob.type == 'MESH' and ob.use_shape_key_edit_mode):
                enable_edit_value = True
        row = layout.row()
        # Taller list (with move buttons) only when a key is active.
        rows = 2
        if kb:
            rows = 4
        row.template_list("MESH_UL_shape_keys", "", key, "key_blocks", ob, "active_shape_key_index", rows=rows)
        col = row.column()
        sub = col.column(align=True)
        sub.operator("object.shape_key_add", icon='ZOOMIN', text="").from_mix = False
        sub.operator("object.shape_key_remove", icon='ZOOMOUT', text="").all = False
        sub.menu("MESH_MT_shape_key_specials", icon='DOWNARROW_HLT', text="")
        if kb:
            col.separator()
            sub = col.column(align=True)
            sub.operator("object.shape_key_move", icon='TRIA_UP', text="").type = 'UP'
            sub.operator("object.shape_key_move", icon='TRIA_DOWN', text="").type = 'DOWN'
            split = layout.split(percentage=0.4)
            row = split.row()
            row.enabled = enable_edit
            row.prop(key, "use_relative")
            row = split.row()
            row.alignment = 'RIGHT'
            sub = row.row(align=True)
            sub.label()  # XXX, for alignment only
            subsub = sub.row(align=True)
            subsub.active = enable_edit_value
            subsub.prop(ob, "show_only_shape_key", text="")
            sub.prop(ob, "use_shape_key_edit_mode", text="")
            sub = row.row()
            # Relative keys can be cleared; absolute keys can be retimed.
            if key.use_relative:
                sub.operator("object.shape_key_clear", icon='X', text="")
            else:
                sub.operator("object.shape_key_retime", icon='RECOVER_LAST', text="")
            if key.use_relative:
                # The basis key (index 0) has no value/range/blend settings.
                if ob.active_shape_key_index != 0:
                    row = layout.row()
                    row.active = enable_edit_value
                    row.prop(kb, "value")
                    split = layout.split()
                    col = split.column(align=True)
                    col.active = enable_edit_value
                    col.label(text="Range:")
                    col.prop(kb, "slider_min", text="Min")
                    col.prop(kb, "slider_max", text="Max")
                    col = split.column(align=True)
                    col.active = enable_edit_value
                    col.label(text="Blend:")
                    col.prop_search(kb, "vertex_group", ob, "vertex_groups", text="")
                    col.prop_search(kb, "relative_key", key, "key_blocks", text="")
            else:
                # Absolute shape keys: interpolation plus evaluation time.
                layout.prop(kb, "interpolation")
                row = layout.column()
                row.active = enable_edit_value
                row.prop(key, "eval_time")
class DATA_PT_uv_texture(MeshButtonsPanel, Panel):
    """Panel listing the mesh's UV maps."""
    bl_label = "UV Maps"
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}

    def draw(self, context):
        layout = self.layout
        me = context.mesh
        row = layout.row()
        col = row.column()
        col.template_list("MESH_UL_uvmaps_vcols", "uvmaps", me, "uv_textures", me.uv_textures, "active_index", rows=1)
        col = row.column(align=True)
        col.operator("mesh.uv_texture_add", icon='ZOOMIN', text="")
        col.operator("mesh.uv_texture_remove", icon='ZOOMOUT', text="")
class DATA_PT_vertex_colors(MeshButtonsPanel, Panel):
    """Panel listing the mesh's vertex color layers."""
    bl_label = "Vertex Colors"
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}

    def draw(self, context):
        layout = self.layout
        me = context.mesh
        row = layout.row()
        col = row.column()
        col.template_list("MESH_UL_uvmaps_vcols", "vcols", me, "vertex_colors", me.vertex_colors, "active_index", rows=1)
        col = row.column(align=True)
        col.operator("mesh.vertex_color_add", icon='ZOOMIN', text="")
        col.operator("mesh.vertex_color_remove", icon='ZOOMOUT', text="")
class DATA_PT_customdata(MeshButtonsPanel, Panel):
    """Panel for adding/clearing optional mesh custom-data layers."""
    bl_label = "Geometry Data"
    bl_options = {'DEFAULT_CLOSED'}
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}

    def draw(self, context):
        layout = self.layout
        obj = context.object
        me = context.mesh
        col = layout.column()
        col.operator("mesh.customdata_mask_clear", icon='X')
        col.operator("mesh.customdata_skin_clear", icon='X')
        # Offer add or clear depending on whether split normals exist.
        if me.has_custom_normals:
            col.operator("mesh.customdata_custom_splitnormals_clear", icon='X')
        else:
            col.operator("mesh.customdata_custom_splitnormals_add", icon='ZOOMIN')
        col = layout.column()
        # These toggles are locked while the object is in edit mode.
        col.enabled = (obj.mode != 'EDIT')
        col.prop(me, "use_customdata_vertex_bevel")
        col.prop(me, "use_customdata_edge_bevel")
        col.prop(me, "use_customdata_edge_crease")
class DATA_PT_custom_props_mesh(MeshButtonsPanel, PropertyPanel, Panel):
    """Standard custom-properties panel bound to the mesh datablock."""
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
    _context_path = "object.data"
    _property_type = bpy.types.Mesh
# Allow re-registering this UI module when run from Blender's text editor.
if __name__ == "__main__": # only for live edit.
    bpy.utils.register_module(__name__)
| Microvellum/Fluid-Designer | win64-vc/2.78/scripts/startup/bl_ui/properties_data_mesh.py | Python | gpl-3.0 | 15,183 |
#!/usr/bin/python
####
# 06/2010 Nic Wolfe <nic@wolfeden.ca>
# 02/2006 Will Holcomb <wholcomb@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
import urllib
import urllib2
import mimetools, mimetypes
import os, sys
# Controls how sequences are uncoded. If true, elements may be given multiple values by
# assigning a sequence.
doseq = 1
class MultipartPostHandler(urllib2.BaseHandler):
    """urllib2 handler that encodes dict POST data as multipart/form-data
    when any value is a file (or a (name, data) tuple); plain values fall
    back to ordinary urlencoding. Python 2 only.
    """
    # Runs before the default HTTPHandler so the body/headers can be
    # rewritten first.
    handler_order = urllib2.HTTPHandler.handler_order - 10 # needs to run first

    def http_request(self, request):
        # Split the mapping into file-like values and plain variables.
        data = request.get_data()
        if data is not None and type(data) != str:
            v_files = []
            v_vars = []
            try:
                for(key, value) in data.items():
                    if type(value) in (file, list, tuple):
                        v_files.append((key, value))
                    else:
                        v_vars.append((key, value))
            except TypeError:
                systype, value, traceback = sys.exc_info()
                raise TypeError, "not a valid non-string sequence or mapping object", traceback
            if len(v_files) == 0:
                # No uploads: ordinary form encoding.
                data = urllib.urlencode(v_vars, doseq)
            else:
                boundary, data = MultipartPostHandler.multipart_encode(v_vars, v_files)
                contenttype = 'multipart/form-data; boundary=%s' % boundary
                if(request.has_header('Content-Type')
                   and request.get_header('Content-Type').find('multipart/form-data') != 0):
                    print "Replacing %s with %s" % (request.get_header('content-type'), 'multipart/form-data')
                request.add_unredirected_header('Content-Type', contenttype)
            request.add_data(data)
        return request

    @staticmethod
    def multipart_encode(vars, files, boundary = None, buffer = None):
        """Build a multipart/form-data body; returns (boundary, body)."""
        if boundary is None:
            boundary = mimetools.choose_boundary()
        if buffer is None:
            buffer = ''
        for(key, value) in vars:
            buffer += '--%s\r\n' % boundary
            buffer += 'Content-Disposition: form-data; name="%s"' % key
            buffer += '\r\n\r\n' + value + '\r\n'
        for(key, fd) in files:
            # allow them to pass in a file or a tuple with name & data
            if type(fd) == file:
                name_in = fd.name
                fd.seek(0)
                data_in = fd.read()
            elif type(fd) in (tuple, list):
                name_in, data_in = fd
            filename = os.path.basename(name_in)
            contenttype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
            buffer += '--%s\r\n' % boundary
            buffer += 'Content-Disposition: form-data; name="%s"; filename="%s"\r\n' % (key, filename)
            buffer += 'Content-Type: %s\r\n' % contenttype
            # buffer += 'Content-Length: %s\r\n' % file_size
            buffer += '\r\n' + data_in + '\r\n'
        buffer += '--%s--\r\n\r\n' % boundary
        return boundary, buffer
https_request = http_request | jimyx17/jimh | lib/MultipartPostHandler.py | Python | gpl-3.0 | 3,642 |
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2017 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Tests for qutebrowser.misc.readline."""
import re
import inspect
from PyQt5.QtWidgets import QLineEdit, QApplication
import pytest
from qutebrowser.misc import readline
# Some functions aren't 100% readline compatible:
# https://github.com/qutebrowser/qutebrowser/issues/678
# Those are marked with fixme and have another value marked with '# wrong'
# which marks the current behavior.
fixme = pytest.mark.xfail(reason='readline compatibility - see #678')
class LineEdit(QLineEdit):

    """QLineEdit with some methods to make testing easier."""

    def _get_index(self, haystack, needle):
        """Get the index of a char (needle) in a string (haystack).

        Return:
            The position where needle was found, or None if it wasn't found.
        """
        try:
            return haystack.index(needle)
        except ValueError:
            return None

    def set_aug_text(self, text):
        """Set a text with </> markers for selected text and | as cursor."""
        # Strip the markers before putting the text into the widget.
        real_text = re.sub('[<>|]', '', text)
        self.setText(real_text)
        cursor_pos = self._get_index(text, '|')
        sel_start_pos = self._get_index(text, '<')
        sel_end_pos = self._get_index(text, '>')
        if sel_start_pos is not None and sel_end_pos is None:
            raise ValueError("< given without >!")
        if sel_start_pos is None and sel_end_pos is not None:
            raise ValueError("> given without <!")
        if cursor_pos is not None:
            if sel_start_pos is not None or sel_end_pos is not None:
                raise ValueError("Can't mix | and </>!")
            self.setCursorPosition(cursor_pos)
        elif sel_start_pos is not None:
            if sel_start_pos > sel_end_pos:
                raise ValueError("< given after >!")
            # -1 compensates for the '<' marker occupying one index in the
            # marked-up text.
            sel_len = sel_end_pos - sel_start_pos - 1
            self.setSelection(sel_start_pos, sel_len)

    def aug_text(self):
        """Get a text with </> markers for selected text and | as cursor."""
        text = self.text()
        chars = list(text)
        cur_pos = self.cursorPosition()
        assert cur_pos >= 0
        chars.insert(cur_pos, '|')
        if self.hasSelectedText():
            selected_text = self.selectedText()
            sel_start = self.selectionStart()
            sel_end = sel_start + len(selected_text)
            assert sel_start > 0
            assert sel_end > 0
            assert sel_end > sel_start
            assert cur_pos == sel_end
            assert text[sel_start:sel_end] == selected_text
            chars.insert(sel_start, '<')
            # +1 because the '<' inserted above shifted indices by one.
            chars.insert(sel_end + 1, '>')
        return ''.join(chars)
def _validate_deletion(lineedit, bridge, method, text, deleted, rest):
    """Run and validate a text deletion method on the ReadLine bridge.

    Args:
        lineedit: The LineEdit instance.
        bridge: The ReadlineBridge instance.
        method: Reference to the method on the bridge to test.
        text: The starting 'augmented' text (see LineEdit.set_aug_text)
        deleted: The text that should be deleted when the method is invoked.
        rest: The augmented text that should remain after method is invoked.
    """
    lineedit.set_aug_text(text)
    method()
    # The bridge remembers the deleted text per-widget...
    assert bridge._deleted[lineedit] == deleted
    assert lineedit.aug_text() == rest
    # ...and rl_yank must paste exactly that text back.
    lineedit.clear()
    bridge.rl_yank()
    assert lineedit.aug_text() == deleted + '|'
@pytest.fixture
def lineedit(qtbot, monkeypatch):
    """Fixture providing a LineEdit."""
    le = LineEdit()
    qtbot.add_widget(le)
    # ReadlineBridge acts on QApplication.focusWidget(); pretend this
    # widget always has focus so the bridge targets it.
    monkeypatch.setattr(QApplication.instance(), 'focusWidget', lambda: le)
    return le
@pytest.fixture
def bridge():
    """Fixture providing a fresh ReadlineBridge instance."""
    rl_bridge = readline.ReadlineBridge()
    return rl_bridge
def test_none(bridge, qtbot):
    """Call each rl_* method with a None focusWidget."""
    assert QApplication.instance().focusWidget() is None
    # Every bridge command must be a no-op (not crash) without a widget.
    for name, method in inspect.getmembers(bridge, inspect.ismethod):
        if name.startswith('rl_'):
            method()
# Cursor movement commands: a selection collapses to its end before moving.
@pytest.mark.parametrize('text, expected', [('f<oo>bar', 'fo|obar'),
                                            ('|foobar', '|foobar')])
def test_rl_backward_char(text, expected, lineedit, bridge):
    """Test rl_backward_char."""
    lineedit.set_aug_text(text)
    bridge.rl_backward_char()
    assert lineedit.aug_text() == expected


@pytest.mark.parametrize('text, expected', [('f<oo>bar', 'foob|ar'),
                                            ('foobar|', 'foobar|')])
def test_rl_forward_char(text, expected, lineedit, bridge):
    """Test rl_forward_char."""
    lineedit.set_aug_text(text)
    bridge.rl_forward_char()
    assert lineedit.aug_text() == expected


@pytest.mark.parametrize('text, expected', [('one <tw>o', 'one |two'),
                                            ('<one >two', '|one two'),
                                            ('|one two', '|one two')])
def test_rl_backward_word(text, expected, lineedit, bridge):
    """Test rl_backward_word."""
    lineedit.set_aug_text(text)
    bridge.rl_backward_word()
    assert lineedit.aug_text() == expected
# xfail params document readline-correct behavior; the '# wrong' params pin
# the current (known-deviating) behavior -- see issue #678.
@pytest.mark.parametrize('text, expected', [
    pytest.param('<o>ne two', 'one| two', marks=fixme),
    ('<o>ne two', 'one |two'),  # wrong
    pytest.param('<one> two', 'one two|', marks=fixme),
    ('<one> two', 'one |two'),  # wrong
    ('one t<wo>', 'one two|')
])
def test_rl_forward_word(text, expected, lineedit, bridge):
    """Test rl_forward_word."""
    lineedit.set_aug_text(text)
    bridge.rl_forward_word()
    assert lineedit.aug_text() == expected


def test_rl_beginning_of_line(lineedit, bridge):
    """Test rl_beginning_of_line."""
    lineedit.set_aug_text('f<oo>bar')
    bridge.rl_beginning_of_line()
    assert lineedit.aug_text() == '|foobar'


def test_rl_end_of_line(lineedit, bridge):
    """Test rl_end_of_line."""
    lineedit.set_aug_text('f<oo>bar')
    bridge.rl_end_of_line()
    assert lineedit.aug_text() == 'foobar|'
# Single-character deletion; with a selection, the selection is deleted.
@pytest.mark.parametrize('text, expected', [('foo|bar', 'foo|ar'),
                                            ('foobar|', 'foobar|'),
                                            ('|foobar', '|oobar'),
                                            ('f<oo>bar', 'f|bar')])
def test_rl_delete_char(text, expected, lineedit, bridge):
    """Test rl_delete_char."""
    lineedit.set_aug_text(text)
    bridge.rl_delete_char()
    assert lineedit.aug_text() == expected


@pytest.mark.parametrize('text, expected', [('foo|bar', 'fo|bar'),
                                            ('foobar|', 'fooba|'),
                                            ('|foobar', '|foobar'),
                                            ('f<oo>bar', 'f|bar')])
def test_rl_backward_delete_char(text, expected, lineedit, bridge):
    """Test rl_backward_delete_char."""
    lineedit.set_aug_text(text)
    bridge.rl_backward_delete_char()
    assert lineedit.aug_text() == expected
# Line-wise kills: deleted text must be retrievable via rl_yank
# (checked by _validate_deletion). xfail/'# wrong' pairs: see issue #678.
@pytest.mark.parametrize('text, deleted, rest', [
    ('delete this| test', 'delete this', '| test'),
    pytest.param('delete <this> test', 'delete this', '| test', marks=fixme),
    ('delete <this> test', 'delete ', '|this test'),  # wrong
    pytest.param('f<oo>bar', 'foo', '|bar', marks=fixme),
    ('f<oo>bar', 'f', '|oobar'),  # wrong
])
def test_rl_unix_line_discard(lineedit, bridge, text, deleted, rest):
    """Delete from the cursor to the beginning of the line and yank back."""
    _validate_deletion(lineedit, bridge, bridge.rl_unix_line_discard, text,
                       deleted, rest)


@pytest.mark.parametrize('text, deleted, rest', [
    ('test |delete this', 'delete this', 'test |'),
    pytest.param('<test >delete this', 'test delete this', 'test |',
                 marks=fixme),
    ('<test >delete this', 'test delete this', '|'),  # wrong
])
def test_rl_kill_line(lineedit, bridge, text, deleted, rest):
    """Delete from the cursor to the end of line and yank back."""
    _validate_deletion(lineedit, bridge, bridge.rl_kill_line, text, deleted,
                       rest)
# Backward word/filename rubout; deleted text must yank back intact.
@pytest.mark.parametrize('text, deleted, rest', [
    ('test delete|foobar', 'delete', 'test |foobar'),
    ('test delete |foobar', 'delete ', 'test |foobar'),
    ('open -t github.com/foo/bar |', 'github.com/foo/bar ', 'open -t |'),
    ('open -t |github.com/foo/bar', '-t ', 'open |github.com/foo/bar'),
    pytest.param('test del<ete>foobar', 'delete', 'test |foobar',
                 marks=fixme),
    ('test del<ete >foobar', 'del', 'test |ete foobar'),  # wrong
])
def test_rl_unix_word_rubout(lineedit, bridge, text, deleted, rest):
    """Delete to word beginning and see if it comes back with yank."""
    _validate_deletion(lineedit, bridge, bridge.rl_unix_word_rubout, text,
                       deleted, rest)


@pytest.mark.parametrize('text, deleted, rest', [
    ('test delete|foobar', 'delete', 'test |foobar'),
    ('test delete |foobar', 'delete ', 'test |foobar'),
    ('open -t github.com/foo/bar |', 'bar ', 'open -t github.com/foo/|'),
    ('open -t |github.com/foo/bar', '-t ', 'open |github.com/foo/bar'),
    ('open foo/bar.baz|', 'bar.baz', 'open foo/|'),
])
def test_rl_unix_filename_rubout(lineedit, bridge, text, deleted, rest):
    """Delete filename segment and see if it comes back with yank."""
    _validate_deletion(lineedit, bridge, bridge.rl_unix_filename_rubout, text,
                       deleted, rest)
# Forward/backward word kills; xfail/'# wrong' pairs track issue #678.
@pytest.mark.parametrize('text, deleted, rest', [
    pytest.param('test foobar| delete', ' delete', 'test foobar|',
                 marks=fixme),
    ('test foobar| delete', ' ', 'test foobar|delete'),  # wrong
    pytest.param('test foo|delete bar', 'delete', 'test foo| bar',
                 marks=fixme),
    ('test foo|delete bar', 'delete ', 'test foo|bar'),  # wrong
    pytest.param('test foo<bar> delete', ' delete', 'test foobar|',
                 marks=fixme),
    ('test foo<bar>delete', 'bardelete', 'test foo|'),  # wrong
])
def test_rl_kill_word(lineedit, bridge, text, deleted, rest):
    """Delete to word end and see if it comes back with yank."""
    _validate_deletion(lineedit, bridge, bridge.rl_kill_word, text, deleted,
                       rest)


@pytest.mark.parametrize('text, deleted, rest', [
    ('test delete|foobar', 'delete', 'test |foobar'),
    ('test delete |foobar', 'delete ', 'test |foobar'),
    ('open -t github.com/foo/bar |', 'bar ', 'open -t github.com/foo/|'),
    ('open -t |github.com/foo/bar', 't ', 'open -|github.com/foo/bar'),
    pytest.param('test del<ete>foobar', 'delete', 'test |foobar', marks=fixme),
    ('test del<ete >foobar', 'del', 'test |ete foobar'),  # wrong
    ('open foo/bar.baz|', 'baz', 'open foo/bar.|'),
])
def test_rl_backward_kill_word(lineedit, bridge, text, deleted, rest):
    """Delete to word beginning and see if it comes back with yank."""
    _validate_deletion(lineedit, bridge, bridge.rl_backward_kill_word, text,
                       deleted, rest)
def test_rl_yank_no_text(lineedit, bridge):
    """Test yank without having deleted anything."""
    lineedit.clear()
    # Nothing was killed, so yank must leave the (empty) text unchanged.
    bridge.rl_yank()
    assert lineedit.aug_text() == '|'
| pkill-nine/qutebrowser | tests/unit/misc/test_readline.py | Python | gpl-3.0 | 11,925 |
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import sublime
import sublime_plugin
from ..anaconda_lib.helpers import get_settings
from ..anaconda_lib.helpers import valid_languages
from ..anaconda_lib.linting.sublime import ANACONDA
class AnacondaGetLines(sublime_plugin.WindowCommand):
    """Get a quickpanel with all the errors and lines ready to jump to them
    """

    def run(self):
        # Collect errors/warnings/violations for the active view, keyed by
        # line, then present them in a quick panel.
        errors = {}
        self._harvest_errors(errors, 'ERRORS')
        self._harvest_errors(errors, 'WARNINGS')
        self._harvest_errors(errors, 'VIOLATIONS')
        if len(errors) > 0:
            self.options = []
            for line, error_strings in errors.items():
                for msg in error_strings:
                    self.options.append([msg, 'line: {}'.format(line)])
            self.window.show_quick_panel(self.options, self._jump)

    def is_enabled(self):
        """Determines if the command is enabled
        """
        view = self.window.active_view()
        if (view.file_name() in ANACONDA['DISABLED']
                or not get_settings(view, 'anaconda_linting')):
            return False
        # Enabled only when the cursor is inside a supported language scope.
        location = view.sel()[0].begin()
        for lang in valid_languages():
            matcher = 'source.{}'.format(lang)
            if view.match_selector(location, matcher) is True:
                return True
        return False

    def _harvest_errors(self, harvester, error_type):
        """Append all messages of *error_type* for this view into
        *harvester*, a dict mapping line number -> list of messages."""
        vid = self.window.active_view().id()
        for line, error_strings in ANACONDA[error_type].get(vid, {}).items():
            if line not in harvester:
                harvester[line] = []
            for error in error_strings:
                harvester[line].append(error)

    def _jump(self, item):
        """Jump to a line in the view buffer
        """
        # item is -1 when the quick panel was cancelled.
        if item == -1:
            return
        # Parse the line number back out of the 'line: N' panel entry.
        lineno = int(self.options[item][1].split(':')[1].strip())
        pt = self.window.active_view().text_point(lineno, 0)
        self.window.active_view().sel().clear()
        self.window.active_view().sel().add(sublime.Region(pt))
| alexfalcucc/anaconda | commands/get_lines.py | Python | gpl-3.0 | 2,206 |
from beaker.util import ThreadLocal
thread_instance = ThreadLocal()
def setup_thread(instance):
    """Bind *instance* to the current thread.

    ``thread_instance`` is a module-level ThreadLocal, so the binding is
    only visible to the calling thread.
    """
    # No ``global`` statement needed: the ThreadLocal object is mutated
    # in place, never rebound.
    thread_instance.put(instance)
def teardown_thread():
    '''
    A counterpart for setup_thread(), probly only
    useful in test_code
    '''
    # No ``global`` statement needed: the ThreadLocal object is mutated
    # in place, never rebound.
    try:
        thread_instance.remove()
    except AttributeError:
        # no value saved for this thread -- nothing to clean up
        pass
def has_instance():
    """Return True if an instance is bound to the current thread."""
    current = thread_instance.get()
    return current is not None
def get_instance():
    """Return the instance bound to the current thread, or None."""
    current = thread_instance.get()
    return current
| DanielNeugebauer/adhocracy | src/adhocracy/model/instance_filter.py | Python | agpl-3.0 | 531 |
# -*- coding: utf-8 -*-
from flask.ext.rq import job
from tweepy import OAuthHandler, API
import bitlyapi
import urllib2
import json
import re
from hasjob import app
@job('hasjob')
def tweet(title, url, location=None, parsed_location=None, username=None):
auth = OAuthHandler(app.config['TWITTER_CONSUMER_KEY'], app.config['TWITTER_CONSUMER_SECRET'])
auth.set_access_token(app.config['TWITTER_ACCESS_KEY'], app.config['TWITTER_ACCESS_SECRET'])
api = API(auth)
urllength = 23 # Current Twitter standard for HTTPS (as of Oct 2014)
maxlength = 140 - urllength - 1 # == 116
if username:
maxlength -= len(username) + 2
locationtag = u''
if parsed_location:
locationtags = []
for token in parsed_location.get('tokens', []):
if 'geoname' in token and 'token' in token:
locname = token['token'].strip()
if locname:
locationtags.append(u'#' + locname.title().replace(u' ', ''))
locationtag = u' '.join(locationtags)
if locationtag:
maxlength -= len(locationtag) + 1
if not locationtag and location:
# Make a hashtag from the first word in the location. This catches
# locations like 'Anywhere' which have no geonameid but are still valid
locationtag = u'#' + re.split('\W+', location)[0]
maxlength -= len(locationtag) + 1
if len(title) > maxlength:
text = title[:maxlength - 1] + u'…'
else:
text = title[:maxlength]
text = text + ' ' + url # Don't shorten URLs, now that there's t.co
if locationtag:
text = text + ' ' + locationtag
if username:
text = text + ' @' + username
api.update_status(text)
def shorten(url):
    """Shorten *url* via bit.ly when configured, else Google URL shortener.

    Python 2 only (uses urllib2).
    """
    if app.config['BITLY_KEY']:
        b = bitlyapi.BitLy(app.config['BITLY_USER'], app.config['BITLY_KEY'])
        res = b.shorten(longUrl=url)
        return res['url']
    else:
        # Fall back to the Google URL shortener JSON API.
        req = urllib2.Request("https://www.googleapis.com/urlshortener/v1/url",
            headers={"Content-Type": "application/json"},
            data=json.dumps({'longUrl': url}))
        request_result = urllib2.urlopen(req)
        result = request_result.read()
        result_json = json.loads(result)
| nhannv/hasjob | hasjob/twitter.py | Python | agpl-3.0 | 2,286 |
# The Hazard Library
# Copyright (C) 2014, GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openquake.hazardlib.gsim.convertito_2012 import (
ConvertitoEtAl2012Geysers
)
from openquake.hazardlib.tests.gsim.utils import BaseGSIMTestCase
class ConvertitoEtAl2012TestCase(BaseGSIMTestCase):
    """Verification tests for the Convertito et al. (2012) Geysers GSIM,
    checked against pre-computed CSV tables."""
    GSIM_CLASS = ConvertitoEtAl2012Geysers

    def test_mean(self):
        # Mean ground motion within 0.1% of the verification table.
        self.check('CONV2012/CONV_2012_MEAN.csv',
                   max_discrep_percentage=0.1)

    def test_std_total(self):
        # Total standard deviation within 0.1% of the verification table.
        self.check('CONV2012/CONV_2012_STDDEV.csv',
| mmpagani/oq-hazardlib | openquake/hazardlib/tests/gsim/convertito_2012_test.py | Python | agpl-3.0 | 1,218 |
# pylint: disable=C0103
"""Wrapper module for libpcp_import - Performace Co-Pilot Log Import API
#
# Copyright (C) 2012-2015 Red Hat.
#
# This file is part of the "pcp" module, the python interfaces for the
# Performance Co-Pilot toolkit.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# Example use of this module for creating a PCP archive:
import math
import time
import pmapi
from pcp import pmi
# Create a new archive
log = pmi.pmiLogImport("loadtest")
log.pmiSetHostname("www.abc.com")
log.pmiSetTimezone("EST-10")
# Add a metric with an instance domain
domain = 60 # Linux kernel
pmid = log.pmiID(domain, 2, 0)
indom = log.pmiInDom(domain, 2)
units = log.pmiUnits(0, 0, 0, 0, 0, 0)
log.pmiAddMetric("kernel.all.load", pmid, pmapi.PM_TYPE_FLOAT,
indom, pmapi.PM_SEM_INSTANT, units)
log.pmiAddInstance(indom, "1 minute", 1)
log.pmiAddInstance(indom, "5 minute", 5)
log.pmiAddInstance(indom, "15 minute", 15)
# Create a record with a timestamp
log.pmiPutValue("kernel.all.load", "1 minute", "%f" % 0.01)
log.pmiPutValue("kernel.all.load", "5 minute", "%f" % 0.05)
log.pmiPutValue("kernel.all.load", "15 minute", "%f" % 0.15)
timetuple = math.modf(time.time())
useconds = int(timetuple[0] * 1000000)
seconds = int(timetuple[1])
log.pmiWrite(seconds, useconds)
del log
"""
from pcp.pmapi import pmID, pmInDom, pmUnits, pmResult
from cpmi import pmiErrSymDict, PMI_MAXERRMSGLEN
import ctypes
from ctypes import cast, c_int, c_char_p, POINTER
# Performance Co-Pilot PMI library (C)
LIBPCP_IMPORT = ctypes.CDLL(ctypes.util.find_library("pcp_import"))
##
# PMI Log Import Services
LIBPCP_IMPORT.pmiDump.restype = None
LIBPCP_IMPORT.pmiDump.argtypes = None
LIBPCP_IMPORT.pmiID.restype = pmID
LIBPCP_IMPORT.pmiID.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_int]
LIBPCP_IMPORT.pmiInDom.restype = pmInDom
LIBPCP_IMPORT.pmiInDom.argtypes = [ctypes.c_int, ctypes.c_int]
LIBPCP_IMPORT.pmiUnits.restype = pmUnits
LIBPCP_IMPORT.pmiUnits.argtypes = [
ctypes.c_int, ctypes.c_int, ctypes.c_int,
ctypes.c_int, ctypes.c_int, ctypes.c_int]
LIBPCP_IMPORT.pmiErrStr_r.restype = c_char_p
LIBPCP_IMPORT.pmiErrStr_r.argtypes = [c_int, c_char_p, c_int]
LIBPCP_IMPORT.pmiStart.restype = c_int
LIBPCP_IMPORT.pmiStart.argtypes = [c_char_p, c_int]
LIBPCP_IMPORT.pmiUseContext.restype = c_int
LIBPCP_IMPORT.pmiUseContext.argtypes = [c_int]
LIBPCP_IMPORT.pmiEnd.restype = c_int
LIBPCP_IMPORT.pmiEnd.argtypes = None
LIBPCP_IMPORT.pmiSetHostname.restype = c_int
LIBPCP_IMPORT.pmiSetHostname.argtypes = [c_char_p]
LIBPCP_IMPORT.pmiSetTimezone.restype = c_int
LIBPCP_IMPORT.pmiSetTimezone.argtypes = [c_char_p]
LIBPCP_IMPORT.pmiAddMetric.restype = c_int
LIBPCP_IMPORT.pmiAddMetric.argtypes = [
c_char_p, pmID, c_int, pmInDom, c_int, pmUnits]
LIBPCP_IMPORT.pmiAddInstance.restype = c_int
LIBPCP_IMPORT.pmiAddInstance.argtypes = [pmInDom, c_char_p, c_int]
LIBPCP_IMPORT.pmiPutValue.restype = c_int
LIBPCP_IMPORT.pmiPutValue.argtypes = [c_char_p, c_char_p, c_char_p]
LIBPCP_IMPORT.pmiGetHandle.restype = c_int
LIBPCP_IMPORT.pmiGetHandle.argtypes = [c_char_p, c_char_p]
LIBPCP_IMPORT.pmiPutValueHandle.restype = c_int
LIBPCP_IMPORT.pmiPutValueHandle.argtypes = [c_int, c_char_p]
LIBPCP_IMPORT.pmiWrite.restype = c_int
LIBPCP_IMPORT.pmiWrite.argtypes = [c_int, c_int]
LIBPCP_IMPORT.pmiPutResult.restype = c_int
LIBPCP_IMPORT.pmiPutResult.argtypes = [POINTER(pmResult)]
#
# definition of exception classes
#
class pmiErr(Exception):
    '''
    Encapsulation for PMI interface error code
    '''
    def __str__(self):
        # The negative PMI status code travels as the first exception arg.
        error_code = self.args[0]
        try:
            # Map code -> symbolic name, then ask the C library for the
            # human-readable message (pmiErrStr_r fills the buffer).
            error_symbol = pmiErrSymDict[error_code]
            error_string = ctypes.create_string_buffer(PMI_MAXERRMSGLEN)
            # NOTE(review): pmiErrStr_r returns bytes under Python 3, so the
            # "%s" below renders a bytes repr; also, if the symbol lookup
            # above raises KeyError the message is skipped entirely even
            # though pmiErrStr_r does not need the dict -- confirm intended.
            error_string = LIBPCP_IMPORT.pmiErrStr_r(error_code,
                                        error_string, PMI_MAXERRMSGLEN)
        except KeyError:
            error_symbol = error_string = ""
        return "%s %s" % (error_symbol, error_string)
#
# class LogImport
#
# This class wraps the PMI (Log Import) library functions
#
class pmiLogImport(object):
    """Defines a PCP Log Import archive context
    This is used to create a PCP archive from an external source

    The C library keeps a notion of a "current" import context, so every
    method below first re-selects this object's context with pmiUseContext()
    before calling into the library, and raises pmiErr on any negative
    status returned by a library call.
    """
    ##
    # property read methods
    def read_path(self):
        """ Property for archive path """
        return self._path
    def read_ctx(self):
        """ Property for log import context """
        return self._ctx
    ##
    # property definitions
    path = property(read_path, None, None, None)
    ctx = property(read_ctx, None, None, None)
    ##
    # overloads
    def __init__(self, path, inherit = 0):
        # The C API expects bytes; transparently encode str arguments.
        if type(path) != type(b''):
            path = path.encode('utf-8')
        self._path = path # the archive path (file name)
        # pmiStart() creates the import context; negative is a PMI error.
        self._ctx = LIBPCP_IMPORT.pmiStart(c_char_p(path), inherit)
        if self._ctx < 0:
            raise pmiErr(self._ctx)
    def __del__(self):
        # Finish the archive on garbage collection; guard against the
        # shared library already being torn down at interpreter exit.
        if LIBPCP_IMPORT:
            LIBPCP_IMPORT.pmiUseContext(self._ctx)
            LIBPCP_IMPORT.pmiEnd()
        self._ctx = -1
    ##
    # PMI Log Import Services
    def pmiSetHostname(self, hostname):
        """PMI - set the source host name for a Log Import archive """
        status = LIBPCP_IMPORT.pmiUseContext(self._ctx)
        if status < 0:
            raise pmiErr(status)
        if type(hostname) != type(b''):
            hostname = hostname.encode('utf-8')
        status = LIBPCP_IMPORT.pmiSetHostname(c_char_p(hostname))
        if status < 0:
            raise pmiErr(status)
        return status
    def pmiSetTimezone(self, timezone):
        """PMI - set the source timezone for a Log Import archive
        """
        status = LIBPCP_IMPORT.pmiUseContext(self._ctx)
        if status < 0:
            raise pmiErr(status)
        if type(timezone) != type(b''):
            timezone = timezone.encode('utf-8')
        status = LIBPCP_IMPORT.pmiSetTimezone(c_char_p(timezone))
        if status < 0:
            raise pmiErr(status)
        return status
    @staticmethod
    def pmiID(domain, cluster, item):
        """PMI - construct a pmID data structure (helper routine) """
        return LIBPCP_IMPORT.pmiID(domain, cluster, item)
    @staticmethod
    def pmiInDom(domain, serial):
        """PMI - construct a pmInDom data structure (helper routine) """
        return LIBPCP_IMPORT.pmiInDom(domain, serial)
    @staticmethod
    def pmiUnits(dim_space, dim_time, dim_count,
                        scale_space, scale_time, scale_count):
        # pylint: disable=R0913
        """PMI - construct a pmiUnits data structure (helper routine) """
        return LIBPCP_IMPORT.pmiUnits(dim_space, dim_time, dim_count,
                        scale_space, scale_time, scale_count)
    def pmiAddMetric(self, name, pmid, typed, indom, sem, units):
        # pylint: disable=R0913
        """PMI - add a new metric definition to a Log Import context """
        status = LIBPCP_IMPORT.pmiUseContext(self._ctx)
        if status < 0:
            raise pmiErr(status)
        if type(name) != type(b''):
            name = name.encode('utf-8')
        status = LIBPCP_IMPORT.pmiAddMetric(c_char_p(name),
                        pmid, typed, indom, sem, units)
        if status < 0:
            raise pmiErr(status)
        return status
    def pmiAddInstance(self, indom, instance, instid):
        """PMI - add element to an instance domain in a Log Import context """
        status = LIBPCP_IMPORT.pmiUseContext(self._ctx)
        if status < 0:
            raise pmiErr(status)
        if type(instance) != type(b''):
            instance = instance.encode('utf-8')
        status = LIBPCP_IMPORT.pmiAddInstance(indom, c_char_p(instance), instid)
        if status < 0:
            raise pmiErr(status)
        return status
    def pmiPutValue(self, name, inst, value):
        """PMI - add a value for a metric-instance pair """
        status = LIBPCP_IMPORT.pmiUseContext(self._ctx)
        if status < 0:
            raise pmiErr(status)
        # All three arguments cross the FFI boundary as C strings.
        if type(name) != type(b''):
            name = name.encode('utf-8')
        if type(inst) != type(b''):
            inst = inst.encode('utf-8')
        if type(value) != type(b''):
            value = value.encode('utf-8')
        status = LIBPCP_IMPORT.pmiPutValue(c_char_p(name),
                                           c_char_p(inst), c_char_p(value))
        if status < 0:
            raise pmiErr(status)
        return status
    def pmiGetHandle(self, name, inst):
        """PMI - define a handle for a metric-instance pair """
        status = LIBPCP_IMPORT.pmiUseContext(self._ctx)
        if status < 0:
            raise pmiErr(status)
        if type(name) != type(b''):
            name = name.encode('utf-8')
        if type(inst) != type(b''):
            inst = inst.encode('utf-8')
        status = LIBPCP_IMPORT.pmiGetHandle(c_char_p(name), c_char_p(inst))
        if status < 0:
            raise pmiErr(status)
        return status
    def pmiPutValueHandle(self, handle, value):
        """PMI - add a value for a metric-instance pair via a handle """
        status = LIBPCP_IMPORT.pmiUseContext(self._ctx)
        if status < 0:
            raise pmiErr(status)
        if type(value) != type(b''):
            value = value.encode('utf-8')
        status = LIBPCP_IMPORT.pmiPutValueHandle(handle, c_char_p(value))
        if status < 0:
            raise pmiErr(status)
        return status
    def pmiWrite(self, sec, usec):
        """PMI - flush data to a Log Import archive """
        status = LIBPCP_IMPORT.pmiUseContext(self._ctx)
        if status < 0:
            raise pmiErr(status)
        status = LIBPCP_IMPORT.pmiWrite(sec, usec)
        if status < 0:
            raise pmiErr(status)
        return status
    def put_result(self, result):
        """PMI - add a data record to a Log Import archive """
        status = LIBPCP_IMPORT.pmiUseContext(self._ctx)
        if status < 0:
            raise pmiErr(status)
        status = LIBPCP_IMPORT.pmiPutResult(cast(result, POINTER(pmResult)))
        if status < 0:
            raise pmiErr(status)
        return status
    @staticmethod
    def pmiDump():
        """PMI - dump the current Log Import contexts (diagnostic) """
        LIBPCP_IMPORT.pmiDump()
    def pmiEnd(self):
        """PMI - close current context and finish a Log Import archive """
        status = LIBPCP_IMPORT.pmiUseContext(self._ctx)
        if status < 0:
            raise pmiErr(status)
        status = LIBPCP_IMPORT.pmiEnd()
        # Invalidate the handle so __del__ / later calls fail cleanly.
        self._ctx = -1
        if status < 0:
            raise pmiErr(status)
        return status
| edwardt/pcp | src/python/pcp/pmi.py | Python | lgpl-2.1 | 11,357 |
"""SCons.Variables.PathVariable
This file defines an option type for SCons implementing path settings.
To be used whenever a user-specified path override should be allowed.
Arguments to PathVariable are:
option-name = name of this option on the command line (e.g. "prefix")
option-help = help string for option
option-dflt = default value for this option
validator = [optional] validator for option value. Predefined
validators are:
PathAccept -- accepts any path setting; no validation
PathIsDir -- path must be an existing directory
PathIsDirCreate -- path must be a dir; will create
PathIsFile -- path must be a file
PathExists -- path must exist (any type) [default]
The validator is a function that is called and which
should return True or False to indicate if the path
is valid. The arguments to the validator function
are: (key, val, env). The key is the name of the
option, the val is the path specified for the option,
                  and the env is the env to which the Options have been
added.
Usage example:
Examples:
prefix=/usr/local
opts = Variables()
opts = Variables()
opts.Add(PathVariable('qtdir',
'where the root of Qt is installed',
qtdir, PathIsDir))
opts.Add(PathVariable('qt_includes',
'where the Qt includes are installed',
'$qtdir/includes', PathIsDirCreate))
opts.Add(PathVariable('qt_libraries',
'where the Qt library is installed',
'$qtdir/lib'))
"""
#
# Copyright (c) 2001 - 2015 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Variables/PathVariable.py rel_2.3.5:3347:d31d5a4e74b6 2015/07/31 14:36:10 bdbaddog"
__all__ = ['PathVariable',]
import os
import os.path
import SCons.Errors
class _PathVariableClass(object):

    def PathAccept(self, key, val, env):
        """Accepts any path, no checking done."""
        pass

    def PathIsDir(self, key, val, env):
        """Validator to check if Path is a directory."""
        if os.path.isdir(val):
            return
        # Distinguish "wrong kind of thing" from "nothing there at all".
        if os.path.isfile(val):
            msg = 'Directory path for option %s is a file: %s'
        else:
            msg = 'Directory path for option %s does not exist: %s'
        raise SCons.Errors.UserError(msg % (key, val))

    def PathIsDirCreate(self, key, val, env):
        """Validator to check if Path is a directory,
           creating it if it does not exist."""
        if os.path.isfile(val):
            raise SCons.Errors.UserError(
                'Path for option %s is a file, not a directory: %s' % (key, val))
        if not os.path.isdir(val):
            os.makedirs(val)

    def PathIsFile(self, key, val, env):
        """validator to check if Path is a file"""
        if os.path.isfile(val):
            return
        if os.path.isdir(val):
            msg = 'File path for option %s is a directory: %s'
        else:
            msg = 'File path for option %s does not exist: %s'
        raise SCons.Errors.UserError(msg % (key, val))

    def PathExists(self, key, val, env):
        """validator to check if Path exists"""
        if not os.path.exists(val):
            raise SCons.Errors.UserError(
                'Path for option %s does not exist: %s' % (key, val))

    def __call__(self, key, help, default, validator=None):
        # NB: searchfunc is currently undocumented and unsupported
        """
        Describe a 'path list' option: return the (key, help, default,
        validator, converter) tuple expected by opts.Add().

        'default' is the path used when the user supplies no override;
        'validator' defaults to PathExists (see the validators above).
        """
        if validator is None:
            validator = self.PathExists
        # For a list/tuple key, document the canonical (first) name.
        if SCons.Util.is_List(key) or SCons.Util.is_Tuple(key):
            helpmsg = '%s ( /path/to/%s )' % (help, key[0])
        else:
            helpmsg = '%s ( /path/to/%s )' % (help, key)
        return (key, helpmsg, default, validator, None)
# Shared singleton; call as PathVariable(key, help, default[, validator]).
PathVariable = _PathVariableClass()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| stefanklug/mapnik | scons/scons-local-2.3.6/SCons/Variables/PathVariable.py | Python | lgpl-2.1 | 5,646 |
import time
from Rpyc import Async
def threadfunc(callback):
    """this function will call the callback every second"""
    # Wrap the callback so each invocation is an asynchronous RPC that
    # returns immediately instead of blocking on the remote side.
    callback = Async(callback)
    try:
        while True:
            print "!"
            callback()
            time.sleep(1)
    # NOTE(review): the bare except deliberately swallows every exception
    # (e.g. the connection going away) so the thread exits quietly; it also
    # catches KeyboardInterrupt/SystemExit -- confirm that is intended.
    except:
        print "thread exiting"
def printer(text):
    # Demo callback target: echo *text* on this side of the connection.
    print text
def caller(func, *args):
    # Demo helper: apply *func* to the given positional arguments,
    # discarding the result (lets the remote side drive a local callable).
    func(*args)
| tempbottle/restcommander | play-1.2.4/python/Lib/site-packages/Rpyc/Demo/testmodule.py | Python | apache-2.0 | 373 |
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import six
class _ProgressBarBase(object):
"""
Base abstract class used by specific class wrapper to show a progress bar
when the wrapped object are consumed.
:param wrapped: Object to wrap that hold data to be consumed.
:param totalsize: The total size of the data in the wrapped object.
:note: The progress will be displayed only if sys.stdout is a tty.
"""
def __init__(self, wrapped, totalsize):
self._wrapped = wrapped
self._totalsize = float(totalsize)
self._show_progress = sys.stdout.isatty() and self._totalsize != 0
self._percent = 0
def _display_progress_bar(self, size_read):
if self._show_progress:
self._percent += size_read / self._totalsize
# Output something like this: [==========> ] 49%
sys.stdout.write('\r[{0:<30}] {1:.0%}'.format(
'=' * int(round(self._percent * 29)) + '>', self._percent
))
sys.stdout.flush()
def __getattr__(self, attr):
# Forward other attribute access to the wrapped object.
return getattr(self._wrapped, attr)
class VerboseFileWrapper(_ProgressBarBase):
    """
    A file wrapper that advances the progress bar every time the wrapped
    file's read method is called.
    """

    def read(self, *args, **kwargs):
        data = self._wrapped.read(*args, **kwargs)
        if not data:
            # EOF: terminate the progress-bar line so any subsequent
            # output starts cleanly on its own line.
            if self._show_progress:
                sys.stdout.write('\n')
            return data
        self._display_progress_bar(len(data))
        return data
class VerboseIteratorWrapper(_ProgressBarBase):
    """
    An iterator wrapper that shows and advances a progress bar whenever
    data is consumed from the iterator.

    :note: Use only with iterators that yield strings.
    """
    def __iter__(self):
        return self

    def next(self):
        try:
            # The builtin next() is exactly what six.next() resolves to on
            # both Python 2.6+ and 3, so call it directly and drop the
            # third-party indirection.
            data = next(self._wrapped)
            # NOTE(mouad): Assuming that data is a string b/c otherwise calling
            # len function will not make any sense.
            self._display_progress_bar(len(data))
            return data
        except StopIteration:
            if self._show_progress:
                # Break to a new line from the progress bar for incoming
                # output.
                sys.stdout.write('\n')
            raise

    # In Python 3, __next__() has replaced next().
    __next__ = next
| alexpilotti/python-glanceclient | glanceclient/common/progressbar.py | Python | apache-2.0 | 3,171 |
"""Support for the (unofficial) Tado API."""
import asyncio
from datetime import timedelta
import logging
from PyTado.interface import Tado
from requests import RequestException
import requests.exceptions
import voluptuous as vol
from homeassistant.components.climate.const import PRESET_AWAY, PRESET_HOME
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.dispatcher import dispatcher_send
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.util import Throttle
from .const import (
CONF_FALLBACK,
DATA,
DOMAIN,
SIGNAL_TADO_UPDATE_RECEIVED,
UPDATE_LISTENER,
UPDATE_TRACK,
)
_LOGGER = logging.getLogger(__name__)
# Platforms forwarded for each config entry.
TADO_COMPONENTS = ["sensor", "climate", "water_heater"]
# Minimum spacing between TadoConnector.update() calls (see @Throttle below).
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=10)
# Background polling cadence for zone/device state.
SCAN_INTERVAL = timedelta(seconds=15)
# YAML schema: a list of Tado accounts, each with credentials and an
# optional fallback-to-Smart-Schedule flag (imported into config entries).
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.All(
            cv.ensure_list,
            [
                {
                    vol.Required(CONF_USERNAME): cv.string,
                    vol.Required(CONF_PASSWORD): cv.string,
                    vol.Optional(CONF_FALLBACK, default=True): cv.boolean,
                }
            ],
        )
    },
    extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass: HomeAssistant, config: dict):
    """Set up the Tado component."""
    hass.data.setdefault(DOMAIN, {})
    # YAML configuration is optional; config entries handle the rest.
    if DOMAIN not in config:
        return True
    # Hand every YAML-configured account to the import flow so each one
    # becomes a proper config entry.
    for conf in config[DOMAIN]:
        hass.async_create_task(
            hass.config_entries.flow.async_init(
                DOMAIN, context={"source": SOURCE_IMPORT}, data=conf,
            )
        )
    return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Set up Tado from a config entry.

    Returns False on permanent failures (bad credentials) and raises
    ConfigEntryNotReady on transient ones so Home Assistant retries later.
    """
    _async_import_options_from_data_if_missing(hass, entry)

    username = entry.data[CONF_USERNAME]
    password = entry.data[CONF_PASSWORD]
    fallback = entry.options.get(CONF_FALLBACK, True)

    tadoconnector = TadoConnector(hass, username, password, fallback)

    try:
        await hass.async_add_executor_job(tadoconnector.setup)
    except KeyError:
        _LOGGER.error("Failed to login to tado")
        return False
    except RuntimeError as exc:
        _LOGGER.error("Failed to setup tado: %s", exc)
        # BUG FIX: this previously did ``return ConfigEntryNotReady``.
        # Returning the (truthy) exception class made Home Assistant treat
        # the failed setup as successful; raising engages the retry logic.
        raise ConfigEntryNotReady
    except requests.exceptions.HTTPError as ex:
        # 4xx means bad credentials/permissions -> permanent failure.
        if ex.response.status_code > 400 and ex.response.status_code < 500:
            _LOGGER.error("Failed to login to tado: %s", ex)
            return False
        # Anything else (e.g. 5xx) is transient -> retry later.
        raise ConfigEntryNotReady

    # Do first update
    await hass.async_add_executor_job(tadoconnector.update)

    # Poll for updates in the background
    update_track = async_track_time_interval(
        hass, lambda now: tadoconnector.update(), SCAN_INTERVAL,
    )

    update_listener = entry.add_update_listener(_async_update_listener)

    hass.data[DOMAIN][entry.entry_id] = {
        DATA: tadoconnector,
        UPDATE_TRACK: update_track,
        UPDATE_LISTENER: update_listener,
    }

    for component in TADO_COMPONENTS:
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(entry, component)
        )

    return True
@callback
def _async_import_options_from_data_if_missing(hass: HomeAssistant, entry: ConfigEntry):
    # Migrate the fallback flag from entry.data into entry.options for
    # entries created before options were introduced.  The entry is only
    # rewritten when the option is actually missing.
    options = dict(entry.options)
    if CONF_FALLBACK not in options:
        options[CONF_FALLBACK] = entry.data.get(CONF_FALLBACK, True)
        hass.config_entries.async_update_entry(entry, options=options)
async def _async_update_listener(hass: HomeAssistant, entry: ConfigEntry):
    """Handle options update by reloading the entry with the new options."""
    await hass.config_entries.async_reload(entry.entry_id)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Unload a config entry."""
    # Unload every platform concurrently; overall success requires all of
    # them to report success.
    unload_tasks = [
        hass.config_entries.async_forward_entry_unload(entry, component)
        for component in TADO_COMPONENTS
    ]
    unload_ok = all(await asyncio.gather(*unload_tasks))

    # Cancel the background poll timer and the options-update listener.
    hass.data[DOMAIN][entry.entry_id][UPDATE_TRACK]()
    hass.data[DOMAIN][entry.entry_id][UPDATE_LISTENER]()

    if unload_ok:
        hass.data[DOMAIN].pop(entry.entry_id)

    return unload_ok
class TadoConnector:
    """An object to store the Tado data.

    Wraps the PyTado client, caches the latest zone/device state in
    ``self.data`` and broadcasts SIGNAL_TADO_UPDATE_RECEIVED dispatcher
    signals whenever a sensor is refreshed.
    """
    def __init__(self, hass, username, password, fallback):
        """Initialize Tado Connector."""
        self.hass = hass
        self._username = username
        self._password = password
        self._fallback = fallback
        self.device_id = None
        self.tado = None
        self.zones = None
        self.devices = None
        # Latest raw state keyed by kind ("zone"/"device") then id.
        self.data = {
            "zone": {},
            "device": {},
        }
    @property
    def fallback(self):
        """Return fallback flag to Smart Schedule."""
        return self._fallback
    def setup(self):
        """Connect to Tado and fetch the zones."""
        self.tado = Tado(self._username, self._password)
        self.tado.setDebugging(True)
        # Load zones and devices
        # NOTE(review): ``devices`` actually holds the account's homes
        # (getMe()["homes"]); device_id is the first home's id -- confirm
        # the naming against PyTado.
        self.zones = self.tado.getZones()
        self.devices = self.tado.getMe()["homes"]
        self.device_id = self.devices[0]["id"]
    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Update the registered zones."""
        for zone in self.zones:
            self.update_sensor("zone", zone["id"])
        for device in self.devices:
            self.update_sensor("device", device["id"])
    def update_sensor(self, sensor_type, sensor):
        """Update the internal data from Tado.

        sensor_type is "zone" or "device"; sensor is the corresponding id.
        On success the fresh data is cached and an update signal is sent.
        """
        _LOGGER.debug("Updating %s %s", sensor_type, sensor)
        try:
            if sensor_type == "zone":
                data = self.tado.getZoneState(sensor)
            elif sensor_type == "device":
                # Only the first device's data is fetched here; the given
                # ``sensor`` id is not used for the lookup.
                devices_data = self.tado.getDevices()
                if not devices_data:
                    _LOGGER.info("There are no devices to setup on this tado account")
                    return
                data = devices_data[0]
            else:
                _LOGGER.debug("Unknown sensor: %s", sensor_type)
                return
        except RuntimeError:
            _LOGGER.error(
                "Unable to connect to Tado while updating %s %s", sensor_type, sensor,
            )
            return
        self.data[sensor_type][sensor] = data
        _LOGGER.debug(
            "Dispatching update to %s %s %s: %s",
            self.device_id,
            sensor_type,
            sensor,
            data,
        )
        # Notify entities listening for this specific sensor's updates.
        dispatcher_send(
            self.hass,
            SIGNAL_TADO_UPDATE_RECEIVED.format(self.device_id, sensor_type, sensor),
        )
    def get_capabilities(self, zone_id):
        """Return the capabilities of the devices."""
        return self.tado.getCapabilities(zone_id)
    def reset_zone_overlay(self, zone_id):
        """Reset the zone back to the default operation."""
        self.tado.resetZoneOverlay(zone_id)
        self.update_sensor("zone", zone_id)
    def set_presence(
        self, presence=PRESET_HOME,
    ):
        """Set the presence to home or away."""
        if presence == PRESET_AWAY:
            self.tado.setAway()
        elif presence == PRESET_HOME:
            self.tado.setHome()
    def set_zone_overlay(
        self,
        zone_id=None,
        overlay_mode=None,
        temperature=None,
        duration=None,
        device_type="HEATING",
        mode=None,
        fan_speed=None,
        swing=None,
    ):
        """Set a zone overlay (manual override of the Smart Schedule)."""
        _LOGGER.debug(
            "Set overlay for zone %s: overlay_mode=%s, temp=%s, duration=%s, type=%s, mode=%s fan_speed=%s swing=%s",
            zone_id,
            overlay_mode,
            temperature,
            duration,
            device_type,
            mode,
            fan_speed,
            swing,
        )
        try:
            # "ON" is the power argument -- overlays set here always power
            # the zone on; set_zone_off() handles the "OFF" case.
            self.tado.setZoneOverlay(
                zone_id,
                overlay_mode,
                temperature,
                duration,
                device_type,
                "ON",
                mode,
                fanSpeed=fan_speed,
                swing=swing,
            )
        except RequestException as exc:
            _LOGGER.error("Could not set zone overlay: %s", exc)
        self.update_sensor("zone", zone_id)
    def set_zone_off(self, zone_id, overlay_mode, device_type="HEATING"):
        """Set a zone to off."""
        try:
            self.tado.setZoneOverlay(
                zone_id, overlay_mode, None, None, device_type, "OFF"
            )
        except RequestException as exc:
            _LOGGER.error("Could not set zone overlay: %s", exc)
        self.update_sensor("zone", zone_id)
| pschmitt/home-assistant | homeassistant/components/tado/__init__.py | Python | apache-2.0 | 8,954 |
# Formatter test fixture: exercises continuation indent inside a ``for``
# statement header (content intentionally minimal).
for item in really_long_name_of_the_function_with_a_lot_of_patams(
        param1, param2, param3):
    pass
"""Test for RFLink cover components.
Test setup of RFLink covers component/platform. State tracking and
control of RFLink cover devices.
"""
import logging
from homeassistant.components.rflink import EVENT_BUTTON_PRESSED
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_CLOSE_COVER,
SERVICE_OPEN_COVER,
STATE_CLOSED,
STATE_OPEN,
)
from homeassistant.core import CoreState, State, callback
from tests.common import mock_restore_cache
from tests.components.rflink.test_init import mock_rflink
DOMAIN = "cover"
# Shared fixture: one RFLink hub on a fake port plus three cover devices;
# the first carries an alias so alias handling can be exercised, and two
# device-id patterns are ignored outright.
CONFIG = {
    "rflink": {
        "port": "/dev/ttyABC0",
        "ignore_devices": ["ignore_wildcard_*", "ignore_cover"],
    },
    DOMAIN: {
        "platform": "rflink",
        "devices": {
            "protocol_0_0": {"name": "test", "aliases": ["test_alias_0_0"]},
            "cover_0_0": {"name": "dim_test"},
            "cover_0_1": {"name": "cover_test"},
        },
    },
}
_LOGGER = logging.getLogger(__name__)
async def test_default_setup(hass, monkeypatch):
    """Test all basic functionality of the RFLink cover component."""
    # setup mocking rflink module
    event_callback, create, protocol, _ = await mock_rflink(
        hass, CONFIG, DOMAIN, monkeypatch
    )
    # make sure arguments are passed
    assert create.call_args_list[0][1]["ignore"]
    # test default state of cover loaded from config
    cover_initial = hass.states.get(DOMAIN + ".test")
    assert cover_initial.state == STATE_CLOSED
    assert cover_initial.attributes["assumed_state"]
    # cover should follow state of the hardware device by interpreting
    # incoming events for its name and aliases
    # mock incoming command event for this device
    event_callback({"id": "protocol_0_0", "command": "up"})
    await hass.async_block_till_done()
    cover_after_first_command = hass.states.get(DOMAIN + ".test")
    assert cover_after_first_command.state == STATE_OPEN
    # not sure why, but cover have always assumed_state=true
    assert cover_after_first_command.attributes.get("assumed_state")
    # mock incoming command event for this device
    event_callback({"id": "protocol_0_0", "command": "down"})
    await hass.async_block_till_done()
    assert hass.states.get(DOMAIN + ".test").state == STATE_CLOSED
    # should respond to group command ("allon" opens all group members)
    event_callback({"id": "protocol_0_0", "command": "allon"})
    await hass.async_block_till_done()
    cover_after_first_command = hass.states.get(DOMAIN + ".test")
    assert cover_after_first_command.state == STATE_OPEN
    # should respond to group command ("alloff" closes all group members)
    event_callback({"id": "protocol_0_0", "command": "alloff"})
    await hass.async_block_till_done()
    assert hass.states.get(DOMAIN + ".test").state == STATE_CLOSED
    # test following aliases
    # mock incoming command event for this device alias
    event_callback({"id": "test_alias_0_0", "command": "up"})
    await hass.async_block_till_done()
    assert hass.states.get(DOMAIN + ".test").state == STATE_OPEN
    # test changing state from HA propagates to RFLink
    hass.async_create_task(
        hass.services.async_call(
            DOMAIN, SERVICE_CLOSE_COVER, {ATTR_ENTITY_ID: DOMAIN + ".test"}
        )
    )
    await hass.async_block_till_done()
    assert hass.states.get(DOMAIN + ".test").state == STATE_CLOSED
    assert protocol.send_command_ack.call_args_list[0][0][0] == "protocol_0_0"
    assert protocol.send_command_ack.call_args_list[0][0][1] == "DOWN"
    hass.async_create_task(
        hass.services.async_call(
            DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: DOMAIN + ".test"}
        )
    )
    await hass.async_block_till_done()
    assert hass.states.get(DOMAIN + ".test").state == STATE_OPEN
    assert protocol.send_command_ack.call_args_list[1][0][1] == "UP"
async def test_firing_bus_event(hass, monkeypatch):
    """Incoming RFLink command events should be put on the HA event bus."""
    config = {
        "rflink": {"port": "/dev/ttyABC0"},
        DOMAIN: {
            "platform": "rflink",
            "devices": {
                "protocol_0_0": {
                    "name": "test",
                    "aliases": ["test_alias_0_0"],
                    # fire_event opts this device into bus events
                    "fire_event": True,
                }
            },
        },
    }
    # setup mocking rflink module
    event_callback, _, _, _ = await mock_rflink(hass, config, DOMAIN, monkeypatch)
    calls = []
    @callback
    def listener(event):
        calls.append(event)
    hass.bus.async_listen_once(EVENT_BUTTON_PRESSED, listener)
    # incoming device command should be mirrored onto the event bus
    event_callback({"id": "protocol_0_0", "command": "down"})
    await hass.async_block_till_done()
    assert calls[0].data == {"state": "down", "entity_id": DOMAIN + ".test"}
async def test_signal_repetitions(hass, monkeypatch):
    """Command should be sent amount of configured repetitions."""
    config = {
        "rflink": {"port": "/dev/ttyABC0"},
        DOMAIN: {
            "platform": "rflink",
            # device default of 3 repetitions; first device overrides with 2
            "device_defaults": {"signal_repetitions": 3},
            "devices": {
                "protocol_0_0": {"name": "test", "signal_repetitions": 2},
                "protocol_0_1": {"name": "test1"},
            },
        },
    }
    # setup mocking rflink module
    _, _, protocol, _ = await mock_rflink(hass, config, DOMAIN, monkeypatch)
    # test if signal repetition is performed according to configuration
    hass.async_create_task(
        hass.services.async_call(
            DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: DOMAIN + ".test"}
        )
    )
    # wait for commands and repetitions to finish
    await hass.async_block_till_done()
    # per-device override: 2 sends
    assert protocol.send_command_ack.call_count == 2
    # test if default apply to configured devices
    hass.async_create_task(
        hass.services.async_call(
            DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: DOMAIN + ".test1"}
        )
    )
    # wait for commands and repetitions to finish
    await hass.async_block_till_done()
    # device default: 3 more sends (2 + 3 = 5 total)
    assert protocol.send_command_ack.call_count == 5
async def test_signal_repetitions_alternation(hass, monkeypatch):
    """Simultaneously switching entities must alternate repetitions."""
    config = {
        "rflink": {"port": "/dev/ttyABC0"},
        DOMAIN: {
            "platform": "rflink",
            "devices": {
                "protocol_0_0": {"name": "test", "signal_repetitions": 2},
                "protocol_0_1": {"name": "test1", "signal_repetitions": 2},
            },
        },
    }
    # setup mocking rflink module
    _, _, protocol, _ = await mock_rflink(hass, config, DOMAIN, monkeypatch)
    hass.async_create_task(
        hass.services.async_call(
            DOMAIN, SERVICE_CLOSE_COVER, {ATTR_ENTITY_ID: DOMAIN + ".test"}
        )
    )
    hass.async_create_task(
        hass.services.async_call(
            DOMAIN, SERVICE_CLOSE_COVER, {ATTR_ENTITY_ID: DOMAIN + ".test1"}
        )
    )
    await hass.async_block_till_done()
    # repetitions are interleaved per device, not sent back-to-back
    assert protocol.send_command_ack.call_args_list[0][0][0] == "protocol_0_0"
    assert protocol.send_command_ack.call_args_list[1][0][0] == "protocol_0_1"
    assert protocol.send_command_ack.call_args_list[2][0][0] == "protocol_0_0"
    assert protocol.send_command_ack.call_args_list[3][0][0] == "protocol_0_1"
async def test_signal_repetitions_cancelling(hass, monkeypatch):
    """Cancel outstanding repetitions when state changed."""
    config = {
        "rflink": {"port": "/dev/ttyABC0"},
        DOMAIN: {
            "platform": "rflink",
            "devices": {"protocol_0_0": {"name": "test", "signal_repetitions": 3}},
        },
    }
    # setup mocking rflink module
    _, _, protocol, _ = await mock_rflink(hass, config, DOMAIN, monkeypatch)
    hass.async_create_task(
        hass.services.async_call(
            DOMAIN, SERVICE_CLOSE_COVER, {ATTR_ENTITY_ID: DOMAIN + ".test"}
        )
    )
    hass.async_create_task(
        hass.services.async_call(
            DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: DOMAIN + ".test"}
        )
    )
    await hass.async_block_till_done()
    # only the first DOWN is sent before OPEN supersedes the remaining
    # repetitions; the 3 configured repeats then all carry UP
    assert protocol.send_command_ack.call_args_list[0][0][1] == "DOWN"
    assert protocol.send_command_ack.call_args_list[1][0][1] == "UP"
    assert protocol.send_command_ack.call_args_list[2][0][1] == "UP"
    assert protocol.send_command_ack.call_args_list[3][0][1] == "UP"
async def test_group_alias(hass, monkeypatch):
    """Group aliases should only respond to group commands (allon/alloff)."""
    config = {
        "rflink": {"port": "/dev/ttyABC0"},
        DOMAIN: {
            "platform": "rflink",
            "devices": {
                "protocol_0_0": {"name": "test", "group_aliases": ["test_group_0_0"]}
            },
        },
    }
    # setup mocking rflink module
    event_callback, _, _, _ = await mock_rflink(hass, config, DOMAIN, monkeypatch)
    assert hass.states.get(DOMAIN + ".test").state == STATE_CLOSED
    # test sending group command to group alias
    event_callback({"id": "test_group_0_0", "command": "allon"})
    await hass.async_block_till_done()
    assert hass.states.get(DOMAIN + ".test").state == STATE_OPEN
    # test sending non-group command to group alias: must be ignored,
    # so the state stays OPEN
    event_callback({"id": "test_group_0_0", "command": "down"})
    await hass.async_block_till_done()
    assert hass.states.get(DOMAIN + ".test").state == STATE_OPEN
async def test_nogroup_alias(hass, monkeypatch):
    """Non group aliases should not respond to group commands."""
    config = {
        "rflink": {"port": "/dev/ttyABC0"},
        DOMAIN: {
            "platform": "rflink",
            "devices": {
                "protocol_0_0": {
                    "name": "test",
                    "nogroup_aliases": ["test_nogroup_0_0"],
                }
            },
        },
    }
    # setup mocking rflink module
    event_callback, _, _, _ = await mock_rflink(hass, config, DOMAIN, monkeypatch)
    assert hass.states.get(DOMAIN + ".test").state == STATE_CLOSED
    # test sending group command to nogroup alias
    event_callback({"id": "test_nogroup_0_0", "command": "allon"})
    await hass.async_block_till_done()
    # should not affect state
    assert hass.states.get(DOMAIN + ".test").state == STATE_CLOSED
    # test sending regular (non-group) command to nogroup alias
    event_callback({"id": "test_nogroup_0_0", "command": "up"})
    await hass.async_block_till_done()
    # should affect state
    assert hass.states.get(DOMAIN + ".test").state == STATE_OPEN
async def test_nogroup_device_id(hass, monkeypatch):
    """Device id that do not respond to group commands (allon/alloff)."""
    config = {
        "rflink": {"port": "/dev/ttyABC0"},
        DOMAIN: {
            "platform": "rflink",
            "devices": {"test_nogroup_0_0": {"name": "test", "group": False}},
        },
    }
    # setup mocking rflink module
    event_callback, _, _, _ = await mock_rflink(hass, config, DOMAIN, monkeypatch)
    assert hass.states.get(DOMAIN + ".test").state == STATE_CLOSED
    # test sending group command to nogroup
    event_callback({"id": "test_nogroup_0_0", "command": "allon"})
    await hass.async_block_till_done()
    # should not affect state
    assert hass.states.get(DOMAIN + ".test").state == STATE_CLOSED
    # test sending regular (non-group) command to nogroup
    event_callback({"id": "test_nogroup_0_0", "command": "up"})
    await hass.async_block_till_done()
    # should affect state
    assert hass.states.get(DOMAIN + ".test").state == STATE_OPEN
async def test_restore_state(hass, monkeypatch):
    """Ensure states are restored on startup."""
    config = {
        "rflink": {"port": "/dev/ttyABC0"},
        DOMAIN: {
            "platform": "rflink",
            "devices": {
                "RTS_12345678_0": {"name": "c1"},
                "test_restore_2": {"name": "c2"},
                "test_restore_3": {"name": "c3"},
                "test_restore_4": {"name": "c4"},
            },
        },
    }
    # seed the restore-state cache for c1 and c2 only
    mock_restore_cache(
        hass, (State(DOMAIN + ".c1", STATE_OPEN), State(DOMAIN + ".c2", STATE_CLOSED))
    )
    hass.state = CoreState.starting
    # setup mocking rflink module
    _, _, _, _ = await mock_rflink(hass, config, DOMAIN, monkeypatch)
    state = hass.states.get(DOMAIN + ".c1")
    assert state
    assert state.state == STATE_OPEN
    state = hass.states.get(DOMAIN + ".c2")
    assert state
    assert state.state == STATE_CLOSED
    # c3 has no cached state and defaults to closed
    state = hass.states.get(DOMAIN + ".c3")
    assert state
    assert state.state == STATE_CLOSED
    # not cached cover must default values
    state = hass.states.get(DOMAIN + ".c4")
    assert state
    assert state.state == STATE_CLOSED
    assert state.attributes["assumed_state"]
# The code checks the ID, it will use the
# 'inverted' class when the name starts with
# 'newkaku'
async def test_inverted_cover(hass, monkeypatch):
    """Ensure inverted covers send swapped UP/DOWN commands.

    Incoming 'up'/'down' events set every cover OPEN/CLOSED regardless of
    type, but when Home Assistant itself opens/closes a cover, devices that
    resolve to the 'inverted' class (explicit type 'inverted', or a
    'newkaku' id with no explicit type) must transmit the opposite rflink
    command.
    """
    config = {
        "rflink": {"port": "/dev/ttyABC0"},
        DOMAIN: {
            "platform": "rflink",
            "devices": {
                "nonkaku_device_1": {
                    "name": "nonkaku_type_standard",
                    "type": "standard",
                },
                "nonkaku_device_2": {"name": "nonkaku_type_none"},
                "nonkaku_device_3": {
                    "name": "nonkaku_type_inverted",
                    "type": "inverted",
                },
                "newkaku_device_4": {
                    "name": "newkaku_type_standard",
                    "type": "standard",
                },
                "newkaku_device_5": {"name": "newkaku_type_none"},
                "newkaku_device_6": {
                    "name": "newkaku_type_inverted",
                    "type": "inverted",
                },
            },
        },
    }
    # setup mocking rflink module
    event_callback, _, protocol, _ = await mock_rflink(
        hass, config, DOMAIN, monkeypatch
    )
    # test default state of cover loaded from config
    standard_cover = hass.states.get(DOMAIN + ".nonkaku_type_standard")
    assert standard_cover.state == STATE_CLOSED
    assert standard_cover.attributes["assumed_state"]
    # mock incoming up command event for nonkaku_device_1
    event_callback({"id": "nonkaku_device_1", "command": "up"})
    await hass.async_block_till_done()
    standard_cover = hass.states.get(DOMAIN + ".nonkaku_type_standard")
    assert standard_cover.state == STATE_OPEN
    assert standard_cover.attributes.get("assumed_state")
    # mock incoming up command event for nonkaku_device_2
    event_callback({"id": "nonkaku_device_2", "command": "up"})
    await hass.async_block_till_done()
    standard_cover = hass.states.get(DOMAIN + ".nonkaku_type_none")
    assert standard_cover.state == STATE_OPEN
    assert standard_cover.attributes.get("assumed_state")
    # mock incoming up command event for nonkaku_device_3
    event_callback({"id": "nonkaku_device_3", "command": "up"})
    await hass.async_block_till_done()
    inverted_cover = hass.states.get(DOMAIN + ".nonkaku_type_inverted")
    assert inverted_cover.state == STATE_OPEN
    assert inverted_cover.attributes.get("assumed_state")
    # mock incoming up command event for newkaku_device_4
    event_callback({"id": "newkaku_device_4", "command": "up"})
    await hass.async_block_till_done()
    inverted_cover = hass.states.get(DOMAIN + ".newkaku_type_standard")
    assert inverted_cover.state == STATE_OPEN
    assert inverted_cover.attributes.get("assumed_state")
    # mock incoming up command event for newkaku_device_5
    event_callback({"id": "newkaku_device_5", "command": "up"})
    await hass.async_block_till_done()
    inverted_cover = hass.states.get(DOMAIN + ".newkaku_type_none")
    assert inverted_cover.state == STATE_OPEN
    assert inverted_cover.attributes.get("assumed_state")
    # mock incoming up command event for newkaku_device_6
    event_callback({"id": "newkaku_device_6", "command": "up"})
    await hass.async_block_till_done()
    inverted_cover = hass.states.get(DOMAIN + ".newkaku_type_inverted")
    assert inverted_cover.state == STATE_OPEN
    assert inverted_cover.attributes.get("assumed_state")
    # mock incoming down command event for nonkaku_device_1
    event_callback({"id": "nonkaku_device_1", "command": "down"})
    await hass.async_block_till_done()
    standard_cover = hass.states.get(DOMAIN + ".nonkaku_type_standard")
    assert standard_cover.state == STATE_CLOSED
    assert standard_cover.attributes.get("assumed_state")
    # mock incoming down command event for nonkaku_device_2
    event_callback({"id": "nonkaku_device_2", "command": "down"})
    await hass.async_block_till_done()
    standard_cover = hass.states.get(DOMAIN + ".nonkaku_type_none")
    assert standard_cover.state == STATE_CLOSED
    assert standard_cover.attributes.get("assumed_state")
    # mock incoming down command event for nonkaku_device_3
    event_callback({"id": "nonkaku_device_3", "command": "down"})
    await hass.async_block_till_done()
    inverted_cover = hass.states.get(DOMAIN + ".nonkaku_type_inverted")
    assert inverted_cover.state == STATE_CLOSED
    assert inverted_cover.attributes.get("assumed_state")
    # mock incoming down command event for newkaku_device_4
    event_callback({"id": "newkaku_device_4", "command": "down"})
    await hass.async_block_till_done()
    inverted_cover = hass.states.get(DOMAIN + ".newkaku_type_standard")
    assert inverted_cover.state == STATE_CLOSED
    assert inverted_cover.attributes.get("assumed_state")
    # mock incoming down command event for newkaku_device_5
    event_callback({"id": "newkaku_device_5", "command": "down"})
    await hass.async_block_till_done()
    inverted_cover = hass.states.get(DOMAIN + ".newkaku_type_none")
    assert inverted_cover.state == STATE_CLOSED
    assert inverted_cover.attributes.get("assumed_state")
    # mock incoming down command event for newkaku_device_6
    event_callback({"id": "newkaku_device_6", "command": "down"})
    await hass.async_block_till_done()
    inverted_cover = hass.states.get(DOMAIN + ".newkaku_type_inverted")
    assert inverted_cover.state == STATE_CLOSED
    assert inverted_cover.attributes.get("assumed_state")
    # We are only testing the 'inverted' devices, the 'standard' devices
    # are already covered by other test cases.
    # should respond to group command
    event_callback({"id": "nonkaku_device_3", "command": "alloff"})
    await hass.async_block_till_done()
    inverted_cover = hass.states.get(DOMAIN + ".nonkaku_type_inverted")
    assert inverted_cover.state == STATE_CLOSED
    # should respond to group command
    event_callback({"id": "nonkaku_device_3", "command": "allon"})
    await hass.async_block_till_done()
    inverted_cover = hass.states.get(DOMAIN + ".nonkaku_type_inverted")
    assert inverted_cover.state == STATE_OPEN
    # should respond to group command
    event_callback({"id": "newkaku_device_4", "command": "alloff"})
    await hass.async_block_till_done()
    inverted_cover = hass.states.get(DOMAIN + ".newkaku_type_standard")
    assert inverted_cover.state == STATE_CLOSED
    # should respond to group command
    event_callback({"id": "newkaku_device_4", "command": "allon"})
    await hass.async_block_till_done()
    inverted_cover = hass.states.get(DOMAIN + ".newkaku_type_standard")
    assert inverted_cover.state == STATE_OPEN
    # should respond to group command
    event_callback({"id": "newkaku_device_5", "command": "alloff"})
    await hass.async_block_till_done()
    inverted_cover = hass.states.get(DOMAIN + ".newkaku_type_none")
    assert inverted_cover.state == STATE_CLOSED
    # should respond to group command
    event_callback({"id": "newkaku_device_5", "command": "allon"})
    await hass.async_block_till_done()
    inverted_cover = hass.states.get(DOMAIN + ".newkaku_type_none")
    assert inverted_cover.state == STATE_OPEN
    # should respond to group command
    event_callback({"id": "newkaku_device_6", "command": "alloff"})
    await hass.async_block_till_done()
    inverted_cover = hass.states.get(DOMAIN + ".newkaku_type_inverted")
    assert inverted_cover.state == STATE_CLOSED
    # should respond to group command
    event_callback({"id": "newkaku_device_6", "command": "allon"})
    await hass.async_block_till_done()
    inverted_cover = hass.states.get(DOMAIN + ".newkaku_type_inverted")
    assert inverted_cover.state == STATE_OPEN
    # Sending the close command from HA should result
    # in an 'DOWN' command sent to a non-newkaku device
    # that has its type set to 'standard'.
    hass.async_create_task(
        hass.services.async_call(
            DOMAIN,
            SERVICE_CLOSE_COVER,
            {ATTR_ENTITY_ID: DOMAIN + ".nonkaku_type_standard"},
        )
    )
    await hass.async_block_till_done()
    assert hass.states.get(DOMAIN + ".nonkaku_type_standard").state == STATE_CLOSED
    assert protocol.send_command_ack.call_args_list[0][0][0] == "nonkaku_device_1"
    assert protocol.send_command_ack.call_args_list[0][0][1] == "DOWN"
    # Sending the open command from HA should result
    # in an 'UP' command sent to a non-newkaku device
    # that has its type set to 'standard'.
    hass.async_create_task(
        hass.services.async_call(
            DOMAIN,
            SERVICE_OPEN_COVER,
            {ATTR_ENTITY_ID: DOMAIN + ".nonkaku_type_standard"},
        )
    )
    await hass.async_block_till_done()
    assert hass.states.get(DOMAIN + ".nonkaku_type_standard").state == STATE_OPEN
    assert protocol.send_command_ack.call_args_list[1][0][0] == "nonkaku_device_1"
    assert protocol.send_command_ack.call_args_list[1][0][1] == "UP"
    # Sending the close command from HA should result
    # in an 'DOWN' command sent to a non-newkaku device
    # that has its type not specified.
    hass.async_create_task(
        hass.services.async_call(
            DOMAIN, SERVICE_CLOSE_COVER, {ATTR_ENTITY_ID: DOMAIN + ".nonkaku_type_none"}
        )
    )
    await hass.async_block_till_done()
    assert hass.states.get(DOMAIN + ".nonkaku_type_none").state == STATE_CLOSED
    assert protocol.send_command_ack.call_args_list[2][0][0] == "nonkaku_device_2"
    assert protocol.send_command_ack.call_args_list[2][0][1] == "DOWN"
    # Sending the open command from HA should result
    # in an 'UP' command sent to a non-newkaku device
    # that has its type not specified.
    hass.async_create_task(
        hass.services.async_call(
            DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: DOMAIN + ".nonkaku_type_none"}
        )
    )
    await hass.async_block_till_done()
    assert hass.states.get(DOMAIN + ".nonkaku_type_none").state == STATE_OPEN
    assert protocol.send_command_ack.call_args_list[3][0][0] == "nonkaku_device_2"
    assert protocol.send_command_ack.call_args_list[3][0][1] == "UP"
    # Sending the close command from HA should result
    # in an 'UP' command sent to a non-newkaku device
    # that has its type set to 'inverted'.
    hass.async_create_task(
        hass.services.async_call(
            DOMAIN,
            SERVICE_CLOSE_COVER,
            {ATTR_ENTITY_ID: DOMAIN + ".nonkaku_type_inverted"},
        )
    )
    await hass.async_block_till_done()
    assert hass.states.get(DOMAIN + ".nonkaku_type_inverted").state == STATE_CLOSED
    assert protocol.send_command_ack.call_args_list[4][0][0] == "nonkaku_device_3"
    assert protocol.send_command_ack.call_args_list[4][0][1] == "UP"
    # Sending the open command from HA should result
    # in an 'DOWN' command sent to a non-newkaku device
    # that has its type set to 'inverted'.
    hass.async_create_task(
        hass.services.async_call(
            DOMAIN,
            SERVICE_OPEN_COVER,
            {ATTR_ENTITY_ID: DOMAIN + ".nonkaku_type_inverted"},
        )
    )
    await hass.async_block_till_done()
    assert hass.states.get(DOMAIN + ".nonkaku_type_inverted").state == STATE_OPEN
    assert protocol.send_command_ack.call_args_list[5][0][0] == "nonkaku_device_3"
    assert protocol.send_command_ack.call_args_list[5][0][1] == "DOWN"
    # Sending the close command from HA should result
    # in an 'DOWN' command sent to a newkaku device
    # that has its type set to 'standard'.
    hass.async_create_task(
        hass.services.async_call(
            DOMAIN,
            SERVICE_CLOSE_COVER,
            {ATTR_ENTITY_ID: DOMAIN + ".newkaku_type_standard"},
        )
    )
    await hass.async_block_till_done()
    assert hass.states.get(DOMAIN + ".newkaku_type_standard").state == STATE_CLOSED
    assert protocol.send_command_ack.call_args_list[6][0][0] == "newkaku_device_4"
    assert protocol.send_command_ack.call_args_list[6][0][1] == "DOWN"
    # Sending the open command from HA should result
    # in an 'UP' command sent to a newkaku device
    # that has its type set to 'standard'.
    hass.async_create_task(
        hass.services.async_call(
            DOMAIN,
            SERVICE_OPEN_COVER,
            {ATTR_ENTITY_ID: DOMAIN + ".newkaku_type_standard"},
        )
    )
    await hass.async_block_till_done()
    assert hass.states.get(DOMAIN + ".newkaku_type_standard").state == STATE_OPEN
    assert protocol.send_command_ack.call_args_list[7][0][0] == "newkaku_device_4"
    assert protocol.send_command_ack.call_args_list[7][0][1] == "UP"
    # Sending the close command from HA should result
    # in an 'UP' command sent to a newkaku device
    # that has its type not specified.
    hass.async_create_task(
        hass.services.async_call(
            DOMAIN, SERVICE_CLOSE_COVER, {ATTR_ENTITY_ID: DOMAIN + ".newkaku_type_none"}
        )
    )
    await hass.async_block_till_done()
    assert hass.states.get(DOMAIN + ".newkaku_type_none").state == STATE_CLOSED
    assert protocol.send_command_ack.call_args_list[8][0][0] == "newkaku_device_5"
    assert protocol.send_command_ack.call_args_list[8][0][1] == "UP"
    # Sending the open command from HA should result
    # in an 'DOWN' command sent to a newkaku device
    # that has its type not specified.
    hass.async_create_task(
        hass.services.async_call(
            DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: DOMAIN + ".newkaku_type_none"}
        )
    )
    await hass.async_block_till_done()
    assert hass.states.get(DOMAIN + ".newkaku_type_none").state == STATE_OPEN
    assert protocol.send_command_ack.call_args_list[9][0][0] == "newkaku_device_5"
    assert protocol.send_command_ack.call_args_list[9][0][1] == "DOWN"
    # Sending the close command from HA should result
    # in an 'UP' command sent to a newkaku device
    # that has its type set to 'inverted'.
    hass.async_create_task(
        hass.services.async_call(
            DOMAIN,
            SERVICE_CLOSE_COVER,
            {ATTR_ENTITY_ID: DOMAIN + ".newkaku_type_inverted"},
        )
    )
    await hass.async_block_till_done()
    assert hass.states.get(DOMAIN + ".newkaku_type_inverted").state == STATE_CLOSED
    assert protocol.send_command_ack.call_args_list[10][0][0] == "newkaku_device_6"
    assert protocol.send_command_ack.call_args_list[10][0][1] == "UP"
    # Sending the open command from HA should result
    # in an 'DOWN' command sent to a newkaku device
    # that has its type set to 'inverted'.
    hass.async_create_task(
        hass.services.async_call(
            DOMAIN,
            SERVICE_OPEN_COVER,
            {ATTR_ENTITY_ID: DOMAIN + ".newkaku_type_inverted"},
        )
    )
    await hass.async_block_till_done()
    assert hass.states.get(DOMAIN + ".newkaku_type_inverted").state == STATE_OPEN
    assert protocol.send_command_ack.call_args_list[11][0][0] == "newkaku_device_6"
    assert protocol.send_command_ack.call_args_list[11][0][1] == "DOWN"
| leppa/home-assistant | tests/components/rflink/test_cover.py | Python | apache-2.0 | 27,881 |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from nova import config
from nova import ipv6
from nova import paths
from nova.tests.unit import utils
CONF = cfg.CONF
# Pull options declared in other nova modules into the global CONF object so
# the fixture below can override their defaults for tests.
CONF.import_opt('use_ipv6', 'nova.netconf')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('scheduler_driver', 'nova.scheduler.manager')
CONF.import_opt('fake_network', 'nova.network.linux_net')
CONF.import_opt('network_size', 'nova.network.manager')
CONF.import_opt('num_networks', 'nova.network.manager')
CONF.import_opt('floating_ip_dns_manager', 'nova.network.floating_ips')
CONF.import_opt('instance_dns_manager', 'nova.network.floating_ips')
CONF.import_opt('policy_file', 'nova.openstack.common.policy')
CONF.import_opt('compute_driver', 'nova.virt.driver')
CONF.import_opt('api_paste_config', 'nova.wsgi')
class ConfFixture(config_fixture.Config):
    """Fixture to manage global conf settings."""
    def setUp(self):
        """Install test-friendly defaults on the global config object.

        Overrides are reverted automatically by the base fixture on
        cleanup.  Note the ordering: config.parse_args() is called midway,
        after the service/network defaults and before the database and
        behavior overrides below it.
        """
        super(ConfFixture, self).setUp()
        self.conf.set_default('api_paste_config',
                              paths.state_path_def('etc/nova/api-paste.ini'))
        self.conf.set_default('host', 'fake-mini')
        # use the fake virt driver and fake networking so no real
        # hypervisor or network access is needed in unit tests
        self.conf.set_default('compute_driver',
                              'nova.virt.fake.SmallFakeDriver')
        self.conf.set_default('fake_network', True)
        self.conf.set_default('flat_network_bridge', 'br100')
        self.conf.set_default('floating_ip_dns_manager',
                              'nova.tests.unit.utils.dns_manager')
        self.conf.set_default('instance_dns_manager',
                              'nova.tests.unit.utils.dns_manager')
        self.conf.set_default('network_size', 8)
        self.conf.set_default('num_networks', 2)
        self.conf.set_default('use_ipv6', True)
        self.conf.set_default('vlan_interface', 'eth0')
        self.conf.set_default('auth_strategy', 'noauth')
        config.parse_args([], default_config_files=[])
        # in-memory sqlite database for fast, isolated DB access
        self.conf.set_default('connection', "sqlite://", group='database')
        self.conf.set_default('sqlite_synchronous', False, group='database')
        self.conf.set_default('fatal_exception_format_errors', True)
        self.conf.set_default('enabled', True, 'osapi_v3')
        self.conf.set_default('force_dhcp_release', False)
        self.conf.set_default('periodic_enable', False)
        self.addCleanup(utils.cleanup_dns_managers)
        self.addCleanup(ipv6.api.reset_backend)
| cloudbase/nova-virtualbox | nova/tests/unit/conf_fixture.py | Python | apache-2.0 | 3,169 |
# Copyright (c) 2013 OpenStack, LLC.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
from manila.api.middleware import auth
from manila.i18n import _LW
LOG = log.getLogger(__name__)
class ManilaKeystoneContext(auth.ManilaKeystoneContext):
    """Deprecated alias for manila.api.middleware.auth.ManilaKeystoneContext.

    Kept so existing paste pipelines referencing the old import path keep
    working; logs a deprecation warning when instantiated.
    """

    def __init__(self, application):
        # LOG.warning, not the deprecated LOG.warn alias.
        LOG.warning(_LW('manila.api.auth:ManilaKeystoneContext is deprecated. '
                        'Please use '
                        'manila.api.middleware.auth:ManilaKeystoneContext '
                        'instead.'))
        super(ManilaKeystoneContext, self).__init__(application)
def pipeline_factory(loader, global_conf, **local_conf):
    """Deprecated alias for manila.api.middleware.auth.pipeline_factory.

    A paste.deploy pipeline factory must return the composed WSGI app;
    previously the wrapped factory's return value was discarded, handing
    paste.deploy None.  Now the result is returned.
    """
    # LOG.warning, not the deprecated LOG.warn alias.
    LOG.warning(_LW('manila.api.auth:pipeline_factory is deprecated. Please '
                    'use manila.api.middleware.auth:pipeline_factory '
                    'instead.'))
    return auth.pipeline_factory(loader, global_conf, **local_conf)
| jcsp/manila | manila/api/auth.py | Python | apache-2.0 | 1,414 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
from xml.dom import minidom
from nova.api.openstack import xmlutil
from nova import exception
from nova import test
from nova.tests import utils as tests_utils
class SelectorTest(test.TestCase):
    """Tests for xmlutil.Selector and ConstantSelector."""

    # Shared object graph that every selector test selects from.
    obj_for_test = {
        'test': {
            'name': 'test',
            'values': [1, 2, 3],
            'attrs': {
                'foo': 1,
                'bar': 2,
                'baz': 3,
            },
        },
    }

    def test_empty_selector(self):
        """An empty selector returns the object unchanged."""
        sel = xmlutil.Selector()
        self.assertEqual(len(sel.chain), 0)
        self.assertEqual(sel(self.obj_for_test), self.obj_for_test)

    def test_dict_selector(self):
        """A one-element chain selects a dictionary entry."""
        sel = xmlutil.Selector('test')
        self.assertEqual(len(sel.chain), 1)
        self.assertEqual(sel.chain[0], 'test')
        self.assertEqual(sel(self.obj_for_test),
                         self.obj_for_test['test'])

    def test_datum_selector(self):
        """A two-element chain selects a nested value."""
        sel = xmlutil.Selector('test', 'name')
        self.assertEqual(len(sel.chain), 2)
        self.assertEqual(sel.chain[0], 'test')
        self.assertEqual(sel.chain[1], 'name')
        self.assertEqual(sel(self.obj_for_test), 'test')

    def test_list_selector(self):
        """An integer chain element indexes into a list."""
        sel = xmlutil.Selector('test', 'values', 0)
        self.assertEqual(len(sel.chain), 3)
        self.assertEqual(sel.chain[0], 'test')
        self.assertEqual(sel.chain[1], 'values')
        self.assertEqual(sel.chain[2], 0)
        self.assertEqual(sel(self.obj_for_test), 1)

    def test_items_selector(self):
        """get_items in the chain yields (key, value) pairs of a dict."""
        sel = xmlutil.Selector('test', 'attrs', xmlutil.get_items)
        self.assertEqual(len(sel.chain), 3)
        self.assertEqual(sel.chain[2], xmlutil.get_items)
        for key, val in sel(self.obj_for_test):
            self.assertEqual(self.obj_for_test['test']['attrs'][key], val)

    def test_missing_key_selector(self):
        """A missing key yields None, or KeyError in do_raise mode."""
        sel = xmlutil.Selector('test2', 'attrs')
        # assertIsNone rather than assertEqual(..., None) (idiomatic)
        self.assertIsNone(sel(self.obj_for_test))
        self.assertRaises(KeyError, sel, self.obj_for_test, True)

    def test_constant_selector(self):
        """A ConstantSelector returns its fixed value for any object."""
        sel = xmlutil.ConstantSelector('Foobar')
        self.assertEqual(sel.value, 'Foobar')
        self.assertEqual(sel(self.obj_for_test), 'Foobar')
class TemplateElementTest(test.TestCase):
    """Tests for xmlutil.TemplateElement.

    Rewritten to use idiomatic unittest assertions (assertIsNone,
    assertIn, assertNotIn, assertTrue) instead of comparing against the
    None/True/False literals with assertEqual.
    """

    def test_element_initial_attributes(self):
        """Attributes from attrib and kwargs merge; kwargs win."""
        # Create a template element with some attributes
        elem = xmlutil.TemplateElement('test', attrib=dict(a=1, b=2, c=3),
                                       c=4, d=5, e=6)
        # Verify all the attributes are as expected
        expected = dict(a=1, b=2, c=4, d=5, e=6)
        for k, v in expected.items():
            self.assertEqual(elem.attrib[k].chain[0], v)

    def test_element_get_attributes(self):
        """get() retrieves the selector wrapped around each attribute."""
        expected = dict(a=1, b=2, c=3)
        # Create a template element with some attributes
        elem = xmlutil.TemplateElement('test', attrib=expected)
        # Verify that get() retrieves the attributes
        for k, v in expected.items():
            self.assertEqual(elem.get(k).chain[0], v)

    def test_element_set_attributes(self):
        """set() wraps values in selectors; explicit selectors pass through."""
        attrs = dict(a=None, b='foo', c=xmlutil.Selector('foo', 'bar'))
        # Create a bare template element with no attributes
        elem = xmlutil.TemplateElement('test')
        # Set the attribute values
        for k, v in attrs.items():
            elem.set(k, v)
        # Now verify what got set
        self.assertEqual(len(elem.attrib['a'].chain), 1)
        self.assertEqual(elem.attrib['a'].chain[0], 'a')
        self.assertEqual(len(elem.attrib['b'].chain), 1)
        self.assertEqual(elem.attrib['b'].chain[0], 'foo')
        self.assertEqual(elem.attrib['c'], attrs['c'])

    def test_element_attribute_keys(self):
        """keys() lists all attribute names."""
        attrs = dict(a=1, b=2, c=3, d=4)
        expected = set(attrs.keys())
        # Create a template element with some attributes
        elem = xmlutil.TemplateElement('test', attrib=attrs)
        # Now verify keys
        self.assertEqual(set(elem.keys()), expected)

    def test_element_attribute_items(self):
        """items() yields every (name, selector) pair exactly once."""
        expected = dict(a=xmlutil.Selector(1),
                        b=xmlutil.Selector(2),
                        c=xmlutil.Selector(3))
        keys = set(expected.keys())
        # Create a template element with some attributes
        elem = xmlutil.TemplateElement('test', attrib=expected)
        # Now verify items
        for k, v in elem.items():
            self.assertEqual(expected[k], v)
            keys.remove(k)
        # Did we visit all keys?
        self.assertEqual(len(keys), 0)

    def test_element_selector_none(self):
        """No selector argument yields an empty selector."""
        # Create a template element with no selector
        elem = xmlutil.TemplateElement('test')
        self.assertEqual(len(elem.selector.chain), 0)

    def test_element_selector_string(self):
        """A string selector is wrapped into a one-element chain."""
        # Create a template element with a string selector
        elem = xmlutil.TemplateElement('test', selector='test')
        self.assertEqual(len(elem.selector.chain), 1)
        self.assertEqual(elem.selector.chain[0], 'test')

    def test_element_selector(self):
        """An explicit Selector instance is stored as-is."""
        sel = xmlutil.Selector('a', 'b')
        # Create a template element with an explicit selector
        elem = xmlutil.TemplateElement('test', selector=sel)
        self.assertEqual(elem.selector, sel)

    def test_element_subselector_none(self):
        """No subselector argument leaves subselector as None."""
        # Create a template element with no subselector
        elem = xmlutil.TemplateElement('test')
        self.assertIsNone(elem.subselector)

    def test_element_subselector_string(self):
        """A string subselector is wrapped into a one-element chain."""
        # Create a template element with a string subselector
        elem = xmlutil.TemplateElement('test', subselector='test')
        self.assertEqual(len(elem.subselector.chain), 1)
        self.assertEqual(elem.subselector.chain[0], 'test')

    def test_element_subselector(self):
        """An explicit Selector subselector is stored as-is."""
        sel = xmlutil.Selector('a', 'b')
        # Create a template element with an explicit subselector
        elem = xmlutil.TemplateElement('test', subselector=sel)
        self.assertEqual(elem.subselector, sel)

    def test_element_append_child(self):
        """append() adds a child; duplicate tags are rejected."""
        # Create an element
        elem = xmlutil.TemplateElement('test')
        # Make sure the element starts off empty
        self.assertEqual(len(elem), 0)
        # Create a child element
        child = xmlutil.TemplateElement('child')
        # Append the child to the parent
        elem.append(child)
        # Verify that the child was added
        self.assertEqual(len(elem), 1)
        self.assertEqual(elem[0], child)
        self.assertIn('child', elem)
        self.assertEqual(elem['child'], child)
        # Ensure that multiple children of the same name are rejected
        child2 = xmlutil.TemplateElement('child')
        self.assertRaises(KeyError, elem.append, child2)

    def test_element_extend_children(self):
        """extend() adds children atomically; duplicates abort the add."""
        # Create an element
        elem = xmlutil.TemplateElement('test')
        # Make sure the element starts off empty
        self.assertEqual(len(elem), 0)
        # Create a few children
        children = [
            xmlutil.TemplateElement('child1'),
            xmlutil.TemplateElement('child2'),
            xmlutil.TemplateElement('child3'),
        ]
        # Extend the parent by those children
        elem.extend(children)
        # Verify that the children were added
        self.assertEqual(len(elem), 3)
        for idx in range(len(elem)):
            self.assertEqual(children[idx], elem[idx])
            self.assertIn(children[idx].tag, elem)
            self.assertEqual(elem[children[idx].tag], children[idx])
        # Ensure that multiple children of the same name are rejected
        children2 = [
            xmlutil.TemplateElement('child4'),
            xmlutil.TemplateElement('child1'),
        ]
        self.assertRaises(KeyError, elem.extend, children2)
        # Also ensure that child4 was not added
        self.assertEqual(len(elem), 3)
        self.assertEqual(elem[-1].tag, 'child3')

    def test_element_insert_child(self):
        """insert() places a child at an index; duplicate tags rejected."""
        # Create an element
        elem = xmlutil.TemplateElement('test')
        # Make sure the element starts off empty
        self.assertEqual(len(elem), 0)
        # Create a few children
        children = [
            xmlutil.TemplateElement('child1'),
            xmlutil.TemplateElement('child2'),
            xmlutil.TemplateElement('child3'),
        ]
        # Extend the parent by those children
        elem.extend(children)
        # Create a child to insert
        child = xmlutil.TemplateElement('child4')
        # Insert it
        elem.insert(1, child)
        # Ensure the child was inserted in the right place
        self.assertEqual(len(elem), 4)
        children.insert(1, child)
        for idx in range(len(elem)):
            self.assertEqual(children[idx], elem[idx])
            self.assertIn(children[idx].tag, elem)
            self.assertEqual(elem[children[idx].tag], children[idx])
        # Ensure that multiple children of the same name are rejected
        child2 = xmlutil.TemplateElement('child2')
        self.assertRaises(KeyError, elem.insert, 2, child2)

    def test_element_remove_child(self):
        """remove() requires identity, not tag equality."""
        # Create an element
        elem = xmlutil.TemplateElement('test')
        # Make sure the element starts off empty
        self.assertEqual(len(elem), 0)
        # Create a few children
        children = [
            xmlutil.TemplateElement('child1'),
            xmlutil.TemplateElement('child2'),
            xmlutil.TemplateElement('child3'),
        ]
        # Extend the parent by those children
        elem.extend(children)
        # Create a test child to remove
        child = xmlutil.TemplateElement('child2')
        # Try to remove it
        self.assertRaises(ValueError, elem.remove, child)
        # Ensure that no child was removed
        self.assertEqual(len(elem), 3)
        # Now remove a legitimate child
        elem.remove(children[1])
        # Ensure that the child was removed
        self.assertEqual(len(elem), 2)
        self.assertEqual(elem[0], children[0])
        self.assertEqual(elem[1], children[2])
        self.assertNotIn('child2', elem)

        # Ensure the child cannot be retrieved by name
        def get_key(elem, key):
            return elem[key]
        self.assertRaises(KeyError, get_key, elem, 'child2')

    def test_element_text(self):
        """text accepts strings (wrapped), selectors, None and deletion."""
        # Create an element
        elem = xmlutil.TemplateElement('test')
        # Ensure that it has no text
        self.assertIsNone(elem.text)
        # Try setting it to a string and ensure it becomes a selector
        elem.text = 'test'
        self.assertTrue(hasattr(elem.text, 'chain'))
        self.assertEqual(len(elem.text.chain), 1)
        self.assertEqual(elem.text.chain[0], 'test')
        # Try resetting the text to None
        elem.text = None
        self.assertIsNone(elem.text)
        # Now make up a selector and try setting the text to that
        sel = xmlutil.Selector()
        elem.text = sel
        self.assertEqual(elem.text, sel)
        # Finally, try deleting the text and see what happens
        del elem.text
        self.assertIsNone(elem.text)

    def test_apply_attrs(self):
        """apply() copies attribute selector values onto an etree element."""
        # Create a template element
        attrs = dict(attr1=xmlutil.ConstantSelector(1),
                     attr2=xmlutil.ConstantSelector(2))
        tmpl_elem = xmlutil.TemplateElement('test', attrib=attrs)
        # Create an etree element
        elem = etree.Element('test')
        # Apply the template to the element
        tmpl_elem.apply(elem, None)
        # Now, verify the correct attributes were set
        for k, v in elem.items():
            self.assertEqual(str(attrs[k].value), v)

    def test_apply_text(self):
        """apply() copies the text selector value onto an etree element."""
        # Create a template element
        tmpl_elem = xmlutil.TemplateElement('test')
        tmpl_elem.text = xmlutil.ConstantSelector(1)
        # Create an etree element
        elem = etree.Element('test')
        # Apply the template to the element
        tmpl_elem.apply(elem, None)
        # Now, verify the text was set
        self.assertEqual(str(tmpl_elem.text.value), elem.text)

    def test__render(self):
        """_render merges master and slave attributes and nsmap."""
        attrs = dict(attr1=xmlutil.ConstantSelector(1),
                     attr2=xmlutil.ConstantSelector(2),
                     attr3=xmlutil.ConstantSelector(3))
        # Create a master template element
        master_elem = xmlutil.TemplateElement('test', attr1=attrs['attr1'])
        # Create a couple of slave template element
        slave_elems = [
            xmlutil.TemplateElement('test', attr2=attrs['attr2']),
            xmlutil.TemplateElement('test', attr3=attrs['attr3']),
        ]
        # Try the render
        elem = master_elem._render(None, None, slave_elems, None)
        # Verify the particulars of the render
        self.assertEqual(elem.tag, 'test')
        self.assertEqual(len(elem.nsmap), 0)
        for k, v in elem.items():
            self.assertEqual(str(attrs[k].value), v)
        # Create a parent for the element to be rendered
        parent = etree.Element('parent')
        # Try the render again...
        elem = master_elem._render(parent, None, slave_elems, dict(a='foo'))
        # Verify the particulars of the render
        self.assertEqual(len(parent), 1)
        self.assertEqual(parent[0], elem)
        self.assertEqual(len(elem.nsmap), 1)
        self.assertEqual(elem.nsmap['a'], 'foo')

    def test_render(self):
        """render() emits one (element, datum) pair per selected object."""
        # Create a template element
        tmpl_elem = xmlutil.TemplateElement('test')
        tmpl_elem.text = xmlutil.Selector()
        # Create the object we're going to render
        obj = ['elem1', 'elem2', 'elem3', 'elem4']
        # Try a render with no object
        elems = tmpl_elem.render(None, None)
        self.assertEqual(len(elems), 0)
        # Try a render with one object
        elems = tmpl_elem.render(None, 'foo')
        self.assertEqual(len(elems), 1)
        self.assertEqual(elems[0][0].text, 'foo')
        self.assertEqual(elems[0][1], 'foo')
        # Now, try rendering an object with multiple entries
        parent = etree.Element('parent')
        elems = tmpl_elem.render(parent, obj)
        self.assertEqual(len(elems), 4)
        # Check the results
        for idx in range(len(obj)):
            self.assertEqual(elems[idx][0].text, obj[idx])
            self.assertEqual(elems[idx][1], obj[idx])

    def test_subelement(self):
        """SubTemplateElement attaches itself to the given parent."""
        # Try the SubTemplateElement constructor
        parent = xmlutil.SubTemplateElement(None, 'parent')
        self.assertEqual(parent.tag, 'parent')
        self.assertEqual(len(parent), 0)
        # Now try it with a parent element
        child = xmlutil.SubTemplateElement(parent, 'child')
        self.assertEqual(child.tag, 'child')
        self.assertEqual(len(parent), 1)
        self.assertEqual(parent[0], child)

    def test_wrap(self):
        """unwrap() returns the element; wrap() builds a Template around it."""
        # These are strange methods, but they make things easier
        elem = xmlutil.TemplateElement('test')
        self.assertEqual(elem.unwrap(), elem)
        self.assertEqual(elem.wrap().root, elem)

    def test_dyntag(self):
        """A selector tag is resolved per-object at render time."""
        obj = ['a', 'b', 'c']
        # Create a template element with a dynamic tag
        tmpl_elem = xmlutil.TemplateElement(xmlutil.Selector())
        # Try the render
        parent = etree.Element('parent')
        elems = tmpl_elem.render(parent, obj)
        # Verify the particulars of the render
        self.assertEqual(len(elems), len(obj))
        for idx in range(len(obj)):
            self.assertEqual(elems[idx][0].tag, obj[idx])
class TemplateTest(test.TestCase):
    """Tests for xmlutil.Template and the master/slave template machinery."""
    def test_wrap(self):
        """unwrap() exposes the root element; wrap() returns the template."""
        # These are strange methods, but they make things easier
        elem = xmlutil.TemplateElement('test')
        tmpl = xmlutil.Template(elem)
        self.assertEqual(tmpl.unwrap(), elem)
        self.assertEqual(tmpl.wrap(), tmpl)
    def test__siblings(self):
        """A plain Template has exactly one sibling: its own root."""
        # Set up a basic template
        elem = xmlutil.TemplateElement('test')
        tmpl = xmlutil.Template(elem)
        # Check that we get the right siblings
        siblings = tmpl._siblings()
        self.assertEqual(len(siblings), 1)
        self.assertEqual(siblings[0], elem)
    def test__nsmap(self):
        """_nsmap() returns a copy of the namespace map, not the original."""
        # Set up a basic template
        elem = xmlutil.TemplateElement('test')
        tmpl = xmlutil.Template(elem, nsmap=dict(a="foo"))
        # Check out that we get the right namespace dictionary
        nsmap = tmpl._nsmap()
        # A distinct dict object with the same single mapping
        self.assertNotEqual(id(nsmap), id(tmpl.nsmap))
        self.assertEqual(len(nsmap), 1)
        self.assertEqual(nsmap['a'], 'foo')
    def test_master_attach(self):
        """attach() only accepts applicable slaves whose root tag matches."""
        # Set up a master template
        elem = xmlutil.TemplateElement('test')
        tmpl = xmlutil.MasterTemplate(elem, 1)
        # Make sure it has a root but no slaves
        self.assertEqual(tmpl.root, elem)
        self.assertEqual(len(tmpl.slaves), 0)
        # Try to attach an invalid slave (mismatched root tag)
        bad_elem = xmlutil.TemplateElement('test2')
        self.assertRaises(ValueError, tmpl.attach, bad_elem)
        self.assertEqual(len(tmpl.slaves), 0)
        # Try to attach an invalid and a valid slave; neither may be kept
        good_elem = xmlutil.TemplateElement('test')
        self.assertRaises(ValueError, tmpl.attach, good_elem, bad_elem)
        self.assertEqual(len(tmpl.slaves), 0)
        # Try to attach an inapplicable template (apply() returns False)
        class InapplicableTemplate(xmlutil.Template):
            def apply(self, master):
                return False
        inapp_tmpl = InapplicableTemplate(good_elem)
        tmpl.attach(inapp_tmpl)
        self.assertEqual(len(tmpl.slaves), 0)
        # Now try attaching an applicable template
        tmpl.attach(good_elem)
        self.assertEqual(len(tmpl.slaves), 1)
        self.assertEqual(tmpl.slaves[0].root, good_elem)
    def test_master_copy(self):
        """copy() shares root/version/nsmap but clones the slave list."""
        # Construct a master template
        elem = xmlutil.TemplateElement('test')
        tmpl = xmlutil.MasterTemplate(elem, 1, nsmap=dict(a='foo'))
        # Give it a slave
        slave = xmlutil.TemplateElement('test')
        tmpl.attach(slave)
        # Construct a copy
        copy = tmpl.copy()
        # Check to see if we actually managed a copy
        self.assertNotEqual(tmpl, copy)
        self.assertEqual(tmpl.root, copy.root)
        self.assertEqual(tmpl.version, copy.version)
        # nsmap is shared by identity; the slave list is a fresh list
        self.assertEqual(id(tmpl.nsmap), id(copy.nsmap))
        self.assertNotEqual(id(tmpl.slaves), id(copy.slaves))
        self.assertEqual(len(tmpl.slaves), len(copy.slaves))
        self.assertEqual(tmpl.slaves[0], copy.slaves[0])
    def test_slave_apply(self):
        """apply() honours the slave's [min, max] version range against
        the master's version."""
        # Construct a master template
        elem = xmlutil.TemplateElement('test')
        master = xmlutil.MasterTemplate(elem, 3)
        # Construct a slave template with applicable minimum version
        slave = xmlutil.SlaveTemplate(elem, 2)
        self.assertEqual(slave.apply(master), True)
        # Construct a slave template with equal minimum version
        slave = xmlutil.SlaveTemplate(elem, 3)
        self.assertEqual(slave.apply(master), True)
        # Construct a slave template with inapplicable minimum version
        slave = xmlutil.SlaveTemplate(elem, 4)
        self.assertEqual(slave.apply(master), False)
        # Construct a slave template with applicable version range
        slave = xmlutil.SlaveTemplate(elem, 2, 4)
        self.assertEqual(slave.apply(master), True)
        # Construct a slave template with low version range
        slave = xmlutil.SlaveTemplate(elem, 1, 2)
        self.assertEqual(slave.apply(master), False)
        # Construct a slave template with high version range
        slave = xmlutil.SlaveTemplate(elem, 4, 5)
        self.assertEqual(slave.apply(master), False)
        # Construct a slave template with matching version range
        slave = xmlutil.SlaveTemplate(elem, 3, 3)
        self.assertEqual(slave.apply(master), True)
    def test__serialize(self):
        """End-to-end render of a master template with an attached slave."""
        # Our test object to serialize
        obj = {
            'test': {
                'name': 'foobar',
                'values': [1, 2, 3, 4],
                'attrs': {
                    'a': 1,
                    'b': 2,
                    'c': 3,
                    'd': 4,
                },
                'image': {
                    'name': 'image_foobar',
                    'id': 42,
                },
            },
        }
        # Set up our master template
        root = xmlutil.TemplateElement('test', selector='test',
                                       name='name')
        value = xmlutil.SubTemplateElement(root, 'value', selector='values')
        value.text = xmlutil.Selector()
        attrs = xmlutil.SubTemplateElement(root, 'attrs', selector='attrs')
        xmlutil.SubTemplateElement(attrs, 'attr', selector=xmlutil.get_items,
                                   key=0, value=1)
        master = xmlutil.MasterTemplate(root, 1, nsmap=dict(f='foo'))
        # Set up our slave template (adds the <image> child)
        root_slave = xmlutil.TemplateElement('test', selector='test')
        image = xmlutil.SubTemplateElement(root_slave, 'image',
                                           selector='image', id='id')
        image.text = xmlutil.Selector('name')
        slave = xmlutil.SlaveTemplate(root_slave, 1, nsmap=dict(b='bar'))
        # Attach the slave to the master...
        master.attach(slave)
        # Try serializing our object
        siblings = master._siblings()
        nsmap = master._nsmap()
        result = master._serialize(None, obj, siblings, nsmap)
        # Now we get to manually walk the element tree...
        self.assertEqual(result.tag, 'test')
        # Namespace maps of master and slave are merged
        self.assertEqual(len(result.nsmap), 2)
        self.assertEqual(result.nsmap['f'], 'foo')
        self.assertEqual(result.nsmap['b'], 'bar')
        self.assertEqual(result.get('name'), obj['test']['name'])
        for idx, val in enumerate(obj['test']['values']):
            self.assertEqual(result[idx].tag, 'value')
            self.assertEqual(result[idx].text, str(val))
        idx += 1
        self.assertEqual(result[idx].tag, 'attrs')
        for attr in result[idx]:
            self.assertEqual(attr.tag, 'attr')
            self.assertEqual(attr.get('value'),
                             str(obj['test']['attrs'][attr.get('key')]))
        idx += 1
        # The <image> child comes from the attached slave template
        self.assertEqual(result[idx].tag, 'image')
        self.assertEqual(result[idx].get('id'),
                         str(obj['test']['image']['id']))
        self.assertEqual(result[idx].text, obj['test']['image']['name'])
class MasterTemplateBuilder(xmlutil.TemplateBuilder):
    """Builder producing a minimal master template for the cache tests."""
    def construct(self):
        """Build a version-1 MasterTemplate rooted at a 'test' element."""
        root = xmlutil.TemplateElement('test')
        return xmlutil.MasterTemplate(root, 1)
class SlaveTemplateBuilder(xmlutil.TemplateBuilder):
    """Builder producing a minimal slave template for the cache tests."""
    def construct(self):
        """Build a min-version-1 SlaveTemplate rooted at a 'test' element."""
        root = xmlutil.TemplateElement('test')
        return xmlutil.SlaveTemplate(root, 1)
class TemplateBuilderTest(test.TestCase):
    """Tests for the class-level template caching of TemplateBuilder."""
    def test_master_template_builder(self):
        """Master builders cache one template and hand out fresh copies."""
        # Make sure the template hasn't been built yet
        self.assertEqual(MasterTemplateBuilder._tmpl, None)
        # Now, construct the template
        tmpl1 = MasterTemplateBuilder()
        # Make sure that there is a template cached...
        self.assertNotEqual(MasterTemplateBuilder._tmpl, None)
        # Make sure it wasn't what was returned...
        self.assertNotEqual(MasterTemplateBuilder._tmpl, tmpl1)
        # Make sure it doesn't get rebuilt
        cached = MasterTemplateBuilder._tmpl
        tmpl2 = MasterTemplateBuilder()
        self.assertEqual(MasterTemplateBuilder._tmpl, cached)
        # Make sure we're always getting fresh copies
        self.assertNotEqual(tmpl1, tmpl2)
        # Make sure we can override the copying behavior
        tmpl3 = MasterTemplateBuilder(False)
        self.assertEqual(MasterTemplateBuilder._tmpl, tmpl3)
    def test_slave_template_builder(self):
        """Slave builders cache one template and always return it directly."""
        # Make sure the template hasn't been built yet
        self.assertEqual(SlaveTemplateBuilder._tmpl, None)
        # Now, construct the template
        tmpl1 = SlaveTemplateBuilder()
        # Make sure there is a template cached...
        self.assertNotEqual(SlaveTemplateBuilder._tmpl, None)
        # Make sure it was what was returned...
        self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1)
        # Make sure it doesn't get rebuilt
        tmpl2 = SlaveTemplateBuilder()
        self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1)
        # Make sure we're always getting the cached copy
        self.assertEqual(tmpl1, tmpl2)
class MiscellaneousXMLUtilTests(test.TestCase):
    """Tests for standalone helper functions in xmlutil."""
    def test_make_flat_dict(self):
        """make_flat_dict() serializes a flat dict into child elements."""
        expected_xml = ("<?xml version='1.0' encoding='UTF-8'?>\n"
                        '<wrapper><a>foo</a><b>bar</b></wrapper>')
        root = xmlutil.make_flat_dict('wrapper')
        tmpl = xmlutil.MasterTemplate(root, 1)
        result = tmpl.serialize(dict(wrapper=dict(a='foo', b='bar')))
        self.assertEqual(result, expected_xml)
    def test_safe_parse_xml(self):
        """safe_minidom_parse_string() round-trips benign XML but rejects
        a malicious body with MalformedRequestBody."""
        normal_body = ("""
            <?xml version="1.0" ?><foo>
            <bar>
            <v1>hey</v1>
            <v2>there</v2>
            </bar>
            </foo>""").strip()
        dom = xmlutil.safe_minidom_parse_string(normal_body)
        # toxml() must reproduce the input unchanged
        self.assertEqual(normal_body, str(dom.toxml()))
        self.assertRaises(exception.MalformedRequestBody,
                          xmlutil.safe_minidom_parse_string,
                          tests_utils.killer_xml_body())
class SafeParserTestCase(test.TestCase):
    """Tests that ProtectedExpatParser rejects dangerous XML constructs."""
    def test_external_dtd(self):
        """An external DTD reference raises even when DTDs are allowed."""
        xml_string = ("""<?xml version="1.0" encoding="utf-8"?>
            <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
            "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
            <html>
            <head/>
            <body>html with dtd</body>
            </html>""")
        parser = xmlutil.ProtectedExpatParser(forbid_dtd=False,
                                              forbid_entities=True)
        self.assertRaises(ValueError,
                          minidom.parseString,
                          xml_string, parser)
    def test_external_file(self):
        """An external-file entity declaration is rejected."""
        # NOTE(review): upstream nova uses the entity reference "&ee;" in
        # the body here; the literal character below looks like an
        # encoding artifact — verify against the original source. The
        # parser rejects the ENTITY declaration itself, so the test's
        # outcome does not depend on the body content.
        xml_string = """<!DOCTYPE external [
            <!ENTITY ee SYSTEM "file:///PATH/TO/root.xml">
            ]>
            <root>ⅇ</root>"""
        parser = xmlutil.ProtectedExpatParser(forbid_dtd=False,
                                              forbid_entities=True)
        self.assertRaises(ValueError,
                          minidom.parseString,
                          xml_string, parser)
    def test_notation(self):
        """A NOTATION declaration inside the DTD is rejected."""
        xml_string = """<?xml version="1.0" standalone="no"?>
            <!-- comment data -->
            <!DOCTYPE x [
            <!NOTATION notation SYSTEM "notation.jpeg">
            ]>
            <root attr1="value1">
            </root>"""
        parser = xmlutil.ProtectedExpatParser(forbid_dtd=False,
                                              forbid_entities=True)
        self.assertRaises(ValueError,
                          minidom.parseString,
                          xml_string, parser)
| jessicalucci/NovaOrc | nova/tests/api/openstack/test_xmlutil.py | Python | apache-2.0 | 28,151 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.sensors.sagemaker_base import SageMakerBaseSensor
class TestSagemakerBaseSensor(unittest.TestCase):
    """Tests for SageMakerBaseSensor's poke()/execute() state handling.

    The original tests repeated an almost identical inline subclass four
    times; the canned-response construction is factored into
    :meth:`_make_sensor` so each test only states the response that drives
    the state under test.
    """

    @staticmethod
    def _make_sensor(response):
        """Return a sensor whose get_sagemaker_response() yields *response*.

        The concrete subclass reports 'PENDING'/'RUNNING'/'CONTINUE' as
        non-terminal states, 'FAILED' as the failure state, and reads the
        job state from response['SomeKey']['State'].
        """
        class SageMakerBaseSensorSubclass(SageMakerBaseSensor):
            def non_terminal_states(self):
                return ['PENDING', 'RUNNING', 'CONTINUE']

            def failed_states(self):
                return ['FAILED']

            def get_sagemaker_response(self):
                return response

            def state_from_response(self, response):
                return response['SomeKey']['State']

        return SageMakerBaseSensorSubclass(
            task_id='test_task',
            poke_interval=2,
            aws_conn_id='aws_test'
        )

    def test_execute(self):
        """A COMPLETED job with HTTP 200 lets execute() finish."""
        sensor = self._make_sensor({
            'SomeKey': {'State': 'COMPLETED'},
            'ResponseMetadata': {'HTTPStatusCode': 200}
        })
        sensor.execute(None)

    def test_poke_with_unfinished_job(self):
        """poke() returns False while the job is still PENDING."""
        sensor = self._make_sensor({
            'SomeKey': {'State': 'PENDING'},
            'ResponseMetadata': {'HTTPStatusCode': 200}
        })
        self.assertEqual(sensor.poke(None), False)

    def test_poke_with_not_implemented_method(self):
        """A subclass that omits the response hooks raises on poke()."""
        # This case deliberately does NOT use _make_sensor: the point is
        # that get_sagemaker_response/state_from_response are missing.
        class SageMakerBaseSensorSubclass(SageMakerBaseSensor):
            def non_terminal_states(self):
                return ['PENDING', 'RUNNING', 'CONTINUE']

            def failed_states(self):
                return ['FAILED']

        sensor = SageMakerBaseSensorSubclass(
            task_id='test_task',
            poke_interval=2,
            aws_conn_id='aws_test'
        )
        self.assertRaises(NotImplementedError, sensor.poke, None)

    def test_poke_with_bad_response(self):
        """A non-200 HTTP status yields False even for a COMPLETED job."""
        sensor = self._make_sensor({
            'SomeKey': {'State': 'COMPLETED'},
            'ResponseMetadata': {'HTTPStatusCode': 400}
        })
        self.assertEqual(sensor.poke(None), False)

    def test_poke_with_job_failure(self):
        """A FAILED job state makes poke() raise AirflowException."""
        sensor = self._make_sensor({
            'SomeKey': {'State': 'FAILED'},
            'ResponseMetadata': {'HTTPStatusCode': 200}
        })
        self.assertRaises(AirflowException, sensor.poke, None)
# Allow running this test module directly with ``python <file>``.
if __name__ == '__main__':
    unittest.main()
| wileeam/airflow | tests/providers/amazon/aws/sensors/test_sagemaker_base.py | Python | apache-2.0 | 4,728 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import google
import mock
from google.cloud.bigtable import Client
from google.cloud.bigtable.instance import Instance
from mock import PropertyMock
from airflow import AirflowException
from airflow.providers.google.cloud.hooks.bigtable import BigtableHook
from tests.providers.google.cloud.utils.base_gcp_mock import (
GCP_PROJECT_ID_HOOK_UNIT_TEST, mock_base_gcp_hook_default_project_id,
mock_base_gcp_hook_no_default_project_id,
)
# Fixture identifiers shared by all Bigtable hook tests below.
CBT_INSTANCE = 'instance'  # Cloud Bigtable instance ID
CBT_CLUSTER = 'cluster'  # Cloud Bigtable cluster ID
CBT_ZONE = 'zone'  # zone used for the instance's main cluster
CBT_TABLE = 'table'  # Cloud Bigtable table ID
class TestBigtableHookNoDefaultProjectId(unittest.TestCase):
    """BigtableHook tests where the connection carries no default project.

    Every operation must either receive an explicit ``project_id`` or
    raise AirflowException ("The project id must be passed").
    """
    def setUp(self):
        with mock.patch('airflow.providers.google.cloud.hooks.base.CloudBaseHook.__init__',
                        new=mock_base_gcp_hook_no_default_project_id):
            self.bigtable_hook_no_default_project_id = BigtableHook(gcp_conn_id='test')
    @mock.patch(
        "airflow.providers.google.cloud.hooks.bigtable.BigtableHook.client_info",
        new_callable=mock.PropertyMock
    )
    @mock.patch("airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_credentials")
    @mock.patch("airflow.providers.google.cloud.hooks.bigtable.Client")
    def test_bigtable_client_creation(self, mock_client, mock_get_creds, mock_client_info):
        """_get_client() builds an admin Client and caches it on the hook."""
        result = self.bigtable_hook_no_default_project_id._get_client(GCP_PROJECT_ID_HOOK_UNIT_TEST)
        mock_client.assert_called_once_with(
            project=GCP_PROJECT_ID_HOOK_UNIT_TEST,
            credentials=mock_get_creds.return_value,
            client_info=mock_client_info.return_value,
            admin=True
        )
        self.assertEqual(mock_client.return_value, result)
        self.assertEqual(self.bigtable_hook_no_default_project_id._client, result)
    @mock.patch(
        'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
        new_callable=PropertyMock,
        return_value=None
    )
    @mock.patch('airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client')
    def test_get_instance_missing_project_id(self, get_client, mock_project_id):
        """get_instance without a project id raises before any API call."""
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        with self.assertRaises(AirflowException) as cm:
            self.bigtable_hook_no_default_project_id.get_instance(instance_id=CBT_INSTANCE)
        instance_exists_method.assert_not_called()
        instance_method.assert_not_called()
        err = cm.exception
        self.assertIn("The project id must be passed", str(err))
    @mock.patch('airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client')
    def test_get_instance_overridden_project_id(self, get_client):
        """An explicit project id lets get_instance succeed."""
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        res = self.bigtable_hook_no_default_project_id.get_instance(
            project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
            instance_id=CBT_INSTANCE)
        instance_method.assert_called_once_with('instance')
        instance_exists_method.assert_called_once_with()
        get_client.assert_called_once_with(project_id='example-project')
        self.assertIsNotNone(res)
    @mock.patch(
        'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
        new_callable=PropertyMock,
        return_value=None
    )
    @mock.patch('airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client')
    def test_delete_instance_missing_project_id(self, get_client, mock_project_id):
        """delete_instance without a project id raises before any API call."""
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        delete_method = instance_method.return_value.delete
        instance_exists_method.return_value = True
        with self.assertRaises(AirflowException) as cm:
            self.bigtable_hook_no_default_project_id.delete_instance(instance_id=CBT_INSTANCE)
        instance_exists_method.assert_not_called()
        instance_method.assert_not_called()
        delete_method.assert_not_called()
        err = cm.exception
        self.assertIn("The project id must be passed", str(err))
    @mock.patch('airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client')
    def test_delete_instance_overridden_project_id(self, get_client):
        """An explicit project id lets delete_instance delete and return None."""
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        delete_method = instance_method.return_value.delete
        res = self.bigtable_hook_no_default_project_id.delete_instance(
            project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST, instance_id=CBT_INSTANCE)
        instance_method.assert_called_once_with('instance')
        instance_exists_method.assert_called_once_with()
        delete_method.assert_called_once_with()
        get_client.assert_called_once_with(project_id='example-project')
        self.assertIsNone(res)
    @mock.patch(
        'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
        new_callable=PropertyMock,
        return_value=None
    )
    @mock.patch('google.cloud.bigtable.instance.Instance.create')
    @mock.patch('airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client')
    def test_create_instance_missing_project_id(self, get_client, instance_create, mock_project_id):
        """create_instance without a project id raises before any API call."""
        operation = mock.Mock()
        # BUGFIX: the original assigned a dead ``result_return_value``
        # attribute; configure the mock's result() return value instead.
        operation.result.return_value = Instance(instance_id=CBT_INSTANCE, client=get_client)
        instance_create.return_value = operation
        with self.assertRaises(AirflowException) as cm:
            self.bigtable_hook_no_default_project_id.create_instance(
                instance_id=CBT_INSTANCE,
                main_cluster_id=CBT_CLUSTER,
                main_cluster_zone=CBT_ZONE)
        get_client.assert_not_called()
        instance_create.assert_not_called()
        err = cm.exception
        self.assertIn("The project id must be passed", str(err))
    @mock.patch('google.cloud.bigtable.instance.Instance.create')
    @mock.patch('airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client')
    def test_create_instance_overridden_project_id(self, get_client, instance_create):
        """An explicit project id lets create_instance build the instance."""
        operation = mock.Mock()
        # BUGFIX: configure result() properly (was ``result_return_value``).
        operation.result.return_value = Instance(instance_id=CBT_INSTANCE, client=get_client)
        instance_create.return_value = operation
        res = self.bigtable_hook_no_default_project_id.create_instance(
            project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
            instance_id=CBT_INSTANCE,
            main_cluster_id=CBT_CLUSTER,
            main_cluster_zone=CBT_ZONE)
        get_client.assert_called_once_with(project_id='example-project')
        instance_create.assert_called_once_with(clusters=mock.ANY)
        self.assertEqual(res.instance_id, 'instance')
    @mock.patch(
        'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
        new_callable=PropertyMock,
        return_value=None
    )
    @mock.patch('airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client')
    def test_delete_table_missing_project_id(self, get_client, mock_project_id):
        """delete_table without a project id raises before any API call."""
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        table_delete_method = instance_method.return_value.table.return_value.delete
        instance_exists_method.return_value = True
        with self.assertRaises(AirflowException) as cm:
            self.bigtable_hook_no_default_project_id.delete_table(
                instance_id=CBT_INSTANCE,
                table_id=CBT_TABLE)
        get_client.assert_not_called()
        instance_exists_method.assert_not_called()
        table_delete_method.assert_not_called()
        err = cm.exception
        self.assertIn("The project id must be passed", str(err))
    @mock.patch('airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client')
    def test_delete_table_overridden_project_id(self, get_client):
        """An explicit project id lets delete_table delete the table."""
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        table_delete_method = instance_method.return_value.table.return_value.delete
        instance_exists_method.return_value = True
        self.bigtable_hook_no_default_project_id.delete_table(
            project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
            instance_id=CBT_INSTANCE,
            table_id=CBT_TABLE)
        get_client.assert_called_once_with(project_id='example-project')
        instance_exists_method.assert_called_once_with()
        table_delete_method.assert_called_once_with()
class TestBigtableHookDefaultProjectId(unittest.TestCase):
    """BigtableHook tests where the connection has a default project id.

    Operations fall back to the connection's project unless an explicit
    ``project_id`` overrides it.
    """
    def setUp(self):
        with mock.patch('airflow.providers.google.cloud.hooks.base.CloudBaseHook.__init__',
                        new=mock_base_gcp_hook_default_project_id):
            self.bigtable_hook_default_project_id = BigtableHook(gcp_conn_id='test')
    @mock.patch(
        "airflow.providers.google.cloud.hooks.bigtable.BigtableHook.client_info",
        new_callable=mock.PropertyMock
    )
    @mock.patch("airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_credentials")
    @mock.patch("airflow.providers.google.cloud.hooks.bigtable.Client")
    def test_bigtable_client_creation(self, mock_client, mock_get_creds, mock_client_info):
        """_get_client() builds an admin Client and caches it on the hook."""
        result = self.bigtable_hook_default_project_id._get_client(GCP_PROJECT_ID_HOOK_UNIT_TEST)
        mock_client.assert_called_once_with(
            project=GCP_PROJECT_ID_HOOK_UNIT_TEST,
            credentials=mock_get_creds.return_value,
            client_info=mock_client_info.return_value,
            admin=True
        )
        self.assertEqual(mock_client.return_value, result)
        self.assertEqual(self.bigtable_hook_default_project_id._client, result)
    @mock.patch(
        'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
        new_callable=PropertyMock,
        return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
    )
    @mock.patch('airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client')
    def test_get_instance(self, get_client, mock_project_id):
        """get_instance falls back to the connection's default project."""
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        res = self.bigtable_hook_default_project_id.get_instance(
            instance_id=CBT_INSTANCE)
        instance_method.assert_called_once_with('instance')
        instance_exists_method.assert_called_once_with()
        get_client.assert_called_once_with(project_id='example-project')
        self.assertIsNotNone(res)
    @mock.patch('airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client')
    def test_get_instance_overridden_project_id(self, get_client):
        """An explicit project id overrides the connection default."""
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        res = self.bigtable_hook_default_project_id.get_instance(
            project_id='new-project',
            instance_id=CBT_INSTANCE)
        instance_method.assert_called_once_with('instance')
        instance_exists_method.assert_called_once_with()
        get_client.assert_called_once_with(project_id='new-project')
        self.assertIsNotNone(res)
    @mock.patch(
        'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
        new_callable=PropertyMock,
        return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
    )
    @mock.patch('airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client')
    def test_get_instance_no_instance(self, get_client, mock_project_id):
        """get_instance returns None when the instance does not exist."""
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = False
        res = self.bigtable_hook_default_project_id.get_instance(
            instance_id=CBT_INSTANCE)
        instance_method.assert_called_once_with('instance')
        instance_exists_method.assert_called_once_with()
        get_client.assert_called_once_with(project_id='example-project')
        self.assertIsNone(res)
    @mock.patch(
        'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
        new_callable=PropertyMock,
        return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
    )
    @mock.patch('airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client')
    def test_delete_instance(self, get_client, mock_project_id):
        """delete_instance deletes an existing instance in the default project."""
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        delete_method = instance_method.return_value.delete
        res = self.bigtable_hook_default_project_id.delete_instance(
            instance_id=CBT_INSTANCE)
        instance_method.assert_called_once_with('instance')
        instance_exists_method.assert_called_once_with()
        delete_method.assert_called_once_with()
        get_client.assert_called_once_with(project_id='example-project')
        self.assertIsNone(res)
    @mock.patch('airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client')
    def test_delete_instance_overridden_project_id(self, get_client):
        """An explicit project id overrides the default for delete_instance."""
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        delete_method = instance_method.return_value.delete
        res = self.bigtable_hook_default_project_id.delete_instance(
            project_id='new-project', instance_id=CBT_INSTANCE)
        instance_method.assert_called_once_with('instance')
        instance_exists_method.assert_called_once_with()
        delete_method.assert_called_once_with()
        get_client.assert_called_once_with(project_id='new-project')
        self.assertIsNone(res)
    @mock.patch(
        'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
        new_callable=PropertyMock,
        return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
    )
    @mock.patch('airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client')
    def test_delete_instance_no_instance(self, get_client, mock_project_id):
        """delete_instance is a no-op when the instance does not exist."""
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = False
        delete_method = instance_method.return_value.delete
        self.bigtable_hook_default_project_id.delete_instance(
            instance_id=CBT_INSTANCE)
        instance_method.assert_called_once_with('instance')
        instance_exists_method.assert_called_once_with()
        delete_method.assert_not_called()
        get_client.assert_called_once_with(project_id='example-project')
    @mock.patch(
        'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
        new_callable=PropertyMock,
        return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
    )
    @mock.patch('google.cloud.bigtable.instance.Instance.create')
    @mock.patch('airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client')
    def test_create_instance(self, get_client, instance_create, mock_project_id):
        """create_instance builds the instance in the default project."""
        operation = mock.Mock()
        # BUGFIX: the original assigned a dead ``result_return_value``
        # attribute; configure the mock's result() return value instead.
        operation.result.return_value = Instance(instance_id=CBT_INSTANCE, client=get_client)
        instance_create.return_value = operation
        res = self.bigtable_hook_default_project_id.create_instance(
            instance_id=CBT_INSTANCE,
            main_cluster_id=CBT_CLUSTER,
            main_cluster_zone=CBT_ZONE)
        get_client.assert_called_once_with(project_id='example-project')
        instance_create.assert_called_once_with(clusters=mock.ANY)
        self.assertEqual(res.instance_id, 'instance')
    @mock.patch('google.cloud.bigtable.instance.Instance.create')
    @mock.patch('airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client')
    def test_create_instance_overridden_project_id(self, get_client, instance_create):
        """An explicit project id overrides the default for create_instance."""
        operation = mock.Mock()
        # BUGFIX: configure result() properly (was ``result_return_value``).
        operation.result.return_value = Instance(instance_id=CBT_INSTANCE, client=get_client)
        instance_create.return_value = operation
        res = self.bigtable_hook_default_project_id.create_instance(
            project_id='new-project',
            instance_id=CBT_INSTANCE,
            main_cluster_id=CBT_CLUSTER,
            main_cluster_zone=CBT_ZONE)
        get_client.assert_called_once_with(project_id='new-project')
        instance_create.assert_called_once_with(clusters=mock.ANY)
        self.assertEqual(res.instance_id, 'instance')
    @mock.patch(
        'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
        new_callable=PropertyMock,
        return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
    )
    @mock.patch('airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client')
    def test_delete_table(self, get_client, mock_project_id):
        """delete_table deletes the table in the default project."""
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        table_delete_method = instance_method.return_value.table.return_value.delete
        instance_exists_method.return_value = True
        self.bigtable_hook_default_project_id.delete_table(
            instance_id=CBT_INSTANCE,
            table_id=CBT_TABLE)
        get_client.assert_called_once_with(project_id='example-project')
        instance_exists_method.assert_called_once_with()
        table_delete_method.assert_called_once_with()
    @mock.patch('airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client')
    def test_delete_table_overridden_project_id(self, get_client):
        """An explicit project id overrides the default for delete_table."""
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        table_delete_method = instance_method.return_value.table.return_value.delete
        instance_exists_method.return_value = True
        self.bigtable_hook_default_project_id.delete_table(
            project_id='new-project',
            instance_id=CBT_INSTANCE,
            table_id=CBT_TABLE)
        get_client.assert_called_once_with(project_id='new-project')
        instance_exists_method.assert_called_once_with()
        table_delete_method.assert_called_once_with()
    @mock.patch('google.cloud.bigtable.table.Table.create')
    @mock.patch('airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client')
    def test_create_table(self, get_client, create):
        """create_table works on a passed-in Instance without a client fetch."""
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        client = mock.Mock(Client)
        instance = google.cloud.bigtable.instance.Instance(
            instance_id=CBT_INSTANCE,
            client=client)
        self.bigtable_hook_default_project_id.create_table(
            instance=instance,
            table_id=CBT_TABLE)
        get_client.assert_not_called()
        create.assert_called_once_with([], {})
    @mock.patch('google.cloud.bigtable.cluster.Cluster.update')
    @mock.patch('airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client')
    def test_update_cluster(self, get_client, update):
        """update_cluster works on a passed-in Instance without a client fetch."""
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        client = mock.Mock(Client)
        instance = google.cloud.bigtable.instance.Instance(
            instance_id=CBT_INSTANCE,
            client=client)
        self.bigtable_hook_default_project_id.update_cluster(
            instance=instance,
            cluster_id=CBT_CLUSTER,
            nodes=4)
        get_client.assert_not_called()
        update.assert_called_once_with()
    @mock.patch('google.cloud.bigtable.table.Table.list_column_families')
    @mock.patch('airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client')
    def test_list_column_families(self, get_client, list_column_families):
        """get_column_families_for_table delegates to Table.list_column_families."""
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        client = mock.Mock(Client)
        get_client.return_value = client
        instance = google.cloud.bigtable.instance.Instance(
            instance_id=CBT_INSTANCE,
            client=client)
        self.bigtable_hook_default_project_id.get_column_families_for_table(
            instance=instance, table_id=CBT_TABLE)
        get_client.assert_not_called()
        list_column_families.assert_called_once_with()
    @mock.patch('google.cloud.bigtable.table.Table.get_cluster_states')
    @mock.patch('airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client')
    def test_get_cluster_states(self, get_client, get_cluster_states):
        """get_cluster_states_for_table delegates to Table.get_cluster_states."""
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        client = mock.Mock(Client)
        instance = google.cloud.bigtable.instance.Instance(
            instance_id=CBT_INSTANCE,
            client=client)
        self.bigtable_hook_default_project_id.get_cluster_states_for_table(
            instance=instance, table_id=CBT_TABLE)
        get_client.assert_not_called()
        get_cluster_states.assert_called_once_with()
| spektom/incubator-airflow | tests/providers/google/cloud/hooks/test_bigtable.py | Python | apache-2.0 | 22,575 |
from __future__ import print_function, absolute_import
import numpy as np
from numba import cuda, int32, float32
from numba.cuda.testing import unittest
from numba.config import ENABLE_CUDASIM
def useless_sync(ary):
    """Kernel: write each thread's global index into ``ary`` after a
    (functionally unnecessary) barrier, exercising syncthreads codegen."""
    i = cuda.grid(1)
    cuda.syncthreads()
    ary[i] = i
def simple_smem(ary):
    """Kernel: thread 0 fills a static shared array with 0..N-1; after the
    barrier every thread copies its own slot out to ``ary``."""
    N = 100
    sm = cuda.shared.array(N, int32)
    i = cuda.grid(1)
    if i == 0:
        # Only thread 0 populates shared memory; the barrier below makes
        # the writes visible to the rest of the block.
        for j in range(N):
            sm[j] = j
    cuda.syncthreads()
    ary[i] = sm[i]
def coop_smem2d(ary):
    """Kernel: each thread writes (i+1)/(j+1) into a 2D shared array and
    reads its own slot back into ``ary`` after a barrier."""
    i, j = cuda.grid(2)
    sm = cuda.shared.array((10, 20), float32)
    sm[i, j] = (i + 1) / (j + 1)
    cuda.syncthreads()
    ary[i, j] = sm[i, j]
def dyn_shared_memory(ary):
    """Kernel: stage i*2 through dynamically-sized shared memory (declared
    with size 0; actual size supplied in the launch configuration)."""
    i = cuda.grid(1)
    sm = cuda.shared.array(0, float32)
    sm[i] = i * 2
    cuda.syncthreads()
    ary[i] = sm[i]
def use_threadfence(ary):
    """Kernel: bracket a device-wide memory fence with two increments."""
    ary[0] += 123
    cuda.threadfence()
    ary[0] += 321
def use_threadfence_block(ary):
    """Kernel: bracket a block-scope memory fence with two increments."""
    ary[0] += 123
    cuda.threadfence_block()
    ary[0] += 321
def use_threadfence_system(ary):
    """Kernel: bracket a system-scope memory fence with two increments."""
    ary[0] += 123
    cuda.threadfence_system()
    ary[0] += 321
class TestCudaSync(unittest.TestCase):
    """Exercises barrier, shared-memory and memory-fence intrinsics."""
    def test_useless_sync(self):
        """A kernel containing a barrier still writes thread ids correctly."""
        compiled = cuda.jit("void(int32[::1])")(useless_sync)
        nelem = 10
        ary = np.empty(nelem, dtype=np.int32)
        exp = np.arange(nelem, dtype=np.int32)
        compiled[1, nelem](ary)
        self.assertTrue(np.all(ary == exp))
    def test_simple_smem(self):
        """Values staged by thread 0 in static shared memory are visible
        to all threads after the barrier."""
        compiled = cuda.jit("void(int32[::1])")(simple_smem)
        nelem = 100
        ary = np.empty(nelem, dtype=np.int32)
        compiled[1, nelem](ary)
        self.assertTrue(np.all(ary == np.arange(nelem, dtype=np.int32)))
    def test_coop_smem2d(self):
        """A 2D shared array round-trips per-thread values correctly."""
        compiled = cuda.jit("void(float32[:,::1])")(coop_smem2d)
        shape = 10, 20
        ary = np.empty(shape, dtype=np.float32)
        compiled[1, shape](ary)
        exp = np.empty_like(ary)
        for i in range(ary.shape[0]):
            for j in range(ary.shape[1]):
                exp[i, j] = (i + 1) / (j + 1)
        self.assertTrue(np.allclose(ary, exp))
    def test_dyn_shared_memory(self):
        """Dynamic shared memory sized at launch time works end to end."""
        compiled = cuda.jit("void(float32[::1])")(dyn_shared_memory)
        shape = 50
        ary = np.empty(shape, dtype=np.float32)
        # Fourth launch-config argument is the dynamic shared memory size
        # in bytes (4 bytes per float32 element).
        compiled[1, shape, 0, ary.size * 4](ary)
        self.assertTrue(np.all(ary == 2 * np.arange(ary.size, dtype=np.int32)))
    def test_threadfence_codegen(self):
        """threadfence() lowers to a device-wide membar instruction."""
        # Does not test runtime behavior, just the code generation.
        compiled = cuda.jit("void(int32[:])")(use_threadfence)
        ary = np.zeros(10, dtype=np.int32)
        compiled[1, 1](ary)
        self.assertEqual(123 + 321, ary[0])
        if not ENABLE_CUDASIM:
            self.assertIn("membar.gl;", compiled.ptx)
    def test_threadfence_block_codegen(self):
        """threadfence_block() lowers to a block-scope membar instruction."""
        # Does not test runtime behavior, just the code generation.
        compiled = cuda.jit("void(int32[:])")(use_threadfence_block)
        ary = np.zeros(10, dtype=np.int32)
        compiled[1, 1](ary)
        self.assertEqual(123 + 321, ary[0])
        if not ENABLE_CUDASIM:
            self.assertIn("membar.cta;", compiled.ptx)
    def test_threadfence_system_codegen(self):
        """threadfence_system() lowers to a system-scope membar instruction."""
        # Does not test runtime behavior, just the code generation.
        compiled = cuda.jit("void(int32[:])")(use_threadfence_system)
        ary = np.zeros(10, dtype=np.int32)
        compiled[1, 1](ary)
        self.assertEqual(123 + 321, ary[0])
        if not ENABLE_CUDASIM:
            self.assertIn("membar.sys;", compiled.ptx)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| ssarangi/numba | numba/cuda/tests/cudapy/test_sync.py | Python | bsd-2-clause | 3,582 |
import decimal
import json
import unittest
import uuid
from django import forms
from django.apps.registry import Apps
from django.core import exceptions, serializers, validators
from django.core.management import call_command
from django.db import IntegrityError, connection, models
from django.test import TransactionTestCase, override_settings
from django.utils import timezone
from . import PostgreSQLTestCase
from .models import (
ArrayFieldSubclass, CharArrayModel, DateTimeArrayModel, IntegerArrayModel,
NestedIntegerArrayModel, NullableIntegerArrayModel, OtherTypesArrayModel,
PostgreSQLModel,
)
try:
    from django.contrib.postgres.fields import ArrayField
    from django.contrib.postgres.forms import SimpleArrayField, SplitArrayField
except ImportError:
    # contrib.postgres (psycopg2) may be unavailable; presumably the
    # PostgreSQLTestCase base skips these tests in that case so the missing
    # names are never referenced -- verify against the test runner setup.
    pass
class TestSaveLoad(PostgreSQLTestCase):
    """Round-trip tests: ArrayField values survive save() and reload for a
    variety of base field types, including nested arrays and NULL."""
    def test_integer(self):
        instance = IntegerArrayModel(field=[1, 2, 3])
        instance.save()
        loaded = IntegerArrayModel.objects.get()
        self.assertEqual(instance.field, loaded.field)
    def test_char(self):
        instance = CharArrayModel(field=['hello', 'goodbye'])
        instance.save()
        loaded = CharArrayModel.objects.get()
        self.assertEqual(instance.field, loaded.field)
    def test_dates(self):
        instance = DateTimeArrayModel(
            datetimes=[timezone.now()],
            dates=[timezone.now().date()],
            times=[timezone.now().time()],
        )
        instance.save()
        loaded = DateTimeArrayModel.objects.get()
        self.assertEqual(instance.datetimes, loaded.datetimes)
        self.assertEqual(instance.dates, loaded.dates)
        self.assertEqual(instance.times, loaded.times)
    def test_tuples(self):
        # Tuples are accepted on input; compare as sequences on reload.
        instance = IntegerArrayModel(field=(1,))
        instance.save()
        loaded = IntegerArrayModel.objects.get()
        self.assertSequenceEqual(instance.field, loaded.field)
    def test_integers_passed_as_strings(self):
        # This checks that get_prep_value is deferred properly
        instance = IntegerArrayModel(field=['1'])
        instance.save()
        loaded = IntegerArrayModel.objects.get()
        self.assertEqual(loaded.field, [1])
    def test_default_null(self):
        instance = NullableIntegerArrayModel()
        instance.save()
        loaded = NullableIntegerArrayModel.objects.get(pk=instance.pk)
        self.assertEqual(loaded.field, None)
        self.assertEqual(instance.field, loaded.field)
    def test_null_handling(self):
        # Nullable field stores None; non-nullable field rejects it.
        instance = NullableIntegerArrayModel(field=None)
        instance.save()
        loaded = NullableIntegerArrayModel.objects.get()
        self.assertEqual(instance.field, loaded.field)
        instance = IntegerArrayModel(field=None)
        with self.assertRaises(IntegrityError):
            instance.save()
    def test_nested(self):
        instance = NestedIntegerArrayModel(field=[[1, 2], [3, 4]])
        instance.save()
        loaded = NestedIntegerArrayModel.objects.get()
        self.assertEqual(instance.field, loaded.field)
    def test_other_array_types(self):
        instance = OtherTypesArrayModel(
            ips=['192.168.0.1', '::1'],
            uuids=[uuid.uuid4()],
            decimals=[decimal.Decimal(1.25), 1.75],
        )
        instance.save()
        loaded = OtherTypesArrayModel.objects.get()
        self.assertEqual(instance.ips, loaded.ips)
        self.assertEqual(instance.uuids, loaded.uuids)
        self.assertEqual(instance.decimals, loaded.decimals)
    def test_model_set_on_base_field(self):
        # Both the ArrayField and its base_field report the owning model.
        instance = IntegerArrayModel()
        field = instance._meta.get_field('field')
        self.assertEqual(field.model, IntegerArrayModel)
        self.assertEqual(field.base_field.model, IntegerArrayModel)
class TestQuerying(PostgreSQLTestCase):
    """Lookup tests for ArrayField: exact, containment, index/slice access,
    length, overlap, and subquery usage. setUp creates five fixture rows
    whose list positions the assertions slice into."""
    def setUp(self):
        self.objs = [
            NullableIntegerArrayModel.objects.create(field=[1]),
            NullableIntegerArrayModel.objects.create(field=[2]),
            NullableIntegerArrayModel.objects.create(field=[2, 3]),
            NullableIntegerArrayModel.objects.create(field=[20, 30, 40]),
            NullableIntegerArrayModel.objects.create(field=None),
        ]
    def test_exact(self):
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__exact=[1]),
            self.objs[:1]
        )
    def test_exact_charfield(self):
        instance = CharArrayModel.objects.create(field=['text'])
        self.assertSequenceEqual(
            CharArrayModel.objects.filter(field=['text']),
            [instance]
        )
    def test_exact_nested(self):
        instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
        self.assertSequenceEqual(
            NestedIntegerArrayModel.objects.filter(field=[[1, 2], [3, 4]]),
            [instance]
        )
    def test_isnull(self):
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__isnull=True),
            self.objs[-1:]
        )
    def test_gt(self):
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__gt=[0]),
            self.objs[:4]
        )
    def test_lt(self):
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__lt=[2]),
            self.objs[:1]
        )
    def test_in(self):
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__in=[[1], [2]]),
            self.objs[:2]
        )
    def test_contained_by(self):
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__contained_by=[1, 2]),
            self.objs[:2]
        )
    def test_contains(self):
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__contains=[2]),
            self.objs[1:3]
        )
    def test_contains_charfield(self):
        # Regression for #22907
        self.assertSequenceEqual(
            CharArrayModel.objects.filter(field__contains=['text']),
            []
        )
    def test_contained_by_charfield(self):
        self.assertSequenceEqual(
            CharArrayModel.objects.filter(field__contained_by=['text']),
            []
        )
    def test_overlap_charfield(self):
        self.assertSequenceEqual(
            CharArrayModel.objects.filter(field__overlap=['text']),
            []
        )
    def test_index(self):
        # field__0 indexes the first array element.
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__0=2),
            self.objs[1:3]
        )
    def test_index_chained(self):
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__0__lt=3),
            self.objs[0:3]
        )
    def test_index_nested(self):
        instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
        self.assertSequenceEqual(
            NestedIntegerArrayModel.objects.filter(field__0__0=1),
            [instance]
        )
    @unittest.expectedFailure
    def test_index_used_on_nested_data(self):
        # Known limitation: indexing a nested array with a list value fails.
        instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
        self.assertSequenceEqual(
            NestedIntegerArrayModel.objects.filter(field__0=[1, 2]),
            [instance]
        )
    def test_overlap(self):
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__overlap=[1, 2]),
            self.objs[0:3]
        )
    def test_len(self):
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__len__lte=2),
            self.objs[0:3]
        )
    def test_len_empty_array(self):
        obj = NullableIntegerArrayModel.objects.create(field=[])
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__len=0),
            [obj]
        )
    def test_slice(self):
        # field__M_N slices the array from index M to N.
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__0_1=[2]),
            self.objs[1:3]
        )
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__0_2=[2, 3]),
            self.objs[2:3]
        )
    @unittest.expectedFailure
    def test_slice_nested(self):
        # Known limitation: slicing within a nested array fails.
        instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
        self.assertSequenceEqual(
            NestedIntegerArrayModel.objects.filter(field__0__0_1=[1]),
            [instance]
        )
    def test_usage_in_subquery(self):
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(
                id__in=NullableIntegerArrayModel.objects.filter(field__len=3)
            ),
            [self.objs[3]]
        )
class TestDateTimeExactQuerying(PostgreSQLTestCase):
    """Exact-match lookups on arrays of datetime, date and time values."""
    def setUp(self):
        now = timezone.now()
        self.datetimes = [now]
        self.dates = [now.date()]
        self.times = [now.time()]
        self.objs = [
            DateTimeArrayModel.objects.create(
                datetimes=self.datetimes,
                dates=self.dates,
                times=self.times,
            )
        ]
    def test_exact_datetimes(self):
        self.assertSequenceEqual(
            DateTimeArrayModel.objects.filter(datetimes=self.datetimes),
            self.objs
        )
    def test_exact_dates(self):
        self.assertSequenceEqual(
            DateTimeArrayModel.objects.filter(dates=self.dates),
            self.objs
        )
    def test_exact_times(self):
        self.assertSequenceEqual(
            DateTimeArrayModel.objects.filter(times=self.times),
            self.objs
        )
class TestOtherTypesExactQuerying(PostgreSQLTestCase):
    """Exact-match lookups on arrays of inet, uuid and decimal values."""
    def setUp(self):
        self.ips = ['192.168.0.1', '::1']
        self.uuids = [uuid.uuid4()]
        self.decimals = [decimal.Decimal(1.25), 1.75]
        self.objs = [
            OtherTypesArrayModel.objects.create(
                ips=self.ips,
                uuids=self.uuids,
                decimals=self.decimals,
            )
        ]
    def test_exact_ip_addresses(self):
        self.assertSequenceEqual(
            OtherTypesArrayModel.objects.filter(ips=self.ips),
            self.objs
        )
    def test_exact_uuids(self):
        self.assertSequenceEqual(
            OtherTypesArrayModel.objects.filter(uuids=self.uuids),
            self.objs
        )
    def test_exact_decimals(self):
        self.assertSequenceEqual(
            OtherTypesArrayModel.objects.filter(decimals=self.decimals),
            self.objs
        )
class TestChecks(PostgreSQLTestCase):
    """System-check tests: ArrayField must propagate base-field errors and
    reject invalid base fields. Each test uses an isolated app registry so
    the throwaway models do not leak into the global Apps."""
    def test_field_checks(self):
        test_apps = Apps(['postgres_tests'])
        class MyModel(PostgreSQLModel):
            field = ArrayField(models.CharField())
            class Meta:
                apps = test_apps
        model = MyModel()
        errors = model.check()
        self.assertEqual(len(errors), 1)
        # The inner CharField is missing a max_length.
        self.assertEqual(errors[0].id, 'postgres.E001')
        self.assertIn('max_length', errors[0].msg)
    def test_invalid_base_fields(self):
        test_apps = Apps(['postgres_tests'])
        class MyModel(PostgreSQLModel):
            field = ArrayField(models.ManyToManyField('postgres_tests.IntegerArrayModel'))
            class Meta:
                apps = test_apps
        model = MyModel()
        errors = model.check()
        self.assertEqual(len(errors), 1)
        self.assertEqual(errors[0].id, 'postgres.E002')
    def test_nested_field_checks(self):
        """
        Nested ArrayFields are permitted.
        """
        test_apps = Apps(['postgres_tests'])
        class MyModel(PostgreSQLModel):
            field = ArrayField(ArrayField(models.CharField()))
            class Meta:
                apps = test_apps
        model = MyModel()
        errors = model.check()
        self.assertEqual(len(errors), 1)
        # The inner CharField is missing a max_length.
        self.assertEqual(errors[0].id, 'postgres.E001')
        self.assertIn('max_length', errors[0].msg)
@unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific tests")
class TestMigrations(TransactionTestCase):
    """deconstruct() round-trips and real migration runs for ArrayField."""
    available_apps = ['postgres_tests']
    def test_deconstruct(self):
        field = ArrayField(models.IntegerField())
        name, path, args, kwargs = field.deconstruct()
        new = ArrayField(*args, **kwargs)
        self.assertEqual(type(new.base_field), type(field.base_field))
    def test_deconstruct_with_size(self):
        field = ArrayField(models.IntegerField(), size=3)
        name, path, args, kwargs = field.deconstruct()
        new = ArrayField(*args, **kwargs)
        self.assertEqual(new.size, field.size)
    def test_deconstruct_args(self):
        field = ArrayField(models.CharField(max_length=20))
        name, path, args, kwargs = field.deconstruct()
        new = ArrayField(*args, **kwargs)
        self.assertEqual(new.base_field.max_length, field.base_field.max_length)
    def test_subclass_deconstruct(self):
        # Subclasses must deconstruct to their own path, not the base class.
        field = ArrayField(models.IntegerField())
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, 'django.contrib.postgres.fields.ArrayField')
        field = ArrayFieldSubclass()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, 'postgres_tests.models.ArrayFieldSubclass')
    @override_settings(MIGRATION_MODULES={
        "postgres_tests": "postgres_tests.array_default_migrations",
    })
    def test_adding_field_with_default(self):
        # See #22962
        table_name = 'postgres_tests_integerarraydefaultmodel'
        with connection.cursor() as cursor:
            self.assertNotIn(table_name, connection.introspection.table_names(cursor))
        call_command('migrate', 'postgres_tests', verbosity=0)
        with connection.cursor() as cursor:
            self.assertIn(table_name, connection.introspection.table_names(cursor))
        call_command('migrate', 'postgres_tests', 'zero', verbosity=0)
        with connection.cursor() as cursor:
            self.assertNotIn(table_name, connection.introspection.table_names(cursor))
    @override_settings(MIGRATION_MODULES={
        "postgres_tests": "postgres_tests.array_index_migrations",
    })
    def test_adding_arrayfield_with_index(self):
        """
        ArrayField shouldn't have varchar_patterns_ops or text_patterns_ops indexes.
        """
        table_name = 'postgres_tests_chartextarrayindexmodel'
        call_command('migrate', 'postgres_tests', verbosity=0)
        with connection.cursor() as cursor:
            like_constraint_field_names = [
                c.rsplit('_', 2)[0][len(table_name) + 1:]
                for c in connection.introspection.get_constraints(cursor, table_name)
                if c.endswith('_like')
            ]
        # Only the CharField should have a LIKE index.
        self.assertEqual(like_constraint_field_names, ['char2'])
        with connection.cursor() as cursor:
            indexes = connection.introspection.get_indexes(cursor, table_name)
        # All fields should have regular indexes.
        self.assertIn('char', indexes)
        self.assertIn('char2', indexes)
        self.assertIn('text', indexes)
        call_command('migrate', 'postgres_tests', 'zero', verbosity=0)
        with connection.cursor() as cursor:
            self.assertNotIn(table_name, connection.introspection.table_names(cursor))
class TestSerialization(PostgreSQLTestCase):
    """JSON serializer round-trip: array values are stored as a JSON-encoded
    string inside the serialized fields payload."""
    test_data = (
        '[{"fields": {"field": "[\\"1\\", \\"2\\"]"}, "model": "postgres_tests.integerarraymodel", "pk": null}]'
    )
    def test_dumping(self):
        instance = IntegerArrayModel(field=[1, 2])
        data = serializers.serialize('json', [instance])
        self.assertEqual(json.loads(data), json.loads(self.test_data))
    def test_loading(self):
        instance = list(serializers.deserialize('json', self.test_data))[0].object
        self.assertEqual(instance.field, [1, 2])
class TestValidation(PostgreSQLTestCase):
    """Model-field clean() behavior: per-item errors, size limits, nested
    length consistency, and base-field validators."""
    def test_unbounded(self):
        field = ArrayField(models.IntegerField())
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean([1, None], None)
        self.assertEqual(cm.exception.code, 'item_invalid')
        self.assertEqual(
            cm.exception.message % cm.exception.params,
            'Item 1 in the array did not validate: This field cannot be null.'
        )
    def test_blank_true(self):
        field = ArrayField(models.IntegerField(blank=True, null=True))
        # This should not raise a validation error
        field.clean([1, None], None)
    def test_with_size(self):
        field = ArrayField(models.IntegerField(), size=3)
        field.clean([1, 2, 3], None)
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean([1, 2, 3, 4], None)
        self.assertEqual(cm.exception.messages[0], 'List contains 4 items, it should contain no more than 3.')
    def test_nested_array_mismatch(self):
        # Inner arrays must all share the same length.
        field = ArrayField(ArrayField(models.IntegerField()))
        field.clean([[1, 2], [3, 4]], None)
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean([[1, 2], [3, 4, 5]], None)
        self.assertEqual(cm.exception.code, 'nested_array_mismatch')
        self.assertEqual(cm.exception.messages[0], 'Nested arrays must have the same length.')
    def test_with_validators(self):
        field = ArrayField(models.IntegerField(validators=[validators.MinValueValidator(1)]))
        field.clean([1, 2], None)
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean([0], None)
        self.assertEqual(cm.exception.code, 'item_invalid')
        self.assertEqual(
            cm.exception.messages[0],
            'Item 0 in the array did not validate: Ensure this value is greater than or equal to 1.'
        )
class TestSimpleFormField(PostgreSQLTestCase):
    """SimpleArrayField form tests: delimiter parsing, per-item validation,
    length bounds, and the model field's formfield() mapping."""
    def test_valid(self):
        field = SimpleArrayField(forms.CharField())
        value = field.clean('a,b,c')
        self.assertEqual(value, ['a', 'b', 'c'])
    def test_to_python_fail(self):
        field = SimpleArrayField(forms.IntegerField())
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean('a,b,9')
        self.assertEqual(cm.exception.messages[0], 'Item 0 in the array did not validate: Enter a whole number.')
    def test_validate_fail(self):
        field = SimpleArrayField(forms.CharField(required=True))
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean('a,b,')
        self.assertEqual(cm.exception.messages[0], 'Item 2 in the array did not validate: This field is required.')
    def test_validators_fail(self):
        field = SimpleArrayField(forms.RegexField('[a-e]{2}'))
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean('a,bc,de')
        self.assertEqual(cm.exception.messages[0], 'Item 0 in the array did not validate: Enter a valid value.')
    def test_delimiter(self):
        field = SimpleArrayField(forms.CharField(), delimiter='|')
        value = field.clean('a|b|c')
        self.assertEqual(value, ['a', 'b', 'c'])
    def test_delimiter_with_nesting(self):
        # The outer delimiter splits first; inner fields use the default ','.
        field = SimpleArrayField(SimpleArrayField(forms.CharField()), delimiter='|')
        value = field.clean('a,b|c,d')
        self.assertEqual(value, [['a', 'b'], ['c', 'd']])
    def test_prepare_value(self):
        field = SimpleArrayField(forms.CharField())
        value = field.prepare_value(['a', 'b', 'c'])
        self.assertEqual(value, 'a,b,c')
    def test_max_length(self):
        field = SimpleArrayField(forms.CharField(), max_length=2)
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean('a,b,c')
        self.assertEqual(cm.exception.messages[0], 'List contains 3 items, it should contain no more than 2.')
    def test_min_length(self):
        field = SimpleArrayField(forms.CharField(), min_length=4)
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean('a,b,c')
        self.assertEqual(cm.exception.messages[0], 'List contains 3 items, it should contain no fewer than 4.')
    def test_required(self):
        field = SimpleArrayField(forms.CharField(), required=True)
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean('')
        self.assertEqual(cm.exception.messages[0], 'This field is required.')
    def test_model_field_formfield(self):
        model_field = ArrayField(models.CharField(max_length=27))
        form_field = model_field.formfield()
        self.assertIsInstance(form_field, SimpleArrayField)
        self.assertIsInstance(form_field.base_field, forms.CharField)
        self.assertEqual(form_field.base_field.max_length, 27)
    def test_model_field_formfield_size(self):
        # The model field's size maps onto the form field's max_length.
        model_field = ArrayField(models.CharField(max_length=27), size=4)
        form_field = model_field.formfield()
        self.assertIsInstance(form_field, SimpleArrayField)
        self.assertEqual(form_field.max_length, 4)
class TestSplitFormField(PostgreSQLTestCase):
    """SplitArrayField form tests: one widget per element (array_0..array_N),
    required handling, trailing-null removal, and rendering."""
    def test_valid(self):
        class SplitForm(forms.Form):
            array = SplitArrayField(forms.CharField(), size=3)
        data = {'array_0': 'a', 'array_1': 'b', 'array_2': 'c'}
        form = SplitForm(data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data, {'array': ['a', 'b', 'c']})
    def test_required(self):
        class SplitForm(forms.Form):
            array = SplitArrayField(forms.CharField(), required=True, size=3)
        data = {'array_0': '', 'array_1': '', 'array_2': ''}
        form = SplitForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors, {'array': ['This field is required.']})
    def test_remove_trailing_nulls(self):
        class SplitForm(forms.Form):
            array = SplitArrayField(forms.CharField(required=False), size=5, remove_trailing_nulls=True)
        data = {'array_0': 'a', 'array_1': '', 'array_2': 'b', 'array_3': '', 'array_4': ''}
        form = SplitForm(data)
        self.assertTrue(form.is_valid(), form.errors)
        # Only the empty values *after* the last non-empty one are dropped.
        self.assertEqual(form.cleaned_data, {'array': ['a', '', 'b']})
    def test_required_field(self):
        class SplitForm(forms.Form):
            array = SplitArrayField(forms.CharField(), size=3)
        data = {'array_0': 'a', 'array_1': 'b', 'array_2': ''}
        form = SplitForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors, {'array': ['Item 2 in the array did not validate: This field is required.']})
    def test_invalid_integer(self):
        msg = 'Item 1 in the array did not validate: Ensure this value is less than or equal to 100.'
        with self.assertRaisesMessage(exceptions.ValidationError, msg):
            SplitArrayField(forms.IntegerField(max_value=100), size=2).clean([0, 101])
    def test_rendering(self):
        class SplitForm(forms.Form):
            array = SplitArrayField(forms.CharField(), size=3)
        self.assertHTMLEqual(str(SplitForm()), '''
            <tr>
                <th><label for="id_array_0">Array:</label></th>
                <td>
                    <input id="id_array_0" name="array_0" type="text" />
                    <input id="id_array_1" name="array_1" type="text" />
                    <input id="id_array_2" name="array_2" type="text" />
                </td>
            </tr>
        ''')
| dydek/django | tests/postgres_tests/test_array.py | Python | bsd-3-clause | 23,682 |
import numpy as np
import pandas as pd
from bokeh import mpl
# Build a random-walk time series and a 4-column cumulative DataFrame
# indexed by the same daily date range.
ts = pd.Series(np.random.randn(1000), index=pd.date_range('1/1/2000', periods=1000))
ts = ts.cumsum()
df = pd.DataFrame(np.random.randn(1000, 4), index=ts.index, columns=list('ABCD'))
df = df.cumsum()
# Plot via pandas/matplotlib, then convert the active figure to a Bokeh plot.
df.plot(legend=False)
mpl.to_bokeh(name="dataframe")
| the13fools/Bokeh_Examples | pandas/dataframe.py | Python | bsd-3-clause | 318 |
from __future__ import with_statement
import sys
import unittest
import maya.cmds as cmds
import pymel.core as pm
import pymel.core.uitypes as ui
# Older pymel versions lack pm.currentMenuParent; provide a shim that asks
# Maya directly which menu is the current parent.
if not hasattr(pm, 'currentMenuParent'):
    def currentMenuParent():
        # Wrap the raw cmds result in a PyUI object for API parity.
        return ui.PyUI(cmds.setParent(q=1, menu=1))
    pm.currentMenuParent = currentMenuParent
class TestWithStatement(unittest.TestCase):
    """Tests that pymel UI objects used as context managers push and pop the
    current UI/menu parent correctly, including nesting and parent jumps.
    The exact sequence of currentParent() assertions *is* the behavior under
    test, so statement order matters throughout."""
    def setUp(self):
        # Clear any lingering menu parent, then give every test a fresh window.
        cmds.setParent(None, menu=1)
        self.win = cmds.window()
    def tearDown(self):
        cmds.deleteUI(self.win, window=True)
    def test_classInit(self):
        with ui.FormLayout() as fl:
            self.assertEqual(pm.currentParent(), fl)
        self.assertEqual(pm.currentParent(), self.win)
        with ui.RowLayout() as rl:
            self.assertEqual(pm.currentParent(), rl)
        # Since there can only be one top-level layout,
        # what happens is that before creating the row layout, the
        # parent is set to the window; but that automatically gets translated
        # to mean the top-level layout for that window, which is the form
        # layout... so the row layout has it's parent set to the form
        # layout
        self.assertEqual(pm.currentParent(), fl)
        with ui.ColumnLayout() as cl:
            self.assertEqual(pm.currentParent(), cl)
        self.assertEqual(pm.currentParent(), fl)
    def test_cmdInit(self):
        # Same as test_classInit, but via the command wrappers (pm.formLayout
        # etc.) rather than instantiating the uitypes classes directly.
        with pm.formLayout() as fl:
            self.assertEqual(pm.currentParent(), fl)
        self.assertEqual(pm.currentParent(), self.win)
        with pm.rowLayout() as rl:
            self.assertEqual(pm.currentParent(), rl)
        # Since there can only be one top-level layout,
        # what happens is that before creating the row layout, the
        # parent is set to the window; but that automatically gets translated
        # to mean the top-level layout for that window, which is the form
        # layout... so the row layout has it's parent set to the form
        # layout
        self.assertEqual(pm.currentParent(), fl)
        with pm.columnLayout() as cl:
            self.assertEqual(pm.currentParent(), cl)
        self.assertEqual(pm.currentParent(), fl)
    def test_parentJump(self):
        # Explicit parent= overrides the implicit current parent; exiting the
        # with-block restores the explicitly requested parent.
        cl = ui.ColumnLayout()
        rl1 = ui.RowLayout()
        with pm.rowLayout(parent=cl) as rl2:
            self.assertEqual(pm.currentParent(), rl2)
        self.assertEqual(pm.currentParent(), cl)
    def test_nested(self):
        with ui.ColumnLayout() as cl:
            self.assertEqual(pm.currentParent(), cl)
            with pm.rowLayout() as rl:
                self.assertEqual(pm.currentParent(), rl)
            self.assertEqual(pm.currentParent(), cl)
        self.assertEqual(pm.currentParent(), self.win)
    def test_nestedParentJump(self):
        with ui.ColumnLayout() as cl:
            self.assertEqual(pm.currentParent(), cl)
            with pm.rowLayout() as rl:
                self.assertEqual(pm.currentParent(), rl)
                with cl:
                    # set the parent BACK to the column layout
                    self.assertEqual(pm.currentParent(), cl)
                self.assertEqual(pm.currentParent(), rl)
            self.assertEqual(pm.currentParent(), cl)
        self.assertEqual(pm.currentParent(), self.win)
    def test_nestedMenu(self):
        # Menu parents are tracked on a separate stack from UI parents.
        self.assertEqual(pm.currentParent(), self.win)
        self.assertEqual(pm.currentMenuParent(), None)
        with ui.ColumnLayout() as cl:
            self.assertEqual(pm.currentParent(), cl)
            self.assertEqual(pm.currentMenuParent(), None)
            cmds.button()
            with pm.popupMenu() as m:
                self.assertEqual(pm.currentParent(), cl)
                self.assertEqual(pm.currentMenuParent(), m)
                with ui.MenuItem(subMenu=1) as sm:
                    self.assertEqual(pm.currentParent(), cl)
                    self.assertEqual(pm.currentMenuParent(), sm)
                self.assertEqual(pm.currentParent(), cl)
                self.assertEqual(pm.currentMenuParent(), m)
            self.assertEqual(pm.currentParent(), cl)
        self.assertEqual(pm.currentParent(), self.win)
    def test_rowGroupLayout(self):
        # Row-group controls (textFieldButtonGrp) act as layouts for parenting.
        self.assertEqual(pm.currentParent(), self.win)
        self.assertEqual(pm.currentMenuParent(), None)
        with pm.textFieldButtonGrp( label='Label', text='Text', buttonLabel='Button' ) as tfbg:
            self.assertEqual(pm.currentParent(), tfbg)
            self.assertEqual(pm.currentMenuParent(), None)
            cmds.button()
            with pm.popupMenu() as m:
                self.assertEqual(pm.currentParent(), tfbg)
                self.assertEqual(pm.currentMenuParent(), m)
                with pm.menuItem(subMenu=1) as sm:
                    self.assertEqual(pm.currentParent(), tfbg)
                    self.assertEqual(pm.currentMenuParent(), sm)
                self.assertEqual(pm.currentParent(), tfbg)
                self.assertEqual(pm.currentMenuParent(), m)
            self.assertEqual(pm.currentParent(), tfbg)
        self.assertEqual(pm.currentParent(), self.win)
        fl = pm.formLayout()
        tfbg2 = pm.textFieldButtonGrp( label='Label', text='Text', buttonLabel='Button' )
        self.assertEqual(pm.currentParent(), fl)
        with pm.columnLayout() as cl:
            cmds.button()
            with pm.popupMenu() as m:
                self.assertEqual(pm.currentParent(), cl)
                self.assertEqual(pm.currentMenuParent(), m)
                with pm.menuItem(subMenu=1) as sm:
                    self.assertEqual(pm.currentParent(), cl)
                    self.assertEqual(pm.currentMenuParent(), sm)
                self.assertEqual(pm.currentParent(), cl)
                self.assertEqual(pm.currentMenuParent(), m)
            self.assertEqual(pm.currentParent(), cl)
        self.assertEqual(pm.currentParent(), fl)
    def test_optionMenuGrp(self):
        # An OptionMenuGrp context sets both the UI parent (itself) and the
        # menu parent (its embedded menu).
        self.assertEqual(pm.currentParent(), self.win)
        self.assertEqual(pm.currentMenuParent(), None)
        with ui.ColumnLayout() as cl:
            self.assertEqual(pm.currentParent(), cl)
            self.assertEqual(pm.currentMenuParent(), None)
            cmds.button()
            with ui.OptionMenuGrp() as m:
                self.assertEqual(pm.currentParent(), m)
                self.assertEqual(pm.currentMenuParent(), m.menu())
            self.assertEqual(pm.currentParent(), cl)
        self.assertEqual(pm.currentParent(), self.win)
    def test_windowExit(self):
        # Exiting a window context must pop it off the parent stack even
        # when the window is deleted afterwards.
        self.assertEqual(pm.currentParent(), self.win)
        newWin = ui.Window()
        try:
            with newWin:
                self.assertEqual(pm.currentParent(), newWin)
                with pm.formLayout() as fl:
                    self.assertEqual(pm.currentParent(), fl)
                self.assertEqual(pm.currentParent(), newWin)
            self.assertTrue(pm.currentParent() in (None, newWin, fl))
        finally:
            pm.deleteUI(newWin, window=True)
        otherWin = ui.Window()
        # try NOT using with statement, to make sure the last newWin
        # statement's exit popped it off the stack correctly
        try:
            with pm.formLayout() as fl:
                self.assertEqual(pm.currentParent(), fl)
            self.assertEqual(pm.currentParent(), otherWin)
        finally:
            pm.deleteUI(otherWin, window=True)
class TestTextScrollList(unittest.TestCase):
    """Tests for the textScrollList UI control wrapper."""
    def setUp(self):
        # Clear any lingering menu parent, then give the test a fresh window.
        cmds.setParent(None, menu=1)
        self.win = cmds.window()
    def tearDown(self):
        cmds.deleteUI(self.win, window=True)
    def test_selectItemEmptyList(self):
        with ui.Window(self.win):
            with pm.formLayout():
                tsl = pm.textScrollList()
        tsl.extend(['a','b','c'])
        # Make sure this is NOT None
        self.assertEqual(tsl.getSelectItem(), [])
# When running inside an interactive Maya session (not batch mode), disable
# the test cases in this module so they are not picked up accidentally.
if not pm.about(batch=1):
    # Iterate over a snapshot: deleting from globals() while iterating its
    # live .items() view raises RuntimeError on Python 3.
    for key, obj in list(globals().items()):
        # The module defines TestCase *subclasses* (i.e. class objects), so
        # the check must be issubclass on types; the previous
        # isinstance(obj, unittest.TestCase) test never matched anything and
        # the tests were never actually canceled.
        if isinstance(obj, type) and issubclass(obj, unittest.TestCase):
            del globals()[key]
            # Rename so test discovery tools that hold a reference skip it.
            obj.__name__ = '_canceledTest_' + obj.__name__
| AtonLerin/pymel | tests/test_uitypes.py | Python | bsd-3-clause | 8,047 |
# -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for notification command."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import re
import time
import uuid
import boto
import gslib.tests.testcase as testcase
from gslib.tests.util import ObjectToURI as suri
from gslib.tests.util import unittest
from gslib.utils.retry_util import Retry
def _LoadNotificationUrl():
  """Returns the notification URL from the boto test config, or None."""
  url = boto.config.get_value('GSUtil', 'test_notification_url')
  return url
# Read once at import time; tests below are skipped when this is unset.
NOTIFICATION_URL = _LoadNotificationUrl()
class TestNotification(testcase.GsUtilIntegrationTestCase):
"""Integration tests for notification command."""
@unittest.skipUnless(NOTIFICATION_URL,
'Test requires notification URL configuration.')
def test_watch_bucket(self):
"""Tests creating a notification channel on a bucket."""
bucket_uri = self.CreateBucket()
self.RunGsUtil(
['notification', 'watchbucket', NOTIFICATION_URL,
suri(bucket_uri)])
identifier = str(uuid.uuid4())
token = str(uuid.uuid4())
stderr = self.RunGsUtil([
'notification', 'watchbucket', '-i', identifier, '-t', token,
NOTIFICATION_URL,
suri(bucket_uri)
],
return_stderr=True)
self.assertIn('token: %s' % token, stderr)
self.assertIn('identifier: %s' % identifier, stderr)
@unittest.skipUnless(NOTIFICATION_URL,
'Test requires notification URL configuration.')
def test_stop_channel(self):
"""Tests stopping a notification channel on a bucket."""
bucket_uri = self.CreateBucket()
stderr = self.RunGsUtil(
['notification', 'watchbucket', NOTIFICATION_URL,
suri(bucket_uri)],
return_stderr=True)
channel_id = re.findall(r'channel identifier: (?P<id>.*)', stderr)
self.assertEqual(len(channel_id), 1)
resource_id = re.findall(r'resource identifier: (?P<id>.*)', stderr)
self.assertEqual(len(resource_id), 1)
channel_id = channel_id[0]
resource_id = resource_id[0]
self.RunGsUtil(['notification', 'stopchannel', channel_id, resource_id])
@unittest.skipUnless(NOTIFICATION_URL,
                     'Test requires notification URL configuration.')
def test_list_one_channel(self):
  """Tests listing notification channel on a bucket."""
  # TODO(b/132277269): Re-enable these once the service-side bug is fixed.
  # Bug fix: the original used `return unittest.skip(...)`. unittest.skip()
  # returns a *decorator* object, so the test silently counted as passed;
  # skipTest() raises SkipTest and reports the test as skipped.
  self.skipTest('Functionality has been disabled due to b/132277269')
  bucket_uri = self.CreateBucket()
  # Set up an OCN (object change notification) on the newly created bucket.
  self.RunGsUtil(
      ['notification', 'watchbucket', NOTIFICATION_URL,
       suri(bucket_uri)],
      return_stderr=False)
  # The OCN listing in the service is eventually consistent. In initial
  # tests, it almost never was ready immediately after calling WatchBucket
  # above, so we A) sleep for a few seconds before the first OCN listing
  # attempt, and B) wrap the OCN listing attempt in retry logic in case
  # it raises a BucketNotFoundException (note that RunGsUtil will raise this
  # as an AssertionError due to the exit status not being 0).
  @Retry(AssertionError, tries=3, timeout_secs=5)
  def _ListObjectChangeNotifications():
    stderr = self.RunGsUtil(['notification', 'list', '-o',
                             suri(bucket_uri)],
                            return_stderr=True)
    return stderr

  time.sleep(5)
  stderr = _ListObjectChangeNotifications()
  # Exactly one channel should be listed, with every metadata field present.
  channel_id = re.findall(r'Channel identifier: (?P<id>.*)', stderr)
  self.assertEqual(len(channel_id), 1)
  resource_id = re.findall(r'Resource identifier: (?P<id>.*)', stderr)
  self.assertEqual(len(resource_id), 1)
  push_url = re.findall(r'Application URL: (?P<id>.*)', stderr)
  self.assertEqual(len(push_url), 1)
  subscriber_email = re.findall(r'Created by: (?P<id>.*)', stderr)
  self.assertEqual(len(subscriber_email), 1)
  creation_time = re.findall(r'Creation time: (?P<id>.*)', stderr)
  self.assertEqual(len(creation_time), 1)
def test_invalid_subcommand(self):
  """Tests that an unknown notification subcommand exits 1 with an error."""
  stderr = self.RunGsUtil(['notification', 'foo', 'bar', 'baz'],
                          return_stderr=True,
                          expected_status=1)
  self.assertIn('Invalid subcommand', stderr)
| catapult-project/catapult | third_party/gsutil/gslib/tests/test_notification.py | Python | bsd-3-clause | 4,981 |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class Platform(object):
  """The platform that the target browser is running on.

  Provides a limited interface to obtain stats from the platform itself,
  where possible.
  """

  def GetSurfaceCollector(self, trace_tag):
    """Platforms may be able to collect GL surface stats.

    This base implementation returns a no-op context manager; subclasses
    may override to return a real collector.
    """
    class StubSurfaceCollector(object):
      def __init__(self, trace_tag):
        pass

      def __enter__(self):
        return None

      def __exit__(self, *args):
        return None

    return StubSurfaceCollector(trace_tag)
| leighpauls/k2cro4 | tools/telemetry/telemetry/platform.py | Python | bsd-3-clause | 672 |
from django.forms import ModelForm
from .models import PakistaniPlace
class PakistaniPlaceForm(ModelForm):
    """ModelForm for storing a Pakistani place (localflavor test model)."""

    class Meta:
        model = PakistaniPlace
        # Exercise the state/postcode/phone localflavor fields plus their
        # required/default variants.
        fields = ('state', 'state_required', 'state_default',
                  'postcode', 'postcode_required', 'postcode_default',
                  'phone', 'name')
| infoxchange/django-localflavor | tests/test_pk/forms.py | Python | bsd-3-clause | 355 |
# -*- coding: utf-8 -*-
# Module: Navigation
# Author: asciidisco
# Created on: 11.10.2017
# License: MIT https://goo.gl/5bMj3H
"""Tests for the `Navigation` module"""
import unittest
import mock
from resources.lib.Navigation import Navigation
class NavigationTestCase(unittest.TestCase):
    """Placeholder suite: no tests implemented yet for the Navigation module."""
    pass
| asciidisco/plugin.video.netflix | resources/test/test_Navigation.py | Python | mit | 301 |
from pathlib import Path
from unittest import mock, TestCase
from rpg.spec import Spec
class RpgTestCase(TestCase):
    """Base test case providing filesystem assertions for rpg tests."""

    # Shared fixture directory used by subclasses.
    test_project_dir = Path("tests/project")

    def assertExistInDir(self, expected, pathlibobject):
        """Assert that every name in *expected* exists under the directory."""
        base = Path(pathlibobject)
        for name in expected:
            candidate = base / name
            self.assertTrue(candidate.exists(), msg=name)
class PluginTestCase(RpgTestCase):
    """Base test case for plugin tests: shares a mocked sack and a fresh Spec."""
    # Mocked DNF sack; individual tests configure return values as needed.
    sack = mock.MagicMock()
    # Empty Spec instance for plugins to mutate.
    spec = Spec()
| ignatenkobrain/rpg | tests/support.py | Python | gpl-2.0 | 434 |
import os
from enigma import eEPGCache, getBestPlayableServiceReference, \
eServiceReference, iRecordableService, quitMainloop, eActionMap, setPreferredTuner
from Components.config import config
from Components.UsageConfig import defaultMoviePath
from Components.TimerSanityCheck import TimerSanityCheck
from Screens.MessageBox import MessageBox
import Screens.Standby
import Screens.InfoBar
from Tools import Directories, Notifications, ASCIItranslit, Trashcan
from Tools.XMLTools import stringToXML
import timer
import xml.etree.cElementTree
import NavigationInstance
from ServiceReference import ServiceReference
from time import localtime, strftime, ctime, time
from bisect import insort
from sys import maxint
# ok, for descriptions etc we have:
# service reference (to get the service name)
# name (title)
# description (description)
# event data (ONLY for time adjustments etc.)
# parses an event, and gives out a (begin, end, name, description, eit)-tuple.
# begin and end will be corrected
def parseEvent(ev, description = True):
    """Extract recording parameters from an EPG event.

    Returns a (begin, end, name, description, eit) tuple; begin/end are
    widened by the configured pre/post recording margins.
    """
    if description:
        name = ev.getEventName()
        desc = ev.getShortDescription()
        if desc == "":
            desc = ev.getExtendedDescription()
    else:
        name = ""
        desc = ""
    begin = ev.getBeginTime()
    end = begin + ev.getDuration()
    eit = ev.getEventId()
    # Widen the recording window by the configured margins (stored in minutes).
    begin -= config.recording.margin_before.value * 60
    end += config.recording.margin_after.value * 60
    return (begin, end, name, desc, eit)
class AFTEREVENT:
    # What to do with the box once a timer has finished.
    NONE = 0         # take no action
    STANDBY = 1      # go to standby
    DEEPSTANDBY = 2  # go to deep standby (shut down)
    AUTO = 3         # decide from the box's prior state (see activate())
def findSafeRecordPath(dirname):
    """Return the real path of dirname, creating it if necessary.

    Returns None when dirname is empty, when it resolves to an unmounted
    location, or when the directory cannot be created.
    """
    if not dirname:
        return None
    from Components import Harddisk
    dirname = os.path.realpath(dirname)
    mountpoint = Harddisk.findMountPoint(dirname)
    if mountpoint in ('/', '/media'):
        # Resolving to / or /media means the target medium is not actually
        # mounted -- refuse to record there.
        print '[RecordTimer] media is not mounted:', dirname
        return None
    if not os.path.isdir(dirname):
        try:
            os.makedirs(dirname)
        except Exception, ex:
            print '[RecordTimer] Failed to create dir "%s":' % dirname, ex
            return None
    return dirname
def checkForRecordings():
    """Return True if a recording is running or one starts within 6 minutes."""
    nav = NavigationInstance.instance
    if nav.getRecordings():
        return True
    next_rec_time = nav.RecordTimer.getNextTimerTime(isWakeup=True)
    if next_rec_time <= 0:
        return False
    return (next_rec_time - time()) < 360
# please do not translate log messages
class RecordTimerEntry(timer.TimerEntry, object):
######### the following static methods and members are only in use when the box is in (soft) standby
wasInStandby = False
wasInDeepStandby = False
receiveRecordEvents = False
@staticmethod
def keypress(key=None, flag=1):
    # First key press after a timer-triggered wakeup clears the
    # "was in (deep) standby" bookkeeping and unbinds this hook again.
    if flag and (RecordTimerEntry.wasInStandby or RecordTimerEntry.wasInDeepStandby):
        RecordTimerEntry.wasInStandby = False
        RecordTimerEntry.wasInDeepStandby = False
        eActionMap.getInstance().unbindAction('', RecordTimerEntry.keypress)
@staticmethod
def setWasInDeepStandby():
    # Remember the box was woken from deep standby and watch for the
    # first key press (highest binding priority) to clear that state.
    RecordTimerEntry.wasInDeepStandby = True
    eActionMap.getInstance().bindAction('', -maxint - 1, RecordTimerEntry.keypress)
@staticmethod
def setWasInStandby():
    if not RecordTimerEntry.wasInStandby:
        if not RecordTimerEntry.wasInDeepStandby:
            # Key hook not yet bound by setWasInDeepStandby(); bind it now.
            eActionMap.getInstance().bindAction('', -maxint - 1, RecordTimerEntry.keypress)
        RecordTimerEntry.wasInDeepStandby = False
        RecordTimerEntry.wasInStandby = True
@staticmethod
def shutdown():
    # Immediate shutdown of the main loop (called when no recordings remain).
    quitMainloop(1)
@staticmethod
def staticGotRecordEvent(recservice, event):
    # Record-event hook armed by TryQuitMainloop(): on every recording end,
    # shut the box down unless something is recording or about to record.
    if event == iRecordableService.evEnd:
        print "RecordTimer.staticGotRecordEvent(iRecordableService.evEnd)"
        if not checkForRecordings():
            print "No recordings busy of sceduled within 6 minutes so shutdown"
            RecordTimerEntry.shutdown() # immediate shutdown
    elif event == iRecordableService.evStart:
        print "RecordTimer.staticGotRecordEvent(iRecordableService.evStart)"
@staticmethod
def stopTryQuitMainloop():
    """Disarm the shutdown-on-idle record-event hook."""
    print "RecordTimer.stopTryQuitMainloop"
    NavigationInstance.instance.record_event.remove(RecordTimerEntry.staticGotRecordEvent)
    RecordTimerEntry.receiveRecordEvents = False
@staticmethod
def TryQuitMainloop():
    """Arm shutdown-on-idle while the box is in standby."""
    if not RecordTimerEntry.receiveRecordEvents and Screens.Standby.inStandby:
        print "RecordTimer.TryQuitMainloop"
        NavigationInstance.instance.record_event.append(RecordTimerEntry.staticGotRecordEvent)
        RecordTimerEntry.receiveRecordEvents = True
        # send fake event.. to check if another recordings are running or
        # other timers start in a few seconds
        RecordTimerEntry.staticGotRecordEvent(None, iRecordableService.evEnd)
#################################################################
def __init__(self, serviceref, begin, end, name, description, eit, disabled = False, justplay = False, afterEvent = AFTEREVENT.AUTO, checkOldTimers = False, dirname = None, tags = None, descramble = True, record_ecm = False, always_zap = False, zap_wakeup = "always", rename_repeat = True):
    """Create a record (or zap) timer entry.

    serviceref must be a ServiceReference; begin/end are epoch seconds.
    justplay makes this a zap-only timer, always_zap forces a zap before
    recording, zap_wakeup controls zapping when waking from standby and
    rename_repeat re-reads name/description from the EPG on repeats.
    """
    timer.TimerEntry.__init__(self, int(begin), int(end))
    if checkOldTimers == True:
        # Drag timers beginning more than two weeks in the past up to "now".
        if self.begin < time() - 1209600:
            self.begin = int(time())
    if self.end < self.begin:
        self.end = self.begin
    assert isinstance(serviceref, ServiceReference)
    if serviceref and serviceref.isRecordable():
        self.service_ref = serviceref
    else:
        self.service_ref = ServiceReference(None)
    self.eit = eit
    self.dontSave = False
    self.name = name
    self.description = description
    self.disabled = disabled
    self.timer = None
    self.__record_service = None
    self.start_prepare = 0
    self.justplay = justplay
    self.always_zap = always_zap
    self.zap_wakeup = zap_wakeup
    self.afterEvent = afterEvent
    self.dirname = dirname
    self.dirnameHadToFallback = False
    self.autoincrease = False
    self.autoincreasetime = 3600 * 24 # 1 day
    self.tags = tags or []
    self.descramble = descramble
    self.record_ecm = record_ecm
    self.rename_repeat = rename_repeat
    # Only override tuner priority when a distinct recording priority is set.
    self.needChangePriorityFrontend = config.usage.recording_frontend_priority.value != "-2" and config.usage.recording_frontend_priority.value != config.usage.frontend_priority.value
    self.change_frontend = False
    self.InfoBarInstance = Screens.InfoBar.InfoBar.instance
    self.ts_dialog = None
    self.log_entries = []
    self.resetState()
def __repr__(self):
    """Debug representation: name, begin time, service and zap/record mode."""
    return "RecordTimerEntry(name=%s, begin=%s, serviceref=%s, justplay=%s)" % (self.name, ctime(self.begin), self.service_ref, self.justplay)
def log(self, code, msg):
    # Append a timestamped entry to the in-memory per-timer log and echo
    # it to the console.
    self.log_entries.append((int(time()), code, msg))
    print "[TIMER]", msg
def calculateFilename(self, name=None):
    """Compute and store self.Filename (without extension) for this recording.

    Falls back to the default movie path when the configured directory is
    unusable; returns None if no writable directory could be found.
    """
    service_name = self.service_ref.getServiceName()
    begin_date = strftime("%Y%m%d %H%M", localtime(self.begin))
    name = name or self.name
    filename = begin_date + " - " + service_name
    if name:
        # Honour the configured filename composition scheme.
        if config.recording.filename_composition.value == "short":
            filename = strftime("%Y%m%d", localtime(self.begin)) + " - " + name
        elif config.recording.filename_composition.value == "long":
            filename += " - " + name + " - " + self.description
        else:
            filename += " - " + name # standard
    if config.recording.ascii_filenames.value:
        filename = ASCIItranslit.legacyEncode(filename)
    if not self.dirname:
        dirname = findSafeRecordPath(defaultMoviePath())
    else:
        dirname = findSafeRecordPath(self.dirname)
        if dirname is None:
            # Configured directory unusable -- remember that we fell back.
            dirname = findSafeRecordPath(defaultMoviePath())
            self.dirnameHadToFallback = True
    if not dirname:
        return None
    self.Filename = Directories.getRecordingFilename(filename, dirname)
    self.log(0, "Filename calculated as: '%s'" % self.Filename)
    return self.Filename
def tryPrepare(self):
    """Allocate the record service and prepare the recording.

    Returns True on success; on failure schedules a retry via do_backoff()
    and returns False. Zap-only timers always succeed.
    """
    if self.justplay:
        return True
    else:
        if not self.calculateFilename():
            self.do_backoff()
            self.start_prepare = time() + self.backoff
            return False
        rec_ref = self.service_ref and self.service_ref.ref
        if rec_ref and rec_ref.flags & eServiceReference.isGroup:
            # Service groups (alternatives) need resolving to a concrete service.
            rec_ref = getBestPlayableServiceReference(rec_ref, eServiceReference())
            if not rec_ref:
                self.log(1, "'get best playable service for group... record' failed")
                return False
        self.setRecordingPreferredTuner()
        self.record_service = rec_ref and NavigationInstance.instance.recordService(rec_ref)
        if not self.record_service:
            self.log(1, "'record service' failed")
            self.setRecordingPreferredTuner(setdefault=True)
            return False
        name = self.name
        description = self.description
        if self.repeated:
            # For repeat timers, look up the actual EPG event at the middle
            # of the recording window to refresh name/description.
            epgcache = eEPGCache.getInstance()
            queryTime=self.begin+(self.end-self.begin)/2
            evt = epgcache.lookupEventTime(rec_ref, queryTime)
            if evt:
                if self.rename_repeat:
                    event_description = evt.getShortDescription()
                    if not event_description:
                        event_description = evt.getExtendedDescription()
                    if event_description and event_description != description:
                        description = event_description
                    event_name = evt.getEventName()
                    if event_name and event_name != name:
                        name = event_name
                        if not self.calculateFilename(event_name):
                            self.do_backoff()
                            self.start_prepare = time() + self.backoff
                            return False
                event_id = evt.getEventId()
            else:
                event_id = -1
        else:
            event_id = self.eit
            if event_id is None:
                event_id = -1
        prep_res=self.record_service.prepare(self.Filename + ".ts", self.begin, self.end, event_id, name.replace("\n", ""), description.replace("\n", ""), ' '.join(self.tags), bool(self.descramble), bool(self.record_ecm))
        if prep_res:
            if prep_res == -255:
                self.log(4, "failed to write meta information")
            else:
                self.log(2, "'prepare' failed: error %d" % prep_res)
            # we must calc nur start time before stopRecordService call because in Screens/Standby.py TryQuitMainloop tries to get
            # the next start time in evEnd event handler...
            self.do_backoff()
            self.start_prepare = time() + self.backoff
            NavigationInstance.instance.stopRecordService(self.record_service)
            self.record_service = None
            self.setRecordingPreferredTuner(setdefault=True)
            return False
        return True
def do_backoff(self):
    """Exponential retry backoff: starts at 5s, doubles, capped at 100s."""
    if self.backoff == 0:
        self.backoff = 5
    else:
        self.backoff *= 2
        if self.backoff > 100:
            self.backoff = 100
    self.log(10, "backoff: retry in %d seconds" % self.backoff)
def activate(self):
    """Drive the timer state machine one step forward.

    Returns True to accept the transition into the next state, False to
    stay in the current state (the entry fixes up its retry delay).
    """
    next_state = self.state + 1
    self.log(5, "activating state %d" % next_state)
    # Pre-prepare step: optionally zap to the recording service first.
    if next_state == 1:
        if self.always_zap:
            if Screens.Standby.inStandby:
                self.log(5, "wakeup and zap to recording service")
                RecordTimerEntry.setWasInStandby()
                #set service to zap after standby
                Screens.Standby.inStandby.prev_running_service = self.service_ref.ref
                Screens.Standby.inStandby.paused_service = None
                #wakeup standby
                Screens.Standby.inStandby.Power()
            else:
                if RecordTimerEntry.wasInDeepStandby:
                    RecordTimerEntry.setWasInStandby()
                cur_zap_ref = NavigationInstance.instance.getCurrentlyPlayingServiceReference()
                if cur_zap_ref and not cur_zap_ref.getPath():# we do not zap away if it is no live service
                    if self.checkingTimeshiftRunning():
                        if self.ts_dialog is None:
                            self.openChoiceActionBeforeZap()
                    else:
                        Notifications.AddNotification(MessageBox, _("In order to record a timer, the TV was switched to the recording service!\n"), type=MessageBox.TYPE_INFO, timeout=20)
                        self.setRecordingPreferredTuner()
                        self.failureCB(True)
                        self.log(5, "zap to recording service")
    if next_state == self.StatePrepared:
        if self.tryPrepare():
            self.log(6, "prepare ok, waiting for begin")
            # create file to "reserve" the filename
            # because another recording at the same time on another service can try to record the same event
            # i.e. cable / sat.. then the second recording needs an own extension... when we create the file
            # here than calculateFilename is happy
            if not self.justplay:
                open(self.Filename + ".ts", "w").close()
            # Give the Trashcan a chance to clean up
            try:
                Trashcan.instance.cleanIfIdle(self.Filename)
            except Exception, e:
                print "[TIMER] Failed to call Trashcan.instance.cleanIfIdle()"
                print "[TIMER] Error:", e
            # fine. it worked, resources are allocated.
            self.next_activation = self.begin
            self.backoff = 0
            return True
        self.log(7, "prepare failed")
        if self.first_try_prepare or (self.ts_dialog is not None and not self.checkingTimeshiftRunning()):
            self.first_try_prepare = False
            cur_ref = NavigationInstance.instance.getCurrentlyPlayingServiceReference()
            if cur_ref and not cur_ref.getPath():
                if self.always_zap:
                    return False
                if Screens.Standby.inStandby:
                    self.setRecordingPreferredTuner()
                    self.failureCB(True)
                elif self.checkingTimeshiftRunning():
                    if self.ts_dialog is None:
                        self.openChoiceActionBeforeZap()
                elif not config.recording.asktozap.value:
                    self.log(8, "asking user to zap away")
                    Notifications.AddNotificationWithCallback(self.failureCB, MessageBox, _("A timer failed to record!\nDisable TV and try again?\n"), timeout=20, default=True)
                else: # zap without asking
                    self.log(9, "zap without asking")
                    Notifications.AddNotification(MessageBox, _("In order to record a timer, the TV was switched to the recording service!\n"), type=MessageBox.TYPE_INFO, timeout=20)
                    self.setRecordingPreferredTuner()
                    self.failureCB(True)
            elif cur_ref:
                self.log(8, "currently running service is not a live service.. so stop it makes no sense")
            else:
                self.log(8, "currently no service running... so we dont need to stop it")
        return False
    elif next_state == self.StateRunning:
        # if this timer has been cancelled, just go to "end" state.
        if self.cancelled:
            return True
        if self.justplay:
            # Zap-only timer: wake the box if configured, then zap.
            if Screens.Standby.inStandby:
                if RecordTimerEntry.wasInDeepStandby and self.zap_wakeup in ("always", "from_deep_standby") or self.zap_wakeup in ("always", "from_standby"):
                    self.log(11, "wakeup and zap")
                    RecordTimerEntry.setWasInStandby()
                    #set service to zap after standby
                    Screens.Standby.inStandby.prev_running_service = self.service_ref.ref
                    Screens.Standby.inStandby.paused_service = None
                    #wakeup standby
                    Screens.Standby.inStandby.Power()
            else:
                if RecordTimerEntry.wasInDeepStandby:
                    RecordTimerEntry.setWasInStandby()
                if self.checkingTimeshiftRunning():
                    if self.ts_dialog is None:
                        self.openChoiceActionBeforeZap()
                else:
                    self.log(11, "zapping")
                    NavigationInstance.instance.playService(self.service_ref.ref)
            return True
        else:
            self.log(11, "start recording")
            if RecordTimerEntry.wasInDeepStandby:
                RecordTimerEntry.keypress()
                if Screens.Standby.inStandby: #In case some plugin did put the receiver already in standby
                    config.misc.standbyCounter.value = 0
                else:
                    Notifications.AddNotification(Screens.Standby.Standby, StandbyCounterIncrease=False)
            record_res = self.record_service.start()
            self.setRecordingPreferredTuner(setdefault=True)
            if record_res:
                self.log(13, "start record returned %d" % record_res)
                self.do_backoff()
                # retry
                self.begin = time() + self.backoff
                return False
            # Tell the trashcan we started recording. The trashcan gets events,
            # but cannot tell what the associated path is.
            Trashcan.instance.markDirty(self.Filename)
            return True
    elif next_state == self.StateEnded:
        old_end = self.end
        self.ts_dialog = None
        if self.setAutoincreaseEnd():
            # End was pushed out (auto-increase): stay in the running state.
            self.log(12, "autoincrase recording %d minute(s)" % int((self.end - old_end)/60))
            self.state -= 1
            return True
        self.log(12, "stop recording")
        if not self.justplay:
            NavigationInstance.instance.stopRecordService(self.record_service)
            self.record_service = None
        if not checkForRecordings():
            # No further recordings pending: honour the after-event action.
            if self.afterEvent == AFTEREVENT.DEEPSTANDBY or self.afterEvent == AFTEREVENT.AUTO and (Screens.Standby.inStandby or RecordTimerEntry.wasInStandby) and not config.misc.standbyCounter.value:
                if not Screens.Standby.inTryQuitMainloop:
                    if Screens.Standby.inStandby:
                        RecordTimerEntry.TryQuitMainloop()
                    else:
                        Notifications.AddNotificationWithCallback(self.sendTryQuitMainloopNotification, MessageBox, _("A finished record timer wants to shut down\nyour receiver. Shutdown now?"), timeout=20, default=True)
            elif self.afterEvent == AFTEREVENT.STANDBY or self.afterEvent == AFTEREVENT.AUTO and RecordTimerEntry.wasInStandby:
                if not Screens.Standby.inStandby:
                    Notifications.AddNotificationWithCallback(self.sendStandbyNotification, MessageBox, _("A finished record timer wants to set your\nreceiver to standby. Do that now?"), timeout=20, default=True)
            else:
                RecordTimerEntry.keypress()
        return True
def setAutoincreaseEnd(self, entry = None):
    """Try to extend this timer's end time (auto-increase).

    Without an entry, extend by self.autoincreasetime; with one, extend up
    to 30s before that entry begins. A sanity check against the current
    timer list may shorten the extension. Returns True if end was moved.
    """
    if not self.autoincrease:
        return False
    if entry is None:
        new_end = int(time()) + self.autoincreasetime
    else:
        new_end = entry.begin - 30
    # Probe the new end with a disabled dummy entry to detect conflicts.
    dummyentry = RecordTimerEntry(self.service_ref, self.begin, new_end, self.name, self.description, self.eit, disabled=True, justplay = self.justplay, afterEvent = self.afterEvent, dirname = self.dirname, tags = self.tags)
    dummyentry.disabled = self.disabled
    timersanitycheck = TimerSanityCheck(NavigationInstance.instance.RecordTimer.timer_list, dummyentry)
    if not timersanitycheck.check():
        simulTimerList = timersanitycheck.getSimulTimerList()
        if simulTimerList is not None and len(simulTimerList) > 1:
            new_end = simulTimerList[1].begin
            new_end -= 30 # 30 Sekunden Prepare-Zeit lassen
    if new_end <= time():
        return False
    self.end = new_end
    return True
def setRecordingPreferredTuner(self, setdefault=False):
    """Switch the preferred tuner to the recording priority (or back).

    With setdefault=True the normal frontend priority is restored.
    """
    if self.needChangePriorityFrontend:
        elem = None
        if not self.change_frontend and not setdefault:
            elem = config.usage.recording_frontend_priority.value
            self.change_frontend = True
        elif self.change_frontend and setdefault:
            elem = config.usage.frontend_priority.value
            self.change_frontend = False
        if elem is not None:
            setPreferredTuner(int(elem))
def checkingTimeshiftRunning(self):
    """Return True if an activated timeshift should block an automatic zap."""
    return config.usage.check_timeshift.value and self.InfoBarInstance and self.InfoBarInstance.timeshiftEnabled() and self.InfoBarInstance.timeshift_was_activated
def openChoiceActionBeforeZap(self):
    """Ask the user what to do with a running timeshift before zapping.

    Opens a MessageBox (stored in self.ts_dialog) offering to save the
    timeshift, zap anyway, or disable/remove/skip the timer.
    """
    if self.ts_dialog is None:
        type = _("record")
        if self.justplay:
            type = _("zap")
        elif self.always_zap:
            type = _("zap and record")
        message = _("You must switch to the service %s (%s - '%s')!\n") % (type, self.service_ref.getServiceName(), self.name)
        if self.repeated:
            message += _("Attention, this is repeated timer!\n")
        message += _("Timeshift is running. Select an action.\n")
        choice = [(_("Zap"), "zap"), (_("Don't zap and disable timer"), "disable"), (_("Don't zap and remove timer"), "remove")]
        if not self.InfoBarInstance.save_timeshift_file:
            # Offer saving options; position depends on whether timeshift is active.
            choice.insert(1, (_("Save timeshift in movie dir and zap"), "save_movie"))
            if self.InfoBarInstance.timeshiftActivated():
                choice.insert(0, (_("Save timeshift and zap"), "save"))
            else:
                choice.insert(1, (_("Save timeshift and zap"), "save"))
        else:
            message += _("Reminder, you have chosen to save timeshift file.")
            #if self.justplay or self.always_zap:
            #	choice.insert(2, (_("Don't zap"), "continue"))
        choice.insert(2, (_("Don't zap"), "continue"))
        def zapAction(choice):
            # Callback executed with the user's selection (or None on timeout).
            start_zap = True
            if choice:
                if choice in ("zap", "save", "save_movie"):
                    self.log(8, "zap to recording service")
                    if choice in ("save", "save_movie"):
                        ts = self.InfoBarInstance.getTimeshift()
                        if ts and ts.isTimeshiftEnabled():
                            if choice =="save_movie":
                                self.InfoBarInstance.save_timeshift_in_movie_dir = True
                            self.InfoBarInstance.save_timeshift_file = True
                            ts.saveTimeshiftFile()
                            del ts
                            self.InfoBarInstance.saveTimeshiftFiles()
                elif choice == "disable":
                    self.disable()
                    NavigationInstance.instance.RecordTimer.timeChanged(self)
                    start_zap = False
                    self.log(8, "zap canceled by the user, timer disabled")
                elif choice == "remove":
                    start_zap = False
                    self.afterEvent = AFTEREVENT.NONE
                    NavigationInstance.instance.RecordTimer.removeEntry(self)
                    self.log(8, "zap canceled by the user, timer removed")
                elif choice == "continue":
                    if self.justplay:
                        self.end = self.begin
                    start_zap = False
                    self.log(8, "zap canceled by the user")
            if start_zap:
                if not self.justplay:
                    self.setRecordingPreferredTuner()
                    self.failureCB(True)
                else:
                    self.log(8, "zapping")
                    NavigationInstance.instance.playService(self.service_ref.ref)
        self.ts_dialog = self.InfoBarInstance.session.openWithCallback(zapAction, MessageBox, message, simple=True, list=choice, timeout=20)
def sendStandbyNotification(self, answer):
    """MessageBox callback: put the box into standby if the user agreed."""
    RecordTimerEntry.keypress()
    if answer:
        Notifications.AddNotification(Screens.Standby.Standby)
def sendTryQuitMainloopNotification(self, answer):
    """MessageBox callback: shut the box down if the user agreed."""
    RecordTimerEntry.keypress()
    if answer:
        Notifications.AddNotification(Screens.Standby.TryQuitMainloop, 1)
def getNextActivation(self):
    """Return the epoch time at which the next state transition is due."""
    if self.state == self.StateEnded:
        return self.end
    next_state = self.state + 1
    return {self.StatePrepared: self.start_prepare,
            self.StateRunning: self.begin,
            self.StateEnded: self.end }[next_state]
def failureCB(self, answer):
    """Callback for the "zap away?" question: zap if the user agreed."""
    self.ts_dialog = None
    if answer == True:
        self.log(13, "ok, zapped away")
        #NavigationInstance.instance.stopUserServices()
        NavigationInstance.instance.playService(self.service_ref.ref)
    else:
        self.log(14, "user didn't want to zap away, record will probably fail")
def timeChanged(self):
    """Recompute start_prepare and reset backoff after begin/end changed."""
    old_prepare = self.start_prepare
    self.start_prepare = self.begin - self.prepare_time
    self.backoff = 0
    if int(old_prepare) != int(self.start_prepare):
        self.log(15, "record time changed, start prepare is now: %s" % ctime(self.start_prepare))
def gotRecordEvent(self, record, event):
    """Handle events from our record service (write errors, start, abort)."""
    # TODO: this is not working (never true), please fix. (comparing two swig wrapped ePtrs)
    if self.__record_service.__deref__() != record.__deref__():
        return
    self.log(16, "record event %d" % event)
    if event == iRecordableService.evRecordWriteError:
        print "WRITE ERROR on recording, disk full?"
        # show notification. the 'id' will make sure that it will be
        # displayed only once, even if more timers are failing at the
        # same time. (which is very likely in case of disk fullness)
        Notifications.AddPopup(text = _("Write error while recording. Disk full?\n"), type = MessageBox.TYPE_ERROR, timeout = 0, id = "DiskFullMessage")
        # ok, the recording has been stopped. we need to properly note
        # that in our state, with also keeping the possibility to re-try.
        # TODO: this has to be done.
    elif event == iRecordableService.evStart:
        text = _("A record has been started:\n%s") % self.name
        notify = config.usage.show_message_when_recording_starts.value and not Screens.Standby.inStandby and self.InfoBarInstance and self.InfoBarInstance.execing
        if self.dirnameHadToFallback:
            # Always notify when we had to record into the fallback directory.
            text = '\n'.join((text, _("Please note that the previously selected media could not be accessed and therefore the default directory is being used instead.")))
            notify = True
        if notify:
            Notifications.AddPopup(text = text, type = MessageBox.TYPE_INFO, timeout = 3)
    elif event == iRecordableService.evRecordAborted:
        NavigationInstance.instance.RecordTimer.removeEntry(self)
# we have record_service as property to automatically subscribe to record service events
def setRecordService(self, service):
    """Swap the underlying record service, (un)subscribing gotRecordEvent."""
    if self.__record_service is not None:
        print "[remove callback]"
        NavigationInstance.instance.record_event.remove(self.gotRecordEvent)
    self.__record_service = service
    if self.__record_service is not None:
        print "[add callback]"
        NavigationInstance.instance.record_event.append(self.gotRecordEvent)

record_service = property(lambda self: self.__record_service, setRecordService)
def createTimer(xml):
    """Re-create a RecordTimerEntry from a <timer> element of timers.xml."""
    begin = int(xml.get("begin"))
    end = int(xml.get("end"))
    serviceref = ServiceReference(xml.get("serviceref").encode("utf-8"))
    description = xml.get("description").encode("utf-8")
    repeated = xml.get("repeated").encode("utf-8")
    # Optional attributes fall back to their defaults when absent.
    rename_repeat = long(xml.get("rename_repeat") or "1")
    disabled = long(xml.get("disabled") or "0")
    justplay = long(xml.get("justplay") or "0")
    always_zap = long(xml.get("always_zap") or "0")
    zap_wakeup = str(xml.get("zap_wakeup") or "always")
    afterevent = str(xml.get("afterevent") or "nothing")
    afterevent = {
        "nothing": AFTEREVENT.NONE,
        "standby": AFTEREVENT.STANDBY,
        "deepstandby": AFTEREVENT.DEEPSTANDBY,
        "auto": AFTEREVENT.AUTO
        }[afterevent]
    eit = xml.get("eit")
    if eit and eit != "None":
        eit = long(eit)
    else:
        eit = None
    location = xml.get("location")
    if location and location != "None":
        location = location.encode("utf-8")
    else:
        location = None
    tags = xml.get("tags")
    if tags and tags != "None":
        tags = tags.encode("utf-8").split(' ')
    else:
        tags = None
    descramble = int(xml.get("descramble") or "1")
    record_ecm = int(xml.get("record_ecm") or "0")
    name = xml.get("name").encode("utf-8")
    #filename = xml.get("filename").encode("utf-8")
    entry = RecordTimerEntry(serviceref, begin, end, name, description, eit, disabled, justplay, afterevent, dirname = location, tags = tags, descramble = descramble, record_ecm = record_ecm, always_zap = always_zap, zap_wakeup = zap_wakeup, rename_repeat = rename_repeat)
    entry.repeated = int(repeated)
    # Restore the persisted per-timer log entries.
    for l in xml.findall("log"):
        time = int(l.get("time"))
        code = int(l.get("code"))
        msg = l.text.strip().encode("utf-8")
        entry.log_entries.append((time, code, msg))
    return entry
class RecordTimer(timer.Timer):
def __init__(self):
timer.Timer.__init__(self)
self.Filename = Directories.resolveFilename(Directories.SCOPE_CONFIG, "timers.xml")
try:
self.loadTimer()
except IOError:
print "unable to load timers from file!"
def doActivate(self, w):
# when activating a timer which has already passed,
# simply abort the timer. don't run trough all the stages.
if w.shouldSkip():
w.state = RecordTimerEntry.StateEnded
else:
# when active returns true, this means "accepted".
# otherwise, the current state is kept.
# the timer entry itself will fix up the delay then.
if w.activate():
w.state += 1
self.timer_list.remove(w)
# did this timer reached the last state?
if w.state < RecordTimerEntry.StateEnded:
# no, sort it into active list
insort(self.timer_list, w)
else:
# yes. Process repeated, and re-add.
if w.repeated:
w.processRepeated()
w.state = RecordTimerEntry.StateWaiting
w.first_try_prepare = True
self.addTimerEntry(w)
else:
# Remove old timers as set in config
self.cleanupDaily(config.recording.keep_timers.value)
insort(self.processed_timers, w)
self.stateChanged(w)
def isRecording(self):
for timer in self.timer_list:
if timer.isRunning() and not timer.justplay:
return True
return False
def loadTimer(self):
# TODO: PATH!
if not Directories.fileExists(self.Filename):
return
try:
doc = xml.etree.cElementTree.parse(self.Filename)
except SyntaxError:
from Tools.Notifications import AddPopup
from Screens.MessageBox import MessageBox
AddPopup(_("The timer file (timers.xml) is corrupt and could not be loaded."), type = MessageBox.TYPE_ERROR, timeout = 0, id = "TimerLoadFailed")
print "timers.xml failed to load!"
try:
import os
os.rename(self.Filename, self.Filename + "_old")
except (IOError, OSError):
print "renaming broken timer failed"
return
except IOError:
print "timers.xml not found!"
return
root = doc.getroot()
# put out a message when at least one timer overlaps
checkit = True
for timer in root.findall("timer"):
newTimer = createTimer(timer)
if (self.record(newTimer, True, dosave=False) is not None) and (checkit == True):
from Tools.Notifications import AddPopup
from Screens.MessageBox import MessageBox
AddPopup(_("Timer overlap in timers.xml detected!\nPlease recheck it!"), type = MessageBox.TYPE_ERROR, timeout = 0, id = "TimerLoadFailed")
checkit = False # at moment it is enough when the message is displayed one time
def saveTimer(self):
#root_element = xml.etree.cElementTree.Element('timers')
#root_element.text = "\n"
#for timer in self.timer_list + self.processed_timers:
# some timers (instant records) don't want to be saved.
# skip them
#if timer.dontSave:
#continue
#t = xml.etree.cElementTree.SubElement(root_element, 'timers')
#t.set("begin", str(int(timer.begin)))
#t.set("end", str(int(timer.end)))
#t.set("serviceref", str(timer.service_ref))
#t.set("repeated", str(timer.repeated))
#t.set("name", timer.name)
#t.set("description", timer.description)
#t.set("afterevent", str({
# AFTEREVENT.NONE: "nothing",
# AFTEREVENT.STANDBY: "standby",
# AFTEREVENT.DEEPSTANDBY: "deepstandby",
# AFTEREVENT.AUTO: "auto"}))
#if timer.eit is not None:
# t.set("eit", str(timer.eit))
#if timer.dirname is not None:
# t.set("location", str(timer.dirname))
#t.set("disabled", str(int(timer.disabled)))
#t.set("justplay", str(int(timer.justplay)))
#t.text = "\n"
#t.tail = "\n"
#for time, code, msg in timer.log_entries:
#l = xml.etree.cElementTree.SubElement(t, 'log')
#l.set("time", str(time))
#l.set("code", str(code))
#l.text = str(msg)
#l.tail = "\n"
#doc = xml.etree.cElementTree.ElementTree(root_element)
#doc.write(self.Filename)
list = []
list.append('<?xml version="1.0" ?>\n')
list.append('<timers>\n')
for timer in self.timer_list + self.processed_timers:
if timer.dontSave:
continue
list.append('<timer')
list.append(' begin="' + str(int(timer.begin)) + '"')
list.append(' end="' + str(int(timer.end)) + '"')
list.append(' serviceref="' + stringToXML(str(timer.service_ref)) + '"')
list.append(' repeated="' + str(int(timer.repeated)) + '"')
list.append(' name="' + str(stringToXML(timer.name)) + '"')
list.append(' description="' + str(stringToXML(timer.description)) + '"')
list.append(' afterevent="' + str(stringToXML({
AFTEREVENT.NONE: "nothing",
AFTEREVENT.STANDBY: "standby",
AFTEREVENT.DEEPSTANDBY: "deepstandby",
AFTEREVENT.AUTO: "auto"
}[timer.afterEvent])) + '"')
if timer.eit is not None:
list.append(' eit="' + str(timer.eit) + '"')
if timer.dirname is not None:
list.append(' location="' + str(stringToXML(timer.dirname)) + '"')
if timer.tags is not None:
list.append(' tags="' + str(stringToXML(' '.join(timer.tags))) + '"')
list.append(' disabled="' + str(int(timer.disabled)) + '"')
list.append(' justplay="' + str(int(timer.justplay)) + '"')
list.append(' always_zap="' + str(int(timer.always_zap)) + '"')
list.append(' zap_wakeup="' + str(timer.zap_wakeup) + '"')
list.append(' rename_repeat="' + str(int(timer.rename_repeat)) + '"')
list.append(' descramble="' + str(int(timer.descramble)) + '"')
list.append(' record_ecm="' + str(int(timer.record_ecm)) + '"')
list.append('>\n')
if config.recording.debug.value:
for time, code, msg in timer.log_entries:
list.append('<log')
list.append(' code="' + str(code) + '"')
list.append(' time="' + str(time) + '"')
list.append('>')
list.append(str(stringToXML(msg)))
list.append('</log>\n')
list.append('</timer>\n')
list.append('</timers>\n')
file = open(self.Filename + ".writing", "w")
for x in list:
file.write(x)
file.flush()
import os
os.fsync(file.fileno())
file.close()
os.rename(self.Filename + ".writing", self.Filename)
def getNextZapTime(self, isWakeup=False):
    """Return the begin time of the next pending zap-only timer, or -1.

    A timer qualifies when it is a zap timer (justplay), has not started
    yet, and -- when planning a wakeup -- is allowed to wake the box
    (zap_wakeup not "from_standby"/"never").
    """
    now = time()
    for entry in self.timer_list:
        not_applicable = (
            not entry.justplay
            or entry.begin < now
            or (isWakeup and entry.zap_wakeup in ("from_standby", "never"))
        )
        if not not_applicable:
            return entry.begin
    return -1
def getNextRecordingTime(self):
    """Return the next activation time of a pending record timer, or -1."""
    now = time()
    for entry in self.timer_list:
        activation = entry.getNextActivation()
        is_pending_record = not entry.justplay and activation >= now
        if is_pending_record:
            return activation
    return -1
def getNextTimerTime(self, isWakeup=False):
    """Return the next activation time of any pending timer, or -1.

    For wakeup planning, zap timers that must not wake the box
    (zap_wakeup "from_standby"/"never") are skipped.
    """
    now = time()
    for entry in self.timer_list:
        activation = entry.getNextActivation()
        if activation < now:
            continue
        if isWakeup and entry.justplay and entry.zap_wakeup in ("from_standby", "never"):
            continue
        return activation
    return -1
def isNextRecordAfterEventActionAuto(self):
    """True when the earliest pending record timer has after-event AUTO."""
    now = time()
    first = None
    for entry in self.timer_list:
        pending_record = not entry.justplay and entry.begin >= now
        if not pending_record:
            continue
        if first is not None and first.begin != entry.begin:
            continue
        first = entry
        if first.afterEvent == AFTEREVENT.AUTO:
            return True
    return False
def record(self, entry, ignoreTSC=False, dosave=True):  # called from loadTimer with dosave=False
    """Add a timer entry after running the sanity check.

    Returns the list of conflicting timers when a conflict is detected and
    ignoreTSC is false; otherwise None.  Exact duplicates are dropped
    silently.  When dosave is true the timer list is persisted afterwards.
    """
    timersanitycheck = TimerSanityCheck(self.timer_list,entry)
    if not timersanitycheck.check():
        if ignoreTSC != True:
            print "timer conflict detected!"
            print timersanitycheck.getSimulTimerList()
            return timersanitycheck.getSimulTimerList()
        else:
            print "ignore timer conflict"
    elif timersanitycheck.doubleCheck():
        # an identical timer already exists -- do not add it twice
        print "ignore double timer"
        return None
    entry.timeChanged()
    print "[Timer] Record " + str(entry)
    entry.Timer = self
    self.addTimerEntry(entry)
    if dosave:
        self.saveTimer()
    return None
def isInRepeatTimer(self, timer, event):
    """Check whether repeat *timer* overlaps *event* closely enough that it
    can be treated as an (editable) recording of that event.

    Truthy only when part of the event is covered AND the covered part is
    at least as long as the uncovered remainder.  begin2/end2 and
    xbegin/xend are minutes-of-day offset by 1440 so repeat timers can be
    compared independently of the calendar date.
    """
    time_match = 0
    is_editable = False
    begin = event.getBeginTime()
    duration = event.getDuration()
    end = begin + duration
    timer_end = timer.end
    # A disabled-but-still-running timer only counts via plain interval overlap.
    if timer.disabled and timer.isRunning():
        if begin < timer.begin <= end or timer.begin <= begin <= timer_end:
            return True
        else:
            return False
    # Stretch (near) zero-length zap timers to one minute.
    if timer.justplay and (timer_end - timer.begin) <= 1:
        timer_end += 60
    bt = localtime(begin)
    bday = bt.tm_wday
    begin2 = 1440 + bt.tm_hour * 60 + bt.tm_min
    end2 = begin2 + duration / 60
    xbt = localtime(timer.begin)
    xet = localtime(timer_end)
    offset_day = False
    checking_time = timer.begin < begin or begin <= timer.begin <= end
    if xbt.tm_yday != xet.tm_yday:
        # Timer crosses midnight: also consider the previous weekday's repeat flag.
        oday = bday - 1
        if oday == -1: oday = 6
        offset_day = timer.repeated & (1 << oday)
    xbegin = 1440 + xbt.tm_hour * 60 + xbt.tm_min
    xend = xbegin + ((timer_end - timer.begin) / 60)
    if xend < xbegin:
        xend += 1440
    if timer.repeated & (1 << bday) and checking_time:
        if begin2 < xbegin <= end2:
            if xend < end2:
                # recording within event
                time_match = (xend - xbegin) * 60
                is_editable = True
            else:
                # recording last part of event
                time_match = (end2 - xbegin) * 60
                summary_end = (xend - end2) * 60
                is_editable = not summary_end and True or time_match >= summary_end
        elif xbegin <= begin2 <= xend:
            if xend < end2:
                # recording first part of event
                time_match = (xend - begin2) * 60
                summary_end = (begin2 - xbegin) * 60
                is_editable = not summary_end and True or time_match >= summary_end
            else:
                # recording whole event
                time_match = (end2 - begin2) * 60
                is_editable = True
        elif offset_day:
            # Retry with the timer shifted back one day (midnight crossing).
            xbegin -= 1440
            xend -= 1440
            if begin2 < xbegin <= end2:
                if xend < end2:
                    # recording within event
                    time_match = (xend - xbegin) * 60
                    is_editable = True
                else:
                    # recording last part of event
                    time_match = (end2 - xbegin) * 60
                    summary_end = (xend - end2) * 60
                    is_editable = not summary_end and True or time_match >= summary_end
            elif xbegin <= begin2 <= xend:
                if xend < end2:
                    # recording first part of event
                    time_match = (xend - begin2) * 60
                    summary_end = (begin2 - xbegin) * 60
                    is_editable = not summary_end and True or time_match >= summary_end
                else:
                    # recording whole event
                    time_match = (end2 - begin2) * 60
                    is_editable = True
    elif offset_day and checking_time:
        # Repeat flag only set for the previous day; shift timer back one day.
        xbegin -= 1440
        xend -= 1440
        if begin2 < xbegin <= end2:
            if xend < end2:
                # recording within event
                time_match = (xend - xbegin) * 60
                is_editable = True
            else:
                # recording last part of event
                time_match = (end2 - xbegin) * 60
                summary_end = (xend - end2) * 60
                is_editable = not summary_end and True or time_match >= summary_end
        elif xbegin <= begin2 <= xend:
            if xend < end2:
                # recording first part of event
                time_match = (xend - begin2) * 60
                summary_end = (begin2 - xbegin) * 60
                is_editable = not summary_end and True or time_match >= summary_end
            else:
                # recording whole event
                time_match = (end2 - begin2) * 60
                is_editable = True
    return time_match and is_editable
def isInTimer(self, eventid, begin, duration, service):
    """Return overlap information between an EPG event and the timer list.

    Returns None when no timer touches the event, otherwise a tuple
    (time_match_seconds, [type, ...]).  The base type encodes coverage:
    1 last part, 2 whole event, 3 inside event, 4 first part; offset by
    +5 for a zap timer, +10 for zap with always_zap, and +15 for a repeat
    timer (a further +15 for a repeat timer disabled while running).
    Iteration stops early at the first whole-event match.
    """
    returnValue = None
    type = 0  # NOTE: shadows the builtin 'type' inside this method
    time_match = 0
    bt = None
    # Timer edges are only snapped to event edges when no margins are set.
    check_offset_time = not config.recording.margin_before.value and not config.recording.margin_after.value
    end = begin + duration
    # Service references compare on the first 11 ':'-separated fields only.
    refstr = ':'.join(service.split(':')[:11])
    for x in self.timer_list:
        check = ':'.join(x.service_ref.ref.toString().split(':')[:11]) == refstr
        if not check:
            sref = x.service_ref.ref
            parent_sid = sref.getUnsignedData(5)
            parent_tsid = sref.getUnsignedData(6)
            if parent_sid and parent_tsid:
                # check for subservice
                sid = sref.getUnsignedData(1)
                tsid = sref.getUnsignedData(2)
                # Temporarily rewrite sref to its parent service ...
                sref.setUnsignedData(1, parent_sid)
                sref.setUnsignedData(2, parent_tsid)
                sref.setUnsignedData(5, 0)
                sref.setUnsignedData(6, 0)
                check = sref.toCompareString() == refstr
                num = 0
                if check:
                    check = False
                    event = eEPGCache.getInstance().lookupEventId(sref, eventid)
                    num = event and event.getNumOfLinkageServices() or 0
                # ... then restore the original subservice data.
                sref.setUnsignedData(1, sid)
                sref.setUnsignedData(2, tsid)
                sref.setUnsignedData(5, parent_sid)
                sref.setUnsignedData(6, parent_tsid)
                for cnt in range(num):
                    subservice = event.getLinkageService(sref, cnt)
                    if sref.toCompareString() == subservice.toCompareString():
                        check = True
                        break
        if check:
            timer_end = x.end
            timer_begin = x.begin
            type_offset = 0
            if not x.repeated and check_offset_time:
                # Snap timer edges that lie within a minute of the event edges.
                if 0 < end - timer_end <= 59:
                    timer_end = end
                elif 0 < timer_begin - begin <= 59:
                    timer_begin = begin
            if x.justplay:
                type_offset = 5
                if (timer_end - x.begin) <= 1:
                    timer_end += 60
                if x.always_zap:
                    type_offset = 10
            timer_repeat = x.repeated
            # if set 'don't stop current event but disable coming events' for repeat timer
            running_only_curevent = x.disabled and x.isRunning() and timer_repeat
            if running_only_curevent:
                timer_repeat = 0
                type_offset += 15
            if timer_repeat != 0:
                type_offset += 15
                if bt is None:
                    # Event times in minutes-of-day (offset 1440 for day wrap);
                    # computed lazily once for the whole timer loop.
                    bt = localtime(begin)
                    bday = bt.tm_wday
                    begin2 = 1440 + bt.tm_hour * 60 + bt.tm_min
                    end2 = begin2 + duration / 60
                xbt = localtime(x.begin)
                xet = localtime(timer_end)
                offset_day = False
                checking_time = x.begin < begin or begin <= x.begin <= end
                if xbt.tm_yday != xet.tm_yday:
                    # Timer spans midnight: consider the previous weekday's flag too.
                    oday = bday - 1
                    if oday == -1: oday = 6
                    offset_day = x.repeated & (1 << oday)
                xbegin = 1440 + xbt.tm_hour * 60 + xbt.tm_min
                xend = xbegin + ((timer_end - x.begin) / 60)
                if xend < xbegin:
                    xend += 1440
                if x.repeated & (1 << bday) and checking_time:
                    if begin2 < xbegin <= end2:
                        if xend < end2:
                            # recording within event
                            time_match = (xend - xbegin) * 60
                            type = type_offset + 3
                        else:
                            # recording last part of event
                            time_match = (end2 - xbegin) * 60
                            type = type_offset + 1
                    elif xbegin <= begin2 <= xend:
                        if xend < end2:
                            # recording first part of event
                            time_match = (xend - begin2) * 60
                            type = type_offset + 4
                        else:
                            # recording whole event
                            time_match = (end2 - begin2) * 60
                            type = type_offset + 2
                    elif offset_day:
                        # Retry with the timer shifted back one day.
                        xbegin -= 1440
                        xend -= 1440
                        if begin2 < xbegin <= end2:
                            if xend < end2:
                                # recording within event
                                time_match = (xend - xbegin) * 60
                                type = type_offset + 3
                            else:
                                # recording last part of event
                                time_match = (end2 - xbegin) * 60
                                type = type_offset + 1
                        elif xbegin <= begin2 <= xend:
                            if xend < end2:
                                # recording first part of event
                                time_match = (xend - begin2) * 60
                                type = type_offset + 4
                            else:
                                # recording whole event
                                time_match = (end2 - begin2) * 60
                                type = type_offset + 2
                elif offset_day and checking_time:
                    # Repeat flag only set for the previous day; shift back one day.
                    xbegin -= 1440
                    xend -= 1440
                    if begin2 < xbegin <= end2:
                        if xend < end2:
                            # recording within event
                            time_match = (xend - xbegin) * 60
                            type = type_offset + 3
                        else:
                            # recording last part of event
                            time_match = (end2 - xbegin) * 60
                            type = type_offset + 1
                    elif xbegin <= begin2 <= xend:
                        if xend < end2:
                            # recording first part of event
                            time_match = (xend - begin2) * 60
                            type = type_offset + 4
                        else:
                            # recording whole event
                            time_match = (end2 - begin2) * 60
                            type = type_offset + 2
            else:
                # One-shot timer: plain interval comparison in seconds.
                if begin < timer_begin <= end:
                    if timer_end < end:
                        # recording within event
                        time_match = timer_end - timer_begin
                        type = type_offset + 3
                    else:
                        # recording last part of event
                        time_match = end - timer_begin
                        type = type_offset + 1
                elif timer_begin <= begin <= timer_end:
                    if timer_end < end:
                        # recording first part of event
                        time_match = timer_end - begin
                        type = type_offset + 4
                    else:
                        # recording whole event
                        time_match = end - begin
                        type = type_offset + 2
            if time_match:
                if type in (2,7,12,17,22,27):
                    # When full recording do not look further
                    returnValue = (time_match, [type])
                    break
                elif returnValue:
                    if type not in returnValue[1]:
                        returnValue[1].append(type)
                else:
                    returnValue = (time_match, [type])
    return returnValue
def removeEntry(self, entry):
    """Abort *entry*, remove it from the processed list and persist.

    The entry is first de-flagged for repetition and auto-increase so the
    abort cannot re-enqueue it, then the state machine is advanced; the
    entry is expected to end up in processed_timers.
    """
    print "[Timer] Remove " + str(entry)
    # avoid re-enqueuing
    entry.repeated = False
    # abort timer.
    # this sets the end time to current time, so timer will be stopped.
    entry.autoincrease = False
    entry.abort()
    if entry.state != entry.StateEnded:
        self.timeChanged(entry)
    print "state: ", entry.state
    print "in processed: ", entry in self.processed_timers
    print "in running: ", entry in self.timer_list
    # autoincrease instanttimer if possible
    if not entry.dontSave:
        for x in self.timer_list:
            if x.setAutoincreaseEnd():
                self.timeChanged(x)
    # now the timer should be in the processed_timers list. remove it from there.
    self.processed_timers.remove(entry)
    self.saveTimer()
def shutdown(self):
    """Persist the timer list; invoked on shutdown of the timer subsystem."""
    self.saveTimer()
| popazerty/blackhole-vuplus | RecordTimer.py | Python | gpl-2.0 | 43,007 |
# -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import warnings
from hyperspy.misc.test_utils import ignore_warning, assert_warns, all_warnings
def warnsA():
    """Emit a UserWarning with the message "Warning A!"."""
    warnings.warn("Warning A!", UserWarning)
def warnsB():
    """Emit a DeprecationWarning with the message "Warning B!"."""
    warnings.warn("Warning B!", DeprecationWarning)
def warnsC():
    """Emit a warning of the default category (UserWarning), message "Warning C!"."""
    warnings.warn("Warning C!")
def test_ignore_full_message():
    """A filter giving the complete message text must suppress the warning."""
    with all_warnings():
        warnings.simplefilter("error")  # any unfiltered warning becomes an error
        with ignore_warning(message="Warning A!"):
            warnsA()
        with ignore_warning(message="Warning B!"):
            warnsB()
        with ignore_warning(message="Warning C!"):
            warnsC()
def test_ignore_partial_message():
    """A filter matching only the start of the message must still suppress it."""
    with all_warnings():
        warnings.simplefilter("error")
        with ignore_warning(message="Warning"):
            warnsA()
            warnsB()
            warnsC()
def test_ignore_regex_message():
    """Message filters are regular expressions ("Warning .?!" matches all three)."""
    with all_warnings():
        warnings.simplefilter("error")
        with ignore_warning(message="Warning .?!"):
            warnsA()
            warnsB()
            warnsC()
def test_ignore_message_fails():
    """Filters narrower or wider than the emitted message must not suppress it."""
    # Regex matches A and B only; C must still escalate to an error.
    with all_warnings():
        warnings.simplefilter("error")
        with ignore_warning(message="Warning [AB]!"):
            warnsA()
            warnsB()
            raised = False
            try:
                warnsC()
            except UserWarning as err:
                raised = True
                assert str(err) == "Warning C!"
            if not raised:
                raise ValueError("Expected warning to give error!")
    # A filter longer than the full message must not match it either.
    with all_warnings():
        warnings.simplefilter("error")
        with ignore_warning(message="Warning A! Too much"):
            raised = False
            try:
                warnsA()
            except UserWarning as err:
                raised = True
                assert str(err) == "Warning A!"
            if not raised:
                raise ValueError("Expected warning to give error!")
def test_ignore_type():
    """Category-based filters suppress exactly the given warning class."""
    with all_warnings():
        warnings.simplefilter("error")
        with ignore_warning(category=UserWarning):
            warnsA()
            warnsC()
        with ignore_warning(category=DeprecationWarning):
            warnsB()
def test_ignore_type_fails():
    """A DeprecationWarning must escape a filter that only ignores UserWarning."""
    with all_warnings():
        warnings.simplefilter("error")
        with ignore_warning(category=UserWarning):
            raised = False
            try:
                warnsB()  # emits DeprecationWarning, not UserWarning
            except DeprecationWarning as err:
                raised = True
                assert str(err) == "Warning B!"
            if not raised:
                raise ValueError("Expected warning to give error!")
def test_assert_warns_full_message():
    """assert_warns accepts one full message or a list matching several."""
    with all_warnings():
        warnings.simplefilter("error")
        with assert_warns(message="Warning A!"):
            warnsA()
        with assert_warns(message="Warning B!"):
            warnsB()
        with assert_warns(message="Warning C!"):
            warnsC()
        with assert_warns(message=["Warning A!", "Warning B!", "Warning C!"]):
            warnsA()
            warnsB()
            warnsC()
def test_assert_warns_partial_message():
    """assert_warns matches on the start of the message as well."""
    with all_warnings():
        warnings.simplefilter("error")
        with assert_warns(message="Warning"):
            warnsA()
            warnsB()
            warnsC()
def test_assert_warns_regex_message():
    """assert_warns treats the expected message as a regular expression."""
    with all_warnings():
        warnings.simplefilter("error")
        with assert_warns(message="Warning .?!"):
            warnsA()
            warnsB()
            warnsC()
def test_assert_warns_message_fails():
    """assert_warns must raise ValueError when no emitted warning matches.

    Failure of the test itself is reported with AssertionError in both
    branches so it can never be confused with the ValueError that
    assert_warns raises (previously the second branch raised ValueError).
    """
    with all_warnings():
        warnings.simplefilter("error")
        try:
            with assert_warns(message="Warning [AB]!"):
                warnsC()  # does not match the [AB] pattern
        except ValueError:
            pass
        else:
            raise AssertionError("ValueError expected!")
    with all_warnings():
        warnings.simplefilter("error")
        try:
            with assert_warns(message="Warning A! Too much"):
                warnsA()  # emitted message is shorter than the pattern
        except ValueError:
            pass
        else:
            raise AssertionError("ValueError expected!")
def test_assert_warns_type():
    """assert_warns matches on the warning category."""
    with all_warnings():
        warnings.simplefilter("error")
        with assert_warns(category=UserWarning):
            warnsA()
            warnsC()
        with assert_warns(category=DeprecationWarning):
            warnsB()
def test_assert_warns_type_fails():
    """assert_warns must raise when only a different category is emitted."""
    with all_warnings():
        warnings.simplefilter("error")
        failed = False
        try:
            with assert_warns(category=UserWarning):
                warnsB()  # emits DeprecationWarning, not UserWarning
        except ValueError:
            failed = True
        if not failed:
            raise ValueError("Expected warning to give error!")
| sem-geologist/hyperspy | hyperspy/tests/misc/test_test_utils.py | Python | gpl-3.0 | 5,176 |
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import sys
import copy
from ansible.module_utils.basic import AnsibleFallbackNotFound
from ansible.module_utils.junos import junos_argument_spec
from ansible.module_utils.six import iteritems
from ansible.plugins import connection_loader, module_loader
from ansible.plugins.action.normal import ActionModule as _ActionModule
from ansible.utils.path import unfrackpath
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ActionModule(_ActionModule):
    """Action plugin for junos modules.

    For modules flagged USE_PERSISTENT_CONNECTION it builds a persistent
    netconf (or network_cli for junos_netconf) connection from the
    provider settings, exposes the control socket via ``ansible_socket``
    and then delegates to the normal action runner.
    """

    def run(self, tmp=None, task_vars=None):
        """Validate connection=local, prepare the persistent connection
        and execute the module."""
        if self._play_context.connection != 'local':
            return dict(
                failed=True,
                msg='invalid connection specified, expected connection=local, '
                'got %s' % self._play_context.connection
            )
        module = module_loader._load_module_source(self._task.action, module_loader.find_plugin(self._task.action))
        if not getattr(module, 'USE_PERSISTENT_CONNECTION', False):
            # Module manages its own transport; nothing to set up here.
            return super(ActionModule, self).run(tmp, task_vars)
        provider = self.load_provider()
        # Build a play context for the device, falling back to play-level
        # values wherever the provider leaves a setting unset.
        pc = copy.deepcopy(self._play_context)
        pc.network_os = 'junos'
        pc.remote_addr = provider['host'] or self._play_context.remote_addr
        if self._task.action == 'junos_netconf':
            pc.connection = 'network_cli'
            pc.port = provider['port'] or self._play_context.port or 22
        else:
            pc.connection = 'netconf'
            pc.port = provider['port'] or self._play_context.port or 830
        pc.remote_user = provider['username'] or self._play_context.connection_user
        pc.password = provider['password'] or self._play_context.password
        pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
        pc.timeout = provider['timeout'] or self._play_context.timeout
        display.vvv('using connection plugin %s' % pc.connection, pc.remote_addr)
        connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)
        socket_path = self._get_socket_path(pc)
        display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
        if not os.path.exists(socket_path):
            # start the connection if it isn't started
            if pc.connection == 'netconf':
                rc, out, err = connection.exec_command('open_session()')
            else:
                rc, out, err = connection.exec_command('open_shell()')
            if rc != 0:
                return {'failed': True,
                        'msg': 'unable to open shell. Please see: ' +
                        'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell',
                        'rc': rc}
        elif pc.connection == 'network_cli':
            # make sure we are in the right cli context which should be
            # enable mode and not config module
            rc, out, err = connection.exec_command('prompt()')
            while str(out).strip().endswith(')#'):
                display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
                connection.exec_command('exit')
                rc, out, err = connection.exec_command('prompt()')
        task_vars['ansible_socket'] = socket_path
        result = super(ActionModule, self).run(tmp, task_vars)
        return result

    def _get_socket_path(self, play_context):
        """Return the control-socket path for this device/user/port combo."""
        ssh = connection_loader.get('ssh', class_only=True)
        path = unfrackpath("$HOME/.ansible/pc")
        # use play_context.connection instead of play_context.port to avoid
        # collision if netconf is listening on port 22
        #cp = ssh._create_control_path(play_context.remote_addr, play_context.connection, play_context.remote_user)
        cp = ssh._create_control_path(play_context.remote_addr, play_context.port, play_context.remote_user)
        return cp % dict(directory=path)

    def load_provider(self):
        """Merge the task's provider dict with top-level task args and
        declared fallbacks; missing keys end up as None."""
        provider = self._task.args.get('provider', {})
        for key, value in iteritems(junos_argument_spec):
            if key != 'provider' and key not in provider:
                if key in self._task.args:
                    provider[key] = self._task.args[key]
                elif 'fallback' in value:
                    provider[key] = self._fallback(value['fallback'])
                elif key not in provider:
                    # NOTE(review): this condition is always true inside the
                    # outer "key not in provider" guard -- kept for safety.
                    provider[key] = None
        return provider

    def _fallback(self, fallback):
        """Run an argument-spec fallback strategy (e.g. env lookup).

        Returns the strategy's value, or None when it raises
        AnsibleFallbackNotFound.
        """
        strategy = fallback[0]
        args = []
        kwargs = {}
        for item in fallback[1:]:
            if isinstance(item, dict):
                kwargs = item
            else:
                args = item
        try:
            return strategy(*args, **kwargs)
        except AnsibleFallbackNotFound:
            pass
| fedorpatlin/ansible | lib/ansible/plugins/action/junos.py | Python | gpl-3.0 | 5,656 |
"""
Utilities for instructor unit tests
"""
import datetime
import json
import random
import six
from pytz import UTC
from util.date_utils import get_default_time_display
class FakeInfo(object):
    """Parent class for faking objects used in tests.

    Subclasses declare FEATURES; every listed feature is initialised to
    the sentinel value u'expected'.
    """
    FEATURES = []

    def __init__(self):
        for name in self.FEATURES:
            setattr(self, name, u'expected')

    def to_dict(self):
        """Return a dict mapping each feature name to its current value."""
        return dict((name, getattr(self, name)) for name in self.FEATURES)
class FakeContentTask(FakeInfo):
    """Fake task info needed for email content list."""

    FEATURES = [
        'task_input',
        'task_output',
        'requester',
    ]

    def __init__(self, email_id, num_sent, num_failed, sent_to):
        # NOTE: *sent_to* is accepted for signature compatibility but unused.
        super(FakeContentTask, self).__init__()
        self.task_input = json.dumps({'email_id': email_id})
        self.task_output = json.dumps({'succeeded': num_sent, 'failed': num_failed})
        self.requester = 'expected'

    def make_invalid_input(self):
        """Corrupt the task input field to test errors"""
        self.task_input = "THIS IS INVALID JSON"
class FakeEmail(FakeInfo):
    """Corresponding fake email for a fake task."""

    FEATURES = [
        'subject',
        'html_message',
        'id',
        'created',
    ]

    def __init__(self, email_id):
        super(FakeEmail, self).__init__()
        self.id = six.text_type(email_id)  # pylint: disable=invalid-name
        # Pick a random creation date; the draw order (year..minute) is kept
        # identical so seeded runs stay reproducible.
        rand_year = random.randint(1950, 2000)
        rand_month = random.randint(1, 12)
        rand_day = random.randint(1, 28)  # <= 28 is valid in every month
        rand_hour = random.randint(0, 23)
        rand_minute = random.randint(0, 59)
        self.created = datetime.datetime(
            rand_year, rand_month, rand_day, rand_hour, rand_minute, tzinfo=UTC
        )
        self.targets = FakeTargetGroup()
class FakeTarget(object):
    """Corresponding fake target for a fake email."""

    target_type = "expected"

    def long_display(self):
        """Mocks out the model method; simply echoes the target type."""
        return self.target_type
class FakeTargetGroup(object):
    """ Mocks out the M2M relationship between FakeEmail and FakeTarget """
    def all(self):
        """ Mocks out a django method """
        # A single FakeTarget suffices for the views under test.
        return [FakeTarget()]
class FakeEmailInfo(FakeInfo):
    """Fake email information object."""

    FEATURES = [
        u'created',
        u'sent_to',
        u'email',
        u'number_sent',
        u'requester',
    ]

    EMAIL_FEATURES = [
        u'subject',
        u'html_message',
        u'id'
    ]

    def __init__(self, fake_email, num_sent, num_failed):
        super(FakeEmailInfo, self).__init__()
        self.created = get_default_time_display(fake_email.created)
        # Human-readable delivery summary, e.g. "3 sent, 1 failed".
        summary_parts = ['%s sent' % num_sent]
        if num_failed > 0:
            summary_parts.append('%s failed' % num_failed)
        self.number_sent = ', '.join(summary_parts)
        source = fake_email.to_dict()
        self.email = dict((name, source[name]) for name in self.EMAIL_FEATURES)
        self.requester = u'expected'
        self.sent_to = [u'expected']
| cpennington/edx-platform | lms/djangoapps/instructor/tests/utils.py | Python | agpl-3.0 | 3,206 |
from django.db import migrations, models
def add_default_enable(apps, schema_editor):
    """Seed a single enabled ForumsConfig row when none exists yet."""
    ForumsConfig = apps.get_model("django_comment_common", "ForumsConfig")
    if ForumsConfig.objects.count() == 0:
        # By default we want the comment client enabled, but this is *not* enabling
        # discussions themselves by default, as in showing the Discussions tab, or
        # inline discussions, etc. It just allows the underlying service client to work.
        ForumsConfig(enabled=True).save()
def reverse_noop(apps, schema_editor):
    """Reverse migration is a no-op: any existing config rows are kept."""
    return
class Migration(migrations.Migration):
    """Data migration: enable the forums service client by default."""

    dependencies = [
        ('django_comment_common', '0002_forumsconfig'),
    ]

    operations = [
        migrations.RunPython(add_default_enable, reverse_code=reverse_noop),
    ]
| eduNEXT/edx-platform | openedx/core/djangoapps/django_comment_common/migrations/0003_enable_forums.py | Python | agpl-3.0 | 849 |
# -*- coding: utf-8 -*-
# ##########################################################
# ## make sure administrator is on localhost
# ###########################################################
import os
import socket
import datetime
import copy
import gluon.contenttype
import gluon.fileutils
# ## critical --- make a copy of the environment
# Captured so that admin-entered queries can later be evaluated against
# the same globals (db objects, models) this controller sees.
global_env = copy.copy(globals())
global_env['datetime'] = datetime

http_host = request.env.http_host.split(':')[0]
remote_addr = request.env.remote_addr
try:
    hosts = (http_host, socket.gethostname(),
             socket.gethostbyname(http_host),
             '::1','127.0.0.1','::ffff:127.0.0.1')
except:
    # name resolution can fail (e.g. offline); fall back to the bare host
    hosts = (http_host, )

# Behind a proxy or on HTTPS mark the session secure; otherwise only
# allow access from the local machine.
if request.env.http_x_forwarded_for or request.env.wsgi_url_scheme\
    in ['https', 'HTTPS']:
    session.secure()
elif (remote_addr not in hosts) and (remote_addr != "127.0.0.1"):
    raise HTTP(200, T('appadmin is disabled because insecure channel'))

# Only the web2py administrator may use appadmin.
if not gluon.fileutils.check_credentials(request):
    redirect(URL(a='admin', c='default', f='index'))

ignore_rw = True
response.view = 'appadmin.html'
response.menu = [[T('design'), False, URL('admin', 'default', 'design',
                 args=[request.application])], [T('db'), False,
                 URL('index')], [T('state'), False,
                 URL('state')], [T('cache'), False,
                 URL('ccache')]]
# ##########################################################
# ## auxiliary functions
# ###########################################################
def get_databases(request):
    """Scan the captured controller environment and return {name: db}."""
    found = {}
    for name, obj in global_env.items():
        is_db = False
        try:
            is_db = isinstance(obj, GQLDB)
        except:
            # GQLDB may be undefined outside GAE; fall back to SQLDB.
            is_db = isinstance(obj, SQLDB)
        if is_db:
            found[name] = obj
    return found
# Map of database names to DAL instances found in this controller's environment.
databases = get_databases(None)
def eval_in_global_env(text):
    """Evaluate *text* as a Python expression in the captured environment.

    NOTE(security): this executes admin-supplied input; appadmin is only
    reachable by the authenticated administrator over a trusted channel.
    """
    exec ('_ret=%s' % text, {}, global_env)
    return global_env['_ret']
def get_database(request):
    """Return the database named by request.args[0], or flash and redirect."""
    if request.args and request.args[0] in databases:
        return eval_in_global_env(request.args[0])
    else:
        session.flash = T('invalid request')
        redirect(URL('index'))
def get_table(request):
    """Return (db, tablename) for request.args[0]/[1], or flash and redirect."""
    db = get_database(request)
    if len(request.args) > 1 and request.args[1] in db.tables:
        return (db, request.args[1])
    else:
        session.flash = T('invalid request')
        redirect(URL('index'))
def get_query(request):
    """Evaluate request.vars.query into a query object; None when invalid."""
    try:
        return eval_in_global_env(request.vars.query)
    except Exception:
        return None
def query_by_table_type(tablename,db,request=request):
    """Build a default 'select all rows' query string for *tablename*.

    Keyed tables (those with _primarykey) test the first key field with an
    always-true condition matched to its type; other tables use the auto
    'id' field.
    """
    keyed = hasattr(db[tablename],'_primarykey')
    if keyed:
        firstkey = db[tablename][db[tablename]._primarykey[0]]
        cond = '>0'
        if firstkey.type in ['string', 'text']:
            cond = '!=""'
        qry = '%s.%s.%s%s' % (request.args[0], request.args[1], firstkey.name, cond)
    else:
        qry = '%s.%s.id>0' % tuple(request.args[:2])
    return qry
# ##########################################################
# ## list all databases and tables
# ###########################################################
def index():
    """List all databases known to this application."""
    return dict(databases=databases)
# ##########################################################
# ## insert a new record
# ###########################################################
def insert():
    """Show and process an insert form for the requested table."""
    (db, table) = get_table(request)
    form = SQLFORM(db[table], ignore_rw=ignore_rw)
    if form.accepts(request.vars, session):
        response.flash = T('new record inserted')
    return dict(form=form,table=db[table])
# ##########################################################
# ## list all records in table and insert new record
# ###########################################################
def download():
    """Stream an uploaded file belonging to the selected database."""
    import os
    db = get_database(request)
    return response.download(request,db)
def csv():
    """Export the rows matching the current query as a CSV attachment.

    NOTE: this action shadows the stdlib ``csv`` module name within the
    controller namespace.
    """
    import gluon.contenttype
    response.headers['Content-Type'] = \
        gluon.contenttype.contenttype('.csv')
    db = get_database(request)
    query = get_query(request)
    if not query:
        return None
    response.headers['Content-disposition'] = 'attachment; filename=%s_%s.csv'\
        % tuple(request.vars.query.split('.')[:2])
    return str(db(query).select())
def import_csv(table, file):
    """Bulk-load CSV data into *table* (the *file* arg shadows a py2 builtin)."""
    table.import_from_csv_file(file)
def select():
    """Browse a database: normalise the query string, optionally run a bulk
    update/delete, accept a CSV upload, and page results 100 at a time."""
    import re
    db = get_database(request)
    dbname = request.args[0]
    # field=value shortcut; keyed tables accept non-numeric values too.
    regex = re.compile('(?P<table>\w+)\.(?P<field>\w+)=(?P<value>\d+)')
    if len(request.args)>1 and hasattr(db[request.args[1]],'_primarykey'):
        regex = re.compile('(?P<table>\w+)\.(?P<field>\w+)=(?P<value>.+)')
    if request.vars.query:
        match = regex.match(request.vars.query)
        if match:
            request.vars.query = '%s.%s.%s==%s' % (request.args[0],
                    match.group('table'), match.group('field'),
                    match.group('value'))
    else:
        # no query supplied: reuse the last one stored in the session
        request.vars.query = session.last_query
    query = get_query(request)
    if request.vars.start:
        start = int(request.vars.start)
    else:
        start = 0
    nrows = 0
    stop = start + 100
    table = None
    rows = []
    orderby = request.vars.orderby
    if orderby:
        orderby = dbname + '.' + orderby
        if orderby == session.last_orderby:
            # clicking the same column twice toggles the sort direction
            if orderby[0] == '~':
                orderby = orderby[1:]
            else:
                orderby = '~' + orderby
    session.last_orderby = orderby
    session.last_query = request.vars.query
    form = FORM(TABLE(TR(T('Query:'), '', INPUT(_style='width:400px',
                _name='query', _value=request.vars.query or '',
                requires=IS_NOT_EMPTY(error_message=T("Cannot be empty")))), TR(T('Update:'),
                INPUT(_name='update_check', _type='checkbox',
                value=False), INPUT(_style='width:400px',
                _name='update_fields', _value=request.vars.update_fields
                or '')), TR(T('Delete:'), INPUT(_name='delete_check',
                _class='delete', _type='checkbox', value=False), ''),
                TR('', '', INPUT(_type='submit', _value='submit'))),
                _action=URL(r=request,args=request.args))
    if request.vars.csvfile != None:
        try:
            import_csv(db[request.vars.table],
                       request.vars.csvfile.file)
            response.flash = T('data uploaded')
        except Exception, e:
            response.flash = DIV(T('unable to parse csv file'),PRE(str(e)))
    if form.accepts(request.vars, formname=None):
        # regex = re.compile(request.args[0] + '\.(?P<table>\w+)\.id\>0')
        regex = re.compile(request.args[0] + '\.(?P<table>\w+)\..+')
        match = regex.match(form.vars.query.strip())
        if match:
            table = match.group('table')
        try:
            nrows = db(query).count()
            if form.vars.update_check and form.vars.update_fields:
                db(query).update(**eval_in_global_env('dict(%s)'
                                 % form.vars.update_fields))
                response.flash = T('%s rows updated', nrows)
            elif form.vars.delete_check:
                db(query).delete()
                response.flash = T('%s rows deleted', nrows)
            # re-count: the update/delete above may have changed the set
            nrows = db(query).count()
            if orderby:
                rows = db(query).select(limitby=(start, stop),
                                        orderby=eval_in_global_env(orderby))
            else:
                rows = db(query).select(limitby=(start, stop))
        except Exception, e:
            (rows, nrows) = ([], 0)
            response.flash = DIV(T('Invalid Query'),PRE(str(e)))
    return dict(
        form=form,
        table=table,
        start=start,
        stop=stop,
        nrows=nrows,
        rows=rows,
        query=request.vars.query,
        )
# ##########################################################
# ## edit delete one record
# ###########################################################
def update():
    """Edit or delete a single record of the selected table.

    Keyed tables locate the record by primary-key values in request.vars;
    other tables use the id in request.args(2).  Redirects back to select
    when the record is missing or after a successful submit.
    """
    (db, table) = get_table(request)
    keyed = hasattr(db[table],'_primarykey')
    record = None
    if keyed:
        key = [f for f in request.vars if f in db[table]._primarykey]
        if key:
            record = db(db[table][key[0]] == request.vars[key[0]]).select().first()
    else:
        record = db(db[table].id == request.args(2)).select().first()
    if not record:
        qry = query_by_table_type(table, db)
        session.flash = T('record does not exist')
        redirect(URL('select', args=request.args[:1],
                 vars=dict(query=qry)))
    if keyed:
        # primary-key fields must never be edited
        for k in db[table]._primarykey:
            db[table][k].writable=False
    form = SQLFORM(db[table], record, deletable=True, delete_label=T('Check to delete'),
                   ignore_rw=ignore_rw and not keyed,
                   linkto=URL('select',
                   args=request.args[:1]), upload=URL(r=request,
                   f='download', args=request.args[:1]))
    if form.accepts(request.vars, session):
        session.flash = T('done!')
        qry = query_by_table_type(table, db)
        redirect(URL('select', args=request.args[:1],
                 vars=dict(query=qry)))
    return dict(form=form,table=db[table])
# ##########################################################
# ## get global variables
# ###########################################################
def state():
    """Render the state page; all data comes from the appadmin view itself."""
    return dict()
def ccache():
    """Show RAM/disk cache statistics and offer buttons to clear them.

    Sizes and object counts are only available when guppy is installed;
    'oldest' values are reported as (hours, minutes, seconds) ago.
    """
    form = FORM(
        P(TAG.BUTTON("Clear CACHE?", _type="submit", _name="yes", _value="yes")),
        P(TAG.BUTTON("Clear RAM", _type="submit", _name="ram", _value="ram")),
        P(TAG.BUTTON("Clear DISK", _type="submit", _name="disk", _value="disk")),
    )
    if form.accepts(request.vars, session):
        clear_ram = False
        clear_disk = False
        session.flash = ""
        if request.vars.yes:
            clear_ram = clear_disk = True
        if request.vars.ram:
            clear_ram = True
        if request.vars.disk:
            clear_disk = True
        if clear_ram:
            cache.ram.clear()
            session.flash += "Ram Cleared "
        if clear_disk:
            cache.disk.clear()
            session.flash += "Disk Cleared"
        redirect(URL(r=request))
    try:
        from guppy import hpy; hp=hpy()
    except ImportError:
        hp = False
    import shelve, os, copy, time, math
    from gluon import portalocker
    ram = {
        'bytes': 0,
        'objects': 0,
        'hits': 0,
        'misses': 0,
        'ratio': 0,
        'oldest': time.time()
    }
    disk = copy.copy(ram)
    total = copy.copy(ram)
    for key, value in cache.ram.storage.items():
        if isinstance(value, dict):
            # NOTE(review): hits/misses are overwritten per stats entry,
            # not accumulated -- verify this matches the storage layout.
            ram['hits'] = value['hit_total'] - value['misses']
            ram['misses'] = value['misses']
            try:
                ram['ratio'] = ram['hits'] * 100 / value['hit_total']
            except (KeyError, ZeroDivisionError):
                ram['ratio'] = 0
        else:
            if hp:
                ram['bytes'] += hp.iso(value[1]).size
                ram['objects'] += hp.iso(value[1]).count
            if value[0] < ram['oldest']:
                ram['oldest'] = value[0]
    # Hold the cache lock while reading the shelve so stats are consistent.
    locker = open(os.path.join(request.folder,
                  'cache/cache.lock'), 'a')
    portalocker.lock(locker, portalocker.LOCK_EX)
    disk_storage = shelve.open(
        os.path.join(request.folder,
        'cache/cache.shelve'))
    for key, value in disk_storage.items():
        if isinstance(value, dict):
            disk['hits'] = value['hit_total'] - value['misses']
            disk['misses'] = value['misses']
            try:
                disk['ratio'] = disk['hits'] * 100 / value['hit_total']
            except (KeyError, ZeroDivisionError):
                disk['ratio'] = 0
        else:
            if hp:
                disk['bytes'] += hp.iso(value[1]).size
                disk['objects'] += hp.iso(value[1]).count
            if value[0] < disk['oldest']:
                disk['oldest'] = value[0]
    portalocker.unlock(locker)
    locker.close()
    disk_storage.close()
    total['bytes'] = ram['bytes'] + disk['bytes']
    total['objects'] = ram['objects'] + disk['objects']
    total['hits'] = ram['hits'] + disk['hits']
    total['misses'] = ram['misses'] + disk['misses']
    try:
        total['ratio'] = total['hits'] * 100 / (total['hits'] + total['misses'])
    except (KeyError, ZeroDivisionError):
        total['ratio'] = 0
    if disk['oldest'] < ram['oldest']:
        total['oldest'] = disk['oldest']
    else:
        total['oldest'] = ram['oldest']
    def GetInHMS(seconds):
        # Convert a duration in seconds to an (hours, minutes, seconds) tuple.
        hours = math.floor(seconds / 3600)
        seconds -= hours * 3600
        minutes = math.floor(seconds / 60)
        seconds -= minutes * 60
        seconds = math.floor(seconds)
        return (hours, minutes, seconds)
    ram['oldest'] = GetInHMS(time.time() - ram['oldest'])
    disk['oldest'] = GetInHMS(time.time() - disk['oldest'])
    total['oldest'] = GetInHMS(time.time() - total['oldest'])
    return dict(form=form, total=total,
                ram=ram, disk=disk)
| lucasdavila/web2py-appreport | controllers/appadmin.py | Python | lgpl-3.0 | 13,298 |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes providing REST data sources for common CourseBuilder items."""
__author__ = 'Mike Gainer (mgainer@google.com)'
from common import schema_fields
from common import utils
from models import courses
from models import data_sources
from models import jobs
from models import models
from models import transforms
from tools import verify
class AssessmentsDataSource(data_sources.AbstractSmallRestDataSource):
    """REST data source exposing the course's assessment units."""

    @classmethod
    def get_name(cls):
        return 'assessments'

    @classmethod
    def get_title(cls):
        return 'Assessments'

    @classmethod
    def get_schema(cls, unused_app_context, unused_catch_and_log):
        """Describe the fields emitted for each assessment row."""
        registry = schema_fields.FieldRegistry(
            'Analytics',
            description='Sets of questions determining student skill')
        registry.add_property(schema_fields.SchemaField(
            'unit_id', 'Unit ID', 'integer',
            description='Key uniquely identifying this particular assessment'))
        registry.add_property(schema_fields.SchemaField(
            'title', 'Title', 'string',
            description='Human-readable title describing the assessment'))
        registry.add_property(schema_fields.SchemaField(
            'weight', 'Weight', 'number',
            'Scalar indicating how the results of this assessment are '
            'to be weighted versus the results of peer assessments.'))
        registry.add_property(schema_fields.SchemaField(
            'html_check_answers', 'Check Answers', 'boolean',
            'Whether students may check their answers before submitting '
            'the assessment.'))
        registry.add_property(schema_fields.SchemaField(
            'properties', 'Properties', 'object',
            'Set of key/value additional properties, not further defined.'))
        return registry.get_json_schema_dict()['properties']

    @classmethod
    def fetch_values(cls, app_context, *args, **kwargs):
        """Return (rows, 0) with one dict per assessment unit in the course."""
        course = courses.Course(handler=None, app_context=app_context)
        rows = [{
            'unit_id': unit.unit_id,
            'title': unit.title,
            'weight': unit.weight,
            'html_check_answers': unit.html_check_answers,
            'properties': unit.properties,
        } for unit in course.get_units_of_type(verify.UNIT_TYPE_ASSESSMENT)]
        return rows, 0
class UnitsDataSource(data_sources.AbstractSmallRestDataSource):
    """REST data source exposing the course's units."""

    @classmethod
    def get_name(cls):
        return 'units'

    @classmethod
    def get_title(cls):
        return 'Units'

    @classmethod
    def get_schema(cls, unused_app_context, unused_catch_and_log):
        """Describe the fields emitted for each unit row."""
        registry = schema_fields.FieldRegistry(
            'Units',
            description='Sets of lessons providing course content')
        registry.add_property(schema_fields.SchemaField(
            'unit_id', 'Unit ID', 'integer',
            description='Key uniquely identifying this particular unit'))
        registry.add_property(schema_fields.SchemaField(
            'title', 'Title', 'string',
            description='Human-readable title describing the unit'))
        registry.add_property(schema_fields.SchemaField(
            'properties', 'Properties', 'object',
            'Set of key/value additional properties, not further defined.'))
        return registry.get_json_schema_dict()['properties']

    @classmethod
    def fetch_values(cls, app_context, *args, **kwargs):
        """Return (rows, 0) with one dict per unit in the course."""
        course = courses.Course(handler=None, app_context=app_context)
        rows = [{
            'unit_id': unit.unit_id,
            'title': unit.title,
            'properties': unit.properties,
        } for unit in course.get_units_of_type(verify.UNIT_TYPE_UNIT)]
        return rows, 0
class LessonsDataSource(data_sources.AbstractSmallRestDataSource):
    """REST data source exposing the lessons of all units in the course."""

    @classmethod
    def get_name(cls):
        return 'lessons'

    @classmethod
    def get_title(cls):
        return 'Lessons'

    @classmethod
    def get_schema(cls, unused_app_context, unused_catch_and_log):
        """Describe the fields emitted for each lesson row."""
        registry = schema_fields.FieldRegistry(
            'Lessons',
            description='Sets of lessons providing course content')
        registry.add_property(schema_fields.SchemaField(
            'lesson_id', 'Unit ID', 'integer',
            description='Key uniquely identifying which lesson this is'))
        registry.add_property(schema_fields.SchemaField(
            'unit_id', 'Unit ID', 'integer',
            description='Key uniquely identifying unit lesson is in'))
        registry.add_property(schema_fields.SchemaField(
            'title', 'Title', 'string',
            description='Human-readable title describing the unit'))
        registry.add_property(schema_fields.SchemaField(
            'scored', 'Scored', 'boolean',
            'Boolean: Whether questions in this lesson count for scoring.'))
        registry.add_property(schema_fields.SchemaField(
            'has_activity', 'Has Activity', 'boolean',
            'Boolean: does this lesson contain an activity?'))
        registry.add_property(schema_fields.SchemaField(
            'activity_title', 'Activity Title', 'string',
            'Title of the activity (if lesson has an activity)'))
        return registry.get_json_schema_dict()['properties']

    @classmethod
    def fetch_values(cls, app_context, *args, **kwargs):
        """Return (rows, 0) with one dict per lesson across all units."""
        course = courses.Course(handler=None, app_context=app_context)
        rows = [{
            # NOTE(review): lesson_id mirrors lesson.unit_id in the original
            # code; presumably the lesson's own id lives elsewhere — confirmed
            # byte-compatible here.
            'lesson_id': lesson.unit_id,
            'unit_id': lesson.unit_id,
            'title': lesson.title,
            'scored': lesson.scored,
            'has_activity': lesson.has_activity,
            'activity_title': lesson.activity_title,
        } for lesson in course.get_lessons_for_all_units()]
        return rows, 0
class StudentAssessmentScoresDataSource(
    data_sources.AbstractDbTableRestDataSource):
    """Unpack student assessment scores from student record.

    NOTE: Filtering/ordering, if present, will be done based on Student
    attributes, not scores.  (The scores are in an encoded string in a
    field which is not indexed anyhow.)  The only meaningful field to
    index or filter on is enrolled_on.
    """

    @classmethod
    def get_name(cls):
        return 'assessment_scores'

    @classmethod
    def get_title(cls):
        return 'Assessment Scores'

    @classmethod
    def get_context_class(cls):
        return data_sources.DbTableContext

    @classmethod
    def get_schema(cls, unused_app_context, unused_catch_and_log):
        """Describe the fields emitted for each (student, assessment) score."""
        reg = schema_fields.FieldRegistry('Unit',
                                          description='Course sub-components')
        reg.add_property(schema_fields.SchemaField(
            'user_id', 'User ID', 'string',
            description='Student ID encrypted with a session-specific key'))
        reg.add_property(schema_fields.SchemaField(
            'id', 'Unit ID', 'string',
            description='ID of assessment for this score.'))
        reg.add_property(schema_fields.SchemaField(
            'title', 'Title', 'string',
            description='Title of the assessment for this score.'))
        reg.add_property(schema_fields.SchemaField(
            'score', 'Score', 'integer',
            description='Value from 0 to 100 indicating % correct.'))
        # NOTE(review): 'weight' reuses the 'score' description in the
        # original; left unchanged since schema text is externally visible.
        reg.add_property(schema_fields.SchemaField(
            'weight', 'Weight', 'integer',
            description='Value from 0 to 100 indicating % correct.'))
        reg.add_property(schema_fields.SchemaField(
            'completed', 'Completed', 'boolean',
            description='Whether the assessment was completed.'))
        reg.add_property(schema_fields.SchemaField(
            'human_graded', 'Human Graded', 'boolean',
            description='Score is from a human (vs. automatic) grading.'))
        return reg.get_json_schema_dict()['properties']

    @classmethod
    def get_entity_class(cls):
        return models.Student

    @classmethod
    def _postprocess_rows(cls, app_context, source_context,
                          unused_schema, unused_log, unused_page_number,
                          students):
        """Flatten per-student score blobs into one row per attempted score.

        Each row gains an obscured 'user_id' plus 'user_rank' and
        'assessment_rank' fields ranking students and assessments by
        weighted score, 0..N-1 from lowest to highest.
        """
        transform_fn = cls._build_transform_fn(source_context)
        with utils.Namespace(app_context.get_namespace_name()):
            course = courses.Course(handler=None, app_context=app_context)
            students_with_scores = [s for s in students if s.scores]
            student_scores = []
            for student in students_with_scores:
                scores = course.get_all_scores(student)
                for score in scores:
                    if not score['attempted']:
                        continue
                    # user_id is PII and must be encoded to obscure its value.
                    score['user_id'] = transform_fn(student.user_id)
                    student_scores.append(score)

            # Provide a ranking by student, 0 ... #students, low to high.
            scored_students = {}
            for score in student_scores:
                current_score = scored_students.get(score['user_id'], 0)
                scored_students[score['user_id']] = current_score + (
                    score['weight'] * score['score'])
            # Sort with a key function instead of the Python-2-only
            # positional 'cmp' argument; ordering (by total score, stable
            # on ties) is unchanged.
            ranked_students = {kv[0]: rank for rank, kv in
                               enumerate(sorted(scored_students.items(),
                                                key=lambda kv: kv[1]))}

            # Provide a ranking by assessment, 0 ... #assessments, low to high
            scored_assessments = {}
            for score in student_scores:
                title = score['title']
                if title not in scored_assessments:
                    scored_assessments[title] = []
                scored_assessments[title].append(
                    score['weight'] * score['score'])
            for title in scored_assessments:
                avg = (sum(scored_assessments[title]) * 1.0 /
                       len(scored_assessments[title]))
                scored_assessments[title] = avg
            ranked_assessments = {kv[0]: rank for rank, kv in
                                  enumerate(sorted(scored_assessments.items(),
                                                   key=lambda kv: kv[1]))}

            for score in student_scores:
                score['user_rank'] = ranked_students[score['user_id']]
                score['assessment_rank'] = ranked_assessments[score['title']]
            return student_scores
class StudentsDataSource(data_sources.AbstractDbTableRestDataSource):
    """REST data source exposing Student records."""

    @classmethod
    def get_entity_class(cls):
        return models.Student

    @classmethod
    def get_name(cls):
        return 'students'

    @classmethod
    def get_title(cls):
        return 'Students'

    @classmethod
    def _postprocess_rows(cls, app_context, source_context, schema,
                          log, page_number, rows):
        """Strip datastore keys and expand the additional_fields blob."""
        records = super(StudentsDataSource, cls)._postprocess_rows(
            app_context, source_context, schema, log, page_number, rows)
        for record in records:
            # The datastore keys add volume without adding value; user_id is
            # the field used for matching to other items such as
            # StudentAnswersEntity records.
            record.pop('key')
            record.pop('key_by_user_id')
            raw = record.get('additional_fields')
            record['additional_fields'] = (
                transforms.nested_lists_as_string_to_dict(raw) if raw else {})
        return records
class LabelsOnStudentsGenerator(jobs.AbstractCountingMapReduceJob):
    """Map/reduce job counting how many students carry each label."""

    @staticmethod
    def get_description():
        return 'labels on students'

    @staticmethod
    def entity_class():
        return models.Student

    @staticmethod
    def map(student):
        # Emit one count per label id found on the student record.
        for label_id in utils.text_to_list(student.labels):
            yield (label_id, 1)
class LabelsOnStudentsDataSource(data_sources.AbstractRestDataSource):
    """REST data source reporting the per-label student counts."""

    @staticmethod
    def required_generators():
        return [LabelsOnStudentsGenerator]

    @classmethod
    def get_name(cls):
        return 'labels_on_students'

    @classmethod
    def get_title(cls):
        return 'Labels on Students'

    @classmethod
    def get_default_chunk_size(cls):
        # Zero means the data set is small enough that pagination is not
        # needed.
        return 0

    @classmethod
    def get_context_class(cls):
        return data_sources.NullContextManager

    @classmethod
    def get_schema(cls, app_context, log):
        """Describe the fields emitted for each label row."""
        registry = schema_fields.FieldRegistry(
            'Students By Label',
            description='Count of students marked with each label')
        registry.add_property(schema_fields.SchemaField(
            'title', 'Title', 'string',
            description='Name for this label'))
        registry.add_property(schema_fields.SchemaField(
            'description', 'Description', 'string',
            description='Human-readable text describing the label'))
        registry.add_property(schema_fields.SchemaField(
            'type', 'Type', 'string',
            description='Title of label group to which this label belongs.'))
        registry.add_property(schema_fields.SchemaField(
            'count', 'Count', 'integer',
            description='Number of students with this label applied'))
        return registry.get_json_schema_dict()['properties']

    @classmethod
    def fetch_values(cls, app_context, source_context, schema, log, page_number,
                     labels_on_students_job):
        """Return (rows, 0): one dict per known label with its student count."""
        label_counts = jobs.MapReduceJob.get_results(labels_on_students_job)
        counts = dict((int(pair[0]), int(pair[1])) for pair in label_counts)
        type_titles = dict(
            (lt.type, lt.title) for lt in models.LabelDTO.LABEL_TYPES)
        rows = [{
            'title': label.title,
            'description': label.description,
            'type': type_titles[label.type],
            # Labels never mapped by the job simply have zero students.
            'count': counts.get(label.id, 0),
        } for label in models.LabelDAO.get_all()]
        return rows, 0
| CSCI1200Course/csci1200OnlineCourse | modules/data_source_providers/rest_providers.py | Python | apache-2.0 | 14,940 |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy.schema import Column
from sqlalchemy.schema import MetaData
from trove.db.sqlalchemy.migrate_repo.schema import create_tables
from trove.db.sqlalchemy.migrate_repo.schema import drop_tables
from trove.db.sqlalchemy.migrate_repo.schema import String
from trove.db.sqlalchemy.migrate_repo.schema import Table
# Shared metadata container; bound to the migration engine in
# upgrade()/downgrade() below.
meta = MetaData()
# Maps a DNS name to the record id assigned by the backend DNS provider.
dns_records = Table(
    'dns_records', meta,
    Column('name', String(length=255), primary_key=True),
    Column('record_id', String(length=64)))
def upgrade(migrate_engine):
    """Create the dns_records table on the given engine."""
    meta.bind = migrate_engine
    create_tables([dns_records])
def downgrade(migrate_engine):
    """Drop the dns_records table, reversing upgrade()."""
    meta.bind = migrate_engine
    drop_tables([dns_records])
| redhat-openstack/trove | trove/db/sqlalchemy/migrate_repo/versions/006_dns_records.py | Python | apache-2.0 | 1,320 |
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import datetime
import sys
import traceback
import common
import text
"""
Given a map of all connections with lists of the associated frames
analyze and show per-connection, per-session, and per-link details.
This is done in a two-step process:
* Run through the frame lists and generates an intermediate structure
  with the details for display.
* Generate the html from the detail structure.
This strategy allows for a third step that would allow more details
to be gleaned from the static details. For instance, if router A
sends a transfer to router B then router A's details could show
how long it took for the transfer to reach router B. Similarly
router B's details could show how long ago router A sent the transfer.
"""
class Counts():
    """
    Holds common count sets that can be rolled up from links to
    sessions to connections. Not for individual performatives.
    """
    def __init__(self):
        # amqp errors gleaned from any performative
        self.errors = 0  # amqp error - simple count
        # derived facts about message settlement
        self.unsettled = 0
        self.presettled = 0
        self.accepted = 0
        self.rejected = 0
        self.released = 0
        self.modified = 0
        # interesting transfers
        self.aborted = 0
        self.more = 0
        self.incomplete = 0
        # link drained
        self.drain = 0
        # link out of credit
        self.credit_not_evaluated = 0
        self.no_credit = 0  # event count, excludes drain credit exhaustion
        self.initial_no_credit_duration = datetime.timedelta()  # before first credit
        self.no_credit_duration = datetime.timedelta()  # after credit issued and then exhausted
    def highlight(self, name, value, color):
        """
        if value is non zero then return a colorized 'name: value' text stream
        else return a blank string
        """
        result = ""
        if value:
            result = "<span style=\"background-color:%s\">%s: %s</span> " % (color, name, str(value))
        return result
    def highlight_duration(self, name, value, color):
        """
        if timedelta 'value' is positive then return a colorized
        'name: seconds' text stream else return a blank string
        """
        result = ""
        # Use total_seconds() so durations of one day or more are not
        # truncated: the .seconds/.microseconds fields ignore the .days
        # component, which silently dropped whole days in the display.
        t = value.total_seconds()
        if t > 0:
            result = "<span style=\"background-color:%s\">%s: %0.06f</span> " % (color, name, t)
        return result
    def show_html(self):
        """Return all non-zero counts as a single colorized HTML run."""
        res = ""
        res += self.highlight("errors", self.errors, common.color_of("errors"))
        res += self.highlight("unsettled", self.unsettled, common.color_of("unsettled"))
        res += self.highlight("presettled", self.presettled, common.color_of("presettled"))
        res += self.highlight("accepted", self.accepted, common.color_of("accepted"))
        res += self.highlight("rejected", self.rejected, common.color_of("rejected"))
        res += self.highlight("released", self.released, common.color_of("released"))
        res += self.highlight("modified", self.modified, common.color_of("modified"))
        res += self.highlight("aborted", self.aborted, common.color_of("aborted"))
        res += self.highlight("more", self.more, common.color_of("more"))
        res += self.highlight("incomplete", self.incomplete, common.color_of("unsettled"))
        res += self.highlight("drain", self.drain, common.color_of("drain"))
        res += self.highlight_duration("initial", self.initial_no_credit_duration, common.color_of("no_credit"))
        res += self.highlight("no_credit", self.no_credit, common.color_of("no_credit"))
        res += self.highlight_duration("duration", self.no_credit_duration, common.color_of("no_credit"))
        res += self.highlight("no_eval", self.credit_not_evaluated, common.color_of("no_credit"))
        return res
    @classmethod
    def show_table_heads1(cls):
        """First (grouping) row of table headers matching show_table_data."""
        return "<th rowspan=\"2\"><span title=\"AMQP errors\">ERR</span></th>" \
               "<th colspan=\"6\">Settlement - disposition</th>" \
               "<th colspan=\"3\">Transfer</th>" \
               "<th>Flow</th>" \
               "<th colspan=\"4\">Credit starvation</th>"
    @classmethod
    def show_table_heads2(cls):
        """Second (detail) row of table headers matching show_table_data."""
        return "<th><span title=\"Unsettled transfers\">UNSTL</span></th>" \
               "<th><span title=\"Presettled transfers\">PRE</span></th>" \
               "<th><span title=\"Disposition: accepted\">ACCPT</span></th>" \
               "<th><span title=\"Disposition: rejected\">RJCT</span></th>" \
               "<th><span title=\"Disposition: released\">RLSD</span></th>" \
               "<th><span title=\"Disposition: modified\">MDFD</span></th>" \
               "<th><span title=\"Transfer abort=true\">ABRT</span></th>" \
               "<th><span title=\"Transfer: more=true\">MOR</span></th>" \
               "<th><span title=\"Transfer: incomplete; all frames had more=true\">INC</span></th>" \
               "<th><span title=\"Flow: drain=true\">DRN</span></th>" \
               "<th><span title=\"Initial stall (S)\">initial (S)</span></th>" \
               "<th><span title=\"Credit exhausted\">-> 0</span></th>" \
               "<th><span title=\"Normal credit exhaustion stall (S)\">duration (S)</span></th>" \
               "<th><span title=\"Credit not evaluated\">?</span></th>"
    def show_table_element(self, name, value, color):
        """One <td> cell: blank for zero, else the colorized value."""
        return ("<td>%s</td>" % text.nbsp()) if value == 0 else \
            ("<td>%s</td>" % ("<span style=\"background-color:%s\">%s</span> " % (color, str(value))))
    def show_table_duration(self, delta):
        """One <td> cell: blank for a zero duration, else seconds to 6 places."""
        # total_seconds() includes .days; the previous seconds/microseconds
        # check rendered whole-day durations as blank cells.
        t = delta.total_seconds()
        if t == 0:
            return "<td>%s</td>" % text.nbsp()
        return ("<td>%0.06f</td>" % t)
    def show_table_data(self):
        """Return all counts as a row of <td> cells matching the headers."""
        res = ""
        res += self.show_table_element("errors", self.errors, common.color_of("errors"))
        res += self.show_table_element("unsettled", self.unsettled, common.color_of("unsettled"))
        res += self.show_table_element("presettled", self.presettled, common.color_of("presettled"))
        res += self.show_table_element("accepted", self.accepted, common.color_of("accepted"))
        res += self.show_table_element("rejected", self.rejected, common.color_of("rejected"))
        res += self.show_table_element("released", self.released, common.color_of("released"))
        res += self.show_table_element("modified", self.modified, common.color_of("modified"))
        res += self.show_table_element("aborted", self.aborted, common.color_of("aborted"))
        res += self.show_table_element("more", self.more, common.color_of("more"))
        res += self.show_table_element("incomplete", self.incomplete, common.color_of("unsettled"))
        res += self.show_table_element("drain", self.drain, common.color_of("drain"))
        res += self.show_table_duration(self.initial_no_credit_duration)
        res += self.show_table_element("no_credit", self.no_credit, common.color_of("no_credit"))
        res += self.show_table_duration(self.no_credit_duration)
        res += self.show_table_element("?", self.credit_not_evaluated, common.color_of("no_credit"))
        return res
class ConnectionDetail():
    """
    Holds facts about sessions over the connection's lifetime
    """
    def __init__(self, id, router, conn):
        # id in form 'A_15':
        #   A  - the router logfile key
        #   15 - the log connection number [15]
        self.id = id
        self.router = router
        self.conn = conn  # from router.conn_list
        # seq_no differentiates items that otherwise share an identifier;
        # e.g. a connection may host N distinct sessions on local channel 0.
        self.seq_no = 0
        # rolled-up counts for the whole connection
        self.counts = Counts()
        # every SessionDetail ever created for this connection, active or
        # retired; sessions are identified by their local channel number
        self.session_list = []
        # channel number -> currently active entry in session_list
        self.chan_map = {}
        # AMQP performatives not accounted for in session/link processing:
        # Server Accepting, SASL mechs, init, outcome, AMQP, and so on
        self.unaccounted_frame_list = []
    def FindSession(self, channel):
        """
        Return the currently active session for this channel number,
        or None when no session is bound to the channel.
        """
        return self.chan_map.get(channel)
    def GetId(self):
        return self.id
    def GetSeqNo(self):
        self.seq_no += 1
        return str(self.seq_no)
    def EndChannel(self, channel):
        # retire the session currently bound to this channel, if any
        self.chan_map.pop(channel, None)
    def GetLinkEventCount(self):
        """Sum the link event counts of every session on this connection."""
        return sum(session.GetLinkEventCount() for session in self.session_list)
class SessionDetail:
    """
    Holds facts about a session.

    Within a connection a session is identified by its local channel
    number; the same channel may be reused by several successive
    sessions, each with its own SessionDetail record.
    """
    def __init__(self, id, conn_detail, conn_seq, start_time):
        # parent connection
        self.id = id
        self.conn_detail = conn_detail
        # some seq number (epoch within the parent connection)
        self.conn_epoch = conn_seq
        # Timing - first and last times seen for this session
        self.time_start = start_time
        self.time_end = start_time
        # combined counts rolled up from this session's links
        self.counts = Counts()
        self.channel = -1      # local channel number
        self.peer_chan = -1    # peer's channel number
        self.half_closed = False  # one 'end' seen; awaiting the peer's
        self.direction = ""
        # seq_no number differentiates items that otherwise have same identifiers.
        # links for example
        self.seq_no = 0
        self.log_line_list = []
        # link_list holds LinkDetail records
        # Links for a session are identified by a (handle, remote-handle) number pair.
        # There may be many links all using the same handle pairs.
        # This list holds all of them.
        self.link_list = []
        # link_list holds all links either active or retired
        # this map indexed by the handle refers to the current item in the link_list
        self.input_handle_link_map = {}  # link created by peer
        self.output_handle_link_map = {}  # link created locally
        # Link name in attach finds link details in link_list
        # This map contains the link handle to disambiguate the name
        self.link_name_to_detail_map = {}
        #
        # The map contains the pure link name and is used only to resolve name collisions
        self.link_name_conflict_map = {}
        # count of AMQP performatives for this connection that are not accounted
        # properly in link processing (begin/end/disposition frames)
        self.session_frame_list = []
        # Session dispositions
        # Sender/receiver dispositions may be sent or received
        self.rx_rcvr_disposition_map = {}  # key=delivery id, val=disposition plf
        self.rx_sndr_disposition_map = {}  # key=delivery id, val=disposition plf
        self.tx_rcvr_disposition_map = {}  # key=delivery id, val=disposition plf
        self.tx_sndr_disposition_map = {}  # key=delivery id, val=disposition plf
    def FrameCount(self):
        """Return the number of frames seen on this session, link frames included."""
        count = 0
        for link in self.link_list:
            count += len(link.frame_list)
        count += len(self.session_frame_list)
        return count
    def FindLinkByName(self, attach_name, link_name_unambiguous, parsed_log_line):
        """
        Find the current link by its attach name.

        :param attach_name: the bare link name from the attach performative
        :param link_name_unambiguous: attach name suffixed with the handle,
            used to disambiguate reused names
        :param parsed_log_line: plf of the attach (used only by the disabled
            conflict-reporting code below)
        :return: the LinkDetail with both handles still attached, or None
        """
        # find conflicted name
        cnl = None
        if attach_name in self.link_name_conflict_map:
            cnl = self.link_name_conflict_map[attach_name]
            if cnl.input_handle == -1 and cnl.output_handle == -1:
                cnl = None
        # find non-conflicted name
        nl = None
        if link_name_unambiguous in self.link_name_to_detail_map:
            nl = self.link_name_to_detail_map[link_name_unambiguous]
            if nl.input_handle == -1 and nl.output_handle == -1:
                nl = None
        # report conflict
        # TODO: There's an issue with this logic generating false positives
        # if nl is None and (not cnl is None):
        #     parsed_log_line.data.amqp_error = True
        #     parsed_log_line.data.web_show_str += " <span style=\"background-color:yellow\">Link name conflict</span>"
        # return unambiguous link
        return nl
    def FindLinkByHandle(self, handle, find_remote):
        """
        Find the current link by handle number,
        qualifying the lookup based on packet direction.
        :param handle: the performative's link handle number
        :param find_remote: True to search links created by the peer,
            False for links created locally
        :return: the link or None
        """
        if find_remote:
            return self.input_handle_link_map[handle] if handle in self.input_handle_link_map else None
        else:
            return self.output_handle_link_map[handle] if handle in self.output_handle_link_map else None
    def GetId(self):
        """Return this session's display id: '<conn id>_<conn epoch>'."""
        return self.conn_detail.GetId() + "_" + str(self.conn_epoch)
    def GetSeqNo(self):
        """Return the next per-session sequence number (an int, unlike
        ConnectionDetail.GetSeqNo which returns a string)."""
        self.seq_no += 1
        return self.seq_no
    def DetachOutputHandle(self, handle):
        """Take the existing link out of the session's local handle map."""
        # take existing link out of session handle map
        if handle in self.output_handle_link_map:
            nl = self.output_handle_link_map[handle]
            del self.output_handle_link_map[handle]
            nl.output_handle = -1
    def DetachInputHandle(self, handle):
        """Take the existing link out of the session's remote handle map."""
        # take existing link out of session remote handle map
        if handle in self.input_handle_link_map:
            nl = self.input_handle_link_map[handle]
            del self.input_handle_link_map[handle]
            nl.input_handle = -1
    def DetachHandle(self, handle, is_remote):
        """Detach from the input or output handle map per is_remote."""
        if is_remote:
            self.DetachInputHandle(handle)
        else:
            self.DetachOutputHandle(handle)
    def GetLinkEventCount(self):
        """Sum the link event counts of every link on this session."""
        c = 0
        for link in self.link_list:
            c += link.GetLinkEventCount()
        return c
class LinkDetail():
    """
    Holds facts about a link endpoint.
    This structure binds together the input and output links that
    share the same name.
    """
    def __init__(self, id, session_detail, session_seq, link_name, start_time):
        self.id = id
        # parent session
        self.session_detail = session_detail
        # some seq number
        self.session_seq = session_seq
        # link name
        self.name = link_name          # plf.data.link_short_name
        self.display_name = link_name  # show short name; hover to see long name
        # Timing
        self.time_start = start_time
        self.time_end = start_time
        # combined counts
        self.counts = Counts()
        self.unsettled_list = []
        # paired handles; -1 means detached
        self.output_handle = -1
        self.input_handle = -1
        # link originator
        self.direction = ""
        self.is_receiver = True
        self.first_address = ''
        # set by sender
        self.snd_settle_mode = ''
        self.sender_target_address = "none"
        self.sender_class = ''
        # set by receiver
        self.rcv_settle_mode = ''
        self.receiver_source_address = "none"
        self.receiver_class = ''
        self.frame_list = []
    def GetId(self):
        """Return this link's display id: '<session id>_<session seq>'."""
        parent_id = self.session_detail.GetId()
        return "%s_%s" % (parent_id, str(self.session_seq))
    def FrameCount(self):
        """Return the number of frames recorded against this link."""
        return len(self.frame_list)
class AllDetails():
#
#
def format_errors(self, n_errors):
return ("<span style=\"background-color:%s\">errors: %d</span>" % (common.color_of("errors"), n_errors)) if n_errors > 0 else ""
def format_unsettled(self, n_unsettled):
return ("<span style=\"background-color:%s\">unsettled: %d</span>" % (common.color_of("unsettled"), n_unsettled)) if n_unsettled > 0 else ""
    def classify_connection(self, id):
        """
        Return probable connection class based on the kinds of links the connection uses.
        TODO: This is an unimplemented stub; it currently returns the
        placeholder string "oops" for every connection. The intended logic
        assumes that the connection has one session and one link.
        :param id: connection id key into self.conn_details
        :return: placeholder classification string
        """
        return "oops"
def time_offset(self, ttest, t0):
"""
Return a string time delta between two datetime objects in seconds formatted
to six significant decimal places.
:param ttest:
:param t0:
:return:
"""
if ttest < t0:
# Never return negative deltas
return "0.000000"
delta = ttest - t0
t = float(delta.seconds) + float(delta.microseconds) / 1000000.0
return "%0.06f" % t
def links_in_connection(self, id):
conn_details = self.conn_details[id]
n_links = 0
for sess in conn_details.session_list:
n_links += len(sess.link_list)
return n_links
    def settlement_display(self, transfer, disposition):
        """
        Generate the details for a disposition settlement
        :param transfer: plf of the transfer frame being settled
        :param disposition: plf of the disposition frame
        :return: display string '(<linked state> <settled|unsettled> <delay> S)'
            where delay is the time from transfer to disposition in seconds
        """
        state = disposition.data.disposition_state  # accept, reject, release, ...
        # non-accepted outcomes are highlighted as anomalies
        if state != "accepted":
            state = "<span style=\"background-color:orange\">%s</span>" % state
        # link the state text to the disposition frame's anchor
        l2disp = "<a href=\"#%s\">%s</a>" % (disposition.fid, state)
        sttld = "settled" if disposition.data.settled == "true" else "unsettled"
        delay = self.time_offset(disposition.datetime, transfer.datetime)
        return "(%s %s %s S)" % (l2disp, sttld, delay)
def resolve_settlement(self, link, transfer, rcv_disposition, snd_disposition):
"""
Generate the settlement display string for this transfer.
:param link: linkDetails - holds settlement modes
:param transfer: plf of the transfer frame
:param rcv_disposition: plf of receiver role disposition
:param snd_disposition: plf of sender role disposition
:return: display string
"""
if transfer.data.settled is not None and transfer.data.settled == "true":
result = "transfer presettled"
transfer.data.transfer_presettled = True
if rcv_disposition is not None:
sys.stderr.write("WARING: Receiver disposition for presettled message. connid:%s, line:%s\n" %
(rcv_disposition.data.conn_id, rcv_disposition.lineno))
if snd_disposition is not None:
sys.stderr.write("WARING: Sender disposition for presettled message. connid:%s, line:%s\n" %
(snd_disposition.data.conn_id, snd_disposition.lineno))
else:
if "1" in link.snd_settle_mode:
# link mode sends only settled transfers
result = "link presettled"
transfer.data.transfer_presettled = True
if rcv_disposition is not None:
sys.stderr.write("WARING: Receiver disposition for presettled link. connid:%s, line:%s\n" %
(rcv_disposition.data.conn_id, rcv_disposition.lineno))
if snd_disposition is not None:
sys.stderr.write("WARING: Sender disposition for presettled link. connid:%s, line:%s\n" %
(snd_disposition.data.conn_id, snd_disposition.lineno))
else:
# transfer unsettled and link mode requires settlement
if rcv_disposition is not None:
rtext = self.settlement_display(transfer, rcv_disposition)
transfer.data.final_disposition = rcv_disposition
if snd_disposition is not None:
stext = self.settlement_display(transfer, snd_disposition)
transfer.data.final_disposition = snd_disposition
if "0" in link.rcv_settle_mode:
# one settlement expected
if rcv_disposition is not None:
result = rtext
if snd_disposition is not None:
sys.stderr.write("WARING: Sender disposition for single first(0) settlement link. "
"connid:%s, line:%s\n" %
(snd_disposition.data.conn_id, snd_disposition.lineno))
else:
if transfer.data.transfer_more:
result = "(pending)"
else:
result = "<span style=\"background-color:orange\">%s</span>" % "receive settlement absent"
else:
# two settlements expected
if transfer.data.transfer_more:
result = "(pending)"
elif rcv_disposition is not None:
result = "receiver: " + rtext
if snd_disposition is not None:
result += ", sender: " + stext
else:
result += "<span style=\"background-color:orange\">%s</span>" % ", sender settlement absent"
else:
result = "<span style=\"background-color:orange\">%s</span>" % "receiver settlement absent"
if snd_disposition is not None:
result += ", sender: " + stext
else:
result += "<span style=\"background-color:orange\">%s</span>" % ", sender settlement absent"
return result
def __init__(self, _router, _common):
    """Build the per-connection AMQP detail analysis.

    Walks every frame of every connection in `_router`, sorting each frame
    into its connection, session (by channel), and link (by attach handle),
    and then indexes disposition frames by delivery id so settlement can be
    resolved later.

    :param _router: parsed router log object (supplies conn_list, conn_id,
                    conn_to_frame_map)
    :param _common: shared state/shorteners object used across routers
    """
    self.rtr = _router
    self.comn = _common
    # conn_details - AMQP analysis
    # key= connection id '1', '2'
    # val= ConnectionDetails
    # for each connection, for each session, for each link:
    # what happened
    self.conn_details = {}
    # Pass 1: classify every frame into connection / session / link buckets.
    for conn in self.rtr.conn_list:
        id = self.rtr.conn_id(conn)
        self.conn_details[id] = ConnectionDetail(id, self.rtr, conn)
        conn_details = self.conn_details[id]
        conn_frames = self.rtr.conn_to_frame_map[id]
        for plf in conn_frames:
            pname = plf.data.name
            if plf.data.amqp_error:
                conn_details.counts.errors += 1
            # open/close (and unparsed frames) belong to the connection itself
            if pname in ['', 'open', 'close']:
                conn_details.unaccounted_frame_list.append(plf)
                continue
            # session required
            channel = plf.data.channel  # Assume in/out channels are the same for the time being
            sess_details = conn_details.FindSession(channel)
            if sess_details is None:
                # first frame seen on this channel: open a new session record
                new_id = len(conn_details.session_list)
                sess_details = SessionDetail(new_id, conn_details, conn_details.GetSeqNo(), plf.datetime)
                conn_details.session_list.append(sess_details)
                conn_details.EndChannel(channel)
                conn_details.chan_map[channel] = sess_details
                sess_details.direction = plf.data.direction
                sess_details.channel = channel
            if plf.data.amqp_error:
                sess_details.counts.errors += 1
            if pname in ['begin', 'end', 'disposition']:
                sess_details.session_frame_list.append(plf)  # Accumulate to current session
                if pname == 'end':
                    # end is closing this session; a session is fully closed
                    # only after an end has been seen in both directions
                    if sess_details.half_closed:
                        conn_details.EndChannel(plf.data.channel)
                    else:
                        sess_details.half_closed = True
                else:
                    pass  # begin handled above; disposition needs no action
            elif pname in ['attach']:
                handle = plf.data.handle  # proton local handle
                link_name = plf.data.link_short_name
                link_name_unambiguous = link_name + "_" + str(handle)
                error_was = plf.data.amqp_error
                nl = sess_details.FindLinkByName(link_name, link_name_unambiguous, plf)
                # if finding an ambiguous link name generated an error then propagate to session/connection
                if not error_was and plf.data.amqp_error:
                    conn_details.counts.errors += 1
                    sess_details.counts.errors += 1
                if nl is None:
                    # Creating a new link from scratch resulting in a half attached link pair
                    new_id = len(sess_details.link_list)
                    nl = LinkDetail(new_id, sess_details, sess_details.GetSeqNo(), link_name, plf.datetime)
                    sess_details.link_list.append(nl)
                    sess_details.link_name_to_detail_map[link_name_unambiguous] = nl
                    sess_details.link_name_conflict_map[link_name] = nl
                    nl.display_name = plf.data.link_short_name_popup
                    nl.direction = plf.data.direction
                    nl.is_receiver = plf.data.role == "receiver"
                    nl.first_address = plf.data.source if nl.is_receiver else plf.data.target
                if plf.data.amqp_error:
                    nl.counts.errors += 1
                if plf.data.direction_is_in():
                    # peer is creating link
                    nl.input_handle = handle
                    sess_details.DetachInputHandle(handle)
                    sess_details.input_handle_link_map[handle] = nl
                else:
                    # local is creating link
                    nl.output_handle = handle
                    sess_details.DetachOutputHandle(handle)
                    sess_details.output_handle_link_map[handle] = nl
                # record the settle mode / address / class carried by the
                # attach for whichever role this endpoint plays
                if plf.data.is_receiver:
                    nl.rcv_settle_mode = plf.data.rcv_settle_mode
                    nl.receiver_source_address = plf.data.source
                    nl.receiver_class = plf.data.link_class
                else:
                    nl.snd_settle_mode = plf.data.snd_settle_mode
                    nl.sender_target_address = plf.data.target
                    nl.sender_class = plf.data.link_class
                nl.frame_list.append(plf)
            elif pname in ['detach']:
                ns = conn_details.FindSession(channel)
                if ns is None:
                    # detach on an unknown channel: keep it at connection level
                    conn_details.unaccounted_frame_list.append(plf)
                    continue
                handle = plf.data.handle
                nl = ns.FindLinkByHandle(handle, plf.data.direction_is_in())
                ns.DetachHandle(handle, plf.data.direction_is_in())
                if nl is None:
                    ns.session_frame_list.append(plf)
                else:
                    if plf.data.amqp_error:
                        nl.counts.errors += 1
                    nl.frame_list.append(plf)
            elif pname in ['transfer', 'flow']:
                ns = conn_details.FindSession(channel)
                if ns is None:
                    conn_details.unaccounted_frame_list.append(plf)
                    plf.no_parent_link = True
                    continue
                handle = plf.data.handle
                nl = ns.FindLinkByHandle(handle, plf.data.direction_is_in())
                if nl is None:
                    # no owning link: park the frame on the session
                    ns.session_frame_list.append(plf)
                    plf.no_parent_link = True
                else:
                    if plf.data.amqp_error:
                        nl.counts.errors += 1
                    nl.frame_list.append(plf)
    # Pass 2: identify and index dispositions
    for conn in self.rtr.conn_list:
        id = self.rtr.conn_id(conn)
        conn_detail = self.conn_details[id]
        for sess in conn_detail.session_list:
            # for each disposition add state to disposition_map
            for splf in sess.session_frame_list:
                if splf.data.name == "disposition":
                    # four maps: (rx|tx) x (receiver|sender) role
                    if splf.data.direction == "<-":
                        sdispmap = sess.rx_rcvr_disposition_map if splf.data.is_receiver else sess.rx_sndr_disposition_map
                    else:
                        sdispmap = sess.tx_rcvr_disposition_map if splf.data.is_receiver else sess.tx_sndr_disposition_map
                    # a disposition covers the inclusive range [first, last]
                    for sdid in range(int(splf.data.first), (int(splf.data.last) + 1)):
                        did = str(sdid)
                        if did in sdispmap:
                            old_splf = sdispmap[did]
                            if "state=@received" in old_splf.line:
                                # Existing disposition is non-terminal.
                                # Don't complain when it is overwritten by another non-terminal
                                # or by a terminal disposition.
                                pass
                            else:
                                # Current state is terminal disposition. Complain when overwritten.
                                sys.stderr.write("ERROR: Delivery ID collision in disposition map. connid:%s, \n" %
                                                 (splf.data.conn_id))
                                sys.stderr.write(" old: %s, %s\n" % (old_splf.fid, old_splf.line))
                                sys.stderr.write(" new: %s, %s\n" % (splf.fid, splf.line))
                        sdispmap[did] = splf
def rollup_disposition_counts(self, state, conn, sess, link):
    """Credit a terminal disposition state to all three count levels.

    :param state: disposition state string (e.g. "accepted") or None
    :param conn:  connection-level counts accumulator
    :param sess:  session-level counts accumulator
    :param link:  link-level counts accumulator

    Recognized state prefixes map to a counter attribute that is
    incremented on each of conn, sess, and link. A None state or an
    unrecognized state increments nothing (TODO: count the others).
    """
    if state is None:
        return
    # prefix of the state string -> counter attribute name
    for prefix, attr in (("acce", "accepted"),
                         ("reje", "rejected"),
                         ("rele", "released"),
                         ("modi", "modified")):
        if state.startswith(prefix):
            for counts in (conn, sess, link):
                setattr(counts, attr, getattr(counts, attr) + 1)
            return
    # Hmmm, some other disposition. TODO: count these
def compute_settlement(self):
    """Resolve settlement for every transfer and roll up settlement counts.

    For each transfer frame: look up the matching receiver/sender
    dispositions (choosing the rx or tx maps by frame direction), compute
    the display string via resolve_settlement, and then bump the
    unsettled / presettled / more / aborted counters (and the terminal
    disposition counters via rollup_disposition_counts) on the link,
    session, and connection. Flow frames with drain set bump the drain
    counters.
    """
    for conn in self.rtr.conn_list:
        id = self.rtr.conn_id(conn)
        conn_detail = self.rtr.details.conn_details[id]
        for sess in conn_detail.session_list:
            for link in sess.link_list:
                for plf in link.frame_list:
                    if plf.data.transfer:
                        tdid = plf.data.delivery_id
                        # pick the disposition maps for this frame's direction
                        if plf.data.direction == "->":
                            rmap = sess.rx_rcvr_disposition_map
                            tmap = sess.rx_sndr_disposition_map
                        else:
                            rmap = sess.tx_rcvr_disposition_map
                            tmap = sess.tx_sndr_disposition_map
                        plf.data.disposition_display = self.resolve_settlement(link, plf,
                                                                               rmap.get(tdid),
                                                                               tmap.get(tdid))
                        if common.transfer_is_possibly_unsettled(plf):
                            # count each unsettled delivery id only once
                            if tdid not in link.unsettled_list:
                                link.unsettled_list.append(tdid)
                                link.counts.unsettled += 1
                                sess.counts.unsettled += 1
                                conn_detail.counts.unsettled += 1
                        else:
                            if not plf.data.transfer_more:
                                # last (or only) frame of the delivery
                                if plf.data.transfer_presettled:
                                    link.counts.presettled += 1
                                    sess.counts.presettled += 1
                                    conn_detail.counts.presettled += 1
                                else:
                                    self.rollup_disposition_counts(
                                        plf.data.final_disposition.data.disposition_state, conn_detail.counts, sess.counts, link.counts)
                            else:
                                # continuation frames ('more' set) counted separately
                                link.counts.more += 1
                                sess.counts.more += 1
                                conn_detail.counts.more += 1
                        if plf.data.transfer_aborted:
                            link.counts.aborted += 1
                            sess.counts.aborted += 1
                            conn_detail.counts.aborted += 1
                    if plf.data.flow_drain:
                        link.counts.drain += 1
                        sess.counts.drain += 1
                        conn_detail.counts.drain += 1
def index_addresses(self):
    """Register every link's first address with the address-name shortener."""
    shortener = self.comn.shorteners.short_addr_names
    for conn in self.rtr.conn_list:
        conn_detail = self.rtr.details.conn_details[self.rtr.conn_id(conn)]
        for session in conn_detail.session_list:
            for link in session.link_list:
                shortener.translate(link.first_address, False, link)
def evaluate_credit(self):
    """Track credit flow per link and accumulate credit-stall statistics.

    Replays each link's frames in order, modeling the sender's delivery id
    against the receiver's advertised limit (delivery-count + link-credit
    from flow frames). Records:
      * initial_no_credit_duration - time from the second attach until the
        first usable flow arrives,
      * no_credit / no_credit_duration - transfers that exhausted credit
        and how long the stall lasted,
      * incomplete - a multi-frame transfer still open at end of log,
      * credit_not_evaluated - links that do not start with an attach.
    """
    for conn in self.rtr.conn_list:
        id = self.rtr.conn_id(conn)
        conn_detail = self.rtr.details.conn_details[id]
        for sess in conn_detail.session_list:
            for link in sess.link_list:
                # ignore links without starting attach
                if link.frame_list[0].data.name != "attach":
                    link.counts.credit_not_evaluated += 1
                    sess.counts.credit_not_evaluated += 1
                    conn_detail.counts.credit_not_evaluated += 1
                    # NOTE(review): 'break' abandons the remaining links in
                    # this session as well; 'continue' may have been
                    # intended - confirm.
                    break
                # process flaggage
                look_for_sender_delivery_id = True
                dir_of_xfer = ''
                dir_of_flow = ''
                current_delivery = 0  # next transfer expected id
                delivery_limit = 0  # first unreachable delivery id from flow
                n_attaches = 0
                tod_of_second_attach = None
                multiframe_in_progress = False
                init_stall = True
                credit_stall = False
                tod_of_no_credit = None
                tod_of_shutdown = None
                # record info about initial attach
                is_rcvr = link.frame_list[0].data.is_receiver
                o_dir = link.frame_list[0].data.direction
                # derive info about where to look for credit and transfer id
                #   role  dir   transfers  flow w/credit  case
                #   ----  ----  ---------  -------------  ----
                #   rcvr  <-    ->         <-             A
                #   rcvr  ->    <-         ->             B
                #   sndr  <-    <-         ->             B
                #   sndr  ->    ->         <-             A
                #
                if (((is_rcvr) and (o_dir == text.direction_in())) or
                        ((not is_rcvr) and (o_dir == text.direction_out()))):
                    # case A
                    dir_of_xfer = text.direction_out()
                    dir_of_flow = text.direction_in()
                else:
                    # case B
                    dir_of_xfer = text.direction_in()
                    dir_of_flow = text.direction_out()
                for plf in link.frame_list:
                    # initial credit delay starts at reception of second attach
                    if n_attaches < 2:
                        if plf.data.name == "attach":
                            n_attaches += 1
                            if n_attaches == 2:
                                tod_of_second_attach = plf.datetime
                    if look_for_sender_delivery_id:
                        # seed delivery numbering from the sender's attach
                        if plf.data.name == "attach" and not plf.data.is_receiver:
                            current_delivery = int(plf.data.described_type.dict.get("initial-delivery_count", "0"))
                            delivery_limit = current_delivery
                            look_for_sender_delivery_id = False
                    if plf.data.name == "flow":
                        if plf.data.direction == dir_of_flow:
                            # a flow in the normal direction updates the delivery limit
                            dc = plf.data.described_type.dict.get("delivery-count", "0")
                            lc = plf.data.described_type.dict.get("link-credit", "0")
                            delivery_limit = int(dc) + int(lc)  # TODO: wrap at 32-bits
                            if n_attaches < 2:
                                # a working flow before sender attach - cancel initial stall
                                init_stall = False
                            if init_stall:
                                init_stall = False
                                dur = plf.datetime - tod_of_second_attach
                                link.counts.initial_no_credit_duration = dur
                                sess.counts.initial_no_credit_duration += dur
                                conn_detail.counts.initial_no_credit_duration += dur
                            if credit_stall and delivery_limit > current_delivery:  # TODO: wrap
                                # credit has been replenished; close out the stall
                                credit_stall = False
                                plf.data.web_show_str += " <span style=\"background-color:%s\">credit restored</span>" % common.color_of("no_credit")
                                dur = plf.datetime - tod_of_no_credit
                                link.counts.no_credit_duration += dur
                                sess.counts.no_credit_duration += dur
                                conn_detail.counts.no_credit_duration += dur
                        else:
                            # flow in the opposite direction updates the senders current delivery
                            # normally used to consume credit in response to a drain from receiver
                            # NOTE(review): the "initial-delivery_count" key here looks
                            # suspicious for a flow frame (attach carries
                            # initial-delivery-count; flow carries delivery-count) - confirm.
                            current_delivery = int(plf.data.described_type.dict.get("initial-delivery_count", "0"))
                    elif plf.data.transfer:
                        if plf.data.direction == dir_of_xfer:
                            if not plf.data.transfer_more:
                                # consider the transfer to have arrived when last transfer seen
                                current_delivery += 1  # TODO: wrap at 32-bits
                                if current_delivery == delivery_limit:
                                    link.counts.no_credit += 1
                                    sess.counts.no_credit += 1
                                    conn_detail.counts.no_credit += 1
                                    plf.data.transfer_exhausted_credit = True
                                    credit_stall = True
                                    plf.data.web_show_str += " <span style=\"background-color:%s\">no more credit</span>" % common.color_of("no_credit")
                                    tod_of_no_credit = plf.datetime
                                else:
                                    pass  # still have credit
                                multiframe_in_progress = False
                            else:
                                # transfers with 'more' set don't consume credit
                                multiframe_in_progress = True
                        else:
                            pass  # transfer in wrong direction??
                    elif plf.data.name == "detach":
                        tod_of_shutdown = plf.datetime
                        break
                # clean up lingering credit stall
                if init_stall or credit_stall:
                    if tod_of_shutdown is None:
                        # find first end or close and call that shutdown time
                        for plf in sess.session_frame_list:
                            if plf.data.name == "end":
                                tod_of_shutdown = plf.datetime
                                break
                    if tod_of_shutdown is None:
                        for plf in conn_detail.unaccounted_frame_list:
                            if plf.data.name == "close":
                                tod_of_shutdown = plf.datetime
                                break
                    if tod_of_shutdown is None:
                        # Hmmm, no shutdown. Use last link frame
                        tod_of_shutdown = link.frame_list[-1].datetime
                    if tod_of_second_attach is None:
                        # Hmmm, no second attach. Use first link frame time
                        tod_of_second_attach = link.frame_list[0].datetime
                    if init_stall:
                        dur = tod_of_shutdown - tod_of_second_attach
                        link.counts.initial_no_credit_duration = dur
                        sess.counts.initial_no_credit_duration += dur
                        conn_detail.counts.initial_no_credit_duration += dur
                    if credit_stall:  # TODO: wrap
                        dur = tod_of_shutdown - tod_of_no_credit
                        link.counts.no_credit_duration += dur
                        sess.counts.no_credit_duration += dur
                        conn_detail.counts.no_credit_duration += dur
                # record multiframe transfer that didn't complete
                if multiframe_in_progress:
                    link.counts.incomplete += 1
                    sess.counts.incomplete += 1
                    conn_detail.counts.incomplete += 1
def show_html(self):
    """Emit per-connection AMQP detail as HTML on stdout.

    For each connection: an anchor and show/hide lozenge, the
    connection-level frames, then each session with its frames, a summary
    table of the session's links, and finally each link's frame list in
    its own toggleable div.
    """
    for conn in self.rtr.conn_list:
        id = self.rtr.conn_id(conn)
        conn_detail = self.rtr.details.conn_details[id]
        conn_frames = self.rtr.conn_to_frame_map[id]
        print("<a name=\"cd_%s\"></a>" % id)
        # This lozenge shows/hides the connection's data
        print("<a href=\"javascript:toggle_node('%s_data')\">%s%s</a>" %
              (id, text.lozenge(), text.nbsp()))
        dir = self.rtr.conn_dir[id] if id in self.rtr.conn_dir else ""
        peer = self.rtr.conn_peer_display.get(id, "")  # peer container id
        peerconnid = self.comn.conn_peers_connid.get(id, "")
        # show the connection title
        print("%s %s %s %s (nFrames=%d) %s<br>" %
              (id, dir, peerconnid, peer, len(conn_frames), conn_detail.counts.show_html()))
        # data div
        print("<div id=\"%s_data\" style=\"display:none; margin-bottom: 2px; margin-left: 10px\">" % id)
        # unaccounted frames
        print("<a href=\"javascript:toggle_node('%s_data_unacc')\">%s%s</a>" %
              (id, text.lozenge(), text.nbsp()))
        # show the connection-level frames
        errs = sum(1 for plf in conn_detail.unaccounted_frame_list if plf.data.amqp_error)
        print("Connection-based entries %s<br>" % self.format_errors(errs))
        print("<div id=\"%s_data_unacc\" style=\"display:none; margin-bottom: 2px; margin-left: 10px\">" % id)
        for plf in conn_detail.unaccounted_frame_list:
            print(plf.adverbl_link_to(), plf.datetime, plf.data.direction, peer, plf.data.web_show_str, "<br>")
        print("</div>")  # end unaccounted frames
        # loop to print session details
        for sess in conn_detail.session_list:
            # show the session 'toggle goto' and title
            print("<a href=\"javascript:toggle_node('%s_sess_%s')\">%s%s</a>" %
                  (id, sess.conn_epoch, text.lozenge(), text.nbsp()))
            print("Session %s: channel: %s, peer channel: %s; Time: start %s, Counts: frames: %d %s<br>" %
                  (sess.conn_epoch, sess.channel, sess.peer_chan, sess.time_start,
                   sess.FrameCount(), sess.counts.show_html()))
            print("<div id=\"%s_sess_%s\" style=\"display:none; margin-bottom: 2px; margin-left: 10px\">" %
                  (id, sess.conn_epoch))
            # show the session-level frames
            errs = sum(1 for plf in sess.session_frame_list if plf.data.amqp_error)
            print("<a href=\"javascript:toggle_node('%s_sess_%s_unacc')\">%s%s</a>" %
                  (id, sess.conn_epoch, text.lozenge(), text.nbsp()))
            print("Session-based entries %s<br>" % self.format_errors(errs))
            print("<div id=\"%s_sess_%s_unacc\" style=\"display:none; margin-bottom: 2px; margin-left: 10px\">" %
                  (id, sess.conn_epoch))
            for plf in sess.session_frame_list:
                print(plf.adverbl_link_to(), plf.datetime, plf.data.direction, peer, plf.data.web_show_str, "<br>")
            print("</div>")  # end <id>_sess_<conn_epoch>_unacc
            # loops to print session link details
            # first loop prints link table
            # BUG FIX: this previously printed "<table" (no closing '>'),
            # emitting a malformed tag; the matching </table> is below.
            print("<table>")
            print("<tr><th>Link</th> <th>Dir</th> <th>Role</th> <th>Address</th> <th>Class</th> "
                  "<th>snd-settle-mode</th> <th>rcv-settle-mode</th> <th>Start time</th> <th>Frames</th> "
                  "<th>Counts</th> </tr>")
            for link in sess.link_list:
                # show the link toggle and title
                showthis = ("<a href=\"javascript:toggle_node('%s_sess_%s_link_%s')\">%s</a>" %
                            (id, sess.conn_epoch, link.session_seq, text.lozenge()))
                visitthis = ("<a href=\"#%s_sess_%s_link_%s_data\">%s</a>" %
                             (id, sess.conn_epoch, link.session_seq, link.display_name))
                role = "receiver" if link.is_receiver else "sender"
                print("<tr><td>%s %s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td>"
                      "<td>%s</td><td>%d</td><td>%s</td> </tr>" %
                      (showthis, visitthis, link.direction, role, link.first_address,
                       (link.sender_class + '-' + link.receiver_class), link.snd_settle_mode,
                       link.rcv_settle_mode, link.time_start, link.FrameCount(),
                       link.counts.show_html()))
            print("</table>")
            # second loop prints the link's frames
            for link in sess.link_list:
                print(
                    "<div id=\"%s_sess_%s_link_%s\" style=\"display:none; margin-top: 2px; margin-bottom: 2px; margin-left: 10px\">" %
                    (id, sess.conn_epoch, link.session_seq))
                print("<a name=\"%s_sess_%s_link_%s_data\"></a>" %
                      (id, sess.conn_epoch, link.session_seq))
                print("<h4>Connection %s Session %s Link %s</h4>" %
                      (id, sess.conn_epoch, link.display_name))
                for plf in link.frame_list:
                    print(plf.adverbl_link_to(), plf.datetime, plf.data.direction, peer, plf.data.web_show_str,
                          plf.data.disposition_display, "<br>")
                print("</div>")  # end link <id>_sess_<conn_epoch>_link_<sess_seq>
            print("</div>")  # end session <id>_sess_<conn_epoch>
        print("</div>")  # end current connection data
if __name__ == "__main__":
    # Stub entry point: this module is normally driven by the scraper.
    try:
        pass
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception.
        traceback.print_exc(file=sys.stdout)
| ted-ross/qpid-dispatch | tools/scraper/amqp_detail.py | Python | apache-2.0 | 50,089 |
# -*- coding: utf-8 -*-
###############################################################################
#
# ConfirmSubscription
# Verifies that the endpoint owner wishes to receive messages by verifying the token sent during the Subscribe action.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class ConfirmSubscription(Choreography):
    """
    Choreo wrapper for Amazon SNS ConfirmSubscription: verifies that the
    endpoint owner wishes to receive messages by presenting the token that
    was delivered during the Subscribe action.
    """

    def __init__(self, temboo_session):
        """
        Construct the Choreo against the supplied TembooSession, which must
        carry a valid set of Temboo credentials.
        """
        super(ConfirmSubscription, self).__init__(temboo_session, '/Library/Amazon/SNS/ConfirmSubscription')

    def new_input_set(self):
        """Return a fresh, empty input set for this Choreo."""
        return ConfirmSubscriptionInputSet()

    def _make_result_set(self, result, path):
        """Wrap a raw execution result in the matching result-set class."""
        return ConfirmSubscriptionResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        """Wrap an in-flight execution in the matching execution class."""
        return ConfirmSubscriptionChoreographyExecution(session, exec_id, path)
class ConfirmSubscriptionInputSet(InputSet):
    """
    Inputs for the ConfirmSubscription Choreo. Call the setter methods
    below to supply parameter values before executing the Choreo.
    """

    def set_AWSAccessKeyId(self, value):
        """(required, string) The Access Key ID provided by Amazon Web Services."""
        super(ConfirmSubscriptionInputSet, self)._set_input('AWSAccessKeyId', value)

    def set_AWSSecretKeyId(self, value):
        """(required, string) The Secret Key ID provided by Amazon Web Services."""
        super(ConfirmSubscriptionInputSet, self)._set_input('AWSSecretKeyId', value)

    def set_AuthenticateOnUnsubscribed(self, value):
        """(optional, boolean) Specify 1 to require authentication when unsubscribing from this subscription."""
        super(ConfirmSubscriptionInputSet, self)._set_input('AuthenticateOnUnsubscribed', value)

    def set_Token(self, value):
        """(required, string) The short-lived token sent to an endpoint during the Subscribe action."""
        super(ConfirmSubscriptionInputSet, self)._set_input('Token', value)

    def set_TopicArn(self, value):
        """(required, string) The ARN of the topic whose subscription is being confirmed."""
        super(ConfirmSubscriptionInputSet, self)._set_input('TopicArn', value)

    def set_UserRegion(self, value):
        """(optional, string) The AWS region of the SNS endpoint to access; defaults to "us-east-1"."""
        super(ConfirmSubscriptionInputSet, self)._set_input('UserRegion', value)
class ConfirmSubscriptionResultSet(ResultSet):
    """
    Results of a ConfirmSubscription execution; retrieve the Choreo's
    outputs through the getter methods.
    """

    def getJSONFromString(self, str):
        """Parse a JSON string into Python objects."""
        # NOTE: the parameter name shadows the builtin `str`; kept as-is
        # for compatibility with any keyword callers.
        return json.loads(str)

    def get_Response(self):
        """(xml) The response from Amazon."""
        return self._output.get('Response', None)
class ConfirmSubscriptionChoreographyExecution(ChoreographyExecution):
    """Tracks an in-flight execution of the ConfirmSubscription Choreo."""

    def _make_result_set(self, response, path):
        """Wrap the raw response in the ConfirmSubscription result-set class."""
        return ConfirmSubscriptionResultSet(response, path)
| jordanemedlock/psychtruths | temboo/core/Library/Amazon/SNS/ConfirmSubscription.py | Python | apache-2.0 | 4,739 |
"""
A place for code to be called from the implementation of np.dtype
String handling is much easier to do correctly in python.
"""
import numpy as np
_kind_to_stem = {
'u': 'uint',
'i': 'int',
'c': 'complex',
'f': 'float',
'b': 'bool',
'V': 'void',
'O': 'object',
'M': 'datetime',
'm': 'timedelta',
'S': 'bytes',
'U': 'str',
}
def _kind_name(dtype):
try:
return _kind_to_stem[dtype.kind]
except KeyError as e:
raise RuntimeError(
"internal dtype error, unknown kind {!r}"
.format(dtype.kind)
) from None
def __str__(dtype):
if dtype.fields is not None:
return _struct_str(dtype, include_align=True)
elif dtype.subdtype:
return _subarray_str(dtype)
elif issubclass(dtype.type, np.flexible) or not dtype.isnative:
return dtype.str
else:
return dtype.name
def __repr__(dtype):
    """Implementation of ``repr(dtype)``."""
    spec = _construction_repr(dtype, include_align=False)
    # align=True is passed as a second constructor argument, not embedded
    if dtype.isalignedstruct:
        spec += ", align=True"
    return "dtype({})".format(spec)
def _unpack_field(dtype, offset, title=None):
"""
Helper function to normalize the items in dtype.fields.
Call as:
dtype, offset, title = _unpack_field(*dtype.fields[name])
"""
return dtype, offset, title
def _isunsized(dtype):
# PyDataType_ISUNSIZED
return dtype.itemsize == 0
def _construction_repr(dtype, include_align=False, short=False):
    """
    Build the construction spec of `dtype` without the surrounding
    'dtype()' text — a string, list-style string, or dict-style string
    depending on the dtype's nature. This is the first argument one would
    pass to the dtype constructor to reproduce the memory layout.

    Parameters
    ----------
    short : bool
        Use the compact 'kind'+'itemsize' spelling for scalars instead of
        the long type name.
    include_align : bool
        Embed 'align=True' inside the struct dict when the dtype carries
        the aligned-struct flag. Leave False when 'align' is supplied as a
        separate constructor argument (as the full repr does); in that
        mode the aligned flag is not preserved in the returned spec.
    """
    if dtype.fields is not None:
        return _struct_str(dtype, include_align=include_align)
    if dtype.subdtype:
        return _subarray_str(dtype)
    return _scalar_str(dtype, short=short)
def _scalar_str(dtype, short):
    """Return the quoted spec string for a non-structured, non-subarray dtype.

    `short` selects the compact kind+itemsize form (e.g. "'<f8'") over the
    long name form (e.g. "'float64'") for numeric types; other kinds have
    a single canonical spelling.
    """
    byteorder = _byte_order_str(dtype)
    if dtype.type == np.bool_:
        if short:
            return "'?'"
        else:
            return "'bool'"
    elif dtype.type == np.object_:
        # The object reference may be different sizes on different
        # platforms, so it should never include the itemsize here.
        return "'O'"
    elif dtype.type == np.string_:
        if _isunsized(dtype):
            return "'S'"
        else:
            return "'S%d'" % dtype.itemsize
    elif dtype.type == np.unicode_:
        if _isunsized(dtype):
            return "'%sU'" % byteorder
        else:
            # itemsize counts bytes; unicode is 4 bytes per character
            return "'%sU%d'" % (byteorder, dtype.itemsize / 4)
    # unlike the other types, subclasses of void are preserved - but
    # historically the repr does not actually reveal the subclass
    elif issubclass(dtype.type, np.void):
        if _isunsized(dtype):
            return "'V'"
        else:
            return "'V%d'" % dtype.itemsize
    elif dtype.type == np.datetime64:
        return "'%sM8%s'" % (byteorder, _datetime_metadata_str(dtype))
    elif dtype.type == np.timedelta64:
        return "'%sm8%s'" % (byteorder, _datetime_metadata_str(dtype))
    elif np.issubdtype(dtype, np.number):
        # Short repr with endianness, like '<f8'
        if short or dtype.byteorder not in ('=', '|'):
            return "'%s%c%d'" % (byteorder, dtype.kind, dtype.itemsize)
        # Longer repr, like 'float64'
        else:
            return "'%s%d'" % (_kind_name(dtype), 8*dtype.itemsize)
    elif dtype.isbuiltin == 2:
        # user-registered dtype: fall back to the type's own name
        return dtype.type.__name__
    else:
        raise RuntimeError(
            "Internal error: NumPy dtype unrecognized type number")
def _byte_order_str(dtype):
""" Normalize byteorder to '<' or '>' """
# hack to obtain the native and swapped byte order characters
swapped = np.dtype(int).newbyteorder('S')
native = swapped.newbyteorder('S')
byteorder = dtype.byteorder
if byteorder == '=':
return native.byteorder
if byteorder == 'S':
# TODO: this path can never be reached
return swapped.byteorder
elif byteorder == '|':
return ''
else:
return byteorder
def _datetime_metadata_str(dtype):
# TODO: this duplicates the C metastr_to_unicode functionality
unit, count = np.datetime_data(dtype)
if unit == 'generic':
return ''
elif count == 1:
return '[{}]'.format(unit)
else:
return '[{}{}]'.format(count, unit)
def _struct_dict_str(dtype, includealignedflag):
    """Return the dict-style spec string for a structured dtype, optionally
    embedding the 'aligned':True flag."""
    # unpack dtype.fields into parallel per-field lists
    names = dtype.names
    unpacked = [_unpack_field(*dtype.fields[name]) for name in names]
    fld_dtypes = [f[0] for f in unpacked]
    offsets = [f[1] for f in unpacked]
    titles = [f[2] for f in unpacked]
    # assemble the dict text piecewise
    parts = ["{'names':["]
    parts.append(",".join(repr(name) for name in names))
    parts.append("], 'formats':[")
    parts.append(",".join(
        _construction_repr(fld_dtype, short=True) for fld_dtype in fld_dtypes))
    parts.append("], 'offsets':[")
    parts.append(",".join("%d" % offset for offset in offsets))
    # titles are emitted only when at least one field actually has one
    if any(title is not None for title in titles):
        parts.append("], 'titles':[")
        parts.append(",".join(repr(title) for title in titles))
    parts.append("], 'itemsize':%d" % dtype.itemsize)
    if includealignedflag and dtype.isalignedstruct:
        parts.append(", 'aligned':True}")
    else:
        parts.append("}")
    return "".join(parts)
def _is_packed(dtype):
    """
    True when the structured dtype has a gapless layout: fields appear in
    declaration order, each starting exactly where the previous one ended,
    with no trailing padding. Such a dtype can be rebuilt from only the
    field names and formats, with no extra constructor parameters.
    (Mirrors the C helper `is_dtype_struct_simple_unaligned_layout`.)
    """
    expected_offset = 0
    for name in dtype.names:
        fld_dtype, fld_offset, _ = _unpack_field(*dtype.fields[name])
        if fld_offset != expected_offset:
            return False
        expected_offset += fld_dtype.itemsize
    return expected_offset == dtype.itemsize
def _struct_list_str(dtype):
    """Return the list-style spec string for a packed structured dtype."""
    items = []
    for name in dtype.names:
        fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name])
        # name part, optionally wrapped together with a title
        if title is not None:
            head = "({!r}, {!r}), ".format(title, name)
        else:
            head = "{!r}, ".format(name)
        # Special case subarray handling here
        if fld_dtype.subdtype is not None:
            base, shape = fld_dtype.subdtype
            body = "{}, {}".format(
                _construction_repr(base, short=True),
                shape
            )
        else:
            body = _construction_repr(fld_dtype, short=True)
        items.append("(" + head + body + ")")
    return "[" + ", ".join(items) + "]"
def _struct_str(dtype, include_align):
    """Return the spec string for a structured dtype, choosing the list or
    dict form as required."""
    # The list form cannot carry an 'align=' flag, so the dict form is used
    # whenever the aligned flag must be represented or the layout is padded.
    if (include_align and dtype.isalignedstruct) or not _is_packed(dtype):
        sub = _struct_dict_str(dtype, include_align)
    else:
        sub = _struct_list_str(dtype)
    # Non-default void subclasses are shown via their type
    if dtype.type != np.void:
        return "({t.__module__}.{t.__name__}, {f})".format(t=dtype.type, f=sub)
    return sub
def _subarray_str(dtype):
    """Return the '(base, shape)' spec string for a subarray dtype."""
    base, shape = dtype.subdtype
    return "({}, {})".format(_construction_repr(base, short=True), shape)
def _name_includes_bit_suffix(dtype):
    """Whether ``dtype.name`` should end with a bit count (e.g. 'int64')."""
    if dtype.type in (np.object_, np.bool_):
        # object pointer size varies by system (best omitted); bool's width
        # is implied
        return False
    # unsized flexible types have no width to report yet
    return not (np.issubdtype(dtype, np.flexible) and _isunsized(dtype))
def _name_get(dtype):
    """Implementation of the ``dtype.name`` getter (the "bit name")."""
    if dtype.isbuiltin == 2:
        # user-registered dtypes don't promise to do anything special
        return dtype.type.__name__
    if issubclass(dtype.type, np.void):
        # historically, void subclasses preserve their name, eg `record64`
        stem = dtype.type.__name__
    else:
        stem = _kind_name(dtype)
    # append the bit count when meaningful
    if _name_includes_bit_suffix(dtype):
        stem += str(dtype.itemsize * 8)
    # append unit metadata to datetimes/timedeltas
    if dtype.type in (np.datetime64, np.timedelta64):
        stem += _datetime_metadata_str(dtype)
    return stem
| simongibbons/numpy | numpy/core/_dtype.py | Python | bsd-3-clause | 9,843 |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
'''
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
from ..util.sampledata import package_csv
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
# Public names exported by ``from bokeh.sampledata.commits import *``:
# the ``data`` DataFrame defined at the bottom of this module.
__all__ = (
    'data',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def _read_data():
    ''' Load the packaged commits sample data, converting timestamps from
    GMT to US/Central and adding a ``time`` column with the time-of-day.
    '''
    df = package_csv('commits', 'commits.txt.gz', parse_dates=True,
                     header=None, names=['day', 'datetime'], index_col='datetime')
    df = df.tz_localize('GMT').tz_convert('US/Central')
    df['time'] = df.index.time
    return df

#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------

data = _read_data()
| dennisobrien/bokeh | bokeh/sampledata/commits.py | Python | bsd-3-clause | 2,129 |
import datetime
import uuid
from decimal import Decimal
from django.core import checks, exceptions, serializers
from django.core.serializers.json import DjangoJSONEncoder
from django.forms import CharField, Form, widgets
from django.test.utils import isolate_apps
from django.utils.html import escape
from . import PostgreSQLTestCase
from .models import JSONModel, PostgreSQLModel
try:
from django.contrib.postgres import forms
from django.contrib.postgres.fields import JSONField
except ImportError:
pass
class TestSaveLoad(PostgreSQLTestCase):
    """Round-trip JSON values of every supported top-level type through the ORM."""

    def _saved_value(self, value):
        # Persist a single row holding *value* and return the reloaded field.
        JSONModel.objects.create(field=value)
        return JSONModel.objects.get().field

    def test_null(self):
        self.assertIsNone(self._saved_value(None))

    def test_empty_object(self):
        self.assertEqual(self._saved_value({}), {})

    def test_empty_list(self):
        self.assertEqual(self._saved_value([]), [])

    def test_boolean(self):
        # assertIs: the stored value must come back as the True singleton,
        # not merely something truthy.
        self.assertIs(self._saved_value(True), True)

    def test_string(self):
        self.assertEqual(self._saved_value('why?'), 'why?')

    def test_number(self):
        self.assertEqual(self._saved_value(1), 1)

    def test_realistic_object(self):
        payload = {
            'a': 'b',
            'c': 1,
            'd': ['e', {'f': 'g'}],
            'h': True,
            'i': False,
            'j': None,
        }
        self.assertEqual(self._saved_value(payload), payload)

    def test_custom_encoding(self):
        """
        JSONModel.field_custom has a custom DjangoJSONEncoder.
        """
        unique_id = uuid.uuid4()
        stored = {
            'date': datetime.date(2016, 8, 12),
            'datetime': datetime.datetime(2016, 8, 12, 13, 44, 47, 575981),
            'decimal': Decimal('10.54'),
            'uuid': unique_id,
        }
        # DjangoJSONEncoder serializes rich types to their string forms.
        expected = {
            'date': '2016-08-12',
            'datetime': '2016-08-12T13:44:47.575',
            'decimal': '10.54',
            'uuid': str(unique_id),
        }
        JSONModel.objects.create(field_custom=stored)
        self.assertEqual(JSONModel.objects.get().field_custom, expected)
class TestQuerying(PostgreSQLTestCase):
    """Lookup and transform behaviour of JSONField.

    ``setUpTestData`` creates a fixed list of rows; the assertions below
    reference them positionally:
    0=None, 1=True, 2=False, 3='yes', 4=7, 5=[], 6={}, 7=flat dict,
    8=nested dict, 9=[1, [2]], 10={'k','l' bools}, 11={'foo': 'bar'}.
    """
    @classmethod
    def setUpTestData(cls):
        cls.objs = [
            JSONModel.objects.create(field=None),
            JSONModel.objects.create(field=True),
            JSONModel.objects.create(field=False),
            JSONModel.objects.create(field='yes'),
            JSONModel.objects.create(field=7),
            JSONModel.objects.create(field=[]),
            JSONModel.objects.create(field={}),
            JSONModel.objects.create(field={
                'a': 'b',
                'c': 1,
            }),
            JSONModel.objects.create(field={
                'a': 'b',
                'c': 1,
                'd': ['e', {'f': 'g'}],
                'h': True,
                'i': False,
                'j': None,
                'k': {'l': 'm'},
            }),
            JSONModel.objects.create(field=[1, [2]]),
            JSONModel.objects.create(field={
                'k': True,
                'l': False,
            }),
            JSONModel.objects.create(field={'foo': 'bar'}),
        ]
    def test_exact(self):
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__exact={}),
            [self.objs[6]]
        )
    def test_exact_complex(self):
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__exact={'a': 'b', 'c': 1}),
            [self.objs[7]]
        )
    def test_isnull(self):
        # SQL NULL, i.e. the row stored with field=None (not JSON null).
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__isnull=True),
            [self.objs[0]]
        )
    def test_isnull_key(self):
        # key__isnull works the same as has_key='key'.
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__a__isnull=True),
            self.objs[:7] + self.objs[9:]
        )
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__a__isnull=False),
            [self.objs[7], self.objs[8]]
        )
    def test_contains(self):
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__contains={'a': 'b'}),
            [self.objs[7], self.objs[8]]
        )
    def test_contained_by(self):
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__contained_by={'a': 'b', 'c': 1, 'h': True}),
            [self.objs[6], self.objs[7]]
        )
    def test_has_key(self):
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__has_key='a'),
            [self.objs[7], self.objs[8]]
        )
    def test_has_keys(self):
        # All listed keys must be present.
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__has_keys=['a', 'c', 'h']),
            [self.objs[8]]
        )
    def test_has_any_keys(self):
        # At least one of the listed keys must be present.
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__has_any_keys=['c', 'l']),
            [self.objs[7], self.objs[8], self.objs[10]]
        )
    def test_shallow_list_lookup(self):
        # Integer path component indexes into a JSON array.
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__0=1),
            [self.objs[9]]
        )
    def test_shallow_obj_lookup(self):
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__a='b'),
            [self.objs[7], self.objs[8]]
        )
    def test_deep_lookup_objs(self):
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__k__l='m'),
            [self.objs[8]]
        )
    def test_shallow_lookup_obj_target(self):
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__k={'l': 'm'}),
            [self.objs[8]]
        )
    def test_deep_lookup_array(self):
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__1__0=2),
            [self.objs[9]]
        )
    def test_deep_lookup_mixed(self):
        # Mixed object-key / array-index path.
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__d__1__f='g'),
            [self.objs[8]]
        )
    def test_deep_lookup_transform(self):
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__c__gt=1),
            []
        )
        self.assertSequenceEqual(
            JSONModel.objects.filter(field__c__lt=5),
            [self.objs[7], self.objs[8]]
        )
    def test_usage_in_subquery(self):
        self.assertSequenceEqual(
            JSONModel.objects.filter(id__in=JSONModel.objects.filter(field__c=1)),
            self.objs[7:9]
        )
    def test_iexact(self):
        # The quoted form must not match: the lookup value is treated as
        # text, not as a JSON literal.
        self.assertTrue(JSONModel.objects.filter(field__foo__iexact='BaR').exists())
        self.assertFalse(JSONModel.objects.filter(field__foo__iexact='"BaR"').exists())
    def test_icontains(self):
        self.assertFalse(JSONModel.objects.filter(field__foo__icontains='"bar"').exists())
    def test_startswith(self):
        self.assertTrue(JSONModel.objects.filter(field__foo__startswith='b').exists())
    def test_istartswith(self):
        self.assertTrue(JSONModel.objects.filter(field__foo__istartswith='B').exists())
    def test_endswith(self):
        self.assertTrue(JSONModel.objects.filter(field__foo__endswith='r').exists())
    def test_iendswith(self):
        self.assertTrue(JSONModel.objects.filter(field__foo__iendswith='R').exists())
    def test_regex(self):
        self.assertTrue(JSONModel.objects.filter(field__foo__regex=r'^bar$').exists())
    def test_iregex(self):
        self.assertTrue(JSONModel.objects.filter(field__foo__iregex=r'^bAr$').exists())
@isolate_apps('postgres_tests')
class TestChecks(PostgreSQLTestCase):
    """System-check behaviour for JSONField ``default`` values."""

    def test_invalid_default(self):
        # A shared mutable instance as default must trigger postgres.E003.
        class MyModel(PostgreSQLModel):
            field = JSONField(default={})

        expected_warning = checks.Warning(
            msg=(
                "JSONField default should be a callable instead of an "
                "instance so that it's not shared between all field "
                "instances."
            ),
            hint='Use a callable instead, e.g., use `dict` instead of `{}`.',
            obj=MyModel._meta.get_field('field'),
            id='postgres.E003',
        )
        self.assertEqual(MyModel().check(), [expected_warning])

    def test_valid_default(self):
        # A callable default is fine.
        class MyModel(PostgreSQLModel):
            field = JSONField(default=dict)

        self.assertEqual(MyModel().check(), [])

    def test_valid_default_none(self):
        # None is immutable, so it is also acceptable as a default.
        class MyModel(PostgreSQLModel):
            field = JSONField(default=None)

        self.assertEqual(MyModel().check(), [])
class TestSerialization(PostgreSQLTestCase):
    """Round-trip JSONModel instances through the JSON serializer."""

    test_data = (
        '[{"fields": {"field": {"a": "b", "c": null}, "field_custom": null}, '
        '"model": "postgres_tests.jsonmodel", "pk": null}]'
    )

    def test_dumping(self):
        serialized = serializers.serialize('json', [JSONModel(field={'a': 'b', 'c': None})])
        self.assertJSONEqual(serialized, self.test_data)

    def test_loading(self):
        deserialized = list(serializers.deserialize('json', self.test_data))
        self.assertEqual(deserialized[0].object.field, {'a': 'b', 'c': None})
class TestValidation(PostgreSQLTestCase):
    """Model-field cleaning and encoder-argument validation."""

    def test_not_serializable(self):
        # timedelta is not JSON-serializable with the default encoder.
        field = JSONField()
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean(datetime.timedelta(days=1), None)
        exc = cm.exception
        self.assertEqual(exc.code, 'invalid')
        self.assertEqual(exc.message % exc.params, "Value must be valid JSON.")

    def test_custom_encoder(self):
        # The encoder must be passed as a callable (class), not an instance.
        with self.assertRaisesMessage(ValueError, "The encoder parameter must be a callable object."):
            field = JSONField(encoder=DjangoJSONEncoder())
        # With DjangoJSONEncoder, timedelta survives clean() unchanged.
        field = JSONField(encoder=DjangoJSONEncoder)
        self.assertEqual(field.clean(datetime.timedelta(days=1), None), datetime.timedelta(days=1))
class TestFormField(PostgreSQLTestCase):
    """Behaviour of ``django.contrib.postgres.forms.JSONField`` in forms."""
    def test_valid(self):
        field = forms.JSONField()
        value = field.clean('{"a": "b"}')
        self.assertEqual(value, {'a': 'b'})
    def test_valid_empty(self):
        # An optional field cleans the empty string to None.
        field = forms.JSONField(required=False)
        value = field.clean('')
        self.assertIsNone(value)
    def test_invalid(self):
        field = forms.JSONField()
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean('{some badly formed: json}')
        self.assertEqual(cm.exception.messages[0], "'{some badly formed: json}' value must be valid JSON.")
    def test_formfield(self):
        # The model field's default form field is the postgres forms.JSONField.
        model_field = JSONField()
        form_field = model_field.formfield()
        self.assertIsInstance(form_field, forms.JSONField)
    def test_formfield_disabled(self):
        # A disabled field renders its initial value, ignoring submitted data.
        class JsonForm(Form):
            name = CharField()
            jfield = forms.JSONField(disabled=True)
        form = JsonForm({'name': 'xyz', 'jfield': '["bar"]'}, initial={'jfield': ['foo']})
        self.assertIn('["foo"]</textarea>', form.as_p())
    def test_prepare_value(self):
        field = forms.JSONField()
        self.assertEqual(field.prepare_value({'a': 'b'}), '{"a": "b"}')
        self.assertEqual(field.prepare_value(None), 'null')
        self.assertEqual(field.prepare_value('foo'), '"foo"')
    def test_redisplay_wrong_input(self):
        """
        When displaying a bound form (typically due to invalid input), the form
        should not overquote JSONField inputs.
        """
        class JsonForm(Form):
            name = CharField(max_length=2)
            jfield = forms.JSONField()
        # JSONField input is fine, name is too long
        form = JsonForm({'name': 'xyz', 'jfield': '["foo"]'})
        self.assertIn('["foo"]</textarea>', form.as_p())
        # This time, the JSONField input is wrong
        form = JsonForm({'name': 'xy', 'jfield': '{"foo"}'})
        # Appears once in the textarea and once in the error message
        self.assertEqual(form.as_p().count(escape('{"foo"}')), 2)
    def test_widget(self):
        """The default widget of a JSONField is a Textarea."""
        field = forms.JSONField()
        self.assertIsInstance(field.widget, widgets.Textarea)
    def test_custom_widget_kwarg(self):
        """The widget can be overridden with a kwarg."""
        field = forms.JSONField(widget=widgets.Input)
        self.assertIsInstance(field.widget, widgets.Input)
    def test_custom_widget_attribute(self):
        """The widget can be overridden with an attribute."""
        class CustomJSONField(forms.JSONField):
            widget = widgets.Input
        field = CustomJSONField()
        self.assertIsInstance(field.widget, widgets.Input)
    def test_already_converted_value(self):
        # Cleaning an already-converted (non-string) value is idempotent.
        field = forms.JSONField(required=False)
        tests = [
            '["a", "b", "c"]', '{"a": 1, "b": 2}', '1', '1.5', '"foo"',
            'true', 'false', 'null',
        ]
        for json_string in tests:
            val = field.clean(json_string)
            self.assertEqual(field.clean(val), val)
    def test_has_changed(self):
        # Comparison is on parsed JSON values, so key order / type coercion
        # in the raw string does not count as a change.
        field = forms.JSONField()
        self.assertIs(field.has_changed({'a': True}, '{"a": 1}'), True)
        self.assertIs(field.has_changed({'a': 1, 'b': 2}, '{"b": 2, "a": 1}'), False)
| uranusjr/django | tests/postgres_tests/test_json.py | Python | bsd-3-clause | 13,888 |
#-*- coding: utf-8 -*-
import inspect
from django import forms
from django.conf import settings as globalsettings
from django.contrib.admin.widgets import ForeignKeyRawIdWidget
from django.contrib.admin.sites import site
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.db import models
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from filer.utils.compatibility import truncate_words
from filer.models import File
from filer import settings as filer_settings
import logging
logger = logging.getLogger(__name__)
class AdminFileWidget(ForeignKeyRawIdWidget):
    """Raw-id admin widget for filer File foreign keys.

    Renders the hidden FK input plus a thumbnail/description block and a
    link into the filer directory listing for picking a file.

    NOTE: this is Python 2 code (``except Exception,e`` syntax).
    """
    choices = None
    def render(self, name, value, attrs=None):
        # Render the widget HTML from the admin_file.html template.
        obj = self.obj_for_value(value)
        css_id = attrs.get('id', 'id_image_x')
        css_id_thumbnail_img = "%s_thumbnail_img" % css_id
        css_id_description_txt = "%s_description_txt" % css_id
        related_url = None
        if value:
            try:
                file_obj = File.objects.get(pk=value)
                related_url = file_obj.logical_folder.\
                              get_admin_directory_listing_url_path()
            except Exception,e:
                # catch exception and manage it. We can re-raise it for debugging
                # purposes and/or just logging it, provided user configured
                # proper logging configuration
                if filer_settings.FILER_ENABLE_LOGGING:
                    logger.error('Error while rendering file widget: %s',e)
                if filer_settings.FILER_DEBUG:
                    raise e
        if not related_url:
            # fall back to the "last visited" directory listing
            related_url = reverse('admin:filer-directory_listing-last')
        params = self.url_parameters()
        if params:
            lookup_url = '?' + '&'.join(
                                ['%s=%s' % (k, v) for k, v in params.items()])
        else:
            lookup_url = ''
        if not 'class' in attrs:
            # The JavaScript looks for this hook.
            attrs['class'] = 'vForeignKeyRawIdAdminField'
        # rendering the super for ForeignKeyRawIdWidget on purpose here because
        # we only need the input and none of the other stuff that
        # ForeignKeyRawIdWidget adds
        hidden_input = super(ForeignKeyRawIdWidget, self).render(
                                                            name, value, attrs)
        filer_static_prefix = filer_settings.FILER_STATICMEDIA_PREFIX
        if not filer_static_prefix[-1] == '/':
            # ensure a trailing slash so template concatenation is safe
            filer_static_prefix += '/'
        context = {
            'hidden_input': hidden_input,
            'lookup_url': '%s%s' % (related_url, lookup_url),
            'thumb_id': css_id_thumbnail_img,
            'span_id': css_id_description_txt,
            'object': obj,
            'lookup_name': name,
            'filer_static_prefix': filer_static_prefix,
            'clear_id': '%s_clear' % css_id,
            'id': css_id,
        }
        html = render_to_string('admin/filer/widgets/admin_file.html', context)
        return mark_safe(html)
    def label_for_value(self, value):
        # Short bold label for the selected file (truncated to 14 words).
        obj = self.obj_for_value(value)
        return '&nbsp;<strong>%s</strong>' % truncate_words(obj, 14)
    def obj_for_value(self, value):
        # Resolve the raw FK value to a model instance; None on any failure
        # (missing object, bad value, etc.).
        try:
            key = self.rel.get_related_field().name
            obj = self.rel.to._default_manager.get(**{key: value})
        except:
            obj = None
        return obj
    class Media:
        js = (filer_settings.FILER_STATICMEDIA_PREFIX + 'js/popup_handling.js',)
class AdminFileFormField(forms.ModelChoiceField):
    """ModelChoiceField that always renders with the filer AdminFileWidget.

    Any caller-supplied ``widget`` kwarg is deliberately discarded so the
    raw-id filer widget cannot be overridden.
    """
    widget = AdminFileWidget
    def __init__(self, rel, queryset, to_field_name, *args, **kwargs):
        self.rel = rel
        self.queryset = queryset
        self.to_field_name = to_field_name
        self.max_value = None
        self.min_value = None
        # Remove a caller-supplied widget from kwargs; the result was never
        # used (previously bound to a dead local), only the pop matters.
        kwargs.pop('widget', None)
        if 'admin_site' in inspect.getargspec(self.widget.__init__)[0]: # Django 1.4
            widget_instance = self.widget(rel, site)
        else: # Django <= 1.3
            widget_instance = self.widget(rel)
        # Skip ModelChoiceField.__init__ on purpose: it would rebuild the
        # widget/choices; go straight to forms.Field with our widget.
        forms.Field.__init__(self, widget=widget_instance, *args, **kwargs)
    def widget_attrs(self, widget):
        # Propagate the required flag onto the widget; no extra HTML attrs.
        widget.required = self.required
        return {}
class FilerFileField(models.ForeignKey):
    """ForeignKey hard-wired to filer's File model.

    Always targets ``File`` and produces an ``AdminFileFormField`` in forms.
    """
    default_form_class = AdminFileFormField
    default_model_class = File
    def __init__(self, **kwargs):
        # The relation target is fixed: a FilerFileField always points at
        # the File model, so callers never pass a `to` argument.
        return super(FilerFileField, self).__init__(
            self.default_model_class, **kwargs)
    def formfield(self, **kwargs):
        # Start from our defaults and let caller-supplied kwargs win.
        options = {
            'form_class': self.default_form_class,
            'rel': self.rel,
        }
        options.update(kwargs)
        return super(FilerFileField, self).formfield(**options)
    def south_field_triple(self):
        "Returns a suitable description of this field for South."
        # Introspect ourselves and present the result as a plain ForeignKey,
        # since we only fix the target model.
        from south.modelsinspector import introspector
        args, kwargs = introspector(self)
        return ("django.db.models.fields.related.ForeignKey", args, kwargs)
| thomasbilk/django-filer | filer/fields/file.py | Python | bsd-3-clause | 5,490 |
##########################################################################
#
# Copyright (c) 2010-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import hou
import IECore
import IECoreHoudini
import unittest
import os
class TestToHoudiniCurvesConverter( IECoreHoudini.TestCase ) :
__testScene = "test/converterTest.hip"
__curveCoordinates = [
IECore.V3fVectorData( [ IECore.V3f( 2.42892,0,-1.04096 ), IECore.V3f( 1.69011,0,-9.88746 ), IECore.V3f( 5.74288,0,-4.50183 ), IECore.V3f( 2.69113,0,-2.78439 ), IECore.V3f( 5.8923,0,1.53021 ), IECore.V3f( 6.20965,-9.53674e-07,2.03933 ), IECore.V3f( 2.72012,0,2.5738 ), IECore.V3f( 1.76971,0,-0.632637 ) ] ),
IECore.V3fVectorData( [ IECore.V3f( -0.560781,0,-1.04096 ), IECore.V3f( 2.21995,0,-6.31734 ), IECore.V3f( 4.77513,0,-6.61752 ), IECore.V3f( 4.10862,0,-2.78439 ), IECore.V3f( 4.29081,0,1.53021 ), IECore.V3f( 6.20965,-9.53674e-07,3.7489 ), IECore.V3f( -2.61584,0,2.5738 ), IECore.V3f( -1.45801,0,0.780965 ) ] ),
IECore.V3fVectorData( [ IECore.V3f( 2.42892,0,-1.04096 ), IECore.V3f( 2.21995,0,-4.51254 ), IECore.V3f( 4.77513,0,-4.50183 ), IECore.V3f( 6.32944,0,-2.78439 ), IECore.V3f( 7.231,0,1.53021 ), IECore.V3f( 6.20965,-9.53674e-07,3.7489 ), IECore.V3f( 2.72012,0,2.5738 ), IECore.V3f( 1.76971,0,0.780965 ) ] ),
IECore.V3fVectorData( [ IECore.V3f( 5.83427,0,-1.04096 ), IECore.V3f( 2.21995,0,-4.51254 ), IECore.V3f( 6.14141,0,-4.50183 ), IECore.V3f( 7.48932,0,-2.78439 ), IECore.V3f( 9.0197,0,1.53021 ), IECore.V3f( 6.20965,-9.53674e-07,1.2141 ), IECore.V3f( 2.72012,0,2.5738 ), IECore.V3f( 3.23728,0,0.780965 ) ] )
]
	def curves( self, basis=IECore.CubicBasisf.linear(), periodic=False, numCurves=4 ) :
		"""Build a CurvesPrimitive fixture with primitive variables of every
		supported type at Constant (detail), Vertex (point) and Uniform
		(primitive) interpolation.

		For open (non-periodic) b-spline curves, the first and last point of
		each curve are duplicated twice (8 -> 12 vertices per curve) to match
		Houdini's end-point multiplicity for open splines.

		NOTE(review): the default ``basis`` argument is evaluated once at
		definition time; assumed safe because the basis value is not mutated.
		"""
		vertsPerCurve = IECore.IntVectorData()
		pData = IECore.V3fVectorData()
		pData.setInterpretation( IECore.GeometricData.Interpretation.Point )
		for i in range( 0, numCurves ) :
			# cycle through the 4 canned coordinate sets
			p = TestToHoudiniCurvesConverter.__curveCoordinates[i%4]
			if not periodic and basis == IECore.CubicBasisf.bSpline() :
				# open b-splines gain 2 duplicated points at each end
				vertsPerCurve.append( len(p) + 4 )
			else :
				vertsPerCurve.append( len(p) )
			pData.extend( p )
		curves = IECore.CurvesPrimitive( vertsPerCurve, basis, periodic )
		floatData = IECore.FloatData( 1.5 )
		v2fData = IECore.V2fData( IECore.V2f( 1.5, 2.5 ) )
		v3fData = IECore.V3fData( IECore.V3f( 1.5, 2.5, 3.5 ) )
		color3fData = IECore.Color3fData( IECore.Color3f( 1.5, 2.5, 3.5 ) )
		intData = IECore.IntData( 1 )
		v2iData = IECore.V2iData( IECore.V2i( 1, 2 ) )
		v3iData = IECore.V3iData( IECore.V3i( 1, 2, 3 ) )
		stringData = IECore.StringData( "this is a string" )
		intRange = range( 1, pData.size()+1 )
		floatVectorData = IECore.FloatVectorData( [ x+0.5 for x in intRange ] )
		v2fVectorData = IECore.V2fVectorData( [ IECore.V2f( x, x+0.5 ) for x in intRange ] )
		v3fVectorData = IECore.V3fVectorData( [ IECore.V3f( x, x+0.5, x+0.75 ) for x in intRange ] )
		color3fVectorData = IECore.Color3fVectorData( [ IECore.Color3f( x, x+0.5, x+0.75 ) for x in intRange ] )
		intVectorData = IECore.IntVectorData( intRange )
		v2iVectorData = IECore.V2iVectorData( [ IECore.V2i( x, -x ) for x in intRange ] )
		v3iVectorData = IECore.V3iVectorData( [ IECore.V3i( x, -x, x*2 ) for x in intRange ] )
		stringVectorData = IECore.StringVectorData( [ "string number %d!" % x for x in intRange ] )
		detailInterpolation = IECore.PrimitiveVariable.Interpolation.Constant
		pointInterpolation = IECore.PrimitiveVariable.Interpolation.Vertex
		primitiveInterpolation = IECore.PrimitiveVariable.Interpolation.Uniform
		# add all valid detail attrib types
		curves["floatDetail"] = IECore.PrimitiveVariable( detailInterpolation, floatData )
		curves["v2fDetail"] = IECore.PrimitiveVariable( detailInterpolation, v2fData )
		curves["v3fDetail"] = IECore.PrimitiveVariable( detailInterpolation, v3fData )
		curves["color3fDetail"] = IECore.PrimitiveVariable( detailInterpolation, color3fData )
		curves["intDetail"] = IECore.PrimitiveVariable( detailInterpolation, intData )
		curves["v2iDetail"] = IECore.PrimitiveVariable( detailInterpolation, v2iData )
		curves["v3iDetail"] = IECore.PrimitiveVariable( detailInterpolation, v3iData )
		curves["stringDetail"] = IECore.PrimitiveVariable( detailInterpolation, stringData )
		# add all valid point attrib types
		if not periodic and basis == IECore.CubicBasisf.bSpline() :
			# rebuild every per-point array with the first/last values of each
			# curve repeated twice, to line up with the duplicated end points
			modPData = IECore.V3fVectorData()
			modPData.setInterpretation( IECore.GeometricData.Interpretation.Point )
			floatPointData = IECore.FloatVectorData()
			v2fPointData = IECore.V2fVectorData()
			v3fPointData = IECore.V3fVectorData()
			color3fPointData = IECore.Color3fVectorData()
			intPointData = IECore.IntVectorData()
			v2iPointData = IECore.V2iVectorData()
			v3iPointData = IECore.V3iVectorData()
			stringPointData = IECore.StringVectorData()
			datas = [ modPData, floatPointData, v2fPointData, v3fPointData, color3fPointData, intPointData, v2iPointData, v3iPointData, stringPointData ]
			rawDatas = [ pData, floatVectorData, v2fVectorData, v3fVectorData, color3fVectorData, intVectorData, v2iVectorData, v3iVectorData, stringVectorData ]
			pIndex = 0
			for i in range( 0, numCurves ) :
				for j in range( 0, len(datas) ) :
					index = 8*i
					# duplicate first point twice, copy the 8 originals,
					# then duplicate the last point twice
					datas[j].extend( [ rawDatas[j][index], rawDatas[j][index] ] )
					datas[j].extend( rawDatas[j][index:index+8] )
					datas[j].extend( [ rawDatas[j][index+7], rawDatas[j][index+7] ] )
			curves["P"] = IECore.PrimitiveVariable( pointInterpolation, modPData )
			curves["floatPoint"] = IECore.PrimitiveVariable( pointInterpolation, floatPointData )
			curves["v2fPoint"] = IECore.PrimitiveVariable( pointInterpolation,v2fPointData )
			curves["v3fPoint"] = IECore.PrimitiveVariable( pointInterpolation, v3fPointData )
			curves["color3fPoint"] = IECore.PrimitiveVariable( pointInterpolation, color3fPointData )
			curves["intPoint"] = IECore.PrimitiveVariable( pointInterpolation, intPointData )
			curves["v2iPoint"] = IECore.PrimitiveVariable( pointInterpolation, v2iPointData )
			curves["v3iPoint"] = IECore.PrimitiveVariable( pointInterpolation, v3iPointData )
		else :
			curves["P"] = IECore.PrimitiveVariable( pointInterpolation, pData )
			curves["floatPoint"] = IECore.PrimitiveVariable( pointInterpolation, floatVectorData[:8*numCurves] )
			curves["v2fPoint"] = IECore.PrimitiveVariable( pointInterpolation, v2fVectorData[:8*numCurves] )
			curves["v3fPoint"] = IECore.PrimitiveVariable( pointInterpolation, v3fVectorData[:8*numCurves] )
			curves["color3fPoint"] = IECore.PrimitiveVariable( pointInterpolation, color3fVectorData[:8*numCurves] )
			curves["intPoint"] = IECore.PrimitiveVariable( pointInterpolation, intVectorData[:8*numCurves] )
			curves["v2iPoint"] = IECore.PrimitiveVariable( pointInterpolation, v2iVectorData[:8*numCurves] )
			curves["v3iPoint"] = IECore.PrimitiveVariable( pointInterpolation, v3iVectorData[:8*numCurves] )
		# string point data is stored Constant with a separate indices variable
		curves["stringPoint"] = IECore.PrimitiveVariable( detailInterpolation, stringVectorData[:8*numCurves] )
		curves["stringPointIndices"] = IECore.PrimitiveVariable( pointInterpolation, IECore.IntVectorData( range( 0, 8*numCurves ) ) )
		# add all valid primitive attrib types
		curves["floatPrim"] = IECore.PrimitiveVariable( primitiveInterpolation, floatVectorData[:numCurves] )
		curves["v2fPrim"] = IECore.PrimitiveVariable( primitiveInterpolation, v2fVectorData[:numCurves] )
		curves["v3fPrim"] = IECore.PrimitiveVariable( primitiveInterpolation, v3fVectorData[:numCurves] )
		curves["color3fPrim"] = IECore.PrimitiveVariable( primitiveInterpolation, color3fVectorData[:numCurves] )
		curves["intPrim"] = IECore.PrimitiveVariable( primitiveInterpolation, intVectorData[:numCurves] )
		curves["v2iPrim"] = IECore.PrimitiveVariable( primitiveInterpolation, v2iVectorData[:numCurves] )
		curves["v3iPrim"] = IECore.PrimitiveVariable( primitiveInterpolation, v3iVectorData[:numCurves] )
		curves["stringPrim"] = IECore.PrimitiveVariable( detailInterpolation, stringVectorData[:numCurves] )
		curves["stringPrimIndices"] = IECore.PrimitiveVariable( primitiveInterpolation, IECore.IntVectorData( range( 0, numCurves ) ) )
		self.assert_( curves.arePrimitiveVariablesValid() )
		return curves
def emptySop( self ) :
obj = hou.node( "/obj" )
geo = obj.createNode( "geo", run_init_scripts=False )
null = geo.createNode( "null" )
return null
def curveSop( self, order=2, periodic=False, parent=None, coordIndex=0 ) :
if not parent :
obj = hou.node("/obj")
parent = obj.createNode("geo", run_init_scripts=False)
curve = parent.createNode( "curve" )
curve.parm( "type" ).set( 1 ) # NURBS
curve.parm( "order" ).set( order )
curve.parm( "close" ).set( periodic )
coordStr = ""
coords = TestToHoudiniCurvesConverter.__curveCoordinates[coordIndex]
for p in coords :
coordStr += "%f,%f,%f " % ( p[0], p[1], p[2] )
curve.parm( "coords" ).set( coordStr )
return curve
def curvesSop( self, numCurves=4, order=2, periodic=False ) :
curves = [ self.curveSop( order, periodic ) ]
geo = curves[0].parent()
for i in range( 0, numCurves-1 ) :
curves.append( self.curveSop( order, periodic, geo, i%4 ) )
merge = geo.createNode( "merge" )
for i in range( 0, len(curves) ) :
merge.setInput( i, curves[i] )
return merge
	def comparePrimAndSop( self, prim, sop ) :
		"""Assert the converted Houdini SOP geometry matches ``prim``:
		detail, point and primitive attributes, vertex counts per curve,
		then a full round-trip back through FromHoudiniCurvesConverter.
		"""
		geo = sop.geometry()
		# scalar detail attribs compare by value...
		for key in [ "floatDetail", "intDetail", "stringDetail" ] :
			self.assertEqual( prim[key].data.value, geo.attribValue( key ) )
		# ...compound ones as tuples
		for key in [ "v2fDetail", "v3fDetail", "color3fDetail", "v2iDetail", "v3iDetail" ] :
			self.assertEqual( tuple(prim[key].data.value), geo.attribValue( key ) )
		sopPoints = geo.points()
		for key in [ "floatPoint", "intPoint" ] :
			data = prim[key].data
			for i in range( 0, data.size() ) :
				self.assertEqual( data[i], sopPoints[i].attribValue( key ) )
		for key in [ "P", "v2fPoint", "v3fPoint", "color3fPoint", "v2iPoint", "v3iPoint" ] :
			data = prim[key].data
			for i in range( 0, data.size() ) :
				self.assertEqual( tuple(data[i]), sopPoints[i].attribValue( key ) )
		# string point data is indexed: look up via stringPointIndices
		data = prim["stringPoint"].data
		dataIndices = prim["stringPointIndices"].data
		for i in range( 0, data.size() ) :
			self.assertEqual( data[ dataIndices[i] ], sopPoints[i].attribValue( "stringPoint" ) )
		sopPrims = geo.prims()
		self.assertEqual( len(sopPrims), prim.numCurves() )
		for key in [ "floatPrim", "intPrim" ] :
			data = prim[key].data
			for i in range( 0, data.size() ) :
				self.assertEqual( data[i], sopPrims[i].attribValue( key ) )
		for key in [ "v2fPrim", "v3fPrim", "color3fPrim", "v2iPrim", "v3iPrim" ] :
			data = prim[key].data
			for i in range( 0, data.size() ) :
				self.assertEqual( tuple(data[i]), sopPrims[i].attribValue( key ) )
		data = prim["stringPrim"].data
		dataIndices = prim["stringPrimIndices"].data
		for i in range( 0, data.size() ) :
			self.assertEqual( data[ dataIndices[i] ], sopPrims[i].attribValue( "stringPrim" ) )
		sopVerts = []
		for i in range( 0, len(sopPrims) ) :
			verts = list(sopPrims[i].vertices())
			self.assertEqual( len(verts), prim.verticesPerCurve()[i] )
			# NOTE(review): vertex order is reversed before collection —
			# presumably to match cortex vs houdini ordering; sopVerts is
			# only length-checked below.
			verts.reverse()
			sopVerts.extend( verts )
		self.assertEqual( len(sopVerts), prim["P"].data.size() )
		# round-trip: converting the SOP back must reproduce prim exactly
		result = IECoreHoudini.FromHoudiniCurvesConverter( sop ).convert()
		self.assertEqual( result.verticesPerCurve(), prim.verticesPerCurve() )
		self.assertEqual( result.keys(), prim.keys() )
		for key in prim.keys() :
			self.assertEqual( result[key], prim[key] )
		self.assertEqual( result, prim )
	def compareOpenSplinePrimAndSop( self, prim, sop ) :
		"""Like comparePrimAndSop, but for open b-spline curves, where each
		Houdini curve has 4 fewer vertices than the cortex primitive: the
		cortex side stores each end point 3 times (see ``curves``), so per
		curve the end vertices advance pIndex by 3 and interior ones by 1.
		"""
		geo = sop.geometry()
		for key in [ "floatDetail", "intDetail", "stringDetail" ] :
			self.assertEqual( prim[key].data.value, geo.attribValue( key ) )
		for key in [ "v2fDetail", "v3fDetail", "color3fDetail", "v2iDetail", "v3iDetail" ] :
			self.assertEqual( tuple(prim[key].data.value), geo.attribValue( key ) )
		sopPrims = geo.prims()
		pIndex = 0
		for i in range( prim.numCurves() ) :
			hVertices = sopPrims[i].vertices()
			# cortex has 4 extra vertices per open b-spline curve
			self.assertEqual( len(hVertices) + 4, prim.verticesPerCurve()[i] )
			for j in range( len(hVertices) ) :
				for attr in geo.pointAttribs() :
					if attr.name() == "Pw" :
						# skip houdini's homogeneous weight attrib
						continue
					data = prim[attr.name()].data
					if attr.name() in [ "floatPoint", "intPoint" ] :
						self.assertEqual( data[pIndex], hVertices[j].point().attribValue( attr.name() ) )
					else :
						self.assertEqual( tuple(data[pIndex]), hVertices[j].point().attribValue( attr.name() ) )
					if ( j == 0 or j == len(hVertices)-1 ) :
						# end points: the next two cortex entries are duplicates
						if attr.name() in [ "floatPoint", "intPoint" ] :
							self.assertEqual( data[pIndex+1], hVertices[j].point().attribValue( attr.name() ) )
							self.assertEqual( data[pIndex+2], hVertices[j].point().attribValue( attr.name() ) )
						else :
							self.assertEqual( tuple(data[pIndex+1]), hVertices[j].point().attribValue( attr.name() ) )
							self.assertEqual( tuple(data[pIndex+2]), hVertices[j].point().attribValue( attr.name() ) )
				if ( j == 0 or j == len(hVertices)-1 ) :
					pIndex += 3
				else :
					pIndex += 1
		self.assertEqual( len(sopPrims), prim.numCurves() )
		for key in [ "floatPrim", "intPrim" ] :
			data = prim[key].data
			for i in range( 0, data.size() ) :
				self.assertEqual( data[i], sopPrims[i].attribValue( key ) )
		for key in [ "v2fPrim", "v3fPrim", "color3fPrim", "v2iPrim", "v3iPrim" ] :
			data = prim[key].data
			for i in range( 0, data.size() ) :
				self.assertEqual( tuple(data[i]), sopPrims[i].attribValue( key ) )
		data = prim["stringPrim"].data
		dataIndices = prim["stringPrimIndices"].data
		for i in range( 0, data.size() ) :
			self.assertEqual( data[ dataIndices[i] ], sopPrims[i].attribValue( "stringPrim" ) )
		# round-trip: converting back must reproduce prim exactly
		result = IECoreHoudini.FromHoudiniCurvesConverter( sop ).convert()
		self.assertEqual( result.verticesPerCurve(), prim.verticesPerCurve() )
		self.assertEqual( result.keys(), prim.keys() )
		for key in prim.keys() :
			self.assertEqual( result[key], prim[key] )
		self.assertEqual( result, prim )
def comparePrimAndAppendedSop( self, prim, sop, origSopPrim, multipleConversions=False ) :
	"""Verify that `prim` was appended to `sop`, whose pre-existing contents are `origSopPrim`.

	When multipleConversions is True, the original points/prims are expected to
	carry real attribute values (left behind by an earlier conversion) rather
	than zero/empty defaults.
	"""
	geo = sop.geometry()

	# detail attribs are global, so the appended conversion simply overwrites them
	for key in [ "floatDetail", "intDetail", "stringDetail" ] :
		self.assertEqual( prim[key].data.value, geo.attribValue( key ) )

	for key in [ "v2fDetail", "v3fDetail", "color3fDetail", "v2iDetail", "v3iDetail" ] :
		self.assertEqual( tuple(prim[key].data.value), geo.attribValue( key ) )

	# point attribs : original points keep defaults (or prior values when
	# multipleConversions), appended points carry prim's data
	sopPoints = geo.points()
	numPoints = prim.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex )
	origNumPoints = origSopPrim.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex )
	self.assertEqual( len(sopPoints), origNumPoints + numPoints )

	for key in [ "floatPoint", "intPoint" ] :
		data = prim[key].data

		if multipleConversions :
			defaultValue = origSopPrim[key].data
		else :
			defaultValue = [ 0 ] * origNumPoints

		for i in range( 0, origNumPoints ) :
			self.assertEqual( defaultValue[i], sopPoints[ i ].attribValue( key ) )

		for i in range( 0, data.size() ) :
			self.assertEqual( data[i], sopPoints[ origNumPoints + i ].attribValue( key ) )

	for key in [ "P", "v2fPoint", "v3fPoint", "color3fPoint", "v2iPoint", "v3iPoint" ] :
		data = prim[key].data

		# "P" always has real values on the original geo. the original code
		# used `key is "P"`, which relies on CPython string interning; `==`
		# is the correct, portable comparison and behaves identically here.
		if multipleConversions or key == "P" :
			defaultValue = origSopPrim[key].data
		else :
			defaultValue = [ [ 0 ] * data[0].dimensions() ] * origNumPoints

		for i in range( 0, origNumPoints ) :
			self.assertEqual( tuple(defaultValue[i]), sopPoints[ i ].attribValue( key ) )

		for i in range( 0, data.size() ) :
			self.assertEqual( tuple(data[i]), sopPoints[ origNumPoints + i ].attribValue( key ) )

	# string point attribs are stored indexed
	data = prim["stringPoint"].data
	dataIndices = prim["stringPointIndices"].data

	if multipleConversions :
		defaultIndices = origSopPrim["stringPointIndices"].data
		for i in range( 0, origNumPoints ) :
			val = "" if ( defaultIndices[i] >= data.size() ) else data[ defaultIndices[i] ]
			self.assertEqual( val, sopPoints[ i ].attribValue( "stringPoint" ) )
	else :
		defaultValues = [ "" ] * origNumPoints
		for i in range( 0, origNumPoints ) :
			self.assertEqual( defaultValues[i], sopPoints[ i ].attribValue( "stringPoint" ) )

	# prim attribs follow the same original-vs-appended pattern
	sopPrims = geo.prims()
	origNumPrims = origSopPrim.numCurves()
	self.assertEqual( len(sopPrims), origNumPrims + prim.numCurves() )

	for key in [ "floatPrim", "intPrim" ] :
		data = prim[key].data

		if multipleConversions :
			defaultValue = origSopPrim[key].data
		else :
			defaultValue = [ 0 ] * origNumPrims

		for i in range( 0, origNumPrims ) :
			self.assertEqual( defaultValue[i], sopPrims[ i ].attribValue( key ) )

		for i in range( 0, data.size() ) :
			self.assertEqual( data[i], sopPrims[ origNumPrims + i ].attribValue( key ) )

	for key in [ "v2fPrim", "v3fPrim", "color3fPrim", "v2iPrim", "v3iPrim" ] :
		data = prim[key].data

		if multipleConversions :
			defaultValue = origSopPrim[key].data
		else :
			defaultValue = [ [ 0 ] * data[0].dimensions() ] * origNumPrims

		for i in range( 0, origNumPrims ) :
			self.assertEqual( tuple(defaultValue[i]), sopPrims[ i ].attribValue( key ) )

		for i in range( 0, data.size() ) :
			self.assertEqual( tuple(data[i]), sopPrims[ origNumPrims + i ].attribValue( key ) )

	data = prim["stringPrim"].data
	dataIndices = prim["stringPrimIndices"].data

	if multipleConversions :
		defaultIndices = origSopPrim["stringPrimIndices"].data
		for i in range( 0, origNumPrims ) :
			val = "" if ( defaultIndices[i] >= data.size() ) else data[ defaultIndices[i] ]
			self.assertEqual( val, sopPrims[ i ].attribValue( "stringPrim" ) )
	else :
		defaultValues = [ "" ] * origNumPrims
		for i in range( 0, origNumPrims ) :
			self.assertEqual( defaultValues[i], sopPrims[ i ].attribValue( "stringPrim" ) )

	# vertex ordering is reversed between Houdini and Cortex curves
	sopVerts = []
	for i in range( 0, len(sopPrims) ) :
		verts = list(sopPrims[i].vertices())
		verts.reverse()
		sopVerts.extend( verts )
		# NOTE(review): `i > origNumPrims` skips verifying the first appended
		# curve; `>=` looks intended. left as-is to preserve behavior - confirm.
		if i > origNumPrims :
			self.assertEqual( len(verts), prim.verticesPerCurve()[i-origNumPrims] )

	self.assertEqual( len(sopVerts), origSopPrim["P"].data.size() + prim["P"].data.size() )

	# round-trip through the From converter and check the appended curves survived
	result = IECoreHoudini.FromHoudiniCurvesConverter( sop ).convert()
	self.assertEqual( result.verticesPerCurve()[origNumPrims:], prim.verticesPerCurve() )
	for key in prim.keys() :
		# assert_ is a deprecated alias (removed in Python 3.12)
		self.assertTrue( key in result.keys() )
def testCreateConverter( self ) :
	"""The converter can be constructed directly and reports the expected type."""
	converter = IECoreHoudini.ToHoudiniCurvesConverter( self.curves() )
	# assert_ is a deprecated unittest alias (removed in Python 3.12)
	self.assertTrue( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.ToHoudiniCurvesConverter ) ) )
def testFactory( self ) :
	"""The generic factory produces a ToHoudiniCurvesConverter for CurvesPrimitives."""
	converter = IECoreHoudini.ToHoudiniGeometryConverter.create( self.curves() )
	# assert_/failUnless are deprecated unittest aliases (removed in Python 3.12)
	self.assertTrue( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.ToHoudiniCurvesConverter ) ) )
	self.assertIn( IECore.TypeId.CurvesPrimitive, IECoreHoudini.ToHoudiniGeometryConverter.supportedTypes() )
def testLinearConversion( self ) :
	"""Linear curves convert for open and periodic topology with default, 1 and 100 curves."""
	sop = self.emptySop()

	# assert_ is a deprecated unittest alias (removed in Python 3.12)
	curves = self.curves()
	self.assertTrue( IECoreHoudini.ToHoudiniCurvesConverter( curves ).convert( sop ) )
	self.comparePrimAndSop( curves, sop )

	curves = self.curves( periodic=True )
	self.assertTrue( IECoreHoudini.ToHoudiniCurvesConverter( curves ).convert( sop ) )
	self.comparePrimAndSop( curves, sop )

	curves = self.curves( numCurves=1 )
	self.assertTrue( IECoreHoudini.ToHoudiniCurvesConverter( curves ).convert( sop ) )
	self.comparePrimAndSop( curves, sop )

	curves = self.curves( periodic=True, numCurves=1 )
	self.assertTrue( IECoreHoudini.ToHoudiniCurvesConverter( curves ).convert( sop ) )
	self.comparePrimAndSop( curves, sop )

	curves = self.curves( numCurves=100 )
	self.assertTrue( IECoreHoudini.ToHoudiniCurvesConverter( curves ).convert( sop ) )
	self.comparePrimAndSop( curves, sop )

	curves = self.curves( periodic=True, numCurves=100 )
	self.assertTrue( IECoreHoudini.ToHoudiniCurvesConverter( curves ).convert( sop ) )
	self.comparePrimAndSop( curves, sop )
def testSplineConversion( self ) :
	"""bSpline curves convert; open splines need the end-point comparison variant."""
	sop = self.emptySop()
	spline = IECore.CubicBasisf.bSpline()

	# assert_ is a deprecated unittest alias (removed in Python 3.12)
	curves = self.curves( basis=spline )
	self.assertTrue( IECoreHoudini.ToHoudiniCurvesConverter( curves ).convert( sop ) )
	self.compareOpenSplinePrimAndSop( curves, sop )

	curves = self.curves( basis=spline, periodic=True )
	self.assertTrue( IECoreHoudini.ToHoudiniCurvesConverter( curves ).convert( sop ) )
	self.comparePrimAndSop( curves, sop )

	curves = self.curves( basis=spline, numCurves=1 )
	self.assertTrue( IECoreHoudini.ToHoudiniCurvesConverter( curves ).convert( sop ) )
	self.compareOpenSplinePrimAndSop( curves, sop )

	curves = self.curves( basis=spline, periodic=True, numCurves=1 )
	self.assertTrue( IECoreHoudini.ToHoudiniCurvesConverter( curves ).convert( sop ) )
	self.comparePrimAndSop( curves, sop )

	curves = self.curves( basis=spline, numCurves=100 )
	self.assertTrue( IECoreHoudini.ToHoudiniCurvesConverter( curves ).convert( sop ) )
	self.compareOpenSplinePrimAndSop( curves, sop )

	curves = self.curves( basis=spline, periodic=True, numCurves=100 )
	self.assertTrue( IECoreHoudini.ToHoudiniCurvesConverter( curves ).convert( sop ) )
	self.comparePrimAndSop( curves, sop )
def testConversionIntoExistingSop( self ) :
	"""Converting with append=False replaces the sop's existing geometry."""
	curves = self.curves()
	sop = self.curvesSop()

	orig = IECoreHoudini.FromHoudiniCurvesConverter( sop ).convert()
	self.assertNotEqual( orig, curves )

	# assert_ is a deprecated unittest alias (removed in Python 3.12)
	self.assertTrue( IECoreHoudini.ToHoudiniCurvesConverter( curves ).convert( sop, False ) )
	self.comparePrimAndSop( curves, sop )
def testAppendingIntoExistingSop( self ) :
	"""Converting with append=True hard-locks the sop and appends the new curves."""
	curves = self.curves()
	curvesNumPoints = curves.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex )
	sop = self.curvesSop()

	orig = IECoreHoudini.FromHoudiniCurvesConverter( sop ).convert()
	origNumPoints = orig.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex )
	self.assertNotEqual( orig, curves )

	# assert_ is a deprecated unittest alias (removed in Python 3.12)
	self.assertFalse( sop.isHardLocked() )
	self.assertTrue( IECoreHoudini.ToHoudiniCurvesConverter( curves ).convert( sop, True ) )
	self.assertTrue( sop.isHardLocked() )

	self.comparePrimAndAppendedSop( curves, sop, orig )
	result = IECoreHoudini.FromHoudiniCurvesConverter( sop ).convert()
	resultNumPoints = result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex )
	self.assertEqual( resultNumPoints, origNumPoints + curvesNumPoints )
	for i in range( 0, curves["P"].data.size() ) :
		self.assertEqual( result["P"].data[ origNumPoints + i ], curves["P"].data[i] )

	# unlocking the sop discards the appended conversion
	sop.setHardLocked( False )
	result = IECoreHoudini.FromHoudiniCurvesConverter( sop ).convert()
	resultNumPoints = result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex )
	self.assertEqual( resultNumPoints, origNumPoints )
	self.assertNotIn( "floatDetail", result.keys() )
	self.assertNotIn( "floatPoint", result.keys() )
def testAppendingIntoLockedSop( self ) :
	"""Appending also works when the sop was already hard-locked beforehand."""
	curves = self.curves()
	curvesNumPoints = curves.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex )
	sop = self.curvesSop()

	orig = IECoreHoudini.FromHoudiniCurvesConverter( sop ).convert()
	origNumPoints = orig.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex )
	self.assertNotEqual( orig, curves )

	sop.setHardLocked( True )
	# assert_ is a deprecated unittest alias (removed in Python 3.12)
	self.assertTrue( sop.isHardLocked() )
	self.assertTrue( IECoreHoudini.ToHoudiniCurvesConverter( curves ).convert( sop, True ) )
	self.assertTrue( sop.isHardLocked() )

	self.comparePrimAndAppendedSop( curves, sop, orig )
	result = IECoreHoudini.FromHoudiniCurvesConverter( sop ).convert()
	resultNumPoints = result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex )
	self.assertEqual( resultNumPoints, origNumPoints + curvesNumPoints )
	for i in range( 0, curves["P"].data.size() ) :
		self.assertEqual( result["P"].data[ origNumPoints + i ], curves["P"].data[i] )

	# unlocking the sop discards the appended conversion
	sop.setHardLocked( False )
	result = IECoreHoudini.FromHoudiniCurvesConverter( sop ).convert()
	resultNumPoints = result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex )
	self.assertEqual( resultNumPoints, origNumPoints )
	self.assertNotIn( "floatDetail", result.keys() )
	self.assertNotIn( "floatPoint", result.keys() )
def testSaveLoad( self ) :
	"""An appended conversion survives saving and reloading the hip file."""
	hou.hipFile.clear( suppress_save_prompt=True )

	curves = self.curves()
	curvesNumPoints = curves.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex )
	sop = self.curvesSop()
	sopPath = sop.path()

	orig = IECoreHoudini.FromHoudiniCurvesConverter( sop ).convert()
	origNumPoints = orig.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex )
	self.assertNotEqual( orig, curves )

	# assert_ is a deprecated unittest alias (removed in Python 3.12)
	self.assertFalse( sop.isHardLocked() )
	self.assertTrue( IECoreHoudini.ToHoudiniCurvesConverter( curves ).convert( sop, True ) )
	self.assertTrue( sop.isHardLocked() )

	self.comparePrimAndAppendedSop( curves, sop, orig )
	result = IECoreHoudini.FromHoudiniCurvesConverter( sop ).convert()
	resultNumPoints = result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex )
	self.assertEqual( resultNumPoints, origNumPoints + curvesNumPoints )
	for i in range( 0, curves["P"].data.size() ) :
		self.assertEqual( result["P"].data[ origNumPoints + i ], curves["P"].data[i] )

	hou.hipFile.save( TestToHoudiniCurvesConverter.__testScene )
	hou.hipFile.clear( suppress_save_prompt=True )
	hou.hipFile.load( TestToHoudiniCurvesConverter.__testScene )

	# the reloaded sop must still be locked and hold the appended geometry
	newSop = hou.node( sopPath )
	self.assertTrue( newSop.isHardLocked() )
	self.comparePrimAndAppendedSop( curves, newSop, orig )
	result = IECoreHoudini.FromHoudiniCurvesConverter( newSop ).convert()
	resultNumPoints = result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex )
	self.assertEqual( resultNumPoints, origNumPoints + curvesNumPoints )
	for i in range( 0, curves["P"].data.size() ) :
		self.assertEqual( result["P"].data[ origNumPoints + i ], curves["P"].data[i] )
def testSaveLoadWithLockedSop( self ) :
	"""Save/load also round-trips when the sop was hard-locked before converting."""
	hou.hipFile.clear( suppress_save_prompt=True )

	curves = self.curves()
	curvesNumPoints = curves.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex )
	sop = self.curvesSop()
	sopPath = sop.path()

	orig = IECoreHoudini.FromHoudiniCurvesConverter( sop ).convert()
	origNumPoints = orig.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex )
	self.assertNotEqual( orig, curves )

	sop.setHardLocked( True )
	# assert_ is a deprecated unittest alias (removed in Python 3.12)
	self.assertTrue( sop.isHardLocked() )
	self.assertTrue( IECoreHoudini.ToHoudiniCurvesConverter( curves ).convert( sop, True ) )
	self.assertTrue( sop.isHardLocked() )

	self.comparePrimAndAppendedSop( curves, sop, orig )
	result = IECoreHoudini.FromHoudiniCurvesConverter( sop ).convert()
	resultNumPoints = result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex )
	self.assertEqual( resultNumPoints, origNumPoints + curvesNumPoints )
	for i in range( 0, curves["P"].data.size() ) :
		self.assertEqual( result["P"].data[ origNumPoints + i ], curves["P"].data[i] )

	hou.hipFile.save( TestToHoudiniCurvesConverter.__testScene )
	hou.hipFile.clear( suppress_save_prompt=True )
	hou.hipFile.load( TestToHoudiniCurvesConverter.__testScene )

	# the reloaded sop must still be locked and hold the appended geometry
	newSop = hou.node( sopPath )
	self.assertTrue( newSop.isHardLocked() )
	self.comparePrimAndAppendedSop( curves, newSop, orig )
	result = IECoreHoudini.FromHoudiniCurvesConverter( newSop ).convert()
	resultNumPoints = result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex )
	self.assertEqual( resultNumPoints, origNumPoints + curvesNumPoints )
	for i in range( 0, curves["P"].data.size() ) :
		self.assertEqual( result["P"].data[ origNumPoints + i ], curves["P"].data[i] )
def testMultipleConversions( self ) :
	"""Repeated append conversions stack: each pass adds another copy of the curves."""
	curves = self.curves()
	curvesNumPoints = curves.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex )
	sop = self.curvesSop()

	orig = IECoreHoudini.FromHoudiniCurvesConverter( sop ).convert()
	origNumPoints = orig.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex )
	self.assertNotEqual( orig, curves )

	# first append. assert_ is a deprecated unittest alias (removed in Python 3.12)
	self.assertFalse( sop.isHardLocked() )
	self.assertTrue( IECoreHoudini.ToHoudiniCurvesConverter( curves ).convert( sop, True ) )
	self.assertTrue( sop.isHardLocked() )

	self.comparePrimAndAppendedSop( curves, sop, orig )
	result = IECoreHoudini.FromHoudiniCurvesConverter( sop ).convert()
	resultNumPoints = result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex )
	self.assertEqual( resultNumPoints, origNumPoints + curvesNumPoints )
	for i in range( 0, curves["P"].data.size() ) :
		self.assertEqual( result["P"].data[ origNumPoints + i ], curves["P"].data[i] )

	# second append
	self.assertTrue( sop.isHardLocked() )
	self.assertTrue( IECoreHoudini.ToHoudiniCurvesConverter( curves ).convert( sop, True ) )
	self.assertTrue( sop.isHardLocked() )

	self.comparePrimAndAppendedSop( curves, sop, result, multipleConversions=True )
	result = IECoreHoudini.FromHoudiniCurvesConverter( sop ).convert()
	resultNumPoints = result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex )
	self.assertEqual( resultNumPoints, origNumPoints + 2*curvesNumPoints )
	for i in range( 0, curves["P"].data.size() ) :
		self.assertEqual( result["P"].data[ origNumPoints + i ], curves["P"].data[i] )
		self.assertEqual( result["P"].data[ origNumPoints + curvesNumPoints + i ], curves["P"].data[i] )

	# third append
	self.assertTrue( sop.isHardLocked() )
	self.assertTrue( IECoreHoudini.ToHoudiniCurvesConverter( curves ).convert( sop, True ) )
	self.assertTrue( sop.isHardLocked() )

	self.comparePrimAndAppendedSop( curves, sop, result, multipleConversions=True )
	result = IECoreHoudini.FromHoudiniCurvesConverter( sop ).convert()
	resultNumPoints = result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex )
	self.assertEqual( resultNumPoints, origNumPoints + 3*curvesNumPoints )
	for i in range( 0, curves["P"].data.size() ) :
		self.assertEqual( result["P"].data[ origNumPoints + i ], curves["P"].data[i] )
		self.assertEqual( result["P"].data[ origNumPoints + curvesNumPoints + i ], curves["P"].data[i] )
		self.assertEqual( result["P"].data[ origNumPoints + 2*curvesNumPoints + i ], curves["P"].data[i] )
def testObjectWasDeleted( self ) :
	"""The converter keeps its own reference, so it still converts after `del curves`."""
	curves = self.curves()
	sop = self.curvesSop()

	converter = IECoreHoudini.ToHoudiniCurvesConverter( curves )

	# assert_ is a deprecated unittest alias (removed in Python 3.12)
	self.assertTrue( converter.convert( sop, False ) )
	self.comparePrimAndSop( curves, sop )

	result = IECoreHoudini.FromHoudiniCurvesConverter( sop ).convert()

	del curves

	# unlocking reverts the sop to its original geometry...
	sop.setHardLocked( False )
	self.assertNotEqual( IECoreHoudini.FromHoudiniCurvesConverter( sop ).convert(), result )
	# ...but the converter can still re-apply the (deleted) primitive
	self.assertTrue( converter.convert( sop, False ) )
	self.assertEqual( IECoreHoudini.FromHoudiniCurvesConverter( sop ).convert(), result )
def testWithUnacceptablePrimVars( self ) :
	"""Primvars with unsupported data types are silently skipped by the conversion."""
	curves = self.curves()
	curves["badDetail"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Constant, IECore.TransformationMatrixfData() )
	curves["badPoint"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, IECore.DoubleVectorData( [ 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5, 10.5, 11.5, 12.5 ] ) )
	curves["badPrim"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Uniform, IECore.DoubleVectorData( [ 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5, 10.5, 11.5, 12.5 ] ) )
	curves["badVert"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.FaceVarying, IECore.DoubleVectorData( [ 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5, 10.5, 11.5, 12.5 ] ) )
	sop = self.emptySop()

	# the conversion succeeds, but none of the bad attribs appear on the sop.
	# assert_ is a deprecated unittest alias (removed in Python 3.12)
	self.assertTrue( IECoreHoudini.ToHoudiniCurvesConverter( curves ).convert( sop ) )
	self.assertNotIn( "badDetail", [ x.name() for x in sop.geometry().globalAttribs() ] )
	self.assertNotIn( "badPoint", [ x.name() for x in sop.geometry().pointAttribs() ] )
	self.assertNotIn( "badPrim", [ x.name() for x in sop.geometry().primAttribs() ] )
	self.assertNotIn( "badVert", [ x.name() for x in sop.geometry().vertexAttribs() ] )

	result = IECoreHoudini.FromHoudiniCurvesConverter( sop ).convert()
	self.assertNotEqual( result, curves )
	self.assertNotIn( "badDetail", result )
	self.assertNotIn( "badPoint", result )
	self.assertNotIn( "badPrim", result )
	self.assertNotIn( "badVert", result )

	# with the bad primvars removed, the rest must match exactly
	del curves["badDetail"]
	del curves["badPoint"]
	del curves["badPrim"]
	del curves["badVert"]
	self.comparePrimAndSop( curves, sop )
def testConvertingOverExistingAttribs( self ) :
	"""Conversion succeeds when same-typed attribs already exist on the chain."""
	curves = self.curves()
	sop = self.emptySop()

	detailAttr = sop.createOutputNode( "attribcreate", exact_type_name=True )
	detailAttr.parm( "name" ).set( "floatDetail" )
	detailAttr.parm( "class" ).set( 0 ) # detail
	detailAttr.parm( "type" ).set( 0 ) # float
	detailAttr.parm( "size" ).set( 1 ) # 1 element
	detailAttr.parm( "value1" ).set( 123.456 )

	pointAttr = detailAttr.createOutputNode( "attribcreate", exact_type_name=True )
	pointAttr.parm( "name" ).set( "floatPoint" )
	pointAttr.parm( "class" ).set( 2 ) # point
	pointAttr.parm( "type" ).set( 0 ) # float
	pointAttr.parm( "size" ).set( 1 ) # 1 element
	pointAttr.parm( "value1" ).set( 123.456 )

	primAttr = pointAttr.createOutputNode( "attribcreate", exact_type_name=True )
	primAttr.parm( "name" ).set( "floatPrim" )
	primAttr.parm( "class" ).set( 1 ) # prim
	primAttr.parm( "type" ).set( 0 ) # float
	primAttr.parm( "size" ).set( 1 ) # 1 element
	primAttr.parm( "value1" ).set( 123.456 )

	# assert_ is a deprecated unittest alias (removed in Python 3.12)
	self.assertTrue( IECoreHoudini.ToHoudiniCurvesConverter( curves ).convert( primAttr ) )
	self.comparePrimAndSop( curves, primAttr )
def testConvertingOverExistingAttribsWithDifferentTypes( self ) :
	"""Conversion also succeeds when existing attribs have mismatched type/size."""
	curves = self.curves()
	sop = self.emptySop()

	detailAttr = sop.createOutputNode( "attribcreate", exact_type_name=True )
	detailAttr.parm( "name" ).set( "floatDetail" )
	detailAttr.parm( "class" ).set( 0 ) # detail
	detailAttr.parm( "type" ).set( 1 ) # int
	detailAttr.parm( "size" ).set( 3 ) # 3 elements
	detailAttr.parm( "value1" ).set( 10 )
	detailAttr.parm( "value2" ).set( 11 )
	detailAttr.parm( "value3" ).set( 12 )

	# NOTE(review): class 1 is the prim class in the sibling test above, while
	# point attribs use class 2 there - confirm whether 1 is intended here
	pointAttr = detailAttr.createOutputNode( "attribcreate", exact_type_name=True )
	pointAttr.parm( "name" ).set( "floatPoint" )
	pointAttr.parm( "class" ).set( 1 )
	pointAttr.parm( "type" ).set( 1 ) # int
	pointAttr.parm( "size" ).set( 3 ) # 3 elements
	pointAttr.parm( "value1" ).set( 10 )
	pointAttr.parm( "value2" ).set( 11 )
	pointAttr.parm( "value3" ).set( 12 )

	primAttr = pointAttr.createOutputNode( "attribcreate", exact_type_name=True )
	primAttr.parm( "name" ).set( "floatPrim" )
	primAttr.parm( "class" ).set( 1 ) # prim
	primAttr.parm( "type" ).set( 1 ) # int
	primAttr.parm( "size" ).set( 3 ) # 3 elements
	primAttr.parm( "value1" ).set( 10 )
	primAttr.parm( "value2" ).set( 11 )
	primAttr.parm( "value3" ).set( 12 )

	# assert_ is a deprecated unittest alias (removed in Python 3.12)
	self.assertTrue( IECoreHoudini.ToHoudiniCurvesConverter( curves ).convert( primAttr ) )
	self.comparePrimAndSop( curves, primAttr )
def testVertAttribsCantBeConverted( self ) :
	"""FaceVarying primvars are not converted onto any attrib class."""
	curves = self.curves()
	curves["floatVert"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.FaceVarying, IECore.FloatVectorData( 1 ) )
	sop = self.emptySop()

	# assert_ is a deprecated unittest alias (removed in Python 3.12)
	self.assertTrue( IECoreHoudini.ToHoudiniCurvesConverter( curves ).convert( sop ) )

	allAttribs = [ x.name() for x in sop.geometry().globalAttribs() ]
	allAttribs.extend( [ x.name() for x in sop.geometry().pointAttribs() ] )
	allAttribs.extend( [ x.name() for x in sop.geometry().primAttribs() ] )
	allAttribs.extend( [ x.name() for x in sop.geometry().vertexAttribs() ] )
	self.assertNotIn( "floatVert", allAttribs )

	del curves["floatVert"]
	self.comparePrimAndSop( curves, sop )
def testBadCurve( self ) :
	"""A valid primitive that the converter cannot represent makes convert() return False."""
	curves = IECore.CurvesPrimitive( IECore.IntVectorData( [ 7 ] ), IECore.CubicBasisf.bSpline(), False )
	curves['P'] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, IECore.V3fVectorData( [ IECore.V3f( 0 ), IECore.V3f( 0 ), IECore.V3f( 0 ), IECore.V3f( 1 ), IECore.V3f( 2 ), IECore.V3f( 2 ), IECore.V3f( 2 ) ] ) )
	# failUnless is a deprecated unittest alias (removed in Python 3.12)
	self.assertTrue( curves.arePrimitiveVariablesValid() )

	sop = self.emptySop()
	self.assertFalse( IECoreHoudini.ToHoudiniCurvesConverter( curves ).convert( sop ) )
def testName( self ) :
	"""blindData()["name"] becomes a "name" prim string attrib on every converted curve."""
	sop = self.emptySop()
	curves = self.curves()
	curves.blindData()["name"] = IECore.StringData( "testGroup" )

	# assert_ is a deprecated unittest alias (removed in Python 3.12)
	self.assertTrue( IECoreHoudini.ToHoudiniCurvesConverter( curves ).convert( sop ) )
	geo = sop.geometry()
	nameAttr = sop.geometry().findPrimAttrib( "name" )
	self.assertEqual( nameAttr.strings(), tuple( [ "testGroup" ] ) )
	self.assertEqual( len([ x for x in geo.prims() if x.attribValue( "name" ) == "testGroup" ]), curves.variableSize( IECore.PrimitiveVariable.Interpolation.Uniform ) )
def testAttributeFilter( self ) :
	# the attributeFilter parameter is a space separated list of match strings
	# ( "*" matches everything, "^" negates ) selecting which primvars convert
	curves = self.curves()
	sop = self.emptySop()

	converter = IECoreHoudini.ToHoudiniCurvesConverter( curves )
	# default filter : all primvars convert to their matching attrib class
	self.assertTrue( converter.convert( sop ) )
	self.assertEqual( sorted([ x.name() for x in sop.geometry().pointAttribs() ]), ['P', 'Pw', 'color3fPoint', 'floatPoint', 'intPoint', 'stringPoint', 'v2fPoint', 'v2iPoint', 'v3fPoint', 'v3iPoint'] )
	self.assertEqual( sorted([ x.name() for x in sop.geometry().primAttribs() ]), ['color3fPrim', 'floatPrim', 'intPrim', 'stringPrim', 'v2fPrim', 'v2iPrim', 'v3fPrim', 'v3iPrim'] )
	self.assertEqual( sorted([ x.name() for x in sop.geometry().vertexAttribs() ]), [] )
	self.assertEqual( sorted([ x.name() for x in sop.geometry().globalAttribs() ]), ['color3fDetail', 'floatDetail', 'intDetail', 'stringDetail', 'v2fDetail', 'v2iDetail', 'v3fDetail', 'v3iDetail'] )

	# positive filter : only P and the *3f* primvars convert
	converter.parameters()["attributeFilter"].setTypedValue( "P *3f*" )
	self.assertTrue( converter.convert( sop ) )
	self.assertEqual( sorted([ x.name() for x in sop.geometry().pointAttribs() ]), ['P', 'Pw', 'color3fPoint', 'v3fPoint'] )
	self.assertEqual( sorted([ x.name() for x in sop.geometry().primAttribs() ]), ['color3fPrim', 'v3fPrim'] )
	self.assertEqual( sorted([ x.name() for x in sop.geometry().vertexAttribs() ]), [] )
	self.assertEqual( sorted([ x.name() for x in sop.geometry().globalAttribs() ]), ['color3fDetail', 'v3fDetail'] )

	# negative filter : everything except the detail and int primvars
	converter.parameters()["attributeFilter"].setTypedValue( "* ^*Detail ^int*" )
	self.assertTrue( converter.convert( sop ) )
	self.assertEqual( sorted([ x.name() for x in sop.geometry().pointAttribs() ]), ['P', 'Pw', 'color3fPoint', 'floatPoint', 'stringPoint', 'v2fPoint', 'v2iPoint', 'v3fPoint', 'v3iPoint'] )
	self.assertEqual( sorted([ x.name() for x in sop.geometry().primAttribs() ]), ['color3fPrim', 'floatPrim', 'stringPrim', 'v2fPrim', 'v2iPrim', 'v3fPrim', 'v3iPrim'] )
	self.assertEqual( sorted([ x.name() for x in sop.geometry().vertexAttribs() ]), [] )
	self.assertEqual( sorted([ x.name() for x in sop.geometry().globalAttribs() ]), [] )

	# verify we can filter uvs
	for key in curves.keys() :
		if key != "P" :
			del curves[key]
	rand = IECore.Rand32()
	curves["s"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, IECore.FloatVectorData( [ rand.nextf() for x in range( 0, 32 ) ] ) )
	curves["t"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, IECore.FloatVectorData( [ rand.nextf() for x in range( 0, 32 ) ] ) )
	curves["Cs"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Uniform, IECore.V3fVectorData( [ IECore.V3f( 1, 0, 0 ) ] * 4, IECore.GeometricData.Interpretation.Color ) )
	curves["width"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, IECore.FloatVectorData( [ 1 ] * 32 ) )
	curves["Pref"] = curves["P"]

	# standard attribs are renamed on conversion : s/t -> uv, Cs -> Cd,
	# width -> pscale, Pref -> rest (per the assertions below)
	converter = IECoreHoudini.ToHoudiniCurvesConverter( curves )
	self.assertTrue( converter.convert( sop ) )
	self.assertEqual( sorted([ x.name() for x in sop.geometry().pointAttribs() ]), ['P', 'Pw', 'pscale', 'rest', 'uv'] )
	self.assertEqual( sorted([ x.name() for x in sop.geometry().primAttribs() ]), ['Cd'] )
	self.assertEqual( sorted([ x.name() for x in sop.geometry().vertexAttribs() ]), [] )
	self.assertEqual( sorted([ x.name() for x in sop.geometry().globalAttribs() ]), [] )

	# have to filter the source attrs s, t and not uv
	converter.parameters()["attributeFilter"].setTypedValue( "* ^uv ^pscale ^rest" )
	self.assertTrue( converter.convert( sop ) )
	self.assertEqual( sorted([ x.name() for x in sop.geometry().pointAttribs() ]), ['P', 'Pw', 'pscale', 'rest', 'uv'] )
	self.assertEqual( sorted([ x.name() for x in sop.geometry().primAttribs() ]), ['Cd'] )
	self.assertEqual( sorted([ x.name() for x in sop.geometry().vertexAttribs() ]), [] )
	self.assertEqual( sorted([ x.name() for x in sop.geometry().globalAttribs() ]), [] )

	converter.parameters()["attributeFilter"].setTypedValue( "* ^s ^t ^width ^Pref" )
	self.assertTrue( converter.convert( sop ) )
	self.assertEqual( sorted([ x.name() for x in sop.geometry().pointAttribs() ]), ['P', 'Pw'] )
	self.assertEqual( sorted([ x.name() for x in sop.geometry().primAttribs() ]), ['Cd'] )
	self.assertEqual( sorted([ x.name() for x in sop.geometry().vertexAttribs() ]), [] )
	self.assertEqual( sorted([ x.name() for x in sop.geometry().globalAttribs() ]), [] )

	converter.parameters()["attributeFilter"].setTypedValue( "* ^s ^width ^Cs" )
	self.assertTrue( converter.convert( sop ) )
	self.assertEqual( sorted([ x.name() for x in sop.geometry().pointAttribs() ]), ['P', 'Pw', 'rest', 't'] )
	self.assertEqual( sorted([ x.name() for x in sop.geometry().primAttribs() ]), [] )
	self.assertEqual( sorted([ x.name() for x in sop.geometry().vertexAttribs() ]), [] )
	self.assertEqual( sorted([ x.name() for x in sop.geometry().globalAttribs() ]), [] )
def testStandardAttributeConversion( self ) :
	"""Standard primvars (s/t, Cs, width, Pref) map to Houdini names (uv, Cd, pscale, rest)
	unless convertStandardAttributes is disabled."""
	sop = self.emptySop()
	curves = self.curves()
	for key in curves.keys() :
		if key != "P" :
			del curves[key]
	rand = IECore.Rand32()
	curves["s"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, IECore.FloatVectorData( [ rand.nextf() for x in range( 0, 32 ) ] ) )
	curves["t"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, IECore.FloatVectorData( [ rand.nextf() for x in range( 0, 32 ) ] ) )
	curves["Cs"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Uniform, IECore.V3fVectorData( [ IECore.V3f( 1, 0, 0 ) ] * 4, IECore.GeometricData.Interpretation.Color ) )
	curves["width"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, IECore.FloatVectorData( [ 1 ] * 32 ) )
	curves["Pref"] = curves["P"]
	self.assertTrue( curves.arePrimitiveVariablesValid() )

	converter = IECoreHoudini.ToHoudiniCurvesConverter( curves )
	self.assertTrue( converter.convert( sop ) )
	geo = sop.geometry()
	self.assertEqual( sorted([ x.name() for x in geo.pointAttribs() ]), ['P', 'Pw', 'pscale', 'rest', 'uv'] )
	self.assertEqual( sorted([ x.name() for x in geo.primAttribs() ]), ['Cd'] )
	self.assertEqual( sorted([ x.name() for x in geo.vertexAttribs() ]), [] )
	self.assertEqual( sorted([ x.name() for x in geo.globalAttribs() ]), [] )

	sData = curves["s"].data
	tData = curves["t"].data
	uvs = geo.findPointAttrib( "uv" )
	# uv.y is flipped relative to t. enumerate replaces the original
	# manual `i += 1` counter (idiom fix, identical iteration order)
	for i, point in enumerate( geo.points() ) :
		uvValues = point.attribValue( uvs )
		self.assertAlmostEqual( uvValues[0], sData[i] )
		self.assertAlmostEqual( uvValues[1], 1 - tData[i] )

	# with conversion disabled the original names pass through unchanged
	converter["convertStandardAttributes"].setTypedValue( False )
	self.assertTrue( converter.convert( sop ) )
	geo = sop.geometry()
	self.assertEqual( sorted([ x.name() for x in geo.pointAttribs() ]), ['P', 'Pref', 'Pw', 's', 't', 'width'] )
	self.assertEqual( sorted([ x.name() for x in geo.primAttribs() ]), ['Cs'] )
	self.assertEqual( sorted([ x.name() for x in geo.vertexAttribs() ]), [] )
	self.assertEqual( sorted([ x.name() for x in geo.globalAttribs() ]), [] )

	s = geo.findPointAttrib( "s" )
	t = geo.findPointAttrib( "t" )
	for i, point in enumerate( geo.points() ) :
		self.assertAlmostEqual( point.attribValue( s ), sData[i] )
		self.assertAlmostEqual( point.attribValue( t ), tData[i] )
def tearDown( self ) :
	# remove the hip file written by the save/load tests, if one was created
	if os.path.isfile( TestToHoudiniCurvesConverter.__testScene ) :
		os.remove( TestToHoudiniCurvesConverter.__testScene )
# allow the test file to be run directly
if __name__ == "__main__":
    unittest.main()
| code-google-com/cortex-vfx | test/IECoreHoudini/ToHoudiniCurvesConverter.py | Python | bsd-3-clause | 45,988 |
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Copyright (c) 2013, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import sys
from PyQt4 import Qt
from PyQt4 import QtCore
from PyQt4 import QtGui
class MyDialog(QtGui.QDialog):
    """Minimal PyQt4 dialog showing a label with non-ASCII text; closes on <ESC>."""

    def __init__(self):
        super(MyDialog, self).__init__()
        # non-ascii characters exercise unicode handling in the frozen build
        self.label = Qt.QLabel(
            u"Press <ESC> to exit. Some non-ascii chars: řčšěíáŘ",
            self)
        self.setWindowTitle("Hello World from PyQt4")
        #self.resize(500, 300)
        self.show()

    def sizeHint(self):
        # size the dialog to fit the label exactly
        return self.label.sizeHint()

    def keyPressEvent(self, event):
        # close on <ESC>; other keys get the default QDialog handling
        if event.key() == QtCore.Qt.Key_Escape:
            self.close()
def main():
    """Print Qt4 plugin/image/library diagnostics and run the dialog event loop."""
    # NOTE(review): unicode() makes this Python 2 only
    app = Qt.QApplication(sys.argv)
    read_formats = ', '.join([unicode(format).lower() \
        for format in QtGui.QImageReader.supportedImageFormats()])
    print("Qt4 plugin paths: " + unicode(list(app.libraryPaths())))
    print("Qt4 image read support: " + read_formats)
    print('Qt4 Libraries path: ' + unicode(QtCore.QLibraryInfo.location(QtCore.QLibraryInfo.LibrariesPath)))
    # keep a reference so the dialog is not garbage collected before exec_()
    ex = MyDialog()
    app.exec_()
if __name__ == "__main__":
main()
| bl4ckdu5t/registron | tests/interactive/test_pyqt4.py | Python | mit | 1,490 |
# coding: utf-8
from __future__ import absolute_import
import flask
import auth
import config
import model
import util
from main import app
# OAuth 1.0a endpoints and app credentials for Twitter sign-in
twitter_config = dict(
    access_token_url='https://api.twitter.com/oauth/access_token',
    authorize_url='https://api.twitter.com/oauth/authorize',
    base_url='https://api.twitter.com/1.1/',
    consumer_key=config.CONFIG_DB.twitter_consumer_key,
    consumer_secret=config.CONFIG_DB.twitter_consumer_secret,
    request_token_url='https://api.twitter.com/oauth/request_token',
)

# OAuth client instance registered under the provider name 'twitter'
twitter = auth.create_oauth_app(twitter_config, 'twitter')
@app.route('/api/auth/callback/twitter/')
def twitter_authorized():
  """OAuth callback: store the token pair in the session and sign the user in."""
  response = twitter.authorized_response()
  if response is None:
    # the user clicked "deny" on Twitter's authorize page
    flask.flash('You denied the request to sign in.')
    return flask.redirect(util.get_next_url())

  # tokengetter (get_twitter_token) reads this pair for subsequent API calls
  flask.session['oauth_token'] = (
    response['oauth_token'],
    response['oauth_token_secret'],
  )
  user_db = retrieve_user_from_twitter(response)
  return auth.signin_user_db(user_db)
@twitter.tokengetter
def get_twitter_token():
  # returns the (token, secret) pair stored by twitter_authorized, or None
  return flask.session.get('oauth_token')
@app.route('/signin/twitter/')
def signin_twitter():
  """Start the OAuth flow by redirecting the user to Twitter's authorize page."""
  return auth.signin_oauth(twitter)
def retrieve_user_from_twitter(response):
    """Look up the user for this Twitter account, creating one on first sign-in."""
    auth_id = 'twitter_%s' % response['user_id']
    existing = model.User.get_by('auth_ids', auth_id)
    if existing:
        return existing
    screen_name = response['screen_name']
    return auth.create_user_db(
        auth_id=auth_id,
        name=screen_name,
        username=screen_name,
    )
| gmist/ctm-5studio | main/auth/twitter.py | Python | mit | 1,468 |
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import time
import struct
from .. import ivi
from .. import scope
from .. import scpi
from .. import extra
# Driver-facing acquisition type -> LeCroy ACQUIRE keyword.
AcquisitionTypeMapping = {
    'normal': 'norm',
    'peak_detect': 'peak',
    'high_resolution': 'hres',
    'average': 'aver'}
# Allowed vertical coupling selections for analog channels.
VerticalCoupling = set(['ac', 'dc', 'gnd'])
# Allowed input impedance selections (ohms, or 'gnd').
InputImpedance = set([1000000, 50, 'gnd'])
# Bandwidth Limits, OFF = none, ON = 20 MHz, 200MHZ = 200 MHz
BandwidthLimit = set(['OFF', 'ON', '200MHZ'])
# Trigger sweep modes accepted by trigger.mode.
TriggerModes = set(['auto', 'norm', 'single', 'stop'])
# Raw LeCroy trigger type tokens.
TriggerTypes = set(
    ['drop', 'edge', 'ev', 'glit', 'ht', 'hv', 'hv2', 'il', 'intv', 'is', 'i2', 'off', 'pl', 'ps', 'p2', 'ql', 'sng',
     'sq', 'sr', 'teq', 'ti', 'tl'])
# IVI coupling name -> (instrument coupling token, noise-reject flag, HF-reject flag).
TriggerCouplingMapping = {
    'ac': ('ac', 0, 0),
    'dc': ('dc', 0, 0),
    'hf_reject': ('dc', 0, 1),
    'lf_reject': ('lfr', 0, 0),
    'noise_reject': ('dc', 1, 0),
    'hf_reject_ac': ('ac', 0, 1),
    'noise_reject_ac': ('ac', 1, 0),
    'hf_noise_reject': ('dc', 1, 1),
    'hf_noise_reject_ac': ('ac', 1, 1),
    'lf_noise_reject': ('lfr', 1, 0)}
# TV-trigger event name -> instrument token.
TVTriggerEventMapping = {'field1': 'fie1',
                         'field2': 'fie2',
                         'any_field': 'afi',
                         'any_line': 'alin',
                         'line_number': 'lfi1',
                         'vertical': 'vert',
                         'line_field1': 'lfi1',
                         'line_field2': 'lfi2',
                         'line': 'line',
                         'line_alternate': 'lalt',
                         'lvertical': 'lver'}
# TV-trigger video standard name -> instrument token.
TVTriggerFormatMapping = {'generic': 'gen',
                          'ntsc': 'ntsc',
                          'pal': 'pal',
                          'palm': 'palm',
                          'secam': 'sec',
                          'p480l60hz': 'p480',
                          'p480': 'p480',
                          'p720l60hz': 'p720',
                          'p720': 'p720',
                          'p1080l24hz': 'p1080',
                          'p1080': 'p1080',
                          'p1080l25hz': 'p1080l25hz',
                          'p1080l50hz': 'p1080l50hz',
                          'p1080l60hz': 'p1080l60hz',
                          'i1080l50hz': 'i1080l50hz',
                          'i1080': 'i1080l50hz',
                          'i1080l60hz': 'i1080l60hz'}
# Pulse/glitch polarity name -> instrument token.
PolarityMapping = {'positive': 'pos',
                   'negative': 'neg'}
GlitchConditionMapping = {'less_than': 'less',
                          'greater_than': 'gre'}
WidthConditionMapping = {'within': 'rang'}
# Acquisition sample mode name -> instrument token.
SampleModeMapping = {'real_time': 'rtim',
                     'equivalent_time': 'etim'}
# Trigger slope name -> instrument token.
SlopeMapping = {
    'positive': 'pos',
    'negative': 'neg',
    'either': 'eith',
    'alternating': 'alt'}
# IVI waveform measurement name -> instrument measurement keyword (analog channels).
MeasurementFunctionMapping = {
    'rise_time': 'risetime',
    'fall_time': 'falltime',
    'frequency': 'frequency',
    'period': 'period',
    'voltage_rms': 'vrms display',
    'voltage_peak_to_peak': 'vpp',
    'voltage_max': 'vmax',
    'voltage_min': 'vmin',
    'voltage_high': 'vtop',
    'voltage_low': 'vbase',
    'voltage_average': 'vaverage display',
    'width_negative': 'nwidth',
    'width_positive': 'pwidth',
    'duty_cycle_positive': 'dutycycle',
    'amplitude': 'vamplitude',
    'voltage_cycle_rms': 'vrms cycle',
    'voltage_cycle_average': 'vaverage cycle',
    'overshoot': 'overshoot',
    'preshoot': 'preshoot',
    'ratio': 'vratio',
    'phase': 'phase',
    'delay': 'delay'}
# Subset of measurements available on digital channels.
MeasurementFunctionMappingDigital = {
    'rise_time': 'risetime',
    'fall_time': 'falltime',
    'frequency': 'frequency',
    'period': 'period',
    'width_negative': 'nwidth',
    'width_positive': 'pwidth',
    'duty_cycle_positive': 'dutycycle'}
# Screenshot format name -> HCSU DEV token.
ScreenshotImageFormatMapping = {
    'bmp': 'bmp',
    'bmp24': 'bmp',
    'bmp8': 'bmpcomp',
    'jpeg': 'jpeg',
    'png': 'png',
    'png24': 'png',
    'psd': 'psd',
    'tiff': 'tiff'}
# Timebase display mode name -> instrument token.
TimebaseModeMapping = {
    'main': 'main',
    'window': 'wind',
    'xy': 'xy',
    'roll': 'roll'}
# Timebase reference position name -> instrument token.
TimebaseReferenceMapping = {
    'left': 'left',
    'center': 'cent',
    'right': 'righ'}
class lecroyBaseScope(scpi.common.IdnCommand, scpi.common.ErrorQuery, scpi.common.Reset,
scpi.common.SelfTest, scpi.common.Memory,
scope.Base, scope.TVTrigger,
scope.GlitchTrigger, scope.WidthTrigger, scope.AcLineTrigger,
scope.WaveformMeasurement, scope.MinMaxWaveform,
scope.ContinuousAcquisition, scope.AverageAcquisition,
scope.SampleMode, scope.AutoSetup,
extra.common.SystemSetup, extra.common.Screenshot,
ivi.Driver):
"LeCroy generic IVI oscilloscope driver"
    def __init__(self, *args, **kwargs):
        """Initialize driver state, register LeCroy-specific properties/methods.

        Sets up per-channel bookkeeping lists, instrument capability defaults,
        identity metadata, and the extra IVI properties this driver exposes,
        then builds the channel list via _init_channels().
        """
        self.__dict__.setdefault('_instrument_id', '')
        # Per-channel state containers (populated in _init_channels).
        self._analog_channel_name = list()
        self._analog_channel_count = 4
        self._digital_channel_name = list()
        self._digital_channel_count = 16
        self._channel_count = self._analog_channel_count + self._digital_channel_count
        self._channel_label = list()
        self._channel_label_position = list()
        self._channel_noise_filter = list()
        self._channel_interpolation = list()
        self._channel_probe_skew = list()
        self._channel_invert = list()
        self._channel_probe_id = list()
        self._channel_bw_limit = list()

        super(lecroyBaseScope, self).__init__(*args, **kwargs)

        self._memory_size = 5

        # Re-initialized after the base constructor so subclasses can override.
        self._analog_channel_name = list()
        self._analog_channel_count = 4
        self._digital_channel_name = list()
        self._digital_channel_count = 16
        self._channel_count = self._analog_channel_count + self._digital_channel_count
        # Instrument capability and display defaults.
        self._bandwidth = 1e9
        self._horizontal_divisions = 10
        self._vertical_divisions = 8

        self._timebase_mode = 'main'
        self._timebase_reference = 'center'
        self._timebase_position = 0.0
        self._timebase_range = 1e-3
        self._timebase_scale = 100e-6
        self._timebase_window_position = 0.0
        self._timebase_window_range = 5e-6
        self._timebase_window_scale = 500e-9
        self._trigger_mode = 'auto'
        self._trigger_type = 'edge'
        self._display_vectors = True
        self._display_labels = True
        self._display_grid = "single"

        # Identity metadata reported through the IVI identity interface.
        self._identity_description = "LeCroy generic IVI oscilloscope driver"
        self._identity_identifier = ""
        self._identity_revision = ""
        self._identity_vendor = ""
        self._identity_instrument_manufacturer = "LeCroy"
        self._identity_instrument_model = ""
        self._identity_instrument_firmware_revision = ""
        self._identity_specification_major_version = 4
        self._identity_specification_minor_version = 1
        self._identity_supported_instrument_models = ['WR204MXI-A', 'WR204XI-A', 'WR104MXI-A', 'WR104XI-A', 'WR64MXI-A',
                                                      'WR64XI-A',
                                                      'WR62XI-A', 'WR44MXI-A', 'WR44XI-A']

        # Turn off the command header to remove extra information from all responses
        # NOTE(review): this writes during __init__ and assumes the I/O session
        # already exists -- verify against ivi.Driver initialization order.
        self._write("CHDR OFF")

        self._add_property('channels[].bw_limit',
                           self._get_channel_bw_limit,
                           self._set_channel_bw_limit,
                           None,
                           ivi.Doc("""
                        Commands an internal low-pass filter. When the filter is on, the
                        bandwidth of the channel is limited to approximately 20 MHz.
                        """))
        self._add_property('channels[].invert',
                           self._get_channel_invert,
                           self._set_channel_invert,
                           None,
                           ivi.Doc("""
                        Selects whether or not to invert the channel.
                        """))
        self._add_property('channels[].label',
                           self._get_channel_label,
                           self._set_channel_label,
                           None,
                           ivi.Doc("""
                        Sets the channel label. Setting a channel label also adds the label to
                        the nonvolatile label list. Setting the label will turn it's display on.
                        """))
        self._add_property('channels[].label_position',
                           self._get_channel_label_position,
                           self._set_channel_label_position,
                           None,
                           ivi.Doc("""
                        Set the channel label positions
                        """))
        self._add_property('channels[].probe_skew',
                           self._get_channel_probe_skew,
                           self._set_channel_probe_skew,
                           None,
                           ivi.Doc("""
                        Specifies the channel-to-channel skew factor for the channel. Each analog
                        channel can be adjusted + or - 100 ns for a total of 200 ns difference
                        between channels. This can be used to compensate for differences in cable
                        delay. Units are seconds.
                        """))
        self._add_property('channels[].scale',
                           self._get_channel_scale,
                           self._set_channel_scale,
                           None,
                           ivi.Doc("""
                        Specifies the vertical scale, or units per division, of the channel. Units
                        are volts.
                        """))
        self._add_property('channels[].trigger_level',
                           self._get_channel_trigger_level,
                           self._set_channel_trigger_level,
                           None,
                           ivi.Doc("""
                        Specifies the voltage threshold for the trigger sub-system. The units are
                        volts. This attribute affects instrument behavior only when the Trigger
                        Type is set to one of the following values: Edge Trigger, Glitch Trigger,
                        or Width Trigger.

                        This attribute, along with the Trigger Slope, Trigger Source, and Trigger
                        Coupling attributes, defines the trigger event when the Trigger Type is
                        set to Edge Trigger.
                        """))
        # TODO: delete following if not used in LeCroy
        self._add_property('timebase.mode',
                           self._get_timebase_mode,
                           self._set_timebase_mode,
                           None,
                           ivi.Doc("""
                        Sets the current time base. There are four time base modes:

                        * 'main': normal timebase
                        * 'window': zoomed or delayed timebase
                        * 'xy': channels are plotted against each other, no timebase
                        * 'roll': data moves continuously from left to right
                        """))
        self._add_property('timebase.reference',
                           self._get_timebase_reference,
                           self._set_timebase_reference,
                           None,
                           ivi.Doc("""
                        Sets the time reference to one division from the left side of the screen,
                        to the center of the screen, or to one division from the right side of the
                        screen. Time reference is the point on the display where the trigger point
                        is referenced.

                        Values:

                        * 'left'
                        * 'center'
                        * 'right'
                        """))
        self._add_property('timebase.position',
                           self._get_timebase_position,
                           self._set_timebase_position,
                           None,
                           ivi.Doc("""
                        Sets the time interval between the trigger event and the display reference
                        point on the screen. The display reference point is either left, right, or
                        center and is set with the timebase.reference property. The maximum
                        position value depends on the time/division settings.
                        """))
        self._add_property('timebase.range',
                           self._get_timebase_range,
                           self._set_timebase_range,
                           None,
                           ivi.Doc("""
                        Sets the full-scale horizontal time in seconds for the main window. The
                        range is 10 times the current time-per-division setting.
                        """))
        self._add_property('timebase.scale',
                           self._get_timebase_scale,
                           self._set_timebase_scale,
                           None,
                           ivi.Doc("""
                        Sets the horizontal scale or units per division for the main window.
                        """))
        self._add_property('timebase.window.position',
                           self._get_timebase_window_position,
                           self._set_timebase_window_position,
                           None,
                           ivi.Doc("""
                        Sets the horizontal position in the zoomed (delayed) view of the main
                        sweep. The main sweep range and the main sweep horizontal position
                        determine the range for this command. The value for this command must
                        keep the zoomed view window within the main sweep range.
                        """))
        self._add_property('timebase.window.range',
                           self._get_timebase_window_range,
                           self._set_timebase_window_range,
                           None,
                           ivi.Doc("""
                        Sets the fullscale horizontal time in seconds for the zoomed (delayed)
                        window. The range is 10 times the current zoomed view window seconds per
                        division setting. The main sweep range determines the range for this
                        command. The maximum value is one half of the timebase.range value.
                        """))
        self._add_property('timebase.window.scale',
                           self._get_timebase_window_scale,
                           self._set_timebase_window_scale,
                           None,
                           ivi.Doc("""
                        Sets the zoomed (delayed) window horizontal scale (seconds/division). The
                        main sweep scale determines the range for this command. The maximum value
                        is one half of the timebase.scale value.
                        """))
        self._add_property('display.vectors',
                           self._get_display_vectors,
                           self._set_display_vectors,
                           None,
                           ivi.Doc("""
                        When enabled, draws a line between consecutive waveform data points.
                        """))
        self._add_property('display.grid',
                           self._get_grid_mode,
                           self._set_grid_mode,
                           None,
                           ivi.Doc("""
                        Sets the current grid used in the display. There are multiple grid modes.

                        Values:

                        * 'single'
                        * 'dual'
                        * 'quad'
                        * 'octal'
                        * 'auto'
                        * 'xy'
                        * 'xysingle'
                        * 'xydual'
                        """))
        self._add_property('trigger.mode',
                           self._get_trigger_mode,
                           self._set_trigger_mode,
                           None,
                           ivi.Doc("""
                        Specifies the trigger mode of the oscilloscope.

                        Values:
                        'auto', 'norm', 'single', 'stop'

                        * 'auto'
                        * 'norm'
                        * 'single'
                        * 'stop'
                        """))
        self._add_method('system.fetch_setup',
                         self._system_fetch_setup,
                         ivi.Doc("""
                        Returns the current oscilloscope setup in the form of a binary block. The
                        setup can be stored in memory or written to a file and then reloaded to the
                        oscilloscope at a later time with system.load_setup.
                        """))
        self._add_method('system.load_setup',
                         self._system_load_setup,
                         ivi.Doc("""
                        Transfers a binary block of setup data to the scope to reload a setup
                        previously saved with system.fetch_setup.
                        """))
        self._add_method('system.display_string',
                         self._system_display_string,
                         ivi.Doc("""
                        Writes a string to the advisory line on the instrument display. Send None
                        or an empty string to clear the advisory line.
                        """))
        self._add_method('display.fetch_screenshot',
                         self._display_fetch_screenshot,
                         ivi.Doc("""
                        Captures the oscilloscope screen and transfers it in the specified format.
                        The display graticule is optionally inverted.
                        """))
        self._add_method('memory.save',
                         self._memory_save,
                         ivi.Doc("""
                        Stores the current state of the instrument into an internal storage
                        register. Use memory.recall to restore the saved state.
                        """))
        self._add_method('memory.recall',
                         self._memory_recall,
                         ivi.Doc("""
                        Recalls the state of the instrument from an internal storage register
                        that was previously saved with memory.save.
                        """))

        self._init_channels()
def initialize(self, resource=None, id_query=False, reset=False, **keywargs):
"Opens an I/O session to the instrument."
self._channel_count = self._analog_channel_count + self._digital_channel_count
super(lecroyBaseScope, self).initialize(resource, id_query, reset, **keywargs)
# interface clear
if not self._driver_operation_simulate:
self._clear()
# check ID
if id_query and not self._driver_operation_simulate:
id = self.identity.instrument_model
id_check = self._instrument_id
id_short = id[:len(id_check)]
if id_short != id_check:
raise Exception("Instrument ID mismatch, expecting %s, got %s", id_check, id_short)
# reset
if reset:
self.utility.reset()
# Modified for LeCroy, working
    def _load_id_string(self):
        """Query *IDN? and cache manufacturer, model, and firmware revision."""
        if self._driver_operation_simulate:
            self._identity_instrument_manufacturer = "Not available while simulating"
            self._identity_instrument_model = "Not available while simulating"
            self._identity_instrument_firmware_revision = "Not available while simulating"
        else:
            # *IDN? response fields: manufacturer,model,serial,firmware
            lst = self._ask("*IDN?").split(",")
            self._identity_instrument_manufacturer = lst[0]
            self._identity_instrument_model = lst[1]
            self._identity_instrument_firmware_revision = lst[3]
            self._set_cache_valid(True, 'identity_instrument_manufacturer')
            self._set_cache_valid(True, 'identity_instrument_model')
            self._set_cache_valid(True, 'identity_instrument_firmware_revision')
def _get_identity_instrument_manufacturer(self):
if self._get_cache_valid():
return self._identity_instrument_manufacturer
self._load_id_string()
return self._identity_instrument_manufacturer
def _get_identity_instrument_model(self):
if self._get_cache_valid():
return self._identity_instrument_model
self._load_id_string()
return self._identity_instrument_model
def _get_identity_instrument_firmware_revision(self):
if self._get_cache_valid():
return self._identity_instrument_firmware_revision
self._load_id_string()
return self._identity_instrument_firmware_revision
    def _utility_disable(self):
        """IVI utility 'disable' -- intentionally a no-op for this driver."""
        pass
# TODO: Determine how to implement system:error? with LeCroy scope
    def _utility_error_query(self):
        """Read one entry from the instrument error queue.

        Returns a (error_code, error_message) tuple; (0, "No error") when
        simulating.
        """
        error_code = 0
        error_message = "No error"
        if not self._driver_operation_simulate:
            # NOTE(review): ':system:error?' is SCPI/Agilent-style syntax; per
            # the TODO above it is unconfirmed that LeCroy scopes support it.
            error_code, error_message = self._ask(":system:error?").split(',')
            error_code = int(error_code)
            error_message = error_message.strip(' "')
        return (error_code, error_message)
    def _utility_lock_object(self):
        """IVI utility 'lock' -- intentionally a no-op for this driver."""
        pass
# TODO: test utility reset
    def _utility_reset(self):
        """Send *RST and invalidate every cached attribute value."""
        if not self._driver_operation_simulate:
            self._write("*RST")
            self.driver_operation.invalidate_all_attributes()
    def _utility_reset_with_defaults(self):
        """Reset with defaults -- identical to a plain reset on this driver."""
        self._utility_reset()
# TODO: test self test, check if the time.sleep() is sufficient
    def _utility_self_test(self):
        """Run the instrument self test (*TST?) and return (code, message).

        Code 0 means pass; any other value reports "Self test failed".
        """
        code = 0
        message = "Self test passed"
        if not self._driver_operation_simulate:
            self._write("*TST?")
            # Wait for test to complete - may be adjusted if required
            time.sleep(40)
            code = int(self._read())
            if code != 0:
                message = "Self test failed"
        return (code, message)
    def _utility_unlock_object(self):
        """IVI utility 'unlock' -- intentionally a no-op for this driver."""
        pass
    def _init_channels(self):
        """(Re)build all per-channel bookkeeping lists.

        Creates names and default state for the analog channels ("C1".."Cn")
        and digital channels ("digital0".."digitalN"), then registers the
        channel name list with the IVI channel collection.
        """
        try:
            super(lecroyBaseScope, self)._init_channels()
        except AttributeError:
            pass

        self._channel_name = list()
        self._channel_label = list()
        self._channel_label_position = list()
        self._channel_noise_filter = list()
        self._channel_interpolation = list()
        self._channel_probe_skew = list()
        self._channel_invert = list()
        self._channel_probe_id = list()
        self._channel_bw_limit = list()
        self._channel_input_impedance = list()
        self._channel_trigger_level = list()

        # Analog channels: LeCroy names them C1, C2, ...
        self._analog_channel_name = list()
        for i in range(self._analog_channel_count):
            self._channel_name.append("C%d" % (i + 1))
            self._channel_label.append("%d" % (i + 1))
            self._channel_label_position.append(0)
            self._channel_noise_filter.append('None')
            self._channel_interpolation.append("Linear")
            self._analog_channel_name.append("C%d" % (i + 1))
            self._channel_probe_skew.append(0)
            self._channel_invert.append(False)
            self._channel_probe_id.append("NONE")
            self._channel_bw_limit.append(False)
            self._channel_coupling.append("NONE")
            self._channel_input_impedance.append(0)
            self._channel_trigger_level.append(0)

        # digital channels
        self._digital_channel_name = list()
        if (self._digital_channel_count > 0):
            for i in range(self._digital_channel_count):
                self._channel_name.append("digital%d" % i)
                self._channel_label.append("D%d" % i)
                self._digital_channel_name.append("digital%d" % i)

            for i in range(self._analog_channel_count, self._channel_count):
                self._channel_input_frequency_max[i] = 1e9
                self._channel_probe_attenuation[i] = 1
                # self._channel_coupling[i] = 'dc'
                #self._channel_input_impedance[i] = 1000000
                #self._channel_coupling[i] = 'D1M'
                self._channel_offset[i] = 0
                self._channel_range[i] = 1

        self._channel_count = self._analog_channel_count + self._digital_channel_count
        self.channels._set_list(self._channel_name)
# TODO: how to implement the following on LeCroy scope?
    def _system_fetch_setup(self):
        """Return the instrument setup as a binary block (b'' when simulating)."""
        if self._driver_operation_simulate:
            return b''

        # NOTE(review): ':system:setup?' is Agilent-style syntax; per the TODO
        # above its LeCroy equivalent is unconfirmed.
        self._write(":system:setup?")

        return self._read_ieee_block()
# TODO: how to implement the following on LeCroy scope?
    def _system_load_setup(self, data):
        """Send a binary setup block (from system.fetch_setup) back to the scope."""
        if self._driver_operation_simulate:
            return

        # NOTE(review): ':system:setup' is Agilent-style syntax; per the TODO
        # above its LeCroy equivalent is unconfirmed.
        self._write_ieee_block(data, ':system:setup ')

        # Loading a setup changes instrument state wholesale.
        self.driver_operation.invalidate_all_attributes()
# TODO: test display_string
def _system_display_string(self, string=None):
if string is None:
string = ""
if not self._driver_operation_simulate:
self._write("MESSAGE \"%s\"" % string)
# Modified for LeCroy, working
def _display_fetch_screenshot(self, format='png', invert=True):
if self._driver_operation_simulate:
return b''
if format not in ScreenshotImageFormatMapping:
raise ivi.ValueNotSupportedException()
format = ScreenshotImageFormatMapping[format]
if invert == False:
color = "BLACK"
elif invert == True:
color = "WHITE"
else:
color = "WHITE"
self._write(
"HCSU DEV,%s,FORMAT,PORTRAIT,BCKG,%s,DEST,\"REMOTE\",PORT,\"NET\",AREA,GRIDAREAONLY" % (str(format), color))
self._write("SCDP")
return self._read_raw()
# TODO: determine how to handle all :timebase: methods for LeCroy
def _get_timebase_mode(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
value = self._ask(":timebase:mode?").lower()
self._timebase_mode = [k for k, v in TimebaseModeMapping.items() if v == value][0]
self._set_cache_valid
return self._timebase_mode
    def _set_timebase_mode(self, value):
        """Set the timebase mode; value must be a key of TimebaseModeMapping."""
        if value not in TimebaseModeMapping:
            raise ivi.ValueNotSupportedException()
        if not self._driver_operation_simulate:
            self._write(":timebase:mode %s" % TimebaseModeMapping[value])
        self._timebase_mode = value
        self._set_cache_valid()
def _get_timebase_reference(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
value = self._ask(":timebase:reference?").lower()
self._timebase_reference = [k for k, v in TimebaseReferenceMapping.items() if v == value][0]
self._set_cache_valid
return self._timebase_reference
    def _set_timebase_reference(self, value):
        """Set the reference position; value must be a key of TimebaseReferenceMapping."""
        if value not in TimebaseReferenceMapping:
            raise ivi.ValueNotSupportedException()
        if not self._driver_operation_simulate:
            self._write(":timebase:reference %s" % TimebaseReferenceMapping[value])
        self._timebase_reference = value
        self._set_cache_valid()
    def _get_timebase_position(self):
        """Return the trigger-to-reference-point delay in seconds (cached)."""
        if not self._driver_operation_simulate and not self._get_cache_valid():
            # NOTE(review): ':timebase:position?' is Agilent-style syntax --
            # see the TODO above about LeCroy support.
            self._timebase_position = float(self._ask(":timebase:position?"))
            self._set_cache_valid()
        return self._timebase_position
    def _set_timebase_position(self, value):
        """Set the trigger-to-reference-point delay in seconds."""
        value = float(value)
        if not self._driver_operation_simulate:
            self._write(":timebase:position %e" % value)
        self._timebase_position = value
        self._set_cache_valid()
# Modified for LeCroy, working
    def _get_timebase_range(self):
        """Return full-scale horizontal time (s): TDIV * horizontal divisions."""
        if not self._driver_operation_simulate and not self._get_cache_valid():
            self._timebase_scale = float(self._ask("TDIV?"))
            self._timebase_range = self._timebase_scale * self._horizontal_divisions
            self._set_cache_valid()
            # TDIV? answers both range and scale; mark the sibling cache too.
            self._set_cache_valid(True, 'timebase_scale')
        return self._timebase_range
# Modified for LeCroy, working
    def _set_timebase_range(self, value):
        """Set full-scale horizontal time (s); TDIV takes the per-division value."""
        value = float(value)
        if not self._driver_operation_simulate:
            self._write("TDIV %e" % (value / self._horizontal_divisions))
        self._timebase_scale = value / self._horizontal_divisions
        self._timebase_range = value
        self._set_cache_valid()
        self._set_cache_valid(True, 'timebase_scale')
# Modified for LeCroy, working
    def _get_timebase_scale(self):
        """Return the horizontal scale in seconds per division (TDIV?)."""
        if not self._driver_operation_simulate and not self._get_cache_valid():
            self._timebase_scale = float(self._ask("TDIV?"))
            self._timebase_range = self._timebase_scale * self._horizontal_divisions
            self._set_cache_valid()
            # TDIV? answers both scale and range; mark the sibling cache too.
            self._set_cache_valid(True, 'timebase_range')
        return self._timebase_scale
# Modified for LeCroy, working
    def _set_timebase_scale(self, value):
        """Set the horizontal scale in seconds per division (TDIV)."""
        value = float(value)
        if not self._driver_operation_simulate:
            self._write("TDIV %e" % value)
        self._timebase_scale = value
        self._timebase_range = value * self._horizontal_divisions
        self._set_cache_valid()
        self._set_cache_valid(True, 'timebase_range')
    def _get_timebase_window_position(self):
        """Return the zoomed (delayed) window horizontal position (cached)."""
        if not self._driver_operation_simulate and not self._get_cache_valid():
            # NOTE(review): ':timebase:window:*' is Agilent-style syntax --
            # see the TODO above about LeCroy support.
            self._timebase_window_position = float(self._ask(":timebase:window:position?"))
            self._set_cache_valid()
        return self._timebase_window_position
    def _set_timebase_window_position(self, value):
        """Set the zoomed (delayed) window horizontal position in seconds."""
        value = float(value)
        if not self._driver_operation_simulate:
            self._write(":timebase:window:position %e" % value)
        self._timebase_window_position = value
        self._set_cache_valid()
    def _get_timebase_window_range(self):
        """Return the zoomed (delayed) window full-scale time in seconds."""
        if not self._driver_operation_simulate and not self._get_cache_valid():
            self._timebase_window_range = float(self._ask(":timebase:window:range?"))
            self._timebase_window_scale = self._timebase_window_range / self._horizontal_divisions
            self._set_cache_valid()
            # Range and scale are derived from one another; mark both caches.
            self._set_cache_valid(True, 'timebase_window_scale')
        return self._timebase_window_range
    def _set_timebase_window_range(self, value):
        """Set the zoomed (delayed) window full-scale time in seconds."""
        value = float(value)
        if not self._driver_operation_simulate:
            self._write(":timebase:window:range %e" % value)
        self._timebase_window_range = value
        self._timebase_window_scale = value / self._horizontal_divisions
        self._set_cache_valid()
        self._set_cache_valid(True, 'timebase_window_scale')
    def _get_timebase_window_scale(self):
        """Return the zoomed (delayed) window scale in seconds per division."""
        if not self._driver_operation_simulate and not self._get_cache_valid():
            self._timebase_window_scale = float(self._ask(":timebase:window:scale?"))
            self._timebase_window_range = self._timebase_window_scale * self._horizontal_divisions
            self._set_cache_valid()
            self._set_cache_valid(True, 'timebase_window_range')
        return self._timebase_window_scale
    def _set_timebase_window_scale(self, value):
        """Set the zoomed (delayed) window scale in seconds per division."""
        value = float(value)
        if not self._driver_operation_simulate:
            self._write(":timebase:window:scale %e" % value)
        self._timebase_window_scale = value
        self._timebase_window_range = value * self._horizontal_divisions
        self._set_cache_valid()
        self._set_cache_valid(True, 'timebase_window_range')
    def _get_display_vectors(self):
        """Return True when the display connects sample points with vectors."""
        if not self._driver_operation_simulate and not self._get_cache_valid():
            # NOTE(review): ':display:vectors?' is Agilent-style syntax --
            # unconfirmed on LeCroy.
            self._display_vectors = bool(int(self._ask(":display:vectors?")))
            self._set_cache_valid()
        return self._display_vectors
    def _set_display_vectors(self, value):
        """Enable/disable drawing vectors between consecutive sample points."""
        value = bool(value)
        if not self._driver_operation_simulate:
            self._write(":display:vectors %d" % int(value))
        self._display_vectors = value
        self._set_cache_valid()
# Modified for LeCroy, working
def _get_grid_mode(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._display_vectors = str(self._ask("GRID?"))
self._set_cache_valid()
return self._display_vectors
# Modified for LeCroy, working
def _set_grid_mode(self, value):
if not self._driver_operation_simulate:
self._write("GRID %s" % str(value))
self._display_vectors = str(value)
self._set_cache_valid()
    def _get_acquisition_start_time(self):
        """Return the acquisition start time (waveform x-origin) in seconds."""
        if not self._driver_operation_simulate and not self._get_cache_valid():
            # NOTE(review): ':waveform:xorigin?' is Agilent-style syntax --
            # unconfirmed on LeCroy.
            self._acquisition_start_time = float(self._ask(":waveform:xorigin?"))
            self._set_cache_valid()
        return self._acquisition_start_time
    def _set_acquisition_start_time(self, value):
        """Set the acquisition start time (s) via the timebase position.

        Adds half the record duration to convert the left-edge start time into
        the screen-center trigger position expected by the instrument.
        """
        value = float(value)
        value = value + self._get_acquisition_time_per_record() * 5 / 10
        if not self._driver_operation_simulate:
            self._write(":timebase:position %e" % value)
        self._acquisition_start_time = value
        self._set_cache_valid()
    def _get_acquisition_type(self):
        """Return the acquisition type (key of AcquisitionTypeMapping)."""
        if not self._driver_operation_simulate and not self._get_cache_valid():
            # NOTE(review): ':acquire:type?' is Agilent-style syntax --
            # unconfirmed on LeCroy.
            value = self._ask(":acquire:type?").lower()
            self._acquisition_type = [k for k, v in AcquisitionTypeMapping.items() if v == value][0]
            self._set_cache_valid()
        return self._acquisition_type
    def _set_acquisition_type(self, value):
        """Set the acquisition type; value must be a key of AcquisitionTypeMapping."""
        if value not in AcquisitionTypeMapping:
            raise ivi.ValueNotSupportedException()
        if not self._driver_operation_simulate:
            self._write(":acquire:type %s" % AcquisitionTypeMapping[value])
        self._acquisition_type = value
        self._set_cache_valid()
    def _get_acquisition_number_of_points_minimum(self):
        """Return the requested minimum record length (soft setting only)."""
        return self._acquisition_number_of_points_minimum
def _set_acquisition_number_of_points_minimum(self, value):
value = int(value)
self._acquisition_number_of_points_minimum = value
# Modified for LeCroy, WORKING ON WR104XI-A
    def _get_acquisition_record_length(self):
        """Return the memory size / record length reported by MSIZ? (cached)."""
        if not self._driver_operation_simulate and not self._get_cache_valid():
            self._acquisition_record_length = float(self._ask("MSIZ?"))
            self._set_cache_valid()
        return self._acquisition_record_length
# Modified for LeCroy, WORKING ON WR104XI-A
    def _get_acquisition_time_per_record(self):
        """Return the total acquisition window (s): TDIV? * horizontal divisions."""
        if not self._driver_operation_simulate and not self._get_cache_valid():
            self._acquisition_time_per_record = float(self._ask("TDIV?")) * self._horizontal_divisions
            self._set_cache_valid()
        return self._acquisition_time_per_record
# Modified for LeCroy, WORKING ON WR104XI-A
def _set_acquisition_time_per_record(self, value):
value = float(value)
if not self._driver_operation_simulate:
self._write("TDIV %e" % (value / self._horizontal_divisions))
self._acquisition_time_per_record = value * self._horizontal_divisions
self._set_cache_valid()
self._set_cache_valid(False, 'acquisition_start_time')
# This method implemented differently in WRXIA, not tested with other LeCroy scope
    def _get_channel_label(self, index):
        """Return the display label of the given channel (cached per index)."""
        index = ivi.get_index(self._channel_name, index)
        if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
            # NOTE(review): ':<ch>:label?' looks Agilent-style -- the comment
            # above notes this is implemented differently on WRXi-A.
            self._channel_label[index] = self._ask(":%s:label?" % self._channel_name[index]).strip('"')
            self._set_cache_valid(index=index)
        return self._channel_label[index]
# This method implemented differently in WRXIA, not tested with other LeCroy scope
    def _set_channel_label(self, index, value):
        """Set the display label of the given channel."""
        value = str(value)
        index = ivi.get_index(self._channel_name, index)
        if not self._driver_operation_simulate:
            self._write(":%s:label \"%s\"" % (self._channel_name[index], value))
        self._channel_label[index] = value
        self._set_cache_valid(index=index)
# Modified for LeCroy, WORKING ON WR104XI-A
    def _get_channel_enabled(self, index):
        """Return whether the channel trace is displayed (TRA? -> ON/OFF)."""
        index = ivi.get_index(self._channel_name, index)
        if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
            trace = self._ask("%s:TRA?" % self._channel_name[index])
            # Any response other than ON/OFF leaves the cached value untouched.
            if trace == "ON":
                self._channel_enabled[index] = True
            elif trace == "OFF":
                self._channel_enabled[index] = False
            self._set_cache_valid(index=index)
        return self._channel_enabled[index]
# Modified for LeCroy, WORKING ON WR104XI-A
    def _set_channel_enabled(self, index, value):
        """Show or hide the channel trace (TRA ON/OFF)."""
        value = bool(value)
        index = ivi.get_index(self._channel_name, index)
        if not self._driver_operation_simulate:
            if value == False:
                self._write("%s:TRA OFF" % self._channel_name[index])
            elif value == True:
                self._write("%s:TRA ON" % self._channel_name[index])
        self._channel_enabled[index] = value
        self._set_cache_valid(index=index)
# TODO: test channel.input_impedance
def _get_channel_input_impedance(self, index):
index = ivi.get_index(self._analog_channel_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
result = str(self._ask("%s:coupling?" % self._channel_name[index])).lower().split()
result = result[1]
if result == 'a1m':
impedance = 1000000
coupling = "ac"
elif result == "a1m":
impedance = 1000000
coupling = "dc"
elif result == 'd50':
impedance = 50
coupling = "dc"
elif result == 'gnd':
impedance = 1000000
coupling = "gnd"
self._channel_input_impedance[index] = impedance
self._channel_coupling[index] = coupling
self._set_cache_valid(index=index)
return self._channel_input_impedance[index]
# TODO: test channel.input_impedance
    def _set_channel_input_impedance(self, index, value):
        """Set the input impedance (ohms) while preserving the AC/DC coupling.

        The instrument combines coupling and impedance in one token, so the
        current coupling is queried first and a combined token is written.
        Raises Exception for unsupported combinations (e.g. AC into 50 ohm).
        """
        index = ivi.get_index(self._analog_channel_name, index)
        if value not in InputImpedance:
            raise Exception('Invalid input impedance selection')
        # Check current coupling setting to know if AC or DC
        result = str(self._ask("%s:coupling?" % self._channel_name[index])).lower()
        if result[0] == "a" and value == 1000000:
            coupling = "a1m"
        elif result[0] == "a" and value == 50:
            raise Exception('Invalid impedance selection')
        elif result[0] == "d" and value == 1000000:
            coupling = "d1m"
        elif result[0] == "d" and value == 50:
            coupling = "d50"
        # NOTE(review): this compares the full response to 'gnd' while the
        # branches above inspect only result[0] -- verify the exact COUPLING?
        # response format against the remote-control manual.
        elif result == "gnd":
            if value == 50:
                coupling = "d50"
            elif value == 1000000:
                coupling = "d1m"
        else:
            raise Exception('Invalid impedance selection')
        if not self._driver_operation_simulate:
            self._write("%s:coupling %s" % (self._channel_name[index], coupling.upper()))
        self._channel_input_impedance[index] = value
        self._set_cache_valid(index=index)
    def _get_channel_input_frequency_max(self, index):
        """Return the configured maximum input frequency (Hz) for the channel."""
        index = ivi.get_index(self._analog_channel_name, index)
        return self._channel_input_frequency_max[index]
    def _set_channel_input_frequency_max(self, index, value):
        """Set the maximum input frequency (Hz) for the channel.

        Values below 20 MHz enable the channel's bandwidth-limit filter.
        """
        value = float(value)
        index = ivi.get_index(self._analog_channel_name, index)
        if not self._driver_operation_simulate:
            self._set_channel_bw_limit(index, value < 20e6)
        self._channel_input_frequency_max[index] = value
        self._set_cache_valid(index=index)
# Tested, working on WRX104MXiA
    def _get_channel_probe_attenuation(self, index):
        """Return the probe attenuation factor for the channel (cached)."""
        index = ivi.get_index(self._analog_channel_name, index)
        if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
            self._channel_probe_attenuation[index] = int(
                (self._ask("%s:attenuation?" % self._channel_name[index])))
            self._set_cache_valid(index=index)
        return self._channel_probe_attenuation[index]
# TODO: not working yet, can not write the correct value
def _set_channel_probe_attenuation(self, index, value):
"""
<channel> : ATTeNuation <attenuation>
<channel> :={C1,C2,C3,C4,EX,EX10}
<attenuation> : = {1, 2, 5, 10, 20, 25, 50, 100, 200, 500, 1000, 10000}
"""
index = ivi.get_index(self._analog_channel_name, index)
# value = str(value)
if not self._driver_operation_simulate:
self._write("%s:ATTN %e" % (self._channel_name[index], value))
self._channel_probe_attenuation[index] = value
self._set_cache_valid(index=index)
def _get_channel_invert(self, index):
index = ivi.get_index(self._analog_channel_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
self._channel_invert[index] = bool(int(self._ask(":%s:invert?" % self._channel_name[index])))
self._set_cache_valid(index=index)
return self._channel_invert[index]
def _set_channel_invert(self, index, value):
index = ivi.get_index(self._analog_channel_name, index)
value = bool(value)
if not self._driver_operation_simulate:
self._write(":%s:invert %e" % (self._channel_name[index], int(value)))
self._channel_invert[index] = value
self._set_cache_valid(index=index)
def _get_channel_probe_id(self, index):
index = ivi.get_index(self._analog_channel_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
self._channel_probe_id[index] = self._ask(":%s:probe:id?" % self._channel_name[index])
self._set_cache_valid(index=index)
return self._channel_probe_id[index]
# Modified for LeCroy, WORKING ON WR104XI-A
    def _get_channel_bw_limit(self, index):
        """Return the bandwidth-limit setting of one analog channel.

        The BWL? query reports all channels at once as a flat
        "name,value,name,value,..." list; the value following the channel
        name is extracted.
        """
        index = ivi.get_index(self._analog_channel_name, index)
        if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
            # On WRXiA bandwidth limits are read out all at one time, we need to split the list to get specified channel
            limits = (self._ask("BWL?").strip()).split(',')
            if self._channel_name[index] in limits:
                self._channel_bw_limit[index] = limits[limits.index(self._channel_name[index]) + 1]
                # Cache is only marked valid when the channel appeared in the
                # reply; otherwise the stale/initial value is returned below.
                self._set_cache_valid(index=index)
        return self._channel_bw_limit[index]
# Modified for LeCroy, WORKING ON WR104XI-A
def _set_channel_bw_limit(self, index, value):
index = ivi.get_index(self._analog_channel_name, index)
if not self._driver_operation_simulate:
self._write("BWL %s,%s" % (self._channel_name[index], value))
self._channel_bw_limit[index] = value
self._set_cache_valid(index=index)
# TODO: FIX COUPLING AND IMPEDANCE
def _get_channel_coupling(self, index):
index = ivi.get_index(self._analog_channel_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
result = self._ask("%s:coupling?" % self._channel_name[index]).lower().split()
self._channel_coupling[index] = result[1]
return self._channel_coupling[index]
# TODO: FIX COUPLING AND IMPEDANCE - split coupling from impedance to avoid errors?
def _set_channel_coupling(self, index, value):
index = ivi.get_index(self._analog_channel_name, index)
if value not in VerticalCoupling:
raise ivi.ValueNotSupportedException()
# Check current impedance setting to know if impedance is 1M, 50, or GND
result = str(self._ask("%s:coupling?" % self._channel_name[index])).lower()
if result[1:3] == "1m" or result == "gnd":
if value == "ac":
coupling = "a1m"
elif value == "dc":
coupling = "d1m"
elif result[1:3] == "50" and value == "dc":
coupling = "d50"
elif result[1:3] == "50" and value == "ac":
raise Exception('Invalid coupling selection, set correct impedance')
elif value == "gnd":
coupling = "gnd"
if not self._driver_operation_simulate:
self._write("%s:coupling %s" % (self._channel_name[index], coupling.upper()))
self._channel_coupling[index] = value
self._set_cache_valid(index=index)
# TODO: test
def _get_channel_offset(self, index):
index = ivi.get_index(self._channel_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
self._channel_offset[index] = float(self._ask("%s:offset?" % self._channel_name[index]))
self._set_cache_valid(index=index)
return self._channel_offset[index]
# TODO: test
def _set_channel_offset(self, index, value):
index = ivi.get_index(self._channel_name, index)
value = float(value)
if not self._driver_operation_simulate:
self._write("%s:offset %e" % (self._channel_name[index], value))
self._channel_offset[index] = value
self._set_cache_valid(index=index)
def _get_channel_range(self, index):
index = ivi.get_index(self._channel_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
self._channel_range[index] = float(self._ask(":%s:range?" % self._channel_name[index]))
self._channel_scale[index] = self._channel_range[index] / self._vertical_divisions
self._set_cache_valid(index=index)
self._set_cache_valid(True, "channel_scale", index)
return self._channel_range[index]
def _set_channel_range(self, index, value):
index = ivi.get_index(self._channel_name, index)
value = float(value)
if not self._driver_operation_simulate:
self._write(":%s:range %e" % (self._channel_name[index], value))
self._channel_range[index] = value
self._channel_scale[index] = value / self._vertical_divisions
self._set_cache_valid(index=index)
self._set_cache_valid(True, "channel_scale", index)
def _get_channel_scale(self, index):
index = ivi.get_index(self._channel_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
self._channel_scale[index] = float(self._ask(":%s:scale?" % self._channel_name[index]))
self._channel_range[index] = self._channel_scale[index] * self._vertical_divisions
self._set_cache_valid(index=index)
self._set_cache_valid(True, "channel_range", index)
return self._channel_scale[index]
def _set_channel_scale(self, index, value):
index = ivi.get_index(self._channel_name, index)
value = float(value)
if not self._driver_operation_simulate:
self._write(":%s:scale %e" % (self._channel_name[index], value))
self._channel_scale[index] = value
self._channel_range[index] = value * self._vertical_divisions
self._set_cache_valid(index=index)
self._set_cache_valid(True, "channel_range", index)
    def _get_measurement_status(self):
        # Returns the locally cached status only; the instrument is not queried.
        return self._measurement_status
    def _get_trigger_coupling(self):
        """Return the trigger coupling as a TriggerCouplingMapping key.

        The mapping combines three instrument settings: the coupling mode and
        the noise-reject and HF-reject flags.

        NOTE(review): the cache is never marked valid here, so the instrument
        is re-queried on every call; also, if no mapping entry matches the
        triple, the previous value is silently returned -- confirm intended.
        """
        if not self._driver_operation_simulate and not self._get_cache_valid():
            cpl = self._ask(":trigger:coupling?").lower()
            noise = int(self._ask(":trigger:nreject?"))
            hf = int(self._ask(":trigger:hfreject?"))
            # Reverse-map the (coupling, noise, hf) triple to its symbolic key.
            for k in TriggerCouplingMapping:
                if (cpl, noise, hf) == TriggerCouplingMapping[k]:
                    self._trigger_coupling = k
        return self._trigger_coupling
def _set_trigger_coupling(self, value):
if value not in TriggerCouplingMapping:
raise ivi.ValueNotSupportedException()
if not self._driver_operation_simulate:
cpl, noise, hf = TriggerCouplingMapping[value]
self._write(":trigger:coupling %s" % cpl)
self._write(":trigger:nreject %d" % noise)
self._write(":trigger:hfreject %d" % hf)
self._trigger_coupling = value
self._set_cache_valid()
def _get_trigger_holdoff(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._trigger_holdoff = float(self._ask(":trigger:holdoff?"))
self._set_cache_valid()
return self._trigger_holdoff
def _set_trigger_holdoff(self, value):
value = float(value)
if not self._driver_operation_simulate:
self._write(":trigger:holdoff %e" % value)
self._trigger_holdoff = value
self._set_cache_valid()
# Modified for LeCroy, WORKING ON WR104XI-A
    def _get_channel_trigger_level(self, index):
        """Return the trigger level (volts) of one channel via TRLV?.

        The reply contains a value plus trailing unit/state fields; only the
        leading numeric token is parsed.

        NOTE(review): unlike the sibling accessors, `index` is not normalized
        with ivi.get_index and the cache is not keyed per channel -- confirm
        callers always pass a raw integer index.
        """
        if not self._driver_operation_simulate and not self._get_cache_valid():
            self._channel_trigger_level[index] = float(self._ask(("%s:TRLV?") % self._channel_name[index]).split(",")[0].split(" ")[0])
            self._set_cache_valid()
        return self._channel_trigger_level[index]
# Modified for LeCroy, WORKING ON WR104XI-A
def _set_channel_trigger_level(self, index, value):
value = float(value)
if not self._driver_operation_simulate:
self._write("%s:TRLV %e" % (self._channel_name[index], value))
self._channel_trigger_level[index] = value
self._set_cache_valid()
def _get_trigger_edge_slope(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
value = self._ask(":trigger:edge:slope?").lower()
self._trigger_edge_slope = [k for k, v in SlopeMapping.items() if v == value][0]
self._set_cache_valid()
return self._trigger_edge_slope
def _set_trigger_edge_slope(self, value):
if value not in SlopeMapping:
raise ivi.ValueNotSupportedException()
if not self._driver_operation_simulate:
self._write(":trigger:edge:slope %s" % SlopeMapping[value])
self._trigger_edge_slope = value
self._set_cache_valid()
# To only get the trigger source, the entire TRSE must be read out
# Modified for LeCroy, WORKING ON WR104XI-A
    def _get_trigger_source(self):
        """Return the trigger source channel name.

        The TRSE? reply is a comma-separated key/value list; the source is
        the token following the "SR" key.
        """
        if not self._driver_operation_simulate and not self._get_cache_valid():
            vals = self._ask("TRSE?")
            vals = vals.split(",")
            #type = vals[0]
            source = vals[vals.index('SR')+1]
            self._trigger_source = source
            self._set_cache_valid()
        return self._trigger_source
# To only set the trigger source, the entire TRSE must be read out and then the new trigger source is hacked in
# Modified for LeCroy, WORKING ON WR104XI-A
    def _set_trigger_source(self, value):
        """Set the trigger source channel.

        TRSE carries the whole trigger setup, so the current setup is read
        back, only the value after the "SR" key is replaced, and the
        modified string is written back unchanged otherwise.

        :raises ivi.UnknownPhysicalNameException: if value is not a known
            channel name.
        """
        value = str(value)
        if value not in self._channel_name:
            raise ivi.UnknownPhysicalNameException()
        if not self._driver_operation_simulate:
            vals = self._ask("TRSE?")
            split_vals = vals.split(",")
            # Swap in the new source directly after the "SR" key.
            split_vals[split_vals.index('SR')+1] = value
            vals = ",".join(split_vals)
            self._write("TRSE %s" % vals)
        self._trigger_source = value
        self._set_cache_valid()
# Modified for LeCroy, WORKING ON WR104XI-A
def _set_trigger_mode(self, value):
value = value.lower()
if value not in TriggerModes:
raise ivi.ValueNotSupportedException()
if not self._driver_operation_simulate:
self._write("TRMD %s" % value.lower())
self._trigger_mode = value
self._set_cache_valid()
# Modified for LeCroy, WORKING ON WR104XI-A
def _get_trigger_mode(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
value = self._ask("TRMD?").lower()
self._trigger_mode = value
self._set_cache_valid()
return self._trigger_mode
# Modified for LeCroy, WORKING ON WR104XI-A
def _get_trigger_type(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
vals = self._ask("TRSE?")
value = vals.split(",")[0]
self._trigger_type = value.lower()
self._set_cache_valid()
return self._trigger_type
# Modified for LeCroy, WORKING ON WR104XI-A
def _set_trigger_type(self, value):
value = value.lower()
if value not in TriggerTypes:
raise ivi.ValueNotSupportedException()
if not self._driver_operation_simulate:
self._write("TRSE %s" % value)
self._trigger_type = value
self._set_cache_valid()
    def _measurement_abort(self):
        # Intentional no-op: no abort command is issued for this instrument
        # family; the method exists to satisfy the scope interface.
        pass
# def _get_trigger_tv_trigger_event(self):
# if not self._driver_operation_simulate and not self._get_cache_valid():
# value = self._ask(":trigger:tv:mode?").lower()
# # may need processing
# self._trigger_tv_trigger_event = [k for k, v in TVTriggerEventMapping.items() if v == value][0]
# self._set_cache_valid()
# return self._trigger_tv_trigger_event
#
# def _set_trigger_tv_trigger_event(self, value):
# if value not in TVTriggerEvent:
# raise ivi.ValueNotSupportedException()
# # may need processing
# if not self._driver_operation_simulate:
# self._write(":trigger:tv:mode %s" % TVTriggerEventMapping[value])
# self._trigger_tv_trigger_event = value
# self._set_cache_valid()
#
# def _get_trigger_tv_line_number(self):
# if not self._driver_operation_simulate and not self._get_cache_valid():
# value = int(self._ask(":trigger:tv:line?"))
# # may need processing
# self._trigger_tv_line_number = value
# self._set_cache_valid()
# return self._trigger_tv_line_number
#
# def _set_trigger_tv_line_number(self, value):
# value = int(value)
# # may need processing
# if not self._driver_operation_simulate:
# self._write(":trigger:tv:line %e" % value)
# self._trigger_tv_line_number = value
# self._set_cache_valid()
#
# def _get_trigger_tv_polarity(self):
# if not self._driver_operation_simulate and not self._get_cache_valid():
# value = self._ask(":trigger:tv:polarity?").lower()
# self._trigger_tv_polarity = [k for k, v in PolarityMapping.items() if v == value][0]
# self._set_cache_valid()
# return self._trigger_tv_polarity
#
# def _set_trigger_tv_polarity(self, value):
# if value not in PolarityMapping:
# raise ivi.ValueNotSupportedException()
# if not self._driver_operation_simulate:
# self._write(":trigger:tv:polarity %s" % PolarityMapping[value])
# self._trigger_tv_polarity = value
# self._set_cache_valid()
#
# def _get_trigger_tv_signal_format(self):
# if not self._driver_operation_simulate and not self._get_cache_valid():
# value = self._ask(":trigger:tv:standard?").lower()
# self._trigger_tv_signal_format = [k for k, v in TVTriggerFormatMapping.items() if v == value][0]
# self._set_cache_valid()
# return self._trigger_tv_signal_format
#
# def _set_trigger_tv_signal_format(self, value):
# if value not in TVTriggerFormatMapping:
# raise ivi.ValueNotSupportedException()
# if not self._driver_operation_simulate:
# self._write(":trigger:tv:standard %s" % TVTriggerFormatMapping[value])
# self._trigger_tv_signal_format = value
# self._set_cache_valid()
#
# def _get_trigger_glitch_condition(self):
# if not self._driver_operation_simulate and not self._get_cache_valid():
# value = self._ask(":trigger:glitch:qualifier?").lower()
# if value in GlitchConditionMapping.values():
# self._trigger_glitch_condition = [k for k, v in GlitchConditionMapping.items() if v == value][0]
# self._set_cache_valid()
# return self._trigger_glitch_condition
#
# def _set_trigger_glitch_condition(self, value):
# if value not in GlitchConditionMapping:
# raise ivi.ValueNotSupportedException()
# if not self._driver_operation_simulate:
# self._write(":trigger:glitch:qualifier %s" % GlitchConditionMapping[value])
# self._trigger_glitch_condition = value
# self._set_cache_valid()
#
# def _get_trigger_glitch_polarity(self):
# return self._get_trigger_width_polarity()
#
# def _set_trigger_glitch_polarity(self, value):
# self._set_trigger_width_polarity(value)
#
# def _get_trigger_glitch_width(self):
# if self._get_trigger_glitch_condition() == 'greater_than':
# return self._get_trigger_width_threshold_low()
# else:
# return self._get_trigger_width_threshold_high()
#
# def _set_trigger_glitch_width(self, value):
# if self._get_trigger_glitch_condition() == 'greater_than':
# self._set_trigger_width_threshold_low(value)
# else:
# self._set_trigger_width_threshold_high(value)
#
# def _get_trigger_width_condition(self):
# if not self._driver_operation_simulate and not self._get_cache_valid():
# value = self._ask(":trigger:glitch:qualifier?").lower()
# if value in WidthConditionMapping.values():
# self._trigger_width_condition = [k for k, v in WidthConditionMapping.items() if v == value][0]
# self._set_cache_valid()
# return self._trigger_width_condition
#
# def _set_trigger_width_condition(self, value):
# if value not in WidthConditionMapping:
# raise ivi.ValueNotSupportedException()
# if not self._driver_operation_simulate:
# self._write(":trigger:glitch:qualifier %s" % WidthConditionMapping[value])
# self._trigger_width_condition = value
# self._set_cache_valid()
#
# def _get_trigger_width_threshold_high(self):
# if not self._driver_operation_simulate and not self._get_cache_valid():
# self._trigger_width_threshold_high = float(self._ask(":trigger:glitch:lessthan?"))
# self._set_cache_valid()
# return self._trigger_width_threshold_high
#
# def _set_trigger_width_threshold_high(self, value):
# value = float(value)
# if not self._driver_operation_simulate:
# self._write(":trigger:glitch:lessthan %e" % value)
# self._trigger_width_threshold_high = value
# self._set_cache_valid()
#
# def _get_trigger_width_threshold_low(self):
# if not self._driver_operation_simulate and not self._get_cache_valid():
# self._trigger_width_threshold_low = float(self._ask(":trigger:glitch:greaterthan?"))
# self._set_cache_valid()
# return self._trigger_width_threshold_low
#
# def _set_trigger_width_threshold_low(self, value):
# value = float(value)
# if not self._driver_operation_simulate:
# self._write(":trigger:glitch:greaterthan %e" % value)
# self._trigger_width_threshold_low = value
# self._set_cache_valid()
#
# def _get_trigger_width_polarity(self):
# if not self._driver_operation_simulate and not self._get_cache_valid():
# value = self._ask(":trigger:glitch:polarity?").lower()
# self._trigger_width_polarity = [k for k, v in PolarityMapping.items() if v == value][0]
# self._set_cache_valid()
# return self._trigger_width_polarity
#
# def _set_trigger_width_polarity(self, value):
# if value not in Polarity:
# raise ivi.ValueNotSupportedException()
# if not self._driver_operation_simulate:
# self._write(":trigger:glitch:polarity %s" % PolarityMapping[value])
# self._trigger_width_polarity = value
# self._set_cache_valid()
#
# def _get_trigger_ac_line_slope(self):
# return self._get_trigger_edge_slope()
#
# def _set_trigger_ac_line_slope(self, value):
# self._set_trigger_edge_slope(value)
# Modified for LeCroy, WORKING ON WR104XI-A
def _measurement_fetch_waveform(self, index):
index = ivi.get_index(self._channel_name, index)
if self._driver_operation_simulate:
return list()
# Send the MSB first
# old - self._write(":waveform:byteorder msbfirst")
self._write("COMM_ORDER HI")
self._write("COMM_FORMAT DEF9,WORD,BIN")
# Read wave description and split up parts into variables
pre = self._ask("%s:INSPECT? WAVEDESC" % self._channel_name[index]).split("\r\n")
# Replace following with a more simple solution, make it < Python 2.7 compatible
temp = []
for item in pre:
temp.append(item.split(':'))
# Dict comprehension, python 2.7+
#mydict = {t[0].strip(): ["".join(elem.strip()) for elem in t[1:]] for t in temp}
#format = str(mydict["COMM_TYPE"][0])
#points = int(mydict["PNTS_PER_SCREEN"][0])
#xincrement = float(mydict["HORIZ_INTERVAL"][0])
#xorigin = float(mydict["HORIZ_OFFSET"][0])
#yincrement = float(mydict["VERTICAL_GAIN"][0])
#yorigin = float(mydict["VERTICAL_OFFSET"][0])
# Dict with lost comprehension, python 2.6+
mydict = dict([(d[0].strip(), "".join(d[1:]).strip()) for d in temp])
format = str(mydict["COMM_TYPE"])
points = int(mydict["PNTS_PER_SCREEN"])
xincrement = float(mydict["HORIZ_INTERVAL"])
xorigin = float(mydict["HORIZ_OFFSET"])
yincrement = float(mydict["VERTICAL_GAIN"])
yorigin = float(mydict["VERTICAL_OFFSET"])
# Verify that the data is in 'word' format
if format.lower() != "word":
raise ivi.UnexpectedResponseException()
# Read waveform data
self._write("%s:WAVEFORM? DAT1" % self._channel_name[index])
raw_data = raw_data = self._read_ieee_block()
# Split out points and convert to time and voltage pairs
data = list()
for i in range(points):
x = (i * xincrement) + xorigin
yval = struct.unpack(">H", raw_data[i * 2:i * 2 + 2])[0]
if yval > 32767:
yval = yval - (2 ** 16)
if yval == 0:
# hole value
y = float('nan')
else:
y = (yincrement * yval) - yorigin
data.append((x, y))
return data
    def _measurement_read_waveform(self, index, maximum_time):
        # NOTE(review): maximum_time is ignored -- no acquisition-complete
        # wait happens before fetching; confirm callers expect that.
        return self._measurement_fetch_waveform(index)
    def _measurement_initiate(self):
        """Start a single acquisition and invalidate the run-state cache."""
        if not self._driver_operation_simulate:
            # NOTE(review): ":acquire:complete" / ":digitize" are
            # Agilent-style commands; confirm the LeCroy firmware accepts
            # them (the LeCroy-specific methods above use TRMD/ARM instead).
            self._write(":acquire:complete 100")
            self._write(":digitize")
            self._set_cache_valid(False, 'trigger_continuous')
    def _get_reference_level_high(self):
        # High measurement reference level (percent); cached value only.
        return self._reference_level_high
def _set_reference_level_high(self, value):
value = float(value)
if value < 5: value = 5
if value > 95: value = 95
self._reference_level_high = value
if not self._driver_operation_simulate:
self._write(":measure:define thresholds, %e, %e, %e" %
(self._reference_level_high,
self._reference_level_middle,
self._reference_level_low))
    def _get_reference_level_low(self):
        # Low measurement reference level (percent); cached value only.
        return self._reference_level_low
def _set_reference_level_low(self, value):
value = float(value)
if value < 5: value = 5
if value > 95: value = 95
self._reference_level_low = value
if not self._driver_operation_simulate:
self._write(":measure:define thresholds, %e, %e, %e" %
(self._reference_level_high,
self._reference_level_middle,
self._reference_level_low))
    def _get_reference_level_middle(self):
        # Middle measurement reference level (percent); cached value only.
        return self._reference_level_middle
def _set_reference_level_middle(self, value):
value = float(value)
if value < 5: value = 5
if value > 95: value = 95
self._reference_level_middle = value
if not self._driver_operation_simulate:
self._write(":measure:define thresholds, %e, %e, %e" %
(self._reference_level_high,
self._reference_level_middle,
self._reference_level_low))
    def _measurement_fetch_waveform_measurement(self, index, measurement_function, ref_channel=None):
        """Fetch a scalar measurement (e.g. frequency, amplitude) for a channel.

        :param index: channel name or index.
        :param measurement_function: key into MeasurementFunctionMapping (or
            the digital variant for digital channels).
        :param ref_channel: second channel, required for two-source
            measurements ('ratio', 'phase', 'delay').
        :returns: the measured value as float, or 0 when simulating.
        :raises ivi.ValueNotSupportedException: for an unknown function.
        """
        index = ivi.get_index(self._channel_name, index)
        if index < self._analog_channel_count:
            if measurement_function not in MeasurementFunctionMapping:
                raise ivi.ValueNotSupportedException()
            func = MeasurementFunctionMapping[measurement_function]
        else:
            if measurement_function not in MeasurementFunctionMappingDigital:
                raise ivi.ValueNotSupportedException()
            func = MeasurementFunctionMappingDigital[measurement_function]
        if not self._driver_operation_simulate:
            # Turn the command template into a query: append '?' to the
            # first word and ',' to the last when parameters are present.
            l = func.split(' ')
            l[0] = l[0] + '?'
            if len(l) > 1:
                l[-1] = l[-1] + ','
            func = ' '.join(l)
            query = ":measure:%s %s" % (func, self._channel_name[index])
            if measurement_function in ['ratio', 'phase', 'delay']:
                # Two-source measurements also need the reference channel.
                ref_index = ivi.get_index(self._channel_name, ref_channel)
                query += ", %s" % self._channel_name[ref_index]
            return float(self._ask(query))
        return 0
    def _measurement_read_waveform_measurement(self, index, measurement_function, maximum_time):
        # NOTE(review): maximum_time is ignored -- no wait before fetching.
        return self._measurement_fetch_waveform_measurement(index, measurement_function)
    def _get_acquisition_number_of_envelopes(self):
        # Cached value only; envelope count is not read back from the instrument.
        return self._acquisition_number_of_envelopes
    def _set_acquisition_number_of_envelopes(self, value):
        # Stored locally only; no command is sent to the instrument.
        self._acquisition_number_of_envelopes = value
    def _measurement_fetch_waveform_min_max(self, index):
        # Stub: min/max (envelope) waveform readout is not implemented for
        # this driver; always returns an empty list.
        index = ivi.get_index(self._channel_name, index)
        data = list()
        return data
def _measurement_read_waveform_min_max(self, index, maximum_time):
return _measurement_fetch_waveform_min_max(index)
def _get_trigger_continuous(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._trigger_continuous = (int(self._ask(":oper:cond?")) & 1 << 3) != 0
self._set_cache_valid()
return self._trigger_continuous
def _set_trigger_continuous(self, value):
value = bool(value)
if not self._driver_operation_simulate:
t = 'stop'
if value: t = 'run'
self._write(":%s" % t)
self._trigger_continuous = value
self._set_cache_valid()
def _get_acquisition_number_of_averages(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._acquisition_number_of_averages = int(self._ask(":acquire:count?"))
self._set_cache_valid()
return self._acquisition_number_of_averages
def _set_acquisition_number_of_averages(self, value):
if value < 1 or value > 65536:
raise ivi.OutOfRangeException()
if not self._driver_operation_simulate:
self._write(":acquire:count %d" % value)
self._acquisition_number_of_averages = value
self._set_cache_valid()
def _get_acquisition_sample_mode(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
value = self._ask(":acquire:mode?").lower()
self._acquisition_sample_mode = [k for k, v in SampleModeMapping.items() if v == value][0]
self._set_cache_valid()
return self._acquisition_sample_mode
def _set_acquisition_sample_mode(self, value):
if value not in SampleModeMapping:
raise ivi.ValueNotSupportedException()
if not self._driver_operation_simulate:
self._write(":acquire:mode %s" % SampleModeMapping[value])
self._acquisition_sample_mode = value
self._set_cache_valid()
# Not changed
    def _measurement_auto_setup(self):
        # Issue the instrument's auto-setup command (ASET).
        if not self._driver_operation_simulate:
            self._write("ASET")
# WORKING ON WR104XI-A
def _memory_save(self, index):
index = int(index)
if index < 0 or index > self._memory_size:
raise OutOfRangeException()
if not self._driver_operation_simulate:
self._write("*sav %d" % index)
# WORKING ON WR104XI-A
def _memory_recall(self, index):
index = int(index)
if index < 0 or index > self._memory_size:
raise OutOfRangeException()
if not self._driver_operation_simulate:
self._write("*rcl %d" % index)
self.driver_operation.invalidate_all_attributes()
| elopezga/ErrorRate | ivi/lecroy/lecroyBaseScope.py | Python | mit | 71,778 |
# -*- Mode: Python; py-indent-offset: 4 -*-
# pygobject - Python bindings for the GObject library
# Copyright (C) 2012 Simon Feltman
#
# gi/_signalhelper.py: GObject signal binding decorator object
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <http://www.gnu.org/licenses/>.
import sys
import inspect
from ._gi import _gobject
# Callable went away in python 3.0 and came back in 3.2.
# Use versioning to figure out when to define it, otherwise we have to deal with
# the complexity of using __builtin__ or builtin between python versions to
# check if callable exists which PyFlakes will also complain about.
if (3, 0) <= sys.version_info < (3, 2):
    # Python 3.0/3.1 removed the callable() builtin (it returned in 3.2);
    # provide a drop-in replacement only on those interpreters.
    def callable(fn):
        return hasattr(fn, '__call__')
class Signal(str):
    """Object which gives a nice API for creating and binding signals.

    :param name:
        Name of signal or callable closure when used as a decorator.
    :type name: str or callable
    :param callable func:
        Callable closure method.
    :param GObject.SignalFlags flags:
        Flags specifying when to run closure.
    :param type return_type:
        Return type of the Signal.
    :param list arg_types:
        List of argument types specifying the signals function signature
    :param str doc:
        Documentation of signal object.
    :param callable accumulator:
        Accumulator method with the signature:
        func(ihint, return_accu, handler_return, accu_data) -> boolean
    :param object accu_data:
        User data passed to the accumulator.
    :Example:
    .. code-block:: python
        class Spam(GObject.Object):
            velocity = 0
            @GObject.Signal
            def pushed(self):
                self.velocity += 1
            @GObject.Signal(flags=GObject.SignalFlags.RUN_LAST)
            def pulled(self):
                self.velocity -= 1
            stomped = GObject.Signal('stomped', arg_types=(int,))
            @GObject.Signal
            def annotated_signal(self, a:int, b:str):
                "Python3 annotation support for parameter types."
        def on_pushed(obj):
            print(obj)
        spam = Spam()
        spam.pushed.connect(on_pushed)
        spam.pushed.emit()
    """
    class BoundSignal(str):
        """
        Temporary binding object which can be used for connecting signals
        without specifying the signal name string to connect.
        """
        def __new__(cls, name, *args, **kargs):
            return str.__new__(cls, name)
        def __init__(self, signal, gobj):
            str.__init__(self)
            self.signal = signal
            self.gobj = gobj
        def __repr__(self):
            return 'BoundSignal("%s")' % self
        def __call__(self, *args, **kargs):
            """Call the signals closure."""
            return self.signal.func(self.gobj, *args, **kargs)
        def connect(self, callback, *args, **kargs):
            """Same as GObject.Object.connect except there is no need to specify
            the signal name."""
            return self.gobj.connect(self, callback, *args, **kargs)
        def connect_detailed(self, callback, detail, *args, **kargs):
            """Same as GObject.Object.connect except there is no need to specify
            the signal name. In addition concats "::<detail>" to the signal name
            when connecting; for use with notifications like "notify" when a property
            changes.
            """
            return self.gobj.connect(self + '::' + detail, callback, *args, **kargs)
        def disconnect(self, handler_id):
            """Same as GObject.Object.disconnect."""
            # Bug fix: the bound object is stored as ``self.gobj`` in
            # __init__; ``self.instance`` never existed, so this method
            # always raised AttributeError.
            self.gobj.disconnect(handler_id)
        def emit(self, *args, **kargs):
            """Same as GObject.Object.emit except there is no need to specify
            the signal name."""
            return self.gobj.emit(str(self), *args, **kargs)
    def __new__(cls, name='', *args, **kargs):
        if callable(name):
            name = name.__name__
        return str.__new__(cls, name)
    def __init__(self, name='', func=None, flags=_gobject.SIGNAL_RUN_FIRST,
                 return_type=None, arg_types=None, doc='', accumulator=None, accu_data=None):
        if func and not name:
            name = func.__name__
        elif callable(name):
            func = name
            name = func.__name__
        if func and not doc:
            doc = func.__doc__
        str.__init__(self)
        if func and not (return_type or arg_types):
            return_type, arg_types = get_signal_annotations(func)
        if arg_types is None:
            arg_types = tuple()
        self.func = func
        self.flags = flags
        self.return_type = return_type
        self.arg_types = arg_types
        self.__doc__ = doc
        self.accumulator = accumulator
        self.accu_data = accu_data
    def __get__(self, instance, owner=None):
        """Returns a BoundSignal when accessed on an object instance."""
        if instance is None:
            return self
        return self.BoundSignal(self, instance)
    def __call__(self, obj, *args, **kargs):
        """Allows for instantiated Signals to be used as a decorator or calling
        of the underlying signal method."""
        # If obj is a GObject, than we call this signal as a closure otherwise
        # it is used as a re-application of a decorator.
        if isinstance(obj, _gobject.GObject):
            self.func(obj, *args, **kargs)
        else:
            # If self is already an allocated name, use it otherwise create a new named
            # signal using the closure name as the name.
            if str(self):
                name = str(self)
            else:
                name = obj.__name__
            # Return a new value of this type since it is based on an immutable string.
            return type(self)(name=name, func=obj, flags=self.flags,
                              return_type=self.return_type, arg_types=self.arg_types,
                              doc=self.__doc__, accumulator=self.accumulator, accu_data=self.accu_data)
    def copy(self, newName=None):
        """Returns a renamed copy of the Signal."""
        if newName is None:
            # Bug fix: plain str has no ``name`` attribute; ``self.name``
            # raised AttributeError whenever copy() was called without a
            # new name.  The signal's name is its own string value.
            newName = str(self)
        return type(self)(name=newName, func=self.func, flags=self.flags,
                          return_type=self.return_type, arg_types=self.arg_types,
                          doc=self.__doc__, accumulator=self.accumulator, accu_data=self.accu_data)
    def get_signal_args(self):
        """Returns a tuple of: (flags, return_type, arg_types, accumulator, accu_data)"""
        return (self.flags, self.return_type, self.arg_types, self.accumulator, self.accu_data)
class SignalOverride(Signal):
    """Specialized sub-class of Signal which can be used as a decorator for overriding
    existing signals on GObjects.
    :Example:
    .. code-block:: python
        class MyWidget(Gtk.Widget):
            @GObject.SignalOverride
            def configure_event(self):
                pass
    """
    def get_signal_args(self):
        """Returns the string 'override'."""
        # The 'override' marker replaces the usual argument tuple so that
        # signal registration treats this as an override of an existing
        # signal rather than a new one.
        return 'override'
def get_signal_annotations(func):
    """Attempt pulling python 3 function annotations off of 'func' for
    use as a signals type information. Returns an ordered nested tuple
    of (return_type, (arg_type1, arg_type2, ...)). If the given function
    does not have annotations then (None, tuple()) is returned.
    """
    return_type = None
    arg_types = tuple()
    if hasattr(func, '__annotations__'):
        spec = inspect.getfullargspec(func)
        annotations = spec.annotations
        # Preserve the declared parameter order, skipping unannotated args.
        arg_types = tuple(annotations[name] for name in spec.args
                          if name in annotations)
        return_type = annotations.get('return')
    return return_type, arg_types
def install_signals(cls):
    """Adds Signal instances on a GObject derived class into the '__gsignals__'
    dictionary to be picked up and registered as real GObject signals.

    :param cls: GObject-derived class whose attributes are scanned for
        Signal instances.
    :raises ValueError: if a signal name is registered twice.
    """
    gsignals = cls.__dict__.get('__gsignals__', {})
    newsignals = {}
    for name, signal in cls.__dict__.items():
        if isinstance(signal, Signal):
            signalName = str(signal)
            # Fixup a signal which is unnamed by using the class variable name.
            # Since Signal is based on string which immutable,
            # we must copy and replace the class variable.
            if not signalName:
                signalName = name
                signal = signal.copy(name)
                setattr(cls, name, signal)
            if signalName in gsignals:
                raise ValueError('Signal "%s" has already been registered.' % name)
            newsignals[signalName] = signal
            gsignals[signalName] = signal.get_signal_args()
    cls.__gsignals__ = gsignals
    # Setup signal closures by adding the specially named
    # method to the class in the form of "do_<signal_name>".
    for name, signal in newsignals.items():
        if signal.func is not None:
            funcName = 'do_' + name.replace('-', '_')
            # Never overwrite an explicitly provided do_* handler.
            if not hasattr(cls, funcName):
                setattr(cls, funcName, signal.func)
| yasoob/PythonRSSReader | venv/lib/python2.7/dist-packages/gi/_signalhelper.py | Python | mit | 9,776 |
# LIBRARIES
from django.db import models, connections, connection as default_connection
from django.db.models.sql.datastructures import EmptyResultSet
from django.db.models.query import Q
from google.appengine.api import datastore
# DJANGAE
from djangae.db.backends.appengine.query import transform_query, Query, WhereNode
from djangae.test import TestCase
# Datastore namespace configured on the default connection (may be None).
DEFAULT_NAMESPACE = default_connection.ops.connection.settings_dict.get("NAMESPACE")
class TransformTestModel(models.Model):
    """Minimal model that the query-transformation tests below run against."""
    field1 = models.CharField(max_length=255)
    field2 = models.CharField(max_length=255, unique=True)
    field3 = models.CharField(null=True, max_length=255)  # nullable, for __isnull tests
    field4 = models.TextField()  # TextField: exercised by the projection tests
    class Meta:
        app_label = "djangae"
class InheritedModel(TransformTestModel):
    """Concrete subclass used to check the implicit polymodel 'class' filter."""
    class Meta:
        app_label = "djangae"
class TransformQueryTest(TestCase):
    """Checks that Django ORM querysets are translated into djangae's internal
    Query representation (kind, tables, where tree, ordering, projection)."""
    def test_polymodel_filter_applied(self):
        # Inherited models must get an implicit filter on the 'class' column
        # in addition to the user-supplied field filter.
        query = transform_query(
            connections['default'],
            InheritedModel.objects.filter(field1="One").all().query
        )
        query.prepare()
        self.assertEqual(2, len(query.where.children))
        self.assertTrue(query.where.children[0].children[0].is_leaf)
        self.assertTrue(query.where.children[1].children[0].is_leaf)
        self.assertEqual("class", query.where.children[0].children[0].column)
        self.assertEqual("field1", query.where.children[1].children[0].column)
    def test_basic_query(self):
        # An unfiltered queryset: a SELECT over the model's table, no where tree.
        query = transform_query(
            connections['default'],
            TransformTestModel.objects.all().query
        )
        self.assertEqual(query.model, TransformTestModel)
        self.assertEqual(query.kind, 'SELECT')
        self.assertEqual(query.tables, [ TransformTestModel._meta.db_table ])
        self.assertIsNone(query.where)
    def test_and_filter(self):
        # Two kwargs in filter() become two children of the where tree.
        query = transform_query(
            connections['default'],
            TransformTestModel.objects.filter(field1="One", field2="Two").all().query
        )
        self.assertEqual(query.model, TransformTestModel)
        self.assertEqual(query.kind, 'SELECT')
        self.assertEqual(query.tables, [ TransformTestModel._meta.db_table ])
        self.assertTrue(query.where)
        self.assertEqual(2, len(query.where.children)) # Two child nodes
    def test_exclude_filter(self):
        # exclude() produces a single negated node wrapping the condition.
        query = transform_query(
            connections['default'],
            TransformTestModel.objects.exclude(field1="One").all().query
        )
        self.assertEqual(query.model, TransformTestModel)
        self.assertEqual(query.kind, 'SELECT')
        self.assertEqual(query.tables, [ TransformTestModel._meta.db_table ])
        self.assertTrue(query.where)
        self.assertEqual(1, len(query.where.children)) # One child node
        self.assertTrue(query.where.children[0].negated)
        self.assertEqual(1, len(query.where.children[0].children))
    def test_ordering(self):
        # order_by() columns are carried over verbatim, including the '-' prefix.
        query = transform_query(
            connections['default'],
            TransformTestModel.objects.filter(field1="One", field2="Two").order_by("field1", "-field2").query
        )
        self.assertEqual(query.model, TransformTestModel)
        self.assertEqual(query.kind, 'SELECT')
        self.assertEqual(query.tables, [ TransformTestModel._meta.db_table ])
        self.assertTrue(query.where)
        self.assertEqual(2, len(query.where.children)) # Two child nodes
        self.assertEqual(["field1", "-field2"], query.order_by)
    def test_projection(self):
        # only() keeps the pk plus the named fields.
        query = transform_query(
            connections['default'],
            TransformTestModel.objects.only("field1").query
        )
        self.assertItemsEqual(["id", "field1"], query.columns)
        # values_list() projects exactly the named fields.
        query = transform_query(
            connections['default'],
            TransformTestModel.objects.values_list("field1").query
        )
        self.assertEqual(set(["field1"]), query.columns)
        # defer() keeps everything except the named fields.
        query = transform_query(
            connections['default'],
            TransformTestModel.objects.defer("field1", "field4").query
        )
        self.assertItemsEqual(set(["id", "field2", "field3"]), query.columns)
    def test_no_results_returns_emptyresultset(self):
        # .none() can never match anything, so transformation short-circuits.
        self.assertRaises(
            EmptyResultSet,
            transform_query,
            connections['default'],
            TransformTestModel.objects.none().query
        )
    def test_offset_and_limit(self):
        # Slicing maps onto the query's low/high marks.
        query = transform_query(
            connections['default'],
            TransformTestModel.objects.all()[5:10].query
        )
        self.assertEqual(5, query.low_mark)
        self.assertEqual(10, query.high_mark)
    def test_isnull(self):
        # __isnull=True becomes an ISNULL operator node with a truthy value.
        query = transform_query(
            connections['default'],
            TransformTestModel.objects.filter(field3__isnull=True).all()[5:10].query
        )
        self.assertTrue(query.where.children[0].value)
        self.assertEqual("ISNULL", query.where.children[0].operator)
    def test_distinct(self):
        # distinct(*fields) marks the query distinct and projects those fields.
        query = transform_query(
            connections['default'],
            TransformTestModel.objects.distinct("field2", "field3").query
        )
        self.assertTrue(query.distinct)
        self.assertEqual(query.columns, set(["field2", "field3"]))
        # distinct().values(...) behaves the same way.
        query = transform_query(
            connections['default'],
            TransformTestModel.objects.distinct().values("field2", "field3").query
        )
        self.assertTrue(query.distinct)
        self.assertEqual(query.columns, set(["field2", "field3"]))
    def test_order_by_pk(self):
        # Ordering on the pk is rewritten to the datastore's __key__ property.
        query = transform_query(
            connections['default'],
            TransformTestModel.objects.order_by("pk").query
        )
        self.assertEqual("__key__", query.order_by[0])
        query = transform_query(
            connections['default'],
            TransformTestModel.objects.order_by("-pk").query
        )
        self.assertEqual("-__key__", query.order_by[0])
    def test_reversed_ordering(self):
        # reverse() flips the direction of the key ordering.
        query = transform_query(
            connections['default'],
            TransformTestModel.objects.order_by("pk").reverse().query
        )
        self.assertEqual("-__key__", query.order_by[0])
    def test_clear_ordering(self):
        # A trailing order_by() with no arguments clears any earlier ordering.
        query = transform_query(
            connections['default'],
            TransformTestModel.objects.order_by("pk").order_by().query
        )
        self.assertFalse(query.order_by)
    def test_projection_on_textfield_disabled(self):
        # Projecting a TextField is not possible, so the projection is dropped.
        query = transform_query(
            connections['default'],
            TransformTestModel.objects.values_list("field4").query
        )
        self.assertFalse(query.columns)
        self.assertFalse(query.projection_possible)
from djangae.tests.test_connector import Relation
from djangae.db.backends.appengine.dnf import normalize_query
class QueryNormalizationTests(TestCase):
    """
    The parse_dnf function takes a Django where tree, and converts it
    into a tree of one of the following forms:
    [ (column, operator, value), (column, operator, value) ] <- AND only query
    [ [(column, operator, value)], [(column, operator, value) ]] <- OR query, of multiple ANDs

    NOTE(review): several assertions below originally used
    self.assertTrue(n, len(...)), which passes the length as the assertion
    *message* and therefore always succeeded; they have been converted to
    the intended assertEqual checks.
    """
    def test_and_with_child_or_promoted(self):
        from .test_connector import TestUser
        """
        Given the following tree:
               AND
              / | \
             A  B  OR
                  /  \
                 C    D
        The OR should be promoted, so the resulting tree is
                  OR
                 /  \
               AND   AND
              / | \ / | \
             A  B C A B D
        """
        # Build the input tree by hand: AND(A=, B=, OR(C=, D=)).
        query = Query(TestUser, "SELECT")
        query.where = WhereNode()
        query.where.children.append(WhereNode())
        query.where.children[-1].column = "A"
        query.where.children[-1].operator = "="
        query.where.children.append(WhereNode())
        query.where.children[-1].column = "B"
        query.where.children[-1].operator = "="
        query.where.children.append(WhereNode())
        query.where.children[-1].connector = "OR"
        query.where.children[-1].children.append(WhereNode())
        query.where.children[-1].children[-1].column = "C"
        query.where.children[-1].children[-1].operator = "="
        query.where.children[-1].children.append(WhereNode())
        query.where.children[-1].children[-1].column = "D"
        query.where.children[-1].children[-1].operator = "="
        query = normalize_query(query)
        # The root is now an OR of two three-way AND branches.
        self.assertEqual(query.where.connector, "OR")
        self.assertEqual(2, len(query.where.children))
        self.assertFalse(query.where.children[0].is_leaf)
        self.assertFalse(query.where.children[1].is_leaf)
        self.assertEqual(query.where.children[0].connector, "AND")
        self.assertEqual(query.where.children[1].connector, "AND")
        self.assertEqual(3, len(query.where.children[0].children))
        self.assertEqual(3, len(query.where.children[1].children))
    def test_and_queries(self):
        from .test_connector import TestUser
        qs = TestUser.objects.filter(username="test").all()
        query = normalize_query(transform_query(
            connections['default'],
            qs.query
        ))
        # A single filter normalizes to one AND branch with one leaf.
        # (was assertTrue(1, len(...)), which always passed)
        self.assertEqual(1, len(query.where.children))
        self.assertEqual(query.where.children[0].children[0].column, "username")
        self.assertEqual(query.where.children[0].children[0].operator, "=")
        self.assertEqual(query.where.children[0].children[0].value, "test")
        qs = TestUser.objects.filter(username="test", email="test@example.com")
        query = normalize_query(transform_query(
            connections['default'],
            qs.query
        ))
        # Two ANDed filters stay in a single branch of the top-level OR.
        # (was assertTrue(2, len(...)), which always passed)
        self.assertEqual(2, len(query.where.children[0].children))
        self.assertEqual(query.where.connector, "OR")
        self.assertEqual(query.where.children[0].connector, "AND")
        self.assertEqual(query.where.children[0].children[0].column, "username")
        self.assertEqual(query.where.children[0].children[0].operator, "=")
        self.assertEqual(query.where.children[0].children[0].value, "test")
        self.assertEqual(query.where.children[0].children[1].column, "email")
        self.assertEqual(query.where.children[0].children[1].operator, "=")
        self.assertEqual(query.where.children[0].children[1].value, "test@example.com")
        qs = TestUser.objects.filter(username="test").exclude(email="test@example.com")
        query = normalize_query(transform_query(
            connections['default'],
            qs.query
        ))
        # The exclusion explodes into two branches: email < value and email > value.
        # (was assertTrue(2, len(...)), which always passed)
        self.assertEqual(2, len(query.where.children[0].children))
        self.assertEqual(query.where.connector, "OR")
        self.assertEqual(query.where.children[0].connector, "AND")
        self.assertEqual(query.where.children[0].children[0].column, "username")
        self.assertEqual(query.where.children[0].children[0].operator, "=")
        self.assertEqual(query.where.children[0].children[0].value, "test")
        self.assertEqual(query.where.children[0].children[1].column, "email")
        self.assertEqual(query.where.children[0].children[1].operator, "<")
        self.assertEqual(query.where.children[0].children[1].value, "test@example.com")
        self.assertEqual(query.where.children[1].children[0].column, "username")
        self.assertEqual(query.where.children[1].children[0].operator, "=")
        self.assertEqual(query.where.children[1].children[0].value, "test")
        self.assertEqual(query.where.children[1].children[1].column, "email")
        self.assertEqual(query.where.children[1].children[1].operator, ">")
        self.assertEqual(query.where.children[1].children[1].value, "test@example.com")
        instance = Relation(pk=1)
        qs = instance.related_set.filter(headline__startswith='Fir')
        query = normalize_query(transform_query(
            connections['default'],
            qs.query
        ))
        # Related-manager filters include the implicit FK filter plus the
        # special startswith index column.
        # (was assertTrue(2, len(...)), which always passed)
        self.assertEqual(2, len(query.where.children[0].children))
        self.assertEqual(query.where.connector, "OR")
        self.assertEqual(query.where.children[0].connector, "AND")
        self.assertEqual(query.where.children[0].children[0].column, "relation_id")
        self.assertEqual(query.where.children[0].children[0].operator, "=")
        self.assertEqual(query.where.children[0].children[0].value, 1)
        self.assertEqual(query.where.children[0].children[1].column, "_idx_startswith_headline")
        self.assertEqual(query.where.children[0].children[1].operator, "=")
        self.assertEqual(query.where.children[0].children[1].value, u"Fir")
    def test_or_queries(self):
        from .test_connector import TestUser
        qs = TestUser.objects.filter(
            username="python").filter(
            Q(username__in=["ruby", "jruby"]) | (Q(username="php") & ~Q(username="perl"))
        )
        query = normalize_query(transform_query(
            connections['default'],
            qs.query
        ))
        # After IN and != explosion, we have...
        # (AND: (username='python', OR: (username='ruby', username='jruby', AND: (username='php', AND: (username < 'perl', username > 'perl')))))
        # Working backwards,
        # AND: (username < 'perl', username > 'perl') can't be simplified
        # AND: (username='php', AND: (username < 'perl', username > 'perl')) can become (OR: (AND: username = 'php', username < 'perl'), (AND: username='php', username > 'perl'))
        # OR: (username='ruby', username='jruby', (OR: (AND: username = 'php', username < 'perl'), (AND: username='php', username > 'perl')) can't be simplified
        # (AND: (username='python', OR: (username='ruby', username='jruby', (OR: (AND: username = 'php', username < 'perl'), (AND: username='php', username > 'perl'))
        # becomes...
        # (OR: (AND: username='python', username = 'ruby'), (AND: username='python', username='jruby'), (AND: username='python', username='php', username < 'perl') \
        # (AND: username='python', username='php', username > 'perl')
        # Four OR branches as per the expansion above.
        # (was assertTrue(4, len(query.where.children[0].children)), which both
        # always passed and checked the wrong node)
        self.assertEqual(4, len(query.where.children))
        self.assertEqual(query.where.children[0].connector, "AND")
        self.assertEqual(query.where.children[0].children[0].column, "username")
        self.assertEqual(query.where.children[0].children[0].operator, "=")
        self.assertEqual(query.where.children[0].children[0].value, "python")
        self.assertEqual(query.where.children[0].children[1].column, "username")
        self.assertEqual(query.where.children[0].children[1].operator, "=")
        self.assertEqual(query.where.children[0].children[1].value, "php")
        self.assertEqual(query.where.children[0].children[2].column, "username")
        self.assertEqual(query.where.children[0].children[2].operator, "<")
        self.assertEqual(query.where.children[0].children[2].value, "perl")
        self.assertEqual(query.where.children[1].connector, "AND")
        self.assertEqual(query.where.children[1].children[0].column, "username")
        self.assertEqual(query.where.children[1].children[0].operator, "=")
        self.assertEqual(query.where.children[1].children[0].value, "python")
        self.assertEqual(query.where.children[1].children[1].column, "username")
        self.assertEqual(query.where.children[1].children[1].operator, "=")
        self.assertEqual(query.where.children[1].children[1].value, "jruby")
        self.assertEqual(query.where.children[2].connector, "AND")
        self.assertEqual(query.where.children[2].children[0].column, "username")
        self.assertEqual(query.where.children[2].children[0].operator, "=")
        self.assertEqual(query.where.children[2].children[0].value, "python")
        self.assertEqual(query.where.children[2].children[1].column, "username")
        self.assertEqual(query.where.children[2].children[1].operator, "=")
        self.assertEqual(query.where.children[2].children[1].value, "php")
        self.assertEqual(query.where.children[2].children[2].column, "username")
        self.assertEqual(query.where.children[2].children[2].operator, ">")
        self.assertEqual(query.where.children[2].children[2].value, "perl")
        self.assertEqual(query.where.connector, "OR")
        self.assertEqual(query.where.children[3].connector, "AND")
        self.assertEqual(query.where.children[3].children[0].column, "username")
        self.assertEqual(query.where.children[3].children[0].operator, "=")
        self.assertEqual(query.where.children[3].children[0].value, "python")
        self.assertEqual(query.where.children[3].children[1].column, "username")
        self.assertEqual(query.where.children[3].children[1].operator, "=")
        self.assertEqual(query.where.children[3].children[1].value, "ruby")
        qs = TestUser.objects.filter(username="test") | TestUser.objects.filter(username="cheese")
        query = normalize_query(transform_query(
            connections['default'],
            qs.query
        ))
        self.assertEqual(query.where.connector, "OR")
        self.assertEqual(2, len(query.where.children))
        self.assertTrue(query.where.children[0].is_leaf)
        self.assertEqual("cheese", query.where.children[0].value)
        self.assertTrue(query.where.children[1].is_leaf)
        self.assertEqual("test", query.where.children[1].value)
        # An empty IN can never match anything.
        qs = TestUser.objects.using("default").filter(username__in=set()).values_list('email')
        with self.assertRaises(EmptyResultSet):
            query = normalize_query(transform_query(
                connections['default'],
                qs.query
            ))
        qs = TestUser.objects.filter(username__startswith='Hello') | TestUser.objects.filter(username__startswith='Goodbye')
        query = normalize_query(transform_query(
            connections['default'],
            qs.query
        ))
        self.assertEqual(2, len(query.where.children))
        self.assertEqual("_idx_startswith_username", query.where.children[0].column)
        self.assertEqual(u"Goodbye", query.where.children[0].value)
        self.assertEqual("_idx_startswith_username", query.where.children[1].column)
        self.assertEqual(u"Hello", query.where.children[1].value)
        # pk__in explodes into one __key__ branch per pk.
        qs = TestUser.objects.filter(pk__in=[1, 2, 3])
        query = normalize_query(transform_query(
            connections['default'],
            qs.query
        ))
        self.assertEqual(3, len(query.where.children))
        self.assertEqual("__key__", query.where.children[0].column)
        self.assertEqual("__key__", query.where.children[1].column)
        self.assertEqual("__key__", query.where.children[2].column)
        self.assertEqual({
            datastore.Key.from_path(TestUser._meta.db_table, 1, namespace=DEFAULT_NAMESPACE),
            datastore.Key.from_path(TestUser._meta.db_table, 2, namespace=DEFAULT_NAMESPACE),
            datastore.Key.from_path(TestUser._meta.db_table, 3, namespace=DEFAULT_NAMESPACE),
        }, {
            query.where.children[0].value,
            query.where.children[1].value,
            query.where.children[2].value,
        }
        )
        qs = TestUser.objects.filter(pk__in=[1, 2, 3]).filter(username="test")
        query = normalize_query(transform_query(
            connections['default'],
            qs.query
        ))
        # Every exploded __key__ branch must also carry the username filter.
        # (the second and third value checks originally re-tested branch 0 -
        # a copy/paste bug - instead of branches 1 and 2)
        self.assertEqual(3, len(query.where.children))
        self.assertEqual("__key__", query.where.children[0].children[0].column)
        self.assertEqual("test", query.where.children[0].children[1].value)
        self.assertEqual("__key__", query.where.children[1].children[0].column)
        self.assertEqual("test", query.where.children[1].children[1].value)
        self.assertEqual("__key__", query.where.children[2].children[0].column)
        self.assertEqual("test", query.where.children[2].children[1].value)
        self.assertEqual({
            datastore.Key.from_path(TestUser._meta.db_table, 1, namespace=DEFAULT_NAMESPACE),
            datastore.Key.from_path(TestUser._meta.db_table, 2, namespace=DEFAULT_NAMESPACE),
            datastore.Key.from_path(TestUser._meta.db_table, 3, namespace=DEFAULT_NAMESPACE),
        }, {
            query.where.children[0].children[0].value,
            query.where.children[1].children[0].value,
            query.where.children[2].children[0].value,
        }
        )
| Ali-aqrabawi/ezclinic | lib/djangae/tests/test_query_transform.py | Python | mit | 20,523 |