Dataset schema (one row per source file):
  code        string   length 2 – 1.05M characters
  repo_name   string   length 5 – 104 characters
  path        string   length 4 – 251 characters
  language    string   1 distinct value
  license     string   15 distinct values
  size        int32    2 – 1.05M
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models

import treemap.json_field


class Migration(migrations.Migration):

    dependencies = [
        ('modeling', '0002_remove_plan_currentscenarioid'),
    ]

    operations = [
        migrations.AddField(
            model_name='plan',
            name='zoom_lat_lng',
            field=treemap.json_field.JSONField(null=True, blank=True),
        ),
    ]
maurizi/otm-core
opentreemap/modeling/migrations/0003_plan_zoom_lat_lng.py
Python
agpl-3.0
462
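The migration above only adds a nullable JSON column to the plan table; below is a hedged sketch of how application code might populate it once the migration has run. The `Plan` import path and the payload shape are assumptions for illustration, not taken from otm-core.

from modeling.models import Plan  # assumed import path, per the app label above

plan = Plan.objects.get(pk=1)

# zoom_lat_lng is nullable and optional, so existing rows simply hold NULL
# until something like a saved map viewport is written to them.
plan.zoom_lat_lng = {'lat': 47.61, 'lng': -122.33, 'zoom': 12}  # assumed payload shape
plan.save(update_fields=['zoom_lat_lng'])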
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class Ssmtp(AutotoolsPackage):
    """A program that replaces sendmail on workstations that should send
    their mail via the departmental mailhub from which they pick up their
    mail."""

    homepage = "https://salsa.debian.org/debian/ssmtp"
    url = "https://deb.debian.org/debian/pool/main/s/ssmtp/ssmtp_2.64.orig.tar.bz2"

    version('2.64', sha256='22c37dc90c871e8e052b2cab0ad219d010fa938608cd66b21c8f3c759046fa36')

    variant('ssl', default=True, description='Enable support for secure connection to mail server')
    variant('inet6', default=True, description='Enable support for IPv6 transport')
    variant('md5auth', default=True, description='Enable support for MD5 authentication')

    depends_on('libnsl')
    depends_on('openssl', when='+ssl')

    patch('install.patch')

    @when('+ssl')
    def setup_build_environment(self, env):
        # The configure script is generated with a very old version of
        # autoconf, which cannot accept LIBS as a command-line argument
        env.set('LIBS', self.spec['openssl'].libs.link_flags)

    def configure_args(self):
        args = self.enable_or_disable('ssl')
        args += self.enable_or_disable('inet6')
        args += self.enable_or_disable('md5auth')
        return args

    def install(self, spec, prefix):
        install_answers = [
            # Please enter the mail name of your system.
            # This is the hostname portion of the address to be shown
            # on outgoing news and mail messages headers.
            # The default is your system's host name.
            #
            # Mail name [system.host.name]:
            '\n',
            # Please enter the SMTP port number [25]:
            '\n'
        ]
        install_answers_filename = 'spack-install.in'
        with working_dir(self.build_directory):
            with open(install_answers_filename, 'w') as f:
                f.writelines(install_answers)
            make('install-sendmail', input=install_answers_filename)
LLNL/spack
var/spack/repos/builtin/packages/ssmtp/package.py
Python
lgpl-2.1
2,247
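To make the variant handling in `configure_args()` above concrete, here is a rough standalone illustration of the flags it evaluates to; the helper below only mimics Spack's `enable_or_disable` for boolean variants and is not Spack code.

def fake_enable_or_disable(name, enabled):
    """Mimic AutotoolsPackage.enable_or_disable for a boolean variant."""
    return ['--enable-{0}'.format(name) if enabled else '--disable-{0}'.format(name)]


def configure_args_for(ssl=True, inet6=True, md5auth=True):
    # Same ordering as Ssmtp.configure_args above.
    args = fake_enable_or_disable('ssl', ssl)
    args += fake_enable_or_disable('inet6', inet6)
    args += fake_enable_or_disable('md5auth', md5auth)
    return args


# e.g. a spec like "ssmtp ~ssl" would roughly translate to:
print(configure_args_for(ssl=False))
# ['--disable-ssl', '--enable-inet6', '--enable-md5auth']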
# coding: utf-8

from __future__ import absolute_import
from datetime import date, datetime  # noqa: F401

from typing import List, Dict  # noqa: F401

from tapi_server.models.base_model_ import Model
from tapi_server.models.tapi_photonic_media_media_channel_properties_pac import TapiPhotonicMediaMediaChannelPropertiesPac  # noqa: F401,E501
from tapi_server import util


class TapiPhotonicMediaMediaChannelConnectionEndPointSpec(Model):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

    Do not edit the class manually.
    """

    def __init__(self, media_channel=None):  # noqa: E501
        """TapiPhotonicMediaMediaChannelConnectionEndPointSpec - a model defined in OpenAPI

        :param media_channel: The media_channel of this TapiPhotonicMediaMediaChannelConnectionEndPointSpec.  # noqa: E501
        :type media_channel: TapiPhotonicMediaMediaChannelPropertiesPac
        """
        self.openapi_types = {
            'media_channel': TapiPhotonicMediaMediaChannelPropertiesPac
        }

        self.attribute_map = {
            'media_channel': 'media-channel'
        }

        self._media_channel = media_channel

    @classmethod
    def from_dict(cls, dikt) -> 'TapiPhotonicMediaMediaChannelConnectionEndPointSpec':
        """Returns the dict as a model

        :param dikt: A dict.
        :type: dict
        :return: The tapi.photonic.media.MediaChannelConnectionEndPointSpec of this TapiPhotonicMediaMediaChannelConnectionEndPointSpec.  # noqa: E501
        :rtype: TapiPhotonicMediaMediaChannelConnectionEndPointSpec
        """
        return util.deserialize_model(dikt, cls)

    @property
    def media_channel(self):
        """Gets the media_channel of this TapiPhotonicMediaMediaChannelConnectionEndPointSpec.

        :return: The media_channel of this TapiPhotonicMediaMediaChannelConnectionEndPointSpec.
        :rtype: TapiPhotonicMediaMediaChannelPropertiesPac
        """
        return self._media_channel

    @media_channel.setter
    def media_channel(self, media_channel):
        """Sets the media_channel of this TapiPhotonicMediaMediaChannelConnectionEndPointSpec.

        :param media_channel: The media_channel of this TapiPhotonicMediaMediaChannelConnectionEndPointSpec.
        :type media_channel: TapiPhotonicMediaMediaChannelPropertiesPac
        """
        self._media_channel = media_channel
karthik-sethuraman/ONFOpenTransport
RI/flask_server/tapi_server/models/tapi_photonic_media_media_channel_connection_end_point_spec.py
Python
apache-2.0
2,409
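A minimal usage sketch for the generated model above, assuming the `tapi_server` package is importable; the dict keys follow the hyphenated names in `attribute_map`, and the nested value is left as None purely for illustration.

from tapi_server.models.tapi_photonic_media_media_channel_connection_end_point_spec import (
    TapiPhotonicMediaMediaChannelConnectionEndPointSpec,
)

# Build the model from a wire-format dict; keys use the hyphenated names
# declared in attribute_map.
spec = TapiPhotonicMediaMediaChannelConnectionEndPointSpec.from_dict(
    {'media-channel': None}
)

# Attribute access uses the Pythonic name from openapi_types.
print(spec.media_channel)  # None, or a TapiPhotonicMediaMediaChannelPropertiesPac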
'''
Created on Jun 10, 2010

@author: jnaous
'''
from django import forms
from django.contrib.auth.models import User
from models import PermissionUser, PermissionRequest


class PermissionRequestForm(forms.ModelForm):
    """
    A form that can be used to request a permission from another user.
    """

    class Meta:
        fields = ["permission_owner", "message"]
        model = PermissionRequest

    def __init__(self, permission_owners_qs, *args, **kwargs):
        """
        Set the permission_owners queryset.
        """
        super(PermissionRequestForm, self).__init__(*args, **kwargs)
        self.fields["permission_owner"].queryset = permission_owners_qs
dana-i2cat/felix
optin_manager/src/python/openflow/common/permissions/forms.py
Python
apache-2.0
688
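A hedged sketch of how a view might use the form above, restricting the permission_owner choices to a queryset. The module paths are inferred from the row's file path, and the manager call and the `requester` field name are assumptions for illustration only.

from openflow.common.permissions.forms import PermissionRequestForm  # path inferred from the row above
from openflow.common.permissions.models import PermissionUser


def request_permission(request):
    """Hypothetical view: let the requester pick a permission owner to ask."""
    owners_qs = PermissionUser.objects.all()  # assumed: any narrower queryset works too
    form = PermissionRequestForm(owners_qs, data=request.POST or None)
    if request.method == 'POST' and form.is_valid():
        perm_request = form.save(commit=False)
        perm_request.requester = request.user  # assumed field on PermissionRequest
        perm_request.save()
    return form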
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function

from typing import Any, Dict

from django.conf import settings

from zerver.lib.test_helpers import (
    most_recent_message,
)

from zerver.lib.test_classes import (
    ZulipTestCase,
)

from zerver.models import (
    get_system_bot,
    UserProfile
)

import ujson


def fix_params(raw_params):
    # type: (Dict[str, Any]) -> Dict[str, str]
    # A few of our few legacy endpoints need their
    # individual parameters serialized as JSON.
    return {k: ujson.dumps(v) for k, v in raw_params.items()}


class TutorialTests(ZulipTestCase):
    def test_send_message(self):
        # type: () -> None
        user = self.example_user('hamlet')
        email = user.email
        self.login(email)

        welcome_bot = get_system_bot(settings.WELCOME_BOT)

        raw_params = dict(
            type='stream',
            recipient='Denmark',
            topic='welcome',
            content='hello'
        )
        params = fix_params(raw_params)

        result = self.client_post("/json/tutorial_send_message", params)
        self.assert_json_success(result)
        message = most_recent_message(user)
        self.assertEqual(message.content, 'hello')
        self.assertEqual(message.sender, welcome_bot)

        # now test some error cases
        result = self.client_post("/json/tutorial_send_message", {})
        self.assert_json_error(result, "Missing 'type' argument")

        result = self.client_post("/json/tutorial_send_message", raw_params)
        self.assert_json_error(result, 'Argument "type" is not valid JSON.')

        raw_params = dict(
            type='INVALID',
            recipient='Denmark',
            topic='welcome',
            content='hello'
        )
        params = fix_params(raw_params)
        result = self.client_post("/json/tutorial_send_message", params)
        self.assert_json_error(result, 'Bad data passed in to tutorial_send_message')

    def test_tutorial_status(self):
        # type: () -> None
        email = self.example_email('hamlet')
        self.login(email)

        cases = [
            ('started', UserProfile.TUTORIAL_STARTED),
            ('finished', UserProfile.TUTORIAL_FINISHED),
        ]
        for incoming_status, expected_db_status in cases:
            raw_params = dict(status=incoming_status)
            params = fix_params(raw_params)
            result = self.client_post('/json/tutorial_status', params)
            self.assert_json_success(result)
            user = self.example_user('hamlet')
            self.assertEqual(user.tutorial_status, expected_db_status)
vaidap/zulip
zerver/tests/test_tutorial.py
Python
apache-2.0
2,671
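To make the JSON-wrapping behaviour of `fix_params` in the test file above concrete, here is a small standalone illustration that uses the stdlib json module instead of ujson so it runs anywhere.

import json


def fix_params(raw_params):
    # Same idea as the test helper above: each value is serialized to JSON
    # individually, because the legacy endpoints expect it that way.
    return {k: json.dumps(v) for k, v in raw_params.items()}


print(fix_params({'type': 'stream', 'recipient': 'Denmark'}))
# {'type': '"stream"', 'recipient': '"Denmark"'}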
from __future__ import absolute_import, unicode_literals from mock import patch from mopidy.mpd.protocol.status import SUBSYSTEMS from tests.mpd import protocol class IdleHandlerTest(protocol.BaseTestCase): def idle_event(self, subsystem): self.session.on_event(subsystem) def assertEqualEvents(self, events): # noqa: N802 self.assertEqual(set(events), self.context.events) def assertEqualSubscriptions(self, events): # noqa: N802 self.assertEqual(set(events), self.context.subscriptions) def assertNoEvents(self): # noqa: N802 self.assertEqualEvents([]) def assertNoSubscriptions(self): # noqa: N802 self.assertEqualSubscriptions([]) def test_base_state(self): self.assertNoSubscriptions() self.assertNoEvents() self.assertNoResponse() def test_idle(self): self.send_request('idle') self.assertEqualSubscriptions(SUBSYSTEMS) self.assertNoEvents() self.assertNoResponse() def test_idle_disables_timeout(self): self.send_request('idle') self.connection.disable_timeout.assert_called_once_with() def test_noidle(self): self.send_request('noidle') self.assertNoSubscriptions() self.assertNoEvents() self.assertNoResponse() def test_idle_player(self): self.send_request('idle player') self.assertEqualSubscriptions(['player']) self.assertNoEvents() self.assertNoResponse() def test_idle_output(self): self.send_request('idle output') self.assertEqualSubscriptions(['output']) self.assertNoEvents() self.assertNoResponse() def test_idle_player_playlist(self): self.send_request('idle player playlist') self.assertEqualSubscriptions(['player', 'playlist']) self.assertNoEvents() self.assertNoResponse() def test_idle_then_noidle(self): self.send_request('idle') self.send_request('noidle') self.assertNoSubscriptions() self.assertNoEvents() self.assertOnceInResponse('OK') def test_idle_then_noidle_enables_timeout(self): self.send_request('idle') self.send_request('noidle') self.connection.enable_timeout.assert_called_once_with() def test_idle_then_play(self): with patch.object(self.session, 'stop') as stop_mock: self.send_request('idle') self.send_request('play') stop_mock.assert_called_once_with() def test_idle_then_idle(self): with patch.object(self.session, 'stop') as stop_mock: self.send_request('idle') self.send_request('idle') stop_mock.assert_called_once_with() def test_idle_player_then_play(self): with patch.object(self.session, 'stop') as stop_mock: self.send_request('idle player') self.send_request('play') stop_mock.assert_called_once_with() def test_idle_then_player(self): self.send_request('idle') self.idle_event('player') self.assertNoSubscriptions() self.assertNoEvents() self.assertOnceInResponse('changed: player') self.assertOnceInResponse('OK') def test_idle_player_then_event_player(self): self.send_request('idle player') self.idle_event('player') self.assertNoSubscriptions() self.assertNoEvents() self.assertOnceInResponse('changed: player') self.assertOnceInResponse('OK') def test_idle_then_output(self): self.send_request('idle') self.idle_event('output') self.assertNoSubscriptions() self.assertNoEvents() self.assertOnceInResponse('changed: output') self.assertOnceInResponse('OK') def test_idle_output_then_event_output(self): self.send_request('idle output') self.idle_event('output') self.assertNoSubscriptions() self.assertNoEvents() self.assertOnceInResponse('changed: output') self.assertOnceInResponse('OK') def test_idle_player_then_noidle(self): self.send_request('idle player') self.send_request('noidle') self.assertNoSubscriptions() self.assertNoEvents() self.assertOnceInResponse('OK') def 
test_idle_player_playlist_then_noidle(self): self.send_request('idle player playlist') self.send_request('noidle') self.assertNoEvents() self.assertNoSubscriptions() self.assertOnceInResponse('OK') def test_idle_player_playlist_then_player(self): self.send_request('idle player playlist') self.idle_event('player') self.assertNoEvents() self.assertNoSubscriptions() self.assertOnceInResponse('changed: player') self.assertNotInResponse('changed: playlist') self.assertOnceInResponse('OK') def test_idle_playlist_then_player(self): self.send_request('idle playlist') self.idle_event('player') self.assertEqualEvents(['player']) self.assertEqualSubscriptions(['playlist']) self.assertNoResponse() def test_idle_playlist_then_player_then_playlist(self): self.send_request('idle playlist') self.idle_event('player') self.idle_event('playlist') self.assertNoEvents() self.assertNoSubscriptions() self.assertNotInResponse('changed: player') self.assertOnceInResponse('changed: playlist') self.assertOnceInResponse('OK') def test_player(self): self.idle_event('player') self.assertEqualEvents(['player']) self.assertNoSubscriptions() self.assertNoResponse() def test_player_then_idle_player(self): self.idle_event('player') self.send_request('idle player') self.assertNoEvents() self.assertNoSubscriptions() self.assertOnceInResponse('changed: player') self.assertNotInResponse('changed: playlist') self.assertOnceInResponse('OK') def test_player_then_playlist(self): self.idle_event('player') self.idle_event('playlist') self.assertEqualEvents(['player', 'playlist']) self.assertNoSubscriptions() self.assertNoResponse() def test_player_then_idle(self): self.idle_event('player') self.send_request('idle') self.assertNoEvents() self.assertNoSubscriptions() self.assertOnceInResponse('changed: player') self.assertOnceInResponse('OK') def test_player_then_playlist_then_idle(self): self.idle_event('player') self.idle_event('playlist') self.send_request('idle') self.assertNoEvents() self.assertNoSubscriptions() self.assertOnceInResponse('changed: player') self.assertOnceInResponse('changed: playlist') self.assertOnceInResponse('OK') def test_player_then_idle_playlist(self): self.idle_event('player') self.send_request('idle playlist') self.assertEqualEvents(['player']) self.assertEqualSubscriptions(['playlist']) self.assertNoResponse() def test_player_then_idle_playlist_then_noidle(self): self.idle_event('player') self.send_request('idle playlist') self.send_request('noidle') self.assertNoEvents() self.assertNoSubscriptions() self.assertOnceInResponse('OK') def test_player_then_playlist_then_idle_playlist(self): self.idle_event('player') self.idle_event('playlist') self.send_request('idle playlist') self.assertNoEvents() self.assertNoSubscriptions() self.assertNotInResponse('changed: player') self.assertOnceInResponse('changed: playlist') self.assertOnceInResponse('OK') def test_output_then_idle_toggleoutput(self): self.idle_event('output') self.send_request('idle output') self.assertNoEvents() self.assertNoSubscriptions() self.assertOnceInResponse('changed: output') self.assertOnceInResponse('OK')
ZenithDK/mopidy
tests/mpd/protocol/test_idle.py
Python
apache-2.0
8,146
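The tests above exercise MPD's idle/noidle bookkeeping: a client subscribes to subsystems, events are buffered, and a response is flushed as soon as a buffered event matches a subscription (or immediately on noidle). Below is a toy model of that state machine, not taken from Mopidy, just to make the expected interactions explicit; the default subsystem list is simplified.

class ToyIdleState(object):
    def __init__(self):
        self.subscriptions = set()
        self.events = set()
        self.responses = []

    def idle(self, *subsystems):
        # Bare "idle" subscribes to everything (simplified subsystem list here).
        self.subscriptions = set(subsystems) or {'player', 'playlist', 'output'}
        self._maybe_respond()

    def noidle(self):
        if self.subscriptions:
            self.subscriptions.clear()
            self.responses.append('OK')

    def on_event(self, subsystem):
        self.events.add(subsystem)
        self._maybe_respond()

    def _maybe_respond(self):
        matched = self.events & self.subscriptions
        if matched:
            self.responses.extend('changed: %s' % s for s in sorted(matched))
            self.responses.append('OK')
            self.events.clear()
            self.subscriptions.clear()


s = ToyIdleState()
s.idle('playlist')
s.on_event('player')    # buffered: no matching subscription yet
s.on_event('playlist')  # matches -> response flushed, state cleared
print(s.responses)      # ['changed: playlist', 'OK']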
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Pooling layers. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools from tensorflow.python.framework import tensor_shape from tensorflow.python.keras import backend from tensorflow.python.keras.engine.base_layer import Layer from tensorflow.python.keras.engine.input_spec import InputSpec from tensorflow.python.keras.utils import conv_utils from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn from tensorflow.python.util.tf_export import keras_export class Pooling1D(Layer): """Pooling layer for arbitrary pooling functions, for 1D inputs. This class only exists for code reuse. It will never be an exposed API. Arguments: pool_function: The pooling function to apply, e.g. `tf.nn.max_pool2d`. pool_size: An integer or tuple/list of a single integer, representing the size of the pooling window. strides: An integer or tuple/list of a single integer, specifying the strides of the pooling operation. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, steps, features)` while `channels_first` corresponds to inputs with shape `(batch, features, steps)`. name: A string, the name of the layer. 
""" def __init__(self, pool_function, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs): super(Pooling1D, self).__init__(name=name, **kwargs) if data_format is None: data_format = backend.image_data_format() if strides is None: strides = pool_size self.pool_function = pool_function self.pool_size = conv_utils.normalize_tuple(pool_size, 1, 'pool_size') self.strides = conv_utils.normalize_tuple(strides, 1, 'strides') self.padding = conv_utils.normalize_padding(padding) self.data_format = conv_utils.normalize_data_format(data_format) self.input_spec = InputSpec(ndim=3) def call(self, inputs): pad_axis = 2 if self.data_format == 'channels_last' else 3 inputs = array_ops.expand_dims(inputs, pad_axis) outputs = self.pool_function( inputs, self.pool_size + (1,), strides=self.strides + (1,), padding=self.padding, data_format=self.data_format) return array_ops.squeeze(outputs, pad_axis) def compute_output_shape(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape).as_list() if self.data_format == 'channels_first': steps = input_shape[2] features = input_shape[1] else: steps = input_shape[1] features = input_shape[2] length = conv_utils.conv_output_length(steps, self.pool_size[0], self.padding, self.strides[0]) if self.data_format == 'channels_first': return tensor_shape.TensorShape([input_shape[0], features, length]) else: return tensor_shape.TensorShape([input_shape[0], length, features]) def get_config(self): config = { 'strides': self.strides, 'pool_size': self.pool_size, 'padding': self.padding, 'data_format': self.data_format, } base_config = super(Pooling1D, self).get_config() return dict(list(base_config.items()) + list(config.items())) @keras_export('keras.layers.MaxPool1D', 'keras.layers.MaxPooling1D') class MaxPooling1D(Pooling1D): """Max pooling operation for 1D temporal data. Downsamples the input representation by taking the maximum value over the window defined by `pool_size`. The window is shifted by `strides`. The resulting output when using "valid" padding option has a shape of: `output_shape = (input_shape - pool_size + 1) / strides)` The resulting output shape when using the "same" padding option is: `output_shape = input_shape / strides` For example, for strides=1 and padding="valid": >>> x = tf.constant([1., 2., 3., 4., 5.]) >>> x = tf.reshape(x, [1, 5, 1]) >>> max_pool_1d = tf.keras.layers.MaxPooling1D(pool_size=2, ... strides=1, padding='valid') >>> max_pool_1d(x) <tf.Tensor: shape=(1, 4, 1), dtype=float32, numpy= array([[[2.], [3.], [4.], [5.]]], dtype=float32)> For example, for strides=2 and padding="valid": >>> x = tf.constant([1., 2., 3., 4., 5.]) >>> x = tf.reshape(x, [1, 5, 1]) >>> max_pool_1d = tf.keras.layers.MaxPooling1D(pool_size=2, ... strides=2, padding='valid') >>> max_pool_1d(x) <tf.Tensor: shape=(1, 2, 1), dtype=float32, numpy= array([[[2.], [4.]]], dtype=float32)> For example, for strides=1 and padding="same": >>> x = tf.constant([1., 2., 3., 4., 5.]) >>> x = tf.reshape(x, [1, 5, 1]) >>> max_pool_1d = tf.keras.layers.MaxPooling1D(pool_size=2, ... strides=1, padding='same') >>> max_pool_1d(x) <tf.Tensor: shape=(1, 5, 1), dtype=float32, numpy= array([[[2.], [3.], [4.], [5.], [5.]]], dtype=float32)> Arguments: pool_size: Integer, size of the max pooling window. strides: Integer, or None. Specifies how much the pooling window moves for each pooling step. If None, it will default to `pool_size`. padding: One of `"valid"` or `"same"` (case-insensitive). "valid" adds no padding. 
"same" adds padding such that if the stride is 1, the output shape is the same as the input shape. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, steps, features)` while `channels_first` corresponds to inputs with shape `(batch, features, steps)`. Input shape: - If `data_format='channels_last'`: 3D tensor with shape `(batch_size, steps, features)`. - If `data_format='channels_first'`: 3D tensor with shape `(batch_size, features, steps)`. Output shape: - If `data_format='channels_last'`: 3D tensor with shape `(batch_size, downsampled_steps, features)`. - If `data_format='channels_first'`: 3D tensor with shape `(batch_size, features, downsampled_steps)`. """ def __init__(self, pool_size=2, strides=None, padding='valid', data_format='channels_last', **kwargs): super(MaxPooling1D, self).__init__( functools.partial(backend.pool2d, pool_mode='max'), pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, **kwargs) @keras_export('keras.layers.AveragePooling1D', 'keras.layers.AvgPool1D') class AveragePooling1D(Pooling1D): """Average pooling for temporal data. Arguments: pool_size: Integer, size of the average pooling windows. strides: Integer, or None. Factor by which to downscale. E.g. 2 will halve the input. If None, it will default to `pool_size`. padding: One of `"valid"` or `"same"` (case-insensitive). data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, steps, features)` while `channels_first` corresponds to inputs with shape `(batch, features, steps)`. Input shape: - If `data_format='channels_last'`: 3D tensor with shape `(batch_size, steps, features)`. - If `data_format='channels_first'`: 3D tensor with shape `(batch_size, features, steps)`. Output shape: - If `data_format='channels_last'`: 3D tensor with shape `(batch_size, downsampled_steps, features)`. - If `data_format='channels_first'`: 3D tensor with shape `(batch_size, features, downsampled_steps)`. """ def __init__(self, pool_size=2, strides=None, padding='valid', data_format='channels_last', **kwargs): super(AveragePooling1D, self).__init__( functools.partial(backend.pool2d, pool_mode='avg'), pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, **kwargs) class Pooling2D(Layer): """Pooling layer for arbitrary pooling functions, for 2D inputs (e.g. images). This class only exists for code reuse. It will never be an exposed API. Arguments: pool_function: The pooling function to apply, e.g. `tf.nn.max_pool2d`. pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. name: A string, the name of the layer. 
""" def __init__(self, pool_function, pool_size, strides, padding='valid', data_format=None, name=None, **kwargs): super(Pooling2D, self).__init__(name=name, **kwargs) if data_format is None: data_format = backend.image_data_format() if strides is None: strides = pool_size self.pool_function = pool_function self.pool_size = conv_utils.normalize_tuple(pool_size, 2, 'pool_size') self.strides = conv_utils.normalize_tuple(strides, 2, 'strides') self.padding = conv_utils.normalize_padding(padding) self.data_format = conv_utils.normalize_data_format(data_format) self.input_spec = InputSpec(ndim=4) def call(self, inputs): if self.data_format == 'channels_last': pool_shape = (1,) + self.pool_size + (1,) strides = (1,) + self.strides + (1,) else: pool_shape = (1, 1) + self.pool_size strides = (1, 1) + self.strides outputs = self.pool_function( inputs, ksize=pool_shape, strides=strides, padding=self.padding.upper(), data_format=conv_utils.convert_data_format(self.data_format, 4)) return outputs def compute_output_shape(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape).as_list() if self.data_format == 'channels_first': rows = input_shape[2] cols = input_shape[3] else: rows = input_shape[1] cols = input_shape[2] rows = conv_utils.conv_output_length(rows, self.pool_size[0], self.padding, self.strides[0]) cols = conv_utils.conv_output_length(cols, self.pool_size[1], self.padding, self.strides[1]) if self.data_format == 'channels_first': return tensor_shape.TensorShape( [input_shape[0], input_shape[1], rows, cols]) else: return tensor_shape.TensorShape( [input_shape[0], rows, cols, input_shape[3]]) def get_config(self): config = { 'pool_size': self.pool_size, 'padding': self.padding, 'strides': self.strides, 'data_format': self.data_format } base_config = super(Pooling2D, self).get_config() return dict(list(base_config.items()) + list(config.items())) @keras_export('keras.layers.MaxPool2D', 'keras.layers.MaxPooling2D') class MaxPooling2D(Pooling2D): """Max pooling operation for 2D spatial data. Downsamples the input representation by taking the maximum value over the window defined by `pool_size` for each dimension along the features axis. The window is shifted by `strides` in each dimension. The resulting output when using "valid" padding option has a shape(number of rows or columns) of: `output_shape = (input_shape - pool_size + 1) / strides)` The resulting output shape when using the "same" padding option is: `output_shape = input_shape / strides` For example, for stride=(1,1) and padding="valid": >>> x = tf.constant([[1., 2., 3.], ... [4., 5., 6.], ... [7., 8., 9.]]) >>> x = tf.reshape(x, [1, 3, 3, 1]) >>> max_pool_2d = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), ... strides=(1, 1), padding='valid') >>> max_pool_2d(x) <tf.Tensor: shape=(1, 2, 2, 1), dtype=float32, numpy= array([[[[5.], [6.]], [[8.], [9.]]]], dtype=float32)> For example, for stride=(2,2) and padding="valid": >>> x = tf.constant([[1., 2., 3., 4.], ... [5., 6., 7., 8.], ... [9., 10., 11., 12.]]) >>> x = tf.reshape(x, [1, 3, 4, 1]) >>> max_pool_2d = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), ... strides=(1, 1), padding='valid') >>> max_pool_2d(x) <tf.Tensor: shape=(1, 2, 3, 1), dtype=float32, numpy= array([[[[ 6.], [ 7.], [ 8.]], [[10.], [11.], [12.]]]], dtype=float32)> Usage Example: >>> input_image = tf.constant([[[[1.], [1.], [2.], [4.]], ... [[2.], [2.], [3.], [2.]], ... [[4.], [1.], [1.], [1.]], ... [[2.], [2.], [1.], [4.]]]]) >>> output = tf.constant([[[[1], [0]], ... 
[[0], [1]]]]) >>> model = tf.keras.models.Sequential() >>> model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), ... input_shape=(4,4,1))) >>> model.compile('adam', 'mean_squared_error') >>> model.predict(input_image, steps=1) array([[[[2.], [4.]], [[4.], [4.]]]], dtype=float32) For example, for stride=(1,1) and padding="same": >>> x = tf.constant([[1., 2., 3.], ... [4., 5., 6.], ... [7., 8., 9.]]) >>> x = tf.reshape(x, [1, 3, 3, 1]) >>> max_pool_2d = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), ... strides=(1, 1), padding='same') >>> max_pool_2d(x) <tf.Tensor: shape=(1, 3, 3, 1), dtype=float32, numpy= array([[[[5.], [6.], [6.]], [[8.], [9.], [9.]], [[8.], [9.], [9.]]]], dtype=float32)> Arguments: pool_size: integer or tuple of 2 integers, window size over which to take the maximum. `(2, 2)` will take the max value over a 2x2 pooling window. If only one integer is specified, the same window length will be used for both dimensions. strides: Integer, tuple of 2 integers, or None. Strides values. Specifies how far the pooling window moves for each pooling step. If None, it will default to `pool_size`. padding: One of `"valid"` or `"same"` (case-insensitive). "valid" adds no zero padding. "same" adds padding such that if the stride is 1, the output shape is the same as input shape. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". Input shape: - If `data_format='channels_last'`: 4D tensor with shape `(batch_size, rows, cols, channels)`. - If `data_format='channels_first'`: 4D tensor with shape `(batch_size, channels, rows, cols)`. Output shape: - If `data_format='channels_last'`: 4D tensor with shape `(batch_size, pooled_rows, pooled_cols, channels)`. - If `data_format='channels_first'`: 4D tensor with shape `(batch_size, channels, pooled_rows, pooled_cols)`. Returns: A tensor of rank 4 representing the maximum pooled values. See above for output shape. """ def __init__(self, pool_size=(2, 2), strides=None, padding='valid', data_format=None, **kwargs): super(MaxPooling2D, self).__init__( nn.max_pool, pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, **kwargs) @keras_export('keras.layers.AveragePooling2D', 'keras.layers.AvgPool2D') class AveragePooling2D(Pooling2D): """Average pooling operation for spatial data. Arguments: pool_size: integer or tuple of 2 integers, factors by which to downscale (vertical, horizontal). `(2, 2)` will halve the input in both spatial dimension. If only one integer is specified, the same window length will be used for both dimensions. strides: Integer, tuple of 2 integers, or None. Strides values. If None, it will default to `pool_size`. padding: One of `"valid"` or `"same"` (case-insensitive). data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. 
If you never set it, then it will be "channels_last". Input shape: - If `data_format='channels_last'`: 4D tensor with shape `(batch_size, rows, cols, channels)`. - If `data_format='channels_first'`: 4D tensor with shape `(batch_size, channels, rows, cols)`. Output shape: - If `data_format='channels_last'`: 4D tensor with shape `(batch_size, pooled_rows, pooled_cols, channels)`. - If `data_format='channels_first'`: 4D tensor with shape `(batch_size, channels, pooled_rows, pooled_cols)`. """ def __init__(self, pool_size=(2, 2), strides=None, padding='valid', data_format=None, **kwargs): super(AveragePooling2D, self).__init__( nn.avg_pool, pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, **kwargs) class Pooling3D(Layer): """Pooling layer for arbitrary pooling functions, for 3D inputs. This class only exists for code reuse. It will never be an exposed API. Arguments: pool_function: The pooling function to apply, e.g. `tf.nn.max_pool2d`. pool_size: An integer or tuple/list of 3 integers: (pool_depth, pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 3 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, depth, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, depth, height, width)`. name: A string, the name of the layer. """ def __init__(self, pool_function, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs): super(Pooling3D, self).__init__(name=name, **kwargs) if data_format is None: data_format = backend.image_data_format() if strides is None: strides = pool_size self.pool_function = pool_function self.pool_size = conv_utils.normalize_tuple(pool_size, 3, 'pool_size') self.strides = conv_utils.normalize_tuple(strides, 3, 'strides') self.padding = conv_utils.normalize_padding(padding) self.data_format = conv_utils.normalize_data_format(data_format) self.input_spec = InputSpec(ndim=5) def call(self, inputs): pool_shape = (1,) + self.pool_size + (1,) strides = (1,) + self.strides + (1,) if self.data_format == 'channels_first': # TF does not support `channels_first` with 3D pooling operations, # so we must handle this case manually. # TODO(fchollet): remove this when TF pooling is feature-complete. 
inputs = array_ops.transpose(inputs, (0, 2, 3, 4, 1)) outputs = self.pool_function( inputs, ksize=pool_shape, strides=strides, padding=self.padding.upper()) if self.data_format == 'channels_first': outputs = array_ops.transpose(outputs, (0, 4, 1, 2, 3)) return outputs def compute_output_shape(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape).as_list() if self.data_format == 'channels_first': len_dim1 = input_shape[2] len_dim2 = input_shape[3] len_dim3 = input_shape[4] else: len_dim1 = input_shape[1] len_dim2 = input_shape[2] len_dim3 = input_shape[3] len_dim1 = conv_utils.conv_output_length(len_dim1, self.pool_size[0], self.padding, self.strides[0]) len_dim2 = conv_utils.conv_output_length(len_dim2, self.pool_size[1], self.padding, self.strides[1]) len_dim3 = conv_utils.conv_output_length(len_dim3, self.pool_size[2], self.padding, self.strides[2]) if self.data_format == 'channels_first': return tensor_shape.TensorShape( [input_shape[0], input_shape[1], len_dim1, len_dim2, len_dim3]) else: return tensor_shape.TensorShape( [input_shape[0], len_dim1, len_dim2, len_dim3, input_shape[4]]) def get_config(self): config = { 'pool_size': self.pool_size, 'padding': self.padding, 'strides': self.strides, 'data_format': self.data_format } base_config = super(Pooling3D, self).get_config() return dict(list(base_config.items()) + list(config.items())) @keras_export('keras.layers.MaxPool3D', 'keras.layers.MaxPooling3D') class MaxPooling3D(Pooling3D): """Max pooling operation for 3D data (spatial or spatio-temporal). Arguments: pool_size: Tuple of 3 integers, factors by which to downscale (dim1, dim2, dim3). `(2, 2, 2)` will halve the size of the 3D input in each dimension. strides: tuple of 3 integers, or None. Strides values. padding: One of `"valid"` or `"same"` (case-insensitive). data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". Input shape: - If `data_format='channels_last'`: 5D tensor with shape: `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)` - If `data_format='channels_first'`: 5D tensor with shape: `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)` Output shape: - If `data_format='channels_last'`: 5D tensor with shape: `(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)` - If `data_format='channels_first'`: 5D tensor with shape: `(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)` """ def __init__(self, pool_size=(2, 2, 2), strides=None, padding='valid', data_format=None, **kwargs): super(MaxPooling3D, self).__init__( nn.max_pool3d, pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, **kwargs) @keras_export('keras.layers.AveragePooling3D', 'keras.layers.AvgPool3D') class AveragePooling3D(Pooling3D): """Average pooling operation for 3D data (spatial or spatio-temporal). Arguments: pool_size: tuple of 3 integers, factors by which to downscale (dim1, dim2, dim3). `(2, 2, 2)` will halve the size of the 3D input in each dimension. strides: tuple of 3 integers, or None. Strides values. padding: One of `"valid"` or `"same"` (case-insensitive). 
data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". Input shape: - If `data_format='channels_last'`: 5D tensor with shape: `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)` - If `data_format='channels_first'`: 5D tensor with shape: `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)` Output shape: - If `data_format='channels_last'`: 5D tensor with shape: `(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)` - If `data_format='channels_first'`: 5D tensor with shape: `(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)` """ def __init__(self, pool_size=(2, 2, 2), strides=None, padding='valid', data_format=None, **kwargs): super(AveragePooling3D, self).__init__( nn.avg_pool3d, pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, **kwargs) class GlobalPooling1D(Layer): """Abstract class for different global pooling 1D layers.""" def __init__(self, data_format='channels_last', **kwargs): super(GlobalPooling1D, self).__init__(**kwargs) self.input_spec = InputSpec(ndim=3) self.data_format = conv_utils.normalize_data_format(data_format) self._supports_ragged_inputs = True def compute_output_shape(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape).as_list() if self.data_format == 'channels_first': return tensor_shape.TensorShape([input_shape[0], input_shape[1]]) else: return tensor_shape.TensorShape([input_shape[0], input_shape[2]]) def call(self, inputs): raise NotImplementedError def get_config(self): config = {'data_format': self.data_format} base_config = super(GlobalPooling1D, self).get_config() return dict(list(base_config.items()) + list(config.items())) @keras_export('keras.layers.GlobalAveragePooling1D', 'keras.layers.GlobalAvgPool1D') class GlobalAveragePooling1D(GlobalPooling1D): """Global average pooling operation for temporal data. Examples: >>> input_shape = (2, 3, 4) >>> x = tf.random.normal(input_shape) >>> y = tf.keras.layers.GlobalAveragePooling1D()(x) >>> print(y.shape) (2, 4) Arguments: data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, steps, features)` while `channels_first` corresponds to inputs with shape `(batch, features, steps)`. Call arguments: inputs: A 3D tensor. mask: Binary tensor of shape `(batch_size, steps)` indicating whether a given step should be masked (excluded from the average). Input shape: - If `data_format='channels_last'`: 3D tensor with shape: `(batch_size, steps, features)` - If `data_format='channels_first'`: 3D tensor with shape: `(batch_size, features, steps)` Output shape: 2D tensor with shape `(batch_size, features)`. 
""" def __init__(self, data_format='channels_last', **kwargs): super(GlobalAveragePooling1D, self).__init__(data_format=data_format, **kwargs) self.supports_masking = True def call(self, inputs, mask=None): steps_axis = 1 if self.data_format == 'channels_last' else 2 if mask is not None: mask = math_ops.cast(mask, backend.floatx()) mask = array_ops.expand_dims( mask, 2 if self.data_format == 'channels_last' else 1) inputs *= mask return backend.sum(inputs, axis=steps_axis) / math_ops.reduce_sum( mask, axis=steps_axis) else: return backend.mean(inputs, axis=steps_axis) def compute_mask(self, inputs, mask=None): return None @keras_export('keras.layers.GlobalMaxPool1D', 'keras.layers.GlobalMaxPooling1D') class GlobalMaxPooling1D(GlobalPooling1D): """Global max pooling operation for 1D temporal data. Downsamples the input representation by taking the maximum value over the time dimension. For example: >>> x = tf.constant([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]) >>> x = tf.reshape(x, [3, 3, 1]) >>> x <tf.Tensor: shape=(3, 3, 1), dtype=float32, numpy= array([[[1.], [2.], [3.]], [[4.], [5.], [6.]], [[7.], [8.], [9.]]], dtype=float32)> >>> max_pool_1d = tf.keras.layers.GlobalMaxPooling1D() >>> max_pool_1d(x) <tf.Tensor: shape=(3, 1), dtype=float32, numpy= array([[3.], [6.], [9.], dtype=float32)> Arguments: data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, steps, features)` while `channels_first` corresponds to inputs with shape `(batch, features, steps)`. Input shape: - If `data_format='channels_last'`: 3D tensor with shape: `(batch_size, steps, features)` - If `data_format='channels_first'`: 3D tensor with shape: `(batch_size, features, steps)` Output shape: 2D tensor with shape `(batch_size, features)`. """ def call(self, inputs): steps_axis = 1 if self.data_format == 'channels_last' else 2 return backend.max(inputs, axis=steps_axis) class GlobalPooling2D(Layer): """Abstract class for different global pooling 2D layers. """ def __init__(self, data_format=None, **kwargs): super(GlobalPooling2D, self).__init__(**kwargs) self.data_format = conv_utils.normalize_data_format(data_format) self.input_spec = InputSpec(ndim=4) self._supports_ragged_inputs = True def compute_output_shape(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape).as_list() if self.data_format == 'channels_last': return tensor_shape.TensorShape([input_shape[0], input_shape[3]]) else: return tensor_shape.TensorShape([input_shape[0], input_shape[1]]) def call(self, inputs): raise NotImplementedError def get_config(self): config = {'data_format': self.data_format} base_config = super(GlobalPooling2D, self).get_config() return dict(list(base_config.items()) + list(config.items())) @keras_export('keras.layers.GlobalAveragePooling2D', 'keras.layers.GlobalAvgPool2D') class GlobalAveragePooling2D(GlobalPooling2D): """Global average pooling operation for spatial data. Examples: >>> input_shape = (2, 4, 5, 3) >>> x = tf.random.normal(input_shape) >>> y = tf.keras.layers.GlobalAveragePooling2D()(x) >>> print(y.shape) (2, 3) Arguments: data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. 
It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". Input shape: - If `data_format='channels_last'`: 4D tensor with shape `(batch_size, rows, cols, channels)`. - If `data_format='channels_first'`: 4D tensor with shape `(batch_size, channels, rows, cols)`. Output shape: 2D tensor with shape `(batch_size, channels)`. """ def call(self, inputs): if self.data_format == 'channels_last': return backend.mean(inputs, axis=[1, 2]) else: return backend.mean(inputs, axis=[2, 3]) @keras_export('keras.layers.GlobalMaxPool2D', 'keras.layers.GlobalMaxPooling2D') class GlobalMaxPooling2D(GlobalPooling2D): """Global max pooling operation for spatial data. Examples: >>> input_shape = (2, 4, 5, 3) >>> x = tf.random.normal(input_shape) >>> y = tf.keras.layers.GlobalMaxPool2D()(x) >>> print(y.shape) (2, 3) Arguments: data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". Input shape: - If `data_format='channels_last'`: 4D tensor with shape `(batch_size, rows, cols, channels)`. - If `data_format='channels_first'`: 4D tensor with shape `(batch_size, channels, rows, cols)`. Output shape: 2D tensor with shape `(batch_size, channels)`. """ def call(self, inputs): if self.data_format == 'channels_last': return backend.max(inputs, axis=[1, 2]) else: return backend.max(inputs, axis=[2, 3]) class GlobalPooling3D(Layer): """Abstract class for different global pooling 3D layers.""" def __init__(self, data_format=None, **kwargs): super(GlobalPooling3D, self).__init__(**kwargs) self.data_format = conv_utils.normalize_data_format(data_format) self.input_spec = InputSpec(ndim=5) self._supports_ragged_inputs = True def compute_output_shape(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape).as_list() if self.data_format == 'channels_last': return tensor_shape.TensorShape([input_shape[0], input_shape[4]]) else: return tensor_shape.TensorShape([input_shape[0], input_shape[1]]) def call(self, inputs): raise NotImplementedError def get_config(self): config = {'data_format': self.data_format} base_config = super(GlobalPooling3D, self).get_config() return dict(list(base_config.items()) + list(config.items())) @keras_export('keras.layers.GlobalAveragePooling3D', 'keras.layers.GlobalAvgPool3D') class GlobalAveragePooling3D(GlobalPooling3D): """Global Average pooling operation for 3D data. Arguments: data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". 
Input shape: - If `data_format='channels_last'`: 5D tensor with shape: `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)` - If `data_format='channels_first'`: 5D tensor with shape: `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)` Output shape: 2D tensor with shape `(batch_size, channels)`. """ def call(self, inputs): if self.data_format == 'channels_last': return backend.mean(inputs, axis=[1, 2, 3]) else: return backend.mean(inputs, axis=[2, 3, 4]) @keras_export('keras.layers.GlobalMaxPool3D', 'keras.layers.GlobalMaxPooling3D') class GlobalMaxPooling3D(GlobalPooling3D): """Global Max pooling operation for 3D data. Arguments: data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". Input shape: - If `data_format='channels_last'`: 5D tensor with shape: `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)` - If `data_format='channels_first'`: 5D tensor with shape: `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)` Output shape: 2D tensor with shape `(batch_size, channels)`. """ def call(self, inputs): if self.data_format == 'channels_last': return backend.max(inputs, axis=[1, 2, 3]) else: return backend.max(inputs, axis=[2, 3, 4]) # Aliases AvgPool1D = AveragePooling1D MaxPool1D = MaxPooling1D AvgPool2D = AveragePooling2D MaxPool2D = MaxPooling2D AvgPool3D = AveragePooling3D MaxPool3D = MaxPooling3D GlobalMaxPool1D = GlobalMaxPooling1D GlobalMaxPool2D = GlobalMaxPooling2D GlobalMaxPool3D = GlobalMaxPooling3D GlobalAvgPool1D = GlobalAveragePooling1D GlobalAvgPool2D = GlobalAveragePooling2D GlobalAvgPool3D = GlobalAveragePooling3D
gunan/tensorflow
tensorflow/python/keras/layers/pooling.py
Python
apache-2.0
39,575
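A short usage sketch tying a few of the pooling layers above together (requires TensorFlow 2.x); the printed output shapes follow the conv_output_length rules described in the docstrings.

import tensorflow as tf

x = tf.random.normal((2, 10, 8))  # (batch, steps, features)

print(tf.keras.layers.MaxPooling1D(pool_size=2)(x).shape)                # (2, 5, 8)
print(tf.keras.layers.AveragePooling1D(pool_size=2, strides=1,
                                       padding='same')(x).shape)          # (2, 10, 8)
print(tf.keras.layers.GlobalAveragePooling1D()(x).shape)                  # (2, 8)

img = tf.random.normal((2, 28, 28, 3))  # (batch, height, width, channels)
print(tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(img).shape)          # (2, 14, 14, 3)
print(tf.keras.layers.GlobalMaxPooling2D()(img).shape)                    # (2, 3)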
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. def bucket_lister(bucket, prefix='', delimiter='', marker='', headers=None): """ A generator function for listing keys in a bucket. """ more_results = True k = None while more_results: rs = bucket.get_all_keys(prefix=prefix, marker=marker, delimiter=delimiter, headers=headers) for k in rs: yield k if k: marker = k.name more_results= rs.is_truncated class BucketListResultSet: """ A resultset for listing keys within a bucket. Uses the bucket_lister generator function and implements the iterator interface. This transparently handles the results paging from S3 so even if you have many thousands of keys within the bucket you can iterate over all keys in a reasonably efficient manner. """ def __init__(self, bucket=None, prefix='', delimiter='', marker='', headers=None): self.bucket = bucket self.prefix = prefix self.delimiter = delimiter self.marker = marker self.headers = headers def __iter__(self): return bucket_lister(self.bucket, prefix=self.prefix, delimiter=self.delimiter, marker=self.marker, headers=self.headers) def versioned_bucket_lister(bucket, prefix='', delimiter='', key_marker='', version_id_marker='', headers=None): """ A generator function for listing versions in a bucket. """ more_results = True k = None while more_results: rs = bucket.get_all_versions(prefix=prefix, key_marker=key_marker, version_id_marker=version_id_marker, delimiter=delimiter, headers=headers, max_keys=999) for k in rs: yield k key_marker = rs.next_key_marker version_id_marker = rs.next_version_id_marker more_results= rs.is_truncated class VersionedBucketListResultSet: """ A resultset for listing versions within a bucket. Uses the bucket_lister generator function and implements the iterator interface. This transparently handles the results paging from S3 so even if you have many thousands of keys within the bucket you can iterate over all keys in a reasonably efficient manner. 
""" def __init__(self, bucket=None, prefix='', delimiter='', key_marker='', version_id_marker='', headers=None): self.bucket = bucket self.prefix = prefix self.delimiter = delimiter self.key_marker = key_marker self.version_id_marker = version_id_marker self.headers = headers def __iter__(self): return versioned_bucket_lister(self.bucket, prefix=self.prefix, delimiter=self.delimiter, key_marker=self.key_marker, version_id_marker=self.version_id_marker, headers=self.headers) def multipart_upload_lister(bucket, key_marker='', upload_id_marker='', headers=None): """ A generator function for listing multipart uploads in a bucket. """ more_results = True k = None while more_results: rs = bucket.get_all_multipart_uploads(key_marker=key_marker, upload_id_marker=upload_id_marker, headers=headers) for k in rs: yield k key_marker = rs.next_key_marker upload_id_marker = rs.next_upload_id_marker more_results= rs.is_truncated class MultiPartUploadListResultSet: """ A resultset for listing multipart uploads within a bucket. Uses the multipart_upload_lister generator function and implements the iterator interface. This transparently handles the results paging from S3 so even if you have many thousands of uploads within the bucket you can iterate over all keys in a reasonably efficient manner. """ def __init__(self, bucket=None, key_marker='', upload_id_marker='', headers=None): self.bucket = bucket self.key_marker = key_marker self.upload_id_marker = upload_id_marker self.headers = headers def __iter__(self): return multipart_upload_lister(self.bucket, key_marker=self.key_marker, upload_id_marker=self.upload_id_marker, headers=self.headers)
YanjieGao/sparrow
deploy/third_party/boto-2.1.1/boto/s3/bucketlistresultset.py
Python
apache-2.0
5,867
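A hedged usage sketch for the result sets above, using the boto 2.x API of that era; the bucket name and prefix are placeholders, and credentials are expected to come from the environment or boto config.

import boto
from boto.s3.bucketlistresultset import BucketListResultSet

conn = boto.connect_s3()
bucket = conn.get_bucket('example-bucket')  # placeholder bucket name

# Normally obtained via bucket.list(...), which builds the same object.
rs = BucketListResultSet(bucket, prefix='logs/', delimiter='/')

for key in rs:
    # Paging through get_all_keys() happens transparently inside bucket_lister().
    print(key.name)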
 
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

'''serializer.py: common python serializer for heron'''
from abc import abstractmethod

try:
  import cPickle as pickle
except:
  import pickle

import heronpy.api.cloudpickle as cloudpickle


class IHeronSerializer(object):
  """Serializer interface for Heron"""

  @abstractmethod
  def initialize(self, config):
    """Initializes the serializer"""
    pass

  @abstractmethod
  def serialize(self, obj):
    """Serialize an object

    :param obj: The object to be serialized
    :returns: Serialized object as byte string
    """
    pass

  @abstractmethod
  def deserialize(self, input_str):
    """Deserialize an object

    :param input_str: Serialized object as byte string
    :returns: Deserialized object
    """
    pass


class PythonSerializer(IHeronSerializer):
  """Default serializer"""

  def initialize(self, config=None):
    pass

  def serialize(self, obj):
    return cloudpickle.dumps(obj)

  def deserialize(self, input_str):
    return pickle.loads(input_str)


default_serializer = PythonSerializer()
mycFelix/heron
heronpy/api/serializer.py
Python
apache-2.0
1,874
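A round-trip sketch for the default serializer above; it needs heronpy installed, and any picklable object works as the payload.

from heronpy.api.serializer import default_serializer

payload = {'tuple_id': 42, 'values': ['hello', 'world']}

blob = default_serializer.serialize(payload)     # cloudpickle -> byte string
restored = default_serializer.deserialize(blob)  # pickle.loads on the other side

assert restored == payload
print(type(blob), restored)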
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ This module contains operator to move data from Hive to S3 bucket. """ import bz2 import gzip import os import tempfile from tempfile import NamedTemporaryFile, TemporaryDirectory from typing import Dict, Optional, Union from airflow.exceptions import AirflowException from airflow.models import BaseOperator from airflow.providers.amazon.aws.hooks.s3 import S3Hook from airflow.providers.apache.hive.hooks.hive import HiveCliHook from airflow.utils.compression import uncompress_file from airflow.utils.decorators import apply_defaults class S3ToHiveTransfer(BaseOperator): # pylint: disable=too-many-instance-attributes """ Moves data from S3 to Hive. The operator downloads a file from S3, stores the file locally before loading it into a Hive table. If the ``create`` or ``recreate`` arguments are set to ``True``, a ``CREATE TABLE`` and ``DROP TABLE`` statements are generated. Hive data types are inferred from the cursor's metadata from. Note that the table generated in Hive uses ``STORED AS textfile`` which isn't the most efficient serialization format. If a large amount of data is loaded and/or if the tables gets queried considerably, you may want to use this operator only to stage the data into a temporary table before loading it into its final destination using a ``HiveOperator``. :param s3_key: The key to be retrieved from S3. (templated) :type s3_key: str :param field_dict: A dictionary of the fields name in the file as keys and their Hive types as values :type field_dict: dict :param hive_table: target Hive table, use dot notation to target a specific database. (templated) :type hive_table: str :param delimiter: field delimiter in the file :type delimiter: str :param create: whether to create the table if it doesn't exist :type create: bool :param recreate: whether to drop and recreate the table at every execution :type recreate: bool :param partition: target partition as a dict of partition columns and values. (templated) :type partition: dict :param headers: whether the file contains column names on the first line :type headers: bool :param check_headers: whether the column names on the first line should be checked against the keys of field_dict :type check_headers: bool :param wildcard_match: whether the s3_key should be interpreted as a Unix wildcard pattern :type wildcard_match: bool :param aws_conn_id: source s3 connection :type aws_conn_id: str :param verify: Whether or not to verify SSL certificates for S3 connection. By default SSL certificates are verified. You can provide the following values: - ``False``: do not validate SSL certificates. SSL will still be used (unless use_ssl is False), but SSL certificates will not be verified. - ``path/to/cert/bundle.pem``: A filename of the CA cert bundle to uses. 
You can specify this argument if you want to use a different CA cert bundle than the one used by botocore. :type verify: bool or str :param hive_cli_conn_id: destination hive connection :type hive_cli_conn_id: str :param input_compressed: Boolean to determine if file decompression is required to process headers :type input_compressed: bool :param tblproperties: TBLPROPERTIES of the hive table being created :type tblproperties: dict :param select_expression: S3 Select expression :type select_expression: str """ template_fields = ('s3_key', 'partition', 'hive_table') template_ext = () ui_color = '#a0e08c' @apply_defaults def __init__( # pylint: disable=too-many-arguments self, s3_key: str, field_dict: Dict, hive_table: str, delimiter: str = ',', create: bool = True, recreate: bool = False, partition: Optional[Dict] = None, headers: bool = False, check_headers: bool = False, wildcard_match: bool = False, aws_conn_id: str = 'aws_default', verify: Optional[Union[bool, str]] = None, hive_cli_conn_id: str = 'hive_cli_default', input_compressed: bool = False, tblproperties: Optional[Dict] = None, select_expression: Optional[str] = None, *args, **kwargs) -> None: super().__init__(*args, **kwargs) self.s3_key = s3_key self.field_dict = field_dict self.hive_table = hive_table self.delimiter = delimiter self.create = create self.recreate = recreate self.partition = partition self.headers = headers self.check_headers = check_headers self.wildcard_match = wildcard_match self.hive_cli_conn_id = hive_cli_conn_id self.aws_conn_id = aws_conn_id self.verify = verify self.input_compressed = input_compressed self.tblproperties = tblproperties self.select_expression = select_expression if (self.check_headers and not (self.field_dict is not None and self.headers)): raise AirflowException("To check_headers provide " + "field_dict and headers") def execute(self, context): # Downloading file from S3 s3_hook = S3Hook(aws_conn_id=self.aws_conn_id, verify=self.verify) hive_hook = HiveCliHook(hive_cli_conn_id=self.hive_cli_conn_id) self.log.info("Downloading S3 file") if self.wildcard_match: if not s3_hook.check_for_wildcard_key(self.s3_key): raise AirflowException(f"No key matches {self.s3_key}") s3_key_object = s3_hook.get_wildcard_key(self.s3_key) else: if not s3_hook.check_for_key(self.s3_key): raise AirflowException(f"The key {self.s3_key} does not exists") s3_key_object = s3_hook.get_key(self.s3_key) _, file_ext = os.path.splitext(s3_key_object.key) if (self.select_expression and self.input_compressed and file_ext.lower() != '.gz'): raise AirflowException("GZIP is the only compression " + "format Amazon S3 Select supports") with TemporaryDirectory(prefix='tmps32hive_') as tmp_dir,\ NamedTemporaryFile(mode="wb", dir=tmp_dir, suffix=file_ext) as f: self.log.info( "Dumping S3 key %s contents to local file %s", s3_key_object.key, f.name ) if self.select_expression: option = {} if self.headers: option['FileHeaderInfo'] = 'USE' if self.delimiter: option['FieldDelimiter'] = self.delimiter input_serialization = {'CSV': option} if self.input_compressed: input_serialization['CompressionType'] = 'GZIP' content = s3_hook.select_key( bucket_name=s3_key_object.bucket_name, key=s3_key_object.key, expression=self.select_expression, input_serialization=input_serialization ) f.write(content.encode("utf-8")) else: s3_key_object.download_fileobj(f) f.flush() if self.select_expression or not self.headers: self.log.info("Loading file %s into Hive", f.name) hive_hook.load_file( f.name, self.hive_table, field_dict=self.field_dict, 
create=self.create, partition=self.partition, delimiter=self.delimiter, recreate=self.recreate, tblproperties=self.tblproperties) else: # Decompressing file if self.input_compressed: self.log.info("Uncompressing file %s", f.name) fn_uncompressed = uncompress_file(f.name, file_ext, tmp_dir) self.log.info("Uncompressed to %s", fn_uncompressed) # uncompressed file available now so deleting # compressed file to save disk space f.close() else: fn_uncompressed = f.name # Testing if header matches field_dict if self.check_headers: self.log.info("Matching file header against field_dict") header_list = self._get_top_row_as_list(fn_uncompressed) if not self._match_headers(header_list): raise AirflowException("Header check failed") # Deleting top header row self.log.info("Removing header from file %s", fn_uncompressed) headless_file = ( self._delete_top_row_and_compress(fn_uncompressed, file_ext, tmp_dir)) self.log.info("Headless file %s", headless_file) self.log.info("Loading file %s into Hive", headless_file) hive_hook.load_file(headless_file, self.hive_table, field_dict=self.field_dict, create=self.create, partition=self.partition, delimiter=self.delimiter, recreate=self.recreate, tblproperties=self.tblproperties) def _get_top_row_as_list(self, file_name): with open(file_name, 'rt') as file: header_line = file.readline().strip() header_list = header_line.split(self.delimiter) return header_list def _match_headers(self, header_list): if not header_list: raise AirflowException("Unable to retrieve header row from file") field_names = self.field_dict.keys() if len(field_names) != len(header_list): self.log.warning( "Headers count mismatch File headers:\n %s\nField names: \n %s\n", header_list, field_names ) return False test_field_match = [h1.lower() == h2.lower() for h1, h2 in zip(header_list, field_names)] if not all(test_field_match): self.log.warning( "Headers do not match field names File headers:\n %s\nField names: \n %s\n", header_list, field_names ) return False else: return True @staticmethod def _delete_top_row_and_compress( input_file_name, output_file_ext, dest_dir): # When output_file_ext is not defined, file is not compressed open_fn = open if output_file_ext.lower() == '.gz': open_fn = gzip.GzipFile elif output_file_ext.lower() == '.bz2': open_fn = bz2.BZ2File _, fn_output = tempfile.mkstemp(suffix=output_file_ext, dir=dest_dir) with open(input_file_name, 'rb') as f_in, open_fn(fn_output, 'wb') as f_out: f_in.seek(0) next(f_in) for line in f_in: f_out.write(line) return fn_output
spektom/incubator-airflow
airflow/providers/apache/hive/operators/s3_to_hive.py
Python
apache-2.0
12,661
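A minimal usage sketch for the S3ToHiveTransfer operator defined in the record above, assuming an Airflow installation where the module is importable from the path shown. The DAG id, connection ids, S3 key and field_dict values are illustrative placeholders, not values taken from the source.

from datetime import datetime

from airflow import DAG
from airflow.providers.apache.hive.operators.s3_to_hive import S3ToHiveTransfer

# Illustrative wiring only; the bucket, key and table names are made up.
with DAG(dag_id='s3_to_hive_example',
         start_date=datetime(2020, 1, 1),
         schedule_interval=None) as dag:
    stage_orders = S3ToHiveTransfer(
        task_id='stage_orders',
        s3_key='s3://example-bucket/orders/{{ ds }}.csv',   # templated field
        field_dict={'order_id': 'BIGINT', 'amount': 'DOUBLE'},
        hive_table='staging.orders',
        delimiter=',',
        headers=True,
        check_headers=True,     # requires both field_dict and headers
        recreate=True,
        aws_conn_id='aws_default',
        hive_cli_conn_id='hive_cli_default',
    )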
# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Manage hosts in the current zone. """ import collections import time try: from collections import UserDict as IterableUserDict # Python 3 except ImportError: from UserDict import IterableUserDict # Python 2 import iso8601 from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import timeutils import six from nova.compute import task_states from nova.compute import vm_states from nova import context as context_module from nova import exception from nova.i18n import _, _LI, _LW from nova import objects from nova.pci import stats as pci_stats from nova.scheduler import filters from nova.scheduler import weights from nova import utils from nova.virt import hardware host_manager_opts = [ cfg.MultiStrOpt('scheduler_available_filters', default=['nova.scheduler.filters.all_filters'], help='Filter classes available to the scheduler which may ' 'be specified more than once. An entry of ' '"nova.scheduler.filters.all_filters" ' 'maps to all filters included with nova.'), cfg.ListOpt('scheduler_default_filters', default=[ 'RetryFilter', 'AvailabilityZoneFilter', 'RamFilter', 'DiskFilter', 'ComputeFilter', 'ComputeCapabilitiesFilter', 'ImagePropertiesFilter', 'ServerGroupAntiAffinityFilter', 'ServerGroupAffinityFilter', ], help='Which filter class names to use for filtering hosts ' 'when not specified in the request.'), cfg.ListOpt('scheduler_weight_classes', default=['nova.scheduler.weights.all_weighers'], help='Which weight class names to use for weighing hosts'), cfg.BoolOpt('scheduler_tracks_instance_changes', default=True, help='Determines if the Scheduler tracks changes to instances ' 'to help with its filtering decisions.'), ] CONF = cfg.CONF CONF.register_opts(host_manager_opts) LOG = logging.getLogger(__name__) HOST_INSTANCE_SEMAPHORE = "host_instance" class ReadOnlyDict(IterableUserDict): """A read-only dict.""" def __init__(self, source=None): self.data = {} if source: self.data.update(source) def __setitem__(self, key, item): raise TypeError() def __delitem__(self, key): raise TypeError() def clear(self): raise TypeError() def pop(self, key, *args): raise TypeError() def popitem(self): raise TypeError() def update(self): raise TypeError() # Representation of a single metric value from a compute node. MetricItem = collections.namedtuple( 'MetricItem', ['value', 'timestamp', 'source']) class HostState(object): """Mutable and immutable information tracked for a host. This is an attempt to remove the ad-hoc data structures previously used and lock down access. """ def __init__(self, host, node, compute=None): self.host = host self.nodename = node # Mutable available resources. # These will change as resources are virtually "consumed". 
self.total_usable_ram_mb = 0 self.total_usable_disk_gb = 0 self.disk_mb_used = 0 self.free_ram_mb = 0 self.free_disk_mb = 0 self.vcpus_total = 0 self.vcpus_used = 0 self.pci_stats = None self.numa_topology = None # Additional host information from the compute node stats: self.num_instances = 0 self.num_io_ops = 0 # Other information self.host_ip = None self.hypervisor_type = None self.hypervisor_version = None self.hypervisor_hostname = None self.cpu_info = None self.supported_instances = None # Resource oversubscription values for the compute host: self.limits = {} # Generic metrics from compute nodes self.metrics = {} # List of aggregates the host belongs to self.aggregates = [] # Instances on this host self.instances = {} self.updated = None if compute: self.update_from_compute_node(compute) def update_service(self, service): self.service = ReadOnlyDict(service) def _update_metrics_from_compute_node(self, compute): """Update metrics from a ComputeNode object.""" # NOTE(llu): The 'or []' is to avoid json decode failure of None # returned from compute.get, because DB schema allows # NULL in the metrics column metrics = compute.metrics or [] if metrics: metrics = jsonutils.loads(metrics) for metric in metrics: # 'name', 'value', 'timestamp' and 'source' are all required # to be valid keys, just let KeyError happen if any one of # them is missing. But we also require 'name' to be True. name = metric['name'] item = MetricItem(value=metric['value'], timestamp=metric['timestamp'], source=metric['source']) if name: self.metrics[name] = item else: LOG.warning(_LW("Metric name unknown of %r"), item) def update_from_compute_node(self, compute): """Update information about a host from a ComputeNode object.""" if (self.updated and compute.updated_at and self.updated > compute.updated_at): return all_ram_mb = compute.memory_mb # Assume virtual size is all consumed by instances if use qcow2 disk. free_gb = compute.free_disk_gb least_gb = compute.disk_available_least if least_gb is not None: if least_gb > free_gb: # can occur when an instance in database is not on host LOG.warning(_LW("Host %(hostname)s has more disk space than " "database expected " "(%(physical)sgb > %(database)sgb)"), {'physical': least_gb, 'database': free_gb, 'hostname': compute.hypervisor_hostname}) free_gb = min(least_gb, free_gb) free_disk_mb = free_gb * 1024 self.disk_mb_used = compute.local_gb_used * 1024 # NOTE(jogo) free_ram_mb can be negative self.free_ram_mb = compute.free_ram_mb self.total_usable_ram_mb = all_ram_mb self.total_usable_disk_gb = compute.local_gb self.free_disk_mb = free_disk_mb self.vcpus_total = compute.vcpus self.vcpus_used = compute.vcpus_used self.updated = compute.updated_at self.numa_topology = compute.numa_topology self.pci_stats = pci_stats.PciDeviceStats( compute.pci_device_pools) # All virt drivers report host_ip self.host_ip = compute.host_ip self.hypervisor_type = compute.hypervisor_type self.hypervisor_version = compute.hypervisor_version self.hypervisor_hostname = compute.hypervisor_hostname self.cpu_info = compute.cpu_info if compute.supported_hv_specs: self.supported_instances = [spec.to_list() for spec in compute.supported_hv_specs] else: self.supported_instances = [] # Don't store stats directly in host_state to make sure these don't # overwrite any values, or get overwritten themselves. Store in self so # filters can schedule with them. 
self.stats = compute.stats or {} # Track number of instances on host self.num_instances = int(self.stats.get('num_instances', 0)) self.num_io_ops = int(self.stats.get('io_workload', 0)) # update metrics self._update_metrics_from_compute_node(compute) def consume_from_instance(self, instance): """Incrementally update host state from an instance.""" disk_mb = (instance['root_gb'] + instance['ephemeral_gb']) * 1024 ram_mb = instance['memory_mb'] vcpus = instance['vcpus'] self.free_ram_mb -= ram_mb self.free_disk_mb -= disk_mb self.vcpus_used += vcpus now = timeutils.utcnow() # NOTE(sbauza): Objects are UTC tz-aware by default self.updated = now.replace(tzinfo=iso8601.iso8601.Utc()) # Track number of instances on host self.num_instances += 1 pci_requests = instance.get('pci_requests') # NOTE(danms): Instance here is still a dict, which is converted from # an object. The pci_requests are a dict as well. Convert this when # we get an object all the way to this path. if pci_requests and pci_requests['requests'] and self.pci_stats: pci_requests = objects.InstancePCIRequests \ .from_request_spec_instance_props(pci_requests) pci_requests = pci_requests.requests else: pci_requests = None # Calculate the numa usage host_numa_topology, _fmt = hardware.host_topology_and_format_from_host( self) instance_numa_topology = hardware.instance_topology_from_instance( instance) instance['numa_topology'] = hardware.numa_fit_instance_to_host( host_numa_topology, instance_numa_topology, limits=self.limits.get('numa_topology'), pci_requests=pci_requests, pci_stats=self.pci_stats) if pci_requests: instance_cells = None if instance['numa_topology']: instance_cells = instance['numa_topology'].cells self.pci_stats.apply_requests(pci_requests, instance_cells) self.numa_topology = hardware.get_host_numa_usage_from_instance( self, instance) vm_state = instance.get('vm_state', vm_states.BUILDING) task_state = instance.get('task_state') if vm_state == vm_states.BUILDING or task_state in [ task_states.RESIZE_MIGRATING, task_states.REBUILDING, task_states.RESIZE_PREP, task_states.IMAGE_SNAPSHOT, task_states.IMAGE_BACKUP, task_states.UNSHELVING, task_states.RESCUING]: self.num_io_ops += 1 def __repr__(self): return ("(%s, %s) ram:%s disk:%s io_ops:%s instances:%s" % (self.host, self.nodename, self.free_ram_mb, self.free_disk_mb, self.num_io_ops, self.num_instances)) class HostManager(object): """Base HostManager class.""" # Can be overridden in a subclass def host_state_cls(self, host, node, **kwargs): return HostState(host, node, **kwargs) def __init__(self): self.host_state_map = {} self.filter_handler = filters.HostFilterHandler() filter_classes = self.filter_handler.get_matching_classes( CONF.scheduler_available_filters) self.filter_cls_map = {cls.__name__: cls for cls in filter_classes} self.filter_obj_map = {} self.default_filters = self._choose_host_filters(self._load_filters()) self.weight_handler = weights.HostWeightHandler() weigher_classes = self.weight_handler.get_matching_classes( CONF.scheduler_weight_classes) self.weighers = [cls() for cls in weigher_classes] # Dict of aggregates keyed by their ID self.aggs_by_id = {} # Dict of set of aggregate IDs keyed by the name of the host belonging # to those aggregates self.host_aggregates_map = collections.defaultdict(set) self._init_aggregates() self.tracks_instance_changes = CONF.scheduler_tracks_instance_changes # Dict of instances and status, keyed by host self._instance_info = {} if self.tracks_instance_changes: self._init_instance_info() def _load_filters(self): 
return CONF.scheduler_default_filters def _init_aggregates(self): elevated = context_module.get_admin_context() aggs = objects.AggregateList.get_all(elevated) for agg in aggs: self.aggs_by_id[agg.id] = agg for host in agg.hosts: self.host_aggregates_map[host].add(agg.id) def update_aggregates(self, aggregates): """Updates internal HostManager information about aggregates.""" if isinstance(aggregates, (list, objects.AggregateList)): for agg in aggregates: self._update_aggregate(agg) else: self._update_aggregate(aggregates) def _update_aggregate(self, aggregate): self.aggs_by_id[aggregate.id] = aggregate for host in aggregate.hosts: self.host_aggregates_map[host].add(aggregate.id) # Refreshing the mapping dict to remove all hosts that are no longer # part of the aggregate for host in self.host_aggregates_map: if (aggregate.id in self.host_aggregates_map[host] and host not in aggregate.hosts): self.host_aggregates_map[host].remove(aggregate.id) def delete_aggregate(self, aggregate): """Deletes internal HostManager information about a specific aggregate. """ if aggregate.id in self.aggs_by_id: del self.aggs_by_id[aggregate.id] for host in aggregate.hosts: if aggregate.id in self.host_aggregates_map[host]: self.host_aggregates_map[host].remove(aggregate.id) def _init_instance_info(self): """Creates the initial view of instances for all hosts. As this initial population of instance information may take some time, we don't wish to block the scheduler's startup while this completes. The async method allows us to simply mock out the _init_instance_info() method in tests. """ def _async_init_instance_info(): context = context_module.get_admin_context() LOG.debug("START:_async_init_instance_info") self._instance_info = {} compute_nodes = objects.ComputeNodeList.get_all(context).objects LOG.debug("Total number of compute nodes: %s", len(compute_nodes)) # Break the queries into batches of 10 to reduce the total number # of calls to the DB. batch_size = 10 start_node = 0 end_node = batch_size while start_node <= len(compute_nodes): curr_nodes = compute_nodes[start_node:end_node] start_node += batch_size end_node += batch_size filters = {"host": [curr_node.host for curr_node in curr_nodes]} result = objects.InstanceList.get_by_filters(context, filters) instances = result.objects LOG.debug("Adding %s instances for hosts %s-%s", len(instances), start_node, end_node) for instance in instances: host = instance.host if host not in self._instance_info: self._instance_info[host] = {"instances": {}, "updated": False} inst_dict = self._instance_info[host] inst_dict["instances"][instance.uuid] = instance # Call sleep() to cooperatively yield time.sleep(0) LOG.debug("END:_async_init_instance_info") # Run this async so that we don't block the scheduler start-up utils.spawn_n(_async_init_instance_info) def _choose_host_filters(self, filter_cls_names): """Since the caller may specify which filters to use we need to have an authoritative list of what is permissible. This function checks the filter names against a predefined set of acceptable filters. 
""" if not isinstance(filter_cls_names, (list, tuple)): filter_cls_names = [filter_cls_names] good_filters = [] bad_filters = [] for filter_name in filter_cls_names: if filter_name not in self.filter_obj_map: if filter_name not in self.filter_cls_map: bad_filters.append(filter_name) continue filter_cls = self.filter_cls_map[filter_name] self.filter_obj_map[filter_name] = filter_cls() good_filters.append(self.filter_obj_map[filter_name]) if bad_filters: msg = ", ".join(bad_filters) raise exception.SchedulerHostFilterNotFound(filter_name=msg) return good_filters def get_filtered_hosts(self, hosts, filter_properties, filter_class_names=None, index=0): """Filter hosts and return only ones passing all filters.""" def _strip_ignore_hosts(host_map, hosts_to_ignore): ignored_hosts = [] for host in hosts_to_ignore: for (hostname, nodename) in list(host_map.keys()): if host == hostname: del host_map[(hostname, nodename)] ignored_hosts.append(host) ignored_hosts_str = ', '.join(ignored_hosts) msg = _('Host filter ignoring hosts: %s') LOG.info(msg % ignored_hosts_str) def _match_forced_hosts(host_map, hosts_to_force): forced_hosts = [] for (hostname, nodename) in list(host_map.keys()): if hostname not in hosts_to_force: del host_map[(hostname, nodename)] else: forced_hosts.append(hostname) if host_map: forced_hosts_str = ', '.join(forced_hosts) msg = _('Host filter forcing available hosts to %s') else: forced_hosts_str = ', '.join(hosts_to_force) msg = _("No hosts matched due to not matching " "'force_hosts' value of '%s'") LOG.info(msg % forced_hosts_str) def _match_forced_nodes(host_map, nodes_to_force): forced_nodes = [] for (hostname, nodename) in list(host_map.keys()): if nodename not in nodes_to_force: del host_map[(hostname, nodename)] else: forced_nodes.append(nodename) if host_map: forced_nodes_str = ', '.join(forced_nodes) msg = _('Host filter forcing available nodes to %s') else: forced_nodes_str = ', '.join(nodes_to_force) msg = _("No nodes matched due to not matching " "'force_nodes' value of '%s'") LOG.info(msg % forced_nodes_str) if filter_class_names is None: filters = self.default_filters else: filters = self._choose_host_filters(filter_class_names) ignore_hosts = filter_properties.get('ignore_hosts', []) force_hosts = filter_properties.get('force_hosts', []) force_nodes = filter_properties.get('force_nodes', []) if ignore_hosts or force_hosts or force_nodes: # NOTE(deva): we can't assume "host" is unique because # one host may have many nodes. name_to_cls_map = {(x.host, x.nodename): x for x in hosts} if ignore_hosts: _strip_ignore_hosts(name_to_cls_map, ignore_hosts) if not name_to_cls_map: return [] # NOTE(deva): allow force_hosts and force_nodes independently if force_hosts: _match_forced_hosts(name_to_cls_map, force_hosts) if force_nodes: _match_forced_nodes(name_to_cls_map, force_nodes) if force_hosts or force_nodes: # NOTE(deva): Skip filters when forcing host or node if name_to_cls_map: return name_to_cls_map.values() hosts = six.itervalues(name_to_cls_map) return self.filter_handler.get_filtered_objects(filters, hosts, filter_properties, index) def get_weighed_hosts(self, hosts, weight_properties): """Weigh the hosts.""" return self.weight_handler.get_weighed_objects(self.weighers, hosts, weight_properties) def get_all_host_states(self, context): """Returns a list of HostStates that represents all the hosts the HostManager knows about. Also, each of the consumable resources in HostState are pre-populated and adjusted based on data in the db. 
""" service_refs = {service.host: service for service in objects.ServiceList.get_by_binary( context, 'nova-compute')} # Get resource usage across the available compute nodes: compute_nodes = objects.ComputeNodeList.get_all(context) seen_nodes = set() for compute in compute_nodes: service = service_refs.get(compute.host) if not service: LOG.warning(_LW( "No compute service record found for host %(host)s"), {'host': compute.host}) continue host = compute.host node = compute.hypervisor_hostname state_key = (host, node) host_state = self.host_state_map.get(state_key) if host_state: host_state.update_from_compute_node(compute) else: host_state = self.host_state_cls(host, node, compute=compute) self.host_state_map[state_key] = host_state # We force to update the aggregates info each time a new request # comes in, because some changes on the aggregates could have been # happening after setting this field for the first time host_state.aggregates = [self.aggs_by_id[agg_id] for agg_id in self.host_aggregates_map[ host_state.host]] host_state.update_service(dict(service)) self._add_instance_info(context, compute, host_state) seen_nodes.add(state_key) # remove compute nodes from host_state_map if they are not active dead_nodes = set(self.host_state_map.keys()) - seen_nodes for state_key in dead_nodes: host, node = state_key LOG.info(_LI("Removing dead compute node %(host)s:%(node)s " "from scheduler"), {'host': host, 'node': node}) del self.host_state_map[state_key] return six.itervalues(self.host_state_map) def _add_instance_info(self, context, compute, host_state): """Adds the host instance info to the host_state object. Some older compute nodes may not be sending instance change updates to the Scheduler; other sites may disable this feature for performance reasons. In either of these cases, there will either be no information for the host, or the 'updated' value for that host dict will be False. In those cases, we need to grab the current InstanceList instead of relying on the version in _instance_info. """ host_name = compute.host host_info = self._instance_info.get(host_name) if host_info and host_info.get("updated"): inst_dict = host_info["instances"] else: # Host is running old version, or updates aren't flowing. inst_list = objects.InstanceList.get_by_host(context, host_name) inst_dict = {instance.uuid: instance for instance in inst_list.objects} host_state.instances = inst_dict def _recreate_instance_info(self, context, host_name): """Get the InstanceList for the specified host, and store it in the _instance_info dict. """ instances = objects.InstanceList.get_by_host(context, host_name) inst_dict = {instance.uuid: instance for instance in instances} host_info = self._instance_info[host_name] = {} host_info["instances"] = inst_dict host_info["updated"] = False @utils.synchronized(HOST_INSTANCE_SEMAPHORE) def update_instance_info(self, context, host_name, instance_info): """Receives an InstanceList object from a compute node. This method receives information from a compute node when it starts up, or when its instances have changed, and updates its view of hosts and instances with it. """ host_info = self._instance_info.get(host_name) if host_info: inst_dict = host_info.get("instances") for instance in instance_info.objects: # Overwrite the entry (if any) with the new info. inst_dict[instance.uuid] = instance host_info["updated"] = True else: instances = instance_info.objects if len(instances) > 1: # This is a host sending its full instance list, so use it. 
host_info = self._instance_info[host_name] = {} host_info["instances"] = {instance.uuid: instance for instance in instances} host_info["updated"] = True else: self._recreate_instance_info(context, host_name) LOG.info(_LI("Received an update from an unknown host '%s'. " "Re-created its InstanceList."), host_name) @utils.synchronized(HOST_INSTANCE_SEMAPHORE) def delete_instance_info(self, context, host_name, instance_uuid): """Receives the UUID from a compute node when one of its instances is terminated. The instance in the local view of the host's instances is removed. """ host_info = self._instance_info.get(host_name) if host_info: inst_dict = host_info["instances"] # Remove the existing Instance object, if any inst_dict.pop(instance_uuid, None) host_info["updated"] = True else: self._recreate_instance_info(context, host_name) LOG.info(_LI("Received a delete update from an unknown host '%s'. " "Re-created its InstanceList."), host_name) @utils.synchronized(HOST_INSTANCE_SEMAPHORE) def sync_instance_info(self, context, host_name, instance_uuids): """Receives the uuids of the instances on a host. This method is periodically called by the compute nodes, which send a list of all the UUID values for the instances on that node. This is used by the scheduler's HostManager to detect when its view of the compute node's instances is out of sync. """ host_info = self._instance_info.get(host_name) if host_info: local_set = set(host_info["instances"].keys()) compute_set = set(instance_uuids) if not local_set == compute_set: self._recreate_instance_info(context, host_name) LOG.info(_LI("The instance sync for host '%s' did not match. " "Re-created its InstanceList."), host_name) return host_info["updated"] = True LOG.info(_LI("Successfully synced instances from host '%s'."), host_name) else: self._recreate_instance_info(context, host_name) LOG.info(_LI("Received a sync request from an unknown host '%s'. " "Re-created its InstanceList."), host_name)
LoHChina/nova
nova/scheduler/host_manager.py
Python
apache-2.0
28,870
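The _init_instance_info method in the record above walks the compute nodes in fixed-size batches to limit the number of InstanceList queries against the database. A standalone sketch of that sliding-window slicing pattern, using a plain list in place of ComputeNode objects (names here are illustrative):

def iter_batches(items, batch_size=10):
    """Yield successive slices of items, mirroring the start_node/end_node
    window used by _async_init_instance_info above."""
    start = 0
    end = batch_size
    while start <= len(items):
        batch = items[start:end]
        start += batch_size
        end += batch_size
        if batch:       # skip the empty trailing slice the loop can produce
            yield batch

# Example: 25 fake host names grouped into queries of 10 hosts each.
hosts = ['host%02d' % i for i in range(25)]
for batch in iter_batches(hosts):
    print(len(batch), batch[0], '->', batch[-1])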
# Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import os.path import tensorflow.python.platform from tensorflow.python.framework import test_util from tensorflow.python.platform import gfile from tensorflow.python.platform import googletest from tensorflow.python.summary import event_accumulator from tensorflow.python.summary import event_multiplexer def _AddEvents(path): if not gfile.IsDirectory(path): gfile.MakeDirs(path) fpath = os.path.join(path, 'hypothetical.tfevents.out') with gfile.GFile(fpath, 'w'): return fpath def _CreateCleanDirectory(path): if gfile.IsDirectory(path): gfile.DeleteRecursively(path) gfile.MkDir(path) class _FakeAccumulator(object): def __init__(self, path): self._path = path self.reload_called = False def Tags(self): return {event_accumulator.IMAGES: ['im1', 'im2'], event_accumulator.HISTOGRAMS: ['hst1', 'hst2'], event_accumulator.COMPRESSED_HISTOGRAMS: ['cmphst1', 'cmphst2'], event_accumulator.SCALARS: ['sv1', 'sv2']} def Scalars(self, tag_name): if tag_name not in self.Tags()[event_accumulator.SCALARS]: raise KeyError return ['%s/%s' % (self._path, tag_name)] def Histograms(self, tag_name): if tag_name not in self.Tags()[event_accumulator.HISTOGRAMS]: raise KeyError return ['%s/%s' % (self._path, tag_name)] def CompressedHistograms(self, tag_name): if tag_name not in self.Tags()[event_accumulator.COMPRESSED_HISTOGRAMS]: raise KeyError return ['%s/%s' % (self._path, tag_name)] def Images(self, tag_name): if tag_name not in self.Tags()[event_accumulator.IMAGES]: raise KeyError return ['%s/%s' % (self._path, tag_name)] def Reload(self): self.reload_called = True def _GetFakeAccumulator(path, size_guidance): # pylint: disable=unused-argument return _FakeAccumulator(path) class EventMultiplexerTest(test_util.TensorFlowTestCase): def setUp(self): super(EventMultiplexerTest, self).setUp() event_accumulator.EventAccumulator = _GetFakeAccumulator def testEmptyLoader(self): x = event_multiplexer.EventMultiplexer() self.assertEqual(x.Runs(), {}) def testRunNamesRespected(self): x = event_multiplexer.EventMultiplexer({'run1': 'path1', 'run2': 'path2'}) self.assertItemsEqual(sorted(x.Runs().keys()), ['run1', 'run2']) self.assertEqual(x._GetAccumulator('run1')._path, 'path1') self.assertEqual(x._GetAccumulator('run2')._path, 'path2') def testReload(self): x = event_multiplexer.EventMultiplexer({'run1': 'path1', 'run2': 'path2'}) self.assertFalse(x._GetAccumulator('run1').reload_called) self.assertFalse(x._GetAccumulator('run2').reload_called) x.Reload() self.assertTrue(x._GetAccumulator('run1').reload_called) self.assertTrue(x._GetAccumulator('run2').reload_called) def testScalars(self): x = event_multiplexer.EventMultiplexer({'run1': 'path1', 'run2': 'path2'}) run1_actual = x.Scalars('run1', 'sv1') run1_expected = ['path1/sv1'] 
self.assertEqual(run1_expected, run1_actual) def testExceptions(self): x = event_multiplexer.EventMultiplexer({'run1': 'path1', 'run2': 'path2'}) with self.assertRaises(KeyError): x.Scalars('sv1', 'xxx') def testInitialization(self): x = event_multiplexer.EventMultiplexer() self.assertEqual(x.Runs(), {}) x = event_multiplexer.EventMultiplexer({'run1': 'path1', 'run2': 'path2'}) self.assertItemsEqual(x.Runs(), ['run1', 'run2']) self.assertEqual(x._GetAccumulator('run1')._path, 'path1') self.assertEqual(x._GetAccumulator('run2')._path, 'path2') def testAddRunsFromDirectory(self): x = event_multiplexer.EventMultiplexer() tmpdir = self.get_temp_dir() join = os.path.join fakedir = join(tmpdir, 'fake_accumulator_directory') realdir = join(tmpdir, 'real_accumulator_directory') self.assertEqual(x.Runs(), {}) x.AddRunsFromDirectory(fakedir) self.assertEqual(x.Runs(), {}, 'loading fakedir had no effect') _CreateCleanDirectory(realdir) x.AddRunsFromDirectory(realdir) self.assertEqual(x.Runs(), {}, 'loading empty directory had no effect') path1 = join(realdir, 'path1') gfile.MkDir(path1) x.AddRunsFromDirectory(realdir) self.assertEqual(x.Runs(), {}, 'creating empty subdirectory had no effect') _AddEvents(path1) x.AddRunsFromDirectory(realdir) self.assertItemsEqual(x.Runs(), ['path1'], 'loaded run: path1') loader1 = x._GetAccumulator('path1') self.assertEqual(loader1._path, path1, 'has the correct path') path2 = join(realdir, 'path2') _AddEvents(path2) x.AddRunsFromDirectory(realdir) self.assertItemsEqual(x.Runs(), ['path1', 'path2']) self.assertEqual(x._GetAccumulator('path1'), loader1, 'loader1 not regenerated') path2_2 = join(path2, 'path2') _AddEvents(path2_2) x.AddRunsFromDirectory(realdir) self.assertItemsEqual(x.Runs(), ['path1', 'path2', 'path2/path2']) self.assertEqual(x._GetAccumulator('path2/path2')._path, path2_2, 'loader2 path correct') def testAddRunsFromDirectoryThatContainsEvents(self): x = event_multiplexer.EventMultiplexer() tmpdir = self.get_temp_dir() join = os.path.join realdir = join(tmpdir, 'event_containing_directory') _CreateCleanDirectory(realdir) self.assertEqual(x.Runs(), {}) _AddEvents(realdir) x.AddRunsFromDirectory(realdir) self.assertItemsEqual(x.Runs(), ['.']) subdir = join(realdir, 'subdir') _AddEvents(subdir) x.AddRunsFromDirectory(realdir) self.assertItemsEqual(x.Runs(), ['.', 'subdir']) def testAddRunsFromDirectoryWithRunNames(self): x = event_multiplexer.EventMultiplexer() tmpdir = self.get_temp_dir() join = os.path.join realdir = join(tmpdir, 'event_containing_directory') _CreateCleanDirectory(realdir) self.assertEqual(x.Runs(), {}) _AddEvents(realdir) x.AddRunsFromDirectory(realdir, 'foo') self.assertItemsEqual(x.Runs(), ['foo/.']) subdir = join(realdir, 'subdir') _AddEvents(subdir) x.AddRunsFromDirectory(realdir, 'foo') self.assertItemsEqual(x.Runs(), ['foo/.', 'foo/subdir']) def testAddRunsFromDirectoryWalksTree(self): x = event_multiplexer.EventMultiplexer() tmpdir = self.get_temp_dir() join = os.path.join realdir = join(tmpdir, 'event_containing_directory') _CreateCleanDirectory(realdir) _AddEvents(realdir) sub = join(realdir, 'subdirectory') sub1 = join(sub, '1') sub2 = join(sub, '2') sub1_1 = join(sub1, '1') _AddEvents(sub1) _AddEvents(sub2) _AddEvents(sub1_1) x.AddRunsFromDirectory(realdir) self.assertItemsEqual(x.Runs(), ['.', 'subdirectory/1', 'subdirectory/2', 'subdirectory/1/1']) def testAddRunsFromDirectoryThrowsException(self): x = event_multiplexer.EventMultiplexer() tmpdir = self.get_temp_dir() filepath = _AddEvents(tmpdir) with 
self.assertRaises(ValueError): x.AddRunsFromDirectory(filepath) def testAddRun(self): x = event_multiplexer.EventMultiplexer() x.AddRun('run1_path', 'run1') run1 = x._GetAccumulator('run1') self.assertEqual(sorted(x.Runs().keys()), ['run1']) self.assertEqual(run1._path, 'run1_path') x.AddRun('run1_path', 'run1') self.assertEqual(run1, x._GetAccumulator('run1'), 'loader not recreated') x.AddRun('run2_path', 'run1') new_run1 = x._GetAccumulator('run1') self.assertEqual(new_run1._path, 'run2_path') self.assertNotEqual(run1, new_run1) x.AddRun('runName3') self.assertItemsEqual(sorted(x.Runs().keys()), ['run1', 'runName3']) self.assertEqual(x._GetAccumulator('runName3')._path, 'runName3') def testAddRunMaintainsLoading(self): x = event_multiplexer.EventMultiplexer() x.Reload() x.AddRun('run1') x.AddRun('run2') self.assertTrue(x._GetAccumulator('run1').reload_called) self.assertTrue(x._GetAccumulator('run2').reload_called) if __name__ == '__main__': googletest.main()
lukas-krecan/tensorflow
tensorflow/python/summary/event_multiplexer_test.py
Python
apache-2.0
8,901
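The test file above swaps the real EventAccumulator for a fake by reassigning a module attribute in setUp, so that EventMultiplexer builds cheap stand-ins instead of reading event files. A self-contained sketch of that substitution pattern with invented stand-in classes (none of these names come from TensorFlow):

import unittest


class RealLoader(object):
    def load(self, path):
        raise IOError('would touch disk: %s' % path)


class FakeLoader(object):
    def load(self, path):
        return 'fake-data-for-%s' % path


class registry(object):
    # Stands in for the module attribute the original test patches
    # (event_accumulator.EventAccumulator).
    loader_cls = RealLoader


def build(path):
    return registry.loader_cls().load(path)


class SubstitutionTest(unittest.TestCase):
    def setUp(self):
        self._orig = registry.loader_cls
        registry.loader_cls = FakeLoader   # same trick as the setUp above

    def tearDown(self):
        registry.loader_cls = self._orig   # restore the real class

    def test_uses_fake(self):
        self.assertEqual(build('run1'), 'fake-data-for-run1')


if __name__ == '__main__':
    unittest.main()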
""" AWS SQS platform for notify component. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/notify.aws_sqs/ """ import logging import json import voluptuous as vol from homeassistant.const import ( CONF_PLATFORM, CONF_NAME) from homeassistant.components.notify import ( ATTR_TARGET, PLATFORM_SCHEMA, BaseNotificationService) import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger(__name__) REQUIREMENTS = ["boto3==1.9.16"] CONF_REGION = 'region_name' CONF_ACCESS_KEY_ID = 'aws_access_key_id' CONF_SECRET_ACCESS_KEY = 'aws_secret_access_key' CONF_PROFILE_NAME = 'profile_name' ATTR_CREDENTIALS = 'credentials' PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Optional(CONF_REGION, default='us-east-1'): cv.string, vol.Inclusive(CONF_ACCESS_KEY_ID, ATTR_CREDENTIALS): cv.string, vol.Inclusive(CONF_SECRET_ACCESS_KEY, ATTR_CREDENTIALS): cv.string, vol.Exclusive(CONF_PROFILE_NAME, ATTR_CREDENTIALS): cv.string, }) def get_service(hass, config, discovery_info=None): """Get the AWS SQS notification service.""" import boto3 aws_config = config.copy() del aws_config[CONF_PLATFORM] del aws_config[CONF_NAME] profile = aws_config.get(CONF_PROFILE_NAME) if profile is not None: boto3.setup_default_session(profile_name=profile) del aws_config[CONF_PROFILE_NAME] sqs_client = boto3.client("sqs", **aws_config) return AWSSQS(sqs_client) class AWSSQS(BaseNotificationService): """Implement the notification service for the AWS SQS service.""" def __init__(self, sqs_client): """Initialize the service.""" self.client = sqs_client def send_message(self, message="", **kwargs): """Send notification to specified SQS ARN.""" targets = kwargs.get(ATTR_TARGET) if not targets: _LOGGER.info("At least 1 target is required") return for target in targets: cleaned_kwargs = dict((k, v) for k, v in kwargs.items() if v) message_body = {"message": message} message_body.update(cleaned_kwargs) message_attributes = {} for key, val in cleaned_kwargs.items(): message_attributes[key] = {"StringValue": json.dumps(val), "DataType": "String"} self.client.send_message(QueueUrl=target, MessageBody=json.dumps(message_body), MessageAttributes=message_attributes)
HydrelioxGitHub/home-assistant
homeassistant/components/notify/aws_sqs.py
Python
apache-2.0
2,607
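AWSSQS.send_message above turns the notification kwargs into an SQS message body plus per-key string attributes before calling boto3. A standalone sketch of just that payload construction, without an SQS client; the sample kwargs are invented:

import json


def build_sqs_payload(message, **kwargs):
    """Mirror the body/attribute construction in AWSSQS.send_message."""
    # Drop falsy values, exactly as the service does before serialising.
    cleaned_kwargs = {k: v for k, v in kwargs.items() if v}
    message_body = {"message": message}
    message_body.update(cleaned_kwargs)
    message_attributes = {
        key: {"StringValue": json.dumps(val), "DataType": "String"}
        for key, val in cleaned_kwargs.items()
    }
    return json.dumps(message_body), message_attributes


body, attrs = build_sqs_payload("Door opened", title="alarm", target=None)
print(body)   # {"message": "Door opened", "title": "alarm"}
print(attrs)  # the title attribute carries the JSON-encoded value '"alarm"'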
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2011 Openstack, LLC.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
#

"""
redhat/centos helper module
"""
prometheanfire/openstack-guest-agents-unix
commands/redhat/__init__.py
Python
apache-2.0
730
"""Hierarchy of warnings to feed back to users.""" # ============================================================================= # CONTENTS # ----------------------------------------------------------------------------- # abdt_userwarning # # Public Classes: # Base # UsedDefaultTestPlan # UnknownReviewers # SelfReviewer # LargeDiff # # ----------------------------------------------------------------------------- # (this contents block is generated, edits will be lost) # ============================================================================= from __future__ import absolute_import from __future__ import division from __future__ import print_function class Base(object): def __init__(self, message): super(Base, self).__init__() self.message = message def __repr__(self): return 'abdt_userwarning.Warning({})'.format(repr(self.message)) class UsedDefaultTestPlan(Base): def __init__(self, default_message): super(UsedDefaultTestPlan, self).__init__( 'used default message: {}'.format(default_message)) self.default_message = default_message class UnknownReviewers(Base): def __init__(self, unknown_reviewers, commit_message): super(UnknownReviewers, self).__init__( 'some specified reviewers are unknown: {}'.format( unknown_reviewers)) self.unknown_reviewers = unknown_reviewers self.commit_message = commit_message class SelfReviewer(Base): def __init__(self, user, commit_message): super(SelfReviewer, self).__init__( 'you cannot review your own change: {}\n{}'.format( user, commit_message)) self.user = user self.commit_message = commit_message class LargeDiff(Base): def __init__(self, diff_result): super(LargeDiff, self).__init__('large diff') self.diff_result = diff_result # ----------------------------------------------------------------------------- # Copyright (C) 2014 Bloomberg Finance L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ------------------------------ END-OF-FILE ----------------------------------
kjedruczyk/phabricator-tools
py/abd/abdt_userwarning.py
Python
apache-2.0
2,673
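A small usage sketch for the warning hierarchy above, assuming the module is importable as abdt_userwarning per the path shown; the reviewer names and commit message are invented for illustration.

import abdt_userwarning

warnings = [
    abdt_userwarning.UsedDefaultTestPlan('(no test plan supplied)'),
    abdt_userwarning.UnknownReviewers(
        ['alice', 'bob'],
        'Fix crash in parser\n\nReviewers: alice, bob'),
]

for warning in warnings:
    # Every warning exposes a human-readable .message via the Base class.
    print(repr(warning), '->', warning.message)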
from unittest.mock import Mock from app.master.atom import Atom, AtomState from app.master.job_config import JobConfig from app.master.subjob import Subjob from app.project_type.project_type import ProjectType from test.framework.base_unit_test_case import BaseUnitTestCase class TestSubjob(BaseUnitTestCase): def setUp(self): super().setUp() self._job_config_command = 'fake command' self._subjob = Subjob( build_id=12, subjob_id=34, project_type=Mock(spec_set=ProjectType), job_config=Mock(spec=JobConfig, command=self._job_config_command), atoms=[ Atom( 'export BREAKFAST="pancakes";', expected_time=23.4, actual_time=56.7, exit_code=1, state=AtomState.NOT_STARTED, atom_id=0, ), Atom( 'export BREAKFAST="cereal";', expected_time=89.0, actual_time=24.6, exit_code=0, state=AtomState.NOT_STARTED, atom_id=1, ), ], ) def test_subjob_constructor_sets_subjob_id_on_atoms(self): atoms = [Mock(), Mock()] Subjob(build_id=1, subjob_id=4, project_type=Mock(), job_config=Mock(), atoms=atoms) for atom in atoms: self.assertEqual(atom.subjob_id, 4) def test_api_representation_matches_expected(self): actual_api_repr = self._subjob.api_representation() expected_api_repr = { 'id': 34, 'command': self._job_config_command, 'slave': None, 'atoms': [ { 'id': 0, 'command_string': 'export BREAKFAST="pancakes";', 'expected_time': 23.4, 'actual_time': 56.7, 'exit_code': 1, 'state': 'NOT_STARTED', 'subjob_id': 34 }, { 'id': 1, 'command_string': 'export BREAKFAST="cereal";', 'expected_time': 89.0, 'actual_time': 24.6, 'exit_code': 0, 'state': 'NOT_STARTED', 'subjob_id': 34 }, ] } self.assertEqual(actual_api_repr, expected_api_repr, 'Actual api representation should match expected.') def _assert_atoms_are_in_state(self, api_repr, state_str): for atom_dict in api_repr['atoms']: self.assertEqual(atom_dict['state'], state_str) def test_mark_in_progress_marks_all_atoms_in_progress(self): self._subjob.mark_in_progress(None) actual_api_repr = self._subjob.api_representation() self._assert_atoms_are_in_state(actual_api_repr, 'IN_PROGRESS') def test_mark_completed_marks_all_atoms_completed(self): self._subjob.mark_completed() actual_api_repr = self._subjob.api_representation() self._assert_atoms_are_in_state(actual_api_repr, 'COMPLETED')
box/ClusterRunner
test/unit/master/test_subjob.py
Python
apache-2.0
3,214
def test_helloWorld():
    assert 1==1
AnsgarSchmidt/sensomatic
web/Test.py
Python
apache-2.0
38
from django.contrib import admin

from .models import Tag


class TagAdmin(admin.ModelAdmin):
    list_display = ('tag_text', 'num_addons', 'created',
                    'enable_for_random_shelf')
    ordering = ('-created',)
    search_fields = ('^tag_text',)
    readonly_fields = ('num_addons', 'created')
    list_editable = ('enable_for_random_shelf',)


admin.site.register(Tag, TagAdmin)
mozilla/addons-server
src/olympia/tags/admin.py
Python
bsd-3-clause
376
from lib.common import helpers class Stager: def __init__(self, mainMenu, params=[]): self.info = { 'Name': 'pkg', 'Author': ['@xorrior'], 'Description': ('Generates a pkg installer. The installer will copy a custom (empty) application to the /Applications folder. The postinstall script will execute an EmPyre launcher.'), 'Comments': [ '' ] } # any options needed by the stager, settable during runtime self.options = { # format: # value_name : {description, required, default_value} 'Listener' : { 'Description' : 'Listener to generate stager for.', 'Required' : True, 'Value' : '' }, 'Language' : { 'Description' : 'Language of the stager to generate.', 'Required' : True, 'Value' : 'python' }, 'AppIcon' : { 'Description' : 'Path to AppIcon.icns file. The size should be 16x16,32x32,128x128, or 256x256. Defaults to none.', 'Required' : False, 'Value' : '' }, 'AppName' : { 'Description' : 'Name of the Application Bundle. This change will reflect in the Info.plist and the name of the binary in Contents/MacOS/.', 'Required' : False, 'Value' : '' }, 'OutFile' : { 'Description' : 'File to write dmg volume to.', 'Required' : True, 'Value' : '/tmp/out.pkg' }, 'SafeChecks' : { 'Description' : 'Switch. Checks for LittleSnitch or a SandBox, exit the staging process if true. Defaults to True.', 'Required' : True, 'Value' : 'True' }, 'UserAgent' : { 'Description' : 'User-agent string to use for the staging request (default, none, or other).', 'Required' : False, 'Value' : 'default' } } # save off a copy of the mainMenu object to access external functionality # like listeners/agent handlers/etc. self.mainMenu = mainMenu for param in params: # parameter format is [Name, Value] option, value = param if option in self.options: self.options[option]['Value'] = value def generate(self): # extract all of our options language = self.options['Language']['Value'] listenerName = self.options['Listener']['Value'] userAgent = self.options['UserAgent']['Value'] SafeChecks = self.options['SafeChecks']['Value'] icnsPath = self.options['AppIcon']['Value'] AppName = self.options['AppName']['Value'] arch = 'x64' # generate the launcher code launcher = self.mainMenu.stagers.generate_launcher(listenerName, language=language, userAgent=userAgent, safeChecks=SafeChecks) if launcher == "": print helpers.color("[!] Error in launcher command generation.") return "" else: if AppName == '': AppName = "Update" Disarm=True launcherCode = launcher.strip('echo').strip(' | python &').strip("\"") ApplicationZip = self.mainMenu.stagers.generate_appbundle(launcherCode=launcherCode,Arch=arch,icon=icnsPath,AppName=AppName,disarm=Disarm) pkginstaller = self.mainMenu.stagers.generate_pkg(launcher=launcher,bundleZip=ApplicationZip,AppName=AppName) return pkginstaller
Hackplayers/Empire-mod-Hpys-tests
lib/stagers/osx/pkg.py
Python
bsd-3-clause
3,867
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)

from ... import NDData, NDIOMixin, NDDataRef

# Alias NDDataAllMixins in case this will be renamed ... :-)
NDDataIO = NDDataRef


def test_simple_write_read(tmpdir):
    ndd = NDDataIO([1, 2, 3])
    assert hasattr(ndd, 'read')
    assert hasattr(ndd, 'write')
tbabej/astropy
astropy/nddata/mixins/tests/test_ndio.py
Python
bsd-3-clause
372
########################################################################## # # Copyright (c) 2015, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided with # the distribution. # # * Neither the name of John Haddon nor the names of # any other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ########################################################################## import functools import IECore import Gaffer import GafferUI import GafferImage ## \todo Add buttons for removing existing ChannelPlugs, and for adding # extras. This is probably best done as part of a concerted effort to # support layers everywhere (we can already compute arbitrary numbers of # named channels, we just need a convention and a UI for presenting this # as layers). Gaffer.Metadata.registerNode( GafferImage.Shuffle, "description", """ Shuffles data between image channels, for instance by copying R into G or a constant white into A. """, plugs = { "channels" : [ "description", """ The definition of the shuffling to be performed - an arbitrary number of channel edits can be made by adding Shuffle.ChannelPlugs as children of this plug. 
""", "plugValueWidget:type", "GafferUI.LayoutPlugValueWidget", ], "channels.*.out" : [ "plugValueWidget:type", "GafferImageUI.ChannelPlugValueWidget", "channelPlugValueWidget:allowNewChannels", True, ], "channels.*.in" : [ "plugValueWidget:type", "GafferImageUI.ChannelPlugValueWidget", "channelPlugValueWidget:extraChannels", IECore.StringVectorData( [ "__white", "__black" ] ), "channelPlugValueWidget:extraChannelLabels", IECore.StringVectorData( [ "White", "Black" ] ), ], } ) def nodeMenuCreateCommand() : result = GafferImage.Shuffle() for channel in ( "R", "G", "B", "A" ) : result["channels"].addChild( result.ChannelPlug( channel, channel ) ) return result class _ShuffleChannelPlugValueWidget( GafferUI.PlugValueWidget ) : def __init__( self, plug, **kw ) : self.__row = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing = 4 ) GafferUI.PlugValueWidget.__init__( self, self.__row, plug, **kw ) with self.__row : GafferUI.PlugValueWidget.create( plug["out"] ) GafferUI.Image( "shuffleArrow.png" ) GafferUI.PlugValueWidget.create( plug["in"] ) def setPlug( self, plug ) : GafferUI.PlugValueWidget.setPlug( self, plug ) self.__row[0].setPlug( plug[0] ) self.__row[2].setPlug( plug[1] ) def childPlugValueWidget( self, childPlug, lazy=True ) : for w in self.__row[0], self.__row[2] : if childPlug.isSame( w.getPlug() ) : return w return None def hasLabel( self ) : return True def _updateFromPlug( self ) : pass GafferUI.PlugValueWidget.registerType( GafferImage.Shuffle.ChannelPlug, _ShuffleChannelPlugValueWidget )
appleseedhq/gaffer
python/GafferImageUI/ShuffleUI.py
Python
bsd-3-clause
4,256
# -*- coding: utf-8 -*-
from __future__ import division, absolute_import, unicode_literals

from django.contrib import admin

from .models import CaptionedFile, UncaptionedFile

admin.site.register(CaptionedFile)
admin.site.register(UncaptionedFile)
hnakamur/django-admin2
example/files/admin.py
Python
bsd-3-clause
251
########################################################################## # # Copyright (c) 2011, John Haddon. All rights reserved. # Copyright (c) 2011-2012, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided with # the distribution. # # * Neither the name of John Haddon nor the names of # any other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ########################################################################## import unittest import IECore import Gaffer import GafferUI import GafferTest import GafferUITest class NodeGadgetTest( GafferUITest.TestCase ) : def test( self ) : n = GafferTest.AddNode() g = GafferUI.NodeGadget.create( n ) self.assertEqual( n, g.node() ) self.assert_( g.nodule( n["op1"] ) ) self.assert_( g.nodule( n["op2"] ) ) self.assert_( g.nodule( n["sum"] ) ) def testDynamicPlugs( self ) : n = GafferTest.AddNode() g = GafferUI.NodeGadget.create( n ) self.assertEqual( n, g.node() ) self.assert_( g.nodule( n["op1"] ) ) self.assert_( g.nodule( n["op2"] ) ) self.assert_( g.nodule( n["sum"] ) ) d = Gaffer.FloatPlug() n["d"] = d self.assert_( g.nodule( n["op1"] ) ) self.assert_( g.nodule( n["op2"] ) ) self.assert_( g.nodule( n["sum"] ) ) self.assert_( g.nodule( d ) ) n.removeChild( d ) self.assert_( g.nodule( n["op1"] ) ) self.assert_( g.nodule( n["op2"] ) ) self.assert_( g.nodule( n["sum"] ) ) self.assert_( not g.nodule( d ) ) def testFactoryRegistration( self ) : class MyNode( Gaffer.Node ) : def __init__( self ) : Gaffer.Node.__init__( self ) IECore.registerRunTimeTyped( MyNode ) def creator( node ) : result = GafferUI.StandardNodeGadget( node ) result.getContents().setText( "lovinglyHandCraftedInCreator" ) return result GafferUI.NodeGadget.registerNodeGadget( MyNode, creator ) n = MyNode() g = GafferUI.NodeGadget.create( n ) self.failUnless( g.node() is n ) self.assertEqual( g.getContents().getText(), "lovinglyHandCraftedInCreator" ) def testFactoryMetadata( self ) : n = Gaffer.Node() self.assertTrue( isinstance( GafferUI.NodeGadget.create( n ), GafferUI.StandardNodeGadget ) ) Gaffer.Metadata.registerValue( n, "nodeGadget:type", "" ) self.assertEqual( GafferUI.NodeGadget.create( n ), None ) Gaffer.Metadata.registerValue( n, "nodeGadget:type", 
"GafferUI::StandardNodeGadget" ) self.assertTrue( isinstance( GafferUI.NodeGadget.create( n ), GafferUI.StandardNodeGadget ) ) if __name__ == "__main__": unittest.main()
appleseedhq/gaffer
python/GafferUITest/NodeGadgetTest.py
Python
bsd-3-clause
3,936
"""EntryAdmin for Zinnia""" from __future__ import unicode_literals from django.contrib import admin from django.db.models import Q from django.utils import timezone from django.contrib.sites.models import Site from django.core.urlresolvers import reverse from django.core.urlresolvers import NoReverseMatch from django.utils.html import format_html from django.utils.html import format_html_join from django.utils.html import conditional_escape from django.utils.translation import ungettext_lazy from django.utils.translation import ugettext_lazy as _ from zinnia import settings from zinnia.managers import HIDDEN from zinnia.managers import PUBLISHED from zinnia.models.author import Author from zinnia.ping import DirectoryPinger from zinnia.admin.forms import EntryAdminForm from zinnia.admin.filters import AuthorListFilter from zinnia.admin.filters import CategoryListFilter from zinnia.comparison import EntryPublishedVectorBuilder class EntryAdmin(admin.ModelAdmin): """ Admin for Entry model. """ form = EntryAdminForm date_hierarchy = 'publication_date' fieldsets = ( (_('Content'), { 'fields': (('title', 'status'), 'lead', 'content',)}), (_('Illustration'), { 'fields': ('image', 'image_caption'), 'classes': ('collapse', 'collapse-closed')}), (_('Publication'), { 'fields': ('publication_date', 'sites', ('start_publication', 'end_publication')), 'classes': ('collapse', 'collapse-closed')}), (_('Discussions'), { 'fields': ('comment_enabled', 'pingback_enabled', 'trackback_enabled'), 'classes': ('collapse', 'collapse-closed')}), (_('Privacy'), { 'fields': ('login_required', 'password'), 'classes': ('collapse', 'collapse-closed')}), (_('Templates'), { 'fields': ('content_template', 'detail_template'), 'classes': ('collapse', 'collapse-closed')}), (_('Metadatas'), { 'fields': ('featured', 'excerpt', 'authors', 'related'), 'classes': ('collapse', 'collapse-closed')}), (None, {'fields': ('categories', 'tags', 'slug')})) list_filter = (CategoryListFilter, AuthorListFilter, 'publication_date', 'sites', 'status') list_display = ('get_title', 'get_authors', 'get_categories', 'get_tags', 'get_sites', 'get_is_visible', 'featured', 'get_short_url', 'publication_date') radio_fields = {'content_template': admin.VERTICAL, 'detail_template': admin.VERTICAL} filter_horizontal = ('categories', 'authors', 'related') prepopulated_fields = {'slug': ('title', )} search_fields = ('title', 'excerpt', 'content', 'tags') actions = ['make_mine', 'make_published', 'make_hidden', 'close_comments', 'close_pingbacks', 'close_trackbacks', 'ping_directories', 'put_on_top', 'mark_featured', 'unmark_featured'] actions_on_top = True actions_on_bottom = True def __init__(self, model, admin_site): self.form.admin_site = admin_site super(EntryAdmin, self).__init__(model, admin_site) # Custom Display def get_title(self, entry): """ Return the title with word count and number of comments. """ title = _('%(title)s (%(word_count)i words)') % \ {'title': entry.title, 'word_count': entry.word_count} reaction_count = int(entry.comment_count + entry.pingback_count + entry.trackback_count) if reaction_count: return ungettext_lazy( '%(title)s (%(reactions)i reaction)', '%(title)s (%(reactions)i reactions)', reaction_count) % \ {'title': title, 'reactions': reaction_count} return title get_title.short_description = _('title') def get_authors(self, entry): """ Return the authors in HTML. 
""" try: return format_html_join( ', ', '<a href="{}" target="blank">{}</a>', [(author.get_absolute_url(), getattr(author, author.USERNAME_FIELD)) for author in entry.authors.all()]) except NoReverseMatch: return ', '.join( [conditional_escape(getattr(author, author.USERNAME_FIELD)) for author in entry.authors.all()]) get_authors.allow_tags = True get_authors.short_description = _('author(s)') def get_categories(self, entry): """ Return the categories linked in HTML. """ try: return format_html_join( ', ', '<a href="{}" target="blank">{}</a>', [(category.get_absolute_url(), category.title) for category in entry.categories.all()]) except NoReverseMatch: return ', '.join([conditional_escape(category.title) for category in entry.categories.all()]) get_categories.allow_tags = True get_categories.short_description = _('category(s)') def get_tags(self, entry): """ Return the tags linked in HTML. """ try: return format_html_join( ', ', '<a href="{}" target="blank">{}</a>', [(reverse('zinnia:tag_detail', args=[tag]), tag) for tag in entry.tags_list]) except NoReverseMatch: return conditional_escape(entry.tags) get_tags.allow_tags = True get_tags.short_description = _('tag(s)') def get_sites(self, entry): """ Return the sites linked in HTML. """ try: index_url = reverse('zinnia:entry_archive_index') except NoReverseMatch: index_url = '' return format_html_join( ', ', '<a href="{}://{}{}" target="blank">{}</a>', [(settings.PROTOCOL, site.domain, index_url, conditional_escape(site.name)) for site in entry.sites.all()]) get_sites.allow_tags = True get_sites.short_description = _('site(s)') def get_short_url(self, entry): """ Return the short url in HTML. """ try: short_url = entry.short_url except NoReverseMatch: short_url = entry.get_absolute_url() return format_html('<a href="{url}" target="blank">{url}</a>', url=short_url) get_short_url.allow_tags = True get_short_url.short_description = _('short url') def get_is_visible(self, entry): """ Admin wrapper for entry.is_visible. """ return entry.is_visible get_is_visible.boolean = True get_is_visible.short_description = _('is visible') # Custom Methods def get_queryset(self, request): """ Make special filtering by user's permissions. """ if not request.user.has_perm('zinnia.can_view_all'): queryset = self.model.objects.filter(authors__pk=request.user.pk) else: queryset = super(EntryAdmin, self).get_queryset(request) return queryset.prefetch_related('categories', 'authors', 'sites') def get_changeform_initial_data(self, request): """ Provide initial datas when creating an entry. """ get_data = super(EntryAdmin, self).get_changeform_initial_data(request) return get_data or { 'sites': [Site.objects.get_current().pk], 'authors': [request.user.pk] } def formfield_for_manytomany(self, db_field, request, **kwargs): """ Filter the disposable authors. """ if db_field.name == 'authors': kwargs['queryset'] = Author.objects.filter( Q(is_staff=True) | Q(entries__isnull=False) ).distinct() return super(EntryAdmin, self).formfield_for_manytomany( db_field, request, **kwargs) def get_readonly_fields(self, request, obj=None): """ Return readonly fields by user's permissions. """ readonly_fields = list(super(EntryAdmin, self).get_readonly_fields( request, obj)) if not request.user.has_perm('zinnia.can_change_status'): readonly_fields.append('status') if not request.user.has_perm('zinnia.can_change_author'): readonly_fields.append('authors') return readonly_fields def get_actions(self, request): """ Define actions by user's permissions. 
""" actions = super(EntryAdmin, self).get_actions(request) if not actions: return actions if (not request.user.has_perm('zinnia.can_change_author') or not request.user.has_perm('zinnia.can_view_all')): del actions['make_mine'] if not request.user.has_perm('zinnia.can_change_status'): del actions['make_hidden'] del actions['make_published'] if not settings.PING_DIRECTORIES: del actions['ping_directories'] return actions # Custom Actions def make_mine(self, request, queryset): """ Set the entries to the current user. """ author = Author.objects.get(pk=request.user.pk) for entry in queryset: if author not in entry.authors.all(): entry.authors.add(author) self.message_user( request, _('The selected entries now belong to you.')) make_mine.short_description = _('Set the entries to the user') def make_published(self, request, queryset): """ Set entries selected as published. """ queryset.update(status=PUBLISHED) EntryPublishedVectorBuilder().cache_flush() self.ping_directories(request, queryset, messages=False) self.message_user( request, _('The selected entries are now marked as published.')) make_published.short_description = _('Set entries selected as published') def make_hidden(self, request, queryset): """ Set entries selected as hidden. """ queryset.update(status=HIDDEN) EntryPublishedVectorBuilder().cache_flush() self.message_user( request, _('The selected entries are now marked as hidden.')) make_hidden.short_description = _('Set entries selected as hidden') def close_comments(self, request, queryset): """ Close the comments for selected entries. """ queryset.update(comment_enabled=False) self.message_user( request, _('Comments are now closed for selected entries.')) close_comments.short_description = _('Close the comments for ' 'selected entries') def close_pingbacks(self, request, queryset): """ Close the pingbacks for selected entries. """ queryset.update(pingback_enabled=False) self.message_user( request, _('Pingbacks are now closed for selected entries.')) close_pingbacks.short_description = _( 'Close the pingbacks for selected entries') def close_trackbacks(self, request, queryset): """ Close the trackbacks for selected entries. """ queryset.update(trackback_enabled=False) self.message_user( request, _('Trackbacks are now closed for selected entries.')) close_trackbacks.short_description = _( 'Close the trackbacks for selected entries') def put_on_top(self, request, queryset): """ Put the selected entries on top at the current date. """ queryset.update(publication_date=timezone.now()) self.ping_directories(request, queryset, messages=False) self.message_user(request, _( 'The selected entries are now set at the current date.')) put_on_top.short_description = _( 'Put the selected entries on top at the current date') def mark_featured(self, request, queryset): """ Mark selected as featured post. """ queryset.update(featured=True) self.message_user( request, _('Selected entries are now marked as featured.')) mark_featured.short_description = _('Mark selected entries as featured') def unmark_featured(self, request, queryset): """ Un-Mark selected featured posts. """ queryset.update(featured=False) self.message_user( request, _('Selected entries are no longer marked as featured.')) unmark_featured.short_description = _( 'Unmark selected entries as featured') def ping_directories(self, request, queryset, messages=True): """ Ping web directories for selected entries. 
""" for directory in settings.PING_DIRECTORIES: pinger = DirectoryPinger(directory, queryset) pinger.join() if messages: success = 0 for result in pinger.results: if not result.get('flerror', True): success += 1 else: self.message_user(request, '%s : %s' % (directory, result['message'])) if success: self.message_user( request, _('%(directory)s directory succesfully ' 'pinged %(success)d entries.') % {'directory': directory, 'success': success}) ping_directories.short_description = _( 'Ping Directories for selected entries')
aorzh/django-blog-zinnia
zinnia/admin/entry.py
Python
bsd-3-clause
13,945
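A minimal sketch of how an admin class like the one in the record above is typically wired up in a Django project. This is illustrative only and not part of the dataset record; the `Entry` import path is an assumption based on Zinnia's usual layout.

# Hypothetical wiring of the EntryAdmin from the record above.
from django.contrib import admin

from zinnia.models.entry import Entry          # assumed import path
from zinnia.admin.entry import EntryAdmin      # module shown in the record

admin.site.register(Entry, EntryAdmin)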
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import, division, print_function

INCLUDES = """
#include <openssl/conf.h>
"""

TYPES = """
typedef ... CONF;
"""

FUNCTIONS = """
void OPENSSL_config(const char *);
void OPENSSL_no_config(void);
"""

MACROS = """
"""

CUSTOMIZATIONS = """
"""

CONDITIONAL_NAMES = {}
rhurkes/chasegame
venv/lib/python2.7/site-packages/cryptography/hazmat/bindings/openssl/conf.py
Python
mit
846
from __future__ import print_function

import time

start = time.clock()

count = 0
for i in range(0, 1000000):
    if "abc" == "abc":
        count = count + 1
    if "a slightly longer string" == \
       "a slightly longer string":
        count = count + 1
    if "a significantly longer string but still not overwhelmingly long string" == \
       "a significantly longer string but still not overwhelmingly long string":
        count = count + 1
    if "" == "abc":
        count = count + 1
    if "abc" == "abcd":
        count = count + 1
    if "changed one character" == "changed !ne character":
        count = count + 1
    if "123" == 123:
        count = count + 1
    if "a slightly longer string" == \
       "a slightly longer string!":
        count = count + 1
    if "a slightly longer string" == \
       "a slightly longer strinh":
        count = count + 1
    if "a significantly longer string but still not overwhelmingly long string" == \
       "another":
        count = count + 1

print(count)
print("elapsed: " + str(time.clock() - start))
foresterre/wren
test/benchmark/string_equals.py
Python
mit
1,008
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .azure_ml_web_services_management_client import AzureMLWebServicesManagementClient
from .version import VERSION

__all__ = ['AzureMLWebServicesManagementClient']

__version__ = VERSION
SUSE/azure-sdk-for-python
unreleased/azure-mgmt-machinelearning/azure/mgmt/machinelearning/__init__.py
Python
mit
665
# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2

from __future__ import print_function

import signal
import sys

from portage.output import bold, create_color_func


def userquery(prompt, enter_invalid, responses=None, colours=None):
    """Displays a prompt and a set of responses, then waits for a response
    which is checked against the responses and the first to match is
    returned. An empty response will match the first value in responses,
    unless enter_invalid is True. The input buffer is *not* cleared prior
    to the prompt!

    prompt: a String.
    responses: a List of Strings.
    colours: a List of Functions taking and returning a String, used to
    process the responses for display. Typically these will be functions
    like red() but could be e.g. lambda x: "DisplayString".

    If responses is omitted, defaults to ["Yes", "No"], [green, red].
    If only colours is omitted, defaults to [bold, ...].

    Returns a member of the List responses. (If called without optional
    arguments, returns "Yes" or "No".)

    KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
    printed."""
    if responses is None:
        responses = ["Yes", "No"]
        colours = [
            create_color_func("PROMPT_CHOICE_DEFAULT"),
            create_color_func("PROMPT_CHOICE_OTHER")
        ]
    elif colours is None:
        colours = [bold]
    colours = (colours * len(responses))[:len(responses)]
    print(bold(prompt), end=' ')
    try:
        while True:
            if sys.hexversion >= 0x3000000:
                response = input("[" + "/".join([colours[i](responses[i]) for i in range(len(responses))]) + "] ")
            else:
                response = raw_input("[" + "/".join([colours[i](responses[i]) for i in range(len(responses))]) + "] ")
            if response or not enter_invalid:
                for key in responses:
                    # An empty response will match the
                    # first value in responses.
                    if response.upper() == key[:len(response)].upper():
                        return key
            print("Sorry, response '%s' not understood." % response, end=' ')
    except (EOFError, KeyboardInterrupt):
        print("Interrupted.")
        sys.exit(128 + signal.SIGINT)
prometheanfire/portage
pym/_emerge/userquery.py
Python
gpl-2.0
2,062
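A small usage sketch for the userquery helper in the record above. The prompt text and the handling of the answer are illustrative assumptions; the import path follows the module path shown in the record and assumes the code runs inside a portage checkout.

# Illustrative only: ask the user whether to continue, accepting an empty
# answer as the first (default) response, i.e. "Yes".
from _emerge.userquery import userquery

answer = userquery("Would you like to continue?", enter_invalid=False)
if answer == "No":
    print("Quitting.")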
#
# Copyright 2013 Intel Corp.
#
# Author: Yunhong Jiang <yunhong.jiang@intel.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc
import collections

import six
from stevedore import extension


class TransformerExtensionManager(extension.ExtensionManager):

    def __init__(self, namespace):
        super(TransformerExtensionManager, self).__init__(
            namespace=namespace,
            invoke_on_load=False,
            invoke_args=(),
            invoke_kwds={}
        )
        self.by_name = dict((e.name, e) for e in self.extensions)

    def get_ext(self, name):
        return self.by_name[name]


@six.add_metaclass(abc.ABCMeta)
class TransformerBase(object):
    """Base class for plugins that transform the sample."""

    def __init__(self, **kwargs):
        """Setup transformer.

        Each time a transformed is involved in a pipeline, a new transformer
        instance is created and chained into the pipeline. i.e. transformer
        instance is per pipeline. This helps if transformer need keep some
        cache and per-pipeline information.

        :param kwargs: The parameters that are defined in pipeline config file.
        """
        super(TransformerBase, self).__init__()

    @abc.abstractmethod
    def handle_sample(self, context, sample):
        """Transform a sample.

        :param context: Passed from the data collector.
        :param sample: A sample.
        """

    def flush(self, context):
        """Flush samples cached previously.

        :param context: Passed from the data collector.
        """
        return []


class Namespace(object):
    """Encapsulates the namespace.

    Encapsulation is done by wrapping the evaluation of the configured rule.
    This allows nested dicts to be accessed in the attribute style, and
    missing attributes to yield false when used in a boolean expression.
    """

    def __init__(self, seed):
        self.__dict__ = collections.defaultdict(lambda: Namespace({}))
        self.__dict__.update(seed)
        for k, v in six.iteritems(self.__dict__):
            if isinstance(v, dict):
                self.__dict__[k] = Namespace(v)

    def __getattr__(self, attr):
        return self.__dict__[attr]

    def __getitem__(self, key):
        return self.__dict__[key]

    def __nonzero__(self):
        return len(self.__dict__) > 0
ChinaMassClouds/copenstack-server
openstack/src/ceilometer-2014.2.2/ceilometer/transformer/__init__.py
Python
gpl-2.0
2,862
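A minimal sketch of a concrete transformer built on the TransformerBase class in the record above. The scaling behaviour, the class name and the assumption that samples expose a `volume` attribute are illustrative and not taken from Ceilometer's actual plugins.

# Hypothetical subclass implementing the abstract handle_sample() hook.
from ceilometer.transformer import TransformerBase


class ScalingTransformer(TransformerBase):
    """Illustrative transformer that multiplies each sample's volume."""

    def __init__(self, scale=1.0, **kwargs):
        super(ScalingTransformer, self).__init__(**kwargs)
        self.scale = scale

    def handle_sample(self, context, sample):
        # 'volume' is an assumed field; real samples carry more attributes.
        sample.volume = sample.volume * self.scale
        return sample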
#!/usr/bin/env python
data = '../data/fm_train_real.dat'

parameter_list = [[data]]

def converter_factoranalysis(data_fname):
    try:
        import numpy
        from shogun import RealFeatures, FactorAnalysis, EuclideanDistance, CSVFile

        features = RealFeatures(CSVFile(data_fname))

        converter = FactorAnalysis()
        converter.set_target_dim(2)
        embedding = converter.apply(features)

        X = embedding.get_feature_matrix()
        covdet = numpy.linalg.det(numpy.dot(X,X.T))

        return covdet > 0
    except ImportError:
        print('No Eigen3 available')

if __name__=='__main__':
    print('Factor Analysis')
    converter_factoranalysis(*parameter_list[0])
cfjhallgren/shogun
examples/undocumented/python/converter_factoranalysis.py
Python
gpl-3.0
630
######################################################################## # $HeadURL $ # File: FTSStrategy.py # Author: Krzysztof.Ciba@NOSPAMgmail.com # Date: 2013/04/12 13:12:07 ######################################################################## """ :mod: FTSStrategy ================= .. module: FTSStrategy :synopsis: replication strategy for FTS transfers .. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com replication strategy for all FTS transfers """ __RCSID__ = "$Id: $" # # # @file FTSStrategy.py # @author Krzysztof.Ciba@NOSPAMgmail.com # @date 2013/04/12 13:12:20 # @brief Definition of FTSStrategy class. # # imports import random # # from DIRAC from DIRAC import gLogger, gConfig, S_OK, S_ERROR from DIRAC.Core.Utilities.DIRACSingleton import DIRACSingleton from DIRAC.Core.Utilities.LockRing import LockRing # from DIRAC.ConfigurationSystem.Client.Helpers.Resources import Resources # # from DMS from DIRAC.DataManagementSystem.Client.FTSJob import FTSJob from DIRAC.DataManagementSystem.private.FTSHistoryView import FTSHistoryView from DIRAC.DataManagementSystem.private.FTS2.FTS2Graph import FTS2Graph ######################################################################## class FTS2Strategy( object ): """ .. class:: FTSStrategy helper class to create replication forest for a given file and it's replicas using several different strategies """ # # make it singleton __metaclass__ = DIRACSingleton # # list of supported strategies __supportedStrategies = [ 'Simple', 'DynamicThroughput', 'Swarm', 'MinimiseTotalWait' ] # # FTS graph ftsGraph = None # # lock __graphLock = None # # resources __resources = None # # rss client __rssClient = None # # acceptable failure rate acceptableFailureRate = 75 # # acceptable failed files acceptableFailedFiles = 5 # # scheduling type schedulingType = "Files" def __init__( self, csPath = None, ftsSites = None, ftsHistoryViews = None ): """c'tor :param self: self reference :param str csPath: CS path :param list ftsSites: list of FTSSites :param list ftsHistoryViews: list of FTSHistoryViews """ # ## config path self.csPath = csPath # # fts sites ftsSites = ftsSites if ftsSites else [] # # history views ftsHistoryViews = ftsHistoryViews if ftsHistoryViews else [] # # own sub logger self.log = gLogger.getSubLogger( "FTSStrategy", child = True ) self.log.setLevel( gConfig.getValue( self.csPath + "/LogLevel", "DEBUG" ) ) for ftsSite in ftsSites: self.log.info( "FTSSite: %-16s FTSServer=%s" % ( ftsSite.Name, ftsSite.FTSServer ) ) # # CS options self.log.info( "Supported strategies = %s" % ", ".join( self.supportedStrategies ) ) self.activeStrategies = gConfig.getValue( "%s/%s" % ( self.csPath, "ActiveStrategies" ), ["MinimiseTotalWait"] ) self.log.info( "ActiveStrategies = %s" % ", ".join( self.activeStrategies ) ) self.numberOfStrategies = len( self.activeStrategies ) self.log.info( "Number of active strategies = %s" % self.numberOfStrategies ) self.sigma = gConfig.getValue( "%s/%s" % ( self.csPath, "HopSigma" ), 5 ) self.log.info( "HopSigma = %s" % self.sigma ) self.schedulingType = gConfig.getValue( "%s/%s" % ( self.csPath, "SchedulingType" ), "Files" ) self.log.info( "SchedulingType = %s" % self.schedulingType ) self.acceptableFailureRate = gConfig.getValue( "%s/%s" % ( self.csPath, "AcceptableFailureRate" ), 75 ) self.log.info( "AcceptableFailureRate = %s" % self.acceptableFailureRate ) self.acceptableFailedFiles = gConfig.getValue( "%s/%s" % ( self.csPath, "AcceptableFailedFiles" ), 5 ) self.log.info( "AcceptableFailedFiles = %s" % self.acceptableFailedFiles ) 
# # chosen strategy self.chosenStrategy = 0 # dispatcher self.strategyDispatcher = { "MinimiseTotalWait" : self.minimiseTotalWait, "DynamicThroughput" : self.dynamicThroughput, "Simple" : self.simple, "Swarm" : self.swarm } self.ftsGraph = FTS2Graph( "FTSGraph", ftsHistoryViews, self.acceptableFailureRate, self.acceptableFailedFiles, self.schedulingType ) # for node in self.ftsGraph.nodes(): # self.log.debug( node ) # for edge in self.ftsGraph.edges(): # self.log.debug( edge ) # # if we land here everything is OK self.log.info( "%s has been constructed" % self.__class__.__name__ ) @classmethod def graphLock( cls ): """ get graph lock """ if not cls.__graphLock: cls.__graphLock = LockRing().getLock( "FTSGraphLock" ) return cls.__graphLock @classmethod def resetGraph( cls, ftsHistoryViews ): """ reset graph :param list ftsHistoryViews: list of FTSHistoryViews """ ftsGraph = None try: cls.graphLock().acquire() ftsGraph = FTS2Graph( "FTSGraph", ftsHistoryViews, cls.acceptableFailureRate, cls.acceptableFailedFiles, cls.schedulingType ) if ftsGraph: cls.ftsGraph = ftsGraph finally: cls.graphLock().release() return S_OK() def updateRWAccess( self ): """ update RW access in FTS graph """ updateRWAccess = S_OK() try: self.graphLock().acquire() updateRWAccess = self.ftsGraph.updateRWAccess() if not updateRWAccess["OK"]: self.log.error( updateRWAccess["Message"] ) finally: self.graphLock().release() return updateRWAccess def addTreeToGraph( self, replicationTree = None, size = 0.0 ): """ update rw access for nodes (sites) and size anf files for edges (channels) """ replicationTree = replicationTree if replicationTree else {} size = size if size else 0.0 if replicationTree: try: self.graphLock().acquire() for route in self.ftsGraph.edges(): if route.routeName in replicationTree: route.WaitingSize += size route.WaitingFiles += 1 finally: self.graphLock().release() return S_OK() def simple( self, sourceSEs, targetSEs ): """ simple strategy - one source, many targets :param list sourceSEs: list with only one sourceSE name :param list targetSEs: list with target SE names :param str lfn: logical file name """ # # make targetSEs list unique if len( sourceSEs ) != 1: return S_ERROR( "simple: wrong argument supplied for sourceSEs, only one sourceSE allowed" ) sourceSE = sourceSEs[0] tree = {} for targetSE in targetSEs: route = self.ftsGraph.findRoute( sourceSE, targetSE ) if not route["OK"]: return S_ERROR( route["Message"] ) route = route["Value"] if not route.fromNode.SEs[sourceSE]["read"]: return S_ERROR( "simple: sourceSE '%s' in banned for reading right now" % sourceSE ) if not route.toNode.SEs[targetSE]["write"]: return S_ERROR( "simple: targetSE '%s' is banned for writing right now" % targetSE ) if route.name in tree: return S_ERROR( "simple: unable to create replication tree, route '%s' cannot be used twice" % \ route.name ) tree[route.name] = { "Ancestor" : False, "SourceSE" : sourceSE, "TargetSE" : targetSE, "Strategy" : "Simple" } return S_OK( tree ) def swarm( self, sourceSEs, targetSEs ): """ swarm strategy - one target, many sources, pick up the fastest :param list sourceSEs: list of source SE :param str targetSEs: on element list with name of target SE :param str lfn: logical file name """ tree = {} routes = [] if len( targetSEs ) > 1: return S_ERROR( "swarm: wrong argument supplied for targetSEs, only one targetSE allowed" ) targetSE = targetSEs[0] # # find channels for sourceSE in sourceSEs: route = self.ftsGraph.findRoute( sourceSE, targetSE ) if not route["OK"]: self.log.warn( "swarm: %s" % 
route["Message"] ) continue routes.append( ( sourceSE, route["Value"] ) ) # # exit - no channels if not routes: return S_ERROR( "swarm: unable to find FTS routes between '%s' and '%s'" % ( ",".join( sourceSEs ), targetSE ) ) # # filter out non active channels routes = [ ( sourceSE, route ) for sourceSE, route in routes if route.fromNode.SEs[sourceSE]["read"] and route.toNode.SEs[targetSE]["write"] and route.timeToStart < float( "inf" ) ] # # exit - no active channels if not routes: return S_ERROR( "swarm: no active routes found between %s and %s" % ( sourceSEs, targetSE ) ) # # find min timeToStart minTimeToStart = float( "inf" ) selSourceSE = selRoute = None for sourceSE, route in routes: if route.timeToStart < minTimeToStart: minTimeToStart = route.timeToStart selSourceSE = sourceSE selRoute = route if not selSourceSE: return S_ERROR( "swarm: no active routes found between %s and %s" % ( sourceSEs, targetSE ) ) tree[selRoute.name] = { "Ancestor" : False, "SourceSE" : selSourceSE, "TargetSE" : targetSE, "Strategy" : "Swarm" } return S_OK( tree ) def minimiseTotalWait( self, sourceSEs, targetSEs ): """ find dag minimizing start time :param list sourceSEs: list of avialable source SEs :param list targetSEs: list of target SEs :param str lfn: logical file name """ tree = {} primarySources = sourceSEs while targetSEs: minTimeToStart = float( "inf" ) channels = [] self.log.verbose( "minimiseTotalWait: searching routes between %s and %s" % ( ",".join( sourceSEs ), ",".join( targetSEs ) ) ) for targetSE in targetSEs: for sourceSE in sourceSEs: ftsChannel = self.ftsGraph.findRoute( sourceSE, targetSE ) if not ftsChannel["OK"]: self.log.warn( "minimiseTotalWait: %s" % ftsChannel["Message"] ) continue ftsChannel = ftsChannel["Value"] channels.append( ( ftsChannel, sourceSE, targetSE ) ) if not channels: msg = "minimiseTotalWait: FTS route between these SEs are not defined" self.log.error( msg ) return S_ERROR( msg ) self.log.verbose( "minimiseTotalWait: found %s candidate routes, checking RSS status" % len( channels ) ) for ch, s, t in channels: self.log.verbose( "%s %s %s" % ( ch.routeName, ch.fromNode.SEs[s]["read"], ch.toNode.SEs[t]["write"] ) ) channels = [ ( channel, sourceSE, targetSE ) for channel, sourceSE, targetSE in channels if channel.fromNode.SEs[sourceSE]["read"] and channel.toNode.SEs[targetSE]["write"] and channel.timeToStart < float( "inf" ) ] if not channels: self.log.error( "minimiseTotalWait: no active FTS routes found" ) return S_ERROR( "minimiseTotalWait: no active FTS routes found" ) candidates = [] for channel, sourceSE, targetSE in channels: timeToStart = channel.timeToStart if sourceSE not in primarySources: timeToStart += self.sigma # # local found if channel.fromNode == channel.toNode: self.log.debug( "minimiseTotalWait: found local route '%s'" % channel.routeName ) candidates = [ ( channel, sourceSE, targetSE ) ] break if timeToStart <= minTimeToStart: minTimeToStart = timeToStart candidates = [ ( channel, sourceSE, targetSE ) ] elif timeToStart == minTimeToStart: candidates.append( ( channel, sourceSE, targetSE ) ) if not candidates: return S_ERROR( "minimiseTotalWait: unable to find candidate FTS routes minimizing total wait time" ) random.shuffle( candidates ) selChannel, selSourceSE, selTargetSE = candidates[0] ancestor = False for routeName, treeItem in tree.items(): if selSourceSE in treeItem["TargetSE"]: ancestor = treeItem["TargetSE"] tree[selChannel.routeName] = { "Ancestor" : ancestor, "SourceSE" : selSourceSE, "TargetSE" : selTargetSE, "Strategy" : 
"MinimiseTotalWait" } sourceSEs.append( selTargetSE ) targetSEs.remove( selTargetSE ) return S_OK( tree ) def dynamicThroughput( self, sourceSEs, targetSEs ): """ dynamic throughput - many sources, many targets - find dag minimizing overall throughput :param list sourceSEs: list of available source SE names :param list targetSE: list of target SE names :param str lfn: logical file name """ tree = {} primarySources = sourceSEs timeToSite = {} while targetSEs: minTimeToStart = float( "inf" ) channels = [] for targetSE in targetSEs: for sourceSE in sourceSEs: ftsChannel = self.ftsGraph.findRoute( sourceSE, targetSE ) if not ftsChannel["OK"]: self.log.warn( "dynamicThroughput: %s" % ftsChannel["Message"] ) continue ftsChannel = ftsChannel["Value"] channels.append( ( ftsChannel, sourceSE, targetSE ) ) # # no candidate channels found if not channels: msg = "dynamicThroughput: FTS routes between %s and %s are not defined" % ( ",".join( sourceSEs ), ",".join( targetSEs ) ) self.log.error( msg ) return S_ERROR( msg ) # # filter out already used channels channels = [ ( channel, sourceSE, targetSE ) for channel, sourceSE, targetSE in channels if channel.routeName not in tree ] if not channels: msg = "dynamicThroughput: all FTS routes between %s and %s are already used in tree" % ( ",".join( sourceSEs ), ",".join( targetSEs ) ) self.log.error( msg ) return S_ERROR( msg ) # # filter out non-active channels self.log.debug( "dynamicThroughput: found %s candidate routes, checking activity" % len( channels ) ) channels = [ ( channel, sourceSE, targetSE ) for channel, sourceSE, targetSE in channels if channel.fromNode.SEs[sourceSE]["read"] and channel.toNode.SEs[targetSE]["write"] and channel.timeToStart < float( "inf" ) ] if not channels: self.log.warn( "dynamicThroughput: active candidate routes not found" ) return S_ERROR( "dynamicThroughput: no active candidate FTS routes found" ) candidates = [] selTimeToStart = None for channel, sourceSE, targetSE in channels: timeToStart = channel.timeToStart if sourceSE not in primarySources: timeToStart += self.sigma if sourceSE in timeToSite: timeToStart += timeToSite[sourceSE] # # local found if channel.fromNode == channel.toNode: self.log.debug( "dynamicThroughput: found local route '%s'" % channel.channelName ) candidates = [ ( channel, sourceSE, targetSE ) ] selTimeToStart = timeToStart break if timeToStart <= minTimeToStart: selTimeToStart = timeToStart minTimeToStart = timeToStart candidates = [ ( channel, sourceSE, targetSE ) ] elif timeToStart == minTimeToStart: candidates.append( ( channel, sourceSE, targetSE ) ) if not candidates: return S_ERROR( "dynamicThroughput: unable to find candidate FTS routes" ) random.shuffle( candidates ) selChannel, selSourceSE, selTargetSE = candidates[0] ancestor = False for routeName, treeItem in tree.items(): if selSourceSE in treeItem["TargetSE"]: ancestor = treeItem["TargetSE"] tree[selChannel.name] = { "Ancestor": ancestor, "SourceSE": selSourceSE, "TargetSE": selTargetSE, "Strategy": "DynamicThroughput" } timeToSite[selTargetSE] = selTimeToStart sourceSEs.append( selTargetSE ) targetSEs.remove( selTargetSE ) return S_OK( tree ) def reset( self ): """ reset :chosenStrategy: :param self: self reference """ self.chosenStrategy = 0 @property def supportedStrategies( self ): """ Get supported strategies. 
:param self: self reference """ return self.__supportedStrategies def replicationTree( self, sourceSEs, targetSEs, size, strategy = None ): """ get replication tree :param str lfn: LFN :param list sourceSEs: list of sources SE names to use :param list targetSEs: list of target SE names to use :param long size: file size :param str strategy: strategy name """ # # get strategy strategy = strategy if strategy else self.__selectStrategy() if strategy not in self.activeStrategies: return S_ERROR( "replicationTree: inactive or unsupported strategy '%s'" % strategy ) self.log.verbose( "replicationTree: strategy=%s sourceSEs=%s targetSEs=%s size=%s" % \ ( strategy, sourceSEs, targetSEs, size ) ) # # fire action from dispatcher tree = self.strategyDispatcher[strategy]( sourceSEs, targetSEs ) if not tree["OK"]: self.log.error( "replicationTree: %s" % tree["Message"] ) return tree # # update graph edges self.log.verbose( "replicationTree: %s" % tree["Value"] ) update = self.addTreeToGraph( replicationTree = tree["Value"], size = size ) if not update["OK"]: self.log.error( "replicationTree: unable to update FTS graph: %s" % update["Message"] ) return update return tree def __selectStrategy( self ): """ If more than one active strategy use one after the other. :param self: self reference """ chosenStrategy = self.activeStrategies[self.chosenStrategy] self.chosenStrategy += 1 if self.chosenStrategy == self.numberOfStrategies: self.chosenStrategy = 0 return chosenStrategy
vmendez/DIRAC
DataManagementSystem/private/FTS2/FTS2Strategy.py
Python
gpl-3.0
18,293
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. from odoo import api, fields, models, _, tools class MrpRoutingWorkcenter(models.Model): _name = 'mrp.routing.workcenter' _description = 'Work Center Usage' _order = 'bom_id, sequence, id' _check_company_auto = True name = fields.Char('Operation', required=True) active = fields.Boolean(default=True) workcenter_id = fields.Many2one('mrp.workcenter', 'Work Center', required=True, check_company=True) sequence = fields.Integer( 'Sequence', default=100, help="Gives the sequence order when displaying a list of routing Work Centers.") bom_id = fields.Many2one( 'mrp.bom', 'Bill of Material', index=True, ondelete='cascade', required=True, check_company=True, help="The Bill of Material this operation is linked to") company_id = fields.Many2one('res.company', 'Company', related='bom_id.company_id') worksheet_type = fields.Selection([ ('pdf', 'PDF'), ('google_slide', 'Google Slide'), ('text', 'Text')], string="Work Sheet", default="text", help="Defines if you want to use a PDF or a Google Slide as work sheet." ) note = fields.Html('Description', help="Text worksheet description") worksheet = fields.Binary('PDF') worksheet_google_slide = fields.Char('Google Slide', help="Paste the url of your Google Slide. Make sure the access to the document is public.") time_mode = fields.Selection([ ('auto', 'Compute based on tracked time'), ('manual', 'Set duration manually')], string='Duration Computation', default='manual') time_mode_batch = fields.Integer('Based on', default=10) time_computed_on = fields.Char('Computed on last', compute='_compute_time_computed_on') time_cycle_manual = fields.Float( 'Manual Duration', default=60, help="Time in minutes:" "- In manual mode, time used" "- In automatic mode, supposed first time when there aren't any work orders yet") time_cycle = fields.Float('Duration', compute="_compute_time_cycle") workorder_count = fields.Integer("# Work Orders", compute="_compute_workorder_count") workorder_ids = fields.One2many('mrp.workorder', 'operation_id', string="Work Orders") possible_bom_product_template_attribute_value_ids = fields.Many2many(related='bom_id.possible_product_template_attribute_value_ids') bom_product_template_attribute_value_ids = fields.Many2many( 'product.template.attribute.value', string="Apply on Variants", ondelete='restrict', domain="[('id', 'in', possible_bom_product_template_attribute_value_ids)]", help="BOM Product Variants needed to apply this line.") @api.depends('time_mode', 'time_mode_batch') def _compute_time_computed_on(self): for operation in self: operation.time_computed_on = _('%i work orders') % operation.time_mode_batch if operation.time_mode != 'manual' else False @api.depends('time_cycle_manual', 'time_mode', 'workorder_ids') def _compute_time_cycle(self): manual_ops = self.filtered(lambda operation: operation.time_mode == 'manual') for operation in manual_ops: operation.time_cycle = operation.time_cycle_manual for operation in self - manual_ops: data = self.env['mrp.workorder'].search([ ('operation_id', '=', operation.id), ('qty_produced', '>', 0), ('state', '=', 'done')], limit=operation.time_mode_batch, order="date_finished desc") # To compute the time_cycle, we can take the total duration of previous operations # but for the quantity, we will take in consideration the qty_produced like if the capacity was 1. 
# So producing 50 in 00:10 with capacity 2, for the time_cycle, we assume it is 25 in 00:10 # When recomputing the expected duration, the capacity is used again to divide the qty to produce # so that if we need 50 with capacity 2, it will compute the expected of 25 which is 00:10 total_duration = 0 # Can be 0 since it's not an invalid duration for BoM cycle_number = 0 # Never 0 unless infinite item['workcenter_id'].capacity for item in data: total_duration += item['duration'] cycle_number += tools.float_round((item['qty_produced'] / item['workcenter_id'].capacity or 1.0), precision_digits=0, rounding_method='UP') if cycle_number: operation.time_cycle = total_duration / cycle_number else: operation.time_cycle = operation.time_cycle_manual def _compute_workorder_count(self): data = self.env['mrp.workorder'].read_group([ ('operation_id', 'in', self.ids), ('state', '=', 'done')], ['operation_id'], ['operation_id']) count_data = dict((item['operation_id'][0], item['operation_id_count']) for item in data) for operation in self: operation.workorder_count = count_data.get(operation.id, 0) def copy_to_bom(self): if 'bom_id' in self.env.context: bom_id = self.env.context.get('bom_id') for operation in self: operation.copy({'name': _("%s (copy)", operation.name), 'bom_id': bom_id}) return { 'view_mode': 'form', 'res_model': 'mrp.bom', 'views': [(False, 'form')], 'type': 'ir.actions.act_window', 'res_id': bom_id, } def _skip_operation_line(self, product): """ Control if a operation should be processed, can be inherited to add custom control. """ self.ensure_one() if product._name == 'product.template': return False return not product._match_all_variant_values(self.bom_product_template_attribute_value_ids)
jeremiahyan/odoo
addons/mrp/models/mrp_routing.py
Python
gpl-3.0
6,018
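A small worked example of the capacity-adjusted duration computation described in the comments of the record above. The figures are made up for illustration and math.ceil stands in for the tools.float_round(..., rounding_method='UP') call used in the record.

import math

# Assumed figures: one finished work order produced 50 units in 10 minutes
# on a work center with capacity 2.
duration = 10.0      # minutes
qty_produced = 50
capacity = 2

# Quantity is divided by capacity and rounded up to get the cycle count.
cycle_number = math.ceil(qty_produced / capacity)   # -> 25
time_cycle = duration / cycle_number                # -> 0.4 minutes per cycle
print(time_cycle)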
## Copyright 2009 Laurent Bovet <laurent.bovet@windmaster.ch>
##                Jordi Puigsegur <jordi.puigsegur@gmail.com>
##
##  This file is part of wfrog
##
##  wfrog is free software: you can redistribute it and/or modify
##  it under the terms of the GNU General Public License as published by
##  the Free Software Foundation, either version 3 of the License, or
##  (at your option) any later version.
##
##  This program is distributed in the hope that it will be useful,
##  but WITHOUT ANY WARRANTY; without even the implied warranty of
##  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
##  GNU General Public License for more details.
##
##  You should have received a copy of the GNU General Public License
##  along with this program. If not, see <http://www.gnu.org/licenses/>.

import logging
import base
import wfcommon.database


class MysqlStorage(base.DatabaseStorage):
    '''
    Stores sample data in a MySQL database table.

    [ Properties ]

    database [string] (optional):
        Name of the database. defaults to 'wfrog'.

    host [string] (optional):
        Database host. Defaults to 'localhost'.

    port [string] (optional):
        Database TCP port.

    user [string] (optional):
        Database usename. Defaults to 'root'.

    password [string] (optional):
        Database user password. Defaults to 'root'.

    tablename [string] (optional):
        Table name. Defaults to 'METEO'.
    '''

    database = 'wfrog'
    host = 'localhost'
    port = 3306
    user = 'root'
    password = 'root'

    logger = logging.getLogger('storage.mysql')

    def init(self, context=None):
        self.db = wfcommon.database.MySQLDB(self.database,
                                            self.host,
                                            self.port,
                                            self.user,
                                            self.password)

        table_fields = self._get_table_fields()

        # Verify Mandatory fields
        assert 'TIMESTAMP_UTC' in table_fields
        assert 'TIMESTAMP_LOCAL' in table_fields
        for field in self.mandatory_storage_fields:
            assert field in table_fields

        # Obtain actual storage fields
        self.storage_fields = self.mandatory_storage_fields + \
            [field for field in self.optional_storage_fields
             if field in table_fields]

        self.logger.info("Table %s detected with fields: %s"
                         % (self.tablename, ', '.join(self.storage_fields)))

    def _get_table_fields(self):
        sql = "show columns from %s;" % self.tablename
        fields = []
        try:
            self.db.connect()
            for row in self.db.select(sql):
                fields.append(row[0])
        finally:
            self.db.disconnect()
        return fields
zedoude/wfrog
wfcommon/storage/mysql.py
Python
gpl-3.0
2,700
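A rough configuration sketch for the storage class in the record above. In wfrog these properties are normally set from YAML configuration; setting them directly in Python here is an assumption for illustration, and the tablename value mirrors the documented default.

# Hypothetical, standalone-style configuration of the storage element.
storage = MysqlStorage()
storage.database = 'wfrog'
storage.host = 'localhost'
storage.port = 3306
storage.user = 'root'
storage.password = 'secret'
storage.tablename = 'METEO'
storage.init()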
# -*- coding: utf-8 -*-

from module.plugins.internal.DeadCrypter import DeadCrypter, create_getInfo


class ILoadTo(DeadCrypter):
    __name__ = "ILoadTo"
    __type__ = "crypter"
    __version__ = "0.11"

    __pattern__ = r'http://(?:www\.)?iload\.to/go/\d+-[\w.-]+/'
    __config__ = []

    __description__ = """Iload.to decrypter plugin"""
    __license__ = "GPLv3"
    __authors__ = [("hzpz", None)]


getInfo = create_getInfo(ILoadTo)
sebdelsol/pyload
module/plugins/crypter/ILoadTo.py
Python
gpl-3.0
458
# coding: utf-8

# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

from babel.core import UnknownLocaleError, Locale
from mock import patch

from bedrock.mozorg.tests import TestCase
from lib.l10n_utils.templatetags import helpers


def test_get_locale():
    """Test that the get_locale() helper works."""
    assert helpers.get_locale('pt-BR').language == 'pt'
    assert helpers.get_locale('not-a-lang').language == 'en'


def test_get_locale_hsb():
    """Should treat hsb and dsb as de."""
    # bug 1130285
    assert helpers.get_locale('dsb').language == 'de'
    assert helpers.get_locale('hsb').language == 'de'


@patch.object(helpers, 'lang_file_has_tag')
class TestL10nHasTag(TestCase):
    def test_uses_langfile(self, lfht_mock):
        """If langfile param specified should only check that file."""
        helpers.l10n_has_tag({'langfile': 'dude', 'LANG': 'fr'}, 'abide', langfile='uli')
        lfht_mock.assert_called_with('uli', 'fr', 'abide')

    @patch.object(helpers, 'template_has_tag')
    def test_checks_template_by_default(self, tht_mock, lfht_mock):
        helpers.l10n_has_tag({'langfile': 'dude', 'template': 'home.html', 'LANG': 'de'}, 'abide')
        tht_mock.assert_called_with('home.html', 'de', 'abide')
        self.assertFalse(lfht_mock.called)


class TestCurrentLocale(TestCase):
    @patch('lib.l10n_utils.templatetags.helpers.Locale')
    def test_unknown_locale(self, Locale):
        """
        If Locale.parse raises an UnknownLocaleError, return the en-US
        locale object.
        """
        Locale.parse.side_effect = UnknownLocaleError('foo')
        assert helpers.current_locale() == Locale.return_value
        Locale.assert_called_with('en', 'US')

    @patch('lib.l10n_utils.templatetags.helpers.Locale')
    def test_value_error(self, Locale):
        """
        If Locale.parse raises a ValueError, return the en-US
        locale object.
        """
        Locale.parse.side_effect = ValueError
        assert helpers.current_locale() == Locale.return_value
        Locale.assert_called_with('en', 'US')

    @patch('lib.l10n_utils.templatetags.helpers.get_language')
    @patch('lib.l10n_utils.templatetags.helpers.Locale')
    def test_success(self, Locale, get_language):
        assert helpers.current_locale() == Locale.parse.return_value
        Locale.parse.assert_called_with(get_language.return_value, sep='-')


class TestL10nFormat(TestCase):
    @patch('lib.l10n_utils.templatetags.helpers.format_date')
    def test_format_date(self, format_date):
        ctx = {'LANG': 'de'}
        locale = Locale('de')
        assert (
            helpers.l10n_format_date(ctx, 'somedate', format='long') ==
            format_date.return_value)
        format_date.assert_called_with(
            'somedate', locale=locale, format='long')

    @patch('lib.l10n_utils.templatetags.helpers.format_date')
    def test_format_date_hyphenated_locale(self, format_date):
        ctx = {'LANG': 'en-US'}
        locale = Locale('en', 'US')
        assert (
            helpers.l10n_format_date(ctx, 'somedate', format='long') ==
            format_date.return_value)
        format_date.assert_called_with(
            'somedate', locale=locale, format='long')

    @patch('lib.l10n_utils.templatetags.helpers.format_number')
    def test_format_number(self, format_number):
        ctx = {'LANG': 'de'}
        locale = Locale('de')
        assert (
            helpers.l10n_format_number(ctx, 10000) ==
            format_number.return_value)
        format_number.assert_called_with(
            10000, locale=locale)

    @patch('lib.l10n_utils.templatetags.helpers.format_number')
    def test_format_number_hyphenated_locale(self, format_number):
        ctx = {'LANG': 'pt-BR'}
        locale = Locale('pt', 'BR')
        assert (
            helpers.l10n_format_number(ctx, 10000) ==
            format_number.return_value)
        format_number.assert_called_with(
            10000, locale=locale)
ericawright/bedrock
lib/l10n_utils/tests/test_helpers.py
Python
mpl-2.0
4,181
#!/usr/bin/env python
from __future__ import division, print_function, absolute_import

from os.path import join, dirname
import sys
import os
import glob


def configuration(parent_package='',top_path=None):
    from numpy.distutils.misc_util import Configuration
    from numpy.distutils.system_info import get_info
    config = Configuration('dsolve',parent_package,top_path)
    config.add_data_dir('tests')

    lapack_opt = get_info('lapack_opt',notfound_action=2)
    if sys.platform == 'win32':
        superlu_defs = [('NO_TIMER',1)]
    else:
        superlu_defs = []
    superlu_defs.append(('USE_VENDOR_BLAS',1))

    superlu_src = join(dirname(__file__), 'SuperLU', 'SRC')

    sources = list(glob.glob(join(superlu_src, '*.c')))
    headers = list(glob.glob(join(superlu_src, '*.h')))
    if os.name == 'nt' and ('FPATH' in os.environ or 'MKLROOT' in os.environ):
        # when using MSVC + MKL, lsame is already in MKL
        sources.remove(join(superlu_src, 'lsame.c'))

    config.add_library('superlu_src',
                       sources=sources,
                       macros=superlu_defs,
                       include_dirs=[superlu_src],
                       )

    # Extension
    config.add_extension('_superlu',
                         sources=['_superlumodule.c',
                                  '_superlu_utils.c',
                                  '_superluobject.c'],
                         libraries=['superlu_src'],
                         depends=(sources + headers),
                         extra_info=lapack_opt,
                         )

    return config

if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
GbalsaC/bitnamiP
venv/lib/python2.7/site-packages/scipy/sparse/linalg/dsolve/setup.py
Python
agpl-3.0
1,730
#!/usr/bin/env python
# coding=utf-8

# aeneas is a Python/C library and a set of tools
# to automagically synchronize audio and text (aka forced alignment)
#
# Copyright (C) 2012-2013, Alberto Pettarin (www.albertopettarin.it)
# Copyright (C) 2013-2015, ReadBeyond Srl (www.readbeyond.it)
# Copyright (C) 2015-2017, Alberto Pettarin (www.albertopettarin.it)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from __future__ import absolute_import
from __future__ import print_function

from aeneas.syncmap.smfgtabular import SyncMapFormatGenericTabular


class SyncMapFormatTSV(SyncMapFormatGenericTabular):
    """
    Handler for tab-separated plain text (TSV) I/O format.
    """

    TAG = u"SyncMapFormatTSV"

    DEFAULT = "tsv"
    HUMAN = "tsvh"
    MACHINE = "tsvm"
    TAB = "tab"

    MACHINE_ALIASES = [DEFAULT, MACHINE, TAB]

    FIELD_DELIMITER = u"\t"

    FIELDS = {
        "begin": 0,
        "end": 1,
        "identifier": 2,
    }

    TEXT_DELIMITER = None
danielbair/aeneas
aeneas/syncmap/smftsv.py
Python
agpl-3.0
1,593
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class RLdheatmap(RPackage):
    """Graphical Display of Pairwise Linkage Disequilibria Between SNPs

    Produces a graphical display, as a heat map, of measures of pairwise
    linkage disequilibria between single nucleotide polymorphisms (SNPs).
    Users may optionally include the physical locations or genetic map
    distances of each SNP on the plot. The methods are described in Shin
    et al. (2006) <doi:10.18637/jss.v016.c03>. Users should note that the
    imported package 'snpStats' and the suggested packages 'rtracklayer',
    'GenomicRanges', 'GenomInfoDb' and 'IRanges' are all BioConductor
    packages <https://bioconductor.org>."""

    homepage = "https://sfustatgen.github.io/LDheatmap/"
    url = "https://cloud.r-project.org/src/contrib/LDheatmap_0.99-7.tar.gz"
    list_url = "https://cloud.r-project.org/src/contrib/Archive/LDheatmap"

    version('1.0-4', sha256='07eb385f19e6a195e8e4d75be0b47c57744eabbf14045e527f0c27e1183ae5ca')
    version('0.99-7', sha256='aca54c839a424506d8be7153bf03b32026aeefe7ed38f534e8e19708e34212e4')

    depends_on('r@2.14.0:', type=('build', 'run'))
    depends_on('r@4.0:', when='@1.0-4:', type=('build', 'run'))
    depends_on('r-genetics', type=('build', 'run'))
    depends_on('r-snpstats', type=('build', 'run'))
    depends_on('r-rcpp', type=('build', 'run'))
LLNL/spack
var/spack/repos/builtin/packages/r-ldheatmap/package.py
Python
lgpl-2.1
1,552
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *

import tempfile


class PyDmTree(PythonPackage):
    """tree is a library for working with nested data structures. In a way,
    tree generalizes the builtin map() function which only supports flat
    sequences, and allows to apply a function to each leaf preserving the
    overall structure."""

    homepage = "https://github.com/deepmind/tree"
    pypi = "dm-tree/dm-tree-0.1.5.tar.gz"

    maintainers = ['aweits']

    version('0.1.5', sha256='a951d2239111dfcc468071bc8ff792c7b1e3192cab5a3c94d33a8b2bda3127fa')

    depends_on('py-setuptools', type='build')
    depends_on('bazel', type='build')
    depends_on('py-six@1.12.0:', type=('build', 'run'))

    @run_after('install')
    def clean(self):
        remove_linked_tree(self.tmp_path)

    def patch(self):
        self.tmp_path = tempfile.mkdtemp(dir='/tmp', prefix='spack')
        env['TEST_TMPDIR'] = self.tmp_path
        env['HOME'] = self.tmp_path
        args = [
            # Don't allow user or system .bazelrc to override build settings
            "'--nohome_rc',\n",
            "'--nosystem_rc',\n",
            # Bazel does not work properly on NFS, switch to /tmp
            "'--output_user_root={0}',\n".format(self.tmp_path),
            "'build',\n",
            # Spack logs don't handle colored output well
            "'--color=no',\n",
            "'--jobs={0}',\n".format(make_jobs),
            # Enable verbose output for failures
            "'--verbose_failures',\n",
            # Show (formatted) subcommands being executed
            "'--subcommands=pretty_print',\n",
            "'--spawn_strategy=local',\n",
            # Ask bazel to explain what it's up to
            # Needs a filename as argument
            "'--explain=explainlogfile.txt',\n",
            # Increase verbosity of explanation,
            "'--verbose_explanations',\n",
            # bazel uses system PYTHONPATH instead of spack paths
            "'--action_env', 'PYTHONPATH={0}',\n".format(env['PYTHONPATH']),
        ]
        filter_file("'build',", ' '.join(args), 'setup.py')
LLNL/spack
var/spack/repos/builtin/packages/py-dm-tree/package.py
Python
lgpl-2.1
2,296
"""Debounce helper.""" import asyncio from logging import Logger from typing import Any, Awaitable, Callable, Optional from homeassistant.core import HassJob, HomeAssistant, callback class Debouncer: """Class to rate limit calls to a specific command.""" def __init__( self, hass: HomeAssistant, logger: Logger, *, cooldown: float, immediate: bool, function: Optional[Callable[..., Awaitable[Any]]] = None, ): """Initialize debounce. immediate: indicate if the function needs to be called right away and wait <cooldown> until executing next invocation. function: optional and can be instantiated later. """ self.hass = hass self.logger = logger self._function = function self.cooldown = cooldown self.immediate = immediate self._timer_task: Optional[asyncio.TimerHandle] = None self._execute_at_end_of_timer: bool = False self._execute_lock = asyncio.Lock() self._job: Optional[HassJob] = None if function is None else HassJob(function) @property def function(self) -> Optional[Callable[..., Awaitable[Any]]]: """Return the function being wrapped by the Debouncer.""" return self._function @function.setter def function(self, function: Callable[..., Awaitable[Any]]) -> None: """Update the function being wrapped by the Debouncer.""" self._function = function if self._job is None or function != self._job.target: self._job = HassJob(function) async def async_call(self) -> None: """Call the function.""" assert self._job is not None if self._timer_task: if not self._execute_at_end_of_timer: self._execute_at_end_of_timer = True return # Locked means a call is in progress. Any call is good, so abort. if self._execute_lock.locked(): return if not self.immediate: self._execute_at_end_of_timer = True self._schedule_timer() return async with self._execute_lock: # Abort if timer got set while we're waiting for the lock. if self._timer_task: return task = self.hass.async_run_hass_job(self._job) if task: await task self._schedule_timer() async def _handle_timer_finish(self) -> None: """Handle a finished timer.""" assert self._job is not None self._timer_task = None if not self._execute_at_end_of_timer: return self._execute_at_end_of_timer = False # Locked means a call is in progress. Any call is good, so abort. if self._execute_lock.locked(): return async with self._execute_lock: # Abort if timer got set while we're waiting for the lock. if self._timer_task: return # type: ignore try: task = self.hass.async_run_hass_job(self._job) if task: await task except Exception: # pylint: disable=broad-except self.logger.exception("Unexpected exception from %s", self.function) self._schedule_timer() @callback def async_cancel(self) -> None: """Cancel any scheduled call.""" if self._timer_task: self._timer_task.cancel() self._timer_task = None self._execute_at_end_of_timer = False @callback def _schedule_timer(self) -> None: """Schedule a timer.""" self._timer_task = self.hass.loop.call_later( self.cooldown, lambda: self.hass.async_create_task(self._handle_timer_finish()), )
tboyce1/home-assistant
homeassistant/helpers/debounce.py
Python
apache-2.0
3,861
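A brief usage sketch for the Debouncer in the record above, as it might appear inside a Home Assistant integration. The refresh coroutine, the cooldown value and the setup function name are assumptions for illustration.

import logging

from homeassistant.helpers.debounce import Debouncer

_LOGGER = logging.getLogger(__name__)


async def async_setup_refresh(hass):
    async def async_update_data():
        """Hypothetical refresh coroutine to be rate limited."""
        # ... fetch data from a device or API here ...

    debouncer = Debouncer(
        hass,
        _LOGGER,
        cooldown=30.0,      # at most one real refresh per 30 seconds
        immediate=True,     # run the first call right away
        function=async_update_data,
    )

    # Rapid repeated calls collapse into at most one execution per cooldown.
    await debouncer.async_call()
    await debouncer.async_call()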
input = """ 1 2 2 1 3 4 1 3 2 1 2 4 1 4 0 0 1 5 2 1 6 7 1 6 2 1 5 7 1 7 0 0 1 1 1 1 2 1 1 1 1 5 5 9 4 2 0 2 5 2 2 1 8 1 0 9 0 5 c 3 b 2 a 8 ok 6 d 0 B+ 0 B- 1 0 1 """ output = """ {a, c, ok} """
alviano/wasp
tests/asp/cautious/sum.example15.cautious.asp.test.py
Python
apache-2.0
195
import os
import asyncio

import tornado.web
import tornado.httpserver
import tornado.platform.asyncio

from waterbutler import settings
from waterbutler.server.api import v0
from waterbutler.server.api import v1
from waterbutler.server import handlers
from waterbutler.core.utils import AioSentryClient
from waterbutler.server import settings as server_settings


def api_to_handlers(api):
    return [
        (os.path.join('/', api.PREFIX, pattern.lstrip('/')), handler)
        for (pattern, handler) in api.HANDLERS
    ]


def make_app(debug):
    app = tornado.web.Application(
        api_to_handlers(v0) +
        api_to_handlers(v1) +
        [(r'/status', handlers.StatusHandler)],
        debug=debug,
    )
    app.sentry_client = AioSentryClient(settings.get('SENTRY_DSN', None))
    return app


def serve():
    tornado.platform.asyncio.AsyncIOMainLoop().install()

    app = make_app(server_settings.DEBUG)

    ssl_options = None
    if server_settings.SSL_CERT_FILE and server_settings.SSL_KEY_FILE:
        ssl_options = {
            'certfile': server_settings.SSL_CERT_FILE,
            'keyfile': server_settings.SSL_KEY_FILE,
        }

    app.listen(
        server_settings.PORT,
        address=server_settings.ADDRESS,
        xheaders=server_settings.XHEADERS,
        max_body_size=server_settings.MAX_BODY_SIZE,
        ssl_options=ssl_options,
    )

    asyncio.get_event_loop().set_debug(server_settings.DEBUG)
    asyncio.get_event_loop().run_forever()
cosenal/waterbutler
waterbutler/server/app.py
Python
apache-2.0
1,490
# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2014 YAMAMOTO Takashi <yamamoto at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import oslo.config.cfg

# there are 3 ways to access the configuration.
#
#    a. ryu.cfg.CONF (used to register cli options)
#    b. RyuApp.CONF (preferred way for ryu applications)
#    c. oslo.config.cfg.CONF
#
# Currently all of above shares a single ConfigOpts instance.
# We will unshare c. (and stop using it) as soon as ofagent neutron agent
# is updated.
# We want to avoid using c. for our options as a python program which embeds
# ryu applications (eg. neutron agent) might want to its own set of cli
# options into it, which can conflict with ours. (Currently there seems
# no conflict for the neutron agent. But who knows?)
# At some point later we might want to unshare a. and b. as well, in order
# to allow app-specific options.

CONF = oslo.config.cfg.CONF

# re-export for convenience
from oslo.config.cfg import ConfigOpts
from oslo.config.cfg import BoolOpt
from oslo.config.cfg import IntOpt
from oslo.config.cfg import ListOpt
from oslo.config.cfg import MultiStrOpt
from oslo.config.cfg import StrOpt
from oslo.config.cfg import RequiredOptError
from oslo.config.cfg import ConfigFilesNotFoundError
o3project/ryu-oe
ryu/cfg.py
Python
apache-2.0
1,809
from cloudmailin import __version__
from setuptools import setup, find_packages
import os

f = open(os.path.join(os.path.dirname(__file__), 'README.rst'))
readme = f.read()
f.close()

setup(
    name='django-cloudmailin',
    version=__version__,
    description='Client for CloudMailin incoming email service',
    long_description=readme,
    author='Jeremy Carbaugh',
    author_email='jcarbaugh@sunlightfoundation.com',
    url='http://github.com/sunlightlabs/django-cloudmailin/',
    packages=find_packages(),
    license='BSD License',
    platforms=["any"],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Environment :: Web Environment',
    ],
)
mjbrownie/django-cloudmailin
setup.py
Python
bsd-3-clause
897
""" The :mod:`sklearn.model_selection._search` includes utilities to fine-tune the parameters of an estimator. """ from __future__ import print_function from __future__ import division # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>, # Gael Varoquaux <gael.varoquaux@normalesup.org> # Andreas Mueller <amueller@ais.uni-bonn.de> # Olivier Grisel <olivier.grisel@ensta.org> # License: BSD 3 clause from abc import ABCMeta, abstractmethod from collections import Mapping, namedtuple, defaultdict, Sequence from functools import partial, reduce from itertools import product import operator import warnings import numpy as np from ..base import BaseEstimator, is_classifier, clone from ..base import MetaEstimatorMixin from ._split import check_cv from ._validation import _fit_and_score from ..exceptions import NotFittedError from ..externals.joblib import Parallel, delayed from ..externals import six from ..utils import check_random_state from ..utils.fixes import sp_version from ..utils.fixes import rankdata from ..utils.fixes import MaskedArray from ..utils.random import sample_without_replacement from ..utils.validation import indexable, check_is_fitted from ..utils.metaestimators import if_delegate_has_method from ..metrics.scorer import check_scoring __all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point', 'ParameterSampler', 'RandomizedSearchCV'] class ParameterGrid(object): """Grid of parameters with a discrete number of values for each. Can be used to iterate over parameter value combinations with the Python built-in function iter. Read more in the :ref:`User Guide <search>`. Parameters ---------- param_grid : dict of string to sequence, or sequence of such The parameter grid to explore, as a dictionary mapping estimator parameters to sequences of allowed values. An empty dict signifies default parameters. A sequence of dicts signifies a sequence of grids to search, and is useful to avoid exploring parameter combinations that make no sense or have no effect. See the examples below. Examples -------- >>> from sklearn.model_selection import ParameterGrid >>> param_grid = {'a': [1, 2], 'b': [True, False]} >>> list(ParameterGrid(param_grid)) == ( ... [{'a': 1, 'b': True}, {'a': 1, 'b': False}, ... {'a': 2, 'b': True}, {'a': 2, 'b': False}]) True >>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}] >>> list(ParameterGrid(grid)) == [{'kernel': 'linear'}, ... {'kernel': 'rbf', 'gamma': 1}, ... {'kernel': 'rbf', 'gamma': 10}] True >>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1} True See also -------- :class:`GridSearchCV`: Uses :class:`ParameterGrid` to perform a full parallelized parameter search. """ def __init__(self, param_grid): if isinstance(param_grid, Mapping): # wrap dictionary in a singleton list to support either dict # or list of dicts param_grid = [param_grid] self.param_grid = param_grid def __iter__(self): """Iterate over the points in the grid. Returns ------- params : iterator over dict of string to any Yields dictionaries mapping each estimator parameter to one of its allowed values. """ for p in self.param_grid: # Always sort the keys of a dictionary, for reproducibility items = sorted(p.items()) if not items: yield {} else: keys, values = zip(*items) for v in product(*values): params = dict(zip(keys, v)) yield params def __len__(self): """Number of points on the grid.""" # Product function that can handle iterables (np.product can't). 
product = partial(reduce, operator.mul) return sum(product(len(v) for v in p.values()) if p else 1 for p in self.param_grid) def __getitem__(self, ind): """Get the parameters that would be ``ind``th in iteration Parameters ---------- ind : int The iteration index Returns ------- params : dict of string to any Equal to list(self)[ind] """ # This is used to make discrete sampling without replacement memory # efficient. for sub_grid in self.param_grid: # XXX: could memoize information used here if not sub_grid: if ind == 0: return {} else: ind -= 1 continue # Reverse so most frequent cycling parameter comes first keys, values_lists = zip(*sorted(sub_grid.items())[::-1]) sizes = [len(v_list) for v_list in values_lists] total = np.product(sizes) if ind >= total: # Try the next grid ind -= total else: out = {} for key, v_list, n in zip(keys, values_lists, sizes): ind, offset = divmod(ind, n) out[key] = v_list[offset] return out raise IndexError('ParameterGrid index out of range') class ParameterSampler(object): """Generator on parameters sampled from given distributions. Non-deterministic iterable over random candidate combinations for hyper- parameter search. If all parameters are presented as a list, sampling without replacement is performed. If at least one parameter is given as a distribution, sampling with replacement is used. It is highly recommended to use continuous distributions for continuous parameters. Note that before SciPy 0.16, the ``scipy.stats.distributions`` do not accept a custom RNG instance and always use the singleton RNG from ``numpy.random``. Hence setting ``random_state`` will not guarantee a deterministic iteration whenever ``scipy.stats`` distributions are used to define the parameter search space. Deterministic behavior is however guaranteed from SciPy 0.16 onwards. Read more in the :ref:`User Guide <search>`. Parameters ---------- param_distributions : dict Dictionary where the keys are parameters and values are distributions from which a parameter is to be sampled. Distributions either have to provide a ``rvs`` function to sample from them, or can be given as a list of values, where a uniform distribution is assumed. n_iter : integer Number of parameter settings that are produced. random_state : int, RandomState instance or None, optional (default=None) Pseudo random number generator state used for random uniform sampling from lists of possible values instead of scipy.stats distributions. If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- params : dict of string to any **Yields** dictionaries mapping each estimator parameter to as sampled value. Examples -------- >>> from sklearn.model_selection import ParameterSampler >>> from scipy.stats.distributions import expon >>> import numpy as np >>> np.random.seed(0) >>> param_grid = {'a':[1, 2], 'b': expon()} >>> param_list = list(ParameterSampler(param_grid, n_iter=4)) >>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items()) ... for d in param_list] >>> rounded_list == [{'b': 0.89856, 'a': 1}, ... {'b': 0.923223, 'a': 1}, ... {'b': 1.878964, 'a': 2}, ... 
{'b': 1.038159, 'a': 2}] True """ def __init__(self, param_distributions, n_iter, random_state=None): self.param_distributions = param_distributions self.n_iter = n_iter self.random_state = random_state def __iter__(self): # check if all distributions are given as lists # in this case we want to sample without replacement all_lists = np.all([not hasattr(v, "rvs") for v in self.param_distributions.values()]) rnd = check_random_state(self.random_state) if all_lists: # look up sampled parameter settings in parameter grid param_grid = ParameterGrid(self.param_distributions) grid_size = len(param_grid) if grid_size < self.n_iter: raise ValueError( "The total space of parameters %d is smaller " "than n_iter=%d. For exhaustive searches, use " "GridSearchCV." % (grid_size, self.n_iter)) for i in sample_without_replacement(grid_size, self.n_iter, random_state=rnd): yield param_grid[i] else: # Always sort the keys of a dictionary, for reproducibility items = sorted(self.param_distributions.items()) for _ in six.moves.range(self.n_iter): params = dict() for k, v in items: if hasattr(v, "rvs"): if sp_version < (0, 16): params[k] = v.rvs() else: params[k] = v.rvs(random_state=rnd) else: params[k] = v[rnd.randint(len(v))] yield params def __len__(self): """Number of points that will be sampled.""" return self.n_iter def fit_grid_point(X, y, estimator, parameters, train, test, scorer, verbose, error_score='raise', **fit_params): """Run fit on one set of parameters. Parameters ---------- X : array-like, sparse matrix or list Input data. y : array-like or None Targets for input data. estimator : estimator object A object of that type is instantiated for each grid point. This is assumed to implement the scikit-learn estimator interface. Either estimator needs to provide a ``score`` function, or ``scoring`` must be passed. parameters : dict Parameters to be set on estimator for this grid point. train : ndarray, dtype int or bool Boolean mask or indices for training set. test : ndarray, dtype int or bool Boolean mask or indices for test set. scorer : callable or None. If provided must be a scorer callable object / function with signature ``scorer(estimator, X, y)``. verbose : int Verbosity level. **fit_params : kwargs Additional parameter passed to the fit function of the estimator. error_score : 'raise' (default) or numeric Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. Returns ------- score : float Score of this parameter setting on given training / test split. parameters : dict The parameters that have been evaluated. n_samples_test : int Number of test samples in this split. 
""" score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters, fit_params=fit_params, return_n_test_samples=True, error_score=error_score) return score, parameters, n_samples_test def _check_param_grid(param_grid): if hasattr(param_grid, 'items'): param_grid = [param_grid] for p in param_grid: for name, v in p.items(): if isinstance(v, np.ndarray) and v.ndim > 1: raise ValueError("Parameter array should be one-dimensional.") if (isinstance(v, six.string_types) or not isinstance(v, (np.ndarray, Sequence))): raise ValueError("Parameter values for parameter ({0}) need " "to be a sequence(but not a string) or" " np.ndarray.".format(name)) if len(v) == 0: raise ValueError("Parameter values for parameter ({0}) need " "to be a non-empty sequence.".format(name)) # XXX Remove in 0.20 class _CVScoreTuple (namedtuple('_CVScoreTuple', ('parameters', 'mean_validation_score', 'cv_validation_scores'))): # A raw namedtuple is very memory efficient as it packs the attributes # in a struct to get rid of the __dict__ of attributes in particular it # does not copy the string for the keys on each instance. # By deriving a namedtuple class just to introduce the __repr__ method we # would also reintroduce the __dict__ on the instance. By telling the # Python interpreter that this subclass uses static __slots__ instead of # dynamic attributes. Furthermore we don't need any additional slot in the # subclass so we set __slots__ to the empty tuple. __slots__ = () def __repr__(self): """Simple custom repr to summarize the main info""" return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format( self.mean_validation_score, np.std(self.cv_validation_scores), self.parameters) class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator, MetaEstimatorMixin)): """Base class for hyper parameter search with cross-validation.""" @abstractmethod def __init__(self, estimator, scoring=None, fit_params=None, n_jobs=1, iid=True, refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs', error_score='raise', return_train_score=True): self.scoring = scoring self.estimator = estimator self.n_jobs = n_jobs self.fit_params = fit_params if fit_params is not None else {} self.iid = iid self.refit = refit self.cv = cv self.verbose = verbose self.pre_dispatch = pre_dispatch self.error_score = error_score self.return_train_score = return_train_score @property def _estimator_type(self): return self.estimator._estimator_type def score(self, X, y=None): """Returns the score on the given data, if the estimator has been refit. This uses the score defined by ``scoring`` where provided, and the ``best_estimator_.score`` method otherwise. Parameters ---------- X : array-like, shape = [n_samples, n_features] Input data, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_output], optional Target relative to X for classification or regression; None for unsupervised learning. Returns ------- score : float """ if self.scorer_ is None: raise ValueError("No score function explicitly defined, " "and the estimator doesn't provide one %s" % self.best_estimator_) return self.scorer_(self.best_estimator_, X, y) def _check_is_fitted(self, method_name): if not self.refit: raise NotFittedError(('This GridSearchCV instance was initialized ' 'with refit=False. %s is ' 'available only after refitting on the best ' 'parameters. 
') % method_name) else: check_is_fitted(self, 'best_estimator_') @if_delegate_has_method(delegate=('best_estimator_', 'estimator')) def predict(self, X): """Call predict on the estimator with the best found parameters. Only available if ``refit=True`` and the underlying estimator supports ``predict``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ self._check_is_fitted('predict') return self.best_estimator_.predict(X) @if_delegate_has_method(delegate=('best_estimator_', 'estimator')) def predict_proba(self, X): """Call predict_proba on the estimator with the best found parameters. Only available if ``refit=True`` and the underlying estimator supports ``predict_proba``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ self._check_is_fitted('predict_proba') return self.best_estimator_.predict_proba(X) @if_delegate_has_method(delegate=('best_estimator_', 'estimator')) def predict_log_proba(self, X): """Call predict_log_proba on the estimator with the best found parameters. Only available if ``refit=True`` and the underlying estimator supports ``predict_log_proba``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ self._check_is_fitted('predict_log_proba') return self.best_estimator_.predict_log_proba(X) @if_delegate_has_method(delegate=('best_estimator_', 'estimator')) def decision_function(self, X): """Call decision_function on the estimator with the best found parameters. Only available if ``refit=True`` and the underlying estimator supports ``decision_function``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ self._check_is_fitted('decision_function') return self.best_estimator_.decision_function(X) @if_delegate_has_method(delegate=('best_estimator_', 'estimator')) def transform(self, X): """Call transform on the estimator with the best found parameters. Only available if the underlying estimator supports ``transform`` and ``refit=True``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ self._check_is_fitted('transform') return self.best_estimator_.transform(X) @if_delegate_has_method(delegate=('best_estimator_', 'estimator')) def inverse_transform(self, Xt): """Call inverse_transform on the estimator with the best found params. Only available if the underlying estimator implements ``inverse_transform`` and ``refit=True``. Parameters ----------- Xt : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ self._check_is_fitted('inverse_transform') return self.best_estimator_.inverse_transform(Xt) @property def classes_(self): self._check_is_fitted("classes_") return self.best_estimator_.classes_ def fit(self, X, y=None, groups=None, **fit_params): """Run fit with all sets of parameters. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vector, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_output], optional Target relative to X for classification or regression; None for unsupervised learning. groups : array-like, with shape (n_samples,), optional Group labels for the samples used while splitting the dataset into train/test set. 
**fit_params : dict of string -> object Parameters passed to the ``fit`` method of the estimator """ if self.fit_params: warnings.warn('"fit_params" as a constructor argument was ' 'deprecated in version 0.19 and will be removed ' 'in version 0.21. Pass fit parameters to the ' '"fit" method instead.', DeprecationWarning) if fit_params: warnings.warn('Ignoring fit_params passed as a constructor ' 'argument in favor of keyword arguments to ' 'the "fit" method.', RuntimeWarning) else: fit_params = self.fit_params estimator = self.estimator cv = check_cv(self.cv, y, classifier=is_classifier(estimator)) self.scorer_ = check_scoring(self.estimator, scoring=self.scoring) X, y, groups = indexable(X, y, groups) n_splits = cv.get_n_splits(X, y, groups) # Regenerate parameter iterable for each fit candidate_params = list(self._get_param_iterator()) n_candidates = len(candidate_params) if self.verbose > 0: print("Fitting {0} folds for each of {1} candidates, totalling" " {2} fits".format(n_splits, n_candidates, n_candidates * n_splits)) base_estimator = clone(self.estimator) pre_dispatch = self.pre_dispatch out = Parallel( n_jobs=self.n_jobs, verbose=self.verbose, pre_dispatch=pre_dispatch )(delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_, train, test, self.verbose, parameters, fit_params=fit_params, return_train_score=self.return_train_score, return_n_test_samples=True, return_times=True, return_parameters=False, error_score=self.error_score) for train, test in cv.split(X, y, groups) for parameters in candidate_params) # if one choose to see train score, "out" will contain train score info if self.return_train_score: (train_scores, test_scores, test_sample_counts, fit_time, score_time) = zip(*out) else: (test_scores, test_sample_counts, fit_time, score_time) = zip(*out) results = dict() def _store(key_name, array, weights=None, splits=False, rank=False): """A small helper to store the scores/times to the cv_results_""" # When iterated first by splits, then by parameters array = np.array(array, dtype=np.float64).reshape(n_splits, n_candidates).T if splits: for split_i in range(n_splits): results["split%d_%s" % (split_i, key_name)] = array[:, split_i] array_means = np.average(array, axis=1, weights=weights) results['mean_%s' % key_name] = array_means # Weighted std is not directly available in numpy array_stds = np.sqrt(np.average((array - array_means[:, np.newaxis]) ** 2, axis=1, weights=weights)) results['std_%s' % key_name] = array_stds if rank: results["rank_%s" % key_name] = np.asarray( rankdata(-array_means, method='min'), dtype=np.int32) # Computed the (weighted) mean and std for test scores alone # NOTE test_sample counts (weights) remain the same for all candidates test_sample_counts = np.array(test_sample_counts[::n_candidates], dtype=np.int) _store('test_score', test_scores, splits=True, rank=True, weights=test_sample_counts if self.iid else None) if self.return_train_score: _store('train_score', train_scores, splits=True) _store('fit_time', fit_time) _store('score_time', score_time) best_index = np.flatnonzero(results["rank_test_score"] == 1)[0] best_parameters = candidate_params[best_index] # Use one MaskedArray and mask all the places where the param is not # applicable for that candidate. 
Use defaultdict as each candidate may # not contain all the params param_results = defaultdict(partial(MaskedArray, np.empty(n_candidates,), mask=True, dtype=object)) for cand_i, params in enumerate(candidate_params): for name, value in params.items(): # An all masked empty array gets created for the key # `"param_%s" % name` at the first occurence of `name`. # Setting the value at an index also unmasks that index param_results["param_%s" % name][cand_i] = value results.update(param_results) # Store a list of param dicts at the key 'params' results['params'] = candidate_params self.cv_results_ = results self.best_index_ = best_index self.n_splits_ = n_splits if self.refit: # fit the best estimator using the entire dataset # clone first to work around broken estimators best_estimator = clone(base_estimator).set_params( **best_parameters) if y is not None: best_estimator.fit(X, y, **fit_params) else: best_estimator.fit(X, **fit_params) self.best_estimator_ = best_estimator return self @property def best_params_(self): check_is_fitted(self, 'cv_results_') return self.cv_results_['params'][self.best_index_] @property def best_score_(self): check_is_fitted(self, 'cv_results_') return self.cv_results_['mean_test_score'][self.best_index_] @property def grid_scores_(self): warnings.warn( "The grid_scores_ attribute was deprecated in version 0.18" " in favor of the more elaborate cv_results_ attribute." " The grid_scores_ attribute will not be available from 0.20", DeprecationWarning) check_is_fitted(self, 'cv_results_') grid_scores = list() for i, (params, mean, std) in enumerate(zip( self.cv_results_['params'], self.cv_results_['mean_test_score'], self.cv_results_['std_test_score'])): scores = np.array(list(self.cv_results_['split%d_test_score' % s][i] for s in range(self.n_splits_)), dtype=np.float64) grid_scores.append(_CVScoreTuple(params, mean, scores)) return grid_scores class GridSearchCV(BaseSearchCV): """Exhaustive search over specified parameter values for an estimator. Important members are fit, predict. GridSearchCV implements a "fit" and a "score" method. It also implements "predict", "predict_proba", "decision_function", "transform" and "inverse_transform" if they are implemented in the estimator used. The parameters of the estimator used to apply these methods are optimized by cross-validated grid-search over a parameter grid. Read more in the :ref:`User Guide <grid_search>`. Parameters ---------- estimator : estimator object. This is assumed to implement the scikit-learn estimator interface. Either estimator needs to provide a ``score`` function, or ``scoring`` must be passed. param_grid : dict or list of dictionaries Dictionary with parameters names (string) as keys and lists of parameter settings to try as values, or a list of such dictionaries, in which case the grids spanned by each dictionary in the list are explored. This enables searching over any sequence of parameter settings. scoring : string, callable or None, default=None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. If ``None``, the ``score`` method of the estimator is used. n_jobs : int, default=1 Number of jobs to run in parallel. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. 
This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' iid : boolean, default=True If True, the data is assumed to be identically distributed across the folds, and the loss minimized is the total loss per sample, and not the mean loss across the folds. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross validation, - integer, to specify the number of folds in a `(Stratified)KFold`, - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. refit : boolean, default=True Refit the best estimator with the entire dataset. If "False", it is impossible to make predictions using this GridSearchCV instance after fitting. verbose : integer Controls the verbosity: the higher, the more messages. error_score : 'raise' (default) or numeric Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. return_train_score : boolean, default=True If ``'False'``, the ``cv_results_`` attribute will not include training scores. Examples -------- >>> from sklearn import svm, datasets >>> from sklearn.model_selection import GridSearchCV >>> iris = datasets.load_iris() >>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]} >>> svc = svm.SVC() >>> clf = GridSearchCV(svc, parameters) >>> clf.fit(iris.data, iris.target) ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS GridSearchCV(cv=None, error_score=..., estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=..., decision_function_shape='ovr', degree=..., gamma=..., kernel='rbf', max_iter=-1, probability=False, random_state=None, shrinking=True, tol=..., verbose=False), fit_params={}, iid=..., n_jobs=1, param_grid=..., pre_dispatch=..., refit=..., return_train_score=..., scoring=..., verbose=...) >>> sorted(clf.cv_results_.keys()) ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS ['mean_fit_time', 'mean_score_time', 'mean_test_score',... 'mean_train_score', 'param_C', 'param_kernel', 'params',... 'rank_test_score', 'split0_test_score',... 'split0_train_score', 'split1_test_score', 'split1_train_score',... 'split2_test_score', 'split2_train_score',... 'std_fit_time', 'std_score_time', 'std_test_score', 'std_train_score'...] Attributes ---------- cv_results_ : dict of numpy (masked) ndarrays A dict with keys as column headers and values as columns, that can be imported into a pandas ``DataFrame``. 
For instance the below given table +------------+-----------+------------+-----------------+---+---------+ |param_kernel|param_gamma|param_degree|split0_test_score|...|rank_....| +============+===========+============+=================+===+=========+ | 'poly' | -- | 2 | 0.8 |...| 2 | +------------+-----------+------------+-----------------+---+---------+ | 'poly' | -- | 3 | 0.7 |...| 4 | +------------+-----------+------------+-----------------+---+---------+ | 'rbf' | 0.1 | -- | 0.8 |...| 3 | +------------+-----------+------------+-----------------+---+---------+ | 'rbf' | 0.2 | -- | 0.9 |...| 1 | +------------+-----------+------------+-----------------+---+---------+ will be represented by a ``cv_results_`` dict of:: { 'param_kernel': masked_array(data = ['poly', 'poly', 'rbf', 'rbf'], mask = [False False False False]...) 'param_gamma': masked_array(data = [-- -- 0.1 0.2], mask = [ True True False False]...), 'param_degree': masked_array(data = [2.0 3.0 -- --], mask = [False False True True]...), 'split0_test_score' : [0.8, 0.7, 0.8, 0.9], 'split1_test_score' : [0.82, 0.5, 0.7, 0.78], 'mean_test_score' : [0.81, 0.60, 0.75, 0.82], 'std_test_score' : [0.02, 0.01, 0.03, 0.03], 'rank_test_score' : [2, 4, 3, 1], 'split0_train_score' : [0.8, 0.9, 0.7], 'split1_train_score' : [0.82, 0.5, 0.7], 'mean_train_score' : [0.81, 0.7, 0.7], 'std_train_score' : [0.03, 0.03, 0.04], 'mean_fit_time' : [0.73, 0.63, 0.43, 0.49], 'std_fit_time' : [0.01, 0.02, 0.01, 0.01], 'mean_score_time' : [0.007, 0.06, 0.04, 0.04], 'std_score_time' : [0.001, 0.002, 0.003, 0.005], 'params' : [{'kernel': 'poly', 'degree': 2}, ...], } NOTE that the key ``'params'`` is used to store a list of parameter settings dict for all the parameter candidates. The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and ``std_score_time`` are all in seconds. best_estimator_ : estimator Estimator that was chosen by the search, i.e. estimator which gave highest score (or smallest loss if specified) on the left out data. Not available if refit=False. best_score_ : float Score of best_estimator on the left out data. best_params_ : dict Parameter setting that gave the best results on the hold out data. best_index_ : int The index (of the ``cv_results_`` arrays) which corresponds to the best candidate parameter setting. The dict at ``search.cv_results_['params'][search.best_index_]`` gives the parameter setting for the best model, that gives the highest mean score (``search.best_score_``). scorer_ : function Scorer function used on the held out data to choose the best parameters for the model. n_splits_ : int The number of cross-validation splits (folds/iterations). Notes ------ The parameters selected are those that maximize the score of the left out data, unless an explicit score is passed in which case it is used instead. If `n_jobs` was set to a value higher than one, the data is copied for each point in the grid (and not `n_jobs` times). This is done for efficiency reasons if individual jobs take very little time, but may raise errors if the dataset is large and not enough memory is available. A workaround in this case is to set `pre_dispatch`. Then, the memory is copied only `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 * n_jobs`. See Also --------- :class:`ParameterGrid`: generates all the combinations of a hyperparameter grid. 
:func:`sklearn.model_selection.train_test_split`: utility function to split the data into a development set usable for fitting a GridSearchCV instance and an evaluation set for its final evaluation. :func:`sklearn.metrics.make_scorer`: Make a scorer from a performance metric or loss function. """ def __init__(self, estimator, param_grid, scoring=None, fit_params=None, n_jobs=1, iid=True, refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs', error_score='raise', return_train_score=True): super(GridSearchCV, self).__init__( estimator=estimator, scoring=scoring, fit_params=fit_params, n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose, pre_dispatch=pre_dispatch, error_score=error_score, return_train_score=return_train_score) self.param_grid = param_grid _check_param_grid(param_grid) def _get_param_iterator(self): """Return ParameterGrid instance for the given param_grid""" return ParameterGrid(self.param_grid) class RandomizedSearchCV(BaseSearchCV): """Randomized search on hyper parameters. RandomizedSearchCV implements a "fit" and a "score" method. It also implements "predict", "predict_proba", "decision_function", "transform" and "inverse_transform" if they are implemented in the estimator used. The parameters of the estimator used to apply these methods are optimized by cross-validated search over parameter settings. In contrast to GridSearchCV, not all parameter values are tried out, but rather a fixed number of parameter settings is sampled from the specified distributions. The number of parameter settings that are tried is given by n_iter. If all parameters are presented as a list, sampling without replacement is performed. If at least one parameter is given as a distribution, sampling with replacement is used. It is highly recommended to use continuous distributions for continuous parameters. Read more in the :ref:`User Guide <randomized_parameter_search>`. Parameters ---------- estimator : estimator object. A object of that type is instantiated for each grid point. This is assumed to implement the scikit-learn estimator interface. Either estimator needs to provide a ``score`` function, or ``scoring`` must be passed. param_distributions : dict Dictionary with parameters names (string) as keys and distributions or lists of parameters to try. Distributions must provide a ``rvs`` method for sampling (such as those from scipy.stats.distributions). If a list is given, it is sampled uniformly. n_iter : int, default=10 Number of parameter settings that are sampled. n_iter trades off runtime vs quality of the solution. scoring : string, callable or None, default=None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. If ``None``, the ``score`` method of the estimator is used. n_jobs : int, default=1 Number of jobs to run in parallel. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. 
Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' iid : boolean, default=True If True, the data is assumed to be identically distributed across the folds, and the loss minimized is the total loss per sample, and not the mean loss across the folds. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross validation, - integer, to specify the number of folds in a `(Stratified)KFold`, - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. refit : boolean, default=True Refit the best estimator with the entire dataset. If "False", it is impossible to make predictions using this RandomizedSearchCV instance after fitting. verbose : integer Controls the verbosity: the higher, the more messages. random_state : int, RandomState instance or None, optional, default=None Pseudo random number generator state used for random uniform sampling from lists of possible values instead of scipy.stats distributions. If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. error_score : 'raise' (default) or numeric Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. return_train_score : boolean, default=True If ``'False'``, the ``cv_results_`` attribute will not include training scores. Attributes ---------- cv_results_ : dict of numpy (masked) ndarrays A dict with keys as column headers and values as columns, that can be imported into a pandas ``DataFrame``. 
For instance the below given table +--------------+-------------+-------------------+---+---------------+ | param_kernel | param_gamma | split0_test_score |...|rank_test_score| +==============+=============+===================+===+===============+ | 'rbf' | 0.1 | 0.8 |...| 2 | +--------------+-------------+-------------------+---+---------------+ | 'rbf' | 0.2 | 0.9 |...| 1 | +--------------+-------------+-------------------+---+---------------+ | 'rbf' | 0.3 | 0.7 |...| 1 | +--------------+-------------+-------------------+---+---------------+ will be represented by a ``cv_results_`` dict of:: { 'param_kernel' : masked_array(data = ['rbf', 'rbf', 'rbf'], mask = False), 'param_gamma' : masked_array(data = [0.1 0.2 0.3], mask = False), 'split0_test_score' : [0.8, 0.9, 0.7], 'split1_test_score' : [0.82, 0.5, 0.7], 'mean_test_score' : [0.81, 0.7, 0.7], 'std_test_score' : [0.02, 0.2, 0.], 'rank_test_score' : [3, 1, 1], 'split0_train_score' : [0.8, 0.9, 0.7], 'split1_train_score' : [0.82, 0.5, 0.7], 'mean_train_score' : [0.81, 0.7, 0.7], 'std_train_score' : [0.03, 0.03, 0.04], 'mean_fit_time' : [0.73, 0.63, 0.43, 0.49], 'std_fit_time' : [0.01, 0.02, 0.01, 0.01], 'mean_score_time' : [0.007, 0.06, 0.04, 0.04], 'std_score_time' : [0.001, 0.002, 0.003, 0.005], 'params' : [{'kernel' : 'rbf', 'gamma' : 0.1}, ...], } NOTE that the key ``'params'`` is used to store a list of parameter settings dict for all the parameter candidates. The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and ``std_score_time`` are all in seconds. best_estimator_ : estimator Estimator that was chosen by the search, i.e. estimator which gave highest score (or smallest loss if specified) on the left out data. Not available if refit=False. best_score_ : float Score of best_estimator on the left out data. best_params_ : dict Parameter setting that gave the best results on the hold out data. best_index_ : int The index (of the ``cv_results_`` arrays) which corresponds to the best candidate parameter setting. The dict at ``search.cv_results_['params'][search.best_index_]`` gives the parameter setting for the best model, that gives the highest mean score (``search.best_score_``). scorer_ : function Scorer function used on the held out data to choose the best parameters for the model. n_splits_ : int The number of cross-validation splits (folds/iterations). Notes ----- The parameters selected are those that maximize the score of the held-out data, according to the scoring parameter. If `n_jobs` was set to a value higher than one, the data is copied for each parameter setting(and not `n_jobs` times). This is done for efficiency reasons if individual jobs take very little time, but may raise errors if the dataset is large and not enough memory is available. A workaround in this case is to set `pre_dispatch`. Then, the memory is copied only `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 * n_jobs`. See Also -------- :class:`GridSearchCV`: Does exhaustive search over a grid of parameters. :class:`ParameterSampler`: A generator over parameter settins, constructed from param_distributions. 
""" def __init__(self, estimator, param_distributions, n_iter=10, scoring=None, fit_params=None, n_jobs=1, iid=True, refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs', random_state=None, error_score='raise', return_train_score=True): self.param_distributions = param_distributions self.n_iter = n_iter self.random_state = random_state super(RandomizedSearchCV, self).__init__( estimator=estimator, scoring=scoring, fit_params=fit_params, n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose, pre_dispatch=pre_dispatch, error_score=error_score, return_train_score=return_train_score) def _get_param_iterator(self): """Return ParameterSampler instance for the given distributions""" return ParameterSampler( self.param_distributions, self.n_iter, random_state=self.random_state)
hlin117/scikit-learn
sklearn/model_selection/_search.py
Python
bsd-3-clause
49,204
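The _search.py module above defines ParameterGrid, ParameterSampler, GridSearchCV and RandomizedSearchCV. The following is a minimal usage sketch, assuming a scikit-learn of roughly this vintage (0.18/0.19) plus scipy; the estimator, parameter ranges and cv value are illustrative only.

from scipy.stats import expon
from sklearn import datasets, svm
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV

iris = datasets.load_iris()

# Exhaustive search: every combination in param_grid is fitted once per CV split.
grid = GridSearchCV(svm.SVC(), {'kernel': ('linear', 'rbf'), 'C': [1, 10]}, cv=3)
grid.fit(iris.data, iris.target)
print(grid.best_params_, grid.best_score_)

# Randomized search: n_iter settings are drawn; plain lists are sampled
# uniformly, while objects exposing rvs() (scipy distributions) are sampled directly.
rand = RandomizedSearchCV(svm.SVC(), {'C': expon(scale=10), 'gamma': expon(scale=0.1)},
                          n_iter=8, cv=3, random_state=0)
rand.fit(iris.data, iris.target)
print(rand.best_params_)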
""" Some models for pulling data from Trac. Initially generated by inspectdb then modified heavily by hand, often by consulting http://trac.edgewall.org/wiki/TracDev/DatabaseSchema. These are far from perfect: many (most?) Trac tables have composite primary keys, which Django can't represent. This means a lot of built-in Django stuff (the admin, for example) won't work at all with these models. I haven't investigated just how deeply down thess failures go, but I suspect all sorts of things just won't work. However, they're Good Enough(tm) to let me pull some basic (read-only) data out, and that's all I really need. Some potential TODOs: * Add some convienance manager functions to deal with ticket_custom. Right now you can query with a join:: Ticket.objects.filter(custom_fields__name='ui_ux', custom_fields__value='1') Perhaps we might be able to get something like:: Ticket.objects.with_custom(ui_ux=True) Or even a custom .filter() that intercepts and figures it out? * Trac stores SVN repository revisions as '0000003744' grar. This makes querying awkward. There's probably some tricky manager manger that we could do here. * The whole Revision model will fall apart if we ever had a second repository to Trac. And a few notes on tables that're left out and why: * All the session and permission tables: they're just not needd. * Enum: I don't know what this is or what it's for. * NodeChange: Ditto. """ import datetime from django.db import models from django.utils.timezone import FixedOffset _epoc = datetime.datetime(1970, 1, 1, tzinfo=FixedOffset(0)) class time_property(object): """ Convert Trac timestamps into UTC datetimes. See http://trac.edgewall.org/browser//branches/0.12-stable/trac/util/datefmt.py for Trac's version of all this. Mine's something of a simplification. Like the rest of this module this is far from perfect -- no setters, for example! That's good enough for now. """ def __init__(self, fieldname): self.fieldname = fieldname def __get__(self, instance, owner): if instance is None: return self timestamp = getattr(instance, self.fieldname) return _epoc + datetime.timedelta(microseconds=timestamp) class Ticket(models.Model): id = models.IntegerField(primary_key=True) type = models.TextField() _time = models.BigIntegerField(db_column='time') time = time_property('_time') _changetime = models.BigIntegerField(db_column='changetime') changetime = time_property('_changetime') component = models.ForeignKey('Component', related_name='tickets', db_column='component') severity = models.TextField() owner = models.TextField() reporter = models.TextField() cc = models.TextField() version = models.ForeignKey('Version', related_name='tickets', db_column='version') milestone = models.ForeignKey('Milestone', related_name='tickets', db_column='milestone') priority = models.TextField() status = models.TextField() resolution = models.TextField() summary = models.TextField() description = models.TextField() keywords = models.TextField() class Meta(object): db_table = 'ticket' managed = False def __unicode__(self): return "#%s: %s" % (self.id, self.summary) def __init__(self, *args, **kwargs): super(Ticket, self).__init__(*args, **kwargs) # Munge custom fields onto this object. This sucks since it implies # querying will work (it won't!) and that writing will work (ditto). # Also notice that *nasty* mapping of Trac's "booleanish" things to # real booleans. This can fail in a bunch of ways, but not in our # particular install. 
for name, value in self.custom_fields.values_list('name', 'value'): if value in ('0', '1'): value = bool(int(value)) setattr(self, name, value) class TicketCustom(models.Model): ticket = models.ForeignKey(Ticket, related_name='custom_fields', db_column='ticket', primary_key=True) name = models.TextField() value = models.TextField() class Meta(object): db_table = 'ticket_custom' managed = False def __unicode__(self): return "%s: %s" % (self.name, self.value) class TicketChange(models.Model): ticket = models.ForeignKey(Ticket, related_name='changes', db_column='ticket', primary_key=True) author = models.TextField() field = models.TextField() oldvalue = models.TextField() newvalue = models.TextField() _time = models.BigIntegerField(db_column='time') time = time_property('_time') class Meta(object): db_table = 'ticket_change' managed = False ordering = ['_time'] def __unicode__(self): return "#%s: changed %s" % (self.ticket.id, self.field) class Component(models.Model): name = models.TextField(primary_key=True) owner = models.TextField() description = models.TextField() class Meta(object): db_table = 'component' managed = False def __unicode__(self): return self.name class Version(models.Model): name = models.TextField(primary_key=True) description = models.TextField() _time = models.BigIntegerField(db_column='time') time = time_property('_time') class Meta(object): db_table = 'version' managed = False def __unicode__(self): return self.name class Milestone(models.Model): name = models.TextField(primary_key=True) description = models.TextField() _due = models.BigIntegerField(db_column='_due') due = time_property('due') _completed = models.BigIntegerField(db_column='_completed') completed = time_property('completed') class Meta(object): db_table = 'milestone' managed = False def __unicode__(self): return self.name class SingleRepoRevisionManager(models.Manager): """ Forces Revision to only query against a single repo, thus making Revision.rev behave something like a primary key. """ def __init__(self, repo_id): self.repo_id = repo_id super(SingleRepoRevisionManager, self).__init__() def get_queryset(self): qs = super(SingleRepoRevisionManager, self).get_queryset() return qs.filter(repos=self.repo_id) SINGLE_REPO_ID = 1 class Revision(models.Model): repos = models.IntegerField() rev = models.TextField(primary_key=True) _time = models.BigIntegerField(db_column='time') time = time_property('time') author = models.TextField() message = models.TextField() objects = SingleRepoRevisionManager(repo_id=SINGLE_REPO_ID) class Meta(object): db_table = 'revision' managed = False def __unicode__(self): return '[%s] %s' % (self.rev, self.message.split('\n', 1)[0]) # The Wiki table uses a composite primary key (name, version). Since # Django doesn't support this, this model sits on top of a simple view. # CREATE VIEW "wiki_django_view" AS # SELECT "name" || '.' || "version" AS "django_id", * # FROM wiki; class Wiki(models.Model): django_id = models.TextField(primary_key=True) name = models.TextField() version = models.IntegerField() _time = models.BigIntegerField(db_column='time') time = time_property('time') author = models.TextField() ipnr = models.TextField() text = models.TextField() comment = models.TextField() readonly = models.IntegerField() class Meta: db_table = 'wiki_django_view' managed = False def __unicode__(self): return '%s (v%s)' % (self.name, self.version) # Same story as for Wiki: attachment's PK is (type, id, filename), so again # there's a simple view this is on top of. 
# CREATE VIEW "attachment_django_view" AS # SELECT "type" || '.' || "id" || '.' || "filename" AS "django_id", * # FROM attachment; class Attachment(models.Model): django_id = models.TextField(primary_key=True) type = models.TextField() id = models.TextField() filename = models.TextField() size = models.IntegerField() _time = models.BigIntegerField(db_column='time') time = time_property('time') description = models.TextField() author = models.TextField() ipnr = models.TextField() class Meta: db_table = 'attachment_django_view' managed = False def __unicode__(self): attached_to = ('#%s' % self.id) if self.type == 'ticket' else self.id return '%s (on %s)' % (self.filename, attached_to)
hassanabidpk/djangoproject.com
tracdb/models.py
Python
bsd-3-clause
8,724
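The module docstring above leaves Ticket.objects.with_custom(ui_ux=True) as a TODO. Here is a rough sketch of one way that manager could look, reusing the Ticket/TicketCustom relationship defined above; the boolean-to-'0'/'1' mapping is an assumption about the intended behaviour, and the code has not been run against a real Trac database.

from django.db import models

class TicketManager(models.Manager):
    # Sketch of the with_custom() convenience proposed in the docstring above.
    # Ticket would need to declare `objects = TicketManager()` for this to apply.

    def with_custom(self, **custom):
        qs = self.get_queryset()
        for name, value in custom.items():
            # Trac stores "booleanish" custom fields as '0'/'1' strings.
            if isinstance(value, bool):
                value = '1' if value else '0'
            # Each filter() call adds its own join against ticket_custom, so
            # several custom-field conditions can be combined.
            qs = qs.filter(custom_fields__name=name, custom_fields__value=str(value))
        return qs

# Hypothetical usage: Ticket.objects.with_custom(ui_ux=True)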
#!/usr/bin/python # # Copyright 2017 Steven Watanabe # # Distributed under the Boost Software License, Version 1.0. # (See accompanying file LICENSE_1_0.txt or copy at # http://www.boost.org/LICENSE_1_0.txt) from MockProgram import * command('g++', '-print-prog-name=ar', stdout=script('ar.py')) command('g++', '-print-prog-name=ranlib', stdout=script('ranlib.py')) if allow_properties("variant=debug", "link=shared", "threading=single", "runtime-link=shared"): command("g++", unordered("-O0", "-fno-inline", "-Wall", "-g", "-fPIC"), "-c", "-o", output_file("bin/gcc-gnu-4.8.3/debug/lib.o"), input_file(source="lib.cpp")) command("g++", "-o", output_file("bin/gcc-gnu-4.8.3/debug/libl1.so"), "-Wl,-h", "-Wl,libl1.so", "-shared", "-Wl,--start-group", input_file("bin/gcc-gnu-4.8.3/debug/lib.o"), "-Wl,-Bstatic", "-Wl,-Bdynamic", "-Wl,--end-group", unordered("-g", "-fPIC")) command("g++", unordered("-O0", "-fno-inline", "-Wall", "-g", "-fPIC"), "-c", "-o", output_file("bin/gcc-gnu-4.8.3/debug/main.o"), input_file(source="main.cpp")) command("g++", "-Wl,-rpath", arg("-Wl,", target_path("bin/gcc-gnu-4.8.3/debug/libl1.so")), "-Wl,-rpath-link", arg("-Wl,", target_path("bin/gcc-gnu-4.8.3/debug/libl1.so")), "-o", output_file("bin/gcc-gnu-4.8.3/debug/test"), "-Wl,--start-group", input_file("bin/gcc-gnu-4.8.3/debug/main.o"), input_file("bin/gcc-gnu-4.8.3/debug/libl1.so"), "-Wl,-Bstatic", "-Wl,-Bdynamic", "-Wl,--end-group", unordered("-g", "-fPIC")) if allow_properties("variant=release", "link=shared", "threading=single", "runtime-link=shared"): command('g++', unordered('-O3', '-finline-functions', '-Wno-inline', '-Wall', '-fPIC', '-DNDEBUG'), '-c', '-o', output_file('bin/gcc-gnu-4.8.3/release/lib.o'), input_file(source='lib.cpp')) command('g++', '-o', output_file('bin/gcc-gnu-4.8.3/release/libl1.so'), '-Wl,-h', '-Wl,libl1.so', '-shared', '-Wl,--start-group', input_file('bin/gcc-gnu-4.8.3/release/lib.o'), '-Wl,-Bstatic', '-Wl,-Bdynamic', '-Wl,--end-group', '-fPIC') command('g++', unordered('-O3', '-finline-functions', '-Wno-inline', '-Wall', '-fPIC', '-DNDEBUG'), '-c', '-o', output_file('bin/gcc-gnu-4.8.3/release/main.o'), input_file(source='main.cpp')) command('g++', '-Wl,-rpath', arg('-Wl,', target_path('bin/gcc-gnu-4.8.3/release/libl1.so')), '-Wl,-rpath-link', arg('-Wl,', target_path('bin/gcc-gnu-4.8.3/release/libl1.so')), '-o', output_file('bin/gcc-gnu-4.8.3/release/test'), '-Wl,--start-group', input_file('bin/gcc-gnu-4.8.3/release/main.o'), input_file('bin/gcc-gnu-4.8.3/release/libl1.so'), '-Wl,-Bstatic', '-Wl,-Bdynamic', '-Wl,--end-group', '-fPIC') if allow_properties("variant=debug", "link=shared", "threading=multi", "runtime-link=shared"): command('g++', unordered('-O0', '-fno-inline', '-Wall', '-g', '-pthread', '-fPIC'), '-c', '-o', output_file('bin/gcc-gnu-4.8.3/debug/threading-multi/lib.o'), input_file(source='lib.cpp')) command('g++', '-o', output_file('bin/gcc-gnu-4.8.3/debug/threading-multi/libl1.so'), '-Wl,-h', '-Wl,libl1.so', '-shared', '-Wl,--start-group', input_file('bin/gcc-gnu-4.8.3/debug/threading-multi/lib.o'), '-Wl,-Bstatic', '-Wl,-Bdynamic', '-lrt', '-Wl,--end-group', unordered('-g', '-pthread', '-fPIC')) command('g++', unordered('-O0', '-fno-inline', '-Wall', '-g', '-pthread', '-fPIC'), '-c', '-o', output_file('bin/gcc-gnu-4.8.3/debug/threading-multi/main.o'), input_file(source='main.cpp')) command('g++', '-Wl,-rpath', arg('-Wl,', target_path('bin/gcc-gnu-4.8.3/debug/threading-multi/libl1.so')), '-Wl,-rpath-link', arg('-Wl,', 
target_path('bin/gcc-gnu-4.8.3/debug/threading-multi/libl1.so')), '-o', output_file('bin/gcc-gnu-4.8.3/debug/threading-multi/test'), '-Wl,--start-group', input_file('bin/gcc-gnu-4.8.3/debug/threading-multi/main.o'), input_file('bin/gcc-gnu-4.8.3/debug/threading-multi/libl1.so'), '-Wl,-Bstatic', '-Wl,-Bdynamic', '-lrt', '-Wl,--end-group', unordered('-g', '-pthread', '-fPIC')) if allow_properties("variant=debug", "link=static", "threading=single", "runtime-link=shared"): command('g++', unordered('-O0', '-fno-inline', '-Wall', '-g'), '-c', '-o', output_file('bin/gcc-gnu-4.8.3/debug/link-static/lib.o'), input_file(source='lib.cpp')) command('g++', unordered('-O0', '-fno-inline', '-Wall', '-g'), '-c', '-o', output_file('bin/gcc-gnu-4.8.3/debug/link-static/main.o'), input_file(source='main.cpp')) command('g++', '-o', output_file('bin/gcc-gnu-4.8.3/debug/link-static/test'), '-Wl,--start-group', input_file('bin/gcc-gnu-4.8.3/debug/link-static/main.o'), input_file('bin/gcc-gnu-4.8.3/debug/link-static/libl1.a'), '-Wl,-Bstatic', '-Wl,-Bdynamic', '-Wl,--end-group', '-g') if allow_properties("variant=debug", "link=static", "threading=single", "runtime-link=static"): command('g++', unordered('-O0', '-fno-inline', '-Wall', '-g', '-c'), '-o', output_file('bin/gcc-gnu-4.8.3/debug/link-static/runtime-link-static/lib.o'), input_file(source='lib.cpp')) command('g++', unordered('-O0', '-fno-inline', '-Wall', '-g', '-c'), '-o', output_file('bin/gcc-gnu-4.8.3/debug/link-static/runtime-link-static/main.o'), input_file(source='main.cpp')) command('g++', '-o', output_file('bin/gcc-gnu-4.8.3/debug/link-static/runtime-link-static/test'), '-Wl,--start-group', input_file('bin/gcc-gnu-4.8.3/debug/link-static/runtime-link-static/main.o'), input_file('bin/gcc-gnu-4.8.3/debug/link-static/runtime-link-static/libl1.a'), '-Wl,--end-group', unordered('-g', '-static')) if allow_properties("variant=debug", "link=shared", "threading=single", "runtime-link=shared"): command("g++", unordered("-O0", "-fno-inline", "-Wall", "-g", "-fPIC", "-std=c++1y"), "-c", "-o", output_file("bin/gcc-gnu-4.8.3/debug/lib.o"), input_file(source="lib.cpp")) command("g++", "-o", output_file("bin/gcc-gnu-4.8.3/debug/libl1.so"), "-Wl,-h", "-Wl,libl1.so", "-shared", "-Wl,--start-group", input_file("bin/gcc-gnu-4.8.3/debug/lib.o"), "-Wl,-Bstatic", "-Wl,-Bdynamic", "-Wl,--end-group", unordered("-g", "-fPIC", "-std=c++1y")) command("g++", unordered("-O0", "-fno-inline", "-Wall", "-g", "-fPIC", "-std=c++1y"), "-c", "-o", output_file("bin/gcc-gnu-4.8.3/debug/main.o"), input_file(source="main.cpp")) command("g++", "-Wl,-rpath", arg("-Wl,", target_path("bin/gcc-gnu-4.8.3/debug/libl1.so")), "-Wl,-rpath-link", arg("-Wl,", target_path("bin/gcc-gnu-4.8.3/debug/libl1.so")), "-o", output_file("bin/gcc-gnu-4.8.3/debug/test"), "-Wl,--start-group", input_file("bin/gcc-gnu-4.8.3/debug/main.o"), input_file("bin/gcc-gnu-4.8.3/debug/libl1.so"), "-Wl,-Bstatic", "-Wl,-Bdynamic", "-Wl,--end-group", unordered("-g", "-fPIC", "-std=c++1y")) main()
nawawi/poedit
deps/boost/tools/build/test/toolset-mock/src/gcc-4.8.3-linux.py
Python
mit
6,602
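The mock-toolset script above is built entirely from the MockProgram helpers (allow_properties, command, unordered, output_file, input_file). Below is a hedged sketch of how one further property combination might be expressed with the same primitives; the flags and output paths are illustrative and are not part of the real test.

from MockProgram import *

# Illustrative only: a release/link=static build expressed with the same DSL.
if allow_properties("variant=release", "link=static", "threading=single", "runtime-link=shared"):
    command('g++', unordered('-O3', '-finline-functions', '-Wno-inline', '-Wall', '-DNDEBUG'),
            '-c', '-o', output_file('bin/gcc-gnu-4.8.3/release/link-static/lib.o'),
            input_file(source='lib.cpp'))

main()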
# # This code is part of Ansible, but is an independent component. # # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible # still belong to the author of the module, and may assign their own license # to the complete work. # # (c) 2017 Red Hat, Inc. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # import re import collections from ansible.module_utils.basic import env_fallback, return_values from ansible.module_utils.network_common import to_list, ComplexList from ansible.module_utils.connection import exec_command from ansible.module_utils.six import iteritems from ansible.module_utils.urls import fetch_url _DEVICE_CONNECTION = None nxos_argument_spec = { 'host': dict(), 'port': dict(type='int'), 'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])), 'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True), 'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE'])), 'use_ssl': dict(type='bool'), 'validate_certs': dict(type='bool'), 'timeout': dict(type='int'), 'provider': dict(type='dict'), 'transport': dict(choices=['cli', 'nxapi']) } def check_args(module, warnings): provider = module.params['provider'] or {} for key in nxos_argument_spec: if key not in ['provider', 'transport'] and module.params[key]: warnings.append('argument %s has been deprecated and will be ' 'removed in a future version' % key) if provider: for param in ('password',): if provider.get(param): module.no_log_values.update(return_values(provider[param])) def load_params(module): provider = module.params.get('provider') or dict() for key, value in iteritems(provider): if key in nxos_argument_spec: if module.params.get(key) is None and value is not None: module.params[key] = value def get_connection(module): global _DEVICE_CONNECTION if not _DEVICE_CONNECTION: load_params(module) transport = module.params['transport'] provider_transport = (module.params['provider'] or {}).get('transport') if 'nxapi' in (transport, provider_transport): conn = Nxapi(module) else: conn = Cli(module) _DEVICE_CONNECTION = conn return _DEVICE_CONNECTION class Cli: def __init__(self, module): self._module = module self._device_configs = {} def exec_command(self, command): if 
isinstance(command, dict): command = self._module.jsonify(command) return exec_command(self._module, command) def get_config(self, flags=[]): """Retrieves the current config from the device or cache """ cmd = 'show running-config ' cmd += ' '.join(flags) cmd = cmd.strip() try: return self._device_configs[cmd] except KeyError: rc, out, err = self.exec_command(cmd) if rc != 0: self._module.fail_json(msg=err) cfg = str(out).strip() self._device_configs[cmd] = cfg return cfg def run_commands(self, commands, check_rc=True): """Run list of commands on remote device and return results """ responses = list() for item in to_list(commands): if item['output'] == 'json' and not is_json(item['command']): cmd = '%s | json' % item['command'] elif item['output'] == 'text' and is_json(item['command']): cmd = item['command'].split('|')[0] else: cmd = item['command'] rc, out, err = self.exec_command(cmd) if check_rc and rc != 0: self._module.fail_json(msg=err) try: out = self._module.from_json(out) except ValueError: out = str(out).strip() responses.append(out) return responses def load_config(self, config): """Sends configuration commands to the remote device """ rc, out, err = self.exec_command('configure') if rc != 0: self._module.fail_json(msg='unable to enter configuration mode', output=err) for cmd in config: rc, out, err = self.exec_command(cmd) if rc != 0: self._module.fail_json(msg=err) self.exec_command('end') class Nxapi: OUTPUT_TO_COMMAND_TYPE = { 'text': 'cli_show_ascii', 'json': 'cli_show', 'bash': 'bash', 'config': 'cli_conf' } def __init__(self, module): self._module = module self._nxapi_auth = None self._device_configs = {} self._module.params['url_username'] = self._module.params['username'] self._module.params['url_password'] = self._module.params['password'] host = self._module.params['host'] port = self._module.params['port'] if self._module.params['use_ssl']: proto = 'https' port = port or 443 else: proto = 'http' port = port or 80 self._url = '%s://%s:%s/ins' % (proto, host, port) def _error(self, msg, **kwargs): self._nxapi_auth = None if 'url' not in kwargs: kwargs['url'] = self._url self._module.fail_json(msg=msg, **kwargs) def _request_builder(self, commands, output, version='1.0', chunk='0', sid=None): """Encodes a NXAPI JSON request message """ try: command_type = self.OUTPUT_TO_COMMAND_TYPE[output] except KeyError: msg = 'invalid format, received %s, expected one of %s' % \ (output, ','.join(self.OUTPUT_TO_COMMAND_TYPE.keys())) self._error(msg=msg) if isinstance(commands, (list, set, tuple)): commands = ' ;'.join(commands) msg = { 'version': version, 'type': command_type, 'chunk': chunk, 'sid': sid, 'input': commands, 'output_format': 'json' } return dict(ins_api=msg) def send_request(self, commands, output='text', check_status=True): # only 10 show commands can be encoded in each request # messages sent to the remote device if output != 'config': commands = collections.deque(to_list(commands)) stack = list() requests = list() while commands: stack.append(commands.popleft()) if len(stack) == 10: body = self._request_builder(stack, output) data = self._module.jsonify(body) requests.append(data) stack = list() if stack: body = self._request_builder(stack, output) data = self._module.jsonify(body) requests.append(data) else: body = self._request_builder(commands, 'config') requests = [self._module.jsonify(body)] headers = {'Content-Type': 'application/json'} result = list() timeout = self._module.params['timeout'] or 10 for req in requests: if self._nxapi_auth: headers['Cookie'] = 
self._nxapi_auth response, headers = fetch_url( self._module, self._url, data=req, headers=headers, timeout=timeout, method='POST' ) self._nxapi_auth = headers.get('set-cookie') if headers['status'] != 200: self._error(**headers) try: response = self._module.from_json(response.read()) except ValueError: self._module.fail_json(msg='unable to parse response') output = response['ins_api']['outputs']['output'] for item in to_list(output): if check_status and item['code'] != '200': self._error(output=output, **item) elif 'body' in item: result.append(item['body']) #else: # error in command but since check_status is disabled # silently drop it. #result.append(item['msg']) return result def get_config(self, flags=[]): """Retrieves the current config from the device or cache """ cmd = 'show running-config ' cmd += ' '.join(flags) cmd = cmd.strip() try: return self._device_configs[cmd] except KeyError: out = self.send_request(cmd) cfg = str(out[0]).strip() self._device_configs[cmd] = cfg return cfg def run_commands(self, commands, check_rc=True): """Run list of commands on remote device and return results """ output = None queue = list() responses = list() _send = lambda commands, output: self.send_request(commands, output, check_status=check_rc) for item in to_list(commands): if is_json(item['command']): item['command'] = str(item['command']).split('|')[0] item['output'] = 'json' if all((output == 'json', item['output'] == 'text')) or all((output =='text', item['output'] == 'json')): responses.extend(_send(queue, output)) queue = list() output = item['output'] or 'json' queue.append(item['command']) if queue: responses.extend(_send(queue, output)) return responses def load_config(self, commands): """Sends the ordered set of commands to the device """ commands = to_list(commands) self.send_request(commands, output='config') is_json = lambda x: str(x).endswith('| json') is_text = lambda x: not is_json def is_nxapi(module): transport = module.params['transport'] provider_transport = (module.params['provider'] or {}).get('transport') return 'nxapi' in (transport, provider_transport) def to_command(module, commands): if is_nxapi(module): default_output = 'json' else: default_output = 'text' transform = ComplexList(dict( command=dict(key=True), output=dict(default=default_output), prompt=dict(), answer=dict() ), module) commands = transform(to_list(commands)) for index, item in enumerate(commands): if is_json(item['command']): item['output'] = 'json' elif is_text(item['command']): item['output'] = 'text' return commands def get_config(module, flags=[]): conn = get_connection(module) return conn.get_config(flags) def run_commands(module, commands, check_rc=True): conn = get_connection(module) return conn.run_commands(to_command(module, commands), check_rc) def load_config(module, config): conn = get_connection(module) return conn.load_config(config)
dmitry-sobolev/ansible
lib/ansible/module_utils/nxos.py
Python
gpl-3.0
12,464
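A minimal sketch (not from the Ansible source) of how a platform module would consume the helpers in the entry above. The argument spec is trimmed to the keys that is_nxapi() and the transports read, the command and config values are invented for illustration, and it still needs an Ansible network connection at runtime.

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.nxos import get_config, load_config, run_commands


def main():
    # Hypothetical, trimmed argument spec: real nxos modules pull in the shared
    # provider/transport spec; only the keys read by is_nxapi() are kept here.
    argument_spec = dict(
        transport=dict(default='cli', choices=['cli', 'nxapi']),
        provider=dict(type='dict'),
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    # run_commands() routes plain strings through to_command(), so each command
    # gains an 'output' key ('json' over nxapi, 'text' over the CLI transport).
    version = run_commands(module, ['show version'])

    # get_config() caches per flag combination; load_config() pushes a list of
    # configuration lines through whichever transport is active.
    running = get_config(module, flags=['all'])
    load_config(module, ['hostname SKETCH-SWITCH'])

    module.exit_json(changed=True, stdout=version, config_size=len(running))


if __name__ == '__main__':
    main()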
from django.test import TestCase

from student.tests.factories import UserFactory, CourseEnrollmentFactory
from django_comment_common.models import Role, Permission
from factories import RoleFactory
import django_comment_client.utils as utils


class DictionaryTestCase(TestCase):
    def test_extract(self):
        d = {'cats': 'meow', 'dogs': 'woof'}
        k = ['cats', 'dogs', 'hamsters']
        expected = {'cats': 'meow', 'dogs': 'woof', 'hamsters': None}
        self.assertEqual(utils.extract(d, k), expected)

    def test_strip_none(self):
        d = {'cats': 'meow', 'dogs': 'woof', 'hamsters': None}
        expected = {'cats': 'meow', 'dogs': 'woof'}
        self.assertEqual(utils.strip_none(d), expected)

    def test_strip_blank(self):
        d = {'cats': 'meow', 'dogs': 'woof', 'hamsters': ' ', 'yetis': ''}
        expected = {'cats': 'meow', 'dogs': 'woof'}
        self.assertEqual(utils.strip_blank(d), expected)

    def test_merge_dict(self):
        d1 = {'cats': 'meow', 'dogs': 'woof'}
        d2 = {'lions': 'roar', 'ducks': 'quack'}
        expected = {'cats': 'meow', 'dogs': 'woof', 'lions': 'roar', 'ducks': 'quack'}
        self.assertEqual(utils.merge_dict(d1, d2), expected)


class AccessUtilsTestCase(TestCase):
    def setUp(self):
        self.course_id = 'edX/toy/2012_Fall'
        self.student_role = RoleFactory(name='Student', course_id=self.course_id)
        self.moderator_role = RoleFactory(name='Moderator', course_id=self.course_id)
        self.student1 = UserFactory(username='student', email='student@edx.org')
        self.student1_enrollment = CourseEnrollmentFactory(user=self.student1)
        self.student_role.users.add(self.student1)
        self.student2 = UserFactory(username='student2', email='student2@edx.org')
        self.student2_enrollment = CourseEnrollmentFactory(user=self.student2)
        self.moderator = UserFactory(username='moderator', email='staff@edx.org', is_staff=True)
        self.moderator_enrollment = CourseEnrollmentFactory(user=self.moderator)
        self.moderator_role.users.add(self.moderator)

    def test_get_role_ids(self):
        ret = utils.get_role_ids(self.course_id)
        expected = {u'Moderator': [3], u'Student': [1, 2], 'Staff': [3]}
        self.assertEqual(ret, expected)

    def test_has_forum_access(self):
        ret = utils.has_forum_access('student', self.course_id, 'Student')
        self.assertTrue(ret)

        ret = utils.has_forum_access('not_a_student', self.course_id, 'Student')
        self.assertFalse(ret)

        ret = utils.has_forum_access('student', self.course_id, 'NotARole')
        self.assertFalse(ret)
abhinavp13/IITBX-edx-platform-dev
lms/djangoapps/django_comment_client/tests/test_utils.py
Python
agpl-3.0
2,650
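The dictionary helpers exercised above have small, fully pinned-down semantics. The following standalone re-statement (a paraphrase of what the four tests assert, not the django_comment_client.utils implementation) runs without Django.

# Re-statement of the helper semantics pinned down by DictionaryTestCase;
# not the edx-platform source.
def extract(dic, keys):
    # Missing keys come back as None, as test_extract expects.
    return {k: dic.get(k) for k in keys}


def strip_none(dic):
    return {k: v for k, v in dic.items() if v is not None}


def strip_blank(dic):
    # Drop values that are empty or whitespace-only strings.
    return {k: v for k, v in dic.items()
            if not (isinstance(v, str) and not v.strip())}


def merge_dict(dic1, dic2):
    return dict(dic1, **dic2)


assert extract({'cats': 'meow'}, ['cats', 'dogs']) == {'cats': 'meow', 'dogs': None}
assert strip_none({'cats': 'meow', 'hamsters': None}) == {'cats': 'meow'}
assert strip_blank({'cats': 'meow', 'hamsters': ' ', 'yetis': ''}) == {'cats': 'meow'}
assert merge_dict({'cats': 'meow'}, {'lions': 'roar'}) == {'cats': 'meow', 'lions': 'roar'}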
#!/usr/bin/env python2 # Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Track test skips via launchpadlib API and raise alerts if a bug is fixed but a skip is still in the Tempest test code """ import argparse import logging import os import re try: from launchpadlib import launchpad except ImportError: launchpad = None LPCACHEDIR = os.path.expanduser('~/.launchpadlib/cache') def parse_args(): parser = argparse.ArgumentParser() parser.add_argument('test_path', help='Path of test dir') return parser.parse_args() def info(msg, *args, **kwargs): logging.info(msg, *args, **kwargs) def debug(msg, *args, **kwargs): logging.debug(msg, *args, **kwargs) def find_skips(start): """Find the entire list of skiped tests. Returns a list of tuples (method, bug) that represent test methods that have been decorated to skip because of a particular bug. """ results = {} debug("Searching in %s", start) for root, _dirs, files in os.walk(start): for name in files: if name.startswith('test_') and name.endswith('py'): path = os.path.join(root, name) debug("Searching in %s", path) temp_result = find_skips_in_file(path) for method_name, bug_no in temp_result: if results.get(bug_no): result_dict = results.get(bug_no) if result_dict.get(name): result_dict[name].append(method_name) else: result_dict[name] = [method_name] results[bug_no] = result_dict else: results[bug_no] = {name: [method_name]} return results def find_skips_in_file(path): """Return the skip tuples in a test file.""" BUG_RE = re.compile(r'\s*@.*skip_because\(bug=[\'"](\d+)[\'"]') DEF_RE = re.compile(r'\s*def (\w+)\(') bug_found = False results = [] lines = open(path, 'rb').readlines() for x, line in enumerate(lines): if not bug_found: res = BUG_RE.match(line) if res: bug_no = int(res.group(1)) debug("Found bug skip %s on line %d", bug_no, x + 1) bug_found = True else: res = DEF_RE.match(line) if res: method = res.group(1) debug("Found test method %s skips for bug %d", method, bug_no) results.append((method, bug_no)) bug_found = False return results def get_results(result_dict): results = [] for bug_no in result_dict.keys(): for method in result_dict[bug_no]: results.append((method, bug_no)) return results def main(): logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO) parser = parse_args() results = find_skips(parser.test_path) unique_bugs = sorted(set([bug for (method, bug) in get_results(results)])) unskips = [] duplicates = [] info("Total bug skips found: %d", len(results)) info("Total unique bugs causing skips: %d", len(unique_bugs)) if launchpad is not None: lp = launchpad.Launchpad.login_anonymously('grabbing bugs', 'production', LPCACHEDIR) else: print("To check the bug status launchpadlib should be installed") exit(1) for bug_no in unique_bugs: bug = lp.bugs[bug_no] duplicate = bug.duplicate_of_link if duplicate is not None: dup_id = duplicate.split('/')[-1] duplicates.append((bug_no, dup_id)) for task in bug.bug_tasks: info("Bug #%7s (%12s - %12s)", bug_no, 
task.importance, task.status) if task.status in ('Fix Released', 'Fix Committed'): unskips.append(bug_no) for bug_id, dup_id in duplicates: if bug_id not in unskips: dup_bug = lp.bugs[dup_id] for task in dup_bug.bug_tasks: info("Bug #%7s is a duplicate of Bug#%7s (%12s - %12s)", bug_id, dup_id, task.importance, task.status) if task.status in ('Fix Released', 'Fix Committed'): unskips.append(bug_id) unskips = sorted(set(unskips)) if unskips: print("The following bugs have been fixed and the corresponding skips") print("should be removed from the test cases:") print() for bug in unskips: message = " %7s in " % bug locations = ["%s" % x for x in results[bug].keys()] message += " and ".join(locations) print(message) if __name__ == '__main__': main()
JioCloud/tempest-lib
tempest_lib/cmd/skip_tracker.py
Python
apache-2.0
5,466
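find_skips_in_file() in the entry above pairs each skip_because decorator with the next def it sees. A self-contained demo of that regex pairing follows; the sample test source and bug number are invented for illustration.

# Demo of the BUG_RE/DEF_RE pairing used by find_skips_in_file(); the sample
# lines and bug number below are made up.
import re

BUG_RE = re.compile(r'\s*@.*skip_because\(bug=[\'"](\d+)[\'"]')
DEF_RE = re.compile(r'\s*def (\w+)\(')

sample_lines = [
    '    @test.skip_because(bug="1183436")\n',
    '    def test_list_servers(self):\n',
    '        pass\n',
]

results = []
bug_no = None
for line in sample_lines:
    if bug_no is None:
        match = BUG_RE.match(line)
        if match:
            bug_no = int(match.group(1))
    else:
        match = DEF_RE.match(line)
        if match:
            results.append((match.group(1), bug_no))
            bug_no = None

assert results == [('test_list_servers', 1183436)]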
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for unroll_batch_matmul."""
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function


@register_make_test_function()
def make_unroll_batch_matmul_tests(options):
  """Make a set of tests to test unroll_batch_matmul."""
  # The test cases below require broadcasting support (BatchMatMulV2 semantics),
  # which isn't supported as of this change.
  broadcast_shape_params = [
      # Simple broadcast.
      [(1, 2, 3), (3, 5), False, False],
      # Empty batch broadcast.
      [(2, 5, 3), (3, 7), False, False],
      # Single batch with non-empty batch broadcast.
      [(1, 5, 3), (4, 3, 7), False, False],
      # Broadcast both operands
      [(3, 1, 5, 3), (1, 4, 3, 7), False, False],
  ]

  test_parameters = [{
      "dtype": [tf.float32],
      "shape": [[(2, 2, 3), (2, 3, 2), False, False],
                [(2, 2, 3), (2, 3, 2), True, True],
                [(2, 2, 3), (2, 2, 3), False, True],
                [(2, 2, 3), (2, 2, 3), True, False],
                [(4, 2, 2, 3), (4, 2, 3, 2), False, False],
                [(4, 2, 2, 3), (4, 2, 3, 2), True, True],
                [(4, 2, 2, 3), (4, 2, 2, 3), False, True],
                [(4, 2, 2, 3), (4, 2, 2, 3), True, False]] +
               broadcast_shape_params,
  }]

  def build_graph(parameters):
    """Build the batch_matmul op testing graph."""

    def _build_graph():
      """Build the graph."""
      input_tensor1 = tf.compat.v1.placeholder(
          dtype=parameters["dtype"], shape=parameters["shape"][0])
      input_tensor2 = tf.compat.v1.placeholder(
          dtype=parameters["dtype"], shape=parameters["shape"][1])
      # Should be unrolled and replaced with fully_connected ops in the end.
      out = tf.matmul(
          input_tensor1,
          input_tensor2,
          transpose_a=parameters["shape"][2],
          transpose_b=parameters["shape"][3])
      return [input_tensor1, input_tensor2], [out]

    return _build_graph()

  def build_inputs(parameters, sess, inputs, outputs):
    input_value1 = create_tensor_data(
        parameters["dtype"], shape=parameters["shape"][0])
    input_value2 = create_tensor_data(
        parameters["dtype"], shape=parameters["shape"][1])
    return [input_value1, input_value2], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
tensorflow/tensorflow
tensorflow/lite/testing/op_tests/unroll_batch_matmul.py
Python
apache-2.0
3,288
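Each "shape" entry above is (lhs_shape, rhs_shape, transpose_a, transpose_b). The quick NumPy check below (not part of the TFLite zip-test harness) shows the output shapes those tuples imply, including a broadcasting case.

# Sanity check of the (lhs, rhs, transpose_a, transpose_b) convention above,
# using NumPy's matmul broadcasting rules; not part of the test harness.
import numpy as np


def batch_matmul_shape(lhs, rhs, transpose_a, transpose_b):
    a = np.zeros(lhs)
    b = np.zeros(rhs)
    if transpose_a:
        a = np.swapaxes(a, -1, -2)
    if transpose_b:
        b = np.swapaxes(b, -1, -2)
    return np.matmul(a, b).shape


assert batch_matmul_shape((2, 2, 3), (2, 3, 2), False, False) == (2, 2, 2)
assert batch_matmul_shape((4, 2, 2, 3), (4, 2, 2, 3), True, False) == (4, 2, 3, 3)
# One of the broadcast_shape_params cases: batch dims (3, 1) and (1, 4) broadcast to (3, 4).
assert batch_matmul_shape((3, 1, 5, 3), (1, 4, 3, 7), False, False) == (3, 4, 5, 7)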
# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Python layer for set_ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.contrib import framework from tensorflow.python.framework import dtypes from tensorflow.python.framework import load_library from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.platform import resource_loader _set_ops = load_library.load_op_library( resource_loader.get_path_to_datafile("_set_ops.so")) assert _set_ops, "Could not load _set_ops.so." _VALID_DTYPES = set([ dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64, dtypes.uint8, dtypes.uint16, dtypes.string]) def _size_shape(unused_op): """Shape function for SetSize op.""" return [tensor_shape.unknown_shape()] def set_size(a, validate_indices=True): """Compute number of unique elements along last dimension of `a`. Args: a: `SparseTensor`, with indices sorted in row-major order. validate_indices: Whether to validate the order and range of sparse indices in `a`. Returns: For `a` ranked `n`, this is a `Tensor` with rank `n-1`, and the same 1st `n-1` dimensions as `a`. Each value is the number of unique elements in the corresponding `[0...n-1]` dimension of `a`. Raises: TypeError: If `a` is an invalid types. """ a = framework.convert_to_tensor_or_sparse_tensor(a, name="a") if not isinstance(a, ops.SparseTensor): raise TypeError("Expected `SparseTensor`, got %s." % a) if a.values.dtype.base_dtype not in _VALID_DTYPES: raise TypeError("Invalid dtype %s." % a.values.dtype) # pylint: disable=protected-access return _set_ops.set_size(a.indices, a.values, a.shape, validate_indices) # TODO(ptucker): ops vs @ops? ops.NoGradient("SetSize") ops.RegisterShape("SetSize")(_size_shape) def _sparse_shape(op): """Shape function for `SparseTensor` result.""" num_rows = (op.inputs[0].get_shape()[0] if op.type in ("DenseToSparseOperation", "DenseToDenseOperation") else None) return [ tensor_shape.TensorShape([num_rows, 2]), tensor_shape.unknown_shape(1), tensor_shape.unknown_shape(1), ] def _set_operation(a, b, set_operation, validate_indices=True): """Compute set operation of elements in last dimension of `a` and `b`. All but the last dimension of `a` and `b` must match. Args: a: `Tensor` or `SparseTensor` of the same type as `b`. If sparse, indices must be sorted in row-major order. b: `Tensor` or `SparseTensor` of the same type as `a`. Must be `SparseTensor` if `a` is `SparseTensor`. If sparse, indices must be sorted in row-major order. set_operation: String indicating set operaiton. See SetOperationOp::SetOperationFromContext for valid values. validate_indices: Whether to validate the order and range of sparse indices in `a` and `b`. Returns: A `SparseTensor` with the same rank as `a` and `b`, and all but the last dimension the same. 
Elements along the last dimension contain the results of the set operation. Raises: TypeError: If inputs are invalid types. ValueError: If `a` is sparse and `b` is dense. """ a = framework.convert_to_tensor_or_sparse_tensor(a, name="a") if a.dtype.base_dtype not in _VALID_DTYPES: raise TypeError("'a' invalid dtype %s." % a.dtype) b = framework.convert_to_tensor_or_sparse_tensor(b, name="b") if b.dtype.base_dtype != a.dtype.base_dtype: raise TypeError("Types don't match, %s vs %s." % (a.dtype, b.dtype)) # pylint: disable=protected-access if isinstance(a, ops.SparseTensor): if isinstance(b, ops.SparseTensor): indices, values, shape = _set_ops.sparse_to_sparse_set_operation( a.indices, a.values, a.shape, b.indices, b.values, b.shape, set_operation, validate_indices) else: raise ValueError("Sparse,Dense is not supported, but Dense,Sparse is. " "Please flip the order of your inputs.") elif isinstance(b, ops.SparseTensor): indices, values, shape = _set_ops.dense_to_sparse_set_operation( a, b.indices, b.values, b.shape, set_operation, validate_indices) else: indices, values, shape = _set_ops.dense_to_dense_set_operation( a, b, set_operation, validate_indices) # pylint: enable=protected-access return ops.SparseTensor(indices, values, shape) def set_intersection(a, b, validate_indices=True): """Compute set intersection of elements in last dimension of `a` and `b`. All but the last dimension of `a` and `b` must match. Args: a: `Tensor` or `SparseTensor` of the same type as `b`. If sparse, indices must be sorted in row-major order. b: `Tensor` or `SparseTensor` of the same type as `a`. Must be `SparseTensor` if `a` is `SparseTensor`. If sparse, indices must be sorted in row-major order. validate_indices: Whether to validate the order and range of sparse indices in `a` and `b`. Returns: A `SparseTensor` with the same rank as `a` and `b`, and all but the last dimension the same. Elements along the last dimension contain the intersections. """ return _set_operation(a, b, "intersection", validate_indices) ops.NoGradient("SetIntersection") ops.RegisterShape("SetIntersection")(_sparse_shape) def set_difference(a, b, aminusb=True, validate_indices=True): """Compute set difference of elements in last dimension of `a` and `b`. All but the last dimension of `a` and `b` must match. Args: a: `Tensor` or `SparseTensor` of the same type as `b`. If sparse, indices must be sorted in row-major order. b: `Tensor` or `SparseTensor` of the same type as `a`. Must be `SparseTensor` if `a` is `SparseTensor`. If sparse, indices must be sorted in row-major order. aminusb: Whether to subtract `b` from `a`, vs vice versa. validate_indices: Whether to validate the order and range of sparse indices in `a` and `b`. Returns: A `SparseTensor` with the same rank as `a` and `b`, and all but the last dimension the same. Elements along the last dimension contain the differences. """ return _set_operation(a, b, "a-b" if aminusb else "b-a", validate_indices) ops.NoGradient("SetDifference") ops.RegisterShape("SetDifference")(_sparse_shape) def set_union(a, b, validate_indices=True): """Compute set union of elements in last dimension of `a` and `b`. All but the last dimension of `a` and `b` must match. Args: a: `Tensor` or `SparseTensor` of the same type as `b`. If sparse, indices must be sorted in row-major order. b: `Tensor` or `SparseTensor` of the same type as `a`. Must be `SparseTensor` if `a` is `SparseTensor`. If sparse, indices must be sorted in row-major order. 
validate_indices: Whether to validate the order and range of sparse indices in `a` and `b`. Returns: A `SparseTensor` with the same rank as `a` and `b`, and all but the last dimension the same. Elements along the last dimension contain the unions. """ return _set_operation(a, b, "union", validate_indices) ops.NoGradient("SetUnion") ops.RegisterShape("SetUnion")(_sparse_shape)
sachinpro/sachinpro.github.io
tensorflow/contrib/metrics/python/ops/set_ops.py
Python
apache-2.0
7,963
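The ops in the entry above treat the last dimension of each operand as a set and apply the operation row-wise. A plain-Python illustration of that semantic (it does not call the deprecated contrib kernels) is:

# Row-wise set semantics described in the docstrings above, shown with plain
# Python sets rather than the contrib ops.
a = [[1, 2, 3, 4],
     [5, 6, 7, 8]]
b = [[1, 2, 9, 9],
     [5, 5, 5, 5]]

intersection = [sorted(set(x) & set(y)) for x, y in zip(a, b)]
a_minus_b = [sorted(set(x) - set(y)) for x, y in zip(a, b)]
union = [sorted(set(x) | set(y)) for x, y in zip(a, b)]

assert intersection == [[1, 2], [5]]
assert a_minus_b == [[3, 4], [6, 7, 8]]
assert union == [[1, 2, 3, 4, 9], [5, 6, 7, 8]]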
# -------------------------------------------------------- # Faster R-CNN # Copyright (c) 2015 Microsoft # Licensed under The MIT License [see LICENSE for details] # Written by Ross Girshick # -------------------------------------------------------- import numpy as np import cv2 import matplotlib.pyplot as plt from ..utils.blob import im_list_to_blob from ..utils.timer import Timer # TODO: make fast_rcnn irrelevant # >>>> obsolete, because it depends on sth outside of this project from ..fast_rcnn.config import cfg # <<<< obsolete def _vis_proposals(im, dets, thresh=0.5): """Draw detected bounding boxes.""" inds = np.where(dets[:, -1] >= thresh)[0] if len(inds) == 0: return class_name = 'obj' im = im[:, :, (2, 1, 0)] fig, ax = plt.subplots(figsize=(12, 12)) ax.imshow(im, aspect='equal') for i in inds: bbox = dets[i, :4] score = dets[i, -1] ax.add_patch( plt.Rectangle((bbox[0], bbox[1]), bbox[2] - bbox[0], bbox[3] - bbox[1], fill=False, edgecolor='red', linewidth=3.5) ) ax.text(bbox[0], bbox[1] - 2, '{:s} {:.3f}'.format(class_name, score), bbox=dict(facecolor='blue', alpha=0.5), fontsize=14, color='white') ax.set_title(('{} detections with ' 'p({} | box) >= {:.1f}').format(class_name, class_name, thresh), fontsize=14) plt.axis('off') plt.tight_layout() plt.draw() def _get_image_blob(im): """Converts an image into a network input. Arguments: im (ndarray): a color image in BGR order Returns: blob (ndarray): a data blob holding an image pyramid im_scale_factors (list): list of image scales (relative to im) used in the image pyramid """ im_orig = im.astype(np.float32, copy=True) im_orig -= cfg.PIXEL_MEANS processed_ims = [] assert len(cfg.TEST.SCALES_BASE) == 1 im_scale = cfg.TRAIN.SCALES_BASE[0] im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR) im_info = np.hstack((im.shape[:2], im_scale))[np.newaxis, :] processed_ims.append(im) # Create a blob to hold the input images blob = im_list_to_blob(processed_ims) return blob, im_info def im_proposals(net, im): """Generate RPN proposals on a single image.""" blobs = {} blobs['data'], blobs['im_info'] = _get_image_blob(im) net.blobs['data'].reshape(*(blobs['data'].shape)) net.blobs['im_info'].reshape(*(blobs['im_info'].shape)) blobs_out = net.forward( data=blobs['data'].astype(np.float32, copy=False), im_info=blobs['im_info'].astype(np.float32, copy=False)) scale = blobs['im_info'][0, 2] boxes = blobs_out['rois'][:, 1:].copy() / scale scores = blobs_out['scores'].copy() return boxes, scores def imdb_proposals(net, imdb): """Generate RPN proposals on all images in an imdb.""" _t = Timer() imdb_boxes = [[] for _ in xrange(imdb.num_images)] for i in xrange(imdb.num_images): im = cv2.imread(imdb.image_path_at(i)) _t.tic() imdb_boxes[i], scores = im_proposals(net, im) _t.toc() print 'im_proposals: {:d}/{:d} {:.3f}s' \ .format(i + 1, imdb.num_images, _t.average_time) if 0: dets = np.hstack((imdb_boxes[i], scores)) # from IPython import embed; embed() _vis_proposals(im, dets[:3, :], thresh=0.9) plt.show() return imdb_boxes def imdb_proposals_det(net, imdb): """Generate RPN proposals on all images in an imdb.""" _t = Timer() imdb_boxes = [[] for _ in xrange(imdb.num_images)] for i in xrange(imdb.num_images): im = cv2.imread(imdb.image_path_at(i)) _t.tic() boxes, scores = im_proposals(net, im) _t.toc() print 'im_proposals: {:d}/{:d} {:.3f}s' \ .format(i + 1, imdb.num_images, _t.average_time) dets = np.hstack((boxes, scores)) imdb_boxes[i] = dets if 0: # from IPython import embed; embed() _vis_proposals(im, dets[:3, 
:], thresh=0.9) plt.show() return imdb_boxes
EricssonResearch/scott-eu
simulation-ros/src/turtlebot2i/turtlebot2i_msdn/msdn/faster_rcnn/rpn_msr/generate.py
Python
apache-2.0
4,403
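_get_image_blob() above packs a single im_info row as [resized_height, resized_width, scale]. The short NumPy check below (independent of Caffe and the cfg globals; the image size and scale are invented) confirms what that hstack produces.

# What np.hstack((im.shape[:2], im_scale))[np.newaxis, :] yields, checked
# without Caffe or cfg.
import numpy as np

im_scale = 0.5
resized = np.zeros((240, 320, 3), dtype=np.float32)  # stand-in for the resized BGR image

im_info = np.hstack((resized.shape[:2], im_scale))[np.newaxis, :]
assert im_info.shape == (1, 3)
assert im_info.tolist() == [[240.0, 320.0, 0.5]]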
''' ''' # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re Test.Summary = ''' Test tls origin session reuse ''' # Define default ATS ts1 = Test.MakeATSProcess("ts1", select_ports=True, enable_tls=True) ts2 = Test.MakeATSProcess("ts2", select_ports=True, enable_tls=True) ts3 = Test.MakeATSProcess("ts3", select_ports=True, enable_tls=True) ts4 = Test.MakeATSProcess("ts4", select_ports=True, enable_tls=True) server = Test.MakeOriginServer("server") # Add info the origin server responses request_header = { 'headers': 'GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n', 'timestamp': '1469733493.993', 'body': '' } response_header = { 'headers': 'HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n', 'timestamp': '1469733493.993', 'body': 'curl test' } server.addResponse("sessionlog.json", request_header, response_header) # add ssl materials like key, certificates for the server ts1.addSSLfile("ssl/server.pem") ts1.addSSLfile("ssl/server.key") ts2.addSSLfile("ssl/server.pem") ts2.addSSLfile("ssl/server.key") ts3.addSSLfile("ssl/server.pem") ts3.addSSLfile("ssl/server.key") ts4.addSSLfile("ssl/server.pem") ts4.addSSLfile("ssl/server.key") ts1.Disk.remap_config.AddLine( 'map / http://127.0.0.1:{0}'.format(server.Variables.Port) ) ts2.Disk.remap_config.AddLines([ 'map /reuse_session https://127.0.0.1:{0}'.format(ts1.Variables.ssl_port), 'map /remove_oldest https://127.0.1.1:{0}'.format(ts1.Variables.ssl_port) ]) ts3.Disk.remap_config.AddLine( 'map / http://127.0.0.1:{0}'.format(server.Variables.Port) ) ts4.Disk.remap_config.AddLine( 'map / https://127.0.0.1:{0}'.format(ts3.Variables.ssl_port) ) ts1.Disk.ssl_multicert_config.AddLine( 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' ) ts2.Disk.ssl_multicert_config.AddLine( 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' ) ts3.Disk.ssl_multicert_config.AddLine( 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' ) ts4.Disk.ssl_multicert_config.AddLine( 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' ) ts1.Disk.records_config.update({ 'proxy.config.http.cache.http': 0, 'proxy.config.ssl.server.cert.path': '{0}'.format(ts1.Variables.SSLDir), 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts1.Variables.SSLDir), 'proxy.config.exec_thread.autoconfig.scale': 1.0, 'proxy.config.ssl.session_cache': 2, 'proxy.config.ssl.session_cache.size': 4096, 'proxy.config.ssl.session_cache.num_buckets': 256, 'proxy.config.ssl.session_cache.skip_cache_on_bucket_contention': 0, 'proxy.config.ssl.session_cache.timeout': 0, 'proxy.config.ssl.session_cache.auto_clear': 1, 'proxy.config.ssl.server.session_ticket.enable': 1, 'proxy.config.ssl.origin_session_cache': 1, 'proxy.config.ssl.origin_session_cache.size': 1, 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', }) ts2.Disk.records_config.update({ 
'proxy.config.http.cache.http': 0, 'proxy.config.diags.debug.enabled': 1, 'proxy.config.diags.debug.tags': 'ssl.origin_session_cache', 'proxy.config.ssl.server.cert.path': '{0}'.format(ts2.Variables.SSLDir), 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts2.Variables.SSLDir), 'proxy.config.exec_thread.autoconfig.scale': 1.0, 'proxy.config.ssl.session_cache': 2, 'proxy.config.ssl.session_cache.size': 4096, 'proxy.config.ssl.session_cache.num_buckets': 256, 'proxy.config.ssl.session_cache.skip_cache_on_bucket_contention': 0, 'proxy.config.ssl.session_cache.timeout': 0, 'proxy.config.ssl.session_cache.auto_clear': 1, 'proxy.config.ssl.server.session_ticket.enable': 1, 'proxy.config.ssl.origin_session_cache': 1, 'proxy.config.ssl.origin_session_cache.size': 1, 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', }) ts3.Disk.records_config.update({ 'proxy.config.http.cache.http': 0, 'proxy.config.ssl.server.cert.path': '{0}'.format(ts3.Variables.SSLDir), 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts3.Variables.SSLDir), 'proxy.config.exec_thread.autoconfig.scale': 1.0, 'proxy.config.ssl.session_cache': 2, 'proxy.config.ssl.session_cache.size': 4096, 'proxy.config.ssl.session_cache.num_buckets': 256, 'proxy.config.ssl.session_cache.skip_cache_on_bucket_contention': 0, 'proxy.config.ssl.session_cache.timeout': 0, 'proxy.config.ssl.session_cache.auto_clear': 1, 'proxy.config.ssl.server.session_ticket.enable': 1, 'proxy.config.ssl.origin_session_cache': 1, 'proxy.config.ssl.origin_session_cache.size': 1, 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', }) ts4.Disk.records_config.update({ 'proxy.config.http.cache.http': 0, 'proxy.config.diags.debug.enabled': 1, 'proxy.config.diags.debug.tags': 'ssl.origin_session_cache', 'proxy.config.ssl.server.cert.path': '{0}'.format(ts4.Variables.SSLDir), 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts4.Variables.SSLDir), 'proxy.config.exec_thread.autoconfig.scale': 1.0, 'proxy.config.ssl.session_cache': 2, 'proxy.config.ssl.session_cache.size': 4096, 'proxy.config.ssl.session_cache.num_buckets': 256, 'proxy.config.ssl.session_cache.skip_cache_on_bucket_contention': 0, 'proxy.config.ssl.session_cache.timeout': 0, 'proxy.config.ssl.session_cache.auto_clear': 1, 'proxy.config.ssl.server.session_ticket.enable': 1, 'proxy.config.ssl.origin_session_cache': 0, 'proxy.config.ssl.origin_session_cache.size': 1, 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', }) tr = Test.AddTestRun('new session then reuse') tr.Processes.Default.Command = 'curl https://127.0.0.1:{0}/reuse_session -k && curl https://127.0.0.1:{0}/reuse_session -k'.format( ts2.Variables.ssl_port) tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.StartBefore(server) tr.Processes.Default.StartBefore(ts1) tr.Processes.Default.StartBefore(ts2) tr.Processes.Default.Streams.All = Testers.ContainsExpression('curl test', 'Making sure the basics still work') ts2.Streams.All = Testers.ContainsExpression('new session to origin', '') ts2.Streams.All += Testers.ContainsExpression('reused session to origin', '') tr.StillRunningAfter = server tr.StillRunningAfter += ts1 tr.StillRunningAfter += ts2 tr = Test.AddTestRun('remove oldest session, new session then reuse') tr.Processes.Default.Command = 'curl https://127.0.0.1:{0}/remove_oldest -k && curl https://127.0.0.1:{0}/remove_oldest -k'.format( ts2.Variables.ssl_port) tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.Streams.All = Testers.ContainsExpression('curl test', 'Making sure the 
basics still work') ts2.Streams.All = Testers.ContainsExpression('remove oldest session', '') ts2.Streams.All += Testers.ContainsExpression('new session to origin', '') ts2.Streams.All += Testers.ContainsExpression('reused session to origin', '') tr.StillRunningAfter = server tr = Test.AddTestRun('disable origin session reuse, reuse should fail') tr.Processes.Default.Command = 'curl https://127.0.0.1:{0} -k && curl https://127.0.0.1:{0} -k'.format(ts4.Variables.ssl_port) tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.StartBefore(ts3) tr.Processes.Default.StartBefore(ts4) tr.Processes.Default.Streams.All = Testers.ContainsExpression('curl test', 'Making sure the basics still work') ts4.Streams.All = Testers.ContainsExpression('new session to origin', '') ts4.Streams.All += Testers.ExcludesExpression('reused session to origin', '')
duke8253/trafficserver
tests/gold_tests/tls/tls_origin_session_reuse.test.py
Python
apache-2.0
8,300
'''
'''
#  Licensed to the Apache Software Foundation (ASF) under one
#  or more contributor license agreements.  See the NOTICE file
#  distributed with this work for additional information
#  regarding copyright ownership.  The ASF licenses this file
#  to you under the Apache License, Version 2.0 (the
#  "License"); you may not use this file except in compliance
#  with the License.  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.

import os

Test.Summary = '''
Test for using of runroot from traffic_layout.
'''
Test.ContinueOnFail = True
Test.SkipUnless(Test.Variables.BINDIR.startswith(Test.Variables.PREFIX),
                "need to guarantee bin path starts with prefix for runroot")

# create two runroot for testing
path = os.path.join(Test.RunDirectory, "runroot")
tr = Test.AddTestRun()
tr.Processes.Default.Command = "$ATS_BIN/traffic_layout init --path " + path
f = tr.Disk.File(os.path.join(path, "runroot.yaml"))
f.Exists = True

path2 = os.path.join(Test.RunDirectory, "runroot2")
tr = Test.AddTestRun()
tr.Processes.Default.Command = "$ATS_BIN/traffic_layout init --path " + path2
f = tr.Disk.File(os.path.join(path2, "runroot.yaml"))
f.Exists = True

# 1. --run-root use path cmd
tr = Test.AddTestRun("use runroot via commandline")
tr.Processes.Default.Command = "$ATS_BIN/traffic_layout info --run-root=" + path
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.All = Testers.ContainsExpression("PREFIX: " + path, "commandline runroot path")

# 2. use cwd as runroot
tr = Test.AddTestRun("use runroot via cwd")
tr.Processes.Default.Command = "cd " + path + ";" + "$ATS_BIN/traffic_layout info"
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.All = Testers.ContainsExpression("PREFIX: " + path, "cwd runroot path")

# 4. use path directly bin
bin_path = Test.Variables.BINDIR[Test.Variables.BINDIR.find(Test.Variables.PREFIX) + len(Test.Variables.PREFIX) + 1:]
tr = Test.AddTestRun("use runroot via bin executable")
tr.Processes.Default.Command = os.path.join(path, os.path.join(bin_path, "traffic_layout") + " info")
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.All = Testers.ContainsExpression("PREFIX: " + path, "bin path")

# 3. TS_RUNROOT ENV variable
tr = Test.AddTestRun("use runroot via TS_RUNROOT")
tr.Processes.Default.Env["TS_RUNROOT"] = path2
tr.Processes.Default.Command = "$ATS_BIN/traffic_layout info"
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.All = Testers.ContainsExpression("PREFIX: " + path2, "$TS_RUNROOT Env path")
duke8253/trafficserver
tests/gold_tests/runroot/runroot_use.test.py
Python
apache-2.0
2,902
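Outside the autest framework, the four runroot lookups the test above drives reduce to four invocations of traffic_layout. A hedged subprocess sketch follows; it assumes traffic_layout is on PATH, that the two runroots already exist under /tmp, and that the layout's bin directory is literally named bin.

# Hedged sketch of the four lookups exercised above; the paths and the PATH
# assumption are illustrative, not taken from the test environment.
import os
import subprocess

subprocess.run(["traffic_layout", "info", "--run-root=/tmp/runroot"], check=True)   # 1. --run-root flag
subprocess.run(["traffic_layout", "info"], cwd="/tmp/runroot", check=True)          # 2. current working directory
subprocess.run(["/tmp/runroot/bin/traffic_layout", "info"], check=True)             # 3. executable inside the runroot
subprocess.run(["traffic_layout", "info"],
               env=dict(os.environ, TS_RUNROOT="/tmp/runroot2"), check=True)        # 4. TS_RUNROOT environment variable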
# Copyright (c) 2014, Fundacion Dr. Manuel Sadosky # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import unittest from barf.arch import ARCH_X86_MODE_32 from barf.arch.x86.x86base import X86ArchitectureInformation from barf.arch.x86.x86parser import X86Parser from barf.arch.x86.x86translator import X86Translator from barf.core.reil import ReilCpuInvalidAddressError from barf.core.reil import ReilCpuZeroDivisionError from barf.core.reil import ReilEmulator from barf.core.reil import ReilMemory from barf.core.reil import ReilMemoryEx from barf.core.reil import ReilMnemonic from barf.core.reil import ReilParser from barf.core.reil import ReilRegisterOperand from barf.core.reil import ReilContainer from barf.core.reil import ReilSequence class ReilMemoryTests(unittest.TestCase): def test_write_read_byte_1(self): address_size = 32 memory = ReilMemory(address_size) addr = 0x00001000 write_val = 0xdeadbeef memory.write(addr, 32 / 8, write_val) read_val = memory.read(addr, 32 / 8) self.assertEqual(write_val, read_val) def test_write_read_byte_2(self): address_size = 32 memory = ReilMemory(address_size) addr = 0x00001000 write_val = 0xdeadbeef memory.write(addr, 32 / 8, write_val) read_val = memory.read(addr, 32 / 8) self.assertEqual(write_val, read_val) addr = 0x00001001 write_val = 0x1234 memory.write(addr, 16 / 8, write_val) read_val = memory.read(addr, 16 / 8) self.assertEqual(write_val, read_val) def test_write_read_byte_3(self): address_size = 32 memory = ReilMemory(address_size) addr = 0x00001000 write_val = 0xdeadbeefcafecafe memory.write(addr, 64 / 8, write_val) read_val = memory.read(addr, 64 / 8) self.assertEqual(write_val, read_val) def test_write_read_byte_4(self): address_size = 32 memory = ReilMemoryEx(address_size) addr0 = 0x00001000 write_val = 0xdeadbeef memory.write(addr0, 32 / 8, write_val) read_val = memory.read(addr0, 32 / 8) self.assertEqual(write_val, read_val) addr1 = 0x00004000 write_val = 0xdeadbeef memory.write(addr1, 32 / 8, write_val) read_val = memory.read(addr1, 32 / 8) self.assertEqual(write_val, read_val) addrs = memory.read_inverse(0xdeadbeef, 32 / 8) self.assertEqual(addr0, addrs[0]) self.assertEqual(addr1, addrs[1]) class ReilEmulatorTests(unittest.TestCase): def setUp(self): self._arch_info = X86ArchitectureInformation(ARCH_X86_MODE_32) 
self._emulator = ReilEmulator(self._arch_info) self._asm_parser = X86Parser() self._reil_parser = ReilParser() self._translator = X86Translator() def test_add(self): asm_instrs = self._asm_parser.parse("add eax, ebx") self.__set_address(0xdeadbeef, [asm_instrs]) reil_instrs = self._translator.translate(asm_instrs) regs_initial = { "eax" : 0x1, "ebx" : 0x2, } regs_final, _ = self._emulator.execute_lite( reil_instrs, context=regs_initial ) self.assertEqual(regs_final["eax"], 0x3) self.assertEqual(regs_final["ebx"], 0x2) def test_loop(self): # 0x08048060 : b8 00 00 00 00 mov eax,0x0 # 0x08048065 : bb 0a 00 00 00 mov ebx,0xa # 0x0804806a : 83 c0 01 add eax,0x1 # 0x0804806d : 83 eb 01 sub ebx,0x1 # 0x08048070 : 83 fb 00 cmp ebx,0x0 # 0x08048073 : 75 f5 jne 0x0804806a asm_instrs_str = [(0x08048060, "mov eax,0x0", 5)] asm_instrs_str += [(0x08048065, "mov ebx,0xa", 5)] asm_instrs_str += [(0x0804806a, "add eax,0x1", 3)] asm_instrs_str += [(0x0804806d, "sub ebx,0x1", 3)] asm_instrs_str += [(0x08048070, "cmp ebx,0x0", 3)] asm_instrs_str += [(0x08048073, "jne 0x0804806a", 2)] asm_instrs = [] for addr, asm, size in asm_instrs_str: asm_instr = self._asm_parser.parse(asm) asm_instr.address = addr asm_instr.size = size asm_instrs.append(asm_instr) reil_instrs = self.__translate(asm_instrs) regs_final, _ = self._emulator.execute( reil_instrs, start=0x08048060 << 8 ) self.assertEqual(regs_final["eax"], 0xa) self.assertEqual(regs_final["ebx"], 0x0) def test_mov(self): asm_instrs = [self._asm_parser.parse("mov eax, 0xdeadbeef")] asm_instrs += [self._asm_parser.parse("mov al, 0x12")] asm_instrs += [self._asm_parser.parse("mov ah, 0x34")] self.__set_address(0xdeadbeef, asm_instrs) reil_instrs = self._translator.translate(asm_instrs[0]) reil_instrs += self._translator.translate(asm_instrs[1]) reil_instrs += self._translator.translate(asm_instrs[2]) regs_initial = { "eax" : 0xffffffff, } regs_final, _ = self._emulator.execute_lite(reil_instrs, context=regs_initial) self.assertEqual(regs_final["eax"], 0xdead3412) def test_pre_hanlder(self): def pre_hanlder(emulator, instruction, parameter): paramter.append(True) asm = ["mov eax, ebx"] x86_instrs = map(self._asm_parser.parse, asm) self.__set_address(0xdeadbeef, x86_instrs) reil_instrs = map(self._translator.translate, x86_instrs) paramter = [] self._emulator.set_instruction_pre_handler(pre_hanlder, paramter) reil_ctx_out, reil_mem_out = self._emulator.execute_lite( reil_instrs[0] ) self.assertTrue(len(paramter) > 0) def test_post_hanlder(self): def post_hanlder(emulator, instruction, parameter): paramter.append(True) asm = ["mov eax, ebx"] x86_instrs = map(self._asm_parser.parse, asm) self.__set_address(0xdeadbeef, x86_instrs) reil_instrs = map(self._translator.translate, x86_instrs) paramter = [] self._emulator.set_instruction_post_handler(post_hanlder, paramter) reil_ctx_out, reil_mem_out = self._emulator.execute_lite( reil_instrs[0] ) self.assertTrue(len(paramter) > 0) def test_zero_division_error_1(self): asm_instrs = [self._asm_parser.parse("div ebx")] self.__set_address(0xdeadbeef, asm_instrs) reil_instrs = self._translator.translate(asm_instrs[0]) regs_initial = { "eax" : 0x2, "edx" : 0x2, "ebx" : 0x0, } self.assertRaises(ReilCpuZeroDivisionError, self._emulator.execute_lite, reil_instrs, context=regs_initial) def test_zero_division_error_2(self): instrs = ["mod [DWORD eax, DWORD ebx, DWORD t0]"] reil_instrs = self._reil_parser.parse(instrs) regs_initial = { "eax" : 0x2, "ebx" : 0x0, } self.assertRaises(ReilCpuZeroDivisionError, self._emulator.execute_lite, 
reil_instrs, context=regs_initial) def test_invalid_address_error_1(self): asm_instrs = [self._asm_parser.parse("jmp eax")] self.__set_address(0xdeadbeef, asm_instrs) reil_instrs = self.__translate(asm_instrs) regs_initial = { "eax" : 0xffffffff, } self.assertRaises(ReilCpuInvalidAddressError, self._emulator.execute, reil_instrs, start=0xdeadbeef << 8, registers=regs_initial) def test_invalid_address_error_2(self): asm_instrs = [self._asm_parser.parse("mov eax, 0xdeadbeef")] self.__set_address(0xdeadbeef, asm_instrs) reil_instrs = self.__translate(asm_instrs) regs_initial = { "eax" : 0xffffffff, } self.assertRaises(ReilCpuInvalidAddressError, self._emulator.execute, reil_instrs, start=0xdeadbef0 << 8, registers=regs_initial) # Auxiliary methods # ======================================================================== # def __set_address(self, address, asm_instrs): addr = address for asm_instr in asm_instrs: asm_instr.address = addr addr += 1 def __translate(self, asm_instrs): instr_container = ReilContainer() asm_instr_last = None instr_seq_prev = None for asm_instr in asm_instrs: instr_seq = ReilSequence() for reil_instr in self._translator.translate(asm_instr): instr_seq.append(reil_instr) if instr_seq_prev: instr_seq_prev.next_sequence_address = instr_seq.address instr_container.add(instr_seq) instr_seq_prev = instr_seq if instr_seq_prev: if asm_instr_last: instr_seq_prev.next_sequence_address = (asm_instr_last.address + asm_instr_last.size) << 8 # instr_container.dump() return instr_container class ReilEmulatorTaintTests(unittest.TestCase): def setUp(self): self._arch_info = X86ArchitectureInformation(ARCH_X86_MODE_32) self._emulator = ReilEmulator(self._arch_info) self._asm_parser = X86Parser() self._translator = X86Translator() def test_arithmetic(self): asm_instrs = self._asm_parser.parse("add eax, ebx") self.__set_address(0xdeadbeef, [asm_instrs]) reil_instrs = self._translator.translate(asm_instrs) regs_initial = { "eax" : 0x1, "ebx" : 0x2, } self._emulator.set_register_taint("ebx", True) regs_final, _ = self._emulator.execute_lite( reil_instrs, context=regs_initial ) self.assertEqual(self._emulator.get_register_taint("eax"), True) def test_store_mem_1(self): asm_instrs = self._asm_parser.parse("mov [eax], ebx") self.__set_address(0xdeadbeef, [asm_instrs]) reil_instrs = self._translator.translate(asm_instrs) regs_initial = { "eax" : 0xcafecafe, "ebx" : 0x2, } self._emulator.set_register_taint("ebx", True) regs_final, _ = self._emulator.execute_lite( reil_instrs, context=regs_initial ) self.assertEqual(self._emulator.get_memory_taint(regs_initial['eax'], 4), True) def test_store_mem_2(self): asm_instrs = self._asm_parser.parse("mov [eax], ebx") self.__set_address(0xdeadbeef, [asm_instrs]) reil_instrs = self._translator.translate(asm_instrs) regs_initial = { "eax" : 0xcafecafe, "ebx" : 0x2, } self._emulator.set_register_taint("eax", True) regs_final, _ = self._emulator.execute_lite( reil_instrs, context=regs_initial ) self.assertEqual(self._emulator.get_memory_taint(regs_initial['eax'], 4), False) def test_load_mem_1(self): asm_instrs = self._asm_parser.parse("mov eax, [ebx]") self.__set_address(0xdeadbeef, [asm_instrs]) reil_instrs = self._translator.translate(asm_instrs) regs_initial = { "eax" : 0x2, "ebx" : 0xcafecafe, } self._emulator.set_memory_taint(regs_initial["ebx"], 4, True) regs_final, _ = self._emulator.execute_lite( reil_instrs, context=regs_initial ) self.assertEqual(self._emulator.get_register_taint("eax"), True) def __set_address(self, address, asm_instrs): addr = 
address for asm_instr in asm_instrs: asm_instr.address = addr addr += 1 def main(): unittest.main() if __name__ == '__main__': main()
gitttt/barf-project
barf/tests/core/reil/test_reilemulator.py
Python
bsd-2-clause
13,141
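The setUp()/test_add() pattern in the entry above distills to a short translate-and-execute flow. The sketch below uses only classes already imported by that test module and mirrors the test's register expectations; it assumes the same BARF version the tests target.

# Distilled from setUp() and test_add() above; assumes the BARF APIs behave as
# exercised by this test module.
from barf.arch import ARCH_X86_MODE_32
from barf.arch.x86.x86base import X86ArchitectureInformation
from barf.arch.x86.x86parser import X86Parser
from barf.arch.x86.x86translator import X86Translator
from barf.core.reil import ReilEmulator

arch_info = X86ArchitectureInformation(ARCH_X86_MODE_32)
emulator = ReilEmulator(arch_info)

asm_instr = X86Parser().parse("add eax, ebx")
asm_instr.address = 0xdeadbeef          # the tests assign an address before translating
reil_instrs = X86Translator().translate(asm_instr)

regs, _ = emulator.execute_lite(reil_instrs, context={"eax": 0x1, "ebx": 0x2})
assert regs["eax"] == 0x3 and regs["ebx"] == 0x2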
# -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) >>> os.chdir(datadir) """ from __future__ import print_function, division, unicode_literals, absolute_import from builtins import str, open, range import os.path as op import pickle import numpy as np import networkx as nx import scipy.io as sio from ... import logging from ...utils.filemanip import split_filename from ...utils.misc import package_check from ..base import (BaseInterface, BaseInterfaceInputSpec, traits, File, TraitedSpec, InputMultiPath, OutputMultiPath, isdefined) iflogger = logging.getLogger('interface') have_cmp = True try: package_check('cmp') except Exception as e: have_cmp = False else: import cmp def read_unknown_ntwk(ntwk): if not isinstance(ntwk, nx.classes.graph.Graph): path, name, ext = split_filename(ntwk) if ext == '.pck': ntwk = nx.read_gpickle(ntwk) elif ext == '.graphml': ntwk = nx.read_graphml(ntwk) return ntwk def remove_all_edges(ntwk): ntwktmp = ntwk.copy() edges = ntwktmp.edges_iter() for edge in edges: ntwk.remove_edge(edge[0], edge[1]) return ntwk def fix_keys_for_gexf(orig): """ GEXF Networks can be read in Gephi, however, the keys for the node and edge IDs must be converted to strings """ import networkx as nx ntwk = nx.Graph() nodes = orig.nodes_iter() edges = orig.edges_iter() for node in nodes: newnodedata = {} newnodedata.update(orig.node[node]) if 'dn_fsname' in orig.node[node]: newnodedata['label'] = orig.node[node]['dn_fsname'] ntwk.add_node(str(node), newnodedata) if 'dn_position' in ntwk.node[str(node)] and 'dn_position' in newnodedata: ntwk.node[str(node)]['dn_position'] = str(newnodedata['dn_position']) for edge in edges: data = {} data = orig.edge[edge[0]][edge[1]] ntwk.add_edge(str(edge[0]), str(edge[1]), data) if 'fiber_length_mean' in ntwk.edge[str(edge[0])][str(edge[1])]: ntwk.edge[str(edge[0])][str(edge[1])]['fiber_length_mean'] = str(data['fiber_length_mean']) if 'fiber_length_std' in ntwk.edge[str(edge[0])][str(edge[1])]: ntwk.edge[str(edge[0])][str(edge[1])]['fiber_length_std'] = str(data['fiber_length_std']) if 'number_of_fibers' in ntwk.edge[str(edge[0])][str(edge[1])]: ntwk.edge[str(edge[0])][str(edge[1])]['number_of_fibers'] = str(data['number_of_fibers']) if 'value' in ntwk.edge[str(edge[0])][str(edge[1])]: ntwk.edge[str(edge[0])][str(edge[1])]['value'] = str(data['value']) return ntwk def add_dicts_by_key(in_dict1, in_dict2): """ Combines two dictionaries and adds the values for those keys that are shared """ both = {} for key1 in in_dict1: for key2 in in_dict2: if key1 == key2: both[key1] = in_dict1[key1] + in_dict2[key2] return both def average_networks(in_files, ntwk_res_file, group_id): """ Sums the edges of input networks and divides by the number of networks Writes the average network as .pck and .gexf and returns the name of the written networks """ import networkx as nx import os.path as op iflogger.info(("Creating average network for group: " "{grp}").format(grp=group_id)) matlab_network_list = [] if len(in_files) == 1: avg_ntwk = read_unknown_ntwk(in_files[0]) else: count_to_keep_edge = np.round(len(in_files) / 2.0) iflogger.info(("Number of networks: {L}, an edge must occur in at " "least {c} to remain in the " "average network").format(L=len(in_files), 
c=count_to_keep_edge)) ntwk_res_file = read_unknown_ntwk(ntwk_res_file) iflogger.info(("{n} Nodes found in network resolution " "file").format(n=ntwk_res_file.number_of_nodes())) ntwk = remove_all_edges(ntwk_res_file) counting_ntwk = ntwk.copy() # Sums all the relevant variables for index, subject in enumerate(in_files): tmp = nx.read_gpickle(subject) iflogger.info(('File {s} has {n} ' 'edges').format(s=subject, n=tmp.number_of_edges())) edges = tmp.edges_iter() for edge in edges: data = {} data = tmp.edge[edge[0]][edge[1]] data['count'] = 1 if ntwk.has_edge(edge[0], edge[1]): current = {} current = ntwk.edge[edge[0]][edge[1]] data = add_dicts_by_key(current, data) ntwk.add_edge(edge[0], edge[1], data) nodes = tmp.nodes_iter() for node in nodes: data = {} data = ntwk.node[node] if 'value' in tmp.node[node]: data['value'] = data['value'] + tmp.node[node]['value'] ntwk.add_node(node, data) # Divides each value by the number of files nodes = ntwk.nodes_iter() edges = ntwk.edges_iter() iflogger.info(('Total network has {n} ' 'edges').format(n=ntwk.number_of_edges())) avg_ntwk = nx.Graph() newdata = {} for node in nodes: data = ntwk.node[node] newdata = data if 'value' in data: newdata['value'] = data['value'] / len(in_files) ntwk.node[node]['value'] = newdata avg_ntwk.add_node(node, newdata) edge_dict = {} edge_dict['count'] = np.zeros((avg_ntwk.number_of_nodes(), avg_ntwk.number_of_nodes())) for edge in edges: data = ntwk.edge[edge[0]][edge[1]] if ntwk.edge[edge[0]][edge[1]]['count'] >= count_to_keep_edge: for key in list(data.keys()): if not key == 'count': data[key] = data[key] / len(in_files) ntwk.edge[edge[0]][edge[1]] = data avg_ntwk.add_edge(edge[0], edge[1], data) edge_dict['count'][edge[0] - 1][edge[1] - 1] = ntwk.edge[edge[0]][edge[1]]['count'] iflogger.info('After thresholding, the average network has has {n} edges'.format(n=avg_ntwk.number_of_edges())) avg_edges = avg_ntwk.edges_iter() for edge in avg_edges: data = avg_ntwk.edge[edge[0]][edge[1]] for key in list(data.keys()): if not key == 'count': edge_dict[key] = np.zeros((avg_ntwk.number_of_nodes(), avg_ntwk.number_of_nodes())) edge_dict[key][edge[0] - 1][edge[1] - 1] = data[key] for key in list(edge_dict.keys()): tmp = {} network_name = group_id + '_' + key + '_average.mat' matlab_network_list.append(op.abspath(network_name)) tmp[key] = edge_dict[key] sio.savemat(op.abspath(network_name), tmp) iflogger.info('Saving average network for key: {k} as {out}'.format(k=key, out=op.abspath(network_name))) # Writes the networks and returns the name network_name = group_id + '_average.pck' nx.write_gpickle(avg_ntwk, op.abspath(network_name)) iflogger.info('Saving average network as {out}'.format(out=op.abspath(network_name))) avg_ntwk = fix_keys_for_gexf(avg_ntwk) network_name = group_id + '_average.gexf' nx.write_gexf(avg_ntwk, op.abspath(network_name)) iflogger.info('Saving average network as {out}'.format(out=op.abspath(network_name))) return network_name, matlab_network_list def compute_node_measures(ntwk, calculate_cliques=False): """ These return node-based measures """ iflogger.info('Computing node measures:') measures = {} iflogger.info('...Computing degree...') measures['degree'] = np.array(list(ntwk.degree().values())) iflogger.info('...Computing load centrality...') measures['load_centrality'] = np.array(list(nx.load_centrality(ntwk).values())) iflogger.info('...Computing betweenness centrality...') measures['betweenness_centrality'] = np.array(list(nx.betweenness_centrality(ntwk).values())) iflogger.info('...Computing degree 
centrality...') measures['degree_centrality'] = np.array(list(nx.degree_centrality(ntwk).values())) iflogger.info('...Computing closeness centrality...') measures['closeness_centrality'] = np.array(list(nx.closeness_centrality(ntwk).values())) # iflogger.info('...Computing eigenvector centrality...') # measures['eigenvector_centrality'] = np.array(nx.eigenvector_centrality(ntwk, max_iter=100000).values()) iflogger.info('...Computing triangles...') measures['triangles'] = np.array(list(nx.triangles(ntwk).values())) iflogger.info('...Computing clustering...') measures['clustering'] = np.array(list(nx.clustering(ntwk).values())) iflogger.info('...Computing k-core number') measures['core_number'] = np.array(list(nx.core_number(ntwk).values())) iflogger.info('...Identifying network isolates...') isolate_list = nx.isolates(ntwk) binarized = np.zeros((ntwk.number_of_nodes(), 1)) for value in isolate_list: value = value - 1 # Zero indexing binarized[value] = 1 measures['isolates'] = binarized if calculate_cliques: iflogger.info('...Calculating node clique number') measures['node_clique_number'] = np.array(list(nx.node_clique_number(ntwk).values())) iflogger.info('...Computing number of cliques for each node...') measures['number_of_cliques'] = np.array(list(nx.number_of_cliques(ntwk).values())) return measures def compute_edge_measures(ntwk): """ These return edge-based measures """ iflogger.info('Computing edge measures:') measures = {} # iflogger.info('...Computing google matrix...' #Makes really large networks (500k+ edges)) # measures['google_matrix'] = nx.google_matrix(ntwk) # iflogger.info('...Computing hub matrix...') # measures['hub_matrix'] = nx.hub_matrix(ntwk) # iflogger.info('...Computing authority matrix...') # measures['authority_matrix'] = nx.authority_matrix(ntwk) return measures def compute_dict_measures(ntwk): """ Returns a dictionary """ iflogger.info('Computing measures which return a dictionary:') measures = {} iflogger.info('...Computing rich club coefficient...') measures['rich_club_coef'] = nx.rich_club_coefficient(ntwk) return measures def compute_singlevalued_measures(ntwk, weighted=True, calculate_cliques=False): """ Returns a single value per network """ iflogger.info('Computing single valued measures:') measures = {} iflogger.info('...Computing degree assortativity (pearson number) ...') try: measures['degree_pearsonr'] = nx.degree_pearsonr(ntwk) except AttributeError: # For NetworkX 1.6 measures['degree_pearsonr'] = nx.degree_pearson_correlation_coefficient(ntwk) iflogger.info('...Computing degree assortativity...') try: measures['degree_assortativity'] = nx.degree_assortativity(ntwk) except AttributeError: measures['degree_assortativity'] = nx.degree_assortativity_coefficient(ntwk) iflogger.info('...Computing transitivity...') measures['transitivity'] = nx.transitivity(ntwk) iflogger.info('...Computing number of connected_components...') measures['number_connected_components'] = nx.number_connected_components(ntwk) iflogger.info('...Computing graph density...') measures['graph_density'] = nx.density(ntwk) iflogger.info('...Recording number of edges...') measures['number_of_edges'] = nx.number_of_edges(ntwk) iflogger.info('...Recording number of nodes...') measures['number_of_nodes'] = nx.number_of_nodes(ntwk) iflogger.info('...Computing average clustering...') measures['average_clustering'] = nx.average_clustering(ntwk) if nx.is_connected(ntwk): iflogger.info('...Calculating average shortest path length...') measures['average_shortest_path_length'] = 
nx.average_shortest_path_length(ntwk, weighted) else: iflogger.info('...Calculating average shortest path length...') measures['average_shortest_path_length'] = nx.average_shortest_path_length(nx.connected_component_subgraphs(ntwk)[0], weighted) if calculate_cliques: iflogger.info('...Computing graph clique number...') measures['graph_clique_number'] = nx.graph_clique_number(ntwk) # out of memory error return measures def compute_network_measures(ntwk): measures = {} # iflogger.info('Identifying k-core') # measures['k_core'] = nx.k_core(ntwk) # iflogger.info('Identifying k-shell') # measures['k_shell'] = nx.k_shell(ntwk) # iflogger.info('Identifying k-crust') # measures['k_crust'] = nx.k_crust(ntwk) return measures def add_node_data(node_array, ntwk): node_ntwk = nx.Graph() newdata = {} for idx, data in ntwk.nodes_iter(data=True): if not int(idx) == 0: newdata['value'] = node_array[int(idx) - 1] data.update(newdata) node_ntwk.add_node(int(idx), data) return node_ntwk def add_edge_data(edge_array, ntwk, above=0, below=0): edge_ntwk = ntwk.copy() data = {} for x, row in enumerate(edge_array): for y in range(0, np.max(np.shape(edge_array[x]))): if not edge_array[x, y] == 0: data['value'] = edge_array[x, y] if data['value'] <= below or data['value'] >= above: if edge_ntwk.has_edge(x + 1, y + 1): old_edge_dict = edge_ntwk.edge[x + 1][y + 1] edge_ntwk.remove_edge(x + 1, y + 1) data.update(old_edge_dict) edge_ntwk.add_edge(x + 1, y + 1, data) return edge_ntwk class NetworkXMetricsInputSpec(BaseInterfaceInputSpec): in_file = File(exists=True, mandatory=True, desc='Input network') out_k_core = File('k_core', usedefault=True, desc='Computed k-core network stored as a NetworkX pickle.') out_k_shell = File('k_shell', usedefault=True, desc='Computed k-shell network stored as a NetworkX pickle.') out_k_crust = File('k_crust', usedefault=True, desc='Computed k-crust network stored as a NetworkX pickle.') treat_as_weighted_graph = traits.Bool(True, usedefault=True, desc='Some network metrics can be calculated while considering only a binarized version of the graph') compute_clique_related_measures = traits.Bool(False, usedefault=True, desc='Computing clique-related measures (e.g. 
node clique number) can be very time consuming') out_global_metrics_matlab = File(genfile=True, desc='Output node metrics in MATLAB .mat format') out_node_metrics_matlab = File(genfile=True, desc='Output node metrics in MATLAB .mat format') out_edge_metrics_matlab = File(genfile=True, desc='Output edge metrics in MATLAB .mat format') out_pickled_extra_measures = File('extra_measures', usedefault=True, desc='Network measures for group 1 that return dictionaries stored as a Pickle.') class NetworkXMetricsOutputSpec(TraitedSpec): gpickled_network_files = OutputMultiPath(File(desc='Output gpickled network files')) matlab_matrix_files = OutputMultiPath(File(desc='Output network metrics in MATLAB .mat format')) global_measures_matlab = File(desc='Output global metrics in MATLAB .mat format') node_measures_matlab = File(desc='Output node metrics in MATLAB .mat format') edge_measures_matlab = File(desc='Output edge metrics in MATLAB .mat format') node_measure_networks = OutputMultiPath(File(desc='Output gpickled network files for all node-based measures')) edge_measure_networks = OutputMultiPath(File(desc='Output gpickled network files for all edge-based measures')) k_networks = OutputMultiPath(File(desc='Output gpickled network files for the k-core, k-shell, and k-crust networks')) k_core = File(desc='Computed k-core network stored as a NetworkX pickle.') k_shell = File(desc='Computed k-shell network stored as a NetworkX pickle.') k_crust = File(desc='Computed k-crust network stored as a NetworkX pickle.') pickled_extra_measures = File(desc='Network measures for the group that return dictionaries, stored as a Pickle.') matlab_dict_measures = OutputMultiPath(File(desc='Network measures for the group that return dictionaries, stored as matlab matrices.')) class NetworkXMetrics(BaseInterface): """ Calculates and outputs NetworkX-based measures for an input network Example ------- >>> import nipype.interfaces.cmtk as cmtk >>> nxmetrics = cmtk.NetworkXMetrics() >>> nxmetrics.inputs.in_file = 'subj1.pck' >>> nxmetrics.run() # doctest: +SKIP """ input_spec = NetworkXMetricsInputSpec output_spec = NetworkXMetricsOutputSpec def _run_interface(self, runtime): global gpickled, nodentwks, edgentwks, kntwks, matlab gpickled = list() nodentwks = list() edgentwks = list() kntwks = list() matlab = list() ntwk = nx.read_gpickle(self.inputs.in_file) # Each block computes, writes, and saves a measure # The names are then added to the output .pck file list # In the case of the degeneracy networks, they are given specified output names calculate_cliques = self.inputs.compute_clique_related_measures weighted = self.inputs.treat_as_weighted_graph global_measures = compute_singlevalued_measures(ntwk, weighted, calculate_cliques) if isdefined(self.inputs.out_global_metrics_matlab): global_out_file = op.abspath(self.inputs.out_global_metrics_matlab) else: global_out_file = op.abspath(self._gen_outfilename('globalmetrics', 'mat')) sio.savemat(global_out_file, global_measures, oned_as='column') matlab.append(global_out_file) node_measures = compute_node_measures(ntwk, calculate_cliques) for key in list(node_measures.keys()): newntwk = add_node_data(node_measures[key], ntwk) out_file = op.abspath(self._gen_outfilename(key, 'pck')) nx.write_gpickle(newntwk, out_file) nodentwks.append(out_file) if isdefined(self.inputs.out_node_metrics_matlab): node_out_file = op.abspath(self.inputs.out_node_metrics_matlab) else: node_out_file = op.abspath(self._gen_outfilename('nodemetrics', 'mat')) sio.savemat(node_out_file, node_measures, 
oned_as='column') matlab.append(node_out_file) gpickled.extend(nodentwks) edge_measures = compute_edge_measures(ntwk) for key in list(edge_measures.keys()): newntwk = add_edge_data(edge_measures[key], ntwk) out_file = op.abspath(self._gen_outfilename(key, 'pck')) nx.write_gpickle(newntwk, out_file) edgentwks.append(out_file) if isdefined(self.inputs.out_edge_metrics_matlab): edge_out_file = op.abspath(self.inputs.out_edge_metrics_matlab) else: edge_out_file = op.abspath(self._gen_outfilename('edgemetrics', 'mat')) sio.savemat(edge_out_file, edge_measures, oned_as='column') matlab.append(edge_out_file) gpickled.extend(edgentwks) ntwk_measures = compute_network_measures(ntwk) for key in list(ntwk_measures.keys()): if key == 'k_core': out_file = op.abspath(self._gen_outfilename(self.inputs.out_k_core, 'pck')) if key == 'k_shell': out_file = op.abspath(self._gen_outfilename(self.inputs.out_k_shell, 'pck')) if key == 'k_crust': out_file = op.abspath(self._gen_outfilename(self.inputs.out_k_crust, 'pck')) nx.write_gpickle(ntwk_measures[key], out_file) kntwks.append(out_file) gpickled.extend(kntwks) out_pickled_extra_measures = op.abspath(self._gen_outfilename(self.inputs.out_pickled_extra_measures, 'pck')) dict_measures = compute_dict_measures(ntwk) iflogger.info('Saving extra measure file to {path} in Pickle format'.format(path=op.abspath(out_pickled_extra_measures))) file = open(out_pickled_extra_measures, 'w') pickle.dump(dict_measures, file) file.close() iflogger.info('Saving MATLAB measures as {m}'.format(m=matlab)) # Loops through the measures which return a dictionary, # converts the keys and values to a Numpy array, # stacks them together, and saves them in a MATLAB .mat file via Scipy global dicts dicts = list() for idx, key in enumerate(dict_measures.keys()): for idxd, keyd in enumerate(dict_measures[key].keys()): if idxd == 0: nparraykeys = np.array(keyd) nparrayvalues = np.array(dict_measures[key][keyd]) else: nparraykeys = np.append(nparraykeys, np.array(keyd)) values = np.array(dict_measures[key][keyd]) nparrayvalues = np.append(nparrayvalues, values) nparray = np.vstack((nparraykeys, nparrayvalues)) out_file = op.abspath(self._gen_outfilename(key, 'mat')) npdict = {} npdict[key] = nparray sio.savemat(out_file, npdict, oned_as='column') dicts.append(out_file) return runtime def _list_outputs(self): outputs = self.output_spec().get() outputs["k_core"] = op.abspath(self._gen_outfilename(self.inputs.out_k_core, 'pck')) outputs["k_shell"] = op.abspath(self._gen_outfilename(self.inputs.out_k_shell, 'pck')) outputs["k_crust"] = op.abspath(self._gen_outfilename(self.inputs.out_k_crust, 'pck')) outputs["gpickled_network_files"] = gpickled outputs["k_networks"] = kntwks outputs["node_measure_networks"] = nodentwks outputs["edge_measure_networks"] = edgentwks outputs["matlab_dict_measures"] = dicts outputs["global_measures_matlab"] = op.abspath(self._gen_outfilename('globalmetrics', 'mat')) outputs["node_measures_matlab"] = op.abspath(self._gen_outfilename('nodemetrics', 'mat')) outputs["edge_measures_matlab"] = op.abspath(self._gen_outfilename('edgemetrics', 'mat')) outputs["matlab_matrix_files"] = [outputs["global_measures_matlab"], outputs["node_measures_matlab"], outputs["edge_measures_matlab"]] outputs["pickled_extra_measures"] = op.abspath(self._gen_outfilename(self.inputs.out_pickled_extra_measures, 'pck')) return outputs def _gen_outfilename(self, name, ext): return name + '.' 
+ ext class AverageNetworksInputSpec(BaseInterfaceInputSpec): in_files = InputMultiPath(File(exists=True), mandatory=True, desc='Networks for a group of subjects') resolution_network_file = File(exists=True, desc='Parcellation files from Connectome Mapping Toolkit. This is not necessary' ', but if included, the interface will output the statistical maps as networkx graphs.') group_id = traits.Str('group1', usedefault=True, desc='ID for group') out_gpickled_groupavg = File(desc='Average network saved as a NetworkX .pck') out_gexf_groupavg = File(desc='Average network saved as a .gexf file') class AverageNetworksOutputSpec(TraitedSpec): gpickled_groupavg = File(desc='Average network saved as a NetworkX .pck') gexf_groupavg = File(desc='Average network saved as a .gexf file') matlab_groupavgs = OutputMultiPath(File(desc='Average network saved as a .gexf file')) class AverageNetworks(BaseInterface): """ Calculates and outputs the average network given a set of input NetworkX gpickle files This interface will only keep an edge in the averaged network if that edge is present in at least half of the input networks. Example ------- >>> import nipype.interfaces.cmtk as cmtk >>> avg = cmtk.AverageNetworks() >>> avg.inputs.in_files = ['subj1.pck', 'subj2.pck'] >>> avg.run() # doctest: +SKIP """ input_spec = AverageNetworksInputSpec output_spec = AverageNetworksOutputSpec def _run_interface(self, runtime): if isdefined(self.inputs.resolution_network_file): ntwk_res_file = self.inputs.resolution_network_file else: ntwk_res_file = self.inputs.in_files[0] global matlab_network_list network_name, matlab_network_list = average_networks(self.inputs.in_files, ntwk_res_file, self.inputs.group_id) return runtime def _list_outputs(self): outputs = self.output_spec().get() if not isdefined(self.inputs.out_gpickled_groupavg): outputs["gpickled_groupavg"] = op.abspath(self._gen_outfilename(self.inputs.group_id + '_average', 'pck')) else: outputs["gpickled_groupavg"] = op.abspath(self.inputs.out_gpickled_groupavg) if not isdefined(self.inputs.out_gexf_groupavg): outputs["gexf_groupavg"] = op.abspath(self._gen_outfilename(self.inputs.group_id + '_average', 'gexf')) else: outputs["gexf_groupavg"] = op.abspath(self.inputs.out_gexf_groupavg) outputs["matlab_groupavgs"] = matlab_network_list return outputs def _gen_outfilename(self, name, ext): return name + '.' + ext
carolFrohlich/nipype
nipype/interfaces/cmtk/nx.py
Python
bsd-3-clause
26,027
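# Illustrative sketch (not part of the nipype module above): the same
# "dict of per-node NumPy arrays -> MATLAB .mat" pattern that
# compute_node_measures() and NetworkXMetrics._run_interface() use, reduced to a
# few lines. Assumes NetworkX and SciPy are installed; the function name and the
# choice of measures are illustrative, not nipype API.
import networkx as nx
import numpy as np
import scipy.io as sio


def node_measures_to_mat(graph, out_file='nodemetrics.mat'):
    """Compute a few per-node measures and save them as column vectors in a .mat file."""
    measures = {
        'degree_centrality': np.array(list(nx.degree_centrality(graph).values())),
        'closeness_centrality': np.array(list(nx.closeness_centrality(graph).values())),
        'clustering': np.array(list(nx.clustering(graph).values())),
    }
    # oned_as='column' mirrors the sio.savemat() calls in NetworkXMetrics above.
    sio.savemat(out_file, measures, oned_as='column')
    return measures


if __name__ == '__main__':
    node_measures_to_mat(nx.karate_club_graph())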
import json from django.contrib.auth.models import User from django.http import HttpRequest from django.test import TestCase from tastypie.api import Api from tastypie.exceptions import NotRegistered, BadRequest from tastypie.resources import ModelResource from tastypie.serializers import Serializer from core.models import Note from core.utils import adjust_schema class NoteResource(ModelResource): class Meta: resource_name = 'notes' queryset = Note.objects.filter(is_active=True) class UserResource(ModelResource): class Meta: resource_name = 'users' queryset = User.objects.all() class ApiTestCase(TestCase): urls = 'core.tests.api_urls' def test_register(self): # NOTE: these have all been registered in core.tests.api_urls api = Api() self.assertEqual(len(api._registry), 0) api.register(NoteResource()) self.assertEqual(len(api._registry), 1) self.assertEqual(sorted(api._registry.keys()), ['notes']) api.register(UserResource()) self.assertEqual(len(api._registry), 2) self.assertEqual(sorted(api._registry.keys()), ['notes', 'users']) api.register(UserResource()) self.assertEqual(len(api._registry), 2) self.assertEqual(sorted(api._registry.keys()), ['notes', 'users']) self.assertEqual(len(api._canonicals), 2) api.register(UserResource(), canonical=False) self.assertEqual(len(api._registry), 2) self.assertEqual(sorted(api._registry.keys()), ['notes', 'users']) self.assertEqual(len(api._canonicals), 2) self.assertRaises(ValueError, api.register, NoteResource) def test_global_registry(self): api = Api() self.assertEqual(len(api._registry), 0) api.register(NoteResource()) self.assertEqual(len(api._registry), 1) self.assertEqual(sorted(api._registry.keys()), ['notes']) api.register(UserResource()) self.assertEqual(len(api._registry), 2) self.assertEqual(sorted(api._registry.keys()), ['notes', 'users']) api.register(UserResource()) self.assertEqual(len(api._registry), 2) self.assertEqual(sorted(api._registry.keys()), ['notes', 'users']) self.assertEqual(len(api._canonicals), 2) api.register(UserResource(), canonical=False) self.assertEqual(len(api._registry), 2) self.assertEqual(sorted(api._registry.keys()), ['notes', 'users']) self.assertEqual(len(api._canonicals), 2) def test_unregister(self): api = Api() api.register(NoteResource()) api.register(UserResource(), canonical=False) self.assertEqual(sorted(api._registry.keys()), ['notes', 'users']) self.assertEqual(len(api._canonicals), 1) api.unregister('users') self.assertEqual(len(api._registry), 1) self.assertEqual(sorted(api._registry.keys()), ['notes']) self.assertEqual(len(api._canonicals), 1) api.unregister('notes') self.assertEqual(len(api._registry), 0) self.assertEqual(sorted(api._registry.keys()), []) api.unregister('users') self.assertEqual(len(api._registry), 0) self.assertEqual(sorted(api._registry.keys()), []) def test_canonical_resource_for(self): api = Api() note_resource = NoteResource() user_resource = UserResource() api.register(note_resource) api.register(user_resource) self.assertEqual(len(api._canonicals), 2) self.assertEqual(isinstance(api.canonical_resource_for('notes'), NoteResource), True) api.unregister(user_resource._meta.resource_name) self.assertRaises(NotRegistered, api.canonical_resource_for, 'users') def test_urls(self): api = Api() api.register(NoteResource()) api.register(UserResource()) patterns = api.urls self.assertEqual(len(patterns), 3) self.assertEqual(sorted([pattern.name for pattern in patterns if hasattr(pattern, 'name')]), ['api_v1_top_level']) self.assertEqual([[pattern.name for pattern in 
include.url_patterns if hasattr(pattern, 'name')] for include in patterns if hasattr(include, 'reverse_dict')], [['api_dispatch_list', 'api_get_schema', 'api_get_multiple', 'api_dispatch_detail'], ['api_dispatch_list', 'api_get_schema', 'api_get_multiple', 'api_dispatch_detail']]) api = Api(api_name='v2') api.register(NoteResource()) api.register(UserResource()) patterns = api.urls self.assertEqual(len(patterns), 3) self.assertEqual(sorted([pattern.name for pattern in patterns if hasattr(pattern, 'name')]), ['api_v2_top_level']) self.assertEqual([[pattern.name for pattern in include.url_patterns if hasattr(pattern, 'name')] for include in patterns if hasattr(include, 'reverse_dict')], [['api_dispatch_list', 'api_get_schema', 'api_get_multiple', 'api_dispatch_detail'], ['api_dispatch_list', 'api_get_schema', 'api_get_multiple', 'api_dispatch_detail']]) def test_top_level(self): api = Api() api.register(NoteResource()) api.register(UserResource()) request = HttpRequest() resp = api.top_level(request) self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content.decode('utf-8'), '{"notes": {"list_endpoint": "/api/v1/notes/", "schema": "/api/v1/notes/schema/"}, "users": {"list_endpoint": "/api/v1/users/", "schema": "/api/v1/users/schema/"}}') def test_top_level_include_schema_content(self): api = Api() note_resource = NoteResource() user_resource = UserResource() api.register(note_resource) api.register(user_resource) request = HttpRequest() request.GET = {'fullschema': 'true'} resp = api.top_level(request) self.assertEqual(resp.status_code, 200) content = json.loads(resp.content.decode('utf-8')) content['notes']['schema'] = adjust_schema(content['notes']['schema']) content['users']['schema'] = adjust_schema(content['users']['schema']) dummy_request = HttpRequest() dummy_request.method = 'GET' notes_schema = adjust_schema(json.loads(note_resource.get_schema(dummy_request).content.decode('utf-8'))) user_schema = adjust_schema(json.loads(user_resource.get_schema(dummy_request).content.decode('utf-8'))) self.assertEqual(content['notes']['list_endpoint'], '/api/v1/notes/') self.assertEqual(content['notes']['schema'], notes_schema) self.assertEqual(content['users']['list_endpoint'], '/api/v1/users/') self.assertEqual(content['users']['schema'], user_schema) def test_top_level_jsonp(self): api = Api() api.register(NoteResource()) api.register(UserResource()) request = HttpRequest() request.META = {'HTTP_ACCEPT': 'text/javascript'} request.GET = {'callback': 'foo'} resp = api.top_level(request) self.assertEqual(resp.status_code, 200) self.assertEqual(resp['content-type'].split(';')[0], 'text/javascript') self.assertEqual(resp.content.decode('utf-8'), 'foo({"notes": {"list_endpoint": "/api/v1/notes/", "schema": "/api/v1/notes/schema/"}, "users": {"list_endpoint": "/api/v1/users/", "schema": "/api/v1/users/schema/"}})') request = HttpRequest() request.META = {'HTTP_ACCEPT': 'text/javascript'} request.GET = {'callback': ''} # Regression: We expect this, which is fine, but this used to # be an import error. 
with self.assertRaises(BadRequest): api.top_level(request) def test_custom_api_serializer(self): """Confirm that an Api can use a custom serializer""" # Origin: https://github.com/django-tastypie/django-tastypie/pull/817 class JSONSerializer(Serializer): formats = ('json', ) api = Api(serializer_class=JSONSerializer) api.register(NoteResource()) request = HttpRequest() request.META = {'HTTP_ACCEPT': 'text/javascript'} resp = api.top_level(request) self.assertEqual(resp.status_code, 200) self.assertEqual(resp['content-type'], 'application/json', msg="Expected application/json response but received %s" % resp['content-type']) request = HttpRequest() request.META = {'HTTP_ACCEPT': 'application/xml'} resp = api.top_level(request) self.assertEqual(resp.status_code, 200) self.assertEqual(resp['content-type'], 'application/json', msg="Expected application/json response but received %s" % resp['content-type'])
doselect/django-tastypie
tests/core/tests/api.py
Python
bsd-3-clause
8,771
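# Illustrative sketch (not part of the test module above): how an Api instance
# like the ones exercised by these tests is typically wired into a project
# URLconf. Assumes Django >= 2.0 (django.urls.path/include) and django-tastypie
# installed; the UserResource definition is copied from the test module.
from django.contrib.auth.models import User
from django.urls import include, path
from tastypie.api import Api
from tastypie.resources import ModelResource


class UserResource(ModelResource):
    class Meta:
        resource_name = 'users'
        queryset = User.objects.all()


v1_api = Api(api_name='v1')
v1_api.register(UserResource())

urlpatterns = [
    # Exposes /api/v1/ (the top_level view tested above), /api/v1/users/ and
    # /api/v1/users/schema/.
    path('api/', include(v1_api.urls)),
]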
from tastypie.validation import Validation from tastytools.exceptions import ValidationError, MissingField class FieldsValidation(Validation): def __init__(self, required=None, validated=None, **kwargs): if required is None: required = [] if validated is None: validated = [] all_methods = ['GET', 'POST', 'PUT', 'DELETE', 'PATCH'] self.required_fields = {} self.validated_fields = {} dicts = {'required': self.required_fields, 'validated': self.validated_fields} self.map_method_validations(self.required_fields, required, all_methods) self.map_method_validations(self.validated_fields, validated, all_methods) for key, value in kwargs.items(): for arr_name in ['required', 'validated']: if key[:len(arr_name)] == arr_name: methods = self.parse_methods_key(key, arr_name) self.map_method_validations(dicts[arr_name], value, methods) Validation.__init__(self) def parse_methods_key(self, key, prefix): prefix_len = len(prefix) + 1 # prefix + underscore methods = key[prefix_len:].split('_') return [method.upper() for method in methods] def map_method_validations(self, target_dict, fields_to_add, methods): for method in methods: res_fields = target_dict.setdefault(method, []) for field in fields_to_add: res_fields.append(field) def is_valid(self, bundle, request): if not bundle.data: return {'__all__': 'Missing data.'} required_errors = self.validate_required(bundle, request) validation_errors = self.validate_fields(bundle, request) errors = required_errors + validation_errors if errors: return {'errors' : errors} return {} def validate_fields(self, bundle, request=None): errors = [] for field in self.validated_fields[request.method]: validation_func = getattr(self, '%s_is_valid' % field) try: validation_func(bundle.data.get(field, None), bundle, request) except ValidationError, error: errors.append(error.get_dict()) return errors def validate_required(self, bundle, request=None): errors = [] for required_field in self.required_fields[request.method]: if required_field not in bundle.data: error = MissingField(field_name=required_field) errors.append(error.get_dict()) return errors @staticmethod def uri_to_pk(uri): if uri is None: return None # convert everything to lists multiple = not isinstance(uri, basestring) uris = uri if multiple else [uri] # handle all passed URIs converted = [] for one_uri in uris: try: # hopefully /api/v1/<resource_name>/<pk>/ converted.append(int(one_uri.split('/')[-2])) except (IndexError, ValueError): raise ValueError("URI %s could not be converted to PK integer." % one_uri) # convert back to original format return converted if multiple else converted[0]
rtucker-mozilla/inventory
vendor-local/src/django-tastytools/tastytools/validation.py
Python
bsd-3-clause
3,369
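# Illustrative sketch (hypothetical subclass, not part of tastytools): how the
# keyword naming scheme parsed by FieldsValidation.parse_methods_key() maps onto
# per-HTTP-method rules, and how validate_fields() picks up <field>_is_valid
# hooks. ArticleValidation and its field names are made up; raising tastytools'
# ValidationError is left out because its constructor is not shown in this file.
from tastytools.validation import FieldsValidation


class ArticleValidation(FieldsValidation):
    def __init__(self):
        super(ArticleValidation, self).__init__(
            required=['title'],           # required for GET/POST/PUT/DELETE/PATCH
            required_post_put=['body'],   # 'required_post_put' -> POST and PUT only
            validated_post=['title'],     # validate_fields() calls self.title_is_valid(...)
        )

    def title_is_valid(self, value, bundle, request):
        # Check `value` here and raise tastytools' ValidationError on failure.
        pass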
# -*- coding:utf-8 -*-
'''
Tests of a synchronized sorted queue:
    In fact, PriorityQueue is a blocking queue implemented on top of heapq.
'''
import time
import bisect
from heapq import *
import threading
import queue
from collections import deque

"""
As long as the functional requirements are met, the heapq approach is the fastest,
the simplest, and the most consistent in performance.

All timings: 100 iterations, each iteration first inserts 1000 items and then pops them all
# this is not the real usage scenario
In real use the ordering does not need to inspect everything, so the overhead is
smaller than in these tests.

heapq: 1.4s
priorityQueue: 5.5s
list, re-sorted after each insert, no lock: 75s  thread-synchronized: 75s
# basically the same? which shows the time is mostly spent in sort(),
# and whether or not a lock is taken barely matters
bisect insort: 1.9s (no thread synchronization)
bisect with manual concatenation: 4.4s (no thread synchronization)
bisect insort: 2.4s (thread-synchronized)
For reference:
deque: 0.5s (no sorting)
list: 0.58s (no lock)
list: 0.95s (synchronized)
which shows the synchronization overhead is about 0.5s

With 10000 iterations and 30 inserts per iteration:
heapq: 3.7s
priorityQueue: 17s
list, re-sorted after each insert, no lock: 8s  # shows list.sort() is very efficient for small counts  10.21
bisect insort: 3.9s (no thread synchronization)
bisect with manual concatenation: 4.96s (no thread synchronization)
bisect insort: 5.22s (thread-synchronized)
For reference, methods without sorting:
deque: 1.56s (thread-safe)
list: 1.87s (no lock)
list: 3.34s (synchronized)

With 20000 iterations and 5 inserts per iteration:
heapq: 1.16s
priorityQueue: 6.5s
list, re-sorted after each insert, no lock: 1.14s  # shows list.sort() is very efficient for small counts  # synchronized: 1.95s
bisect insort: 1.0s (no thread synchronization)
bisect with manual concatenation: 1.37s (no thread synchronization)
bisect insort: 1.35s (thread-synchronized)
For reference, methods without sorting:
deque: 0.59s (thread-safe)
list: 0.67s (no lock)
list: 1.12s (synchronized)
"""

QSIZE = 6


class Command(object):
    def __init__(self, etime):
        self._exec_time = etime

    def exec(self, itime):
        if itime > self._exec_time:
            print('error!')

    def key(self):
        return self._exec_time

    def __lt__(self, other):
        return self._exec_time < other._exec_time

    def __gt__(self, other):
        return self._exec_time > other._exec_time


def qcost(times=100000):
    tbegin = time.time()
    for i in range(times):
        q = deque()
        for it in range(QSIZE):
            q.append(Command(it))
        while len(q) > 0:
            it = q.pop()
            it.exec(0)
    print('qcost:%f' % (time.time()-tbegin))


lock = threading.Lock()


def lcost(times=100000):
    tbegin = time.time()
    for i in range(times):
        q = []
        for it in range(QSIZE):
            q.append(Command(it))
        while len(q) > 0:
            it = q.pop()
            it.exec(0)
    print('lcost:%f' % (time.time()-tbegin))


def lcost0(times=100000):
    tbegin = time.time()
    for i in range(times):
        q = []
        for it in range(QSIZE):
            lock.acquire()  # about 10% less total overhead than using `with`
            q.append(Command(it))
            lock.release()
        while len(q) > 0:
            lock.acquire()  # about 10% less total overhead than using `with`
            it = q.pop()
            lock.release()
            it.exec(0)
    print('lcost:%f' % (time.time()-tbegin))


def hcost(times=100000):
    tbegin = time.time()
    for i in range(times):
        h = []
        for it in range(QSIZE):
            lock.acquire()  # about 10% less total overhead than using `with`
            heappush(h, (it, Command(it)))
            #heappush(h,Command(it))
            lock.release()
        while len(h) > 0:
            lock.acquire()
            it = heappop(h)
            lock.release()
            it[1].exec(0)
    print('hcost:%f' % (time.time()-tbegin))


def hcost1(times=100000):
    '''
        more than 20% slower than hcost
    '''
    tbegin = time.time()
    for i in range(times):
        h = []
        for it in range(QSIZE):
            lock.acquire()  # about 10% less total overhead than using `with`
            heappush(h, Command(it))
            lock.release()
        while len(h) > 0:
            lock.acquire()
            it = heappop(h)
            lock.release()
            it.exec(0)
    print('hcost:%f' % (time.time()-tbegin))


def pcost(times=100000):
    tbegin = time.time()
    for i in range(times):
        p = queue.PriorityQueue()
        for it in range(QSIZE):
            p.put(Command(it))
        while p.qsize() > 0:
            it = p.get()
            it.exec(0)
    print('pcost:%f' % (time.time()-tbegin))


def scost(times=100000):
    tbegin = time.time()
    for i in range(times):
        q = []
        for it in range(QSIZE):
            q.append(Command(it))
            q.sort()
        while len(q) > 0:
            it = q.pop()
            it.exec(0)
    print('scost:%f' % (time.time()-tbegin))


def scost0(times=100000):
    tbegin = time.time()
    for i in range(times):
        q = []
        for it in range(QSIZE):
            lock.acquire()  # about 10% less total overhead than using `with`
            q.append(Command(it))
            q.sort()
            lock.release()
        while len(q) > 0:
            lock.acquire()  # about 10% less total overhead than using `with`
            it = q.pop()
            lock.release()
            it.exec(0)
    print('scost:%f' % (time.time()-tbegin))


def bcost(times=100000):
    tbegin = time.time()
    for i in range(times):
        q = []
        for it in range(QSIZE):
            #q.append(Command(it))
            bisect.insort_left(q, Command(it))
        while len(q) > 0:
            it = q.pop()
            it.exec(0)
    print('bcost:%f' % (time.time()-tbegin))


def bcost0(times=100000):
    tbegin = time.time()
    for i in range(times):
        q = []
        for it in range(QSIZE):
            lock.acquire()
            bisect.insort_left(q, Command(it))
            lock.release()
        while len(q) > 0:
            lock.acquire()
            it = q.pop()
            lock.release()
            it.exec(0)
    print('bcost:%f' % (time.time()-tbegin))


def bcost2(times=100000):
    tbegin = time.time()
    for i in range(times):
        q = []
        for it in range(QSIZE):
            #q.append(Command(it))
            cit = Command(it)
            i = bisect.bisect_left(q, cit)
            q = q[:i] + [cit] + q[i:]
        while len(q) > 0:
            it = q.pop()
            it.exec(0)
    print('bcost:%f' % (time.time()-tbegin))
lovelylain/pyctp
example/pyctp2/testbed/tqueue.py
Python
mit
6,474
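# Illustrative sketch (not part of tqueue.py above): a minimal locked priority
# queue built on heapq, i.e. the approach the benchmark notes above rate as the
# fastest of the synchronized alternatives. The class name and the (key, item)
# tuple layout follow hcost()'s usage but are otherwise made up.
import threading
from heapq import heappush, heappop


class TinyPriorityQueue(object):
    def __init__(self):
        self._heap = []
        self._lock = threading.Lock()

    def put(self, key, item):
        # tqueue.py uses bare acquire()/release() to shave a little overhead;
        # `with` is used here for clarity.
        with self._lock:
            heappush(self._heap, (key, item))

    def pop(self):
        with self._lock:
            return heappop(self._heap)[1]


if __name__ == '__main__':
    pq = TinyPriorityQueue()
    for t in (3, 1, 2):
        pq.put(t, 'cmd-%d' % t)
    print(pq.pop(), pq.pop(), pq.pop())  # cmd-1 cmd-2 cmd-3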
# Copyright (C) Ivan Kravets <me@ikravets.com> # See LICENSE for details. """ SPL The ST Standard Peripheral Library provides a set of functions for handling the peripherals on the STM32 Cortex-M3 family. The idea is to save the user (the new user, in particular) having to deal directly with the registers. http://www.st.com/web/en/catalog/tools/FM147/CL1794/SC961/SS1743?sc=stm32embeddedsoftware """ from os.path import join from SCons.Script import DefaultEnvironment env = DefaultEnvironment() env.Replace( PLATFORMFW_DIR=join("$PIOPACKAGES_DIR", "framework-spl") ) env.VariantDirWrap( join("$BUILD_DIR", "FrameworkSPLInc"), join("$PLATFORMFW_DIR", "${BOARD_OPTIONS['build']['core']}", "variants", "${BOARD_OPTIONS['build']['variant']}", "inc") ) env.Append( CPPPATH=[ join("$BUILD_DIR", "FrameworkSPLInc"), join("$BUILD_DIR", "FrameworkSPL") ] ) envsafe = env.Clone() envsafe.Append( CPPPATH=["$BUILDSRC_DIR"], CPPDEFINES=[ "USE_STDPERIPH_DRIVER" ] ) # # Target: Build SPL Library # extra_flags = env.get("BOARD_OPTIONS", {}).get("build", {}).get("extra_flags") src_filter_patterns = ["+<*>"] if "STM32F40_41xxx" in extra_flags: src_filter_patterns += ["-<stm32f4xx_fmc.c>"] if "STM32F427_437xx" in extra_flags: src_filter_patterns += ["-<stm32f4xx_fsmc.c>"] elif "STM32F303xC" in extra_flags: src_filter_patterns += ["-<stm32f30x_hrtim.c>"] elif "STM32L1XX_MD" in extra_flags: src_filter_patterns += ["-<stm32l1xx_flash_ramfunc.c>"] libs = [] libs.append(envsafe.BuildLibrary( join("$BUILD_DIR", "FrameworkSPL"), join("$PLATFORMFW_DIR", "${BOARD_OPTIONS['build']['core']}", "variants", "${BOARD_OPTIONS['build']['variant']}", "src"), src_filter=" ".join(src_filter_patterns) )) env.Append(LIBS=libs)
mcanthony/platformio
platformio/builder/scripts/frameworks/spl.py
Python
mit
1,823
# -*- coding: utf-8 -*- # # This file is part of Invenio. # Copyright (C) 2012, 2013, 2014, 2015 CERN. # # Invenio is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Invenio is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Invenio; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """Deprecated.""" from wtforms import StringField from ..field_base import WebDepositField # from ..validation_utils import number_validate __all__ = ['PagesNumberField'] class PagesNumberField(WebDepositField, StringField): """Deprecated.""" def __init__(self, **kwargs): """Deprecated.""" import warnings warnings.warn("Field has been deprecated", PendingDeprecationWarning) defaults = dict( icon='th', widget_classes="form-control" # FIXME validators=[ # number_validate(error_message='Pages must be a number!')] ) defaults.update(kwargs) super(PagesNumberField, self).__init__(**defaults)
SamiHiltunen/invenio-deposit
invenio_deposit/fields/pages_number.py
Python
gpl-2.0
1,526
from test.support import verbose, run_unittest, gc_collect, bigmemtest, _2G, \ cpython_only, captured_stdout import io import re from re import Scanner import sre_compile import sre_constants import sys import string import traceback import unittest from weakref import proxy # Misc tests from Tim Peters' re.doc # WARNING: Don't change details in these tests if you don't know # what you're doing. Some of these tests were carefully modeled to # cover most of the code. class S(str): def __getitem__(self, index): return S(super().__getitem__(index)) class B(bytes): def __getitem__(self, index): return B(super().__getitem__(index)) class ReTests(unittest.TestCase): def assertTypedEqual(self, actual, expect, msg=None): self.assertEqual(actual, expect, msg) def recurse(actual, expect): if isinstance(expect, (tuple, list)): for x, y in zip(actual, expect): recurse(x, y) else: self.assertIs(type(actual), type(expect), msg) recurse(actual, expect) def test_keep_buffer(self): # See bug 14212 b = bytearray(b'x') it = re.finditer(b'a', b) with self.assertRaises(BufferError): b.extend(b'x'*400) list(it) del it gc_collect() b.extend(b'x'*400) def test_weakref(self): s = 'QabbbcR' x = re.compile('ab+c') y = proxy(x) self.assertEqual(x.findall('QabbbcR'), y.findall('QabbbcR')) def test_search_star_plus(self): self.assertEqual(re.search('x*', 'axx').span(0), (0, 0)) self.assertEqual(re.search('x*', 'axx').span(), (0, 0)) self.assertEqual(re.search('x+', 'axx').span(0), (1, 3)) self.assertEqual(re.search('x+', 'axx').span(), (1, 3)) self.assertEqual(re.search('x', 'aaa'), None) self.assertEqual(re.match('a*', 'xxx').span(0), (0, 0)) self.assertEqual(re.match('a*', 'xxx').span(), (0, 0)) self.assertEqual(re.match('x*', 'xxxa').span(0), (0, 3)) self.assertEqual(re.match('x*', 'xxxa').span(), (0, 3)) self.assertEqual(re.match('a+', 'xxx'), None) def bump_num(self, matchobj): int_value = int(matchobj.group(0)) return str(int_value + 1) def test_basic_re_sub(self): self.assertTypedEqual(re.sub('y', 'a', 'xyz'), 'xaz') self.assertTypedEqual(re.sub('y', S('a'), S('xyz')), 'xaz') self.assertTypedEqual(re.sub(b'y', b'a', b'xyz'), b'xaz') self.assertTypedEqual(re.sub(b'y', B(b'a'), B(b'xyz')), b'xaz') self.assertTypedEqual(re.sub(b'y', bytearray(b'a'), bytearray(b'xyz')), b'xaz') self.assertTypedEqual(re.sub(b'y', memoryview(b'a'), memoryview(b'xyz')), b'xaz') for y in ("\xe0", "\u0430", "\U0001d49c"): self.assertEqual(re.sub(y, 'a', 'x%sz' % y), 'xaz') self.assertEqual(re.sub("(?i)b+", "x", "bbbb BBBB"), 'x x') self.assertEqual(re.sub(r'\d+', self.bump_num, '08.2 -2 23x99y'), '9.3 -3 24x100y') self.assertEqual(re.sub(r'\d+', self.bump_num, '08.2 -2 23x99y', 3), '9.3 -3 23x99y') self.assertEqual(re.sub('.', lambda m: r"\n", 'x'), '\\n') self.assertEqual(re.sub('.', r"\n", 'x'), '\n') s = r"\1\1" self.assertEqual(re.sub('(.)', s, 'x'), 'xx') self.assertEqual(re.sub('(.)', re.escape(s), 'x'), s) self.assertEqual(re.sub('(.)', lambda m: s, 'x'), s) self.assertEqual(re.sub('(?P<a>x)', '\g<a>\g<a>', 'xx'), 'xxxx') self.assertEqual(re.sub('(?P<a>x)', '\g<a>\g<1>', 'xx'), 'xxxx') self.assertEqual(re.sub('(?P<unk>x)', '\g<unk>\g<unk>', 'xx'), 'xxxx') self.assertEqual(re.sub('(?P<unk>x)', '\g<1>\g<1>', 'xx'), 'xxxx') self.assertEqual(re.sub('a',r'\t\n\v\r\f\a\b\B\Z\a\A\w\W\s\S\d\D','a'), '\t\n\v\r\f\a\b\\B\\Z\a\\A\\w\\W\\s\\S\\d\\D') self.assertEqual(re.sub('a', '\t\n\v\r\f\a', 'a'), '\t\n\v\r\f\a') self.assertEqual(re.sub('a', '\t\n\v\r\f\a', 'a'), (chr(9)+chr(10)+chr(11)+chr(13)+chr(12)+chr(7))) 
self.assertEqual(re.sub('^\s*', 'X', 'test'), 'Xtest') def test_bug_449964(self): # fails for group followed by other escape self.assertEqual(re.sub(r'(?P<unk>x)', '\g<1>\g<1>\\b', 'xx'), 'xx\bxx\b') def test_bug_449000(self): # Test for sub() on escaped characters self.assertEqual(re.sub(r'\r\n', r'\n', 'abc\r\ndef\r\n'), 'abc\ndef\n') self.assertEqual(re.sub('\r\n', r'\n', 'abc\r\ndef\r\n'), 'abc\ndef\n') self.assertEqual(re.sub(r'\r\n', '\n', 'abc\r\ndef\r\n'), 'abc\ndef\n') self.assertEqual(re.sub('\r\n', '\n', 'abc\r\ndef\r\n'), 'abc\ndef\n') def test_bug_1661(self): # Verify that flags do not get silently ignored with compiled patterns pattern = re.compile('.') self.assertRaises(ValueError, re.match, pattern, 'A', re.I) self.assertRaises(ValueError, re.search, pattern, 'A', re.I) self.assertRaises(ValueError, re.findall, pattern, 'A', re.I) self.assertRaises(ValueError, re.compile, pattern, re.I) def test_bug_3629(self): # A regex that triggered a bug in the sre-code validator re.compile("(?P<quote>)(?(quote))") def test_sub_template_numeric_escape(self): # bug 776311 and friends self.assertEqual(re.sub('x', r'\0', 'x'), '\0') self.assertEqual(re.sub('x', r'\000', 'x'), '\000') self.assertEqual(re.sub('x', r'\001', 'x'), '\001') self.assertEqual(re.sub('x', r'\008', 'x'), '\0' + '8') self.assertEqual(re.sub('x', r'\009', 'x'), '\0' + '9') self.assertEqual(re.sub('x', r'\111', 'x'), '\111') self.assertEqual(re.sub('x', r'\117', 'x'), '\117') self.assertEqual(re.sub('x', r'\1111', 'x'), '\1111') self.assertEqual(re.sub('x', r'\1111', 'x'), '\111' + '1') self.assertEqual(re.sub('x', r'\00', 'x'), '\x00') self.assertEqual(re.sub('x', r'\07', 'x'), '\x07') self.assertEqual(re.sub('x', r'\08', 'x'), '\0' + '8') self.assertEqual(re.sub('x', r'\09', 'x'), '\0' + '9') self.assertEqual(re.sub('x', r'\0a', 'x'), '\0' + 'a') self.assertEqual(re.sub('x', r'\400', 'x'), '\0') self.assertEqual(re.sub('x', r'\777', 'x'), '\377') self.assertRaises(re.error, re.sub, 'x', r'\1', 'x') self.assertRaises(re.error, re.sub, 'x', r'\8', 'x') self.assertRaises(re.error, re.sub, 'x', r'\9', 'x') self.assertRaises(re.error, re.sub, 'x', r'\11', 'x') self.assertRaises(re.error, re.sub, 'x', r'\18', 'x') self.assertRaises(re.error, re.sub, 'x', r'\1a', 'x') self.assertRaises(re.error, re.sub, 'x', r'\90', 'x') self.assertRaises(re.error, re.sub, 'x', r'\99', 'x') self.assertRaises(re.error, re.sub, 'x', r'\118', 'x') # r'\11' + '8' self.assertRaises(re.error, re.sub, 'x', r'\11a', 'x') self.assertRaises(re.error, re.sub, 'x', r'\181', 'x') # r'\18' + '1' self.assertRaises(re.error, re.sub, 'x', r'\800', 'x') # r'\80' + '0' # in python2.3 (etc), these loop endlessly in sre_parser.py self.assertEqual(re.sub('(((((((((((x)))))))))))', r'\11', 'x'), 'x') self.assertEqual(re.sub('((((((((((y))))))))))(.)', r'\118', 'xyz'), 'xz8') self.assertEqual(re.sub('((((((((((y))))))))))(.)', r'\11a', 'xyz'), 'xza') def test_qualified_re_sub(self): self.assertEqual(re.sub('a', 'b', 'aaaaa'), 'bbbbb') self.assertEqual(re.sub('a', 'b', 'aaaaa', 1), 'baaaa') def test_bug_114660(self): self.assertEqual(re.sub(r'(\S)\s+(\S)', r'\1 \2', 'hello there'), 'hello there') def test_bug_462270(self): # Test for empty sub() behaviour, see SF bug #462270 self.assertEqual(re.sub('x*', '-', 'abxd'), '-a-b-d-') self.assertEqual(re.sub('x+', '-', 'abxd'), 'ab-d') def test_symbolic_groups(self): re.compile('(?P<a>x)(?P=a)(?(a)y)') re.compile('(?P<a1>x)(?P=a1)(?(a1)y)') self.assertRaises(re.error, re.compile, '(?P<a>)(?P<a>)') 
self.assertRaises(re.error, re.compile, '(?Px)') self.assertRaises(re.error, re.compile, '(?P=)') self.assertRaises(re.error, re.compile, '(?P=1)') self.assertRaises(re.error, re.compile, '(?P=a)') self.assertRaises(re.error, re.compile, '(?P=a1)') self.assertRaises(re.error, re.compile, '(?P=a.)') self.assertRaises(re.error, re.compile, '(?P<)') self.assertRaises(re.error, re.compile, '(?P<>)') self.assertRaises(re.error, re.compile, '(?P<1>)') self.assertRaises(re.error, re.compile, '(?P<a.>)') self.assertRaises(re.error, re.compile, '(?())') self.assertRaises(re.error, re.compile, '(?(a))') self.assertRaises(re.error, re.compile, '(?(1a))') self.assertRaises(re.error, re.compile, '(?(a.))') # New valid/invalid identifiers in Python 3 re.compile('(?P<µ>x)(?P=µ)(?(µ)y)') re.compile('(?P<𝔘𝔫𝔦𝔠𝔬𝔡𝔢>x)(?P=𝔘𝔫𝔦𝔠𝔬𝔡𝔢)(?(𝔘𝔫𝔦𝔠𝔬𝔡𝔢)y)') self.assertRaises(re.error, re.compile, '(?P<©>x)') def test_symbolic_refs(self): self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<a', 'xx') self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<', 'xx') self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g', 'xx') self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<a a>', 'xx') self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<>', 'xx') self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<1a1>', 'xx') self.assertRaises(IndexError, re.sub, '(?P<a>x)', '\g<ab>', 'xx') self.assertRaises(re.error, re.sub, '(?P<a>x)|(?P<b>y)', '\g<b>', 'xx') self.assertRaises(re.error, re.sub, '(?P<a>x)|(?P<b>y)', '\\2', 'xx') self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<-1>', 'xx') # New valid/invalid identifiers in Python 3 self.assertEqual(re.sub('(?P<µ>x)', r'\g<µ>', 'xx'), 'xx') self.assertEqual(re.sub('(?P<𝔘𝔫𝔦𝔠𝔬𝔡𝔢>x)', r'\g<𝔘𝔫𝔦𝔠𝔬𝔡𝔢>', 'xx'), 'xx') self.assertRaises(re.error, re.sub, '(?P<a>x)', r'\g<©>', 'xx') def test_re_subn(self): self.assertEqual(re.subn("(?i)b+", "x", "bbbb BBBB"), ('x x', 2)) self.assertEqual(re.subn("b+", "x", "bbbb BBBB"), ('x BBBB', 1)) self.assertEqual(re.subn("b+", "x", "xyz"), ('xyz', 0)) self.assertEqual(re.subn("b*", "x", "xyz"), ('xxxyxzx', 4)) self.assertEqual(re.subn("b*", "x", "xyz", 2), ('xxxyz', 2)) def test_re_split(self): for string in ":a:b::c", S(":a:b::c"): self.assertTypedEqual(re.split(":", string), ['', 'a', 'b', '', 'c']) self.assertTypedEqual(re.split(":*", string), ['', 'a', 'b', 'c']) self.assertTypedEqual(re.split("(:*)", string), ['', ':', 'a', ':', 'b', '::', 'c']) for string in (b":a:b::c", B(b":a:b::c"), bytearray(b":a:b::c"), memoryview(b":a:b::c")): self.assertTypedEqual(re.split(b":", string), [b'', b'a', b'b', b'', b'c']) self.assertTypedEqual(re.split(b":*", string), [b'', b'a', b'b', b'c']) self.assertTypedEqual(re.split(b"(:*)", string), [b'', b':', b'a', b':', b'b', b'::', b'c']) for a, b, c in ("\xe0\xdf\xe7", "\u0430\u0431\u0432", "\U0001d49c\U0001d49e\U0001d4b5"): string = ":%s:%s::%s" % (a, b, c) self.assertEqual(re.split(":", string), ['', a, b, '', c]) self.assertEqual(re.split(":*", string), ['', a, b, c]) self.assertEqual(re.split("(:*)", string), ['', ':', a, ':', b, '::', c]) self.assertEqual(re.split("(?::*)", ":a:b::c"), ['', 'a', 'b', 'c']) self.assertEqual(re.split("(:)*", ":a:b::c"), ['', ':', 'a', ':', 'b', ':', 'c']) self.assertEqual(re.split("([b:]+)", ":a:b::c"), ['', ':', 'a', ':b::', 'c']) self.assertEqual(re.split("(b)|(:+)", ":a:b::c"), ['', None, ':', 'a', None, ':', '', 'b', None, '', None, '::', 'c']) self.assertEqual(re.split("(?:b)|(?::+)", ":a:b::c"), ['', 'a', '', '', 'c']) def test_qualified_re_split(self): 
self.assertEqual(re.split(":", ":a:b::c", 2), ['', 'a', 'b::c']) self.assertEqual(re.split(':', 'a:b:c:d', 2), ['a', 'b', 'c:d']) self.assertEqual(re.split("(:)", ":a:b::c", 2), ['', ':', 'a', ':', 'b::c']) self.assertEqual(re.split("(:*)", ":a:b::c", 2), ['', ':', 'a', ':', 'b::c']) def test_re_findall(self): self.assertEqual(re.findall(":+", "abc"), []) for string in "a:b::c:::d", S("a:b::c:::d"): self.assertTypedEqual(re.findall(":+", string), [":", "::", ":::"]) self.assertTypedEqual(re.findall("(:+)", string), [":", "::", ":::"]) self.assertTypedEqual(re.findall("(:)(:*)", string), [(":", ""), (":", ":"), (":", "::")]) for string in (b"a:b::c:::d", B(b"a:b::c:::d"), bytearray(b"a:b::c:::d"), memoryview(b"a:b::c:::d")): self.assertTypedEqual(re.findall(b":+", string), [b":", b"::", b":::"]) self.assertTypedEqual(re.findall(b"(:+)", string), [b":", b"::", b":::"]) self.assertTypedEqual(re.findall(b"(:)(:*)", string), [(b":", b""), (b":", b":"), (b":", b"::")]) for x in ("\xe0", "\u0430", "\U0001d49c"): xx = x * 2 xxx = x * 3 string = "a%sb%sc%sd" % (x, xx, xxx) self.assertEqual(re.findall("%s+" % x, string), [x, xx, xxx]) self.assertEqual(re.findall("(%s+)" % x, string), [x, xx, xxx]) self.assertEqual(re.findall("(%s)(%s*)" % (x, x), string), [(x, ""), (x, x), (x, xx)]) def test_bug_117612(self): self.assertEqual(re.findall(r"(a|(b))", "aba"), [("a", ""),("b", "b"),("a", "")]) def test_re_match(self): for string in 'a', S('a'): self.assertEqual(re.match('a', string).groups(), ()) self.assertEqual(re.match('(a)', string).groups(), ('a',)) self.assertEqual(re.match('(a)', string).group(0), 'a') self.assertEqual(re.match('(a)', string).group(1), 'a') self.assertEqual(re.match('(a)', string).group(1, 1), ('a', 'a')) for string in b'a', B(b'a'), bytearray(b'a'), memoryview(b'a'): self.assertEqual(re.match(b'a', string).groups(), ()) self.assertEqual(re.match(b'(a)', string).groups(), (b'a',)) self.assertEqual(re.match(b'(a)', string).group(0), b'a') self.assertEqual(re.match(b'(a)', string).group(1), b'a') self.assertEqual(re.match(b'(a)', string).group(1, 1), (b'a', b'a')) for a in ("\xe0", "\u0430", "\U0001d49c"): self.assertEqual(re.match(a, a).groups(), ()) self.assertEqual(re.match('(%s)' % a, a).groups(), (a,)) self.assertEqual(re.match('(%s)' % a, a).group(0), a) self.assertEqual(re.match('(%s)' % a, a).group(1), a) self.assertEqual(re.match('(%s)' % a, a).group(1, 1), (a, a)) pat = re.compile('((a)|(b))(c)?') self.assertEqual(pat.match('a').groups(), ('a', 'a', None, None)) self.assertEqual(pat.match('b').groups(), ('b', None, 'b', None)) self.assertEqual(pat.match('ac').groups(), ('a', 'a', None, 'c')) self.assertEqual(pat.match('bc').groups(), ('b', None, 'b', 'c')) self.assertEqual(pat.match('bc').groups(""), ('b', "", 'b', 'c')) # A single group m = re.match('(a)', 'a') self.assertEqual(m.group(0), 'a') self.assertEqual(m.group(0), 'a') self.assertEqual(m.group(1), 'a') self.assertEqual(m.group(1, 1), ('a', 'a')) pat = re.compile('(?:(?P<a1>a)|(?P<b2>b))(?P<c3>c)?') self.assertEqual(pat.match('a').group(1, 2, 3), ('a', None, None)) self.assertEqual(pat.match('b').group('a1', 'b2', 'c3'), (None, 'b', None)) self.assertEqual(pat.match('ac').group(1, 'b2', 3), ('a', None, 'c')) def test_re_fullmatch(self): # Issue 16203: Proposal: add re.fullmatch() method. 
self.assertEqual(re.fullmatch(r"a", "a").span(), (0, 1)) for string in "ab", S("ab"): self.assertEqual(re.fullmatch(r"a|ab", string).span(), (0, 2)) for string in b"ab", B(b"ab"), bytearray(b"ab"), memoryview(b"ab"): self.assertEqual(re.fullmatch(br"a|ab", string).span(), (0, 2)) for a, b in "\xe0\xdf", "\u0430\u0431", "\U0001d49c\U0001d49e": r = r"%s|%s" % (a, a + b) self.assertEqual(re.fullmatch(r, a + b).span(), (0, 2)) self.assertEqual(re.fullmatch(r".*?$", "abc").span(), (0, 3)) self.assertEqual(re.fullmatch(r".*?", "abc").span(), (0, 3)) self.assertEqual(re.fullmatch(r"a.*?b", "ab").span(), (0, 2)) self.assertEqual(re.fullmatch(r"a.*?b", "abb").span(), (0, 3)) self.assertEqual(re.fullmatch(r"a.*?b", "axxb").span(), (0, 4)) self.assertIsNone(re.fullmatch(r"a+", "ab")) self.assertIsNone(re.fullmatch(r"abc$", "abc\n")) self.assertIsNone(re.fullmatch(r"abc\Z", "abc\n")) self.assertIsNone(re.fullmatch(r"(?m)abc$", "abc\n")) self.assertEqual(re.fullmatch(r"ab(?=c)cd", "abcd").span(), (0, 4)) self.assertEqual(re.fullmatch(r"ab(?<=b)cd", "abcd").span(), (0, 4)) self.assertEqual(re.fullmatch(r"(?=a|ab)ab", "ab").span(), (0, 2)) self.assertEqual( re.compile(r"bc").fullmatch("abcd", pos=1, endpos=3).span(), (1, 3)) self.assertEqual( re.compile(r".*?$").fullmatch("abcd", pos=1, endpos=3).span(), (1, 3)) self.assertEqual( re.compile(r".*?").fullmatch("abcd", pos=1, endpos=3).span(), (1, 3)) def test_re_groupref_exists(self): self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', '(a)').groups(), ('(', 'a')) self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', 'a').groups(), (None, 'a')) self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', 'a)'), None) self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', '(a'), None) self.assertEqual(re.match('^(?:(a)|c)((?(1)b|d))$', 'ab').groups(), ('a', 'b')) self.assertEqual(re.match('^(?:(a)|c)((?(1)b|d))$', 'cd').groups(), (None, 'd')) self.assertEqual(re.match('^(?:(a)|c)((?(1)|d))$', 'cd').groups(), (None, 'd')) self.assertEqual(re.match('^(?:(a)|c)((?(1)|d))$', 'a').groups(), ('a', '')) # Tests for bug #1177831: exercise groups other than the first group p = re.compile('(?P<g1>a)(?P<g2>b)?((?(g2)c|d))') self.assertEqual(p.match('abc').groups(), ('a', 'b', 'c')) self.assertEqual(p.match('ad').groups(), ('a', None, 'd')) self.assertEqual(p.match('abd'), None) self.assertEqual(p.match('ac'), None) def test_re_groupref(self): self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', '|a|').groups(), ('|', 'a')) self.assertEqual(re.match(r'^(\|)?([^()]+)\1?$', 'a').groups(), (None, 'a')) self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', 'a|'), None) self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', '|a'), None) self.assertEqual(re.match(r'^(?:(a)|c)(\1)$', 'aa').groups(), ('a', 'a')) self.assertEqual(re.match(r'^(?:(a)|c)(\1)?$', 'c').groups(), (None, None)) def test_groupdict(self): self.assertEqual(re.match('(?P<first>first) (?P<second>second)', 'first second').groupdict(), {'first':'first', 'second':'second'}) def test_expand(self): self.assertEqual(re.match("(?P<first>first) (?P<second>second)", "first second") .expand(r"\2 \1 \g<second> \g<first>"), "second first second first") def test_repeat_minmax(self): self.assertEqual(re.match("^(\w){1}$", "abc"), None) self.assertEqual(re.match("^(\w){1}?$", "abc"), None) self.assertEqual(re.match("^(\w){1,2}$", "abc"), None) self.assertEqual(re.match("^(\w){1,2}?$", "abc"), None) self.assertEqual(re.match("^(\w){3}$", "abc").group(1), "c") self.assertEqual(re.match("^(\w){1,3}$", "abc").group(1), "c") 
self.assertEqual(re.match("^(\w){1,4}$", "abc").group(1), "c") self.assertEqual(re.match("^(\w){3,4}?$", "abc").group(1), "c") self.assertEqual(re.match("^(\w){3}?$", "abc").group(1), "c") self.assertEqual(re.match("^(\w){1,3}?$", "abc").group(1), "c") self.assertEqual(re.match("^(\w){1,4}?$", "abc").group(1), "c") self.assertEqual(re.match("^(\w){3,4}?$", "abc").group(1), "c") self.assertEqual(re.match("^x{1}$", "xxx"), None) self.assertEqual(re.match("^x{1}?$", "xxx"), None) self.assertEqual(re.match("^x{1,2}$", "xxx"), None) self.assertEqual(re.match("^x{1,2}?$", "xxx"), None) self.assertNotEqual(re.match("^x{3}$", "xxx"), None) self.assertNotEqual(re.match("^x{1,3}$", "xxx"), None) self.assertNotEqual(re.match("^x{1,4}$", "xxx"), None) self.assertNotEqual(re.match("^x{3,4}?$", "xxx"), None) self.assertNotEqual(re.match("^x{3}?$", "xxx"), None) self.assertNotEqual(re.match("^x{1,3}?$", "xxx"), None) self.assertNotEqual(re.match("^x{1,4}?$", "xxx"), None) self.assertNotEqual(re.match("^x{3,4}?$", "xxx"), None) self.assertEqual(re.match("^x{}$", "xxx"), None) self.assertNotEqual(re.match("^x{}$", "x{}"), None) def test_getattr(self): self.assertEqual(re.compile("(?i)(a)(b)").pattern, "(?i)(a)(b)") self.assertEqual(re.compile("(?i)(a)(b)").flags, re.I | re.U) self.assertEqual(re.compile("(?i)(a)(b)").groups, 2) self.assertEqual(re.compile("(?i)(a)(b)").groupindex, {}) self.assertEqual(re.compile("(?i)(?P<first>a)(?P<other>b)").groupindex, {'first': 1, 'other': 2}) self.assertEqual(re.match("(a)", "a").pos, 0) self.assertEqual(re.match("(a)", "a").endpos, 1) self.assertEqual(re.match("(a)", "a").string, "a") self.assertEqual(re.match("(a)", "a").regs, ((0, 1), (0, 1))) self.assertNotEqual(re.match("(a)", "a").re, None) def test_special_escapes(self): self.assertEqual(re.search(r"\b(b.)\b", "abcd abc bcd bx").group(1), "bx") self.assertEqual(re.search(r"\B(b.)\B", "abc bcd bc abxd").group(1), "bx") self.assertEqual(re.search(r"\b(b.)\b", "abcd abc bcd bx", re.LOCALE).group(1), "bx") self.assertEqual(re.search(r"\B(b.)\B", "abc bcd bc abxd", re.LOCALE).group(1), "bx") self.assertEqual(re.search(r"\b(b.)\b", "abcd abc bcd bx", re.UNICODE).group(1), "bx") self.assertEqual(re.search(r"\B(b.)\B", "abc bcd bc abxd", re.UNICODE).group(1), "bx") self.assertEqual(re.search(r"^abc$", "\nabc\n", re.M).group(0), "abc") self.assertEqual(re.search(r"^\Aabc\Z$", "abc", re.M).group(0), "abc") self.assertEqual(re.search(r"^\Aabc\Z$", "\nabc\n", re.M), None) self.assertEqual(re.search(r"\b(b.)\b", "abcd abc bcd bx").group(1), "bx") self.assertEqual(re.search(r"\B(b.)\B", "abc bcd bc abxd").group(1), "bx") self.assertEqual(re.search(r"^abc$", "\nabc\n", re.M).group(0), "abc") self.assertEqual(re.search(r"^\Aabc\Z$", "abc", re.M).group(0), "abc") self.assertEqual(re.search(r"^\Aabc\Z$", "\nabc\n", re.M), None) self.assertEqual(re.search(r"\d\D\w\W\s\S", "1aa! a").group(0), "1aa! a") self.assertEqual(re.search(r"\d\D\w\W\s\S", "1aa! a", re.LOCALE).group(0), "1aa! a") self.assertEqual(re.search(r"\d\D\w\W\s\S", "1aa! a", re.UNICODE).group(0), "1aa! a") def test_string_boundaries(self): # See http://bugs.python.org/issue10713 self.assertEqual(re.search(r"\b(abc)\b", "abc").group(1), "abc") # There's a word boundary at the start of a string. self.assertTrue(re.match(r"\b", "abc")) # A non-empty string includes a non-boundary zero-length match. self.assertTrue(re.search(r"\B", "abc")) # There is no non-boundary match at the start of a string. 
self.assertFalse(re.match(r"\B", "abc")) # However, an empty string contains no word boundaries, and also no # non-boundaries. self.assertEqual(re.search(r"\B", ""), None) # This one is questionable and different from the perlre behaviour, # but describes current behavior. self.assertEqual(re.search(r"\b", ""), None) # A single word-character string has two boundaries, but no # non-boundary gaps. self.assertEqual(len(re.findall(r"\b", "a")), 2) self.assertEqual(len(re.findall(r"\B", "a")), 0) # If there are no words, there are no boundaries self.assertEqual(len(re.findall(r"\b", " ")), 0) self.assertEqual(len(re.findall(r"\b", " ")), 0) # Can match around the whitespace. self.assertEqual(len(re.findall(r"\B", " ")), 2) def test_bigcharset(self): self.assertEqual(re.match("([\u2222\u2223])", "\u2222").group(1), "\u2222") self.assertEqual(re.match("([\u2222\u2223])", "\u2222", re.UNICODE).group(1), "\u2222") r = '[%s]' % ''.join(map(chr, range(256, 2**16, 255))) self.assertEqual(re.match(r, "\uff01", re.UNICODE).group(), "\uff01") def test_big_codesize(self): # Issue #1160 r = re.compile('|'.join(('%d'%x for x in range(10000)))) self.assertIsNotNone(r.match('1000')) self.assertIsNotNone(r.match('9999')) def test_anyall(self): self.assertEqual(re.match("a.b", "a\nb", re.DOTALL).group(0), "a\nb") self.assertEqual(re.match("a.*b", "a\n\nb", re.DOTALL).group(0), "a\n\nb") def test_non_consuming(self): self.assertEqual(re.match("(a(?=\s[^a]))", "a b").group(1), "a") self.assertEqual(re.match("(a(?=\s[^a]*))", "a b").group(1), "a") self.assertEqual(re.match("(a(?=\s[abc]))", "a b").group(1), "a") self.assertEqual(re.match("(a(?=\s[abc]*))", "a bc").group(1), "a") self.assertEqual(re.match(r"(a)(?=\s\1)", "a a").group(1), "a") self.assertEqual(re.match(r"(a)(?=\s\1*)", "a aa").group(1), "a") self.assertEqual(re.match(r"(a)(?=\s(abc|a))", "a a").group(1), "a") self.assertEqual(re.match(r"(a(?!\s[^a]))", "a a").group(1), "a") self.assertEqual(re.match(r"(a(?!\s[abc]))", "a d").group(1), "a") self.assertEqual(re.match(r"(a)(?!\s\1)", "a b").group(1), "a") self.assertEqual(re.match(r"(a)(?!\s(abc|a))", "a b").group(1), "a") def test_ignore_case(self): self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC") self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC") self.assertEqual(re.match(r"(a\s[^a])", "a b", re.I).group(1), "a b") self.assertEqual(re.match(r"(a\s[^a]*)", "a bb", re.I).group(1), "a bb") self.assertEqual(re.match(r"(a\s[abc])", "a b", re.I).group(1), "a b") self.assertEqual(re.match(r"(a\s[abc]*)", "a bb", re.I).group(1), "a bb") self.assertEqual(re.match(r"((a)\s\2)", "a a", re.I).group(1), "a a") self.assertEqual(re.match(r"((a)\s\2*)", "a aa", re.I).group(1), "a aa") self.assertEqual(re.match(r"((a)\s(abc|a))", "a a", re.I).group(1), "a a") self.assertEqual(re.match(r"((a)\s(abc|a)*)", "a aa", re.I).group(1), "a aa") def test_category(self): self.assertEqual(re.match(r"(\s)", " ").group(1), " ") def test_getlower(self): import _sre self.assertEqual(_sre.getlower(ord('A'), 0), ord('a')) self.assertEqual(_sre.getlower(ord('A'), re.LOCALE), ord('a')) self.assertEqual(_sre.getlower(ord('A'), re.UNICODE), ord('a')) self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC") self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC") def test_not_literal(self): self.assertEqual(re.search("\s([^a])", " b").group(1), "b") self.assertEqual(re.search("\s([^a]*)", " bb").group(1), "bb") def test_search_coverage(self): self.assertEqual(re.search("\s(b)", " b").group(1), "b") 
self.assertEqual(re.search("a\s", "a ").group(0), "a ") def assertMatch(self, pattern, text, match=None, span=None, matcher=re.match): if match is None and span is None: # the pattern matches the whole text match = text span = (0, len(text)) elif match is None or span is None: raise ValueError('If match is not None, span should be specified ' '(and vice versa).') m = matcher(pattern, text) self.assertTrue(m) self.assertEqual(m.group(), match) self.assertEqual(m.span(), span) def test_re_escape(self): alnum_chars = string.ascii_letters + string.digits + '_' p = ''.join(chr(i) for i in range(256)) for c in p: if c in alnum_chars: self.assertEqual(re.escape(c), c) elif c == '\x00': self.assertEqual(re.escape(c), '\\000') else: self.assertEqual(re.escape(c), '\\' + c) self.assertMatch(re.escape(c), c) self.assertMatch(re.escape(p), p) def test_re_escape_byte(self): alnum_chars = (string.ascii_letters + string.digits + '_').encode('ascii') p = bytes(range(256)) for i in p: b = bytes([i]) if b in alnum_chars: self.assertEqual(re.escape(b), b) elif i == 0: self.assertEqual(re.escape(b), b'\\000') else: self.assertEqual(re.escape(b), b'\\' + b) self.assertMatch(re.escape(b), b) self.assertMatch(re.escape(p), p) def test_re_escape_non_ascii(self): s = 'xxx\u2620\u2620\u2620xxx' s_escaped = re.escape(s) self.assertEqual(s_escaped, 'xxx\\\u2620\\\u2620\\\u2620xxx') self.assertMatch(s_escaped, s) self.assertMatch('.%s+.' % re.escape('\u2620'), s, 'x\u2620\u2620\u2620x', (2, 7), re.search) def test_re_escape_non_ascii_bytes(self): b = 'y\u2620y\u2620y'.encode('utf-8') b_escaped = re.escape(b) self.assertEqual(b_escaped, b'y\\\xe2\\\x98\\\xa0y\\\xe2\\\x98\\\xa0y') self.assertMatch(b_escaped, b) res = re.findall(re.escape('\u2620'.encode('utf-8')), b) self.assertEqual(len(res), 2) def pickle_test(self, pickle): oldpat = re.compile('a(?:b|(c|e){1,2}?|d)+?(.)') s = pickle.dumps(oldpat) newpat = pickle.loads(s) self.assertEqual(oldpat, newpat) def test_constants(self): self.assertEqual(re.I, re.IGNORECASE) self.assertEqual(re.L, re.LOCALE) self.assertEqual(re.M, re.MULTILINE) self.assertEqual(re.S, re.DOTALL) self.assertEqual(re.X, re.VERBOSE) def test_flags(self): for flag in [re.I, re.M, re.X, re.S, re.L]: self.assertNotEqual(re.compile('^pattern$', flag), None) def test_sre_character_literals(self): for i in [0, 8, 16, 32, 64, 127, 128, 255, 256, 0xFFFF, 0x10000, 0x10FFFF]: if i < 256: self.assertIsNotNone(re.match(r"\%03o" % i, chr(i))) self.assertIsNotNone(re.match(r"\%03o0" % i, chr(i)+"0")) self.assertIsNotNone(re.match(r"\%03o8" % i, chr(i)+"8")) self.assertIsNotNone(re.match(r"\x%02x" % i, chr(i))) self.assertIsNotNone(re.match(r"\x%02x0" % i, chr(i)+"0")) self.assertIsNotNone(re.match(r"\x%02xz" % i, chr(i)+"z")) if i < 0x10000: self.assertIsNotNone(re.match(r"\u%04x" % i, chr(i))) self.assertIsNotNone(re.match(r"\u%04x0" % i, chr(i)+"0")) self.assertIsNotNone(re.match(r"\u%04xz" % i, chr(i)+"z")) self.assertIsNotNone(re.match(r"\U%08x" % i, chr(i))) self.assertIsNotNone(re.match(r"\U%08x0" % i, chr(i)+"0")) self.assertIsNotNone(re.match(r"\U%08xz" % i, chr(i)+"z")) self.assertIsNotNone(re.match(r"\0", "\000")) self.assertIsNotNone(re.match(r"\08", "\0008")) self.assertIsNotNone(re.match(r"\01", "\001")) self.assertIsNotNone(re.match(r"\018", "\0018")) self.assertIsNotNone(re.match(r"\567", chr(0o167))) self.assertRaises(re.error, re.match, r"\911", "") self.assertRaises(re.error, re.match, r"\x1", "") self.assertRaises(re.error, re.match, r"\x1z", "") self.assertRaises(re.error, re.match, 
r"\u123", "") self.assertRaises(re.error, re.match, r"\u123z", "") self.assertRaises(re.error, re.match, r"\U0001234", "") self.assertRaises(re.error, re.match, r"\U0001234z", "") self.assertRaises(re.error, re.match, r"\U00110000", "") def test_sre_character_class_literals(self): for i in [0, 8, 16, 32, 64, 127, 128, 255, 256, 0xFFFF, 0x10000, 0x10FFFF]: if i < 256: self.assertIsNotNone(re.match(r"[\%o]" % i, chr(i))) self.assertIsNotNone(re.match(r"[\%o8]" % i, chr(i))) self.assertIsNotNone(re.match(r"[\%03o]" % i, chr(i))) self.assertIsNotNone(re.match(r"[\%03o0]" % i, chr(i))) self.assertIsNotNone(re.match(r"[\%03o8]" % i, chr(i))) self.assertIsNotNone(re.match(r"[\x%02x]" % i, chr(i))) self.assertIsNotNone(re.match(r"[\x%02x0]" % i, chr(i))) self.assertIsNotNone(re.match(r"[\x%02xz]" % i, chr(i))) if i < 0x10000: self.assertIsNotNone(re.match(r"[\u%04x]" % i, chr(i))) self.assertIsNotNone(re.match(r"[\u%04x0]" % i, chr(i))) self.assertIsNotNone(re.match(r"[\u%04xz]" % i, chr(i))) self.assertIsNotNone(re.match(r"[\U%08x]" % i, chr(i))) self.assertIsNotNone(re.match(r"[\U%08x0]" % i, chr(i)+"0")) self.assertIsNotNone(re.match(r"[\U%08xz]" % i, chr(i)+"z")) self.assertIsNotNone(re.match(r"[\U0001d49c-\U0001d4b5]", "\U0001d49e")) self.assertRaises(re.error, re.match, r"[\911]", "") self.assertRaises(re.error, re.match, r"[\x1z]", "") self.assertRaises(re.error, re.match, r"[\u123z]", "") self.assertRaises(re.error, re.match, r"[\U0001234z]", "") self.assertRaises(re.error, re.match, r"[\U00110000]", "") def test_sre_byte_literals(self): for i in [0, 8, 16, 32, 64, 127, 128, 255]: self.assertIsNotNone(re.match((r"\%03o" % i).encode(), bytes([i]))) self.assertIsNotNone(re.match((r"\%03o0" % i).encode(), bytes([i])+b"0")) self.assertIsNotNone(re.match((r"\%03o8" % i).encode(), bytes([i])+b"8")) self.assertIsNotNone(re.match((r"\x%02x" % i).encode(), bytes([i]))) self.assertIsNotNone(re.match((r"\x%02x0" % i).encode(), bytes([i])+b"0")) self.assertIsNotNone(re.match((r"\x%02xz" % i).encode(), bytes([i])+b"z")) self.assertIsNotNone(re.match(br"\u", b'u')) self.assertIsNotNone(re.match(br"\U", b'U')) self.assertIsNotNone(re.match(br"\0", b"\000")) self.assertIsNotNone(re.match(br"\08", b"\0008")) self.assertIsNotNone(re.match(br"\01", b"\001")) self.assertIsNotNone(re.match(br"\018", b"\0018")) self.assertIsNotNone(re.match(br"\567", bytes([0o167]))) self.assertRaises(re.error, re.match, br"\911", b"") self.assertRaises(re.error, re.match, br"\x1", b"") self.assertRaises(re.error, re.match, br"\x1z", b"") def test_sre_byte_class_literals(self): for i in [0, 8, 16, 32, 64, 127, 128, 255]: self.assertIsNotNone(re.match((r"[\%o]" % i).encode(), bytes([i]))) self.assertIsNotNone(re.match((r"[\%o8]" % i).encode(), bytes([i]))) self.assertIsNotNone(re.match((r"[\%03o]" % i).encode(), bytes([i]))) self.assertIsNotNone(re.match((r"[\%03o0]" % i).encode(), bytes([i]))) self.assertIsNotNone(re.match((r"[\%03o8]" % i).encode(), bytes([i]))) self.assertIsNotNone(re.match((r"[\x%02x]" % i).encode(), bytes([i]))) self.assertIsNotNone(re.match((r"[\x%02x0]" % i).encode(), bytes([i]))) self.assertIsNotNone(re.match((r"[\x%02xz]" % i).encode(), bytes([i]))) self.assertIsNotNone(re.match(br"[\u]", b'u')) self.assertIsNotNone(re.match(br"[\U]", b'U')) self.assertRaises(re.error, re.match, br"[\911]", "") self.assertRaises(re.error, re.match, br"[\x1z]", "") def test_bug_113254(self): self.assertEqual(re.match(r'(a)|(b)', 'b').start(1), -1) self.assertEqual(re.match(r'(a)|(b)', 'b').end(1), -1) 
self.assertEqual(re.match(r'(a)|(b)', 'b').span(1), (-1, -1)) def test_bug_527371(self): # bug described in patches 527371/672491 self.assertEqual(re.match(r'(a)?a','a').lastindex, None) self.assertEqual(re.match(r'(a)(b)?b','ab').lastindex, 1) self.assertEqual(re.match(r'(?P<a>a)(?P<b>b)?b','ab').lastgroup, 'a') self.assertEqual(re.match("(?P<a>a(b))", "ab").lastgroup, 'a') self.assertEqual(re.match("((a))", "a").lastindex, 1) def test_bug_545855(self): # bug 545855 -- This pattern failed to cause a compile error as it # should, instead provoking a TypeError. self.assertRaises(re.error, re.compile, 'foo[a-') def test_bug_418626(self): # bugs 418626 at al. -- Testing Greg Chapman's addition of op code # SRE_OP_MIN_REPEAT_ONE for eliminating recursion on simple uses of # pattern '*?' on a long string. self.assertEqual(re.match('.*?c', 10000*'ab'+'cd').end(0), 20001) self.assertEqual(re.match('.*?cd', 5000*'ab'+'c'+5000*'ab'+'cde').end(0), 20003) self.assertEqual(re.match('.*?cd', 20000*'abc'+'de').end(0), 60001) # non-simple '*?' still used to hit the recursion limit, before the # non-recursive scheme was implemented. self.assertEqual(re.search('(a|b)*?c', 10000*'ab'+'cd').end(0), 20001) def test_bug_612074(self): pat="["+re.escape("\u2039")+"]" self.assertEqual(re.compile(pat) and 1, 1) def test_stack_overflow(self): # nasty cases that used to overflow the straightforward recursive # implementation of repeated groups. self.assertEqual(re.match('(x)*', 50000*'x').group(1), 'x') self.assertEqual(re.match('(x)*y', 50000*'x'+'y').group(1), 'x') self.assertEqual(re.match('(x)*?y', 50000*'x'+'y').group(1), 'x') def test_unlimited_zero_width_repeat(self): # Issue #9669 self.assertIsNone(re.match(r'(?:a?)*y', 'z')) self.assertIsNone(re.match(r'(?:a?)+y', 'z')) self.assertIsNone(re.match(r'(?:a?){2,}y', 'z')) self.assertIsNone(re.match(r'(?:a?)*?y', 'z')) self.assertIsNone(re.match(r'(?:a?)+?y', 'z')) self.assertIsNone(re.match(r'(?:a?){2,}?y', 'z')) def test_scanner(self): def s_ident(scanner, token): return token def s_operator(scanner, token): return "op%s" % token def s_float(scanner, token): return float(token) def s_int(scanner, token): return int(token) scanner = Scanner([ (r"[a-zA-Z_]\w*", s_ident), (r"\d+\.\d*", s_float), (r"\d+", s_int), (r"=|\+|-|\*|/", s_operator), (r"\s+", None), ]) self.assertNotEqual(scanner.scanner.scanner("").pattern, None) self.assertEqual(scanner.scan("sum = 3*foo + 312.50 + bar"), (['sum', 'op=', 3, 'op*', 'foo', 'op+', 312.5, 'op+', 'bar'], '')) def test_bug_448951(self): # bug 448951 (similar to 429357, but with single char match) # (Also test greedy matches.) 
for op in '','?','*': self.assertEqual(re.match(r'((.%s):)?z'%op, 'z').groups(), (None, None)) self.assertEqual(re.match(r'((.%s):)?z'%op, 'a:z').groups(), ('a:', 'a')) def test_bug_725106(self): # capturing groups in alternatives in repeats self.assertEqual(re.match('^((a)|b)*', 'abc').groups(), ('b', 'a')) self.assertEqual(re.match('^(([ab])|c)*', 'abc').groups(), ('c', 'b')) self.assertEqual(re.match('^((d)|[ab])*', 'abc').groups(), ('b', None)) self.assertEqual(re.match('^((a)c|[ab])*', 'abc').groups(), ('b', None)) self.assertEqual(re.match('^((a)|b)*?c', 'abc').groups(), ('b', 'a')) self.assertEqual(re.match('^(([ab])|c)*?d', 'abcd').groups(), ('c', 'b')) self.assertEqual(re.match('^((d)|[ab])*?c', 'abc').groups(), ('b', None)) self.assertEqual(re.match('^((a)c|[ab])*?c', 'abc').groups(), ('b', None)) def test_bug_725149(self): # mark_stack_base restoring before restoring marks self.assertEqual(re.match('(a)(?:(?=(b)*)c)*', 'abb').groups(), ('a', None)) self.assertEqual(re.match('(a)((?!(b)*))*', 'abb').groups(), ('a', None, None)) def test_bug_764548(self): # bug 764548, re.compile() barfs on str/unicode subclasses class my_unicode(str): pass pat = re.compile(my_unicode("abc")) self.assertEqual(pat.match("xyz"), None) def test_finditer(self): iter = re.finditer(r":+", "a:b::c:::d") self.assertEqual([item.group(0) for item in iter], [":", "::", ":::"]) pat = re.compile(r":+") iter = pat.finditer("a:b::c:::d", 1, 10) self.assertEqual([item.group(0) for item in iter], [":", "::", ":::"]) pat = re.compile(r":+") iter = pat.finditer("a:b::c:::d", pos=1, endpos=10) self.assertEqual([item.group(0) for item in iter], [":", "::", ":::"]) pat = re.compile(r":+") iter = pat.finditer("a:b::c:::d", endpos=10, pos=1) self.assertEqual([item.group(0) for item in iter], [":", "::", ":::"]) pat = re.compile(r":+") iter = pat.finditer("a:b::c:::d", pos=3, endpos=8) self.assertEqual([item.group(0) for item in iter], ["::", "::"]) def test_bug_926075(self): self.assertTrue(re.compile('bug_926075') is not re.compile(b'bug_926075')) def test_bug_931848(self): pattern = eval('"[\u002E\u3002\uFF0E\uFF61]"') self.assertEqual(re.compile(pattern).split("a.b.c"), ['a','b','c']) def test_bug_581080(self): iter = re.finditer(r"\s", "a b") self.assertEqual(next(iter).span(), (1,2)) self.assertRaises(StopIteration, next, iter) scanner = re.compile(r"\s").scanner("a b") self.assertEqual(scanner.search().span(), (1, 2)) self.assertEqual(scanner.search(), None) def test_bug_817234(self): iter = re.finditer(r".*", "asdf") self.assertEqual(next(iter).span(), (0, 4)) self.assertEqual(next(iter).span(), (4, 4)) self.assertRaises(StopIteration, next, iter) def test_bug_6561(self): # '\d' should match characters in Unicode category 'Nd' # (Number, Decimal Digit), but not those in 'Nl' (Number, # Letter) or 'No' (Number, Other). 
decimal_digits = [ '\u0037', # '\N{DIGIT SEVEN}', category 'Nd' '\u0e58', # '\N{THAI DIGIT SIX}', category 'Nd' '\uff10', # '\N{FULLWIDTH DIGIT ZERO}', category 'Nd' ] for x in decimal_digits: self.assertEqual(re.match('^\d$', x).group(0), x) not_decimal_digits = [ '\u2165', # '\N{ROMAN NUMERAL SIX}', category 'Nl' '\u3039', # '\N{HANGZHOU NUMERAL TWENTY}', category 'Nl' '\u2082', # '\N{SUBSCRIPT TWO}', category 'No' '\u32b4', # '\N{CIRCLED NUMBER THIRTY NINE}', category 'No' ] for x in not_decimal_digits: self.assertIsNone(re.match('^\d$', x)) def test_empty_array(self): # SF buf 1647541 import array for typecode in 'bBuhHiIlLfd': a = array.array(typecode) self.assertEqual(re.compile(b"bla").match(a), None) self.assertEqual(re.compile(b"").match(a).groups(), ()) def test_inline_flags(self): # Bug #1700 upper_char = chr(0x1ea0) # Latin Capital Letter A with Dot Bellow lower_char = chr(0x1ea1) # Latin Small Letter A with Dot Bellow p = re.compile(upper_char, re.I | re.U) q = p.match(lower_char) self.assertNotEqual(q, None) p = re.compile(lower_char, re.I | re.U) q = p.match(upper_char) self.assertNotEqual(q, None) p = re.compile('(?i)' + upper_char, re.U) q = p.match(lower_char) self.assertNotEqual(q, None) p = re.compile('(?i)' + lower_char, re.U) q = p.match(upper_char) self.assertNotEqual(q, None) p = re.compile('(?iu)' + upper_char) q = p.match(lower_char) self.assertNotEqual(q, None) p = re.compile('(?iu)' + lower_char) q = p.match(upper_char) self.assertNotEqual(q, None) def test_dollar_matches_twice(self): "$ matches the end of string, and just before the terminating \n" pattern = re.compile('$') self.assertEqual(pattern.sub('#', 'a\nb\n'), 'a\nb#\n#') self.assertEqual(pattern.sub('#', 'a\nb\nc'), 'a\nb\nc#') self.assertEqual(pattern.sub('#', '\n'), '#\n#') pattern = re.compile('$', re.MULTILINE) self.assertEqual(pattern.sub('#', 'a\nb\n' ), 'a#\nb#\n#' ) self.assertEqual(pattern.sub('#', 'a\nb\nc'), 'a#\nb#\nc#') self.assertEqual(pattern.sub('#', '\n'), '#\n#') def test_bytes_str_mixing(self): # Mixing str and bytes is disallowed pat = re.compile('.') bpat = re.compile(b'.') self.assertRaises(TypeError, pat.match, b'b') self.assertRaises(TypeError, bpat.match, 'b') self.assertRaises(TypeError, pat.sub, b'b', 'c') self.assertRaises(TypeError, pat.sub, 'b', b'c') self.assertRaises(TypeError, pat.sub, b'b', b'c') self.assertRaises(TypeError, bpat.sub, b'b', 'c') self.assertRaises(TypeError, bpat.sub, 'b', b'c') self.assertRaises(TypeError, bpat.sub, 'b', 'c') def test_ascii_and_unicode_flag(self): # String patterns for flags in (0, re.UNICODE): pat = re.compile('\xc0', flags | re.IGNORECASE) self.assertNotEqual(pat.match('\xe0'), None) pat = re.compile('\w', flags) self.assertNotEqual(pat.match('\xe0'), None) pat = re.compile('\xc0', re.ASCII | re.IGNORECASE) self.assertEqual(pat.match('\xe0'), None) pat = re.compile('(?a)\xc0', re.IGNORECASE) self.assertEqual(pat.match('\xe0'), None) pat = re.compile('\w', re.ASCII) self.assertEqual(pat.match('\xe0'), None) pat = re.compile('(?a)\w') self.assertEqual(pat.match('\xe0'), None) # Bytes patterns for flags in (0, re.ASCII): pat = re.compile(b'\xc0', re.IGNORECASE) self.assertEqual(pat.match(b'\xe0'), None) pat = re.compile(b'\w') self.assertEqual(pat.match(b'\xe0'), None) # Incompatibilities self.assertRaises(ValueError, re.compile, b'\w', re.UNICODE) self.assertRaises(ValueError, re.compile, b'(?u)\w') self.assertRaises(ValueError, re.compile, '\w', re.UNICODE | re.ASCII) self.assertRaises(ValueError, re.compile, '(?u)\w', re.ASCII) 
self.assertRaises(ValueError, re.compile, '(?a)\w', re.UNICODE) self.assertRaises(ValueError, re.compile, '(?au)\w') def test_bug_6509(self): # Replacement strings of both types must parse properly. # all strings pat = re.compile('a(\w)') self.assertEqual(pat.sub('b\\1', 'ac'), 'bc') pat = re.compile('a(.)') self.assertEqual(pat.sub('b\\1', 'a\u1234'), 'b\u1234') pat = re.compile('..') self.assertEqual(pat.sub(lambda m: 'str', 'a5'), 'str') # all bytes pat = re.compile(b'a(\w)') self.assertEqual(pat.sub(b'b\\1', b'ac'), b'bc') pat = re.compile(b'a(.)') self.assertEqual(pat.sub(b'b\\1', b'a\xCD'), b'b\xCD') pat = re.compile(b'..') self.assertEqual(pat.sub(lambda m: b'bytes', b'a5'), b'bytes') def test_dealloc(self): # issue 3299: check for segfault in debug build import _sre # the overflow limit is different on wide and narrow builds and it # depends on the definition of SRE_CODE (see sre.h). # 2**128 should be big enough to overflow on both. For smaller values # a RuntimeError is raised instead of OverflowError. long_overflow = 2**128 self.assertRaises(TypeError, re.finditer, "a", {}) self.assertRaises(OverflowError, _sre.compile, "abc", 0, [long_overflow]) self.assertRaises(TypeError, _sre.compile, {}, 0, []) def test_search_dot_unicode(self): self.assertIsNotNone(re.search("123.*-", '123abc-')) self.assertIsNotNone(re.search("123.*-", '123\xe9-')) self.assertIsNotNone(re.search("123.*-", '123\u20ac-')) self.assertIsNotNone(re.search("123.*-", '123\U0010ffff-')) self.assertIsNotNone(re.search("123.*-", '123\xe9\u20ac\U0010ffff-')) def test_compile(self): # Test return value when given string and pattern as parameter pattern = re.compile('random pattern') self.assertIsInstance(pattern, re._pattern_type) same_pattern = re.compile(pattern) self.assertIsInstance(same_pattern, re._pattern_type) self.assertIs(same_pattern, pattern) # Test behaviour when not given a string or pattern as parameter self.assertRaises(TypeError, re.compile, 0) def test_bug_13899(self): # Issue #13899: re pattern r"[\A]" should work like "A" but matches # nothing. Ditto B and Z. self.assertEqual(re.findall(r'[\A\B\b\C\Z]', 'AB\bCZ'), ['A', 'B', '\b', 'C', 'Z']) @bigmemtest(size=_2G, memuse=1) def test_large_search(self, size): # Issue #10182: indices were 32-bit-truncated. s = 'a' * size m = re.search('$', s) self.assertIsNotNone(m) self.assertEqual(m.start(), size) self.assertEqual(m.end(), size) # The huge memuse is because of re.sub() using a list and a join() # to create the replacement result. @bigmemtest(size=_2G, memuse=16 + 2) def test_large_subn(self, size): # Issue #10182: indices were 32-bit-truncated. s = 'a' * size r, n = re.subn('', '', s) self.assertEqual(r, s) self.assertEqual(n, size + 1) def test_bug_16688(self): # Issue 16688: Backreferences make case-insensitive regex fail on # non-ASCII strings. self.assertEqual(re.findall(r"(?i)(a)\1", "aa \u0100"), ['a']) self.assertEqual(re.match(r"(?s).{1,3}", "\u0100\u0100").span(), (0, 2)) def test_repeat_minmax_overflow(self): # Issue #13169 string = "x" * 100000 self.assertEqual(re.match(r".{65535}", string).span(), (0, 65535)) self.assertEqual(re.match(r".{,65535}", string).span(), (0, 65535)) self.assertEqual(re.match(r".{65535,}?", string).span(), (0, 65535)) self.assertEqual(re.match(r".{65536}", string).span(), (0, 65536)) self.assertEqual(re.match(r".{,65536}", string).span(), (0, 65536)) self.assertEqual(re.match(r".{65536,}?", string).span(), (0, 65536)) # 2**128 should be big enough to overflow both SRE_CODE and Py_ssize_t. 
self.assertRaises(OverflowError, re.compile, r".{%d}" % 2**128) self.assertRaises(OverflowError, re.compile, r".{,%d}" % 2**128) self.assertRaises(OverflowError, re.compile, r".{%d,}?" % 2**128) self.assertRaises(OverflowError, re.compile, r".{%d,%d}" % (2**129, 2**128)) @cpython_only def test_repeat_minmax_overflow_maxrepeat(self): try: from _sre import MAXREPEAT except ImportError: self.skipTest('requires _sre.MAXREPEAT constant') string = "x" * 100000 self.assertIsNone(re.match(r".{%d}" % (MAXREPEAT - 1), string)) self.assertEqual(re.match(r".{,%d}" % (MAXREPEAT - 1), string).span(), (0, 100000)) self.assertIsNone(re.match(r".{%d,}?" % (MAXREPEAT - 1), string)) self.assertRaises(OverflowError, re.compile, r".{%d}" % MAXREPEAT) self.assertRaises(OverflowError, re.compile, r".{,%d}" % MAXREPEAT) self.assertRaises(OverflowError, re.compile, r".{%d,}?" % MAXREPEAT) def test_backref_group_name_in_exception(self): # Issue 17341: Poor error message when compiling invalid regex with self.assertRaisesRegex(sre_constants.error, '<foo>'): re.compile('(?P=<foo>)') def test_group_name_in_exception(self): # Issue 17341: Poor error message when compiling invalid regex with self.assertRaisesRegex(sre_constants.error, '\?foo'): re.compile('(?P<?foo>)') def test_issue17998(self): for reps in '*', '+', '?', '{1}': for mod in '', '?': pattern = '.' + reps + mod + 'yz' self.assertEqual(re.compile(pattern, re.S).findall('xyz'), ['xyz'], msg=pattern) pattern = pattern.encode() self.assertEqual(re.compile(pattern, re.S).findall(b'xyz'), [b'xyz'], msg=pattern) def test_match_repr(self): for string in '[abracadabra]', S('[abracadabra]'): m = re.search(r'(.+)(.*?)\1', string) self.assertEqual(repr(m), "<%s.%s object; " "span=(1, 12), match='abracadabra'>" % (type(m).__module__, type(m).__qualname__)) for string in (b'[abracadabra]', B(b'[abracadabra]'), bytearray(b'[abracadabra]'), memoryview(b'[abracadabra]')): m = re.search(rb'(.+)(.*?)\1', string) self.assertEqual(repr(m), "<%s.%s object; " "span=(1, 12), match=b'abracadabra'>" % (type(m).__module__, type(m).__qualname__)) first, second = list(re.finditer("(aa)|(bb)", "aa bb")) self.assertEqual(repr(first), "<%s.%s object; " "span=(0, 2), match='aa'>" % (type(second).__module__, type(first).__qualname__)) self.assertEqual(repr(second), "<%s.%s object; " "span=(3, 5), match='bb'>" % (type(second).__module__, type(second).__qualname__)) def test_bug_2537(self): # issue 2537: empty submatches for outer_op in ('{0,}', '*', '+', '{1,187}'): for inner_op in ('{0,}', '*', '?'): r = re.compile("^((x|y)%s)%s" % (inner_op, outer_op)) m = r.match("xyyzy") self.assertEqual(m.group(0), "xyy") self.assertEqual(m.group(1), "") self.assertEqual(m.group(2), "y") def test_debug_flag(self): with captured_stdout() as out: re.compile('foo', re.DEBUG) self.assertEqual(out.getvalue().splitlines(), ['literal 102 ', 'literal 111 ', 'literal 111 ']) # Debug output is output again even a second time (bypassing # the cache -- issue #20426). with captured_stdout() as out: re.compile('foo', re.DEBUG) self.assertEqual(out.getvalue().splitlines(), ['literal 102 ', 'literal 111 ', 'literal 111 ']) def test_keyword_parameters(self): # Issue #20283: Accepting the string keyword parameter. 
pat = re.compile(r'(ab)') self.assertEqual( pat.match(string='abracadabra', pos=7, endpos=10).span(), (7, 9)) self.assertEqual( pat.fullmatch(string='abracadabra', pos=7, endpos=9).span(), (7, 9)) self.assertEqual( pat.search(string='abracadabra', pos=3, endpos=10).span(), (7, 9)) self.assertEqual( pat.findall(string='abracadabra', pos=3, endpos=10), ['ab']) self.assertEqual( pat.split(string='abracadabra', maxsplit=1), ['', 'ab', 'racadabra']) self.assertEqual( pat.scanner(string='abracadabra', pos=3, endpos=10).search().span(), (7, 9)) def test_bug_20998(self): # Issue #20998: Fullmatch of repeated single character pattern # with ignore case. self.assertEqual(re.fullmatch('[a-c]+', 'ABC', re.I).span(), (0, 3)) class PatternReprTests(unittest.TestCase): def check(self, pattern, expected): self.assertEqual(repr(re.compile(pattern)), expected) def check_flags(self, pattern, flags, expected): self.assertEqual(repr(re.compile(pattern, flags)), expected) def test_without_flags(self): self.check('random pattern', "re.compile('random pattern')") def test_single_flag(self): self.check_flags('random pattern', re.IGNORECASE, "re.compile('random pattern', re.IGNORECASE)") def test_multiple_flags(self): self.check_flags('random pattern', re.I|re.S|re.X, "re.compile('random pattern', " "re.IGNORECASE|re.DOTALL|re.VERBOSE)") def test_unicode_flag(self): self.check_flags('random pattern', re.U, "re.compile('random pattern')") self.check_flags('random pattern', re.I|re.S|re.U, "re.compile('random pattern', " "re.IGNORECASE|re.DOTALL)") def test_inline_flags(self): self.check('(?i)pattern', "re.compile('(?i)pattern', re.IGNORECASE)") def test_unknown_flags(self): self.check_flags('random pattern', 0x123000, "re.compile('random pattern', 0x123000)") self.check_flags('random pattern', 0x123000|re.I, "re.compile('random pattern', re.IGNORECASE|0x123000)") def test_bytes(self): self.check(b'bytes pattern', "re.compile(b'bytes pattern')") self.check_flags(b'bytes pattern', re.A, "re.compile(b'bytes pattern', re.ASCII)") def test_quotes(self): self.check('random "double quoted" pattern', '''re.compile('random "double quoted" pattern')''') self.check("random 'single quoted' pattern", '''re.compile("random 'single quoted' pattern")''') self.check('''both 'single' and "double" quotes''', '''re.compile('both \\'single\\' and "double" quotes')''') def test_long_pattern(self): pattern = 'Very %spattern' % ('long ' * 1000) r = repr(re.compile(pattern)) self.assertLess(len(r), 300) self.assertEqual(r[:30], "re.compile('Very long long lon") r = repr(re.compile(pattern, re.I)) self.assertLess(len(r), 300) self.assertEqual(r[:30], "re.compile('Very long long lon") self.assertEqual(r[-16:], ", re.IGNORECASE)") class ImplementationTest(unittest.TestCase): """ Test implementation details of the re module. 
""" def test_overlap_table(self): f = sre_compile._generate_overlap_table self.assertEqual(f(""), []) self.assertEqual(f("a"), [0]) self.assertEqual(f("abcd"), [0, 0, 0, 0]) self.assertEqual(f("aaaa"), [0, 1, 2, 3]) self.assertEqual(f("ababba"), [0, 0, 1, 2, 0, 1]) self.assertEqual(f("abcabdac"), [0, 0, 0, 1, 2, 0, 1, 0]) def run_re_tests(): from test.re_tests import tests, SUCCEED, FAIL, SYNTAX_ERROR if verbose: print('Running re_tests test suite') else: # To save time, only run the first and last 10 tests #tests = tests[:10] + tests[-10:] pass for t in tests: sys.stdout.flush() pattern = s = outcome = repl = expected = None if len(t) == 5: pattern, s, outcome, repl, expected = t elif len(t) == 3: pattern, s, outcome = t else: raise ValueError('Test tuples should have 3 or 5 fields', t) try: obj = re.compile(pattern) except re.error: if outcome == SYNTAX_ERROR: pass # Expected a syntax error else: print('=== Syntax error:', t) except KeyboardInterrupt: raise KeyboardInterrupt except: print('*** Unexpected error ***', t) if verbose: traceback.print_exc(file=sys.stdout) else: try: result = obj.search(s) except re.error as msg: print('=== Unexpected exception', t, repr(msg)) if outcome == SYNTAX_ERROR: # This should have been a syntax error; forget it. pass elif outcome == FAIL: if result is None: pass # No match, as expected else: print('=== Succeeded incorrectly', t) elif outcome == SUCCEED: if result is not None: # Matched, as expected, so now we compute the # result string and compare it to our expected result. start, end = result.span(0) vardict={'found': result.group(0), 'groups': result.group(), 'flags': result.re.flags} for i in range(1, 100): try: gi = result.group(i) # Special hack because else the string concat fails: if gi is None: gi = "None" except IndexError: gi = "Error" vardict['g%d' % i] = gi for i in result.re.groupindex.keys(): try: gi = result.group(i) if gi is None: gi = "None" except IndexError: gi = "Error" vardict[i] = gi repl = eval(repl, vardict) if repl != expected: print('=== grouping error', t, end=' ') print(repr(repl) + ' should be ' + repr(expected)) else: print('=== Failed incorrectly', t) # Try the match with both pattern and string converted to # bytes, and check that it still succeeds. try: bpat = bytes(pattern, "ascii") bs = bytes(s, "ascii") except UnicodeEncodeError: # skip non-ascii tests pass else: try: bpat = re.compile(bpat) except Exception: print('=== Fails on bytes pattern compile', t) if verbose: traceback.print_exc(file=sys.stdout) else: bytes_result = bpat.search(bs) if bytes_result is None: print('=== Fails on bytes pattern match', t) # Try the match with the search area limited to the extent # of the match and see if it still succeeds. \B will # break (because it won't match at the end or start of a # string), so we'll ignore patterns that feature it. if pattern[:2] != '\\B' and pattern[-2:] != '\\B' \ and result is not None: obj = re.compile(pattern) result = obj.search(s, result.start(0), result.end(0) + 1) if result is None: print('=== Failed on range-limited match', t) # Try the match with IGNORECASE enabled, and check that it # still succeeds. obj = re.compile(pattern, re.IGNORECASE) result = obj.search(s) if result is None: print('=== Fails on case-insensitive match', t) # Try the match with LOCALE enabled, and check that it # still succeeds. 
if '(?u)' not in pattern: obj = re.compile(pattern, re.LOCALE) result = obj.search(s) if result is None: print('=== Fails on locale-sensitive match', t) # Try the match with UNICODE locale enabled, and check # that it still succeeds. obj = re.compile(pattern, re.UNICODE) result = obj.search(s) if result is None: print('=== Fails on unicode-sensitive match', t) def test_main(): run_unittest(__name__) run_re_tests() if __name__ == "__main__": test_main()
PennartLoettring/Poettrix
rootfs/usr/lib/python3.4/test/test_re.py
Python
gpl-2.0
68,165
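The file above is CPython's regression suite for the re module. As a standalone illustration (not part of that record), here is a minimal sketch of two behaviours it exercises — re.escape() round-tripping a literal and finditer() honouring pos/endpos; the sample strings and spans are invented for the example.

import re

# An escaped literal should match itself even when it contains metacharacters.
literal = "a.b[c]*"
assert re.match(re.escape(literal), literal) is not None

# Pattern.finditer() only reports matches inside the [pos, endpos) window.
pat = re.compile(r":+")
spans = [m.span() for m in pat.finditer("a:b::c:::d", 1, 10)]
assert spans == [(1, 2), (3, 5), (6, 9)]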
import numpy
import os
import argparse

import makeXYZ

parser = argparse.ArgumentParser(description="Take an XYZ file as input, rotate it in various directions and output a bunch of XYZ files into separate folders")
parser.add_argument("-o", "--output", default="output/", help="produced files and folders will be saved here (default: %(default)s)")
args = parser.parse_args()

#parametersContent = '''# Parameters for a system from a paper
#
## name | epsilon (kcal/mol) | sigma (A) | mass (amu) | charge (e)
#atom C 0.07000 3.55000 12.01100 0.00000
#atom H 0.03350 2.42000 1.00800 0.00000
#atom O 0.11080 2.98504 15.99940 0.00000
#atom N 0.19200 3.31988 14.00670 0.00000
#atom S 0.43560 3.63599 32.06500 0.00000
#atom F 0.11080 2.90789 18.99840 0.00000
#atom B 0.10500 3.63000 10.81000 0.00000
#atom X 20.00000 3.55000 12.01100 0.02100
#atom T 0.19200 3.15000 15.99900 -0.02100
#
## Boron parameters guessed from Baowan & Hill, IET Micro & Nano Letters 2:46 (2007)
## Carbon, oxygen and hydrogen parameters from original CHARMM force field
#
## Pair style to overwrite and default LJ-mixing
## atom1 | atom2 | pair_style | parameters (eps,sig for LJ; De,a,re for Morse)
## pair_ovwrt C T morse 1 2 3
## pair_ovwrt H T lj 1 2
#
## Tip harmonic constraint
## force constant (kcal/mol) | distance (A)
#harm 0.72000 0.00
#
## Additional parameters for making the molecules flexible
#
## We need to know the topology, so list the possible bonds and their expected length
## atom1 | atom2 | exp. length (A)
## topobond C C 1.430
## topobond C H 1.095
## topobond C B 1.534
#
## bonds are assumed harmonic and in their equilibrium position (in the xyz file)
## force constant (kcal/mol)
#bond 25.000
#
## angles are assumed harmonic and in their equilibrium position (in the xyz file)
## force constant (kcal/mol)
#angle 0.2500
#
## dihedrals are assumed harmonic and in their equilibrium position (in the xyz file)
## force constant (kcal/mol)
#dihedral 0.2500
#
## substrate support using a 10-4 wall potential
## epsilon (kcal/mol) | sigma (A) | lambda (A) | r_cut (A) | lateral constant (kcal/mol)
#substrate 0.100 3.0 3.0 7.5 0.01'''
#

if args.output.endswith('/'):
    output_folder = args.output
else:
    output_folder = args.output + '/'

if not os.path.exists(output_folder):
    os.makedirs(output_folder)

# if args.input.endswith('/'):
#     input_folder = args.input
# else:
#     input_folder = args.input + '/'

#parameterFile = open(output_folder + 'parameters.dat', "w+")
#parameterFile.write(parametersContent)

makeXYZ.makeIt(output_folder)
SINGROUP/readAFM
databaseCode/toyDB/twoDiffAtomsHC/doXYZstuff.py
Python
gpl-3.0
3,212
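A brief sketch of the only active logic in the script above (everything else is commented out): normalising the output path and creating the folder. The helper name below is invented for illustration.

import os

def normalize_output_folder(path):
    # Guarantee a trailing slash, then create the directory if it is missing,
    # mirroring the args.output handling in the script above.
    folder = path if path.endswith('/') else path + '/'
    if not os.path.exists(folder):
        os.makedirs(folder)
    return folder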
# Copyright 2012-2015 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License", # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utilities for choosing which member of a replica set to read from.""" import random from pymongo.errors import ConfigurationError _PRIMARY = 0 _PRIMARY_PREFERRED = 1 _SECONDARY = 2 _SECONDARY_PREFERRED = 3 _NEAREST = 4 class ReadPreference: """An enum that defines the read preference modes supported by PyMongo. Used in three cases: :class:`~pymongo.mongo_client.MongoClient` connected to a single host: * `PRIMARY`: Queries are allowed if the host is standalone or the replica set primary. * All other modes allow queries to standalone servers, to the primary, or to secondaries. :class:`~pymongo.mongo_client.MongoClient` connected to a mongos, with a sharded cluster of replica sets: * `PRIMARY`: Queries are sent to the primary of a shard. * `PRIMARY_PREFERRED`: Queries are sent to the primary if available, otherwise a secondary. * `SECONDARY`: Queries are distributed among shard secondaries. An error is raised if no secondaries are available. * `SECONDARY_PREFERRED`: Queries are distributed among shard secondaries, or the primary if no secondary is available. * `NEAREST`: Queries are distributed among all members of a shard. :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient`: * `PRIMARY`: Queries are sent to the primary of the replica set. * `PRIMARY_PREFERRED`: Queries are sent to the primary if available, otherwise a secondary. * `SECONDARY`: Queries are distributed among secondaries. An error is raised if no secondaries are available. * `SECONDARY_PREFERRED`: Queries are distributed among secondaries, or the primary if no secondary is available. * `NEAREST`: Queries are distributed among all members. """ PRIMARY = _PRIMARY PRIMARY_PREFERRED = _PRIMARY_PREFERRED SECONDARY = _SECONDARY SECONDARY_ONLY = _SECONDARY SECONDARY_PREFERRED = _SECONDARY_PREFERRED NEAREST = _NEAREST # For formatting error messages modes = { _PRIMARY: 'PRIMARY', _PRIMARY_PREFERRED: 'PRIMARY_PREFERRED', _SECONDARY: 'SECONDARY', _SECONDARY_PREFERRED: 'SECONDARY_PREFERRED', _NEAREST: 'NEAREST', } _mongos_modes = [ 'primary', 'primaryPreferred', 'secondary', 'secondaryPreferred', 'nearest', ] def mongos_mode(mode): return _mongos_modes[mode] def mongos_enum(enum): return _mongos_modes.index(enum) def select_primary(members): for member in members: if member.is_primary: return member return None def select_member_with_tags(members, tags, secondary_only, latency): candidates = [] for candidate in members: if secondary_only and candidate.is_primary: continue if not (candidate.is_primary or candidate.is_secondary): # In RECOVERING or similar state continue if candidate.matches_tags(tags): candidates.append(candidate) if not candidates: return None # ping_time is in seconds fastest = min([candidate.get_avg_ping_time() for candidate in candidates]) near_candidates = [ candidate for candidate in candidates if candidate.get_avg_ping_time() - fastest <= latency / 1000.] 
return random.choice(near_candidates) def select_member(members, mode=ReadPreference.PRIMARY, tag_sets=None, latency=15): """Return a Member or None. """ if tag_sets is None: tag_sets = [{}] if mode == _PRIMARY: if tag_sets != [{}]: raise ConfigurationError("PRIMARY cannot be combined with tags") return select_primary(members) elif mode == _PRIMARY_PREFERRED: # Recurse. candidate_primary = select_member(members, _PRIMARY, [{}], latency) if candidate_primary: return candidate_primary else: return select_member(members, _SECONDARY, tag_sets, latency) elif mode == _SECONDARY: for tags in tag_sets: candidate = select_member_with_tags(members, tags, True, latency) if candidate: return candidate return None elif mode == _SECONDARY_PREFERRED: # Recurse. candidate_secondary = select_member( members, _SECONDARY, tag_sets, latency) if candidate_secondary: return candidate_secondary else: return select_member(members, _PRIMARY, [{}], latency) elif mode == _NEAREST: for tags in tag_sets: candidate = select_member_with_tags(members, tags, False, latency) if candidate: return candidate # Ran out of tags. return None else: raise ConfigurationError("Invalid mode %s" % repr(mode)) """Commands that may be sent to replica-set secondaries, depending on ReadPreference and tags. All other commands are always run on the primary. """ secondary_ok_commands = frozenset([ "group", "aggregate", "collstats", "dbstats", "count", "distinct", "geonear", "geosearch", "geowalk", "mapreduce", "getnonce", "authenticate", "text", "parallelcollectionscan" ]) class MovingAverage(object): def __init__(self, samples): """Immutable structure to track a 5-sample moving average. """ self.samples = samples[-5:] assert self.samples self.average = sum(self.samples) / float(len(self.samples)) def clone_with(self, sample): """Get a copy of this instance plus a new sample""" return MovingAverage(self.samples + [sample]) def get(self): return self.average def _validate_tag_sets(tag_sets): """Validate tag sets for a MongoReplicaSetClient. """ if tag_sets is None: return tag_sets if not isinstance(tag_sets, list): raise TypeError(( "Tag sets %r invalid, must be a list") % (tag_sets,)) if len(tag_sets) == 0: raise ValueError(( "Tag sets %r invalid, must be None or contain at least one set of" " tags") % (tag_sets,)) for tags in tag_sets: if not isinstance(tags, dict): raise TypeError( "Tag set %r invalid, must be an instance of dict, or" "bson.son.SON" % (tags,)) return tag_sets class _ServerMode(object): """Base class for all read preferences. """ __slots__ = ("__mongos_mode", "__mode", "__tag_sets") def __init__(self, mode, tag_sets=None): if mode == _PRIMARY and tag_sets is not None: raise ConfigurationError("Read preference primary " "cannot be combined with tags") self.__mongos_mode = _mongos_modes[mode] self.__mode = mode self.__tag_sets = _validate_tag_sets(tag_sets) @property def name(self): """The name of this read preference. """ return self.__class__.__name__ @property def document(self): """Read preference as a document. """ if self.__tag_sets in (None, [{}]): return {'mode': self.__mongos_mode} return {'mode': self.__mongos_mode, 'tags': self.__tag_sets} @property def mode(self): """The mode of this read preference instance. """ return self.__mode @property def tag_sets(self): """Set ``tag_sets`` to a list of dictionaries like [{'dc': 'ny'}] to read only from members whose ``dc`` tag has the value ``"ny"``. To specify a priority-order for tag sets, provide a list of tag sets: ``[{'dc': 'ny'}, {'dc': 'la'}, {}]``. 
A final, empty tag set, ``{}``, means "read from any member that matches the mode, ignoring tags." MongoReplicaSetClient tries each set of tags in turn until it finds a set of tags with at least one matching member. .. seealso:: `Data-Center Awareness <http://www.mongodb.org/display/DOCS/Data+Center+Awareness>`_ """ if self.__tag_sets: return list(self.__tag_sets) return [{}] def __repr__(self): return "%s(tag_sets=%r)" % ( self.name, self.__tag_sets) def __eq__(self, other): if isinstance(other, _ServerMode): return (self.mode == other.mode and self.tag_sets == other.tag_sets) raise NotImplementedError def __ne__(self, other): return not self == other def __getstate__(self): """Return value of object for pickling. Needed explicitly because __slots__() defined. """ return {'mode': self.__mode, 'tag_sets': self.__tag_sets} def __setstate__(self, value): """Restore from pickling.""" self.__mode = value['mode'] self.__mongos_mode = _mongos_modes[self.__mode] self.__tag_sets = _validate_tag_sets(value['tag_sets']) class Primary(_ServerMode): """Primary read preference. * When directly connected to one mongod queries are allowed if the server is standalone or a replica set primary. * When connected to a mongos queries are sent to the primary of a shard. * When connected to a replica set queries are sent to the primary of the replica set. .. versionadded:: 2.9 """ def __init__(self): super(Primary, self).__init__(_PRIMARY) def __repr__(self): return "Primary()" def __eq__(self, other): if isinstance(other, _ServerMode): return other.mode == _PRIMARY raise NotImplementedError class PrimaryPreferred(_ServerMode): """PrimaryPreferred read preference. * When directly connected to one mongod queries are allowed to standalone servers, to a replica set primary, or to replica set secondaries. * When connected to a mongos queries are sent to the primary of a shard if available, otherwise a shard secondary. * When connected to a replica set queries are sent to the primary if available, otherwise a secondary. :Parameters: - `tag_sets`: The :attr:`~tag_sets` to use if the primary is not available. .. versionadded:: 2.9 """ def __init__(self, tag_sets=None): super(PrimaryPreferred, self).__init__(_PRIMARY_PREFERRED, tag_sets) class Secondary(_ServerMode): """Secondary read preference. * When directly connected to one mongod queries are allowed to standalone servers, to a replica set primary, or to replica set secondaries. * When connected to a mongos queries are distributed among shard secondaries. An error is raised if no secondaries are available. * When connected to a replica set queries are distributed among secondaries. An error is raised if no secondaries are available. :Parameters: - `tag_sets`: The :attr:`~tag_sets` to use with this read_preference .. versionadded:: 2.9 """ def __init__(self, tag_sets=None): super(Secondary, self).__init__(_SECONDARY, tag_sets) class SecondaryPreferred(_ServerMode): """SecondaryPreferred read preference. * When directly connected to one mongod queries are allowed to standalone servers, to a replica set primary, or to replica set secondaries. * When connected to a mongos queries are distributed among shard secondaries, or the shard primary if no secondary is available. * When connected to a replica set queries are distributed among secondaries, or the primary if no secondary is available. :Parameters: - `tag_sets`: The :attr:`~tag_sets` to use with this read_preference .. 
versionadded:: 2.9 """ def __init__(self, tag_sets=None): super(SecondaryPreferred, self).__init__(_SECONDARY_PREFERRED, tag_sets) class Nearest(_ServerMode): """Nearest read preference. * When directly connected to one mongod queries are allowed to standalone servers, to a replica set primary, or to replica set secondaries. * When connected to a mongos queries are distributed among all members of a shard. * When connected to a replica set queries are distributed among all members. :Parameters: - `tag_sets`: The :attr:`~tag_sets` to use with this read_preference .. versionadded:: 2.9 """ def __init__(self, tag_sets=None): super(Nearest, self).__init__(_NEAREST, tag_sets)
kastriothaliti/techstitution
venv/lib/python3.5/site-packages/pymongo/read_preferences.py
Python
gpl-3.0
13,084
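A hedged usage sketch for the read-preference classes defined above, assuming the module is importable as pymongo.read_preferences (as its path suggests); the data-centre tags are invented.

from pymongo.read_preferences import Nearest, SecondaryPreferred, mongos_mode

# Tag sets are tried in order; the trailing {} means "any member matching the mode".
pref = SecondaryPreferred(tag_sets=[{'dc': 'ny'}, {}])
assert pref.document == {'mode': 'secondaryPreferred', 'tags': [{'dc': 'ny'}, {}]}
assert mongos_mode(pref.mode) == 'secondaryPreferred'

# With no tag sets, the wire document carries only the mode.
assert Nearest().document == {'mode': 'nearest'}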
import csv import itertools import logging import operator try: from cStringIO import StringIO except ImportError: from StringIO import StringIO import psycopg2 from openerp.osv import orm, fields from openerp.tools.translate import _ FIELDS_RECURSION_LIMIT = 2 ERROR_PREVIEW_BYTES = 200 _logger = logging.getLogger(__name__) class ir_import(orm.TransientModel): _name = 'base_import.import' # allow imports to survive for 12h in case user is slow _transient_max_hours = 12.0 _columns = { 'res_model': fields.char('Model'), 'file': fields.binary( 'File', help="File to check and/or import, raw binary (not base64)"), 'file_name': fields.char('File Name'), 'file_type': fields.char('File Type'), } def get_fields(self, cr, uid, model, context=None, depth=FIELDS_RECURSION_LIMIT): """ Recursively get fields for the provided model (through fields_get) and filter them according to importability The output format is a list of ``Field``, with ``Field`` defined as: .. class:: Field .. attribute:: id (str) A non-unique identifier for the field, used to compute the span of the ``required`` attribute: if multiple ``required`` fields have the same id, only one of them is necessary. .. attribute:: name (str) The field's logical (Odoo) name within the scope of its parent. .. attribute:: string (str) The field's human-readable name (``@string``) .. attribute:: required (bool) Whether the field is marked as required in the model. Clients must provide non-empty import values for all required fields or the import will error out. .. attribute:: fields (list(Field)) The current field's subfields. The database and external identifiers for m2o and m2m fields; a filtered and transformed fields_get for o2m fields (to a variable depth defined by ``depth``). Fields with no sub-fields will have an empty list of sub-fields. :param str model: name of the model to get fields form :param int landing: depth of recursion into o2m fields """ model_obj = self.pool[model] fields = [{ 'id': 'id', 'name': 'id', 'string': _("External ID"), 'required': False, 'fields': [], }] fields_got = model_obj.fields_get(cr, uid, context=context) blacklist = orm.MAGIC_COLUMNS + [model_obj.CONCURRENCY_CHECK_FIELD] for name, field in fields_got.iteritems(): if name in blacklist: continue # an empty string means the field is deprecated, @deprecated must # be absent or False to mean not-deprecated if field.get('deprecated', False) is not False: continue if field.get('readonly'): states = field.get('states') if not states: continue # states = {state: [(attr, value), (attr2, value2)], state2:...} if not any(attr == 'readonly' and value is False for attr, value in itertools.chain.from_iterable( states.itervalues())): continue f = { 'id': name, 'name': name, 'string': field['string'], # Y U NO ALWAYS HAS REQUIRED 'required': bool(field.get('required')), 'fields': [], } if field['type'] in ('many2many', 'many2one'): f['fields'] = [ dict(f, name='id', string=_("External ID")), dict(f, name='.id', string=_("Database ID")), ] elif field['type'] == 'one2many' and depth: f['fields'] = self.get_fields( cr, uid, field['relation'], context=context, depth=depth-1) if self.pool['res.users'].has_group(cr, uid, 'base.group_no_one'): f['fields'].append({'id' : '.id', 'name': '.id', 'string': _("Database ID"), 'required': False, 'fields': []}) fields.append(f) # TODO: cache on model? 
return fields def _read_csv(self, record, options): """ Returns a CSV-parsed iterator of all empty lines in the file :throws csv.Error: if an error is detected during CSV parsing :throws UnicodeDecodeError: if ``options.encoding`` is incorrect """ csv_iterator = csv.reader( StringIO(record.file), quotechar=str(options['quoting']), delimiter=str(options['separator'])) csv_nonempty = itertools.ifilter(None, csv_iterator) # TODO: guess encoding with chardet? Or https://github.com/aadsm/jschardet encoding = options.get('encoding', 'utf-8') return itertools.imap( lambda row: [item.decode(encoding) for item in row], csv_nonempty) def _match_header(self, header, fields, options): """ Attempts to match a given header to a field of the imported model. :param str header: header name from the CSV file :param fields: :param dict options: :returns: an empty list if the header couldn't be matched, or all the fields to traverse :rtype: list(Field) """ for field in fields: # FIXME: should match all translations & original # TODO: use string distance (levenshtein? hamming?) if header.lower() == field['name'].lower() \ or header.lower() == field['string'].lower(): return [field] if '/' not in header: return [] # relational field path traversal = [] subfields = fields # Iteratively dive into fields tree for section in header.split('/'): # Strip section in case spaces are added around '/' for # readability of paths match = self._match_header(section.strip(), subfields, options) # Any match failure, exit if not match: return [] # prep subfields for next iteration within match[0] field = match[0] subfields = field['fields'] traversal.append(field) return traversal def _match_headers(self, rows, fields, options): """ Attempts to match the imported model's fields to the titles of the parsed CSV file, if the file is supposed to have headers. Will consume the first line of the ``rows`` iterator. Returns a pair of (None, None) if headers were not requested or the list of headers and a dict mapping cell indices to key paths in the ``fields`` tree :param Iterator rows: :param dict fields: :param dict options: :rtype: (None, None) | (list(str), dict(int: list(str))) """ if not options.get('headers'): return None, None headers = next(rows) return headers, dict( (index, [field['name'] for field in self._match_header(header, fields, options)] or None) for index, header in enumerate(headers) ) def parse_preview(self, cr, uid, id, options, count=10, context=None): """ Generates a preview of the uploaded files, and performs fields-matching between the import's file data and the model's columns. If the headers are not requested (not options.headers), ``matches`` and ``headers`` are both ``False``. :param id: identifier of the import :param int count: number of preview lines to generate :param options: format-specific options. 
CSV: {encoding, quoting, separator, headers} :type options: {str, str, str, bool} :returns: {fields, matches, headers, preview} | {error, preview} :rtype: {dict(str: dict(...)), dict(int, list(str)), list(str), list(list(str))} | {str, str} """ (record,) = self.browse(cr, uid, [id], context=context) fields = self.get_fields(cr, uid, record.res_model, context=context) try: rows = self._read_csv(record, options) headers, matches = self._match_headers(rows, fields, options) # Match should have consumed the first row (iif headers), get # the ``count`` next rows for preview preview = list(itertools.islice(rows, count)) assert preview, "CSV file seems to have no content" return { 'fields': fields, 'matches': matches or False, 'headers': headers or False, 'preview': preview, } except Exception, e: # Due to lazy generators, UnicodeDecodeError (for # instance) may only be raised when serializing the # preview to a list in the return. _logger.debug("Error during CSV parsing preview", exc_info=True) return { 'error': str(e), # iso-8859-1 ensures decoding will always succeed, # even if it yields non-printable characters. This is # in case of UnicodeDecodeError (or csv.Error # compounded with UnicodeDecodeError) 'preview': record.file[:ERROR_PREVIEW_BYTES] .decode( 'iso-8859-1'), } def _convert_import_data(self, record, fields, options, context=None): """ Extracts the input browse_record and fields list (with ``False``-y placeholders for fields to *not* import) into a format Model.import_data can use: a fields list without holes and the precisely matching data matrix :param browse_record record: :param list(str|bool): fields :returns: (data, fields) :rtype: (list(list(str)), list(str)) :raises ValueError: in case the import data could not be converted """ # Get indices for non-empty fields indices = [index for index, field in enumerate(fields) if field] if not indices: raise ValueError(_("You must configure at least one field to import")) # If only one index, itemgetter will return an atom rather # than a 1-tuple if len(indices) == 1: mapper = lambda row: [row[indices[0]]] else: mapper = operator.itemgetter(*indices) # Get only list of actually imported fields import_fields = filter(None, fields) rows_to_import = self._read_csv(record, options) if options.get('headers'): rows_to_import = itertools.islice( rows_to_import, 1, None) data = [ row for row in itertools.imap(mapper, rows_to_import) # don't try inserting completely empty rows (e.g. from # filtering out o2m fields) if any(row) ] return data, import_fields def do(self, cr, uid, id, fields, options, dryrun=False, context=None): """ Actual execution of the import :param fields: import mapping: maps each column to a field, ``False`` for the columns to ignore :type fields: list(str|bool) :param dict options: :param bool dryrun: performs all import operations (and validations) but rollbacks writes, allows getting as much errors as possible without the risk of clobbering the database. :returns: A list of errors. If the list is empty the import executed fully and correctly. 
If the list is non-empty it contains dicts with 3 keys ``type`` the type of error (``error|warning``); ``message`` the error message associated with the error (a string) and ``record`` the data which failed to import (or ``false`` if that data isn't available or provided) :rtype: list({type, message, record}) """ cr.execute('SAVEPOINT import') (record,) = self.browse(cr, uid, [id], context=context) try: data, import_fields = self._convert_import_data( record, fields, options, context=context) except ValueError, e: return [{ 'type': 'error', 'message': unicode(e), 'record': False, }] _logger.info('importing %d rows...', len(data)) import_result = self.pool[record.res_model].load( cr, uid, import_fields, data, context=context) _logger.info('done') # If transaction aborted, RELEASE SAVEPOINT is going to raise # an InternalError (ROLLBACK should work, maybe). Ignore that. # TODO: to handle multiple errors, create savepoint around # write and release it in case of write error (after # adding error to errors array) => can keep on trying to # import stuff, and rollback at the end if there is any # error in the results. try: if dryrun: cr.execute('ROLLBACK TO SAVEPOINT import') else: cr.execute('RELEASE SAVEPOINT import') except psycopg2.InternalError: pass return import_result['messages']
Jgarcia-IAS/SAT
openerp/addons/base_import/models.py
Python
agpl-3.0
13,840
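A standalone sketch of the column-mapping trick used by _convert_import_data() above: keep only the columns that were mapped to a field, reorder each row with operator.itemgetter, and drop rows that end up empty. The field names and rows here are invented.

import operator

fields = ['name', False, 'value', False]           # False marks a column to ignore
indices = [i for i, f in enumerate(fields) if f]   # -> [0, 2]
mapper = operator.itemgetter(*indices)             # two or more indices -> a tuple per row

rows = [['widget', 'junk', '42', ''], ['', '', '', '']]
data = [list(mapper(row)) for row in rows if any(mapper(row))]
assert data == [['widget', '42']]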
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from operator import itemgetter import time from openerp import models, api from openerp.osv import osv, orm, fields from openerp.tools.misc import attrgetter from openerp.exceptions import UserError # ------------------------------------------------------------------------- # Properties # ------------------------------------------------------------------------- TYPE2FIELD = { 'char': 'value_text', 'float': 'value_float', 'boolean': 'value_integer', 'integer': 'value_integer', 'text': 'value_text', 'binary': 'value_binary', 'many2one': 'value_reference', 'date': 'value_datetime', 'datetime': 'value_datetime', 'selection': 'value_text', } class ir_property(osv.osv): _name = 'ir.property' _columns = { 'name': fields.char('Name', select=1), 'res_id': fields.char('Resource', help="If not set, acts as a default value for new resources", select=1), 'company_id': fields.many2one('res.company', 'Company', select=1), 'fields_id': fields.many2one('ir.model.fields', 'Field', ondelete='cascade', required=True, select=1), 'value_float' : fields.float('Value'), 'value_integer' : fields.integer('Value'), 'value_text' : fields.text('Value'), # will contain (char, text) 'value_binary' : fields.binary('Value'), 'value_reference': fields.char('Value'), 'value_datetime' : fields.datetime('Value'), 'type' : fields.selection([('char', 'Char'), ('float', 'Float'), ('boolean', 'Boolean'), ('integer', 'Integer'), ('text', 'Text'), ('binary', 'Binary'), ('many2one', 'Many2One'), ('date', 'Date'), ('datetime', 'DateTime'), ('selection', 'Selection'), ], 'Type', required=True, select=1), } _defaults = { 'type': 'many2one', } def _update_values(self, cr, uid, ids, values): value = values.pop('value', None) if not value: return values prop = None type_ = values.get('type') if not type_: if ids: prop = self.browse(cr, uid, ids[0]) type_ = prop.type else: type_ = self._defaults['type'] field = TYPE2FIELD.get(type_) if not field: raise UserError(_('Invalid type')) if field == 'value_reference': if isinstance(value, orm.BaseModel): value = '%s,%d' % (value._name, value.id) elif isinstance(value, (int, long)): field_id = values.get('fields_id') if not field_id: if not prop: raise ValueError() field_id = prop.fields_id else: field_id = self.pool.get('ir.model.fields').browse(cr, uid, field_id) value = '%s,%d' % (field_id.relation, value) values[field] = value return values def write(self, cr, uid, ids, values, context=None): return super(ir_property, self).write(cr, uid, ids, self._update_values(cr, uid, ids, values), context=context) def create(self, cr, uid, values, context=None): return 
super(ir_property, self).create(cr, uid, self._update_values(cr, uid, None, values), context=context) def get_by_record(self, cr, uid, record, context=None): if record.type in ('char', 'text', 'selection'): return record.value_text elif record.type == 'float': return record.value_float elif record.type == 'boolean': return bool(record.value_integer) elif record.type == 'integer': return record.value_integer elif record.type == 'binary': return record.value_binary elif record.type == 'many2one': if not record.value_reference: return False model, resource_id = record.value_reference.split(',') value = self.pool[model].browse(cr, uid, int(resource_id), context=context) return value.exists() elif record.type == 'datetime': return record.value_datetime elif record.type == 'date': if not record.value_datetime: return False return time.strftime('%Y-%m-%d', time.strptime(record.value_datetime, '%Y-%m-%d %H:%M:%S')) return False def get(self, cr, uid, name, model, res_id=False, context=None): domain = self._get_domain(cr, uid, name, model, context=context) if domain is not None: domain = [('res_id', '=', res_id)] + domain #make the search with company_id asc to make sure that properties specific to a company are given first nid = self.search(cr, uid, domain, limit=1, order='company_id asc', context=context) if not nid: return False record = self.browse(cr, uid, nid[0], context=context) return self.get_by_record(cr, uid, record, context=context) return False def _get_domain(self, cr, uid, prop_name, model, context=None): context = context or {} cr.execute('select id from ir_model_fields where name=%s and model=%s', (prop_name, model)) res = cr.fetchone() if not res: return None cid = context.get('force_company') if not cid: company = self.pool.get('res.company') cid = company._company_default_get(cr, uid, model, res[0], context=context) return [('fields_id', '=', res[0]), ('company_id', 'in', [cid, False])] @api.model def get_multi(self, name, model, ids): """ Read the property field `name` for the records of model `model` with the given `ids`, and return a dictionary mapping `ids` to their corresponding value. """ if not ids: return {} domain = self._get_domain(name, model) if domain is None: return dict.fromkeys(ids, False) # retrieve the values for the given ids and the default value, too refs = {('%s,%s' % (model, id)): id for id in ids} refs[False] = False domain += [('res_id', 'in', list(refs))] # note: order by 'company_id asc' will return non-null values first props = self.search(domain, order='company_id asc') result = {} for prop in props: # for a given res_id, take the first property only id = refs.pop(prop.res_id, None) if id is not None: result[id] = self.get_by_record(prop) # set the default value to the ids that are not in result default_value = result.pop(False, False) for id in ids: result.setdefault(id, default_value) return result @api.model def set_multi(self, name, model, values): """ Assign the property field `name` for the records of model `model` with `values` (dictionary mapping record ids to their value). 
""" def clean(value): return value.id if isinstance(value, models.BaseModel) else value if not values: return domain = self._get_domain(name, model) if domain is None: raise Exception() # retrieve the default value for the field default_value = clean(self.get(name, model)) # retrieve the properties corresponding to the given record ids self._cr.execute("SELECT id FROM ir_model_fields WHERE name=%s AND model=%s", (name, model)) field_id = self._cr.fetchone()[0] company_id = self.env['res.company']._company_default_get(model, field_id) refs = {('%s,%s' % (model, id)): id for id in values} props = self.search([ ('fields_id', '=', field_id), ('company_id', '=', company_id), ('res_id', 'in', list(refs)), ]) # modify existing properties for prop in props: id = refs.pop(prop.res_id) value = clean(values[id]) if value == default_value: prop.unlink() elif value != clean(prop.get_by_record(prop)): prop.write({'value': value}) # create new properties for records that do not have one yet for ref, id in refs.iteritems(): value = clean(values[id]) if value != default_value: self.create({ 'fields_id': field_id, 'company_id': company_id, 'res_id': ref, 'name': name, 'value': value, 'type': self.env[model]._fields[name].type, }) @api.model def search_multi(self, name, model, operator, value): """ Return a domain for the records that match the given condition. """ field = self.env[model]._fields[name] if field.type == 'many2one': comodel = field.comodel_name def makeref(value): return value and '%s,%s' % (comodel, value) if operator in ('=', '!=', '<=', '<', '>', '>='): value = makeref(value) elif operator in ('in', 'not in'): value = map(makeref, value) elif operator in ('=like', '=ilike', 'like', 'not like', 'ilike', 'not ilike'): # most probably inefficient... but correct target = self.env[comodel] target_names = target.name_search(value, operator=operator, limit=None) target_ids = map(itemgetter(0), target_names) operator, value = 'in', map(makeref, target_ids) # retrieve the properties that match the condition domain = self._get_domain(name, model) if domain is None: raise Exception() props = self.search(domain + [(TYPE2FIELD[field.type], operator, value)]) # retrieve the records corresponding to the properties that match good_ids = [] default_matches = False for prop in props: if prop.res_id: res_model, res_id = prop.res_id.split(',') good_ids.append(int(res_id)) else: default_matches = True if default_matches: # exclude all records with a property that does not match all_ids = [] props = self.search(domain + [('res_id', '!=', False)]) for prop in props: res_model, res_id = prop.res_id.split(',') all_ids.append(int(res_id)) bad_ids = list(set(all_ids) - set(good_ids)) return [('id', 'not in', bad_ids)] else: return [('id', 'in', good_ids)]
addition-it-solutions/project-all
openerp/addons/base/res/ir_property.py
Python
agpl-3.0
12,186
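A small sketch of the value-storage convention encoded by TYPE2FIELD above: each property type maps to one typed column, and many2one values are serialised as 'model,id' reference strings. The model name and id below are invented.

# Condensed copy of the mapping from the record above.
TYPE2FIELD = {
    'char': 'value_text', 'float': 'value_float', 'boolean': 'value_integer',
    'integer': 'value_integer', 'text': 'value_text', 'binary': 'value_binary',
    'many2one': 'value_reference', 'date': 'value_datetime',
    'datetime': 'value_datetime', 'selection': 'value_text',
}

assert TYPE2FIELD['selection'] == 'value_text'
assert TYPE2FIELD['many2one'] == 'value_reference'
reference = '%s,%d' % ('res.partner', 42)   # how _update_values() encodes a record value
assert reference == 'res.partner,42'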
# This is a component of LinuxCNC
# Copyright 2011 Michael Haberler <git@mah.priv.at>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import emccanon
import interpreter


def failingepilog(self, *args, **words):
    emccanon.MESSAGE("failing_epilog returning INTERP_ERROR")
    self.set_errormsg("A failed Python epilog returning INTERP_ERROR")
    return interpreter.INTERP_ERROR
bmwiedemann/linuxcnc-mirror
tests/remap/fail/epilog/remap.py
Python
lgpl-2.1
1,063
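For contrast with the intentionally failing epilog above, a remap epilog that lets execution continue simply returns INTERP_OK. This is a short sketch under the same assumptions (the emccanon and interpreter modules that LinuxCNC injects into remap Python code); the function name is illustrative.

import emccanon
import interpreter

def passing_epilog(self, *args, **words):
    # A well-behaved epilog: optionally report, then signal success so the
    # interpreter keeps executing after the remapped code.
    emccanon.MESSAGE("passing_epilog returning INTERP_OK")
    return interpreter.INTERP_OK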
# # Copyright 2015 Hewlett Packard # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg import oslo_messaging from oslo_utils import timeutils from ceilometer.agent import plugin_base from ceilometer import sample OPTS = [ cfg.StrOpt('dns_control_exchange', default='central', help="Exchange name for DNS notifications."), ] cfg.CONF.register_opts(OPTS) SERVICE = 'dns' class DnsMetricsNotificationBase(plugin_base.NotificationBase): """Base class for DNSaaS(Designate) notifications.""" @staticmethod def get_targets(conf): """Return a sequence of oslo.messaging.Target This sequence is defining the exchange and topics to be connected for this plugin. """ return [oslo_messaging.Target(topic=topic, exchange=conf.dns_control_exchange) for topic in conf.notification_topics] class DomainExists(DnsMetricsNotificationBase): """Handles DNS domain exists notification. Emits a sample for a measurable audit interval. """ event_types = ['%s.domain.exists' % SERVICE] def process_notification(self, message): period_start = timeutils.normalize_time(timeutils.parse_isotime( message['payload']['audit_period_beginning'])) period_end = timeutils.normalize_time(timeutils.parse_isotime( message['payload']['audit_period_ending'])) period_difference = timeutils.delta_seconds(period_start, period_end) yield sample.Sample.from_notification( name=message['event_type'], type=sample.TYPE_CUMULATIVE, unit='s', volume=period_difference, resource_id=message['payload']['id'], user_id=message['_context_user'], project_id=message['payload']['tenant_id'], message=message)
cernops/ceilometer
ceilometer/dns/notifications.py
Python
apache-2.0
2,410
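DomainExists.process_notification only reads a handful of keys from the notification body, so it can be exercised directly with a hand-built message. The payload below is hypothetical and shaped purely after the fields the handler accesses; the constructor argument is an assumption, since the plugin base signature varies between ceilometer releases.

# Hypothetical dns.domain.exists message, shaped only by what the handler reads.
message = {
    'event_type': 'dns.domain.exists',
    '_context_user': 'user-123',
    'payload': {
        'id': 'domain-abc',
        'tenant_id': 'tenant-xyz',
        'audit_period_beginning': '2015-01-01T00:00:00Z',
        'audit_period_ending': '2015-01-01T01:00:00Z',
    },
}

handler = DomainExists(manager=None)  # assumed ctor argument; varies by release
for s in handler.process_notification(message):
    # One cumulative sample per message; volume is the audit period in seconds.
    print('%s volume=%s resource=%s' % (s.name, s.volume, s.resource_id))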
import json from django import VERSION from django.core.serializers.json import DjangoJSONEncoder from django.http import (HttpResponse as BaseHttpResponse, HttpResponseBadRequest as Base400) class ContentTypeMixin(object): def __init__(self, *args, **kwargs): if VERSION < (1, 5) and 'content_type' in kwargs: kwargs['mimetype'] = kwargs.pop('content_type') super(ContentTypeMixin, self).__init__(*args, **kwargs) class HttpResponse(ContentTypeMixin, BaseHttpResponse): pass class HttpResponseBadRequest(ContentTypeMixin, Base400): pass class JsonResponse(HttpResponse): # Django < 1.7 does not have JsonResponse # https://github.com/django/django/commit/024213 # https://github.com/django/django/blob/master/LICENSE def __init__(self, data, encoder=DjangoJSONEncoder, safe=True, json_dumps_params=None, **kwargs): if safe and not isinstance(data, dict): raise TypeError( 'In order to allow non-dict objects to be serialized set the ' 'safe parameter to False.' ) if json_dumps_params is None: json_dumps_params = {} kwargs.setdefault('content_type', 'application/json') data = json.dumps(data, cls=encoder, **json_dumps_params) super(JsonResponse, self).__init__(content=data, **kwargs)
johnseekins/graphite-web
webapp/graphite/compat.py
Python
apache-2.0
1,404
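A quick sketch of how the backported JsonResponse above behaves, mirroring the Django original it copies: dict payloads serialize directly, while non-dict payloads require safe=False. The view functions are illustrative only.

# Illustrative views built on the compat helpers above.
from graphite.compat import HttpResponseBadRequest, JsonResponse

def metrics_summary(request):
    # Dicts are the default, "safe" case.
    return JsonResponse({'ok': True, 'count': 3})

def metrics_list(request):
    # Non-dict payloads must be allowed explicitly, otherwise TypeError is raised.
    return JsonResponse([1, 2, 3], safe=False)

def metrics_error(request):
    return HttpResponseBadRequest('bad target', content_type='text/plain')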
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2013 Rackspace Hosting # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import math from nova.tests.virt.xenapi import stubs from nova.virt import fake from nova.virt import xenapi class XenAPIDriverTestCase(stubs.XenAPITestBase): """Unit tests for Driver operations.""" def host_stats(self, refresh=True): return {'host_memory_total': 3 * 1024 * 1024, 'host_memory_free_computed': 2 * 1024 * 1024, 'disk_total': 4 * 1024 * 1024 * 1024, 'disk_used': 5 * 1024 * 1024 * 1024, 'host_hostname': 'somename', 'supported_instances': 'x86_64', 'host_cpu_info': {'cpu_count': 50}} def test_available_resource(self): self.flags(xenapi_connection_url='test_url', xenapi_connection_password='test_pass') stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) driver = xenapi.XenAPIDriver(fake.FakeVirtAPI(), False) driver._session.product_version = (6, 8, 2) self.stubs.Set(driver, 'get_host_stats', self.host_stats) resources = driver.get_available_resource(None) self.assertEqual(6008002, resources['hypervisor_version']) self.assertEqual(0, resources['vcpus']) self.assertEqual(3, resources['memory_mb']) self.assertEqual(4, resources['local_gb']) self.assertEqual(0, resources['vcpus_used']) self.assertEqual(3 - 2, resources['memory_mb_used']) self.assertEqual(5, resources['local_gb_used']) self.assertEqual('xen', resources['hypervisor_type']) self.assertEqual('somename', resources['hypervisor_hostname']) self.assertEqual(50, resources['cpu_info']) def test_overhead(self): self.flags(xenapi_connection_url='test_url', xenapi_connection_password='test_pass') stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) driver = xenapi.XenAPIDriver(fake.FakeVirtAPI(), False) instance = {'memory_mb': 30720} # expected memory overhead per: # https://wiki.openstack.org/wiki/XenServer/Overhead expected = math.ceil(251.832) overhead = driver.estimate_instance_overhead(instance) self.assertEqual(expected, overhead['memory_mb'])
TieWei/nova
nova/tests/virt/xenapi/test_driver.py
Python
apache-2.0
2,891
# Copyright 2016 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import logging from battor import battor_error from battor import battor_wrapper from py_utils import cloud_storage from devil.android import battery_utils from py_trace_event import trace_time from telemetry.internal.platform import tracing_agent from telemetry.internal.util import atexit_with_log from tracing.trace_data import trace_data def _ReenableChargingIfNeeded(battery): if not battery.GetCharging(): battery.SetCharging(True) logging.info('Charging status checked at exit.') class BattOrTracingAgent(tracing_agent.TracingAgent): """A tracing agent for getting power data from a BattOr device. BattOrTracingAgent allows Telemetry to issue high-level tracing commands (StartTracing, StopTracing, RecordClockSyncMarker) to BattOrs, which are high-frequency power monitors used for battery testing. """ def __init__(self, platform_backend): super(BattOrTracingAgent, self).__init__(platform_backend) self._platform_backend = platform_backend android_device = ( platform_backend.device if platform_backend.GetOSName() == 'android' else None) self._battery = ( battery_utils.BatteryUtils(platform_backend.device) if platform_backend.GetOSName() == 'android' else None) self._battor = battor_wrapper.BattOrWrapper( platform_backend.GetOSName(), android_device=android_device, serial_log_bucket=cloud_storage.TELEMETRY_OUTPUT) @classmethod def IsSupported(cls, platform_backend): """Returns True if BattOr tracing is available.""" if platform_backend.GetOSName() == 'android': # TODO(rnephew): When we pass BattOr device map into Telemetry, change # this to reflect that. return battor_wrapper.IsBattOrConnected( 'android', android_device=platform_backend.device) return battor_wrapper.IsBattOrConnected(platform_backend.GetOSName()) def StartAgentTracing(self, config, timeout): """Start tracing on the BattOr. Args: config: A TracingConfig instance. timeout: number of seconds that this tracing agent should try to start tracing until timing out. Returns: True if the tracing agent started successfully. """ if not config.enable_battor_trace: return False try: if self._battery: self._battery.SetCharging(False) atexit_with_log.Register(_ReenableChargingIfNeeded, self._battery) self._battor.StartShell() self._battor.StartTracing() return True except battor_error.BattOrError: if self._battery: self._battery.SetCharging(True) raise def StopAgentTracing(self): """Stops tracing on the BattOr.""" try: self._battor.StopTracing() finally: if self._battery: self._battery.SetCharging(True) def SupportsExplicitClockSync(self): return self._battor.SupportsExplicitClockSync() def RecordClockSyncMarker(self, sync_id, record_controller_clock_sync_marker_callback): """Records a clock sync marker in the BattOr trace. Args: sync_id: Unique id for sync event. record_controller_clock_sync_marker_callback: Function that takes a sync ID and a timestamp as arguments. This function typically will record the tracing controller clock sync marker. """ timestamp = trace_time.Now() try: self._battor.RecordClockSyncMarker(sync_id) except battor_error.BattOrError: logging.critical( 'Error while clock syncing with BattOr. 
Killing BattOr shell.') self._battor.KillBattOrShell() raise record_controller_clock_sync_marker_callback(sync_id, timestamp) def CollectAgentTraceData(self, trace_data_builder, timeout=None): data = self._battor.CollectTraceData(timeout=timeout) trace_data_builder.AddTraceFor(trace_data.BATTOR_TRACE_PART, data)
benschmaus/catapult
telemetry/telemetry/internal/platform/tracing_agent/battor_tracing_agent.py
Python
bsd-3-clause
4,035
"""Helpers for listening to events.""" import functools as ft from datetime import timedelta from ..core import HomeAssistant, callback from ..const import ( ATTR_NOW, EVENT_STATE_CHANGED, EVENT_TIME_CHANGED, MATCH_ALL) from ..util import dt as dt_util from ..util.async import run_callback_threadsafe # PyLint does not like the use of threaded_listener_factory # pylint: disable=invalid-name def threaded_listener_factory(async_factory): """Convert an async event helper to a threaded one.""" @ft.wraps(async_factory) def factory(*args, **kwargs): """Call async event helper safely.""" hass = args[0] if not isinstance(hass, HomeAssistant): raise TypeError('First parameter needs to be a hass instance') async_remove = run_callback_threadsafe( hass.loop, ft.partial(async_factory, *args, **kwargs)).result() def remove(): """Threadsafe removal.""" run_callback_threadsafe(hass.loop, async_remove).result() return remove return factory def async_track_state_change(hass, entity_ids, action, from_state=None, to_state=None): """Track specific state changes. entity_ids, from_state and to_state can be string or list. Use list to match multiple. Returns a function that can be called to remove the listener. Must be run within the event loop. """ from_state = _process_state_match(from_state) to_state = _process_state_match(to_state) # Ensure it is a lowercase list with entity ids we want to match on if entity_ids == MATCH_ALL: pass elif isinstance(entity_ids, str): entity_ids = (entity_ids.lower(),) else: entity_ids = tuple(entity_id.lower() for entity_id in entity_ids) @callback def state_change_listener(event): """The listener that listens for specific state changes.""" if entity_ids != MATCH_ALL and \ event.data.get('entity_id') not in entity_ids: return if event.data.get('old_state') is not None: old_state = event.data['old_state'].state else: old_state = None if event.data.get('new_state') is not None: new_state = event.data['new_state'].state else: new_state = None if _matcher(old_state, from_state) and _matcher(new_state, to_state): hass.async_run_job(action, event.data.get('entity_id'), event.data.get('old_state'), event.data.get('new_state')) return hass.bus.async_listen(EVENT_STATE_CHANGED, state_change_listener) track_state_change = threaded_listener_factory(async_track_state_change) def async_track_point_in_time(hass, action, point_in_time): """Add a listener that fires once after a spefic point in time.""" utc_point_in_time = dt_util.as_utc(point_in_time) @callback def utc_converter(utc_now): """Convert passed in UTC now to local now.""" hass.async_run_job(action, dt_util.as_local(utc_now)) return async_track_point_in_utc_time(hass, utc_converter, utc_point_in_time) track_point_in_time = threaded_listener_factory(async_track_point_in_time) def async_track_point_in_utc_time(hass, action, point_in_time): """Add a listener that fires once after a specific point in UTC time.""" # Ensure point_in_time is UTC point_in_time = dt_util.as_utc(point_in_time) @callback def point_in_time_listener(event): """Listen for matching time_changed events.""" now = event.data[ATTR_NOW] if now < point_in_time or hasattr(point_in_time_listener, 'run'): return # Set variable so that we will never run twice. # Because the event bus might have to wait till a thread comes # available to execute this listener it might occur that the # listener gets lined up twice to be executed. This will make # sure the second time it does nothing. 
point_in_time_listener.run = True async_unsub() hass.async_run_job(action, now) async_unsub = hass.bus.async_listen(EVENT_TIME_CHANGED, point_in_time_listener) return async_unsub track_point_in_utc_time = threaded_listener_factory( async_track_point_in_utc_time) def async_track_sunrise(hass, action, offset=None): """Add a listener that will fire a specified offset from sunrise daily.""" from homeassistant.components import sun offset = offset or timedelta() def next_rise(): """Return the next sunrise.""" next_time = sun.next_rising_utc(hass) + offset while next_time < dt_util.utcnow(): next_time = next_time + timedelta(days=1) return next_time @callback def sunrise_automation_listener(now): """Called when it's time for action.""" nonlocal remove remove = async_track_point_in_utc_time( hass, sunrise_automation_listener, next_rise()) hass.async_run_job(action) remove = async_track_point_in_utc_time( hass, sunrise_automation_listener, next_rise()) def remove_listener(): """Remove sunset listener.""" remove() return remove_listener track_sunrise = threaded_listener_factory(async_track_sunrise) def async_track_sunset(hass, action, offset=None): """Add a listener that will fire a specified offset from sunset daily.""" from homeassistant.components import sun offset = offset or timedelta() def next_set(): """Return next sunrise.""" next_time = sun.next_setting_utc(hass) + offset while next_time < dt_util.utcnow(): next_time = next_time + timedelta(days=1) return next_time @callback def sunset_automation_listener(now): """Called when it's time for action.""" nonlocal remove remove = async_track_point_in_utc_time( hass, sunset_automation_listener, next_set()) hass.async_run_job(action) remove = async_track_point_in_utc_time( hass, sunset_automation_listener, next_set()) def remove_listener(): """Remove sunset listener.""" remove() return remove_listener track_sunset = threaded_listener_factory(async_track_sunset) def async_track_utc_time_change(hass, action, year=None, month=None, day=None, hour=None, minute=None, second=None, local=False): """Add a listener that will fire if time matches a pattern.""" # We do not have to wrap the function with time pattern matching logic # if no pattern given if all(val is None for val in (year, month, day, hour, minute, second)): @callback def time_change_listener(event): """Fire every time event that comes in.""" hass.async_run_job(action, event.data[ATTR_NOW]) return hass.bus.async_listen(EVENT_TIME_CHANGED, time_change_listener) pmp = _process_time_match year, month, day = pmp(year), pmp(month), pmp(day) hour, minute, second = pmp(hour), pmp(minute), pmp(second) @callback def pattern_time_change_listener(event): """Listen for matching time_changed events.""" now = event.data[ATTR_NOW] if local: now = dt_util.as_local(now) mat = _matcher # pylint: disable=too-many-boolean-expressions if mat(now.year, year) and \ mat(now.month, month) and \ mat(now.day, day) and \ mat(now.hour, hour) and \ mat(now.minute, minute) and \ mat(now.second, second): hass.async_run_job(action, now) return hass.bus.async_listen(EVENT_TIME_CHANGED, pattern_time_change_listener) track_utc_time_change = threaded_listener_factory(async_track_utc_time_change) def async_track_time_change(hass, action, year=None, month=None, day=None, hour=None, minute=None, second=None): """Add a listener that will fire if UTC time matches a pattern.""" return async_track_utc_time_change(hass, action, year, month, day, hour, minute, second, local=True) track_time_change = 
threaded_listener_factory(async_track_time_change) def _process_state_match(parameter): """Wrap parameter in a tuple if it is not one and returns it.""" if parameter is None or parameter == MATCH_ALL: return MATCH_ALL elif isinstance(parameter, str) or not hasattr(parameter, '__iter__'): return (parameter,) else: return tuple(parameter) def _process_time_match(parameter): """Wrap parameter in a tuple if it is not one and returns it.""" if parameter is None or parameter == MATCH_ALL: return MATCH_ALL elif isinstance(parameter, str) and parameter.startswith('/'): return parameter elif isinstance(parameter, str) or not hasattr(parameter, '__iter__'): return (parameter,) else: return tuple(parameter) def _matcher(subject, pattern): """Return True if subject matches the pattern. Pattern is either a tuple of allowed subjects or a `MATCH_ALL`. """ if isinstance(pattern, str) and pattern.startswith('/'): try: return subject % float(pattern.lstrip('/')) == 0 except ValueError: return False return MATCH_ALL == pattern or subject in pattern
srcLurker/home-assistant
homeassistant/helpers/event.py
Python
mit
9,620
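The helpers above are consumed by handing them a callback plus match criteria. Here is a hedged sketch, assuming a running HomeAssistant instance `hass`; the entity id and callbacks are made up for illustration.

from datetime import timedelta

from homeassistant.helpers.event import (
    track_point_in_utc_time, track_state_change, track_utc_time_change)
from homeassistant.util import dt as dt_util

def kitchen_light_turned_on(entity_id, old_state, new_state):
    # Called with the entity id plus the old and new State objects.
    print('{} is now {}'.format(entity_id, new_state.state))

# Fire only on off -> on transitions of one entity; keep the unsubscribe handle.
unsub = track_state_change(hass, 'light.kitchen', kitchen_light_turned_on,
                           from_state='off', to_state='on')

# One-shot listener five minutes from now (callback receives the fire time).
track_point_in_utc_time(hass, lambda now: print('five minutes passed at', now),
                        dt_util.utcnow() + timedelta(minutes=5))

# Pattern listener: every hour on the half hour, UTC.
track_utc_time_change(hass, lambda now: print('tick', now), minute=30, second=0)

unsub()  # stop listening for the state changes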
"""Base classes for cursors. These classes centralize common code. """ from vtdb import dbexceptions class BasePEP0249Cursor(object): """Cursor with common PEP0249 implementations.""" def __init__(self): self._clear_common_state() self._conn = None def callproc(self): """For PEP 0249.""" raise dbexceptions.NotSupportedError def executemany(self, sql, params_list): """For PEP 0249.""" _ = sql, params_list raise dbexceptions.NotSupportedError def nextset(self): """For PEP 0249.""" raise dbexceptions.NotSupportedError def setinputsizes(self, sizes): """For PEP 0249.""" _ = sizes def setoutputsize(self, size, column=None): """For PEP 0249.""" _ = size, column @property def rownumber(self): return self.index def __iter__(self): """For PEP 0249: To make cursors compatible to the iteration protocol.""" return self def next(self): """For PEP 0249.""" val = self.fetchone() if val is None: raise StopIteration return val def close(self): """For PEP 0249.""" raise NotImplementedError def fetchone(self): """For PEP 0249.""" raise NotImplementedError def fetchmany(self, size=None): """For PEP 0249.""" raise NotImplementedError def fetchall(self): """For PEP 0249.""" raise NotImplementedError def _clear_common_state(self): self.index = 0 @property def connection(self): if not self._conn: raise dbexceptions.ProgrammingError( 'Cannot use closed cursor %s.' % self.__class__) return self._conn class BaseListCursor(BasePEP0249Cursor): """Base cursor where results are stored as a list. Execute call should return a (results, rowcount, lastrowid, description) tuple. The fetch commands traverse self.results. """ arraysize = 1 def __init__(self): super(BaseListCursor, self).__init__() self._clear_list_state() self.effective_caller_id = None def _clear_list_state(self): self._clear_common_state() self.description = None self.lastrowid = None self.rowcount = None self.results = None def set_effective_caller_id(self, effective_caller_id): """Set the effective caller id that will be used in upcoming calls.""" self.effective_caller_id = effective_caller_id def begin(self): return self.connection.begin(self.effective_caller_id) def commit(self): return self.connection.commit() def rollback(self): return self.connection.rollback() def _check_fetch(self): if self.results is None: raise dbexceptions.ProgrammingError('Fetch called before execute.') def _handle_transaction_sql(self, sql): sql_check = sql.strip().lower() if sql_check == 'begin': self.set_effective_caller_id(self.effective_caller_id) self.begin() return True elif sql_check == 'commit': self.commit() return True elif sql_check == 'rollback': self.rollback() return True else: return False def close(self): self._clear_list_state() self._conn = None def fetchone(self): self._check_fetch() if self.index >= len(self.results): return None self.index += 1 return self.results[self.index - 1] def fetchmany(self, size=None): self._check_fetch() if self.index >= len(self.results): return [] if size is None: size = self.arraysize res = self.results[self.index:self.index + size] self.index += size return res def fetchall(self): self._check_fetch() return self.fetchmany(len(self.results) - self.index) class BaseStreamCursor(BasePEP0249Cursor): """Base cursor where results are returned as a generator. This supports large queries. An execute call returns a (generator, description) pair. The fetch functions read items from the generator until it is exhausted. 
""" arraysize = 1 def __init__(self): super(BaseStreamCursor, self).__init__() self._clear_stream_state() self.effective_caller_id = None def set_effective_caller_id(self, effective_caller_id): """Set the effective caller id that will be used in upcoming calls.""" self.effective_caller_id = effective_caller_id def _clear_stream_state(self): self._clear_common_state() self.description = None self.generator = None def fetchone(self): if self.description is None: raise dbexceptions.ProgrammingError('Fetch called before execute.') self.index += 1 try: return self.generator.next() except StopIteration: return None # fetchmany can be called until it returns no rows. Returning less rows # than what we asked for is also an indication we ran out, but the cursor # API in PEP249 is silent about that. def fetchmany(self, size=None): if size is None: size = self.arraysize result = [] for _ in xrange(size): row = self.fetchone() if row is None: break result.append(row) return result def fetchall(self): result = [] while True: row = self.fetchone() if row is None: break result.append(row) return result def close(self): if self.generator: self.generator.close() self._clear_stream_state() self._conn = None
danielmt/vshard
vendor/github.com/youtube/vitess/py/vtdb/base_cursor.py
Python
mit
5,310
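Because both cursor bases follow PEP 249, application code can stay oblivious to whether results were buffered (BaseListCursor) or streamed (BaseStreamCursor). A small generic sketch, assuming a connected vtdb cursor obtained elsewhere:

def iter_rows(cursor, batch_size=100):
    """Yield rows in fixed-size batches until the result set is exhausted.

    Works with either cursor base above, since both expose fetchmany() and
    return an empty list once the results run out.
    """
    while True:
        rows = cursor.fetchmany(batch_size)
        if not rows:
            break
        for row in rows:
            yield row

# Both bases also implement the iterator protocol, so plain iteration works:
#     for row in cursor:
#         handle(row)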
""" This page is in the table of contents. Viewpoint move is a mouse tool to move the viewpoint in the xy plane. When the mouse is clicked and dragged on the canvas, viewpoint move will drag the scroll pane accordingly. If the shift key is also pressed, the scroll pane will be moved only in the x or y direction, whichever is largest. When the viewpoint move tool is chosen and the canvas has the focus, viewpoint move will listen to the arrow keys. Clicking in the canvas gives the canvas the focus, and when the canvas has the focus a thick black border is drawn around the canvas. When the right arrow key is pressed, viewpoint move will move the scroll pane to the right by a pixel. When the left arrow key is pressed, the scroll pane will be moved a pixel to the left. The up arrow key moves the scroll pane a pixel up and the down arow key moves the scroll pane a pixel down. """ from __future__ import absolute_import #Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module. import __init__ from skeinforge_application.skeinforge_plugins.analyze_plugins.analyze_utilities.mouse_tool_base import MouseToolBase from fabmetheus_utilities import settings __author__ = 'Enrique Perez (perez_enrique@yahoo.com)' __date__ = '$Date: 2008/21/04 $' __license__ = 'GPL 3.0' def getNewMouseTool(): "Get a new mouse tool." return ViewpointMove() class ViewpointMove( MouseToolBase ): "Display the line when it is clicked." def button1( self, event, shift = False ): "Print line text and connection line." self.destroyEverythingGetFocus() self.buttonOnePressedScreenCoordinate = complex( event.x, event.y ) self.scrollPaneFraction = self.window.getScrollPaneFraction() def buttonRelease1( self, event, shift = False ): "The left button was released, <ButtonRelease-1> function." self.destroyEverything() def destroyEverything(self): "Destroy items." self.buttonOnePressedScreenCoordinate = None def keyPressDown(self, event): "The down arrow was pressed." self.setScrollPaneMove( complex( 0.0, 1.0 ) ) def keyPressLeft(self, event): "The left arrow was pressed." self.setScrollPaneMove( complex( - 1.0, 0.0 ) ) def keyPressRight(self, event): "The right arrow was pressed." self.setScrollPaneMove( complex( 1.0, 0.0 ) ) def keyPressUp(self, event): "The up arrow was pressed." self.setScrollPaneMove( complex(0.0, -1.0) ) def motion( self, event, shift = False ): "The mouse moved, <Motion> function." if self.buttonOnePressedScreenCoordinate == None: return motionCoordinate = complex( event.x, event.y ) relativeMotion = motionCoordinate - self.buttonOnePressedScreenCoordinate if shift: if abs( relativeMotion.real ) > abs( relativeMotion.imag ): relativeMotion = complex( relativeMotion.real, 0.0 ) else: relativeMotion = complex( 0.0, relativeMotion.imag ) self.relativeMove( relativeMotion ) def relativeMove( self, relativeMotion ): "Move the view given the relative motion." relativeScreenMotion = complex( relativeMotion.real / float( self.window.screenSize.real ), relativeMotion.imag / float( self.window.screenSize.imag ) ) moveTo = self.scrollPaneFraction - relativeScreenMotion self.window.relayXview( settings.Tkinter.MOVETO, moveTo.real ) self.window.relayYview( settings.Tkinter.MOVETO, moveTo.imag ) def setScrollPaneMove( self, relativeMotion ): "The up arrow was pressed." self.scrollPaneFraction = self.window.getScrollPaneFraction() self.relativeMove( relativeMotion )
makerbot/ReplicatorG
skein_engines/skeinforge-35/skeinforge_application/skeinforge_plugins/analyze_plugins/analyze_utilities/view_move.py
Python
gpl-2.0
3,600
# -*- coding: utf-8 -*- # # This file is part of NINJA-IDE (http://ninja-ide.org). # # NINJA-IDE is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # any later version. # # NINJA-IDE is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with NINJA-IDE; If not, see <http://www.gnu.org/licenses/>. # DISCLAIMER ABOUT READING THIS CODE: # We are not responsible for any kind of mental or emotional # damage that may arise from reading this code. import re import ast import _ast from ninja_ide.tools.logger import NinjaLogger from ninja_ide.intellisensei.analyzer import model logger = NinjaLogger('ninja_ide.tools.completion.analyzer') MAX_THRESHOLD = 3 class Analyzer(object): __mapping = { _ast.Tuple: '__builtin__.tuple', _ast.List: '__builtin__.list', _ast.ListComp: '__builtin__.list', _ast.Set: '__builtin__.set', _ast.SetComp: '__builtin__.set', _ast.Str: '__builtin__.str', _ast.Dict: '__builtin__.dict', _ast.DictComp: '__builtin__.dict', _ast.Num: '__builtin__.int', '_ast.Float': '__builtin__.float', '_ast.Bool': '__builtin__.bool', _ast.Call: model.late_resolution, _ast.Name: model.late_resolution, _ast.Attribute: model.late_resolution, } def __init__(self): self._fixed_line = -1 self.content = None # self._functions = {} def _get_valid_module(self, source, retry=0): """Try to parse the module and fix some errors if it has some.""" astModule = None try: astModule = ast.parse(source) self._fixed_line = -1 except SyntaxError as reason: line = reason.lineno - 1 if line != self._fixed_line and reason.text is not None: self._fixed_line = line new_line = '' split_source = source.splitlines() indent = re.match('^\s+', str(split_source[line])) if indent is not None: new_line = "%s%s" % (indent.group(), 'pass') split_source[line] = new_line source = '\n'.join(split_source) if retry < MAX_THRESHOLD: astModule = self._get_valid_module(source, retry + 1) return astModule def analyze(self, source, old_module=None): """Analyze the source provided and create the proper structure.""" astModule = self._get_valid_module(source) if astModule is None: return model.Module() self.content = source.split('\n') module = model.Module() for symbol in astModule.body: if symbol.__class__ is ast.Assign: assigns = self._process_assign(symbol)[0] module.add_attributes(assigns) elif symbol.__class__ in (ast.Import, ast.ImportFrom): module.add_imports(self._process_import(symbol)) elif symbol.__class__ is ast.ClassDef: module.add_class(self._process_class(symbol)) elif symbol.__class__ is ast.FunctionDef: module.add_function(self._process_function(symbol)) # elif symbol.__class__ is ast.Expr: # self._process_expression(symbol.value) if old_module is not None: self._resolve_module(module, old_module) self.content = None # self._functions = {} return module def _resolve_module(self, module, old_module): module.update_classes(old_module.classes) module.update_functions(old_module.functions) module.update_attributes(old_module.attributes) def _assign_disambiguation(self, type_name, line_content): """Provide a specific builtin for the cases were ast doesn't work.""" line = line_content.split('=') if len(line) < 2: 
logger.error('_assign_disambiguation, line not valid: %r' % line_content) return type_name value = line[1].strip() # TODO: We have to analyze when the assign is: x,y = 1, 2 if type_name is _ast.Num and '.' in value: type_name = '_ast.Float' elif value in ('True', 'False'): type_name = '_ast.Bool' elif value == 'None': type_name = None return type_name # def _process_expression(self, expr): # """Process expression, not assignment.""" # if expr.__class__ is not ast.Call: # return # args = expr.args # keywords = expr.keywords # ar = [] # kw = {} # for arg in args: # type_value = arg.__class__ # arg_name = '' # if type_value is ast.Call: # arg_name = expand_attribute(arg.func) # elif type_value is ast.Attribute: # arg_name = expand_attribute(arg.attr) # data_type = self.__mapping.get(type_value, model.late_resolution) # ar.append((arg_name, data_type)) # for key in keywords: # type_value = key.value.__class__ # data_type = self.__mapping.get(type_value, model.late_resolution) # kw[key.arg] = data_type # if expr.func.__class__ is ast.Attribute: # name = expand_attribute(expr.func) # else: # name = expr.func.id # self._functions[name] = (ar, kw) def _process_assign(self, symbol): """Process an ast.Assign object to extract the proper info.""" assigns = [] attributes = [] for var in symbol.targets: type_value = symbol.value.__class__ line_content = self.content[symbol.lineno - 1] if type_value in (_ast.Num, _ast.Name): type_value = self._assign_disambiguation( type_value, line_content) if type_value is None: continue data_type = self.__mapping.get(type_value, model.late_resolution) if var.__class__ == ast.Attribute: data = (var.attr, symbol.lineno, data_type, line_content, type_value) attributes.append(data) elif var.__class__ == ast.Name: data = (var.id, symbol.lineno, data_type, line_content, type_value) assigns.append(data) # if type_value is ast.Call: # self._process_expression(symbol.value) return (assigns, attributes) def _process_import(self, symbol): """Process an ast.Import and ast.ImportFrom object to extract data.""" imports = [] for imp in symbol.names: if symbol.__class__ is ast.ImportFrom: module_name = "%s.%s" % (symbol.module, imp.name) else: module_name = imp.name name = imp.asname if name is None: name = imp.name imports.append((name, module_name)) return imports def _process_class(self, symbol): """Process an ast.ClassDef object to extract data.""" clazz = model.Clazz(symbol.name) for base in symbol.bases: if base == 'object': continue name = expand_attribute(base) clazz.add_parent(name) #TODO: Decotator # for decorator in symbol.decorator_list: # clazz.decorators.append(decorator.id) # PARSE FUNCTIONS AND ATTRIBUTES for sym in symbol.body: if sym.__class__ is ast.Assign: assigns = self._process_assign(sym)[0] clazz.add_attributes(assigns) elif sym.__class__ is ast.FunctionDef: clazz.add_function(self._process_function(sym, clazz)) clazz.update_bases() clazz.update_with_parent_data() return clazz def _process_function(self, symbol, parent=None): """Process an ast.FunctionDef object to extract data.""" function = model.Function(symbol.name) #TODO: Decorators #We are not going to collect data from decorators yet. 
# for decorator in symbol.decorator_list: #Decorators can be: Name, Call, Attributes # function.decorators.append(decorator.id) if symbol.args.vararg is not None: assign = model.Assign(symbol.args.vararg) assign.add_data(symbol.lineno, '__builtin__.list', None, None) function.args[assign.name] = assign if symbol.args.kwarg is not None: assign = model.Assign(symbol.args.kwarg) assign.add_data(symbol.lineno, '__builtin__.dict', None, None) function.args[assign.name] = assign #We store the arguments to compare with default backwards defaults = [] for value in symbol.args.defaults: #TODO: In some cases we can have something like: a=os.path type_value = value.__class__ data_type = self.__mapping.get(type_value, None) defaults.append((data_type, type_value)) for arg in reversed(symbol.args.args): if isinstance(arg, ast.Tuple): self._parse_tuple_in_func_arg(arg, function, symbol.lineno) continue elif arg.id == 'self': continue assign = model.Assign(arg.id) data_type = (model.late_resolution, None) if defaults: data_type = defaults.pop() assign.add_data(symbol.lineno, data_type[0], None, data_type[1]) function.args[assign.name] = assign for sym in symbol.body: if sym.__class__ is ast.Assign: result = self._process_assign(sym) function.add_attributes(result[0]) if parent is not None: parent.add_attributes(result[1]) elif sym.__class__ is ast.FunctionDef: function.add_function(self._process_function(sym)) if sym.__class__ is not ast.Assign: self._search_recursive_for_types(function, sym, parent) return function def _parse_tuple_in_func_arg(self, symbol_tuple, function, lineno=0): """Parse the tuple inside a function argument call.""" for item in symbol_tuple.elts: assign = model.Assign(item.id) data_type = (model.late_resolution, None) assign.add_data(lineno, data_type[0], None, data_type[1]) function.args[assign.name] = assign def _search_recursive_for_types(self, function, symbol, parent=None): """Search for return recursively inside the function.""" if symbol.__class__ is ast.Assign: result = self._process_assign(symbol) function.add_attributes(result[0]) if parent is not None: parent.add_attributes(result[1]) elif symbol.__class__ is ast.Return: type_value = symbol.value.__class__ lineno = symbol.lineno data_type = self.__mapping.get(type_value, None) line_content = self.content[lineno - 1] if data_type != model.late_resolution: type_value = None function.add_return(lineno, data_type, line_content, type_value) elif symbol.__class__ in (ast.If, ast.For, ast.TryExcept): for sym in symbol.body: self._search_recursive_for_types(function, sym, parent) for else_item in symbol.orelse: self._search_recursive_for_types(function, else_item, parent) elif symbol.__class__ is ast.TryFinally: for sym in symbol.body: self._search_recursive_for_types(function, sym, parent) for else_item in symbol.finalbody: self._search_recursive_for_types(function, else_item, parent) # elif symbol.__class__ is ast.Expr: # self._process_expression(symbol.value) class CodeParser(ast.NodeVisitor): def analyze(self, astmodule): self.module = model.Module() self.visit(astmodule) def visit_ClassDef(self, node): self.module = self.module.create_class(node) for item in node.body: self.visit(item) self.module = self.module.close_class() return node def visit_FunctionDef(self, node): self.module.create_function(node) for item in node.body: self.visit(item) self.module = self.module.close_function() return node def visit_Name(self, node): if node.id not in self.code_names: self.code_names.append(node.id) return node def 
visit_Attribute(self, node): if node.attr not in self.code_names: self.code_names.append(node.attr) return node
centaurialpha/ninja-ide
ninja_ide/intellisensei/analyzer/analyzer.py
Python
gpl-3.0
13,250
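Analyzer.analyze takes raw source text, parses it with ast (patching syntax errors up to MAX_THRESHOLD times), and returns a populated model.Module. A minimal driving sketch follows; the import path matches the file above, while the exact shape of the returned model is internal and only described in comments.

from ninja_ide.intellisensei.analyzer.analyzer import Analyzer

source = (
    "import os\n"
    "class Greeter(object):\n"
    "    def greet(self, name='world'):\n"
    "        message = 'hello ' + name\n"
    "        return message\n"
)

# The returned object is a model.Module that the analyzer filled in through
# add_imports/add_class/add_function/add_attributes; its accessors belong to
# the internal model API rather than anything shown in this file.
module = Analyzer().analyze(source)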
############################################################################### # # Tests for XlsxWriter. # # Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org # from ..excel_comparsion_test import ExcelComparisonTest from ...workbook import Workbook class TestCompareXLSXFiles(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.maxDiff = None filename = 'chart_gradient04.xlsx' test_dir = 'xlsxwriter/test/comparison/' self.got_filename = test_dir + '_test_' + filename self.exp_filename = test_dir + 'xlsx_files/' + filename self.ignore_files = [] self.ignore_elements = {} def test_create_file(self): """Test the creation of a simple XlsxWriter file.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() chart = workbook.add_chart({'type': 'column'}) chart.axis_ids = [61363712, 61365248] data = [ [1, 2, 3, 4, 5], [2, 4, 6, 8, 10], [3, 6, 9, 12, 15], ] worksheet.write_column('A1', data[0]) worksheet.write_column('B1', data[1]) worksheet.write_column('C1', data[2]) chart.add_series({ 'values': '=Sheet1!$A$1:$A$5', 'gradient': { 'colors': ['#DDEBCF', '#9CB86E', '#156B13'], 'type': 'radial' } }) chart.add_series({'values': '=Sheet1!$B$1:$B$5'}) chart.add_series({'values': '=Sheet1!$C$1:$C$5'}) worksheet.insert_chart('E9', chart) workbook.close() self.assertExcelEqual()
jvrsantacruz/XlsxWriter
xlsxwriter/test/comparison/test_chart_gradient04.py
Python
bsd-2-clause
1,716
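Outside the comparison-test harness, the same gradient options work in a plain script. This sketch keeps only the pieces the test exercises (Workbook, add_chart, add_series with a 'gradient' dict); the output filename is hypothetical.

from xlsxwriter.workbook import Workbook

workbook = Workbook('gradient_demo.xlsx')
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column'})

worksheet.write_column('A1', [1, 2, 3, 4, 5])

# Radial three-stop gradient fill, as in the test above.
chart.add_series({
    'values': '=Sheet1!$A$1:$A$5',
    'gradient': {
        'colors': ['#DDEBCF', '#9CB86E', '#156B13'],
        'type': 'radial',
    },
})

worksheet.insert_chart('C1', chart)
workbook.close()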
""" An example illustrating how to use the :meth:`~rdflib.graph.Graph.transitive_subjects` and :meth:`~rdflib.graph.Graph.transitive_objects` graph methods Formal definition ^^^^^^^^^^^^^^^^^^ The :meth:`~rdflib.graph.Graph.transitive_objects` method finds all nodes such that there is a path from subject to one of those nodes using only the predicate property in the triples. The :meth:`~rdflib.graph.Graph.transitive_subjects` method is similar; it finds all nodes such that there is a path from the node to the object using only the predicate property. Informal description, with an example ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ In brief, :meth:`~rdflib.graph.Graph.transitive_objects` walks forward in a graph using a particular property, and :meth:`~rdflib.graph.Graph.transitive_subjects` walks backward. A good example uses a property ``ex:parent``, the semantics of which are biological parentage. The :meth:`~rdflib.graph.Graph.transitive_objects` method would get all the ancestors of a particular person (all nodes such that there is a parent path between the person and the object). The :meth:`~rdflib.graph.Graph.transitive_subjects` method would get all the descendants of a particular person (all nodes such that there is a parent path between the node and the person). So, say that your URI is ``ex:person``. This example would get all of your (known) ancestors, and then get all the (known) descendants of your maternal grandmother. .. warning:: The :meth:`transitive_objects` method has the start node as the *first* argument, but the :meth:`transitive_subjects` method has the start node as the *second* argument. User-defined transitive closures ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The method :meth:`~rdflib.graph.Graph.transitiveClosure` returns transtive closures of user-defined functions. """ if __name__=='__main__': from rdflib import ConjunctiveGraph, URIRef person = URIRef('ex:person') dad = URIRef('ex:d') mom = URIRef('ex:m') momOfDad = URIRef('ex:gm0') momOfMom = URIRef('ex:gm1') dadOfDad = URIRef('ex:gf0') dadOfMom = URIRef('ex:gf1') parent = URIRef('ex:parent') g = ConjunctiveGraph() g.add((person, parent, dad)) g.add((person, parent, mom)) g.add((dad, parent, momOfDad)) g.add((dad, parent, dadOfDad)) g.add((mom, parent, momOfMom)) g.add((mom, parent, dadOfMom)) print "Parents, forward from `ex:person`:" for i in g.transitive_objects(person, parent): print i print "Parents, *backward* from `ex:gm1`:" for i in g.transitive_subjects(parent, momOfMom): print i
ssssam/rdflib
examples/transitive.py
Python
bsd-3-clause
2,627
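The docstring above mentions transitiveClosure for user-defined walks, but the example never calls it. The following is a hedged sketch that could follow the code above, assuming the callback signature func(node, graph) that the method applies as it recurses; it reuses the g, parent and person objects already built.

# Hypothetical user-defined closure over the same graph as above.
def known_parents(node, graph):
    """Yield the direct parents of `node`; transitiveClosure recurses for us."""
    for parent_node in graph.objects(node, parent):
        yield parent_node

print "Ancestors via transitiveClosure:"
for ancestor in g.transitiveClosure(known_parents, person):
    print ancestor  # Python 2 print statements, matching the example above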
import pymysql from pymysql.tests import base try: import unittest2 as unittest except ImportError: import unittest try: import imp reload = imp.reload except AttributeError: pass import datetime class TestOldIssues(base.PyMySQLTestCase): def test_issue_3(self): """ undefined methods datetime_or_None, date_or_None """ conn = self.connections[0] c = conn.cursor() c.execute("drop table if exists issue3") c.execute("create table issue3 (d date, t time, dt datetime, ts timestamp)") try: c.execute("insert into issue3 (d, t, dt, ts) values (%s,%s,%s,%s)", (None, None, None, None)) c.execute("select d from issue3") self.assertEqual(None, c.fetchone()[0]) c.execute("select t from issue3") self.assertEqual(None, c.fetchone()[0]) c.execute("select dt from issue3") self.assertEqual(None, c.fetchone()[0]) c.execute("select ts from issue3") self.assertTrue(isinstance(c.fetchone()[0], datetime.datetime)) finally: c.execute("drop table issue3") def test_issue_4(self): """ can't retrieve TIMESTAMP fields """ conn = self.connections[0] c = conn.cursor() c.execute("drop table if exists issue4") c.execute("create table issue4 (ts timestamp)") try: c.execute("insert into issue4 (ts) values (now())") c.execute("select ts from issue4") self.assertTrue(isinstance(c.fetchone()[0], datetime.datetime)) finally: c.execute("drop table issue4") def test_issue_5(self): """ query on information_schema.tables fails """ con = self.connections[0] cur = con.cursor() cur.execute("select * from information_schema.tables") def test_issue_6(self): """ exception: TypeError: ord() expected a character, but string of length 0 found """ # ToDo: this test requires access to db 'mysql'. kwargs = self.databases[0].copy() kwargs['db'] = "mysql" conn = pymysql.connect(**kwargs) c = conn.cursor() c.execute("select * from user") conn.close() def test_issue_8(self): """ Primary Key and Index error when selecting data """ conn = self.connections[0] c = conn.cursor() c.execute("drop table if exists test") c.execute("""CREATE TABLE `test` (`station` int(10) NOT NULL DEFAULT '0', `dh` datetime NOT NULL DEFAULT '0000-00-00 00:00:00', `echeance` int(1) NOT NULL DEFAULT '0', `me` double DEFAULT NULL, `mo` double DEFAULT NULL, PRIMARY KEY (`station`,`dh`,`echeance`)) ENGINE=MyISAM DEFAULT CHARSET=latin1;""") try: self.assertEqual(0, c.execute("SELECT * FROM test")) c.execute("ALTER TABLE `test` ADD INDEX `idx_station` (`station`)") self.assertEqual(0, c.execute("SELECT * FROM test")) finally: c.execute("drop table test") def test_issue_9(self): """ sets DeprecationWarning in Python 2.6 """ try: reload(pymysql) except DeprecationWarning: self.fail() def test_issue_13(self): """ can't handle large result fields """ conn = self.connections[0] cur = conn.cursor() cur.execute("drop table if exists issue13") try: cur.execute("create table issue13 (t text)") # ticket says 18k size = 18*1024 cur.execute("insert into issue13 (t) values (%s)", ("x" * size,)) cur.execute("select t from issue13") # use assertTrue so that obscenely huge error messages don't print r = cur.fetchone()[0] self.assertTrue("x" * size == r) finally: cur.execute("drop table issue13") def test_issue_15(self): """ query should be expanded before perform character encoding """ conn = self.connections[0] c = conn.cursor() c.execute("drop table if exists issue15") c.execute("create table issue15 (t varchar(32))") try: c.execute("insert into issue15 (t) values (%s)", (u'\xe4\xf6\xfc',)) c.execute("select t from issue15") self.assertEqual(u'\xe4\xf6\xfc', c.fetchone()[0]) finally: 
c.execute("drop table issue15") def test_issue_16(self): """ Patch for string and tuple escaping """ conn = self.connections[0] c = conn.cursor() c.execute("drop table if exists issue16") c.execute("create table issue16 (name varchar(32) primary key, email varchar(32))") try: c.execute("insert into issue16 (name, email) values ('pete', 'floydophone')") c.execute("select email from issue16 where name=%s", ("pete",)) self.assertEqual("floydophone", c.fetchone()[0]) finally: c.execute("drop table issue16") @unittest.skip("test_issue_17() requires a custom, legacy MySQL configuration and will not be run.") def test_issue_17(self): """ could not connect mysql use passwod """ conn = self.connections[0] host = self.databases[0]["host"] db = self.databases[0]["db"] c = conn.cursor() # grant access to a table to a user with a password try: c.execute("drop table if exists issue17") c.execute("create table issue17 (x varchar(32) primary key)") c.execute("insert into issue17 (x) values ('hello, world!')") c.execute("grant all privileges on %s.issue17 to 'issue17user'@'%%' identified by '1234'" % db) conn.commit() conn2 = pymysql.connect(host=host, user="issue17user", passwd="1234", db=db) c2 = conn2.cursor() c2.execute("select x from issue17") self.assertEqual("hello, world!", c2.fetchone()[0]) finally: c.execute("drop table issue17") class TestNewIssues(base.PyMySQLTestCase): def test_issue_34(self): try: pymysql.connect(host="localhost", port=1237, user="root") self.fail() except pymysql.OperationalError as e: self.assertEqual(2003, e.args[0]) except Exception: self.fail() def test_issue_33(self): conn = pymysql.connect(charset="utf8", **self.databases[0]) c = conn.cursor() try: c.execute(b"drop table if exists hei\xc3\x9fe".decode("utf8")) c.execute(b"create table hei\xc3\x9fe (name varchar(32))".decode("utf8")) c.execute(b"insert into hei\xc3\x9fe (name) values ('Pi\xc3\xb1ata')".decode("utf8")) c.execute(b"select name from hei\xc3\x9fe".decode("utf8")) self.assertEqual(b"Pi\xc3\xb1ata".decode("utf8"), c.fetchone()[0]) finally: c.execute(b"drop table hei\xc3\x9fe".decode("utf8")) @unittest.skip("This test requires manual intervention") def test_issue_35(self): conn = self.connections[0] c = conn.cursor() print("sudo killall -9 mysqld within the next 10 seconds") try: c.execute("select sleep(10)") self.fail() except pymysql.OperationalError as e: self.assertEqual(2013, e.args[0]) def test_issue_36(self): conn = self.connections[0] c = conn.cursor() # kill connections[0] c.execute("show processlist") kill_id = None for row in c.fetchall(): id = row[0] info = row[7] if info == "show processlist": kill_id = id break # now nuke the connection conn.kill(kill_id) # make sure this connection has broken try: c.execute("show tables") self.fail() except Exception: pass # check the process list from the other connection try: c = self.connections[1].cursor() c.execute("show processlist") ids = [row[0] for row in c.fetchall()] self.assertFalse(kill_id in ids) finally: del self.connections[0] def test_issue_37(self): conn = self.connections[0] c = conn.cursor() self.assertEqual(1, c.execute("SELECT @foo")) self.assertEqual((None,), c.fetchone()) self.assertEqual(0, c.execute("SET @foo = 'bar'")) c.execute("set @foo = 'bar'") def test_issue_38(self): conn = self.connections[0] c = conn.cursor() datum = "a" * 1024 * 1023 # reduced size for most default mysql installs try: c.execute("drop table if exists issue38") c.execute("create table issue38 (id integer, data mediumblob)") c.execute("insert into issue38 values 
(1, %s)", (datum,)) finally: c.execute("drop table issue38") def disabled_test_issue_54(self): conn = self.connections[0] c = conn.cursor() c.execute("drop table if exists issue54") big_sql = "select * from issue54 where " big_sql += " and ".join("%d=%d" % (i,i) for i in range(0, 100000)) try: c.execute("create table issue54 (id integer primary key)") c.execute("insert into issue54 (id) values (7)") c.execute(big_sql) self.assertEqual(7, c.fetchone()[0]) finally: c.execute("drop table issue54") class TestGitHubIssues(base.PyMySQLTestCase): def test_issue_66(self): """ 'Connection' object has no attribute 'insert_id' """ conn = self.connections[0] c = conn.cursor() self.assertEqual(0, conn.insert_id()) try: c.execute("drop table if exists issue66") c.execute("create table issue66 (id integer primary key auto_increment, x integer)") c.execute("insert into issue66 (x) values (1)") c.execute("insert into issue66 (x) values (1)") self.assertEqual(2, conn.insert_id()) finally: c.execute("drop table issue66") def test_issue_79(self): """ Duplicate field overwrites the previous one in the result of DictCursor """ conn = self.connections[0] c = conn.cursor(pymysql.cursors.DictCursor) c.execute("drop table if exists a") c.execute("drop table if exists b") c.execute("""CREATE TABLE a (id int, value int)""") c.execute("""CREATE TABLE b (id int, value int)""") a=(1,11) b=(1,22) try: c.execute("insert into a values (%s, %s)", a) c.execute("insert into b values (%s, %s)", b) c.execute("SELECT * FROM a inner join b on a.id = b.id") r = c.fetchall()[0] self.assertEqual(r['id'], 1) self.assertEqual(r['value'], 11) self.assertEqual(r['b.value'], 22) finally: c.execute("drop table a") c.execute("drop table b") def test_issue_95(self): """ Leftover trailing OK packet for "CALL my_sp" queries """ conn = self.connections[0] cur = conn.cursor() cur.execute("DROP PROCEDURE IF EXISTS `foo`") cur.execute("""CREATE PROCEDURE `foo` () BEGIN SELECT 1; END""") try: cur.execute("""CALL foo()""") cur.execute("""SELECT 1""") self.assertEqual(cur.fetchone()[0], 1) finally: cur.execute("DROP PROCEDURE IF EXISTS `foo`") def test_issue_114(self): """ autocommit is not set after reconnecting with ping() """ conn = pymysql.connect(charset="utf8", **self.databases[0]) conn.autocommit(False) c = conn.cursor() c.execute("""select @@autocommit;""") self.assertFalse(c.fetchone()[0]) conn.close() conn.ping() c.execute("""select @@autocommit;""") self.assertFalse(c.fetchone()[0]) conn.close() # Ensure autocommit() is still working conn = pymysql.connect(charset="utf8", **self.databases[0]) c = conn.cursor() c.execute("""select @@autocommit;""") self.assertFalse(c.fetchone()[0]) conn.close() conn.ping() conn.autocommit(True) c.execute("""select @@autocommit;""") self.assertTrue(c.fetchone()[0]) conn.close() def test_issue_175(self): """ The number of fields returned by server is read in wrong way """ conn = self.connections[0] cur = conn.cursor() for length in (200, 300): columns = ', '.join('c{0} integer'.format(i) for i in range(length)) sql = 'create table test_field_count ({0})'.format(columns) try: cur.execute(sql) cur.execute('select * from test_field_count') assert len(cur.description) == length finally: cur.execute('drop table if exists test_field_count') __all__ = ["TestOldIssues", "TestNewIssues", "TestGitHubIssues"] if __name__ == "__main__": import unittest unittest.main()
MonicaHsu/truvaluation
venv/lib/python2.7/site-packages/pymysql/tests/test_issues.py
Python
mit
13,281
#!/usr/bin/env python


class Bitmap(object):
    def __init__(self):
        pass

    def __getitem__(self, item):
        pass

    def find(self, searchbox=(), tol=0, frm=(), _type='default', client=None):
        # A search box, when given, must be an iterable of exactly four
        # values (x1, y1, x2, y2).
        if hasattr(searchbox, '__iter__'):
            if len(searchbox) != 4:
                raise Exception("Invalid argument")
        # The appropriate client calls simply follow here.
riwu/Simba
Projects/libmml/pymml/mml/bitmap.py
Python
gpl-3.0
402
from scapy.all import *

def insert_ap(pkt):
    ## Done in the lfilter param
    # if Dot11Beacon not in pkt and Dot11ProbeResp not in pkt:
    #     return

    # Ignore access points we have already reported.
    bssid = pkt[Dot11].addr3
    if bssid in aps:
        return

    # Walk the chain of Dot11Elt information elements to pull out the SSID,
    # channel and advertised crypto suites.
    p = pkt[Dot11Elt]
    cap = pkt.sprintf("{Dot11Beacon:%Dot11Beacon.cap%}"
                      "{Dot11ProbeResp:%Dot11ProbeResp.cap%}").split('+')
    ssid, channel = None, None
    crypto = set()
    while isinstance(p, Dot11Elt):
        if p.ID == 0:
            ssid = p.info
        elif p.ID == 3:
            channel = ord(p.info)
        elif p.ID == 48:
            crypto.add("WPA2")
        elif p.ID == 221 and p.info.startswith('\x00P\xf2\x01\x01\x00'):
            crypto.add("WPA")
        p = p.payload
    if not crypto:
        if 'privacy' in cap:
            crypto.add("WEP")
        else:
            crypto.add("OPN")
    print "NEW AP: %r [%s], channel %d, %s" % (ssid, bssid, channel,
                                               ' / '.join(crypto))
    aps[bssid] = (ssid, channel, crypto)

aps = {}
sniff(iface='mon0', prn=insert_ap, store=False,
      lfilter=lambda p: (Dot11Beacon in p or Dot11ProbeResp in p))
firebitsbr/infernal-twin
w-sniffer.py
Python
gpl-3.0
1,165
from __future__ import unicode_literals from frappe import _ def get_data(): return { 'heatmap': True, 'heatmap_message': _('Member Activity'), 'fieldname': 'member', 'transactions': [ { 'label': _('Membership Details'), 'items': ['Membership'] } ] }
ovresko/erpnext
erpnext/non_profit/doctype/member/member_dashboard.py
Python
gpl-3.0
277
import os import sys here = os.path.abspath(os.path.split(__file__)[0]) repo_root = os.path.abspath(os.path.join(here, os.pardir)) sys.path.insert(0, os.path.join(here)) sys.path.insert(0, os.path.join(here, "six")) sys.path.insert(0, os.path.join(here, "html5lib")) sys.path.insert(0, os.path.join(here, "wptserve")) sys.path.insert(0, os.path.join(here, "pywebsocket")) sys.path.insert(0, os.path.join(here, "third_party", "attrs", "src")) sys.path.insert(0, os.path.join(here, "third_party", "funcsigs")) sys.path.insert(0, os.path.join(here, "third_party", "pluggy")) sys.path.insert(0, os.path.join(here, "third_party", "py")) sys.path.insert(0, os.path.join(here, "third_party", "pytest")) sys.path.insert(0, os.path.join(here, "webdriver")) sys.path.insert(0, os.path.join(here, "wptrunner"))
Varentsov/servo
tests/wpt/web-platform-tests/tools/localpaths.py
Python
mpl-2.0
802
#!/usr/bin/env python # # Copyright 2008, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Unit test for the gtest_xml_output module.""" __author__ = "keith.ray@gmail.com (Keith Ray)" import os from xml.dom import minidom, Node import gtest_test_utils import gtest_xml_test_utils GTEST_OUTPUT_SUBDIR = "xml_outfiles" GTEST_OUTPUT_1_TEST = "gtest_xml_outfile1_test_" GTEST_OUTPUT_2_TEST = "gtest_xml_outfile2_test_" EXPECTED_XML_1 = """<?xml version="1.0" encoding="UTF-8"?> <testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests"> <testsuite name="PropertyOne" tests="1" failures="0" disabled="0" errors="0" time="*"> <testcase name="TestSomeProperties" status="run" time="*" classname="PropertyOne" SetUpProp="1" TestSomeProperty="1" TearDownProp="1" /> </testsuite> </testsuites> """ EXPECTED_XML_2 = """<?xml version="1.0" encoding="UTF-8"?> <testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests"> <testsuite name="PropertyTwo" tests="1" failures="0" disabled="0" errors="0" time="*"> <testcase name="TestSomeProperties" status="run" time="*" classname="PropertyTwo" SetUpProp="2" TestSomeProperty="2" TearDownProp="2" /> </testsuite> </testsuites> """ class GTestXMLOutFilesTest(gtest_xml_test_utils.GTestXMLTestCase): """Unit test for Google Test's XML output functionality.""" def setUp(self): # We want the trailing '/' that the last "" provides in os.path.join, for # telling Google Test to create an output directory instead of a single file # for xml output. 
self.output_dir_ = os.path.join(gtest_test_utils.GetTempDir(), GTEST_OUTPUT_SUBDIR, "") self.DeleteFilesAndDir() def tearDown(self): self.DeleteFilesAndDir() def DeleteFilesAndDir(self): try: os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_1_TEST + ".xml")) except os.error: pass try: os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_2_TEST + ".xml")) except os.error: pass try: os.rmdir(self.output_dir_) except os.error: pass def testOutfile1(self): self._TestOutFile(GTEST_OUTPUT_1_TEST, EXPECTED_XML_1) def testOutfile2(self): self._TestOutFile(GTEST_OUTPUT_2_TEST, EXPECTED_XML_2) def _TestOutFile(self, test_name, expected_xml): gtest_prog_path = gtest_test_utils.GetTestExecutablePath(test_name) command = [gtest_prog_path, "--gtest_output=xml:%s" % self.output_dir_] p = gtest_test_utils.Subprocess(command, working_dir=gtest_test_utils.GetTempDir()) self.assertTrue(p.exited) self.assertEqual(0, p.exit_code) # TODO(wan@google.com): libtool causes the built test binary to be # named lt-gtest_xml_outfiles_test_ instead of # gtest_xml_outfiles_test_. To account for this possibillity, we # allow both names in the following code. We should remove this # hack when Chandler Carruth's libtool replacement tool is ready. output_file_name1 = test_name + ".xml" output_file1 = os.path.join(self.output_dir_, output_file_name1) output_file_name2 = 'lt-' + output_file_name1 output_file2 = os.path.join(self.output_dir_, output_file_name2) self.assertTrue(os.path.isfile(output_file1) or os.path.isfile(output_file2), output_file1) expected = minidom.parseString(expected_xml) if os.path.isfile(output_file1): actual = minidom.parse(output_file1) else: actual = minidom.parse(output_file2) self.NormalizeXml(actual.documentElement) self.AssertEquivalentNodes(expected.documentElement, actual.documentElement) expected.unlink() actual.unlink() if __name__ == "__main__": os.environ["GTEST_STACK_TRACE_DEPTH"] = "0" gtest_test_utils.Main()
fvacek/freeopcua
tests/gtest/test/gtest_xml_outfiles_test.py
Python
lgpl-3.0
5345
#!/usr/bin/env python2.7 # Copyright 2015 gRPC authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Opens a TCP connection to a specified server and then exits.""" import argparse import socket import sys import threading import time def main(): argp = argparse.ArgumentParser( description='Open a TCP handshake to a server') argp.add_argument('-s', '--server_host', default=None, type=str, help='Server host name or IP.') argp.add_argument('-p', '--server_port', default=0, type=int, help='Port that the server is listening on.') argp.add_argument('-t', '--timeout', default=1, type=int, help='Force process exit after this number of seconds.') args = argp.parse_args() socket.create_connection([args.server_host, args.server_port], timeout=args.timeout) if __name__ == '__main__': main()
stanley-cheung/grpc
test/cpp/naming/utils/tcp_connect.py
Python
apache-2.0
1629
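For reference, a minimal sketch of the handshake tcp_connect.py performs, equivalent to running it as `tcp_connect.py --server_host 127.0.0.1 --server_port 8080 --timeout 1`; the endpoint values are placeholders.

import socket

# Placeholder endpoint; substitute the server under test.
sock = socket.create_connection(("127.0.0.1", 8080), timeout=1)
sock.close()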
#!/usr/bin/env python # # Copyright (c) Greenplum Inc 2008. All Rights Reserved. # ''' Greenplum logging facilities. This Module contains some helper functions for setting up the python builtin logging module. Tools and libraries are expected to centralize configuration of logging through these functions. Typical usage: from gppylib import gplog logger = gplog.setup_tool_logging(EXECNAME, hostname, username, logdir) if options.verbose: gplog.enable_verbose_logging() if options.quiet: gplog.quiet_stdout_logging() logger.info("Start myTool") ... ''' import datetime import logging import os import sys #------------------------------- Public Interface -------------------------------- def get_default_logger(): """ Return the singleton default logger. If a logger has not yet been established it creates one that: - Logs output to stdout - Does not setup file logging. Typicial usage would be to call one of the setup_*_logging() functions at the beginning of a script in order to establish the exact type of logging desired, afterwhich later calls to get_default_logger() can be used to return a reference to the logger. """ global _LOGGER, _SOUT_HANDLER if _LOGGER is None: _LOGGER = logging.getLogger('default') f = _get_default_formatter() _SOUT_HANDLER = EncodingStreamHandler(sys.stdout) _SOUT_HANDLER.setFormatter(f) _LOGGER.addHandler(_SOUT_HANDLER) _LOGGER.setLevel(logging.INFO) return _LOGGER def get_unittest_logger(): """ Returns a singleton logger for use by gppylib unittests: - Does not setup stdout logging - Logs output to a file named "unittest.log" in the current directory. Much like get_default_logger, except that the default logger it creates (if one does not already exist) is different. Note: perhaps the interface for this should be cleaned up. It would be more consistent to gave a single get_default_logger() method and supply a setup_unittest_logging() function. 
""" global _LOGGER, _SOUT_HANDLER if _LOGGER is None: _LOGGER = logging.getLogger('default') filename="unittest.log" _set_file_logging(filename) return _LOGGER def setup_helper_tool_logging(appName,hostname,userName): """ Returns a singleton logger for use by helper tools: - Logs output to stdout - Does not log output to a file """ logger = get_default_logger() logger.name="%s:%s" % (hostname,userName) return logger def setup_hawq_tool_logging(appName,hostname,userName,logdir=None,nonuser=False): """ Returns a singleton logger for standard Greenplum tools: - Logs output to stdout - Logs output to a file, typically in ~/gpAdminLogs """ global _DEFAULT_FORMATTER global _APP_NAME_FOR_DEFAULT_FORMAT loggerName ="%s:%s" % (hostname,userName) if nonuser: appName=appName + "_" + loggerName _APP_NAME_FOR_DEFAULT_FORMAT = appName log_filename = _enable_hawqadmin_logging(appName,logdir) # # now reset the default formatter (someone may have called get_default_logger before calling setup_tool_logging) # logger = get_default_logger() logger.name = loggerName _DEFAULT_FORMATTER = None f = _get_default_formatter() _SOUT_HANDLER.setFormatter(f) _FILE_HANDLER.setFormatter(f) return logger, log_filename def setup_tool_logging(appName,hostname,userName,logdir=None,nonuser=False): """ Returns a singleton logger for standard Greenplum tools: - Logs output to stdout - Logs output to a file, typically in ~/gpAdminLogs """ global _DEFAULT_FORMATTER global _APP_NAME_FOR_DEFAULT_FORMAT loggerName ="%s:%s" % (hostname,userName) if nonuser: appName=appName + "_" + loggerName _APP_NAME_FOR_DEFAULT_FORMAT = appName _enable_gpadmin_logging(appName,logdir) # # now reset the default formatter (someone may have called get_default_logger before calling setup_tool_logging) # logger = get_default_logger() logger.name = loggerName _DEFAULT_FORMATTER = None f = _get_default_formatter() _SOUT_HANDLER.setFormatter(f) _FILE_HANDLER.setFormatter(f) return logger def enable_verbose_logging(): """ Increases the log level to be verbose. - Applies to all logging handlers (stdout/file). """ _LOGGER.setLevel(logging.DEBUG) def quiet_stdout_logging(): """ Reduce log level for stdout logging """ global _SOUT_HANDLER _SOUT_HANDLER.setLevel(logging.WARN) def very_quiet_stdout_logging(): """ Reduce log level to critical for stdout logging """ global _SOUT_HANDLER _SOUT_HANDLER.setLevel(logging.CRITICAL) def logging_is_verbose(): """ Returns true if the logging level has been set to verbose """ return _LOGGER.getEffectiveLevel() == logging.DEBUG def logging_is_quiet(): """ Returns true if the logging level has been set to quiet. """ # Todo: Currently this checks the default LOGGER, the # quiet_stdout_logging() function only sets it on the stdout # logging handler. So typical usage will never return true. return _LOGGER.getEffectiveLevel() == logging.WARN def get_logfile(): """ Returns the name of the file we are logging to, if any. """ global _FILENAME return _FILENAME def log_literal(logger, lvl, msg): """ Logs a message to a specified logger bypassing the normal formatter and writing the message exactly as passed. The intended purpose of this is for logging messages returned from remote backends that have already been formatted. """ # We assume the logger is using the two global handlers global _SOUT_HANDLER global _FILE_HANDLER # Switch to the literal formatter # # Note: the logger may or may not actually make use of both formatters, # but it is safe to always set both even if only one of them is used. 
f = _get_literal_formatter() _SOUT_HANDLER.setFormatter(f) _FILE_HANDLER.setFormatter(f) # Log the message logger.log(lvl, msg) # Restore default formatter f = _get_default_formatter() _SOUT_HANDLER.setFormatter(f) _FILE_HANDLER.setFormatter(f) return def get_logger_if_verbose(): if logging_is_verbose(): return get_default_logger() return None #------------------------------- Private -------------------------------- #evil global _LOGGER=None _FILENAME=None _DEFAULT_FORMATTER=None _LITERAL_FORMATTER=None _SOUT_HANDLER=None _FILE_HANDLER=None _APP_NAME_FOR_DEFAULT_FORMAT=os.path.split(sys.argv[0])[-1] def _set_file_logging(filename): """ Establishes a file output HANDLER for the default formater. NOTE: internal use only """ global _LOGGER, _SOUT_HANDLER, _FILENAME, _FILE_HANDLER _FILENAME=filename _FILE_HANDLER = EncodingFileHandler( filename, 'a') _FILE_HANDLER.setFormatter(_get_default_formatter()) _LOGGER.addHandler(_FILE_HANDLER) def _get_default_formatter(): """ Returns the default formatter, constructing it if needed. The default formatter formats things using Greenplum standard logging: <date>:<pid> <programname>:<hostname>:<username>:[LEVEL]:-message NOTE: internal use only """ global _DEFAULT_FORMATTER global _APP_NAME_FOR_DEFAULT_FORMAT if _DEFAULT_FORMATTER == None: formatStr = "%(asctime)s:%(programname)s:%(name)s-[%(levelname)-s]:-%(message)s" appName = _APP_NAME_FOR_DEFAULT_FORMAT.replace("%", "") # to make sure we don't produce a format string formatStr = formatStr.replace("%(programname)s", "%06d %s" % (os.getpid(), appName)) _DEFAULT_FORMATTER = logging.Formatter(formatStr,"%Y%m%d:%H:%M:%S") return _DEFAULT_FORMATTER def _get_literal_formatter(): """ Returns the literal formatter, constructing it if needed. The literal formatter formats the input string exactly as it was received. It is only used by the log_literal() function. NOTE: internal use only """ global _LITERAL_FORMATTER if _LITERAL_FORMATTER == None: _LITERAL_FORMATTER = logging.Formatter() return _LITERAL_FORMATTER def _enable_hawqadmin_logging(name,logdir=None): """ Sets up the file output handler for the default logger. - if logdir is not specified it uses ~/gpAdminLogs - the file is constructed as appended with "<logdir>/<name>_<date>.log" NOTE: internal use only """ global _FILE_HANDLER get_default_logger() now = datetime.date.today() if logdir is None: homeDir=os.path.expanduser("~") gpadmin_logs_dir=homeDir + "/hawqAdminLogs" else: gpadmin_logs_dir=logdir if not os.path.exists(gpadmin_logs_dir): # It looks like in ICM, the dir may exists. try: os.mkdir(gpadmin_logs_dir) except OSError, e: pass filename = "%s/%s_%s.log" % (gpadmin_logs_dir,name, now.strftime("%Y%m%d")) _set_file_logging(filename) return filename def _enable_gpadmin_logging(name,logdir=None): """ Sets up the file output handler for the default logger. - if logdir is not specified it uses ~/gpAdminLogs - the file is constructed as appended with "<logdir>/<name>_<date>.log" NOTE: internal use only """ global _FILE_HANDLER get_default_logger() now = datetime.date.today() if logdir is None: homeDir=os.path.expanduser("~") gpadmin_logs_dir=homeDir + "/hawqAdminLogs" else: gpadmin_logs_dir=logdir if not os.path.exists(gpadmin_logs_dir): # It looks like in ICM, the dir may exists. 
try: os.mkdir(gpadmin_logs_dir) except OSError, e: pass filename = "%s/%s_%s.log" % (gpadmin_logs_dir,name, now.strftime("%Y%m%d")) _set_file_logging(filename) class EncodingFileHandler(logging.FileHandler): """This handler makes sure that the encoding of the message is utf-8 before passing it along to the FileHandler. This will prevent encode/decode errors later on.""" def __init__(self, filename, mode='a', encoding=None, delay=0): logging.FileHandler.__init__(self, filename, mode, encoding, delay) def emit(self, record): if not isinstance(record.msg, str) and not isinstance(record.msg, unicode): record.msg = str(record.msg) if not isinstance(record.msg, unicode): record.msg = unicode(record.msg, 'utf-8') logging.FileHandler.emit(self, record) class EncodingStreamHandler(logging.StreamHandler): """This handler makes sure that the encoding of the message is utf-8 before passing it along to the StreamHandler. This will prevent encode/decode errors later on.""" def __init__(self, strm=None): logging.StreamHandler.__init__(self, strm) def emit(self, record): if not isinstance(record.msg, str) and not isinstance(record.msg, unicode): record.msg = str(record.msg) if not isinstance(record.msg, unicode): record.msg = unicode(record.msg, 'utf-8') logging.StreamHandler.emit(self, record)
hornn/interviews
tools/bin/gppylib/gplog.py
Python
apache-2.0
11449
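A minimal sketch of the setup flow described in gplog.py's module docstring; the tool name, host, user and log messages are placeholders.

from gppylib import gplog

logger = gplog.setup_tool_logging("gpexampletool", "mdw", "gpadmin")
gplog.enable_verbose_logging()

logger.info("Starting example tool")
logger.debug("Shown only because verbose logging was enabled")
print("Log file: %s" % gplog.get_logfile())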
from collections import namedtuple import logging from pyleus.json_fields_bolt import JSONFieldsBolt log = logging.getLogger('fields_bolt') Fields = namedtuple('Fields', "url timestamp") class FieldsBolt(JSONFieldsBolt): OUTPUT_FIELDS = Fields def extract_fields(self, json_dict): log.debug(json_dict) if json_dict['request']['protocol'] != "HTTP": return None timestamp = json_dict['timestamp'] url = json_dict['request']['url'] log.debug("{0} {1}".format(url, timestamp)) return url, timestamp if __name__ == '__main__': logging.basicConfig( level=logging.DEBUG, filename='/tmp/top_urls_fields.log', filemode='a', ) FieldsBolt().run()
ecanzonieri/pyleus
examples/top_urls/top_urls/fields.py
Python
apache-2.0
753
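A hedged sketch of what FieldsBolt.extract_fields() returns for a record shaped like the keys it reads; the sample payload is invented, and it assumes the bolt class can be instantiated outside a running Storm topology.

from top_urls.fields import FieldsBolt  # the module above

bolt = FieldsBolt()
sample = {
    "timestamp": 1408000000,
    "request": {"protocol": "HTTP", "url": "/index.html"},
}
print(bolt.extract_fields(sample))   # ('/index.html', 1408000000)

# Non-HTTP records are dropped:
sample["request"]["protocol"] = "FTP"
print(bolt.extract_fields(sample))   # None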
import logging from framework.celery_tasks.handlers import enqueue_task from website import settings logger = logging.getLogger(__name__) if settings.SEARCH_ENGINE == 'elastic': import elastic_search as search_engine else: search_engine = None logger.warn('Elastic search is not set to load') def requires_search(func): def wrapped(*args, **kwargs): if search_engine is not None and not settings.RUNNING_MIGRATION: return func(*args, **kwargs) return wrapped @requires_search def search(query, index=None, doc_type=None, raw=None): index = index or settings.ELASTIC_INDEX return search_engine.search(query, index=index, doc_type=doc_type, raw=raw) @requires_search def update_node(node, index=None, bulk=False, async=True, saved_fields=None): kwargs = { 'index': index, 'bulk': bulk } if async: node_id = node._id # We need the transaction to be committed before trying to run celery tasks. # For example, when updating a Node's privacy, is_public must be True in the # database in order for method that updates the Node's elastic search document # to run correctly. if settings.USE_CELERY: enqueue_task(search_engine.update_node_async.s(node_id=node_id, **kwargs)) else: search_engine.update_node_async(node_id=node_id, **kwargs) else: index = index or settings.ELASTIC_INDEX return search_engine.update_node(node, **kwargs) @requires_search def bulk_update_nodes(serialize, nodes, index=None): index = index or settings.ELASTIC_INDEX search_engine.bulk_update_nodes(serialize, nodes, index=index) @requires_search def delete_node(node, index=None): index = index or settings.ELASTIC_INDEX doc_type = node.project_or_component if node.is_registration: doc_type = 'registration' elif node.is_preprint: doc_type = 'preprint' search_engine.delete_doc(node._id, node, index=index, category=doc_type) @requires_search def update_contributors_async(user_id): """Async version of update_contributors above""" if settings.USE_CELERY: enqueue_task(search_engine.update_contributors_async.s(user_id)) else: search_engine.update_contributors_async(user_id) @requires_search def update_user(user, index=None, async=True): index = index or settings.ELASTIC_INDEX if async: user_id = user.id if settings.USE_CELERY: enqueue_task(search_engine.update_user_async.s(user_id, index=index)) else: search_engine.update_user_async(user_id, index=index) else: search_engine.update_user(user, index=index) @requires_search def update_file(file_, index=None, delete=False): index = index or settings.ELASTIC_INDEX search_engine.update_file(file_, index=index, delete=delete) @requires_search def update_institution(institution, index=None): index = index or settings.ELASTIC_INDEX search_engine.update_institution(institution, index=index) @requires_search def delete_all(): search_engine.delete_all() @requires_search def delete_index(index): search_engine.delete_index(index) @requires_search def create_index(index=None): index = index or settings.ELASTIC_INDEX search_engine.create_index(index=index) @requires_search def search_contributor(query, page=0, size=10, exclude=None, current_user=None): exclude = exclude or [] result = search_engine.search_contributor(query=query, page=page, size=size, exclude=exclude, current_user=current_user) return result
aaxelb/osf.io
website/search/search.py
Python
apache-2.0
3664
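Illustrative only: how callers elsewhere in the codebase would typically reach the search module above. The Elasticsearch-style query body and index name are assumptions, and the call is a no-op unless SEARCH_ENGINE is set to 'elastic'.

from website.search import search

results = search.search(
    {'query': {'match': {'title': 'climate'}}},  # assumed ES-style body
    index='osf_test',                            # placeholder index name
    doc_type='project',
)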
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Provides access to HDFS using the :py:class:`HdfsTarget`, a subclass of :py:class:`~luigi.target.Target`. You can configure what client by setting the "client" config under the "hdfs" section in the configuration, or using the ``--hdfs-client`` command line option. "hadoopcli" is the slowest, but should work out of the box. "snakebite" is the fastest, but requires Snakebite to be installed. Since the hdfs functionality is quite big in luigi, it's split into smaller files under ``luigi/contrib/hdfs/*.py``. But for the sake of convenience and API stability, everything is reexported under :py:mod:`luigi.contrib.hdfs`. """ # config.py from luigi.contrib.hdfs import config as hdfs_config hdfs = hdfs_config.hdfs load_hadoop_cmd = hdfs_config.load_hadoop_cmd get_configured_hadoop_version = hdfs_config.get_configured_hadoop_version get_configured_hdfs_client = hdfs_config.get_configured_hdfs_client tmppath = hdfs_config.tmppath # clients from luigi.contrib.hdfs import clients as hdfs_clients from luigi.contrib.hdfs import error as hdfs_error from luigi.contrib.hdfs import snakebite_client as hdfs_snakebite_client from luigi.contrib.hdfs import hadoopcli_clients as hdfs_hadoopcli_clients HDFSCliError = hdfs_error.HDFSCliError call_check = hdfs_hadoopcli_clients.HdfsClient.call_check list_path = hdfs_snakebite_client.SnakebiteHdfsClient.list_path HdfsClient = hdfs_hadoopcli_clients.HdfsClient SnakebiteHdfsClient = hdfs_snakebite_client.SnakebiteHdfsClient HdfsClientCdh3 = hdfs_hadoopcli_clients.HdfsClientCdh3 HdfsClientApache1 = hdfs_hadoopcli_clients.HdfsClientApache1 create_hadoopcli_client = hdfs_hadoopcli_clients.create_hadoopcli_client get_autoconfig_client = hdfs_clients.get_autoconfig_client exists = hdfs_clients.exists rename = hdfs_clients.rename remove = hdfs_clients.remove mkdir = hdfs_clients.mkdir listdir = hdfs_clients.listdir # format.py from luigi.contrib.hdfs import format as hdfs_format HdfsReadPipe = hdfs_format.HdfsReadPipe HdfsAtomicWritePipe = hdfs_format.HdfsAtomicWritePipe HdfsAtomicWriteDirPipe = hdfs_format.HdfsAtomicWriteDirPipe PlainFormat = hdfs_format.PlainFormat PlainDirFormat = hdfs_format.PlainDirFormat Plain = hdfs_format.Plain PlainDir = hdfs_format.PlainDir CompatibleHdfsFormat = hdfs_format.CompatibleHdfsFormat # target.py from luigi.contrib.hdfs import target as hdfs_target HdfsTarget = hdfs_target.HdfsTarget
dhruvg/luigi
luigi/contrib/hdfs/__init__.py
Python
apache-2.0
2996
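A small sketch of the API re-exported by luigi.contrib.hdfs above; the task and HDFS path are invented for illustration and rely on whatever client is configured in the [hdfs] section.

import luigi
from luigi.contrib import hdfs


class WriteReport(luigi.Task):
    """Writes a tiny report through the configured HDFS client."""

    def output(self):
        return hdfs.HdfsTarget('/tmp/example/report.txt')  # placeholder path

    def run(self):
        with self.output().open('w') as out:
            out.write('hello from luigi\n')


if __name__ == '__main__':
    luigi.build([WriteReport()], local_scheduler=True)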
# # Copyright 2012 eNovance <licensing@enovance.com> # Copyright 2012 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer.compute import pollsters from ceilometer.compute.pollsters import util from ceilometer import sample class InstancePollster(pollsters.BaseComputePollster): @staticmethod def get_samples(manager, cache, resources): for instance in resources: yield util.make_sample_from_instance( instance, name='instance', type=sample.TYPE_GAUGE, unit='instance', volume=1, )
eayunstack/ceilometer
ceilometer/compute/pollsters/instance.py
Python
apache-2.0
1129
# Copyright 2015-present Scikit Flow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
panmari/tensorflow
tensorflow/contrib/skflow/python/skflow/tests/__init__.py
Python
apache-2.0
620
import logging from optparse import make_option from django.core.management.base import BaseCommand, CommandError from apps.bigbang import utils class Command(BaseCommand): args = '/path/to/file1.csv /path/to/file2.csv ...' help = 'For creating product catalogues based on a CSV file' option_list = BaseCommand.option_list + ( make_option('--class', dest='product_class', help='Product class'),) def handle(self, *args, **options): logger = self._get_logger() if not args: raise CommandError("Please select a CSV file to import") product_class = options['product_class'] if not product_class: raise CommandError("Please specify a product class name") logger.info("Starting %s catalogue import", product_class) importer = utils.Importer(logger) for file_path in args: logger.info(" - Importing records from '%s'" % file_path) importer.handle(product_class, file_path) def _get_logger(self): logger = logging.getLogger(__file__) stream = logging.StreamHandler(self.stdout) logger.addHandler(stream) logger.setLevel(logging.DEBUG) return logger
elliotthill/django-oscar
sites/demo/apps/bigbang/management/commands/create_products.py
Python
bsd-3-clause
1243
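Equivalent programmatic invocation of the management command above, mirroring `python manage.py create_products --class=Books books.csv`; the CSV path and product class name are placeholders.

from django.core.management import call_command

call_command('create_products', 'books.csv', product_class='Books')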
# Copyright 2019 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import csv import json import logging import os import subprocess import tempfile import threading from collections import namedtuple from core.perfetto_binary_roller import binary_deps_manager from py_utils import tempfile_ext from tracing.value import histogram_set from tracing.value.histogram import Histogram TP_BINARY_NAME = 'trace_processor_shell' EXPORT_JSON_QUERY_TEMPLATE = 'select export_json(%s)\n' METRICS_PATH = os.path.realpath(os.path.join(os.path.dirname(__file__), 'metrics')) POWER_PROFILE_SQL = 'power_profile.sql' MetricFiles = namedtuple('MetricFiles', ('sql', 'proto', 'internal_metric')) class InvalidTraceProcessorOutput(Exception): pass # These will be set to respective paths once the files have been fetched # to avoid downloading several times during one Results Processor run. _fetched_trace_processor = None _fetched_power_profile = None _fetch_lock = threading.Lock() def _SqlString(s): """Produce a valid SQL string constant.""" return "'%s'" % s.replace("'", "''") def _EnsureTraceProcessor(trace_processor_path): global _fetched_trace_processor if trace_processor_path is None: with _fetch_lock: if not _fetched_trace_processor: _fetched_trace_processor = binary_deps_manager.FetchHostBinary( TP_BINARY_NAME) logging.info('Trace processor binary downloaded to %s', _fetched_trace_processor) trace_processor_path = _fetched_trace_processor if not os.path.isfile(trace_processor_path): raise RuntimeError("Can't find trace processor executable at %s" % trace_processor_path) return trace_processor_path def _EnsurePowerProfile(): global _fetched_power_profile with _fetch_lock: if not _fetched_power_profile: _fetched_power_profile = binary_deps_manager.FetchDataFile( POWER_PROFILE_SQL) logging.info('Device power profiles downloaded to %s', _fetched_power_profile) return _fetched_power_profile def _RunTraceProcessor(*args): """Run trace processor shell with given command line arguments.""" p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() stdout = stdout.decode('utf-8') stderr = stderr.decode('utf-8') if p.returncode == 0: return stdout else: raise RuntimeError( 'Running trace processor failed. Command line:\n%s\nStderr:\n%s\n' % (' '.join(args), stderr)) def _CreateMetricFiles(metric_name): # Currently assuming all metric files live in tbmv3/metrics directory unless # the metrics are compiled into trace processor. We will revise this decision # later. sql_file = os.path.join(METRICS_PATH, metric_name + '.sql') proto_file = os.path.join(METRICS_PATH, metric_name + '.proto') internal_metric = False if not (os.path.isfile(sql_file) and os.path.isfile(proto_file)): # Metric files not found - metric may be compiled into trace processor. internal_metric = True return MetricFiles(sql=sql_file, proto=proto_file, internal_metric=internal_metric) def _ScopedHistogramName(metric_name, histogram_name): """Returns scoped histogram name by preprending metric name. This is useful for avoiding histogram name collision. The '_metric' suffix of the metric name is dropped from scoped name. Example: _ScopedHistogramName("console_error_metric", "js_errors") => "console_error::js_errors" """ metric_suffix = '_metric' suffix_length = len(metric_suffix) # TODO(crbug.com/1012687): Decide on whether metrics should always have # '_metric' suffix. 
if metric_name[-suffix_length:] == metric_suffix: scope = metric_name[:-suffix_length] else: scope = metric_name return '::'.join([scope, histogram_name]) class ProtoFieldInfo(object): def __init__(self, name, parent, repeated, field_options): self.name = name self.parent = parent self.repeated = repeated self.field_options = field_options @property def path_from_root(self): if self.parent is None: return [self] else: return self.parent.path_from_root + [self] def __repr__(self): return 'ProtoFieldInfo("%s", repeated=%s)' %(self.name, self.repeated) def _LeafFieldAnnotations(annotations, parent=None): """Yields leaf fields in the annotations tree, yielding a proto field info each time. Given the following annotation: __annotations: { a: { __field_options: { }, b: { __field_options: { unit: "count" }, __repeated: True }, c: { __field_options: { unit: "ms" } } } } It yields: ProtoFieldInfo(name="b", parent=FieldInfoForA, repeated=True, field_options={unit: "count"}) ProtoFieldInfo(name="c", parent=FieldInfoForA, repeated=False, field_options={unit: "ms"}) """ for (name, field_value) in annotations.items(): if name[:2] == "__": continue # internal fields. current_field = ProtoFieldInfo( name=name, parent=parent, repeated=field_value.get('__repeated', False), field_options=field_value.get('__field_options', {})) has_no_descendants = True for descendant in _LeafFieldAnnotations(field_value, current_field): has_no_descendants = False yield descendant if has_no_descendants: yield current_field def _PluckField(json_dict, field_path): """Returns the values of fields matching field_path from json dict. Field path is a sequence of ProtoFieldInfo starting from the root dict. Arrays are flattened along the way. For exampe, consider the following json dict: { a: { b: [ { c: 24 }, { c: 25 } ], d: 42, } } Field path (a, d) returns [42]. Field_path (a, b, c) returns [24, 25]. """ if len(field_path) == 0: return [json_dict] path_head = field_path[0] path_tail = field_path[1:] if path_head.repeated: field_values = json_dict[path_head.name] if not isinstance(field_values, list): raise InvalidTraceProcessorOutput( "Field marked as repeated but json value is not list") output = [] for field_value in field_values: output.extend(_PluckField(field_value, path_tail)) return output else: field_value = json_dict[path_head.name] if isinstance(field_value, list): raise InvalidTraceProcessorOutput( "Field not marked as repeated but json value is list") return _PluckField(field_value, path_tail) def RunQuery(trace_processor_path, trace_file, sql_command): """Run SQL query on trace using trace processor and return result. Args: trace_processor_path: path to the trace_processor executable. trace_file: path to the trace file. sql_command: string SQL command Returns: SQL query output table when executed on the proto trace as a list of dictionaries. Each item in the list represents a row in the output table. All values in the dictionary are represented as strings. Null is represented as None. Booleans are represented as '0' and '1'. Empty queries or rows return []. 
For example, for a SQL output table that looks like this: | "string_col" | "long_col" | "double_col" | "bool_col" | "maybe_null_col" | "StringVal1" | 123 | 12.34 | true | "[NULL]" | "StringVal2" | 124 | 34.56 | false | 25 | "StringVal3" | 125 | 68.92 | false | "[NULL]" The list of dictionaries result will look like this: [{ 'string_col': 'StringVal1', 'long_col': '123', 'double_col': '12.34', 'bool_col': '1', 'maybe_null_col': None, }, { 'string_col': 'StringVal2', 'long_col': '124', 'double_col': '34.56', 'bool_col': '0', 'maybe_null_col': '25', }, { 'string_col': 'StringVal3', 'long_col': '125', 'double_col': '68.92', 'bool_col': '0', 'maybe_null_col': None, }] """ trace_processor_path = _EnsureTraceProcessor(trace_processor_path) # Write query to temporary file because trace processor accepts # SQL query in a file. tp_output = None with tempfile_ext.NamedTemporaryFile(mode="w+") as sql_file: sql_file.write(sql_command) sql_file.close() # Run Trace Processor command_args = [ trace_processor_path, '--query-file', sql_file.name, trace_file, ] tp_output = _RunTraceProcessor(*command_args) # Trace Processor returns output string in csv format. Write # string to temporary file because reader accepts csv files. # Parse csv file into list of dictionaries because DictReader # object inconveniently requires open csv file to access data. csv_output = [] # tempfile creates and opens the file with tempfile.NamedTemporaryFile(mode="w+") as csv_file: csv_file.write(tp_output) csv_file.flush() csv_file.seek(0) csv_reader = csv.DictReader(csv_file) for row in csv_reader: # CSV file represents null values as the string '[NULL]'. # Parse these null values to None type. row_parsed = dict(row) for key, val in row_parsed.items(): if val == '[NULL]': row_parsed[key] = None csv_output.append(row_parsed) return csv_output def RunMetrics(trace_processor_path, trace_file, metric_names, fetch_power_profile=False, retain_all_samples=False): """Run TBMv3 metrics using trace processor. Args: trace_processor_path: path to the trace_processor executable. trace_file: path to the trace file. metric_names: a list of metric names (the corresponding files must exist in tbmv3/metrics directory). Returns: A HistogramSet with metric results. """ trace_processor_path = _EnsureTraceProcessor(trace_processor_path) metric_name_args = [] for metric_name in metric_names: metric_files = _CreateMetricFiles(metric_name) if metric_files.internal_metric: metric_name_args.append(metric_name) else: metric_name_args.append(metric_files.sql) command_args = [ trace_processor_path, '--run-metrics', ','.join(metric_name_args), '--metrics-output', 'json', trace_file, ] if fetch_power_profile: command_args[1:1] = ['--pre-metrics', _EnsurePowerProfile()] output = _RunTraceProcessor(*command_args) measurements = json.loads(output) histograms = histogram_set.HistogramSet() root_annotations = measurements.get('__annotations', {}) for metric_name in metric_names: full_metric_name = 'perfetto.protos.' 
+ metric_name annotations = root_annotations.get(full_metric_name, None) metric_proto = measurements.get(full_metric_name, None) if metric_proto is None: logging.warn("Metric not found in the output: %s", metric_name) continue elif annotations is None: logging.info("Skipping metric %s because it has no field with unit.", metric_name) continue for field in _LeafFieldAnnotations(annotations): unit = field.field_options.get('unit', None) if unit is None: logging.debug('Skipping field %s to histograms because it has no unit', field.name) continue histogram_name = ':'.join([field.name for field in field.path_from_root]) samples = _PluckField(metric_proto, field.path_from_root) scoped_histogram_name = _ScopedHistogramName(metric_name, histogram_name) hist = Histogram(scoped_histogram_name, unit) if retain_all_samples: hist.max_num_sample_values = float('inf') for sample in samples: hist.AddSample(sample) histograms.AddHistogram(hist) return histograms def RunMetric(trace_processor_path, trace_file, metric_name, fetch_power_profile=False, retain_all_samples=False): return RunMetrics(trace_processor_path, trace_file, [metric_name], fetch_power_profile=fetch_power_profile, retain_all_samples=retain_all_samples) def ConvertProtoTraceToJson(trace_processor_path, proto_file, json_path): """Convert proto trace to json using trace processor. Args: trace_processor_path: path to the trace_processor executable. proto_file: path to the proto trace file. json_path: path to the output file. Returns: Output path. """ trace_processor_path = _EnsureTraceProcessor(trace_processor_path) with tempfile_ext.NamedTemporaryFile(mode='w+') as query_file: query_file.write(EXPORT_JSON_QUERY_TEMPLATE % _SqlString(json_path)) query_file.close() _RunTraceProcessor( trace_processor_path, '-q', query_file.name, proto_file, ) return json_path
scheib/chromium
tools/perf/core/tbmv3/trace_processor.py
Python
bsd-3-clause
13087
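Illustrative use of the helpers in trace_processor.py, based on their docstrings; the trace path is a placeholder, and passing None for trace_processor_path relies on the automatic binary download in _EnsureTraceProcessor().

from core.tbmv3 import trace_processor

rows = trace_processor.RunQuery(
    None, '/tmp/example_trace.pb', 'select name, dur from slice limit 3')
for row in rows:
    print(row['name'], row['dur'])   # all values come back as strings

histograms = trace_processor.RunMetric(
    None, '/tmp/example_trace.pb', 'console_error_metric')
for hist in histograms:
    print(hist.name, hist.unit, hist.average)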
############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from osv import fields, osv from tools.translate import _ import netsvc class sale_make_invoice(osv.osv_memory): _name = "sale.make.invoice" _description = "Sales Make Invoice" _columns = { 'grouped': fields.boolean('Group the invoices', help='Check the box to group the invoices for the same customers'), 'invoice_date': fields.date('Invoice Date'), } _defaults = { 'grouped': False } def view_init(self, cr, uid, fields_list, context=None): if context is None: context = {} record_id = context and context.get('active_id', False) order = self.pool.get('sale.order').browse(cr, uid, record_id, context=context) if order.state == 'draft': raise osv.except_osv(_('Warning !'),'You can not create invoice when sales order is not confirmed.') return False def make_invoices(self, cr, uid, ids, context=None): order_obj = self.pool.get('sale.order') mod_obj = self.pool.get('ir.model.data') act_obj = self.pool.get('ir.actions.act_window') newinv = [] if context is None: context = {} data = self.read(cr, uid, ids)[0] order_obj.action_invoice_create(cr, uid, context.get(('active_ids'), []), data['grouped'], date_inv = data['invoice_date']) wf_service = netsvc.LocalService("workflow") for id in context.get(('active_ids'), []): wf_service.trg_validate(uid, 'sale.order', id, 'manual_invoice', cr) for o in order_obj.browse(cr, uid, context.get(('active_ids'), []), context=context): for i in o.invoice_ids: newinv.append(i.id) result = mod_obj.get_object_reference(cr, uid, 'account', 'action_invoice_tree1') id = result and result[1] or False result = act_obj.read(cr, uid, [id], context=context)[0] result['domain'] = "[('id','in', ["+','.join(map(str,newinv))+"])]" return result sale_make_invoice() # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
daniel2101/fleosa
sale/wizard/sale_make_invoice.py
Python
gpl-3.0
3022
# ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2017, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- """TMRegion unit tests.""" import tempfile import unittest try: import capnp except ImportError: capnp = None import numpy as np from nupic.regions.tm_region import TMRegion if capnp: from nupic.regions.tm_region_capnp import TMRegionProto class TMRegionTest(unittest.TestCase): def checkTMRegionImpl(self, impl): output1 = { "bottomUpOut": np.zeros((40,)), "topDownOut": np.zeros((10,)), "activeCells": np.zeros((40,)), "predictedActiveCells": np.zeros((40,)), "anomalyScore": np.zeros((1,)), "lrnActiveStateT": np.zeros((40,)), } output2 = { "bottomUpOut": np.zeros((40,)), "topDownOut": np.zeros((10,)), "activeCells": np.zeros((40,)), "predictedActiveCells": np.zeros((40,)), "anomalyScore": np.zeros((1,)), "lrnActiveStateT": np.zeros((40,)), } a = np.zeros(10, dtype="int32") a[[1, 3, 7]] = 1 b = np.zeros(10, dtype="int32") b[[2, 4, 8]] = 1 inputA = { "bottomUpIn": a, "resetIn": np.zeros(1), "sequenceIdIn": np.zeros(1), } inputB = { "bottomUpIn": b, "resetIn": np.zeros(1), "sequenceIdIn": np.zeros(1), } region1 = TMRegion(10, 10, 4, temporalImp=impl) region1.initialize() region1.compute(inputA, output1) proto1 = TMRegionProto.new_message() region1.writeToProto(proto1) with tempfile.TemporaryFile() as f: proto1.write(f) f.seek(0) proto2 = TMRegionProto.read(f) region2 = TMRegion.readFromProto(proto2) region1.compute(inputB, output1) region2.compute(inputB, output2) self.assertTrue(np.array_equal(output1["bottomUpOut"], output2["bottomUpOut"])) self.assertTrue(np.array_equal(output1["topDownOut"], output2["topDownOut"])) self.assertTrue(np.array_equal(output1["activeCells"], output2["activeCells"])) self.assertTrue(np.array_equal(output1["predictedActiveCells"], output2["predictedActiveCells"])) self.assertTrue(np.array_equal(output1["anomalyScore"], output2["anomalyScore"])) self.assertTrue(np.array_equal(output1["lrnActiveStateT"], output2["lrnActiveStateT"])) @unittest.skipUnless( capnp, "pycapnp is not installed, skipping serialization test.") def testWriteReadPy(self): self.checkTMRegionImpl("py") @unittest.skipUnless( capnp, "pycapnp is not installed, skipping serialization test.") def testWriteReadCpp(self): self.checkTMRegionImpl("cpp") @unittest.skipUnless( capnp, "pycapnp is not installed, skipping serialization test.") def testWriteReadTMPy(self): self.checkTMRegionImpl("tm_py") @unittest.skipUnless( capnp, "pycapnp is not installed, skipping serialization test.") def testWriteReadTMCpp(self): self.checkTMRegionImpl("tm_cpp") if __name__ == "__main__": unittest.main()
subutai/nupic
tests/unit/nupic/regions/tm_region_test.py
Python
agpl-3.0
4021
# Copyright 2013, Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can # be found in the LICENSE file. """A Vitess keyspace represents a sharded MySQL database.""" import struct from vtdb import keyrange_constants pack_keyspace_id = struct.Struct('!Q').pack class Keyspace(object): """Represent the SrvKeyspace object from the toposerver. Provide functions to extract sharding information from the same. """ # load this object from a SrvKeyspace object generated by vt def __init__(self, name, data): self.name = name self.partitions = data.get('Partitions', {}) self.sharding_col_name = data.get('ShardingColumnName', '') self.sharding_col_type = data.get( 'ShardingColumnType', keyrange_constants.KIT_UNSET) self.served_from = data.get('ServedFrom', None) def get_shards(self, db_type): if not db_type: raise ValueError('db_type is not set') try: return self.partitions[db_type]['ShardReferences'] except KeyError: return [] def get_shard_count(self, db_type): if not db_type: raise ValueError('db_type is not set') shards = self.get_shards(db_type) return len(shards) def get_shard_names(self, db_type): if not db_type: raise ValueError('db_type is not set') shards = self.get_shards(db_type) return [shard['Name'] for shard in shards] def keyspace_id_to_shard_name_for_db_type(self, keyspace_id, db_type): """Finds the shard for a keyspace_id. WARNING: this only works for KIT_UINT64 keyspace ids. Args: keyspace_id: A uint64 keyspace_id. db_type: Str tablet type (master, rdonly, or replica). Returns: Shard name. Raises: ValueError: On invalid keyspace_id. """ if not keyspace_id: raise ValueError('keyspace_id is not set') if not db_type: raise ValueError('db_type is not set') # Pack this into big-endian and do a byte-wise comparison. pkid = pack_keyspace_id(keyspace_id) shards = self.get_shards(db_type) for shard in shards: if 'KeyRange' not in shard or not shard['KeyRange']: # this keyrange is covering the full space return shard['Name'] if _shard_contain_kid(pkid, shard['KeyRange']['Start'], shard['KeyRange']['End']): return shard['Name'] raise ValueError( 'cannot find shard for keyspace_id %s in %s' % (keyspace_id, shards)) def _shard_contain_kid(pkid, start, end): return start <= pkid and (end == keyrange_constants.MAX_KEY or pkid < end)
theskyinflames/bpulse-go-client
vendor/github.com/youtube/vitess/py/vtdb/keyspace.py
Python
apache-2.0
2633
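A hedged sketch of constructing a Keyspace (the class above) from a minimal SrvKeyspace-style dict; the single unsharded shard below is invented for illustration.

from vtdb import keyrange_constants, keyspace

data = {
    'ShardingColumnName': 'keyspace_id',
    'ShardingColumnType': keyrange_constants.KIT_UINT64,
    'Partitions': {
        'master': {
            # A shard without a KeyRange covers the whole keyspace.
            'ShardReferences': [{'Name': '0'}],
        },
    },
}

ks = keyspace.Keyspace('test_keyspace', data)
print(ks.get_shard_names('master'))                               # ['0']
print(ks.keyspace_id_to_shard_name_for_db_type(12345, 'master'))  # '0'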
################################################################################ # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ################################################################################ from __future__ import print_function import io import os import platform import sys from distutils.command.build_ext import build_ext from shutil import copytree, copy, rmtree from setuptools import setup, Extension if sys.version_info < (3, 6): print("Python versions prior to 3.6 are not supported for PyFlink.", file=sys.stderr) sys.exit(-1) def remove_if_exists(file_path): if os.path.exists(file_path): if os.path.islink(file_path) or os.path.isfile(file_path): os.remove(file_path) else: assert os.path.isdir(file_path) rmtree(file_path) def copy_files(src_paths, output_directory): for src_path, file_mode in src_paths: if os.path.isdir(src_path): child_files = os.listdir(src_path) for child_file in child_files: dst_path = copy(os.path.join(src_path, child_file), output_directory) os.chmod(dst_path, file_mode) else: dst_path = copy(src_path, os.path.join(output_directory, os.path.basename(src_path))) os.chmod(dst_path, file_mode) def has_unsupported_tag(file_element): unsupported_tags = ['includes', 'exclueds'] for unsupported_tag in unsupported_tags: if file_element.getElementsByTagName(unsupported_tag): print('Unsupported <{0}></{1}> tag'.format(unsupported_tag, unsupported_tag)) return True return False def extracted_output_files(base_dir, file_path, output_directory): extracted_file_paths = [] from xml.dom.minidom import parse dom = parse(file_path) root_data = dom.documentElement file_elements = (root_data.getElementsByTagName("files")[0]).getElementsByTagName("file") # extracted <files><file></file></files> for file_element in file_elements: source = ((file_element.getElementsByTagName('source')[0]).childNodes[0]).data file_mode = int(((file_element.getElementsByTagName('fileMode')[0]).childNodes[0]).data, 8) try: dst = ((file_element.getElementsByTagName('outputDirectory')[0]).childNodes[0]).data if dst == output_directory: if has_unsupported_tag(file_element): sys.exit(-1) extracted_file_paths.append((os.path.join(base_dir, source), file_mode)) except IndexError: pass # extracted <fileSets><fileSet></fileSet></fileSets> file_elements = (root_data.getElementsByTagName("fileSets")[0]).getElementsByTagName("fileSet") for file_element in file_elements: source = ((file_element.getElementsByTagName('directory')[0]).childNodes[0]).data file_mode = int(((file_element.getElementsByTagName('fileMode')[0]).childNodes[0]).data, 8) try: dst = ((file_element.getElementsByTagName('outputDirectory')[0]).childNodes[0]).data if dst == output_directory: if has_unsupported_tag(file_element): sys.exit(-1) extracted_file_paths.append((os.path.join(base_dir, source), file_mode)) 
except IndexError: pass return extracted_file_paths # Currently Cython optimizing doesn't support Windows. if platform.system() == 'Windows': extensions = ([]) else: try: from Cython.Build import cythonize extensions = cythonize([ Extension( name="pyflink.fn_execution.coder_impl_fast", sources=["pyflink/fn_execution/coder_impl_fast.pyx"], include_dirs=["pyflink/fn_execution/"]), Extension( name="pyflink.fn_execution.table.aggregate_fast", sources=["pyflink/fn_execution/table/aggregate_fast.pyx"], include_dirs=["pyflink/fn_execution/table/"]), Extension( name="pyflink.fn_execution.table.window_aggregate_fast", sources=["pyflink/fn_execution/table/window_aggregate_fast.pyx"], include_dirs=["pyflink/fn_execution/table/"]), Extension( name="pyflink.fn_execution.stream_fast", sources=["pyflink/fn_execution/stream_fast.pyx"], include_dirs=["pyflink/fn_execution/"]), Extension( name="pyflink.fn_execution.beam.beam_stream_fast", sources=["pyflink/fn_execution/beam/beam_stream_fast.pyx"], include_dirs=["pyflink/fn_execution/beam"]), Extension( name="pyflink.fn_execution.beam.beam_coder_impl_fast", sources=["pyflink/fn_execution/beam/beam_coder_impl_fast.pyx"], include_dirs=["pyflink/fn_execution/beam"]), Extension( name="pyflink.fn_execution.beam.beam_operations_fast", sources=["pyflink/fn_execution/beam/beam_operations_fast.pyx"], include_dirs=["pyflink/fn_execution/beam"]), ]) except ImportError: if os.path.exists("pyflink/fn_execution/coder_impl_fast.c"): extensions = ([ Extension( name="pyflink.fn_execution.coder_impl_fast", sources=["pyflink/fn_execution/coder_impl_fast.c"], include_dirs=["pyflink/fn_execution/"]), Extension( name="pyflink.fn_execution.table.aggregate_fast", sources=["pyflink/fn_execution/table/aggregate_fast.c"], include_dirs=["pyflink/fn_execution/table/"]), Extension( name="pyflink.fn_execution.table.window_aggregate_fast", sources=["pyflink/fn_execution/table/window_aggregate_fast.c"], include_dirs=["pyflink/fn_execution/table/"]), Extension( name="pyflink.fn_execution.stream_fast", sources=["pyflink/fn_execution/stream_fast.c"], include_dirs=["pyflink/fn_execution/"]), Extension( name="pyflink.fn_execution.beam.beam_stream_fast", sources=["pyflink/fn_execution/beam/beam_stream_fast.c"], include_dirs=["pyflink/fn_execution/beam"]), Extension( name="pyflink.fn_execution.beam.beam_coder_impl_fast", sources=["pyflink/fn_execution/beam/beam_coder_impl_fast.c"], include_dirs=["pyflink/fn_execution/beam"]), Extension( name="pyflink.fn_execution.beam.beam_operations_fast", sources=["pyflink/fn_execution/beam/beam_operations_fast.c"], include_dirs=["pyflink/fn_execution/beam"]), ]) else: extensions = ([]) this_directory = os.path.abspath(os.path.dirname(__file__)) version_file = os.path.join(this_directory, 'pyflink/version.py') try: exec(open(version_file).read()) except IOError: print("Failed to load PyFlink version file for packaging. " + "'%s' not found!" 
% version_file, file=sys.stderr) sys.exit(-1) VERSION = __version__ # noqa with io.open(os.path.join(this_directory, 'README.md'), 'r', encoding='utf-8') as f: long_description = f.read() TEMP_PATH = "deps" CONF_TEMP_PATH = os.path.join(TEMP_PATH, "conf") LOG_TEMP_PATH = os.path.join(TEMP_PATH, "log") EXAMPLES_TEMP_PATH = os.path.join(TEMP_PATH, "examples") SCRIPTS_TEMP_PATH = os.path.join(TEMP_PATH, "bin") LICENSE_FILE_TEMP_PATH = os.path.join(this_directory, "LICENSE") README_FILE_TEMP_PATH = os.path.join("pyflink", "README.txt") PYFLINK_UDF_RUNNER_SH = "pyflink-udf-runner.sh" PYFLINK_UDF_RUNNER_BAT = "pyflink-udf-runner.bat" in_flink_source = os.path.isfile("../flink-java/src/main/java/org/apache/flink/api/java/" "ExecutionEnvironment.java") try: if in_flink_source: try: os.mkdir(TEMP_PATH) except: print("Temp path for symlink to parent already exists {0}".format(TEMP_PATH), file=sys.stderr) sys.exit(-1) flink_version = VERSION.replace(".dev0", "-SNAPSHOT") FLINK_HOME = os.path.abspath( "../flink-dist/target/flink-%s-bin/flink-%s" % (flink_version, flink_version)) FLINK_ROOT = os.path.abspath("..") FLINK_DIST = os.path.join(FLINK_ROOT, "flink-dist") FLINK_BIN = os.path.join(FLINK_DIST, "src/main/flink-bin") EXAMPLES_PATH = os.path.join(this_directory, "pyflink/examples") LICENSE_FILE_PATH = os.path.join(FLINK_ROOT, "LICENSE") README_FILE_PATH = os.path.join(FLINK_BIN, "README.txt") FLINK_BIN_XML_FILE = os.path.join(FLINK_BIN, '../assemblies/bin.xml') # copy conf files os.mkdir(CONF_TEMP_PATH) conf_paths = extracted_output_files(FLINK_DIST, FLINK_BIN_XML_FILE, 'conf') copy_files(conf_paths, CONF_TEMP_PATH) # copy bin files os.mkdir(SCRIPTS_TEMP_PATH) script_paths = extracted_output_files(FLINK_DIST, FLINK_BIN_XML_FILE, 'bin') copy_files(script_paths, SCRIPTS_TEMP_PATH) copy(os.path.join(this_directory, "pyflink", "bin", PYFLINK_UDF_RUNNER_SH), os.path.join(SCRIPTS_TEMP_PATH, PYFLINK_UDF_RUNNER_SH)) copy(os.path.join(this_directory, "pyflink", "bin", PYFLINK_UDF_RUNNER_BAT), os.path.join(SCRIPTS_TEMP_PATH, PYFLINK_UDF_RUNNER_BAT)) try: os.symlink(EXAMPLES_PATH, EXAMPLES_TEMP_PATH) os.symlink(LICENSE_FILE_PATH, LICENSE_FILE_TEMP_PATH) os.symlink(README_FILE_PATH, README_FILE_TEMP_PATH) except BaseException: # pylint: disable=broad-except copytree(EXAMPLES_PATH, EXAMPLES_TEMP_PATH) copy(LICENSE_FILE_PATH, LICENSE_FILE_TEMP_PATH) copy(README_FILE_PATH, README_FILE_TEMP_PATH) os.mkdir(LOG_TEMP_PATH) with open(os.path.join(LOG_TEMP_PATH, "empty.txt"), 'w') as f: f.write("This file is used to force setuptools to include the log directory. " "You can delete it at any time after installation.") else: if not os.path.isdir(SCRIPTS_TEMP_PATH): print("The flink core files are not found. 
Please make sure your installation package " "is complete, or do this in the flink-python directory of the flink source " "directory.") sys.exit(-1) if VERSION.find('dev0') != -1: apache_flink_libraries_dependency = 'apache-flink-libraries==%s' % VERSION else: split_versions = VERSION.split('.') split_versions[-1] = str(int(split_versions[-1]) + 1) NEXT_VERSION = '.'.join(split_versions) apache_flink_libraries_dependency = 'apache-flink-libraries>=%s,<%s' % \ (VERSION, NEXT_VERSION) script_names = ["pyflink-shell.sh", "find-flink-home.sh"] scripts = [os.path.join(SCRIPTS_TEMP_PATH, script) for script in script_names] scripts.append("pyflink/find_flink_home.py") PACKAGES = ['pyflink', 'pyflink.table', 'pyflink.util', 'pyflink.datastream', 'pyflink.common', 'pyflink.fn_execution', 'pyflink.fn_execution.beam', 'pyflink.fn_execution.datastream', 'pyflink.fn_execution.datastream.window', 'pyflink.fn_execution.table', 'pyflink.fn_execution.utils', 'pyflink.metrics', 'pyflink.conf', 'pyflink.log', 'pyflink.examples', 'pyflink.bin'] PACKAGE_DIR = { 'pyflink.conf': TEMP_PATH + '/conf', 'pyflink.log': TEMP_PATH + '/log', 'pyflink.examples': TEMP_PATH + '/examples', 'pyflink.bin': TEMP_PATH + '/bin'} PACKAGE_DATA = { 'pyflink': ['README.txt'], 'pyflink.conf': ['*'], 'pyflink.log': ['*'], 'pyflink.examples': ['*.py', '*/*.py'], 'pyflink.bin': ['*']} setup( name='apache-flink', version=VERSION, packages=PACKAGES, include_package_data=True, package_dir=PACKAGE_DIR, package_data=PACKAGE_DATA, scripts=scripts, url='https://flink.apache.org', license='https://www.apache.org/licenses/LICENSE-2.0', author='Apache Software Foundation', author_email='dev@flink.apache.org', python_requires='>=3.6', install_requires=['py4j==0.10.9.3', 'python-dateutil==2.8.0', 'apache-beam==2.27.0', 'cloudpickle==1.2.2', 'avro-python3>=1.8.1,!=1.9.2,<1.10.0', 'pandas>=1.0,<1.2.0', 'pyarrow>=0.15.1,<3.0.0', 'pytz>=2018.3', 'numpy>=1.14.3,<1.20', 'fastavro>=0.21.4,<0.24', 'requests>=2.26.0', 'protobuf<3.18', 'pemja==0.1.2;python_full_version >= "3.7"', apache_flink_libraries_dependency], cmdclass={'build_ext': build_ext}, tests_require=['pytest==4.4.1'], description='Apache Flink Python API', long_description=long_description, long_description_content_type='text/markdown', zip_safe=False, classifiers=[ 'Development Status :: 5 - Production/Stable', 'License :: OSI Approved :: Apache Software License', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8'], ext_modules=extensions ) finally: if in_flink_source: remove_if_exists(TEMP_PATH) remove_if_exists(LICENSE_FILE_TEMP_PATH) remove_if_exists(README_FILE_TEMP_PATH)
apache/flink
flink-python/setup.py
Python
apache-2.0
14871
import traceback from django.db import connection from django.http import HttpResponseNotAllowed from django.template import RequestContext from django.template import loader from django.middleware.locale import LocaleMiddleware from django.utils.translation.trans_real import parse_accept_lang_header class ExceptionLoggingMiddleware(object): def process_exception(self, request, exception): print(traceback.format_exc()) class HTTPResponseNotAllowedMiddleware(object): def process_response(self, request, response): if isinstance(response, HttpResponseNotAllowed): context = RequestContext(request) response.content = loader.render_to_string( "405.html", context_instance=context) return response class LocaleMiddlewareWithTweaks(LocaleMiddleware): """ Overrides LocaleMiddleware from django with: Khmer `km` language code in Accept-Language is rewritten to km-kh """ def process_request(self, request): accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '') try: codes = [code for code, r in parse_accept_lang_header(accept)] if 'km' in codes and 'km-kh' not in codes: request.META['HTTP_ACCEPT_LANGUAGE'] = accept.replace('km', 'km-kh') except: # this might fail if i18n is disabled. pass super(LocaleMiddlewareWithTweaks, self).process_request(request) class SqlLogging: def process_response(self, request, response): from sys import stdout if stdout.isatty(): for query in connection.queries: print "\033[1;31m[%s]\033[0m \033[1m%s\033[0m" % ( query['time'], " ".join(query['sql'].split())) return response
jomolinare/kobocat
onadata/libs/utils/middleware.py
Python
bsd-2-clause
1,865
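LocaleMiddlewareWithTweaks above rewrites a bare Khmer `km` entry in the Accept-Language header to `km-kh` before handing the request on to Django's LocaleMiddleware. The snippet below is a self-contained sketch of just that rewrite rule, written without Django so it runs on its own; the function name rewrite_khmer and the sample header values are assumptions for illustration, and the header parsing is a crude stand-in for Django's parse_accept_lang_header.

# Illustrative only: mimics the Accept-Language rewrite performed by
# LocaleMiddlewareWithTweaks, without importing Django or running the middleware.
def rewrite_khmer(accept_header):
    # Crude extraction of the language codes; the middleware delegates this
    # step to Django's parse_accept_lang_header.
    codes = [part.split(';')[0].strip().lower()
             for part in accept_header.split(',') if part.strip()]
    if 'km' in codes and 'km-kh' not in codes:
        # Same naive string replacement the middleware applies to the raw header.
        return accept_header.replace('km', 'km-kh')
    return accept_header

print(rewrite_khmer('km,en-US;q=0.7'))     # km-kh,en-US;q=0.7
print(rewrite_khmer('km-KH,en-US;q=0.7'))  # unchanged: no bare 'km' entry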
import errno
import os
from datetime import datetime

from django.conf import settings
from django.contrib.staticfiles.storage import CachedStaticFilesStorage
from django.core.files import storage
from django.utils import timezone


class DummyStorage(storage.Storage):
    """
    A storage class that implements get_modified_time().
    """
    def _save(self, name, content):
        return 'dummy'

    def delete(self, name):
        pass

    def exists(self, name):
        pass

    def get_modified_time(self, name):
        return datetime(1970, 1, 1, tzinfo=timezone.utc)


class PathNotImplementedStorage(storage.Storage):

    def _save(self, name, content):
        return 'dummy'

    def _path(self, name):
        return os.path.join(settings.STATIC_ROOT, name)

    def exists(self, name):
        return os.path.exists(self._path(name))

    def listdir(self, path):
        path = self._path(path)
        directories, files = [], []
        for entry in os.listdir(path):
            if os.path.isdir(os.path.join(path, entry)):
                directories.append(entry)
            else:
                files.append(entry)
        return directories, files

    def delete(self, name):
        name = self._path(name)
        if os.path.exists(name):
            try:
                os.remove(name)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise

    def path(self, name):
        raise NotImplementedError


class SimpleCachedStaticFilesStorage(CachedStaticFilesStorage):

    def file_hash(self, name, content=None):
        return 'deploy12345'


class ExtraPatternsCachedStaticFilesStorage(CachedStaticFilesStorage):
    """
    A storage class to test pattern substitutions with more than one pattern
    entry. The added pattern rewrites strings like "url(...)" to JS_URL("...").
    """
    patterns = tuple(CachedStaticFilesStorage.patterns) + (
        (
            "*.js", (
                (r"""(url\(['"]{0,1}\s*(.*?)["']{0,1}\))""", 'JS_URL("%s")'),
            ),
        ),
    )
filias/django
tests/staticfiles_tests/storage.py
Python
bsd-3-clause
2,085
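ExtraPatternsCachedStaticFilesStorage above registers an extra pattern for *.js files that turns url(...) occurrences into JS_URL("...") references. The snippet below exercises only that regular expression and template pair, not Django's hashed-URL substitution pipeline; the sample JavaScript line and variable names are invented for illustration.

# Illustrative only: the pattern/template pair registered for *.js files above.
import re

JS_URL_PATTERN = r"""(url\(['"]{0,1}\s*(.*?)["']{0,1}\))"""
JS_URL_TEMPLATE = 'JS_URL("%s")'

sample = 'var logo = url("img/logo.png");'
match = re.search(JS_URL_PATTERN, sample)
print(match.group(1))                    # url("img/logo.png")  - the full match the storage rewrites
print(JS_URL_TEMPLATE % match.group(2))  # JS_URL("img/logo.png")  - shape of the rewritten reference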
from __future__ import with_statement
import os

from django.contrib.auth.models import User
from django.contrib.auth.forms import (UserCreationForm, AuthenticationForm,
    PasswordChangeForm, SetPasswordForm, UserChangeForm, PasswordResetForm)
from django.core import mail
from django.forms.fields import Field, EmailField
from django.test import TestCase
from django.test.utils import override_settings
from django.utils.encoding import force_unicode
from django.utils import translation


class UserCreationFormTest(TestCase):

    fixtures = ['authtestdata.json']

    def test_user_already_exists(self):
        data = {
            'username': 'testclient',
            'password1': 'test123',
            'password2': 'test123',
            }
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["username"].errors,
                         [force_unicode(form.error_messages['duplicate_username'])])

    def test_invalid_data(self):
        data = {
            'username': 'jsmith!',
            'password1': 'test123',
            'password2': 'test123',
            }
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["username"].errors,
                         [force_unicode(form.fields['username'].error_messages['invalid'])])

    def test_password_verification(self):
        # The verification password is incorrect.
        data = {
            'username': 'jsmith',
            'password1': 'test123',
            'password2': 'test',
            }
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["password2"].errors,
                         [force_unicode(form.error_messages['password_mismatch'])])

    def test_both_passwords(self):
        # One (or both) passwords weren't given
        data = {'username': 'jsmith'}
        form = UserCreationForm(data)
        required_error = [force_unicode(Field.default_error_messages['required'])]
        self.assertFalse(form.is_valid())
        self.assertEqual(form['password1'].errors, required_error)
        self.assertEqual(form['password2'].errors, required_error)

        data['password2'] = 'test123'
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form['password1'].errors, required_error)

    def test_success(self):
        # The success case.
        data = {
            'username': 'jsmith@example.com',
            'password1': 'test123',
            'password2': 'test123',
            }
        form = UserCreationForm(data)
        self.assertTrue(form.is_valid())
        u = form.save()
        self.assertEqual(repr(u), '<User: jsmith@example.com>')

UserCreationFormTest = override_settings(USE_TZ=False)(UserCreationFormTest)


class AuthenticationFormTest(TestCase):

    fixtures = ['authtestdata.json']

    def test_invalid_username(self):
        # The user submits an invalid username.
        data = {
            'username': 'jsmith_does_not_exist',
            'password': 'test123',
            }
        form = AuthenticationForm(None, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.non_field_errors(),
                         [force_unicode(form.error_messages['invalid_login'])])

    def test_inactive_user(self):
        # The user is inactive.
        data = {
            'username': 'inactive',
            'password': 'password',
            }
        form = AuthenticationForm(None, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.non_field_errors(),
                         [force_unicode(form.error_messages['inactive'])])

    def test_inactive_user_i18n(self):
        with self.settings(USE_I18N=True):
            with translation.override('pt-br', deactivate=True):
                # The user is inactive.
                data = {
                    'username': 'inactive',
                    'password': 'password',
                    }
                form = AuthenticationForm(None, data)
                self.assertFalse(form.is_valid())
                self.assertEqual(form.non_field_errors(),
                                 [force_unicode(form.error_messages['inactive'])])

    def test_success(self):
        # The success case
        data = {
            'username': 'testclient',
            'password': 'password',
            }
        form = AuthenticationForm(None, data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.non_field_errors(), [])

AuthenticationFormTest = override_settings(USE_TZ=False)(AuthenticationFormTest)


class SetPasswordFormTest(TestCase):

    fixtures = ['authtestdata.json']

    def test_password_verification(self):
        # The two new passwords do not match.
        user = User.objects.get(username='testclient')
        data = {
            'new_password1': 'abc123',
            'new_password2': 'abc',
            }
        form = SetPasswordForm(user, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["new_password2"].errors,
                         [force_unicode(form.error_messages['password_mismatch'])])

    def test_success(self):
        user = User.objects.get(username='testclient')
        data = {
            'new_password1': 'abc123',
            'new_password2': 'abc123',
            }
        form = SetPasswordForm(user, data)
        self.assertTrue(form.is_valid())

SetPasswordFormTest = override_settings(USE_TZ=False)(SetPasswordFormTest)


class PasswordChangeFormTest(TestCase):

    fixtures = ['authtestdata.json']

    def test_incorrect_password(self):
        user = User.objects.get(username='testclient')
        data = {
            'old_password': 'test',
            'new_password1': 'abc123',
            'new_password2': 'abc123',
            }
        form = PasswordChangeForm(user, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["old_password"].errors,
                         [force_unicode(form.error_messages['password_incorrect'])])

    def test_password_verification(self):
        # The two new passwords do not match.
        user = User.objects.get(username='testclient')
        data = {
            'old_password': 'password',
            'new_password1': 'abc123',
            'new_password2': 'abc',
            }
        form = PasswordChangeForm(user, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["new_password2"].errors,
                         [force_unicode(form.error_messages['password_mismatch'])])

    def test_success(self):
        # The success case.
        user = User.objects.get(username='testclient')
        data = {
            'old_password': 'password',
            'new_password1': 'abc123',
            'new_password2': 'abc123',
            }
        form = PasswordChangeForm(user, data)
        self.assertTrue(form.is_valid())

    def test_field_order(self):
        # Regression test - check the order of fields:
        user = User.objects.get(username='testclient')
        self.assertEqual(PasswordChangeForm(user, {}).fields.keys(),
                         ['old_password', 'new_password1', 'new_password2'])

PasswordChangeFormTest = override_settings(USE_TZ=False)(PasswordChangeFormTest)


class UserChangeFormTest(TestCase):

    fixtures = ['authtestdata.json']

    def test_username_validity(self):
        user = User.objects.get(username='testclient')
        data = {'username': 'not valid'}
        form = UserChangeForm(data, instance=user)
        self.assertFalse(form.is_valid())
        self.assertEqual(form['username'].errors,
                         [force_unicode(form.fields['username'].error_messages['invalid'])])

    def test_bug_14242(self):
        # A regression test, introduce by adding an optimization for the
        # UserChangeForm.

        class MyUserForm(UserChangeForm):
            def __init__(self, *args, **kwargs):
                super(MyUserForm, self).__init__(*args, **kwargs)
                self.fields['groups'].help_text = 'These groups give users different permissions'

            class Meta(UserChangeForm.Meta):
                fields = ('groups',)

        # Just check we can create it
        form = MyUserForm({})

    def test_bug_17944_empty_password(self):
        user = User.objects.get(username='empty_password')
        form = UserChangeForm(instance=user)
        # Just check that no error is raised.
        form.as_table()

    def test_bug_17944_unmanageable_password(self):
        user = User.objects.get(username='unmanageable_password')
        form = UserChangeForm(instance=user)
        # Just check that no error is raised.
        form.as_table()

    def test_bug_17944_unknown_password_algorithm(self):
        user = User.objects.get(username='unknown_password')
        form = UserChangeForm(instance=user)
        # Just check that no error is raised.
        form.as_table()

UserChangeFormTest = override_settings(USE_TZ=False)(UserChangeFormTest)


class PasswordResetFormTest(TestCase):

    fixtures = ['authtestdata.json']

    def create_dummy_user(self):
        """creates a user and returns a tuple
        (user_object, username, email)
        """
        username = 'jsmith'
        email = 'jsmith@example.com'
        user = User.objects.create_user(username, email, 'test123')
        return (user, username, email)

    def test_invalid_email(self):
        data = {'email': 'not valid'}
        form = PasswordResetForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form['email'].errors,
                         [force_unicode(EmailField.default_error_messages['invalid'])])

    def test_nonexistant_email(self):
        # Test nonexistant email address
        data = {'email': 'foo@bar.com'}
        form = PasswordResetForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors,
                         {'email': [force_unicode(form.error_messages['unknown'])]})

    def test_cleaned_data(self):
        # Regression test
        (user, username, email) = self.create_dummy_user()
        data = {'email': email}
        form = PasswordResetForm(data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['email'], email)

    def test_custom_email_subject(self):
        template_path = os.path.join(os.path.dirname(__file__), 'templates')
        with self.settings(TEMPLATE_DIRS=(template_path,)):
            data = {'email': 'testclient@example.com'}
            form = PasswordResetForm(data)
            self.assertTrue(form.is_valid())
            # Since we're not providing a request object, we must provide a
            # domain_override to prevent the save operation from failing in the
            # potential case where contrib.sites is not installed. Refs #16412.
            form.save(domain_override='example.com')
            self.assertEqual(len(mail.outbox), 1)
            self.assertEqual(mail.outbox[0].subject,
                             u'Custom password reset on example.com')

    def test_bug_5605(self):
        # bug #5605, preserve the case of the user name (before the @ in the
        # email address) when creating a user.
        user = User.objects.create_user('forms_test2', 'tesT@EXAMple.com', 'test')
        self.assertEqual(user.email, 'tesT@example.com')
        user = User.objects.create_user('forms_test3', 'tesT', 'test')
        self.assertEqual(user.email, 'tesT')

    def test_inactive_user(self):
        #tests that inactive user cannot
        #receive password reset email
        (user, username, email) = self.create_dummy_user()
        user.is_active = False
        user.save()
        form = PasswordResetForm({'email': email})
        self.assertFalse(form.is_valid())

    def test_unusable_password(self):
        user = User.objects.create_user('testuser', 'test@example.com', 'test')
        data = {"email": "test@example.com"}
        form = PasswordResetForm(data)
        self.assertTrue(form.is_valid())
        user.set_unusable_password()
        user.save()
        form = PasswordResetForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["email"].errors,
                         [u"The user account associated with this e-mail address cannot reset the password."])

PasswordResetFormTest = override_settings(USE_TZ=False)(PasswordResetFormTest)
uiri/pxqz
venv/lib/python2.7/site-packages/django/contrib/auth/tests/forms.py
Python
gpl-3.0
12,516