Dataset columns:
  code        stringlengths   2 .. 1.05M
  repo_name   stringlengths   5 .. 104
  path        stringlengths   4 .. 251
  language    stringclasses   1 value
  license     stringclasses   15 values
  size        int32           2 .. 1.05M
'''OpenGL extension NV.texgen_emboss

This module customises the behaviour of the
OpenGL.raw.GL.NV.texgen_emboss to provide a more
Python-friendly API

Overview (from the spec)

    This extension provides a new texture coordinate generation mode
    suitable for multitexture-based embossing (or bump mapping) effects.

    Given two texture units, this extension generates the texture
    coordinates of a second texture unit (an odd-numbered texture unit)
    as a perturbation of a first texture unit (an even-numbered texture
    unit one less than the second texture unit).  The perturbation is
    based on the normal, tangent, and light vectors.  The normal vector
    is supplied by glNormal; the light vector is supplied as a direction
    vector to a specified OpenGL light's position; and the tangent vector
    is supplied by the second texture unit's current texture coordinate.
    The perturbation is also scaled by program-supplied scaling constants.

    If both texture units are bound to the same texture representing a
    height field, by subtracting the difference between the resulting
    two filtered texels, programs can achieve a per-pixel embossing
    effect.

The official definition of this extension is available here:
http://www.opengl.org/registry/specs/NV/texgen_emboss.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.NV.texgen_emboss import *
from OpenGL.raw.GL.NV.texgen_emboss import _EXTENSION_NAME


def glInitTexgenEmbossNV():
    '''Return boolean indicating whether this extension is available'''
    from OpenGL import extensions
    return extensions.hasGLExtension( _EXTENSION_NAME )

### END AUTOGENERATED SECTION
stack-of-tasks/rbdlpy
tutorial/lib/python2.7/site-packages/OpenGL/GL/NV/texgen_emboss.py
Python
lgpl-3.0
1,741
# coding=utf-8 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) import logging import os import posixpath from contextlib import contextmanager from twitter.common.collections import OrderedSet from pants.base.build_environment import get_buildroot from pants.base.exceptions import TaskError from pants.net.http.fetcher import Fetcher from pants.subsystem.subsystem import Subsystem from pants.util.contextutil import temporary_file from pants.util.dirutil import chmod_plus_x, safe_delete, safe_open from pants.util.osutil import get_os_id _DEFAULT_PATH_BY_ID = { ('linux', 'x86_64'): ('linux', 'x86_64'), ('linux', 'amd64'): ('linux', 'x86_64'), ('linux', 'i386'): ('linux', 'i386'), ('linux', 'i686'): ('linux', 'i386'), ('darwin', '9'): ('mac', '10.5'), ('darwin', '10'): ('mac', '10.6'), ('darwin', '11'): ('mac', '10.7'), ('darwin', '12'): ('mac', '10.8'), ('darwin', '13'): ('mac', '10.9'), ('darwin', '14'): ('mac', '10.10'), ('darwin', '15'): ('mac', '10.11'), ('darwin', '16'): ('mac', '10.12'), } logger = logging.getLogger(__name__) class BinaryUtil(object): """Wraps utility methods for finding binary executables. :API: public """ class Factory(Subsystem): """ :API: public """ options_scope = 'binaries' @classmethod def register_options(cls, register): register('--baseurls', type=list, advanced=True, default=['https://dl.bintray.com/pantsbuild/bin/build-support'], help='List of urls from which binary tools are downloaded. Urls are searched in ' 'order until the requested path is found.') register('--fetch-timeout-secs', type=int, default=30, advanced=True, help='Timeout in seconds for url reads when fetching binary tools from the ' 'repos specified by --baseurls') register('--path-by-id', type=dict, advanced=True, help='Maps output of uname for a machine to a binary search path. e.g. ' '{ ("darwin", "15"): ["mac", "10.11"]), ("linux", "arm32"): ["linux", "arm32"] }') @classmethod def create(cls): """ :API: public """ # NB: create is a class method to ~force binary fetch location to be global. options = cls.global_instance().get_options() return BinaryUtil(options.baseurls, options.fetch_timeout_secs, options.pants_bootstrapdir, options.path_by_id) class MissingMachineInfo(TaskError): """Indicates that pants was unable to map this machine's OS to a binary path prefix.""" pass class BinaryNotFound(TaskError): def __init__(self, binary, accumulated_errors): super(BinaryUtil.BinaryNotFound, self).__init__( 'Failed to fetch binary {binary} from any source: ({sources})' .format(binary=binary, sources=', '.join(accumulated_errors))) class NoBaseUrlsError(TaskError): """Indicates that no urls were specified in pants.ini.""" pass def _select_binary_base_path(self, supportdir, version, name, uname_func=None): """Calculate the base path. Exposed for associated unit tests. :param supportdir: the path used to make a path under --pants_bootstrapdir. :param version: the version number of the tool used to make a path under --pants-bootstrapdir. :param name: name of the binary to search for. (e.g 'protoc') :param uname_func: method to use to emulate os.uname() in testing :returns: Base path used to select the binary file. 
""" uname_func = uname_func or os.uname os_id = get_os_id(uname_func=uname_func) if not os_id: raise self.MissingMachineInfo('Pants has no binaries for {}'.format(' '.join(uname_func()))) try: middle_path = self._path_by_id[os_id] except KeyError: raise self.MissingMachineInfo('Update --binaries-path-by-id to find binaries for {!r}' .format(os_id)) return os.path.join(supportdir, *(middle_path + (version, name))) def __init__(self, baseurls, timeout_secs, bootstrapdir, path_by_id=None): """Creates a BinaryUtil with the given settings to define binary lookup behavior. This constructor is primarily used for testing. Production code will usually initialize an instance using the BinaryUtil.Factory.create() method. :param baseurls: URL prefixes which represent repositories of binaries. :type baseurls: list of string :param int timeout_secs: Timeout in seconds for url reads. :param string bootstrapdir: Directory to use for caching binaries. Uses this directory to search for binaries in, or download binaries to if needed. :param dict path_by_id: Additional mapping from (sysname, id) -> (os, arch) for tool directory naming """ self._baseurls = baseurls self._timeout_secs = timeout_secs self._pants_bootstrapdir = bootstrapdir self._path_by_id = _DEFAULT_PATH_BY_ID.copy() if path_by_id: self._path_by_id.update((tuple(k), tuple(v)) for k, v in path_by_id.items()) @contextmanager def _select_binary_stream(self, name, binary_path, fetcher=None): """Select a binary matching the current os and architecture. :param string binary_path: The path to the binary to fetch. :param fetcher: Optional argument used only for testing, to 'pretend' to open urls. :returns: a 'stream' to download it from a support directory. The returned 'stream' is actually a lambda function which returns the files binary contents. :raises: :class:`pants.binary_util.BinaryUtil.BinaryNotFound` if no binary of the given version and name could be found for the current platform. """ if not self._baseurls: raise self.NoBaseUrlsError( 'No urls are defined for the --pants-support-baseurls option.') downloaded_successfully = False accumulated_errors = [] for baseurl in OrderedSet(self._baseurls): # De-dup URLS: we only want to try each URL once. url = posixpath.join(baseurl, binary_path) logger.info('Attempting to fetch {name} binary from: {url} ...'.format(name=name, url=url)) try: with temporary_file() as dest: fetcher = fetcher or Fetcher(get_buildroot()) fetcher.download(url, listener=Fetcher.ProgressListener(), path_or_fd=dest, timeout_secs=self._timeout_secs) logger.info('Fetched {name} binary from: {url} .'.format(name=name, url=url)) downloaded_successfully = True dest.seek(0) yield lambda: dest.read() break except (IOError, Fetcher.Error, ValueError) as e: accumulated_errors.append('Failed to fetch binary from {url}: {error}' .format(url=url, error=e)) if not downloaded_successfully: raise self.BinaryNotFound(binary_path, accumulated_errors) def select_binary(self, supportdir, version, name): """Selects a binary matching the current os and architecture. :param string supportdir: The path the `name` binaries are stored under. :param string version: The version number of the binary to select. :param string name: The name of the binary to fetch. :raises: :class:`pants.binary_util.BinaryUtil.BinaryNotFound` if no binary of the given version and name could be found for the current platform. """ # TODO(John Sirois): finish doc of the path structure expected under base_path. 
binary_path = self._select_binary_base_path(supportdir, version, name) return self._fetch_binary(name=name, binary_path=binary_path) def select_script(self, supportdir, version, name): """Selects a platform-independent script. :param string supportdir: The path the `name` scripts are stored under. :param string version: The version number of the script to select. :param string name: The name of the script to fetch. :raises: :class:`pants.binary_util.BinaryUtil.BinaryNotFound` if no script of the given version and name could be found. """ binary_path = os.path.join(supportdir, version, name) return self._fetch_binary(name=name, binary_path=binary_path) def _fetch_binary(self, name, binary_path): bootstrap_dir = os.path.realpath(os.path.expanduser(self._pants_bootstrapdir)) bootstrapped_binary_path = os.path.join(bootstrap_dir, binary_path) if not os.path.exists(bootstrapped_binary_path): downloadpath = bootstrapped_binary_path + '~' try: with self._select_binary_stream(name, binary_path) as stream: with safe_open(downloadpath, 'wb') as bootstrapped_binary: bootstrapped_binary.write(stream()) os.rename(downloadpath, bootstrapped_binary_path) chmod_plus_x(bootstrapped_binary_path) finally: safe_delete(downloadpath) logger.debug('Selected {binary} binary bootstrapped to: {path}' .format(binary=name, path=bootstrapped_binary_path)) return bootstrapped_binary_path
pombredanne/pants
src/python/pants/binaries/binary_util.py
Python
apache-2.0
9,303
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

import os
import unittest
from unittest.mock import patch

from airflow.providers.apache.hive.sensors.hive_partition import HivePartitionSensor
from tests.providers.apache.hive import DEFAULT_DATE, TestHiveEnvironment
from tests.test_utils.mock_hooks import MockHiveMetastoreHook


@unittest.skipIf('AIRFLOW_RUNALL_TESTS' not in os.environ,
                 "Skipped because AIRFLOW_RUNALL_TESTS is not set")
@patch(
    'airflow.providers.apache.hive.sensors.hive_partition.HiveMetastoreHook',
    side_effect=MockHiveMetastoreHook,
)
class TestHivePartitionSensor(TestHiveEnvironment):
    def test_hive_partition_sensor(self, mock_hive_metastore_hook):
        op = HivePartitionSensor(
            task_id='hive_partition_check',
            table='airflow.static_babynames_partitioned',
            dag=self.dag
        )
        op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
               ignore_ti_state=True)
airbnb/airflow
tests/providers/apache/hive/sensors/test_hive_partition.py
Python
apache-2.0
1,659
""" Implementation of stack data structure in Python. """ class Stack: def __init__(self,*vargs): self.stack = list(vargs) def __repr__(self): return str(self.stack) def top(self): return self.stack[0] def push(self,elem): self.stack.insert(0,elem) def pop(self): return self.stack.pop(0) if __name__ == '__main__': stk = Stack(1,2,3,4) print stk print stk.top() stk.push(10) print stk print stk.pop() print stk
beqa2323/learntosolveit
languages/python/design_stack.py
Python
bsd-3-clause
507
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright © 2016, Continuum Analytics, Inc. All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# ----------------------------------------------------------------------------
conda/kapsel
conda_kapsel/test/__init__.py
Python
bsd-3-clause
331
#!/usr/bin/env python # # Copyright 2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """A library for managing flags-like configuration that update dynamically. """ import logging import os import re import time try: from google.appengine.api import memcache from google.appengine.ext import db from google.appengine.api import validation from google.appengine.api import yaml_object except: from google.appengine.api import memcache from google.appengine.ext import db from google.appengine.ext import validation from google.appengine.ext import yaml_object DATASTORE_DEADLINE = 1.5 RESERVED_MARKER = 'ah__conf__' NAMESPACE = '_' + RESERVED_MARKER CONFIG_KIND = '_AppEngine_Config' ACTIVE_KEY_NAME = 'active' FILENAMES = ['conf.yaml', 'conf.yml'] PARAMETERS = 'parameters' PARAMETER_NAME_REGEX = '[a-zA-Z][a-zA-Z0-9_]*' _cached_config = None class Config(db.Expando): """The representation of a config in the datastore and memcache.""" ah__conf__version = db.IntegerProperty(default=0, required=True) @classmethod def kind(cls): """Override the kind name to prevent collisions with users.""" return CONFIG_KIND def ah__conf__load_from_yaml(self, parsed_config): """Loads all the params from a YAMLConfiguration into expando fields. We set these expando properties with a special name prefix 'p_' to keep them separate from the static attributes of Config. That way we don't have to check elsewhere to make sure the user doesn't stomp on our built in properties. Args: parse_config: A YAMLConfiguration. """ for key, value in parsed_config.parameters.iteritems(): setattr(self, key, value) class _ValidParameterName(validation.Validator): """Validator to check if a value is a valid config parameter name. We only allow valid python attribute names without leading underscores that also do not collide with reserved words in the datastore models. """ def __init__(self): self.regex = validation.Regex(PARAMETER_NAME_REGEX) def Validate(self, value, key): """Check that all parameter names are valid. This is used as a validator when parsing conf.yaml. Args: value: the value to check. key: A description of the context for which this value is being validated. Returns: The validated value. """ value = self.regex.Validate(value, key) try: db.check_reserved_word(value) except db.ReservedWordError: raise validation.ValidationError( 'The config parameter name %.100r is reserved by db.Model see: ' 'https://developers.google.com/appengine/docs/python/datastore/' 'modelclass#Disallowed_Property_Names for details.' % value) if value.startswith(RESERVED_MARKER): raise validation.ValidationError( 'The config parameter name %.100r is reserved, as are all names ' 'beginning with \'%s\', please choose a different name.' % ( value, RESERVED_MARKER)) return value class _Scalar(validation.Validator): """Validator to check if a value is a simple scalar type. We only allow scalars that are well supported by both the datastore and YAML. 
""" ALLOWED_PARAMETER_VALUE_TYPES = frozenset( [bool, int, long, float, str, unicode]) def Validate(self, value, key): """Check that all parameters are scalar values. This is used as a validator when parsing conf.yaml Args: value: the value to check. key: the name of parameter corresponding to this value. Returns: We just return value unchanged. """ if type(value) not in self.ALLOWED_PARAMETER_VALUE_TYPES: raise validation.ValidationError( 'Expected scalar value for parameter: %s, but found %.100r which ' 'is type %s' % (key, value, type(value).__name__)) return value class _ParameterDict(validation.ValidatedDict): """This class validates the parameters dictionary in YAMLConfiguration. Keys must look like non-private python identifiers and values must be a supported scalar. See the class comment for YAMLConfiguration. """ KEY_VALIDATOR = _ValidParameterName() VALUE_VALIDATOR = _Scalar() class YAMLConfiguration(validation.Validated): """This class describes the structure of a conf.yaml file. At the top level the file should have a params attribue which is a mapping from strings to scalars. For example: parameters: background_color: 'red' message_size: 1024 boolean_valued_param: true """ ATTRIBUTES = {PARAMETERS: _ParameterDict} def LoadSingleConf(stream): """Load a conf.yaml file or string and return a YAMLConfiguration object. Args: stream: a file object corresponding to a conf.yaml file, or its contents as a string. Returns: A YAMLConfiguration instance """ return yaml_object.BuildSingleObject(YAMLConfiguration, stream) def _find_yaml_path(): """Traverse directory trees to find conf.yaml file. Begins with the current working direcotry and then moves up the directory structure until the file is found.. Returns: the path of conf.yaml file or None if not found. """ current, last = os.getcwd(), None while current != last: for yaml_name in FILENAMES: yaml_path = os.path.join(current, yaml_name) if os.path.exists(yaml_path): return yaml_path last = current current, last = os.path.dirname(current), current return None def _fetch_from_local_file(pathfinder=_find_yaml_path, fileopener=open): """Get the configuration that was uploaded with this version. Args: pathfinder: a callable to use for finding the path of the conf.yaml file. This is only for use in testing. fileopener: a callable to use for opening a named file. This is only for use in testing. Returns: A config class instance for the options that were uploaded. If there is no config file, return None """ yaml_path = pathfinder() if yaml_path: config = Config() config.ah__conf__load_from_yaml(LoadSingleConf(fileopener(yaml_path))) logging.debug('Loaded conf parameters from conf.yaml.') return config return None def _get_active_config_key(app_version): """Generate the key for the active config record belonging to app_version. Args: app_version: the major version you want configuration data for. Returns: The key for the active Config record for the given app_version. """ return db.Key.from_path( CONFIG_KIND, '%s/%s' % (app_version, ACTIVE_KEY_NAME), namespace=NAMESPACE) def _fetch_latest_from_datastore(app_version): """Get the latest configuration data for this app-version from the datastore. Args: app_version: the major version you want configuration data for. Side Effects: We populate memcache with whatever we find in the datastore. Returns: A config class instance for most recently set options or None if the query could not complete due to a datastore exception. 
""" rpc = db.create_rpc(deadline=DATASTORE_DEADLINE, read_policy=db.EVENTUAL_CONSISTENCY) key = _get_active_config_key(app_version) config = None try: config = Config.get(key, rpc=rpc) logging.debug('Loaded most recent conf data from datastore.') except: logging.warning('Tried but failed to fetch latest conf data from the ' 'datastore.') if config: memcache.set(app_version, db.model_to_protobuf(config).Encode(), namespace=NAMESPACE) logging.debug('Wrote most recent conf data into memcache.') return config def _fetch_latest_from_memcache(app_version): """Get the latest configuration data for this app-version from memcache. Args: app_version: the major version you want configuration data for. Returns: A Config class instance for most recently set options or None if none could be found in memcache. """ proto_string = memcache.get(app_version, namespace=NAMESPACE) if proto_string: logging.debug('Loaded most recent conf data from memcache.') return db.model_from_protobuf(proto_string) logging.debug('Tried to load conf data from memcache, but found nothing.') return None def _inspect_environment(): """Return relevant information from the cgi environment. This is mostly split out to simplify testing. Returns: A tuple: (app_version, conf_version, development) app_version: the major version of the current application. conf_version: the current configuration version. development: a boolean, True if we're running under devappserver. """ app_version = os.environ['CURRENT_VERSION_ID'].rsplit('.', 1)[0] conf_version = int(os.environ.get('CURRENT_CONFIGURATION_VERSION', '0')) development = os.environ.get('SERVER_SOFTWARE', '').startswith('Development/') return (app_version, conf_version, development) def refresh(): """Update the local config cache from memcache/datastore. Normally configuration parameters are only refreshed at the start of a new request. If you have a very long running request, or you just need the freshest data for some reason, you can call this function to force a refresh. """ app_version, _, _ = _inspect_environment() global _cached_config new_config = _fetch_latest_from_memcache(app_version) if not new_config: new_config = _fetch_latest_from_datastore(app_version) if new_config: _cached_config = new_config def _new_request(): """Test if this is the first call to this function in the current request. This function will return True exactly once for each request Subsequent calls in the same request will return False. Returns: True if this is the first call in a given request, False otherwise. """ if RESERVED_MARKER in os.environ: return False os.environ[RESERVED_MARKER] = RESERVED_MARKER return True def _get_config(): """Check if the current cached config is stale, and if so update it.""" app_version, current_config_version, development = _inspect_environment() global _cached_config if (development and _new_request()) or not _cached_config: _cached_config = _fetch_from_local_file() or Config() if _cached_config.ah__conf__version < current_config_version: newconfig = _fetch_latest_from_memcache(app_version) if not newconfig or newconfig.ah__conf__version < current_config_version: newconfig = _fetch_latest_from_datastore(app_version) _cached_config = newconfig or _cached_config return _cached_config def get(name, default=None): """Get the value of a configuration parameter. This function is guaranteed to return the same value for every call during a single request. Args: name: The name of the configuration parameter you want a value for. 
default: A default value to return if the named parameter doesn't exist. Returns: The string value of the configuration parameter. """ return getattr(_get_config(), name, default) def get_all(): """Return an object with an attribute for each conf parameter. Returns: An object with an attribute for each conf parameter. """ return _get_config()
ychen820/microblog
y/google-cloud-sdk/platform/google_appengine/google/appengine/api/conf.py
Python
bsd-3-clause
11,771
# Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.

"""PyCrypto AES implementation."""

from .cryptomath import *
from .aes import *

if pycryptoLoaded:
    import Crypto.Cipher.AES

    def new(key, mode, IV):
        return PyCrypto_AES(key, mode, IV)

    class PyCrypto_AES(AES):

        def __init__(self, key, mode, IV):
            AES.__init__(self, key, mode, IV, "pycrypto")
            key = bytes(key)
            IV = bytes(IV)
            self.context = Crypto.Cipher.AES.new(key, mode, IV)

        def encrypt(self, plaintext):
            plaintext = bytes(plaintext)
            return bytearray(self.context.encrypt(plaintext))

        def decrypt(self, ciphertext):
            ciphertext = bytes(ciphertext)
            return bytearray(self.context.decrypt(ciphertext))
rebolinho/liveit.repository
script.video.F4mProxy/lib/f4mUtils/pycrypto_aes.py
Python
gpl-2.0
869
import logging
import os

from autotest.client.shared import error, utils
from virttest import data_dir, utils_test


def umount_fs(mountpoint):
    if os.path.ismount(mountpoint):
        result = utils.run("umount -l %s" % mountpoint, ignore_status=True)
        if result.exit_status:
            logging.debug("Umount %s failed", mountpoint)
            return False
    logging.debug("Umount %s successfully", mountpoint)
    return True


def run(test, params, env):
    """
    Test libguestfs tool guestmount.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    start_vm = "yes" == params.get("start_vm", "no")
    if vm.is_alive() and not start_vm:
        vm.destroy()
    elif vm.is_dead() and start_vm:
        vm.start()

    # Create a file to vm with guestmount
    content = "This is file for guestmount test."
    path = params.get("gm_tempfile", "/home/gm_tmp")
    mountpoint = os.path.join(data_dir.get_tmp_dir(), "mountpoint")
    status_error = "yes" == params.get("status_error", "yes")
    readonly = "no" == params.get("gm_readonly", "no")
    special_mount = "yes" == params.get("gm_mount", "no")
    vt = utils_test.libguestfs.VirtTools(vm, params)

    vm_ref = params.get("gm_vm_ref")
    is_disk = "yes" == params.get("gm_is_disk", "no")
    # Automatically get disk if no disk specified.
    if is_disk and vm_ref is None:
        vm_ref = utils_test.libguestfs.get_primary_disk(vm)

    if special_mount:
        # Get root filesystem before test
        params['libvirt_domain'] = params.get("main_vm")
        params['gf_inspector'] = True
        gf = utils_test.libguestfs.GuestfishTools(params)
        roots, rootfs = gf.get_root()
        gf.close_session()
        if roots is False:
            raise error.TestError("Can not get root filesystem "
                                  "in guestfish before test")
        logging.info("Root filesystem is:%s", rootfs)
        params['special_mountpoints'] = [rootfs]

    writes, writeo = vt.write_file_with_guestmount(mountpoint, path,
                                                   content, vm_ref)
    if umount_fs(mountpoint) is False:
        logging.error("Umount vm's filesystem failed.")

    if status_error:
        if writes:
            if readonly:
                raise error.TestFail("Write file to readonly mounted "
                                     "filesystem successfully.Not expected.")
            else:
                raise error.TestFail("Write file with guestmount "
                                     "successfully.Not expected.")
    else:
        if not writes:
            raise error.TestFail("Write file to mounted filesystem failed.")
liuzzfnst/tp-libvirt
libguestfs/tests/guestmount.py
Python
gpl-2.0
2,688
from django.conf.urls import patterns, include, url from misago.threads.views.privatethreads import PrivateThreadsView urlpatterns = patterns('', url(r'^private-threads/$', PrivateThreadsView.as_view(), name='private_threads'), url(r'^private-threads/(?P<page>\d+)/$', PrivateThreadsView.as_view(), name='private_threads'), url(r'^private-threads/sort-(?P<sort>[\w-]+)/$', PrivateThreadsView.as_view(), name='private_threads'), url(r'^private-threads/sort-(?P<sort>[\w-]+)/(?P<page>\d+)/$', PrivateThreadsView.as_view(), name='private_threads'), url(r'^private-threads/show-(?P<show>[\w-]+)/$', PrivateThreadsView.as_view(), name='private_threads'), url(r'^private-threads/show-(?P<show>[\w-]+)/(?P<page>\d+)/$', PrivateThreadsView.as_view(), name='private_threads'), url(r'^private-threads/sort-(?P<sort>[\w-]+)/show-(?P<show>[\w-]+)/$', PrivateThreadsView.as_view(), name='private_threads'), url(r'^private-threads/sort-(?P<sort>[\w-]+)/show-(?P<show>[\w-]+)/(?P<page>\d+)/$', PrivateThreadsView.as_view(), name='private_threads'), ) # thread view from misago.threads.views.privatethreads import ThreadView urlpatterns += patterns('', url(r'^private-thread/(?P<thread_slug>[\w\d-]+)-(?P<thread_id>\d+)/$', ThreadView.as_view(), name='private_thread'), url(r'^private-thread/(?P<thread_slug>[\w\d-]+)-(?P<thread_id>\d+)/(?P<page>\d+)/$', ThreadView.as_view(), name='private_thread'), ) # goto views from misago.threads.views.privatethreads import (GotoLastView, GotoNewView, GotoPostView) urlpatterns += patterns('', url(r'^private-thread/(?P<thread_slug>[\w\d-]+)-(?P<thread_id>\d+)/last/$', GotoLastView.as_view(), name='private_thread_last'), url(r'^private-thread/(?P<thread_slug>[\w\d-]+)-(?P<thread_id>\d+)/new/$', GotoNewView.as_view(), name='private_thread_new'), url(r'^private-thread/(?P<thread_slug>[\w\d-]+)-(?P<thread_id>\d+)/post-(?P<post_id>\d+)/$', GotoPostView.as_view(), name='private_thread_post'), ) # reported posts views from misago.threads.views.privatethreads import ReportedPostsListView urlpatterns += patterns('', url(r'^private-thread/(?P<thread_slug>[\w\d-]+)-(?P<thread_id>\d+)/reported-posts/$', ReportedPostsListView.as_view(), name='private_thread_reported'), ) # participants views from misago.threads.views.privatethreads import (ThreadParticipantsView, EditThreadParticipantsView, AddThreadParticipantsView, RemoveThreadParticipantView, LeaveThreadView) urlpatterns += patterns('', url(r'^private-thread/(?P<thread_slug>[\w\d-]+)-(?P<thread_id>\d+)/participants/$', ThreadParticipantsView.as_view(), name='private_thread_participants'), url(r'^private-thread/(?P<thread_slug>[\w\d-]+)-(?P<thread_id>\d+)/edit-participants/$', EditThreadParticipantsView.as_view(), name='private_thread_edit_participants'), url(r'^private-thread/(?P<thread_slug>[\w\d-]+)-(?P<thread_id>\d+)/remove-participant/(?P<user_id>\d+)/$', RemoveThreadParticipantView.as_view(), name='private_thread_remove_participant'), url(r'^private-thread/(?P<thread_slug>[\w\d-]+)-(?P<thread_id>\d+)/add-participants/$', AddThreadParticipantsView.as_view(), name='private_thread_add_participants'), url(r'^private-thread/(?P<thread_slug>[\w\d-]+)-(?P<thread_id>\d+)/leave/$', LeaveThreadView.as_view(), name='private_thread_leave'), ) # post views from misago.threads.views.privatethreads import (QuotePostView, HidePostView, UnhidePostView, DeletePostView, ReportPostView) urlpatterns += patterns('', url(r'^private-post/(?P<post_id>\d+)/quote/$', QuotePostView.as_view(), name='quote_private_post'), url(r'^private-post/(?P<post_id>\d+)/unhide/$', 
UnhidePostView.as_view(), name='unhide_private_post'), url(r'^private-post/(?P<post_id>\d+)/hide/$', HidePostView.as_view(), name='hide_private_post'), url(r'^private-post/(?P<post_id>\d+)/delete/$', DeletePostView.as_view(), name='delete_private_post'), url(r'^private-post/(?P<post_id>\d+)/report/$', ReportPostView.as_view(), name='report_private_post'), ) # events view from misago.threads.views.privatethreads import EventsView urlpatterns += patterns('', url(r'^edit-private-event/(?P<event_id>\d+)/$', EventsView.as_view(), name='edit_private_event'), ) # posting views from misago.threads.views.privatethreads import PostingView urlpatterns += patterns('', url(r'^start-private-thread/$', PostingView.as_view(), name='start_private_thread'), url(r'^reply-private-thread/(?P<thread_id>\d+)/$', PostingView.as_view(), name='reply_private_thread'), url(r'^edit-private_post/(?P<thread_id>\d+)/(?P<post_id>\d+)/edit/$', PostingView.as_view(), name='edit_private_post'), )
390910131/Misago
misago/threads/urls/privatethreads.py
Python
gpl-2.0
5,045
# -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2016-11-04 16:36
from __future__ import unicode_literals

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('pootle_app', '0014_set_directory_tp_path'),
    ]

    operations = [
        migrations.AlterIndexTogether(
            name='directory',
            index_together=set([('obsolete', 'tp', 'tp_path'), ('obsolete', 'pootle_path')]),
        ),
    ]
claudep/pootle
pootle/apps/pootle_app/migrations/0015_add_tp_path_idx.py
Python
gpl-3.0
470
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse_lazy
from django.utils.decorators import method_decorator
from django.views.generic import (DetailView, ListView)
from django.views.generic.edit import (CreateView, DeleteView)

from .forms import CredentialsForm
from .models import Credentials


class LoginRequiredMixin(object):
    """
    View mixin which requires that the user is authenticated.
    """
    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        return super(LoginRequiredMixin, self).dispatch(*args, **kwargs)


class CredentialsList(LoginRequiredMixin, ListView):
    model = Credentials

    def get_queryset(self):
        return Credentials.objects.filter(owner=self.request.user).order_by('-created')


class CredentialsCreate(LoginRequiredMixin, CreateView):
    model = Credentials
    form_class = CredentialsForm
    success_url = reverse_lazy('credentials-list')

    def form_valid(self, form):
        form.instance.owner = self.request.user
        return super(CredentialsCreate, self).form_valid(form)


class CredentialsDetail(LoginRequiredMixin, DetailView):
    model = Credentials

    def get_queryset(self):
        return Credentials.objects.filter(owner=self.request.user)


class CredentialsDelete(LoginRequiredMixin, DeleteView):
    model = Credentials
    success_url = reverse_lazy('credentials-list')
adusca/treeherder
treeherder/credentials/views.py
Python
mpl-2.0
1,507
#-*- coding: utf-8 -*- """ Group Configuration Tests. """ import json import mock import ddt from django.conf import settings from django.test.utils import override_settings from opaque_keys.edx.keys import AssetKey from opaque_keys.edx.locations import AssetLocation from contentstore.utils import reverse_course_url from contentstore.views.certificates import CERTIFICATE_SCHEMA_VERSION from contentstore.tests.utils import CourseTestCase from xmodule.contentstore.django import contentstore from xmodule.contentstore.content import StaticContent from xmodule.exceptions import NotFoundError from student.models import CourseEnrollment from student.roles import CourseInstructorRole, CourseStaffRole from student.tests.factories import UserFactory from course_modes.tests.factories import CourseModeFactory from contentstore.views.certificates import CertificateManager from django.test.utils import override_settings from contentstore.utils import get_lms_link_for_certificate_web_view from util.testing import EventTestMixin FEATURES_WITH_CERTS_ENABLED = settings.FEATURES.copy() FEATURES_WITH_CERTS_ENABLED['CERTIFICATES_HTML_VIEW'] = True CERTIFICATE_JSON = { u'name': u'Test certificate', u'description': u'Test description', u'is_active': True, u'version': CERTIFICATE_SCHEMA_VERSION, } CERTIFICATE_JSON_WITH_SIGNATORIES = { u'name': u'Test certificate', u'description': u'Test description', u'version': CERTIFICATE_SCHEMA_VERSION, u'course_title': 'Course Title Override', u'is_active': True, u'signatories': [ { "name": "Bob Smith", "title": "The DEAN.", "signature_image_path": "/c4x/test/CSS101/asset/Signature.png" } ] } # pylint: disable=no-member class HelperMethods(object): """ Mixin that provides useful methods for certificate configuration tests. """ def _create_fake_images(self, asset_keys): """ Creates fake image files for a list of asset_keys. """ for asset_key_string in asset_keys: asset_key = AssetKey.from_string(asset_key_string) content = StaticContent( asset_key, "Fake asset", "image/png", "data", ) contentstore().save(content) def _add_course_certificates(self, count=1, signatory_count=0, is_active=False): """ Create certificate for the course. """ signatories = [ { 'name': 'Name ' + str(i), 'title': 'Title ' + str(i), 'signature_image_path': '/c4x/test/CSS101/asset/Signature{}.png'.format(i), 'id': i } for i in xrange(0, signatory_count) ] # create images for signatory signatures except the last signatory for idx, signatory in enumerate(signatories): if len(signatories) > 2 and idx == len(signatories) - 1: continue else: self._create_fake_images([signatory['signature_image_path']]) certificates = [ { 'id': i, 'name': 'Name ' + str(i), 'description': 'Description ' + str(i), 'signatories': signatories, 'version': CERTIFICATE_SCHEMA_VERSION, 'is_active': is_active } for i in xrange(0, count) ] self.course.certificates = {'certificates': certificates} self.save_course() # pylint: disable=no-member class CertificatesBaseTestCase(object): """ Mixin with base test cases for the certificates. """ def _remove_ids(self, content): """ Remove ids from the response. We cannot predict IDs, because they're generated randomly. We use this method to clean up response when creating new certificate. """ certificate_id = content.pop("id") return certificate_id def test_required_fields_are_absent(self): """ Test required fields are absent. 
""" bad_jsons = [ # must have name of the certificate { u'description': 'Test description', u'version': CERTIFICATE_SCHEMA_VERSION }, # an empty json {}, ] for bad_json in bad_jsons: response = self.client.post( self._url(), data=json.dumps(bad_json), content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest" ) self.assertEqual(response.status_code, 400) self.assertNotIn("Location", response) content = json.loads(response.content) self.assertIn("error", content) def test_invalid_json(self): """ Test invalid json handling. """ # Invalid JSON. invalid_json = "{u'name': 'Test Name', u'description': 'Test description'," \ " u'version': " + str(CERTIFICATE_SCHEMA_VERSION) + ", []}" response = self.client.post( self._url(), data=invalid_json, content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest" ) self.assertEqual(response.status_code, 400) self.assertNotIn("Location", response) content = json.loads(response.content) self.assertIn("error", content) def test_certificate_data_validation(self): #Test certificate schema version json_data_1 = { u'version': 100, u'name': u'Test certificate', u'description': u'Test description' } with self.assertRaises(Exception) as context: CertificateManager.validate(json_data_1) self.assertTrue("Unsupported certificate schema version: 100. Expected version: 1." in context.exception) #Test certificate name is missing json_data_2 = { u'version': CERTIFICATE_SCHEMA_VERSION, u'description': u'Test description' } with self.assertRaises(Exception) as context: CertificateManager.validate(json_data_2) self.assertTrue('must have name of the certificate' in context.exception) @ddt.ddt @override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED) class CertificatesListHandlerTestCase(EventTestMixin, CourseTestCase, CertificatesBaseTestCase, HelperMethods): """ Test cases for certificates_list_handler. """ def setUp(self): """ Set up CertificatesListHandlerTestCase. """ super(CertificatesListHandlerTestCase, self).setUp('contentstore.views.certificates.tracker') def _url(self): """ Return url for the handler. """ return reverse_course_url('certificates.certificates_list_handler', self.course.id) def test_can_create_certificate(self): """ Test that you can create a certificate. 
""" expected = { u'version': CERTIFICATE_SCHEMA_VERSION, u'name': u'Test certificate', u'description': u'Test description', u'is_active': True, u'signatories': [] } response = self.client.ajax_post( self._url(), data=CERTIFICATE_JSON ) self.assertEqual(response.status_code, 201) self.assertIn("Location", response) content = json.loads(response.content) certificate_id = self._remove_ids(content) self.assertEqual(content, expected) self.assert_event_emitted( 'edx.certificate.configuration.created', course_id=unicode(self.course.id), configuration_id=certificate_id, ) def test_cannot_create_certificate_if_user_has_no_write_permissions(self): """ Tests user without write permissions on course should not able to create certificate """ user = UserFactory() self.client.login(username=user.username, password='test') response = self.client.ajax_post( self._url(), data=CERTIFICATE_JSON ) self.assertEqual(response.status_code, 403) @override_settings(LMS_BASE=None) def test_no_lms_base_for_certificate_web_view_link(self): test_link = get_lms_link_for_certificate_web_view( user_id=self.user.id, course_key=self.course.id, mode='honor' ) self.assertEquals(test_link, None) @override_settings(LMS_BASE="lms_base_url") def test_lms_link_for_certificate_web_view(self): test_url = "//lms_base_url/certificates/user/" \ + str(self.user.id) + "/course/" + unicode(self.course.id) + '?preview=honor' link = get_lms_link_for_certificate_web_view( user_id=self.user.id, course_key=self.course.id, mode='honor' ) self.assertEquals(link, test_url) @mock.patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': True}) def test_certificate_info_in_response(self): """ Test that certificate has been created and rendered properly. """ response = self.client.ajax_post( self._url(), data=CERTIFICATE_JSON_WITH_SIGNATORIES ) self.assertEqual(response.status_code, 201) # in html response result = self.client.get_html(self._url()) self.assertIn('Test certificate', result.content) self.assertIn('Test description', result.content) # in JSON response response = self.client.get_json(self._url()) data = json.loads(response.content) self.assertEquals(len(data), 1) self.assertEqual(data[0]['name'], 'Test certificate') self.assertEqual(data[0]['description'], 'Test description') self.assertEqual(data[0]['version'], CERTIFICATE_SCHEMA_VERSION) def test_unsupported_http_accept_header(self): """ Test if not allowed header present in request. """ response = self.client.get( self._url(), HTTP_ACCEPT="text/plain", ) self.assertEqual(response.status_code, 406) def test_certificate_unsupported_method(self): """ Unit Test: test_certificate_unsupported_method """ resp = self.client.put(self._url()) self.assertEqual(resp.status_code, 405) def test_not_permitted(self): """ Test that when user has not read access to course then permission denied exception should raised. """ test_user_client, test_user = self.create_non_staff_authed_user_client() CourseEnrollment.enroll(test_user, self.course.id) response = test_user_client.ajax_post( self._url(), data=CERTIFICATE_JSON ) self.assertEqual(response.status_code, 403) self.assertIn("error", response.content) def test_audit_course_mode_is_skipped(self): """ Tests audit course mode is skipped when rendering certificates page. 
""" CourseModeFactory.create(course_id=self.course.id) CourseModeFactory.create(course_id=self.course.id, mode_slug='verified') response = self.client.get_html( self._url(), ) self.assertEqual(response.status_code, 200) self.assertContains(response, 'verified') self.assertNotContains(response, 'audit') def test_audit_only_disables_cert(self): """ Tests audit course mode is skipped when rendering certificates page. """ CourseModeFactory.create(course_id=self.course.id, mode_slug='audit') response = self.client.get_html( self._url(), ) self.assertEqual(response.status_code, 200) self.assertContains(response, 'This course does not use a mode that offers certificates.') self.assertNotContains(response, 'This module is not enabled.') self.assertNotContains(response, 'Loading') @ddt.data( ['audit', 'verified'], ['verified'], ['audit', 'verified', 'credit'], ['verified', 'credit'], ['professional'] ) def test_non_audit_enables_cert(self, slugs): """ Tests audit course mode is skipped when rendering certificates page. """ for slug in slugs: CourseModeFactory.create(course_id=self.course.id, mode_slug=slug) response = self.client.get_html( self._url(), ) self.assertEqual(response.status_code, 200) self.assertNotContains(response, 'This course does not use a mode that offers certificates.') self.assertNotContains(response, 'This module is not enabled.') self.assertContains(response, 'Loading') def test_assign_unique_identifier_to_certificates(self): """ Test certificates have unique ids """ self._add_course_certificates(count=2) json_data = { u'version': CERTIFICATE_SCHEMA_VERSION, u'name': u'New test certificate', u'description': u'New test description', u'is_active': True, u'signatories': [] } response = self.client.post( self._url(), data=json.dumps(json_data), content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest", ) new_certificate = json.loads(response.content) for prev_certificate in self.course.certificates['certificates']: self.assertNotEqual(new_certificate.get('id'), prev_certificate.get('id')) @ddt.ddt @override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED) class CertificatesDetailHandlerTestCase(EventTestMixin, CourseTestCase, CertificatesBaseTestCase, HelperMethods): """ Test cases for CertificatesDetailHandlerTestCase. """ _id = 0 def setUp(self): # pylint: disable=arguments-differ """ Set up CertificatesDetailHandlerTestCase. """ super(CertificatesDetailHandlerTestCase, self).setUp('contentstore.views.certificates.tracker') def _url(self, cid=-1): """ Return url for the handler. """ cid = cid if cid > 0 else self._id return reverse_course_url( 'certificates.certificates_detail_handler', self.course.id, kwargs={'certificate_id': cid}, ) def test_can_create_new_certificate_if_it_does_not_exist(self): """ PUT/POST new certificate. """ expected = { u'id': 666, u'version': CERTIFICATE_SCHEMA_VERSION, u'name': u'Test certificate', u'description': u'Test description', u'is_active': True, u'course_title': u'Course Title Override', u'signatories': [] } response = self.client.put( self._url(cid=666), data=json.dumps(expected), content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest", ) content = json.loads(response.content) self.assertEqual(content, expected) self.assert_event_emitted( 'edx.certificate.configuration.created', course_id=unicode(self.course.id), configuration_id=666, ) def test_can_edit_certificate(self): """ Edit certificate, check its id and modified fields. 
""" self._add_course_certificates(count=2) expected = { u'id': 1, u'version': CERTIFICATE_SCHEMA_VERSION, u'name': u'New test certificate', u'description': u'New test description', u'is_active': True, u'course_title': u'Course Title Override', u'signatories': [] } response = self.client.put( self._url(cid=1), data=json.dumps(expected), content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest", ) content = json.loads(response.content) self.assertEqual(content, expected) self.assert_event_emitted( 'edx.certificate.configuration.modified', course_id=unicode(self.course.id), configuration_id=1, ) self.reload_course() # Verify that certificate is properly updated in the course. course_certificates = self.course.certificates['certificates'] self.assertEqual(len(course_certificates), 2) self.assertEqual(course_certificates[1].get('name'), u'New test certificate') self.assertEqual(course_certificates[1].get('description'), 'New test description') def test_can_edit_certificate_without_is_active(self): """ Tests user should be able to edit certificate, if is_active attribute is not present for given certificate. Old courses might not have is_active attribute in certificate data. """ certificates = [ { 'id': 1, 'name': 'certificate with is_active', 'description': 'Description ', 'signatories': [], 'version': CERTIFICATE_SCHEMA_VERSION, } ] self.course.certificates = {'certificates': certificates} self.save_course() expected = { u'id': 1, u'version': CERTIFICATE_SCHEMA_VERSION, u'name': u'New test certificate', u'description': u'New test description', u'is_active': True, u'course_title': u'Course Title Override', u'signatories': [] } response = self.client.post( self._url(cid=1), data=json.dumps(expected), content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest", ) self.assertEqual(response.status_code, 201) content = json.loads(response.content) self.assertEqual(content, expected) def test_can_delete_certificate_with_signatories(self): """ Delete certificate """ self._add_course_certificates(count=2, signatory_count=1) response = self.client.delete( self._url(cid=1), content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest", ) self.assertEqual(response.status_code, 204) self.assert_event_emitted( 'edx.certificate.configuration.deleted', course_id=unicode(self.course.id), configuration_id='1', ) self.reload_course() # Verify that certificates are properly updated in the course. certificates = self.course.certificates['certificates'] self.assertEqual(len(certificates), 1) self.assertEqual(certificates[0].get('name'), 'Name 0') self.assertEqual(certificates[0].get('description'), 'Description 0') def test_delete_certificate_without_write_permissions(self): """ Tests certificate deletion without write permission on course. """ self._add_course_certificates(count=2, signatory_count=1) user = UserFactory() self.client.login(username=user.username, password='test') response = self.client.delete( self._url(cid=1), content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest", ) self.assertEqual(response.status_code, 403) def test_delete_certificate_without_global_staff_permissions(self): """ Tests deletion of an active certificate without global staff permission on course. 
""" self._add_course_certificates(count=2, signatory_count=1, is_active=True) user = UserFactory() for role in [CourseInstructorRole, CourseStaffRole]: role(self.course.id).add_users(user) self.client.login(username=user.username, password='test') response = self.client.delete( self._url(cid=1), content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest", ) self.assertEqual(response.status_code, 403) def test_update_active_certificate_without_global_staff_permissions(self): """ Tests update of an active certificate without global staff permission on course. """ self._add_course_certificates(count=2, signatory_count=1, is_active=True) cert_data = { u'id': 1, u'version': CERTIFICATE_SCHEMA_VERSION, u'name': u'New test certificate', u'description': u'New test description', u'course_title': u'Course Title Override', u'org_logo_path': '', u'is_active': False, u'signatories': [] } user = UserFactory() for role in [CourseInstructorRole, CourseStaffRole]: role(self.course.id).add_users(user) self.client.login(username=user.username, password='test') response = self.client.put( self._url(cid=1), data=json.dumps(cert_data), content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest", ) self.assertEqual(response.status_code, 403) def test_delete_non_existing_certificate(self): """ Try to delete a non existing certificate. It should return status code 404 Not found. """ self._add_course_certificates(count=2) response = self.client.delete( self._url(cid=100), content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest", ) self.assertEqual(response.status_code, 404) def test_can_delete_signatory(self): """ Delete an existing certificate signatory """ self._add_course_certificates(count=2, signatory_count=3) certificates = self.course.certificates['certificates'] signatory = certificates[1].get("signatories")[1] image_asset_location = AssetLocation.from_deprecated_string(signatory['signature_image_path']) content = contentstore().find(image_asset_location) self.assertIsNotNone(content) test_url = '{}/signatories/1'.format(self._url(cid=1)) response = self.client.delete( test_url, content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest", ) self.assertEqual(response.status_code, 204) self.reload_course() # Verify that certificates are properly updated in the course. certificates = self.course.certificates['certificates'] self.assertEqual(len(certificates[1].get("signatories")), 2) # make sure signatory signature image is deleted too self.assertRaises(NotFoundError, contentstore().find, image_asset_location) def test_deleting_signatory_without_signature(self): """ Delete an signatory whose signature image is already removed or does not exist """ self._add_course_certificates(count=2, signatory_count=4) test_url = '{}/signatories/3'.format(self._url(cid=1)) response = self.client.delete( test_url, content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest", ) self.assertEqual(response.status_code, 204) def test_delete_signatory_non_existing_certificate(self): """ Try to delete a non existing certificate signatory. It should return status code 404 Not found. 
""" self._add_course_certificates(count=2) test_url = '{}/signatories/1'.format(self._url(cid=100)) response = self.client.delete( test_url, content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest", ) self.assertEqual(response.status_code, 404) def test_certificate_activation_success(self): """ Activate and Deactivate the course certificate """ test_url = reverse_course_url('certificates.certificate_activation_handler', self.course.id) self._add_course_certificates(count=1, signatory_count=2) is_active = True for i in range(2): if i == 1: is_active = not is_active response = self.client.post( test_url, data=json.dumps({"is_active": is_active}), content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest" ) self.assertEquals(response.status_code, 200) course = self.store.get_course(self.course.id) certificates = course.certificates['certificates'] self.assertEqual(certificates[0].get('is_active'), is_active) cert_event_type = 'activated' if is_active else 'deactivated' self.assert_event_emitted( '.'.join(['edx.certificate.configuration', cert_event_type]), course_id=unicode(self.course.id), ) @ddt.data(True, False) def test_certificate_activation_without_write_permissions(self, activate): """ Tests certificate Activate and Deactivate should not be allowed if user does not have write permissions on course. """ test_url = reverse_course_url('certificates.certificate_activation_handler', self.course.id) self._add_course_certificates(count=1, signatory_count=2) user = UserFactory() self.client.login(username=user.username, password='test') response = self.client.post( test_url, data=json.dumps({"is_active": activate}), content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest" ) self.assertEquals(response.status_code, 403) @ddt.data(True, False) def test_certificate_activation_without_global_staff_permissions(self, activate): """ Tests certificate Activate and Deactivate should not be allowed if user does not have global staff permissions on course. """ test_url = reverse_course_url('certificates.certificate_activation_handler', self.course.id) self._add_course_certificates(count=1, signatory_count=2) user = UserFactory() for role in [CourseInstructorRole, CourseStaffRole]: role(self.course.id).add_users(user) self.client.login(username=user.username, password='test') response = self.client.post( test_url, data=json.dumps({"is_active": activate}), content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest" ) self.assertEquals(response.status_code, 403) def test_certificate_activation_failure(self): """ Certificate activation should fail when user has not read access to course then permission denied exception should raised. """ test_url = reverse_course_url('certificates.certificate_activation_handler', self.course.id) test_user_client, test_user = self.create_non_staff_authed_user_client() CourseEnrollment.enroll(test_user, self.course.id) self._add_course_certificates(count=1, signatory_count=2) response = test_user_client.post( test_url, data=json.dumps({"is_active": True}), content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest", ) self.assertEquals(response.status_code, 403) course = self.store.get_course(self.course.id) certificates = course.certificates['certificates'] self.assertEqual(certificates[0].get('is_active'), False)
IndonesiaX/edx-platform
cms/djangoapps/contentstore/views/tests/test_certificates.py
Python
agpl-3.0
28,836
import types


def is_string_like(maybe):
    """Test value to see if it acts like a string"""
    try:
        maybe + ""
    except TypeError:
        return 0
    else:
        return 1


def is_list_or_tuple(maybe):
    return isinstance(maybe, (types.TupleType, types.ListType))
luxnovalabs/enjigo_door
web_interface/keyedcache/utils.py
Python
unlicense
281
# Copyright 2013, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Client side of the scheduler manager RPC API.
"""

from oslo.config import cfg
from oslo import messaging

from nova.objects import base as objects_base
from nova.openstack.common import jsonutils
from nova import rpc

rpcapi_opts = [
    cfg.StrOpt('scheduler_topic',
               default='scheduler',
               help='The topic scheduler nodes listen on'),
]

CONF = cfg.CONF
CONF.register_opts(rpcapi_opts)

rpcapi_cap_opt = cfg.StrOpt('scheduler',
        help='Set a version cap for messages sent to scheduler services')
CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels')


class SchedulerAPI(object):
    '''Client side of the scheduler rpc API.

    API version history:

        1.0 - Initial version.
        1.1 - Changes to prep_resize():
            - remove instance_uuid, add instance
            - remove instance_type_id, add instance_type
            - remove topic, it was unused
        1.2 - Remove topic from run_instance, it was unused
        1.3 - Remove instance_id, add instance to live_migration
        1.4 - Remove update_db from prep_resize
        1.5 - Add reservations argument to prep_resize()
        1.6 - Remove reservations argument to run_instance()
        1.7 - Add create_volume() method, remove topic from live_migration()

        2.0 - Remove 1.x backwards compat
        2.1 - Add image_id to create_volume()
        2.2 - Remove reservations argument to create_volume()
        2.3 - Remove create_volume()
        2.4 - Change update_service_capabilities()
            - accepts a list of capabilities
        2.5 - Add get_backdoor_port()
        2.6 - Add select_hosts()

        ... Grizzly supports message version 2.6. So, any changes to existing
        methods in 2.x after that point should be done such that they can
        handle the version_cap being set to 2.6.

        2.7 - Add select_destinations()
        2.8 - Deprecate prep_resize() -- JUST KIDDING. It is still used by
              the compute manager for retries.
        2.9 - Added the legacy_bdm_in_spec parameter to run_instance()

        ... Havana supports message version 2.9. So, any changes to existing
        methods in 2.x after that point should be done such that they can
        handle the version_cap being set to 2.9.

        ... - Deprecated live_migration() call, moved to conductor
        ... - Deprecated select_hosts()

        3.0 - Removed backwards compat
    '''

    VERSION_ALIASES = {
        'grizzly': '2.6',
        'havana': '2.9',
        'icehouse': '3.0',
    }

    def __init__(self):
        super(SchedulerAPI, self).__init__()
        target = messaging.Target(topic=CONF.scheduler_topic, version='3.0')
        version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.scheduler,
                                               CONF.upgrade_levels.scheduler)
        serializer = objects_base.NovaObjectSerializer()
        self.client = rpc.get_client(target, version_cap=version_cap,
                                     serializer=serializer)

    def select_destinations(self, ctxt, request_spec, filter_properties):
        cctxt = self.client.prepare()
        return cctxt.call(ctxt, 'select_destinations',
                          request_spec=request_spec,
                          filter_properties=filter_properties)

    def run_instance(self, ctxt, request_spec, admin_password,
                     injected_files, requested_networks, is_first_time,
                     filter_properties, legacy_bdm_in_spec=True):
        msg_kwargs = {'request_spec': request_spec,
                      'admin_password': admin_password,
                      'injected_files': injected_files,
                      'requested_networks': requested_networks,
                      'is_first_time': is_first_time,
                      'filter_properties': filter_properties,
                      'legacy_bdm_in_spec': legacy_bdm_in_spec}
        cctxt = self.client.prepare()
        cctxt.cast(ctxt, 'run_instance', **msg_kwargs)

    def prep_resize(self, ctxt, instance, instance_type, image,
                    request_spec, filter_properties, reservations):
        instance_p = jsonutils.to_primitive(instance)
        instance_type_p = jsonutils.to_primitive(instance_type)
        reservations_p = jsonutils.to_primitive(reservations)
        image_p = jsonutils.to_primitive(image)
        cctxt = self.client.prepare()
        cctxt.cast(ctxt, 'prep_resize',
                   instance=instance_p, instance_type=instance_type_p,
                   image=image_p, request_spec=request_spec,
                   filter_properties=filter_properties,
                   reservations=reservations_p)
leilihh/nova
nova/scheduler/rpcapi.py
Python
apache-2.0
5,246
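A minimal sketch of how a caller inside nova might use this client to ask the scheduler for placement decisions; it only uses the select_destinations() method defined above, while the context and the spec dictionaries are placeholder assumptions, not values taken from the module:

# Hypothetical caller-side sketch; `ctxt` is assumed to be an existing
# nova RequestContext built elsewhere.
from nova.scheduler import rpcapi as scheduler_rpcapi

scheduler = scheduler_rpcapi.SchedulerAPI()
dests = scheduler.select_destinations(
    ctxt,
    request_spec={'num_instances': 1, 'instance_properties': {}},
    filter_properties={},
)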
# Copyright 2013: Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import jsonschema import mock from rally.deployment.engines import devstack from tests.unit import test SAMPLE_CONFIG = { "type": "DevstackEngine", "provider": { "name": "ExistingServers", "credentials": [{"user": "root", "host": "example.com"}], }, "localrc": { "ADMIN_PASSWORD": "secret", }, } DEVSTACK_REPO = "https://git.openstack.org/openstack-dev/devstack" class DevstackEngineTestCase(test.TestCase): def setUp(self): super(DevstackEngineTestCase, self).setUp() self.deployment = { "uuid": "de641026-dbe3-4abe-844a-ffef930a600a", "config": SAMPLE_CONFIG, } self.engine = devstack.DevstackEngine(self.deployment) def test_invalid_config(self): self.deployment = SAMPLE_CONFIG.copy() self.deployment["config"] = {"type": 42} engine = devstack.DevstackEngine(self.deployment) self.assertRaises(jsonschema.ValidationError, engine.validate) def test_construct(self): self.assertEqual(self.engine.localrc["ADMIN_PASSWORD"], "secret") @mock.patch("rally.deployment.engines.devstack.open", create=True) def test_prepare_server(self, mock_open): mock_open.return_value = "fake_file" server = mock.Mock() server.password = "secret" self.engine.prepare_server(server) calls = [ mock.call("/bin/sh -e", stdin="fake_file"), mock.call("chpasswd", stdin="rally:secret"), ] self.assertEqual(calls, server.ssh.run.mock_calls) filename = mock_open.mock_calls[0][1][0] self.assertTrue(filename.endswith("rally/deployment/engines/" "devstack/install.sh")) self.assertEqual([mock.call(filename, "rb")], mock_open.mock_calls) @mock.patch("rally.deployment.engine.Engine.get_provider") @mock.patch("rally.deployment.engines.devstack.get_updated_server") @mock.patch("rally.deployment.engines.devstack.get_script") @mock.patch("rally.deployment.serverprovider.provider.Server") @mock.patch("rally.deployment.engines.devstack.objects.Endpoint") def test_deploy(self, mock_endpoint, mock_server, mock_get_script, mock_get_updated_server, mock_engine_get_provider): mock_engine_get_provider.return_value = fake_provider = ( mock.Mock() ) server = mock.Mock(host="host") mock_endpoint.return_value = "fake_endpoint" mock_get_updated_server.return_value = ds_server = mock.Mock() mock_get_script.return_value = "fake_script" server.get_credentials.return_value = "fake_credentials" fake_provider.create_servers.return_value = [server] with mock.patch.object(self.engine, "deployment") as mock_deployment: endpoints = self.engine.deploy() self.assertEqual({"admin": "fake_endpoint"}, endpoints) mock_endpoint.assert_called_once_with( "http://host:5000/v2.0/", "admin", "secret", "admin", "admin") mock_deployment.add_resource.assert_called_once_with( info="fake_credentials", provider_name="DevstackEngine", type="credentials") repo = "https://git.openstack.org/openstack-dev/devstack" cmd = "/bin/sh -e -s %s master" % repo server.ssh.run.assert_called_once_with(cmd, stdin="fake_script") ds_calls = [ mock.call.ssh.run("cat > ~/devstack/localrc", stdin=mock.ANY), 
mock.call.ssh.run("~/devstack/stack.sh") ] self.assertEqual(ds_calls, ds_server.mock_calls) localrc = ds_server.mock_calls[0][2]["stdin"] self.assertIn("ADMIN_PASSWORD=secret", localrc)
afaheem88/rally
tests/unit/deployment/engines/test_devstack.py
Python
apache-2.0
4,402
#!/usr/bin/env python
# RSSI production test

import serial, sys, optparse, time, fdpexpect

parser = optparse.OptionParser("update_mode")
parser.add_option("--baudrate", type='int', default=57600, help='baud rate')
parser.add_option("--rtscts", action='store_true', default=False, help='enable rtscts')
parser.add_option("--dsrdtr", action='store_true', default=False, help='enable dsrdtr')
parser.add_option("--xonxoff", action='store_true', default=False, help='enable xonxoff')

opts, args = parser.parse_args()

if len(args) == 0:
    print("usage: rssi.py <DEVICE...>")
    sys.exit(1)

def rssi(device):
    port = serial.Serial(device, opts.baudrate, timeout=0,
                         dsrdtr=opts.dsrdtr, rtscts=opts.rtscts, xonxoff=opts.xonxoff)
    ser = fdpexpect.fdspawn(port.fileno(), logfile=sys.stdout)
    ser.send('+++')
    time.sleep(1)
    ser.send('\r\nATI\r\n')
    try:
        ser.expect(['OK','SiK .* on HM-TRP'], timeout=2)
    except fdpexpect.TIMEOUT:
        print("timeout")
        return
    ser.send('AT&F\r\n')
    try:
        ser.expect(['OK'], timeout=2)
    except fdpexpect.TIMEOUT:
        print("timeout")
        return
    ser.send('AT&T=RSSI\r\n')
    ctr = 0
    while ctr < 200:
        try:
            count = port.inWaiting()
            if count == 0:
                count = 1
            buf = port.read(count)
            if len(buf) == 0:
                continue
            sys.stdout.write(buf)
            sys.stdout.flush()
            ctr = ctr + 1
        except KeyboardInterrupt:
            sys.exit(0)
    port.close()

for d in args:
    print("Putting %s into rssi test mode" % d)
    rssi(d)
RFDesign/SiK
Firmware/tools/rssi.py
Python
bsd-2-clause
1,670
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members

# old (pre-0.8.4) location for ChangeFilter

from buildbot.changes.filter import ChangeFilter

_hush_pyflakes = ChangeFilter  # keep pyflakes happy
eunchong/build
third_party/buildbot_8_4p1/buildbot/schedulers/filter.py
Python
bsd-3-clause
851
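The shim above only re-exports ChangeFilter from its new home, so configurations written against the pre-0.8.4 import path keep working. A hedged sketch of such legacy usage; the branch name is an arbitrary example:

# Old-style import path still resolves thanks to the re-export above.
from buildbot.schedulers.filter import ChangeFilter

master_only = ChangeFilter(branch='master')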
import unittest import uuid from django.core.checks import Error, Warning as DjangoWarning from django.db import connection, models from django.test import ( SimpleTestCase, TestCase, skipIfDBFeature, skipUnlessDBFeature, ) from django.test.utils import isolate_apps, override_settings from django.utils.functional import lazy from django.utils.timezone import now from django.utils.translation import gettext_lazy as _ from django.utils.version import get_docs_version @isolate_apps('invalid_models_tests') class AutoFieldTests(SimpleTestCase): def test_valid_case(self): class Model(models.Model): id = models.AutoField(primary_key=True) field = Model._meta.get_field('id') self.assertEqual(field.check(), []) def test_primary_key(self): # primary_key must be True. Refs #12467. class Model(models.Model): field = models.AutoField(primary_key=False) # Prevent Django from autocreating `id` AutoField, which would # result in an error, because a model must have exactly one # AutoField. another = models.IntegerField(primary_key=True) field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( 'AutoFields must set primary_key=True.', obj=field, id='fields.E100', ), ]) def test_max_length_warning(self): class Model(models.Model): auto = models.AutoField(primary_key=True, max_length=2) field = Model._meta.get_field('auto') self.assertEqual(field.check(), [ DjangoWarning( "'max_length' is ignored when used with %s." % field.__class__.__name__, hint="Remove 'max_length' from field", obj=field, id='fields.W122', ), ]) @isolate_apps('invalid_models_tests') class BinaryFieldTests(SimpleTestCase): def test_valid_default_value(self): class Model(models.Model): field1 = models.BinaryField(default=b'test') field2 = models.BinaryField(default=None) for field_name in ('field1', 'field2'): field = Model._meta.get_field(field_name) self.assertEqual(field.check(), []) def test_str_default_value(self): class Model(models.Model): field = models.BinaryField(default='test') field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "BinaryField's default cannot be a string. 
Use bytes content " "instead.", obj=field, id='fields.E170', ), ]) @isolate_apps('invalid_models_tests') class CharFieldTests(TestCase): def test_valid_field(self): class Model(models.Model): field = models.CharField( max_length=255, choices=[ ('1', 'item1'), ('2', 'item2'), ], db_index=True, ) field = Model._meta.get_field('field') self.assertEqual(field.check(), []) def test_missing_max_length(self): class Model(models.Model): field = models.CharField() field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "CharFields must define a 'max_length' attribute.", obj=field, id='fields.E120', ), ]) def test_negative_max_length(self): class Model(models.Model): field = models.CharField(max_length=-1) field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "'max_length' must be a positive integer.", obj=field, id='fields.E121', ), ]) def test_bad_max_length_value(self): class Model(models.Model): field = models.CharField(max_length="bad") field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "'max_length' must be a positive integer.", obj=field, id='fields.E121', ), ]) def test_str_max_length_value(self): class Model(models.Model): field = models.CharField(max_length='20') field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "'max_length' must be a positive integer.", obj=field, id='fields.E121', ), ]) def test_str_max_length_type(self): class Model(models.Model): field = models.CharField(max_length=True) field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "'max_length' must be a positive integer.", obj=field, id='fields.E121' ), ]) def test_non_iterable_choices(self): class Model(models.Model): field = models.CharField(max_length=10, choices='bad') field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "'choices' must be an iterable (e.g., a list or tuple).", obj=field, id='fields.E004', ), ]) def test_non_iterable_choices_two_letters(self): """Two letters isn't a valid choice pair.""" class Model(models.Model): field = models.CharField(max_length=10, choices=['ab']) field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "'choices' must be an iterable containing (actual value, " "human readable name) tuples.", obj=field, id='fields.E005', ), ]) def test_iterable_of_iterable_choices(self): class ThingItem: def __init__(self, value, display): self.value = value self.display = display def __iter__(self): return iter((self.value, self.display)) def __len__(self): return 2 class Things: def __iter__(self): return iter((ThingItem(1, 2), ThingItem(3, 4))) class ThingWithIterableChoices(models.Model): thing = models.CharField(max_length=100, blank=True, choices=Things()) self.assertEqual(ThingWithIterableChoices._meta.get_field('thing').check(), []) def test_choices_containing_non_pairs(self): class Model(models.Model): field = models.CharField(max_length=10, choices=[(1, 2, 3), (1, 2, 3)]) class Model2(models.Model): field = models.IntegerField(choices=[0]) for model in (Model, Model2): with self.subTest(model.__name__): field = model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "'choices' must be an iterable containing (actual " "value, human readable name) tuples.", obj=field, id='fields.E005', ), ]) def test_choices_containing_lazy(self): class Model(models.Model): field = models.CharField(max_length=10, choices=[['1', _('1')], ['2', _('2')]]) self.assertEqual(Model._meta.get_field('field').check(), 
[]) def test_lazy_choices(self): class Model(models.Model): field = models.CharField(max_length=10, choices=lazy(lambda: [[1, '1'], [2, '2']], tuple)()) self.assertEqual(Model._meta.get_field('field').check(), []) def test_choices_named_group(self): class Model(models.Model): field = models.CharField( max_length=10, choices=[ ['knights', [['L', 'Lancelot'], ['G', 'Galahad']]], ['wizards', [['T', 'Tim the Enchanter']]], ['R', 'Random character'], ], ) self.assertEqual(Model._meta.get_field('field').check(), []) def test_choices_named_group_non_pairs(self): class Model(models.Model): field = models.CharField( max_length=10, choices=[['knights', [['L', 'Lancelot', 'Du Lac']]]], ) field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "'choices' must be an iterable containing (actual value, " "human readable name) tuples.", obj=field, id='fields.E005', ), ]) def test_choices_named_group_bad_structure(self): class Model(models.Model): field = models.CharField( max_length=10, choices=[ ['knights', [ ['Noble', [['G', 'Galahad']]], ['Combative', [['L', 'Lancelot']]], ]], ], ) field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "'choices' must be an iterable containing (actual value, " "human readable name) tuples.", obj=field, id='fields.E005', ), ]) def test_choices_named_group_lazy(self): class Model(models.Model): field = models.CharField( max_length=10, choices=[ [_('knights'), [['L', _('Lancelot')], ['G', _('Galahad')]]], ['R', _('Random character')], ], ) self.assertEqual(Model._meta.get_field('field').check(), []) def test_choices_in_max_length(self): class Model(models.Model): field = models.CharField( max_length=2, choices=[ ('ABC', 'Value Too Long!'), ('OK', 'Good') ], ) group = models.CharField( max_length=2, choices=[ ('Nested', [('OK', 'Good'), ('Longer', 'Longer')]), ('Grouped', [('Bad', 'Bad')]), ], ) for name, choice_max_length in (('field', 3), ('group', 6)): with self.subTest(name): field = Model._meta.get_field(name) self.assertEqual(field.check(), [ Error( "'max_length' is too small to fit the longest value " "in 'choices' (%d characters)." % choice_max_length, obj=field, id='fields.E009', ), ]) def test_bad_db_index_value(self): class Model(models.Model): field = models.CharField(max_length=10, db_index='bad') field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "'db_index' must be None, True or False.", obj=field, id='fields.E006', ), ]) def test_bad_validators(self): class Model(models.Model): field = models.CharField(max_length=10, validators=[True]) field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "All 'validators' must be callable.", hint=( "validators[0] (True) isn't a function or instance of a " "validator class." ), obj=field, id='fields.E008', ), ]) @unittest.skipUnless(connection.vendor == 'mysql', "Test valid only for MySQL") def test_too_long_char_field_under_mysql(self): from django.db.backends.mysql.validation import DatabaseValidation class Model(models.Model): field = models.CharField(unique=True, max_length=256) field = Model._meta.get_field('field') validator = DatabaseValidation(connection=connection) self.assertEqual(validator.check_field(field), [ DjangoWarning( '%s may not allow unique CharFields to have a max_length > ' '255.' 
% connection.display_name, hint=( 'See: https://docs.djangoproject.com/en/%s/ref/databases/' '#mysql-character-fields' % get_docs_version() ), obj=field, id='mysql.W003', ) ]) def test_db_collation(self): class Model(models.Model): field = models.CharField(max_length=100, db_collation='anything') field = Model._meta.get_field('field') error = Error( '%s does not support a database collation on CharFields.' % connection.display_name, id='fields.E190', obj=field, ) expected = [] if connection.features.supports_collation_on_charfield else [error] self.assertEqual(field.check(databases=self.databases), expected) def test_db_collation_required_db_features(self): class Model(models.Model): field = models.CharField(max_length=100, db_collation='anything') class Meta: required_db_features = {'supports_collation_on_charfield'} field = Model._meta.get_field('field') self.assertEqual(field.check(databases=self.databases), []) @isolate_apps('invalid_models_tests') class DateFieldTests(SimpleTestCase): maxDiff = None def test_auto_now_and_auto_now_add_raise_error(self): class Model(models.Model): field0 = models.DateTimeField(auto_now=True, auto_now_add=True, default=now) field1 = models.DateTimeField(auto_now=True, auto_now_add=False, default=now) field2 = models.DateTimeField(auto_now=False, auto_now_add=True, default=now) field3 = models.DateTimeField(auto_now=True, auto_now_add=True, default=None) expected = [] checks = [] for i in range(4): field = Model._meta.get_field('field%d' % i) expected.append(Error( "The options auto_now, auto_now_add, and default " "are mutually exclusive. Only one of these options " "may be present.", obj=field, id='fields.E160', )) checks.extend(field.check()) self.assertEqual(checks, expected) def test_fix_default_value(self): class Model(models.Model): field_dt = models.DateField(default=now()) field_d = models.DateField(default=now().date()) field_now = models.DateField(default=now) field_dt = Model._meta.get_field('field_dt') field_d = Model._meta.get_field('field_d') field_now = Model._meta.get_field('field_now') errors = field_dt.check() errors.extend(field_d.check()) errors.extend(field_now.check()) # doesn't raise a warning self.assertEqual(errors, [ DjangoWarning( 'Fixed default value provided.', hint='It seems you set a fixed date / time / datetime ' 'value as default for this field. This may not be ' 'what you want. If you want to have the current date ' 'as default, use `django.utils.timezone.now`', obj=field_dt, id='fields.W161', ), DjangoWarning( 'Fixed default value provided.', hint='It seems you set a fixed date / time / datetime ' 'value as default for this field. This may not be ' 'what you want. 
If you want to have the current date ' 'as default, use `django.utils.timezone.now`', obj=field_d, id='fields.W161', ) ]) @override_settings(USE_TZ=True) def test_fix_default_value_tz(self): self.test_fix_default_value() @isolate_apps('invalid_models_tests') class DateTimeFieldTests(SimpleTestCase): maxDiff = None def test_fix_default_value(self): class Model(models.Model): field_dt = models.DateTimeField(default=now()) field_d = models.DateTimeField(default=now().date()) field_now = models.DateTimeField(default=now) field_dt = Model._meta.get_field('field_dt') field_d = Model._meta.get_field('field_d') field_now = Model._meta.get_field('field_now') errors = field_dt.check() errors.extend(field_d.check()) errors.extend(field_now.check()) # doesn't raise a warning self.assertEqual(errors, [ DjangoWarning( 'Fixed default value provided.', hint='It seems you set a fixed date / time / datetime ' 'value as default for this field. This may not be ' 'what you want. If you want to have the current date ' 'as default, use `django.utils.timezone.now`', obj=field_dt, id='fields.W161', ), DjangoWarning( 'Fixed default value provided.', hint='It seems you set a fixed date / time / datetime ' 'value as default for this field. This may not be ' 'what you want. If you want to have the current date ' 'as default, use `django.utils.timezone.now`', obj=field_d, id='fields.W161', ) ]) @override_settings(USE_TZ=True) def test_fix_default_value_tz(self): self.test_fix_default_value() @isolate_apps('invalid_models_tests') class DecimalFieldTests(SimpleTestCase): def test_required_attributes(self): class Model(models.Model): field = models.DecimalField() field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "DecimalFields must define a 'decimal_places' attribute.", obj=field, id='fields.E130', ), Error( "DecimalFields must define a 'max_digits' attribute.", obj=field, id='fields.E132', ), ]) def test_negative_max_digits_and_decimal_places(self): class Model(models.Model): field = models.DecimalField(max_digits=-1, decimal_places=-1) field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "'decimal_places' must be a non-negative integer.", obj=field, id='fields.E131', ), Error( "'max_digits' must be a positive integer.", obj=field, id='fields.E133', ), ]) def test_bad_values_of_max_digits_and_decimal_places(self): class Model(models.Model): field = models.DecimalField(max_digits="bad", decimal_places="bad") field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "'decimal_places' must be a non-negative integer.", obj=field, id='fields.E131', ), Error( "'max_digits' must be a positive integer.", obj=field, id='fields.E133', ), ]) def test_decimal_places_greater_than_max_digits(self): class Model(models.Model): field = models.DecimalField(max_digits=9, decimal_places=10) field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "'max_digits' must be greater or equal to 'decimal_places'.", obj=field, id='fields.E134', ), ]) def test_valid_field(self): class Model(models.Model): field = models.DecimalField(max_digits=10, decimal_places=10) field = Model._meta.get_field('field') self.assertEqual(field.check(), []) @isolate_apps('invalid_models_tests') class FileFieldTests(SimpleTestCase): def test_valid_default_case(self): class Model(models.Model): field = models.FileField() self.assertEqual(Model._meta.get_field('field').check(), []) def test_valid_case(self): class Model(models.Model): field = 
models.FileField(upload_to='somewhere') field = Model._meta.get_field('field') self.assertEqual(field.check(), []) def test_primary_key(self): class Model(models.Model): field = models.FileField(primary_key=False, upload_to='somewhere') field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "'primary_key' is not a valid argument for a FileField.", obj=field, id='fields.E201', ) ]) def test_upload_to_starts_with_slash(self): class Model(models.Model): field = models.FileField(upload_to='/somewhere') field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "FileField's 'upload_to' argument must be a relative path, not " "an absolute path.", obj=field, id='fields.E202', hint='Remove the leading slash.', ) ]) def test_upload_to_callable_not_checked(self): def callable(instance, filename): return '/' + filename class Model(models.Model): field = models.FileField(upload_to=callable) field = Model._meta.get_field('field') self.assertEqual(field.check(), []) @isolate_apps('invalid_models_tests') class FilePathFieldTests(SimpleTestCase): def test_forbidden_files_and_folders(self): class Model(models.Model): field = models.FilePathField(allow_files=False, allow_folders=False) field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "FilePathFields must have either 'allow_files' or 'allow_folders' set to True.", obj=field, id='fields.E140', ), ]) @isolate_apps('invalid_models_tests') class GenericIPAddressFieldTests(SimpleTestCase): def test_non_nullable_blank(self): class Model(models.Model): field = models.GenericIPAddressField(null=False, blank=True) field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( ('GenericIPAddressFields cannot have blank=True if null=False, ' 'as blank values are stored as nulls.'), obj=field, id='fields.E150', ), ]) @isolate_apps('invalid_models_tests') class ImageFieldTests(SimpleTestCase): def test_pillow_installed(self): try: from PIL import Image # NOQA except ImportError: pillow_installed = False else: pillow_installed = True class Model(models.Model): field = models.ImageField(upload_to='somewhere') field = Model._meta.get_field('field') errors = field.check() expected = [] if pillow_installed else [ Error( 'Cannot use ImageField because Pillow is not installed.', hint=('Get Pillow at https://pypi.org/project/Pillow/ ' 'or run command "python -m pip install Pillow".'), obj=field, id='fields.E210', ), ] self.assertEqual(errors, expected) @isolate_apps('invalid_models_tests') class IntegerFieldTests(SimpleTestCase): def test_max_length_warning(self): class Model(models.Model): integer = models.IntegerField(max_length=2) biginteger = models.BigIntegerField(max_length=2) smallinteger = models.SmallIntegerField(max_length=2) positiveinteger = models.PositiveIntegerField(max_length=2) positivebiginteger = models.PositiveBigIntegerField(max_length=2) positivesmallinteger = models.PositiveSmallIntegerField(max_length=2) for field in Model._meta.get_fields(): if field.auto_created: continue with self.subTest(name=field.name): self.assertEqual(field.check(), [ DjangoWarning( "'max_length' is ignored when used with %s." 
% field.__class__.__name__, hint="Remove 'max_length' from field", obj=field, id='fields.W122', ) ]) @isolate_apps('invalid_models_tests') class TimeFieldTests(SimpleTestCase): maxDiff = None def test_fix_default_value(self): class Model(models.Model): field_dt = models.TimeField(default=now()) field_t = models.TimeField(default=now().time()) # Timezone-aware time object (when USE_TZ=True). field_tz = models.TimeField(default=now().timetz()) field_now = models.DateField(default=now) names = ['field_dt', 'field_t', 'field_tz', 'field_now'] fields = [Model._meta.get_field(name) for name in names] errors = [] for field in fields: errors.extend(field.check()) self.assertEqual(errors, [ DjangoWarning( 'Fixed default value provided.', hint='It seems you set a fixed date / time / datetime ' 'value as default for this field. This may not be ' 'what you want. If you want to have the current date ' 'as default, use `django.utils.timezone.now`', obj=fields[0], id='fields.W161', ), DjangoWarning( 'Fixed default value provided.', hint='It seems you set a fixed date / time / datetime ' 'value as default for this field. This may not be ' 'what you want. If you want to have the current date ' 'as default, use `django.utils.timezone.now`', obj=fields[1], id='fields.W161', ), DjangoWarning( 'Fixed default value provided.', hint=( 'It seems you set a fixed date / time / datetime value as ' 'default for this field. This may not be what you want. ' 'If you want to have the current date as default, use ' '`django.utils.timezone.now`' ), obj=fields[2], id='fields.W161', ), # field_now doesn't raise a warning. ]) @override_settings(USE_TZ=True) def test_fix_default_value_tz(self): self.test_fix_default_value() @isolate_apps('invalid_models_tests') class TextFieldTests(TestCase): @skipIfDBFeature('supports_index_on_text_field') def test_max_length_warning(self): class Model(models.Model): value = models.TextField(db_index=True) field = Model._meta.get_field('value') field_type = field.db_type(connection) self.assertEqual(field.check(databases=self.databases), [ DjangoWarning( '%s does not support a database index on %s columns.' % (connection.display_name, field_type), hint=( "An index won't be created. Silence this warning if you " "don't care about it." ), obj=field, id='fields.W162', ) ]) def test_db_collation(self): class Model(models.Model): field = models.TextField(db_collation='anything') field = Model._meta.get_field('field') error = Error( '%s does not support a database collation on TextFields.' 
% connection.display_name, id='fields.E190', obj=field, ) expected = [] if connection.features.supports_collation_on_textfield else [error] self.assertEqual(field.check(databases=self.databases), expected) def test_db_collation_required_db_features(self): class Model(models.Model): field = models.TextField(db_collation='anything') class Meta: required_db_features = {'supports_collation_on_textfield'} field = Model._meta.get_field('field') self.assertEqual(field.check(databases=self.databases), []) @isolate_apps('invalid_models_tests') class UUIDFieldTests(TestCase): def test_choices_named_group(self): class Model(models.Model): field = models.UUIDField( choices=[ ['knights', [ [uuid.UUID('5c859437-d061-4847-b3f7-e6b78852f8c8'), 'Lancelot'], [uuid.UUID('c7853ec1-2ea3-4359-b02d-b54e8f1bcee2'), 'Galahad'], ]], [uuid.UUID('25d405be-4895-4d50-9b2e-d6695359ce47'), 'Other'], ], ) self.assertEqual(Model._meta.get_field('field').check(), []) @isolate_apps('invalid_models_tests') @skipUnlessDBFeature('supports_json_field') class JSONFieldTests(TestCase): def test_invalid_default(self): class Model(models.Model): field = models.JSONField(default={}) self.assertEqual(Model._meta.get_field('field').check(), [ DjangoWarning( msg=( "JSONField default should be a callable instead of an " "instance so that it's not shared between all field " "instances." ), hint=( 'Use a callable instead, e.g., use `dict` instead of `{}`.' ), obj=Model._meta.get_field('field'), id='fields.E010', ) ]) def test_valid_default(self): class Model(models.Model): field = models.JSONField(default=dict) self.assertEqual(Model._meta.get_field('field').check(), []) def test_valid_default_none(self): class Model(models.Model): field = models.JSONField(default=None) self.assertEqual(Model._meta.get_field('field').check(), []) def test_valid_callable_default(self): def callable_default(): return {'it': 'works'} class Model(models.Model): field = models.JSONField(default=callable_default) self.assertEqual(Model._meta.get_field('field').check(), [])
ar4s/django
tests/invalid_models_tests/test_ordinary_fields.py
Python
bsd-3-clause
31,641
""" =============================================== Create topographic ERF maps in delayed SSP mode =============================================== This script shows how to apply SSP projectors delayed, that is, at the evoked stage. This is particularly useful to support decisions related to the trade-off between denoising and preserving signal. In this example we demonstrate how to use topographic maps for delayed SSP application. """ # Authors: Denis Engemann <denis.engemann@gmail.com> # Christian Brodbeck <christianbrodbeck@nyu.edu> # Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr> # # License: BSD (3-clause) import numpy as np import mne from mne import io from mne.datasets import sample print(__doc__) data_path = sample.data_path() ############################################################################### # Set parameters raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif' event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif' ecg_fname = data_path + '/MEG/sample/sample_audvis_ecg_proj.fif' event_id, tmin, tmax = 1, -0.2, 0.5 # Setup for reading the raw data raw = io.Raw(raw_fname) events = mne.read_events(event_fname) # delete EEG projections (we know it's the last one) raw.del_proj(-1) # add ECG projs for magnetometers [raw.add_proj(p) for p in mne.read_proj(ecg_fname) if 'axial' in p['desc']] # pick magnetometer channels picks = mne.pick_types(raw.info, meg='mag', stim=False, eog=True, include=[], exclude='bads') # We will make of the proj `delayed` option to # interactively select projections at the evoked stage. # more information can be found in the example/plot_evoked_delayed_ssp.py epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks, baseline=(None, 0), reject=dict(mag=4e-12), proj='delayed') evoked = epochs.average() # average epochs and get an Evoked dataset. ############################################################################### # Interactively select / deselect the SSP projection vectors # set time instants in seconds (from 50 to 150ms in a step of 10ms) times = np.arange(0.05, 0.15, 0.01) evoked.plot_topomap(times, proj='interactive') # Hint: the same works for evoked.plot and viz.plot_topo
trachelr/mne-python
examples/visualization/plot_evoked_topomap_delayed_ssp.py
Python
bsd-3-clause
2,301
import os import CTK UPLOAD_DIR = "/tmp" def ok (filename, target_dir, target_file, params): txt = "<h1>It worked!</h1>" txt += "<pre>%s</pre>" %(os.popen("ls -l '%s'" %(os.path.join(target_dir, target_file))).read()) txt += "<p>Params: %s</p>" %(str(params)) txt += "<p>Filename: %s</p>" %(filename) return txt class default: def __init__ (self): self.page = CTK.Page () self.page += CTK.RawHTML ("<h1>Direct Upload with params</h1>") self.page += CTK.Uploader({'handler': ok, 'target_dir': UPLOAD_DIR}, {'var':'foo'}) self.page += CTK.RawHTML ("<h1>Temporal Upload without params</h1>") self.page += CTK.Uploader({'handler': ok, 'target_dir': UPLOAD_DIR}, direct=False) def __call__ (self): return self.page.Render() CTK.publish ('', default) CTK.run (port=8000)
cherokee/pyscgi
tests/test5.py
Python
bsd-3-clause
850
# The contents of this file are subject to the BitTorrent Open Source License # Version 1.1 (the License). You may not copy or use this file, in either # source code or executable form, except in compliance with the License. You # may obtain a copy of the License at http://www.bittorrent.com/license/. # # Software distributed under the License is distributed on an AS IS basis, # WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License # for the specific language governing rights and limitations under the # License. from sha import sha from random import randint #this is ugly, hopefully os.entropy will be in 2.4 try: from entropy import entropy except ImportError: def entropy(n): s = '' for i in range(n): s += chr(randint(0,255)) return s def intify(hstr): """20 bit hash, big-endian -> long python integer""" assert len(hstr) == 20 return long(hstr.encode('hex'), 16) def stringify(num): """long int -> 20-character string""" str = hex(num)[2:] if str[-1] == 'L': str = str[:-1] if len(str) % 2 != 0: str = '0' + str str = str.decode('hex') return (20 - len(str)) *'\x00' + str def distance(a, b): """distance between two 160-bit hashes expressed as 20-character strings""" return intify(a) ^ intify(b) def newID(): """returns a new pseudorandom globally unique ID string""" h = sha() h.update(entropy(20)) return h.digest() def newIDInRange(min, max): return stringify(randRange(min,max)) def randRange(min, max): return min + intify(newID()) % (max - min) def newTID(): return randRange(-2**30, 2**30) ### Test Cases ### import unittest class NewID(unittest.TestCase): def testLength(self): self.assertEqual(len(newID()), 20) def testHundreds(self): for x in xrange(100): self.testLength class Intify(unittest.TestCase): known = [('\0' * 20, 0), ('\xff' * 20, 2L**160 - 1), ] def testKnown(self): for str, value in self.known: self.assertEqual(intify(str), value) def testEndianessOnce(self): h = newID() while h[-1] == '\xff': h = newID() k = h[:-1] + chr(ord(h[-1]) + 1) self.assertEqual(intify(k) - intify(h), 1) def testEndianessLots(self): for x in xrange(100): self.testEndianessOnce() class Disantance(unittest.TestCase): known = [ (("\0" * 20, "\xff" * 20), 2**160L -1), ((sha("foo").digest(), sha("foo").digest()), 0), ((sha("bar").digest(), sha("bar").digest()), 0) ] def testKnown(self): for pair, dist in self.known: self.assertEqual(distance(pair[0], pair[1]), dist) def testCommutitive(self): for i in xrange(100): x, y, z = newID(), newID(), newID() self.assertEqual(distance(x,y) ^ distance(y, z), distance(x, z)) class RandRange(unittest.TestCase): def testOnce(self): a = intify(newID()) b = intify(newID()) if a < b: c = randRange(a, b) self.assertEqual(a <= c < b, 1, "output out of range %d %d %d" % (b, c, a)) else: c = randRange(b, a) assert b <= c < a, "output out of range %d %d %d" % (b, c, a) def testOneHundredTimes(self): for i in xrange(100): self.testOnce() if __name__ == '__main__': unittest.main()
rays/ipodderx-core
khashmir/khash.py
Python
mit
3,533
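The module above already ships its own unit tests, but a short sketch makes the round-trip and XOR-metric properties explicit. It assumes the file is importable as khash, and it is Python 2 like the module itself:

# Hypothetical demonstration of the helpers above (Python 2).
from sha import sha
from khash import intify, stringify, distance

a = sha("node-a").digest()
b = sha("node-b").digest()

assert stringify(intify(a)) == a            # stringify() inverts intify()
assert distance(a, a) == 0                  # identical IDs are at distance 0
assert distance(a, b) == distance(b, a)     # the XOR metric is symmetric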
#!/usr/bin/env python
"""
demonstrate adding a FigureCanvasGTK3Agg widget to a Gtk.ScrolledWindow
using GTK3 accessed via pygobject
"""

from gi.repository import Gtk

from matplotlib.figure import Figure
from numpy import arange, sin, pi
from matplotlib.backends.backend_gtk3agg import FigureCanvasGTK3Agg as FigureCanvas

win = Gtk.Window()
win.connect("delete-event", Gtk.main_quit )
win.set_default_size(400,300)
win.set_title("Embedding in GTK")

f = Figure(figsize=(5,4), dpi=100)
a = f.add_subplot(111)
t = arange(0.0,3.0,0.01)
s = sin(2*pi*t)
a.plot(t,s)

sw = Gtk.ScrolledWindow()
win.add (sw)
# A scrolled window border goes outside the scrollbars and viewport
sw.set_border_width (10)

canvas = FigureCanvas(f)  # a Gtk.DrawingArea
canvas.set_size_request(800,600)
sw.add_with_viewport (canvas)

win.show_all()
Gtk.main()
lthurlow/Network-Grapher
proj/external/matplotlib-1.2.1/lib/mpl_examples/user_interfaces/embedding_in_gtk3.py
Python
mit
834
__version__ = "2.0"
sbidoul/pip
tests/data/src/simplewheel-2.0/simplewheel/__init__.py
Python
mit
20
# Simple test suite for Cookie.py from test.test_support import verify, verbose, run_doctest import Cookie import warnings warnings.filterwarnings("ignore", ".* class is insecure.*", DeprecationWarning) # Currently this only tests SimpleCookie cases = [ ('chips=ahoy; vienna=finger', {'chips':'ahoy', 'vienna':'finger'}), ('keebler="E=mc2; L=\\"Loves\\"; fudge=\\012;";', {'keebler' : 'E=mc2; L="Loves"; fudge=\012;'}), # Check illegal cookies that have an '=' char in an unquoted value ('keebler=E=mc2;', {'keebler' : 'E=mc2'}) ] for data, dict in cases: C = Cookie.SimpleCookie() ; C.load(data) print repr(C) print str(C) items = dict.items() items.sort() for k, v in items: print ' ', k, repr( C[k].value ), repr(v) verify(C[k].value == v) print C[k] C = Cookie.SimpleCookie() C.load('Customer="WILE_E_COYOTE"; Version=1; Path=/acme') verify(C['Customer'].value == 'WILE_E_COYOTE') verify(C['Customer']['version'] == '1') verify(C['Customer']['path'] == '/acme') print C.output(['path']) print C.js_output() print C.js_output(['path']) # Try cookie with quoted meta-data C = Cookie.SimpleCookie() C.load('Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"') verify(C['Customer'].value == 'WILE_E_COYOTE') verify(C['Customer']['version'] == '1') verify(C['Customer']['path'] == '/acme') print "If anything blows up after this line, it's from Cookie's doctest." run_doctest(Cookie)
trivoldus28/pulsarch-verilog
tools/local/bas-release/bas,3.9/lib/python/lib/python2.3/test/test_cookie.py
Python
gpl-2.0
1,516
#!/usr/bin/python # coding: utf-8 -*- # (c) 2017, Wayne Witzel III <wayne@riotousliving.com> # # This module is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this software. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: tower_job_launch author: "Wayne Witzel III (@wwitzel3)" version_added: "2.3" short_description: Launch an Ansible Job. description: - Launch an Ansible Tower jobs. See U(https://www.ansible.com/tower) for an overview. options: job_template: description: - Name of the job_template to use. required: True job_explanation: description: - Job explanation field. default: null job_type: description: - Job_type to use for the job, only used if prompt for job_type is set. choices: ["run", "check", "scan"] default: null inventory: description: - Inventory to use for the job, only used if prompt for inventory is set. default: null credential: description: - Credential to use for job, only used if prompt for credential is set. default: null extra_vars: description: - Extra_vars to use for the job_template. Use '@' for a file. default: null limit: description: - Limit to use for the job_template. default: null tags: description: - Specific tags to use for from playbook. default: null use_job_endpoint: description: - Disable launching jobs from job template. 
default: False extends_documentation_fragment: tower ''' EXAMPLES = ''' - name: Launch a job tower_job_launch: job_template: "My Job Template" register: job - name: Wait for job max 120s tower_job_wait: job_id: job.id timeout: 120 ''' RETURN = ''' id: description: job id of the newly launched job returned: success type: int sample: 86 status: description: status of newly launched job returned: success type: string sample: pending ''' from ansible.module_utils.basic import AnsibleModule try: import tower_cli import tower_cli.utils.exceptions as exc from tower_cli.conf import settings from ansible.module_utils.ansible_tower import ( tower_auth_config, tower_check_mode, tower_argument_spec, ) HAS_TOWER_CLI = True except ImportError: HAS_TOWER_CLI = False def main(): argument_spec = tower_argument_spec() argument_spec.update(dict( job_template=dict(required=True), job_type=dict(choices=['run', 'check', 'scan']), inventory=dict(), credential=dict(), limit=dict(), tags=dict(type='list'), extra_vars=dict(type='list'), )) module = AnsibleModule( argument_spec, supports_check_mode=True ) if not HAS_TOWER_CLI: module.fail_json(msg='ansible-tower-cli required for this module') json_output = {} tags = module.params.get('tags') tower_auth = tower_auth_config(module) with settings.runtime_values(**tower_auth): tower_check_mode(module) try: params = module.params.copy() if isinstance(tags, list): params['tags'] = ','.join(tags) job = tower_cli.get_resource('job') lookup_fields = ('job_template', 'inventory', 'credential') for field in lookup_fields: try: name = params.pop(field) result = tower_cli.get_resource(field).get(name=name) params[field] = result['id'] except exc.NotFound as excinfo: module.fail_json(msg='Unable to launch job, {0}/{1} was not found: {2}'.format(field, name, excinfo), changed=False) result = job.launch(no_input=True, **params) json_output['id'] = result['id'] json_output['status'] = result['status'] except (exc.ConnectionError, exc.BadRequest) as excinfo: module.fail_json(msg='Unable to launch job: {0}'.format(excinfo), changed=False) json_output['changed'] = result['changed'] module.exit_json(**json_output) if __name__ == '__main__': main()
HuaweiSwitch/ansible
lib/ansible/modules/web_infrastructure/ansible_tower/tower_job_launch.py
Python
gpl-3.0
4,901
"""Copyright 2008 Orbitz WorldWide Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.""" import calendar import hashlib import pytz from flask import request def is_pattern(s): return '*' in s or '?' in s or '[' in s or '{' in s class RequestParams(object): """Dict-like structure that allows accessing request params whatever their origin (json body, form body, request args).""" def __getitem__(self, key): if request.json and key in request.json: return request.json[key] if key in request.form: return request.form.getlist(key)[-1] if key in request.args: return request.args.getlist(key)[-1] raise KeyError def __contains__(self, key): try: self[key] return True except KeyError: return False def get(self, key, default=None): try: return self[key] except KeyError: return default def getlist(self, key): if request.json and key in request.json: value = self[key] if not isinstance(value, list): value = [value] return value if key in request.form: return request.form.getlist(key) return request.args.getlist(key) RequestParams = RequestParams() def hash_request(): keys = set() if request.json: keys.update(request.json.keys()) if request.form: keys.update(request.form.keys()) keys.update(request.args.keys()) params = u",".join([ u"{0}={1}".format(key, u"&".join(sorted(RequestParams.getlist(key)))) for key in sorted(keys) if not key.startswith('_') ]) md5 = hashlib.md5() md5.update(params.encode('utf-8')) return md5.hexdigest() def to_seconds(delta): return abs(delta.seconds + delta.days * 86400) def epoch(dt): """ Returns the epoch timestamp of a timezone-aware datetime object. """ return calendar.timegm(dt.astimezone(pytz.utc).timetuple())
absalon-james/graphite-api
graphite_api/utils.py
Python
apache-2.0
2,513
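Outside of a Flask request, the two time helpers at the bottom of the module are easy to exercise on their own. A small sketch with hand-picked illustrative values:

# Hypothetical values; epoch() needs a timezone-aware datetime.
from datetime import datetime, timedelta
import pytz

from graphite_api.utils import epoch, to_seconds

dt = datetime(2015, 1, 1, tzinfo=pytz.utc)
print(epoch(dt))                                   # 1420070400
print(to_seconds(timedelta(days=1, seconds=30)))   # 86430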
import unittest import random, sys, time, re sys.path.extend(['.','..','../..','py']) import h2o, h2o_cmd, h2o_browse as h2b, h2o_import as h2i, h2o_glm, h2o_util, h2o_rf class Basic(unittest.TestCase): def tearDown(self): h2o.check_sandbox_for_errors() @classmethod def setUpClass(cls): # assume we're at 0xdata with it's hdfs namenode h2o.init(java_heap_GB=14) @classmethod def tearDownClass(cls): h2o.tear_down_cloud() def test_parse_mnist_A_training(self): importFolderPath = "mnist" csvFilelist = [ ("mnist_training.csv.gz", 600), ("mnist_training.csv.gz", 600), ] trial = 0 allDelta = [] for (csvFilename, timeoutSecs) in csvFilelist: testKey2 = csvFilename + "_" + str(trial) + ".hex" start = time.time() parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=importFolderPath+"/"+csvFilename, hex_key=testKey2, timeoutSecs=timeoutSecs) elapsed = time.time() - start print "parse end on ", csvFilename, 'took', elapsed, 'seconds',\ "%d pct. of timeout" % ((elapsed*100)/timeoutSecs) def test_parse_mnist_B_testing(self): importFolderPath = "mnist" csvFilelist = [ ("mnist_testing.csv.gz", 600), ("mnist_testing.csv.gz", 600), ] trial = 0 allDelta = [] for (csvFilename, timeoutSecs) in csvFilelist: testKey2 = csvFilename + "_" + str(trial) + ".hex" start = time.time() parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=importFolderPath+"/"+csvFilename, hex_key=testKey2, timeoutSecs=timeoutSecs) elapsed = time.time() - start print "parse end on ", csvFilename, 'took', elapsed, 'seconds',\ "%d pct. of timeout" % ((elapsed*100)/timeoutSecs) if __name__ == '__main__': h2o.unit_main()
vbelakov/h2o
py/testdir_single_jvm/test_parse_mnist_fvec.py
Python
apache-2.0
2,023
# Copyright (C) 2014, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova import db from nova import exception from nova import objects from nova.objects import base from nova.objects import fields # TODO(berrange): Remove NovaObjectDictCompat class VirtualInterface(base.NovaPersistentObject, base.NovaObject, base.NovaObjectDictCompat): # Version 1.0: Initial version VERSION = '1.0' fields = { 'id': fields.IntegerField(), 'address': fields.StringField(nullable=True), 'network_id': fields.IntegerField(), 'instance_uuid': fields.UUIDField(), 'uuid': fields.UUIDField(), } @staticmethod def _from_db_object(context, vif, db_vif): for field in vif.fields: vif[field] = db_vif[field] vif._context = context vif.obj_reset_changes() return vif @base.remotable_classmethod def get_by_id(cls, context, vif_id): db_vif = db.virtual_interface_get(context, vif_id) if db_vif: return cls._from_db_object(context, cls(), db_vif) @base.remotable_classmethod def get_by_uuid(cls, context, vif_uuid): db_vif = db.virtual_interface_get_by_uuid(context, vif_uuid) if db_vif: return cls._from_db_object(context, cls(), db_vif) @base.remotable_classmethod def get_by_address(cls, context, address): db_vif = db.virtual_interface_get_by_address(context, address) if db_vif: return cls._from_db_object(context, cls(), db_vif) @base.remotable_classmethod def get_by_instance_and_network(cls, context, instance_uuid, network_id): db_vif = db.virtual_interface_get_by_instance_and_network(context, instance_uuid, network_id) if db_vif: return cls._from_db_object(context, cls(), db_vif) @base.remotable def create(self): if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason='already created') updates = self.obj_get_changes() db_vif = db.virtual_interface_create(self._context, updates) self._from_db_object(self._context, self, db_vif) @base.remotable_classmethod def delete_by_instance_uuid(cls, context, instance_uuid): db.virtual_interface_delete_by_instance(context, instance_uuid) class VirtualInterfaceList(base.ObjectListBase, base.NovaObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'objects': fields.ListOfObjectsField('VirtualInterface'), } child_versions = { '1.0': '1.0', } @base.remotable_classmethod def get_all(cls, context): db_vifs = db.virtual_interface_get_all(context) return base.obj_make_list(context, cls(context), objects.VirtualInterface, db_vifs) @base.remotable_classmethod def get_by_instance_uuid(cls, context, instance_uuid, use_slave=False): db_vifs = db.virtual_interface_get_by_instance(context, instance_uuid, use_slave=use_slave) return base.obj_make_list(context, cls(context), objects.VirtualInterface, db_vifs)
joker946/nova
nova/objects/virtual_interface.py
Python
apache-2.0
3,823
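A hedged sketch of how the list class above might be consumed elsewhere in nova; it only relies on the classmethods defined in this module, while the request context and instance UUID are assumed to exist already:

# Hypothetical caller; `ctxt` is an existing RequestContext and
# `instance_uuid` a valid instance UUID string.
from nova import objects

vifs = objects.VirtualInterfaceList.get_by_instance_uuid(ctxt, instance_uuid)
for vif in vifs:
    print('%s -> network %s' % (vif.address, vif.network_id))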
#!/usr/bin/env python """Grover's quantum search algorithm example.""" from sympy import pprint from sympy.physics.quantum import qapply from sympy.physics.quantum.qubit import IntQubit from sympy.physics.quantum.grover import (OracleGate, superposition_basis, WGate, grover_iteration) def demo_vgate_app(v): for i in range(2**v.nqubits): print('qapply(v*IntQubit(%i, %r))' % (i, v.nqubits)) pprint(qapply(v*IntQubit(i, nqubits=v.nqubits))) qapply(v*IntQubit(i, nqubits=v.nqubits)) def black_box(qubits): return True if qubits == IntQubit(1, nqubits=qubits.nqubits) else False def main(): print() print('Demonstration of Grover\'s Algorithm') print('The OracleGate or V Gate carries the unknown function f(x)') print('> V|x> = ((-1)^f(x))|x> where f(x) = 1 when x = a (True in our case)') print('> and 0 (False in our case) otherwise') print() nqubits = 2 print('nqubits = ', nqubits) v = OracleGate(nqubits, black_box) print('Oracle or v = OracleGate(%r, black_box)' % nqubits) print() psi = superposition_basis(nqubits) print('psi:') pprint(psi) demo_vgate_app(v) print('qapply(v*psi)') pprint(qapply(v*psi)) print() w = WGate(nqubits) print('WGate or w = WGate(%r)' % nqubits) print('On a 2 Qubit system like psi, 1 iteration is enough to yield |1>') print('qapply(w*v*psi)') pprint(qapply(w*v*psi)) print() nqubits = 3 print('On a 3 Qubit system, it requires 2 iterations to achieve') print('|1> with high enough probability') psi = superposition_basis(nqubits) print('psi:') pprint(psi) v = OracleGate(nqubits, black_box) print('Oracle or v = OracleGate(%r, black_box)' % nqubits) print() print('iter1 = grover.grover_iteration(psi, v)') iter1 = qapply(grover_iteration(psi, v)) pprint(iter1) print() print('iter2 = grover.grover_iteration(iter1, v)') iter2 = qapply(grover_iteration(iter1, v)) pprint(iter2) print() if __name__ == "__main__": main()
kaushik94/sympy
examples/advanced/grover_example.py
Python
bsd-3-clause
2,081
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import re
import subprocess
import sys

import six

_RE_INFO_USER_EMAIL = r'Logged in as (?P<email>\S+)\.$'


class AuthorizationError(Exception):
  pass


def _RunCommand(command):
  try:
    return six.ensure_str(
        subprocess.check_output(['luci-auth', command],
                                stderr=subprocess.STDOUT,
                                universal_newlines=True))
  except subprocess.CalledProcessError as exc:
    raise AuthorizationError(exc.output.strip())


def CheckLoggedIn():
  """Check that the user is currently logged in.

  Otherwise sys.exit immediately with the error message from luci-auth
  instructing the user how to log in.
  """
  try:
    GetAccessToken()
  except AuthorizationError as exc:
    sys.exit(str(exc))


def GetAccessToken():
  """Get an access token to make requests on behalf of the logged in user."""
  return _RunCommand('token').rstrip()


def GetUserEmail():
  """Get the email address of the currently logged in user."""
  output = _RunCommand('info')
  m = re.match(_RE_INFO_USER_EMAIL, output, re.MULTILINE)
  assert m, 'Failed to parse luci-auth info output.'
  return m.group('email')
scheib/chromium
tools/perf/core/services/luci_auth.py
Python
bsd-3-clause
1,316
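A brief sketch of how a script might combine the helpers above. It assumes the luci-auth binary is installed and the user is already logged in; the import path and the Authorization header are illustrative assumptions:

# Hypothetical consumer of the helpers above.
from core.services import luci_auth

luci_auth.CheckLoggedIn()            # exits with luci-auth's message if not logged in
token = luci_auth.GetAccessToken()
headers = {'Authorization': 'Bearer %s' % token}
print('Authenticated as %s' % luci_auth.GetUserEmail())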
import sys from functools import update_wrapper from future.utils import iteritems from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.db.models.base import ModelBase from django.utils import six from django.views.decorators.cache import never_cache from django.template.engine import Engine import inspect if six.PY2 and sys.getdefaultencoding() == 'ascii': import imp imp.reload(sys) sys.setdefaultencoding("utf-8") class AlreadyRegistered(Exception): pass class NotRegistered(Exception): pass class MergeAdminMetaclass(type): def __new__(cls, name, bases, attrs): return type.__new__(cls, str(name), bases, attrs) class AdminSite(object): def __init__(self, name='xadmin'): self.name = name self.app_name = 'xadmin' self._registry = {} # model_class class -> admin_class class self._registry_avs = {} # admin_view_class class -> admin_class class self._registry_settings = {} # settings name -> admin_class class self._registry_views = [] # url instance contains (path, admin_view class, name) self._registry_modelviews = [] # url instance contains (path, admin_view class, name) self._registry_plugins = {} # view_class class -> plugin_class class self._admin_view_cache = {} # self.check_dependencies() self.model_admins_order = 0 def copy_registry(self): import copy return { 'models': copy.copy(self._registry), 'avs': copy.copy(self._registry_avs), 'views': copy.copy(self._registry_views), 'settings': copy.copy(self._registry_settings), 'modelviews': copy.copy(self._registry_modelviews), 'plugins': copy.copy(self._registry_plugins), } def restore_registry(self, data): self._registry = data['models'] self._registry_avs = data['avs'] self._registry_views = data['views'] self._registry_settings = data['settings'] self._registry_modelviews = data['modelviews'] self._registry_plugins = data['plugins'] def register_modelview(self, path, admin_view_class, name): from xadmin.views.base import BaseAdminView if issubclass(admin_view_class, BaseAdminView): self._registry_modelviews.append((path, admin_view_class, name)) else: raise ImproperlyConfigured(u'The registered view class %s isn\'t subclass of %s' % (admin_view_class.__name__, BaseAdminView.__name__)) def register_view(self, path, admin_view_class, name): self._registry_views.append((path, admin_view_class, name)) def register_plugin(self, plugin_class, admin_view_class): from xadmin.views.base import BaseAdminPlugin if issubclass(plugin_class, BaseAdminPlugin): self._registry_plugins.setdefault( admin_view_class, []).append(plugin_class) else: raise ImproperlyConfigured(u'The registered plugin class %s isn\'t subclass of %s' % (plugin_class.__name__, BaseAdminPlugin.__name__)) def register_settings(self, name, admin_class): self._registry_settings[name.lower()] = admin_class def register(self, model_or_iterable, admin_class=object, **options): from xadmin.views.base import BaseAdminView if isinstance(model_or_iterable, ModelBase) or issubclass(model_or_iterable, BaseAdminView): model_or_iterable = [model_or_iterable] for model in model_or_iterable: if isinstance(model, ModelBase): if model._meta.abstract: raise ImproperlyConfigured('The model %s is abstract, so it ' 'cannot be registered with admin.' % model.__name__) if model in self._registry: raise AlreadyRegistered( 'The model %s is already registered' % model.__name__) # If we got **options then dynamically construct a subclass of # admin_class with those **options. 
if options: # For reasons I don't quite understand, without a __module__ # the created class appears to "live" in the wrong place, # which causes issues later on. options['__module__'] = __name__ admin_class = type(str("%s%sAdmin" % (model._meta.app_label, model._meta.model_name)), (admin_class,), options or {}) admin_class.model = model admin_class.order = self.model_admins_order self.model_admins_order += 1 self._registry[model] = admin_class else: if model in self._registry_avs: raise AlreadyRegistered('The admin_view_class %s is already registered' % model.__name__) if options: options['__module__'] = __name__ admin_class = type(str( "%sAdmin" % model.__name__), (admin_class,), options) # Instantiate the admin class to save in the registry self._registry_avs[model] = admin_class def unregister(self, model_or_iterable): """ Unregisters the given model(s). If a model isn't already registered, this will raise NotRegistered. """ from xadmin.views.base import BaseAdminView if isinstance(model_or_iterable, (ModelBase, BaseAdminView)): model_or_iterable = [model_or_iterable] for model in model_or_iterable: if isinstance(model, ModelBase): if model not in self._registry: raise NotRegistered( 'The model %s is not registered' % model.__name__) del self._registry[model] else: if model not in self._registry_avs: raise NotRegistered('The admin_view_class %s is not registered' % model.__name__) del self._registry_avs[model] def set_loginview(self, login_view): self.login_view = login_view def has_permission(self, request): """ Returns True if the given HttpRequest has permission to view *at least one* page in the admin site. """ return request.user.is_active and request.user.is_staff def check_dependencies(self): """ Check that all things needed to run the admin have been correctly installed. The default implementation checks that LogEntry, ContentType and the auth context processor are installed. """ from django.contrib.contenttypes.models import ContentType if not ContentType._meta.installed: raise ImproperlyConfigured("Put 'django.contrib.contenttypes' in " "your INSTALLED_APPS setting in order to use the admin application.") default_template_engine = Engine.get_default() if not ('django.contrib.auth.context_processors.auth' in default_template_engine.context_processors or 'django.core.context_processors.auth' in default_template_engine.context_processors): raise ImproperlyConfigured("Put 'django.contrib.auth.context_processors.auth' " "in your TEMPLATE_CONTEXT_PROCESSORS setting in order to use the admin application.") def admin_view(self, view, cacheable=False): """ Decorator to create an admin view attached to this ``AdminSite``. This wraps the view and provides permission checking by calling ``self.has_permission``. You'll want to use this from within ``AdminSite.get_urls()``: class MyAdminSite(AdminSite): def get_urls(self): from django.conf.urls import url urls = super(MyAdminSite, self).get_urls() urls += [ url(r'^my_view/$', self.admin_view(some_view)) ] return urls By default, admin_views are marked non-cacheable using the ``never_cache`` decorator. If the view can be safely cached, set cacheable=True. 
""" def inner(request, *args, **kwargs): if not self.has_permission(request) and getattr(view, 'need_site_permission', True): return self.create_admin_view(self.login_view)(request, *args, **kwargs) return view(request, *args, **kwargs) if not cacheable: inner = never_cache(inner) return update_wrapper(inner, view) def _get_merge_attrs(self, option_class, plugin_class): return dict([(name, getattr(option_class, name)) for name in dir(option_class) if name[0] != '_' and not callable(getattr(option_class, name)) and hasattr(plugin_class, name)]) def _get_settings_class(self, admin_view_class): name = admin_view_class.__name__.lower() if name in self._registry_settings: return self._registry_settings[name] elif name.endswith('admin') and name[0:-5] in self._registry_settings: return self._registry_settings[name[0:-5]] elif name.endswith('adminview') and name[0:-9] in self._registry_settings: return self._registry_settings[name[0:-9]] return None def _create_plugin(self, option_classes): def merge_class(plugin_class): if option_classes: attrs = {} bases = [plugin_class] for oc in option_classes: attrs.update(self._get_merge_attrs(oc, plugin_class)) meta_class = getattr(oc, plugin_class.__name__, getattr(oc, plugin_class.__name__.replace('Plugin', ''), None)) if meta_class: bases.insert(0, meta_class) if attrs: plugin_class = MergeAdminMetaclass( '%s%s' % (''.join([oc.__name__ for oc in option_classes]), plugin_class.__name__), tuple(bases), attrs) return plugin_class return merge_class def get_plugins(self, admin_view_class, *option_classes): from xadmin.views import BaseAdminView plugins = [] opts = [oc for oc in option_classes if oc] for klass in admin_view_class.mro(): if klass == BaseAdminView or issubclass(klass, BaseAdminView): merge_opts = [] reg_class = self._registry_avs.get(klass) if reg_class: merge_opts.append(reg_class) settings_class = self._get_settings_class(klass) if settings_class: merge_opts.append(settings_class) merge_opts.extend(opts) ps = self._registry_plugins.get(klass, []) plugins.extend(map(self._create_plugin( merge_opts), ps) if merge_opts else ps) return plugins def get_view_class(self, view_class, option_class=None, **opts): merges = [option_class] if option_class else [] for klass in view_class.mro(): reg_class = self._registry_avs.get(klass) if reg_class: merges.append(reg_class) settings_class = self._get_settings_class(klass) if settings_class: merges.append(settings_class) merges.append(klass) new_class_name = ''.join([c.__name__ for c in merges]) if new_class_name not in self._admin_view_cache: plugins = self.get_plugins(view_class, option_class) self._admin_view_cache[new_class_name] = MergeAdminMetaclass( new_class_name, tuple(merges), dict({'plugin_classes': plugins, 'admin_site': self}, **opts)) return self._admin_view_cache[new_class_name] def create_admin_view(self, admin_view_class): return self.get_view_class(admin_view_class).as_view() def create_model_admin_view(self, admin_view_class, model, option_class): return self.get_view_class(admin_view_class, option_class).as_view() def get_urls(self): from django.conf.urls import url, include from xadmin.views.base import BaseAdminView if settings.DEBUG: self.check_dependencies() def wrap(view, cacheable=False): def wrapper(*args, **kwargs): return self.admin_view(view, cacheable)(*args, **kwargs) return update_wrapper(wrapper, view) # Admin-site-wide views. 
urlpatterns = [ url(r'^jsi18n/$', wrap(self.i18n_javascript, cacheable=True), name='jsi18n') ] # Registed admin views # inspect[isclass]: Only checks if the object is a class. With it lets you create an custom view that # inherits from multiple views and have more of a metaclass. urlpatterns += [ url( path, wrap(self.create_admin_view(clz_or_func)) if inspect.isclass(clz_or_func) and issubclass(clz_or_func, BaseAdminView) else include(clz_or_func(self)), name=name ) for path, clz_or_func, name in self._registry_views ] # Add in each model's views. for model, admin_class in iteritems(self._registry): view_urls = [ url( path, wrap(self.create_model_admin_view(clz, model, admin_class)), name=name % (model._meta.app_label, model._meta.model_name) ) for path, clz, name in self._registry_modelviews ] urlpatterns += [ url(r'^%s/%s/' % (model._meta.app_label, model._meta.model_name), include(view_urls)) ] return urlpatterns @property def urls(self): return self.get_urls(), self.name, self.app_name def i18n_javascript(self, request): """ Displays the i18n JavaScript that the Django admin requires. This takes into account the USE_I18N setting. If it's set to False, the generated JavaScript will be leaner and faster. """ if settings.USE_I18N: from django.views.i18n import javascript_catalog else: from django.views.i18n import null_javascript_catalog as javascript_catalog return javascript_catalog(request, packages=['django.conf', 'xadmin']) # This global object represents the default admin site, for the common case. # You can instantiate AdminSite in your own code to create a custom admin site. site = AdminSite() def register(models, **kwargs): def _model_admin_wrapper(admin_class): site.register(models, admin_class) return _model_admin_wrapper
sshwsfc/django-xadmin
xadmin/sites.py
Python
bsd-3-clause
15,155
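A minimal sketch of registering a Django model with the AdminSite defined above. The Article model, its fields, and the myapp package are hypothetical; only the site object and register() call come from the module itself.

# Hypothetical example: Article and its fields are illustrative only.
from xadmin.sites import site
from myapp.models import Article  # hypothetical Django app/model

class ArticleAdmin(object):
    # Plain option class; AdminSite.register() folds these attributes into a
    # dynamically built admin class for the model.
    list_display = ('title', 'created')
    search_fields = ('title',)

site.register(Article, ArticleAdmin)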
# ---------------------------------------------------------------------------- # pyglet # Copyright (c) 2006-2008 Alex Holkner # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # * Neither the name of pyglet nor the names of its # contributors may be used to endorse or promote products # derived from this software without specific prior written # permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # ---------------------------------------------------------------------------- # png.py - PNG encoder in pure Python # Copyright (C) 2006 Johann C. Rocholl <johann@browsershots.org> # <ah> Modifications for pyglet by Alex Holkner <alex.holkner@gmail.com> # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # # Contributors (alphabetical): # Nicko van Someren <nicko@nicko.org> # # Changelog (recent first): # 2006-06-17 Nicko: Reworked into a class, faster interlacing. # 2006-06-17 Johann: Very simple prototype PNG decoder. # 2006-06-17 Nicko: Test suite with various image generators. # 2006-06-17 Nicko: Alpha-channel, grey-scale, 16-bit/plane support. # 2006-06-15 Johann: Scanline iterator interface for large input files. # 2006-06-09 Johann: Very simple prototype PNG encoder. 
""" Pure Python PNG Reader/Writer This is an implementation of a subset of the PNG specification at http://www.w3.org/TR/2003/REC-PNG-20031110 in pure Python. It reads and writes PNG files with 8/16/24/32/48/64 bits per pixel (greyscale, RGB, RGBA, with 8 or 16 bits per layer), with a number of options. For help, type "import png; help(png)" in your python interpreter. This file can also be used as a command-line utility to convert PNM files to PNG. The interface is similar to that of the pnmtopng program from the netpbm package. Type "python png.py --help" at the shell prompt for usage and a list of options. """ __revision__ = '$Rev$' __date__ = '$Date$' __author__ = '$Author$' import sys import zlib import struct import math from array import array from pyglet.compat import asbytes _adam7 = ((0, 0, 8, 8), (4, 0, 8, 8), (0, 4, 4, 8), (2, 0, 4, 4), (0, 2, 2, 4), (1, 0, 2, 2), (0, 1, 1, 2)) def interleave_planes(ipixels, apixels, ipsize, apsize): """ Interleave color planes, e.g. RGB + A = RGBA. Return an array of pixels consisting of the ipsize bytes of data from each pixel in ipixels followed by the apsize bytes of data from each pixel in apixels, for an image of size width x height. """ itotal = len(ipixels) atotal = len(apixels) newtotal = itotal + atotal newpsize = ipsize + apsize # Set up the output buffer out = array('B') # It's annoying that there is no cheap way to set the array size :-( out.extend(ipixels) out.extend(apixels) # Interleave in the pixel data for i in range(ipsize): out[i:newtotal:newpsize] = ipixels[i:itotal:ipsize] for i in range(apsize): out[i+ipsize:newtotal:newpsize] = apixels[i:atotal:apsize] return out class Error(Exception): pass class Writer: """ PNG encoder in pure Python. """ def __init__(self, width, height, transparent=None, background=None, gamma=None, greyscale=False, has_alpha=False, bytes_per_sample=1, compression=None, interlaced=False, chunk_limit=2**20): """ Create a PNG encoder object. Arguments: width, height - size of the image in pixels transparent - create a tRNS chunk background - create a bKGD chunk gamma - create a gAMA chunk greyscale - input data is greyscale, not RGB has_alpha - input data has alpha channel (RGBA) bytes_per_sample - 8-bit or 16-bit input data compression - zlib compression level (1-9) chunk_limit - write multiple IDAT chunks to save memory If specified, the transparent and background parameters must be a tuple with three integer values for red, green, blue, or a simple integer (or singleton tuple) for a greyscale image. If specified, the gamma parameter must be a float value. 
""" if width <= 0 or height <= 0: raise ValueError("width and height must be greater than zero") if has_alpha and transparent is not None: raise ValueError( "transparent color not allowed with alpha channel") if bytes_per_sample < 1 or bytes_per_sample > 2: raise ValueError("bytes per sample must be 1 or 2") if transparent is not None: if greyscale: if type(transparent) is not int: raise ValueError( "transparent color for greyscale must be integer") else: if not (len(transparent) == 3 and type(transparent[0]) is int and type(transparent[1]) is int and type(transparent[2]) is int): raise ValueError( "transparent color must be a triple of integers") if background is not None: if greyscale: if type(background) is not int: raise ValueError( "background color for greyscale must be integer") else: if not (len(background) == 3 and type(background[0]) is int and type(background[1]) is int and type(background[2]) is int): raise ValueError( "background color must be a triple of integers") self.width = width self.height = height self.transparent = transparent self.background = background self.gamma = gamma self.greyscale = greyscale self.has_alpha = has_alpha self.bytes_per_sample = bytes_per_sample self.compression = compression self.chunk_limit = chunk_limit self.interlaced = interlaced if self.greyscale: self.color_depth = 1 if self.has_alpha: self.color_type = 4 self.psize = self.bytes_per_sample * 2 else: self.color_type = 0 self.psize = self.bytes_per_sample else: self.color_depth = 3 if self.has_alpha: self.color_type = 6 self.psize = self.bytes_per_sample * 4 else: self.color_type = 2 self.psize = self.bytes_per_sample * 3 def write_chunk(self, outfile, tag, data): """ Write a PNG chunk to the output file, including length and checksum. """ # http://www.w3.org/TR/PNG/#5Chunk-layout outfile.write(struct.pack("!I", len(data))) outfile.write(tag) outfile.write(data) checksum = zlib.crc32(tag) checksum = zlib.crc32(data, checksum) # <ah> Avoid DeprecationWarning: struct integer overflow masking # with Python2.5/Windows. checksum = checksum & 0xffffffff outfile.write(struct.pack("!I", checksum)) def write(self, outfile, scanlines): """ Write a PNG image to the output file. 
""" # http://www.w3.org/TR/PNG/#5PNG-file-signature outfile.write(struct.pack("8B", 137, 80, 78, 71, 13, 10, 26, 10)) # http://www.w3.org/TR/PNG/#11IHDR if self.interlaced: interlaced = 1 else: interlaced = 0 self.write_chunk(outfile, 'IHDR', struct.pack("!2I5B", self.width, self.height, self.bytes_per_sample * 8, self.color_type, 0, 0, interlaced)) # http://www.w3.org/TR/PNG/#11tRNS if self.transparent is not None: if self.greyscale: self.write_chunk(outfile, 'tRNS', struct.pack("!1H", *self.transparent)) else: self.write_chunk(outfile, 'tRNS', struct.pack("!3H", *self.transparent)) # http://www.w3.org/TR/PNG/#11bKGD if self.background is not None: if self.greyscale: self.write_chunk(outfile, 'bKGD', struct.pack("!1H", *self.background)) else: self.write_chunk(outfile, 'bKGD', struct.pack("!3H", *self.background)) # http://www.w3.org/TR/PNG/#11gAMA if self.gamma is not None: self.write_chunk(outfile, 'gAMA', struct.pack("!L", int(self.gamma * 100000))) # http://www.w3.org/TR/PNG/#11IDAT if self.compression is not None: compressor = zlib.compressobj(self.compression) else: compressor = zlib.compressobj() data = array('B') for scanline in scanlines: data.append(0) data.extend(scanline) if len(data) > self.chunk_limit: compressed = compressor.compress(data.tostring()) if len(compressed): # print >> sys.stderr, len(data), len(compressed) self.write_chunk(outfile, 'IDAT', compressed) data = array('B') if len(data): compressed = compressor.compress(data.tostring()) else: compressed = '' flushed = compressor.flush() if len(compressed) or len(flushed): # print >> sys.stderr, len(data), len(compressed), len(flushed) self.write_chunk(outfile, 'IDAT', compressed + flushed) # http://www.w3.org/TR/PNG/#11IEND self.write_chunk(outfile, 'IEND', '') def write_array(self, outfile, pixels): """ Encode a pixel array to PNG and write output file. """ if self.interlaced: self.write(outfile, self.array_scanlines_interlace(pixels)) else: self.write(outfile, self.array_scanlines(pixels)) def convert_ppm(self, ppmfile, outfile): """ Convert a PPM file containing raw pixel data into a PNG file with the parameters set in the writer object. """ if self.interlaced: pixels = array('B') pixels.fromfile(ppmfile, self.bytes_per_sample * self.color_depth * self.width * self.height) self.write(outfile, self.array_scanlines_interlace(pixels)) else: self.write(outfile, self.file_scanlines(ppmfile)) def convert_ppm_and_pgm(self, ppmfile, pgmfile, outfile): """ Convert a PPM and PGM file containing raw pixel data into a PNG outfile with the parameters set in the writer object. """ pixels = array('B') pixels.fromfile(ppmfile, self.bytes_per_sample * self.color_depth * self.width * self.height) apixels = array('B') apixels.fromfile(pgmfile, self.bytes_per_sample * self.width * self.height) pixels = interleave_planes(pixels, apixels, self.bytes_per_sample * self.color_depth, self.bytes_per_sample) if self.interlaced: self.write(outfile, self.array_scanlines_interlace(pixels)) else: self.write(outfile, self.array_scanlines(pixels)) def file_scanlines(self, infile): """ Generator for scanlines from an input file. """ row_bytes = self.psize * self.width for y in range(self.height): scanline = array('B') scanline.fromfile(infile, row_bytes) yield scanline def array_scanlines(self, pixels): """ Generator for scanlines from an array. 
""" row_bytes = self.width * self.psize stop = 0 for y in range(self.height): start = stop stop = start + row_bytes yield pixels[start:stop] def old_array_scanlines_interlace(self, pixels): """ Generator for interlaced scanlines from an array. http://www.w3.org/TR/PNG/#8InterlaceMethods """ row_bytes = self.psize * self.width for xstart, ystart, xstep, ystep in _adam7: for y in range(ystart, self.height, ystep): if xstart < self.width: if xstep == 1: offset = y*row_bytes yield pixels[offset:offset+row_bytes] else: row = array('B') offset = y*row_bytes + xstart* self.psize skip = self.psize * xstep for x in range(xstart, self.width, xstep): row.extend(pixels[offset:offset + self.psize]) offset += skip yield row def array_scanlines_interlace(self, pixels): """ Generator for interlaced scanlines from an array. http://www.w3.org/TR/PNG/#8InterlaceMethods """ row_bytes = self.psize * self.width for xstart, ystart, xstep, ystep in _adam7: for y in range(ystart, self.height, ystep): if xstart >= self.width: continue if xstep == 1: offset = y * row_bytes yield pixels[offset:offset+row_bytes] else: row = array('B') # Note we want the ceiling of (self.width - xstart) / xtep row_len = self.psize * ( (self.width - xstart + xstep - 1) / xstep) # There's no easier way to set the length of an array row.extend(pixels[0:row_len]) offset = y * row_bytes + xstart * self.psize end_offset = (y+1) * row_bytes skip = self.psize * xstep for i in range(self.psize): row[i:row_len:self.psize] = \ pixels[offset+i:end_offset:skip] yield row class _readable: """ A simple file-like interface for strings and arrays. """ def __init__(self, buf): self.buf = buf self.offset = 0 def read(self, n): r = self.buf[offset:offset+n] if isinstance(r, array): r = r.tostring() self.offset += n return r class Reader: """ PNG decoder in pure Python. """ def __init__(self, _guess=None, **kw): """ Create a PNG decoder object. The constructor expects exactly one keyword argument. If you supply a positional argument instead, it will guess the input type. You can choose among the following arguments: filename - name of PNG input file file - object with a read() method pixels - array or string with PNG data """ if ((_guess is not None and len(kw) != 0) or (_guess is None and len(kw) != 1)): raise TypeError("Reader() takes exactly 1 argument") if _guess is not None: if isinstance(_guess, array): kw["pixels"] = _guess elif isinstance(_guess, str): kw["filename"] = _guess elif isinstance(_guess, file): kw["file"] = _guess if "filename" in kw: self.file = file(kw["filename"]) elif "file" in kw: self.file = kw["file"] elif "pixels" in kw: self.file = _readable(kw["pixels"]) else: raise TypeError("expecting filename, file or pixels array") def read_chunk(self): """ Read a PNG chunk from the input file, return tag name and data. """ # http://www.w3.org/TR/PNG/#5Chunk-layout try: data_bytes, tag = struct.unpack('!I4s', self.file.read(8)) except struct.error: raise ValueError('Chunk too short for header') data = self.file.read(data_bytes) if len(data) != data_bytes: raise ValueError('Chunk %s too short for required %i data octets' % (tag, data_bytes)) checksum = self.file.read(4) if len(checksum) != 4: raise ValueError('Chunk %s too short for checksum', tag) verify = zlib.crc32(tag) verify = zlib.crc32(data, verify) # Whether the output from zlib.crc32 is signed or not varies # according to hideous implementation details, see # http://bugs.python.org/issue1202 . # We coerce it to be positive here (in a way which works on # Python 2.3 and older). 
verify &= 2**32 - 1 verify = struct.pack('!I', verify) if checksum != verify: # print repr(checksum) (a,) = struct.unpack('!I', checksum) (b,) = struct.unpack('!I', verify) raise ValueError("Checksum error in %s chunk: 0x%X != 0x%X" % (tag, a, b)) return tag, data def _reconstruct_sub(self, offset, xstep, ystep): """ Reverse sub filter. """ pixels = self.pixels a_offset = offset offset += self.psize * xstep if xstep == 1: for index in range(self.psize, self.row_bytes): x = pixels[offset] a = pixels[a_offset] pixels[offset] = (x + a) & 0xff offset += 1 a_offset += 1 else: byte_step = self.psize * xstep for index in range(byte_step, self.row_bytes, byte_step): for i in range(self.psize): x = pixels[offset + i] a = pixels[a_offset + i] pixels[offset + i] = (x + a) & 0xff offset += self.psize * xstep a_offset += self.psize * xstep def _reconstruct_up(self, offset, xstep, ystep): """ Reverse up filter. """ pixels = self.pixels b_offset = offset - (self.row_bytes * ystep) if xstep == 1: for index in range(self.row_bytes): x = pixels[offset] b = pixels[b_offset] pixels[offset] = (x + b) & 0xff offset += 1 b_offset += 1 else: for index in range(0, self.row_bytes, xstep * self.psize): for i in range(self.psize): x = pixels[offset + i] b = pixels[b_offset + i] pixels[offset + i] = (x + b) & 0xff offset += self.psize * xstep b_offset += self.psize * xstep def _reconstruct_average(self, offset, xstep, ystep): """ Reverse average filter. """ pixels = self.pixels a_offset = offset - (self.psize * xstep) b_offset = offset - (self.row_bytes * ystep) if xstep == 1: for index in range(self.row_bytes): x = pixels[offset] if index < self.psize: a = 0 else: a = pixels[a_offset] if b_offset < 0: b = 0 else: b = pixels[b_offset] pixels[offset] = (x + ((a + b) >> 1)) & 0xff offset += 1 a_offset += 1 b_offset += 1 else: for index in range(0, self.row_bytes, self.psize * xstep): for i in range(self.psize): x = pixels[offset+i] if index < self.psize: a = 0 else: a = pixels[a_offset + i] if b_offset < 0: b = 0 else: b = pixels[b_offset + i] pixels[offset + i] = (x + ((a + b) >> 1)) & 0xff offset += self.psize * xstep a_offset += self.psize * xstep b_offset += self.psize * xstep def _reconstruct_paeth(self, offset, xstep, ystep): """ Reverse Paeth filter. """ pixels = self.pixels a_offset = offset - (self.psize * xstep) b_offset = offset - (self.row_bytes * ystep) c_offset = b_offset - (self.psize * xstep) # There's enough inside this loop that it's probably not worth # optimising for xstep == 1 for index in range(0, self.row_bytes, self.psize * xstep): for i in range(self.psize): x = pixels[offset+i] if index < self.psize: a = c = 0 b = pixels[b_offset+i] else: a = pixels[a_offset+i] b = pixels[b_offset+i] c = pixels[c_offset+i] p = a + b - c pa = abs(p - a) pb = abs(p - b) pc = abs(p - c) if pa <= pb and pa <= pc: pr = a elif pb <= pc: pr = b else: pr = c pixels[offset+i] = (x + pr) & 0xff offset += self.psize * xstep a_offset += self.psize * xstep b_offset += self.psize * xstep c_offset += self.psize * xstep # N.B. PNG files with 'up', 'average' or 'paeth' filters on the # first line of a pass are legal. The code above for 'average' # deals with this case explicitly. For up we map to the null # filter and for paeth we map to the sub filter. 
def reconstruct_line(self, filter_type, first_line, offset, xstep, ystep): # print >> sys.stderr, "Filter type %s, first_line=%s" % ( # filter_type, first_line) filter_type += (first_line << 8) if filter_type == 1 or filter_type == 0x101 or filter_type == 0x104: self._reconstruct_sub(offset, xstep, ystep) elif filter_type == 2: self._reconstruct_up(offset, xstep, ystep) elif filter_type == 3 or filter_type == 0x103: self._reconstruct_average(offset, xstep, ystep) elif filter_type == 4: self._reconstruct_paeth(offset, xstep, ystep) return def deinterlace(self, scanlines): # print >> sys.stderr, ("Reading interlaced, w=%s, r=%s, planes=%s," + # " bpp=%s") % (self.width, self.height, self.planes, self.bps) a = array('B') self.pixels = a # Make the array big enough temp = scanlines[0:self.width*self.height*self.psize] a.extend(temp) source_offset = 0 for xstart, ystart, xstep, ystep in _adam7: # print >> sys.stderr, "Adam7: start=%s,%s step=%s,%s" % ( # xstart, ystart, xstep, ystep) filter_first_line = 1 for y in range(ystart, self.height, ystep): if xstart >= self.width: continue filter_type = scanlines[source_offset] source_offset += 1 if xstep == 1: offset = y * self.row_bytes a[offset:offset+self.row_bytes] = \ scanlines[source_offset:source_offset + self.row_bytes] source_offset += self.row_bytes else: # Note we want the ceiling of (width - xstart) / xtep row_len = self.psize * ( (self.width - xstart + xstep - 1) / xstep) offset = y * self.row_bytes + xstart * self.psize end_offset = (y+1) * self.row_bytes skip = self.psize * xstep for i in range(self.psize): a[offset+i:end_offset:skip] = \ scanlines[source_offset + i: source_offset + row_len: self.psize] source_offset += row_len if filter_type: self.reconstruct_line(filter_type, filter_first_line, offset, xstep, ystep) filter_first_line = 0 return a def read_flat(self, scanlines): a = array('B') self.pixels = a offset = 0 source_offset = 0 filter_first_line = 1 for y in range(self.height): filter_type = scanlines[source_offset] source_offset += 1 a.extend(scanlines[source_offset: source_offset + self.row_bytes]) if filter_type: self.reconstruct_line(filter_type, filter_first_line, offset, 1, 1) filter_first_line = 0 offset += self.row_bytes source_offset += self.row_bytes return a def read(self): """ Read a simple PNG file, return width, height, pixels and image metadata This function is a very early prototype with limited flexibility and excessive use of memory. 
""" signature = self.file.read(8) if (signature != struct.pack("8B", 137, 80, 78, 71, 13, 10, 26, 10)): raise Error("PNG file has invalid header") compressed = [] image_metadata = {} while True: try: tag, data = self.read_chunk() except ValueError, e: raise Error('Chunk error: ' + e.args[0]) # print >> sys.stderr, tag, len(data) if tag == asbytes('IHDR'): # http://www.w3.org/TR/PNG/#11IHDR (width, height, bits_per_sample, color_type, compression_method, filter_method, interlaced) = struct.unpack("!2I5B", data) bps = bits_per_sample // 8 if bps == 0: raise Error("unsupported pixel depth") if bps > 2 or bits_per_sample != (bps * 8): raise Error("invalid pixel depth") if color_type == 0: greyscale = True has_alpha = False planes = 1 elif color_type == 2: greyscale = False has_alpha = False planes = 3 elif color_type == 4: greyscale = True has_alpha = True planes = 2 elif color_type == 6: greyscale = False has_alpha = True planes = 4 else: raise Error("unknown PNG colour type %s" % color_type) if compression_method != 0: raise Error("unknown compression method") if filter_method != 0: raise Error("unknown filter method") self.bps = bps self.planes = planes self.psize = bps * planes self.width = width self.height = height self.row_bytes = width * self.psize elif tag == asbytes('IDAT'): # http://www.w3.org/TR/PNG/#11IDAT compressed.append(data) elif tag == asbytes('bKGD'): if greyscale: image_metadata["background"] = struct.unpack("!1H", data) else: image_metadata["background"] = struct.unpack("!3H", data) elif tag == asbytes('tRNS'): if greyscale: image_metadata["transparent"] = struct.unpack("!1H", data) else: image_metadata["transparent"] = struct.unpack("!3H", data) elif tag == asbytes('gAMA'): image_metadata["gamma"] = ( struct.unpack("!L", data)[0]) / 100000.0 elif tag == asbytes('IEND'): # http://www.w3.org/TR/PNG/#11IEND break scanlines = array('B', zlib.decompress(asbytes('').join(compressed))) if interlaced: pixels = self.deinterlace(scanlines) else: pixels = self.read_flat(scanlines) image_metadata["greyscale"] = greyscale image_metadata["has_alpha"] = has_alpha image_metadata["bytes_per_sample"] = bps image_metadata["interlaced"] = interlaced return width, height, pixels, image_metadata def test_suite(options): """ Run regression test and write PNG file to stdout. 
""" # Below is a big stack of test image generators def test_gradient_horizontal_lr(x, y): return x def test_gradient_horizontal_rl(x, y): return 1-x def test_gradient_vertical_tb(x, y): return y def test_gradient_vertical_bt(x, y): return 1-y def test_radial_tl(x, y): return max(1-math.sqrt(x*x+y*y), 0.0) def test_radial_center(x, y): return test_radial_tl(x-0.5, y-0.5) def test_radial_tr(x, y): return test_radial_tl(1-x, y) def test_radial_bl(x, y): return test_radial_tl(x, 1-y) def test_radial_br(x, y): return test_radial_tl(1-x, 1-y) def test_stripe(x, n): return 1.0*(int(x*n) & 1) def test_stripe_h_2(x, y): return test_stripe(x, 2) def test_stripe_h_4(x, y): return test_stripe(x, 4) def test_stripe_h_10(x, y): return test_stripe(x, 10) def test_stripe_v_2(x, y): return test_stripe(y, 2) def test_stripe_v_4(x, y): return test_stripe(y, 4) def test_stripe_v_10(x, y): return test_stripe(y, 10) def test_stripe_lr_10(x, y): return test_stripe(x+y, 10) def test_stripe_rl_10(x, y): return test_stripe(x-y, 10) def test_checker(x, y, n): return 1.0*((int(x*n) & 1) ^ (int(y*n) & 1)) def test_checker_8(x, y): return test_checker(x, y, 8) def test_checker_15(x, y): return test_checker(x, y, 15) def test_zero(x, y): return 0 def test_one(x, y): return 1 test_patterns = { "GLR": test_gradient_horizontal_lr, "GRL": test_gradient_horizontal_rl, "GTB": test_gradient_vertical_tb, "GBT": test_gradient_vertical_bt, "RTL": test_radial_tl, "RTR": test_radial_tr, "RBL": test_radial_bl, "RBR": test_radial_br, "RCTR": test_radial_center, "HS2": test_stripe_h_2, "HS4": test_stripe_h_4, "HS10": test_stripe_h_10, "VS2": test_stripe_v_2, "VS4": test_stripe_v_4, "VS10": test_stripe_v_10, "LRS": test_stripe_lr_10, "RLS": test_stripe_rl_10, "CK8": test_checker_8, "CK15": test_checker_15, "ZERO": test_zero, "ONE": test_one, } def test_pattern(width, height, depth, pattern): a = array('B') fw = float(width) fh = float(height) pfun = test_patterns[pattern] if depth == 1: for y in range(height): for x in range(width): a.append(int(pfun(float(x)/fw, float(y)/fh) * 255)) elif depth == 2: for y in range(height): for x in range(width): v = int(pfun(float(x)/fw, float(y)/fh) * 65535) a.append(v >> 8) a.append(v & 0xff) return a def test_rgba(size=256, depth=1, red="GTB", green="GLR", blue="RTL", alpha=None): r = test_pattern(size, size, depth, red) g = test_pattern(size, size, depth, green) b = test_pattern(size, size, depth, blue) if alpha: a = test_pattern(size, size, depth, alpha) i = interleave_planes(r, g, depth, depth) i = interleave_planes(i, b, 2 * depth, depth) if alpha: i = interleave_planes(i, a, 3 * depth, depth) return i # The body of test_suite() size = 256 if options.test_size: size = options.test_size depth = 1 if options.test_deep: depth = 2 kwargs = {} if options.test_red: kwargs["red"] = options.test_red if options.test_green: kwargs["green"] = options.test_green if options.test_blue: kwargs["blue"] = options.test_blue if options.test_alpha: kwargs["alpha"] = options.test_alpha pixels = test_rgba(size, depth, **kwargs) writer = Writer(size, size, bytes_per_sample=depth, transparent=options.transparent, background=options.background, gamma=options.gamma, has_alpha=options.test_alpha, compression=options.compression, interlaced=options.interlace) writer.write_array(sys.stdout, pixels) def read_pnm_header(infile, supported='P6'): """ Read a PNM header, return width and height of the image in pixels. 
""" header = [] while len(header) < 4: line = infile.readline() sharp = line.find('#') if sharp > -1: line = line[:sharp] header.extend(line.split()) if len(header) == 3 and header[0] == 'P4': break # PBM doesn't have maxval if header[0] not in supported: raise NotImplementedError('file format %s not supported' % header[0]) if header[0] != 'P4' and header[3] != '255': raise NotImplementedError('maxval %s not supported' % header[3]) return int(header[1]), int(header[2]) def color_triple(color): """ Convert a command line color value to a RGB triple of integers. FIXME: Somewhere we need support for greyscale backgrounds etc. """ if color.startswith('#') and len(color) == 4: return (int(color[1], 16), int(color[2], 16), int(color[3], 16)) if color.startswith('#') and len(color) == 7: return (int(color[1:3], 16), int(color[3:5], 16), int(color[5:7], 16)) elif color.startswith('#') and len(color) == 13: return (int(color[1:5], 16), int(color[5:9], 16), int(color[9:13], 16)) def _main(): """ Run the PNG encoder with options from the command line. """ # Parse command line arguments from optparse import OptionParser version = '%prog ' + __revision__.strip('$').replace('Rev: ', 'r') parser = OptionParser(version=version) parser.set_usage("%prog [options] [pnmfile]") parser.add_option("-i", "--interlace", default=False, action="store_true", help="create an interlaced PNG file (Adam7)") parser.add_option("-t", "--transparent", action="store", type="string", metavar="color", help="mark the specified color as transparent") parser.add_option("-b", "--background", action="store", type="string", metavar="color", help="save the specified background color") parser.add_option("-a", "--alpha", action="store", type="string", metavar="pgmfile", help="alpha channel transparency (RGBA)") parser.add_option("-g", "--gamma", action="store", type="float", metavar="value", help="save the specified gamma value") parser.add_option("-c", "--compression", action="store", type="int", metavar="level", help="zlib compression level (0-9)") parser.add_option("-T", "--test", default=False, action="store_true", help="create a test image") parser.add_option("-R", "--test-red", action="store", type="string", metavar="pattern", help="test pattern for the red image layer") parser.add_option("-G", "--test-green", action="store", type="string", metavar="pattern", help="test pattern for the green image layer") parser.add_option("-B", "--test-blue", action="store", type="string", metavar="pattern", help="test pattern for the blue image layer") parser.add_option("-A", "--test-alpha", action="store", type="string", metavar="pattern", help="test pattern for the alpha image layer") parser.add_option("-D", "--test-deep", default=False, action="store_true", help="use test patterns with 16 bits per layer") parser.add_option("-S", "--test-size", action="store", type="int", metavar="size", help="width and height of the test image") (options, args) = parser.parse_args() # Convert options if options.transparent is not None: options.transparent = color_triple(options.transparent) if options.background is not None: options.background = color_triple(options.background) # Run regression tests if options.test: return test_suite(options) # Prepare input and output files if len(args) == 0: ppmfilename = '-' ppmfile = sys.stdin elif len(args) == 1: ppmfilename = args[0] ppmfile = open(ppmfilename, 'rb') else: parser.error("more than one input file") outfile = sys.stdout # Encode PNM to PNG width, height = read_pnm_header(ppmfile) writer = Writer(width, 
                    height,
                    transparent=options.transparent,
                    background=options.background,
                    has_alpha=options.alpha is not None,
                    gamma=options.gamma,
                    compression=options.compression,
                    interlaced=options.interlace)
    if options.alpha is not None:
        pgmfile = open(options.alpha, 'rb')
        awidth, aheight = read_pnm_header(pgmfile, 'P5')
        if (awidth, aheight) != (width, height):
            raise ValueError("alpha channel image size mismatch" +
                             " (%s has %sx%s but %s has %sx%s)"
                             % (ppmfilename, width, height,
                                options.alpha, awidth, aheight))
        # convert_ppm_and_pgm/convert_ppm take no interlace argument; the
        # interlaced flag is passed to the Writer constructor above instead.
        writer.convert_ppm_and_pgm(ppmfile, pgmfile, outfile)
    else:
        writer.convert_ppm(ppmfile, outfile)


if __name__ == '__main__':
    _main()
mpasternak/pyglet-fix-issue-552
pyglet/image/codecs/pypng.py
Python
bsd-3-clause
41,571
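A small sketch of the Writer class above, targeting Python 2 like the vendored module itself. The import path is assumed from the file's location in pyglet, and the output filename is arbitrary.

# Write a 4x4 greyscale gradient PNG with the Writer defined above.
from array import array
from pyglet.image.codecs import pypng  # assumed import path

width = height = 4
# One byte per pixel: greyscale, no alpha, bytes_per_sample=1.
pixels = array('B', [16 * x + 64 * y for y in range(height) for x in range(width)])

writer = pypng.Writer(width, height, greyscale=True, bytes_per_sample=1)
with open('gradient.png', 'wb') as out:
    writer.write_array(out, pixels)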
"""Functions used by least-squares algorithms.""" from math import copysign import numpy as np from numpy.linalg import norm from scipy.linalg import cho_factor, cho_solve, LinAlgError from scipy.sparse import issparse from scipy.sparse.linalg import LinearOperator, aslinearoperator EPS = np.finfo(float).eps # Functions related to a trust-region problem. def intersect_trust_region(x, s, Delta): """Find the intersection of a line with the boundary of a trust region. This function solves the quadratic equation with respect to t ||(x + s*t)||**2 = Delta**2. Returns ------- t_neg, t_pos : tuple of float Negative and positive roots. Raises ------ ValueError If `s` is zero or `x` is not within the trust region. """ a = np.dot(s, s) if a == 0: raise ValueError("`s` is zero.") b = np.dot(x, s) c = np.dot(x, x) - Delta**2 if c > 0: raise ValueError("`x` is not within the trust region.") d = np.sqrt(b*b - a*c) # Root from one fourth of the discriminant. # Computations below avoid loss of significance, see "Numerical Recipes". q = -(b + copysign(d, b)) t1 = q / a t2 = c / q if t1 < t2: return t1, t2 else: return t2, t1 def solve_lsq_trust_region(n, m, uf, s, V, Delta, initial_alpha=None, rtol=0.01, max_iter=10): """Solve a trust-region problem arising in least-squares minimization. This function implements a method described by J. J. More [1]_ and used in MINPACK, but it relies on a single SVD of Jacobian instead of series of Cholesky decompositions. Before running this function, compute: ``U, s, VT = svd(J, full_matrices=False)``. Parameters ---------- n : int Number of variables. m : int Number of residuals. uf : ndarray Computed as U.T.dot(f). s : ndarray Singular values of J. V : ndarray Transpose of VT. Delta : float Radius of a trust region. initial_alpha : float, optional Initial guess for alpha, which might be available from a previous iteration. If None, determined automatically. rtol : float, optional Stopping tolerance for the root-finding procedure. Namely, the solution ``p`` will satisfy ``abs(norm(p) - Delta) < rtol * Delta``. max_iter : int, optional Maximum allowed number of iterations for the root-finding procedure. Returns ------- p : ndarray, shape (n,) Found solution of a trust-region problem. alpha : float Positive value such that (J.T*J + alpha*I)*p = -J.T*f. Sometimes called Levenberg-Marquardt parameter. n_iter : int Number of iterations made by root-finding procedure. Zero means that Gauss-Newton step was selected as the solution. References ---------- .. [1] More, J. J., "The Levenberg-Marquardt Algorithm: Implementation and Theory," Numerical Analysis, ed. G. A. Watson, Lecture Notes in Mathematics 630, Springer Verlag, pp. 105-116, 1977. """ def phi_and_derivative(alpha, suf, s, Delta): """Function of which to find zero. It is defined as "norm of regularized (by alpha) least-squares solution minus `Delta`". Refer to [1]_. """ denom = s**2 + alpha p_norm = norm(suf / denom) phi = p_norm - Delta phi_prime = -np.sum(suf ** 2 / denom**3) / p_norm return phi, phi_prime suf = s * uf # Check if J has full rank and try Gauss-Newton step. 
if m >= n: threshold = EPS * m * s[0] full_rank = s[-1] > threshold else: full_rank = False if full_rank: p = -V.dot(uf / s) if norm(p) <= Delta: return p, 0.0, 0 alpha_upper = norm(suf) / Delta if full_rank: phi, phi_prime = phi_and_derivative(0.0, suf, s, Delta) alpha_lower = -phi / phi_prime else: alpha_lower = 0.0 if initial_alpha is None or not full_rank and initial_alpha == 0: alpha = max(0.001 * alpha_upper, (alpha_lower * alpha_upper)**0.5) else: alpha = initial_alpha for it in range(max_iter): if alpha < alpha_lower or alpha > alpha_upper: alpha = max(0.001 * alpha_upper, (alpha_lower * alpha_upper)**0.5) phi, phi_prime = phi_and_derivative(alpha, suf, s, Delta) if phi < 0: alpha_upper = alpha ratio = phi / phi_prime alpha_lower = max(alpha_lower, alpha - ratio) alpha -= (phi + Delta) * ratio / Delta if np.abs(phi) < rtol * Delta: break p = -V.dot(suf / (s**2 + alpha)) # Make the norm of p equal to Delta, p is changed only slightly during # this. It is done to prevent p lie outside the trust region (which can # cause problems later). p *= Delta / norm(p) return p, alpha, it + 1 def solve_trust_region_2d(B, g, Delta): """Solve a general trust-region problem in 2 dimensions. The problem is reformulated as a 4-th order algebraic equation, the solution of which is found by numpy.roots. Parameters ---------- B : ndarray, shape (2, 2) Symmetric matrix, defines a quadratic term of the function. g : ndarray, shape (2,) Defines a linear term of the function. Delta : float Radius of a trust region. Returns ------- p : ndarray, shape (2,) Found solution. newton_step : bool Whether the returned solution is the Newton step which lies within the trust region. """ try: R, lower = cho_factor(B) p = -cho_solve((R, lower), g) if np.dot(p, p) <= Delta**2: return p, True except LinAlgError: pass a = B[0, 0] * Delta**2 b = B[0, 1] * Delta**2 c = B[1, 1] * Delta**2 d = g[0] * Delta f = g[1] * Delta coeffs = np.array( [-b + d, 2 * (a - c + f), 6 * b, 2 * (-a + c + f), -b - d]) t = np.roots(coeffs) # Can handle leading zeros. t = np.real(t[np.isreal(t)]) p = Delta * np.vstack((2 * t / (1 + t**2), (1 - t**2) / (1 + t**2))) value = 0.5 * np.sum(p * B.dot(p), axis=0) + np.dot(g, p) i = np.argmin(value) p = p[:, i] return p, False def update_tr_radius(Delta, actual_reduction, predicted_reduction, step_norm, bound_hit): """Update the radius of a trust region based on the cost reduction. Returns ------- Delta : float New radius. ratio : float Ratio between actual and predicted reductions. Zero if predicted reduction is zero. """ if predicted_reduction > 0: ratio = actual_reduction / predicted_reduction else: ratio = 0 if ratio < 0.25: Delta = 0.25 * step_norm elif ratio > 0.75 and bound_hit: Delta *= 2.0 return Delta, ratio # Construction and minimization of quadratic functions. def build_quadratic_1d(J, g, s, diag=None, s0=None): """Parameterize a multivariate quadratic function along a line. The resulting univariate quadratic function is given as follows: :: f(t) = 0.5 * (s0 + s*t).T * (J.T*J + diag) * (s0 + s*t) + g.T * (s0 + s*t) Parameters ---------- J : ndarray, sparse matrix or LinearOperator shape (m, n) Jacobian matrix, affects the quadratic term. g : ndarray, shape (n,) Gradient, defines the linear term. s : ndarray, shape (n,) Direction vector of a line. diag : None or ndarray with shape (n,), optional Addition diagonal part, affects the quadratic term. If None, assumed to be 0. s0 : None or ndarray with shape (n,), optional Initial point. If None, assumed to be 0. 
Returns ------- a : float Coefficient for t**2. b : float Coefficient for t. c : float Free term. Returned only if `s0` is provided. """ v = J.dot(s) a = np.dot(v, v) if diag is not None: a += np.dot(s * diag, s) a *= 0.5 b = np.dot(g, s) if s0 is not None: u = J.dot(s0) b += np.dot(u, v) c = 0.5 * np.dot(u, u) + np.dot(g, s0) if diag is not None: b += np.dot(s0 * diag, s) c += 0.5 * np.dot(s0 * diag, s0) return a, b, c else: return a, b def minimize_quadratic_1d(a, b, lb, ub, c=0): """Minimize a 1-d quadratic function subject to bounds. The free term `c` is 0 by default. Bounds must be finite. Returns ------- t : float Minimum point. y : float Minimum value. """ t = [lb, ub] if a != 0: extremum = -0.5 * b / a if lb < extremum < ub: t.append(extremum) t = np.asarray(t) y = a * t**2 + b * t + c min_index = np.argmin(y) return t[min_index], y[min_index] def evaluate_quadratic(J, g, s, diag=None): """Compute values of a quadratic function arising in least squares. The function is 0.5 * s.T * (J.T * J + diag) * s + g.T * s. Parameters ---------- J : ndarray, sparse matrix or LinearOperator, shape (m, n) Jacobian matrix, affects the quadratic term. g : ndarray, shape (n,) Gradient, defines the linear term. s : ndarray, shape (k, n) or (n,) Array containing steps as rows. diag : ndarray, shape (n,), optional Addition diagonal part, affects the quadratic term. If None, assumed to be 0. Returns ------- values : ndarray with shape (k,) or float Values of the function. If `s` was 2-dimensional then ndarray is returned, otherwise float is returned. """ if s.ndim == 1: Js = J.dot(s) q = np.dot(Js, Js) if diag is not None: q += np.dot(s * diag, s) else: Js = J.dot(s.T) q = np.sum(Js**2, axis=0) if diag is not None: q += np.sum(diag * s**2, axis=1) l = np.dot(s, g) return 0.5 * q + l # Utility functions to work with bound constraints. def in_bounds(x, lb, ub): """Check if a point lies within bounds.""" return np.all((x >= lb) & (x <= ub)) def step_size_to_bound(x, s, lb, ub): """Compute a min_step size required to reach a bound. The function computes a positive scalar t, such that x + s * t is on the bound. Returns ------- step : float Computed step. Non-negative value. hits : ndarray of int with shape of x Each element indicates whether a corresponding variable reaches the bound: * 0 - the bound was not hit. * -1 - the lower bound was hit. * 1 - the upper bound was hit. """ non_zero = np.nonzero(s) s_non_zero = s[non_zero] steps = np.empty_like(x) steps.fill(np.inf) with np.errstate(over='ignore'): steps[non_zero] = np.maximum((lb - x)[non_zero] / s_non_zero, (ub - x)[non_zero] / s_non_zero) min_step = np.min(steps) return min_step, np.equal(steps, min_step) * np.sign(s).astype(int) def find_active_constraints(x, lb, ub, rtol=1e-10): """Determine which constraints are active in a given point. The threshold is computed using `rtol` and the absolute value of the closest bound. Returns ------- active : ndarray of int with shape of x Each component shows whether the corresponding constraint is active: * 0 - a constraint is not active. * -1 - a lower bound is active. * 1 - a upper bound is active. 
""" active = np.zeros_like(x, dtype=int) if rtol == 0: active[x <= lb] = -1 active[x >= ub] = 1 return active lower_dist = x - lb upper_dist = ub - x lower_threshold = rtol * np.maximum(1, np.abs(lb)) upper_threshold = rtol * np.maximum(1, np.abs(ub)) lower_active = (np.isfinite(lb) & (lower_dist <= np.minimum(upper_dist, lower_threshold))) active[lower_active] = -1 upper_active = (np.isfinite(ub) & (upper_dist <= np.minimum(lower_dist, upper_threshold))) active[upper_active] = 1 return active def make_strictly_feasible(x, lb, ub, rstep=1e-10): """Shift a point to the interior of a feasible region. Each element of the returned vector is at least at a relative distance `rstep` from the closest bound. If ``rstep=0`` then `np.nextafter` is used. """ x_new = x.copy() active = find_active_constraints(x, lb, ub, rstep) lower_mask = np.equal(active, -1) upper_mask = np.equal(active, 1) if rstep == 0: x_new[lower_mask] = np.nextafter(lb[lower_mask], ub[lower_mask]) x_new[upper_mask] = np.nextafter(ub[upper_mask], lb[upper_mask]) else: x_new[lower_mask] = (lb[lower_mask] + rstep * np.maximum(1, np.abs(lb[lower_mask]))) x_new[upper_mask] = (ub[upper_mask] - rstep * np.maximum(1, np.abs(ub[upper_mask]))) tight_bounds = (x_new < lb) | (x_new > ub) x_new[tight_bounds] = 0.5 * (lb[tight_bounds] + ub[tight_bounds]) return x_new def CL_scaling_vector(x, g, lb, ub): """Compute Coleman-Li scaling vector and its derivatives. Components of a vector v are defined as follows: :: | ub[i] - x[i], if g[i] < 0 and ub[i] < np.inf v[i] = | x[i] - lb[i], if g[i] > 0 and lb[i] > -np.inf | 1, otherwise According to this definition v[i] >= 0 for all i. It differs from the definition in paper [1]_ (eq. (2.2)), where the absolute value of v is used. Both definitions are equivalent down the line. Derivatives of v with respect to x take value 1, -1 or 0 depending on a case. Returns ------- v : ndarray with shape of x Scaling vector. dv : ndarray with shape of x Derivatives of v[i] with respect to x[i], diagonal elements of v's Jacobian. References ---------- .. [1] M.A. Branch, T.F. Coleman, and Y. Li, "A Subspace, Interior, and Conjugate Gradient Method for Large-Scale Bound-Constrained Minimization Problems," SIAM Journal on Scientific Computing, Vol. 21, Number 1, pp 1-23, 1999. """ v = np.ones_like(x) dv = np.zeros_like(x) mask = (g < 0) & np.isfinite(ub) v[mask] = ub[mask] - x[mask] dv[mask] = -1 mask = (g > 0) & np.isfinite(lb) v[mask] = x[mask] - lb[mask] dv[mask] = 1 return v, dv def reflective_transformation(y, lb, ub): """Compute reflective transformation and its gradient.""" if in_bounds(y, lb, ub): return y, np.ones_like(y) lb_finite = np.isfinite(lb) ub_finite = np.isfinite(ub) x = y.copy() g_negative = np.zeros_like(y, dtype=bool) mask = lb_finite & ~ub_finite x[mask] = np.maximum(y[mask], 2 * lb[mask] - y[mask]) g_negative[mask] = y[mask] < lb[mask] mask = ~lb_finite & ub_finite x[mask] = np.minimum(y[mask], 2 * ub[mask] - y[mask]) g_negative[mask] = y[mask] > ub[mask] mask = lb_finite & ub_finite d = ub - lb t = np.remainder(y[mask] - lb[mask], 2 * d[mask]) x[mask] = lb[mask] + np.minimum(t, 2 * d[mask] - t) g_negative[mask] = t > d[mask] g = np.ones_like(y) g[g_negative] = -1 return x, g # Functions to display algorithm's progress. 
def print_header_nonlinear(): print("{0:^15}{1:^15}{2:^15}{3:^15}{4:^15}{5:^15}" .format("Iteration", "Total nfev", "Cost", "Cost reduction", "Step norm", "Optimality")) def print_iteration_nonlinear(iteration, nfev, cost, cost_reduction, step_norm, optimality): if cost_reduction is None: cost_reduction = " " * 15 else: cost_reduction = "{0:^15.2e}".format(cost_reduction) if step_norm is None: step_norm = " " * 15 else: step_norm = "{0:^15.2e}".format(step_norm) print("{0:^15}{1:^15}{2:^15.4e}{3}{4}{5:^15.2e}" .format(iteration, nfev, cost, cost_reduction, step_norm, optimality)) def print_header_linear(): print("{0:^15}{1:^15}{2:^15}{3:^15}{4:^15}" .format("Iteration", "Cost", "Cost reduction", "Step norm", "Optimality")) def print_iteration_linear(iteration, cost, cost_reduction, step_norm, optimality): if cost_reduction is None: cost_reduction = " " * 15 else: cost_reduction = "{0:^15.2e}".format(cost_reduction) if step_norm is None: step_norm = " " * 15 else: step_norm = "{0:^15.2e}".format(step_norm) print("{0:^15}{1:^15.4e}{2}{3}{4:^15.2e}".format( iteration, cost, cost_reduction, step_norm, optimality)) # Simple helper functions. def compute_grad(J, f): """Compute gradient of the least-squares cost function.""" if isinstance(J, LinearOperator): return J.rmatvec(f) else: return J.T.dot(f) def compute_jac_scale(J, scale_inv_old=None): """Compute variables scale based on the Jacobian matrix.""" if issparse(J): scale_inv = np.asarray(J.power(2).sum(axis=0)).ravel()**0.5 else: scale_inv = np.sum(J**2, axis=0)**0.5 if scale_inv_old is None: scale_inv[scale_inv == 0] = 1 else: scale_inv = np.maximum(scale_inv, scale_inv_old) return 1 / scale_inv, scale_inv def left_multiplied_operator(J, d): """Return diag(d) J as LinearOperator.""" J = aslinearoperator(J) def matvec(x): return d * J.matvec(x) def matmat(X): return d * J.matmat(X) def rmatvec(x): return J.rmatvec(x.ravel() * d) return LinearOperator(J.shape, matvec=matvec, matmat=matmat, rmatvec=rmatvec) def right_multiplied_operator(J, d): """Return J diag(d) as LinearOperator.""" J = aslinearoperator(J) def matvec(x): return J.matvec(np.ravel(x) * d) def matmat(X): return J.matmat(X * d[:, np.newaxis]) def rmatvec(x): return d * J.rmatvec(x) return LinearOperator(J.shape, matvec=matvec, matmat=matmat, rmatvec=rmatvec) def regularized_lsq_operator(J, diag): """Return a matrix arising in regularized least squares as LinearOperator. The matrix is [ J ] [ D ] where D is diagonal matrix with elements from `diag`. """ J = aslinearoperator(J) m, n = J.shape def matvec(x): return np.hstack((J.matvec(x), diag * x)) def rmatvec(x): x1 = x[:m] x2 = x[m:] return J.rmatvec(x1) + diag * x2 return LinearOperator((m + n, n), matvec=matvec, rmatvec=rmatvec) def right_multiply(J, d, copy=True): """Compute J diag(d). If `copy` is False, `J` is modified in place (unless being LinearOperator). """ if copy and not isinstance(J, LinearOperator): J = J.copy() if issparse(J): J.data *= d.take(J.indices, mode='clip') # scikit-learn recipe. elif isinstance(J, LinearOperator): J = right_multiplied_operator(J, d) else: J *= d return J def left_multiply(J, d, copy=True): """Compute diag(d) J. If `copy` is False, `J` is modified in place (unless being LinearOperator). """ if copy and not isinstance(J, LinearOperator): J = J.copy() if issparse(J): J.data *= np.repeat(d, np.diff(J.indptr)) # scikit-learn recipe. 
elif isinstance(J, LinearOperator): J = left_multiplied_operator(J, d) else: J *= d[:, np.newaxis] return J def check_termination(dF, F, dx_norm, x_norm, ratio, ftol, xtol): """Check termination condition for nonlinear least squares.""" ftol_satisfied = dF < ftol * F and ratio > 0.25 xtol_satisfied = dx_norm < xtol * (xtol + x_norm) if ftol_satisfied and xtol_satisfied: return 4 elif ftol_satisfied: return 2 elif xtol_satisfied: return 3 else: return None def scale_for_robust_loss_function(J, f, rho): """Scale Jacobian and residuals for a robust loss function. Arrays are modified in place. """ J_scale = rho[1] + 2 * rho[2] * f**2 J_scale[J_scale < EPS] = EPS J_scale **= 0.5 f *= rho[1] / J_scale return left_multiply(J, J_scale, copy=False), f
jlcarmic/producthunt_simulator
venv/lib/python2.7/site-packages/scipy/optimize/_lsq/common.py
Python
mit
20,742
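An illustrative sketch of the 1-D quadratic helpers above. They live in a private SciPy module (import path taken from this file's location), so the import is for demonstration only; the Jacobian, gradient, and direction values are made up.

# Minimize f(t) = 0.5*(t*s).T (J.T J) (t*s) + g.T (t*s) over t in [-2, 2].
import numpy as np
from scipy.optimize._lsq.common import build_quadratic_1d, minimize_quadratic_1d

J = np.array([[1.0, 0.0],
              [0.0, 2.0]])       # Jacobian of the residuals
g = np.array([-1.0, -1.0])       # gradient of the cost
s = np.array([1.0, 1.0])         # search direction

a, b = build_quadratic_1d(J, g, s)            # coefficients of a*t**2 + b*t
t_min, f_min = minimize_quadratic_1d(a, b, -2.0, 2.0)
print(t_min, f_min)                           # -> 0.4 -0.4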
#!/usr/bin/env python # vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai __license__ = 'GPL v3' __copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>' __docformat__ = 'restructuredtext en' import sys, os from calibre import prints as prints_, preferred_encoding, isbytestring from calibre.utils.config import Config, ConfigProxy, JSONConfig from calibre.utils.ipc.launch import Worker from calibre.constants import __appname__, __version__, iswindows from calibre.gui2 import error_dialog # Time to wait for communication to/from the interpreter process POLL_TIMEOUT = 0.01 # seconds preferred_encoding, isbytestring, __appname__, __version__, error_dialog, \ iswindows def console_config(): desc='Settings to control the calibre console' c = Config('console', desc) c.add_opt('theme', default='native', help='The color theme') c.add_opt('scrollback', default=10000, help='Max number of lines to keep in the scrollback buffer') return c prefs = ConfigProxy(console_config()) dynamic = JSONConfig('console') def prints(*args, **kwargs): kwargs['file'] = sys.__stdout__ prints_(*args, **kwargs) class Process(Worker): @property def env(self): env = dict(os.environ) env.update(self._env) return env
alexston/calibre-webserver
src/calibre/utils/pyconsole/__init__.py
Python
gpl-3.0
1,289
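A hedged sketch of how the prefs and dynamic objects defined in calibre/utils/pyconsole/__init__.py above are typically read and written. It assumes calibre's ConfigProxy and JSONConfig support dict-style access (a calibre-internal convention, shown for illustration only) and that a calibre environment is available.

# Hypothetical usage of the objects defined above; requires a calibre install,
# and assumes ConfigProxy / JSONConfig behave like mappings.
from calibre.utils.pyconsole import prefs, dynamic

theme = prefs['theme']            # 'native' unless the user changed it
scrollback = prefs['scrollback']  # 10000 by default

dynamic['last_session'] = 'console-1'  # JSONConfig persists values to disk
print(theme, scrollback, dynamic.get('last_session'))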
# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors # # This module is part of GitDB and is released under # the New BSD License: http://www.opensource.org/licenses/bsd-license.php
GbalsaC/bitnamiP
venv/lib/python2.7/site-packages/gitdb/test/__init__.py
Python
agpl-3.0
210
"""The met component.""" from datetime import timedelta import logging from random import randrange import metno from homeassistant.const import ( CONF_ELEVATION, CONF_LATITUDE, CONF_LONGITUDE, EVENT_CORE_CONFIG_UPDATE, LENGTH_FEET, LENGTH_METERS, ) from homeassistant.core import Config, HomeAssistant from homeassistant.exceptions import ConfigEntryNotReady from homeassistant.helpers.aiohttp_client import async_get_clientsession from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed from homeassistant.util.distance import convert as convert_distance import homeassistant.util.dt as dt_util from .const import CONF_TRACK_HOME, DOMAIN URL = "https://aa015h6buqvih86i1.api.met.no/weatherapi/locationforecast/2.0/complete" _LOGGER = logging.getLogger(__name__) async def async_setup(hass: HomeAssistant, config: Config) -> bool: """Set up configured Met.""" hass.data.setdefault(DOMAIN, {}) return True async def async_setup_entry(hass, config_entry): """Set up Met as config entry.""" coordinator = MetDataUpdateCoordinator(hass, config_entry) await coordinator.async_refresh() if not coordinator.last_update_success: raise ConfigEntryNotReady if config_entry.data.get(CONF_TRACK_HOME, False): coordinator.track_home() hass.data[DOMAIN][config_entry.entry_id] = coordinator hass.async_create_task( hass.config_entries.async_forward_entry_setup(config_entry, "weather") ) return True async def async_unload_entry(hass, config_entry): """Unload a config entry.""" await hass.config_entries.async_forward_entry_unload(config_entry, "weather") hass.data[DOMAIN][config_entry.entry_id].untrack_home() hass.data[DOMAIN].pop(config_entry.entry_id) return True class MetDataUpdateCoordinator(DataUpdateCoordinator): """Class to manage fetching Met data.""" def __init__(self, hass, config_entry): """Initialize global Met data updater.""" self._unsub_track_home = None self.weather = MetWeatherData( hass, config_entry.data, hass.config.units.is_metric ) self.weather.init_data() update_interval = timedelta(minutes=randrange(55, 65)) super().__init__(hass, _LOGGER, name=DOMAIN, update_interval=update_interval) async def _async_update_data(self): """Fetch data from Met.""" try: return await self.weather.fetch_data() except Exception as err: raise UpdateFailed(f"Update failed: {err}") from err def track_home(self): """Start tracking changes to HA home setting.""" if self._unsub_track_home: return async def _async_update_weather_data(_event=None): """Update weather data.""" self.weather.init_data() await self.async_refresh() self._unsub_track_home = self.hass.bus.async_listen( EVENT_CORE_CONFIG_UPDATE, _async_update_weather_data ) def untrack_home(self): """Stop tracking changes to HA home setting.""" if self._unsub_track_home: self._unsub_track_home() self._unsub_track_home = None class MetWeatherData: """Keep data for Met.no weather entities.""" def __init__(self, hass, config, is_metric): """Initialise the weather entity data.""" self.hass = hass self._config = config self._is_metric = is_metric self._weather_data = None self.current_weather_data = {} self.daily_forecast = None self.hourly_forecast = None def init_data(self): """Weather data inialization - get the coordinates.""" if self._config.get(CONF_TRACK_HOME, False): latitude = self.hass.config.latitude longitude = self.hass.config.longitude elevation = self.hass.config.elevation else: latitude = self._config[CONF_LATITUDE] longitude = self._config[CONF_LONGITUDE] elevation = self._config[CONF_ELEVATION] if not self._is_metric: elevation 
= int( round(convert_distance(elevation, LENGTH_FEET, LENGTH_METERS)) ) coordinates = { "lat": str(latitude), "lon": str(longitude), "msl": str(elevation), } self._weather_data = metno.MetWeatherData( coordinates, async_get_clientsession(self.hass), api_url=URL ) async def fetch_data(self): """Fetch data from API - (current weather and forecast).""" await self._weather_data.fetching_data() self.current_weather_data = self._weather_data.get_current_weather() time_zone = dt_util.DEFAULT_TIME_ZONE self.daily_forecast = self._weather_data.get_forecast(time_zone, False) self.hourly_forecast = self._weather_data.get_forecast(time_zone, True) return self
tchellomello/home-assistant
homeassistant/components/met/__init__.py
Python
apache-2.0
4,998
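A small, self-contained sketch of the coordinate handling performed in MetWeatherData.init_data above: when Home Assistant is configured with imperial units, the stored elevation is in feet and is converted to metres before being sent to the Met.no API, which expects msl in metres. The helper name feet_to_meters is a hypothetical stand-in for convert_distance(..., LENGTH_FEET, LENGTH_METERS); the conversion factor is the standard 1 ft = 0.3048 m.

# Standalone illustration of the elevation handling in MetWeatherData.init_data.
def feet_to_meters(value_ft):
    # Hypothetical stand-in for homeassistant.util.distance.convert.
    return value_ft * 0.3048

def build_coordinates(latitude, longitude, elevation, is_metric):
    if not is_metric:
        # Imperial configs store elevation in feet; the API wants metres.
        elevation = int(round(feet_to_meters(elevation)))
    return {"lat": str(latitude), "lon": str(longitude), "msl": str(elevation)}

print(build_coordinates(59.91, 10.75, 75, True))     # metric: passed through as 75
print(build_coordinates(40.71, -74.01, 246, False))  # imperial: 246 ft -> 75 m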
I am a bad file that should not pass compileall.
pelson/conda-build
tests/test-recipes/metadata/_compile-test/f2_bad.py
Python
bsd-3-clause
49
from blur.quickinit import * def tableSizeInMegs(tableName): q = Database.current().exec_('SELECT * FROM table_size_in_megs(?)',[QVariant(tableName)]) if q.next(): return q.value(0) return None def pruneTable(tableName, defaultSizeLimit, orderColumn, rowsPerIteration = 100): maxSize = Config.getInt('assburnerTableLimit' + tableName, defaultSizeLimit) tableNameLwr = str(tableName).lower() while tableSizeInMegs(tableNameLwr) > maxSize: q = Database.current().exec_('DELETE FROM %(name)s WHERE key%(name)s IN (SELECT key%(name)s FROM %(name)s ORDER BY %(sort_col)s DESC LIMIT %(limit)i)' % {'name':tableNameLwr,'sort_col':orderColumn,'limit':rowsPerIteration} ) if q.numRowsAffected() < rowsPerIteration: break pruneTable('JobCommandHistory', 5000, 'keyjobcommandhistory')
jacksonwilliams/arsenalsuite
python/scripts/db_pruner.py
Python
gpl-2.0
794
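To make the string interpolation in pruneTable above concrete, the sketch below renders the DELETE statement that would be issued for the call at the bottom of db_pruner.py (table JobCommandHistory, ordered by keyjobcommandhistory, 100 rows per pass). Only the SQL text is built; no database is touched.

# Render the DELETE statement pruneTable() would execute for the example call
# at the end of the file. Pure string formatting; nothing is executed here.
table_name = 'JobCommandHistory'.lower()
order_column = 'keyjobcommandhistory'
rows_per_iteration = 100

sql = ('DELETE FROM %(name)s WHERE key%(name)s IN '
       '(SELECT key%(name)s FROM %(name)s ORDER BY %(sort_col)s DESC LIMIT %(limit)i)'
       % {'name': table_name, 'sort_col': order_column, 'limit': rows_per_iteration})

print(sql)
# DELETE FROM jobcommandhistory WHERE keyjobcommandhistory IN
# (SELECT keyjobcommandhistory FROM jobcommandhistory
#  ORDER BY keyjobcommandhistory DESC LIMIT 100)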
"""cascade folder deletes to imapuid Otherwise, since this fk is NOT NULL, deleting a folder which has associated imapuids still existing will cause a database IntegrityError. Only the mail sync engine does such a thing. Nothing else should be deleting folders, hard or soft. This also fixes a problem where if e.g. someone disables their Spam folder from showing up in Gmail IMAP, the server will crash trying to delete that folder the account.spam_folder_id constraint fails. Revision ID: 350a08df27ee Revises: 1eab2619cc4f Create Date: 2014-05-25 01:40:21.762119 """ # revision identifiers, used by Alembic. revision = '350a08df27ee' down_revision = '1eab2619cc4f' from alembic import op def upgrade(): op.drop_constraint('imapuid_ibfk_3', 'imapuid', type_='foreignkey') op.create_foreign_key('imapuid_ibfk_3', 'imapuid', 'folder', ['folder_id'], ['id'], ondelete='CASCADE') op.drop_constraint('account_ibfk_2', 'account', type_='foreignkey') op.create_foreign_key('account_ibfk_2', 'account', 'folder', ['inbox_folder_id'], ['id'], ondelete='SET NULL') op.drop_constraint('account_ibfk_3', 'account', type_='foreignkey') op.create_foreign_key('account_ibfk_3', 'account', 'folder', ['sent_folder_id'], ['id'], ondelete='SET NULL') op.drop_constraint('account_ibfk_4', 'account', type_='foreignkey') op.create_foreign_key('account_ibfk_4', 'account', 'folder', ['drafts_folder_id'], ['id'], ondelete='SET NULL') op.drop_constraint('account_ibfk_5', 'account', type_='foreignkey') op.create_foreign_key('account_ibfk_5', 'account', 'folder', ['spam_folder_id'], ['id'], ondelete='SET NULL') op.drop_constraint('account_ibfk_6', 'account', type_='foreignkey') op.create_foreign_key('account_ibfk_6', 'account', 'folder', ['trash_folder_id'], ['id'], ondelete='SET NULL') op.drop_constraint('account_ibfk_7', 'account', type_='foreignkey') op.create_foreign_key('account_ibfk_7', 'account', 'folder', ['archive_folder_id'], ['id'], ondelete='SET NULL') op.drop_constraint('account_ibfk_8', 'account', type_='foreignkey') op.create_foreign_key('account_ibfk_8', 'account', 'folder', ['all_folder_id'], ['id'], ondelete='SET NULL') op.drop_constraint('account_ibfk_9', 'account', type_='foreignkey') op.create_foreign_key('account_ibfk_9', 'account', 'folder', ['starred_folder_id'], ['id'], ondelete='SET NULL') # for some reason this was left out of migration 024, so might not exist try: op.drop_constraint('account_ibfk_10', 'account', type_='foreignkey') except: pass op.create_foreign_key('account_ibfk_10', 'account', 'folder', ['important_folder_id'], ['id'], ondelete='SET NULL') def downgrade(): op.drop_constraint('imapuid_ibfk_3', 'imapuid', type_='foreignkey') op.create_foreign_key('imapuid_ibfk_3', 'imapuid', 'folder', ['folder_id'], ['id']) op.drop_constraint('account_ibfk_2', 'account', type_='foreignkey') op.create_foreign_key('account_ibfk_2', 'account', 'folder', ['inbox_folder_id'], ['id']) op.drop_constraint('account_ibfk_3', 'account', type_='foreignkey') op.create_foreign_key('account_ibfk_3', 'account', 'folder', ['sent_folder_id'], ['id']) op.drop_constraint('account_ibfk_4', 'account', type_='foreignkey') op.create_foreign_key('account_ibfk_4', 'account', 'folder', ['drafts_folder_id'], ['id']) op.drop_constraint('account_ibfk_5', 'account', type_='foreignkey') op.create_foreign_key('account_ibfk_5', 'account', 'folder', ['spam_folder_id'], ['id']) op.drop_constraint('account_ibfk_6', 'account', type_='foreignkey') op.create_foreign_key('account_ibfk_6', 'account', 'folder', ['trash_folder_id'], ['id']) 
op.drop_constraint('account_ibfk_7', 'account', type_='foreignkey') op.create_foreign_key('account_ibfk_7', 'account', 'folder', ['archive_folder_id'], ['id']) op.drop_constraint('account_ibfk_8', 'account', type_='foreignkey') op.create_foreign_key('account_ibfk_8', 'account', 'folder', ['all_folder_id'], ['id']) op.drop_constraint('account_ibfk_9', 'account', type_='foreignkey') op.create_foreign_key('account_ibfk_9', 'account', 'folder', ['starred_folder_id'], ['id']) op.drop_constraint('account_ibfk_10', 'account', type_='foreignkey') op.create_foreign_key('account_ibfk_10', 'account', 'folder', ['important_folder_id'], ['id'])
nylas/sync-engine
migrations/versions/034_cascade_folder_deletes_to_imapuid.py
Python
agpl-3.0
4,899
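A hedged sketch of how a migration such as revision 350a08df27ee above is typically applied programmatically through Alembic's Python API; the path to alembic.ini is an assumption about the project layout, and the command-line equivalent would simply be `alembic upgrade 350a08df27ee`.

# Illustrative only: applies the revision above via Alembic's command API.
# The config file location below is an assumption about the project layout.
from alembic import command
from alembic.config import Config

cfg = Config("alembic.ini")           # hypothetical path to the Alembic config
command.upgrade(cfg, "350a08df27ee")  # bring the schema up to this revision
# command.downgrade(cfg, "1eab2619cc4f")  # revert to the previous revision if needed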
from coalib.bearlib.aspects import Root, Taste @Root.subaspect class Spelling: """ How words should be written. """ class docs: example = """ 'Tihs si surly som incoreclt speling. `Coala` is always written with a lowercase `c`. """ example_language = 'reStructuredText' importance_reason = """ Words should always be written as they are supposed to be; standardisation facilitates communication. """ fix_suggestions = """ Use the correct spelling for the misspelled words. """ @Spelling.subaspect class DictionarySpelling: """ Valid language's words spelling. """ class docs: example = """ This is toatly wonrg. """ example_language = 'reStructuredText' importance_reason = """ Good spelling facilitates communication and avoids confusion. By following the same rules for spelling words, we can all understand the text we read. Poor spelling distracts the reader and they lose focus. """ fix_suggestions = """ You can use a spell-checker to fix this for you or just ensure yourself that things are well written. """ @Spelling.subaspect class OrgSpecificWordSpelling: """ Organisations like coala specified words' spelling. """ class docs: example = """ `Coala` is always written with a lower case c, also at the beginning of the sentence. """ example_language = 'reStructuredText' importance_reason = """ There are words you want to be written as you want, like your organisation's name. """ fix_suggestions = """ Simply make sure those words match with what is provided by the organisation. """ specific_word = Taste[list]( 'Represents the regex of the specific word to check.', (('c[o|O][a|A][l|L][a|A]',), ), default=list())
kartikeys98/coala
coalib/bearlib/aspects/Spelling.py
Python
agpl-3.0
2,014
########################################################################## # # Copyright (c) 2017, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided with # the distribution. # # * Neither the name of John Haddon nor the names of # any other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ########################################################################## import Gaffer import GafferImage # Command suitable for use with `NodeMenu.append()`. def nodeMenuCreateCommand( menu ) : median = GafferImage.Median() median["radius"].gang() return median Gaffer.Metadata.registerNode( GafferImage.Median, "description", """ Applies a median filter to the image. This can be useful for removing noise. """, )
lucienfostier/gaffer
python/GafferImageUI/MedianUI.py
Python
bsd-3-clause
2,161
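The comment in MedianUI.py above notes that nodeMenuCreateCommand is meant for use with NodeMenu.append(). Below is a hedged sketch of what that registration might look like in a GUI startup file; the menu path and the acquire() call follow Gaffer's usual startup conventions but are assumptions here, not part of this file.

# Hypothetical GUI startup snippet (not part of MedianUI.py): registers the
# Median node in the node creation menu using the command defined above.
# `application` is the variable Gaffer provides to startup scripts.
import GafferUI
import GafferImageUI

nodeMenu = GafferUI.NodeMenu.acquire(application)
nodeMenu.append("/Image/Filter/Median", GafferImageUI.MedianUI.nodeMenuCreateCommand)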
#!/usr/bin/env python
#
# Released under the BSD license. See LICENSE file for details.
"""
This program does face detection and blurs the detected face out.
"""
print __doc__

from SimpleCV import Camera, Display, HaarCascade

# Initialize the camera
cam = Camera()

# Create the display to show the image
display = Display()

# Haar Cascade face detection, only faces
haarcascade = HaarCascade("face")

# Loop forever
while display.isNotDone():
    # Get image, flip it so it looks mirrored, scale to speed things up
    img = cam.getImage().flipHorizontal().scale(0.5)

    # Load in trained face file
    faces = img.findHaarFeatures(haarcascade)

    # Pixelize the detected face
    if faces:
        bb = faces[-1].boundingBox()
        img = img.pixelize(10, region=(bb[0], bb[1], bb[2], bb[3]))

    # Display the image
    img.save(display)
nils-werner/SimpleCV
SimpleCV/examples/detection/facetrack.py
Python
bsd-3-clause
849
import numpy as np from numpy.testing import * from numpy.testing.noseclasses import KnownFailureTest import nose def test_slow(): @dec.slow def slow_func(x,y,z): pass assert_(slow_func.slow) def test_setastest(): @dec.setastest() def f_default(a): pass @dec.setastest(True) def f_istest(a): pass @dec.setastest(False) def f_isnottest(a): pass assert_(f_default.__test__) assert_(f_istest.__test__) assert_(not f_isnottest.__test__) class DidntSkipException(Exception): pass def test_skip_functions_hardcoded(): @dec.skipif(True) def f1(x): raise DidntSkipException try: f1('a') except DidntSkipException: raise Exception('Failed to skip') except nose.SkipTest: pass @dec.skipif(False) def f2(x): raise DidntSkipException try: f2('a') except DidntSkipException: pass except nose.SkipTest: raise Exception('Skipped when not expected to') def test_skip_functions_callable(): def skip_tester(): return skip_flag == 'skip me!' @dec.skipif(skip_tester) def f1(x): raise DidntSkipException try: skip_flag = 'skip me!' f1('a') except DidntSkipException: raise Exception('Failed to skip') except nose.SkipTest: pass @dec.skipif(skip_tester) def f2(x): raise DidntSkipException try: skip_flag = 'five is right out!' f2('a') except DidntSkipException: pass except nose.SkipTest: raise Exception('Skipped when not expected to') def test_skip_generators_hardcoded(): @dec.knownfailureif(True, "This test is known to fail") def g1(x): for i in xrange(x): yield i try: for j in g1(10): pass except KnownFailureTest: pass else: raise Exception('Failed to mark as known failure') @dec.knownfailureif(False, "This test is NOT known to fail") def g2(x): for i in xrange(x): yield i raise DidntSkipException('FAIL') try: for j in g2(10): pass except KnownFailureTest: raise Exception('Marked incorretly as known failure') except DidntSkipException: pass def test_skip_generators_callable(): def skip_tester(): return skip_flag == 'skip me!' @dec.knownfailureif(skip_tester, "This test is known to fail") def g1(x): for i in xrange(x): yield i try: skip_flag = 'skip me!' for j in g1(10): pass except KnownFailureTest: pass else: raise Exception('Failed to mark as known failure') @dec.knownfailureif(skip_tester, "This test is NOT known to fail") def g2(x): for i in xrange(x): yield i raise DidntSkipException('FAIL') try: skip_flag = 'do not skip' for j in g2(10): pass except KnownFailureTest: raise Exception('Marked incorretly as known failure') except DidntSkipException: pass def test_deprecated(): @dec.deprecated(True) def non_deprecated_func(): pass @dec.deprecated() def deprecated_func(): import warnings warnings.warn("TEST: deprecated func", DeprecationWarning) @dec.deprecated() def deprecated_func2(): import warnings warnings.warn("AHHHH") raise ValueError @dec.deprecated() def deprecated_func3(): import warnings warnings.warn("AHHHH") # marked as deprecated, but does not raise DeprecationWarning assert_raises(AssertionError, non_deprecated_func) # should be silent deprecated_func() # fails if deprecated decorator just disables test. See #1453. assert_raises(ValueError, deprecated_func2) # first warnings is not a DeprecationWarning assert_raises(AssertionError, deprecated_func3) if __name__ == '__main__': run_module_suite()
lthurlow/Network-Grapher
proj/external/numpy-1.7.0/numpy/testing/tests/test_decorators.py
Python
mit
4,070
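The file above tests numpy.testing's decorators; for reference, here is a minimal sketch of how the same decorators are typically applied in an ordinary test module, written against the numpy 1.7-era nose-based framework that this file targets.

# Typical application of the decorators exercised above (numpy 1.7 / nose era).
import os
from numpy.testing import dec, assert_equal

@dec.slow
def test_expensive_path():
    # Only run when slow tests are requested.
    assert_equal(sum(range(10**6)), (10**6 - 1) * 10**6 // 2)

@dec.skipif(os.name == 'nt', "POSIX-only behaviour")
def test_posix_only():
    assert_equal(os.sep, '/')

@dec.knownfailureif(True, "Documents a known bug; raises KnownFailureTest instead of failing")
def test_known_bug():
    assert_equal(1, 2)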
# -*- coding: utf-8 -*- # Generated by Django 1.11.12 on 2018-04-11 15:33 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('third_party_auth', '0020_cleanup_slug_fields'), ] operations = [ migrations.AddField( model_name='ltiproviderconfig', name='enable_sso_id_verification', field=models.BooleanField(default=False, help_text=b'Use the presence of a profile from a trusted third party as proof of identity verification.'), ), migrations.AddField( model_name='oauth2providerconfig', name='enable_sso_id_verification', field=models.BooleanField(default=False, help_text=b'Use the presence of a profile from a trusted third party as proof of identity verification.'), ), migrations.AddField( model_name='samlproviderconfig', name='enable_sso_id_verification', field=models.BooleanField(default=False, help_text=b'Use the presence of a profile from a trusted third party as proof of identity verification.'), ), ]
ahmedaljazzar/edx-platform
common/djangoapps/third_party_auth/migrations/0021_sso_id_verification.py
Python
agpl-3.0
1,186
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

import hashlib
import itertools
import logging
import os
import re

from openerp import tools
from openerp.osv import fields,osv

_logger = logging.getLogger(__name__)

class ir_attachment(osv.osv):
    """Attachments are used to link binary files or URLs to any OpenERP document.

    External attachment storage
    ---------------------------
    The 'data' function field (_data_get,data_set) is implemented using
    _file_read, _file_write and _file_delete, which can be overridden to
    implement other storage engines. Such methods should check for other
    location pseudo uri (example: hdfs://hadoppserver)

    The default implementation is the file:dirname location that stores files
    on the local filesystem using names based on their sha1 hash
    """
    def _name_get_resname(self, cr, uid, ids, object, method, context):
        data = {}
        for attachment in self.browse(cr, uid, ids, context=context):
            model_object = attachment.res_model
            res_id = attachment.res_id
            if model_object and res_id:
                model_pool = self.pool.get(model_object)
                res = model_pool.name_get(cr,uid,[res_id],context)
                res_name = res and res[0][1] or False
                if res_name:
                    field = self._columns.get('res_name',False)
                    if field and len(res_name) > field.size:
                        res_name = res_name[:field.size-3] + '...'
data[attachment.id] = res_name else: data[attachment.id] = False return data # 'data' field implementation def _full_path(self, cr, uid, location, path): # location = 'file:filestore' assert location.startswith('file:'), "Unhandled filestore location %s" % location location = location[5:] # sanitize location name and path location = re.sub('[.]','',location) location = location.strip('/\\') path = re.sub('[.]','',path) path = path.strip('/\\') return os.path.join(tools.config['root_path'], location, cr.dbname, path) def _file_read(self, cr, uid, location, fname, bin_size=False): full_path = self._full_path(cr, uid, location, fname) r = '' try: if bin_size: r = os.path.getsize(full_path) else: r = open(full_path,'rb').read().encode('base64') except IOError: _logger.error("_read_file reading %s",full_path) return r def _file_write(self, cr, uid, location, value): bin_value = value.decode('base64') fname = hashlib.sha1(bin_value).hexdigest() # scatter files across 1024 dirs # we use '/' in the db (even on windows) fname = fname[:3] + '/' + fname full_path = self._full_path(cr, uid, location, fname) try: dirname = os.path.dirname(full_path) if not os.path.isdir(dirname): os.makedirs(dirname) open(full_path,'wb').write(bin_value) except IOError: _logger.error("_file_write writing %s",full_path) return fname def _file_delete(self, cr, uid, location, fname): count = self.search(cr, 1, [('store_fname','=',fname)], count=True) if count <= 1: full_path = self._full_path(cr, uid, location, fname) try: os.unlink(full_path) except OSError: _logger.error("_file_delete could not unlink %s",full_path) except IOError: # Harmless and needed for race conditions _logger.error("_file_delete could not unlink %s",full_path) def _data_get(self, cr, uid, ids, name, arg, context=None): if context is None: context = {} result = {} location = self.pool.get('ir.config_parameter').get_param(cr, uid, 'ir_attachment.location') bin_size = context.get('bin_size') for attach in self.browse(cr, uid, ids, context=context): if location and attach.store_fname: result[attach.id] = self._file_read(cr, uid, location, attach.store_fname, bin_size) else: result[attach.id] = attach.db_datas return result def _data_set(self, cr, uid, id, name, value, arg, context=None): # We dont handle setting data to null if not value: return True if context is None: context = {} location = self.pool.get('ir.config_parameter').get_param(cr, uid, 'ir_attachment.location') file_size = len(value.decode('base64')) if location: attach = self.browse(cr, uid, id, context=context) if attach.store_fname: self._file_delete(cr, uid, location, attach.store_fname) fname = self._file_write(cr, uid, location, value) super(ir_attachment, self).write(cr, uid, [id], {'store_fname': fname, 'file_size': file_size}, context=context) else: super(ir_attachment, self).write(cr, uid, [id], {'db_datas': value, 'file_size': file_size}, context=context) return True _name = 'ir.attachment' _columns = { 'name': fields.char('Attachment Name',size=256, required=True), 'datas_fname': fields.char('File Name',size=256), 'description': fields.text('Description'), 'res_name': fields.function(_name_get_resname, type='char', size=128, string='Resource Name', store=True), 'res_model': fields.char('Resource Model',size=64, readonly=True, help="The database object this attachment will be attached to"), 'res_id': fields.integer('Resource ID', readonly=True, help="The record id this is attached to"), 'create_date': fields.datetime('Date Created', readonly=True), 'create_uid': 
fields.many2one('res.users', 'Owner', readonly=True), 'company_id': fields.many2one('res.company', 'Company', change_default=True), 'type': fields.selection( [ ('url','URL'), ('binary','Binary'), ], 'Type', help="Binary File or URL", required=True, change_default=True), 'url': fields.char('Url', size=1024), # al: We keep shitty field names for backward compatibility with document 'datas': fields.function(_data_get, fnct_inv=_data_set, string='File Content', type="binary", nodrop=True), 'store_fname': fields.char('Stored Filename', size=256), 'db_datas': fields.binary('Database Data'), 'file_size': fields.integer('File Size'), } _defaults = { 'type': 'binary', 'file_size': 0, 'company_id': lambda s,cr,uid,c: s.pool.get('res.company')._company_default_get(cr, uid, 'ir.attachment', context=c), } def _auto_init(self, cr, context=None): super(ir_attachment, self)._auto_init(cr, context) cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('ir_attachment_res_idx',)) if not cr.fetchone(): cr.execute('CREATE INDEX ir_attachment_res_idx ON ir_attachment (res_model, res_id)') cr.commit() def check(self, cr, uid, ids, mode, context=None, values=None): """Restricts the access to an ir.attachment, according to referred model In the 'document' module, it is overriden to relax this hard rule, since more complex ones apply there. """ if not ids: return res_ids = {} if ids: if isinstance(ids, (int, long)): ids = [ids] cr.execute('SELECT DISTINCT res_model, res_id FROM ir_attachment WHERE id = ANY (%s)', (ids,)) for rmod, rid in cr.fetchall(): if not (rmod and rid): continue res_ids.setdefault(rmod,set()).add(rid) if values: if values.get('res_model') and 'res_id' in values: res_ids.setdefault(values['res_model'],set()).add(values['res_id']) ima = self.pool.get('ir.model.access') for model, mids in res_ids.items(): # ignore attachments that are not attached to a resource anymore when checking access rights # (resource was deleted but attachment was not) mids = self.pool.get(model).exists(cr, uid, mids) ima.check(cr, uid, model, mode) self.pool.get(model).check_access_rule(cr, uid, mids, mode, context=context) def _search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None): ids = super(ir_attachment, self)._search(cr, uid, args, offset=offset, limit=limit, order=order, context=context, count=False, access_rights_uid=access_rights_uid) if not ids: if count: return 0 return [] # Work with a set, as list.remove() is prohibitive for large lists of documents # (takes 20+ seconds on a db with 100k docs during search_count()!) orig_ids = ids ids = set(ids) # For attachments, the permissions of the document they are attached to # apply, so we must remove attachments for which the user cannot access # the linked document. # Use pure SQL rather than read() as it is about 50% faster for large dbs (100k+ docs), # and the permissions are checked in super() and below anyway. cr.execute("""SELECT id, res_model, res_id FROM ir_attachment WHERE id = ANY(%s)""", (list(ids),)) targets = cr.dictfetchall() model_attachments = {} for target_dict in targets: if not (target_dict['res_id'] and target_dict['res_model']): continue # model_attachments = { 'model': { 'res_id': [id1,id2] } } model_attachments.setdefault(target_dict['res_model'],{}).setdefault(target_dict['res_id'],set()).add(target_dict['id']) # To avoid multiple queries for each attachment found, checks are # performed in batch as much as possible. 
ima = self.pool.get('ir.model.access') for model, targets in model_attachments.iteritems(): if not ima.check(cr, uid, model, 'read', False): # remove all corresponding attachment ids for attach_id in itertools.chain(*targets.values()): ids.remove(attach_id) continue # skip ir.rule processing, these ones are out already # filter ids according to what access rules permit target_ids = targets.keys() allowed_ids = self.pool.get(model).search(cr, uid, [('id', 'in', target_ids)], context=context) disallowed_ids = set(target_ids).difference(allowed_ids) for res_id in disallowed_ids: for attach_id in targets[res_id]: ids.remove(attach_id) # sort result according to the original sort ordering result = [id for id in orig_ids if id in ids] return len(result) if count else list(result) def read(self, cr, uid, ids, fields_to_read=None, context=None, load='_classic_read'): self.check(cr, uid, ids, 'read', context=context) return super(ir_attachment, self).read(cr, uid, ids, fields_to_read, context, load) def write(self, cr, uid, ids, vals, context=None): self.check(cr, uid, ids, 'write', context=context, values=vals) if 'file_size' in vals: del vals['file_size'] return super(ir_attachment, self).write(cr, uid, ids, vals, context) def copy(self, cr, uid, id, default=None, context=None): self.check(cr, uid, [id], 'write', context=context) return super(ir_attachment, self).copy(cr, uid, id, default, context) def unlink(self, cr, uid, ids, context=None): self.check(cr, uid, ids, 'unlink', context=context) location = self.pool.get('ir.config_parameter').get_param(cr, uid, 'ir_attachment.location') if location: for attach in self.browse(cr, uid, ids, context=context): if attach.store_fname: self._file_delete(cr, uid, location, attach.store_fname) return super(ir_attachment, self).unlink(cr, uid, ids, context) def create(self, cr, uid, values, context=None): self.check(cr, uid, [], mode='create', context=context, values=values) if 'file_size' in values: del values['file_size'] return super(ir_attachment, self).create(cr, uid, values, context) def action_get(self, cr, uid, context=None): return self.pool.get('ir.actions.act_window').for_xml_id( cr, uid, 'base', 'action_attachment', context=context) # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
Novasoft-India/OperERP-AM-Motors
openerp/addons/base/ir/ir_attachment.py
Python
agpl-3.0
13,948
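A self-contained sketch of the content-addressed storage scheme used by _file_write in ir_attachment.py above: the stored filename is the SHA-1 of the decoded payload, with its first three hex characters used as a subdirectory to scatter files across the filestore. base64.b64decode stands in for the Python 2 str.decode('base64') used in the module, and the paths below are illustrative.

# Standalone illustration of the filestore naming used by _file_write above.
import base64
import hashlib
import os

def store_fname_for(value_base64):
    bin_value = base64.b64decode(value_base64)   # equivalent of value.decode('base64')
    fname = hashlib.sha1(bin_value).hexdigest()
    return fname[:3] + '/' + fname               # first 3 hex chars pick the subdirectory

def full_path(root_path, location, dbname, fname):
    # Simplified version of _full_path: <root>/<location>/<dbname>/<xxx>/<sha1>
    return os.path.join(root_path, location, dbname, fname)

fname = store_fname_for('aGVsbG8gd29ybGQ=')      # base64 of "hello world"
print(fname)                                     # 2aa/2aae6c35c94fcfb415dbe95f408b9ce91ee846ed
print(full_path('/opt/openerp', 'filestore', 'mydb', fname))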
from ..broker import Broker class IssueDetailBroker(Broker): controller = "issue_details" def show(self, **kwargs): """Shows the details for the specified issue detail. **Inputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` True | ``default:`` None :param IssueID: The internal NetMRI identifier for this issue instance. :type IssueID: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param methods: A list of issue detail methods. The listed methods will be called on each issue detail returned and included in the output. Available methods are: data_source, device, interface, iprg, vlan, subnet, alternate_device, issue_desc, title, severity, infradevice. :type methods: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source, device, interface, iprg, vlan, subnet, alternate_device, issue_desc. :type include: Array of String **Outputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :return issue_detail: The issue detail identified by the specified IssueID. :rtype issue_detail: IssueDetail """ return self.api_request(self._get_method_fullname("show"), kwargs) def index(self, **kwargs): """Lists the available issue details. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient. **Inputs** | ``api version min:`` 2.3 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param BatchID: The internal NetMRI identifier for the job execution batch to which this issue applies, if relevant. :type BatchID: Integer | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param BatchID: The internal NetMRI identifier for the job execution batch to which this issue applies, if relevant. :type BatchID: Array of Integer | ``api version min:`` 2.3 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param DeviceID: The internal NetMRI identifier for the device to which this issue applies. :type DeviceID: Integer | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param DeviceID: The internal NetMRI identifier for the device to which this issue applies. :type DeviceID: Array of Integer | ``api version min:`` 2.3 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param EndTime: The ending effective time of this revision of this record, or empty if still in effect. :type EndTime: DateTime | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param EndTime: The ending effective time of this revision of this record, or empty if still in effect. :type EndTime: Array of DateTime | ``api version min:`` 2.3 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param InterfaceID: The internal NetMRI identifier for the interface to which this issue applies, if relevant. :type InterfaceID: Integer | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param InterfaceID: The internal NetMRI identifier for the interface to which this issue applies, if relevant. 
:type InterfaceID: Array of Integer | ``api version min:`` 2.3 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param IprgID: The internal NetMRI identifier for the HSRP or VRRP group to which this issue applies, if relevant. :type IprgID: Integer | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param IprgID: The internal NetMRI identifier for the HSRP or VRRP group to which this issue applies, if relevant. :type IprgID: Array of Integer | ``api version min:`` 2.3 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param IssueID: The internal NetMRI identifier for this issue instance. :type IssueID: Integer | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param IssueID: The internal NetMRI identifier for this issue instance. :type IssueID: Array of Integer | ``api version min:`` 2.3 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param IssueTypeID: An internal NetMRI identifier for the type of this issue. :type IssueTypeID: String | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param IssueTypeID: An internal NetMRI identifier for the type of this issue. :type IssueTypeID: Array of String | ``api version min:`` 2.3 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param SubnetID: The internal NetMRI identifier for the subnet to which this issue applies, if relevant. :type SubnetID: Integer | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param SubnetID: The internal NetMRI identifier for the subnet to which this issue applies, if relevant. :type SubnetID: Array of Integer | ``api version min:`` 2.3 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param Timestamp: The date and time this record was collected or calculated. :type Timestamp: DateTime | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param Timestamp: The date and time this record was collected or calculated. :type Timestamp: Array of DateTime | ``api version min:`` 2.3 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param VlanID: The internal NetMRI identifier of the VLAN to which this issue applies, if relevant. :type VlanID: Integer | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param VlanID: The internal NetMRI identifier of the VLAN to which this issue applies, if relevant. :type VlanID: Array of Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results. :type DeviceGroupID: Array of Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param timestamp: The data returned will represent the issue details as of this date and time. If omitted, the result will indicate the most recently collected data. :type timestamp: DateTime | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param methods: A list of issue detail methods. The listed methods will be called on each issue detail returned and included in the output. Available methods are: data_source, device, interface, iprg, vlan, subnet, alternate_device, issue_desc, title, severity, infradevice. 
:type methods: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source, device, interface, iprg, vlan, subnet, alternate_device, issue_desc. :type include: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` 0 :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information. :type start: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` 1000 :param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000. :type limit: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` IssueID :param sort: The data field(s) to use for sorting the output. Default is IssueID. Valid values are DataSourceID, IssueID, StartTime, EndTime, ChangedCols, Timestamp, IssueTypeID, DetailID, DeviceID, InterfaceID, VlanID, SubnetID, IprgID, BatchID, AltDeviceID, Criteria, IssueValue, Component, SeverityID, Correctness, Stability, SuppressedInd. :type sort: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` asc :param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'. :type dir: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param select: The list of attributes to return for each IssueDetail. Valid values are DataSourceID, IssueID, StartTime, EndTime, ChangedCols, Timestamp, IssueTypeID, DetailID, DeviceID, InterfaceID, VlanID, SubnetID, IprgID, BatchID, AltDeviceID, Criteria, IssueValue, Component, SeverityID, Correctness, Stability, SuppressedInd. If empty or omitted, all attributes will be returned. :type select: Array | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param goto_field: The field name for NIOS GOTO that is used for locating a row position of records. :type goto_field: String | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records. :type goto_value: String **Outputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :return issue_details: An array of the IssueDetail objects that match the specified input criteria. :rtype issue_details: Array of IssueDetail """ return self.api_list_request(self._get_method_fullname("index"), kwargs) def search(self, **kwargs): """Lists the available issue details matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. 
The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below. **Inputs** | ``api version min:`` 2.3 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param AltDeviceID: The internal NetMRI identifier of the alternate device (such as a neighbor) involved in this issue, if relevant. :type AltDeviceID: Integer | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param AltDeviceID: The internal NetMRI identifier of the alternate device (such as a neighbor) involved in this issue, if relevant. :type AltDeviceID: Array of Integer | ``api version min:`` 2.3 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param BatchID: The internal NetMRI identifier for the job execution batch to which this issue applies, if relevant. :type BatchID: Integer | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param BatchID: The internal NetMRI identifier for the job execution batch to which this issue applies, if relevant. :type BatchID: Array of Integer | ``api version min:`` 2.3 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param ChangedCols: The fields that changed between this revision of the record and the previous revision. :type ChangedCols: String | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param ChangedCols: The fields that changed between this revision of the record and the previous revision. :type ChangedCols: Array of String | ``api version min:`` 2.3 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param Component: The issue component (Devices, Configuration, VLANs, etc.). :type Component: String | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param Component: The issue component (Devices, Configuration, VLANs, etc.). :type Component: Array of String | ``api version min:`` 2.3 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param Correctness: The correctness contribution for this issue. :type Correctness: Float | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param Correctness: The correctness contribution for this issue. :type Correctness: Array of Float | ``api version min:`` 2.3 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param Criteria: The criteria value for this issue at the time it was raised. :type Criteria: String | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param Criteria: The criteria value for this issue at the time it was raised. :type Criteria: Array of String | ``api version min:`` 2.3 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param DataSourceID: The internal NetMRI identifier for the collector NetMRI that raised this issue. :type DataSourceID: Integer | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param DataSourceID: The internal NetMRI identifier for the collector NetMRI that raised this issue. :type DataSourceID: Array of Integer | ``api version min:`` 2.3 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param DetailID: A unique identifier for this issue instance. 
:type DetailID: String | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param DetailID: A unique identifier for this issue instance. :type DetailID: Array of String | ``api version min:`` 2.3 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param DeviceID: The internal NetMRI identifier for the device to which this issue applies. :type DeviceID: Integer | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param DeviceID: The internal NetMRI identifier for the device to which this issue applies. :type DeviceID: Array of Integer | ``api version min:`` 2.3 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param EndTime: The ending effective time of this revision of this record, or empty if still in effect. :type EndTime: DateTime | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param EndTime: The ending effective time of this revision of this record, or empty if still in effect. :type EndTime: Array of DateTime | ``api version min:`` 2.3 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param InterfaceID: The internal NetMRI identifier for the interface to which this issue applies, if relevant. :type InterfaceID: Integer | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param InterfaceID: The internal NetMRI identifier for the interface to which this issue applies, if relevant. :type InterfaceID: Array of Integer | ``api version min:`` 2.3 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param IprgID: The internal NetMRI identifier for the HSRP or VRRP group to which this issue applies, if relevant. :type IprgID: Integer | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param IprgID: The internal NetMRI identifier for the HSRP or VRRP group to which this issue applies, if relevant. :type IprgID: Array of Integer | ``api version min:`` 2.3 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param IssueID: The internal NetMRI identifier for this issue instance. :type IssueID: Integer | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param IssueID: The internal NetMRI identifier for this issue instance. :type IssueID: Array of Integer | ``api version min:`` 2.3 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param IssueTypeID: An internal NetMRI identifier for the type of this issue. :type IssueTypeID: String | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param IssueTypeID: An internal NetMRI identifier for the type of this issue. :type IssueTypeID: Array of String | ``api version min:`` 2.3 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param IssueValue: The meaning of this field varies based upon the specific issue. :type IssueValue: String | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param IssueValue: The meaning of this field varies based upon the specific issue. :type IssueValue: Array of String | ``api version min:`` 2.3 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param SeverityID: The issue severity ID (1 = Error, 2 = Warning, 3 = Info). Useful for sorting. 
:type SeverityID: Integer | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param SeverityID: The issue severity ID (1 = Error, 2 = Warning, 3 = Info). Useful for sorting. :type SeverityID: Array of Integer | ``api version min:`` 2.3 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param Stability: The stability contribution for this issue. :type Stability: Float | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param Stability: The stability contribution for this issue. :type Stability: Array of Float | ``api version min:`` 2.3 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param StartTime: The date/time this issue instance was raised. :type StartTime: DateTime | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param StartTime: The date/time this issue instance was raised. :type StartTime: Array of DateTime | ``api version min:`` 2.3 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param SubnetID: The internal NetMRI identifier for the subnet to which this issue applies, if relevant. :type SubnetID: Integer | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param SubnetID: The internal NetMRI identifier for the subnet to which this issue applies, if relevant. :type SubnetID: Array of Integer | ``api version min:`` 2.3 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param SuppressedInd: A flag indicating whether this issue is suppressed or not. :type SuppressedInd: Boolean | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param SuppressedInd: A flag indicating whether this issue is suppressed or not. :type SuppressedInd: Array of Boolean | ``api version min:`` 2.3 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param Timestamp: The date and time this record was collected or calculated. :type Timestamp: DateTime | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param Timestamp: The date and time this record was collected or calculated. :type Timestamp: Array of DateTime | ``api version min:`` 2.3 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param VlanID: The internal NetMRI identifier of the VLAN to which this issue applies, if relevant. :type VlanID: Integer | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param VlanID: The internal NetMRI identifier of the VLAN to which this issue applies, if relevant. :type VlanID: Array of Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results. :type DeviceGroupID: Array of Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param timestamp: The data returned will represent the issue details as of this date and time. If omitted, the result will indicate the most recently collected data. :type timestamp: DateTime | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param methods: A list of issue detail methods. The listed methods will be called on each issue detail returned and included in the output. 
Available methods are: data_source, device, interface, iprg, vlan, subnet, alternate_device, issue_desc, title, severity, infradevice. :type methods: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source, device, interface, iprg, vlan, subnet, alternate_device, issue_desc. :type include: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` 0 :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information. :type start: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` 1000 :param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000. :type limit: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` IssueID :param sort: The data field(s) to use for sorting the output. Default is IssueID. Valid values are DataSourceID, IssueID, StartTime, EndTime, ChangedCols, Timestamp, IssueTypeID, DetailID, DeviceID, InterfaceID, VlanID, SubnetID, IprgID, BatchID, AltDeviceID, Criteria, IssueValue, Component, SeverityID, Correctness, Stability, SuppressedInd. :type sort: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` asc :param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'. :type dir: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param select: The list of attributes to return for each IssueDetail. Valid values are DataSourceID, IssueID, StartTime, EndTime, ChangedCols, Timestamp, IssueTypeID, DetailID, DeviceID, InterfaceID, VlanID, SubnetID, IprgID, BatchID, AltDeviceID, Criteria, IssueValue, Component, SeverityID, Correctness, Stability, SuppressedInd. If empty or omitted, all attributes will be returned. :type select: Array | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param goto_field: The field name for NIOS GOTO that is used for locating a row position of records. :type goto_field: String | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records. :type goto_value: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param query: This value will be matched against issue details, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. 
The attributes searched are: AltDeviceID, BatchID, ChangedCols, Component, Correctness, Criteria, DataSourceID, DetailID, DeviceID, EndTime, InterfaceID, IprgID, IssueID, IssueTypeID, IssueValue, SeverityID, Stability, StartTime, SubnetID, SuppressedInd, Timestamp, VlanID. :type query: String | ``api version min:`` 2.3 | ``api version max:`` None | ``required:`` False | ``default:`` None :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering. :type xml_filter: String **Outputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :return issue_details: An array of the IssueDetail objects that match the specified input criteria. :rtype issue_details: Array of IssueDetail """ return self.api_list_request(self._get_method_fullname("search"), kwargs) def find(self, **kwargs): """Lists the available issue details matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: AltDeviceID, BatchID, ChangedCols, Component, Correctness, Criteria, DataSourceID, DetailID, DeviceID, EndTime, InterfaceID, IprgID, IssueID, IssueTypeID, IssueValue, SeverityID, Stability, StartTime, SubnetID, SuppressedInd, Timestamp, VlanID. **Inputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_AltDeviceID: The operator to apply to the field AltDeviceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. AltDeviceID: The internal NetMRI identifier of the alternate device (such as a neighbor) involved in this issue, if relevant. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_AltDeviceID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_AltDeviceID: If op_AltDeviceID is specified, the field named in this input will be compared to the value in AltDeviceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_AltDeviceID must be specified if op_AltDeviceID is specified. :type val_f_AltDeviceID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_AltDeviceID: If op_AltDeviceID is specified, this value will be compared to the value in AltDeviceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_AltDeviceID must be specified if op_AltDeviceID is specified. :type val_c_AltDeviceID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_BatchID: The operator to apply to the field BatchID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. 
BatchID: The internal NetMRI identifier for the job execution batch to which this issue applies, if relevant. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_BatchID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_BatchID: If op_BatchID is specified, the field named in this input will be compared to the value in BatchID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_BatchID must be specified if op_BatchID is specified. :type val_f_BatchID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_BatchID: If op_BatchID is specified, this value will be compared to the value in BatchID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_BatchID must be specified if op_BatchID is specified. :type val_c_BatchID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_ChangedCols: The operator to apply to the field ChangedCols. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ChangedCols: The fields that changed between this revision of the record and the previous revision. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_ChangedCols: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_ChangedCols: If op_ChangedCols is specified, the field named in this input will be compared to the value in ChangedCols using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ChangedCols must be specified if op_ChangedCols is specified. :type val_f_ChangedCols: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_ChangedCols: If op_ChangedCols is specified, this value will be compared to the value in ChangedCols using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ChangedCols must be specified if op_ChangedCols is specified. :type val_c_ChangedCols: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_Component: The operator to apply to the field Component. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. Component: The issue component (Devices, Configuration, VLANs, etc.). For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_Component: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_Component: If op_Component is specified, the field named in this input will be compared to the value in Component using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. 
Either this field or val_c_Component must be specified if op_Component is specified. :type val_f_Component: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_Component: If op_Component is specified, this value will be compared to the value in Component using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_Component must be specified if op_Component is specified. :type val_c_Component: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_Correctness: The operator to apply to the field Correctness. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. Correctness: The correctness contribution for this issue. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_Correctness: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_Correctness: If op_Correctness is specified, the field named in this input will be compared to the value in Correctness using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_Correctness must be specified if op_Correctness is specified. :type val_f_Correctness: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_Correctness: If op_Correctness is specified, this value will be compared to the value in Correctness using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_Correctness must be specified if op_Correctness is specified. :type val_c_Correctness: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_Criteria: The operator to apply to the field Criteria. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. Criteria: The criteria value for this issue at the time it was raised. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_Criteria: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_Criteria: If op_Criteria is specified, the field named in this input will be compared to the value in Criteria using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_Criteria must be specified if op_Criteria is specified. :type val_f_Criteria: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_Criteria: If op_Criteria is specified, this value will be compared to the value in Criteria using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_Criteria must be specified if op_Criteria is specified. :type val_c_Criteria: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_DataSourceID: The operator to apply to the field DataSourceID. 
Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DataSourceID: The internal NetMRI identifier for the collector NetMRI that raised this issue. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_DataSourceID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_DataSourceID: If op_DataSourceID is specified, the field named in this input will be compared to the value in DataSourceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DataSourceID must be specified if op_DataSourceID is specified. :type val_f_DataSourceID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_DataSourceID: If op_DataSourceID is specified, this value will be compared to the value in DataSourceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DataSourceID must be specified if op_DataSourceID is specified. :type val_c_DataSourceID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_DetailID: The operator to apply to the field DetailID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DetailID: A unique identifier for this issue instance. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_DetailID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_DetailID: If op_DetailID is specified, the field named in this input will be compared to the value in DetailID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DetailID must be specified if op_DetailID is specified. :type val_f_DetailID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_DetailID: If op_DetailID is specified, this value will be compared to the value in DetailID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DetailID must be specified if op_DetailID is specified. :type val_c_DetailID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_DeviceID: The operator to apply to the field DeviceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceID: The internal NetMRI identifier for the device to which this issue applies. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_DeviceID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_DeviceID: If op_DeviceID is specified, the field named in this input will be compared to the value in DeviceID using the specified operator. 
That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceID must be specified if op_DeviceID is specified. :type val_f_DeviceID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_DeviceID: If op_DeviceID is specified, this value will be compared to the value in DeviceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceID must be specified if op_DeviceID is specified. :type val_c_DeviceID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_EndTime: The operator to apply to the field EndTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. EndTime: The ending effective time of this revision of this record, or empty if still in effect. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_EndTime: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_EndTime: If op_EndTime is specified, the field named in this input will be compared to the value in EndTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_EndTime must be specified if op_EndTime is specified. :type val_f_EndTime: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_EndTime: If op_EndTime is specified, this value will be compared to the value in EndTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_EndTime must be specified if op_EndTime is specified. :type val_c_EndTime: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_InterfaceID: The operator to apply to the field InterfaceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. InterfaceID: The internal NetMRI identifier for the interface to which this issue applies, if relevant. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_InterfaceID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_InterfaceID: If op_InterfaceID is specified, the field named in this input will be compared to the value in InterfaceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_InterfaceID must be specified if op_InterfaceID is specified. :type val_f_InterfaceID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_InterfaceID: If op_InterfaceID is specified, this value will be compared to the value in InterfaceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_InterfaceID must be specified if op_InterfaceID is specified. 
:type val_c_InterfaceID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_IprgID: The operator to apply to the field IprgID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgID: The internal NetMRI identifier for the HSRP or VRRP group to which this issue applies, if relevant. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_IprgID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_IprgID: If op_IprgID is specified, the field named in this input will be compared to the value in IprgID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgID must be specified if op_IprgID is specified. :type val_f_IprgID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_IprgID: If op_IprgID is specified, this value will be compared to the value in IprgID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgID must be specified if op_IprgID is specified. :type val_c_IprgID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_IssueID: The operator to apply to the field IssueID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IssueID: The internal NetMRI identifier for this issue instance. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_IssueID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_IssueID: If op_IssueID is specified, the field named in this input will be compared to the value in IssueID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IssueID must be specified if op_IssueID is specified. :type val_f_IssueID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_IssueID: If op_IssueID is specified, this value will be compared to the value in IssueID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IssueID must be specified if op_IssueID is specified. :type val_c_IssueID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_IssueTypeID: The operator to apply to the field IssueTypeID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IssueTypeID: An internal NetMRI identifier for the type of this issue. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. 
:type op_IssueTypeID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_IssueTypeID: If op_IssueTypeID is specified, the field named in this input will be compared to the value in IssueTypeID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IssueTypeID must be specified if op_IssueTypeID is specified. :type val_f_IssueTypeID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_IssueTypeID: If op_IssueTypeID is specified, this value will be compared to the value in IssueTypeID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IssueTypeID must be specified if op_IssueTypeID is specified. :type val_c_IssueTypeID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_IssueValue: The operator to apply to the field IssueValue. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IssueValue: The meaning of this field varies based upon the specific issue. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_IssueValue: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_IssueValue: If op_IssueValue is specified, the field named in this input will be compared to the value in IssueValue using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IssueValue must be specified if op_IssueValue is specified. :type val_f_IssueValue: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_IssueValue: If op_IssueValue is specified, this value will be compared to the value in IssueValue using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IssueValue must be specified if op_IssueValue is specified. :type val_c_IssueValue: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_SeverityID: The operator to apply to the field SeverityID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SeverityID: The issue severity ID (1 = Error, 2 = Warning, 3 = Info). Useful for sorting. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_SeverityID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_SeverityID: If op_SeverityID is specified, the field named in this input will be compared to the value in SeverityID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SeverityID must be specified if op_SeverityID is specified. 
:type val_f_SeverityID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_SeverityID: If op_SeverityID is specified, this value will be compared to the value in SeverityID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SeverityID must be specified if op_SeverityID is specified. :type val_c_SeverityID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_Stability: The operator to apply to the field Stability. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. Stability: The stability contribution for this issue. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_Stability: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_Stability: If op_Stability is specified, the field named in this input will be compared to the value in Stability using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_Stability must be specified if op_Stability is specified. :type val_f_Stability: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_Stability: If op_Stability is specified, this value will be compared to the value in Stability using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_Stability must be specified if op_Stability is specified. :type val_c_Stability: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_StartTime: The operator to apply to the field StartTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. StartTime: The date/time this issue instance was raised. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_StartTime: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_StartTime: If op_StartTime is specified, the field named in this input will be compared to the value in StartTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_StartTime must be specified if op_StartTime is specified. :type val_f_StartTime: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_StartTime: If op_StartTime is specified, this value will be compared to the value in StartTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_StartTime must be specified if op_StartTime is specified. :type val_c_StartTime: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_SubnetID: The operator to apply to the field SubnetID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. 
SubnetID: The internal NetMRI identifier for the subnet to which this issue applies, if relevant. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_SubnetID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_SubnetID: If op_SubnetID is specified, the field named in this input will be compared to the value in SubnetID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SubnetID must be specified if op_SubnetID is specified. :type val_f_SubnetID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_SubnetID: If op_SubnetID is specified, this value will be compared to the value in SubnetID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SubnetID must be specified if op_SubnetID is specified. :type val_c_SubnetID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_SuppressedInd: The operator to apply to the field SuppressedInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SuppressedInd: A flag indicating whether this issue is suppressed or not. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_SuppressedInd: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_SuppressedInd: If op_SuppressedInd is specified, the field named in this input will be compared to the value in SuppressedInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SuppressedInd must be specified if op_SuppressedInd is specified. :type val_f_SuppressedInd: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_SuppressedInd: If op_SuppressedInd is specified, this value will be compared to the value in SuppressedInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SuppressedInd must be specified if op_SuppressedInd is specified. :type val_c_SuppressedInd: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_Timestamp: The operator to apply to the field Timestamp. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. Timestamp: The date and time this record was collected or calculated. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_Timestamp: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_Timestamp: If op_Timestamp is specified, the field named in this input will be compared to the value in Timestamp using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. 
Either this field or val_c_Timestamp must be specified if op_Timestamp is specified. :type val_f_Timestamp: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_Timestamp: If op_Timestamp is specified, this value will be compared to the value in Timestamp using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_Timestamp must be specified if op_Timestamp is specified. :type val_c_Timestamp: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_VlanID: The operator to apply to the field VlanID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VlanID: The internal NetMRI identifier of the VLAN to which this issue applies, if relevant. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_VlanID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_VlanID: If op_VlanID is specified, the field named in this input will be compared to the value in VlanID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VlanID must be specified if op_VlanID is specified. :type val_f_VlanID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_VlanID: If op_VlanID is specified, this value will be compared to the value in VlanID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VlanID must be specified if op_VlanID is specified. :type val_c_VlanID: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results. :type DeviceGroupID: Array of Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param timestamp: The data returned will represent the issue details as of this date and time. If omitted, the result will indicate the most recently collected data. :type timestamp: DateTime | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param methods: A list of issue detail methods. The listed methods will be called on each issue detail returned and included in the output. Available methods are: data_source, device, interface, iprg, vlan, subnet, alternate_device, issue_desc, title, severity, infradevice. :type methods: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source, device, interface, iprg, vlan, subnet, alternate_device, issue_desc. :type include: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` 0 :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information. 
:type start: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` 1000 :param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000. :type limit: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` IssueID :param sort: The data field(s) to use for sorting the output. Default is IssueID. Valid values are DataSourceID, IssueID, StartTime, EndTime, ChangedCols, Timestamp, IssueTypeID, DetailID, DeviceID, InterfaceID, VlanID, SubnetID, IprgID, BatchID, AltDeviceID, Criteria, IssueValue, Component, SeverityID, Correctness, Stability, SuppressedInd. :type sort: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` asc :param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'. :type dir: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param select: The list of attributes to return for each IssueDetail. Valid values are DataSourceID, IssueID, StartTime, EndTime, ChangedCols, Timestamp, IssueTypeID, DetailID, DeviceID, InterfaceID, VlanID, SubnetID, IprgID, BatchID, AltDeviceID, Criteria, IssueValue, Component, SeverityID, Correctness, Stability, SuppressedInd. If empty or omitted, all attributes will be returned. :type select: Array | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param goto_field: The field name for NIOS GOTO that is used for locating a row position of records. :type goto_field: String | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records. :type goto_value: String | ``api version min:`` 2.3 | ``api version max:`` None | ``required:`` False | ``default:`` None :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering. :type xml_filter: String **Outputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :return issue_details: An array of the IssueDetail objects that match the specified input criteria. :rtype issue_details: Array of IssueDetail """ return self.api_list_request(self._get_method_fullname("find"), kwargs) def direct_data(self, **kwargs): """Return data for a given issue. **Inputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param column_names: The names of columns for which we want the content. 
:type column_names: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param start_time: None :type start_time: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param end_time: None :type end_time: String | ``api version min:`` None | ``api version max:`` None | ``required:`` True | ``default:`` None :param device_id: None :type device_id: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` True | ``default:`` None :param issue_type_id: None :type issue_type_id: String | ``api version min:`` None | ``api version max:`` None | ``required:`` True | ``default:`` None :param issue_id: None :type issue_id: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param group_ids: None :type group_ids: Array | ``api version min:`` None | ``api version max:`` None | ``required:`` True | ``default:`` None :param issue_id: Id of the issue. :type issue_id: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param mode: None :type mode: String **Outputs** """ return self.api_request(self._get_method_fullname("direct_data"), kwargs)
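# --- Illustrative usage (a sketch added for clarity; not part of the original broker module) ---
# A minimal example of the op_/val_c_ convention documented in find() above. It assumes
# `broker` is an IssueDetailBroker instance already obtained from an authenticated NetMRI
# API client; the device ID and timestamp are example values, and attribute access on the
# returned objects is assumed to follow the field names listed in the docstrings.
#
# details = broker.find(
#     op_DeviceID="=", val_c_DeviceID="1234",
#     op_StartTime=">=", val_c_StartTime="2020-01-01 00:00:00",
#     op_SuppressedInd="=", val_c_SuppressedInd="0",
#     sort=["StartTime"], dir=["desc"], limit=100,
# )
# for detail in details:
#     print(detail.IssueID, detail.SeverityID, detail.Component)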
infobloxopen/infoblox-netmri
infoblox_netmri/api/broker/v3_8_0/issue_detail_broker.py
Python
apache-2.0
78,423
from django.utils.translation import ugettext_lazy as _ import horizon from horizon.test.test_dashboards.dogs import dashboard class Puppies(horizon.Panel): name = _("Puppies") slug = "puppies" dashboard.Dogs.register(Puppies)
trunglq7/horizon
horizon/test/test_dashboards/dogs/puppies/panel.py
Python
apache-2.0
241
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

#
# A platform-agnostic tool for running a program in a modified environment.
#

import sys
import os
import subprocess
from optparse import OptionParser


def main(argv=None):
    parser = OptionParser(usage="Usage: %prog [options] [--] VAR=VALUE... command [options] arg1 arg2...")
    parser.add_option("-i", "--ignore-environment",
                      action="store_true", default=False,
                      help="Start with an empty environment (do not inherit current environment)")

    (options, args) = parser.parse_args(args=argv)

    if options.ignore_environment:
        new_env = {}
    else:
        new_env = os.environ.copy()

    # pull out each name value pair
    while (len(args)):
        z = args[0].split("=", 1)
        if len(z) != 2:
            break  # done with env args
        if len(z[0]) == 0:
            raise Exception("Error: incorrect format for env var: '%s'" % str(args[0]))
        del args[0]
        if len(z[1]) == 0:
            # value is not present, so delete it
            if z[0] in new_env:
                del new_env[z[0]]
        else:
            new_env[z[0]] = z[1]

    if len(args) == 0 or len(args[0]) == 0:
        raise Exception("Error: syntax error in command arguments")

    if new_env.get("VALGRIND") and new_env.get("VALGRIND_ALL"):
        # Python generates a lot of possibly-lost errors that are not errors, don't show them.
        args = [new_env.get("VALGRIND"), "--show-reachable=no", "--show-possibly-lost=no", "--error-exitcode=42"] + args

    p = subprocess.Popen(args, env=new_env)

    return p.wait()

if __name__ == "__main__":
    sys.exit(main())
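# --- Illustrative invocation (a sketch added for clarity; not part of the original file) ---
# How this helper is typically run from a shell, based on the option parsing above; the
# variable names and the wrapped command are examples, not taken from the proton build.
#
#   python env.py -i PATH=/usr/bin HOME=/tmp -- some_tool --verbose input.txt
#       starts from an empty environment (-i), sets PATH and HOME, then runs
#       "some_tool --verbose input.txt" and exits with its return code.
#
#   python env.py DEBUG= -- some_tool
#       an empty value ("DEBUG=") removes that variable from the inherited environment.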
dcristoloveanu/qpid-proton
proton-c/env.py
Python
apache-2.0
2,444
########################################################################## # # Copyright (c) 2010, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # * Neither the name of Image Engine Design nor the names of any # other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ########################################################################## import maya.cmds import IECore import IECoreMaya class VectorParameterUI( IECoreMaya.ParameterUI ) : def __init__( self, node, parameter, **kw ) : self.__dim = parameter.getTypedValue().dimensions() if self.__dim == 2: layout = maya.cmds.rowLayout( numberOfColumns = 3, columnWidth3 = [ IECoreMaya.ParameterUI.textColumnWidthIndex, IECoreMaya.ParameterUI.singleWidgetWidthIndex, IECoreMaya.ParameterUI.singleWidgetWidthIndex ] ) elif self.__dim == 3: layout = maya.cmds.rowLayout( numberOfColumns = 4, columnWidth4 = [ IECoreMaya.ParameterUI.textColumnWidthIndex, IECoreMaya.ParameterUI.singleWidgetWidthIndex, IECoreMaya.ParameterUI.singleWidgetWidthIndex, IECoreMaya.ParameterUI.singleWidgetWidthIndex ] ) else: raise RuntimeError("Unsupported vector dimension in VectorParameterUI") IECoreMaya.ParameterUI.__init__( self, node, parameter, layout, **kw ) self.__fields = [] maya.cmds.text( label = self.label(), font = "smallPlainLabelFont", align = "right", annotation = self.description() ) plug = self.plug() for i in range(0, self.__dim) : self.__fields.append( self.__fieldType()( value = parameter.getTypedValue()[i] ) ) maya.cmds.setParent("..") self.replace( self.node(), self.parameter ) def replace( self, node, parameter ) : IECoreMaya.ParameterUI.replace( self, node, parameter ) plug = self.plug() for i in range(0, self.__dim): childPlugName = self.nodeName() + "." 
+ plug.child(i).partialName() maya.cmds.connectControl( self.__fields[i], childPlugName ) self._addPopupMenu( parentUI = self.__fields[i], attributeName = childPlugName ) def __fieldType( self ): if self.parameter.isInstanceOf( IECore.TypeId.V2iParameter ) or self.parameter.isInstanceOf( IECore.TypeId.V3iParameter ): return maya.cmds.intField else: return maya.cmds.floatField IECoreMaya.ParameterUI.registerUI( IECore.TypeId.V2iParameter, VectorParameterUI ) IECoreMaya.ParameterUI.registerUI( IECore.TypeId.V3iParameter, VectorParameterUI ) IECoreMaya.ParameterUI.registerUI( IECore.TypeId.V2fParameter, VectorParameterUI ) IECoreMaya.ParameterUI.registerUI( IECore.TypeId.V2dParameter, VectorParameterUI ) IECoreMaya.ParameterUI.registerUI( IECore.TypeId.V3fParameter, VectorParameterUI ) IECoreMaya.ParameterUI.registerUI( IECore.TypeId.V3dParameter, VectorParameterUI ) IECoreMaya.ParameterUI.registerUI( IECore.TypeId.Color3fParameter, VectorParameterUI, "numeric" )
lento/cortex
python/IECoreMaya/VectorParameterUI.py
Python
bsd-3-clause
4,276
#!/usr/bin/python # # Peteris Krumins (peter@catonmat.net) # http://www.catonmat.net -- good coders code, great reuse # # http://www.catonmat.net/blog/python-library-for-google-search/ # # Code is licensed under MIT license. # import re import urllib from htmlentitydefs import name2codepoint from BeautifulSoup import BeautifulSoup from browser import Browser, BrowserError class SearchError(Exception): """ Base class for Google Search exceptions. """ pass class ParseError(SearchError): """ Parse error in Google results. self.msg attribute contains explanation why parsing failed self.tag attribute contains BeautifulSoup object with the most relevant tag that failed to parse Thrown only in debug mode """ def __init__(self, msg, tag): self.msg = msg self.tag = tag def __str__(self): return self.msg def html(self): return self.tag.prettify() class SearchResult: def __init__(self, title, url, desc): self.title = title self.url = url self.desc = desc def __str__(self): return 'Google Search Result: "%s"' % self.title class GoogleSearch(object): SEARCH_URL_0 = "http://www.google.com/search?q=%(query)s&btnG=Google+Search" NEXT_PAGE_0 = "http://www.google.com/search?q=%(query)s&start=%(start)d" SEARCH_URL_1 = "http://www.google.com/search?q=%(query)s&num=%(num)d&btnG=Google+Search" NEXT_PAGE_1 = "http://www.google.com/search?q=%(query)s&num=%(num)d&start=%(start)d" def __init__(self, query, random_agent=False, debug=False, page=0): self.query = query self.debug = debug self.browser = Browser(debug=debug) self.results_info = None self.eor = False # end of results self._page = page self._results_per_page = 10 self._last_from = 0 if random_agent: self.browser.set_random_user_agent() @property def num_results(self): if not self.results_info: page = self._get_results_page() self.results_info = self._extract_info(page) if self.results_info['total'] == 0: self.eor = True return self.results_info['total'] def _get_page(self): return self._page def _set_page(self, page): self._page = page page = property(_get_page, _set_page) def _get_results_per_page(self): return self._results_per_page def _set_results_par_page(self, rpp): self._results_per_page = rpp results_per_page = property(_get_results_per_page, _set_results_par_page) def get_results(self): """ Gets a page of results """ if self.eor: return [] MAX_VALUE = 1000000 page = self._get_results_page() #search_info = self._extract_info(page) results = self._extract_results(page) search_info = {'from': self.results_per_page*self._page, 'to': self.results_per_page*self._page + len(results), 'total': MAX_VALUE} if not self.results_info: self.results_info = search_info if self.num_results == 0: self.eor = True return [] if not results: self.eor = True return [] if self._page > 0 and search_info['from'] == self._last_from: self.eor = True return [] if search_info['to'] == search_info['total']: self.eor = True self._page += 1 self._last_from = search_info['from'] return results def _maybe_raise(self, cls, *arg): if self.debug: raise cls(*arg) def _get_results_page(self): if self._page == 0: if self._results_per_page == 10: url = GoogleSearch.SEARCH_URL_0 else: url = GoogleSearch.SEARCH_URL_1 else: if self._results_per_page == 10: url = GoogleSearch.NEXT_PAGE_0 else: url = GoogleSearch.NEXT_PAGE_1 safe_url = url % { 'query': urllib.quote_plus(self.query), 'start': self._page * self._results_per_page, 'num': self._results_per_page } try: page = self.browser.get_page(safe_url) except BrowserError, e: raise SearchError, "Failed getting %s: %s" % (e.url, e.error) 
return BeautifulSoup(page) def _extract_info(self, soup): empty_info = {'from': 0, 'to': 0, 'total': 0} div_ssb = soup.find('div', id='ssb') if not div_ssb: self._maybe_raise(ParseError, "Div with number of results was not found on Google search page", soup) return empty_info p = div_ssb.find('p') if not p: self._maybe_raise(ParseError, """<p> tag within <div id="ssb"> was not found on Google search page""", soup) return empty_info txt = ''.join(p.findAll(text=True)) txt = txt.replace(',', '') matches = re.search(r'Results (\d+) - (\d+) of (?:about )?(\d+)', txt, re.U) if not matches: return empty_info return {'from': int(matches.group(1)), 'to': int(matches.group(2)), 'total': int(matches.group(3))} def _extract_results(self, soup): results = soup.findAll('li', {'class': 'g'}) ret_res = [] for result in results: eres = self._extract_result(result) if eres: ret_res.append(eres) return ret_res def _extract_result(self, result): title, url = self._extract_title_url(result) desc = self._extract_description(result) if not title or not url or not desc: return None return SearchResult(title, url, desc) def _extract_title_url(self, result): #title_a = result.find('a', {'class': re.compile(r'\bl\b')}) title_a = result.find('a') if not title_a: self._maybe_raise(ParseError, "Title tag in Google search result was not found", result) return None, None title = ''.join(title_a.findAll(text=True)) title = self._html_unescape(title) url = title_a['href'] match = re.match(r'/url\?q=(http[^&]+)&', url) if match: url = urllib.unquote(match.group(1)) return title, url def _extract_description(self, result): desc_div = result.find('div', {'class': re.compile(r'\bs\b')}) if not desc_div: self._maybe_raise(ParseError, "Description tag in Google search result was not found", result) return None desc_strs = [] def looper(tag): if not tag: return for t in tag: try: if t.name == 'br': break except AttributeError: pass try: desc_strs.append(t.string) except AttributeError: desc_strs.append(t) looper(desc_div) looper(desc_div.find('wbr')) # BeautifulSoup does not self-close <wbr> desc = ''.join(s for s in desc_strs if s) return self._html_unescape(desc) def _html_unescape(self, str): def entity_replacer(m): entity = m.group(1) if entity in name2codepoint: return unichr(name2codepoint[entity]) else: return m.group(0) def ascii_replacer(m): cp = int(m.group(1)) if cp <= 255: return unichr(cp) else: return m.group(0) s = re.sub(r'&#(\d+);', ascii_replacer, str, re.U) return re.sub(r'&([^;]+);', entity_replacer, s, re.U)
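# --- Illustrative usage (a sketch added for clarity; not part of the original module) ---
# A minimal example based on the GoogleSearch class above; the query string is an example,
# and it mirrors the module's Python 2 idiom (urllib, BeautifulSoup 3).
#
# gs = GoogleSearch("site:example.com login", random_agent=True)
# gs.results_per_page = 25
# while True:
#     page = gs.get_results()        # one page per call; returns [] once gs.eor is set
#     if not page:
#         break
#     for r in page:
#         print "%s -> %s" % (r.title, r.url)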
forweipan/fimap
src/xgoogle/search.py
Python
gpl-2.0
7,862
# encoding: utf-8 import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'XForm.shared_data' db.add_column('odk_logger_xform', 'shared_data', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False) def backwards(self, orm): # Deleting field 'XForm.shared_data' db.delete_column('odk_logger_xform', 'shared_data') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'odk_logger.attachment': { 'Meta': {'object_name': 'Attachment'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'instance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attachments'", 'to': "orm['odk_logger.Instance']"}), 'media_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}) }, 'odk_logger.instance': { 
'Meta': {'object_name': 'Instance'}, 'date': ('django.db.models.fields.DateField', [], {'null': 'True'}), 'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'status': ('django.db.models.fields.CharField', [], {'default': "u'submitted_via_web'", 'max_length': '20'}), 'survey_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['odk_logger.SurveyType']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'surveys'", 'null': 'True', 'to': "orm['auth.User']"}), 'xform': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'surveys'", 'null': 'True', 'to': "orm['odk_logger.XForm']"}), 'xml': ('django.db.models.fields.TextField', [], {}) }, 'odk_logger.surveytype': { 'Meta': {'object_name': 'SurveyType'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'slug': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'odk_logger.xform': { 'Meta': {'ordering': "('id_string',)", 'unique_together': "(('user', 'id_string'),)", 'object_name': 'XForm'}, 'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'downloadable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'id_string': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}), 'json': ('django.db.models.fields.TextField', [], {'default': "u''"}), 'shared': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'shared_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'xforms'", 'null': 'True', 'to': "orm['auth.User']"}), 'xls': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}), 'xml': ('django.db.models.fields.TextField', [], {}) } } complete_apps = ['logger']
awemulya/fieldsight-kobocat
onadata/apps/logger/south_migrations/0009_auto__add_field_xform_shared_data.py
Python
bsd-2-clause
6,913
"""Test the california_housing loader, if the data is available, or if specifically requested via environment variable (e.g. for travis cron job).""" import pytest from sklearn.datasets.tests.test_common import check_return_X_y from functools import partial def test_fetch(fetch_california_housing_fxt): data = fetch_california_housing_fxt() assert((20640, 8) == data.data.shape) assert((20640, ) == data.target.shape) # test return_X_y option fetch_func = partial(fetch_california_housing_fxt) check_return_X_y(data, fetch_func) def test_fetch_asframe(fetch_california_housing_fxt): pd = pytest.importorskip('pandas') bunch = fetch_california_housing_fxt(as_frame=True) frame = bunch.frame assert hasattr(bunch, 'frame') is True assert frame.shape == (20640, 9) assert isinstance(bunch.data, pd.DataFrame) assert isinstance(bunch.target, pd.Series) def test_pandas_dependency_message(fetch_california_housing_fxt, hide_available_pandas): # Check that pandas is imported lazily and that an informative error # message is raised when pandas is missing: expected_msg = ('fetch_california_housing with as_frame=True' ' requires pandas') with pytest.raises(ImportError, match=expected_msg): fetch_california_housing_fxt(as_frame=True)
glemaitre/scikit-learn
sklearn/datasets/tests/test_california_housing.py
Python
bsd-3-clause
1,370
# -*- coding: utf-8 -*- # Natural Language Toolkit: IBM Model 2 # # Copyright (C) 2001-2013 NLTK Project # Authors: Chin Yee Lee, Hengfeng Li, Ruxin Hou, Calvin Tanujaya Lim # URL: <http://nltk.org/> # For license information, see LICENSE.TXT """ Lexical translation model that considers word order. IBM Model 2 improves on Model 1 by accounting for word order. An alignment probability is introduced, a(i | j,l,m), which predicts a source word position, given its aligned target word's position. The EM algorithm used in Model 2 is: E step - In the training data, collect counts, weighted by prior probabilities. (a) count how many times a source language word is translated into a target language word (b) count how many times a particular position in the source sentence is aligned to a particular position in the target sentence M step - Estimate new probabilities based on the counts from the E step Notations: i: Position in the source sentence Valid values are 0 (for NULL), 1, 2, ..., length of source sentence j: Position in the target sentence Valid values are 1, 2, ..., length of target sentence l: Number of words in the source sentence, excluding NULL m: Number of words in the target sentence s: A word in the source language t: A word in the target language References: Philipp Koehn. 2010. Statistical Machine Translation. Cambridge University Press, New York. Peter E Brown, Stephen A. Della Pietra, Vincent J. Della Pietra, and Robert L. Mercer. 1993. The Mathematics of Statistical Machine Translation: Parameter Estimation. Computational Linguistics, 19 (2), 263-311. """ from __future__ import division from collections import defaultdict from nltk.translate import AlignedSent from nltk.translate import Alignment from nltk.translate import IBMModel from nltk.translate import IBMModel1 from nltk.translate.ibm_model import Counts import warnings class IBMModel2(IBMModel): """ Lexical translation model that considers word order >>> bitext = [] >>> bitext.append(AlignedSent(['klein', 'ist', 'das', 'haus'], ['the', 'house', 'is', 'small'])) >>> bitext.append(AlignedSent(['das', 'haus', 'ist', 'ja', 'groß'], ['the', 'house', 'is', 'big'])) >>> bitext.append(AlignedSent(['das', 'buch', 'ist', 'ja', 'klein'], ['the', 'book', 'is', 'small'])) >>> bitext.append(AlignedSent(['das', 'haus'], ['the', 'house'])) >>> bitext.append(AlignedSent(['das', 'buch'], ['the', 'book'])) >>> bitext.append(AlignedSent(['ein', 'buch'], ['a', 'book'])) >>> ibm2 = IBMModel2(bitext, 5) >>> print(round(ibm2.translation_table['buch']['book'], 3)) 1.0 >>> print(round(ibm2.translation_table['das']['book'], 3)) 0.0 >>> print(round(ibm2.translation_table['buch'][None], 3)) 0.0 >>> print(round(ibm2.translation_table['ja'][None], 3)) 0.0 >>> print(ibm2.alignment_table[1][1][2][2]) 0.938... >>> print(round(ibm2.alignment_table[1][2][2][2], 3)) 0.0 >>> print(round(ibm2.alignment_table[2][2][4][5], 3)) 1.0 >>> test_sentence = bitext[2] >>> test_sentence.words ['das', 'buch', 'ist', 'ja', 'klein'] >>> test_sentence.mots ['the', 'book', 'is', 'small'] >>> test_sentence.alignment Alignment([(0, 0), (1, 1), (2, 2), (3, 2), (4, 3)]) """ def __init__(self, sentence_aligned_corpus, iterations, probability_tables=None): """ Train on ``sentence_aligned_corpus`` and create a lexical translation model and an alignment model. Translation direction is from ``AlignedSent.mots`` to ``AlignedSent.words``. 
:param sentence_aligned_corpus: Sentence-aligned parallel corpus :type sentence_aligned_corpus: list(AlignedSent) :param iterations: Number of iterations to run training algorithm :type iterations: int :param probability_tables: Optional. Use this to pass in custom probability values. If not specified, probabilities will be set to a uniform distribution, or some other sensible value. If specified, all the following entries must be present: ``translation_table``, ``alignment_table``. See ``IBMModel`` for the type and purpose of these tables. :type probability_tables: dict[str]: object """ super(IBMModel2, self).__init__(sentence_aligned_corpus) if probability_tables is None: # Get translation probabilities from IBM Model 1 # Run more iterations of training for Model 1, since it is # faster than Model 2 ibm1 = IBMModel1(sentence_aligned_corpus, 2 * iterations) self.translation_table = ibm1.translation_table self.set_uniform_probabilities(sentence_aligned_corpus) else: # Set user-defined probabilities self.translation_table = probability_tables['translation_table'] self.alignment_table = probability_tables['alignment_table'] for n in range(0, iterations): self.train(sentence_aligned_corpus) self.__align_all(sentence_aligned_corpus) def set_uniform_probabilities(self, sentence_aligned_corpus): # a(i | j,l,m) = 1 / (l+1) for all i, j, l, m l_m_combinations = set() for aligned_sentence in sentence_aligned_corpus: l = len(aligned_sentence.mots) m = len(aligned_sentence.words) if (l, m) not in l_m_combinations: l_m_combinations.add((l, m)) initial_prob = 1 / (l + 1) if initial_prob < IBMModel.MIN_PROB: warnings.warn("A source sentence is too long (" + str(l) + " words). Results may be less accurate.") for i in range(0, l + 1): for j in range(1, m + 1): self.alignment_table[i][j][l][m] = initial_prob def train(self, parallel_corpus): counts = Model2Counts() for aligned_sentence in parallel_corpus: src_sentence = [None] + aligned_sentence.mots trg_sentence = ['UNUSED'] + aligned_sentence.words # 1-indexed l = len(aligned_sentence.mots) m = len(aligned_sentence.words) # E step (a): Compute normalization factors to weigh counts total_count = self.prob_all_alignments(src_sentence, trg_sentence) # E step (b): Collect counts for j in range(1, m + 1): t = trg_sentence[j] for i in range(0, l + 1): s = src_sentence[i] count = self.prob_alignment_point( i, j, src_sentence, trg_sentence) normalized_count = count / total_count[t] counts.update_lexical_translation(normalized_count, s, t) counts.update_alignment(normalized_count, i, j, l, m) # M step: Update probabilities with maximum likelihood estimates self.maximize_lexical_translation_probabilities(counts) self.maximize_alignment_probabilities(counts) def maximize_alignment_probabilities(self, counts): MIN_PROB = IBMModel.MIN_PROB for i, j_s in counts.alignment.items(): for j, src_sentence_lengths in j_s.items(): for l, trg_sentence_lengths in src_sentence_lengths.items(): for m in trg_sentence_lengths: estimate = (counts.alignment[i][j][l][m] / counts.alignment_for_any_i[j][l][m]) self.alignment_table[i][j][l][m] = max(estimate, MIN_PROB) def prob_all_alignments(self, src_sentence, trg_sentence): """ Computes the probability of all possible word alignments, expressed as a marginal distribution over target words t Each entry in the return value represents the contribution to the total alignment probability by the target word t. To obtain probability(alignment | src_sentence, trg_sentence), simply sum the entries in the return value. 
:return: Probability of t for all s in ``src_sentence`` :rtype: dict(str): float """ alignment_prob_for_t = defaultdict(lambda: 0.0) for j in range(1, len(trg_sentence)): t = trg_sentence[j] for i in range(0, len(src_sentence)): alignment_prob_for_t[t] += self.prob_alignment_point( i, j, src_sentence, trg_sentence) return alignment_prob_for_t def prob_alignment_point(self, i, j, src_sentence, trg_sentence): """ Probability that position j in ``trg_sentence`` is aligned to position i in the ``src_sentence`` """ l = len(src_sentence) - 1 m = len(trg_sentence) - 1 s = src_sentence[i] t = trg_sentence[j] return self.translation_table[t][s] * self.alignment_table[i][j][l][m] def prob_t_a_given_s(self, alignment_info): """ Probability of target sentence and an alignment given the source sentence """ prob = 1.0 l = len(alignment_info.src_sentence) - 1 m = len(alignment_info.trg_sentence) - 1 for j, i in enumerate(alignment_info.alignment): if j == 0: continue # skip the dummy zeroeth element trg_word = alignment_info.trg_sentence[j] src_word = alignment_info.src_sentence[i] prob *= (self.translation_table[trg_word][src_word] * self.alignment_table[i][j][l][m]) return max(prob, IBMModel.MIN_PROB) def __align_all(self, parallel_corpus): for sentence_pair in parallel_corpus: self.__align(sentence_pair) def __align(self, sentence_pair): """ Determines the best word alignment for one sentence pair from the corpus that the model was trained on. The best alignment will be set in ``sentence_pair`` when the method returns. In contrast with the internal implementation of IBM models, the word indices in the ``Alignment`` are zero- indexed, not one-indexed. :param sentence_pair: A sentence in the source language and its counterpart sentence in the target language :type sentence_pair: AlignedSent """ best_alignment = [] l = len(sentence_pair.mots) m = len(sentence_pair.words) for j, trg_word in enumerate(sentence_pair.words): # Initialize trg_word to align with the NULL token best_prob = (self.translation_table[trg_word][None] * self.alignment_table[0][j + 1][l][m]) best_prob = max(best_prob, IBMModel.MIN_PROB) best_alignment_point = None for i, src_word in enumerate(sentence_pair.mots): align_prob = (self.translation_table[trg_word][src_word] * self.alignment_table[i + 1][j + 1][l][m]) if align_prob >= best_prob: best_prob = align_prob best_alignment_point = i best_alignment.append((j, best_alignment_point)) sentence_pair.alignment = Alignment(best_alignment) class Model2Counts(Counts): """ Data object to store counts of various parameters during training. Includes counts for alignment. """ def __init__(self): super(Model2Counts, self).__init__() self.alignment = defaultdict( lambda: defaultdict(lambda: defaultdict(lambda: defaultdict( lambda: 0.0)))) self.alignment_for_any_i = defaultdict( lambda: defaultdict(lambda: defaultdict(lambda: 0.0))) def update_lexical_translation(self, count, s, t): self.t_given_s[t][s] += count self.any_t_given_s[s] += count def update_alignment(self, count, i, j, l, m): self.alignment[i][j][l][m] += count self.alignment_for_any_i[j][l][m] += count
sdoran35/hate-to-hugs
venv/lib/python3.6/site-packages/nltk/translate/ibm2.py
Python
mit
12,271
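The module docstring above describes Model 2's E step in prose. The sketch below replays that count collection for one toy sentence pair with hand-made tables, outside NLTK's class machinery; the sentences, the probability values, and the uniform alignment table are all invented for illustration.

from collections import defaultdict

# Toy translation probabilities t(t|s); i = 0 is the NULL source word, as in the module above.
t_table = {
    ('das', 'the'): 0.8, ('das', None): 0.2,
    ('haus', 'house'): 0.9, ('haus', None): 0.1,
}

def a_table(i, j, l, m):
    return 1.0 / (l + 1)            # uniform start, as in set_uniform_probabilities()

src = [None, 'the', 'house']        # source sentence with NULL prepended (l = 2)
trg = ['UNUSED', 'das', 'haus']     # target sentence, 1-indexed (m = 2)
l, m = len(src) - 1, len(trg) - 1

lex_counts = defaultdict(float)     # feeds the M step for t(t|s)
align_counts = defaultdict(float)   # feeds the M step for a(i|j,l,m)

for j in range(1, m + 1):
    t = trg[j]
    # normalization constant: total probability mass for this target word
    total = sum(t_table.get((t, src[i]), 0.0) * a_table(i, j, l, m) for i in range(l + 1))
    for i in range(l + 1):
        c = t_table.get((t, src[i]), 0.0) * a_table(i, j, l, m) / total
        lex_counts[(t, src[i])] += c
        align_counts[(i, j, l, m)] += c

print(round(lex_counts[('haus', 'house')], 3))   # fractional count for 'haus' aligned to 'house'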
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import sys import openerp.netsvc as netsvc import openerp.osv as base import openerp.pooler as pooler from openerp.tools.safe_eval import safe_eval as eval class Env(dict): def __init__(self, cr, uid, model, ids): self.cr = cr self.uid = uid self.model = model self.ids = ids self.obj = pooler.get_pool(cr.dbname).get(model) self.columns = self.obj._columns.keys() + self.obj._inherit_fields.keys() def __getitem__(self, key): if (key in self.columns) or (key in dir(self.obj)): res = self.obj.browse(self.cr, self.uid, self.ids[0]) return res[key] else: return super(Env, self).__getitem__(key) def _eval_expr(cr, ident, workitem, action): ret=False assert action, 'You used a NULL action in a workflow, use dummy node instead.' for line in action.split('\n'): line = line.strip() uid=ident[0] model=ident[1] ids=[ident[2]] if line =='True': ret=True elif line =='False': ret=False else: env = Env(cr, uid, model, ids) ret = eval(line, env, nocopy=True) return ret def execute_action(cr, ident, workitem, activity): obj = pooler.get_pool(cr.dbname).get('ir.actions.server') ctx = {'active_model':ident[1], 'active_id':ident[2], 'active_ids':[ident[2]]} result = obj.run(cr, ident[0], [activity['action_id']], ctx) return result def execute(cr, ident, workitem, activity): return _eval_expr(cr, ident, workitem, activity['action']) def check(cr, workitem, ident, transition, signal): if transition['signal'] and signal != transition['signal']: return False uid = ident[0] if transition['group_id'] and uid != 1: pool = pooler.get_pool(cr.dbname) user_groups = pool.get('res.users').read(cr, uid, [uid], ['groups_id'])[0]['groups_id'] if not transition['group_id'] in user_groups: return False return _eval_expr(cr, ident, workitem, transition['condition']) # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
BorgERP/borg-erp-6of3
server/openerp/workflow/wkf_expr.py
Python
agpl-3.0
3,130
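_eval_expr above walks a multi-line workflow condition and keeps the value of the last evaluated line. The same contract can be shown with a plain dict standing in for the Env browse-record wrapper; this sketch uses Python's builtin eval purely for illustration, whereas the real code goes through OpenERP's safe_eval.

def eval_expr(action, env):
    ret = False
    for line in action.split('\n'):
        line = line.strip()
        if not line:
            continue
        if line == 'True':
            ret = True
        elif line == 'False':
            ret = False
        else:
            ret = eval(line, {}, env)   # last expression wins
    return ret

record = {'state': 'done', 'amount': 250}                    # stands in for the browse record fields
print(eval_expr("state == 'done'\namount > 100", record))    # True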
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html

import os
import sys
from optparse import OptionParser, OptionValueError
import math
import vtk

cohesion = 1.0
friction_angle = 20 * math.pi / 180.0
sinphi = math.sin(friction_angle)
cosphi = math.cos(friction_angle)
cohcos = cohesion * cosphi

dp_c = 3.0
dp_phi = math.pi / 6.0
dp_sinphi = math.sin(dp_phi)
dp_cosphi = math.cos(dp_phi)
dp_t = 0.0
dp_tc = 2.0

def ismoother(f_diff):
    if (abs(f_diff) >= opts.smoothing_tol):
        return 0.0
    return 0.5 * (opts.smoothing_tol - abs(f_diff)) - opts.smoothing_tol / math.pi * math.cos(0.5 * math.pi * f_diff / opts.smoothing_tol)

def yield_function_2(yf1, yf2):
    return max(yf1, yf2) + ismoother(yf1 - yf2)

def yield_function(x, y, z):
    yfs = []
    if opts.twoD_example:
        yfs += [- x, - y, y - 1.0, - y - 0.5 + 0.5 * x]
    if opts.twoD_example_alternative:
        yfs += [y - 1.0, - y - 0.5 + 0.5 * x, - x, - y]
    if opts.dp:
        yfs += [y + x * dp_sinphi - dp_c * dp_cosphi, x - dp_t, -x - dp_tc]
    if opts.tensile:
        yfs += [x - opts.tensile_strength, y - opts.tensile_strength, z - opts.tensile_strength]
    if opts.mc:
        yfs += [0.5 * (x - z) + 0.5 * (x + z) * sinphi - cohcos,
                0.5 * (y - z) + 0.5 * (y + z) * sinphi - cohcos,
                0.5 * (x - y) + 0.5 * (x + y) * sinphi - cohcos,
                0.5 * (y - x) + 0.5 * (x + y) * sinphi - cohcos,
                0.5 * (z - y) + 0.5 * (y + z) * sinphi - cohcos,
                0.5 * (z - x) + 0.5 * (x + z) * sinphi - cohcos]

    yf = yfs[0]
    for i in range(1, len(yfs)):
        yf = yield_function_2(yf, yfs[i])
    return yf


# parse command line
p = OptionParser(usage="""usage: %prog [options] <vtk_file>
Inserts yield function values into <vtk_file>.
Only 3D input is accepted: this program assumes that the individual yield functions are functions of x, y, z.
""")
p.add_option("-v", action="store_true", dest="verbose", help="Verbose")
p.add_option("--name", action="store", type="string", default="yield_function", dest="name", help="The pointdata produced will have this name. Default=%default")
p.add_option("--smoothing_tol", action="store", type="float", default=0.1, dest="smoothing_tol", help="The smoothing tolerance (a) parameter. Default=%default")
p.add_option("-t", action="store_true", dest="tensile", help="Yield function will contain contributions from tensile (Rankine) failure")
p.add_option("--tensile_strength", action="store", type="float", default=0.7, dest="tensile_strength", help="Tensile strength")
p.add_option("-m", action="store_true", dest="mc", help="Yield function will contain contributions from Mohr-Coulomb failure")
p.add_option("-d", action="store_true", dest="dp", help="Yield function will contain contributions from Drucker-Prager failure")
p.add_option("-e", action="store_true", dest="twoD_example", help="Yield function will contain contributions from an example 2D yield function")
p.add_option("-a", action="store_true", dest="twoD_example_alternative", help="Yield function will contain contributions from an alternative example 2D yield function")

(opts, args) = p.parse_args()

# get the vtk filename
if len(args) != 1:
    p.print_help()
    sys.exit(1)
in_file = args[0]

if opts.verbose:
    print("Reading", in_file)
if in_file.endswith(".vtp"):
    indata = vtk.vtkXMLPolyDataReader()
    writer = vtk.vtkXMLPolyDataWriter()
elif in_file.endswith(".vtu"):
    indata = vtk.vtkXMLUnstructuredGridReader()
    writer = vtk.vtkXMLUnstructuredGridWriter()
elif in_file.endswith(".vtr"):
    indata = vtk.vtkXMLRectilinearGridReader()
    writer = vtk.vtkXMLRectilinearGridWriter()
else:
    print("This program has not yet been configured to read files of type", in_file)
    sys.exit(2)
indata.SetFileName(in_file)
indata.Update()
indata = indata.GetOutput()

if opts.verbose:
    print("Generating", opts.name)
yf = vtk.vtkDoubleArray()
yf.SetName(opts.name)
yf.SetNumberOfValues(indata.GetNumberOfPoints())
for ptid in range(indata.GetNumberOfPoints()):
    (x, y, z) = indata.GetPoint(ptid)
    yf.SetValue(ptid, yield_function(x, y, z))
indata.GetPointData().AddArray(yf)

if opts.verbose:
    print("Writing", in_file)
writer.SetFileName(in_file)
writer.SetDataModeToBinary()
writer.SetInputConnection(indata.GetProducerPort())
writer.Write()

sys.exit(0)
nuclear-wizard/moose
modules/tensor_mechanics/doc/tests/yf.py
Python
lgpl-2.1
4,579
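The script above blends the individual yield surfaces with a smoothed maximum (yield_function_2), so the corner where two surfaces meet is rounded instead of sharp. The sketch below isolates just that smoothing on two scalar values; smoothing_tol here is a made-up stand-in for the --smoothing_tol option.

import math

smoothing_tol = 0.1

def ismoother(f_diff):
    # same formula as ismoother() in the script, with the tolerance passed in explicitly
    if abs(f_diff) >= smoothing_tol:
        return 0.0
    return (0.5 * (smoothing_tol - abs(f_diff))
            - smoothing_tol / math.pi * math.cos(0.5 * math.pi * f_diff / smoothing_tol))

def smooth_max(a, b):
    return max(a, b) + ismoother(a - b)

print(smooth_max(1.0, 0.0), max(1.0, 0.0))     # far from the corner the two agree: 1.0 1.0
print(smooth_max(0.01, 0.0), max(0.01, 0.0))   # near the corner the blend sits slightly above max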
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Resource management library.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os as _os import sys as _sys from tensorflow.python.util import tf_inspect as _inspect from tensorflow.python.util.tf_export import tf_export # pylint: disable=g-import-not-at-top try: from rules_python.python.runfiles import runfiles except ImportError: runfiles = None # pylint: enable=g-import-not-at-top @tf_export(v1=['resource_loader.load_resource']) def load_resource(path): """Load the resource at given path, where path is relative to tensorflow/. Args: path: a string resource path relative to tensorflow/. Returns: The contents of that resource. Raises: IOError: If the path is not found, or the resource can't be opened. """ with open(get_path_to_datafile(path), 'rb') as f: return f.read() # pylint: disable=protected-access @tf_export(v1=['resource_loader.get_data_files_path']) def get_data_files_path(): """Get a direct path to the data files colocated with the script. Returns: The directory where files specified in data attribute of py_test and py_binary are stored. """ return _os.path.dirname(_inspect.getfile(_sys._getframe(1))) @tf_export(v1=['resource_loader.get_root_dir_with_all_resources']) def get_root_dir_with_all_resources(): """Get a root directory containing all the data attributes in the build rule. Returns: The path to the specified file present in the data attribute of py_test or py_binary. Falls back to returning the same as get_data_files_path if it fails to detect a bazel runfiles directory. """ script_dir = get_data_files_path() # Create a history of the paths, because the data files are located relative # to the repository root directory, which is directly under runfiles # directory. directories = [script_dir] data_files_dir = '' while True: candidate_dir = directories[-1] current_directory = _os.path.basename(candidate_dir) if '.runfiles' in current_directory: # Our file should never be directly under runfiles. # If the history has only one item, it means we are directly inside the # runfiles directory, something is wrong, fall back to the default return # value, script directory. if len(directories) > 1: data_files_dir = directories[-2] break else: new_candidate_dir = _os.path.dirname(candidate_dir) # If we are at the root directory these two will be the same. if new_candidate_dir == candidate_dir: break else: directories.append(new_candidate_dir) return data_files_dir or script_dir @tf_export(v1=['resource_loader.get_path_to_datafile']) def get_path_to_datafile(path): """Get the path to the specified file in the data dependencies. The path is relative to tensorflow/ Args: path: a string resource path relative to tensorflow/ Returns: The path to the specified file present in the data attribute of py_test or py_binary. 
Raises: IOError: If the path is not found, or the resource can't be opened. """ # First, try finding in the new path. if runfiles: r = runfiles.Create() new_fpath = r.Rlocation( _os.path.abspath(_os.path.join('tensorflow', path))) if new_fpath is not None and _os.path.exists(new_fpath): return new_fpath # Then, the old style path, as people became dependent on this buggy call. old_filepath = _os.path.join( _os.path.dirname(_inspect.getfile(_sys._getframe(1))), path) return old_filepath @tf_export(v1=['resource_loader.readahead_file_path']) def readahead_file_path(path, readahead='128M'): # pylint: disable=unused-argument """Readahead files not implemented; simply returns given path.""" return path
karllessard/tensorflow
tensorflow/python/platform/resource_loader.py
Python
apache-2.0
4,522
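Both helpers above resolve files that a build rule declared in its data attribute. A minimal usage sketch follows; the testdata paths are invented, and the files would have to exist for the reads to succeed.

from tensorflow.python.platform import resource_loader

# Resolve a path relative to the calling target's data dependencies (hypothetical file).
path = resource_loader.get_path_to_datafile('testdata/example.txt')
print(path)

# load_resource() is the one-call shortcut: resolve a tensorflow/-relative path and read the bytes.
# Raises IOError if the file is not part of the data dependencies.
contents = resource_loader.load_resource('python/platform/testdata/example.txt')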
""" Read SAS sas7bdat or xport files. """ from pandas import compat from pandas.io.common import _stringify_path def read_sas(filepath_or_buffer, format=None, index=None, encoding=None, chunksize=None, iterator=False): """ Read SAS files stored as either XPORT or SAS7BDAT format files. Parameters ---------- filepath_or_buffer : string or file-like object Path to the SAS file. format : string {'xport', 'sas7bdat'} or None If None, file format is inferred. If 'xport' or 'sas7bdat', uses the corresponding format. index : identifier of index column, defaults to None Identifier of column that should be used as index of the DataFrame. encoding : string, default is None Encoding for text data. If None, text data are stored as raw bytes. chunksize : int Read file `chunksize` lines at a time, returns iterator. iterator : bool, defaults to False If True, returns an iterator for reading the file incrementally. Returns ------- DataFrame if iterator=False and chunksize=None, else SAS7BDATReader or XportReader """ if format is None: buffer_error_msg = ("If this is a buffer object rather " "than a string name, you must specify " "a format string") filepath_or_buffer = _stringify_path(filepath_or_buffer) if not isinstance(filepath_or_buffer, compat.string_types): raise ValueError(buffer_error_msg) try: fname = filepath_or_buffer.lower() if fname.endswith(".xpt"): format = "xport" elif fname.endswith(".sas7bdat"): format = "sas7bdat" else: raise ValueError("unable to infer format of SAS file") except: pass if format.lower() == 'xport': from pandas.io.sas.sas_xport import XportReader reader = XportReader(filepath_or_buffer, index=index, encoding=encoding, chunksize=chunksize) elif format.lower() == 'sas7bdat': from pandas.io.sas.sas7bdat import SAS7BDATReader reader = SAS7BDATReader(filepath_or_buffer, index=index, encoding=encoding, chunksize=chunksize) else: raise ValueError('unknown SAS format') if iterator or chunksize: return reader data = reader.read() reader.close() return data
NixaSoftware/CVis
venv/lib/python2.7/site-packages/pandas/io/sas/sasreader.py
Python
apache-2.0
2,558
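The function above either infers the on-disk format from the file extension or needs an explicit format when given a buffer. A sketch of the three calling patterns from the docstring, with placeholder file names:

import pandas as pd

# Extension-based inference: ".sas7bdat" selects the SAS7BDAT reader.
df = pd.read_sas('survey.sas7bdat', encoding='latin-1')

# A buffer has no extension to inspect, so format must be passed explicitly.
with open('transport.xpt', 'rb') as f:
    df_xport = pd.read_sas(f, format='xport')

# chunksize (or iterator=True) returns a reader instead of a DataFrame.
reader = pd.read_sas('survey.sas7bdat', chunksize=10000)
first_chunk = reader.read(10000)
reader.close()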
# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Initial operations for dvr from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'dvr_host_macs', sa.Column('host', sa.String(length=255), nullable=False), sa.Column('mac_address', sa.String(length=32), nullable=False, unique=True), sa.PrimaryKeyConstraint('host') ) op.create_table( 'ml2_dvr_port_bindings', sa.Column('port_id', sa.String(length=36), nullable=False), sa.Column('host', sa.String(length=255), nullable=False), sa.Column('router_id', sa.String(length=36), nullable=True), sa.Column('vif_type', sa.String(length=64), nullable=False), sa.Column('vif_details', sa.String(length=4095), nullable=False, server_default=''), sa.Column('vnic_type', sa.String(length=64), nullable=False, server_default='normal'), sa.Column('profile', sa.String(length=4095), nullable=False, server_default=''), sa.Column(u'status', sa.String(16), nullable=False), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('port_id', 'host') ) op.create_table( 'csnat_l3_agent_bindings', sa.Column('router_id', sa.String(length=36), nullable=False), sa.Column('l3_agent_id', sa.String(length=36), nullable=False), sa.Column('host_id', sa.String(length=255), nullable=True), sa.Column('csnat_gw_port_id', sa.String(length=36), nullable=True), sa.ForeignKeyConstraint(['l3_agent_id'], ['agents.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['router_id'], ['routers.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['csnat_gw_port_id'], ['ports.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('router_id', 'l3_agent_id') )
wolverineav/neutron
neutron/db/migration/alembic_migrations/dvr_init_opts.py
Python
apache-2.0
2,619
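The migration above only issues CREATE TABLE statements through alembic's op object. For a quick look at the DDL it implies, the same dvr_host_macs definition can be rebuilt with plain SQLAlchemy against an in-memory SQLite engine; this is just an inspection aid, not part of the migration itself.

import sqlalchemy as sa

metadata = sa.MetaData()
dvr_host_macs = sa.Table(
    'dvr_host_macs', metadata,
    sa.Column('host', sa.String(length=255), primary_key=True),
    sa.Column('mac_address', sa.String(length=32), nullable=False, unique=True),
)

engine = sa.create_engine('sqlite://')
metadata.create_all(engine)
print(sa.schema.CreateTable(dvr_host_macs).compile(engine))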
# -*- coding: utf-8 -*- # # Django documentation build configuration file, created by # sphinx-quickstart on Thu Mar 27 09:06:53 2008. # # This file is execfile()d with the current directory set to its containing dir. # # The contents of this file are pickled, so don't put values in the namespace # that aren't picklable (module imports are okay, they're removed automatically). # # All configuration values have a default; values that are commented out # serve to show the default. from __future__ import unicode_literals import sys from os.path import abspath, dirname, join # Workaround for sphinx-build recursion limit overflow: # pickle.dump(doctree, f, pickle.HIGHEST_PROTOCOL) # RuntimeError: maximum recursion depth exceeded while pickling an object # # Python's default allowed recursion depth is 1000 but this isn't enough for # building docs/ref/settings.txt sometimes. # https://groups.google.com/d/topic/sphinx-dev/MtRf64eGtv4/discussion sys.setrecursionlimit(2000) # Make sure we get the version of this copy of Django sys.path.insert(1, dirname(dirname(abspath(__file__)))) # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.append(abspath(join(dirname(__file__), "_ext"))) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. needs_sphinx = '1.3' # Actually 1.3.4, but micro versions aren't supported here. # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ "djangodocs", "sphinx.ext.intersphinx", "sphinx.ext.viewcode", "ticket_role", ] # Spelling check needs an additional module that is not installed by default. # Add it only if spelling check is requested so docs can be generated without it. if 'spelling' in sys.argv: extensions.append("sphinxcontrib.spelling") # Spelling language. spelling_lang = 'en_US' # Location of word list. spelling_word_list_filename = 'spelling_wordlist' # Add any paths that contain templates here, relative to this directory. # templates_path = [] # The suffix of source filenames. source_suffix = '.txt' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'contents' # General substitutions. project = 'Django' copyright = 'Django Software Foundation and contributors' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '1.11' # The full version, including alpha/beta/rc tags. try: from django import VERSION, get_version except ImportError: release = version else: def django_release(): pep440ver = get_version() if VERSION[3:5] == ('alpha', 0) and 'dev' not in pep440ver: return pep440ver + '.dev' return pep440ver release = django_release() # The "development version" of Django django_next_version = '1.11' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
# language = None # Location for .po/.mo translation files used when language is set locale_dirs = ['locale/'] # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = False # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'trac' # Links to Python's docs should reference the most recent version of the 3.x # branch, which is located at this URL. intersphinx_mapping = { 'python': ('https://docs.python.org/3/', None), 'sphinx': ('http://sphinx-doc.org/', None), 'six': ('https://pythonhosted.org/six/', None), 'formtools': ('https://django-formtools.readthedocs.io/en/latest/', None), 'psycopg2': ('http://initd.org/psycopg/docs/', None), } # Python's docs don't change every week. intersphinx_cache_limit = 90 # days # The 'versionadded' and 'versionchanged' directives are overridden. suppress_warnings = ['app.add_directive'] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = "djangodocs" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. html_theme_path = ["_theme"] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". # html_static_path = ["_static"] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. html_use_smartypants = True # HTML translator class for the builder html_translator_class = "djangodocs.DjangoHTMLTranslator" # Content template for the index page. # html_index = '' # Custom sidebar templates, maps document names to template names. 
# html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'Djangodoc' modindex_common_prefix = ["django."] # Appended to every page rst_epilog = """ .. |django-users| replace:: :ref:`django-users <django-users-mailing-list>` .. |django-core-mentorship| replace:: :ref:`django-core-mentorship <django-core-mentorship-mailing-list>` .. |django-developers| replace:: :ref:`django-developers <django-developers-mailing-list>` .. |django-announce| replace:: :ref:`django-announce <django-announce-mailing-list>` .. |django-updates| replace:: :ref:`django-updates <django-updates-mailing-list>` """ # -- Options for LaTeX output -------------------------------------------------- latex_elements = { 'preamble': ( '\\DeclareUnicodeCharacter{2264}{\\ensuremath{\\le}}' '\\DeclareUnicodeCharacter{2265}{\\ensuremath{\\ge}}' '\\DeclareUnicodeCharacter{2665}{[unicode-heart]}' '\\DeclareUnicodeCharacter{2713}{[unicode-checkmark]}' ), } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, document class [howto/manual]). # latex_documents = [] latex_documents = [ ('contents', 'django.tex', 'Django Documentation', 'Django Software Foundation', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [( 'ref/django-admin', 'django-admin', 'Utility script for the Django Web framework', ['Django Software Foundation'], 1 ), ] # -- Options for Texinfo output ------------------------------------------------ # List of tuples (startdocname, targetname, title, author, dir_entry, # description, category, toctree_only) texinfo_documents = [( master_doc, "django", "", "", "Django", "Documentation of the Django framework", "Web development", False )] # -- Options for Epub output --------------------------------------------------- # Bibliographic Dublin Core info. 
epub_title = project
epub_author = 'Django Software Foundation'
epub_publisher = 'Django Software Foundation'
epub_copyright = copyright

# The basename for the epub file. It defaults to the project name.
# epub_basename = 'Django'

# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
epub_theme = 'djangodocs-epub'

# The language of the text. It defaults to the language option
# or en if the language is not set.
# epub_language = ''

# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''

# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
# epub_identifier = ''

# A unique identification for the text.
# epub_uid = ''

# A tuple containing the cover image and cover page html template filenames.
epub_cover = ('', 'epub-cover.html')

# A sequence of (type, uri, title) tuples for the guide element of content.opf.
# epub_guide = ()

# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []

# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []

# A list of files that should not be packed into the epub file.
# epub_exclude_files = []

# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3

# Allow duplicate toc entries.
# epub_tocdup = True

# Choose between 'default' and 'includehidden'.
# epub_tocscope = 'default'

# Fix unsupported image types using the PIL.
# epub_fix_images = False

# Scale large images.
# epub_max_image_width = 0

# How to display URL addresses: 'footnote', 'no', or 'inline'.
# epub_show_urls = 'inline'

# If false, no index is generated.
# epub_use_index = True


# -- ticket options ------------------------------------------------------------
ticket_url = 'https://code.djangoproject.com/ticket/%s'
kutenai/django
docs/conf.py
Python
bsd-3-clause
12,643
# (c) 2017, Brian Coca # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = ''' cache: pickle short_description: Pickle formatted files. description: - This cache uses Python's pickle serialization format, in per host files, saved to the filesystem. version_added: "2.3" author: Brian Coca (@bcoca) options: _uri: required: True description: - Path in which the cache plugin will save the files type: list env: - name: ANSIBLE_CACHE_PLUGIN_CONNECTION ini: - key: fact_caching_connection section: defaults _prefix: description: User defined prefix to use when creating the files env: - name: ANSIBLE_CACHE_PLUGIN_PREFIX ini: - key: fact_caching_prefix section: defaults _timeout: default: 86400 description: Expiration timeout for the cache plugin data env: - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT ini: - key: fact_caching_timeout section: defaults ''' try: import cPickle as pickle except ImportError: import pickle from ansible.module_utils.six import PY3 from ansible.plugins.cache import BaseFileCacheModule class CacheModule(BaseFileCacheModule): """ A caching module backed by pickle files. """ def _load(self, filepath): # Pickle is a binary format with open(filepath, 'rb') as f: if PY3: return pickle.load(f, encoding='bytes') else: return pickle.load(f) def _dump(self, value, filepath): with open(filepath, 'wb') as f: # Use pickle protocol 2 which is compatible with Python 2.3+. pickle.dump(value, f, protocol=2)
hryamzik/ansible
lib/ansible/plugins/cache/pickle.py
Python
gpl-3.0
2,016
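The plugin stores one pickle file per host and pins protocol 2 so a cache written under one Python major version stays readable under the other. The same round trip outside Ansible, with made-up fact data:

import os
import pickle
import tempfile

facts = {'ansible_hostname': 'web01', 'ansible_memtotal_mb': 2048}   # invented host facts
path = os.path.join(tempfile.mkdtemp(), 'web01')

# _dump() above: binary file, protocol 2 for cross-version compatibility.
with open(path, 'wb') as f:
    pickle.dump(facts, f, protocol=2)

# _load() above: pickle is a binary format; encoding='bytes' only matters on Python 3
# when the file was produced by Python 2.
with open(path, 'rb') as f:
    restored = pickle.load(f)

assert restored == facts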
# -*- coding: utf-8 -*- from openerp import models, fields, api, osv # We just create a new model class mother(models.Model): _name = 'test.inherit.mother' _columns = { # check interoperability of field inheritance with old-style fields 'name': osv.fields.char('Name', required=True), } surname = fields.Char(compute='_compute_surname') state = fields.Selection([('a', 'A'), ('b', 'B')]) @api.one @api.depends('name') def _compute_surname(self): self.surname = self.name or '' # We want to inherits from the parent model and we add some fields # in the child object class daughter(models.Model): _name = 'test.inherit.daughter' _inherits = {'test.inherit.mother': 'template_id'} template_id = fields.Many2one('test.inherit.mother', 'Template', required=True, ondelete='cascade') field_in_daughter = fields.Char('Field1') # We add a new field in the parent object. Because of a recent refactoring, # this feature was broken. # This test and these models try to show the bug and fix it. class mother(models.Model): _inherit = 'test.inherit.mother' field_in_mother = fields.Char() # extend the name field by adding a default value name = fields.Char(default='Unknown') # extend the selection of the state field state = fields.Selection(selection_add=[('c', 'C')]) # override the computed field, and extend its dependencies @api.one @api.depends('field_in_mother') def _compute_surname(self): if self.field_in_mother: self.surname = self.field_in_mother else: super(mother, self)._compute_surname() class mother(models.Model): _inherit = 'test.inherit.mother' # extend again the selection of the state field state = fields.Selection(selection_add=[('d', 'D')]) class daughter(models.Model): _inherit = 'test.inherit.daughter' # simply redeclare the field without adding any option template_id = fields.Many2one() # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
OpenUpgrade-dev/OpenUpgrade
openerp/addons/test_inherit/models.py
Python
agpl-3.0
2,104
# -*- encoding: utf-8 -*- ############################################################################## # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see http://www.gnu.org/licenses/. # ############################################################################## from openerp import models import math class MrpProduction(models.Model): _inherit = 'mrp.production' def _get_workorder_in_product_lines( self, workcenter_lines, product_lines, properties=None): super(MrpProduction, self)._get_workorder_in_product_lines( workcenter_lines, product_lines, properties=properties) for workorder in workcenter_lines: wc = workorder.routing_wc_line cycle = wc.cycle_nbr and (self.product_qty / wc.cycle_nbr) or 0 if self.company_id.complete_cycle: cycle = int(math.ceil(cycle)) workorder.cycle = cycle workorder.hour = wc.hour_nbr * cycle
odoocn/odoomrp-wip
mrp_product_variants_configurable_timing/models/mrp_production.py
Python
agpl-3.0
1,568
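The override above turns the quantity to produce into work-centre cycles, optionally rounding up to whole cycles when the company enables complete_cycle. The arithmetic on its own, with invented numbers:

import math

product_qty = 25.0
cycle_nbr = 10.0    # units the work centre processes per cycle
hour_nbr = 0.5      # hours per cycle

cycle = product_qty / cycle_nbr if cycle_nbr else 0   # 2.5 fractional cycles
whole_cycles = int(math.ceil(cycle))                  # 3 when complete_cycle is enabled
hours = hour_nbr * whole_cycles                       # 1.5 hours booked on the work order
print(cycle, whole_cycles, hours)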
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( parse_duration, fix_xml_ampersands, ) class TNAFlixIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?tnaflix\.com/(?P<cat_id>[\w-]+)/(?P<display_id>[\w-]+)/video(?P<id>\d+)' _TITLE_REGEX = r'<title>(.+?) - TNAFlix Porn Videos</title>' _DESCRIPTION_REGEX = r'<h3 itemprop="description">([^<]+)</h3>' _CONFIG_REGEX = r'flashvars\.config\s*=\s*escape\("([^"]+)"' _TEST = { 'url': 'http://www.tnaflix.com/porn-stars/Carmella-Decesare-striptease/video553878', 'md5': 'ecf3498417d09216374fc5907f9c6ec0', 'info_dict': { 'id': '553878', 'display_id': 'Carmella-Decesare-striptease', 'ext': 'mp4', 'title': 'Carmella Decesare - striptease', 'description': '', 'thumbnail': 're:https?://.*\.jpg$', 'duration': 91, 'age_limit': 18, } } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') display_id = mobj.group('display_id') webpage = self._download_webpage(url, display_id) title = self._html_search_regex( self._TITLE_REGEX, webpage, 'title') if self._TITLE_REGEX else self._og_search_title(webpage) description = self._html_search_regex( self._DESCRIPTION_REGEX, webpage, 'description', fatal=False, default='') age_limit = self._rta_search(webpage) duration = self._html_search_meta('duration', webpage, 'duration', default=None) if duration: duration = parse_duration(duration[1:]) cfg_url = self._proto_relative_url(self._html_search_regex( self._CONFIG_REGEX, webpage, 'flashvars.config'), 'http:') cfg_xml = self._download_xml( cfg_url, display_id, note='Downloading metadata', transform_source=fix_xml_ampersands) thumbnail = cfg_xml.find('./startThumb').text formats = [] for item in cfg_xml.findall('./quality/item'): video_url = re.sub('speed=\d+', 'speed=', item.find('videoLink').text) format_id = item.find('res').text fmt = { 'url': video_url, 'format_id': format_id, } m = re.search(r'^(\d+)', format_id) if m: fmt['height'] = int(m.group(1)) formats.append(fmt) self._sort_formats(formats) return { 'id': video_id, 'display_id': display_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'duration': duration, 'age_limit': age_limit, 'formats': formats, }
marxin/youtube-dl
youtube_dl/extractor/tnaflix.py
Python
unlicense
2,854
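Everything the extractor does hangs off the named groups in _VALID_URL. Matching the URL from its own _TEST block shows what lands in cat_id, display_id and id; the regex below is copied verbatim from the class above.

import re

_VALID_URL = r'https?://(?:www\.)?tnaflix\.com/(?P<cat_id>[\w-]+)/(?P<display_id>[\w-]+)/video(?P<id>\d+)'
url = 'http://www.tnaflix.com/porn-stars/Carmella-Decesare-striptease/video553878'

mobj = re.match(_VALID_URL, url)
print(mobj.group('cat_id'))       # porn-stars
print(mobj.group('display_id'))   # Carmella-Decesare-striptease
print(mobj.group('id'))           # 553878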
#!/usr/bin/python # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. DOCUMENTATION = ''' --- module: profitbricks short_description: Create, destroy, start, stop, and reboot a ProfitBricks virtual machine. description: - Create, destroy, update, start, stop, and reboot a ProfitBricks virtual machine. When the virtual machine is created it can optionally wait for it to be 'running' before returning. This module has a dependency on profitbricks >= 1.0.0 version_added: "2.0" options: auto_increment: description: - Whether or not to increment a single number in the name for created virtual machines. default: yes choices: ["yes", "no"] name: description: - The name of the virtual machine. required: true image: description: - The system image ID for creating the virtual machine, e.g. a3eae284-a2fe-11e4-b187-5f1f641608c8. required: true image_password: description: - Password set for the administrative user. required: false version_added: '2.2' ssh_keys: description: - Public SSH keys allowing access to the virtual machine. required: false version_added: '2.2' datacenter: description: - The datacenter to provision this virtual machine. required: false default: null cores: description: - The number of CPU cores to allocate to the virtual machine. required: false default: 2 ram: description: - The amount of memory to allocate to the virtual machine. required: false default: 2048 cpu_family: description: - The CPU family type to allocate to the virtual machine. required: false default: AMD_OPTERON choices: [ "AMD_OPTERON", "INTEL_XEON" ] version_added: '2.2' volume_size: description: - The size in GB of the boot volume. required: false default: 10 bus: description: - The bus type for the volume. required: false default: VIRTIO choices: [ "IDE", "VIRTIO"] instance_ids: description: - list of instance ids, currently only used when state='absent' to remove instances. required: false count: description: - The number of virtual machines to create. required: false default: 1 location: description: - The datacenter location. Use only if you want to create the Datacenter or else this value is ignored. required: false default: us/las choices: [ "us/las", "de/fra", "de/fkb" ] assign_public_ip: description: - This will assign the machine to the public LAN. If no LAN exists with public Internet access it is created. required: false default: false lan: description: - The ID of the LAN you wish to add the servers to. required: false default: 1 subscription_user: description: - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environement variable. required: false default: null subscription_password: description: - THe ProfitBricks password. Overrides the PB_PASSWORD environement variable. 
required: false default: null wait: description: - wait for the instance to be in state 'running' before returning required: false default: "yes" choices: [ "yes", "no" ] wait_timeout: description: - how long before wait gives up, in seconds default: 600 remove_boot_volume: description: - remove the bootVolume of the virtual machine you're destroying. required: false default: "yes" choices: ["yes", "no"] state: description: - create or terminate instances required: false default: 'present' choices: [ "running", "stopped", "absent", "present" ] requirements: - "profitbricks" - "python >= 2.6" author: Matt Baldwin (baldwin@stackpointcloud.com) ''' EXAMPLES = ''' # Note: These examples do not set authentication details, see the AWS Guide for details. # Provisioning example. This will create three servers and enumerate their names. - profitbricks: datacenter: Tardis One name: web%02d.stackpointcloud.com cores: 4 ram: 2048 volume_size: 50 cpu_family: INTEL_XEON image: a3eae284-a2fe-11e4-b187-5f1f641608c8 location: us/las count: 3 assign_public_ip: true # Removing Virtual machines - profitbricks: datacenter: Tardis One instance_ids: - 'web001.stackpointcloud.com' - 'web002.stackpointcloud.com' - 'web003.stackpointcloud.com' wait_timeout: 500 state: absent # Starting Virtual Machines. - profitbricks: datacenter: Tardis One instance_ids: - 'web001.stackpointcloud.com' - 'web002.stackpointcloud.com' - 'web003.stackpointcloud.com' wait_timeout: 500 state: running # Stopping Virtual Machines - profitbricks: datacenter: Tardis One instance_ids: - 'web001.stackpointcloud.com' - 'web002.stackpointcloud.com' - 'web003.stackpointcloud.com' wait_timeout: 500 state: stopped ''' import re import uuid import time HAS_PB_SDK = True try: from profitbricks.client import ProfitBricksService, Volume, Server, Datacenter, NIC, LAN except ImportError: HAS_PB_SDK = False LOCATIONS = ['us/las', 'de/fra', 'de/fkb'] uuid_match = re.compile( '[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I) def _wait_for_completion(profitbricks, promise, wait_timeout, msg): if not promise: return wait_timeout = time.time() + wait_timeout while wait_timeout > time.time(): time.sleep(5) operation_result = profitbricks.get_request( request_id=promise['requestId'], status=True) if operation_result['metadata']['status'] == "DONE": return elif operation_result['metadata']['status'] == "FAILED": raise Exception( 'Request failed to complete ' + msg + ' "' + str( promise['requestId']) + '" to complete.') raise Exception( 'Timed out waiting for async operation ' + msg + ' "' + str( promise['requestId'] ) + '" to complete.') def _create_machine(module, profitbricks, datacenter, name): cores = module.params.get('cores') ram = module.params.get('ram') cpu_family = module.params.get('cpu_family') volume_size = module.params.get('volume_size') disk_type = module.params.get('disk_type') image_password = module.params.get('image_password') ssh_keys = module.params.get('ssh_keys') bus = module.params.get('bus') lan = module.params.get('lan') assign_public_ip = module.params.get('assign_public_ip') subscription_user = module.params.get('subscription_user') subscription_password = module.params.get('subscription_password') location = module.params.get('location') image = module.params.get('image') assign_public_ip = module.boolean(module.params.get('assign_public_ip')) wait = module.params.get('wait') wait_timeout = module.params.get('wait_timeout') if assign_public_ip: public_found = False lans = profitbricks.list_lans(datacenter) for lan in lans['items']: 
if lan['properties']['public']: public_found = True lan = lan['id'] if not public_found: i = LAN( name='public', public=True) lan_response = profitbricks.create_lan(datacenter, i) _wait_for_completion(profitbricks, lan_response, wait_timeout, "_create_machine") lan = lan_response['id'] v = Volume( name=str(uuid.uuid4()).replace('-', '')[:10], size=volume_size, image=image, image_password=image_password, ssh_keys=ssh_keys, disk_type=disk_type, bus=bus) n = NIC( lan=int(lan) ) s = Server( name=name, ram=ram, cores=cores, cpu_family=cpu_family, create_volumes=[v], nics=[n], ) try: create_server_response = profitbricks.create_server( datacenter_id=datacenter, server=s) _wait_for_completion(profitbricks, create_server_response, wait_timeout, "create_virtual_machine") server_response = profitbricks.get_server( datacenter_id=datacenter, server_id=create_server_response['id'], depth=3 ) except Exception as e: module.fail_json(msg="failed to create the new server: %s" % str(e)) else: return server_response def _startstop_machine(module, profitbricks, datacenter_id, server_id): state = module.params.get('state') try: if state == 'running': profitbricks.start_server(datacenter_id, server_id) else: profitbricks.stop_server(datacenter_id, server_id) return True except Exception as e: module.fail_json(msg="failed to start or stop the virtual machine %s: %s" % (name, str(e))) def _create_datacenter(module, profitbricks): datacenter = module.params.get('datacenter') location = module.params.get('location') wait_timeout = module.params.get('wait_timeout') i = Datacenter( name=datacenter, location=location ) try: datacenter_response = profitbricks.create_datacenter(datacenter=i) _wait_for_completion(profitbricks, datacenter_response, wait_timeout, "_create_datacenter") return datacenter_response except Exception as e: module.fail_json(msg="failed to create the new server(s): %s" % str(e)) def create_virtual_machine(module, profitbricks): """ Create new virtual machine module : AnsibleModule object profitbricks: authenticated profitbricks object Returns: True if a new virtual machine was created, false otherwise """ datacenter = module.params.get('datacenter') name = module.params.get('name') auto_increment = module.params.get('auto_increment') count = module.params.get('count') lan = module.params.get('lan') wait_timeout = module.params.get('wait_timeout') failed = True datacenter_found = False virtual_machines = [] virtual_machine_ids = [] # Locate UUID for datacenter if referenced by name. datacenter_list = profitbricks.list_datacenters() datacenter_id = _get_datacenter_id(datacenter_list, datacenter) if datacenter_id: datacenter_found = True if not datacenter_found: datacenter_response = _create_datacenter(module, profitbricks) datacenter_id = datacenter_response['id'] _wait_for_completion(profitbricks, datacenter_response, wait_timeout, "create_virtual_machine") if auto_increment: numbers = set() count_offset = 1 try: name % 0 except TypeError, e: if e.message.startswith('not all'): name = '%s%%d' % name else: module.fail_json(msg=e.message) number_range = xrange(count_offset, count_offset + count + len(numbers)) available_numbers = list(set(number_range).difference(numbers)) names = [] numbers_to_use = available_numbers[:count] for number in numbers_to_use: names.append(name % number) else: names = [name] # Prefetch a list of servers for later comparison. server_list = profitbricks.list_servers(datacenter_id) for name in names: # Skip server creation if the server already exists. 
if _get_server_id(server_list, name): continue create_response = _create_machine(module, profitbricks, str(datacenter_id), name) nics = profitbricks.list_nics(datacenter_id, create_response['id']) for n in nics['items']: if lan == n['properties']['lan']: create_response.update({'public_ip': n['properties']['ips'][0]}) virtual_machines.append(create_response) failed = False results = { 'failed': failed, 'machines': virtual_machines, 'action': 'create', 'instance_ids': { 'instances': [i['id'] for i in virtual_machines], } } return results def remove_virtual_machine(module, profitbricks): """ Removes a virtual machine. This will remove the virtual machine along with the bootVolume. module : AnsibleModule object profitbricks: authenticated profitbricks object. Not yet supported: handle deletion of attached data disks. Returns: True if a new virtual server was deleted, false otherwise """ datacenter = module.params.get('datacenter') instance_ids = module.params.get('instance_ids') remove_boot_volume = module.params.get('remove_boot_volume') changed = False if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1: module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting') # Locate UUID for datacenter if referenced by name. datacenter_list = profitbricks.list_datacenters() datacenter_id = _get_datacenter_id(datacenter_list, datacenter) if not datacenter_id: module.fail_json(msg='Virtual data center \'%s\' not found.' % str(datacenter)) # Prefetch server list for later comparison. server_list = profitbricks.list_servers(datacenter_id) for instance in instance_ids: # Locate UUID for server if referenced by name. server_id = _get_server_id(server_list, instance) if server_id: # Remove the server's boot volume if remove_boot_volume: _remove_boot_volume(module, profitbricks, datacenter_id, server_id) # Remove the server try: server_response = profitbricks.delete_server(datacenter_id, server_id) except Exception as e: module.fail_json(msg="failed to terminate the virtual server: %s" % str(e)) else: changed = True return changed def _remove_boot_volume(module, profitbricks, datacenter_id, server_id): """ Remove the boot volume from the server """ try: server = profitbricks.get_server(datacenter_id, server_id) volume_id = server['properties']['bootVolume']['id'] volume_response = profitbricks.delete_volume(datacenter_id, volume_id) except Exception as e: module.fail_json(msg="failed to remove the server's boot volume: %s" % str(e)) def startstop_machine(module, profitbricks, state): """ Starts or Stops a virtual machine. module : AnsibleModule object profitbricks: authenticated profitbricks object. Returns: True when the servers process the action successfully, false otherwise. """ if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1: module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting') wait = module.params.get('wait') wait_timeout = module.params.get('wait_timeout') changed = False datacenter = module.params.get('datacenter') instance_ids = module.params.get('instance_ids') # Locate UUID for datacenter if referenced by name. datacenter_list = profitbricks.list_datacenters() datacenter_id = _get_datacenter_id(datacenter_list, datacenter) if not datacenter_id: module.fail_json(msg='Virtual data center \'%s\' not found.' % str(datacenter)) # Prefetch server list for later comparison. 
server_list = profitbricks.list_servers(datacenter_id) for instance in instance_ids: # Locate UUID of server if referenced by name. server_id = _get_server_id(server_list, instance) if server_id: _startstop_machine(module, profitbricks, datacenter_id, server_id) changed = True if wait: wait_timeout = time.time() + wait_timeout while wait_timeout > time.time(): matched_instances = [] for res in profitbricks.list_servers(datacenter_id)['items']: if state == 'running': if res['properties']['vmState'].lower() == state: matched_instances.append(res) elif state == 'stopped': if res['properties']['vmState'].lower() == 'shutoff': matched_instances.append(res) if len(matched_instances) < len(instance_ids): time.sleep(5) else: break if wait_timeout <= time.time(): # waiting took too long module.fail_json(msg="wait for virtual machine state timeout on %s" % time.asctime()) return (changed) def _get_datacenter_id(datacenters, identity): """ Fetch and return datacenter UUID by datacenter name if found. """ for datacenter in datacenters['items']: if identity in (datacenter['properties']['name'], datacenter['id']): return datacenter['id'] return None def _get_server_id(servers, identity): """ Fetch and return server UUID by server name if found. """ for server in servers['items']: if identity in (server['properties']['name'], server['id']): return server['id'] return None def main(): module = AnsibleModule( argument_spec=dict( datacenter=dict(), name=dict(), image=dict(), cores=dict(type='int', default=2), ram=dict(type='int', default=2048), cpu_family=dict(choices=['AMD_OPTERON', 'INTEL_XEON'], default='AMD_OPTERON'), volume_size=dict(type='int', default=10), disk_type=dict(choices=['HDD', 'SSD'], default='HDD'), image_password=dict(default=None), ssh_keys=dict(type='list', default=[]), bus=dict(choices=['VIRTIO', 'IDE'], default='VIRTIO'), lan=dict(type='int', default=1), count=dict(type='int', default=1), auto_increment=dict(type='bool', default=True), instance_ids=dict(type='list', default=[]), subscription_user=dict(), subscription_password=dict(), location=dict(choices=LOCATIONS, default='us/las'), assign_public_ip=dict(type='bool', default=False), wait=dict(type='bool', default=True), wait_timeout=dict(type='int', default=600), remove_boot_volume=dict(type='bool', default=True), state=dict(default='present'), ) ) if not HAS_PB_SDK: module.fail_json(msg='profitbricks required for this module') subscription_user = module.params.get('subscription_user') subscription_password = module.params.get('subscription_password') wait = module.params.get('wait') wait_timeout = module.params.get('wait_timeout') profitbricks = ProfitBricksService( username=subscription_user, password=subscription_password) state = module.params.get('state') if state == 'absent': if not module.params.get('datacenter'): module.fail_json(msg='datacenter parameter is required ' + 'for running or stopping machines.') try: (changed) = remove_virtual_machine(module, profitbricks) module.exit_json(changed=changed) except Exception as e: module.fail_json(msg='failed to set instance state: %s' % str(e)) elif state in ('running', 'stopped'): if not module.params.get('datacenter'): module.fail_json(msg='datacenter parameter is required for ' + 'running or stopping machines.') try: (changed) = startstop_machine(module, profitbricks, state) module.exit_json(changed=changed) except Exception as e: module.fail_json(msg='failed to set instance state: %s' % str(e)) elif state == 'present': if not module.params.get('name'): 
module.fail_json(msg='name parameter is required for new instance') if not module.params.get('image'): module.fail_json(msg='image parameter is required for new instance') if not module.params.get('subscription_user'): module.fail_json(msg='subscription_user parameter is ' + 'required for new instance') if not module.params.get('subscription_password'): module.fail_json(msg='subscription_password parameter is ' + 'required for new instance') try: (machine_dict_array) = create_virtual_machine(module, profitbricks) module.exit_json(**machine_dict_array) except Exception as e: module.fail_json(msg='failed to set instance state: %s' % str(e)) from ansible.module_utils.basic import * main()
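
# A minimal, hypothetical playbook sketch (illustrative only; the datacenter and
# server names below are assumptions, not taken from this module) showing how the
# 'absent' state handled above is typically driven:
#
#   - profitbricks:
#       datacenter: Example DC
#       instance_ids:
#         - 'web001'
#         - 'web002'
#       remove_boot_volume: yes
#       subscription_user: "{{ pb_user }}"
#       subscription_password: "{{ pb_password }}"
#       state: absent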
haad/ansible-modules-extras
cloud/profitbricks/profitbricks.py
Python
gpl-3.0
21,799
#!/usr/bin/env python # encoding: utf-8 # Ali Sabil, 2007 import os.path, shutil import Task, Runner, Utils, Logs, Build, Node, Options from TaskGen import extension, after, before EXT_VALA = ['.vala', '.gs'] class valac_task(Task.Task): vars = ("VALAC", "VALAC_VERSION", "VALAFLAGS") before = ("cc", "cxx") def run(self): env = self.env inputs = [a.srcpath(env) for a in self.inputs] valac = env['VALAC'] vala_flags = env.get_flat('VALAFLAGS') top_src = self.generator.bld.srcnode.abspath() top_bld = self.generator.bld.srcnode.abspath(env) if env['VALAC_VERSION'] > (0, 1, 6): cmd = [valac, '-C', '--quiet', vala_flags] else: cmd = [valac, '-C', vala_flags] if self.threading: cmd.append('--thread') if self.profile: cmd.append('--profile=%s' % self.profile) if self.target_glib: cmd.append('--target-glib=%s' % self.target_glib) features = self.generator.features if 'cshlib' in features or 'cstaticlib' in features: output_dir = self.outputs[0].bld_dir(env) cmd.append('--library ' + self.target) if env['VALAC_VERSION'] >= (0, 7, 0): for x in self.outputs: if x.name.endswith('.h'): cmd.append('--header ' + x.bldpath(self.env)) cmd.append('--basedir ' + top_src) cmd.append('-d ' + top_bld) if env['VALAC_VERSION'] > (0, 7, 2) and hasattr(self, 'gir'): cmd.append('--gir=%s.gir' % self.gir) else: output_dir = self.outputs[0].bld_dir(env) cmd.append('-d %s' % output_dir) for vapi_dir in self.vapi_dirs: cmd.append('--vapidir=%s' % vapi_dir) for package in self.packages: cmd.append('--pkg %s' % package) for package in self.packages_private: cmd.append('--pkg %s' % package) cmd.append(" ".join(inputs)) result = self.generator.bld.exec_command(" ".join(cmd)) if not 'cprogram' in features: # generate the .deps file if self.packages: filename = os.path.join(self.generator.path.abspath(env), "%s.deps" % self.target) deps = open(filename, 'w') for package in self.packages: deps.write(package + '\n') deps.close() # handle vala 0.1.6 who doesn't honor --directory for the generated .vapi self._fix_output("../%s.vapi" % self.target) # handle vala >= 0.1.7 who has a weid definition for --directory self._fix_output("%s.vapi" % self.target) # handle vala >= 0.2.0 who doesn't honor --directory for the generated .gidl self._fix_output("%s.gidl" % self.target) # handle vala >= 0.3.6 who doesn't honor --directory for the generated .gir self._fix_output("%s.gir" % self.target) if hasattr(self, 'gir'): self._fix_output("%s.gir" % self.gir) first = None for node in self.outputs: if not first: first = node else: if first.parent.id != node.parent.id: # issue #483 if env['VALAC_VERSION'] < (0, 7, 0): shutil.move(first.parent.abspath(self.env) + os.sep + node.name, node.abspath(self.env)) return result def install(self): bld = self.generator.bld features = self.generator.features if self.attr("install_path") and ("cshlib" in features or "cstaticlib" in features): headers_list = [o for o in self.outputs if o.suffix() == ".h"] vapi_list = [o for o in self.outputs if (o.suffix() in (".vapi", ".deps"))] gir_list = [o for o in self.outputs if o.suffix() == ".gir"] for header in headers_list: top_src = self.generator.bld.srcnode package = self.env['PACKAGE'] try: api_version = Utils.g_module.API_VERSION except AttributeError: version = Utils.g_module.VERSION.split(".") if version[0] == "0": api_version = "0." 
+ version[1] else: api_version = version[0] + ".0" install_path = '${INCLUDEDIR}/%s-%s/%s' % (package, api_version, header.relpath_gen(top_src)) bld.install_as(install_path, header, self.env) bld.install_files('${DATAROOTDIR}/vala/vapi', vapi_list, self.env) bld.install_files('${DATAROOTDIR}/gir-1.0', gir_list, self.env) def _fix_output(self, output): top_bld = self.generator.bld.srcnode.abspath(self.env) try: src = os.path.join(top_bld, output) dst = self.generator.path.abspath (self.env) shutil.move(src, dst) except: pass @extension(EXT_VALA) def vala_file(self, node): valatask = getattr(self, "valatask", None) # there is only one vala task and it compiles all vala files .. :-/ if not valatask: valatask = self.create_task('valac') self.valatask = valatask self.includes = Utils.to_list(getattr(self, 'includes', [])) self.uselib = self.to_list(self.uselib) valatask.packages = [] valatask.packages_private = Utils.to_list(getattr(self, 'packages_private', [])) valatask.vapi_dirs = [] valatask.target = self.target valatask.threading = False valatask.install_path = self.install_path valatask.profile = getattr (self, 'profile', 'gobject') valatask.target_glib = None #Deprecated packages = Utils.to_list(getattr(self, 'packages', [])) vapi_dirs = Utils.to_list(getattr(self, 'vapi_dirs', [])) includes = [] if hasattr(self, 'uselib_local'): local_packages = Utils.to_list(self.uselib_local) seen = [] while len(local_packages) > 0: package = local_packages.pop() if package in seen: continue seen.append(package) # check if the package exists package_obj = self.name_to_obj(package) if not package_obj: raise Utils.WafError("object '%s' was not found in uselib_local (required by '%s')" % (package, self.name)) package_name = package_obj.target package_node = package_obj.path package_dir = package_node.relpath_gen(self.path) for task in package_obj.tasks: for output in task.outputs: if output.name == package_name + ".vapi": valatask.set_run_after(task) if package_name not in packages: packages.append(package_name) if package_dir not in vapi_dirs: vapi_dirs.append(package_dir) if package_dir not in includes: includes.append(package_dir) if hasattr(package_obj, 'uselib_local'): lst = self.to_list(package_obj.uselib_local) lst.reverse() local_packages = [pkg for pkg in lst if pkg not in seen] + local_packages valatask.packages = packages for vapi_dir in vapi_dirs: try: valatask.vapi_dirs.append(self.path.find_dir(vapi_dir).abspath()) valatask.vapi_dirs.append(self.path.find_dir(vapi_dir).abspath(self.env)) except AttributeError: Logs.warn("Unable to locate Vala API directory: '%s'" % vapi_dir) self.includes.append(node.bld.srcnode.abspath()) self.includes.append(node.bld.srcnode.abspath(self.env)) for include in includes: try: self.includes.append(self.path.find_dir(include).abspath()) self.includes.append(self.path.find_dir(include).abspath(self.env)) except AttributeError: Logs.warn("Unable to locate include directory: '%s'" % include) if valatask.profile == 'gobject': if hasattr(self, 'target_glib'): Logs.warn ('target_glib on vala tasks is deprecated --vala-target-glib=MAJOR.MINOR from the vala tool options') if getattr(Options.options, 'vala_target_glib', None): valatask.target_glib = Options.options.vala_target_glib if not 'GOBJECT' in self.uselib: self.uselib.append('GOBJECT') if hasattr(self, 'threading'): if valatask.profile == 'gobject': valatask.threading = self.threading if not 'GTHREAD' in self.uselib: self.uselib.append('GTHREAD') else: #Vala doesn't have threading support for dova nor posix 
Logs.warn("Profile %s does not have threading support" % valatask.profile) if hasattr(self, 'gir'): valatask.gir = self.gir env = valatask.env output_nodes = [] c_node = node.change_ext('.c') output_nodes.append(c_node) self.allnodes.append(c_node) if env['VALAC_VERSION'] < (0, 7, 0): output_nodes.append(node.change_ext('.h')) else: if not 'cprogram' in self.features: output_nodes.append(self.path.find_or_declare('%s.h' % self.target)) if not 'cprogram' in self.features: output_nodes.append(self.path.find_or_declare('%s.vapi' % self.target)) if env['VALAC_VERSION'] > (0, 7, 2): if hasattr(self, 'gir'): output_nodes.append(self.path.find_or_declare('%s.gir' % self.gir)) elif env['VALAC_VERSION'] > (0, 3, 5): output_nodes.append(self.path.find_or_declare('%s.gir' % self.target)) elif env['VALAC_VERSION'] > (0, 1, 7): output_nodes.append(self.path.find_or_declare('%s.gidl' % self.target)) if valatask.packages: output_nodes.append(self.path.find_or_declare('%s.deps' % self.target)) valatask.inputs.append(node) valatask.outputs.extend(output_nodes) def detect(conf): min_version = (0, 1, 6) min_version_str = "%d.%d.%d" % min_version valac = conf.find_program('valac', var='VALAC', mandatory=True) if not conf.env["HAVE_GOBJECT"]: pkg_args = {'package': 'gobject-2.0', 'uselib_store': 'GOBJECT', 'args': '--cflags --libs'} if getattr(Options.options, 'vala_target_glib', None): pkg_args['atleast_version'] = Options.options.vala_target_glib conf.check_cfg(**pkg_args) if not conf.env["HAVE_GTHREAD"]: pkg_args = {'package': 'gthread-2.0', 'uselib_store': 'GTHREAD', 'args': '--cflags --libs'} if getattr(Options.options, 'vala_target_glib', None): pkg_args['atleast_version'] = Options.options.vala_target_glib conf.check_cfg(**pkg_args) try: output = Utils.cmd_output(valac + " --version", silent=True) version = output.split(' ', 1)[-1].strip().split(".")[0:3] version = [int(x) for x in version] valac_version = tuple(version) except Exception: valac_version = (0, 0, 0) conf.check_message('program version', 'valac >= ' + min_version_str, valac_version >= min_version, "%d.%d.%d" % valac_version) conf.check_tool('gnu_dirs') if valac_version < min_version: conf.fatal("valac version too old to be used with this tool") return conf.env['VALAC_VERSION'] = valac_version conf.env['VALAFLAGS'] = '' def set_options (opt): valaopts = opt.add_option_group('Vala Compiler Options') valaopts.add_option ('--vala-target-glib', default=None, dest='vala_target_glib', metavar='MAJOR.MINOR', help='Target version of glib for Vala GObject code generation')
mantaraya36/xmms2-mantaraya36
wafadmin/Tools/vala.py
Python
lgpl-2.1
10,297
from django.conf import settings from django.template import loader from django.views.i18n import set_language from xadmin.plugins.utils import get_context_dict from xadmin.sites import site from xadmin.views import BaseAdminPlugin, CommAdminView, BaseAdminView class SetLangNavPlugin(BaseAdminPlugin): def block_top_navmenu(self, context, nodes): context = get_context_dict(context) context['redirect_to'] = self.request.get_full_path() nodes.append(loader.render_to_string('xadmin/blocks/comm.top.setlang.html', context=context)) class SetLangView(BaseAdminView): def post(self, request, *args, **kwargs): if 'nav_menu' in request.session: del request.session['nav_menu'] return set_language(request) if settings.LANGUAGES and 'django.middleware.locale.LocaleMiddleware' in settings.MIDDLEWARE_CLASSES: site.register_plugin(SetLangNavPlugin, CommAdminView) site.register_view(r'^i18n/setlang/$', SetLangView, 'set_language')
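
# A minimal, hypothetical settings sketch (illustrative only): the registration above
# only runs when the host project defines LANGUAGES and enables Django's locale
# middleware, e.g.
#
#   LANGUAGES = [('en', 'English'), ('zh-hans', 'Chinese')]
#   MIDDLEWARE_CLASSES = [
#       # ...
#       'django.middleware.locale.LocaleMiddleware',
#       # ...
#   ]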
sshwsfc/django-xadmin
xadmin/plugins/language.py
Python
bsd-3-clause
1,002
""" Database models for the badges app """ from importlib import import_module from django.conf import settings from django.contrib.auth.models import User from django.core.exceptions import ValidationError from django.db import models from django.utils.translation import ugettext_lazy as _ from jsonfield import JSONField from lazy import lazy from model_utils.models import TimeStampedModel from opaque_keys import InvalidKeyError from opaque_keys.edx.keys import CourseKey from badges.utils import deserialize_count_specs from config_models.models import ConfigurationModel from xmodule.modulestore.django import modulestore from xmodule_django.models import CourseKeyField def validate_badge_image(image): """ Validates that a particular image is small enough to be a badge and square. """ if image.width != image.height: raise ValidationError(_(u"The badge image must be square.")) if not image.size < (250 * 1024): raise ValidationError(_(u"The badge image file size must be less than 250KB.")) def validate_lowercase(string): """ Validates that a string is lowercase. """ if not string.islower(): raise ValidationError(_(u"This value must be all lowercase.")) class CourseBadgesDisabledError(Exception): """ Exception raised when Course Badges aren't enabled, but an attempt to fetch one is made anyway. """ class BadgeClass(models.Model): """ Specifies a badge class to be registered with a backend. """ slug = models.SlugField(max_length=255, validators=[validate_lowercase]) issuing_component = models.SlugField(max_length=50, default='', blank=True, validators=[validate_lowercase]) display_name = models.CharField(max_length=255) course_id = CourseKeyField(max_length=255, blank=True, default=None) description = models.TextField() criteria = models.TextField() # Mode a badge was awarded for. Included for legacy/migration purposes. mode = models.CharField(max_length=100, default='', blank=True) image = models.ImageField(upload_to='badge_classes', validators=[validate_badge_image]) def __unicode__(self): return u"<Badge '{slug}' for '{issuing_component}'>".format( slug=self.slug, issuing_component=self.issuing_component ) @classmethod def get_badge_class( cls, slug, issuing_component, display_name=None, description=None, criteria=None, image_file_handle=None, mode='', course_id=None, create=True ): """ Looks up a badge class by its slug, issuing component, and course_id and returns it should it exist. If it does not exist, and create is True, creates it according to the arguments. Otherwise, returns None. The expectation is that an XBlock or platform developer should not need to concern themselves with whether or not a badge class has already been created, but should just feed all requirements to this function and it will 'do the right thing'. It should be the exception, rather than the common case, that a badge class would need to be looked up without also being created were it missing. 
""" slug = slug.lower() issuing_component = issuing_component.lower() if course_id and not modulestore().get_course(course_id).issue_badges: raise CourseBadgesDisabledError("This course does not have badges enabled.") if not course_id: course_id = CourseKeyField.Empty try: return cls.objects.get(slug=slug, issuing_component=issuing_component, course_id=course_id) except cls.DoesNotExist: if not create: return None badge_class = cls( slug=slug, issuing_component=issuing_component, display_name=display_name, course_id=course_id, mode=mode, description=description, criteria=criteria, ) badge_class.image.save(image_file_handle.name, image_file_handle) badge_class.full_clean() badge_class.save() return badge_class @lazy def backend(self): """ Loads the badging backend. """ module, klass = settings.BADGING_BACKEND.rsplit('.', 1) module = import_module(module) return getattr(module, klass)() def get_for_user(self, user): """ Get the assertion for this badge class for this user, if it has been awarded. """ return self.badgeassertion_set.filter(user=user) def award(self, user, evidence_url=None): """ Contacts the backend to have a badge assertion created for this badge class for this user. """ return self.backend.award(self, user, evidence_url=evidence_url) def save(self, **kwargs): """ Slugs must always be lowercase. """ self.slug = self.slug and self.slug.lower() self.issuing_component = self.issuing_component and self.issuing_component.lower() super(BadgeClass, self).save(**kwargs) class Meta(object): app_label = "badges" unique_together = (('slug', 'issuing_component', 'course_id'),) verbose_name_plural = "Badge Classes" class BadgeAssertion(TimeStampedModel): """ Tracks badges on our side of the badge baking transaction """ user = models.ForeignKey(User) badge_class = models.ForeignKey(BadgeClass) data = JSONField() backend = models.CharField(max_length=50) image_url = models.URLField() assertion_url = models.URLField() def __unicode__(self): return u"<{username} Badge Assertion for {slug} for {issuing_component}".format( username=self.user.username, slug=self.badge_class.slug, issuing_component=self.badge_class.issuing_component, ) @classmethod def assertions_for_user(cls, user, course_id=None): """ Get all assertions for a user, optionally constrained to a course. """ if course_id: return cls.objects.filter(user=user, badge_class__course_id=course_id) return cls.objects.filter(user=user) class Meta(object): app_label = "badges" # Abstract model doesn't index this, so we have to. BadgeAssertion._meta.get_field('created').db_index = True # pylint: disable=protected-access class CourseCompleteImageConfiguration(models.Model): """ Contains the icon configuration for badges for a specific course mode. """ mode = models.CharField( max_length=125, help_text=_(u'The course mode for this badge image. For example, "verified" or "honor".'), unique=True, ) icon = models.ImageField( # Actual max is 256KB, but need overhead for badge baking. This should be more than enough. help_text=_( u"Badge images must be square PNG files. The file size should be under 250KB." ), upload_to='course_complete_badges', validators=[validate_badge_image] ) default = models.BooleanField( help_text=_( u"Set this value to True if you want this image to be the default image for any course modes " u"that do not have a specified badge image. You can have only one default image." 
), default=False, ) def __unicode__(self): return u"<CourseCompleteImageConfiguration for '{mode}'{default}>".format( mode=self.mode, default=u" (default)" if self.default else u'' ) def clean(self): """ Make sure there's not more than one default. """ # pylint: disable=no-member if self.default and CourseCompleteImageConfiguration.objects.filter(default=True).exclude(id=self.id): raise ValidationError(_(u"There can be only one default image.")) @classmethod def image_for_mode(cls, mode): """ Get the image for a particular mode. """ try: return cls.objects.get(mode=mode).icon except cls.DoesNotExist: # Fall back to default, if there is one. return cls.objects.get(default=True).icon class Meta(object): app_label = "badges" class CourseEventBadgesConfiguration(ConfigurationModel): """ Determines the settings for meta course awards-- such as completing a certain number of courses or enrolling in a certain number of them. """ courses_completed = models.TextField( blank=True, default='', help_text=_( u"On each line, put the number of completed courses to award a badge for, a comma, and the slug of a " u"badge class you have created that has the issuing component 'openedx__course'. " u"For example: 3,enrolled_3_courses" ) ) courses_enrolled = models.TextField( blank=True, default='', help_text=_( u"On each line, put the number of enrolled courses to award a badge for, a comma, and the slug of a " u"badge class you have created that has the issuing component 'openedx__course'. " u"For example: 3,enrolled_3_courses" ) ) course_groups = models.TextField( blank=True, default='', help_text=_( u"Each line is a comma-separated list. The first item in each line is the slug of a badge class you " u"have created that has an issuing component of 'openedx__course'. The remaining items in each line are " u"the course keys the learner needs to complete to be awarded the badge. For example: " u"slug_for_compsci_courses_group_badge,course-v1:CompSci+Course+First,course-v1:CompsSci+Course+Second" ) ) def __unicode__(self): return u"<CourseEventBadgesConfiguration ({})>".format(u"Enabled" if self.enabled else u"Disabled") @property def completed_settings(self): """ Parses the settings from the courses_completed field. """ return deserialize_count_specs(self.courses_completed) @property def enrolled_settings(self): """ Parses the settings from the courses_completed field. """ return deserialize_count_specs(self.courses_enrolled) @property def course_group_settings(self): """ Parses the course group settings. In example, the format is: slug_for_compsci_courses_group_badge,course-v1:CompSci+Course+First,course-v1:CompsSci+Course+Second """ specs = self.course_groups.strip() if not specs: return {} specs = [line.split(',', 1) for line in specs.splitlines()] return { slug.strip().lower(): [CourseKey.from_string(key.strip()) for key in keys.strip().split(',')] for slug, keys in specs } def clean_fields(self, exclude=tuple()): """ Verify the settings are parseable. 
""" errors = {} error_message = _(u"Please check the syntax of your entry.") if 'courses_completed' not in exclude: try: self.completed_settings except (ValueError, InvalidKeyError): errors['courses_completed'] = [unicode(error_message)] if 'courses_enrolled' not in exclude: try: self.enrolled_settings except (ValueError, InvalidKeyError): errors['courses_enrolled'] = [unicode(error_message)] if 'course_groups' not in exclude: store = modulestore() try: for key_list in self.course_group_settings.values(): for course_key in key_list: if not store.get_course(course_key): ValueError(u"The course {course_key} does not exist.".format(course_key=course_key)) except (ValueError, InvalidKeyError): errors['course_groups'] = [unicode(error_message)] if errors: raise ValidationError(errors) class Meta(object): app_label = "badges"
cecep-edu/edx-platform
lms/djangoapps/badges/models.py
Python
agpl-3.0
12,190
import unittest import re import os class ImportLoadLibs(unittest.TestCase): """ Test which libraries are loaded during importing ROOT """ # The whitelist is a list of regex expressions that mark wanted libraries # Note that the regex has to result in an exact match with the library name. known_libs = [ # libCore and dependencies 'libCore', 'libm', 'liblz4', 'libxxhash', 'liblzma', 'libzstd', 'libz', 'libpthread', 'libc', 'libdl', 'libpcre', # libCling and dependencies 'libCling.*', 'librt', 'libncurses.*', 'libtinfo', # by libncurses (on some older platforms) # libTree and dependencies 'libTree', 'libThread', 'libRIO', 'libNet', 'libImt', 'libMathCore', 'libMultiProc', 'libssl', 'libcrypt.*', # by libssl 'libtbb', 'liburing', # by libRIO if uring option is enabled # On centos7 libssl links against kerberos pulling in all dependencies below, removed with libssl1.1.0 'libgssapi_krb5', 'libkrb5', 'libk5crypto', 'libkrb5support', 'libselinux', 'libkeyutils', 'libcom_err', 'libresolv', # cppyy and Python libraries 'libcppyy.*', 'libROOTPythonizations.*', 'libpython.*', 'libutil.*', '.*cpython.*', '_.*', '.*module', 'operator', 'cStringIO', 'binascii', 'libbz2', 'libexpat', 'ISO8859-1', # System libraries and others 'libnss_.*', 'ld.*', 'libffi', ] # Verbose mode of the test verbose = False def test_import(self): """ Test libraries loaded after importing ROOT """ import ROOT libs = str(ROOT.gSystem.GetLibraries()) if self.verbose: print("Initial output from ROOT.gSystem.GetLibraries():\n" + libs) # Split paths libs = libs.split(' ') # Get library name without full path and .so* suffix libs = [os.path.basename(l).split('.so')[0] for l in libs \ if not l.startswith('-l') and not l.startswith('-L')] # Check that the loaded libraries are white listed bad_libs = [] good_libs = [] matched_re = [] for l in libs: matched = False for r in self.known_libs: m = re.match(r, l) if m: if m.group(0) == l: matched = True good_libs.append(l) matched_re.append(r) break if not matched: bad_libs.append(l) if self.verbose: print('Found whitelisted libraries after importing ROOT with the shown regex match:') for l, r in zip(good_libs, matched_re): print(' - {} ({})'.format(l, r)) import sys sys.stdout.flush() if bad_libs: raise Exception('Found not whitelisted libraries after importing ROOT:' \ + '\n - ' + '\n - '.join(bad_libs) \ + '\nIf the test fails with a library that is loaded on purpose, please add it to the whitelist.') if __name__ == '__main__': unittest.main()
root-mirror/root
bindings/pyroot/pythonizations/test/import_load_libs.py
Python
lgpl-2.1
3,705
# -*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import unittest from airflow import models from airflow.api.common.experimental.mark_tasks import set_state, _create_dagruns from airflow.settings import Session from airflow.utils.dates import days_ago from airflow.utils.state import State DEV_NULL = "/dev/null" class TestMarkTasks(unittest.TestCase): def setUp(self): self.dagbag = models.DagBag(include_examples=True) self.dag1 = self.dagbag.dags['test_example_bash_operator'] self.dag2 = self.dagbag.dags['example_subdag_operator'] self.execution_dates = [days_ago(2), days_ago(1)] drs = _create_dagruns(self.dag1, self.execution_dates, state=State.RUNNING, run_id_template="scheduled__{}") for dr in drs: dr.dag = self.dag1 dr.verify_integrity() drs = _create_dagruns(self.dag2, [self.dag2.default_args['start_date']], state=State.RUNNING, run_id_template="scheduled__{}") for dr in drs: dr.dag = self.dag2 dr.verify_integrity() self.session = Session() def snapshot_state(self, dag, execution_dates): TI = models.TaskInstance tis = self.session.query(TI).filter( TI.dag_id==dag.dag_id, TI.execution_date.in_(execution_dates) ).all() self.session.expunge_all() return tis def verify_state(self, dag, task_ids, execution_dates, state, old_tis): TI = models.TaskInstance tis = self.session.query(TI).filter( TI.dag_id==dag.dag_id, TI.execution_date.in_(execution_dates) ).all() self.assertTrue(len(tis) > 0) for ti in tis: if ti.task_id in task_ids and ti.execution_date in execution_dates: self.assertEqual(ti.state, state) else: for old_ti in old_tis: if (old_ti.task_id == ti.task_id and old_ti.execution_date == ti.execution_date): self.assertEqual(ti.state, old_ti.state) def test_mark_tasks_now(self): # set one task to success but do not commit snapshot = self.snapshot_state(self.dag1, self.execution_dates) task = self.dag1.get_task("runme_1") altered = set_state(task=task, execution_date=self.execution_dates[0], upstream=False, downstream=False, future=False, past=False, state=State.SUCCESS, commit=False) self.assertEqual(len(altered), 1) self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]], None, snapshot) # set one and only one task to success altered = set_state(task=task, execution_date=self.execution_dates[0], upstream=False, downstream=False, future=False, past=False, state=State.SUCCESS, commit=True) self.assertEqual(len(altered), 1) self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]], State.SUCCESS, snapshot) # set no tasks altered = set_state(task=task, execution_date=self.execution_dates[0], upstream=False, downstream=False, future=False, past=False, state=State.SUCCESS, commit=True) self.assertEqual(len(altered), 0) self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]], State.SUCCESS, snapshot) # set task to other than success altered = set_state(task=task, execution_date=self.execution_dates[0], upstream=False, downstream=False, future=False, past=False, state=State.FAILED, commit=True) self.assertEqual(len(altered), 1) 
self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]], State.FAILED, snapshot) # dont alter other tasks snapshot = self.snapshot_state(self.dag1, self.execution_dates) task = self.dag1.get_task("runme_0") altered = set_state(task=task, execution_date=self.execution_dates[0], upstream=False, downstream=False, future=False, past=False, state=State.SUCCESS, commit=True) self.assertEqual(len(altered), 1) self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]], State.SUCCESS, snapshot) def test_mark_downstream(self): # test downstream snapshot = self.snapshot_state(self.dag1, self.execution_dates) task = self.dag1.get_task("runme_1") relatives = task.get_flat_relatives(upstream=False) task_ids = [t.task_id for t in relatives] task_ids.append(task.task_id) altered = set_state(task=task, execution_date=self.execution_dates[0], upstream=False, downstream=True, future=False, past=False, state=State.SUCCESS, commit=True) self.assertEqual(len(altered), 3) self.verify_state(self.dag1, task_ids, [self.execution_dates[0]], State.SUCCESS, snapshot) def test_mark_upstream(self): # test upstream snapshot = self.snapshot_state(self.dag1, self.execution_dates) task = self.dag1.get_task("run_after_loop") relatives = task.get_flat_relatives(upstream=True) task_ids = [t.task_id for t in relatives] task_ids.append(task.task_id) altered = set_state(task=task, execution_date=self.execution_dates[0], upstream=True, downstream=False, future=False, past=False, state=State.SUCCESS, commit=True) self.assertEqual(len(altered), 4) self.verify_state(self.dag1, task_ids, [self.execution_dates[0]], State.SUCCESS, snapshot) def test_mark_tasks_future(self): # set one task to success towards end of scheduled dag runs snapshot = self.snapshot_state(self.dag1, self.execution_dates) task = self.dag1.get_task("runme_1") altered = set_state(task=task, execution_date=self.execution_dates[0], upstream=False, downstream=False, future=True, past=False, state=State.SUCCESS, commit=True) self.assertEqual(len(altered), 2) self.verify_state(self.dag1, [task.task_id], self.execution_dates, State.SUCCESS, snapshot) def test_mark_tasks_past(self): # set one task to success towards end of scheduled dag runs snapshot = self.snapshot_state(self.dag1, self.execution_dates) task = self.dag1.get_task("runme_1") altered = set_state(task=task, execution_date=self.execution_dates[1], upstream=False, downstream=False, future=False, past=True, state=State.SUCCESS, commit=True) self.assertEqual(len(altered), 2) self.verify_state(self.dag1, [task.task_id], self.execution_dates, State.SUCCESS, snapshot) def test_mark_tasks_subdag(self): # set one task to success towards end of scheduled dag runs task = self.dag2.get_task("section-1") relatives = task.get_flat_relatives(upstream=False) task_ids = [t.task_id for t in relatives] task_ids.append(task.task_id) altered = set_state(task=task, execution_date=self.execution_dates[0], upstream=False, downstream=True, future=False, past=False, state=State.SUCCESS, commit=True) self.assertEqual(len(altered), 14) # cannot use snapshot here as that will require drilling down the # the sub dag tree essentially recreating the same code as in the # tested logic. 
self.verify_state(self.dag2, task_ids, [self.execution_dates[0]], State.SUCCESS, []) def tearDown(self): self.dag1.clear() self.dag2.clear() # just to make sure we are fully cleaned up self.session.query(models.DagRun).delete() self.session.query(models.TaskInstance).delete() self.session.commit() self.session.close() if __name__ == '__main__': unittest.main()
zodiac/incubator-airflow
tests/api/common/mark_tasks.py
Python
apache-2.0
9,129
# # Copyright 2014 Quantopian, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pandas as pd import pytz from datetime import datetime from dateutil import rrule from zipline.utils.tradingcalendar import end, canonicalize_datetime, \ get_open_and_closes start = pd.Timestamp('1994-01-01', tz='UTC') def get_non_trading_days(start, end): non_trading_rules = [] start = canonicalize_datetime(start) end = canonicalize_datetime(end) weekends = rrule.rrule( rrule.YEARLY, byweekday=(rrule.SA, rrule.SU), cache=True, dtstart=start, until=end ) non_trading_rules.append(weekends) # Universal confraternization conf_universal = rrule.rrule( rrule.MONTHLY, byyearday=1, cache=True, dtstart=start, until=end ) non_trading_rules.append(conf_universal) # Sao Paulo city birthday aniversario_sao_paulo = rrule.rrule( rrule.MONTHLY, bymonth=1, bymonthday=25, cache=True, dtstart=start, until=end ) non_trading_rules.append(aniversario_sao_paulo) # Carnival Monday carnaval_segunda = rrule.rrule( rrule.MONTHLY, byeaster=-48, cache=True, dtstart=start, until=end ) non_trading_rules.append(carnaval_segunda) # Carnival Tuesday carnaval_terca = rrule.rrule( rrule.MONTHLY, byeaster=-47, cache=True, dtstart=start, until=end ) non_trading_rules.append(carnaval_terca) # Passion of the Christ sexta_paixao = rrule.rrule( rrule.MONTHLY, byeaster=-2, cache=True, dtstart=start, until=end ) non_trading_rules.append(sexta_paixao) # Corpus Christi corpus_christi = rrule.rrule( rrule.MONTHLY, byeaster=60, cache=True, dtstart=start, until=end ) non_trading_rules.append(corpus_christi) tiradentes = rrule.rrule( rrule.MONTHLY, bymonth=4, bymonthday=21, cache=True, dtstart=start, until=end ) non_trading_rules.append(tiradentes) # Labor day dia_trabalho = rrule.rrule( rrule.MONTHLY, bymonth=5, bymonthday=1, cache=True, dtstart=start, until=end ) non_trading_rules.append(dia_trabalho) # Constitutionalist Revolution constitucionalista = rrule.rrule( rrule.MONTHLY, bymonth=7, bymonthday=9, cache=True, dtstart=datetime(1997, 1, 1, tzinfo=pytz.utc), until=end ) non_trading_rules.append(constitucionalista) # Independency day independencia = rrule.rrule( rrule.MONTHLY, bymonth=9, bymonthday=7, cache=True, dtstart=start, until=end ) non_trading_rules.append(independencia) # Our Lady of Aparecida aparecida = rrule.rrule( rrule.MONTHLY, bymonth=10, bymonthday=12, cache=True, dtstart=start, until=end ) non_trading_rules.append(aparecida) # All Souls' day finados = rrule.rrule( rrule.MONTHLY, bymonth=11, bymonthday=2, cache=True, dtstart=start, until=end ) non_trading_rules.append(finados) # Proclamation of the Republic proclamacao_republica = rrule.rrule( rrule.MONTHLY, bymonth=11, bymonthday=15, cache=True, dtstart=start, until=end ) non_trading_rules.append(proclamacao_republica) # Day of Black Awareness consciencia_negra = rrule.rrule( rrule.MONTHLY, bymonth=11, bymonthday=20, cache=True, dtstart=datetime(2004, 1, 1, tzinfo=pytz.utc), until=end ) non_trading_rules.append(consciencia_negra) # Christmas Eve vespera_natal = rrule.rrule( rrule.MONTHLY, 
bymonth=12, bymonthday=24, cache=True, dtstart=start, until=end ) non_trading_rules.append(vespera_natal) # Christmas natal = rrule.rrule( rrule.MONTHLY, bymonth=12, bymonthday=25, cache=True, dtstart=start, until=end ) non_trading_rules.append(natal) # New Year Eve ano_novo = rrule.rrule( rrule.MONTHLY, bymonth=12, bymonthday=31, cache=True, dtstart=start, until=end ) non_trading_rules.append(ano_novo) # New Year Eve on saturday ano_novo_sab = rrule.rrule( rrule.MONTHLY, bymonth=12, bymonthday=30, byweekday=rrule.FR, cache=True, dtstart=start, until=end ) non_trading_rules.append(ano_novo_sab) non_trading_ruleset = rrule.rruleset() for rule in non_trading_rules: non_trading_ruleset.rrule(rule) non_trading_days = non_trading_ruleset.between(start, end, inc=True) # World Cup 2014 Opening non_trading_days.append(datetime(2014, 6, 12, tzinfo=pytz.utc)) non_trading_days.sort() return pd.DatetimeIndex(non_trading_days) non_trading_days = get_non_trading_days(start, end) trading_day = pd.tseries.offsets.CDay(holidays=non_trading_days) def get_trading_days(start, end, trading_day=trading_day): return pd.date_range(start=start.date(), end=end.date(), freq=trading_day).tz_localize('UTC') trading_days = get_trading_days(start, end) # Ash Wednesday quarta_cinzas = rrule.rrule( rrule.MONTHLY, byeaster=-46, cache=True, dtstart=start, until=end ) def get_early_closes(start, end): # TSX closed at 1:00 PM on december 24th. start = canonicalize_datetime(start) end = canonicalize_datetime(end) early_close_rules = [] early_close_rules.append(quarta_cinzas) early_close_ruleset = rrule.rruleset() for rule in early_close_rules: early_close_ruleset.rrule(rule) early_closes = early_close_ruleset.between(start, end, inc=True) early_closes.sort() return pd.DatetimeIndex(early_closes) early_closes = get_early_closes(start, end) def get_open_and_close(day, early_closes): # only "early close" event in Bovespa actually is a late start # as the market only opens at 1pm open_hour = 13 if day in quarta_cinzas else 10 market_open = pd.Timestamp( datetime( year=day.year, month=day.month, day=day.day, hour=open_hour, minute=00), tz='America/Sao_Paulo').tz_convert('UTC') market_close = pd.Timestamp( datetime( year=day.year, month=day.month, day=day.day, hour=16), tz='America/Sao_Paulo').tz_convert('UTC') return market_open, market_close open_and_closes = get_open_and_closes(trading_days, early_closes, get_open_and_close)
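
# A minimal usage sketch (hypothetical, not part of the original module): the
# module-level tables built above can be queried directly; open_and_closes is
# assumed to be indexed by trading day with market open/close columns, as
# produced by get_open_and_closes().
if __name__ == '__main__':
    sample_day = trading_days[0]
    print(open_and_closes.loc[sample_day])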
wilsonkichoi/zipline
zipline/utils/tradingcalendar_bmf.py
Python
apache-2.0
7,576
# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== r"""CelebA dataset formating. Download img_align_celeba.zip from http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html under the link "Align&Cropped Images" in the "Img" directory and list_eval_partition.txt under the link "Train/Val/Test Partitions" in the "Eval" directory. Then do: unzip img_align_celeba.zip Use the script as follow: python celeba_formatting.py \ --partition_fn [PARTITION_FILE_PATH] \ --file_out [OUTPUT_FILE_PATH_PREFIX] \ --fn_root [CELEBA_FOLDER] \ --set [SUBSET_INDEX] """ import os import os.path import scipy.io import scipy.io.wavfile import scipy.ndimage import tensorflow as tf tf.flags.DEFINE_string("file_out", "", "Filename of the output .tfrecords file.") tf.flags.DEFINE_string("fn_root", "", "Name of root file path.") tf.flags.DEFINE_string("partition_fn", "", "Partition file path.") tf.flags.DEFINE_string("set", "", "Name of subset.") FLAGS = tf.flags.FLAGS def _int64_feature(value): return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) def _bytes_feature(value): return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) def main(): """Main converter function.""" # Celeb A with open(FLAGS.partition_fn, "r") as infile: img_fn_list = infile.readlines() img_fn_list = [elem.strip().split() for elem in img_fn_list] img_fn_list = [elem[0] for elem in img_fn_list if elem[1] == FLAGS.set] fn_root = FLAGS.fn_root num_examples = len(img_fn_list) file_out = "%s.tfrecords" % FLAGS.file_out writer = tf.python_io.TFRecordWriter(file_out) for example_idx, img_fn in enumerate(img_fn_list): if example_idx % 1000 == 0: print example_idx, "/", num_examples image_raw = scipy.ndimage.imread(os.path.join(fn_root, img_fn)) rows = image_raw.shape[0] cols = image_raw.shape[1] depth = image_raw.shape[2] image_raw = image_raw.tostring() example = tf.train.Example( features=tf.train.Features( feature={ "height": _int64_feature(rows), "width": _int64_feature(cols), "depth": _int64_feature(depth), "image_raw": _bytes_feature(image_raw) } ) ) writer.write(example.SerializeToString()) writer.close() if __name__ == "__main__": main()
unnikrishnankgs/va
venv/lib/python3.5/site-packages/tensorflow/models/real_nvp/celeba_formatting.py
Python
bsd-2-clause
3,106
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. { 'name': 'Point of Sale', 'version': '1.0.1', 'category': 'Point Of Sale', 'sequence': 20, 'summary': 'Touchscreen Interface for Shops', 'description': """ Quick and Easy sale process =========================== This module allows you to manage your shop sales very easily with a fully web based touchscreen interface. It is compatible with all PC tablets and the iPad, offering multiple payment methods. Product selection can be done in several ways: * Using a barcode reader * Browsing through categories of products or via a text search. Main Features ------------- * Fast encoding of the sale * Choose one payment method (the quick way) or split the payment between several payment methods * Computation of the amount of money to return * Create and confirm the picking list automatically * Allows the user to create an invoice automatically * Refund previous sales """, 'depends': ['stock_account', 'barcodes'], 'data': [ 'security/point_of_sale_security.xml', 'security/ir.model.access.csv', 'data/default_barcode_patterns.xml', 'wizard/pos_box.xml', 'wizard/pos_details.xml', 'wizard/pos_discount.xml', 'wizard/pos_open_statement.xml', 'wizard/pos_payment.xml', 'views/pos_templates.xml', 'views/point_of_sale_template.xml', 'views/point_of_sale_report.xml', 'views/point_of_sale_view.xml', 'views/pos_order_view.xml', 'views/product_view.xml', 'views/pos_category_view.xml', 'views/account_journal_view.xml', 'views/pos_config_view.xml', 'views/pos_session_view.xml', 'views/point_of_sale_sequence.xml', 'data/point_of_sale_data.xml', 'views/pos_order_report_view.xml', 'views/account_statement_view.xml', 'views/account_statement_report.xml', 'views/res_users_view.xml', 'views/res_partner_view.xml', 'views/res_config_view.xml', 'views/report_statement.xml', 'views/report_userlabel.xml', 'views/report_saledetails.xml', 'views/point_of_sale.xml', 'views/point_of_sale_dashboard.xml', ], 'demo': [ 'data/point_of_sale_demo.xml', ], 'installable': True, 'application': True, 'qweb': ['static/src/xml/pos.xml'], 'website': 'https://www.odoo.com/page/point-of-sale', }
chienlieu2017/it_management
odoo/addons/point_of_sale/__manifest__.py
Python
gpl-3.0
2,501
# sqlalchemy/interfaces.py # Copyright (C) 2007-2013 the SQLAlchemy authors and contributors <see AUTHORS file> # Copyright (C) 2007 Jason Kirtland jek@discorporate.us # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Deprecated core event interfaces. This module is **deprecated** and is superseded by the event system. """ from . import event, util class PoolListener(object): """Hooks into the lifecycle of connections in a :class:`.Pool`. .. note:: :class:`.PoolListener` is deprecated. Please refer to :class:`.PoolEvents`. Usage:: class MyListener(PoolListener): def connect(self, dbapi_con, con_record): '''perform connect operations''' # etc. # create a new pool with a listener p = QueuePool(..., listeners=[MyListener()]) # add a listener after the fact p.add_listener(MyListener()) # usage with create_engine() e = create_engine("url://", listeners=[MyListener()]) All of the standard connection :class:`~sqlalchemy.pool.Pool` types can accept event listeners for key connection lifecycle events: creation, pool check-out and check-in. There are no events fired when a connection closes. For any given DB-API connection, there will be one ``connect`` event, `n` number of ``checkout`` events, and either `n` or `n - 1` ``checkin`` events. (If a ``Connection`` is detached from its pool via the ``detach()`` method, it won't be checked back in.) These are low-level events for low-level objects: raw Python DB-API connections, without the conveniences of the SQLAlchemy ``Connection`` wrapper, ``Dialect`` services or ``ClauseElement`` execution. If you execute SQL through the connection, explicitly closing all cursors and other resources is recommended. Events also receive a ``_ConnectionRecord``, a long-lived internal ``Pool`` object that basically represents a "slot" in the connection pool. ``_ConnectionRecord`` objects have one public attribute of note: ``info``, a dictionary whose contents are scoped to the lifetime of the DB-API connection managed by the record. You can use this shared storage area however you like. There is no need to subclass ``PoolListener`` to handle events. Any class that implements one or more of these methods can be used as a pool listener. The ``Pool`` will inspect the methods provided by a listener object and add the listener to one or more internal event queues based on its capabilities. In terms of efficiency and function call overhead, you're much better off only providing implementations for the hooks you'll be using. """ @classmethod def _adapt_listener(cls, self, listener): """Adapt a :class:`.PoolListener` to individual :class:`event.Dispatch` events. """ listener = util.as_interface(listener, methods=('connect', 'first_connect', 'checkout', 'checkin')) if hasattr(listener, 'connect'): event.listen(self, 'connect', listener.connect) if hasattr(listener, 'first_connect'): event.listen(self, 'first_connect', listener.first_connect) if hasattr(listener, 'checkout'): event.listen(self, 'checkout', listener.checkout) if hasattr(listener, 'checkin'): event.listen(self, 'checkin', listener.checkin) def connect(self, dbapi_con, con_record): """Called once for each new DB-API connection or Pool's ``creator()``. dbapi_con A newly connected raw DB-API connection (not a SQLAlchemy ``Connection`` wrapper). con_record The ``_ConnectionRecord`` that persistently manages the connection """ def first_connect(self, dbapi_con, con_record): """Called exactly once for the first DB-API connection. 
dbapi_con A newly connected raw DB-API connection (not a SQLAlchemy ``Connection`` wrapper). con_record The ``_ConnectionRecord`` that persistently manages the connection """ def checkout(self, dbapi_con, con_record, con_proxy): """Called when a connection is retrieved from the Pool. dbapi_con A raw DB-API connection con_record The ``_ConnectionRecord`` that persistently manages the connection con_proxy The ``_ConnectionFairy`` which manages the connection for the span of the current checkout. If you raise an ``exc.DisconnectionError``, the current connection will be disposed and a fresh connection retrieved. Processing of all checkout listeners will abort and restart using the new connection. """ def checkin(self, dbapi_con, con_record): """Called when a connection returns to the pool. Note that the connection may be closed, and may be None if the connection has been invalidated. ``checkin`` will not be called for detached connections. (They do not return to the pool.) dbapi_con A raw DB-API connection con_record The ``_ConnectionRecord`` that persistently manages the connection """ class ConnectionProxy(object): """Allows interception of statement execution by Connections. .. note:: :class:`.ConnectionProxy` is deprecated. Please refer to :class:`.ConnectionEvents`. Either or both of the ``execute()`` and ``cursor_execute()`` may be implemented to intercept compiled statement and cursor level executions, e.g.:: class MyProxy(ConnectionProxy): def execute(self, conn, execute, clauseelement, *multiparams, **params): print "compiled statement:", clauseelement return execute(clauseelement, *multiparams, **params) def cursor_execute(self, execute, cursor, statement, parameters, context, executemany): print "raw statement:", statement return execute(cursor, statement, parameters, context) The ``execute`` argument is a function that will fulfill the default execution behavior for the operation. The signature illustrated in the example should be used. 
The proxy is installed into an :class:`~sqlalchemy.engine.Engine` via the ``proxy`` argument:: e = create_engine('someurl://', proxy=MyProxy()) """ @classmethod def _adapt_listener(cls, self, listener): def adapt_execute(conn, clauseelement, multiparams, params): def execute_wrapper(clauseelement, *multiparams, **params): return clauseelement, multiparams, params return listener.execute(conn, execute_wrapper, clauseelement, *multiparams, **params) event.listen(self, 'before_execute', adapt_execute) def adapt_cursor_execute(conn, cursor, statement, parameters, context, executemany): def execute_wrapper( cursor, statement, parameters, context, ): return statement, parameters return listener.cursor_execute( execute_wrapper, cursor, statement, parameters, context, executemany, ) event.listen(self, 'before_cursor_execute', adapt_cursor_execute) def do_nothing_callback(*arg, **kw): pass def adapt_listener(fn): def go(conn, *arg, **kw): fn(conn, do_nothing_callback, *arg, **kw) return util.update_wrapper(go, fn) event.listen(self, 'begin', adapt_listener(listener.begin)) event.listen(self, 'rollback', adapt_listener(listener.rollback)) event.listen(self, 'commit', adapt_listener(listener.commit)) event.listen(self, 'savepoint', adapt_listener(listener.savepoint)) event.listen(self, 'rollback_savepoint', adapt_listener(listener.rollback_savepoint)) event.listen(self, 'release_savepoint', adapt_listener(listener.release_savepoint)) event.listen(self, 'begin_twophase', adapt_listener(listener.begin_twophase)) event.listen(self, 'prepare_twophase', adapt_listener(listener.prepare_twophase)) event.listen(self, 'rollback_twophase', adapt_listener(listener.rollback_twophase)) event.listen(self, 'commit_twophase', adapt_listener(listener.commit_twophase)) def execute(self, conn, execute, clauseelement, *multiparams, **params): """Intercept high level execute() events.""" return execute(clauseelement, *multiparams, **params) def cursor_execute(self, execute, cursor, statement, parameters, context, executemany): """Intercept low-level cursor execute() events.""" return execute(cursor, statement, parameters, context) def begin(self, conn, begin): """Intercept begin() events.""" return begin() def rollback(self, conn, rollback): """Intercept rollback() events.""" return rollback() def commit(self, conn, commit): """Intercept commit() events.""" return commit() def savepoint(self, conn, savepoint, name=None): """Intercept savepoint() events.""" return savepoint(name=name) def rollback_savepoint(self, conn, rollback_savepoint, name, context): """Intercept rollback_savepoint() events.""" return rollback_savepoint(name, context) def release_savepoint(self, conn, release_savepoint, name, context): """Intercept release_savepoint() events.""" return release_savepoint(name, context) def begin_twophase(self, conn, begin_twophase, xid): """Intercept begin_twophase() events.""" return begin_twophase(xid) def prepare_twophase(self, conn, prepare_twophase, xid): """Intercept prepare_twophase() events.""" return prepare_twophase(xid) def rollback_twophase(self, conn, rollback_twophase, xid, is_prepared): """Intercept rollback_twophase() events.""" return rollback_twophase(xid, is_prepared) def commit_twophase(self, conn, commit_twophase, xid, is_prepared): """Intercept commit_twophase() events.""" return commit_twophase(xid, is_prepared)
Drvanon/Game
venv/lib/python3.3/site-packages/sqlalchemy/interfaces.py
Python
apache-2.0
10,918
# mako/ast.py # Copyright 2006-2020 the Mako authors and contributors <see AUTHORS file> # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """utilities for analyzing expressions and blocks of Python code, as well as generating Python from AST nodes""" import re from mako import compat from mako import exceptions from mako import pyparser class PythonCode(object): """represents information about a string containing Python code""" def __init__(self, code, **exception_kwargs): self.code = code # represents all identifiers which are assigned to at some point in # the code self.declared_identifiers = set() # represents all identifiers which are referenced before their # assignment, if any self.undeclared_identifiers = set() # note that an identifier can be in both the undeclared and declared # lists. # using AST to parse instead of using code.co_varnames, # code.co_names has several advantages: # - we can locate an identifier as "undeclared" even if # its declared later in the same block of code # - AST is less likely to break with version changes # (for example, the behavior of co_names changed a little bit # in python version 2.5) if isinstance(code, compat.string_types): expr = pyparser.parse(code.lstrip(), "exec", **exception_kwargs) else: expr = code f = pyparser.FindIdentifiers(self, **exception_kwargs) f.visit(expr) class ArgumentList(object): """parses a fragment of code as a comma-separated list of expressions""" def __init__(self, code, **exception_kwargs): self.codeargs = [] self.args = [] self.declared_identifiers = set() self.undeclared_identifiers = set() if isinstance(code, compat.string_types): if re.match(r"\S", code) and not re.match(r",\s*$", code): # if theres text and no trailing comma, insure its parsed # as a tuple by adding a trailing comma code += "," expr = pyparser.parse(code, "exec", **exception_kwargs) else: expr = code f = pyparser.FindTuple(self, PythonCode, **exception_kwargs) f.visit(expr) class PythonFragment(PythonCode): """extends PythonCode to provide identifier lookups in partial control statements e.g.:: for x in 5: elif y==9: except (MyException, e): """ def __init__(self, code, **exception_kwargs): m = re.match(r"^(\w+)(?:\s+(.*?))?:\s*(#|$)", code.strip(), re.S) if not m: raise exceptions.CompileException( "Fragment '%s' is not a partial control statement" % code, **exception_kwargs ) if m.group(3): code = code[: m.start(3)] (keyword, expr) = m.group(1, 2) if keyword in ["for", "if", "while"]: code = code + "pass" elif keyword == "try": code = code + "pass\nexcept:pass" elif keyword == "elif" or keyword == "else": code = "if False:pass\n" + code + "pass" elif keyword == "except": code = "try:pass\n" + code + "pass" elif keyword == "with": code = code + "pass" else: raise exceptions.CompileException( "Unsupported control keyword: '%s'" % keyword, **exception_kwargs ) super(PythonFragment, self).__init__(code, **exception_kwargs) class FunctionDecl(object): """function declaration""" def __init__(self, code, allow_kwargs=True, **exception_kwargs): self.code = code expr = pyparser.parse(code, "exec", **exception_kwargs) f = pyparser.ParseFunc(self, **exception_kwargs) f.visit(expr) if not hasattr(self, "funcname"): raise exceptions.CompileException( "Code '%s' is not a function declaration" % code, **exception_kwargs ) if not allow_kwargs and self.kwargs: raise exceptions.CompileException( "'**%s' keyword argument not allowed here" % self.kwargnames[-1], **exception_kwargs ) def 
get_argument_expressions(self, as_call=False): """Return the argument declarations of this FunctionDecl as a printable list. By default the return value is appropriate for writing in a ``def``; set `as_call` to true to build arguments to be passed to the function instead (assuming locals with the same names as the arguments exist). """ namedecls = [] # Build in reverse order, since defaults and slurpy args come last argnames = self.argnames[::-1] kwargnames = self.kwargnames[::-1] defaults = self.defaults[::-1] kwdefaults = self.kwdefaults[::-1] # Named arguments if self.kwargs: namedecls.append("**" + kwargnames.pop(0)) for name in kwargnames: # Keyword-only arguments must always be used by name, so even if # this is a call, print out `foo=foo` if as_call: namedecls.append("%s=%s" % (name, name)) elif kwdefaults: default = kwdefaults.pop(0) if default is None: # The AST always gives kwargs a default, since you can do # `def foo(*, a=1, b, c=3)` namedecls.append(name) else: namedecls.append( "%s=%s" % (name, pyparser.ExpressionGenerator(default).value()) ) else: namedecls.append(name) # Positional arguments if self.varargs: namedecls.append("*" + argnames.pop(0)) for name in argnames: if as_call or not defaults: namedecls.append(name) else: default = defaults.pop(0) namedecls.append( "%s=%s" % (name, pyparser.ExpressionGenerator(default).value()) ) namedecls.reverse() return namedecls @property def allargnames(self): return tuple(self.argnames) + tuple(self.kwargnames) class FunctionArgs(FunctionDecl): """the argument portion of a function declaration""" def __init__(self, code, **kwargs): super(FunctionArgs, self).__init__( "def ANON(%s):pass" % code, **kwargs )
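
# A minimal, hypothetical usage sketch (illustrative only) for FunctionDecl and
# get_argument_expressions():
#
#   decl = FunctionDecl("def render(context, data=None): pass")
#   decl.funcname                    # -> 'render'
#   decl.get_argument_expressions()  # -> ['context', 'data=None']
#   decl.allargnames                 # -> ('context', 'data')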
nwjs/chromium.src
third_party/mako/mako/ast.py
Python
bsd-3-clause
6,789
def f(a, L=<warning descr="Default argument value is mutable">[]</warning>): L.append(a) return L def f(a, L=<warning descr="Default argument value is mutable">list()</warning>): L.append(a) return L def f(a, L=<warning descr="Default argument value is mutable">set()</warning>): L.append(a) return L def f(a, L=<warning descr="Default argument value is mutable">{}</warning>): L.append(a) return L def f(a, L=<warning descr="Default argument value is mutable">dict()</warning>): L.append(a) return L def f(a, L=<warning descr="Default argument value is mutable">{1: 2}</warning>): L.append(a) return L
smmribeiro/intellij-community
python/testData/inspections/PyDefaultArgumentInspection/test.py
Python
apache-2.0
658
# (c) 2012, Jan-Piet Mens <jpmens(at)gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = """
lookup: redis_kv
author: Jan-Piet Mens <jpmens(at)gmail.com>
version_added: "0.9"
short_description: fetch data from Redis
description:
  - this lookup performs a GET against Redis for each key given to it and returns the list of resulting values
requirements:
  - redis (python library https://github.com/andymccurdy/redis-py/)
options:
  _terms:
    description: Two element comma separated strings composed of url of the Redis server and key to query
    options:
      _url:
        description: location of redis host in url format
        default: 'redis://localhost:6379'
      _key:
        description: key to query
        required: True
"""

EXAMPLES = """
- name: query redis for somekey
  debug: msg="{{ lookup('redis_kv', 'redis://localhost:6379,somekey') }} is value in Redis for somekey"
"""

RETURN = """
_raw:
  description: values stored in Redis
"""

import os
import re

HAVE_REDIS = False
try:
    import redis
    HAVE_REDIS = True
except ImportError:
    pass

from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase

# ==============================================================
# REDISGET: Obtain value from a GET on a Redis key. Terms
# expected: 0 = URL, 1 = Key
# URL may be empty, in which case redis://localhost:6379 assumed
# --------------------------------------------------------------


class LookupModule(LookupBase):

    def run(self, terms, variables, **kwargs):

        if not HAVE_REDIS:
            raise AnsibleError("Can't LOOKUP(redis_kv): module redis is not installed")

        ret = []
        for term in terms:
            (url, key) = term.split(',')
            if url == "":
                url = 'redis://localhost:6379'

            # urlsplit on Python 2.6.1 is broken. Hmm. Probably also the reason
            # Redis' from_url() doesn't work here.
            p = '(?P<scheme>[^:]+)://?(?P<host>[^:/ ]+).?(?P<port>[0-9]*).*'

            try:
                m = re.search(p, url)
                host = m.group('host')
                port = int(m.group('port'))
            except AttributeError:
                raise AnsibleError("Bad URI in redis lookup")

            try:
                conn = redis.Redis(host=host, port=port)
                res = conn.get(key)
                if res is None:
                    res = ""
                ret.append(res)
            except:
                ret.append("")  # connection failed or key not found
        return ret
e-gob/plataforma-kioscos-autoatencion
scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/plugins/lookup/redis_kv.py
Python
bsd-3-clause
2,846
#!/usr/bin/python # # Copyright (c) 2017 Julien Stroheker, <juliens@microsoft.com> # # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: azure_rm_acs version_added: "2.4" short_description: Manage an Azure Container Service Instance (ACS). description: - Create, update and delete an Azure Container Service Instance. options: resource_group: description: - Name of a resource group where the Container Services exists or will be created. required: true name: description: - Name of the Container Services instance. required: true default: null state: description: - Assert the state of the ACS. Use 'present' to create or update an ACS and 'absent' to delete it. default: present choices: - absent - present required: false location: description: - Valid azure location. Defaults to location of the resource group. default: resource_group location required: false orchestration_platform: description: - Specifies the Container Orchestration Platform to use. Currently can be either DCOS, Kubernetes or Swarm. required: true master_profile: description: - Master profile suboptions. required: true default: null suboptions: count: description: - Number of masters (VMs) in the container service cluster. Allowed values are 1, 3, and 5. required: true choices: - 1 - 3 - 5 dns_prefix: description: - The DNS Prefix to use for the Container Service master nodes. required: true linux_profile: description: - The linux profile suboptions. required: true default: null suboptions: admin_username: description: - The Admin Username for the Cluster. required: true default: azureuser ssh_key: description: - The Public SSH Key used to access the cluster. required: true agent_pool_profiles: description: - The agent pool profile suboptions. required: true default: null suboptions: name: description: - Unique name of the agent pool profile in the context of the subscription and resource group. required: true count: description: - Number of agents (VMs) to host docker containers. Allowed values must be in the range of 1 to 100 (inclusive). required: true default: 1 dns_prefix: description: - The DNS Prefix given to Agents in this Agent Pool. required: true vm_size: description: - The VM Size of each of the Agent Pool VM's (e.g. Standard_F1 / Standard_D2v2). required: true default: Standard_D2v2 service_principal: description: - The service principal suboptions. required: false default: null suboptions: client_id: description: - The ID for the Service Principal. required: false client_secret: description: - The secret password associated with the service principal. required: false diagnostics_profile: description: - Should VM Diagnostics be enabled for the Container Service VM's. required: true default: false extends_documentation_fragment: - azure - azure_tags author: - "Julien Stroheker (@julienstroheker)" ''' EXAMPLES = ''' - name: Create an azure container services instance running Kubernetes azure_rm_acs: name: acctestcontservice1 location: eastus resource_group: Testing orchestration_platform: Kubernetes master_profile: - count: 3 dns_prefix: acsk8smasterdns linux_profile: - admin_username: azureuser ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAA... service_principal: - client_id: "cf72ca99-f6b9-4004-b0e0-bee10c521948" client_secret: "mySPNp@ssw0rd!" 
agent_pool_profiles: - name: default count: 5 dns_prefix: acsk8sagent vm_size: Standard_D2_v2 diagnostics_profile: false tags: Environment: Production - name: Create an azure container services instance running DCOS azure_rm_acs: name: acctestcontservice2 location: eastus resource_group: Testing orchestration_platform: DCOS master_profile: - count: 3 dns_prefix: acsdcosmasterdns linux_profile: - admin_username: azureuser ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAA... agent_pool_profiles: - name: default count: 5 dns_prefix: acscdcosagent vm_size: Standard_D2_v2 diagnostics_profile: false tags: Environment: Production - name: Create an azure container services instance running Swarm azure_rm_acs: name: acctestcontservice3 location: eastus resource_group: Testing orchestration_platform: Swarm master_profile: - count: 3 dns_prefix: acsswarmmasterdns linux_profile: - admin_username: azureuser ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAA... agent_pool_profiles: - name: default count: 5 dns_prefix: acsswarmagent vm_size: Standard_D2_v2 diagnostics_profile: false tags: Environment: Production # Deletes the specified container service in the specified subscription and resource group. # The operation does not delete other resources created as part of creating a container service, # including storage accounts, VMs, and availability sets. All the other resources created with the container # service are part of the same resource group and can be deleted individually. - name: Remove an azure container services instance azure_rm_acs: name: acctestcontservice3 location: eastus resource_group: Testing state: absent orchestration_platform: Swarm master_profile: - count: 1 dns_prefix: acstestingmasterdns5 linux_profile: - admin_username: azureuser ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAA... 
service_principal: - client_id: 7fb4173c-3ca3-4d5b-87f8-1daac941207a client_secret: MPNSuM1auUuITefiLGBrpZZnLMDKBLw2 agent_pool_profiles: - name: default count: 4 dns_prefix: acctestagent15 vm_size: Standard_A0 diagnostics_profile: false tags: Ansible: azure_rm_acs ''' RETURN = ''' state: description: Current state of the azure container service returned: always type: dict ''' from ansible.module_utils.azure_rm_common import AzureRMModuleBase try: from msrestazure.azure_exceptions import CloudError from azure.mgmt.containerservice.models import ( ContainerService, ContainerServiceOrchestratorProfile, ContainerServiceCustomProfile, ContainerServiceServicePrincipalProfile, ContainerServiceMasterProfile, ContainerServiceAgentPoolProfile, ContainerServiceWindowsProfile, ContainerServiceLinuxProfile, ContainerServiceSshConfiguration, ContainerServiceDiagnosticsProfile, ContainerServiceSshPublicKey, ContainerServiceVMDiagnostics ) except ImportError: # This is handled in azure_rm_common pass def create_agent_pool_profile_instance(agentpoolprofile): ''' Helper method to serialize a dict to a ContainerServiceAgentPoolProfile :param: agentpoolprofile: dict with the parameters to setup the ContainerServiceAgentPoolProfile :return: ContainerServiceAgentPoolProfile ''' return ContainerServiceAgentPoolProfile( name=agentpoolprofile['name'], count=agentpoolprofile['count'], dns_prefix=agentpoolprofile['dns_prefix'], vm_size=agentpoolprofile['vm_size'] ) def create_orch_platform_instance(orchestrator): ''' Helper method to serialize a dict to a ContainerServiceOrchestratorProfile :param: orchestrator: dict with the parameters to setup the ContainerServiceOrchestratorProfile :return: ContainerServiceOrchestratorProfile ''' return ContainerServiceOrchestratorProfile( orchestrator_type=orchestrator, ) def create_service_principal_profile_instance(spnprofile): ''' Helper method to serialize a dict to a ContainerServiceServicePrincipalProfile :param: spnprofile: dict with the parameters to setup the ContainerServiceServicePrincipalProfile :return: ContainerServiceServicePrincipalProfile ''' return ContainerServiceServicePrincipalProfile( client_id=spnprofile[0]['client_id'], secret=spnprofile[0]['client_secret'] ) def create_linux_profile_instance(linuxprofile): ''' Helper method to serialize a dict to a ContainerServiceLinuxProfile :param: linuxprofile: dict with the parameters to setup the ContainerServiceLinuxProfile :return: ContainerServiceLinuxProfile ''' return ContainerServiceLinuxProfile( admin_username=linuxprofile[0]['admin_username'], ssh=create_ssh_configuration_instance(linuxprofile[0]['ssh_key']) ) def create_ssh_configuration_instance(sshconf): ''' Helper method to serialize a dict to a ContainerServiceSshConfiguration :param: sshconf: dict with the parameters to setup the ContainerServiceSshConfiguration :return: ContainerServiceSshConfiguration ''' listssh = [] key = ContainerServiceSshPublicKey(key_data=str(sshconf)) listssh.append(key) return ContainerServiceSshConfiguration( public_keys=listssh ) def create_master_profile_instance(masterprofile): ''' Helper method to serialize a dict to a ContainerServiceMasterProfile :param: masterprofile: dict with the parameters to setup the ContainerServiceMasterProfile :return: ContainerServiceMasterProfile ''' return ContainerServiceMasterProfile( count=masterprofile[0]['count'], dns_prefix=masterprofile[0]['dns_prefix'] ) def create_diagnostics_profile_instance(diagprofile): ''' Helper method to serialize a dict to a 
ContainerServiceDiagnosticsProfile :param: diagprofile: dict with the parameters to setup the ContainerServiceDiagnosticsProfile :return: ContainerServiceDiagnosticsProfile ''' return ContainerServiceDiagnosticsProfile( vm_diagnostics=create_vm_diagnostics_instance(diagprofile) ) def create_vm_diagnostics_instance(vmdiag): ''' Helper method to serialize a dict to a ContainerServiceVMDiagnostics :param: vmdiag: dict with the parameters to setup the ContainerServiceVMDiagnostics :return: ContainerServiceVMDiagnostics ''' return ContainerServiceVMDiagnostics( enabled=vmdiag ) def create_acs_dict(acs): ''' Helper method to deserialize a ContainerService to a dict :param: acs: ContainerService or AzureOperationPoller with the Azure callback object :return: dict with the state on Azure ''' results = dict( id=acs.id, name=acs.name, location=acs.location, tags=acs.tags, orchestrator_profile=create_orchestrator_profile_dict(acs.orchestrator_profile), master_profile=create_master_profile_dict(acs.master_profile), linux_profile=create_linux_profile_dict(acs.linux_profile), service_principal_profile=acs.service_principal_profile, diagnostics_profile=create_diagnotstics_profile_dict(acs.diagnostics_profile), provisioning_state=acs.provisioning_state, agent_pool_profiles=create_agent_pool_profiles_dict(acs.agent_pool_profiles), type=acs.type ) return results def create_linux_profile_dict(linuxprofile): ''' Helper method to deserialize a ContainerServiceLinuxProfile to a dict :param: linuxprofile: ContainerServiceLinuxProfile with the Azure callback object :return: dict with the state on Azure ''' results = dict( ssh_key=linuxprofile.ssh.public_keys[0].key_data, admin_username=linuxprofile.admin_username ) return results def create_master_profile_dict(masterprofile): ''' Helper method to deserialize a ContainerServiceMasterProfile to a dict :param: masterprofile: ContainerServiceMasterProfile with the Azure callback object :return: dict with the state on Azure ''' results = dict( count=masterprofile.count, fqdn=masterprofile.fqdn, dns_prefix=masterprofile.dns_prefix ) return results def create_diagnotstics_profile_dict(diagnosticsprofile): ''' Helper method to deserialize a ContainerServiceVMDiagnostics to a dict :param: diagnosticsprofile: ContainerServiceVMDiagnostics with the Azure callback object :return: dict with the state on Azure ''' results = dict( vm_diagnostics=diagnosticsprofile.vm_diagnostics.enabled ) return results def create_orchestrator_profile_dict(orchestratorprofile): ''' Helper method to deserialize a ContainerServiceOrchestratorProfile to a dict :param: orchestratorprofile: ContainerServiceOrchestratorProfile with the Azure callback object :return: dict with the state on Azure ''' results = dict( orchestrator_type=str(orchestratorprofile.orchestrator_type) ) return results def create_agent_pool_profiles_dict(agentpoolprofiles): ''' Helper method to deserialize a ContainerServiceAgentPoolProfile to a dict :param: agentpoolprofiles: ContainerServiceAgentPoolProfile with the Azure callback object :return: dict with the state on Azure ''' results = [] for profile in agentpoolprofiles: result = dict( count=profile.count, vm_size=profile.vm_size, name=profile.name, dns_prefix=profile.dns_prefix, fqdn=profile.fqdn ) results.append(result) return results class AzureRMContainerService(AzureRMModuleBase): """Configuration class for an Azure RM container service resource""" def __init__(self): self.module_arg_spec = dict( resource_group=dict( type='str', required=True ), name=dict( type='str', 
required=True ), state=dict( type='str', required=False, default='present', choices=['present', 'absent'] ), location=dict( type='str', required=False ), orchestration_platform=dict( type='str', required=True, choices=['DCOS', 'Kubernetes', 'Swarm'] ), master_profile=dict( type='list', required=True ), linux_profile=dict( type='list', required=True ), agent_pool_profiles=dict( type='list', required=True ), service_principal=dict( type='list', required=False ), diagnostics_profile=dict( type='bool', required=True ) ) self.resource_group = None self.name = None self.location = None self.tags = None self.state = None self.orchestration_platform = None self.master_profile = None self.linux_profile = None self.agent_pool_profiles = None self.service_principal = None self.diagnostics_profile = None self.results = dict(changed=False, state=dict()) super(AzureRMContainerService, self).__init__(derived_arg_spec=self.module_arg_spec, supports_check_mode=True, supports_tags=True) def exec_module(self, **kwargs): """Main module execution method""" for key in list(self.module_arg_spec.keys()) + ['tags']: setattr(self, key, kwargs[key]) resource_group = None response = None results = dict() to_be_updated = False try: resource_group = self.get_resource_group(self.resource_group) except CloudError: self.fail('resource group {} not found'.format(self.resource_group)) if not self.location: self.location = resource_group.location # Check if the ACS instance already present in the RG if self.state == 'present': if self.orchestration_platform == 'Kubernetes': if not self.service_principal: self.fail('service_principal should be specified when using Kubernetes') if not self.service_principal[0].get('client_id'): self.fail('service_principal.client_id should be specified when using Kubernetes') if not self.service_principal[0].get('client_secret'): self.fail('service_principal.client_secret should be specified when using Kubernetes') mastercount = self.master_profile[0].get('count') if mastercount != 1 and mastercount != 3 and mastercount != 5: self.fail('Master Count number wrong : {} / should be 1 3 or 5'.format(mastercount)) # For now Agent Pool cannot be more than 1, just remove this part in the future if it change agentpoolcount = len(self.agent_pool_profiles) if agentpoolcount > 1: self.fail('You cannot specify more than agent_pool_profiles') response = self.get_acs() self.results['state'] = response if not response: to_be_updated = True else: self.log('Results : {0}'.format(response)) update_tags, response['tags'] = self.update_tags(response['tags']) if response['provisioning_state'] == "Succeeded": if update_tags: to_be_updated = True # Cannot Update the master count for now // Uncomment this block in the future to support it if response['master_profile'].get('count') != self.master_profile[0].get('count'): # self.log(("Master Profile Count Diff, Was {0} / Now {1}" # .format(response['master_profile'].count, # self.master_profile[0].get('count')))) # to_be_updated = True self.module.warn("master_profile.count cannot be updated") # Cannot Update the SSH Key for now // Uncomment this block in the future to support it if response['linux_profile'].get('ssh_key') != self.linux_profile[0].get('ssh_key'): # self.log(("Linux Profile Diff SSH, Was {0} / Now {1}" # .format(response['linux_profile'].ssh.public_keys[0].key_data, # self.linux_profile[0].get('ssh_key')))) # to_be_updated = True self.module.warn("linux_profile.ssh_key cannot be updated") # self.log("linux_profile response : 
{0}".format(response['linux_profile'].get('admin_username'))) # self.log("linux_profile self : {0}".format(self.linux_profile[0].get('admin_username'))) # Cannot Update the Username for now // Uncomment this block in the future to support it if response['linux_profile'].get('admin_username') != self.linux_profile[0].get('admin_username'): # self.log(("Linux Profile Diff User, Was {0} / Now {1}" # .format(response['linux_profile'].admin_username, # self.linux_profile[0].get('admin_username')))) # to_be_updated = True self.module.warn("linux_profile.admin_username cannot be updated") # Cannot have more that one agent pool profile for now // Uncomment this block in the future to support it # if len(response['agent_pool_profiles']) != len(self.agent_pool_profiles): # self.log("Agent Pool count is diff, need to updated") # to_be_updated = True for profile_result in response['agent_pool_profiles']: matched = False for profile_self in self.agent_pool_profiles: if profile_result['name'] == profile_self['name']: matched = True if profile_result['count'] != profile_self['count'] or profile_result['vm_size'] != profile_self['vm_size']: self.log(("Agent Profile Diff - Count was {0} / Now {1} - Vm_size was {2} / Now {3}" .format(profile_result['count'], profile_self['count'], profile_result['vm_size'], profile_self['vm_size']))) to_be_updated = True if not matched: self.log("Agent Pool not found") to_be_updated = True if to_be_updated: self.log("Need to Create / Update the ACS instance") if self.check_mode: return self.results self.results['state'] = self.create_update_acs() self.results['changed'] = True self.log("Creation / Update done") elif self.state == 'absent': self.delete_acs() self.log("ACS instance deleted") return self.results def create_update_acs(self): ''' Creates or updates a container service with the specified configuration of orchestrator, masters, and agents. 
:return: deserialized ACS instance state dictionary ''' self.log("Creating / Updating the ACS instance {0}".format(self.name)) service_principal_profile = None agentpools = [] if self.agent_pool_profiles: for profile in self.agent_pool_profiles: self.log("Trying to push the following Profile {0}".format(profile)) agentpools.append(create_agent_pool_profile_instance(profile)) if self.orchestration_platform == 'Kubernetes': service_principal_profile = create_service_principal_profile_instance(self.service_principal) parameters = ContainerService( location=self.location, tags=self.tags, orchestrator_profile=create_orch_platform_instance(self.orchestration_platform), service_principal_profile=service_principal_profile, linux_profile=create_linux_profile_instance(self.linux_profile), master_profile=create_master_profile_instance(self.master_profile), agent_pool_profiles=agentpools, diagnostics_profile=create_diagnostics_profile_instance(self.diagnostics_profile) ) # self.log("orchestrator_profile : {0}".format(parameters.orchestrator_profile)) # self.log("service_principal_profile : {0}".format(parameters.service_principal_profile)) # self.log("linux_profile : {0}".format(parameters.linux_profile)) # self.log("ssh from yaml : {0}".format(results.get('linux_profile')[0])) # self.log("ssh : {0}".format(parameters.linux_profile.ssh)) # self.log("master_profile : {0}".format(parameters.master_profile)) # self.log("agent_pool_profiles : {0}".format(parameters.agent_pool_profiles)) # self.log("vm_diagnostics : {0}".format(parameters.diagnostics_profile.vm_diagnostics)) try: poller = self.containerservice_client.container_services.create_or_update(self.resource_group, self.name, parameters) response = self.get_poller_result(poller) except CloudError as exc: self.log('Error attempting to create the ACS instance.') self.fail("Error creating the ACS instance: {0}".format(str(exc))) return create_acs_dict(response) def delete_acs(self): ''' Deletes the specified container service in the specified subscription and resource group. The operation does not delete other resources created as part of creating a container service, including storage accounts, VMs, and availability sets. All the other resources created with the container service are part of the same resource group and can be deleted individually. :return: True ''' self.log("Deleting the ACS instance {0}".format(self.name)) try: poller = self.containerservice_client.container_services.delete(self.resource_group, self.name) self.get_poller_result(poller) except CloudError as e: self.log('Error attempting to delete the ACS instance.') self.fail("Error deleting the ACS instance: {0}".format(str(e))) return True def get_acs(self): ''' Gets the properties of the specified container service. :return: deserialized ACS instance state dictionary ''' self.log("Checking if the ACS instance {0} is present".format(self.name)) found = False try: response = self.containerservice_client.container_services.get(self.resource_group, self.name) found = True self.log("Response : {0}".format(response)) self.log("ACS instance : {0} found".format(response.name)) except CloudError as e: self.log('Did not find the ACS instance.') if found is True: return create_acs_dict(response) else: return False def main(): """Main execution""" AzureRMContainerService() if __name__ == '__main__': main()
e-gob/plataforma-kioscos-autoatencion
scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/cloud/azure/azure_rm_acs.py
Python
bsd-3-clause
27,547
from __future__ import unicode_literals, division, absolute_import

from tests import FlexGetBase


class TestSortBy(FlexGetBase):

    __yaml__ = """
        tasks:
          test1:
            sort_by: title
            mock:
              - {title: 'B C D', url: 'http://localhost/1'}
              - {title: 'A B C', url: 'http://localhost/2'}
              - {title: 'A P E', url: 'http://localhost/3'}
          test2:
            sort_by:
              field: title
              reverse: true
            mock:
              - {title: 'B C D', url: 'http://localhost/1'}
              - {title: 'A B C', url: 'http://localhost/2'}
              - {title: 'A P E', url: 'http://localhost/3'}
          test3:
            sort_by:
              reverse: true
            mock:
              - {title: 'B C D', url: 'http://localhost/1'}
              - {title: 'A B C', url: 'http://localhost/2'}
              - {title: 'A P E', url: 'http://localhost/3'}
          test_quality:
            sort_by:
              field: quality
              reverse: true
            mock:
              - {title: 'Test.720p'}
              - {title: 'Test.hdtv'}
              - {title: 'Test.1080p'}
    """

    def test_sort_by_title(self):
        self.execute_task('test1')
        assert self.task.entries[0]['title'] == 'A B C', 'Entries sorted alphabetically by title'
        assert self.task.entries[1]['title'] == 'A P E', 'Entries sorted alphabetically by title'
        assert self.task.entries[2]['title'] == 'B C D', 'Entries sorted alphabetically by title'

    def test_sort_by_title_reverse(self):
        self.execute_task('test2')
        assert self.task.entries[0]['title'] == 'B C D', 'Entries sorted alphabetically by title'
        assert self.task.entries[1]['title'] == 'A P E', 'Entries sorted alphabetically by title'
        assert self.task.entries[2]['title'] == 'A B C', 'Entries sorted alphabetically by title'

    def test_sort_by_reverse(self):
        self.execute_task('test3')
        assert self.task.entries[0]['title'] == 'A P E', 'Entries sorted alphabetically by title'
        assert self.task.entries[1]['title'] == 'A B C', 'Entries sorted alphabetically by title'
        assert self.task.entries[2]['title'] == 'B C D', 'Entries sorted alphabetically by title'

    def test_quality_sort(self):
        self.execute_task('test_quality')
        assert self.task.entries[0]['title'] == 'Test.1080p', 'Entries should be sorted by descending quality'
        assert self.task.entries[1]['title'] == 'Test.720p', 'Entries should be sorted by descending quality'
        assert self.task.entries[2]['title'] == 'Test.hdtv', 'Entries should be sorted by descending quality'
ratoaq2/Flexget
tests/test_sort_by.py
Python
mit
2,714
"""Meta-estimators for building composite models with transformers In addition to its current contents, this module will eventually be home to refurbished versions of Pipeline and FeatureUnion. """ from ._column_transformer import ColumnTransformer, make_column_transformer from ._target import TransformedTargetRegressor __all__ = [ 'ColumnTransformer', 'make_column_transformer', 'TransformedTargetRegressor', ]
vortex-ape/scikit-learn
sklearn/compose/__init__.py
Python
bsd-3-clause
431
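# Illustrative usage sketch (not part of the original file above): one way the
# re-exported ColumnTransformer can be used; the column names are made up.
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder, StandardScaler

frame = pd.DataFrame({"age": [25, 32, 47], "city": ["NY", "SF", "NY"]})
preprocess = ColumnTransformer([
    ("num", StandardScaler(), ["age"]),   # scale the numeric column
    ("cat", OneHotEncoder(), ["city"]),   # one-hot encode the categorical column
])
features = preprocess.fit_transform(frame)  # combined feature matrix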
import json


def lambda_handler(event, context):
    return {
        'statusCode': 200,
        'body': json.dumps('Hello from Lambda!')
    }
thaim/ansible
test/integration/targets/s3_bucket_notification/files/mini_lambda.py
Python
mit
145
import wx
import sys
import os
import time
import threading
import math

import pynotify
import pygame.mixer

sys.path.append(os.getenv("PAPARAZZI_HOME") + "/sw/ext/pprzlink/lib/v1.0/python")

from pprzlink.ivy import IvyMessagesInterface

WIDTH = 150
HEIGHT = 40
UPDATE_INTERVAL = 250


class RadioWatchFrame(wx.Frame):
    def message_recv(self, ac_id, msg):
        if msg.name == "ROTORCRAFT_STATUS":
            self.rc_status = int(msg['rc_status'])
            if self.rc_status != 0 and not self.alertChannel.get_busy():
                self.warn_timer = wx.CallLater(5, self.rclink_alert)
            # else:
            #     self.notification.close()

    def gui_update(self):
        self.rc_statusText.SetLabel(["OK", "LOST", "REALLY LOST"][self.rc_status])
        self.update_timer.Restart(UPDATE_INTERVAL)

    def rclink_alert(self):
        self.alertChannel.queue(self.alertSound)
        self.notification.show()
        time.sleep(5)

    def setFont(self, control):
        font = control.GetFont()
        size = font.GetPointSize()
        font.SetPointSize(size * 1.4)
        control.SetFont(font)

    def __init__(self):
        wx.Frame.__init__(self, id=-1, parent=None, name=u'RCWatchFrame',
                          size=wx.Size(WIDTH, HEIGHT), title=u'RC Status')
        self.Bind(wx.EVT_CLOSE, self.OnClose)
        self.rc_statusText = wx.StaticText(self, -1, "UNKWN")
        pygame.mixer.init()
        self.alertSound = pygame.mixer.Sound("crossing.wav")
        self.alertChannel = pygame.mixer.Channel(False)
        self.setFont(self.rc_statusText)
        self.notification = pynotify.Notification("RC Link Warning!",
                                                  "RC Link status not OK!",
                                                  "dialog-warning")
        self.rc_status = -1
        pynotify.init("RC Status")

        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(self.rc_statusText, 1, wx.EXPAND)
        self.SetSizer(sizer)
        sizer.Layout()

        self.interface = IvyMessagesInterface("radiowatchframe")
        self.interface.subscribe(self.message_recv)
        self.update_timer = wx.CallLater(UPDATE_INTERVAL, self.gui_update)

    def OnClose(self, event):
        self.interface.shutdown()
        self.Destroy()
baspijhor/paparazzi
sw/ground_segment/python/dashboard/radiowatchframe.py
Python
gpl-2.0
2,290
#!/usr/bin/python
# -*- coding: utf-8 -*-

# (c) 2018, Simon Dodsley (simon@purestorage.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = r'''
---
module: purefa_volume
version_added: '2.4'
short_description: Manage volumes on Pure Storage FlashArrays
description:
- Create, delete or extend the capacity of a volume on Pure Storage FlashArray.
author:
- Simon Dodsley (@sdodsley)
options:
  name:
    description:
    - The name of the volume.
    required: true
  target:
    description:
    - The name of the target volume, if copying.
  state:
    description:
    - Define whether the volume should exist or not.
    default: present
    choices: [ absent, present ]
  eradicate:
    description:
    - Define whether to eradicate the volume on delete or leave in trash.
    type: bool
    default: 'no'
  overwrite:
    description:
    - Define whether to overwrite a target volume if it already exists.
    type: bool
    default: 'no'
  size:
    description:
    - Volume size in M, G, T or P units.
extends_documentation_fragment:
- purestorage.fa
'''

EXAMPLES = r'''
- name: Create new volume named foo
  purefa_volume:
    name: foo
    size: 1T
    fa_url: 10.10.10.2
    api_token: e31060a7-21fc-e277-6240-25983c6c4592
    state: present

- name: Extend the size of an existing volume named foo
  purefa_volume:
    name: foo
    size: 2T
    fa_url: 10.10.10.2
    api_token: e31060a7-21fc-e277-6240-25983c6c4592
    state: present

- name: Delete and eradicate volume named foo
  purefa_volume:
    name: foo
    eradicate: yes
    fa_url: 10.10.10.2
    api_token: e31060a7-21fc-e277-6240-25983c6c4592
    state: absent

- name: Create clone of volume bar named foo
  purefa_volume:
    name: foo
    target: bar
    fa_url: 10.10.10.2
    api_token: e31060a7-21fc-e277-6240-25983c6c4592
    state: present

- name: Overwrite volume bar with volume foo
  purefa_volume:
    name: foo
    target: bar
    overwrite: yes
    fa_url: 10.10.10.2
    api_token: e31060a7-21fc-e277-6240-25983c6c4592
    state: present
'''

RETURN = r'''
'''

try:
    from purestorage import purestorage
    HAS_PURESTORAGE = True
except ImportError:
    HAS_PURESTORAGE = False

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pure import get_system, purefa_argument_spec


def human_to_bytes(size):
    """Given a human-readable byte string (e.g. 2G, 30M), return the number
    of bytes. Will return 0 if the argument has unexpected form.
    """
    bytes = size[:-1]
    unit = size[-1]
    if bytes.isdigit():
        bytes = int(bytes)
        if unit == 'P':
            bytes *= 1125899906842624
        elif unit == 'T':
            bytes *= 1099511627776
        elif unit == 'G':
            bytes *= 1073741824
        elif unit == 'M':
            bytes *= 1048576
        else:
            bytes = 0
    else:
        bytes = 0
    return bytes


def get_volume(module, array):
    """Return Volume or None"""
    try:
        return array.get_volume(module.params['name'])
    except:
        return None


def get_target(module, array):
    """Return Volume or None"""
    try:
        return array.get_volume(module.params['target'])
    except:
        return None


def create_volume(module, array):
    """Create Volume"""
    size = module.params['size']

    changed = True
    if not module.check_mode:
        try:
            array.create_volume(module.params['name'], size)
        except:
            changed = False

    module.exit_json(changed=changed)


def copy_from_volume(module, array):
    """Create Volume Clone"""
    changed = False
    tgt = get_target(module, array)

    if tgt is None:
        changed = True
        if not module.check_mode:
            array.copy_volume(module.params['name'],
                              module.params['target'])
    elif tgt is not None and module.params['overwrite']:
        changed = True
        if not module.check_mode:
            array.copy_volume(module.params['name'],
                              module.params['target'],
                              overwrite=module.params['overwrite'])

    module.exit_json(changed=changed)


def update_volume(module, array):
    """Update Volume"""
    changed = True
    vol = array.get_volume(module.params['name'])
    if human_to_bytes(module.params['size']) > vol['size']:
        if not module.check_mode:
            array.extend_volume(module.params['name'], module.params['size'])
    else:
        changed = False

    module.exit_json(changed=changed)


def delete_volume(module, array):
    """ Delete Volume"""
    changed = True
    if not module.check_mode:
        try:
            array.destroy_volume(module.params['name'])
            if module.params['eradicate']:
                try:
                    array.eradicate_volume(module.params['name'])
                except:
                    changed = False
        except:
            changed = False

    module.exit_json(changed=changed)


def main():
    argument_spec = purefa_argument_spec()
    argument_spec.update(dict(
        name=dict(type='str', required=True),
        target=dict(type='str'),
        overwrite=dict(type='bool', default=False),
        eradicate=dict(type='bool', default=False),
        state=dict(type='str', default='present', choices=['absent', 'present']),
        size=dict(type='str'),
    ))

    mutually_exclusive = [['size', 'target']]

    module = AnsibleModule(argument_spec,
                           mutually_exclusive=mutually_exclusive,
                           supports_check_mode=True)

    if not HAS_PURESTORAGE:
        module.fail_json(msg='purestorage sdk is required for this module in volume')

    size = module.params['size']
    state = module.params['state']
    array = get_system(module)
    volume = get_volume(module, array)
    target = get_target(module, array)

    if state == 'present' and not volume and size:
        create_volume(module, array)
    elif state == 'present' and volume and size:
        update_volume(module, array)
    elif state == 'present' and volume and target:
        copy_from_volume(module, array)
    elif state == 'present' and volume and not target:
        copy_from_volume(module, array)
    elif state == 'absent' and volume:
        delete_volume(module, array)
    elif state == 'present' and not volume or not size:
        module.exit_json(changed=False)
    elif state == 'absent' and not volume:
        module.exit_json(changed=False)


if __name__ == '__main__':
    main()
hryamzik/ansible
lib/ansible/modules/storage/purestorage/purefa_volume.py
Python
gpl-3.0
6,827
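# Illustrative check (not part of the original module above): the conversion
# behaviour documented for human_to_bytes, shown on a few sample inputs and
# assuming the function has been imported from the module.
assert human_to_bytes('2G') == 2 * 1073741824
assert human_to_bytes('30M') == 30 * 1048576
assert human_to_bytes('10X') == 0   # unknown unit letter
assert human_to_bytes('oops') == 0  # prefix is not a number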
# Copyright (c) 2015 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

import mock
from oslo_config import cfg
from oslo_utils import uuidutils

from neutron.agent.l3 import keepalived_state_change
from neutron.tests.functional import base


class TestKeepalivedStateChange(base.BaseSudoTestCase):
    def setUp(self):
        super(TestKeepalivedStateChange, self).setUp()
        cfg.CONF.register_opt(
            cfg.StrOpt('metadata_proxy_socket',
                       default='$state_path/metadata_proxy',
                       help=_('Location of Metadata Proxy UNIX domain '
                              'socket')))

        self.router_id = uuidutils.generate_uuid()
        self.conf_dir = self.get_default_temp_dir().path
        self.cidr = '169.254.128.1/24'
        self.interface_name = 'interface'
        self.monitor = keepalived_state_change.MonitorDaemon(
            self.get_temp_file_path('monitor.pid'),
            self.router_id,
            1,
            2,
            'namespace',
            self.conf_dir,
            self.interface_name,
            self.cidr)
        mock.patch.object(self.monitor, 'notify_agent').start()
        self.line = '1: %s inet %s' % (self.interface_name, self.cidr)

    def test_parse_and_handle_event_wrong_device_completes_without_error(self):
        self.monitor.parse_and_handle_event(
            '1: wrong_device inet wrong_cidr')

    def _get_state(self):
        with open(os.path.join(self.monitor.conf_dir, 'state')) as state_file:
            return state_file.read()

    def test_parse_and_handle_event_writes_to_file(self):
        self.monitor.parse_and_handle_event('Deleted %s' % self.line)
        self.assertEqual('backup', self._get_state())

        self.monitor.parse_and_handle_event(self.line)
        self.assertEqual('master', self._get_state())

    def test_parse_and_handle_event_fails_writing_state(self):
        with mock.patch.object(
                self.monitor, 'write_state_change', side_effect=OSError):
            self.monitor.parse_and_handle_event(self.line)

    def test_parse_and_handle_event_fails_notifying_agent(self):
        with mock.patch.object(
                self.monitor, 'notify_agent', side_effect=Exception):
            self.monitor.parse_and_handle_event(self.line)
mattt416/neutron
neutron/tests/functional/agent/l3/test_keepalived_state_change.py
Python
apache-2.0
2,853
""" Test Cython optimize zeros API functions: ``bisect``, ``ridder``, ``brenth``, and ``brentq`` in `scipy.optimize.cython_optimize`, by finding the roots of a 3rd order polynomial given a sequence of constant terms, ``a0``, and fixed 1st, 2nd, and 3rd order terms in ``args``. .. math:: f(x, a0, args) = ((args[2]*x + args[1])*x + args[0])*x + a0 The 3rd order polynomial function is written in Cython and called in a Python wrapper named after the zero function. See the private ``_zeros`` Cython module in `scipy.optimize.cython_optimze` for more information. """ import numpy.testing as npt from scipy.optimize.cython_optimize import _zeros # CONSTANTS # Solve x**3 - A0 = 0 for A0 = [2.0, 2.1, ..., 2.9]. # The ARGS have 3 elements just to show how this could be done for any cubic # polynomial. A0 = tuple(-2.0 - x/10.0 for x in range(10)) # constant term ARGS = (0.0, 0.0, 1.0) # 1st, 2nd, and 3rd order terms XLO, XHI = 0.0, 2.0 # first and second bounds of zeros functions # absolute and relative tolerances and max iterations for zeros functions XTOL, RTOL, MITR = 0.001, 0.001, 10 EXPECTED = [(-a0) ** (1.0/3.0) for a0 in A0] # = [1.2599210498948732, # 1.2805791649874942, # 1.300591446851387, # 1.3200061217959123, # 1.338865900164339, # 1.3572088082974532, # 1.375068867074141, # 1.3924766500838337, # 1.4094597464129783, # 1.4260431471424087] # test bisect def test_bisect(): npt.assert_allclose( EXPECTED, list( _zeros.loop_example('bisect', A0, ARGS, XLO, XHI, XTOL, RTOL, MITR) ), rtol=RTOL, atol=XTOL ) # test ridder def test_ridder(): npt.assert_allclose( EXPECTED, list( _zeros.loop_example('ridder', A0, ARGS, XLO, XHI, XTOL, RTOL, MITR) ), rtol=RTOL, atol=XTOL ) # test brenth def test_brenth(): npt.assert_allclose( EXPECTED, list( _zeros.loop_example('brenth', A0, ARGS, XLO, XHI, XTOL, RTOL, MITR) ), rtol=RTOL, atol=XTOL ) # test brentq def test_brentq(): npt.assert_allclose( EXPECTED, list( _zeros.loop_example('brentq', A0, ARGS, XLO, XHI, XTOL, RTOL, MITR) ), rtol=RTOL, atol=XTOL ) # test brentq with full output def test_brentq_full_output(): output = _zeros.full_output_example( (A0[0],) + ARGS, XLO, XHI, XTOL, RTOL, MITR) npt.assert_allclose(EXPECTED[0], output['root'], rtol=RTOL, atol=XTOL) npt.assert_equal(6, output['iterations']) npt.assert_equal(7, output['funcalls']) npt.assert_equal(0, output['error_num'])
WarrenWeckesser/scipy
scipy/optimize/tests/test_cython_optimize.py
Python
bsd-3-clause
2,638
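# Illustrative sketch (not part of the original test file above): the plain
# Python/SciPy analogue of the Cython loop being tested, using optimize.brentq
# on the same cubic f(x) = x**3 + a0 over the bracket [0, 2].
from scipy.optimize import brentq

def poly(x, a0, args=(0.0, 0.0, 1.0)):
    return ((args[2] * x + args[1]) * x + args[0]) * x + a0

roots = [brentq(poly, 0.0, 2.0, args=(a0,))
         for a0 in (-2.0 - k / 10.0 for k in range(10))]
# each root is close to (-a0) ** (1.0 / 3.0), matching EXPECTED above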
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2015, Jon Hawkesworth (@jhawkesworth) <figs@unity.demon.co.uk> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'status': ['stableinterface'], 'supported_by': 'core', 'version': '1.0'} DOCUMENTATION = r''' --- module: win_file version_added: "1.9.2" short_description: Creates, touches or removes files or directories. description: - Creates (empty) files, updates file modification stamps of existing files, and can create or remove directories. - Unlike M(file), does not modify ownership, permissions or manipulate links. notes: - See also M(win_copy), M(win_template), M(copy), M(template), M(assemble) requirements: [ ] author: "Jon Hawkesworth (@jhawkesworth)" options: path: description: - 'path to the file being managed. Aliases: I(dest), I(name)' required: true aliases: ['dest', 'name'] state: description: - If C(directory), all immediate subdirectories will be created if they do not exist. If C(file), the file will NOT be created if it does not exist, see the M(copy) or M(template) module if you want that behavior. If C(absent), directories will be recursively deleted, and files will be removed. If C(touch), an empty file will be created if the C(path) does not exist, while an existing file or directory will receive updated file access and modification times (similar to the way C(touch) works from the command line). choices: [ file, directory, touch, absent ] ''' EXAMPLES = r''' - name: Create a file win_file: path: C:\Temp\foo.conf state: file - name: Touch a file (creates if not present, updates modification time if present) win_file: path: C:\Temp\foo.conf state: touch - name: Remove a file, if present win_file: path: C:\Temp\foo.conf state: absent - name: Create directory structure win_file: path: C:\Temp\folder\subfolder state: directory - name: Remove directory structure win_file: path: C:\Temp state: absent '''
camradal/ansible
lib/ansible/modules/windows/win_file.py
Python
gpl-3.0
2,738
import multiprocessing
import os

_is_travis = os.environ.get('TRAVIS') == 'true'

workers = multiprocessing.cpu_count()
if _is_travis:
    workers = 2

bind = ['0.0.0.0:8080', '0.0.0.0:8081', '0.0.0.0:8082']
keepalive = 120
errorlog = '-'
pidfile = '/tmp/api_hour.pid'
pythonpath = 'hello'
backlog = 10240000
actframework/FrameworkBenchmarks
frameworks/Python/api_hour/yocto_http/etc/hello/api_hour/gunicorn_conf.py
Python
bsd-3-clause
309
from collections import defaultdict from openerp.tools import mute_logger from openerp.tests import common UID = common.ADMIN_USER_ID class TestORM(common.TransactionCase): """ test special behaviors of ORM CRUD functions TODO: use real Exceptions types instead of Exception """ def setUp(self): super(TestORM, self).setUp() cr, uid = self.cr, self.uid self.partner = self.registry('res.partner') self.users = self.registry('res.users') self.p1 = self.partner.name_create(cr, uid, 'W')[0] self.p2 = self.partner.name_create(cr, uid, 'Y')[0] self.ir_rule = self.registry('ir.rule') # sample unprivileged user employee_gid = self.ref('base.group_user') self.uid2 = self.users.create(cr, uid, {'name': 'test user', 'login': 'test', 'groups_id': [4,employee_gid]}) @mute_logger('openerp.models') def testAccessDeletedRecords(self): """ Verify that accessing deleted records works as expected """ cr, uid, uid2, p1, p2 = self.cr, self.uid, self.uid2, self.p1, self.p2 self.partner.unlink(cr, uid, [p1]) # read() is expected to skip deleted records because our API is not # transactional for a sequence of search()->read() performed from the # client-side... a concurrent deletion could therefore cause spurious # exceptions even when simply opening a list view! # /!\ Using unprileged user to detect former side effects of ir.rules! self.assertEqual([{'id': p2, 'name': 'Y'}], self.partner.read(cr, uid2, [p1,p2], ['name']), "read() should skip deleted records") self.assertEqual([], self.partner.read(cr, uid2, [p1], ['name']), "read() should skip deleted records") # Deleting an already deleted record should be simply ignored self.assertTrue(self.partner.unlink(cr, uid, [p1]), "Re-deleting should be a no-op") # Updating an already deleted record should raise, even as admin with self.assertRaises(Exception): self.partner.write(cr, uid, [p1], {'name': 'foo'}) @mute_logger('openerp.models') def testAccessFilteredRecords(self): """ Verify that accessing filtered records works as expected for non-admin user """ cr, uid, uid2, p1, p2 = self.cr, self.uid, self.uid2, self.p1, self.p2 partner_model = self.registry('ir.model').search(cr, uid, [('model','=','res.partner')])[0] self.ir_rule.create(cr, uid, {'name': 'Y is invisible', 'domain_force': [('id', '!=', p1)], 'model_id': partner_model}) # search as unprivileged user partners = self.partner.search(cr, uid2, []) self.assertFalse(p1 in partners, "W should not be visible...") self.assertTrue(p2 in partners, "... 
but Y should be visible") # read as unprivileged user with self.assertRaises(Exception): self.partner.read(cr, uid2, [p1], ['name']) # write as unprivileged user with self.assertRaises(Exception): self.partner.write(cr, uid2, [p1], {'name': 'foo'}) # unlink as unprivileged user with self.assertRaises(Exception): self.partner.unlink(cr, uid2, [p1]) # Prepare mixed case self.partner.unlink(cr, uid, [p2]) # read mixed records: some deleted and some filtered with self.assertRaises(Exception): self.partner.read(cr, uid2, [p1,p2], ['name']) # delete mixed records: some deleted and some filtered with self.assertRaises(Exception): self.partner.unlink(cr, uid2, [p1,p2]) def test_multi_read(self): record_id = self.partner.create(self.cr, UID, {'name': 'MyPartner1'}) records = self.partner.read(self.cr, UID, [record_id]) self.assertIsInstance(records, list) def test_one_read(self): record_id = self.partner.create(self.cr, UID, {'name': 'MyPartner1'}) record = self.partner.read(self.cr, UID, record_id) self.assertIsInstance(record, dict) @mute_logger('openerp.models') def test_search_read(self): # simple search_read self.partner.create(self.cr, UID, {'name': 'MyPartner1'}) found = self.partner.search_read(self.cr, UID, [['name', '=', 'MyPartner1']], ['name']) self.assertEqual(len(found), 1) self.assertEqual(found[0]['name'], 'MyPartner1') self.assertTrue('id' in found[0]) # search_read correct order self.partner.create(self.cr, UID, {'name': 'MyPartner2'}) found = self.partner.search_read(self.cr, UID, [['name', 'like', 'MyPartner']], ['name'], order="name") self.assertEqual(len(found), 2) self.assertEqual(found[0]['name'], 'MyPartner1') self.assertEqual(found[1]['name'], 'MyPartner2') found = self.partner.search_read(self.cr, UID, [['name', 'like', 'MyPartner']], ['name'], order="name desc") self.assertEqual(len(found), 2) self.assertEqual(found[0]['name'], 'MyPartner2') self.assertEqual(found[1]['name'], 'MyPartner1') # search_read that finds nothing found = self.partner.search_read(self.cr, UID, [['name', '=', 'Does not exists']], ['name']) self.assertEqual(len(found), 0) def test_exists(self): partner = self.partner.browse(self.cr, UID, []) # check that records obtained from search exist recs = partner.search([]) self.assertTrue(recs) self.assertEqual(recs.exists(), recs) # check that there is no record with id 0 recs = partner.browse([0]) self.assertFalse(recs.exists()) def test_groupby_date(self): partners = dict( A='2012-11-19', B='2012-12-17', C='2012-12-31', D='2013-01-07', E='2013-01-14', F='2013-01-28', G='2013-02-11', ) all_partners = [] partners_by_day = defaultdict(set) partners_by_month = defaultdict(set) partners_by_year = defaultdict(set) for name, date in partners.items(): p = self.partner.create(self.cr, UID, dict(name=name, date=date)) all_partners.append(p) partners_by_day[date].add(p) partners_by_month[date.rsplit('-', 1)[0]].add(p) partners_by_year[date.split('-', 1)[0]].add(p) def read_group(interval, domain=None): main_domain = [('id', 'in', all_partners)] if domain: domain = ['&'] + main_domain + domain else: domain = main_domain rg = self.partner.read_group(self.cr, self.uid, domain, ['date'], 'date' + ':' + interval) result = {} for r in rg: result[r['date:' + interval]] = set(self.partner.search(self.cr, self.uid, r['__domain'])) return result self.assertEqual(len(read_group('day')), len(partners_by_day)) self.assertEqual(len(read_group('month')), len(partners_by_month)) self.assertEqual(len(read_group('year')), len(partners_by_year)) rg = self.partner.read_group(self.cr, 
self.uid, [('id', 'in', all_partners)], ['date'], ['date:month', 'date:day'], lazy=False) self.assertEqual(len(rg), len(all_partners)) def test_write_duplicate(self): cr, uid, p1 = self.cr, self.uid, self.p1 self.partner.write(cr, uid, [p1, p1], {'name': 'X'}) def test_m2m_store_trigger(self): group_user = self.env.ref('base.group_user') user = self.env['res.users'].create({ 'name': 'test', 'login': 'test_m2m_store_trigger', 'groups_id': [(6, 0, [])], }) self.assertTrue(user.share) group_user.write({'users': [(4, user.id)]}) self.assertFalse(user.share) group_user.write({'users': [(3, user.id)]}) self.assertTrue(user.share) class TestInherits(common.TransactionCase): """ test the behavior of the orm for models that use _inherits; specifically: res.users, that inherits from res.partner """ def setUp(self): super(TestInherits, self).setUp() self.partner = self.registry('res.partner') self.user = self.registry('res.users') def test_default(self): """ `default_get` cannot return a dictionary or a new id """ defaults = self.user.default_get(self.cr, UID, ['partner_id']) if 'partner_id' in defaults: self.assertIsInstance(defaults['partner_id'], (bool, int, long)) def test_create(self): """ creating a user should automatically create a new partner """ partners_before = self.partner.search(self.cr, UID, []) foo_id = self.user.create(self.cr, UID, {'name': 'Foo', 'login': 'foo', 'password': 'foo'}) foo = self.user.browse(self.cr, UID, foo_id) self.assertNotIn(foo.partner_id.id, partners_before) def test_create_with_ancestor(self): """ creating a user with a specific 'partner_id' should not create a new partner """ par_id = self.partner.create(self.cr, UID, {'name': 'Foo'}) partners_before = self.partner.search(self.cr, UID, []) foo_id = self.user.create(self.cr, UID, {'partner_id': par_id, 'login': 'foo', 'password': 'foo'}) partners_after = self.partner.search(self.cr, UID, []) self.assertEqual(set(partners_before), set(partners_after)) foo = self.user.browse(self.cr, UID, foo_id) self.assertEqual(foo.name, 'Foo') self.assertEqual(foo.partner_id.id, par_id) @mute_logger('openerp.models') def test_read(self): """ inherited fields should be read without any indirection """ foo_id = self.user.create(self.cr, UID, {'name': 'Foo', 'login': 'foo', 'password': 'foo'}) foo_values, = self.user.read(self.cr, UID, [foo_id]) partner_id = foo_values['partner_id'][0] partner_values, = self.partner.read(self.cr, UID, [partner_id]) self.assertEqual(foo_values['name'], partner_values['name']) foo = self.user.browse(self.cr, UID, foo_id) self.assertEqual(foo.name, foo.partner_id.name) @mute_logger('openerp.models') def test_copy(self): """ copying a user should automatically copy its partner, too """ foo_id = self.user.create(self.cr, UID, { 'name': 'Foo', 'login': 'foo', 'password': 'foo', 'supplier': True, }) foo_before, = self.user.read(self.cr, UID, [foo_id]) del foo_before['__last_update'] bar_id = self.user.copy(self.cr, UID, foo_id, { 'login': 'bar', 'password': 'bar', }) foo_after, = self.user.read(self.cr, UID, [foo_id]) del foo_after['__last_update'] self.assertEqual(foo_before, foo_after) foo, bar = self.user.browse(self.cr, UID, [foo_id, bar_id]) self.assertEqual(bar.name, 'Foo (copy)') self.assertEqual(bar.login, 'bar') self.assertEqual(foo.supplier, bar.supplier) self.assertNotEqual(foo.id, bar.id) self.assertNotEqual(foo.partner_id.id, bar.partner_id.id) @mute_logger('openerp.models') def test_copy_with_ancestor(self): """ copying a user with 'parent_id' in defaults should not duplicate the partner 
""" foo_id = self.user.create(self.cr, UID, {'name': 'Foo', 'login': 'foo', 'password': 'foo', 'login_date': '2016-01-01', 'signature': 'XXX'}) par_id = self.partner.create(self.cr, UID, {'name': 'Bar'}) foo_before, = self.user.read(self.cr, UID, [foo_id]) del foo_before['__last_update'] del foo_before['login_date'] partners_before = self.partner.search(self.cr, UID, []) bar_id = self.user.copy(self.cr, UID, foo_id, {'partner_id': par_id, 'login': 'bar'}) foo_after, = self.user.read(self.cr, UID, [foo_id]) del foo_after['__last_update'] del foo_after['login_date'] partners_after = self.partner.search(self.cr, UID, []) self.assertEqual(foo_before, foo_after) self.assertEqual(set(partners_before), set(partners_after)) foo, bar = self.user.browse(self.cr, UID, [foo_id, bar_id]) self.assertNotEqual(foo.id, bar.id) self.assertEqual(bar.partner_id.id, par_id) self.assertEqual(bar.login, 'bar', "login is given from copy parameters") self.assertFalse(bar.password, "password should not be copied from original record") self.assertEqual(bar.name, 'Bar', "name is given from specific partner") self.assertEqual(bar.signature, foo.signature, "signature should be copied") CREATE = lambda values: (0, False, values) UPDATE = lambda id, values: (1, id, values) DELETE = lambda id: (2, id, False) FORGET = lambda id: (3, id, False) LINK_TO = lambda id: (4, id, False) DELETE_ALL = lambda: (5, False, False) REPLACE_WITH = lambda ids: (6, False, ids) def sorted_by_id(list_of_dicts): "sort dictionaries by their 'id' field; useful for comparisons" return sorted(list_of_dicts, key=lambda d: d.get('id')) class TestO2MSerialization(common.TransactionCase): """ test the orm method 'write' on one2many fields """ def setUp(self): super(TestO2MSerialization, self).setUp() self.partner = self.registry('res.partner') def test_no_command(self): " empty list of commands yields an empty list of records " results = self.partner.resolve_2many_commands( self.cr, UID, 'child_ids', []) self.assertEqual(results, []) def test_CREATE_commands(self): " returns the VALUES dict as-is " values = [{'foo': 'bar'}, {'foo': 'baz'}, {'foo': 'baq'}] results = self.partner.resolve_2many_commands( self.cr, UID, 'child_ids', map(CREATE, values)) self.assertEqual(results, values) def test_LINK_TO_command(self): " reads the records from the database, records are returned with their ids. 
" ids = [ self.partner.create(self.cr, UID, {'name': 'foo'}), self.partner.create(self.cr, UID, {'name': 'bar'}), self.partner.create(self.cr, UID, {'name': 'baz'}) ] commands = map(LINK_TO, ids) results = self.partner.resolve_2many_commands( self.cr, UID, 'child_ids', commands, ['name']) self.assertEqual(sorted_by_id(results), sorted_by_id([ {'id': ids[0], 'name': 'foo'}, {'id': ids[1], 'name': 'bar'}, {'id': ids[2], 'name': 'baz'} ])) def test_bare_ids_command(self): " same as the equivalent LINK_TO commands " ids = [ self.partner.create(self.cr, UID, {'name': 'foo'}), self.partner.create(self.cr, UID, {'name': 'bar'}), self.partner.create(self.cr, UID, {'name': 'baz'}) ] results = self.partner.resolve_2many_commands( self.cr, UID, 'child_ids', ids, ['name']) self.assertEqual(sorted_by_id(results), sorted_by_id([ {'id': ids[0], 'name': 'foo'}, {'id': ids[1], 'name': 'bar'}, {'id': ids[2], 'name': 'baz'} ])) def test_UPDATE_command(self): " take the in-db records and merge the provided information in " id_foo = self.partner.create(self.cr, UID, {'name': 'foo'}) id_bar = self.partner.create(self.cr, UID, {'name': 'bar'}) id_baz = self.partner.create(self.cr, UID, {'name': 'baz', 'city': 'tag'}) results = self.partner.resolve_2many_commands( self.cr, UID, 'child_ids', [ LINK_TO(id_foo), UPDATE(id_bar, {'name': 'qux', 'city': 'tagtag'}), UPDATE(id_baz, {'name': 'quux'}) ], ['name', 'city']) self.assertEqual(sorted_by_id(results), sorted_by_id([ {'id': id_foo, 'name': 'foo', 'city': False}, {'id': id_bar, 'name': 'qux', 'city': 'tagtag'}, {'id': id_baz, 'name': 'quux', 'city': 'tag'} ])) def test_DELETE_command(self): " deleted records are not returned at all. " ids = [ self.partner.create(self.cr, UID, {'name': 'foo'}), self.partner.create(self.cr, UID, {'name': 'bar'}), self.partner.create(self.cr, UID, {'name': 'baz'}) ] commands = [DELETE(ids[0]), DELETE(ids[1]), DELETE(ids[2])] results = self.partner.resolve_2many_commands( self.cr, UID, 'child_ids', commands, ['name']) self.assertEqual(results, []) def test_mixed_commands(self): ids = [ self.partner.create(self.cr, UID, {'name': name}) for name in ['NObar', 'baz', 'qux', 'NOquux', 'NOcorge', 'garply'] ] results = self.partner.resolve_2many_commands( self.cr, UID, 'child_ids', [ CREATE({'name': 'foo'}), UPDATE(ids[0], {'name': 'bar'}), LINK_TO(ids[1]), DELETE(ids[2]), UPDATE(ids[3], {'name': 'quux',}), UPDATE(ids[4], {'name': 'corge'}), CREATE({'name': 'grault'}), LINK_TO(ids[5]) ], ['name']) self.assertEqual(sorted_by_id(results), sorted_by_id([ {'name': 'foo'}, {'id': ids[0], 'name': 'bar'}, {'id': ids[1], 'name': 'baz'}, {'id': ids[3], 'name': 'quux'}, {'id': ids[4], 'name': 'corge'}, {'name': 'grault'}, {'id': ids[5], 'name': 'garply'} ])) def test_LINK_TO_pairs(self): "LINK_TO commands can be written as pairs, instead of triplets" ids = [ self.partner.create(self.cr, UID, {'name': 'foo'}), self.partner.create(self.cr, UID, {'name': 'bar'}), self.partner.create(self.cr, UID, {'name': 'baz'}) ] commands = map(lambda id: (4, id), ids) results = self.partner.resolve_2many_commands( self.cr, UID, 'child_ids', commands, ['name']) self.assertEqual(sorted_by_id(results), sorted_by_id([ {'id': ids[0], 'name': 'foo'}, {'id': ids[1], 'name': 'bar'}, {'id': ids[2], 'name': 'baz'} ])) def test_singleton_commands(self): "DELETE_ALL can appear as a singleton" results = self.partner.resolve_2many_commands( self.cr, UID, 'child_ids', [DELETE_ALL()], ['name']) self.assertEqual(results, [])
vileopratama/vitech
src/openerp/addons/base/tests/test_orm.py
Python
mit
18,738
from __future__ import unicode_literals

from django.utils.functional import cached_property
from django.contrib.contenttypes.models import ContentType

from wagtail.wagtailcore.blocks import ChooserBlock


class SnippetChooserBlock(ChooserBlock):
    def __init__(self, target_model, **kwargs):
        super(SnippetChooserBlock, self).__init__(**kwargs)
        self.target_model = target_model

    @cached_property
    def widget(self):
        from wagtail.wagtailsnippets.widgets import AdminSnippetChooser
        content_type = ContentType.objects.get_for_model(self.target_model)
        return AdminSnippetChooser(content_type)
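# --- Illustrative usage sketch (not part of the original file) ---
# A minimal example of how SnippetChooserBlock might be wired into a Wagtail 1.x
# StreamField. `Advert` and the `demo` app are hypothetical placeholders; only
# the wagtail import paths reflect the real API of this era.
#
#   from wagtail.wagtailcore.models import Page
#   from wagtail.wagtailcore.fields import StreamField
#   from wagtail.wagtailsnippets.blocks import SnippetChooserBlock
#   from demo.models import Advert  # hypothetical snippet model
#
#   class BlogPage(Page):
#       body = StreamField([
#           ('advert', SnippetChooserBlock(Advert)),
#       ])
#
# The block stores the chosen snippet's id and renders the AdminSnippetChooser
# widget defined above in the page editor.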
iho/wagtail
wagtail/wagtailsnippets/blocks.py
Python
bsd-3-clause
637
""" Fantasm: A taskqueue-based Finite State Machine for App Engine Python Docs and examples: http://code.google.com/p/fantasm/ Copyright 2010 VendAsta Technologies Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from fantasm import constants from google.appengine.api.taskqueue.taskqueue import Queue class NoOpQueue( Queue ): """ A Queue instance that does not Queue """ def add(self, task, transactional=False): """ see taskqueue.Queue.add """ pass def knuthHash(number): """A decent hash function for integers.""" return (number * 2654435761) % 2**32 def boolConverter(boolStr): """ A converter that maps some common bool string to True """ return {'1': True, 'True': True, 'true': True}.get(boolStr, False) def outputAction(action): """ Outputs the name of the action @param action: an FSMAction instance """ if action: return str(action.__class__.__name__).split('.')[-1] def outputTransitionConfig(transitionConfig): """ Outputs a GraphViz directed graph node @param transitionConfig: a config._TransitionConfig instance @return: a string """ label = transitionConfig.event if transitionConfig.action: label += '/ ' + outputAction(transitionConfig.action) return '"%(fromState)s" -> "%(toState)s" [label="%(label)s"];' % \ {'fromState': transitionConfig.fromState.name, 'toState': transitionConfig.toState.name, 'label': label} def outputStateConfig(stateConfig, colorMap=None): """ Outputs a GraphViz directed graph node @param stateConfig: a config._StateConfig instance @return: a string """ colorMap = colorMap or {} actions = [] if stateConfig.entry: actions.append('entry/ %(entry)s' % {'entry': outputAction(stateConfig.entry)}) if stateConfig.action: actions.append('do/ %(do)s' % {'do': outputAction(stateConfig.action)}) if stateConfig.exit: actions.append('exit/ %(exit)s' % {'exit': outputAction(stateConfig.exit)}) label = '%(stateName)s|%(actions)s' % {'stateName': stateConfig.name, 'actions': '\\l'.join(actions)} if stateConfig.continuation: label += '|continuation = True' if stateConfig.fanInPeriod != constants.NO_FAN_IN: label += '|fan in period = %(fanin)ds' % {'fanin': stateConfig.fanInPeriod} shape = 'Mrecord' if colorMap.get(stateConfig.name): return '"%(stateName)s" [style=filled,fillcolor="%(fillcolor)s",shape=%(shape)s,label="{%(label)s}"];' % \ {'stateName': stateConfig.name, 'fillcolor': colorMap.get(stateConfig.name, 'white'), 'shape': shape, 'label': label} else: return '"%(stateName)s" [shape=%(shape)s,label="{%(label)s}"];' % \ {'stateName': stateConfig.name, 'shape': shape, 'label': label} def outputMachineConfig(machineConfig, colorMap=None, skipStateNames=None): """ Outputs a GraphViz directed graph of the state machine @param machineConfig: a config._MachineConfig instance @return: a string """ skipStateNames = skipStateNames or () lines = [] lines.append('digraph G {') lines.append('label="%(machineName)s"' % {'machineName': machineConfig.name}) lines.append('labelloc="t"') lines.append('"__start__" [label="start",shape=circle,style=filled,fillcolor=black,fontcolor=white,fontsize=9];') lines.append('"__end__" 
[label="end",shape=doublecircle,style=filled,fillcolor=black,fontcolor=white,fontsize=9];') for stateConfig in machineConfig.states.values(): if stateConfig.name in skipStateNames: continue lines.append(outputStateConfig(stateConfig, colorMap=colorMap)) if stateConfig.initial: lines.append('"__start__" -> "%(stateName)s"' % {'stateName': stateConfig.name}) if stateConfig.final: lines.append('"%(stateName)s" -> "__end__"' % {'stateName': stateConfig.name}) for transitionConfig in machineConfig.transitions.values(): if transitionConfig.fromState.name in skipStateNames or \ transitionConfig.toState.name in skipStateNames: continue lines.append(outputTransitionConfig(transitionConfig)) lines.append('}') return '\n'.join(lines)
rafasashi/userinfuser
serverside/fantasm/utils.py
Python
gpl-3.0
4,909
"""Views fo the node settings page.""" # -*- coding: utf-8 -*- from flask import request import logging from addons.dropbox.serializer import DropboxSerializer from addons.base import generic_views from website.project.decorators import must_have_addon, must_be_addon_authorizer logger = logging.getLogger(__name__) debug = logger.debug SHORT_NAME = 'dropbox' FULL_NAME = 'Dropbox' dropbox_account_list = generic_views.account_list( SHORT_NAME, DropboxSerializer ) dropbox_import_auth = generic_views.import_auth( SHORT_NAME, DropboxSerializer ) @must_have_addon(SHORT_NAME, 'node') @must_be_addon_authorizer(SHORT_NAME) def dropbox_folder_list(node_addon, **kwargs): """ Returns all the subsequent folders under the folder id passed. """ folder_id = request.args.get('folder_id') return node_addon.get_folders(folder_id=folder_id) dropbox_get_config = generic_views.get_config( SHORT_NAME, DropboxSerializer ) def _set_folder(node_addon, folder, auth): uid = folder['id'] node_addon.set_folder(uid, auth=auth) node_addon.save() dropbox_set_config = generic_views.set_config( SHORT_NAME, FULL_NAME, DropboxSerializer, _set_folder ) dropbox_deauthorize_node = generic_views.deauthorize_node( SHORT_NAME )
icereval/osf.io
addons/dropbox/views.py
Python
apache-2.0
1,287
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # vim: tabstop=4 shiftwidth=4 softtabstop=4 import unittest import inspect import logging from struct import pack, unpack_from, pack_into from nose.tools import ok_, eq_, raises from ryu.ofproto import ether from ryu.ofproto import inet from ryu.lib.packet.ethernet import ethernet from ryu.lib.packet.ipv4 import ipv4 from ryu.lib.packet.packet import Packet from ryu.lib.packet.packet_utils import checksum from ryu.lib import addrconv from ryu.lib.packet.igmp import igmp from ryu.lib.packet.igmp import igmpv3_query from ryu.lib.packet.igmp import igmpv3_report from ryu.lib.packet.igmp import igmpv3_report_group from ryu.lib.packet.igmp import IGMP_TYPE_QUERY from ryu.lib.packet.igmp import IGMP_TYPE_REPORT_V3 from ryu.lib.packet.igmp import MODE_IS_INCLUDE LOG = logging.getLogger(__name__) class Test_igmp(unittest.TestCase): """ Test case for Internet Group Management Protocol """ def setUp(self): self.msgtype = IGMP_TYPE_QUERY self.maxresp = 100 self.csum = 0 self.address = '225.0.0.1' self.buf = pack(igmp._PACK_STR, self.msgtype, self.maxresp, self.csum, addrconv.ipv4.text_to_bin(self.address)) self.g = igmp(self.msgtype, self.maxresp, self.csum, self.address) def tearDown(self): pass def find_protocol(self, pkt, name): for p in pkt.protocols: if p.protocol_name == name: return p def test_init(self): eq_(self.msgtype, self.g.msgtype) eq_(self.maxresp, self.g.maxresp) eq_(self.csum, self.g.csum) eq_(self.address, self.g.address) def test_parser(self): _res = self.g.parser(self.buf) if type(_res) is tuple: res = _res[0] else: res = _res eq_(res.msgtype, self.msgtype) eq_(res.maxresp, self.maxresp) eq_(res.csum, self.csum) eq_(res.address, self.address) def test_serialize(self): data = bytearray() prev = None buf = self.g.serialize(data, prev) res = unpack_from(igmp._PACK_STR, buffer(buf)) eq_(res[0], self.msgtype) eq_(res[1], self.maxresp) eq_(res[2], checksum(self.buf)) eq_(res[3], addrconv.ipv4.text_to_bin(self.address)) def _build_igmp(self): dl_dst = '11:22:33:44:55:66' dl_src = 'aa:bb:cc:dd:ee:ff' dl_type = ether.ETH_TYPE_IP e = ethernet(dl_dst, dl_src, dl_type) total_length = 20 + igmp._MIN_LEN nw_proto = inet.IPPROTO_IGMP nw_dst = '11.22.33.44' nw_src = '55.66.77.88' i = ipv4(total_length=total_length, src=nw_src, dst=nw_dst, proto=nw_proto) p = Packet() p.add_protocol(e) p.add_protocol(i) p.add_protocol(self.g) p.serialize() return p def test_build_igmp(self): p = self._build_igmp() e = self.find_protocol(p, "ethernet") ok_(e) eq_(e.ethertype, ether.ETH_TYPE_IP) i = self.find_protocol(p, "ipv4") ok_(i) eq_(i.proto, inet.IPPROTO_IGMP) g = self.find_protocol(p, "igmp") ok_(g) eq_(g.msgtype, self.msgtype) eq_(g.maxresp, self.maxresp) eq_(g.csum, checksum(self.buf)) eq_(g.address, self.address) def test_to_string(self): igmp_values = {'msgtype': repr(self.msgtype), 'maxresp': repr(self.maxresp), 'csum': repr(self.csum), 'address': repr(self.address)} _g_str = ','.join(['%s=%s' % (k, 
igmp_values[k]) for k, v in inspect.getmembers(self.g) if k in igmp_values]) g_str = '%s(%s)' % (igmp.__name__, _g_str) eq_(str(self.g), g_str) eq_(repr(self.g), g_str) @raises(Exception) def test_malformed_igmp(self): m_short_buf = self.buf[1:igmp._MIN_LEN] igmp.parser(m_short_buf) def test_default_args(self): ig = igmp() buf = ig.serialize(bytearray(), None) res = unpack_from(igmp._PACK_STR, str(buf)) eq_(res[0], 0x11) eq_(res[1], 0) eq_(res[3], addrconv.ipv4.text_to_bin('0.0.0.0')) def test_json(self): jsondict = self.g.to_jsondict() g = igmp.from_jsondict(jsondict['igmp']) eq_(str(self.g), str(g)) class Test_igmpv3_query(unittest.TestCase): """ Test case for Internet Group Management Protocol v3 Membership Query Message""" def setUp(self): self.msgtype = IGMP_TYPE_QUERY self.maxresp = 100 self.csum = 0 self.address = '225.0.0.1' self.s_flg = 0 self.qrv = 2 self.qqic = 10 self.num = 0 self.srcs = [] self.s_qrv = self.s_flg << 3 | self.qrv self.buf = pack(igmpv3_query._PACK_STR, self.msgtype, self.maxresp, self.csum, addrconv.ipv4.text_to_bin(self.address), self.s_qrv, self.qqic, self.num) self.g = igmpv3_query( self.msgtype, self.maxresp, self.csum, self.address, self.s_flg, self.qrv, self.qqic, self.num, self.srcs) def setUp_with_srcs(self): self.srcs = ['192.168.1.1', '192.168.1.2', '192.168.1.3'] self.num = len(self.srcs) self.buf = pack(igmpv3_query._PACK_STR, self.msgtype, self.maxresp, self.csum, addrconv.ipv4.text_to_bin(self.address), self.s_qrv, self.qqic, self.num) for src in self.srcs: self.buf += pack('4s', addrconv.ipv4.text_to_bin(src)) self.g = igmpv3_query( self.msgtype, self.maxresp, self.csum, self.address, self.s_flg, self.qrv, self.qqic, self.num, self.srcs) def tearDown(self): pass def find_protocol(self, pkt, name): for p in pkt.protocols: if p.protocol_name == name: return p def test_init(self): eq_(self.msgtype, self.g.msgtype) eq_(self.maxresp, self.g.maxresp) eq_(self.csum, self.g.csum) eq_(self.address, self.g.address) eq_(self.s_flg, self.g.s_flg) eq_(self.qrv, self.g.qrv) eq_(self.qqic, self.g.qqic) eq_(self.num, self.g.num) eq_(self.srcs, self.g.srcs) def test_init_with_srcs(self): self.setUp_with_srcs() self.test_init() def test_parser(self): _res = self.g.parser(self.buf) if type(_res) is tuple: res = _res[0] else: res = _res eq_(res.msgtype, self.msgtype) eq_(res.maxresp, self.maxresp) eq_(res.csum, self.csum) eq_(res.address, self.address) eq_(res.s_flg, self.s_flg) eq_(res.qrv, self.qrv) eq_(res.qqic, self.qqic) eq_(res.num, self.num) eq_(res.srcs, self.srcs) def test_parser_with_srcs(self): self.setUp_with_srcs() self.test_parser() def test_serialize(self): data = bytearray() prev = None buf = self.g.serialize(data, prev) res = unpack_from(igmpv3_query._PACK_STR, buffer(buf)) eq_(res[0], self.msgtype) eq_(res[1], self.maxresp) eq_(res[2], checksum(self.buf)) eq_(res[3], addrconv.ipv4.text_to_bin(self.address)) eq_(res[4], self.s_qrv) eq_(res[5], self.qqic) eq_(res[6], self.num) def test_serialize_with_srcs(self): self.setUp_with_srcs() data = bytearray() prev = None buf = self.g.serialize(data, prev) res = unpack_from(igmpv3_query._PACK_STR, buffer(buf)) (src1, src2, src3) = unpack_from('4s4s4s', buffer(buf), igmpv3_query._MIN_LEN) eq_(res[0], self.msgtype) eq_(res[1], self.maxresp) eq_(res[2], checksum(self.buf)) eq_(res[3], addrconv.ipv4.text_to_bin(self.address)) eq_(res[4], self.s_qrv) eq_(res[5], self.qqic) eq_(res[6], self.num) eq_(src1, addrconv.ipv4.text_to_bin(self.srcs[0])) eq_(src2, addrconv.ipv4.text_to_bin(self.srcs[1])) eq_(src3, 
addrconv.ipv4.text_to_bin(self.srcs[2])) def _build_igmp(self): dl_dst = '11:22:33:44:55:66' dl_src = 'aa:bb:cc:dd:ee:ff' dl_type = ether.ETH_TYPE_IP e = ethernet(dl_dst, dl_src, dl_type) total_length = len(ipv4()) + len(self.g) nw_proto = inet.IPPROTO_IGMP nw_dst = '11.22.33.44' nw_src = '55.66.77.88' i = ipv4(total_length=total_length, src=nw_src, dst=nw_dst, proto=nw_proto, ttl=1) p = Packet() p.add_protocol(e) p.add_protocol(i) p.add_protocol(self.g) p.serialize() return p def test_build_igmp(self): p = self._build_igmp() e = self.find_protocol(p, "ethernet") ok_(e) eq_(e.ethertype, ether.ETH_TYPE_IP) i = self.find_protocol(p, "ipv4") ok_(i) eq_(i.proto, inet.IPPROTO_IGMP) g = self.find_protocol(p, "igmpv3_query") ok_(g) eq_(g.msgtype, self.msgtype) eq_(g.maxresp, self.maxresp) eq_(g.csum, checksum(self.buf)) eq_(g.address, self.address) eq_(g.s_flg, self.s_flg) eq_(g.qrv, self.qrv) eq_(g.qqic, self.qqic) eq_(g.num, self.num) eq_(g.srcs, self.srcs) def test_build_igmp_with_srcs(self): self.setUp_with_srcs() self.test_build_igmp() def test_to_string(self): igmp_values = {'msgtype': repr(self.msgtype), 'maxresp': repr(self.maxresp), 'csum': repr(self.csum), 'address': repr(self.address), 's_flg': repr(self.s_flg), 'qrv': repr(self.qrv), 'qqic': repr(self.qqic), 'num': repr(self.num), 'srcs': repr(self.srcs)} _g_str = ','.join(['%s=%s' % (k, igmp_values[k]) for k, v in inspect.getmembers(self.g) if k in igmp_values]) g_str = '%s(%s)' % (igmpv3_query.__name__, _g_str) eq_(str(self.g), g_str) eq_(repr(self.g), g_str) def test_to_string_with_srcs(self): self.setUp_with_srcs() self.test_to_string() @raises(Exception) def test_num_larger_than_srcs(self): self.srcs = ['192.168.1.1', '192.168.1.2', '192.168.1.3'] self.num = len(self.srcs) + 1 self.buf = pack(igmpv3_query._PACK_STR, self.msgtype, self.maxresp, self.csum, addrconv.ipv4.text_to_bin(self.address), self.s_qrv, self.qqic, self.num) for src in self.srcs: self.buf += pack('4s', addrconv.ipv4.text_to_bin(src)) self.g = igmpv3_query( self.msgtype, self.maxresp, self.csum, self.address, self.s_flg, self.qrv, self.qqic, self.num, self.srcs) self.test_parser() @raises(Exception) def test_num_smaller_than_srcs(self): self.srcs = ['192.168.1.1', '192.168.1.2', '192.168.1.3'] self.num = len(self.srcs) - 1 self.buf = pack(igmpv3_query._PACK_STR, self.msgtype, self.maxresp, self.csum, addrconv.ipv4.text_to_bin(self.address), self.s_qrv, self.qqic, self.num) for src in self.srcs: self.buf += pack('4s', addrconv.ipv4.text_to_bin(src)) self.g = igmpv3_query( self.msgtype, self.maxresp, self.csum, self.address, self.s_flg, self.qrv, self.qqic, self.num, self.srcs) self.test_parser() def test_default_args(self): prev = ipv4(proto=inet.IPPROTO_IGMP) g = igmpv3_query() prev.serialize(g, None) buf = g.serialize(bytearray(), prev) res = unpack_from(igmpv3_query._PACK_STR, str(buf)) buf = bytearray(buf) pack_into('!H', buf, 2, 0) buf = str(buf) eq_(res[0], IGMP_TYPE_QUERY) eq_(res[1], 100) eq_(res[2], checksum(buf)) eq_(res[3], addrconv.ipv4.text_to_bin('0.0.0.0')) eq_(res[4], 2) eq_(res[5], 0) eq_(res[6], 0) # srcs without num prev = ipv4(proto=inet.IPPROTO_IGMP) srcs = ['192.168.1.1', '192.168.1.2', '192.168.1.3'] g = igmpv3_query(srcs=srcs) prev.serialize(g, None) buf = g.serialize(bytearray(), prev) res = unpack_from(igmpv3_query._PACK_STR, str(buf)) buf = bytearray(buf) pack_into('!H', buf, 2, 0) buf = str(buf) eq_(res[0], IGMP_TYPE_QUERY) eq_(res[1], 100) eq_(res[2], checksum(buf)) eq_(res[3], addrconv.ipv4.text_to_bin('0.0.0.0')) eq_(res[4], 2) 
eq_(res[5], 0) eq_(res[6], len(srcs)) res = unpack_from('4s4s4s', str(buf), igmpv3_query._MIN_LEN) eq_(res[0], addrconv.ipv4.text_to_bin(srcs[0])) eq_(res[1], addrconv.ipv4.text_to_bin(srcs[1])) eq_(res[2], addrconv.ipv4.text_to_bin(srcs[2])) def test_json(self): jsondict = self.g.to_jsondict() g = igmpv3_query.from_jsondict(jsondict['igmpv3_query']) eq_(str(self.g), str(g)) def test_json_with_srcs(self): self.setUp_with_srcs() self.test_json() class Test_igmpv3_report(unittest.TestCase): """ Test case for Internet Group Management Protocol v3 Membership Report Message""" def setUp(self): self.msgtype = IGMP_TYPE_REPORT_V3 self.csum = 0 self.record_num = 0 self.records = [] self.buf = pack(igmpv3_report._PACK_STR, self.msgtype, self.csum, self.record_num) self.g = igmpv3_report( self.msgtype, self.csum, self.record_num, self.records) def setUp_with_records(self): self.record1 = igmpv3_report_group( MODE_IS_INCLUDE, 0, 0, '225.0.0.1') self.record2 = igmpv3_report_group( MODE_IS_INCLUDE, 0, 2, '225.0.0.2', ['172.16.10.10', '172.16.10.27']) self.record3 = igmpv3_report_group( MODE_IS_INCLUDE, 1, 0, '225.0.0.3', [], 'abc\x00') self.record4 = igmpv3_report_group( MODE_IS_INCLUDE, 2, 2, '225.0.0.4', ['172.16.10.10', '172.16.10.27'], 'abcde\x00\x00\x00') self.records = [self.record1, self.record2, self.record3, self.record4] self.record_num = len(self.records) self.buf = pack(igmpv3_report._PACK_STR, self.msgtype, self.csum, self.record_num) self.buf += self.record1.serialize() self.buf += self.record2.serialize() self.buf += self.record3.serialize() self.buf += self.record4.serialize() self.g = igmpv3_report( self.msgtype, self.csum, self.record_num, self.records) def tearDown(self): pass def find_protocol(self, pkt, name): for p in pkt.protocols: if p.protocol_name == name: return p def test_init(self): eq_(self.msgtype, self.g.msgtype) eq_(self.csum, self.g.csum) eq_(self.record_num, self.g.record_num) eq_(self.records, self.g.records) def test_init_with_records(self): self.setUp_with_records() self.test_init() def test_parser(self): _res = self.g.parser(str(self.buf)) if type(_res) is tuple: res = _res[0] else: res = _res eq_(res.msgtype, self.msgtype) eq_(res.csum, self.csum) eq_(res.record_num, self.record_num) eq_(repr(res.records), repr(self.records)) def test_parser_with_records(self): self.setUp_with_records() self.test_parser() def test_serialize(self): data = bytearray() prev = None buf = self.g.serialize(data, prev) res = unpack_from(igmpv3_report._PACK_STR, buffer(buf)) eq_(res[0], self.msgtype) eq_(res[1], checksum(self.buf)) eq_(res[2], self.record_num) def test_serialize_with_records(self): self.setUp_with_records() data = bytearray() prev = None buf = self.g.serialize(data, prev) res = unpack_from(igmpv3_report._PACK_STR, buffer(buf)) offset = igmpv3_report._MIN_LEN rec1 = igmpv3_report_group.parser(buffer(buf[offset:])) offset += len(rec1) rec2 = igmpv3_report_group.parser(buffer(buf[offset:])) offset += len(rec2) rec3 = igmpv3_report_group.parser(buffer(buf[offset:])) offset += len(rec3) rec4 = igmpv3_report_group.parser(buffer(buf[offset:])) eq_(res[0], self.msgtype) eq_(res[1], checksum(self.buf)) eq_(res[2], self.record_num) eq_(repr(rec1), repr(self.record1)) eq_(repr(rec2), repr(self.record2)) eq_(repr(rec3), repr(self.record3)) eq_(repr(rec4), repr(self.record4)) def _build_igmp(self): dl_dst = '11:22:33:44:55:66' dl_src = 'aa:bb:cc:dd:ee:ff' dl_type = ether.ETH_TYPE_IP e = ethernet(dl_dst, dl_src, dl_type) total_length = len(ipv4()) + len(self.g) nw_proto = 
inet.IPPROTO_IGMP nw_dst = '11.22.33.44' nw_src = '55.66.77.88' i = ipv4(total_length=total_length, src=nw_src, dst=nw_dst, proto=nw_proto, ttl=1) p = Packet() p.add_protocol(e) p.add_protocol(i) p.add_protocol(self.g) p.serialize() return p def test_build_igmp(self): p = self._build_igmp() e = self.find_protocol(p, "ethernet") ok_(e) eq_(e.ethertype, ether.ETH_TYPE_IP) i = self.find_protocol(p, "ipv4") ok_(i) eq_(i.proto, inet.IPPROTO_IGMP) g = self.find_protocol(p, "igmpv3_report") ok_(g) eq_(g.msgtype, self.msgtype) eq_(g.csum, checksum(self.buf)) eq_(g.record_num, self.record_num) eq_(g.records, self.records) def test_build_igmp_with_records(self): self.setUp_with_records() self.test_build_igmp() def test_to_string(self): igmp_values = {'msgtype': repr(self.msgtype), 'csum': repr(self.csum), 'record_num': repr(self.record_num), 'records': repr(self.records)} _g_str = ','.join(['%s=%s' % (k, igmp_values[k]) for k, v in inspect.getmembers(self.g) if k in igmp_values]) g_str = '%s(%s)' % (igmpv3_report.__name__, _g_str) eq_(str(self.g), g_str) eq_(repr(self.g), g_str) def test_to_string_with_records(self): self.setUp_with_records() self.test_to_string() @raises(Exception) def test_record_num_larger_than_records(self): self.record1 = igmpv3_report_group( MODE_IS_INCLUDE, 0, 0, '225.0.0.1') self.record2 = igmpv3_report_group( MODE_IS_INCLUDE, 0, 2, '225.0.0.2', ['172.16.10.10', '172.16.10.27']) self.record3 = igmpv3_report_group( MODE_IS_INCLUDE, 1, 0, '225.0.0.3', [], 'abc\x00') self.record4 = igmpv3_report_group( MODE_IS_INCLUDE, 1, 2, '225.0.0.4', ['172.16.10.10', '172.16.10.27'], 'abc\x00') self.records = [self.record1, self.record2, self.record3, self.record4] self.record_num = len(self.records) + 1 self.buf = pack(igmpv3_report._PACK_STR, self.msgtype, self.csum, self.record_num) self.buf += self.record1.serialize() self.buf += self.record2.serialize() self.buf += self.record3.serialize() self.buf += self.record4.serialize() self.g = igmpv3_report( self.msgtype, self.csum, self.record_num, self.records) self.test_parser() @raises(Exception) def test_record_num_smaller_than_records(self): self.record1 = igmpv3_report_group( MODE_IS_INCLUDE, 0, 0, '225.0.0.1') self.record2 = igmpv3_report_group( MODE_IS_INCLUDE, 0, 2, '225.0.0.2', ['172.16.10.10', '172.16.10.27']) self.record3 = igmpv3_report_group( MODE_IS_INCLUDE, 1, 0, '225.0.0.3', [], 'abc\x00') self.record4 = igmpv3_report_group( MODE_IS_INCLUDE, 1, 2, '225.0.0.4', ['172.16.10.10', '172.16.10.27'], 'abc\x00') self.records = [self.record1, self.record2, self.record3, self.record4] self.record_num = len(self.records) - 1 self.buf = pack(igmpv3_report._PACK_STR, self.msgtype, self.csum, self.record_num) self.buf += self.record1.serialize() self.buf += self.record2.serialize() self.buf += self.record3.serialize() self.buf += self.record4.serialize() self.g = igmpv3_report( self.msgtype, self.csum, self.record_num, self.records) self.test_parser() def test_default_args(self): prev = ipv4(proto=inet.IPPROTO_IGMP) g = igmpv3_report() prev.serialize(g, None) buf = g.serialize(bytearray(), prev) res = unpack_from(igmpv3_report._PACK_STR, str(buf)) buf = bytearray(buf) pack_into('!H', buf, 2, 0) buf = str(buf) eq_(res[0], IGMP_TYPE_REPORT_V3) eq_(res[1], checksum(buf)) eq_(res[2], 0) # records without record_num prev = ipv4(proto=inet.IPPROTO_IGMP) record1 = igmpv3_report_group( MODE_IS_INCLUDE, 0, 0, '225.0.0.1') record2 = igmpv3_report_group( MODE_IS_INCLUDE, 0, 2, '225.0.0.2', ['172.16.10.10', '172.16.10.27']) record3 = 
igmpv3_report_group( MODE_IS_INCLUDE, 1, 0, '225.0.0.3', [], 'abc\x00') record4 = igmpv3_report_group( MODE_IS_INCLUDE, 1, 2, '225.0.0.4', ['172.16.10.10', '172.16.10.27'], 'abc\x00') records = [record1, record2, record3, record4] g = igmpv3_report(records=records) prev.serialize(g, None) buf = g.serialize(bytearray(), prev) res = unpack_from(igmpv3_report._PACK_STR, str(buf)) buf = bytearray(buf) pack_into('!H', buf, 2, 0) buf = str(buf) eq_(res[0], IGMP_TYPE_REPORT_V3) eq_(res[1], checksum(buf)) eq_(res[2], len(records)) def test_json(self): jsondict = self.g.to_jsondict() g = igmpv3_report.from_jsondict(jsondict['igmpv3_report']) eq_(str(self.g), str(g)) def test_json_with_records(self): self.setUp_with_records() self.test_json() class Test_igmpv3_report_group(unittest.TestCase): """Test case for Group Records of Internet Group Management Protocol v3 Membership Report Message""" def setUp(self): self.type_ = MODE_IS_INCLUDE self.aux_len = 0 self.num = 0 self.address = '225.0.0.1' self.srcs = [] self.aux = None self.buf = pack(igmpv3_report_group._PACK_STR, self.type_, self.aux_len, self.num, addrconv.ipv4.text_to_bin(self.address)) self.g = igmpv3_report_group( self.type_, self.aux_len, self.num, self.address, self.srcs, self.aux) def setUp_with_srcs(self): self.srcs = ['192.168.1.1', '192.168.1.2', '192.168.1.3'] self.num = len(self.srcs) self.buf = pack(igmpv3_report_group._PACK_STR, self.type_, self.aux_len, self.num, addrconv.ipv4.text_to_bin(self.address)) for src in self.srcs: self.buf += pack('4s', addrconv.ipv4.text_to_bin(src)) self.g = igmpv3_report_group( self.type_, self.aux_len, self.num, self.address, self.srcs, self.aux) def setUp_with_aux(self): self.aux = '\x01\x02\x03\x04\x05\x00\x00\x00' self.aux_len = len(self.aux) / 4 self.buf = pack(igmpv3_report_group._PACK_STR, self.type_, self.aux_len, self.num, addrconv.ipv4.text_to_bin(self.address)) self.buf += self.aux self.g = igmpv3_report_group( self.type_, self.aux_len, self.num, self.address, self.srcs, self.aux) def setUp_with_srcs_and_aux(self): self.srcs = ['192.168.1.1', '192.168.1.2', '192.168.1.3'] self.num = len(self.srcs) self.aux = '\x01\x02\x03\x04\x05\x00\x00\x00' self.aux_len = len(self.aux) / 4 self.buf = pack(igmpv3_report_group._PACK_STR, self.type_, self.aux_len, self.num, addrconv.ipv4.text_to_bin(self.address)) for src in self.srcs: self.buf += pack('4s', addrconv.ipv4.text_to_bin(src)) self.buf += self.aux self.g = igmpv3_report_group( self.type_, self.aux_len, self.num, self.address, self.srcs, self.aux) def tearDown(self): pass def test_init(self): eq_(self.type_, self.g.type_) eq_(self.aux_len, self.g.aux_len) eq_(self.num, self.g.num) eq_(self.address, self.g.address) eq_(self.srcs, self.g.srcs) eq_(self.aux, self.g.aux) def test_init_with_srcs(self): self.setUp_with_srcs() self.test_init() def test_init_with_aux(self): self.setUp_with_aux() self.test_init() def test_init_with_srcs_and_aux(self): self.setUp_with_srcs_and_aux() self.test_init() def test_parser(self): _res = self.g.parser(self.buf) if type(_res) is tuple: res = _res[0] else: res = _res eq_(res.type_, self.type_) eq_(res.aux_len, self.aux_len) eq_(res.num, self.num) eq_(res.address, self.address) eq_(res.srcs, self.srcs) eq_(res.aux, self.aux) def test_parser_with_srcs(self): self.setUp_with_srcs() self.test_parser() def test_parser_with_aux(self): self.setUp_with_aux() self.test_parser() def test_parser_with_srcs_and_aux(self): self.setUp_with_srcs_and_aux() self.test_parser() def test_serialize(self): buf = self.g.serialize() res = 
unpack_from(igmpv3_report_group._PACK_STR, buffer(buf)) eq_(res[0], self.type_) eq_(res[1], self.aux_len) eq_(res[2], self.num) eq_(res[3], addrconv.ipv4.text_to_bin(self.address)) def test_serialize_with_srcs(self): self.setUp_with_srcs() buf = self.g.serialize() res = unpack_from(igmpv3_report_group._PACK_STR, buffer(buf)) (src1, src2, src3) = unpack_from('4s4s4s', buffer(buf), igmpv3_report_group._MIN_LEN) eq_(res[0], self.type_) eq_(res[1], self.aux_len) eq_(res[2], self.num) eq_(res[3], addrconv.ipv4.text_to_bin(self.address)) eq_(src1, addrconv.ipv4.text_to_bin(self.srcs[0])) eq_(src2, addrconv.ipv4.text_to_bin(self.srcs[1])) eq_(src3, addrconv.ipv4.text_to_bin(self.srcs[2])) def test_serialize_with_aux(self): self.setUp_with_aux() buf = self.g.serialize() res = unpack_from(igmpv3_report_group._PACK_STR, buffer(buf)) (aux, ) = unpack_from('%ds' % (self.aux_len * 4), buffer(buf), igmpv3_report_group._MIN_LEN) eq_(res[0], self.type_) eq_(res[1], self.aux_len) eq_(res[2], self.num) eq_(res[3], addrconv.ipv4.text_to_bin(self.address)) eq_(aux, self.aux) def test_serialize_with_srcs_and_aux(self): self.setUp_with_srcs_and_aux() buf = self.g.serialize() res = unpack_from(igmpv3_report_group._PACK_STR, buffer(buf)) (src1, src2, src3) = unpack_from('4s4s4s', buffer(buf), igmpv3_report_group._MIN_LEN) (aux, ) = unpack_from('%ds' % (self.aux_len * 4), buffer(buf), igmpv3_report_group._MIN_LEN + 12) eq_(res[0], self.type_) eq_(res[1], self.aux_len) eq_(res[2], self.num) eq_(res[3], addrconv.ipv4.text_to_bin(self.address)) eq_(src1, addrconv.ipv4.text_to_bin(self.srcs[0])) eq_(src2, addrconv.ipv4.text_to_bin(self.srcs[1])) eq_(src3, addrconv.ipv4.text_to_bin(self.srcs[2])) eq_(aux, self.aux) def test_to_string(self): igmp_values = {'type_': repr(self.type_), 'aux_len': repr(self.aux_len), 'num': repr(self.num), 'address': repr(self.address), 'srcs': repr(self.srcs), 'aux': repr(self.aux)} _g_str = ','.join(['%s=%s' % (k, igmp_values[k]) for k, v in inspect.getmembers(self.g) if k in igmp_values]) g_str = '%s(%s)' % (igmpv3_report_group.__name__, _g_str) eq_(str(self.g), g_str) eq_(repr(self.g), g_str) def test_to_string_with_srcs(self): self.setUp_with_srcs() self.test_to_string() def test_to_string_with_aux(self): self.setUp_with_aux() self.test_to_string() def test_to_string_with_srcs_and_aux(self): self.setUp_with_srcs_and_aux() self.test_to_string() def test_len(self): eq_(len(self.g), 8) def test_len_with_srcs(self): self.setUp_with_srcs() eq_(len(self.g), 20) def test_len_with_aux(self): self.setUp_with_aux() eq_(len(self.g), 16) def test_len_with_srcs_and_aux(self): self.setUp_with_srcs_and_aux() eq_(len(self.g), 28) @raises def test_num_larger_than_srcs(self): self.srcs = ['192.168.1.1', '192.168.1.2', '192.168.1.3'] self.num = len(self.srcs) + 1 self.buf = pack(igmpv3_report_group._PACK_STR, self.type_, self.aux_len, self.num, addrconv.ipv4.text_to_bin(self.address)) for src in self.srcs: self.buf += pack('4s', addrconv.ipv4.text_to_bin(src)) self.g = igmpv3_report_group( self.type_, self.aux_len, self.num, self.address, self.srcs, self.aux) self.test_parser() @raises def test_num_smaller_than_srcs(self): self.srcs = ['192.168.1.1', '192.168.1.2', '192.168.1.3'] self.num = len(self.srcs) - 1 self.buf = pack(igmpv3_report_group._PACK_STR, self.type_, self.aux_len, self.num, addrconv.ipv4.text_to_bin(self.address)) for src in self.srcs: self.buf += pack('4s', addrconv.ipv4.text_to_bin(src)) self.g = igmpv3_report_group( self.type_, self.aux_len, self.num, self.address, self.srcs, 
self.aux) self.test_parser() @raises def test_aux_len_larger_than_aux(self): self.aux = '\x01\x02\x03\x04\x05\x00\x00\x00' self.aux_len = len(self.aux) / 4 + 1 self.buf = pack(igmpv3_report_group._PACK_STR, self.type_, self.aux_len, self.num, addrconv.ipv4.text_to_bin(self.address)) self.buf += self.aux self.g = igmpv3_report_group( self.type_, self.aux_len, self.num, self.address, self.srcs, self.aux) self.test_parser() @raises def test_aux_len_smaller_than_aux(self): self.aux = '\x01\x02\x03\x04\x05\x00\x00\x00' self.aux_len = len(self.aux) / 4 - 1 self.buf = pack(igmpv3_report_group._PACK_STR, self.type_, self.aux_len, self.num, addrconv.ipv4.text_to_bin(self.address)) self.buf += self.aux self.g = igmpv3_report_group( self.type_, self.aux_len, self.num, self.address, self.srcs, self.aux) self.test_parser() def test_default_args(self): rep = igmpv3_report_group() buf = rep.serialize() res = unpack_from(igmpv3_report_group._PACK_STR, str(buf)) eq_(res[0], 0) eq_(res[1], 0) eq_(res[2], 0) eq_(res[3], addrconv.ipv4.text_to_bin('0.0.0.0')) # srcs without num srcs = ['192.168.1.1', '192.168.1.2', '192.168.1.3'] rep = igmpv3_report_group(srcs=srcs) buf = rep.serialize() res = unpack_from(igmpv3_report_group._PACK_STR, str(buf)) eq_(res[0], 0) eq_(res[1], 0) eq_(res[2], len(srcs)) eq_(res[3], addrconv.ipv4.text_to_bin('0.0.0.0')) res = unpack_from('4s4s4s', str(buf), igmpv3_report_group._MIN_LEN) eq_(res[0], addrconv.ipv4.text_to_bin(srcs[0])) eq_(res[1], addrconv.ipv4.text_to_bin(srcs[1])) eq_(res[2], addrconv.ipv4.text_to_bin(srcs[2])) # aux without aux_len aux = 'abcde' rep = igmpv3_report_group(aux=aux) buf = rep.serialize() res = unpack_from(igmpv3_report_group._PACK_STR, str(buf)) eq_(res[0], 0) eq_(res[1], 2) eq_(res[2], 0) eq_(res[3], addrconv.ipv4.text_to_bin('0.0.0.0')) eq_(buf[igmpv3_report_group._MIN_LEN:], 'abcde\x00\x00\x00')
jalilm/ryu
ryu/tests/unit/packet/test_igmp.py
Python
apache-2.0
34,411
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2011-2015, Numenta, Inc.  Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program.  If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------

## This file defines parameters for a prediction experiment.

###############################################################################
# IMPORTANT!!!
# This params file is dynamically generated by the RunExperimentPermutations
# script. Any changes made manually will be over-written the next time
# RunExperimentPermutations is run!!!
###############################################################################

from nupic.frameworks.opf.exp_description_helpers import importBaseDescription

# the sub-experiment configuration
config = {
    'modelParams': {
        'sensorParams': {
            'encoders': {
                u'c0_timeOfDay': None,
                u'c0_dayOfWeek': None,
                u'c1': {
                    'name': 'c1',
                    'clipInput': True,
                    'n': 275,
                    'fieldname': 'c1',
                    'w': 21,
                    'type': 'AdaptiveScalarEncoder'
                },
                u'c0_weekend': None
            }
        },
        'spParams': {'synPermInactiveDec': 0.052500000000000005},
        'tmParams': {'minThreshold': 11, 'activationThreshold': 14, 'pamLength': 3},
        'clParams': {'alpha': 0.050050000000000004}
    },
    'firstRecord': 0,
    'lastRecord': 500,
}

mod = importBaseDescription('../base.py', config)
locals().update(mod.__dict__)
ywcui1990/nupic
tests/integration/nupic/opf/opf_checkpoint_test/experiments/temporal_multi_step/a_plus_b/description.py
Python
agpl-3.0
2,100
import re from itertools import islice #from geopy import util, units, format import util, units, format class Point(object): """ A geodetic point with latitude, longitude, and altitude. Latitude and longitude are floating point values in degrees. Altitude is a floating point value in kilometers. The reference level is never considered and is thus application dependent, so be consistent! The default for all values is 0. Points can be created in a number of ways... With longitude, latitude, and altitude: >>> p1 = Point(41.5, -81, 0) >>> p2 = Point(latitude=41.5, longitude=-81) With a sequence of 0 to 3 values (longitude, latitude, altitude): >>> p1 = Point([41.5, -81, 0]) >>> p2 = Point((41.5, -81)) Copy another `Point` instance: >>> p2 = Point(p1) >>> p2 == p1 True >>> p2 is p1 False Give an object with a 'point' attribute, such as a `Location` instance: >>> p = Point(location) Give a string containing at least latitude and longitude: >>> p1 = Point('41.5,-81.0') >>> p2 = Point('41.5 N -81.0 W') >>> p3 = Point('-41.5 S, 81.0 E, 2.5km') >>> p4 = Point('23 26m 22s N 23 27m 30s E 21.0mi') >>> p5 = Point('''3 26' 22" N 23 27' 30" E''') Point values can be accessed by name or by index: >>> p = Point(41.5, -81.0, 0) >>> p.latitude == p[0] True >>> p.longitude == p[1] True >>> p.altitude == p[2] True When unpacking (or iterating), a (latitude, longitude, altitude) tuple is returned >>> latitude, longitude, altitude = p """ UTIL_PATTERNS = dict( FLOAT=r'\d+(?:\.\d+)?', DEGREE=format.DEGREE, PRIME=format.PRIME, DOUBLE_PRIME=format.DOUBLE_PRIME, SEP=r'\s*[,;\s]\s*' ) POINT_PATTERN = re.compile(r""" \s* (?P<latitude> (?P<latitude_degrees>-?%(FLOAT)s)(?:[%(DEGREE)s ][ ]* (?:(?P<latitude_arcminutes>%(FLOAT)s)[%(PRIME)s'm][ ]*)? (?:(?P<latitude_arcseconds>%(FLOAT)s)[%(DOUBLE_PRIME)s"s][ ]*)? )?(?P<latitude_direction>[NS])?) %(SEP)s (?P<longitude> (?P<longitude_degrees>-?%(FLOAT)s)(?:[%(DEGREE)s\s][ ]* (?:(?P<longitude_arcminutes>%(FLOAT)s)[%(PRIME)s'm][ ]*)? (?:(?P<longitude_arcseconds>%(FLOAT)s)[%(DOUBLE_PRIME)s"s][ ]*)? )?(?P<longitude_direction>[EW])?)(?: %(SEP)s (?P<altitude> (?P<altitude_distance>-?%(FLOAT)s)[ ]* (?P<altitude_units>km|m|mi|ft|nm|nmi)))? \s*$ """ % UTIL_PATTERNS, re.X) def __new__(cls, latitude=None, longitude=None, altitude=None): single_arg = longitude is None and altitude is None if single_arg and not isinstance(latitude, util.NUMBER_TYPES): arg = latitude if arg is None: pass elif isinstance(arg, Point): return cls.from_point(arg) elif isinstance(arg, basestring): return cls.from_string(arg) else: try: seq = iter(arg) except TypeError: raise TypeError( "Failed to create Point instance from %r." 
% (arg,) ) else: return cls.from_sequence(seq) latitude = float(latitude or 0) if abs(latitude) > 90: latitude = ((latitude + 90) % 180) - 90 longitude = float(longitude or 0) if abs(longitude) > 180: longitude = ((longitude + 180) % 360) - 180 altitude = float(altitude or 0) self = super(Point, cls).__new__(cls) self.latitude = latitude self.longitude = longitude self.altitude = altitude return self def __getitem__(self, index): return (self.latitude, self.longitude, self.altitude)[index] def __setitem__(self, index, value): point = [self.latitude, self.longitude, self.altitude] point[index] = value self.latitude, self.longitude, self.altitude = point def __iter__(self): return iter((self.latitude, self.longitude, self.altitude)) def __repr__(self): return "Point(%r, %r, %r)" % ( self.latitude, self.longitude, self.altitude ) def format(self, altitude=None, deg_char='', min_char='m', sec_char='s'): latitude = "%s %s" % ( format.format_degrees(abs(self.latitude), symbols = {'deg': deg_char, 'arcmin': min_char, 'arcsec': sec_char}), self.latitude >= 0 and 'N' or 'S' ) longitude = "%s %s" % ( format.format_degrees(abs(self.longitude), symbols = {'deg': deg_char, 'arcmin': min_char, 'arcsec': sec_char}), self.longitude >= 0 and 'E' or 'W' ) coordinates = [latitude, longitude] if altitude is None: altitude = bool(self.altitude) if altitude: if not isinstance(altitude, basestring): altitude = 'km' coordinates.append(self.format_altitude(altitude)) return ", ".join(coordinates) def format_decimal(self, altitude=None): latitude = "%s" % self.latitude longitude = "%s" % self.longitude coordinates = [latitude, longitude] if altitude is None: altitude = bool(self.altitude) if altitude: if not isinstance(altitude, basestring): altitude = 'km' coordinates.append(self.format_altitude(altitude)) return ", ".join(coordinates) def format_altitude(self, unit='km'): return format.distance(self.altitude, unit) def __str__(self): return self.format() def __unicode__(self): return self.format( None, format.DEGREE, format.PRIME, format.DOUBLE_PRIME ) def __eq__(self, other): return tuple(self) == tuple(other) def __ne__(self, other): return tuple(self) != tuple(other) @classmethod def parse_degrees(cls, degrees, arcminutes, arcseconds, direction=None): negative = degrees < 0 or degrees.startswith('-') degrees = float(degrees or 0) arcminutes = float(arcminutes or 0) arcseconds = float(arcseconds or 0) if arcminutes or arcseconds: more = units.degrees(arcminutes=arcminutes, arcseconds=arcseconds) if negative: degrees -= more else: degrees += more if direction in [None, 'N', 'E']: return degrees elif direction in ['S', 'W']: return -degrees else: raise ValueError("Invalid direction! Should be one of [NSEW].") @classmethod def parse_altitude(cls, distance, unit): if distance is not None: distance = float(distance) CONVERTERS = { 'km': lambda d: d, 'm': lambda d: units.kilometers(meters=d), 'mi': lambda d: units.kilometers(miles=d), 'ft': lambda d: units.kilometers(feet=d), 'nm': lambda d: units.kilometers(nautical=d), 'nmi': lambda d: units.kilometers(nautical=d) } return CONVERTERS[unit](distance) else: return distance @classmethod def from_string(cls, string): """ Create and return a Point instance from a string containing latitude and longitude, and optionally, altitude. Latitude and longitude must be in degrees and may be in decimal form or indicate arcminutes and arcseconds (labeled with Unicode prime and double prime, ASCII quote and double quote or 'm' and 's'). 
The degree symbol is optional and may be included after the decimal places (in decimal form) and before the arcminutes and arcseconds otherwise. Coordinates given from south and west (indicated by S and W suffixes) will be converted to north and east by switching their signs. If no (or partial) cardinal directions are given, north and east are the assumed directions. Latitude and longitude must be separated by at least whitespace, a comma, or a semicolon (each with optional surrounding whitespace). Altitude, if supplied, must be a decimal number with given units. The following unit abbrevations (case-insensitive) are supported: km (kilometers) m (meters) mi (miles) ft (feet) nm, nmi (nautical miles) Some example strings the will work include: 41.5;-81.0 41.5,-81.0 41.5 -81.0 41.5 N -81.0 W -41.5 S;81.0 E 23 26m 22s N 23 27m 30s E 23 26' 22" N 23 27' 30" E """ match = re.match(cls.POINT_PATTERN, string) if match: latitude = cls.parse_degrees( match.group('latitude_degrees'), match.group('latitude_arcminutes'), match.group('latitude_arcseconds'), match.group('latitude_direction') ) longitude = cls.parse_degrees( match.group('longitude_degrees'), match.group('longitude_arcminutes'), match.group('longitude_arcseconds'), match.group('longitude_direction'), ) altitude = cls.parse_altitude( match.group('altitude_distance'), match.group('altitude_units') ) return cls(latitude, longitude, altitude) else: raise ValueError( "Failed to create Point instance from string: unknown format." ) @classmethod def from_sequence(cls, seq): """ Create and return a new Point instance from any iterable with 0 to 3 elements. The elements, if present, must be latitude, longitude, and altitude, respectively. """ args = tuple(islice(seq, 4)) return cls(*args) @classmethod def from_point(cls, point): """ Create and return a new Point instance from another Point instance. """ return cls(point.latitude, point.longitude, point.altitude)
nck0405/ChennaiEden
modules/geopy/point.py
Python
mit
10,655
#!/usr/bin/python # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: elasticache_subnet_group version_added: "2.0" short_description: manage Elasticache subnet groups description: - Creates, modifies, and deletes Elasticache subnet groups. This module has a dependency on python-boto >= 2.5. options: state: description: - Specifies whether the subnet should be present or absent. required: true default: present choices: [ 'present' , 'absent' ] name: description: - Database subnet group identifier. required: true description: description: - Elasticache subnet group description. Only set when a new group is added. required: false default: null subnets: description: - List of subnet IDs that make up the Elasticache subnet group. required: false default: null author: "Tim Mahoney (@timmahoney)" extends_documentation_fragment: - aws - ec2 ''' EXAMPLES = ''' # Add or change a subnet group - elasticache_subnet_group: state: present name: norwegian-blue description: My Fancy Ex Parrot Subnet Group subnets: - subnet-aaaaaaaa - subnet-bbbbbbbb # Remove a subnet group - elasticache_subnet_group: state: absent name: norwegian-blue ''' try: import boto from boto.elasticache.layer1 import ElastiCacheConnection from boto.regioninfo import RegionInfo from boto.exception import BotoServerError HAS_BOTO = True except ImportError: HAS_BOTO = False def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( state = dict(required=True, choices=['present', 'absent']), name = dict(required=True), description = dict(required=False), subnets = dict(required=False, type='list'), ) ) module = AnsibleModule(argument_spec=argument_spec) if not HAS_BOTO: module.fail_json(msg='boto required for this module') state = module.params.get('state') group_name = module.params.get('name').lower() group_description = module.params.get('description') group_subnets = module.params.get('subnets') or {} if state == 'present': for required in ['name', 'description', 'subnets']: if not module.params.get(required): module.fail_json(msg = str("Parameter %s required for state='present'" % required)) else: for not_allowed in ['description', 'subnets']: if module.params.get(not_allowed): module.fail_json(msg = str("Parameter %s not allowed for state='absent'" % not_allowed)) # Retrieve any AWS settings from the environment. 
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module) if not region: module.fail_json(msg = str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set.")) """Get an elasticache connection""" try: endpoint = "elasticache.%s.amazonaws.com" % region connect_region = RegionInfo(name=region, endpoint=endpoint) conn = ElastiCacheConnection(region=connect_region, **aws_connect_kwargs) except boto.exception.NoAuthHandlerFound as e: module.fail_json(msg=e.message) try: changed = False exists = False try: matching_groups = conn.describe_cache_subnet_groups(group_name, max_records=100) exists = len(matching_groups) > 0 except BotoServerError as e: if e.error_code != 'CacheSubnetGroupNotFoundFault': module.fail_json(msg = e.error_message) if state == 'absent': if exists: conn.delete_cache_subnet_group(group_name) changed = True else: if not exists: new_group = conn.create_cache_subnet_group(group_name, cache_subnet_group_description=group_description, subnet_ids=group_subnets) changed = True else: changed_group = conn.modify_cache_subnet_group(group_name, cache_subnet_group_description=group_description, subnet_ids=group_subnets) changed = True except BotoServerError as e: if e.error_message != 'No modifications were requested.': module.fail_json(msg = e.error_message) else: changed = False module.exit_json(changed=changed) # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * if __name__ == '__main__': main()
j00bar/ansible
lib/ansible/modules/cloud/amazon/elasticache_subnet_group.py
Python
gpl-3.0
5,426
"""i18n_subsites plugin creates i18n-ized subsites of the default site This plugin is designed for Pelican 3.4 and later """ import os import six import logging import posixpath from copy import copy from itertools import chain from operator import attrgetter from collections import OrderedDict from contextlib import contextmanager from six.moves.urllib.parse import urlparse import gettext import locale from pelican import signals from pelican.generators import ArticlesGenerator, PagesGenerator from pelican.settings import configure_settings from pelican.contents import Draft # Global vars _MAIN_SETTINGS = None # settings dict of the main Pelican instance _MAIN_LANG = None # lang of the main Pelican instance _MAIN_SITEURL = None # siteurl of the main Pelican instance _MAIN_STATIC_FILES = None # list of Static instances the main Pelican instance _SUBSITE_QUEUE = {} # map: lang -> settings overrides _SITE_DB = OrderedDict() # OrderedDict: lang -> siteurl _SITES_RELPATH_DB = {} # map: (lang, base_lang) -> relpath # map: generator -> list of removed contents that need interlinking _GENERATOR_DB = {} _NATIVE_CONTENT_URL_DB = {} # map: source_path -> content in its native lang _LOGGER = logging.getLogger(__name__) @contextmanager def temporary_locale(temp_locale=None): '''Enable code to run in a context with a temporary locale Resets the locale back when exiting context. Can set a temporary locale if provided ''' orig_locale = locale.setlocale(locale.LC_ALL) if temp_locale is not None: locale.setlocale(locale.LC_ALL, temp_locale) yield locale.setlocale(locale.LC_ALL, orig_locale) def initialize_dbs(settings): '''Initialize internal DBs using the Pelican settings dict This clears the DBs for e.g. autoreload mode to work ''' global _MAIN_SETTINGS, _MAIN_SITEURL, _MAIN_LANG, _SUBSITE_QUEUE _MAIN_SETTINGS = settings _MAIN_LANG = settings['DEFAULT_LANG'] _MAIN_SITEURL = settings['SITEURL'] _SUBSITE_QUEUE = settings.get('I18N_SUBSITES', {}).copy() prepare_site_db_and_overrides() # clear databases in case of autoreload mode _SITES_RELPATH_DB.clear() _NATIVE_CONTENT_URL_DB.clear() _GENERATOR_DB.clear() def prepare_site_db_and_overrides(): '''Prepare overrides and create _SITE_DB _SITE_DB.keys() need to be ready for filter_translations ''' _SITE_DB.clear() _SITE_DB[_MAIN_LANG] = _MAIN_SITEURL # make sure it works for both root-relative and absolute main_siteurl = '/' if _MAIN_SITEURL == '' else _MAIN_SITEURL for lang, overrides in _SUBSITE_QUEUE.items(): if 'SITEURL' not in overrides: overrides['SITEURL'] = posixpath.join(main_siteurl, lang) _SITE_DB[lang] = overrides['SITEURL'] # default subsite hierarchy if 'OUTPUT_PATH' not in overrides: overrides['OUTPUT_PATH'] = os.path.join( _MAIN_SETTINGS['OUTPUT_PATH'], lang) if 'CACHE_PATH' not in overrides: overrides['CACHE_PATH'] = os.path.join( _MAIN_SETTINGS['CACHE_PATH'], lang) if 'STATIC_PATHS' not in overrides: overrides['STATIC_PATHS'] = [] if ('THEME' not in overrides and 'THEME_STATIC_DIR' not in overrides and 'THEME_STATIC_PATHS' not in overrides): relpath = relpath_to_site(lang, _MAIN_LANG) overrides['THEME_STATIC_DIR'] = posixpath.join( relpath, _MAIN_SETTINGS['THEME_STATIC_DIR']) overrides['THEME_STATIC_PATHS'] = [] # to change what is perceived as translations overrides['DEFAULT_LANG'] = lang def subscribe_filter_to_signals(settings): '''Subscribe content filter to requested signals''' for sig in settings.get('I18N_FILTER_SIGNALS', []): sig.connect(filter_contents_translations) def initialize_plugin(pelican_obj): '''Initialize plugin variables and 
Pelican settings''' if _MAIN_SETTINGS is None: initialize_dbs(pelican_obj.settings) subscribe_filter_to_signals(pelican_obj.settings) def get_site_path(url): '''Get the path component of an url, excludes siteurl also normalizes '' to '/' for relpath to work, otherwise it could be interpreted as a relative filesystem path ''' path = urlparse(url).path if path == '': path = '/' return path def relpath_to_site(lang, target_lang): '''Get relative path from siteurl of lang to siteurl of base_lang the output is cached in _SITES_RELPATH_DB ''' path = _SITES_RELPATH_DB.get((lang, target_lang), None) if path is None: siteurl = _SITE_DB.get(lang, _MAIN_SITEURL) target_siteurl = _SITE_DB.get(target_lang, _MAIN_SITEURL) path = posixpath.relpath(get_site_path(target_siteurl), get_site_path(siteurl)) _SITES_RELPATH_DB[(lang, target_lang)] = path return path def save_generator(generator): '''Save the generator for later use initialize the removed content list ''' _GENERATOR_DB[generator] = [] def article2draft(article): '''Transform an Article to Draft''' draft = Draft(article._content, article.metadata, article.settings, article.source_path, article._context) draft.status = 'draft' return draft def page2hidden_page(page): '''Transform a Page to a hidden Page''' page.status = 'hidden' return page class GeneratorInspector(object): '''Inspector of generator instances''' generators_info = { ArticlesGenerator: { 'translations_lists': ['translations', 'drafts_translations'], 'contents_lists': [('articles', 'drafts')], 'hiding_func': article2draft, 'policy': 'I18N_UNTRANSLATED_ARTICLES', }, PagesGenerator: { 'translations_lists': ['translations', 'hidden_translations'], 'contents_lists': [('pages', 'hidden_pages')], 'hiding_func': page2hidden_page, 'policy': 'I18N_UNTRANSLATED_PAGES', }, } def __init__(self, generator): '''Identify the best known class of the generator instance The class ''' self.generator = generator self.generators_info.update(generator.settings.get( 'I18N_GENERATORS_INFO', {})) for cls in generator.__class__.__mro__: if cls in self.generators_info: self.info = self.generators_info[cls] break else: self.info = {} def translations_lists(self): '''Iterator over lists of content translations''' return (getattr(self.generator, name) for name in self.info.get('translations_lists', [])) def contents_list_pairs(self): '''Iterator over pairs of normal and hidden contents''' return (tuple(getattr(self.generator, name) for name in names) for names in self.info.get('contents_lists', [])) def hiding_function(self): '''Function for transforming content to a hidden version''' hiding_func = self.info.get('hiding_func', lambda x: x) return hiding_func def untranslated_policy(self, default): '''Get the policy for untranslated content''' return self.generator.settings.get(self.info.get('policy', None), default) def all_contents(self): '''Iterator over all contents''' translations_iterator = chain(*self.translations_lists()) return chain(translations_iterator, *(pair[i] for pair in self.contents_list_pairs() for i in (0, 1))) def filter_contents_translations(generator): '''Filter the content and translations lists of a generator Filters out 1) translations which will be generated in a different site 2) content that is not in the language of the currently generated site but in that of a different site, content in a language which has no site is generated always. 
    The filtering method may be modified by the respective untranslated
    policy
    '''
    inspector = GeneratorInspector(generator)
    current_lang = generator.settings['DEFAULT_LANG']
    langs_with_sites = _SITE_DB.keys()
    removed_contents = _GENERATOR_DB[generator]

    for translations in inspector.translations_lists():
        for translation in translations[:]:   # copy to be able to remove
            if translation.lang in langs_with_sites:
                translations.remove(translation)
                removed_contents.append(translation)

    hiding_func = inspector.hiding_function()
    untrans_policy = inspector.untranslated_policy(default='hide')
    for (contents, other_contents) in inspector.contents_list_pairs():
        for content in other_contents:   # save any hidden native content first
            if content.lang == current_lang:          # in native lang
                # save the native URL attr formatted in the current locale
                _NATIVE_CONTENT_URL_DB[content.source_path] = content.url
        for content in contents[:]:      # copy for removing in loop
            if content.lang == current_lang:          # in native lang
                # save the native URL attr formatted in the current locale
                _NATIVE_CONTENT_URL_DB[content.source_path] = content.url
            elif content.lang in langs_with_sites and untrans_policy != 'keep':
                contents.remove(content)
                if untrans_policy == 'hide':
                    other_contents.append(hiding_func(content))
                elif untrans_policy == 'remove':
                    removed_contents.append(content)


def install_templates_translations(generator):
    '''Install gettext translations in the jinja2.Environment

    The translations for the current DEFAULT_LANG are installed only if
    the 'jinja2.ext.i18n' jinja2 extension is enabled.
    '''
    if 'jinja2.ext.i18n' in generator.settings['JINJA_EXTENSIONS']:
        domain = generator.settings.get('I18N_GETTEXT_DOMAIN', 'messages')
        localedir = generator.settings.get('I18N_GETTEXT_LOCALEDIR')
        if localedir is None:
            localedir = os.path.join(generator.theme, 'translations')
        current_lang = generator.settings['DEFAULT_LANG']
        if current_lang == generator.settings.get('I18N_TEMPLATES_LANG',
                                                  _MAIN_LANG):
            translations = gettext.NullTranslations()
        else:
            langs = [current_lang]
            try:
                translations = gettext.translation(domain, localedir, langs)
            except (IOError, OSError):
                _LOGGER.error((
                    "Cannot find translations for language '{}' in '{}' with "
                    "domain '{}'. Installing NullTranslations.").format(
                        langs[0], localedir, domain))
                translations = gettext.NullTranslations()
        newstyle = generator.settings.get('I18N_GETTEXT_NEWSTYLE', True)
        generator.env.install_gettext_translations(translations, newstyle)


def add_variables_to_context(generator):
    '''Adds useful iterable variables to template context'''
    context = generator.context             # minimize attr lookup
    context['relpath_to_site'] = relpath_to_site
    context['main_siteurl'] = _MAIN_SITEURL
    context['main_lang'] = _MAIN_LANG
    context['lang_siteurls'] = _SITE_DB
    current_lang = generator.settings['DEFAULT_LANG']
    extra_siteurls = _SITE_DB.copy()
    extra_siteurls.pop(current_lang)
    context['extra_siteurls'] = extra_siteurls


def interlink_translations(content):
    '''Link content to translations in their main language

    so the URL (including localized month names) of the different
    subsites will be honored
    '''
    lang = content.lang
    # sort translations by lang
    content.translations.sort(key=attrgetter('lang'))
    for translation in content.translations:
        relpath = relpath_to_site(lang, translation.lang)
        url = _NATIVE_CONTENT_URL_DB[translation.source_path]
        translation.override_url = posixpath.join(relpath, url)


def interlink_translated_content(generator):
    '''Make translations link to the native locations

    for generators that may contain translated content
    '''
    inspector = GeneratorInspector(generator)
    for content in inspector.all_contents():
        interlink_translations(content)


def interlink_removed_content(generator):
    '''For all contents removed from the generation queue

    update the interlinks so they point to the native location
    '''
    current_lang = generator.settings['DEFAULT_LANG']
    for content in _GENERATOR_DB[generator]:
        url = _NATIVE_CONTENT_URL_DB[content.source_path]
        relpath = relpath_to_site(current_lang, content.lang)
        content.override_url = posixpath.join(relpath, url)


def interlink_static_files(generator):
    '''Add links to static files in the main site if necessary'''
    if generator.settings['STATIC_PATHS'] != []:
        return                               # customized STATIC_PATHS
    filenames = generator.context['filenames']   # minimize attr lookup
    relpath = relpath_to_site(generator.settings['DEFAULT_LANG'], _MAIN_LANG)
    for staticfile in _MAIN_STATIC_FILES:
        if staticfile.get_relative_source_path() not in filenames:
            staticfile = copy(staticfile)    # prevent override in main site
            staticfile.override_url = posixpath.join(relpath, staticfile.url)
            generator.add_source_path(staticfile)


def save_main_static_files(static_generator):
    '''Save the static files generated for the main site'''
    global _MAIN_STATIC_FILES
    # test just for current lang as settings change in autoreload mode
    if static_generator.settings['DEFAULT_LANG'] == _MAIN_LANG:
        _MAIN_STATIC_FILES = static_generator.staticfiles


def update_generators():
    '''Update the context of all generators

    Adds useful variables and translations into the template context
    and interlinks the translations
    '''
    for generator in _GENERATOR_DB.keys():
        install_templates_translations(generator)
        add_variables_to_context(generator)
        interlink_static_files(generator)
        interlink_removed_content(generator)
        interlink_translated_content(generator)


def get_pelican_cls(settings):
    '''Get the Pelican class requested in settings'''
    cls = settings['PELICAN_CLASS']
    if isinstance(cls, six.string_types):
        module, cls_name = cls.rsplit('.', 1)
        module = __import__(module)
        cls = getattr(module, cls_name)
    return cls


def create_next_subsite(pelican_obj):
    '''Create the next subsite using the lang-specific config

    If there are no more subsites in the generation queue, update all
    the generators (interlink translations and removed content, add
    variables and translations to the template context).  Otherwise get
    the language and overrides for the next subsite in the queue and
    apply the overrides.  Then generate the subsite using a
    PELICAN_CLASS instance and its run method.  Finally, restore the
    previous locale.
    '''
    global _MAIN_SETTINGS
    if len(_SUBSITE_QUEUE) == 0:
        _LOGGER.debug(
            'i18n: Updating cross-site links and context of all generators.')
        update_generators()
        _MAIN_SETTINGS = None            # to initialize next time
    else:
        with temporary_locale():
            settings = _MAIN_SETTINGS.copy()
            lang, overrides = _SUBSITE_QUEUE.popitem()
            settings.update(overrides)
            settings = configure_settings(settings)   # to set LOCALE, etc.
            cls = get_pelican_cls(settings)

            new_pelican_obj = cls(settings)
            _LOGGER.debug(("Generating i18n subsite for language '{}' "
                           "using class {}").format(lang, cls))
            new_pelican_obj.run()


# map: signal name -> handler function
_SIGNAL_HANDLERS_DB = {
    'get_generators': initialize_plugin,
    'article_generator_pretaxonomy': filter_contents_translations,
    'page_generator_finalized': filter_contents_translations,
    'get_writer': create_next_subsite,
    'static_generator_finalized': save_main_static_files,
    'generator_init': save_generator,
}


def register():
    '''Register the plugin only if required signals are available'''
    for sig_name in _SIGNAL_HANDLERS_DB.keys():
        if not hasattr(signals, sig_name):
            _LOGGER.error((
                'The i18n_subsites plugin requires the {} signal, which is '
                'available in Pelican 3.4.0 and later; the plugin will not '
                'be used.').format(sig_name))
            return

    for sig_name, handler in _SIGNAL_HANDLERS_DB.items():
        sig = getattr(signals, sig_name)
        sig.connect(handler)
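
# --- Illustrative configuration sketch (not part of the plugin) -------------
# The subsite queue drained by create_next_subsite() is built from
# per-language settings overrides in the user's Pelican configuration, and
# filter_contents_translations() consults the per-generator policies.  The
# setting names below (I18N_SUBSITES in particular) are assumptions for
# illustration only; the dict is never used here.
_EXAMPLE_PELICANCONF = {
    'DEFAULT_LANG': 'en',
    # hypothetical per-language overrides; each key would become one subsite
    'I18N_SUBSITES': {
        'de': {'SITENAME': 'Beispiel-Seite', 'LOCALE': 'de_DE.UTF-8'},
        'cz': {'SITENAME': 'Ukázkový web', 'LOCALE': 'cs_CZ.UTF-8'},
    },
    # policies read by filter_contents_translations() above
    'I18N_UNTRANSLATED_ARTICLES': 'hide',     # 'hide', 'remove' or 'keep'
    'I18N_UNTRANSLATED_PAGES': 'hide',
}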
tijptjik/thegodsproject
plugins/i18n_subsites/i18n_subsites.py
Python
mit
17,012
__author__ = 'rolandh' RESEARCH_AND_SCHOLARSHIP = "http://refeds.org/category/research-and-scholarship" RELEASE = { "": ["eduPersonTargetedID"], RESEARCH_AND_SCHOLARSHIP: ["eduPersonPrincipalName", "eduPersonScopedAffiliation", "mail", "givenName", "sn", "displayName"] }
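
# Illustrative sketch (an addition, not part of pysaml2): one way a release
# policy could consult this module is to union the attribute bundles of the
# entity categories an SP declares, with the "" key acting as the bundle
# released regardless of category.  The helper below is an assumption for
# illustration only.
def example_attributes_for_categories(categories):
    released = set(RELEASE[""])
    for category in categories:
        released.update(RELEASE.get(category, []))
    return sorted(released)

# e.g. example_attributes_for_categories([RESEARCH_AND_SCHOLARSHIP])
# -> ['displayName', 'eduPersonPrincipalName', 'eduPersonScopedAffiliation',
#     'eduPersonTargetedID', 'givenName', 'mail', 'sn']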
Runscope/pysaml2
src/saml2/entity_category/refeds.py
Python
bsd-2-clause
345
########################### 1. Import the required modules
import cherrypy
import os

########################### 2. Set up the local and cloud directories
# determine the directory of this program file; on Windows the path keeps a trailing backslash
_curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
# set the data storage directories for cloud and local execution
if 'OPENSHIFT_REPO_DIR' in os.environ.keys():
    # the program is running in the cloud
    download_root_dir = os.environ['OPENSHIFT_DATA_DIR']
    data_dir = os.environ['OPENSHIFT_DATA_DIR']
else:
    # the program is running locally
    download_root_dir = _curdir + "/local_data/"
    data_dir = _curdir + "/local_data/"

########################### 3. Create the main object
class HelloWorld(object):
    _cp_config = {
        # without utf-8 encoding, no Chinese input is available
        'tools.encode.encoding': 'utf-8',
        'tools.sessions.on' : True,
        'tools.sessions.storage_type' : 'file',
        'tools.sessions.locking' : 'explicit',
        'tools.sessions.storage_path' : data_dir+'/tmp',
        # session timeout is 60 minutes
        'tools.sessions.timeout' : 60
        }

    @cherrypy.expose
    def fileuploadform(self):
        return '''<h1>file upload</h1>
        <script src="/static/jquery.js" type="text/javascript"></script>
        <script src="/static/axuploader.js" type="text/javascript"></script>
        <script>
        $(document).ready(function(){
            $('.prova').axuploader({url:'/fileaxupload', allowExt:['jpg','png','gif','7z','pdf','zip','flv','stl','txt'],
            finish:function(x,files)
            {
                alert('All files have been uploaded: '+files);
            },
            enable:true,
            remotePath:function(){
                return 'downloads/';
            }
            });
        });
        </script>
        <div class="prova"></div>
        <input type="button" onclick="$('.prova').axuploader('disable')" value="asd" />
        <input type="button" onclick="$('.prova').axuploader('enable')" value="ok" />
        </section></body></html>
        '''

    @cherrypy.expose
    def brythonuploadform(self):
        return '''<h1>file upload</h1>
        <script type="text/javascript" src="/static/Brython2.0.0-20140209-164925/brython.js"></script>
        <script type="text/javascript" >
        function getradio(tagname){
            var radios = document.getElementsByName(tagname);
            for (var i = 0, length = radios.length; i < length; i++) {
                if (radios[i].checked) {
                    // do whatever you want with the checked radio
                    return radios[i].value;
                    // only one radio can be logically checked, don't check the rest
                    break;
                }
            }
        }
        function run_js(){
            var cons = document.getElementById("console")
            var jscode = cons.value
            var t0 = (new Date()).getTime()
            eval(jscode)
            var t1 = (new Date()).getTime()
            console.log("Javascript code run in "+(t1-t0)+" ms")
        }
        </script>
        <script type="text/python3" src="/static/editor.py"></script>
        <script type="text/python3">
        from browser import doc
        overwrite = 0
        # add delete_program 1/7: seven steps complete the ajax task; the last step adds the delete_program function on the server
        # delete1 and delete2 parameters are also added into the save_program function.
        delete1 = 0
        delete2 = 0
        def set_debug(ev):
            if ev.target.checked:
                __BRYTHON__.debug = 1
            else:
                __BRYTHON__.debug = 0
        def set_overwrite(ev):
            global overwrite
            if ev.target.checked:
                overwrite = 1
            else:
                overwrite = 0
        # add delete_program 2/7: add the set_delete1 and set_delete2 functions on the client side.
def set_delete1(ev): global delete1 if ev.target.checked: delete1 = 1 else: delete1 = 0 def set_delete2(ev): global delete2 if ev.target.checked: delete2 = 1 else: delete2 = 0 #### ajax process from browser import ajax,doc def on_complete(req): print(req.readyState) print('status',req.status) if req.status==200 or req.status==0: # show request text on id=result division doc["result"].html = req.text else: doc["result"].html = "error "+req.text def err_msg(): doc["result"].html = "server didn't reply after %s seconds" %timeout timeout = 4 def go(url): req = ajax.ajax() req.bind('complete', on_complete) req.set_timeout(timeout, err_msg) req.open('GET', url, True) req.send() def post(url): req = ajax.ajax() req.bind('complete', on_complete) req.set_timeout(timeout, err_msg) req.open('POST', url, True) req.set_header('content-type','application/x-www-form-urlencoded') # doc["filename"].value is the id=filename input field's value # editor.getValue() is the content on editor, need to send dictionary format data # while post url, need to save editor content into local_storage to use the previous load javascripts storage["py_src"] = editor.getValue() # add delete_program 3/7, two parameters added, this will also affect save_program function on server. req.send({'filename':doc["filename"].value, 'editor':editor.getValue(), 'overwrite':overwrite, 'delete1':delete1, 'delete2':delete2}) # get program from server def get_prog(ev): # ajax can only read data from server _name = '/brython_programs/'+doc["filename"].value try: editor.setValue(open(_name, encoding="utf-8").read()) doc["result"].html = doc["filename"].value+" loaded!" except: doc["result"].html = "can not get "+doc["filename"].value+"!" editor.scrollToRow(0) editor.gotoLine(0) reset_theme() def get_radio(ev): from javascript import JSObject filename = JSObject(getradio)("filename") # ajax can only read data from server doc["filename"].value = filename _name = '/brython_programs/'+filename editor.setValue(open(_name, encoding="utf-8").read()) doc["result"].html = filename+" loaded!" editor.scrollToRow(0) editor.gotoLine(0) reset_theme() # bindings doc['run_js'].bind('click',run_js) doc['set_debug'].bind('change',set_debug) doc['set_overwrite'].bind('change',set_overwrite) # add delete_program 4/7, two associated binds added doc['set_delete1'].bind('change',set_delete1) doc['set_delete2'].bind('change',set_delete2) # next functions are defined in editor.py doc['show_js'].bind('click',show_js) doc['run'].bind('click',run) doc['show_console'].bind('click',show_console) # get_prog and get _radio (working) doc['get_prog'].bind('click', get_prog) doc['get_radio'].bind('click', get_radio) # reset_the_src and clear_console (working) doc['reset_the_src'].bind('click',reset_the_src) doc['clear_console'].bind('click',clear_console) # clear_canvas and clear_src doc['clear_canvas'].bind('click',clear_canvas) doc['clear_src'].bind('click',clear_src) # only admin can save program to server doc['save_program'].bind('click',lambda ev:post('/save_program')) # add delete_program 5/7, delete_program button bind to execute delete_program on server. 
        doc['delete_program'].bind('click',lambda ev:post('/delete_program'))
        </script>

        <script type="text/javascript">
        window.onload=brython({debug:1, cache:'version'});
        </script>
        <div class="prova"></div>
        <input type="button" onclick="$('.prova').axuploader('disable')" value="asd" />
        <input type="button" onclick="$('.prova').axuploader('enable')" value="ok" />
        </section></body></html>
        '''

    @cherrypy.expose
    def fileaxupload(self, *args, **kwargs):
        filename = kwargs["ax-file-name"]
        flag = kwargs["start"]
        # finally found the bug: values taken from kwargs[] are strings, not
        # numbers, so the earlier check "flag == 0" was wrong
        if flag == "0":
            # an upload starting at byte 0 means a new file must be opened
            file = open(download_root_dir+"downloads/"+filename, "wb")
        else:
            file = open(download_root_dir+"downloads/"+filename, "ab")
        file.write(cherrypy.request.body.read())
        file.close()
        return "files uploaded!"

    @cherrypy.expose
    def index(self, input1=None, input2=None):
        return "Hello world!"+str(input1)+_curdir

    @cherrypy.expose
    def inputform(self, input1=None, input2=None):
        return "input form"+str(input1)
    #index.exposed = True

########################### 4. Arrange the startup configuration
# set up the static directories and files relative to the program directory
application_conf = {'/static':{
        'tools.staticdir.on': True,
        'tools.staticdir.dir': _curdir+"/static"},
    '/downloads':{
        'tools.staticdir.on': True,
        'tools.staticdir.dir': data_dir+"/downloads"}
}

########################### 5. Start the program locally or in the cloud
# create an instance object from the HelloWorld() class
root = HelloWorld()

# if 'OPENSHIFT_REPO_DIR' exists in the os environment variables, the program
# is running in the OpenShift environment
if 'OPENSHIFT_REPO_DIR' in os.environ.keys():
    # start for cloud execution
    application = cherrypy.Application(root, config = application_conf)
else:
    # start for local execution
    '''
    cherrypy.server.socket_port = 8083
    cherrypy.server.socket_host = '127.0.0.1'
    '''
    cherrypy.quickstart(root, config = application_conf)
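
# Hedged client-side sketch (an addition, not part of this app): the
# fileaxupload() handler above appends raw request bodies keyed by the
# 'ax-file-name' and 'start' parameters, so a resumable upload could be driven
# as below.  The URL, the port and the use of the third-party 'requests'
# library are assumptions for illustration; the function is defined but never
# called here.
def _example_chunked_upload(path, url="http://127.0.0.1:8083/fileaxupload",
                            chunk_size=64 * 1024):
    import requests  # assumed to be available on the client side
    sent = 0
    with open(path, "rb") as f:
        while True:
            chunk = f.read(chunk_size)
            if not chunk:
                break
            # start=0 opens a new file on the server, later offsets append
            params = {"ax-file-name": os.path.basename(path), "start": sent}
            requests.post(url, params=params, data=chunk)
            sent += len(chunk)
    return sent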
2014c2g12/c2g12
wsgi/w2/c2_w2.py
Python
gpl-2.0
9,606
#!/usr/bin/env python ############################################################################ # # Copyright (C) 2012, 2013 PX4 Development Team. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # 3. Neither the name PX4 nor the names of its contributors may be # used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS # OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED # AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # ############################################################################ # # PX4 firmware image generator # # The PX4 firmware file is a JSON-encoded Python object, containing # metadata fields and a zlib-compressed base64-encoded firmware image. 
# import sys import argparse import json import base64 import zlib import time import subprocess # # Construct a basic firmware description # def mkdesc(): proto = {} proto['magic'] = "PX4FWv1" proto['board_id'] = 0 proto['board_revision'] = 0 proto['version'] = "" proto['summary'] = "" proto['description'] = "" proto['git_identity'] = "" proto['build_time'] = 0 proto['image'] = bytes() proto['image_size'] = 0 return proto # Parse commandline parser = argparse.ArgumentParser(description="Firmware generator for the PX autopilot system.") parser.add_argument("--prototype", action="store", help="read a prototype description from a file") parser.add_argument("--board_id", action="store", help="set the board ID required") parser.add_argument("--board_revision", action="store", help="set the board revision required") parser.add_argument("--version", action="store", help="set a version string") parser.add_argument("--summary", action="store", help="set a brief description") parser.add_argument("--description", action="store", help="set a longer description") parser.add_argument("--git_identity", action="store", help="the working directory to check for git identity") parser.add_argument("--parameter_xml", action="store", help="the parameters.xml file") parser.add_argument("--airframe_xml", action="store", help="the airframes.xml file") parser.add_argument("--image", action="store", help="the firmware image") args = parser.parse_args() # Fetch the firmware descriptor prototype if specified if args.prototype != None: f = open(args.prototype,"r") desc = json.load(f) f.close() else: desc = mkdesc() desc['build_time'] = int(time.time()) if args.board_id != None: desc['board_id'] = int(args.board_id) if args.board_revision != None: desc['board_revision'] = int(args.board_revision) if args.version != None: desc['version'] = str(args.version) if args.summary != None: desc['summary'] = str(args.summary) if args.description != None: desc['description'] = str(args.description) if args.git_identity != None: cmd = " ".join(["git", "--git-dir", args.git_identity + "/.git", "describe", "--always", "--dirty"]) p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout desc['git_identity'] = str(p.read().strip()) p.close() if args.parameter_xml != None: f = open(args.parameter_xml, "rb") bytes = f.read() desc['parameter_xml_size'] = len(bytes) desc['parameter_xml'] = base64.b64encode(zlib.compress(bytes,9)).decode('utf-8') if args.airframe_xml != None: f = open(args.airframe_xml, "rb") bytes = f.read() desc['airframe_xml_size'] = len(bytes) desc['airframe_xml'] = base64.b64encode(zlib.compress(bytes,9)).decode('utf-8') if args.image != None: f = open(args.image, "rb") bytes = f.read() desc['image_size'] = len(bytes) desc['image'] = base64.b64encode(zlib.compress(bytes,9)).decode('utf-8') print(json.dumps(desc, indent=4))
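
# ---------------------------------------------------------------------------
# Illustrative helper (an addition for this write-up, not part of px_mkfw.py):
# it shows how a generated .px4 file could be unpacked again by reversing the
# steps above -- JSON decode, base64-decode, then zlib-decompress the 'image'
# field.  The helper is defined but never called, and the field names are the
# ones written by this script.
def _example_unpack(path):
    """Return (descriptor dict, raw image bytes) for a .px4 firmware file."""
    with open(path, "r") as f:
        unpacked = json.load(f)
    image = zlib.decompress(base64.b64decode(unpacked['image']))
    assert len(image) == unpacked['image_size']
    return unpacked, image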
RickHutten/paparazzi
sw/tools/px4/px_mkfw.py
Python
gpl-2.0
4,811
#!/usr/bin/env python

# This simple example shows how to do basic texture mapping.

import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()

# Load in the texture map. A texture is any unsigned char image. If it
# is not of this type, you will have to map it through a lookup table
# or by using vtkImageShiftScale.
bmpReader = vtk.vtkBMPReader()
bmpReader.SetFileName(VTK_DATA_ROOT + "/Data/masonry.bmp")
atext = vtk.vtkTexture()
atext.SetInputConnection(bmpReader.GetOutputPort())
atext.InterpolateOn()

# Create a plane source and actor. The vtkPlaneSource generates
# texture coordinates.
plane = vtk.vtkPlaneSource()
planeMapper = vtk.vtkPolyDataMapper()
planeMapper.SetInputConnection(plane.GetOutputPort())
planeActor = vtk.vtkActor()
planeActor.SetMapper(planeMapper)
planeActor.SetTexture(atext)

# Create the RenderWindow, Renderer and Interactor
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)

# Add the actor to the renderer, set the background and size
ren.AddActor(planeActor)
ren.SetBackground(0.1, 0.2, 0.4)
renWin.SetSize(500, 500)

ren.ResetCamera()
cam1 = ren.GetActiveCamera()
cam1.Elevation(-30)
cam1.Roll(-20)
ren.ResetCameraClippingRange()

iren.Initialize()
renWin.Render()
iren.Start()
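
# Hedged sketch (an illustrative addition, not part of the original example):
# as the comment near the top notes, a source image that is not unsigned char
# has to be remapped before it can be used as a texture.  One way is
# vtkImageShiftScale; the shift/scale values below assume a 12-bit input and
# the helper is defined but never called here.
def _example_shift_scale(image_output_port):
    """Convert an arbitrary-typed image to unsigned char for texturing."""
    shift_scale = vtk.vtkImageShiftScale()
    shift_scale.SetInputConnection(image_output_port)
    shift_scale.SetShift(0.0)                 # move the minimum up to zero first
    shift_scale.SetScale(255.0 / 4095.0)      # squeeze a 12-bit range into 0-255
    shift_scale.SetOutputScalarTypeToUnsignedChar()
    return shift_scale.GetOutputPort()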
CMUSV-VisTrails/WorkflowRecommendation
examples/vtk_examples/Rendering/TPlane.py
Python
bsd-3-clause
1,344
"""California housing dataset. The original database is available from StatLib http://lib.stat.cmu.edu/ The data contains 20,640 observations on 9 variables. This dataset contains the average house value as target variable and the following input variables (features): average income, housing average age, average rooms, average bedrooms, population, average occupation, latitude, and longitude in that order. References ---------- Pace, R. Kelley and Ronald Barry, Sparse Spatial Autoregressions, Statistics and Probability Letters, 33 (1997) 291-297. """ # Authors: Peter Prettenhofer # License: BSD 3 clause from io import BytesIO from os.path import join, exists from os import makedirs from zipfile import ZipFile try: # Python 2 from urllib2 import urlopen except ImportError: # Python 3+ from urllib.request import urlopen import numpy as np from .base import get_data_home, Bunch from ..externals import joblib DATA_URL = "http://lib.stat.cmu.edu/modules.php?op=modload&name=Downloads&"\ "file=index&req=getit&lid=83" TARGET_FILENAME = "cal_housing.pkz" # Grab the module-level docstring to use as a description of the # dataset MODULE_DOCS = __doc__ def fetch_california_housing(data_home=None, download_if_missing=True): """Loader for the California housing dataset from StatLib. Parameters ---------- data_home : optional, default: None Specify another download and cache folder for the datasets. By default all scikit learn data is stored in '~/scikit_learn_data' subfolders. download_if_missing: optional, True by default If False, raise a IOError if the data is not locally available instead of trying to download the data from the source site. Returns ------- dataset : dict-like object with the following attributes: dataset.data : ndarray, shape [20640, 8] Each row corresponding to the 8 feature values in order. dataset.target : numpy array of shape (20640,) Each value corresponds to the average house value in units of 100,000. dataset.feature_names : array of length 8 Array of ordered feature names used in the dataset. dataset.DESCR : string Description of the California housing dataset. Notes ------ This dataset consists of 20,640 samples and 9 features. """ data_home = get_data_home(data_home=data_home) if not exists(data_home): makedirs(data_home) if not exists(join(data_home, TARGET_FILENAME)): print('downloading Cal. housing from %s to %s' % (DATA_URL, data_home)) fhandle = urlopen(DATA_URL) buf = BytesIO(fhandle.read()) zip_file = ZipFile(buf) try: cadata_fd = zip_file.open('cadata.txt', 'r') cadata = BytesIO(cadata_fd.read()) # skip the first 27 lines (documentation) cal_housing = np.loadtxt(cadata, skiprows=27) joblib.dump(cal_housing, join(data_home, TARGET_FILENAME), compress=6) finally: zip_file.close() else: cal_housing = joblib.load(join(data_home, TARGET_FILENAME)) feature_names = ["MedInc", "HouseAge", "AveRooms", "AveBedrms", "Population", "AveOccup", "Latitude", "Longitude"] target, data = cal_housing[:, 0], cal_housing[:, 1:] # avg rooms = total rooms / households data[:, 2] /= data[:, 5] # avg bed rooms = total bed rooms / households data[:, 3] /= data[:, 5] # avg occupancy = population / housholds data[:, 5] = data[:, 4] / data[:, 5] # target in units of 100,000 target = target / 100000.0 return Bunch(data=data, target=target, feature_names=feature_names, DESCR=MODULE_DOCS)
chaluemwut/fbserver
venv/lib/python2.7/site-packages/sklearn/datasets/california_housing.py
Python
apache-2.0
3,825