repo_name
stringlengths
5
100
ref
stringlengths
12
67
path
stringlengths
4
244
copies
stringlengths
1
8
content
stringlengths
0
1.05M
csferrie/python-qinfer
refs/heads/master
src/qinfer/parallel.py
3
#!/usr/bin/python # -*- coding: utf-8 -*- ## # parallel.py: Tools for distributing computation. ## # © 2017, Chris Ferrie (csferrie@gmail.com) and # Christopher Granade (cgranade@cgranade.com). # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. ## ## FEATURES ################################################################## from __future__ import absolute_import from __future__ import division # Ensures that a/b is always a float. 
## EXPORTS ###################################################################

__all__ = ['DirectViewParallelizedModel']

## IMPORTS ###################################################################

import numpy as np

from qinfer.derived_models import DerivedModel

import warnings

try:
    import ipyparallel as ipp
    interactive = ipp.interactive
except ImportError:
    try:
        # Fall back to the pre-4.0 location of the parallel machinery.
        import IPython.parallel as ipp
        interactive = ipp.interactive
    except (ImportError, AttributeError):
        # `warnings` is already imported at module scope; the redundant
        # local import that used to live here has been removed.
        warnings.warn(
            "Could not import IPython parallel. "
            "Parallelization support will be disabled."
        )
        ipp = None
        interactive = lambda fn: fn

## LOGGING ###################################################################

import logging

logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())

## CLASSES ###################################################################

class DirectViewParallelizedModel(DerivedModel):
    r"""
    Given an instance of a :class:`Model`, parallelizes execution of that
    model's likelihood by breaking the ``modelparams`` array into segments and
    executing a segment on each member of a :class:`~ipyparallel.DirectView`.

    This :class:`Model` assumes that it has ownership over the DirectView, such
    that no other processes will send tasks during the lifetime of the Model.

    If you are having trouble pickling your model, consider switching to
    ``dill`` by calling ``direct_view.use_dill()``. This mode gives more
    support for closures.

    :param qinfer.Model serial_model: Model to be parallelized. This model
        will be distributed to the engines in the direct view, such that the
        model must support pickling.
    :param ipyparallel.DirectView direct_view: Direct view onto the engines
        that will be used to parallelize evaluation of the model's likelihood
        function.
    :param bool purge_client: If ``True``, then this model will purge results
        and metadata from the IPython client whenever the model cache is
        cleared. This is useful for solving memory leaks caused by very large
        numbers of calls to ``likelihood``. By default, this is disabled,
        since enabling this option can cause data loss if the client is being
        sent other tasks during the operation of this model.
    :param int serial_threshold: Sets the number of model vectors below which
        the serial model is to be preferred. By default, this is set to
        ``10 * n_engines``, where ``n_engines`` is the number of engines
        exposed by ``direct_view``.
    """

    ## INITIALIZER ##

    def __init__(self, serial_model, direct_view, purge_client=False, serial_threshold=None):
        if ipp is None:
            raise RuntimeError(
                "This model requires IPython parallelization support, "
                "but an error was raised importing IPython.parallel."
            )
        # _dv must be assigned before reading self.n_engines below.
        self._dv = direct_view
        self._purge_client = purge_client
        self._serial_threshold = (
            10 * self.n_engines if serial_threshold is None else int(serial_threshold)
        )
        super(DirectViewParallelizedModel, self).__init__(serial_model)

    ## SPECIAL METHODS ##

    def __getstate__(self):
        # Since instances of this class will be pickled as they are passed to
        # remote engines, we need to be careful not to include _dv.
        return {
            '_underlying_model': self._underlying_model,
            '_dv': None,
            '_call_count': self._call_count,
            '_sim_count': self._sim_count,
            '_serial_threshold': self._serial_threshold
        }

    ## PROPERTIES ##

    # Provide _serial_model as a back-compat alias for _underlying_model.
    @property
    def _serial_model(self):
        warnings.warn("_serial_model is deprecated in favor of _underlying_model.",
            DeprecationWarning
        )
        return self._underlying_model

    @_serial_model.setter
    def _serial_model(self, value):
        warnings.warn("_serial_model is deprecated in favor of _underlying_model.",
            DeprecationWarning
        )
        self._underlying_model = value

    @property
    def n_engines(self):
        """
        The number of engines seen by the direct view owned by this
        parallelized model. Zero if no direct view is attached (e.g. after
        unpickling).

        :rtype: int
        """
        return len(self._dv) if self._dv is not None else 0

    ## METHODS ##

    def clear_cache(self):
        """
        Clears any cache associated with the serial model and the engines
        seen by the direct view.
        """
        self.underlying_model.clear_cache()
        try:
            logger.info('DirectView results has {} items. Clearing.'.format(
                len(self._dv.results)
            ))
            self._dv.purge_results('all')
            if self._purge_client:
                self._dv.client.purge_everything()
        except Exception:
            # Purging is best-effort only, but the bare ``except:`` that used
            # to be here also swallowed KeyboardInterrupt/SystemExit. Catch
            # Exception instead and record the failure rather than hiding it.
            logger.debug("Failed to clear DirectView cache.", exc_info=True)

    def likelihood(self, outcomes, modelparams, expparams):
        """
        Returns the likelihood for the underlying (serial) model, distributing
        the model parameter array across the engines controlled by this
        parallelized model. Returns what the serial model would return, see
        :attr:`~Model.likelihood`
        """
        # By calling the superclass implementation, we can consolidate
        # call counting there.
        super(DirectViewParallelizedModel, self).likelihood(outcomes, modelparams, expparams)

        # If there's less models than some threshold, just use the serial model.
        # By default, we'll set that threshold to be the number of engines * 10.
        if modelparams.shape[0] <= self._serial_threshold:
            return self.underlying_model.likelihood(outcomes, modelparams, expparams)

        if self._dv is None:
            raise RuntimeError(
                "No direct view provided; this may be because the instance was "
                "loaded from a pickle or NumPy saved array without providing a "
                "new direct view."
            )

        # Need to decorate with interactive to overcome namespace issues with
        # remote engines.
        @interactive
        def serial_likelihood(mps, sm, os, eps):
            return sm.likelihood(os, mps, eps)

        # TODO: check whether there's a better way to pass the extra parameters
        # that doesn't use so much memory.
        # The trick is that serial_likelihood will be pickled, so we need to be
        # careful about closures.
        L = self._dv.map_sync(
            serial_likelihood,
            np.array_split(modelparams, self.n_engines, axis=0),
            [self.underlying_model] * self.n_engines,
            [outcomes] * self.n_engines,
            [expparams] * self.n_engines
        )

        # Each engine returns a slab over a slice of the model axis (axis 1),
        # so the slabs are rejoined along that axis.
        return np.concatenate(L, axis=1)

    def simulate_experiment(self, modelparams, expparams, repeat=1, split_by_modelparams=True):
        """
        Simulates the underlying (serial) model using the parallel engines.
        Returns what the serial model would return, see
        :attr:`~Simulatable.simulate_experiment`

        :param bool split_by_modelparams: If ``True``, splits up
            ``modelparams`` into `n_engines` chunks and distributes across
            engines. If ``False``, splits up ``expparams``.
        """
        # By calling the superclass implementation, we can consolidate
        # simulation counting there.
        super(DirectViewParallelizedModel, self).simulate_experiment(modelparams, expparams, repeat=repeat)

        if self._dv is None:
            raise RuntimeError(
                "No direct view provided; this may be because the instance was "
                "loaded from a pickle or NumPy saved array without providing a "
                "new direct view."
            )

        # Need to decorate with interactive to overcome namespace issues with
        # remote engines.
        @interactive
        def serial_simulator(sm, mps, eps, r):
            return sm.simulate_experiment(mps, eps, repeat=r)

        if split_by_modelparams:
            # If there's less models than some threshold, just use the serial model.
            # By default, we'll set that threshold to be the number of engines * 10.
            if modelparams.shape[0] <= self._serial_threshold:
                return self.underlying_model.simulate_experiment(modelparams, expparams, repeat=repeat)

            # The trick is that serial_simulator will be pickled, so we need to
            # be careful about closures.
            os = self._dv.map_sync(
                serial_simulator,
                [self.underlying_model] * self.n_engines,
                np.array_split(modelparams, self.n_engines, axis=0),
                [expparams] * self.n_engines,
                [repeat] * self.n_engines
            )

            # Chunks ran over slices of the model axis (axis 0 of the result).
            return np.concatenate(os, axis=0)
        else:
            # If there's less experiments than some threshold, just use the serial model.
            # By default, we'll set that threshold to be the number of engines * 10.
            if expparams.shape[0] <= self._serial_threshold:
                return self.underlying_model.simulate_experiment(modelparams, expparams, repeat=repeat)

            # The trick is that serial_simulator will be pickled, so we need to
            # be careful about closures.
            os = self._dv.map_sync(
                serial_simulator,
                [self.underlying_model] * self.n_engines,
                [modelparams] * self.n_engines,
                np.array_split(expparams, self.n_engines, axis=0),
                [repeat] * self.n_engines
            )

            # Chunks ran over slices of the experiment axis (axis 1 of the result).
            return np.concatenate(os, axis=1)
ToonTownInfiniteRepo/ToontownInfinite
refs/heads/master
toontown/dna/ply/ctokens.py
363
# ----------------------------------------------------------------------
# ctokens.py
#
# Token specifications for symbols in ANSI C and C++.  This file is
# meant to be used as a library in other tokenizers.
#
# NOTE: in PLY, every ``t_NAME`` rule must correspond to a name declared
# in ``tokens``.  The rule names below have been aligned with the token
# list (previously ``t_MODULO``, ``t_INCREMENT``, ``t_DECREMENT``,
# ``t_INTEGER``, ``t_FLOAT``, ``t_STRING`` and ``t_CHARACTER`` did not
# match any declared token), and the XOREQUAL pattern now escapes ``^``
# (``r'^='`` is a regex anchor and would match a lone ``=``).
# ----------------------------------------------------------------------

# Reserved words
tokens = [
    # Literals (identifier, integer constant, float constant, string constant, char const)
    # 'TYPEID' is declared for use by importing tokenizers; no rule for it is
    # defined here.
    'ID', 'TYPEID', 'ICONST', 'FCONST', 'SCONST', 'CCONST',

    # Operators (+,-,*,/,%,|,&,~,^,<<,>>, ||, &&, !, <, <=, >, >=, ==, !=)
    'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MOD',
    'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT',
    'LOR', 'LAND', 'LNOT',
    'LT', 'LE', 'GT', 'GE', 'EQ', 'NE',

    # Assignment (=, *=, /=, %=, +=, -=, <<=, >>=, &=, ^=, |=)
    'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', 'PLUSEQUAL', 'MINUSEQUAL',
    'LSHIFTEQUAL', 'RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL', 'OREQUAL',

    # Increment/decrement (++,--)
    'PLUSPLUS', 'MINUSMINUS',

    # Structure dereference (->)
    'ARROW',

    # Ternary operator (?)
    'TERNARY',

    # Delimeters ( ) [ ] { } , . ; :
    'LPAREN', 'RPAREN',
    'LBRACKET', 'RBRACKET',
    'LBRACE', 'RBRACE',
    'COMMA', 'PERIOD', 'SEMI', 'COLON',

    # Ellipsis (...)
    'ELLIPSIS',
]

# Operators
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_MOD = r'%'              # was t_MODULO: 'MODULO' is not a declared token
t_OR = r'\|'
t_AND = r'&'
t_NOT = r'~'
t_XOR = r'\^'
t_LSHIFT = r'<<'
t_RSHIFT = r'>>'
t_LOR = r'\|\|'
t_LAND = r'&&'
t_LNOT = r'!'
t_LT = r'<'
t_GT = r'>'
t_LE = r'<='
t_GE = r'>='
t_EQ = r'=='
t_NE = r'!='

# Assignment operators
t_EQUALS = r'='
t_TIMESEQUAL = r'\*='
t_DIVEQUAL = r'/='
t_MODEQUAL = r'%='
t_PLUSEQUAL = r'\+='
t_MINUSEQUAL = r'-='
t_LSHIFTEQUAL = r'<<='
t_RSHIFTEQUAL = r'>>='
t_ANDEQUAL = r'&='
t_OREQUAL = r'\|='
t_XOREQUAL = r'\^='       # was r'^=': unescaped '^' anchors and matches '='

# Increment/decrement
t_PLUSPLUS = r'\+\+'      # was t_INCREMENT: token is declared as 'PLUSPLUS'
t_MINUSMINUS = r'--'      # was t_DECREMENT: token is declared as 'MINUSMINUS'

# ->
t_ARROW = r'->'

# ?
t_TERNARY = r'\?'

# Delimeters
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_LBRACE = r'\{'
t_RBRACE = r'\}'
t_COMMA = r','
t_PERIOD = r'\.'
t_SEMI = r';'
t_COLON = r':'
t_ELLIPSIS = r'\.\.\.'

# Identifiers
t_ID = r'[A-Za-z_][A-Za-z0-9_]*'

# Integer literal (was t_INTEGER: token is declared as 'ICONST')
t_ICONST = r'\d+([uU]|[lL]|[uU][lL]|[lL][uU])?'

# Floating literal (was t_FLOAT: token is declared as 'FCONST')
t_FCONST = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'

# String literal (was t_STRING: token is declared as 'SCONST')
t_SCONST = r'\"([^\\\n]|(\\.))*?\"'

# Character constant 'c' or L'c' (was t_CHARACTER: token is declared as 'CCONST')
t_CCONST = r'(L)?\'([^\\\n]|(\\.))*?\''

# Comment (C-Style): track newlines so lexer line numbers stay correct.
def t_COMMENT(t):
    r'/\*(.|\n)*?\*/'
    t.lexer.lineno += t.value.count('\n')
    return t

# Comment (C++-Style): consumes through the trailing newline.
def t_CPPCOMMENT(t):
    r'//.*\n'
    t.lexer.lineno += 1
    return t
georgewhr/dbwrt
refs/heads/master
tools/perf/scripts/python/net_dropmonitor.py
2669
# Monitor the system for dropped packets and produce a report of drop
# locations and counts.
import os
import sys

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from Core import *
from Util import *

# Map of str(drop location address) -> number of drops seen there.
drop_log = {}
# Sorted list of (address, symbol name) pairs from /proc/kallsyms.
kallsyms = []

def get_kallsyms_table():
    """Populate and sort the global kallsyms table from /proc/kallsyms."""
    global kallsyms
    try:
        f = open("/proc/kallsyms", "r")
    except IOError:
        # /proc/kallsyms may be unreadable (e.g. insufficient privileges);
        # symbol resolution is simply skipped in that case.
        return
    # Close the file when done (the original leaked the handle).
    try:
        for line in f:
            fields = line.split()
            loc = int(fields[0], 16)
            name = fields[2]
            kallsyms.append((loc, name))
    finally:
        f.close()
    kallsyms.sort()

def get_sym(sloc):
    """Binary-search kallsyms for the symbol containing address *sloc*.

    Returns (name, offset) for the covering symbol, or (None, 0) when the
    table is empty or the address precedes the first symbol.
    """
    loc = int(sloc)

    # Invariant: kallsyms[i][0] <= loc for all 0 <= i <= start
    #            kallsyms[i][0] > loc for all end <= i < len(kallsyms)
    start, end = -1, len(kallsyms)
    while end != start + 1:
        pivot = (start + end) // 2
        if loc < kallsyms[pivot][0]:
            end = pivot
        else:
            start = pivot

    # Now (start == -1 or kallsyms[start][0] <= loc)
    # and (start == len(kallsyms) - 1 or loc < kallsyms[start + 1][0])
    if start >= 0:
        symloc, name = kallsyms[start]
        return (name, loc - symloc)
    else:
        return (None, 0)

def print_drop_table():
    """Print one row per drop location with its symbol, offset and count."""
    # print(...) with a single argument is valid in both Python 2 and 3,
    # unlike the former print-statement form.
    print("%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT"))
    for sloc, count in drop_log.items():
        (sym, off) = get_sym(sloc)
        if sym is None:
            # Unresolvable address: fall back to printing the raw location.
            sym = sloc
        print("%25s %25s %25s" % (sym, off, count))

def trace_begin():
    print("Starting trace (Ctrl-C to dump results)")

def trace_end():
    print("Gathering kallsyms data")
    get_kallsyms_table()
    print_drop_table()

# called from perf, when it finds a corresponding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr, location, protocol):
    slocation = str(location)
    # dict.get replaces the previous try/except KeyError counter idiom.
    drop_log[slocation] = drop_log.get(slocation, 0) + 1
andyzsf/django
refs/heads/master
tests/shortcuts/urls.py
11
from django.conf.urls import url

from . import views

# Test URLconf for the django.shortcuts tests. Each route maps to a view
# that exercises render_to_response()/render() with one specific keyword
# argument (request_context, content_type, dirs, status, current_app, ...),
# as reflected in the view names.
urlpatterns = [
    url(r'^render_to_response/$', views.render_to_response_view),
    url(r'^render_to_response/request_context/$', views.render_to_response_view_with_request_context),
    url(r'^render_to_response/content_type/$', views.render_to_response_view_with_content_type),
    url(r'^render_to_response/dirs/$', views.render_to_response_view_with_dirs),
    url(r'^render_to_response/context_instance_misuse/$', views.render_to_response_with_context_instance_misuse),
    url(r'^render/$', views.render_view),
    url(r'^render/base_context/$', views.render_view_with_base_context),
    url(r'^render/content_type/$', views.render_view_with_content_type),
    url(r'^render/dirs/$', views.render_with_dirs),
    url(r'^render/status/$', views.render_view_with_status),
    url(r'^render/current_app/$', views.render_view_with_current_app),
    url(r'^render/current_app_conflict/$', views.render_view_with_current_app_conflict),
]
michael-donat/ansible
refs/heads/devel
lib/ansible/utils/__init__.py
2520
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type
sekikn/ambari
refs/heads/trunk
ambari-server/src/test/python/TestServerUpgrade.py
2
'''
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with this
work for additional information regarding copyright ownership. The ASF
licenses this file to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance with the
License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
'''

import os
os.environ["ROOT"] = ""

import StringIO
import sys
from ambari_commons.exceptions import FatalException
from unittest import TestCase
from mock.mock import patch, MagicMock
from ambari_commons import os_utils
import platform
import shutil

# Stage a real ambari.properties in /tmp and point search_file at it so that
# importing ambari_server.serverUpgrade (which reads configuration at import
# time) succeeds on the test machine.
project_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)),os.path.normpath("../../../../"))
shutil.copyfile(project_dir+"/ambari-server/conf/unix/ambari.properties", "/tmp/ambari.properties")
_search_file = os_utils.search_file
os_utils.search_file = MagicMock(return_value="/tmp/ambari.properties")

# The import itself must run under these patches, since module-level code in
# ambari_server probes the platform and filesystem.
with patch.object(platform, "linux_distribution", return_value = MagicMock(return_value=('Redhat', '6.4', 'Final'))):
  with patch("os.path.isdir", return_value = MagicMock(return_value=True)):
    with patch("os.access", return_value = MagicMock(return_value=True)):
      with patch.object(os_utils, "parse_log4j_file", return_value={'ambari.log.dir': '/var/log/ambari-server'}):
        from ambari_server.serverUpgrade import set_current, SetCurrentVersionOptions
        import ambari_server

# Restore the real search_file once the import-time configuration is done.
os_utils.search_file = _search_file


@patch.object(platform, "linux_distribution", new = MagicMock(return_value=('Redhat', '6.4', 'Final')))
@patch("os.path.isdir", new = MagicMock(return_value=True))
@patch("os.access", new = MagicMock(return_value=True))
class TestServerUpgrade(TestCase):
  """Tests for the ambari-server "set-current" upgrade command."""

  # Note: patch decorators are applied bottom-up, so the mock parameters
  # below appear in reverse order of the decorators.
  @patch("ambari_server.serverUpgrade.is_server_runing")
  @patch('ambari_server.serverUpgrade.SetCurrentVersionOptions.no_finalize_options_set')
  @patch('ambari_server.serverUpgrade.get_validated_string_input')
  @patch('ambari_server.serverUpgrade.get_ambari_properties')
  @patch('ambari_server.serverUtils.get_ambari_server_api_base')
  @patch('ambari_commons.logging_utils.get_verbose')
  @patch('urllib2.urlopen')
  def test_set_current(self, urlopen_mock, get_verbose_mock, get_ambari_server_api_base_mock,
                       get_ambari_properties_mock, get_validated_string_input_mock,
                       no_finalize_options_set_mock, is_server_runing_mock):
    """set_current without --force: errors when the server is down or
    options are incomplete, otherwise PUTs "force": false to the API."""
    options = MagicMock()
    options.cluster_name = 'cc'
    options.desired_repo_version = 'HDP-2.2.2.0-2561'
    options.force_repo_version = None

    # Case when server is not running
    is_server_runing_mock.return_value = False, None
    try:
      set_current(options)
      self.fail("Server is not running - should error out")
    except FatalException:
      pass  # expected

    is_server_runing_mock.return_value = True, 11111

    # Test insufficient options case
    no_finalize_options_set_mock.return_value = True
    try:
      set_current(options)
      self.fail("Should error out")
    except FatalException:
      pass  # expected

    no_finalize_options_set_mock.return_value = False

    # Test normal flow
    get_validated_string_input_mock.return_value = 'dummy_string'
    p = get_ambari_properties_mock.return_value
    # Successive get_property calls: client port and two SSL-related flags
    # (presumably — verify against serverUtils if they change).
    p.get_property.side_effect = ["8080", "false", "false"]
    get_ambari_server_api_base_mock.return_value = 'http://127.0.0.1:8080/api/v1/'
    # Fixed typo: was "retun_value", which set a meaningless attribute on the
    # mock instead of configuring its return value.
    get_verbose_mock.return_value = False

    set_current(options)

    self.assertTrue(urlopen_mock.called)
    request = urlopen_mock.call_args_list[0][0][0]
    self.assertEquals(request._Request__original, 'http://127.0.0.1:8080/api/v1/clusters/cc/stack_versions')
    self.assertEquals(request.data, '{"ClusterStackVersions": {"state": "CURRENT", "repository_version": "HDP-2.2.2.0-2561", "force": false}}')
    self.assertEquals(request.origin_req_host, '127.0.0.1')
    self.assertEquals(request.headers, {'X-requested-by': 'ambari',
                                        'Authorization': 'Basic ZHVtbXlfc3RyaW5nOmR1bW15X3N0cmluZw=='})

  @patch("ambari_server.serverUpgrade.is_server_runing")
  @patch('ambari_server.serverUpgrade.SetCurrentVersionOptions.no_finalize_options_set')
  @patch('ambari_server.serverUpgrade.get_validated_string_input')
  @patch('ambari_server.serverUpgrade.get_ambari_properties')
  @patch('ambari_server.serverUtils.get_ambari_server_api_base')
  @patch('ambari_commons.logging_utils.get_verbose')
  @patch('urllib2.urlopen')
  def test_set_current_with_force(self, urlopen_mock, get_verbose_mock, get_ambari_server_api_base_mock,
                                  get_ambari_properties_mock, get_validated_string_input_mock,
                                  no_finalize_options_set_mock, is_server_runing_mock):
    """Same flow as test_set_current, but with --force: the request body
    must carry "force": true."""
    options = MagicMock()
    options.cluster_name = 'cc'
    options.desired_repo_version = 'HDP-2.2.2.0-2561'
    options.force_repo_version = True

    # Case when server is not running
    is_server_runing_mock.return_value = False, None
    try:
      set_current(options)
      self.fail("Server is not running - should error out")
    except FatalException:
      pass  # expected

    is_server_runing_mock.return_value = True, 11111

    # Test insufficient options case
    no_finalize_options_set_mock.return_value = True
    try:
      set_current(options)
      self.fail("Should error out")
    except FatalException:
      pass  # expected

    no_finalize_options_set_mock.return_value = False

    # Test normal flow
    get_validated_string_input_mock.return_value = 'dummy_string'
    p = get_ambari_properties_mock.return_value
    p.get_property.side_effect = ["8080", "false", "false"]
    get_ambari_server_api_base_mock.return_value = 'http://127.0.0.1:8080/api/v1/'
    # Fixed typo: was "retun_value" (see test_set_current).
    get_verbose_mock.return_value = False

    set_current(options)

    self.assertTrue(urlopen_mock.called)
    request = urlopen_mock.call_args_list[0][0][0]
    self.assertEquals(request._Request__original, 'http://127.0.0.1:8080/api/v1/clusters/cc/stack_versions')
    self.assertEquals(request.data, '{"ClusterStackVersions": {"state": "CURRENT", "repository_version": "HDP-2.2.2.0-2561", "force": true}}')
    self.assertEquals(request.origin_req_host, '127.0.0.1')
    self.assertEquals(request.headers, {'X-requested-by': 'ambari',
                                        'Authorization': 'Basic ZHVtbXlfc3RyaW5nOmR1bW15X3N0cmluZw=='})

  def testCurrentVersionOptions(self):
    """no_finalize_options_set() is True iff cluster name or desired repo
    version is missing."""
    # Negative test cases
    options = MagicMock()
    options.cluster_name = None
    options.desired_repo_version = 'HDP-2.2.2.0-2561'
    cvo = SetCurrentVersionOptions(options)
    self.assertTrue(cvo.no_finalize_options_set())

    options = MagicMock()
    options.cluster_name = 'cc'
    options.desired_repo_version = None
    cvo = SetCurrentVersionOptions(options)
    self.assertTrue(cvo.no_finalize_options_set())

    # Positive test case
    options = MagicMock()
    options.cluster_name = 'cc'
    options.desired_repo_version = 'HDP-2.2.2.0-2561'
    cvo = SetCurrentVersionOptions(options)
    self.assertFalse(cvo.no_finalize_options_set())
bgris/ODL_bgris
refs/heads/master
lib/python3.5/site-packages/matplotlib/spines.py
10
from __future__ import (absolute_import, division, print_function, unicode_literals) import six import matplotlib import matplotlib.artist as martist from matplotlib.artist import allow_rasterization from matplotlib import docstring import matplotlib.transforms as mtransforms import matplotlib.lines as mlines import matplotlib.patches as mpatches import matplotlib.path as mpath import matplotlib.cbook as cbook import numpy as np import warnings rcParams = matplotlib.rcParams class Spine(mpatches.Patch): """an axis spine -- the line noting the data area boundaries Spines are the lines connecting the axis tick marks and noting the boundaries of the data area. They can be placed at arbitrary positions. See function:`~matplotlib.spines.Spine.set_position` for more information. The default position is ``('outward',0)``. Spines are subclasses of class:`~matplotlib.patches.Patch`, and inherit much of their behavior. Spines draw a line or a circle, depending if function:`~matplotlib.spines.Spine.set_patch_line` or function:`~matplotlib.spines.Spine.set_patch_circle` has been called. Line-like is the default. """ def __str__(self): return "Spine" @docstring.dedent_interpd def __init__(self, axes, spine_type, path, **kwargs): """ - *axes* : the Axes instance containing the spine - *spine_type* : a string specifying the spine type - *path* : the path instance used to draw the spine Valid kwargs are: %(Patch)s """ super(Spine, self).__init__(**kwargs) self.axes = axes self.set_figure(self.axes.figure) self.spine_type = spine_type self.set_facecolor('none') self.set_edgecolor(rcParams['axes.edgecolor']) self.set_linewidth(rcParams['axes.linewidth']) self.set_capstyle('projecting') self.axis = None self.set_zorder(2.5) self.set_transform(self.axes.transData) # default transform self._bounds = None # default bounds self._smart_bounds = False # Defer initial position determination. 
(Not much support for # non-rectangular axes is currently implemented, and this lets # them pass through the spines machinery without errors.) self._position = None if not isinstance(path, matplotlib.path.Path): msg = "'path' must be an instance of 'matplotlib.path.Path'" raise ValueError(msg) self._path = path # To support drawing both linear and circular spines, this # class implements Patch behavior two ways. If # self._patch_type == 'line', behave like a mpatches.PathPatch # instance. If self._patch_type == 'circle', behave like a # mpatches.Ellipse instance. self._patch_type = 'line' # Behavior copied from mpatches.Ellipse: # Note: This cannot be calculated until this is added to an Axes self._patch_transform = mtransforms.IdentityTransform() def set_smart_bounds(self, value): """set the spine and associated axis to have smart bounds""" self._smart_bounds = value # also set the axis if possible if self.spine_type in ('left', 'right'): self.axes.yaxis.set_smart_bounds(value) elif self.spine_type in ('top', 'bottom'): self.axes.xaxis.set_smart_bounds(value) self.stale = True def get_smart_bounds(self): """get whether the spine has smart bounds""" return self._smart_bounds def set_patch_circle(self, center, radius): """set the spine to be circular""" self._patch_type = 'circle' self._center = center self._width = radius * 2 self._height = radius * 2 self._angle = 0 # circle drawn on axes transform self.set_transform(self.axes.transAxes) self.stale = True def set_patch_line(self): """set the spine to be linear""" self._patch_type = 'line' self.stale = True # Behavior copied from mpatches.Ellipse: def _recompute_transform(self): """NOTE: This cannot be called until after this has been added to an Axes, otherwise unit conversion will fail. This maxes it very important to call the accessor method and not directly access the transformation member variable. 
""" assert self._patch_type == 'circle' center = (self.convert_xunits(self._center[0]), self.convert_yunits(self._center[1])) width = self.convert_xunits(self._width) height = self.convert_yunits(self._height) self._patch_transform = mtransforms.Affine2D() \ .scale(width * 0.5, height * 0.5) \ .rotate_deg(self._angle) \ .translate(*center) def get_patch_transform(self): if self._patch_type == 'circle': self._recompute_transform() return self._patch_transform else: return super(Spine, self).get_patch_transform() def get_path(self): return self._path def _ensure_position_is_set(self): if self._position is None: # default position self._position = ('outward', 0.0) # in points self.set_position(self._position) def register_axis(self, axis): """register an axis An axis should be registered with its corresponding spine from the Axes instance. This allows the spine to clear any axis properties when needed. """ self.axis = axis if self.axis is not None: self.axis.cla() self.stale = True def cla(self): """Clear the current spine""" self._position = None # clear position if self.axis is not None: self.axis.cla() def is_frame_like(self): """return True if directly on axes frame This is useful for determining if a spine is the edge of an old style MPL plot. If so, this function will return True. 
""" self._ensure_position_is_set() position = self._position if cbook.is_string_like(position): if position == 'center': position = ('axes', 0.5) elif position == 'zero': position = ('data', 0) if len(position) != 2: raise ValueError("position should be 2-tuple") position_type, amount = position if position_type == 'outward' and amount == 0: return True else: return False def _adjust_location(self): """automatically set spine bounds to the view interval""" if self.spine_type == 'circle': return if self._bounds is None: if self.spine_type in ('left', 'right'): low, high = self.axes.viewLim.intervaly elif self.spine_type in ('top', 'bottom'): low, high = self.axes.viewLim.intervalx else: raise ValueError('unknown spine spine_type: %s' % self.spine_type) if self._smart_bounds: # attempt to set bounds in sophisticated way if low > high: # handle inverted limits low, high = high, low viewlim_low = low viewlim_high = high del low, high if self.spine_type in ('left', 'right'): datalim_low, datalim_high = self.axes.dataLim.intervaly ticks = self.axes.get_yticks() elif self.spine_type in ('top', 'bottom'): datalim_low, datalim_high = self.axes.dataLim.intervalx ticks = self.axes.get_xticks() # handle inverted limits ticks = list(ticks) ticks.sort() ticks = np.array(ticks) if datalim_low > datalim_high: datalim_low, datalim_high = datalim_high, datalim_low if datalim_low < viewlim_low: # Data extends past view. Clip line to view. low = viewlim_low else: # Data ends before view ends. cond = (ticks <= datalim_low) & (ticks >= viewlim_low) tickvals = ticks[cond] if len(tickvals): # A tick is less than or equal to lowest data point. low = tickvals[-1] else: # No tick is available low = datalim_low low = max(low, viewlim_low) if datalim_high > viewlim_high: # Data extends past view. Clip line to view. high = viewlim_high else: # Data ends before view ends. 
cond = (ticks >= datalim_high) & (ticks <= viewlim_high) tickvals = ticks[cond] if len(tickvals): # A tick is greater than or equal to highest data # point. high = tickvals[0] else: # No tick is available high = datalim_high high = min(high, viewlim_high) else: low, high = self._bounds v1 = self._path.vertices assert v1.shape == (2, 2), 'unexpected vertices shape' if self.spine_type in ['left', 'right']: v1[0, 1] = low v1[1, 1] = high elif self.spine_type in ['bottom', 'top']: v1[0, 0] = low v1[1, 0] = high else: raise ValueError('unable to set bounds for spine "%s"' % self.spine_type) @allow_rasterization def draw(self, renderer): self._adjust_location() ret = super(Spine, self).draw(renderer) self.stale = False return ret def _calc_offset_transform(self): """calculate the offset transform performed by the spine""" self._ensure_position_is_set() position = self._position if cbook.is_string_like(position): if position == 'center': position = ('axes', 0.5) elif position == 'zero': position = ('data', 0) assert len(position) == 2, "position should be 2-tuple" position_type, amount = position assert position_type in ('axes', 'outward', 'data') if position_type == 'outward': if amount == 0: # short circuit commonest case self._spine_transform = ('identity', mtransforms.IdentityTransform()) elif self.spine_type in ['left', 'right', 'top', 'bottom']: offset_vec = {'left': (-1, 0), 'right': (1, 0), 'bottom': (0, -1), 'top': (0, 1), }[self.spine_type] # calculate x and y offset in dots offset_x = amount * offset_vec[0] / 72.0 offset_y = amount * offset_vec[1] / 72.0 self._spine_transform = ('post', mtransforms.ScaledTranslation( offset_x, offset_y, self.figure.dpi_scale_trans)) else: warnings.warn('unknown spine type "%s": no spine ' 'offset performed' % self.spine_type) self._spine_transform = ('identity', mtransforms.IdentityTransform()) elif position_type == 'axes': if self.spine_type in ('left', 'right'): self._spine_transform = ('pre', 
mtransforms.Affine2D.from_values( # keep y unchanged, fix x at # amount 0, 0, 0, 1, amount, 0)) elif self.spine_type in ('bottom', 'top'): self._spine_transform = ('pre', mtransforms.Affine2D.from_values( # keep x unchanged, fix y at # amount 1, 0, 0, 0, 0, amount)) else: warnings.warn('unknown spine type "%s": no spine ' 'offset performed' % self.spine_type) self._spine_transform = ('identity', mtransforms.IdentityTransform()) elif position_type == 'data': if self.spine_type in ('right', 'top'): # The right and top spines have a default position of 1 in # axes coordinates. When specifying the position in data # coordinates, we need to calculate the position relative to 0. amount -= 1 if self.spine_type in ('left', 'right'): self._spine_transform = ('data', mtransforms.Affine2D().translate( amount, 0)) elif self.spine_type in ('bottom', 'top'): self._spine_transform = ('data', mtransforms.Affine2D().translate( 0, amount)) else: warnings.warn('unknown spine type "%s": no spine ' 'offset performed' % self.spine_type) self._spine_transform = ('identity', mtransforms.IdentityTransform()) def set_position(self, position): """set the position of the spine Spine position is specified by a 2 tuple of (position type, amount). The position types are: * 'outward' : place the spine out from the data area by the specified number of points. (Negative values specify placing the spine inward.) * 'axes' : place the spine at the specified Axes coordinate (from 0.0-1.0). * 'data' : place the spine at the specified data coordinate. 
Additionally, shorthand notations define a special positions: * 'center' -> ('axes',0.5) * 'zero' -> ('data', 0.0) """ if position in ('center', 'zero'): # special positions pass else: if len(position) != 2: raise ValueError("position should be 'center' or 2-tuple") if position[0] not in ['outward', 'axes', 'data']: msg = ("position[0] should be in [ 'outward' | 'axes' |" " 'data' ]") raise ValueError(msg) self._position = position self._calc_offset_transform() self.set_transform(self.get_spine_transform()) if self.axis is not None: self.axis.reset_ticks() self.stale = True def get_position(self): """get the spine position""" self._ensure_position_is_set() return self._position def get_spine_transform(self): """get the spine transform""" self._ensure_position_is_set() what, how = self._spine_transform if what == 'data': # special case data based spine locations data_xform = self.axes.transScale + \ (how + self.axes.transLimits + self.axes.transAxes) if self.spine_type in ['left', 'right']: result = mtransforms.blended_transform_factory( data_xform, self.axes.transData) elif self.spine_type in ['top', 'bottom']: result = mtransforms.blended_transform_factory( self.axes.transData, data_xform) else: raise ValueError('unknown spine spine_type: %s' % self.spine_type) return result if self.spine_type in ['left', 'right']: base_transform = self.axes.get_yaxis_transform(which='grid') elif self.spine_type in ['top', 'bottom']: base_transform = self.axes.get_xaxis_transform(which='grid') else: raise ValueError('unknown spine spine_type: %s' % self.spine_type) if what == 'identity': return base_transform elif what == 'post': return base_transform + how elif what == 'pre': return how + base_transform else: raise ValueError("unknown spine_transform type: %s" % what) def set_bounds(self, low, high): """Set the bounds of the spine.""" if self.spine_type == 'circle': raise ValueError( 'set_bounds() method incompatible with circular spines') self._bounds = (low, high) self.stale = 
True def get_bounds(self): """Get the bounds of the spine.""" return self._bounds @classmethod def linear_spine(cls, axes, spine_type, **kwargs): """ (staticmethod) Returns a linear :class:`Spine`. """ # all values of 13 get replaced upon call to set_bounds() if spine_type == 'left': path = mpath.Path([(0.0, 13), (0.0, 13)]) elif spine_type == 'right': path = mpath.Path([(1.0, 13), (1.0, 13)]) elif spine_type == 'bottom': path = mpath.Path([(13, 0.0), (13, 0.0)]) elif spine_type == 'top': path = mpath.Path([(13, 1.0), (13, 1.0)]) else: raise ValueError('unable to make path for spine "%s"' % spine_type) result = cls(axes, spine_type, path, **kwargs) result.set_visible(rcParams['axes.spines.{0}'.format(spine_type)]) return result @classmethod def circular_spine(cls, axes, center, radius, **kwargs): """ (staticmethod) Returns a circular :class:`Spine`. """ path = mpath.Path.unit_circle() spine_type = 'circle' result = cls(axes, spine_type, path, **kwargs) result.set_patch_circle(center, radius) return result def set_color(self, c): """ Set the edgecolor. ACCEPTS: matplotlib color arg or sequence of rgba tuples .. seealso:: :meth:`set_facecolor`, :meth:`set_edgecolor` For setting the edge or face color individually. """ # The facecolor of a spine is always 'none' by default -- let # the user change it manually if desired. self.set_edgecolor(c) self.stale = True
invitu/odoomrp-wip
refs/heads/8.0
purchase_pricelist_rules/__openerp__.py
11
# -*- encoding: utf-8 -*- ############################################################################## # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see http://www.gnu.org/licenses/. # ############################################################################## { "name": "Pricelist Rules - Purchase extension", "version": "1.0", "depends": [ "purchase", "account", "purchase_discount", "product", "product_pricelist_rules", ], "author": "OdooMRP team," "AvanzOSC," "Serv. Tecnol. Avanzados - Pedro M. Baeza", "contributors": [ "Oihane Crucelaegui <oihanecrucelaegi@avanzosc.es>", "Pedro M. Baeza <pedro.baeza@serviciosbaeza.com>", "Ana Juaristi <ajuaristio@gmail.com>", ], "category": "Hidden/Dependency", "website": "http://www.odoomrp.com", "summary": "", "data": [ "views/purchase_pricelist_view.xml", "views/purchase_view.xml", "views/pricelist_view.xml", "security/ir.model.access.csv", "security/purchase_pricelist_rules_security.xml", ], "installable": True, "auto_install": True, }
fanhero/thumbor
refs/heads/master
thumbor/storages/mixed_storage.py
1
#!/usr/bin/python # -*- coding: utf-8 -*- # thumbor imaging service # https://github.com/thumbor/thumbor/wiki # Licensed under the MIT license: # http://www.opensource.org/licenses/mit-license # Copyright (c) 2011 globo.com thumbor@googlegroups.com from thumbor.storages import BaseStorage from tornado import gen class Storage(BaseStorage): def __init__(self, context, file_storage=None, crypto_storage=None, detector_storage=None): BaseStorage.__init__(self, context) self.file_storage = file_storage self.crypto_storage = crypto_storage self.detector_storage = detector_storage def _init_file_storage(self): if self.file_storage is None: self.context.modules.importer.import_item( config_key='file_storage', item_value=self.context.config.MIXED_STORAGE_FILE_STORAGE, class_name='Storage' ) self.file_storage = self.context.modules.file_storage = self.context.modules.importer.file_storage(self.context) def _init_crypto_storage(self): if self.crypto_storage is None: self.context.modules.importer.import_item( config_key='crypto_storage', item_value=self.context.config.MIXED_STORAGE_CRYPTO_STORAGE, class_name='Storage' ) self.crypto_storage = self.context.modules.crypto_storage = self.context.modules.importer.crypto_storage(self.context) def _init_detector_storage(self): if self.detector_storage is None: self.context.modules.importer.import_item( config_key='detector_storage', item_value=self.context.config.MIXED_STORAGE_DETECTOR_STORAGE, class_name='Storage' ) self.detector_storage = self.context.modules.detector_storage = \ self.context.modules.importer.detector_storage(self.context) def put(self, path, bytes): self._init_file_storage() self.file_storage.put(path, bytes) def put_detector_data(self, path, data): self._init_detector_storage() self.detector_storage.put_detector_data(path, data) def put_crypto(self, path): self._init_crypto_storage() self.crypto_storage.put_crypto(path) @gen.coroutine def get_crypto(self, path): self._init_crypto_storage() result = yield 
gen.maybe_future(self.crypto_storage.get_crypto(path)) raise gen.Return(result) @gen.coroutine def get_detector_data(self, path): self._init_detector_storage() result = yield gen.maybe_future(self.detector_storage.get_detector_data(path)) raise gen.Return(result) @gen.coroutine def get(self, path): self._init_file_storage() result = yield gen.maybe_future(self.file_storage.get(path)) raise gen.Return(result) @gen.coroutine def exists(self, path): print(path) self._init_file_storage() if self.context.config.VARIABLE_RESULTS_ENABLED: self.file_storage.set_bucket(path.split('/')[0]) result = yield gen.maybe_future(self.file_storage.exists(path)) raise gen.Return(result) def resolve_original_photo_path(self, request, filename): return self.file_storage.resolve_original_photo_path(request, filename)
dougfelt/nototools
refs/heads/master
nototools/extra_locale_data.py
1
#!/usr/bin/env python # -*- coding: UTF-8 -*- # # Copyright 2014 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Extra locale data that's still missing from CLDR.""" __author__ = 'roozbeh@google.com (Roozbeh Pournader)' LIKELY_SUBTAGS = { 'abr': ('abr', 'Latn', 'GH'), # Abron 'abq': ('abq', 'Cyrl', 'RU'), # Abaza 'ada': ('ada', 'Latn', 'GH'), # Adangme 'ae': ('ae', 'Avst', 'ZZ'), # Avestan 'aeb': ('aeb', 'Arab', 'TN'), # Tunisian Arabic 'aii': ('aii', 'Syrc', 'IQ'), # Assyrian Neo-Aramaic 'ain': ('ain', 'Kana', 'JP'), # Ainu 'akk': ('akk', 'Xsux', 'ZZ'), # Akkadian 'akz': ('akz', 'Latn', 'US'), # Alabama 'ale': ('ale', 'Latn', 'US'), # Aleut 'aln': ('aln', 'Latn', 'XK'), # Gheg Albanian 'an': ('an', 'Latn', 'ES'), # Aragonese 'anp': ('anp', 'Deva', 'IN'), # Angika 'arc': ('arc', 'Armi', 'ZZ'), # Imperial Aramaic 'aro': ('aro', 'Latn', 'BO'), # Araona 'arp': ('arp', 'Latn', 'US'), # Arapaho 'arq': ('arq', 'Arab', 'DZ'), # Algerian Arabic 'arw': ('arw', 'Latn', 'GY'), # Arawak 'ary': ('ary', 'Arab', 'MA'), # Moroccan Arabic 'arz': ('arz', 'Arab', 'EG'), # Egyptian Arabic 'avk': ('avk', 'Latn', '001'), # Kotava 'azb': ('azb', 'Arab', 'IR'), # Southern Azerbaijani 'bar': ('bar', 'Latn', 'AT'), # Bavarian 'ber': ('ber', 'Arab', 'MA'), # Berber 'bej': ('bej', 'Arab', 'SD'), # Beja 'bci': ('bci', 'Latn', 'CI'), # Baoulé 'bgc': ('bgc', 'Deva', 'IN'), # Haryanvi 'bhi': ('bhi', 'Deva', 'IN'), # Bhilali 'bhk': ('bhk', 'Latn', 'PH'), # Albay Bikol 'bla': 
('bla', 'Latn', 'CA'), # Blackfoot 'blt': ('blt', 'Tavt', 'VN'), # Tai Dam 'bpy': ('bpy', 'Beng', 'IN'), # Bishnupriya 'bqi': ('bqi', 'Arab', 'IR'), # Bakhtiari 'bsq': ('bsq', 'Bass', 'LR'), # Bassa 'bzx': ('bzx', 'Latn', 'ML'), # Kelengaxo Bozo 'cad': ('cad', 'Latn', 'US'), # Caddo 'car': ('car', 'Latn', 'VE'), # Galibi Carib 'cay': ('cay', 'Latn', 'CA'), # Cayuga 'chn': ('chn', 'Latn', 'US'), # Chinook Jargon 'cho': ('cho', 'Latn', 'US'), # Choctaw 'chy': ('chy', 'Latn', 'US'), # Cheyenne 'cjs': ('cjs', 'Cyrl', 'RU'), # Shor 'ckt': ('ckt', 'Cyrl', 'RU'), # Chukchi 'cop': ('cop', 'Copt', 'EG'), # Coptic 'cpf': ('cpf', 'Latn', 'HT'), # Creoles, French 'cps': ('cps', 'Latn', 'PH'), # Capiznon 'crh': ('crh', 'Latn', 'UA'), # Crimean Tatar 'crs': ('crs', 'Latn', 'SC'), # Seselwa Creole French 'ctd': ('ctd', 'Latn', 'MM'), # Tedim Chin 'dak': ('dak', 'Latn', 'US'), # Dakota 'dcc': ('dcc', 'Arab', 'IN'), # Deccan 'del': ('del', 'Latn', 'US'), # Delaware 'din': ('din', 'Latn', 'SS'), # Dinka 'dng': ('dng', 'Cyrl', 'KG'), # Dungan 'dtp': ('dtp', 'Latn', 'MY'), # Central Dusun 'egl': ('egl', 'Latn', 'IT'), # Emilian 'egy': ('egy', 'Egyp', 'ZZ'), # Ancient Egyptian 'eka': ('eka', 'Latn', 'NG'), # Ekajuk 'eky': ('eky', 'Kali', 'TH'), # Eastern Kayah 'esu': ('esu', 'Latn', 'US'), # Central Yupik 'ett': ('ett', 'Ital', 'IT'), # Etruscan 'evn': ('evn', 'Latn', 'CN'), # Evenki 'ext': ('ext', 'Latn', 'ES'), # Extremaduran 'ffm': ('ffm', 'Latn', 'ML'), # Maasina Fulfulde 'frc': ('frc', 'Latn', 'US'), # Cajun French 'frr': ('frr', 'Latn', 'DE'), # Northern Frisian 'frs': ('frs', 'Latn', 'DE'), # Eastern Frisian 'fud': ('fud', 'Latn', 'WF'), # East Futuna 'fuq': ('fuq', 'Latn', 'NE'), # Central-Eastern Niger Fulfulde 'fuv': ('fuv', 'Latn', 'NG'), # Nigerian Fulfulde 'gan': ('gan', 'Hans', 'CN'), # Gan Chinese 'gay': ('gay', 'Latn', 'ID'), # Gayo 'gba': ('gba', 'Latn', 'CF'), # Gbaya 'gbz': ('gbz', 'Arab', 'IR'), # Zoroastrian Dari 'gld': ('gld', 'Cyrl', 'RU'), # Nanai 'gom': ('gom', 
'Deva', 'IN'), # Goan Konkani 'got': ('got', 'Goth', 'ZZ'), # Gothic 'grb': ('grb', 'Latn', 'LR'), # Grebo 'grc': ('grc', 'Grek', 'ZZ'), # Ancient Greek 'guc': ('guc', 'Latn', 'CO'), # Wayuu 'gur': ('gur', 'Latn', 'GH'), # Frafra 'hai': ('hai', 'Latn', 'CA'), # Haida 'hak': ('hak', 'Hant', 'CN'), # Hakka Chinese 'haz': ('haz', 'Arab', 'AF'), # Hazaragi 'hif': ('hif', 'Deva', 'FJ'), # Fiji Hindi 'hit': ('hit', 'Xsux', 'ZZ'), # Hittite 'hmd': ('hmd', 'Plrd', 'CN'), # A-Hmao 'hmn': ('hmn', 'Latn', 'CN'), # Hmong 'hnj': ('hnj', 'Latn', 'LA'), # Hmong Njua 'hno': ('hno', 'Arab', 'PK'), # Northern Hindko 'hop': ('hop', 'Latn', 'US'), # Hopi 'hsn': ('hsn', 'Hans', 'CN'), # Xiang Chinese 'hup': ('hup', 'Latn', 'US'), # Hupa 'hz': ('hz', 'Latn', 'NA'), # Herero 'iba': ('iba', 'Latn', 'MY'), # Iban 'ikt': ('ikt', 'Latn', 'CA'), # Inuinnaqtun 'izh': ('izh', 'Latn', 'RU'), # Ingrian 'jam': ('jam', 'Latn', 'JM'), # Jamaican Creole English 'jpr': ('jpr', 'Hebr', 'IL'), # Judeo-Persian 'jrb': ('jrb', 'Hebr', 'IL'), # Jedeo-Arabic 'jut': ('jut', 'Latn', 'DK'), # Jutish 'kac': ('kac', 'Latn', 'MM'), # Kachin 'kca': ('kca', 'Cyrl', 'RU'), # Khanty 'kfy': ('kfy', 'Deva', 'IN'), # Kumaoni 'kjh': ('kjh', 'Cyrl', 'RU'), # Khakas 'kkh': ('kkh', 'Lana', 'MM'), # Khün 'khn': ('khn', 'Deva', 'IN'), # Khandesi 'kiu': ('kiu', 'Latn', 'TR'), # Kirmanjki 'kpy': ('kpy', 'Cyrl', 'RU'), # Koryak 'kr': ('kr', 'Arab', 'NG'), # Kanuri 'krj': ('krj', 'Latn', 'PH'), # Kinaray-a 'kut': ('kut', 'Latn', 'CA'), # Kutenai 'kxm': ('kxm', 'Thai', 'TH'), # Northern Khmer 'kyu': ('kyu', 'Kali', 'MM'), # Western Kayah 'lab': ('lab', 'Lina', 'ZZ'), # Linear A 'lad': ('lad', 'Latn', 'IL'), # Ladino 'lam': ('lam', 'Latn', 'ZM'), # Lamba 'laj': ('laj', 'Latn', 'UG'), # Lango 'lfn': ('lfn', 'Latn', '001'), # Lingua Franca Nova 'lij': ('lij', 'Latn', 'IT'), # Ligurian 'liv': ('liv', 'Latn', 'LV'), # Livonian 'ljp': ('ljp', 'Latn', 'ID'), # Lampung Api 'lrc': ('lrc', 'Arab', 'IR'), # Northern Luri 'ltg': ('ltg', 
'Latn', 'LV'), # Latgalian 'lui': ('lui', 'Latn', 'US'), # Luiseno 'lun': ('lun', 'Latn', 'ZM'), # Lunda 'lus': ('lus', 'Latn', 'IN'), # Mizo 'lut': ('lut', 'Latn', 'US'), # Lushootseed 'lzh': ('lzh', 'Hant', 'CN'), # Literary Chinese 'lzz': ('lzz', 'Latn', 'TR'), # Laz 'mdt': ('mdt', 'Latn', 'CG'), # Mbere 'mfa': ('mfa', 'Arab', 'TH'), # Pattani Malay 'mic': ('mic', 'Latn', 'CA'), # Micmac 'mnc': ('mnc', 'Mong', 'CN'), # Manchu 'mns': ('mns', 'Cyrl', 'RU'), # Mansi 'mro': ('mro', 'Mroo', 'BD'), # Mru (dlf, also Latn?) 'mtr': ('mtr', 'Deva', 'IN'), # Mewari 'mus': ('mus', 'Latn', 'US'), # Creek 'mwl': ('mwl', 'Latn', 'PT'), # Mirandese 'mwv': ('mwv', 'Latn', 'ID'), # Mentawai 'myx': ('myx', 'Latn', 'UG'), # Masaaba 'myz': ('myz', 'Mand', 'ZZ'), # Classical Mandaic 'mzn': ('mzn', 'Arab', 'IR'), # Mazanderani 'nan': ('nan', 'Latn', 'CN'), # Min Nan Chinese 'ndc': ('ndc', 'Latn', 'MZ'), # Ndau 'ngl': ('ngl', 'Latn', 'MZ'), # Lomwe 'nia': ('nia', 'Latn', 'ID'), # Nias 'njo': ('njo', 'Latn', 'IN'), # Ao Naga 'noe': ('noe', 'Deva', 'IN'), # Nimadi 'nog': ('nog', 'Cyrl', 'RU'), # Nogai 'non': ('non', 'Runr', 'ZZ'), # Old Norse 'nov': ('nov', 'Latn', '001'), # Novial 'nyo': ('nyo', 'Latn', 'UG'), # Nyoro 'nzi': ('nzi', 'Latn', 'GH'), # Nzima 'ohu': ('ohu', 'Hung', 'HR'), # Old Hungarian 'oj': ('oj', 'Latn', 'CA'), # Ojibwa 'osa': ('osa', 'Latn', 'US'), # Osage 'osc': ('osc', 'Ital', 'ZZ'), # Oscan 'otk': ('otk', 'Orkh', 'ZZ'), # Old Turkish 'pal': ('pal', 'Phli', 'ZZ'), # Pahlavi FIXME: should really be 'Phlv' 'pcd': ('pcd', 'Latn', 'FR'), # Picard 'pdc': ('pdc', 'Latn', 'US'), # Pennsylvania German 'pdt': ('pdt', 'Latn', 'CA'), # Plautdietsch 'peo': ('peo', 'Xpeo', 'ZZ'), # Old Persian 'pfl': ('pfl', 'Latn', 'DE'), # Palatine German 'phn': ('phn', 'Phnx', 'ZZ'), # Phoenician 'pi': ('pi', 'Brah', 'ZZ'), # Pali 'pms': ('pms', 'Latn', 'IT'), # Piedmontese 'pnt': ('pnt', 'Grek', 'GR'), # Pontic 'prs': ('prs', 'Arab', 'AF'), # Dari 'qug': ('qug', 'Latn', 'EC'), # Chimborazo 
Highland Quichua 'rom': ('rom', 'Latn', 'RO'), # Romany 'sck': ('sck', 'Deva', 'IN'), # Sadri 'skr': ('skr', 'Arab', 'PK'), # Seraiki 'sou': ('sou', 'Thai', 'TH'), # Southern Thai 'swv': ('swv', 'Deva', 'IN'), # Shekhawati 'tab': ('tab', 'Cyrl', 'RU'), # Tabassaran (dlf) 'ude': ('ude', 'Cyrl', 'RU'), # Udihe (dlf) 'uga': ('uga', 'Ugar', 'ZZ'), # Ugaritic 'vep': ('vep', 'Latn', 'RU'), # Veps 'vmw': ('vmw', 'Latn', 'MZ'), # Makhuwa 'wbr': ('wbr', 'Deva', 'IN'), # Wagdi 'wbq': ('wbq', 'Telu', 'IN'), # Waddar 'wls': ('wls', 'Latn', 'WF'), # Wallisian 'wtm': ('wtm', 'Deva', 'IN'), # Mewati 'yrk': ('yrk', 'Cyrl', 'RU'), # Nenets (dlf) 'xnr': ('xnr', 'Deva', 'IN'), # Kangri 'xum': ('xum', 'Ital', 'ZZ'), # Umbrian (dlf) 'zdj': ('zdj', 'Arab', 'KM'), # Ngazidja Comorian 'und-Mult': ('skr', 'Mult', 'ZZ'), # ancient writing system for Saraiki, # Arabic now used 'und-Hung': ('ohu', 'Hung', 'ZZ'), # Old Hungarian, Carpathian basin 'und-Hluw': ('hlu', 'Hluw', 'ZZ'), # Hieroglyphic Luwian 'und-Ahom': ('aho', 'Ahom', 'ZZ'), # Ahom } ENGLISH_SCRIPT_NAMES = { 'Cans': u'Canadian Aboriginal', # shorten name for display purposes, #match Noto font name } ENGLISH_LANGUAGE_NAMES = { 'abr': u'Abron', 'abq': u'Abaza', 'aho': u'Ahom', 'aii': u'Assyrian Neo-Aramaic', 'akz': u'Alabama', 'amo': u'Amo', 'aoz': u'Uab Meto', 'atj': u'Atikamekw', 'bap': u'Bantawa', 'bci': u'Baoulé', 'ber': u'Berber', 'bft': u'Balti', 'bfy': u'Bagheli', 'bgc': u'Haryanvi', 'bgx': u'Balkan Gagauz Turkish', 'bh': u'Bihari', 'bhb': u'Bhili', 'bhi': u'Bhilali', 'bhk': u'Albay Bikol', 'bjj': u'Kanauji', 'bku': u'Buhid', 'blt': u'Tai Dam', 'bmq': u'Bomu', 'bqi': u'Bakhtiari', 'bqv': u'Koro Wachi', 'bsq': u'Bassa', 'bto': u'Rinconada Bikol', 'btv': u'Bateri', 'buc': u'Bushi', 'bvb': u'Bube', 'bya': u'Batak', 'bze': u'Jenaama Bozo', 'bzx': u'Kelengaxo Bozo', 'ccp': u'Chakma', 'cja': u'Western Cham', 'cjs': u'Shor', 'cjm': u'Eastern Cham', 'ckt': u'Chukchi', 'cpf': u'French-based Creoles', 'crj': u'Southern East Cree', 
'crk': u'Plains Cree', 'crl': u'Northern East Cree', 'crm': u'Moose Cree', 'crs': u'Seselwa Creole French', 'csw': u'Swampy Cree', 'ctd': u'Tedim Chin', 'dcc': u'Deccan', 'dng': u'Dungan', 'dnj': u'Dan', 'dtm': u'Tomo Kan Dogon', 'eky': u'Eastern Kayah', 'ett': u'Etruscan', 'evn': u'Evenki', 'ffm': u'Maasina Fulfulde', 'fud': u'East Futuna', 'fuq': u'Central-Eastern Niger Fulfulde', 'fuv': u'Nigerian Fulfulde', 'gbm': u'Garhwali', 'gcr': u'Guianese Creole French', 'ggn': u'Eastern Gurung', 'gjk': u'Kachi Koli', 'gju': u'Gujari', 'gld': u'Nanai', 'gos': u'Gronings', 'grt': u'Garo', 'gub': u'Guajajára', 'gvr': u'Western Gurung', 'haz': u'Hazaragi', 'hlu': u'Hieroglyphic Luwian', 'hmd': u'A-Hmao', 'hnd': u'Southern Hindko', 'hne': u'Chhattisgarhi', 'hnj': u'Hmong Njua', 'hnn': u'Hanunoo', 'hno': u'Northern Hindko', 'hoc': u'Ho', 'hoj': u'Haroti', 'hop': u'Hopi', 'ikt': u'Inuinnaqtun', 'jml': u'Jumli', 'kao': u'Xaasongaxango', 'kca': u'Khanty', 'kck': u'Kalanga', 'kdt': u'Kuy', 'kfr': u'Kachchi', 'kfy': u'Kumaoni', 'kge': u'Komering', 'khb': u'Lü', 'khn': u'Khandesi', 'kht': u'Khamti', 'kjg': u'Khmu', 'kjh': u'Khakas', 'kkh': u'Khün', 'kpy': u'Koryak', 'kvr': u'Kerinci', 'kvx': u'Parkari Koli', 'kxm': u'Northern Khmer', 'kxp': u'Wadiyara Koli', 'kyu': u'Western Kayah', 'lab': u'Linear A', 'laj': u'Lango', 'lbe': u'Lak', 'lbw': u'Tolaki', 'lcp': u'Western Lawa', 'lep': u'Lepcha', 'lif': u'Limbu', 'lis': u'Lisu', 'ljp': u'Lampung Api', 'lki': u'Laki', 'lmn': u'Lambadi', 'lrc': u'Northern Luri', 'lut': u'Lushootseed', 'luz': u'Southern Luri', 'lwl': u'Eastern Lawa', 'maz': u'Central Mazahua', 'mdh': u'Maguindanaon', 'mdt': u'Mbere', 'mfa': u'Pattani Malay', 'mgp': u'Eastern Magar', 'mgy': u'Mbunga', 'mns': u'Mansi', 'mnw': u'Mon', 'moe': u'Montagnais', 'mrd': u'Western Magar', 'mro': u'Mru', 'mru': u'Cameroon Mono', 'mtr': u'Mewari', 'mvy': u'Indus Kohistani', 'mwk': u'Kita Maninkakan', 'mxc': u'Manyika', 'myx': u'Masaaba', 'myz': u'Classical Mandaic', 'nch': u'Central 
Huasteca Nahuatl', 'ndc': u'Ndau', 'ngl': u'Lomwe', 'nhe': u'Eastern Huasteca Nahuatl', 'nhw': u'Western Huasteca Nahuatl', 'nij': u'Ngaju', 'nod': u'Northern Thai', 'noe': u'Nimadi', 'nsk': u'Naskapi', 'nxq': u'Naxi', 'ohu': u'Old Hungarian', 'osc': u'Oscan', 'otk': u'Old Turkish', 'pcm': u'Nigerian Pidgin', 'pka': u'Ardhamāgadhī Prākrit', 'pko': u'Pökoot', 'pra': u'Prakrit', # language family name 'prd': u'Parsi-Dari', 'prs': u'Dari', 'puu': u'Punu', 'rcf': u'Réunion Creole French', 'rej': u'Rejang', 'ria': u'Riang', # (India) 'rjs': u'Rajbanshi', 'rkt': u'Rangpuri', 'rmf': u'Kalo Finnish Romani', 'rmo': u'Sinte Romani', 'rmt': u'Domari', 'rmu': u'Tavringer Romani', 'rng': u'Ronga', 'rob': u'Tae’', 'ryu': u'Central Okinawan', 'saf': u'Safaliba', 'sck': u'Sadri', 'scs': u'North Slavey', 'sdh': u'Southern Kurdish', 'sef': u'Cebaara Senoufo', 'skr': u'Seraiki', 'smp': u'Samaritan', 'sou': u'Southern Thai', 'srb': u'Sora', 'srx': u'Sirmauri', 'swv': u'Shekhawati', 'sxn': u'Sangir', 'syi': u'Seki', 'syl': u'Sylheti', 'tab': u'Tabassaran', 'taj': u'Eastern Tamang', 'tbw': u'Tagbanwa', 'tdd': u'Tai Nüa', 'tdg': u'Western Tamang', 'tdh': u'Thulung', 'thl': u'Dangaura Tharu', 'thq': u'Kochila Tharu', 'thr': u'Rana Tharu', 'tkt': u'Kathoriya Tharu', 'tli': u'Tlingit', 'tsf': u'Southwestern Tamang', 'tsg': u'Tausug', 'tsj': u'Tshangla', 'ttj': u'Tooro', 'tts': u'Northeastern Thai', 'ude': u'Udihe', 'uli': u'Ulithian', 'unr': u'Mundari', 'unx': u'Munda', 'vic': u'Virgin Islands Creole English', 'vmw': u'Makhu', 'wbr': u'Wagdi', 'wbq': u'Waddar', 'wls': u'Wallisian', 'wtm': u'Mewati', 'xav': u'Xavánte', 'xcr': u'Carian', 'xlc': u'Lycian', 'xld': u'Lydian', 'xmn': u'Manichaean Middle Persian', 'xmr': u'Meroitic', 'xna': u'Ancient North Arabian', 'xnr': u'Kangri', 'xpr': u'Parthian', 'xsa': u'Sabaean', 'xsr': u'Sherpa', 'xum': u'Umbrian', 'yrk': u'Nenets', 'yua': u'Yucatec Maya', 'zdj': u'Ngazidja Comorian', 'zmi': u'Negeri Sembilan Malay', } # Supplement mapping of languages 
to scripts LANG_TO_SCRIPTS = { 'ber': ['Arab', 'Latn', 'Tfng'], 'hak': ['Hans', 'Hant', 'Latn'], 'nan': ['Hans', 'Hant', 'Latn'], 'yue': ['Hant'], } # Supplement mapping of regions to lang_scripts REGION_TO_LANG_SCRIPTS = { 'CN': ['hak-Hans', 'hak-Latn', 'nan-Hans', 'nan-Latn', 'yue-Hans'], 'HK': ['yue-Hant'], 'MN': ['mn-Mong'], 'MY': ['zh-Hans'], 'TW': ['hak-Hant', 'hak-Latn', 'nan-Hant', 'nan-Latn'], } PARENT_LOCALES = { 'ky-Latn': 'root', 'sd-Deva': 'root', 'tg-Arab': 'root', 'ug-Cyrl': 'root', } NATIVE_NAMES = { 'mn-Mong': u'ᠮᠣᠨᠭᠭᠣᠯ ᠬᠡᠯᠡ', } EXEMPLARS = { 'und-Avst': r'[\U010b00-\U010b35]', 'und-Bali': r'[\u1b05-\u1b33]', 'und-Bamu': r'[\ua6a0-\ua6ef]', 'und-Cham': r'[\uaa00-\uaa28 \uaa50-\uaa59]', 'und-Copt': r'[\u2c80-\u2cb1]', 'und-Egyp': r'[\U013000-\U01303f]', 'und-Hira': r'[\u3041-\u3096\u3099-\u309f\U01b000-\U01b001]', 'und-Java': r'[\ua984-\ua9b2]', 'und-Kali': r'[\ua90a-\ua925 \ua900-\ua909]', 'und-Kana': r'[\u30a0-\u30ff \u31f0-\u31ff]', 'und-Khar': r'[\U010a10-\U010a13\U010a15-\U010a17\U010a19-\U010a33' r'\U010A38-\U010a3a]', 'und-Kthi': r'[\U11080-\U110C1]', 'und-Lana': r'[\u1a20-\u1a4c]', 'und-Lepc': r'[\u1c00-\u1c23]', 'und-Linb': r'[\U010000-\U01000b \U010080-\U01009f]', 'und-Mand': r'[\u0840-\u0858]', 'und-Mtei': r'[\uabc0-\uabe2]', 'und-Orkh': r'[\U010c00-\U010c48]', 'und-Phag': r'[\ua840-\ua877]', 'und-Saur': r'[\ua882-\ua8b3]', 'und-Sund': r'[\u1b83-\u1ba0]', 'und-Sylo': r'[\ua800-\ua82b]', 'und-Tavt': r'[\uaa80-\uaaaf \uaadb-\uaadf]', 'und-Tglg': r'[\u1700-\u170c \u170e-\u1711]', 'und-Ugar': r'[\U010380-\U01039d \U01039f]', 'und-Xsux': r'[\U012000-\U01202f]', 'und-Zmth': r'[\U010AC0-\U010AE6 \U010AEB-\U010AF6]', 'und-Zsye': r'[\u2049\u231a\u231b\u2600\u260e\u2614\u2615\u26fa\u2708\u2709' r'\u270f\u3297\U01f004\U01f170\U01f193\U01f197\U01f30d\U01f318' r'\U01f332\U01f334\U01f335\U01f344\U01f346\U01f352\U01f381' r'\U01f393\U01f3a7\U01f3b8\U01f3e1\U01f402\U01f40a\U01f418' r'\U01f419\U01f41b\U01f41f\U01f422\U01f424\U01f427\U01f44c' 
r'\U01f44d\U01f453\U01f463\U01f4bb\U01f4ce\U01f4d3\U01f4d6' r'\U01f4e1\U01f4fb\U01f511\U01f525\U01f565\U01F63a\U01f680' r'\U01f681\U01f683\U01f686\U01f68c\U01f6a2\U01f6a3\U01f6b4]', 'und-Zsym': r'[\u20ac\u20b9\u2103\u2109\u2115\u2116\u211a\u211e\u2122' r'\u21d0-\u21d3\u2203\u2205\u2207\u2208\u220f\u221e\u2248\u2284' r'\u231a\u23e3\u23f3\u2400\u2460\u24b6\u2523\u2533\u2602\u260e' r'\u2615\u261c\u2637\u263a\u264f\u2656\u2663\u266b' r'\u267b\u267f\u26f9\u2708\u2740\u2762\u2a6b\u2a93\u2a64\u2e19' r'\u4dc3\U010137\U01017B\U0101ef\U01d122\U01d15e\U01d161]' }
rawrgulmuffins/flask
refs/heads/master
flask/exthook.py
142
# -*- coding: utf-8 -*- """ flask.exthook ~~~~~~~~~~~~~ Redirect imports for extensions. This module basically makes it possible for us to transition from flaskext.foo to flask_foo without having to force all extensions to upgrade at the same time. When a user does ``from flask.ext.foo import bar`` it will attempt to import ``from flask_foo import bar`` first and when that fails it will try to import ``from flaskext.foo import bar``. We're switching from namespace packages because it was just too painful for everybody involved. This is used by `flask.ext`. :copyright: (c) 2015 by Armin Ronacher. :license: BSD, see LICENSE for more details. """ import sys import os from ._compat import reraise class ExtensionImporter(object): """This importer redirects imports from this submodule to other locations. This makes it possible to transition from the old flaskext.name to the newer flask_name without people having a hard time. """ def __init__(self, module_choices, wrapper_module): self.module_choices = module_choices self.wrapper_module = wrapper_module self.prefix = wrapper_module + '.' 
self.prefix_cutoff = wrapper_module.count('.') + 1 def __eq__(self, other): return self.__class__.__module__ == other.__class__.__module__ and \ self.__class__.__name__ == other.__class__.__name__ and \ self.wrapper_module == other.wrapper_module and \ self.module_choices == other.module_choices def __ne__(self, other): return not self.__eq__(other) def install(self): sys.meta_path[:] = [x for x in sys.meta_path if self != x] + [self] def find_module(self, fullname, path=None): if fullname.startswith(self.prefix): return self def load_module(self, fullname): if fullname in sys.modules: return sys.modules[fullname] modname = fullname.split('.', self.prefix_cutoff)[self.prefix_cutoff] for path in self.module_choices: realname = path % modname try: __import__(realname) except ImportError: exc_type, exc_value, tb = sys.exc_info() # since we only establish the entry in sys.modules at the # very this seems to be redundant, but if recursive imports # happen we will call into the move import a second time. # On the second invocation we still don't have an entry for # fullname in sys.modules, but we will end up with the same # fake module name and that import will succeed since this # one already has a temporary entry in the modules dict. # Since this one "succeeded" temporarily that second # invocation now will have created a fullname entry in # sys.modules which we have to kill. sys.modules.pop(fullname, None) # If it's an important traceback we reraise it, otherwise # we swallow it and try the next choice. The skipped frame # is the one from __import__ above which we don't care about if self.is_important_traceback(realname, tb): reraise(exc_type, exc_value, tb.tb_next) continue module = sys.modules[fullname] = sys.modules[realname] if '.' 
not in modname: setattr(sys.modules[self.wrapper_module], modname, module) return module raise ImportError('No module named %s' % fullname) def is_important_traceback(self, important_module, tb): """Walks a traceback's frames and checks if any of the frames originated in the given important module. If that is the case then we were able to import the module itself but apparently something went wrong when the module was imported. (Eg: import of an import failed). """ while tb is not None: if self.is_important_frame(important_module, tb): return True tb = tb.tb_next return False def is_important_frame(self, important_module, tb): """Checks a single frame if it's important.""" g = tb.tb_frame.f_globals if '__name__' not in g: return False module_name = g['__name__'] # Python 2.7 Behavior. Modules are cleaned up late so the # name shows up properly here. Success! if module_name == important_module: return True # Some python versions will will clean up modules so early that the # module name at that point is no longer set. Try guessing from # the filename then. filename = os.path.abspath(tb.tb_frame.f_code.co_filename) test_string = os.path.sep + important_module.replace('.', os.path.sep) return test_string + '.py' in filename or \ test_string + os.path.sep + '__init__.py' in filename
prataprc/tayra
refs/heads/master
tayra/runtime.py
1
# -*- coding: utf-8 -*- # This file is subject to the terms and conditions defined in # file 'LICENSE', which is part of this source code package. # Copyright (c) 2011 R Pratap Chakravarthy """ Run-time engine, a stack machine interpreting instructions generated by :class:`tayra.codegen.InstrGen`. Instructions are nothing but methods on the stack machine object. References automatically made available in a template script. ------------------------------------------------------------- ``_m``, Reference to :class:`StackMachine` instance used to generate the final HTML text. ``this``, Every template script can be viewed as an object instance which can be referenced using ``this``. In case of template scripts making use of inheritance feature, ``this`` will always refer to the template script at the end of the inheritance chain. ``local``, For non-inheriting template scripts ``this`` and ``local`` refer to the same object. In case of template scripts using inheritance feature, unlike ``this`` symbol which refers to the template script at the end of the inheritance chain, ``local`` will always refer to the template script object in which it is used. ``parent``, In case of inheriting scripts, ``parent`` will refer to the base template from which ``local`` template script derives. ``next``, In case of inheriting scripts, ``next`` will refer to the deriving template script. All names ``this``, ``local``, ``parent``, ``next`` refer to the same type of object, template-module. Having a refence to template-module allows developers to access global variables and functions defined in the module. In case if a template script is part of an inheritance chain, then, attribute references on the template-module will propogate towards the top of the chain until an attribute is found in one of the base module. ``_compiler``, Refers to :class:`tayra.compiler.TTLCompiler` plugin instance. ``_context``, Refers to the context dictionary. 
``_ttlhash``, Refers to the hash value generated from template text. ``_ttlfile``, File name of template script. ``__compiler``, Temporary variable that is optionally created for compiling and importing library templates. Refers to a :class:`tayra.compiler.TTLCompiler` plugin instance. ``__ttlcode``, Temporary variable that is optionally created while importing library templates. ``<Implemented-interfaces>``, When a template script implements an interface, interface name and plugin name will also be made available in the global context. Other than this every template script will have the following standard imports .. code-block :: python :linenos: import imp from io import StringIO from pluggdapps.plugin import Plugin, implements import pluggdapps.utils as h from tayra import BaseTTLPlugin """ import re import pluggdapps.utils as h from tayra.lexer import TTLLexer from tayra.interfaces import ITayraTags, ITayraExpression, ITayraFilterBlock __traceback_hide__ = True class StackMachine( object ) : """Stack machine instruction interpreterr. Intructions are method calls on this object. Supported instructions are,:: indent, append, extend, pushbuf, popbuf, popbuftext, popobject, handletag, evalexprs, inherit Initializing a stack machine can be a costly operation, so to avoid creating a stack machine object everytime, create it once and call :meth:`_init` before interpreting a new template-module. 
""" def __init__( self, compiler ): self.compiler = compiler self.encoding = compiler.encoding self.tagplugins = [ compiler.qp( ITayraTags, name ) for name in compiler['tag.plugins'] ] self.tagplugins.append( compiler.qp( ITayraTags, 'tayra.Tags' )) self.exprplugins = { p.caname.split('.', 1)[1] : p for p in compiler.qps(ITayraExpression) } self.exprdefault = compiler.qp( ITayraExpression, compiler['expression.default'] ) def _init( self, ifile ): self.bufstack = [ [] ] self.ifile = ifile self.htmlindent = '' #---- Stack machine instructions def indent( self ) : """Add indentation to output HTML text.""" return self.append( self.htmlindent ) def upindent( self, up='' ) : """Increase the indentation level for subsequent HTML text by ``up`` spaces.""" self.htmlindent += up return self.htmlindent def downindent( self, down='' ) : """Decrease the indentation level for subsequent HTML text by ``down`` spaces.""" self.htmlindent = self.htmlindent[:-len(down)] return self.htmlindent def append( self, value ) : """Append ``value`` string to recently pushed buffer in the buffer stack.""" self.bufstack[-1].append( value ) return value def extend( self, value ) : """Extend recently pushed buffer in the buffer stack using ``value`` list.""" if isinstance(value, list) : self.bufstack[-1].extend( value ) else : raise Exception( 'Unable to extend context stack' ) def pushbuf( self, buf=None ) : """Push an empty buffer into the buffer stack.""" buf = [] self.bufstack.append( buf ) return buf def popbuf( self ) : """Pop the recently pushed buffer in the buffer stack and return the same. Return type is a list of strings.""" return self.bufstack.pop(-1) def popbuftext( self ) : """Same as :meth:`popbuf` and additionaly join all the string elements in the buffer and return the same. Return type is string.""" x = ''.join( self.popbuf() ) return x def popobject( self ): """Return the first element from recently pushed buffer in the buffer stack. Discard the remaining buffer. 
If recently pushed buffer is empty, return None.""" lst = self.popbuf() return lst[0] if lst else None regex_tag = re.compile( r'(\{[^\}]*\})|(%s=%s)|(%s)|([^ \t\r\n]+)' % ( TTLLexer.attrname, TTLLexer.attrvalue, TTLLexer.attrvalue )) def handletag( self, contents, tagbegin, **kwargs ): """Parse ``tagbegin`` for tokens, style and attribute. Along with them add ``content`` to pass them to tag-plugin handlers. :meth:`append` the html text returned by the handler into the buffer stack. There are two types of pruning, inner-prunning (specified by <... !>) and outer-prunning (specified by <! ...>). Pruning modifiers are detected by the AST and passed as part of modifier dictionary. optional key-word arguments, ``indent``, Boolean to keep indentation in output html. ``nl``, String. ``iprune``, Boolean, if True, should remove all whitespaces before and after the content enclosed by this tag. ``oprune``, Boolean, if True, should remove all leading and trailining whitespaces around this tag element. 
""" indent = kwargs.get('indent', False) nl = kwargs.get('nl', '') if kwargs.get('iprune',False) : contents = self.iprune(contents) if kwargs.get('oprune', False) : contents = contents tagbegin = tagbegin.replace('\n', ' ')[1:-1] # remove < and > if tagbegin[-1] == '/' : # Self closing tag if contents.strip('\n \t\r') : self.compiler.pa.logwarn( "skipping content after self closing tag %r" % contents ) contents = '' tagbegin = tagbegin[:-1] try : tagname, tagbegin = tagbegin.split(' ', 1) except : tagname, tagbegin = tagbegin, '' styles, attributes, tokens = [], [], [] for m in self.regex_tag.finditer( tagbegin ) : if not m : continue parts = m.groups() parts[0] and styles.append( parts[0][1:-1].strip() ) parts[1] and attributes.append( parts[1] ) tokens.extend( parts[2:] ) tokens = list( filter( None, tokens )) for plugin in self.tagplugins : html = plugin.handle( self, tagname, tokens, styles, attributes, contents ) if html == None : continue self.append( html ) break else : raise Exception("Unable to handle tag %r" % tagname) def evalexprs( self, name, text, filters, globals_, locals_ ) : """Evaluate expression ``text`` in ``globals_`` and ``locals_`` context using plugin prefix in ``text`` or using the TTLCompiler['expression.default'] plugin. Convert the resulting object to string, pipe them to the list of filters specified by ``filters``. ``filters`` is comma separated value of escape filters to be applied on the output string. """ exprp=self.exprplugins['expression'+name] if name else self.exprdefault out = exprp.eval( self, text, globals_, locals_ ) if not filters : return out filters = h.parsecsv( filters ) if 'n' in filters : return out # TODO : if exprdefault and exprp are same plugin, we might end up # calling it twice. Try to improve the performance. 
for f in filters : out1 = exprp.filter( self, f, out ) if out1 == None : out1 = self.exprdefault.filter( self, f, out ) if out1 != None : out = out1 break return out def inherit( self, ttlloc, childglobals ): """Special instruction used by @inherit directive.""" compiler = self.compiler() code = compiler.compilettl( file=ttlloc ) # inherit module parent_context = childglobals['_context'] parent_context.update({ 'this' : childglobals['this'], 'parent' : None, 'next' : childglobals['local'], }) module = compiler.load( code, context=parent_context ) childglobals['this']._linkparent( Namespace( None, module )) childglobals['local'].parent = module return module def hitch( self, obj, cls, interfacefunc, *args, **kwargs ) : def fnhitched( self, *a, **kw ) : kwargs.update( kw ) return interfacefunc( self, *(args+a), **kwargs ) return fnhitched.__get__( obj, cls ) def iprune(self, contents): return contents.strip(' \n\r\t\f') class Namespace( object ): """Namespace wrapper for template-modules to traverse the interitance chain for referred attributes.""" def __init__( self, parentnm, localmod ): self._parentnm = parentnm self._localmod = localmod def __getattr__( self, name ): if self._parentnm : return getattr( self._localmod, name, getattr( self._parentnm, name, None ) ) else : return getattr( self._localmod, name, None ) def __setattr__( self, name, value ): if name in [ '_parentnm', '_localmod' ] : self.__dict__[name] = value else : setattr( self._localmod, name, value ) return value def _linkparent( self, parentnm ): nm, parnm = self, self._parentnm while parnm : nm, parnm = parnm, parnm._parentnm nm._parentnm = parentnm return parentnm
alphafoobar/intellij-community
refs/heads/master
python/helpers/pydev/pydevd_plugins/django_debug.py
51
from pydevd_comm import CMD_SET_BREAK, CMD_ADD_EXCEPTION_BREAK import inspect from pydevd_constants import STATE_SUSPEND, GetThreadId, DictContains, DictIterItems from pydevd_file_utils import NormFileToServer, GetFileNameAndBaseFromFile from pydevd_breakpoints import LineBreakpoint, get_exception_name import pydevd_vars import traceback import pydev_log from pydevd_frame_utils import add_exception_to_frame, FCode, cached_call, just_raised DJANGO_SUSPEND = 2 class DjangoLineBreakpoint(LineBreakpoint): def __init__(self, file, line, condition, func_name, expression): self.file = file LineBreakpoint.__init__(self, line, condition, func_name, expression) def is_triggered(self, template_frame_file, template_frame_line): return self.file == template_frame_file and self.line == template_frame_line def __str__(self): return "DjangoLineBreakpoint: %s-%d" %(self.file, self.line) def add_line_breakpoint(plugin, pydb, type, file, line, condition, expression, func_name): if type == 'django-line': breakpoint = DjangoLineBreakpoint(file, line, condition, func_name, expression) if not hasattr(pydb, 'django_breakpoints'): _init_plugin_breaks(pydb) return breakpoint, pydb.django_breakpoints return None def add_exception_breakpoint(plugin, pydb, type, exception): if type == 'django': if not hasattr(pydb, 'django_exception_break'): _init_plugin_breaks(pydb) pydb.django_exception_break[exception] = True pydb.setTracingForUntracedContexts() return True return False def _init_plugin_breaks(pydb): pydb.django_exception_break = {} pydb.django_breakpoints = {} def remove_exception_breakpoint(plugin, pydb, type, exception): if type == 'django': try: del pydb.django_exception_break[exception] return True except: pass return False def get_breakpoints(plugin, pydb, type): if type == 'django-line': return pydb.django_breakpoints return None def _inherits(cls, *names): if cls.__name__ in names: return True inherits_node = False for base in inspect.getmro(cls): if base.__name__ in names: 
inherits_node = True break return inherits_node def _is_django_render_call(frame): try: name = frame.f_code.co_name if name != 'render': return False if not DictContains(frame.f_locals, 'self'): return False cls = frame.f_locals['self'].__class__ inherits_node = _inherits(cls, 'Node') if not inherits_node: return False clsname = cls.__name__ return clsname != 'TextNode' and clsname != 'NodeList' except: traceback.print_exc() return False def _is_django_context_get_call(frame): try: if not DictContains(frame.f_locals, 'self'): return False cls = frame.f_locals['self'].__class__ return _inherits(cls, 'BaseContext') except: traceback.print_exc() return False def _is_django_resolve_call(frame): try: name = frame.f_code.co_name if name != '_resolve_lookup': return False if not DictContains(frame.f_locals, 'self'): return False cls = frame.f_locals['self'].__class__ clsname = cls.__name__ return clsname == 'Variable' except: traceback.print_exc() return False def _is_django_suspended(thread): return thread.additionalInfo.suspend_type == DJANGO_SUSPEND def suspend_django(mainDebugger, thread, frame, cmd=CMD_SET_BREAK): frame = DjangoTemplateFrame(frame) if frame.f_lineno is None: return None #try: # if thread.additionalInfo.filename == frame.f_code.co_filename and thread.additionalInfo.line == frame.f_lineno: # return None # don't stay twice on the same line #except AttributeError: # pass pydevd_vars.addAdditionalFrameById(GetThreadId(thread), {id(frame): frame}) mainDebugger.setSuspend(thread, cmd) thread.additionalInfo.suspend_type = DJANGO_SUSPEND thread.additionalInfo.filename = frame.f_code.co_filename thread.additionalInfo.line = frame.f_lineno return frame def _find_django_render_frame(frame): while frame is not None and not _is_django_render_call(frame): frame = frame.f_back return frame #======================================================================================================================= # Django Frame 
#======================================================================================================================= def _read_file(filename): f = open(filename, "r") s = f.read() f.close() return s def _offset_to_line_number(text, offset): curLine = 1 curOffset = 0 while curOffset < offset: if curOffset == len(text): return -1 c = text[curOffset] if c == '\n': curLine += 1 elif c == '\r': curLine += 1 if curOffset < len(text) and text[curOffset + 1] == '\n': curOffset += 1 curOffset += 1 return curLine def _get_source(frame): try: node = frame.f_locals['self'] if hasattr(node, 'source'): return node.source else: pydev_log.error_once("WARNING: Template path is not available. Please set TEMPLATE_DEBUG=True in your settings.py to make " " django template breakpoints working") return None except: pydev_log.debug(traceback.format_exc()) return None def _get_template_file_name(frame): try: source = _get_source(frame) if source is None: pydev_log.debug("Source is None\n") return None fname = source[0].name if fname == '<unknown source>': pydev_log.debug("Source name is %s\n" % fname) return None else: filename, base = GetFileNameAndBaseFromFile(fname) return filename except: pydev_log.debug(traceback.format_exc()) return None def _get_template_line(frame): source = _get_source(frame) file_name = _get_template_file_name(frame) try: return _offset_to_line_number(_read_file(file_name), source[1][0]) except: return None class DjangoTemplateFrame: def __init__(self, frame): file_name = _get_template_file_name(frame) self.back_context = frame.f_locals['context'] self.f_code = FCode('Django Template', file_name) self.f_lineno = _get_template_line(frame) self.f_back = frame self.f_globals = {} self.f_locals = self.collect_context(self.back_context) self.f_trace = None def collect_context(self, context): res = {} try: for d in context.dicts: for k, v in d.items(): res[k] = v except AttributeError: pass return res def changeVariable(self, name, value): for d in 
self.back_context.dicts: for k, v in d.items(): if k == name: d[k] = value def change_variable(plugin, frame, attr, expression): if isinstance(frame, DjangoTemplateFrame): result = eval(expression, frame.f_globals, frame.f_locals) frame.changeVariable(attr, result) return result return False def _is_django_exception_break_context(frame): try: name = frame.f_code.co_name except: name = None return name in ['_resolve_lookup', 'find_template'] #======================================================================================================================= # Django Step Commands #======================================================================================================================= def can_not_skip(plugin, mainDebugger, pydb_frame, frame): if mainDebugger.django_breakpoints and _is_django_render_call(frame): filename = _get_template_file_name(frame) django_breakpoints_for_file = mainDebugger.django_breakpoints.get(filename) if django_breakpoints_for_file: return True return False def has_exception_breaks(plugin): if len(plugin.main_debugger.django_exception_break) > 0: return True return False def has_line_breaks(plugin): for file, breakpoints in DictIterItems(plugin.main_debugger.django_breakpoints): if len(breakpoints) > 0: return True return False def cmd_step_into(plugin, mainDebugger, frame, event, args, stop_info, stop): mainDebugger, filename, info, thread = args plugin_stop = False if _is_django_suspended(thread): stop_info['django_stop'] = event == 'call' and _is_django_render_call(frame) plugin_stop = stop_info['django_stop'] stop = stop and _is_django_resolve_call(frame.f_back) and not _is_django_context_get_call(frame) if stop: info.pydev_django_resolve_frame = 1 #we remember that we've go into python code from django rendering frame return stop, plugin_stop def cmd_step_over(plugin, mainDebugger, frame, event, args, stop_info, stop): mainDebugger, filename, info, thread = args plugin_stop = False if _is_django_suspended(thread): 
stop_info['django_stop'] = event == 'call' and _is_django_render_call(frame) plugin_stop = stop_info['django_stop'] stop = False return stop, plugin_stop else: if event == 'return' and info.pydev_django_resolve_frame is not None and _is_django_resolve_call(frame.f_back): #we return to Django suspend mode and should not stop before django rendering frame info.pydev_step_stop = info.pydev_django_resolve_frame info.pydev_django_resolve_frame = None thread.additionalInfo.suspend_type = DJANGO_SUSPEND stop = info.pydev_step_stop is frame and event in ('line', 'return') return stop, plugin_stop def stop(plugin, mainDebugger, frame, event, args, stop_info, arg, step_cmd): mainDebugger, filename, info, thread = args if DictContains(stop_info, 'django_stop') and stop_info['django_stop']: frame = suspend_django(mainDebugger, thread, frame, step_cmd) if frame: mainDebugger.doWaitSuspend(thread, frame, event, arg) return True return False def get_breakpoint(plugin, mainDebugger, pydb_frame, frame, event, args): mainDebugger, filename, info, thread = args flag = False django_breakpoint = None new_frame = None type = 'django' if event == 'call' and info.pydev_state != STATE_SUSPEND and \ mainDebugger.django_breakpoints and _is_django_render_call(frame): filename = _get_template_file_name(frame) pydev_log.debug("Django is rendering a template: %s\n" % filename) django_breakpoints_for_file = mainDebugger.django_breakpoints.get(filename) if django_breakpoints_for_file: pydev_log.debug("Breakpoints for that file: %s\n" % django_breakpoints_for_file) template_line = _get_template_line(frame) pydev_log.debug("Tracing template line: %d\n" % template_line) if DictContains(django_breakpoints_for_file, template_line): django_breakpoint = django_breakpoints_for_file[template_line] flag = True new_frame = DjangoTemplateFrame(frame) return flag, django_breakpoint, new_frame, type def suspend(plugin, mainDebugger, thread, frame, bp_type): if bp_type == 'django': return 
suspend_django(mainDebugger, thread, frame) return None def exception_break(plugin, mainDebugger, pydb_frame, frame, args, arg): mainDebugger, filename, info, thread = args exception, value, trace = arg if mainDebugger.django_exception_break and \ get_exception_name(exception) in ['VariableDoesNotExist', 'TemplateDoesNotExist', 'TemplateSyntaxError'] and \ just_raised(trace) and _is_django_exception_break_context(frame): render_frame = _find_django_render_frame(frame) if render_frame: suspend_frame = suspend_django(mainDebugger, thread, render_frame, CMD_ADD_EXCEPTION_BREAK) if suspend_frame: add_exception_to_frame(suspend_frame, (exception, value, trace)) flag = True thread.additionalInfo.message = 'VariableDoesNotExist' suspend_frame.f_back = frame frame = suspend_frame return (flag, frame) return None
Darkmoth/python-django-4
refs/heads/master
Thing/env/Lib/site-packages/django/utils/dateparse.py
63
"""Functions to parse datetime objects.""" # We're using regular expressions rather than time.strptime because: # - They provide both validation and parsing. # - They're more flexible for datetimes. # - The date/datetime/time constructors produce friendlier error messages. import datetime import re from django.utils import six from django.utils.timezone import get_fixed_timezone, utc date_re = re.compile( r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})$' ) time_re = re.compile( r'(?P<hour>\d{1,2}):(?P<minute>\d{1,2})' r'(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?' ) datetime_re = re.compile( r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})' r'[T ](?P<hour>\d{1,2}):(?P<minute>\d{1,2})' r'(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?' r'(?P<tzinfo>Z|[+-]\d{2}(?::?\d{2})?)?$' ) standard_duration_re = re.compile( r'^' r'(?:(?P<days>-?\d+) )?' r'((?:(?P<hours>\d+):)(?=\d+:\d+))?' r'(?:(?P<minutes>\d+):)?' r'(?P<seconds>\d+)' r'(?:\.(?P<microseconds>\d{1,6})\d{0,6})?' r'$' ) # Support the sections of ISO 8601 date representation that are accepted by # timedelta iso8601_duration_re = re.compile( r'^P' r'(?:(?P<days>\d+(.\d+)?)D)?' r'(?:T' r'(?:(?P<hours>\d+(.\d+)?)H)?' r'(?:(?P<minutes>\d+(.\d+)?)M)?' r'(?:(?P<seconds>\d+(.\d+)?)S)?' r')?' r'$' ) def parse_date(value): """Parses a string and return a datetime.date. Raises ValueError if the input is well formatted but not a valid date. Returns None if the input isn't well formatted. """ match = date_re.match(value) if match: kw = {k: int(v) for k, v in six.iteritems(match.groupdict())} return datetime.date(**kw) def parse_time(value): """Parses a string and return a datetime.time. This function doesn't support time zone offsets. Raises ValueError if the input is well formatted but not a valid time. Returns None if the input isn't well formatted, in particular if it contains an offset. 
""" match = time_re.match(value) if match: kw = match.groupdict() if kw['microsecond']: kw['microsecond'] = kw['microsecond'].ljust(6, '0') kw = {k: int(v) for k, v in six.iteritems(kw) if v is not None} return datetime.time(**kw) def parse_datetime(value): """Parses a string and return a datetime.datetime. This function supports time zone offsets. When the input contains one, the output uses a timezone with a fixed offset from UTC. Raises ValueError if the input is well formatted but not a valid datetime. Returns None if the input isn't well formatted. """ match = datetime_re.match(value) if match: kw = match.groupdict() if kw['microsecond']: kw['microsecond'] = kw['microsecond'].ljust(6, '0') tzinfo = kw.pop('tzinfo') if tzinfo == 'Z': tzinfo = utc elif tzinfo is not None: offset_mins = int(tzinfo[-2:]) if len(tzinfo) > 3 else 0 offset = 60 * int(tzinfo[1:3]) + offset_mins if tzinfo[0] == '-': offset = -offset tzinfo = get_fixed_timezone(offset) kw = {k: int(v) for k, v in six.iteritems(kw) if v is not None} kw['tzinfo'] = tzinfo return datetime.datetime(**kw) def parse_duration(value): """Parses a duration string and returns a datetime.timedelta. The preferred format for durations in Django is '%d %H:%M:%S.%f'. Also supports ISO 8601 representation. """ match = standard_duration_re.match(value) if not match: match = iso8601_duration_re.match(value) if match: kw = match.groupdict() if kw.get('microseconds'): kw['microseconds'] = kw['microseconds'].ljust(6, '0') kw = {k: float(v) for k, v in six.iteritems(kw) if v is not None} return datetime.timedelta(**kw)
pybel/pybel
refs/heads/master
src/pybel/struct/summary/__init__.py
1
# -*- coding: utf-8 -*- """Summary functions for BEL graphs.""" from . import edge_summary, errors, node_summary, provenance from .edge_summary import * from .errors import * from .node_summary import * from .provenance import * __all__ = [k for k in locals() if not k.startswith('_')]
isandlaTech/cohorte-demos
refs/heads/dev
led/dump/led-demo-yun/cohorte/dist/cohorte-1.0.0-20141216.234517-57-python-distribution/repo/sleekxmpp/plugins/xep_0020/__init__.py
12
""" SleekXMPP: The Sleek XMPP Library Copyright (C) 2013 Nathanael C. Fritz, Lance J.T. Stout This file is part of SleekXMPP. See the file LICENSE for copying permission. """ from sleekxmpp.plugins.base import register_plugin from sleekxmpp.plugins.xep_0020 import stanza from sleekxmpp.plugins.xep_0020.stanza import FeatureNegotiation from sleekxmpp.plugins.xep_0020.feature_negotiation import XEP_0020 register_plugin(XEP_0020)
rastala/mmlspark
refs/heads/master
tools/hdi/update_livy.py
1
#!/usr/bin/env python
# Copyright (C) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in project root for information.

"""Point a Livy/Sparkmagic JSON config at a single Maven package.

Usage: update_livy.py <config_file> <maven_package>

Rewrites <config_file> in place so that ``session_configs.conf`` holds
exactly one key, ``spark.jars.packages``, set to the given coordinate.
"""

import sys, json

argc = len(sys.argv)
if argc != 3:
    raise Exception(("Not enough" if argc < 3 else "Too many") + " arguments.")

config_path = sys.argv[1]
maven_coordinate = sys.argv[2]

# Load the existing JSON configuration.
with open(config_path) as handle:
    settings = json.load(handle)

# Replace any previous session conf wholesale with just the package entry.
settings["session_configs"]["conf"] = {"spark.jars.packages": maven_coordinate}

# Write the updated configuration back, pretty-printed with sorted keys.
with open(config_path, "w") as handle:
    json.dump(settings, handle, indent=2, sort_keys=True)
SDSG-Invenio/invenio
refs/heads/invenio-inis
invenio/legacy/websubmit/webinterface.py
6
# This file is part of Invenio. # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 CERN. # # Invenio is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Invenio is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Invenio; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. __lastupdated__ = """$Date$""" __revision__ = "$Id$" import os import errno import time import cgi import sys import shutil from urllib import urlencode from collections import defaultdict from invenio.legacy.bibrecord import create_record from invenio.config import \ CFG_ACCESS_CONTROL_LEVEL_SITE, \ CFG_SITE_LANG, \ CFG_SITE_NAME, \ CFG_SITE_URL, \ CFG_SITE_SECURE_URL, \ CFG_WEBSUBMIT_STORAGEDIR, \ CFG_PREFIX, \ CFG_CERN_SITE from invenio.utils.crossref import get_marcxml_for_doi, CrossrefError from invenio.utils import apache from invenio.legacy.dbquery import run_sql from invenio.modules.access.engine import acc_authorize_action from invenio.modules.access.control import acc_is_role from invenio.legacy.webpage import warning_page from invenio.legacy.webuser import getUid, page_not_authorized, collect_user_info, \ isGuestUser from invenio.legacy.search_engine import is_user_owner_of_record from invenio.ext.legacy.handler import wash_urlargd, WebInterfaceDirectory from invenio.utils.url import make_canonical_urlargd, redirect_to_url from invenio.base.i18n import gettext_set_language from invenio.legacy.bibdocfile.api import stream_file, \ decompose_file, propose_next_docname from 
invenio.ext.logging import register_exception from invenio.utils.html import is_html_text_editor_installed from invenio.legacy.websubmit.icon_creator import create_icon, InvenioWebSubmitIconCreatorError from invenio.legacy.ckeditor.connector import process_CKEditor_upload, send_response import invenio.legacy.template websubmit_templates = invenio.legacy.template.load('websubmit') from invenio.legacy.websearch.adminlib import get_detailed_page_tabs from invenio.utils.json import json, CFG_JSON_AVAILABLE import invenio.legacy.template from flask import session webstyle_templates = invenio.legacy.template.load('webstyle') websearch_templates = invenio.legacy.template.load('websearch') from invenio.legacy.websubmit.engine import home, action, interface, endaction, makeCataloguesTable class WebInterfaceSubmitPages(WebInterfaceDirectory): _exports = ['summary', 'sub', 'direct', '', 'attachfile', 'uploadfile', \ 'getuploadedfile', 'upload_video', ('continue', 'continue_'), \ 'doilookup'] def uploadfile(self, req, form): """ Similar to /submit, but only consider files. Nice for asynchronous Javascript uploads. Should be used to upload a single file. Also try to create an icon, and return URL to file(s) + icon(s) Authentication is performed based on session ID passed as parameter instead of cookie-based authentication, due to the use of this URL by the Flash plugin (to upload multiple files at once), which does not route cookies. FIXME: consider adding /deletefile and /modifyfile functions + parsing of additional parameters to rename files, add comments, restrictions, etc. 
""" argd = wash_urlargd(form, { 'doctype': (str, ''), 'access': (str, ''), 'indir': (str, ''), 'session_id': (str, ''), 'rename': (str, ''), }) curdir = None if "indir" not in form or \ "doctype" not in form or \ "access" not in form: raise apache.SERVER_RETURN(apache.HTTP_BAD_REQUEST) else: curdir = os.path.join(CFG_WEBSUBMIT_STORAGEDIR, argd['indir'], argd['doctype'], argd['access']) user_info = collect_user_info(req) if "session_id" in form: # Are we uploading using Flash, which does not transmit # cookie? The expect to receive session_id as a form # parameter. First check that IP addresses do not # mismatch. uid = session.uid user_info = collect_user_info(uid) try: act_fd = file(os.path.join(curdir, 'act')) action = act_fd.read() act_fd.close() except: action = "" try: recid_fd = file(os.path.join(curdir, 'SN')) recid = recid_fd.read() recid_fd.close() except: recid = '' user_is_owner = False if recid: user_is_owner = is_user_owner_of_record(user_info, recid) try: categ_fd = file(os.path.join(curdir, 'combo%s' %argd['doctype'])) categ = categ_fd.read() categ_fd.close() except IOError: categ = '*' # Is user authorized to perform this action? 
(auth_code, auth_message) = acc_authorize_action(uid, "submit", authorized_if_no_roles=not isGuestUser(uid), verbose=0, doctype=argd['doctype'], act=action, categ=categ) if acc_is_role("submit", doctype=argd['doctype'], act=action) and auth_code != 0 and not user_is_owner: # User cannot submit raise apache.SERVER_RETURN(apache.HTTP_UNAUTHORIZED) else: # Process the upload and get the response added_files = {} for key, formfields in form.items(): filename = key.replace("[]", "") file_to_open = os.path.join(curdir, filename) if hasattr(formfields, "filename") and formfields.filename: dir_to_open = os.path.abspath(os.path.join(curdir, 'files', str(user_info['uid']), key)) try: assert(dir_to_open.startswith(CFG_WEBSUBMIT_STORAGEDIR)) except AssertionError: register_exception(req=req, prefix='curdir="%s", key="%s"' % (curdir, key)) raise apache.SERVER_RETURN(apache.HTTP_FORBIDDEN) if not os.path.exists(dir_to_open): try: os.makedirs(dir_to_open) except OSError as e: if e.errno != errno.EEXIST: # If the issue is only that directory # already exists, then continue, else # report register_exception(req=req, alert_admin=True) raise apache.SERVER_RETURN(apache.HTTP_FORBIDDEN) filename = formfields.filename ## Before saving the file to disc, wash the filename (in particular ## washing away UNIX and Windows (e.g. 
DFS) paths): filename = os.path.basename(filename.split('\\')[-1]) filename = filename.strip() if filename != "": # Check that file does not already exist n = 1 while os.path.exists(os.path.join(dir_to_open, filename)): #dirname, basename, extension = decompose_file(new_destination_path) basedir, name, extension = decompose_file(filename) new_name = propose_next_docname(name) filename = new_name + extension # This may be dangerous if the file size is bigger than the available memory fp = open(os.path.join(dir_to_open, filename), "w") fp.write(formfields.file.read()) fp.close() fp = open(os.path.join(curdir, "lastuploadedfile"), "w") fp.write(filename) fp.close() fp = open(file_to_open, "w") fp.write(filename) fp.close() try: # Create icon (icon_path, icon_name) = create_icon( { 'input-file' : os.path.join(dir_to_open, filename), 'icon-name' : filename, # extension stripped automatically 'icon-file-format' : 'gif', 'multipage-icon' : False, 'multipage-icon-delay' : 100, 'icon-scale' : "300>", # Resize only if width > 300 'verbosity' : 0, }) icons_dir = os.path.join(os.path.join(curdir, 'icons', str(user_info['uid']), key)) if not os.path.exists(icons_dir): # Create uid/icons dir if needed try: os.makedirs(icons_dir) except OSError as e: if e.errno != errno.EEXIST: # If the issue is only that # directory already exists, # then continue, else report register_exception(req=req, alert_admin=True) raise apache.SERVER_RETURN(apache.HTTP_FORBIDDEN) os.rename(os.path.join(icon_path, icon_name), os.path.join(icons_dir, icon_name)) added_files[key] = {'name': filename, 'iconName': icon_name} except InvenioWebSubmitIconCreatorError as e: # We could not create the icon added_files[key] = {'name': filename} continue else: raise apache.SERVER_RETURN(apache.HTTP_BAD_REQUEST) # Send our response if CFG_JSON_AVAILABLE: return json.dumps(added_files) def upload_video(self, req, form): """ A clone of uploadfile but for (large) videos. 
Does not copy the uploaded file to the websubmit directory. Instead, the path to the file is stored inside the submission directory. """ def gcd(a, b): """ the euclidean algorithm """ while a: a, b = b % a, a return b from invenio.modules.encoder.extract import extract_frames from invenio.modules.encoder.config import CFG_BIBENCODE_WEBSUBMIT_ASPECT_SAMPLE_DIR, CFG_BIBENCODE_WEBSUBMIT_ASPECT_SAMPLE_FNAME from invenio.modules.encoder.encode import determine_aspect from invenio.modules.encoder.utils import probe from invenio.modules.encoder.metadata import ffprobe_metadata from invenio.legacy.websubmit.config import CFG_WEBSUBMIT_TMP_VIDEO_PREFIX argd = wash_urlargd(form, { 'doctype': (str, ''), 'access': (str, ''), 'indir': (str, ''), 'session_id': (str, ''), 'rename': (str, ''), }) curdir = None if "indir" not in form or \ "doctype" not in form or \ "access" not in form: raise apache.SERVER_RETURN(apache.HTTP_BAD_REQUEST) else: curdir = os.path.join(CFG_WEBSUBMIT_STORAGEDIR, argd['indir'], argd['doctype'], argd['access']) user_info = collect_user_info(req) if "session_id" in form: # Are we uploading using Flash, which does not transmit # cookie? The expect to receive session_id as a form # parameter. First check that IP addresses do not # mismatch. uid = session.uid user_info = collect_user_info(uid) try: act_fd = file(os.path.join(curdir, 'act')) action = act_fd.read() act_fd.close() except: act = "" # Is user authorized to perform this action? 
(auth_code, auth_message) = acc_authorize_action(uid, "submit", authorized_if_no_roles=not isGuestUser(uid), verbose=0, doctype=argd['doctype'], act=action) if acc_is_role("submit", doctype=argd['doctype'], act=action) and auth_code != 0: # User cannot submit raise apache.SERVER_RETURN(apache.HTTP_UNAUTHORIZED) else: # Process the upload and get the response json_response = {} for key, formfields in form.items(): filename = key.replace("[]", "") if hasattr(formfields, "filename") and formfields.filename: dir_to_open = os.path.abspath(os.path.join(curdir, 'files', str(user_info['uid']), key)) try: assert(dir_to_open.startswith(CFG_WEBSUBMIT_STORAGEDIR)) except AssertionError: register_exception(req=req, prefix='curdir="%s", key="%s"' % (curdir, key)) raise apache.SERVER_RETURN(apache.HTTP_FORBIDDEN) if not os.path.exists(dir_to_open): try: os.makedirs(dir_to_open) except OSError as e: if e.errno != errno.EEXIST: # If the issue is only that directory # already exists, then continue, else # report register_exception(req=req, alert_admin=True) raise apache.SERVER_RETURN(apache.HTTP_FORBIDDEN) filename = formfields.filename ## Before saving the file to disc, wash the filename (in particular ## washing away UNIX and Windows (e.g. 
DFS) paths): filename = os.path.basename(filename.split('\\')[-1]) filename = filename.strip() if filename != "": # Check that file does not already exist while os.path.exists(os.path.join(dir_to_open, filename)): #dirname, basename, extension = decompose_file(new_destination_path) basedir, name, extension = decompose_file(filename) new_name = propose_next_docname(name) filename = new_name + extension #-------------# # VIDEO STUFF # #-------------# ## Remove all previous uploads filelist = os.listdir(os.path.split(formfields.file.name)[0]) for afile in filelist: if argd['access'] in afile: os.remove(os.path.join(os.path.split(formfields.file.name)[0], afile)) ## Check if the file is a readable video ## We must exclude all image and audio formats that are readable by ffprobe if (os.path.splitext(filename)[1] in ['jpg', 'jpeg', 'gif', 'tiff', 'bmp', 'png', 'tga', 'jp2', 'j2k', 'jpf', 'jpm', 'mj2', 'biff', 'cgm', 'exif', 'img', 'mng', 'pic', 'pict', 'raw', 'wmf', 'jpe', 'jif', 'jfif', 'jfi', 'tif', 'webp', 'svg', 'ai', 'ps', 'psd', 'wav', 'mp3', 'pcm', 'aiff', 'au', 'flac', 'wma', 'm4a', 'wv', 'oga', 'm4a', 'm4b', 'm4p', 'm4r', 'aac', 'mp4', 'vox', 'amr', 'snd'] or not probe(formfields.file.name)): formfields.file.close() raise apache.SERVER_RETURN(apache.HTTP_FORBIDDEN) ## We have no "delete" attribute in Python 2.4 if sys.hexversion < 0x2050000: ## We need to rename first and create a dummy file ## Rename the temporary file for the garbage collector new_tmp_fullpath = os.path.split(formfields.file.name)[0] + "/" + CFG_WEBSUBMIT_TMP_VIDEO_PREFIX + argd['access'] + "_" + os.path.split(formfields.file.name)[1] os.rename(formfields.file.name, new_tmp_fullpath) dummy = open(formfields.file.name, "w") dummy.close() formfields.file.close() else: # Mark the NamedTemporatyFile as not to be deleted formfields.file.delete = False formfields.file.close() ## Rename the temporary file for the garbage collector new_tmp_fullpath = os.path.split(formfields.file.name)[0] + "/" + 
CFG_WEBSUBMIT_TMP_VIDEO_PREFIX + argd['access'] + "_" + os.path.split(formfields.file.name)[1] os.rename(formfields.file.name, new_tmp_fullpath) # Write the path to the temp file to a file in STORAGEDIR fp = open(os.path.join(dir_to_open, "filepath"), "w") fp.write(new_tmp_fullpath) fp.close() fp = open(os.path.join(dir_to_open, "filename"), "w") fp.write(filename) fp.close() ## We are going to extract some thumbnails for websubmit ## sample_dir = os.path.join(curdir, 'files', str(user_info['uid']), CFG_BIBENCODE_WEBSUBMIT_ASPECT_SAMPLE_DIR) try: ## Remove old thumbnails shutil.rmtree(sample_dir) except OSError: register_exception(req=req, alert_admin=False) try: os.makedirs(os.path.join(curdir, 'files', str(user_info['uid']), sample_dir)) except OSError: register_exception(req=req, alert_admin=False) try: extract_frames(input_file=new_tmp_fullpath, output_file=os.path.join(sample_dir, CFG_BIBENCODE_WEBSUBMIT_ASPECT_SAMPLE_FNAME), size="600x600", numberof=5) json_response['frames'] = [] for extracted_frame in os.listdir(sample_dir): json_response['frames'].append(extracted_frame) except: ## If the frame extraction fails, something was bad with the video os.remove(new_tmp_fullpath) register_exception(req=req, alert_admin=False) raise apache.SERVER_RETURN(apache.HTTP_FORBIDDEN) ## Try to detect the aspect. 
if this fails, the video is not readable ## or a wrong file might have been uploaded try: (aspect, width, height) = determine_aspect(new_tmp_fullpath) if aspect: aspx, aspy = aspect.split(':') else: the_gcd = gcd(width, height) aspx = str(width / the_gcd) aspy = str(height / the_gcd) json_response['aspx'] = aspx json_response['aspy'] = aspy except TypeError: ## If the aspect detection completely fails os.remove(new_tmp_fullpath) register_exception(req=req, alert_admin=False) raise apache.SERVER_RETURN(apache.HTTP_FORBIDDEN) ## Try to extract some metadata from the video container metadata = ffprobe_metadata(new_tmp_fullpath) json_response['meta_title'] = metadata['format'].get('TAG:title') json_response['meta_description'] = metadata['format'].get('TAG:description') json_response['meta_year'] = metadata['format'].get('TAG:year') json_response['meta_author'] = metadata['format'].get('TAG:author') ## Empty file name else: raise apache.SERVER_RETURN(apache.HTTP_BAD_REQUEST) ## We found our file, we can break the loop break; # Send our response if CFG_JSON_AVAILABLE: dumped_response = json.dumps(json_response) # store the response in the websubmit directory # this is needed if the submission is not finished and continued later response_dir = os.path.join(curdir, 'files', str(user_info['uid']), "response") try: os.makedirs(response_dir) except OSError: # register_exception(req=req, alert_admin=False) pass fp = open(os.path.join(response_dir, "response"), "w") fp.write(dumped_response) fp.close() return dumped_response def getuploadedfile(self, req, form): """ Stream uploaded files. For the moment, restrict to files in ./curdir/files/uid or ./curdir/icons/uid directory, so that we are sure we stream files only to the user who uploaded them. 
""" argd = wash_urlargd(form, {'indir': (str, None), 'doctype': (str, None), 'access': (str, None), 'icon': (int, 0), 'key': (str, None), 'filename': (str, None), 'nowait': (int, 0)}) if None in argd.values(): raise apache.SERVER_RETURN(apache.HTTP_BAD_REQUEST) uid = getUid(req) if argd['icon']: file_path = os.path.join(CFG_WEBSUBMIT_STORAGEDIR, argd['indir'], argd['doctype'], argd['access'], 'icons', str(uid), argd['key'], argd['filename'] ) else: file_path = os.path.join(CFG_WEBSUBMIT_STORAGEDIR, argd['indir'], argd['doctype'], argd['access'], 'files', str(uid), argd['key'], argd['filename'] ) abs_file_path = os.path.abspath(file_path) if abs_file_path.startswith(CFG_WEBSUBMIT_STORAGEDIR): # Check if file exist. Note that icon might not yet have # been created. if not argd['nowait']: for i in range(5): if os.path.exists(abs_file_path): return stream_file(req, abs_file_path) time.sleep(1) else: if os.path.exists(abs_file_path): return stream_file(req, abs_file_path) # Send error 404 in all other cases raise apache.SERVER_RETURN(apache.HTTP_NOT_FOUND) def attachfile(self, req, form): """ Process requests received from CKEditor to upload files. 
If the uploaded file is an image, create an icon version """ if not is_html_text_editor_installed(): return apache.HTTP_NOT_FOUND if 'type' not in form: form['type'] = 'File' if 'upload' not in form or \ not form['type'] in \ ['File', 'Image', 'Flash', 'Media']: #return apache.HTTP_NOT_FOUND pass filetype = form['type'].lower() uid = getUid(req) # URL where the file can be fetched after upload user_files_path = '%(CFG_SITE_URL)s/submit/getattachedfile/%(uid)s' % \ {'uid': uid, 'CFG_SITE_URL': CFG_SITE_URL, 'filetype': filetype} # Path to directory where uploaded files are saved user_files_absolute_path = '%(CFG_PREFIX)s/var/tmp/attachfile/%(uid)s/%(filetype)s' % \ {'uid': uid, 'CFG_PREFIX': CFG_PREFIX, 'filetype': filetype} try: os.makedirs(user_files_absolute_path) except: pass user_info = collect_user_info(req) (auth_code, auth_message) = acc_authorize_action(user_info, 'attachsubmissionfile') msg = "" if user_info['email'] == 'guest': # User is guest: must login prior to upload msg = 'Please login before uploading file.' elif auth_code: # User cannot submit msg = 'Sorry, you are not allowed to submit files.' 
## elif len(form['upload']) != 1: ## msg = 'Sorry, you must upload one single file' else: # Process the upload and get the response (msg, uploaded_file_path, uploaded_file_name, uploaded_file_url, callback_function) = \ process_CKEditor_upload(form, uid, user_files_path, user_files_absolute_path) if uploaded_file_path: # Create an icon if form.get('type','') == 'Image': try: (icon_path, icon_name) = create_icon( { 'input-file' : uploaded_file_path, 'icon-name' : os.path.splitext(uploaded_file_name)[0], 'icon-file-format' : os.path.splitext(uploaded_file_name)[1][1:] or 'gif', 'multipage-icon' : False, 'multipage-icon-delay' : 100, 'icon-scale' : "700>", # Resize only if width > 700 'verbosity' : 0, }) # Move original file to /original dir, and replace it with icon file original_user_files_absolute_path = os.path.join(user_files_absolute_path, 'original') if not os.path.exists(original_user_files_absolute_path): # Create /original dir if needed os.mkdir(original_user_files_absolute_path) os.rename(uploaded_file_path, original_user_files_absolute_path + os.sep + uploaded_file_name) os.rename(icon_path + os.sep + icon_name, uploaded_file_path) except InvenioWebSubmitIconCreatorError as e: pass user_files_path += '/' + filetype + '/' + uploaded_file_name else: user_files_path = '' if not msg: msg = 'No valid file found' # Send our response send_response(req, msg, user_files_path, callback_function) def _lookup(self, component, path): """ This handler is invoked for the dynamic URLs (for getting and putting attachments) Eg: /submit/getattachedfile/41336978/image/myfigure.png /submit/attachfile/41336978/image/myfigure.png """ if component == 'getattachedfile' and len(path) > 2: uid = path[0] # uid of the submitter file_type = path[1] # file, image, flash or media (as # defined by CKEditor) if file_type in ['file', 'image', 'flash', 'media']: file_name = '/'.join(path[2:]) # the filename def answer_get(req, form): """Accessing files attached to submission.""" form['file'] 
= file_name form['type'] = file_type form['uid'] = uid return self.getattachedfile(req, form) return answer_get, [] # All other cases: file not found return None, [] def getattachedfile(self, req, form): """ Returns a file uploaded to the submission 'drop box' by the CKEditor. """ argd = wash_urlargd(form, {'file': (str, None), 'type': (str, None), 'uid': (int, 0)}) # Can user view this record, i.e. can user access its # attachments? uid = getUid(req) user_info = collect_user_info(req) if not argd['file'] is None: # Prepare path to file on disk. Normalize the path so that # ../ and other dangerous components are removed. path = os.path.abspath(CFG_PREFIX + '/var/tmp/attachfile/' + \ '/' + str(argd['uid']) + \ '/' + argd['type'] + '/' + argd['file']) # Check that we are really accessing attachements # directory, for the declared record. if path.startswith(CFG_PREFIX + '/var/tmp/attachfile/') and os.path.exists(path): return stream_file(req, path) # Send error 404 in all other cases return(apache.HTTP_NOT_FOUND) def continue_(self, req, form): """ Continue an interrupted submission. """ args = wash_urlargd(form, {'access': (str, ''), 'doctype': (str, '')}) ln = args['ln'] _ = gettext_set_language(ln) access = args['access'] doctype = args['doctype'] if not access or not doctype: return warning_page(_("Sorry, invalid arguments"), req=req, ln=ln) user_info = collect_user_info(req) email = user_info['email'] res = run_sql("SELECT action, status FROM sbmSUBMISSIONS WHERE id=%s AND email=%s and doctype=%s", (access, email, doctype)) if res: action, status = res[0] if status == 'finished': return warning_page(_("Note: the requested submission has already been completed"), req=req, ln=ln) redirect_to_url(req, CFG_SITE_SECURE_URL + '/submit/direct?' 
+ urlencode({ 'sub': action + doctype, 'access': access})) return warning_page(_("Sorry, you don't seem to have initiated a submission with the provided access number"), req=req, ln=ln) def direct(self, req, form): """Directly redirected to an initialized submission.""" args = wash_urlargd(form, {'sub': (str, ''), 'access' : (str, '')}) sub = args['sub'] access = args['access'] ln = args['ln'] _ = gettext_set_language(ln) uid = getUid(req) if CFG_ACCESS_CONTROL_LEVEL_SITE >= 1: return page_not_authorized(req, "direct", navmenuid='submit', text=_("Submissions are not available")) myQuery = req.args if not sub: return warning_page(_("Sorry, 'sub' parameter missing..."), req, ln=ln) res = run_sql("SELECT docname,actname FROM sbmIMPLEMENT WHERE subname=%s", (sub,)) if not res: return warning_page(_("Sorry. Cannot analyse parameter"), req, ln=ln) else: # get document type doctype = res[0][0] # get action name action = res[0][1] # get category categ = req.form.get('combo%s' % doctype, '*') # retrieve other parameter values params = dict(form) # Check if user is authorized, based on doctype/action/categ, # in order to give guest users a chance to log in if needed: (auth_code, auth_message) = acc_authorize_action(req, 'submit', authorized_if_no_roles=not isGuestUser(uid), verbose=0, doctype=doctype, act=action, categ=categ) if not auth_code == 0 and isGuestUser(uid): # Propose to login redirection_params = params redirection_params['referer'] = CFG_SITE_SECURE_URL + req.unparsed_uri return redirect_to_url(req, "%s/youraccount/login%s" % ( CFG_SITE_SECURE_URL, make_canonical_urlargd(redirection_params, {})), norobot=True) # else: continue, and let main interface control the access # find existing access number if not access: # create 'unique' access number pid = os.getpid() now = time.time() access = "%i_%s" % (now, pid) # retrieve 'dir' value res = run_sql ("SELECT dir FROM sbmACTION WHERE sactname=%s", (action,)) dir = res[0][0] mainmenu = req.headers_in.get('referer') 
params['access'] = access params['act'] = action params['doctype'] = doctype params['startPg'] = '1' params['mainmenu'] = mainmenu params['ln'] = ln params['indir'] = dir url = "%s/submit?%s" % (CFG_SITE_SECURE_URL, urlencode(params)) redirect_to_url(req, url) def sub(self, req, form): """DEPRECATED: /submit/sub is deprecated now, so raise email to the admin (but allow submission to continue anyway)""" args = wash_urlargd(form, {'password': (str, '')}) uid = getUid(req) if uid == -1 or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1: return page_not_authorized(req, "../sub/", navmenuid='submit') try: raise DeprecationWarning, 'submit/sub handler has been used. Please use submit/direct. e.g. "submit/sub?RN=123@SBIFOO" -> "submit/direct?RN=123&sub=SBIFOO"' except DeprecationWarning: register_exception(req=req, alert_admin=True) ln = args['ln'] _ = gettext_set_language(ln) #DEMOBOO_RN=DEMO-BOOK-2008-001&ln=en&password=1223993532.26572%40APPDEMOBOO params = dict(form) password = args['password'] if password: del params['password'] if "@" in password: params['access'], params['sub'] = password.split('@', 1) else: params['sub'] = password else: args = str(req.args).split('@') if len(args) > 1: params = {'sub' : args[-1]} args = '@'.join(args[:-1]) params.update(cgi.parse_qs(args)) else: return warning_page(_("Sorry, invalid URL..."), req, ln=ln) url = "%s/submit/direct?%s" % (CFG_SITE_SECURE_URL, urlencode(params, doseq=True)) redirect_to_url(req, url) def summary(self, req, form): args = wash_urlargd(form, { 'doctype': (str, ''), 'act': (str, ''), 'access': (str, ''), 'indir': (str, '')}) ln = args['ln'] uid = getUid(req) if uid == -1 or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1: return page_not_authorized(req, "../summary", navmenuid='submit') t = "" curdir = os.path.join(CFG_WEBSUBMIT_STORAGEDIR, args['indir'], args['doctype'], args['access']) try: assert(curdir == os.path.abspath(curdir)) except AssertionError: register_exception(req=req, alert_admin=True, prefix='Possible cracking 
tentative: indir="%s", doctype="%s", access="%s"' % (args['indir'], args['doctype'], args['access'])) return warning_page("Invalid parameters", req, ln) subname = "%s%s" % (args['act'], args['doctype']) res = run_sql("select sdesc,fidesc,pagenb,level from sbmFIELD where subname=%s " "order by pagenb,fieldnb", (subname,)) nbFields = 0 values = [] for arr in res: if arr[0] != "": val = { 'mandatory' : (arr[3] == 'M'), 'value' : '', 'page' : arr[2], 'name' : arr[0], } if os.path.exists(os.path.join(curdir, curdir, arr[1])): fd = open(os.path.join(curdir, arr[1]),"r") value = fd.read() fd.close() value = value.replace("\n"," ") value = value.replace("Select:","") else: value = "" val['value'] = value values.append(val) return websubmit_templates.tmpl_submit_summary( ln = args['ln'], values = values, ) def doilookup(self, req, form): """ Returns the metadata from the crossref website based on the DOI. """ args = wash_urlargd(form, { 'doi': (str, '')}) response = defaultdict(list) if args['doi']: doi = args['doi'] try: marcxml_template = get_marcxml_for_doi(doi) except CrossrefError: # Just ignore Crossref errors pass else: record = create_record(marcxml_template)[0] if record: # We need to convert this record structure to a simple dictionary for key, value in record.items(): # key, value = (773, [([('0', 'PER:64142'), ...], ' ', ' ', '', 47)]) for val in value: # val = ([('0', 'PER:64142'), ...], ' ', ' ', '', 47) ind1 = val[1].replace(" ", "_") ind2 = val[2].replace(" ", "_") for (k, v) in val[0]: # k, v = ('0', 'PER:5409') response[key+ind1+ind2+k].append(v) # The output dictionary is something like: # {"100__a": ['Smith, J.'], # "700__a": ['Anderson, J.', 'Someoneelse, E.'], # "700__u": ['University1', 'University2']} # return dictionary as JSON return json.dumps(response) def index(self, req, form): args = wash_urlargd(form, { 'c': (str, CFG_SITE_NAME), 'doctype': (str, ''), 'act': (str, ''), 'startPg': (str, "1"), 'access': (str, ''), 'mainmenu': (str, ''), 
'fromdir': (str, ''), 'nextPg': (str, ''), 'nbPg': (str, ''), 'curpage': (str, '1'), 'step': (str, '0'), 'mode': (str, 'U'), }) ## Strip whitespace from beginning and end of doctype and action: args["doctype"] = args["doctype"].strip() args["act"] = args["act"].strip() def _index(req, c, ln, doctype, act, startPg, access, mainmenu, fromdir, nextPg, nbPg, curpage, step, mode): auth_args = {} if doctype: auth_args['doctype'] = doctype if act: auth_args['act'] = act uid = getUid(req) if uid == -1 or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1: return page_not_authorized(req, "direct", navmenuid='submit') if CFG_CERN_SITE: ## HACK BEGIN: this is a hack for CMS and ATLAS draft user_info = collect_user_info(req) if doctype == 'CMSPUB' and act == "" and 'cds-admin [CERN]' not in user_info['group'] and not user_info['email'].lower() == 'cds.support@cern.ch': if isGuestUser(uid): return redirect_to_url(req, "%s/youraccount/login%s" % ( CFG_SITE_SECURE_URL, make_canonical_urlargd({'referer' : CFG_SITE_SECURE_URL + req.unparsed_uri, 'ln' : args['ln']}, {})) , norobot=True) if 'cms-publication-committee-chair [CERN]' not in user_info['group']: return page_not_authorized(req, "../submit", text="In order to access this submission interface you need to be member of the CMS Publication Committee Chair.", navmenuid='submit') elif doctype == 'ATLPUB' and 'cds-admin [CERN]' not in user_info['group'] and not user_info['email'].lower() == 'cds.support@cern.ch': if isGuestUser(uid): return redirect_to_url(req, "%s/youraccount/login%s" % ( CFG_SITE_SECURE_URL, make_canonical_urlargd({'referer' : CFG_SITE_SECURE_URL + req.unparsed_uri, 'ln' : args['ln']}, {})) , norobot=True) if 'atlas-gen [CERN]' not in user_info['group']: return page_not_authorized(req, "../submit", text="In order to access this submission interface you need to be member of ATLAS.", navmenuid='submit') ## HACK END if doctype == "": catalogues_text, at_least_one_submission_authorized, submission_exists = makeCataloguesTable(req, 
ln=CFG_SITE_LANG) if not at_least_one_submission_authorized and submission_exists: if isGuestUser(uid): return redirect_to_url(req, "%s/youraccount/login%s" % ( CFG_SITE_SECURE_URL, make_canonical_urlargd({'referer' : CFG_SITE_SECURE_URL + req.unparsed_uri, 'ln' : args['ln']}, {})) , norobot=True) else: return page_not_authorized(req, "../submit", uid=uid, navmenuid='submit') return home(req, catalogues_text, c, ln) elif act == "": return action(req, c, ln, doctype) elif int(step)==0: return interface(req, c, ln, doctype, act, startPg, access, mainmenu, fromdir, nextPg, nbPg, curpage) else: return endaction(req, c, ln, doctype, act, startPg, access, mainmenu, fromdir, nextPg, nbPg, curpage, step, mode) return _index(req, **args) # Answer to both /submit/ and /submit __call__ = index # def retrieve_most_recent_attached_file(file_path): # """ # Retrieve the latest file that has been uploaded with the # CKEditor. This is the only way to retrieve files that the # CKEditor has renamed after the upload. # Eg: 'prefix/image.jpg' was uploaded but did already # exist. CKEditor silently renamed it to 'prefix/image(1).jpg': # >>> retrieve_most_recent_attached_file('prefix/image.jpg') # 'prefix/image(1).jpg' # """ # (base_path, filename) = os.path.split(file_path) # base_name = os.path.splitext(filename)[0] # file_ext = os.path.splitext(filename)[1][1:] # most_recent_filename = filename # i = 0 # while True: # i += 1 # possible_filename = "%s(%d).%s" % \ # (base_name, i, file_ext) # if os.path.exists(base_path + os.sep + possible_filename): # most_recent_filename = possible_filename # else: # break # return os.path.join(base_path, most_recent_filename)
kawamuray/ganeti
refs/heads/build
test/py/testutils.py
4
#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Utilities for unit testing"""

import os
import sys
import stat
import tempfile
import unittest
import logging

from ganeti import utils


def GetSourceDir():
  """Returns the top-level source directory.

  Honours the TOP_SRCDIR environment variable so tests also work for
  out-of-tree builds; defaults to the current directory.

  """
  return os.environ.get("TOP_SRCDIR", ".")


def TestDataFilename(name):
  """Returns the filename of a given test data file.

  @type name: str
  @param name: the 'base' of the file name, as present in
      the test/data directory
  @rtype: str
  @return: the full path to the filename, such that it can
      be used in 'make distcheck' rules

  """
  return "%s/test/data/%s" % (GetSourceDir(), name)


def ReadTestData(name):
  """Returns the content of a test data file.

  This is just a very simple wrapper over utils.ReadFile with the
  proper test file name.

  """
  return utils.ReadFile(TestDataFilename(name))


def _SetupLogging(verbose):
  """Set up the logging infrastructure.

  @type verbose: bool
  @param verbose: if True, log records are written to stderr; otherwise
      they are discarded (appended to the null device)

  """
  fmt = logging.Formatter("%(asctime)s: %(threadName)s"
                          " %(levelname)s %(message)s")

  if verbose:
    handler = logging.StreamHandler()
  else:
    handler = logging.FileHandler(os.devnull, "a")

  handler.setLevel(logging.NOTSET)
  handler.setFormatter(fmt)

  root_logger = logging.getLogger("")
  root_logger.setLevel(logging.NOTSET)
  root_logger.addHandler(handler)


class GanetiTestProgram(unittest.TestProgram):
  """Test program that logs to stderr on demand and refuses to run
  with assertions disabled.

  """
  def runTests(self):
    """Runs all tests.

    """
    # Verbose logging is opt-in via the LOGTOSTDERR environment variable
    _SetupLogging("LOGTOSTDERR" in os.environ)

    sys.stderr.write("Running %s\n" % self.progName)
    sys.stderr.flush()

    # Ensure assertions will be evaluated: many tests rely on bare
    # 'assert' statements, which 'python -O' compiles out silently
    if not __debug__:
      raise Exception("Not running in debug mode, assertions would not be"
                      " evaluated")

    # Check again, this time with a real assertion
    try:
      assert False
    except AssertionError:
      pass
    else:
      raise Exception("Assertion not evaluated")

    return unittest.TestProgram.runTests(self)


class GanetiTestCase(unittest.TestCase):
  """Helper class for unittesting.

  This class defines a few utility functions that help in building
  unittests. Child classes must call the parent setup and cleanup.

  """
  def setUp(self):
    # List of temporary files to remove in tearDown (see _CreateTempFile)
    self._temp_files = []

  def tearDown(self):
    # Best-effort cleanup; a file already removed by the test itself
    # must not fail the teardown
    while self._temp_files:
      try:
        utils.RemoveFile(self._temp_files.pop())
      except EnvironmentError:
        pass

  def assertFileContent(self, file_name, expected_content):
    """Checks that the content of a file is what we expect.

    @type file_name: str
    @param file_name: the file whose contents we should check
    @type expected_content: str
    @param expected_content: the content we expect

    """
    actual_content = utils.ReadFile(file_name)
    self.assertEqual(actual_content, expected_content)

  def assertFileMode(self, file_name, expected_mode):
    """Checks that the mode of a file is what we expect.

    @type file_name: str
    @param file_name: the file whose contents we should check
    @type expected_mode: int
    @param expected_mode: the mode we expect

    """
    st = os.stat(file_name)
    actual_mode = stat.S_IMODE(st.st_mode)
    self.assertEqual(actual_mode, expected_mode)

  def assertFileUid(self, file_name, expected_uid):
    """Checks that the user id of a file is what we expect.

    @type file_name: str
    @param file_name: the file whose contents we should check
    @type expected_uid: int
    @param expected_uid: the user id we expect

    """
    st = os.stat(file_name)
    actual_uid = st.st_uid
    self.assertEqual(actual_uid, expected_uid)

  def assertFileGid(self, file_name, expected_gid):
    """Checks that the group id of a file is what we expect.

    @type file_name: str
    @param file_name: the file whose contents we should check
    @type expected_gid: int
    @param expected_gid: the group id we expect

    """
    st = os.stat(file_name)
    actual_gid = st.st_gid
    self.assertEqual(actual_gid, expected_gid)

  def assertEqualValues(self, first, second, msg=None):
    """Compares two values whether they're equal.

    Tuples are automatically converted to lists before comparing, so
    e.g. (1, 2) compares equal to [1, 2].

    """
    return self.assertEqual(UnifyValueType(first),
                            UnifyValueType(second),
                            msg=msg)

  def _CreateTempFile(self):
    """Creates a temporary file and adds it to the internal cleanup list.

    This method simplifies the creation and cleanup of temporary files
    during tests.

    """
    fh, fname = tempfile.mkstemp(prefix="ganeti-test", suffix=".tmp")
    os.close(fh)
    self._temp_files.append(fname)
    return fname


def patch_object(*args, **kwargs):
  """Unified patch_object for various versions of Python Mock.

  Different Python Mock versions provide incompatible versions of patching
  an object. More recent versions use _patch_object, older ones used
  patch_object. This function unifies the different variations.

  """
  import mock
  try:
    # pylint: disable=W0212
    return mock._patch_object(*args, **kwargs)
  except AttributeError:
    # pylint: disable=E1101
    return mock.patch_object(*args, **kwargs)


def UnifyValueType(data):
  """Converts all tuples into lists.

  This is useful for unittests where an external library doesn't keep
  types.  Works recursively through lists, tuples and dict values.

  """
  if isinstance(data, (tuple, list)):
    return [UnifyValueType(i) for i in data]
  elif isinstance(data, dict):
    return dict([(UnifyValueType(key), UnifyValueType(value))
                 for (key, value) in data.iteritems()])

  return data


class CallCounter(object):
  """Utility class to count number of calls to a function/method.

  """
  def __init__(self, fn):
    """Initializes this class.

    @type fn: Callable

    """
    self._fn = fn
    self._count = 0

  def __call__(self, *args, **kwargs):
    """Calls wrapped function with given parameters.

    """
    self._count += 1
    return self._fn(*args, **kwargs)

  def Count(self):
    """Returns number of calls.

    @rtype: number

    """
    return self._count
MikkelSchubert/paleomix
refs/heads/master
tests/common_tests/formats_tests/bed_test.py
1
#!/usr/bin/python3 # # Copyright (c) 2012 Mikkel Schubert <MikkelSch@gmail.com> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
# import copy from pathlib import Path from typing import Any, List from unittest.mock import Mock import pytest from paleomix.common.formats.bed import ( BEDError, BEDRecord, merge_bed_records, pad_bed_records, read_bed_file, sort_bed_by_bamfile, ) ######################################################################################## # BEDRecord constructor def test_bedrecord__constructor_3(): record = BEDRecord("my_contig", 12, 345) assert record.contig == "my_contig" assert record.start == 12 assert record.end == 345 assert record.name is None assert record.score is None assert record.strand is None assert str(record) == "my_contig\t12\t345" assert repr(record) == "BEDRecord(contig='my_contig', start=12, end=345)" def test_bedrecord__constructor_6(): record = BEDRecord("my_contig", 12, 345, "my_name", -3, "-") assert record.contig == "my_contig" assert record.start == 12 assert record.end == 345 assert record.name == "my_name" assert record.score == -3 assert record.strand == "-" assert str(record) == "my_contig\t12\t345\tmy_name\t-3\t-" assert ( repr(record) == "BEDRecord(contig='my_contig', start=12, " "end=345, name='my_name', score=-3, strand='-')" ) @pytest.mark.parametrize("contig", (None, "")) def test_bedrecord__constructor__empty_contig(contig: Any): with pytest.raises(ValueError, match="contig is blank"): BEDRecord(contig, 12, 345, "my_name", -3, "-") @pytest.mark.parametrize("strand", ("", "?", "foo")) def test_bedrecord__constructor__invalid_strand(strand: Any): with pytest.raises(ValueError, match="invalid strand"): BEDRecord("contig", 12, 345, "my_name", -3, strand) ######################################################################################## # BEDRecord.parse @pytest.mark.parametrize("text", ("", "contig", "contig\t0")) def test_bedrecord__parse__0_to_2_fields(text: str): with pytest.raises(BEDError, match="not enough columns"): BEDRecord.parse(text) def test_bedrecord__parse__3_fields(): text = "my_contig\t12\t345" record = 
BEDRecord.parse(text) assert str(record) == text assert repr(record) == "BEDRecord(contig='my_contig', start=12, end=345)" _6_COLUMN_LINES = [ "my_contig\t12\t345\tmy_name\t-3\t-", "my_contig\t12\t345\tmy_name\t-3\t-\tfoo\tbar", ] @pytest.mark.parametrize("text", _6_COLUMN_LINES) def test_bedrecord__parse__6_fields(text: str): record = BEDRecord.parse(text) assert record.contig == "my_contig" assert record.start == 12 assert record.end == 345 assert record.name == "my_name" assert record.score == -3 assert record.strand == "-" assert str(record) == "\t".join(text.split("\t")[:6]) assert ( repr(record) == "BEDRecord(contig='my_contig', start=12, " "end=345, name='my_name', score=-3, strand='-')" ) def test_bedrecord__parse__invalid_values_1(): with pytest.raises(BEDError, match="contig is blank"): BEDRecord.parse("\t123\t456") def test_bedrecord__parse__invalid_values_2(): with pytest.raises(BEDError, match="Expected int in column 2"): BEDRecord.parse("contig\tsix\t456") def test_bedrecord__parse__invalid_values_3(): with pytest.raises(BEDError, match="Expected int in column 3"): BEDRecord.parse("contig\t123\tlots") def test_bedrecord__parse__invalid_values_5(): with pytest.raises(BEDError, match="Expected int in column 5"): BEDRecord.parse("contig\t123\t456\tfoo\tbar") def test_bedrecord__parse__invalid_values_6(): with pytest.raises(BEDError, match="strand must be \\+ or -"): BEDRecord.parse("contig\t123\t456\tfoo\t0\t?") def test_bedrecord__parse__invalid_values_1_(): tmpl = "contig\t0\t%s\t\t0\t-" BEDRecord.parse(tmpl % (1,)) # check template with pytest.raises(BEDError, match=""): BEDRecord.parse(tmpl % ("not a number",)) def test_bedrecord__setters__unset_fields__after_end(): record = BEDRecord.parse("my_name\t17\t258") record.strand = "-" assert str(record) == "my_name\t17\t258\t\t0\t-" record = BEDRecord.parse("my_name\t17\t258\tregion") record.strand = "-" assert str(record) == "my_name\t17\t258\tregion\t0\t-" record = 
BEDRecord.parse("my_name\t17\t258\tregion\t33") record.strand = "-" assert str(record) == "my_name\t17\t258\tregion\t33\t-" record = BEDRecord.parse("my_name\t17\t258\tregion\t33\t+") record.strand = "-" assert str(record) == "my_name\t17\t258\tregion\t33\t-" ############################################################################### # BEDRecord comparisons def test_sorting__compared_with_non_bed_record(): with pytest.raises(TypeError): BEDRecord("chr2", 1, 20) < "foo" def test_bedrecord__cmp(): record_1 = BEDRecord("my_contig", 12, 345, "my_name", -3, "-") record_2 = BEDRecord("other", 565, 684, "name2", 0, "+") for key in ("contig", "start", "end", "name", "score", "strand"): record_tmp = copy.copy(record_1) assert record_1 == record_tmp setattr(record_tmp, key, getattr(record_2, key)) assert record_1 != record_tmp setattr(record_tmp, key, getattr(record_1, key)) assert record_1 == record_tmp ############################################################################### # pad_bed_records def test_pad_bed_records__empty_sequences(): pad_bed_records([], 10) pad_bed_records((), 10) def test_pad_bed_records(): records = [ BEDRecord("chr1", 10, 90), BEDRecord("chr2", 100, 200), ] assert pad_bed_records(records, 20) == [ BEDRecord("chr1", 0, 110), BEDRecord("chr2", 80, 220), ] def test_pad_bed_records__with_max_lengths(): max_sizes = {"chr1": 100, "chr2": 200} records = [ BEDRecord("chr1", 10, 90), BEDRecord("chr2", 10, 90), BEDRecord("chr2", 100, 190), ] assert pad_bed_records(records, 20, max_sizes) == [ BEDRecord("chr1", 0, 100), BEDRecord("chr2", 0, 110), BEDRecord("chr2", 80, 200), ] def test_pad_bed_records__negative_padding(): records = [ BEDRecord("chr1", 10, 90), BEDRecord("chr2", 100, 200), ] assert pad_bed_records(records, -15) == [ BEDRecord("chr1", 25, 75), BEDRecord("chr2", 115, 185), ] def test_pad_bed_records__negative_padding__near_empty_records(): assert pad_bed_records([BEDRecord("chr1", 10, 90)], -38) == [ BEDRecord("chr1", 48, 52) ] assert 
pad_bed_records([BEDRecord("chr1", 10, 90)], -39) == [ BEDRecord("chr1", 49, 51) ] assert pad_bed_records([BEDRecord("chr1", 10, 91)], -40) == [ BEDRecord("chr1", 50, 51) ] def test_pad_bed_records__negative_padding__empty_records(): assert pad_bed_records([BEDRecord("chr1", 10, 90)], -40) == [] assert pad_bed_records([BEDRecord("chr1", 10, 90)], -41) == [] assert pad_bed_records([BEDRecord("chr1", 10, 91)], -41) == [] ############################################################################### # merge_bed_records def test_merge_records__empty_sequences(): assert merge_bed_records(()) == [] assert merge_bed_records([]) == [] def test_merge_records__single_record(): assert merge_bed_records([BEDRecord("chr1", 1234, 5678)]) == [ BEDRecord("chr1", 1234, 5678) ] def test_merge_records__minimal_fields_only(): assert merge_bed_records([BEDRecord("chr1", 1234, 5678, "foo", 1, "-")]) == [ BEDRecord("chr1", 1234, 5678) ] def test_merge_records__overlapping_records_1(): assert merge_bed_records( [BEDRecord("chr1", 1234, 5678), BEDRecord("chr1", 5677, 9012)] ) == [BEDRecord("chr1", 1234, 9012)] def test_merge_records__overlapping_records_2(): assert merge_bed_records( [BEDRecord("chr1", 1234, 5678), BEDRecord("chr1", 5678, 9012)] ) == [BEDRecord("chr1", 1234, 9012)] def test_merge_records__non_overlapping_records_1(): assert merge_bed_records( [BEDRecord("chr1", 1234, 5678), BEDRecord("chr1", 5679, 9012)] ) == [BEDRecord("chr1", 1234, 5678), BEDRecord("chr1", 5679, 9012)] def test_merge_records__non_overlapping_records_2(): assert merge_bed_records( [BEDRecord("chr1", 1234, 5678), BEDRecord("chr1", 5680, 9012)] ) == [BEDRecord("chr1", 1234, 5678), BEDRecord("chr1", 5680, 9012)] def test_merge_records__complex_example(): assert merge_bed_records( [ BEDRecord("chr1", 1234, 5678), BEDRecord("chr1", 5678, 9012), BEDRecord("chr2", 1, 20), BEDRecord("chr2", 100, 200), BEDRecord("chr2", 150, 250), ] ) == [ BEDRecord("chr1", 1234, 9012), BEDRecord("chr2", 1, 20), BEDRecord("chr2", 
100, 250), ] def test_merge_records__complex_example__unsorted(): assert merge_bed_records( [ BEDRecord("chr2", 100, 200), BEDRecord("chr1", 1234, 5678), BEDRecord("chr2", 150, 250), BEDRecord("chr2", 1, 20), BEDRecord("chr1", 5678, 9012), ] ) == [ BEDRecord("chr1", 1234, 9012), BEDRecord("chr2", 1, 20), BEDRecord("chr2", 100, 250), ] ############################################################################### # read_bed_file _SIMPLE_BED = """chr1\t7\t9 chr3\t123\t45123\tFoo\t0 chr9\t777\t999 """ _SIMPLE_BED_WITH_SKIPPED_LINES = """ chr1\t7\t9 # chr3\t123\t45123\tFoo\t0 chr9\t777\t999 """ def _write_bed(tmp_path: Path, data: str) -> str: filename = tmp_path / "tmp.bed" with filename.open("wt") as handle: handle.write(data) return str(filename) def test_read_bed_file__empty_file(tmp_path: Path): filename = _write_bed(tmp_path, "") assert list(read_bed_file(filename)) == [] def test_read_bed_file__simple_records(tmp_path: Path): filename = _write_bed(tmp_path, _SIMPLE_BED) assert list(read_bed_file(filename)) == [ BEDRecord("chr1", 7, 9), BEDRecord("chr3", 123, 45123, "Foo", 0), BEDRecord("chr9", 777, 999), ] def test_read_bed_file__simple_records_and_skipped_lines(tmp_path: Path): filename = _write_bed(tmp_path, _SIMPLE_BED_WITH_SKIPPED_LINES) assert list(read_bed_file(filename)) == [ BEDRecord("chr1", 7, 9), BEDRecord("chr9", 777, 999), ] def test_read_bed_file__parse_error(tmp_path: Path): filename = _write_bed(tmp_path, "chr1\t0\tabc") with pytest.raises(BEDError, match=":1: Expected int in column 3 but found 'abc'"): next(read_bed_file(filename)) _INVALID_START_END = ["chr1\t0\t0", "chr1\t-1\t100", "chr1\t-100\t-1", "chr1\t100\t10"] _POSITIONS_INSIDE_CONTIG = ["chr1\t0\t50", "chr1\t50\t99", "chr1\t99\t100"] _POSITIONS_OUTSIDE_CONTIG = ["chr1\t0\t101", "chr1\t99\t102", "chr1\t200\t300"] @pytest.mark.parametrize("text", _INVALID_START_END) def test_read_bed_file__invalid_coordinates(tmp_path: Path, text: str): filename = _write_bed(tmp_path, text) with 
pytest.raises(BEDError, match="invalid start/end coordinates"): list(read_bed_file(filename)) @pytest.mark.parametrize("text", _POSITIONS_INSIDE_CONTIG) def test_read_bed_file__inside_contig(tmp_path: Path, text: str): filename = _write_bed(tmp_path, text) list(read_bed_file(filename, {"chr1": 100})) @pytest.mark.parametrize("text", _POSITIONS_OUTSIDE_CONTIG) def test_read_bed_file__outside_contig(tmp_path: Path, text: str): filename = _write_bed(tmp_path, text) with pytest.raises(BEDError, match="coordinates outside contig"): list(read_bed_file(filename, {"chr1": 100})) def test_read_bed_file__unknown_contigs(tmp_path: Path): filename = _write_bed(tmp_path, "chr2\t0\t100") with pytest.raises(BEDError, match="unknown contig"): list(read_bed_file(filename, {"chr1": 100})) ######################################################################################## # sort_bed_by_bamfile _UNSORTED_RECORDS = [ BEDRecord("chr3", 100, 200), BEDRecord("chr2", 500, 10010), BEDRecord("chr3", 1, 10), BEDRecord("chr1", 0, 1234), ] def test_sort_bed_by_bamfile__empty_list(): handle: Any = Mock() handle.references = [] regions: List[BEDRecord] = [] sort_bed_by_bamfile(handle, regions) assert regions == [] def test_sort_bed_by_bamfile__known_contigs_1(): handle: Any = Mock() handle.references = ["chr1", "chr2", "chr3"] regions = list(_UNSORTED_RECORDS) sort_bed_by_bamfile(handle, regions) assert regions == [ BEDRecord("chr1", 0, 1234), BEDRecord("chr2", 500, 10010), BEDRecord("chr3", 1, 10), BEDRecord("chr3", 100, 200), ] def test_sort_bed_by_bamfile__known_contigs_2(): handle: Any = Mock() handle.references = ["chr3", "chr2", "chr1"] regions = list(_UNSORTED_RECORDS) sort_bed_by_bamfile(handle, regions) assert regions == [ BEDRecord("chr3", 1, 10), BEDRecord("chr3", 100, 200), BEDRecord("chr2", 500, 10010), BEDRecord("chr1", 0, 1234), ] def test_sort_bed_by_bamfile__unknown_contigs_1(): handle: Any = Mock() handle.references = ["chr1", "chr2"] regions = list(_UNSORTED_RECORDS) 
sort_bed_by_bamfile(handle, regions) assert regions == [ BEDRecord("chr1", 0, 1234), BEDRecord("chr2", 500, 10010), BEDRecord("chr3", 1, 10), BEDRecord("chr3", 100, 200), ] def test_sort_bed_by_bamfile__unknown_contigs_2(): handle: Any = Mock() handle.references = [] regions = list(_UNSORTED_RECORDS) sort_bed_by_bamfile(handle, regions) assert regions == [ BEDRecord("chr1", 0, 1234), BEDRecord("chr2", 500, 10010), BEDRecord("chr3", 1, 10), BEDRecord("chr3", 100, 200), ]
matthappens/taskqueue
refs/heads/master
taskqueue/venv_tq/lib/python2.7/site-packages/werkzeug/testsuite/__init__.py
99
# -*- coding: utf-8 -*- """ werkzeug.testsuite ~~~~~~~~~~~~~~~~~~ Contains all test Werkzeug tests. :copyright: (c) 2013 by Armin Ronacher. :license: BSD, see LICENSE for more details. """ from __future__ import with_statement import re import sys import unittest import shutil import tempfile import atexit from werkzeug.utils import find_modules from werkzeug._compat import text_type, integer_types, reraise def get_temporary_directory(): directory = tempfile.mkdtemp() @atexit.register def remove_directory(): try: shutil.rmtree(directory) except EnvironmentError: pass return directory def iter_suites(package): """Yields all testsuites.""" for module in find_modules(package, include_packages=True): mod = __import__(module, fromlist=['*']) if hasattr(mod, 'suite'): yield mod.suite() def find_all_tests(suite): """Yields all the tests and their names from a given suite.""" suites = [suite] while suites: s = suites.pop() try: suites.extend(s) except TypeError: yield s, '%s.%s.%s' % ( s.__class__.__module__, s.__class__.__name__, s._testMethodName ) class WerkzeugTestCase(unittest.TestCase): """Baseclass for all the tests that Werkzeug uses. Use these methods for testing instead of the camelcased ones in the baseclass for consistency. 
""" def setup(self): pass def teardown(self): pass def setUp(self): self.setup() def tearDown(self): unittest.TestCase.tearDown(self) self.teardown() def assert_line_equal(self, x, y): assert x == y, "lines not equal\n a = %r\n b = %r" % (x, y) def assert_equal(self, x, y, msg=None): return self.assertEqual(x, y, msg) def assert_not_equal(self, x, y): return self.assertNotEqual(x, y) def assert_raises(self, exc_type, callable=None, *args, **kwargs): catcher = _ExceptionCatcher(self, exc_type) if callable is None: return catcher with catcher: callable(*args, **kwargs) if sys.version_info[:2] == (2, 6): def assertIsNone(self, x): assert x is None, "%r is not None" % (x,) def assertIsNotNone(self, x): assert x is not None, "%r is None" % (x, ) def assertIn(self, x, y): assert x in y, "%r not in %r" % (x, y) def assertNotIn(self, x, y): assert x not in y, "%r in %r" % (x, y) def assertIsInstance(self, x, y): assert isinstance(x, y), "not isinstance(%r, %r)" % (x, y) def assertIs(self, x, y): assert x is y, "%r is not %r" % (x, y) def assertIsNot(self, x, y): assert x is not y, "%r is %r" % (x, y) def assertSequenceEqual(self, x, y): self.assertEqual(x, y) def assertRaisesRegex(self, exc_type, regex, *args, **kwargs): catcher = _ExceptionCatcher(self, exc_type) if not args: return catcher elif callable(args[0]): with catcher: args[0](*args[1:], **kwargs) if args[0] is not None: assert re.search(args[0], catcher.exc_value[0]) else: raise NotImplementedError() elif sys.version_info[0] == 2: def assertRaisesRegex(self, *args, **kwargs): return self.assertRaisesRegexp(*args, **kwargs) def assert_is_none(self, x): self.assertIsNone(x) def assert_is_not_none(self, x): self.assertIsNotNone(x) def assert_in(self, x, y): self.assertIn(x, y) def assert_is_instance(self, x, y): self.assertIsInstance(x, y) def assert_not_in(self, x, y): self.assertNotIn(x, y) def assert_is(self, x, y): self.assertIs(x, y) def assert_is_not(self, x, y): self.assertIsNot(x, y) def assert_true(self, 
x): self.assertTrue(x) def assert_false(self, x): self.assertFalse(x) def assert_raises_regex(self, *args, **kwargs): return self.assertRaisesRegex(*args, **kwargs) def assert_sequence_equal(self, x, y): self.assertSequenceEqual(x, y) def assert_strict_equal(self, x, y): '''Stricter version of assert_equal that doesn't do implicit conversion between unicode and strings''' self.assert_equal(x, y) assert issubclass(type(x), type(y)) or issubclass(type(y), type(x)), \ '%s != %s' % (type(x), type(y)) if isinstance(x, (bytes, text_type, integer_types)) or x is None: return elif isinstance(x, dict) or isinstance(y, dict): x = sorted(x.items()) y = sorted(y.items()) elif isinstance(x, set) or isinstance(y, set): x = sorted(x) y = sorted(y) rx, ry = repr(x), repr(y) if rx != ry: rx = rx[:200] + (rx[200:] and '...') ry = ry[:200] + (ry[200:] and '...') raise AssertionError(rx, ry) assert repr(x) == repr(y), repr((x, y))[:200] class _ExceptionCatcher(object): def __init__(self, test_case, exc_type): self.test_case = test_case self.exc_type = exc_type self.exc_value = None def __enter__(self): return self def __exit__(self, exc_type, exc_value, tb): exception_name = self.exc_type.__name__ if exc_type is None: self.test_case.fail('Expected exception of type %r' % exception_name) elif not issubclass(exc_type, self.exc_type): reraise(exc_type, exc_value, tb) self.exc_value = exc_value return True class BetterLoader(unittest.TestLoader): """A nicer loader that solves two problems. First of all we are setting up tests from different sources and we're doing this programmatically which breaks the default loading logic so this is required anyways. Secondly this loader has a nicer interpolation for test names than the default one so you can just do ``run-tests.py ViewTestCase`` and it will work. 
""" def getRootSuite(self): return suite() def loadTestsFromName(self, name, module=None): root = self.getRootSuite() if name == 'suite': return root all_tests = [] for testcase, testname in find_all_tests(root): if testname == name or \ testname.endswith('.' + name) or \ ('.' + name + '.') in testname or \ testname.startswith(name + '.'): all_tests.append(testcase) if not all_tests: raise LookupError('could not find test case for "%s"' % name) if len(all_tests) == 1: return all_tests[0] rv = unittest.TestSuite() for test in all_tests: rv.addTest(test) return rv def suite(): """A testsuite that has all the Flask tests. You can use this function to integrate the Flask tests into your own testsuite in case you want to test that monkeypatches to Flask do not break it. """ suite = unittest.TestSuite() for other_suite in iter_suites(__name__): suite.addTest(other_suite) return suite def main(): """Runs the testsuite as command line application.""" try: unittest.main(testLoader=BetterLoader(), defaultTest='suite') except Exception: import sys import traceback traceback.print_exc() sys.exit(1)
ahmadRagheb/goldenHR
refs/heads/master
erpnext/hr/doctype/salary_detail/salary_detail.py
56
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt

from __future__ import unicode_literals
import frappe
from frappe.model.document import Document

class SalaryDetail(Document):
	"""Server-side controller for the 'Salary Detail' DocType.

	All fields and validation for this DocType are defined declaratively
	in salary_detail.json; no server-side logic is needed here.
	NOTE(review): presumably used as a child table of Salary Structure /
	Salary Slip — confirm against the DocType JSON.
	"""
	pass
RUB-SysSec/kAFL
refs/heads/master
kAFL-Fuzzer/fuzzer/protocol.py
1
""" Copyright (C) 2017 Sergej Schumilo This file is part of kAFL Fuzzer (kAFL). QEMU-PT is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 2 of the License, or (at your option) any later version. QEMU-PT is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>. """ class FuzzingResult: def __init__(self, pos, crash, timeout, kasan, affected_bytes, slave_id, performance, reloaded=False, new_bits=True, qid=0): self.pos = pos self.crash = crash self.timeout = timeout self.kasan = kasan self.affected_bytes = affected_bytes self.slave_id = slave_id self.reloaded = reloaded self.performance = performance self.new_bits = new_bits self.qid = qid KAFL_TAG_REQ = 0 KAFL_TAG_JOB = 1 KAFL_TAG_OUTPUT = 2 KAFL_TAG_START = 3 KAFL_TAG_RESULT = 4 KAFL_TAG_MAP_INFO = 5 KAFL_TAG_NXT_FIN = 6 KAFL_TAG_NXT_UNFIN = 7 KAFL_TAG_UNTOUCHED_NODES = 8 KAFL_TAG_REQ_BITMAP = 9 KAFL_TAG_REQ_EFFECTOR = 10 KAFL_TAG_GET_EFFECTOR = 11 KAFL_INIT_BITMAP = 12 KAFL_TAG_REQ_SAMPLING = 13 KAFL_TAG_REQ_BENCHMARK = 14 KAFL_TAG_ABORT_REQ = 15
deshipu/micropython
refs/heads/master
tests/basics/set_difference.py
55
# Exercise set.difference() / set.difference_update() with varying numbers
# and types of arguments; expected output is defined by CPython behaviour.
l = [1, 2, 3, 4]
s = set(l)

# difference() accepts zero, one or several iterables (sets or lists)
for diff in (s.difference(),
             s.difference({1}),
             s.difference({1}, [1, 2]),
             s.difference({1}, {1, 2}, {2, 3})):
    print(sorted(diff))

# difference_update() mutates in place and always returns None
s = set(l)
for update_args in ((), ({1},), ({1}, [2])):
    print(s.difference_update(*update_args))
    print(sorted(s))

# removing a set from itself empties it
s.difference_update(s)
print(s)
kevinmel2000/brython
refs/heads/master
www/src/Lib/test/unittests/test_xml_dom_minicompat.py
100
# Tests for xml.dom.minicompat

import pickle
import unittest

import xml.dom
from xml.dom.minicompat import *


class EmptyNodeListTestCase(unittest.TestCase):
    """Behavioural tests for the EmptyNodeList class."""

    def test_emptynodelist_item(self):
        # item() tolerates any index and returns None; subscripting raises.
        empty = EmptyNodeList()

        self.assertIsNone(empty.item(0))
        self.assertIsNone(empty.item(-1))  # invalid item
        with self.assertRaises(IndexError):
            empty[0]
        with self.assertRaises(IndexError):
            empty[-1]

    def test_emptynodelist_length(self):
        empty = EmptyNodeList()
        # .length is readable ...
        self.assertEqual(empty.length, 0)
        # ... but write attempts must be rejected
        with self.assertRaises(xml.dom.NoModificationAllowedErr):
            empty.length = 111

    def test_emptynodelist___add__(self):
        self.assertEqual(EmptyNodeList() + NodeList(), NodeList())

    def test_emptynodelist___radd__(self):
        self.assertEqual([1, 2] + EmptyNodeList(), [1, 2])


class NodeListTestCase(unittest.TestCase):
    """Behavioural tests for the NodeList class."""

    def test_nodelist_item(self):
        # An empty NodeList behaves exactly like EmptyNodeList here.
        nodes = NodeList()

        self.assertIsNone(nodes.item(0))
        self.assertIsNone(nodes.item(-1))
        with self.assertRaises(IndexError):
            nodes[0]
        with self.assertRaises(IndexError):
            nodes[-1]

        # With contents: item() rejects negative indexes (returns None),
        # while subscripting follows normal sequence semantics.
        nodes.append(111)
        nodes.append(999)

        self.assertEqual(nodes.item(0), 111)
        self.assertIsNone(nodes.item(-1))  # invalid item
        self.assertEqual(nodes[0], 111)
        self.assertEqual(nodes[-1], 999)

    def test_nodelist_length(self):
        nodes = NodeList([1, 2])
        # Reading works, writing must be rejected.
        self.assertEqual(nodes.length, 2)
        with self.assertRaises(xml.dom.NoModificationAllowedErr):
            nodes.length = 111

    def test_nodelist___add__(self):
        self.assertEqual(NodeList([3, 4]) + [1, 2], NodeList([3, 4, 1, 2]))

    def test_nodelist___radd__(self):
        self.assertEqual([1, 2] + NodeList([3, 4]), NodeList([1, 2, 3, 4]))

    def test_nodelist_pickle_roundtrip(self):
        # Both an empty and a populated NodeList must survive pickling.
        nodes = NodeList()
        self.assertEqual(pickle.loads(pickle.dumps(nodes)), nodes)

        nodes.append(1)
        nodes.append(2)
        self.assertEqual(pickle.loads(pickle.dumps(nodes)), nodes)


if __name__ == '__main__':
    unittest.main()
sbidoul/odoo
refs/heads/8.0
addons/l10n_fr/wizard/fr_report_bilan.py
374
# -*- coding: utf-8 -*- ############################################################################## # # Copyright (c) 2008 JAILLET Simon - CrysaLEAD - www.crysalead.fr # # WARNING: This program as such is intended to be used by professional # programmers who take the whole responsability of assessing all potential # consequences resulting from its eventual inadequacies and bugs # End users who are looking for a ready-to-use solution with commercial # garantees and support are strongly adviced to contract a Free Software # Service Company # # This program is Free Software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################

from openerp.osv import fields, osv

class account_bilan_report(osv.osv_memory):
    """Transient wizard: pick a fiscal year, then launch the French
    'bilan' report (l10n_fr.report_l10nfrbilan)."""
    _name = 'account.bilan.report'
    _description = 'Account Bilan Report'

    def _get_default_fiscalyear(self, cr, uid, context=None):
        """Return the id of the fiscal year covering the current date."""
        fiscalyear_id = self.pool.get('account.fiscalyear').find(cr, uid)
        return fiscalyear_id

    _columns = {
        'fiscalyear_id': fields.many2one('account.fiscalyear', 'Fiscal Year',
                                         required=True),
    }
    _defaults = {
        'fiscalyear_id': _get_default_fiscalyear
    }

    def print_bilan_report(self, cr, uid, ids, context=None):
        """Collect the selected fiscal year and return the report action.

        Bug fix: the previous code called ``context.get(...)`` without
        guarding against ``context is None`` (the declared default), which
        raised AttributeError when the method was invoked without a context.
        """
        if context is None:
            context = {}
        active_ids = context.get('active_ids', [])
        data = {}
        data['form'] = {}
        data['ids'] = active_ids
        # Pass the context through so translations/timezone are honoured.
        data['form']['fiscalyear_id'] = \
            self.browse(cr, uid, ids, context=context)[0].fiscalyear_id.id
        return self.pool['report'].get_action(
            cr, uid, ids,
            'l10n_fr.report_l10nfrbilan',
            data=data, context=context
        )

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
AmineChikhaoui/nixops
refs/heads/master
nixops/resources/azure_network_security_group.py
6
# -*- coding: utf-8 -*- # Automatic provisioning of Azure network security groups. import os import azure from nixops.util import attr_property from nixops.azure_common import ResourceDefinition, ResourceState, ResId, normalize_location from azure.mgmt.network import * class AzureNetworkSecurityGroupDefinition(ResourceDefinition): """Definition of an Azure Network Security Group""" @classmethod def get_type(cls): return "azure-network-security-group" @classmethod def get_resource_type(cls): return "azureSecurityGroups" def __init__(self, xml): ResourceDefinition.__init__(self, xml) self.nsg_name = self.get_option_value(xml, 'name', str) self.copy_option(xml, 'resourceGroup', 'resource') self.copy_location(xml) self.copy_tags(xml) self.security_rules = { _r.get("name"): self._parse_security_rule(_r) for _r in xml.findall("attrs/attr[@name='securityRules']/attrs/attr") } def _parse_security_rule(self, xml): return { 'description': self.get_option_value(xml, 'description', str), 'protocol': self.get_option_value(xml, 'protocol', str), 'source_port_range': self.get_option_value(xml, 'sourcePortRange', str), 'destination_port_range': self.get_option_value(xml, 'destinationPortRange', str), 'source_address_prefix': self.get_option_value(xml, 'sourceAddressPrefix', str), 'destination_address_prefix': self.get_option_value(xml, 'destinationAddressPrefix', str), 'access': self.get_option_value(xml, 'access', str), 'priority': self.get_option_value(xml, 'priority', int), 'direction': self.get_option_value(xml, 'direction', str), } def show_type(self): return "{0} [{1}]".format(self.get_type(), self.location) class AzureNetworkSecurityGroupState(ResourceState): """State of an Azure Network Security Group""" nsg_name = attr_property("azure.name", None) resource_group = attr_property("azure.resourceGroup", None) location = attr_property("azure.location", None) tags = attr_property("azure.tags", {}, 'json') security_rules = attr_property("azure.securityRules", {}, 'json') 
@classmethod def get_type(cls): return "azure-network-security-group" def show_type(self): s = super(AzureNetworkSecurityGroupState, self).show_type() if self.state == self.UP: s = "{0} [{1}]".format(s, self.location) return s @property def resource_id(self): return self.nsg_name @property def full_name(self): return "Azure network security group '{0}'".format(self.resource_id) def get_resource(self): try: return self.nrpc().network_security_groups.get( self.resource_group, self.resource_id).network_security_group except azure.common.AzureMissingResourceHttpError: return None def destroy_resource(self): self.nrpc().network_security_groups.delete( self.resource_group, self.resource_id) defn_properties = [ 'location', 'tags', 'security_rules' ] def _create_or_update(self, defn): self.nrpc().network_security_groups.create_or_update( defn.resource_group, defn.nsg_name, NetworkSecurityGroup( location = defn.location, security_rules = [ SecurityRule( name = _name, description = _r['description'], protocol = _r['protocol'], source_port_range = _r['source_port_range'], destination_port_range = _r['destination_port_range'], source_address_prefix = _r['source_address_prefix'], destination_address_prefix = _r['destination_address_prefix'], access = _r['access'], priority = _r['priority'], direction = _r['direction'], ) for _name, _r in defn.security_rules.iteritems() ], tags = defn.tags)) self.state = self.UP self.copy_properties(defn) def handle_changed_security_rules(self, rules): def update_rules(k, v): x = self.security_rules if v == None: x.pop(k, None) else: x[k] = v self.security_rules = x for _rule in rules: _s_name = next((_n for _n, _x in self.security_rules.iteritems() if _n == _rule.name), None) if _s_name is None: self.warn("found unexpected security rule {0}".format(_rule.name)) update_rules(_rule.name, {"dummy": True}) for _name, _s_rule in self.security_rules.iteritems(): if _s_rule.get("dummy", False): continue rule_res_name = "security rule 
{0}".format(_name) rule = next((_r for _r in rules if _r.name == _name), None) if rule is None: self.warn("{0} has been deleted behind our back".format(rule_res_name)) update_rules(_name, None) continue self.handle_changed_dict(_s_rule, 'description', rule.description, resource_name = rule_res_name) self.handle_changed_dict(_s_rule, 'protocol', rule.protocol, resource_name = rule_res_name) self.handle_changed_dict(_s_rule, 'source_port_range', rule.source_port_range, resource_name = rule_res_name) self.handle_changed_dict(_s_rule, 'destination_port_range', rule.destination_port_range, resource_name = rule_res_name) self.handle_changed_dict(_s_rule, 'source_address_prefix', rule.source_address_prefix, resource_name = rule_res_name) self.handle_changed_dict(_s_rule, 'destination_address_prefix', rule.destination_address_prefix, resource_name = rule_res_name) self.handle_changed_dict(_s_rule, 'access', rule.access, resource_name = rule_res_name) self.handle_changed_dict(_s_rule, 'priority', rule.priority, resource_name = rule_res_name) self.handle_changed_dict(_s_rule, 'direction', rule.direction, resource_name = rule_res_name) update_rules(_name, _s_rule) def create(self, defn, check, allow_reboot, allow_recreate): self.no_subscription_id_change(defn) self.no_location_change(defn) self.no_property_change(defn, 'resource_group') self.copy_mgmt_credentials(defn) self.nsg_name = defn.nsg_name self.resource_group = defn.resource_group if check: nsg = self.get_settled_resource() if not nsg: self.warn_missing_resource() elif self.state == self.UP: self.warn_if_failed(nsg) self.handle_changed_property('location', normalize_location(nsg.location), can_fix = False) self.handle_changed_property('tags', nsg.tags) self.handle_changed_security_rules(nsg.security_rules) else: self.warn_not_supposed_to_exist() self.confirm_destroy() if self.state != self.UP: if self.get_settled_resource(): raise Exception("tried creating a network security group that already exists; " "please run 
'deploy --check' to fix this") self.log("creating {0} in {1}...".format(self.full_name, defn.location)) self._create_or_update(defn) if self.properties_changed(defn): self.log("updating properties of {0}...".format(self.full_name)) self.get_settled_resource_assert_exists() self._create_or_update(defn) def create_after(self, resources, defn): from nixops.resources.azure_resource_group import AzureResourceGroupState return {r for r in resources if isinstance(r, AzureResourceGroupState) }
youdonghai/intellij-community
refs/heads/master
python/testData/quickFixes/AddFieldQuickFixTest/addFieldFromMethod_after.py
80
class A: def __init__(self): self.y = None self.x = 1 def foo(self): a = self.y
servo/servo
refs/heads/master
tests/wpt/web-platform-tests/tools/third_party/h2/examples/tornado/tornado-server.py
39
#!/usr/bin/env python # -*- coding: utf-8 -*- """ tornado-server.py ~~~~~~~~~~~~~~~~~ A fully-functional HTTP/2 server written for Tornado. """ import collections import json import ssl import tornado.gen import tornado.ioloop import tornado.iostream import tornado.tcpserver from h2.config import H2Configuration from h2.connection import H2Connection from h2.events import RequestReceived, DataReceived def create_ssl_context(certfile, keyfile): ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) ssl_context.options |= ( ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1 | ssl.OP_NO_COMPRESSION ) ssl_context.set_ciphers("ECDHE+AESGCM") ssl_context.load_cert_chain(certfile=certfile, keyfile=keyfile) ssl_context.set_alpn_protocols(["h2"]) return ssl_context class H2Server(tornado.tcpserver.TCPServer): @tornado.gen.coroutine def handle_stream(self, stream, address): handler = EchoHeadersHandler(stream) yield handler.handle() class EchoHeadersHandler(object): def __init__(self, stream): self.stream = stream config = H2Configuration(client_side=False) self.conn = H2Connection(config=config) @tornado.gen.coroutine def handle(self): self.conn.initiate_connection() yield self.stream.write(self.conn.data_to_send()) while True: try: data = yield self.stream.read_bytes(65535, partial=True) if not data: break events = self.conn.receive_data(data) for event in events: if isinstance(event, RequestReceived): self.request_received(event.headers, event.stream_id) elif isinstance(event, DataReceived): self.conn.reset_stream(event.stream_id) yield self.stream.write(self.conn.data_to_send()) except tornado.iostream.StreamClosedError: break def request_received(self, headers, stream_id): headers = collections.OrderedDict(headers) data = json.dumps({'headers': headers}, indent=4).encode('utf-8') response_headers = ( (':status', '200'), ('content-type', 'application/json'), ('content-length', str(len(data))), ('server', 'tornado-h2'), ) self.conn.send_headers(stream_id, response_headers) 
self.conn.send_data(stream_id, data, end_stream=True) if __name__ == '__main__': ssl_context = create_ssl_context('server.crt', 'server.key') server = H2Server(ssl_options=ssl_context) server.listen(8888) io_loop = tornado.ioloop.IOLoop.current() io_loop.start()
beni55/edx-platform
refs/heads/master
common/test/acceptance/pages/studio/textbooks.py
103
""" Course Textbooks page. """ from .course_page import CoursePage class TextbooksPage(CoursePage): """ Course Textbooks page. """ url_path = "textbooks" def is_browser_on_page(self): return self.q(css='body.view-textbooks').present
petemoore/build-funsize
refs/heads/master
configs/gunicorn-dev-conf.py
1
import multiprocessing bind = ":5000" workers = multiprocessing.cpu_count() * 2 accesslog = "/var/log/funsize/frontend_access_log.log" user = "daemon" reload = True
Antiun/partner-contact
refs/heads/8.0
partner_contact_nationality/__init__.py
63
# -*- coding: utf-8 -*- # Odoo, Open Source Management Solution # Copyright (C) 2014-2015 Grupo ESOC <www.grupoesoc.es> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from . import models
devs1991/test_edx_docmode
refs/heads/master
venv/lib/python2.7/site-packages/django/conf/locale/sr/formats.py
394
# -*- encoding: utf-8 -*- # This file is distributed under the same license as the Django package. # from __future__ import unicode_literals # The *_FORMAT strings use the Django date format syntax, # see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATE_FORMAT = 'j. F Y.' TIME_FORMAT = 'H:i' DATETIME_FORMAT = 'j. F Y. H:i' YEAR_MONTH_FORMAT = 'F Y.' MONTH_DAY_FORMAT = 'j. F' SHORT_DATE_FORMAT = 'j.m.Y.' SHORT_DATETIME_FORMAT = 'j.m.Y. H:i' FIRST_DAY_OF_WEEK = 1 # The *_INPUT_FORMATS strings use the Python strftime format syntax, # see http://docs.python.org/library/datetime.html#strftime-strptime-behavior DATE_INPUT_FORMATS = ( '%d.%m.%Y.', '%d.%m.%y.', # '25.10.2006.', '25.10.06.' '%d. %m. %Y.', '%d. %m. %y.', # '25. 10. 2006.', '25. 10. 06.' # '%d. %b %y.', '%d. %B %y.', # '25. Oct 06.', '25. October 06.' # '%d. %b \'%y.', '%d. %B \'%y.', # '25. Oct '06.', '25. October '06.' # '%d. %b %Y.', '%d. %B %Y.', # '25. Oct 2006.', '25. October 2006.' ) DATETIME_INPUT_FORMATS = ( '%d.%m.%Y. %H:%M:%S', # '25.10.2006. 14:30:59' '%d.%m.%Y. %H:%M:%S.%f', # '25.10.2006. 14:30:59.000200' '%d.%m.%Y. %H:%M', # '25.10.2006. 14:30' '%d.%m.%Y.', # '25.10.2006.' '%d.%m.%y. %H:%M:%S', # '25.10.06. 14:30:59' '%d.%m.%y. %H:%M:%S.%f', # '25.10.06. 14:30:59.000200' '%d.%m.%y. %H:%M', # '25.10.06. 14:30' '%d.%m.%y.', # '25.10.06.' '%d. %m. %Y. %H:%M:%S', # '25. 10. 2006. 14:30:59' '%d. %m. %Y. %H:%M:%S.%f', # '25. 10. 2006. 14:30:59.000200' '%d. %m. %Y. %H:%M', # '25. 10. 2006. 14:30' '%d. %m. %Y.', # '25. 10. 2006.' '%d. %m. %y. %H:%M:%S', # '25. 10. 06. 14:30:59' '%d. %m. %y. %H:%M:%S.%f', # '25. 10. 06. 14:30:59.000200' '%d. %m. %y. %H:%M', # '25. 10. 06. 14:30' '%d. %m. %y.', # '25. 10. 06.' ) DECIMAL_SEPARATOR = ',' THOUSAND_SEPARATOR = '.' NUMBER_GROUPING = 3
brendandahl/servo
refs/heads/master
tests/wpt/web-platform-tests/websockets/handlers/echo-cookie_wsh.py
265
#!/usr/bin/python from mod_pywebsocket import msgutil def web_socket_do_extra_handshake(request): request.ws_cookie = request.headers_in.get('Cookie') def web_socket_transfer_data(request): if request.ws_cookie is not None: msgutil.send_message(request, request.ws_cookie) else: msgutil.send_message(request, '(none)')
gwpy/gwpy.github.io
refs/heads/master
docs/2.0.1/examples/spectrogram/ratio-2.py
32
specgram = data.spectrogram(2, fftlength=1, overlap=.5) ** (1/2.)
DataDog/integrations-core
refs/heads/master
envoy/datadog_checks/envoy/parser.py
1
import re from math import isnan from typing import Any, Dict, List, Tuple from six.moves import range, zip from .errors import UnknownMetric, UnknownTags from .metrics import METRIC_PREFIX, METRIC_TREE, METRICS HISTOGRAM = re.compile(r'([P0-9.]+)\(([^,]+)') PERCENTILE_SUFFIX = { 'P0': '.0percentile', 'P25': '.25percentile', 'P50': '.50percentile', 'P75': '.75percentile', 'P90': '.90percentile', 'P95': '.95percentile', 'P99': '.99percentile', 'P99.9': '.99_9percentile', 'P100': '.100percentile', } def _parse_metric(metric, metric_mapping, skip_part=None): metric_parts = [] tag_names = [] tag_values = [] tag_value_builder = [] unknown_tags = [] tags_to_build = 0 minimum_tag_length = 0 # From the split metric name, any part that is not in the mapping it will become part of the tag value for metric_part in metric.split('.'): if metric_part in metric_mapping and metric_part != skip_part and tags_to_build >= minimum_tag_length: # Rebuild any built up tags whenever we encounter a known metric part. if tag_value_builder: # Edge case where we hit a known metric part after a sequence of all unknown parts if '|_tags_|' not in metric_mapping: raise UnknownMetric tags = next( (mapped_tags for mapped_tags in metric_mapping['|_tags_|'] if tags_to_build >= len(mapped_tags)), tuple(), ) constructed_tag_values = construct_tag_values(tag_value_builder, len(tags)) # Once the builder has been used, clear its contents. 
tag_value_builder = [] if tags: tag_names.extend(tags) tag_values.extend(constructed_tag_values) else: unknown_tags.extend(constructed_tag_values) tags_to_build = 0 metric_parts.append(metric_part) metric_mapping = metric_mapping[metric_part] minimum_tag_length = len(metric_mapping['|_tags_|'][-1]) else: tag_value_builder.append(metric_part) tags_to_build += 1 return metric_parts, tag_value_builder, tag_names, tag_values, unknown_tags, tags_to_build, metric_mapping def parse_metric(metric, retry=False, metric_mapping=METRIC_TREE, disable_legacy_cluster_tag=False): # type: (str, Dict[str, Any]) -> Tuple[str, List[str], str] """Takes a metric formatted by Envoy and splits it into a unique metric name. Returns the unique metric name, a list of tags, and the name of the submission method. Example: 'listener.0.0.0.0_80.downstream_cx_total' -> ('listener.downstream_cx_total', ['address:0.0.0.0_80'], 'count') """ metric_parts, tag_value_builder, tag_names, tag_values, unknown_tags, tags_to_build, last_mapping = _parse_metric( metric, metric_mapping ) parsed_metric = '.'.join(metric_parts) if parsed_metric not in METRICS: if retry: skip_parts = [] # Retry parsing for metrics by skipping the last matched metric part while len(metric_parts) > 1: skip_part = metric_parts.pop() if skip_part in skip_parts: raise UnknownMetric else: skip_parts.append(skip_part) ( metric_parts, tag_value_builder, tag_names, tag_values, unknown_tags, tags_to_build, last_mapping, ) = _parse_metric(metric, metric_mapping, skip_part) parsed_metric = '.'.join(metric_parts) if parsed_metric in METRICS: break else: raise UnknownMetric else: raise UnknownMetric # Rebuild any trailing tags if tag_value_builder: tags = next( (mapped_tags for mapped_tags in last_mapping['|_tags_|'] if tags_to_build >= len(mapped_tags)), tuple() ) constructed_tag_values = construct_tag_values(tag_value_builder, len(tags)) if tags: tag_names.extend(tags) tag_values.extend(constructed_tag_values) else: 
unknown_tags.extend(constructed_tag_values) if unknown_tags: raise UnknownTags('{}'.format('|||'.join(unknown_tags))) if not disable_legacy_cluster_tag: for name, legacy_name in [('envoy_cluster', 'cluster_name'), ('virtual_envoy_cluster', 'virtual_cluster_name')]: try: pos = tag_names.index(name) tag_names.append(legacy_name) tag_values.append(tag_values[pos]) except ValueError: pass tags = ['{}:{}'.format(tag_name, tag_value) for tag_name, tag_value in zip(tag_names, tag_values)] return METRIC_PREFIX + parsed_metric, tags, METRICS[parsed_metric]['method'] def construct_tag_values(tag_builder, num_tags): # type: (List[str], int) -> List[str] # First fill in all trailing slots with one tag. tags = [tag_builder.pop() for _ in range(num_tags - 1)] # Merge any excess tag parts. if tag_builder: tags.append('.'.join(tag_builder)) # Return an iterator in the original order. return reversed(tags) def parse_histogram(metric, histogram): """Iterates over histogram data, yielding metric-value pairs.""" for match in HISTOGRAM.finditer(histogram): percentile, value = match.groups() value = float(value) if not isnan(value): try: yield metric + PERCENTILE_SUFFIX[percentile], value # In case Envoy adds more except KeyError: yield '{}.{}percentile'.format(metric, percentile[1:].replace('.', '_')), value
paplorinc/intellij-community
refs/heads/master
python/testData/intentions/PyConvertToFStringIntentionTest/percentOperatorWidthAndPrecision_after.py
31
f'{1:.5d} {2:3.5d} {3:3d} {"spam":>20} {4:<#d}'
GauravSahu/odoo
refs/heads/8.0
addons/base_geolocalize/models/__init__.py
1273
import res_partner
Orav/kbengine
refs/heads/master
kbe/src/lib/python/Tools/scripts/finddiv.py
1
#! /usr/bin/env python3 """finddiv - a grep-like tool that looks for division operators. Usage: finddiv [-l] file_or_directory ... For directory arguments, all files in the directory whose name ends in .py are processed, and subdirectories are processed recursively. This actually tokenizes the files to avoid false hits in comments or strings literals. By default, this prints all lines containing a / or /= operator, in grep -n style. With the -l option specified, it prints the filename of files that contain at least one / or /= operator. """ import os import sys import getopt import tokenize def main(): try: opts, args = getopt.getopt(sys.argv[1:], "lh") except getopt.error as msg: usage(msg) return 2 if not args: usage("at least one file argument is required") return 2 listnames = 0 for o, a in opts: if o == "-h": print(__doc__) return if o == "-l": listnames = 1 exit = None for filename in args: x = process(filename, listnames) exit = exit or x return exit def usage(msg): sys.stderr.write("%s: %s\n" % (sys.argv[0], msg)) sys.stderr.write("Usage: %s [-l] file ...\n" % sys.argv[0]) sys.stderr.write("Try `%s -h' for more information.\n" % sys.argv[0]) def process(filename, listnames): if os.path.isdir(filename): return processdir(filename, listnames) try: fp = open(filename) except IOError as msg: sys.stderr.write("Can't open: %s\n" % msg) return 1 g = tokenize.generate_tokens(fp.readline) lastrow = None for type, token, (row, col), end, line in g: if token in ("/", "/="): if listnames: print(filename) break if row != lastrow: lastrow = row print("%s:%d:%s" % (filename, row, line), end=' ') fp.close() def processdir(dir, listnames): try: names = os.listdir(dir) except OSError as msg: sys.stderr.write("Can't list directory: %s\n" % dir) return 1 files = [] for name in names: fn = os.path.join(dir, name) if os.path.normcase(fn).endswith(".py") or os.path.isdir(fn): files.append(fn) files.sort(key=os.path.normcase) exit = None for fn in files: x = process(fn, listnames) 
exit = exit or x return exit if __name__ == "__main__": sys.exit(main())
BenTheElder/test-infra
refs/heads/master
gubernator/third_party/defusedxml/common.py
55
# defusedxml # # Copyright (c) 2013 by Christian Heimes <christian@python.org> # Licensed to PSF under a Contributor Agreement. # See http://www.python.org/psf/license for licensing details. """Common constants, exceptions and helpe functions """ import sys from types import MethodType PY3 = sys.version_info[0] == 3 PY26 = sys.version_info[:2] == (2, 6) PY31 = sys.version_info[:2] == (3, 1) class DefusedXmlException(ValueError): """Base exception """ def __repr__(self): return str(self) class DTDForbidden(DefusedXmlException): """Document type definition is forbidden """ def __init__(self, name, sysid, pubid): super(DTDForbidden, self).__init__() self.name = name self.sysid = sysid self.pubid = pubid def __str__(self): tpl = "DTDForbidden(name='{}', system_id={!r}, public_id={!r})" return tpl.format(self.name, self.sysid, self.pubid) class EntitiesForbidden(DefusedXmlException): """Entity definition is forbidden """ def __init__(self, name, value, base, sysid, pubid, notation_name): super(EntitiesForbidden, self).__init__() self.name = name self.value = value self.base = base self.sysid = sysid self.pubid = pubid self.notation_name = notation_name def __str__(self): tpl = "EntitiesForbidden(name='{}', system_id={!r}, public_id={!r})" return tpl.format(self.name, self.sysid, self.pubid) class ExternalReferenceForbidden(DefusedXmlException): """Resolving an external reference is forbidden """ def __init__(self, context, base, sysid, pubid): super(ExternalReferenceForbidden, self).__init__() self.context = context self.base = base self.sysid = sysid self.pubid = pubid def __str__(self): tpl = "ExternalReferenceForbidden(system_id='{}', public_id={})" return tpl.format(self.sysid, self.pubid) class NotSupportedError(DefusedXmlException): """The operation is not supported """ def _apply_defusing(defused_mod): assert defused_mod is sys.modules[defused_mod.__name__] stdlib_name = defused_mod.__origin__ __import__(stdlib_name, {}, {}, ["*"]) stdlib_mod = 
sys.modules[stdlib_name] stdlib_names = set(dir(stdlib_mod)) for name, obj in vars(defused_mod).items(): if name.startswith("_") or name not in stdlib_names: continue setattr(stdlib_mod, name, obj) return stdlib_mod def _generate_etree_functions(DefusedXMLParser, _TreeBuilder, _IterParseIterator, _parse, _iterparse): """Factory for functions needed by etree, dependent on whether cElementTree or ElementTree is used.""" def parse(source, parser=None, forbid_dtd=False, forbid_entities=True, forbid_external=True): if parser is None: parser = DefusedXMLParser(target=_TreeBuilder(), forbid_dtd=forbid_dtd, forbid_entities=forbid_entities, forbid_external=forbid_external) return _parse(source, parser) if PY26 or PY31: def bind(xmlparser, funcname, hookname): func = getattr(DefusedXMLParser, funcname) if PY26: # unbound -> function func = func.__func__ method = MethodType(func, xmlparser, xmlparser.__class__) else: method = MethodType(func, xmlparser) # set hook setattr(xmlparser._parser, hookname, method) def iterparse(source, events=None, forbid_dtd=False, forbid_entities=True, forbid_external=True): it = _iterparse(source, events) xmlparser = it._parser if forbid_dtd: bind(xmlparser, "defused_start_doctype_decl", "StartDoctypeDeclHandler") if forbid_entities: bind(xmlparser, "defused_entity_decl", "EntityDeclHandler") bind(xmlparser, "defused_unparsed_entity_decl", "UnparsedEntityDeclHandler") if forbid_external: bind(xmlparser, "defused_external_entity_ref_handler", "ExternalEntityRefHandler") return it elif PY3: def iterparse(source, events=None, parser=None, forbid_dtd=False, forbid_entities=True, forbid_external=True): close_source = False if not hasattr(source, "read"): source = open(source, "rb") close_source = True if not parser: parser = DefusedXMLParser(target=_TreeBuilder(), forbid_dtd=forbid_dtd, forbid_entities=forbid_entities, forbid_external=forbid_external) return _IterParseIterator(source, events, parser, close_source) else: # Python 2.7 def 
iterparse(source, events=None, parser=None, forbid_dtd=False, forbid_entities=True, forbid_external=True): if parser is None: parser = DefusedXMLParser(target=_TreeBuilder(), forbid_dtd=forbid_dtd, forbid_entities=forbid_entities, forbid_external=forbid_external) return _iterparse(source, events, parser) def fromstring(text, forbid_dtd=False, forbid_entities=True, forbid_external=True): parser = DefusedXMLParser(target=_TreeBuilder(), forbid_dtd=forbid_dtd, forbid_entities=forbid_entities, forbid_external=forbid_external) parser.feed(text) return parser.close() return parse, iterparse, fromstring
mathmastin/NumString
refs/heads/master
src/numcass.py
1
__author__ = 'Matt Mastin' import numstring import cass import cassandra class NumCass(numstring.NSPGenerator): def __init__(self, hosts=None, stringsize=0): """Subclass constructor for NumCass This class is mainly for insertion of a NumStringPile into Cassandra Use NumKeyspace to access an existing Pile. hosts will default to localhost, but keyspace must be provided :type stringsize: int """ super(NumCass, self).__init__(stringsize) self.controller = NumKeyspace(stringsize, hosts) def insertpile(self): """Inserts the pile into keyspace with a column family for each starting digit""" count = 1 for i in self.getgen(): self.controller.session.execute_async("INSERT INTO start%s (num_string, comp) VALUES (%s, %s)", [i.digits[0], str(i.digits), count]) count += 1 def create(self): """Create the Keyspace""" self.controller.createnumkeyspace() def delete(self): """Delete the Keyspace""" self.controller.deletenumkeyspace() #def attachkeyspace(self): # self.controller.usekeyspace(self.controller.keyspace) class NumKeyspace(cass.CassController): """Subcalss of CassController for use by NumCass Used for interacting with a NumStringPile that lives in a Cassandra keyspace """ def __init__(self, stringsize=1, hosts=None): super(NumKeyspace, self).__init__(hosts) self.stringsize = stringsize self.keyspace = 'numstring' + str(stringsize) def createnumkeyspace(self): """Creates a keyspace using the NumString data model""" self.session.execute( "CREATE KEYSPACE IF NOT EXISTS numstring%s WITH REPLICATION = " "{ 'class' : 'SimpleStrategy', 'replication_factor' : 1 }", [self.stringsize]) # Set the keyspace of the session self.session.set_keyspace(self.keyspace) # This will pass "USE keyspace" to the cluster so we do not need # to specify the keyspace in queries self.usekeyspace(self.keyspace) # Now we create the tables for i in range(0, 10): self.session.execute( "CREATE TABLE IF NOT EXISTS start%s (num_string varchar PRIMARY KEY, comp int)", [i]) def deletenumkeyspace(self): 
"""Deletes the keyspace associated to the NumKeyspace""" self.deletekeyspace() def numquery(self, cqlstatement): """Yields a generator into the results of the query cqlstatement Note that we shouldn't need to explicitly include the name of the keyspace """ # We assume that if a query is called then the keyspace exists and should be used # if the keyspace doesn't exist then Cassandra will throw an InvalidRequest error # if the generator is used. self.usekeyspace(self.keyspace) results = self.query(cqlstatement) for i in results: # The query yields a unicode version of the digits, so we must format and cast # to a list of ints to send to the constructor of NumString if self.stringsize > 1: yield numstring.NumString(map(int, str(i[0]).lstrip('(').rstrip(')').split(','))) else: yield numstring.NumString([int(i[0][1])]) def getnhbs(self, numstring): """Returns generator into list of neighbors of numstring in the NumString graph Two NumStrings are neighbors if one can be obtained from the other by adding 1 to some integer in the string and subtracting 1 from another """ if numstring.stringsize == 0: return None for i in range(0,numstring.stringsize): for j in range(0,numstring.stringsize): if i != j: nbr = numstring.digits def dumpsubgraph(self, vertlist = None): """Dumps neighbor subgraph corresponding to the vertices in vertlist to file for processing by Graphx""" pass def updatecomps(self): """Reads connected component data from Graphx generated file and update component information in the NumString keyspace""" pass
dfunckt/django
refs/heads/master
django/contrib/gis/geos/point.py
47
import warnings from ctypes import c_uint from django.contrib.gis import gdal from django.contrib.gis.geos import prototypes as capi from django.contrib.gis.geos.error import GEOSException from django.contrib.gis.geos.geometry import GEOSGeometry from django.utils import six from django.utils.deprecation import RemovedInDjango20Warning from django.utils.six.moves import range class Point(GEOSGeometry): _minlength = 2 _maxlength = 3 has_cs = True def __init__(self, x=None, y=None, z=None, srid=None): """ The Point object may be initialized with either a tuple, or individual parameters. For Example: >>> p = Point((5, 23)) # 2D point, passed in as a tuple >>> p = Point(5, 23, 8) # 3D point, passed in with individual parameters """ if x is None: coords = [] elif isinstance(x, (tuple, list)): # Here a tuple or list was passed in under the `x` parameter. coords = x elif isinstance(x, six.integer_types + (float,)) and isinstance(y, six.integer_types + (float,)): # Here X, Y, and (optionally) Z were passed in individually, as parameters. if isinstance(z, six.integer_types + (float,)): coords = [x, y, z] else: coords = [x, y] else: raise TypeError('Invalid parameters given for Point initialization.') point = self._create_point(len(coords), coords) # Initializing using the address returned from the GEOS # createPoint factory. 
super(Point, self).__init__(point, srid=srid) def _ogr_ptr(self): return gdal.geometries.Point._create_empty() if self.empty else super(Point, self)._ogr_ptr() @classmethod def _create_empty(cls): return cls._create_point(None, None) @classmethod def _create_point(cls, ndim, coords): """ Create a coordinate sequence, set X, Y, [Z], and create point """ if not ndim: return capi.create_point(None) if ndim < 2 or ndim > 3: raise TypeError('Invalid point dimension: %s' % str(ndim)) cs = capi.create_cs(c_uint(1), c_uint(ndim)) i = iter(coords) capi.cs_setx(cs, 0, next(i)) capi.cs_sety(cs, 0, next(i)) if ndim == 3: capi.cs_setz(cs, 0, next(i)) return capi.create_point(cs) def _set_list(self, length, items): ptr = self._create_point(length, items) if ptr: capi.destroy_geom(self.ptr) self._ptr = ptr self._set_cs() else: # can this happen? raise GEOSException('Geometry resulting from slice deletion was invalid.') def _set_single(self, index, value): self._cs.setOrdinate(index, 0, value) def __iter__(self): "Allows iteration over coordinates of this Point." for i in range(len(self)): yield self[i] def __len__(self): "Returns the number of dimensions for this Point (either 0, 2 or 3)." if self.empty: return 0 if self.hasz: return 3 else: return 2 def _get_single_external(self, index): if index == 0: return self.x elif index == 1: return self.y elif index == 2: return self.z _get_single_internal = _get_single_external @property def x(self): "Returns the X component of the Point." return self._cs.getOrdinate(0, 0) @x.setter def x(self, value): "Sets the X component of the Point." self._cs.setOrdinate(0, 0, value) @property def y(self): "Returns the Y component of the Point." return self._cs.getOrdinate(1, 0) @y.setter def y(self, value): "Sets the Y component of the Point." self._cs.setOrdinate(1, 0, value) @property def z(self): "Returns the Z component of the Point." 
return self._cs.getOrdinate(2, 0) if self.hasz else None @z.setter def z(self, value): "Sets the Z component of the Point." if not self.hasz: raise GEOSException('Cannot set Z on 2D Point.') self._cs.setOrdinate(2, 0, value) def get_x(self): warnings.warn( "`get_x()` is deprecated, use the `x` property instead.", RemovedInDjango20Warning, 2 ) return self.x def set_x(self, value): warnings.warn( "`set_x()` is deprecated, use the `x` property instead.", RemovedInDjango20Warning, 2 ) self.x = value def get_y(self): warnings.warn( "`get_y()` is deprecated, use the `y` property instead.", RemovedInDjango20Warning, 2 ) return self.y def set_y(self, value): warnings.warn( "`set_y()` is deprecated, use the `y` property instead.", RemovedInDjango20Warning, 2 ) self.y = value def get_z(self): warnings.warn( "`get_z()` is deprecated, use the `z` property instead.", RemovedInDjango20Warning, 2 ) return self.z def set_z(self, value): warnings.warn( "`set_z()` is deprecated, use the `z` property instead.", RemovedInDjango20Warning, 2 ) self.z = value # ### Tuple setting and retrieval routines. ### @property def tuple(self): "Returns a tuple of the point." return self._cs.tuple @tuple.setter def tuple(self, tup): "Sets the coordinates of the point with the given tuple." self._cs[0] = tup def get_coords(self): warnings.warn( "`get_coords()` is deprecated, use the `tuple` property instead.", RemovedInDjango20Warning, 2 ) return self.tuple def set_coords(self, tup): warnings.warn( "`set_coords()` is deprecated, use the `tuple` property instead.", RemovedInDjango20Warning, 2 ) self.tuple = tup # The tuple and coords properties coords = tuple
Sentient07/scikit-learn
refs/heads/master
sklearn/utils/extmath.py
19
""" Extended math utilities. """ # Authors: Gael Varoquaux # Alexandre Gramfort # Alexandre T. Passos # Olivier Grisel # Lars Buitinck # Stefan van der Walt # Kyle Kastner # Giorgio Patrini # License: BSD 3 clause from __future__ import division from functools import partial import warnings import numpy as np from scipy import linalg from scipy.sparse import issparse, csr_matrix from . import check_random_state from .fixes import np_version from ._logistic_sigmoid import _log_logistic_sigmoid from ..externals.six.moves import xrange from .sparsefuncs_fast import csr_row_norms from .validation import check_array from ..exceptions import NonBLASDotWarning def norm(x): """Compute the Euclidean or Frobenius norm of x. Returns the Euclidean norm when x is a vector, the Frobenius norm when x is a matrix (2-d array). More precise than sqrt(squared_norm(x)). """ x = np.asarray(x) nrm2, = linalg.get_blas_funcs(['nrm2'], [x]) return nrm2(x) # Newer NumPy has a ravel that needs less copying. if np_version < (1, 7, 1): _ravel = np.ravel else: _ravel = partial(np.ravel, order='K') def squared_norm(x): """Squared Euclidean or Frobenius norm of x. Returns the Euclidean norm when x is a vector, the Frobenius norm when x is a matrix (2-d array). Faster than norm(x) ** 2. """ x = _ravel(x) return np.dot(x, x) def row_norms(X, squared=False): """Row-wise (squared) Euclidean norm of X. Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports sparse matrices and does not create an X.shape-sized temporary. Performs no input validation. """ if issparse(X): if not isinstance(X, csr_matrix): X = csr_matrix(X) norms = csr_row_norms(X) else: norms = np.einsum('ij,ij->i', X, X) if not squared: np.sqrt(norms, norms) return norms def fast_logdet(A): """Compute log(det(A)) for A symmetric Equivalent to : np.log(nl.det(A)) but more robust. It returns -Inf if det(A) is non positive or is not defined. 
""" sign, ld = np.linalg.slogdet(A) if not sign > 0: return -np.inf return ld def _impose_f_order(X): """Helper Function""" # important to access flags instead of calling np.isfortran, # this catches corner cases. if X.flags.c_contiguous: return check_array(X.T, copy=False, order='F'), True else: return check_array(X, copy=False, order='F'), False def _fast_dot(A, B): if B.shape[0] != A.shape[A.ndim - 1]: # check adopted from '_dotblas.c' raise ValueError if A.dtype != B.dtype or any(x.dtype not in (np.float32, np.float64) for x in [A, B]): warnings.warn('Falling back to np.dot. ' 'Data must be of same type of either ' '32 or 64 bit float for the BLAS function, gemm, to be ' 'used for an efficient dot operation. ', NonBLASDotWarning) raise ValueError if min(A.shape) == 1 or min(B.shape) == 1 or A.ndim != 2 or B.ndim != 2: raise ValueError # scipy 0.9 compliant API dot = linalg.get_blas_funcs(['gemm'], (A, B))[0] A, trans_a = _impose_f_order(A) B, trans_b = _impose_f_order(B) return dot(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b) def _have_blas_gemm(): try: linalg.get_blas_funcs(['gemm']) return True except (AttributeError, ValueError): warnings.warn('Could not import BLAS, falling back to np.dot') return False # Only use fast_dot for older NumPy; newer ones have tackled the speed issue. if np_version < (1, 7, 2) and _have_blas_gemm(): def fast_dot(A, B): """Compute fast dot products directly calling BLAS. This function calls BLAS directly while warranting Fortran contiguity. This helps avoiding extra copies `np.dot` would have created. For details see section `Linear Algebra on large Arrays`: http://wiki.scipy.org/PerformanceTips Parameters ---------- A, B: instance of np.ndarray Input arrays. Arrays are supposed to be of the same dtype and to have exactly 2 dimensions. Currently only floats are supported. In case these requirements aren't met np.dot(A, B) is returned instead. 
To activate the related warning issued in this case execute the following lines of code: >> import warnings >> from sklearn.exceptions import NonBLASDotWarning >> warnings.simplefilter('always', NonBLASDotWarning) """ try: return _fast_dot(A, B) except ValueError: # Maltyped or malformed data. return np.dot(A, B) else: fast_dot = np.dot def density(w, **kwargs): """Compute density of a sparse vector Return a value between 0 and 1 """ if hasattr(w, "toarray"): d = float(w.nnz) / (w.shape[0] * w.shape[1]) else: d = 0 if w is None else float((w != 0).sum()) / w.size return d def safe_sparse_dot(a, b, dense_output=False): """Dot product that handle the sparse matrix case correctly Uses BLAS GEMM as replacement for numpy.dot where possible to avoid unnecessary copies. """ if issparse(a) or issparse(b): ret = a * b if dense_output and hasattr(ret, "toarray"): ret = ret.toarray() return ret else: return fast_dot(a, b) def randomized_range_finder(A, size, n_iter, power_iteration_normalizer='auto', random_state=None): """Computes an orthonormal matrix whose range approximates the range of A. Parameters ---------- A : 2D array The input data matrix size : integer Size of the return array n_iter : integer Number of power iterations used to stabilize the result power_iteration_normalizer : 'auto' (default), 'QR', 'LU', 'none' Whether the power iterations are normalized with step-by-step QR factorization (the slowest but most accurate), 'none' (the fastest but numerically unstable when `n_iter` is large, e.g. typically 5 or larger), or 'LU' factorization (numerically stable but can lose slightly in accuracy). The 'auto' mode applies no normalization if `n_iter`<=2 and switches to LU otherwise. .. versionadded:: 0.18 random_state : RandomState or an int seed (0 by default) A random number generator instance Returns ------- Q : 2D array A (size x size) projection matrix, the range of which approximates well the range of the input matrix A. 
Notes ----- Follows Algorithm 4.3 of Finding structure with randomness: Stochastic algorithms for constructing approximate matrix decompositions Halko, et al., 2009 (arXiv:909) http://arxiv.org/pdf/0909.4061 An implementation of a randomized algorithm for principal component analysis A. Szlam et al. 2014 """ random_state = check_random_state(random_state) # Generating normal random vectors with shape: (A.shape[1], size) Q = random_state.normal(size=(A.shape[1], size)) # Deal with "auto" mode if power_iteration_normalizer == 'auto': if n_iter <= 2: power_iteration_normalizer = 'none' else: power_iteration_normalizer = 'LU' # Perform power iterations with Q to further 'imprint' the top # singular vectors of A in Q for i in range(n_iter): if power_iteration_normalizer == 'none': Q = safe_sparse_dot(A, Q) Q = safe_sparse_dot(A.T, Q) elif power_iteration_normalizer == 'LU': Q, _ = linalg.lu(safe_sparse_dot(A, Q), permute_l=True) Q, _ = linalg.lu(safe_sparse_dot(A.T, Q), permute_l=True) elif power_iteration_normalizer == 'QR': Q, _ = linalg.qr(safe_sparse_dot(A, Q), mode='economic') Q, _ = linalg.qr(safe_sparse_dot(A.T, Q), mode='economic') # Sample the range of A using by linear projection of Q # Extract an orthonormal basis Q, _ = linalg.qr(safe_sparse_dot(A, Q), mode='economic') return Q def randomized_svd(M, n_components, n_oversamples=10, n_iter='auto', power_iteration_normalizer='auto', transpose='auto', flip_sign=True, random_state=0): """Computes a truncated randomized SVD Parameters ---------- M : ndarray or sparse matrix Matrix to decompose n_components : int Number of singular values and vectors to extract. n_oversamples : int (default is 10) Additional number of random vectors to sample the range of M so as to ensure proper conditioning. The total number of random vectors used to find the range of M is n_components + n_oversamples. Smaller number can improve speed but can negatively impact the quality of approximation of singular vectors and singular values. 
n_iter : int or 'auto' (default is 'auto') Number of power iterations. It can be used to deal with very noisy problems. When 'auto', it is set to 4, unless `n_components` is small (< .1 * min(X.shape)) `n_iter` in which case is set to 7. This improves precision with few components. .. versionchanged:: 0.18 power_iteration_normalizer : 'auto' (default), 'QR', 'LU', 'none' Whether the power iterations are normalized with step-by-step QR factorization (the slowest but most accurate), 'none' (the fastest but numerically unstable when `n_iter` is large, e.g. typically 5 or larger), or 'LU' factorization (numerically stable but can lose slightly in accuracy). The 'auto' mode applies no normalization if `n_iter`<=2 and switches to LU otherwise. .. versionadded:: 0.18 transpose : True, False or 'auto' (default) Whether the algorithm should be applied to M.T instead of M. The result should approximately be the same. The 'auto' mode will trigger the transposition if M.shape[1] > M.shape[0] since this implementation of randomized SVD tend to be a little faster in that case. .. versionchanged:: 0.18 flip_sign : boolean, (True by default) The output of a singular value decomposition is only unique up to a permutation of the signs of the singular vectors. If `flip_sign` is set to `True`, the sign ambiguity is resolved by making the largest loadings for each component in the left singular vectors positive. random_state : RandomState or an int seed (0 by default) A random number generator instance to make behavior Notes ----- This algorithm finds a (usually very good) approximate truncated singular value decomposition using randomization to speed up the computations. It is particularly fast on large matrices on which you wish to extract only a small number of components. In order to obtain further speed up, `n_iter` can be set <=2 (at the cost of loss of precision). 
References ---------- * Finding structure with randomness: Stochastic algorithms for constructing approximate matrix decompositions Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061 * A randomized algorithm for the decomposition of matrices Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert * An implementation of a randomized algorithm for principal component analysis A. Szlam et al. 2014 """ random_state = check_random_state(random_state) n_random = n_components + n_oversamples n_samples, n_features = M.shape if n_iter == 'auto': # Checks if the number of iterations is explicitely specified # Adjust n_iter. 7 was found a good compromise for PCA. See #5299 n_iter = 7 if n_components < .1 * min(M.shape) else 4 if transpose == 'auto': transpose = n_samples < n_features if transpose: # this implementation is a bit faster with smaller shape[1] M = M.T Q = randomized_range_finder(M, n_random, n_iter, power_iteration_normalizer, random_state) # project M to the (k + p) dimensional space using the basis vectors B = safe_sparse_dot(Q.T, M) # compute the SVD on the thin matrix: (k + p) wide Uhat, s, V = linalg.svd(B, full_matrices=False) del B U = np.dot(Q, Uhat) if flip_sign: if not transpose: U, V = svd_flip(U, V) else: # In case of transpose u_based_decision=false # to actually flip based on u and not v. U, V = svd_flip(U, V, u_based_decision=False) if transpose: # transpose back the results according to the input convention return V[:n_components, :].T, s[:n_components], U[:, :n_components].T else: return U[:, :n_components], s[:n_components], V[:n_components, :] def logsumexp(arr, axis=0): """Computes the sum of arr assuming arr is in the log domain. Returns log(sum(exp(arr))) while minimizing the possibility of over/underflow. 
Examples -------- >>> import numpy as np >>> from sklearn.utils.extmath import logsumexp >>> a = np.arange(10) >>> np.log(np.sum(np.exp(a))) 9.4586297444267107 >>> logsumexp(a) 9.4586297444267107 """ arr = np.rollaxis(arr, axis) # Use the max to normalize, as with the log this is what accumulates # the less errors vmax = arr.max(axis=0) out = np.log(np.sum(np.exp(arr - vmax), axis=0)) out += vmax return out def weighted_mode(a, w, axis=0): """Returns an array of the weighted modal (most common) value in a If there is more than one such value, only the first is returned. The bin-count for the modal bins is also returned. This is an extension of the algorithm in scipy.stats.mode. Parameters ---------- a : array_like n-dimensional array of which to find mode(s). w : array_like n-dimensional array of weights for each value axis : int, optional Axis along which to operate. Default is 0, i.e. the first axis. Returns ------- vals : ndarray Array of modal values. score : ndarray Array of weighted counts for each mode. Examples -------- >>> from sklearn.utils.extmath import weighted_mode >>> x = [4, 1, 4, 2, 4, 2] >>> weights = [1, 1, 1, 1, 1, 1] >>> weighted_mode(x, weights) (array([ 4.]), array([ 3.])) The value 4 appears three times: with uniform weights, the result is simply the mode of the distribution. >>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's >>> weighted_mode(x, weights) (array([ 2.]), array([ 3.5])) The value 2 has the highest score: it appears twice with weights of 1.5 and 2: the sum of these is 3. 
See Also -------- scipy.stats.mode """ if axis is None: a = np.ravel(a) w = np.ravel(w) axis = 0 else: a = np.asarray(a) w = np.asarray(w) axis = axis if a.shape != w.shape: w = np.zeros(a.shape, dtype=w.dtype) + w scores = np.unique(np.ravel(a)) # get ALL unique values testshape = list(a.shape) testshape[axis] = 1 oldmostfreq = np.zeros(testshape) oldcounts = np.zeros(testshape) for score in scores: template = np.zeros(a.shape) ind = (a == score) template[ind] = w[ind] counts = np.expand_dims(np.sum(template, axis), axis) mostfrequent = np.where(counts > oldcounts, score, oldmostfreq) oldcounts = np.maximum(counts, oldcounts) oldmostfreq = mostfrequent return mostfrequent, oldcounts def pinvh(a, cond=None, rcond=None, lower=True): """Compute the (Moore-Penrose) pseudo-inverse of a hermetian matrix. Calculate a generalized inverse of a symmetric matrix using its eigenvalue decomposition and including all 'large' eigenvalues. Parameters ---------- a : array, shape (N, N) Real symmetric or complex hermetian matrix to be pseudo-inverted cond : float or None, default None Cutoff for 'small' eigenvalues. Singular values smaller than rcond * largest_eigenvalue are considered zero. If None or -1, suitable machine precision is used. rcond : float or None, default None (deprecated) Cutoff for 'small' eigenvalues. Singular values smaller than rcond * largest_eigenvalue are considered zero. If None or -1, suitable machine precision is used. lower : boolean Whether the pertinent array data is taken from the lower or upper triangle of a. 
(Default: lower) Returns ------- B : array, shape (N, N) Raises ------ LinAlgError If eigenvalue does not converge Examples -------- >>> import numpy as np >>> a = np.random.randn(9, 6) >>> a = np.dot(a, a.T) >>> B = pinvh(a) >>> np.allclose(a, np.dot(a, np.dot(B, a))) True >>> np.allclose(B, np.dot(B, np.dot(a, B))) True """ a = np.asarray_chkfinite(a) s, u = linalg.eigh(a, lower=lower) if rcond is not None: cond = rcond if cond in [None, -1]: t = u.dtype.char.lower() factor = {'f': 1E3, 'd': 1E6} cond = factor[t] * np.finfo(t).eps # unlike svd case, eigh can lead to negative eigenvalues above_cutoff = (abs(s) > cond * np.max(abs(s))) psigma_diag = np.zeros_like(s) psigma_diag[above_cutoff] = 1.0 / s[above_cutoff] return np.dot(u * psigma_diag, np.conjugate(u).T) def cartesian(arrays, out=None): """Generate a cartesian product of input arrays. Parameters ---------- arrays : list of array-like 1-D arrays to form the cartesian product of. out : ndarray Array to place the cartesian product in. Returns ------- out : ndarray 2-D array of shape (M, len(arrays)) containing cartesian products formed of input arrays. Examples -------- >>> cartesian(([1, 2, 3], [4, 5], [6, 7])) array([[1, 4, 6], [1, 4, 7], [1, 5, 6], [1, 5, 7], [2, 4, 6], [2, 4, 7], [2, 5, 6], [2, 5, 7], [3, 4, 6], [3, 4, 7], [3, 5, 6], [3, 5, 7]]) """ arrays = [np.asarray(x) for x in arrays] shape = (len(x) for x in arrays) dtype = arrays[0].dtype ix = np.indices(shape) ix = ix.reshape(len(arrays), -1).T if out is None: out = np.empty_like(ix, dtype=dtype) for n, arr in enumerate(arrays): out[:, n] = arrays[n][ix[:, n]] return out def svd_flip(u, v, u_based_decision=True): """Sign correction to ensure deterministic output from SVD. Adjusts the columns of u and the rows of v such that the loadings in the columns in u that are largest in absolute value are always positive. 
Parameters ---------- u, v : ndarray u and v are the output of `linalg.svd` or `sklearn.utils.extmath.randomized_svd`, with matching inner dimensions so one can compute `np.dot(u * s, v)`. u_based_decision : boolean, (default=True) If True, use the columns of u as the basis for sign flipping. Otherwise, use the rows of v. The choice of which variable to base the decision on is generally algorithm dependent. Returns ------- u_adjusted, v_adjusted : arrays with the same dimensions as the input. """ if u_based_decision: # columns of u, rows of v max_abs_cols = np.argmax(np.abs(u), axis=0) signs = np.sign(u[max_abs_cols, xrange(u.shape[1])]) u *= signs v *= signs[:, np.newaxis] else: # rows of v, columns of u max_abs_rows = np.argmax(np.abs(v), axis=1) signs = np.sign(v[xrange(v.shape[0]), max_abs_rows]) u *= signs v *= signs[:, np.newaxis] return u, v def log_logistic(X, out=None): """Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``. This implementation is numerically stable because it splits positive and negative values:: -log(1 + exp(-x_i)) if x_i > 0 x_i - log(1 + exp(x_i)) if x_i <= 0 For the ordinary logistic function, use ``sklearn.utils.fixes.expit``. Parameters ---------- X : array-like, shape (M, N) or (M, ) Argument to the logistic function out : array-like, shape: (M, N) or (M, ), optional: Preallocated output array. Returns ------- out : array, shape (M, N) or (M, ) Log of the logistic function evaluated at every point in x Notes ----- See the blog post describing this implementation: http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/ """ is_1d = X.ndim == 1 X = np.atleast_2d(X) X = check_array(X, dtype=np.float64) n_samples, n_features = X.shape if out is None: out = np.empty_like(X) _log_logistic_sigmoid(n_samples, n_features, X, out) if is_1d: return np.squeeze(out) return out def softmax(X, copy=True): """ Calculate the softmax function. 
The softmax function is calculated by np.exp(X) / np.sum(np.exp(X), axis=1) This will cause overflow when large values are exponentiated. Hence the largest value in each row is subtracted from each data point to prevent this. Parameters ---------- X : array-like, shape (M, N) Argument to the logistic function copy : bool, optional Copy X or not. Returns ------- out : array, shape (M, N) Softmax function evaluated at every point in x """ if copy: X = np.copy(X) max_prob = np.max(X, axis=1).reshape((-1, 1)) X -= max_prob np.exp(X, X) sum_prob = np.sum(X, axis=1).reshape((-1, 1)) X /= sum_prob return X def safe_min(X): """Returns the minimum value of a dense or a CSR/CSC matrix. Adapated from http://stackoverflow.com/q/13426580 """ if issparse(X): if len(X.data) == 0: return 0 m = X.data.min() return m if X.getnnz() == X.size else min(m, 0) else: return X.min() def make_nonnegative(X, min_value=0): """Ensure `X.min()` >= `min_value`.""" min_ = safe_min(X) if min_ < min_value: if issparse(X): raise ValueError("Cannot make the data matrix" " nonnegative because it is sparse." " Adding a value to every entry would" " make it no longer sparse.") X = X + (min_value - min_) return X def _incremental_mean_and_var(X, last_mean=.0, last_variance=None, last_sample_count=0): """Calculate mean update and a Youngs and Cramer variance update. last_mean and last_variance are statistics computed at the last step by the function. Both must be initialized to 0.0. In case no scaling is required last_variance can be None. The mean is always required and returned because necessary for the calculation of the variance. last_n_samples_seen is the number of samples encountered until now. From the paper "Algorithms for computing the sample variance: analysis and recommendations", by Chan, Golub, and LeVeque. 
Parameters ---------- X : array-like, shape (n_samples, n_features) Data to use for variance update last_mean : array-like, shape: (n_features,) last_variance : array-like, shape: (n_features,) last_sample_count : int Returns ------- updated_mean : array, shape (n_features,) updated_variance : array, shape (n_features,) If None, only mean is computed updated_sample_count : int References ---------- T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample variance: recommendations, The American Statistician, Vol. 37, No. 3, pp. 242-247 Also, see the sparse implementation of this in `utils.sparsefuncs.incr_mean_variance_axis` and `utils.sparsefuncs_fast.incr_mean_variance_axis0` """ # old = stats until now # new = the current increment # updated = the aggregated stats last_sum = last_mean * last_sample_count new_sum = X.sum(axis=0) new_sample_count = X.shape[0] updated_sample_count = last_sample_count + new_sample_count updated_mean = (last_sum + new_sum) / updated_sample_count if last_variance is None: updated_variance = None else: new_unnormalized_variance = X.var(axis=0) * new_sample_count if last_sample_count == 0: # Avoid division by 0 updated_unnormalized_variance = new_unnormalized_variance else: last_over_new_count = last_sample_count / new_sample_count last_unnormalized_variance = last_variance * last_sample_count updated_unnormalized_variance = ( last_unnormalized_variance + new_unnormalized_variance + last_over_new_count / updated_sample_count * (last_sum / last_over_new_count - new_sum) ** 2) updated_variance = updated_unnormalized_variance / updated_sample_count return updated_mean, updated_variance, updated_sample_count def _deterministic_vector_sign_flip(u): """Modify the sign of vectors for reproducibility Flips the sign of elements of all the vectors (rows of u) such that the absolute maximum element of each vector is positive. Parameters ---------- u : ndarray Array with vectors as its rows. 
Returns ------- u_flipped : ndarray with same shape as u Array with the sign flipped vectors as its rows. """ max_abs_rows = np.argmax(np.abs(u), axis=1) signs = np.sign(u[range(u.shape[0]), max_abs_rows]) u *= signs[:, np.newaxis] return u def stable_cumsum(arr, axis=None, rtol=1e-05, atol=1e-08): """Use high precision for cumsum and check that final value matches sum Parameters ---------- arr : array-like To be cumulatively summed as flat axis : int, optional Axis along which the cumulative sum is computed. The default (None) is to compute the cumsum over the flattened array. rtol : float Relative tolerance, see ``np.allclose`` atol : float Absolute tolerance, see ``np.allclose`` """ # sum is as unstable as cumsum for numpy < 1.9 if np_version < (1, 9): return np.cumsum(arr, axis=axis, dtype=np.float64) out = np.cumsum(arr, axis=axis, dtype=np.float64) expected = np.sum(arr, axis=axis, dtype=np.float64) if not np.all(np.isclose(out.take(-1, axis=axis), expected, rtol=rtol, atol=atol, equal_nan=True)): warnings.warn('cumsum was found to be unstable: ' 'its last element does not correspond to sum', RuntimeWarning) return out
luoyetx/mxnet
refs/heads/master
example/ssd/tools/caffe_converter/convert_mean.py
79
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Convert caffe mean """ import argparse import mxnet as mx import numpy as np import caffe_parser def convert_mean(binaryproto_fname, output=None): """Convert caffe mean Parameters ---------- binaryproto_fname : str Filename of the mean output : str, optional Save the mean into mxnet's format Returns ------- NDArray Mean in ndarray """ mean_blob = caffe_parser.caffe_pb2.BlobProto() with open(binaryproto_fname, 'rb') as f: mean_blob.ParseFromString(f.read()) img_mean_np = np.array(mean_blob.data) img_mean_np = img_mean_np.reshape( mean_blob.channels, mean_blob.height, mean_blob.width ) # swap channels from Caffe BGR to RGB img_mean_np[[0, 2], :, :] = img_mean_np[[2, 0], :, :] nd = mx.nd.array(img_mean_np) if output is not None: mx.nd.save(output, {"mean_image": nd}) return nd def main(): parser = argparse.ArgumentParser(description='Convert caffe mean') parser.add_argument('binaryproto_fname', help='Filename of the mean') parser.add_argument('output', help='The name of the output file') args = parser.parse_args() convert_mean(args.binaryproto_fname, args.output) if __name__ == '__main__': main()
drougge/wellpapp-pyclient
refs/heads/master
wellpapp/shell/fusefs.py
1
"""FUSE filesystem exposing a wellpapp image database.

Paths are interpreted as tag searches; directory listings are search
results, and files/symlinks resolve to images, thumbnails, generated
XMP metadata and tag-cloud listings.
"""
from __future__ import print_function
import fuse
import stat
import errno
import os
import sys
from wellpapp import Client, Tag, raw_exts
import re
from time import time, sleep
from hashlib import md5
from struct import pack, unpack
from zlib import crc32
from xml.sax.saxutils import escape as xmlescape
from os.path import exists
from threading import Thread, RLock, Lock
from collections import namedtuple

if not hasattr(fuse, "__version__"):
    raise RuntimeError("No fuse.__version__, too old?")
fuse.fuse_python_api = (0, 2)

# Python 2/3 compatibility shims: on 3, alias the removed builtins.
if sys.version_info[0] == 2:
    PY3 = False
else:
    PY3 = True
    unicode = str
    unichr = chr
    if fuse.__version__ < "1.0.0":
        raise RuntimeError("Needs at least fuse 1.0.0 on python 3")

# Filename patterns: optional 6-digit ordering prefix, 32-hex md5, extension.
md5re = re.compile(r"^(?:\d{6}\.)?([0-9a-f]{32})\.(\w+)$")
shortmd5re = re.compile(r"^(?:\d{6}\.)?([0-9a-f]{32})$")
metamd5re = re.compile(r"^(?:\d{6}\.)?([0-9a-f]{32})\.(\w+)\.gq\.xmp$")
sre = re.compile(r"[ /]+")
# Rotation (degrees) -> EXIF/TIFF Orientation value.
orient = {0: 1, 90: 6, 180: 3, 270: 8}
default_range = (0, 10000)

_stat_t = namedtuple("stat_t", ["version", "size", "mtime", "dest", "jpegsize"])
_search_t = namedtuple("search_t", ["want", "dontwant", "order", "range", "clean"])


def NOTFOUND():
    # Convenience: raise ENOENT wherever a lookup fails.
    raise IOError(errno.ENOENT, "Not found")


class WpStat(fuse.Stat):
    """Minimal stat result; atime/mtime/ctime all set to the same value."""

    def __init__(self, mode, nlink, size, time):
        self.st_mode = mode
        self.st_ino = 0
        self.st_dev = 0
        self.st_nlink = nlink
        self.st_uid = 0
        self.st_gid = 0
        self.st_size = size
        self.st_atime = time
        self.st_mtime = time
        self.st_ctime = time


class Cache:
    """Tiny TTL cache: values older than ~1.5*ttl are purged lazily on get."""

    def __init__(self, ttl):
        self._data = {}
        self._time = time()
        self._ttl = ttl
        self._lock = RLock()

    def get(self, key, getter):
        # getter(key) is only called on a miss; result is cached with its time.
        with self._lock:
            if self._time < time() - self._ttl:
                self._clean()
            if key not in self._data:
                self._data[key] = (time(), getter(key))
            return self._data[key][1]

    def _clean(self):
        self._time = time()
        t = self._time - (self._ttl / 1.5)
        too_old = [k for k, v in self._data.items() if v[0] < t]
        for key in too_old:
            del self._data[key]


_thumbpaths = ([".thumblocal", "normal"], [".thumblocal", "large"])
_cfgpath = "/.wellpapprc"
_cloudname = ".cloud"
# Map each RAW extension to a unique case-variant of "jpg", so a wrapped RAW
# file can be presented with a jpeg-ish name and still be mapped back
# (via _rawext_r) to its original extension.
_rawext = dict(zip(raw_exts, ("Jpg", "jPg", "jpG", "JPg", "JPG", "JpG", "jPG", "jpg")))
assert len(_rawext) == len(raw_exts)
_rawext_r = {v: k for k, v in _rawext.items()}

if PY3 and sys.getfilesystemencodeerrors() == 'surrogateescape':
    def pathfix(path):
        # Undo surrogateescape decoding; fall back to latin-1 for non-UTF-8.
        b = path.encode('utf-8', 'surrogateescape')
        try:
            return b.decode('utf-8')
        except UnicodeDecodeError:
            return b.decode('iso-8859-1')
else:
    pathfix = str


def _un_chr(m):
    return unichr(int(m.group(0)[2:], 16))


def unescape(s):
    # Expand literal \uXXXX escapes in a tag name.
    return re.sub(r'\\u[a-fA-F0-9]{4}', _un_chr, s)


class Wellpapp(fuse.Fuse):
    """The FUSE server object; one instance per mount."""

    def __init__(self, *a, **kw):
        fuse.Fuse.__init__(self, *a, **kw)
        self._raw2jpeg = False
        self._default_search = None
        self.parser.add_option(mountopt="raw2jpeg", action="store_true", dest="_raw2jpeg", help="Present RAW files as JPEG")
        self.parser.add_option(mountopt="default_search", dest="_default_search", help="Default search (added to all searches)")

    def _cfg2file(self):
        # Render the client config as "key=value" lines (non-private keys only).
        cfg = self._client.cfg
        data = []
        for k, v in cfg.items():
            if not k.startswith("_"):
                data.append(k + "=" + v + "\n")
        res = "".join(sorted(data))
        if PY3:
            res = res.encode("utf-8")
        return res

    def _cache_read(self):
        # Incrementally consume new lines appended to the external stat-cache
        # file. Line format: version md5 size mtime [jpegsize] dest.
        self._cache_fh.seek(0, 1)
        for line in self._cache_fh:
            try:
                v, m, size, mtime, dest = line.rstrip("\n").split(" ", 4)
                if v == "1":
                    jz, dest = dest.split(" ", 1)
                else:
                    jz = 0
                    assert v == "0"
                self._stat_cache[m] = _stat_t(int(v), int(size), int(mtime), dest, int(jz))
            except Exception:
                print("Bad line in cache:", line)

    def _cache_thread(self):
        # Background poller: pick up cache lines appended by other processes.
        while True:
            sleep(1)
            self._cache_read()

    def _prime_stat_cache(self):
        fn = self._client.cfg.image_base + "/cache"
        if not exists(fn):
            return
        try:
            print("Loading stat-cache..")
            self._cache_fh = open(fn, "r", encoding="utf-8") if PY3 else open(fn, "r")
            self._stat_cache = {}
            self._cache_read()
            self._use_cache = True
        except Exception as e:
            print("Failed to load cache:", e)

    # Starting threads doesn't work from __init__.
    def fsinit(self):
        if self._use_cache:
            t = Thread(target=self._cache_thread)
            t.name = "cache loader"
            t.daemon = True
            t.start()
        print("Ready")

    def _stat(self, m):
        # Stat an image by md5, filling the cache from the symlink target on miss.
        if m not in self._stat_cache:
            print(m, "not in cache")
            p = self._client.image_path(m)
            dest = os.readlink(p)
            st = os.stat(dest)
            self._stat_cache[m] = _stat_t(0, st.st_size, st.st_mtime, dest, 0)
        return self._stat_cache[m]

    def getattr(self, path):
        path = pathfix(path)
        spath = path.split("/")[1:]
        mode = stat.S_IFDIR | 0o555
        nlink = 2
        size = 0
        m = md5re.match(spath[-1])
        metam = metamd5re.match(spath[-1])
        # NOTE(review): local `time` shadows the imported time() function for
        # the rest of this method.
        time = 0
        if spath[-3:-1] in _thumbpaths:
            # NOTE(review): `not m.group(2) != ".png"` means == ".png", which
            # \w+ can never match (no dot) — presumably meant != "png"; confirm.
            if not m or not m.group(2) != ".png":
                NOTFOUND()
            search = self._path2search("/" + " ".join(spath[:-3]))
            if not search:
                NOTFOUND()
            if search.order or self._raw2jpeg:
                # order specified or potentially unwrapped
                orgmd5 = self._resolve_thumb(search, spath[-1])
                if not orgmd5:
                    NOTFOUND()
                mode = stat.S_IFREG | 0o444
                tfn = self._client.thumb_path(orgmd5[0], spath[-2])
                size = os.stat(tfn).st_size
                if search.order:
                    # plus six digits and a period
                    size += 7
            else:
                mode = stat.S_IFLNK | 0o444
                nlink = 1
        elif m:
            if self._use_cache:
                mode = stat.S_IFREG | 0o444
                version, size, time, dest, jpeg = self._stat(m.group(1))
                if self._raw2jpeg and spath[-1][-3:] in _rawext_r:
                    # wrapped RAW
                    size = jpeg
            else:
                mode = stat.S_IFLNK | 0o444
                nlink = 1
        elif metam:
            mode = stat.S_IFREG | 0o444
            size = len(self._generate_meta(metam.group(1)))
            nlink = 1
        elif path == "/" or spath[-1] in (".thumblocal", ".metadata") or spath[-2:] in _thumbpaths:
            pass
        elif path == _cfgpath:
            mode = stat.S_IFREG | 0o444
            nlink = 1
            size = len(self._cfgfile)
        elif spath[-1][:len(_cloudname)] == _cloudname:
            mode = stat.S_IFREG | 0o444
            nlink = 1
            size = len(self._generate_cloud(spath[:-1], spath[-1]))
        else:
            # Otherwise the path must be a valid search (a directory), or a
            # bare short-md5 symlink inside one.
            search = self._path2search(path)
            if not search:
                NOTFOUND()
            try:
                self._cache.get(search, self._search)
            except Exception:
                m = shortmd5re.match(spath[-1])
                if m:
                    mode = stat.S_IFLNK | 0o444
                    nlink = 1
                else:
                    NOTFOUND()
        return WpStat(mode, nlink, size, time)

    def _generate_cloud(self, spath, fn):
        """Generate a ".cloud[:N]" file: newline-separated popular tag names."""
        fn = fn[len(_cloudname):]
        count = 20
        if fn and fn[0] == ":":
            try:
                count = int(fn[1:])
            except ValueError:
                pass
            if count < 1:
                count = 1
        want, dontwant = self._path2search("/" + "/".join(spath))[:2]
        with self._client_lock:
            range = (0, count - 1 + len(want))
            tags = self._client.find_tags("EI", "", range=range, guids=want, excl_guids=dontwant, order="-post", flags="-datatag")
        # Filter out the tags already present in the search itself.
        want = [w[0][-27:] for w in want]
        names = [t.name for t in tags if t.guid not in want]
        res = "\n".join(names) + "\n"
        return res.encode("utf-8")

    def _generate_meta(self, m):
        """Generate an XMP sidecar (orientation + tag list) for image md5 m."""
        data = u"""<?xml version="1.0" encoding="UTF-8"?><x:xmpmeta xmlns:x="adobe:ns:meta/" x:xmptk="XMP Core 4.1.1-Exiv2"><rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"><rdf:Description rdf:about="" xmlns:tiff="http://ns.adobe.com/tiff/1.0/" xmlns:dc="http://purl.org/dc/elements/1.1/" """
        with self._client_lock:
            post = self._client.get_post(m, wanted=["tagname", "tagdata", "rotate"])
        if "rotate" in post and post.rotate.value in orient:
            data += u"tiff:Orientation=\"" + unicode(orient[post.rotate.value]) + u"\""
        data += u"><dc:subject><rdf:Bag>"
        tags = [tag.pname + ((u"=" + unicode(tag.value)) if tag.value else u"") for tag in post.tags]
        data += u"".join([u"<rdf:li>" + xmlescape(tn) + u"</rdf:li>" for tn in sorted(tags)])
        data += u"</rdf:Bag></dc:subject></rdf:Description></rdf:RDF></x:xmpmeta>"
        return data.encode("utf-8")

    def _resolve_thumb(self, search, thumbname):
        # Map md5-of-displayed-filename -> (md5 of original name, filename),
        # built lazily per search into the cached dict.
        thumbmd5 = thumbname[:32]
        fns, pcache = self._cache.get(search, self._search)
        if not pcache:
            for fn in fns:
                m = md5re.match(fn)
                ext = m.group(2)
                ofn = m.group(1) + "." + _rawext_r.get(ext, ext)
                tmd5 = md5(fn.encode("utf-8")).hexdigest()
                pcache[tmd5] = (md5(ofn.encode("utf-8")).hexdigest(), fn)
        return pcache.get(thumbmd5)

    def readlink(self, path):
        path = pathfix(path)
        path = path.split("/")[1:]
        m = md5re.match(path[-1])
        if m:
            if path[-3:-1] in _thumbpaths:
                return self._client.thumb_path(m.group(1), path[-2])
            else:
                return self._client.image_path(m.group(1))
        if path[-3:-1] not in _thumbpaths:
            m = shortmd5re.match(path[-1])
            if m:
                return self._client.image_path(m.group(1))
        NOTFOUND()

    def readdir(self, path, offset):
        path = pathfix(path)
        # NOTE(review): `list` shadows the builtin for this method.
        list = [".", ".."]
        search = self._path2search(path)
        path = path.split("/")[1:]
        if path[-1] == ".thumblocal":
            list += ["normal", "large"]
        elif path == [""] or path[-2:] in _thumbpaths:
            pass
        elif search:
            try:
                list += self._cache.get(search, self._search)[0]
            except Exception:
                NOTFOUND()
        else:
            NOTFOUND()
        for e in list:
            yield fuse.Direntry(e)

    def _search(self, search):
        """Run a search against the server; returns (filenames, thumb-cache-dict)."""
        order = search.order
        range = search.range
        if not range:
            range = default_range
        assert None not in search.want
        assert None not in search.dontwant
        with self._client_lock:
            s = self._client.search_post(guids=search.want, excl_guids=search.dontwant, wanted=["ext"], order=order, range=range)
        r = []
        idx = 0
        prefix = ""
        for p in s:
            if order:
                # Ordered searches get a 6-digit sortable prefix.
                prefix = "%06d." % (idx,)
                idx += 1
            if search.clean:
                r.append(prefix + p.md5)
            else:
                m = p.md5
                ext = p.ext
                if self._raw2jpeg and ext in _rawext and self._stat(m).jpegsize:
                    # Also list the RAW file under its jpeg-wrapped name.
                    r.append(prefix + m + "." + _rawext[ext])
                r.append(prefix + m + "." + ext)
        if not PY3:
            r = map(str, r)
        return r, {}

    def _escape_wrap(self, f_name, name, *a):
        # Call a client lookup; on failure retry with \uXXXX escapes expanded.
        f = getattr(self._client, f_name)
        with self._client_lock:
            res = f(name, *a)
            if not res and '\\' in name:
                res = f(unescape(name), *a)
        return res

    def _path2search(self, path):
        """Parse a mount path into a _search_t, or None for "/".

        Path elements: "-tag" excludes, "O:field" orders, "R:a:b" ranges,
        "C:" cleans (md5-only names), "N:" suppresses the default search.
        """
        if path == "/":
            return None
        want = set()
        dontwant = set()
        order = []
        first = None
        range = None
        clean = False
        nodefault = False
        for e in filter(None, sre.split(path[1:])):
            if e[0] == "-":
                e = self._escape_wrap('parse_tag', e[1:], True)
                dontwant.add(e)
            elif e[:2] == "O:":
                o = e[2:]
                if o != "group":
                    t = Tag()
                    o = self._escape_wrap('find_tag', o, t, True)
                    assert t.valuetype
                order.append(o)
            elif e[:2] == "R:":
                range = tuple(map(int, e[2:].split(":")))
            elif e == "C:":
                clean = True
            elif e == "N:":
                nodefault = True
            else:
                e = self._escape_wrap('parse_tag', e, True)
                want.add(e)
                if not first:
                    first = e
        if self._default_search and not nodefault:
            def bare(tg):
                if tg[0] in "~!":
                    return tg[1:]
                return tg
            allguids = {bare(t[0]) for t in want | dontwant if t}
            for n in ("want", "dontwant"):
                for t in getattr(self._default_search, n):
                    if bare(t[0]) not in allguids:
                        # NOTE(review): works only because this mutates the set
                        # object reachable through locals() rather than
                        # rebinding the name; fragile but functional in CPython.
                        locals()[n].add(t)
        if "group" in order:
            # Group ordering: the first tag given must come first in want.
            want.remove(first)
            want = [first] + list(want)
        return _search_t(tuple(want), tuple(dontwant), tuple(order), range, clean)

    def main(self, *a, **kw):
        self._cache = Cache(30)
        self._client = Client()
        self._client_lock = RLock()
        self._cfgfile = self._cfg2file()
        self._use_cache = False
        self._prime_stat_cache()
        if self._raw2jpeg and not self._use_cache:
            raise Exception("raw2jpeg only works with a stat-cache")
        if self._raw2jpeg:
            from wellpapp import RawWrapper
        if self._default_search:
            ds = "/" + self._default_search
            self._default_search = None
            self._default_search = self._path2search(ds)
            if None in self._default_search.want or None in self._default_search.dontwant:
                raise Exception("Default search broken (%r)" % (self._default_search,))
        wp = self

        class FakeFile:
            """File object handed to FUSE: serves generated data or a real fh."""

            keep_cache = False
            direct_io = False
            _fh = None
            data = ""

            def __init__(self, path, flags, *mode):
                rwflags = flags & (os.O_RDONLY | os.O_WRONLY | os.O_RDWR)
                if rwflags != os.O_RDONLY:
                    # Read-only filesystem.
                    NOTFOUND()
                path = pathfix(path)
                if path == _cfgpath:
                    self.data = wp._cfgfile
                    return
                spath = path.split("/")
                metam = metamd5re.match(spath[-1])
                if metam:
                    self.data = wp._generate_meta(metam.group(1))
                    return
                if spath[-1][:len(_cloudname)] == _cloudname:
                    self.data = wp._generate_cloud(spath[1:-1], spath[-1])
                    return
                if spath[-3:-1] in _thumbpaths:
                    self.data = self._make_thumb(spath)
                else:
                    fn = spath[-1].split(".")
                    m = fn[-2][-32:]
                    fh = self._open(m)
                    if fh:
                        if wp._raw2jpeg and fn[-1] in _rawext_r:
                            # @@ Check wp._stat(m).version
                            self._fh = RawWrapper(fh, True)
                        else:
                            self._fh = fh
                        self._lock = Lock()

            def _open(self, m):
                # Try the cached destination first, then re-resolve the
                # symlink and refresh the cache entry.
                try:
                    dest = wp._stat(m).dest
                    return open(dest, "rb")
                except Exception:
                    pass
                try:
                    p = wp._client.image_path(m)
                    dest = os.readlink(p)
                    fh = open(dest, "rb")
                    wp._stat_cache[m] = wp._stat(m)._replace(dest=dest)
                    return fh
                except Exception:
                    pass

            # FUSE doesn't seem to like destroying these objects.
            # But it does call release, so I'll do what I can.
            def __del__(self):
                self.release(0)

            def release(self, flags):
                if self._fh:
                    self._fh.close()
                # NOTE(review): `self._Lock` is presumably a typo for
                # `self._lock` — the real lock is never cleared.
                self.data = self._fh = self._Lock = None

            def _make_thumb(self, spath):
                # Read the PNG thumbnail and rewrite its tEXtThumb::URI chunk
                # so the embedded URI matches the (possibly prefixed/wrapped)
                # displayed filename; length and CRC are patched accordingly.
                search = wp._path2search("/".join(spath[:-3]))
                if not search:
                    NOTFOUND()
                orgmd5, fn = wp._resolve_thumb(search, spath[-1])
                ext = fn.split(".")[-1]
                tfn = wp._client.thumb_path(orgmd5, spath[-2])
                fh = open(tfn, "rb")
                data = fh.read()
                fh.close()
                if not (search.order or ext in _rawext_r):
                    return data
                data = data.split(b"tEXtThumb::URI\0")
                if len(data) != 2:
                    NOTFOUND()
                pre, post = data
                clen, = unpack(">I", pre[-4:])
                if search.order:
                    # It's longer only of search was ordered
                    pre = pre[:-4] + pack(">I", clen + 7)
                post = post[clen - 7:]
                tEXt = b"tEXtThumb::URI\0" + fn.encode("utf-8")
                crc = crc32(tEXt)
                if crc < 0:
                    crc += 0x100000000
                tEXt += pack(">I", crc)
                return pre + tEXt + post

            def read(self, length, offset):
                if self._fh:
                    with self._lock:
                        self._fh.seek(offset)
                        return self._fh.read(length)
                else:
                    return self.data[offset:offset + length]

        self.file_class = FakeFile
        return fuse.Fuse.main(self, *a, **kw)


def main(arg0, argv):
    server = Wellpapp(prog=arg0)
    server.parse(argv, errex=1, values=server)
    server.main()
daniparera/MCR
refs/heads/master
NXWND/ponderateSynsetWraped.py
1
#!/usr/bin/python
"""Thin wrapper that invokes ponderateSynset.py for a single synset."""
import os, subprocess
import textwrap, argparse


def build_command(synset, debug):
    """Return the argv list for the ponderateSynset.py invocation.

    Building an argument list (used with shell=False) instead of
    interpolating into a shell string prevents shell injection through
    the user-supplied synset value.
    """
    cmd = ["python", "ponderateSynset.py", "--synset", synset]
    if debug:
        cmd.append("--debug")
    return cmd


def main():
    """Parse arguments, run the helper script and print its output."""
    parserarg = argparse.ArgumentParser(
        prog='ponderateSynsetWraped',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=textwrap.dedent('''\
            calculate ponderated vectors for specific synset
            --------------------------------
                example of use
                $python3 %(prog)s --synset synset [[--debug]]
            '''))
    # BUGFIX: the original used action='store_false' with the string default
    # 'TRUE' and then double-negated the value; store_true with a False
    # default has the same observable CLI behavior and is direct.
    parserarg.add_argument('--debug', action='store_true', default=False,
                           help='to show aditional information')
    parserarg.add_argument('--synset', dest='synset', required=True,
                           default='', type=str,
                           help='input synsets (required)')
    args = parserarg.parse_args()

    # shell=False (the default for a list argv): the synset is passed
    # verbatim as a single argument, never parsed by a shell.
    result = subprocess.check_output(
        build_command(args.synset, args.debug)).strip().decode('utf-8')
    print("++++++++++++++++++++++")
    print(result)


if __name__ == '__main__':
    main()
FR4NK-W/osourced-scion
refs/heads/master
python/lib/log.py
3
# Copyright 2015 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`log` --- Logging utilities
================================
"""
# Stdlib
import logging
import logging.handlers
import traceback
from datetime import datetime, timezone

# This file should not include other SCION libraries, to prevent circular import
# errors.

#: Bytes
LOG_MAX_SIZE = 10 * 1024 * 1024
LOG_BACKUP_COUNT = 1

# Logging handlers that will log logging exceptions, and then re-raise them. The
# default behaviour of python's logging handlers is to catch logging exceptions,
# which hides the problem.
#
# We don't try to use the normal logging system at this point because we don't
# know if that's working at all. If it is (e.g. when the exception is a
# formatting error), when we re-raise the exception, it'll get handled by the
# normal process.

# Module-wide DispatchFormatter, installed by init_logging();
# add_formatter() delegates to it.
_dispatch_formatter = None


def _handleError(self, _):
    # Replacement for logging.Handler.handleError: write the internal logging
    # failure to the handler's own stream, then re-raise instead of swallowing.
    self.stream.write("Exception in logging module:\n")
    for line in traceback.format_exc().split("\n"):
        self.stream.write(line+"\n")
    self.flush()
    raise


class _RotatingErrorHandler(logging.handlers.RotatingFileHandler):
    # Rotating file handler that surfaces logging errors (see _handleError).
    handleError = _handleError


class _ConsoleErrorHandler(logging.StreamHandler):
    # Console handler that surfaces logging errors (see _handleError).
    handleError = _handleError


class Rfc3339Formatter(logging.Formatter):
    """Formatter with UTC RFC3339-style timestamps; continuation lines of
    multi-line messages are prefixed with "> "."""

    def format(self, record):  # pragma: no cover
        lines = super().format(record).splitlines()
        return "\n> ".join(lines)

    def formatTime(self, record, _):  # pragma: no cover
        # Not using lib.util.iso_timestamp here, to avoid potential import
        # loops.
        # Also, using str on a datetime object inserts a ":" into the time zone,
        # which, while legal, is inconsistent with logging in Go and Zlog.
        # Fortunately, Python's strftime does the right thing.
        return datetime.fromtimestamp(
            record.created, tz=timezone.utc).strftime("%Y-%m-%d %H:%M:%S.%f%z")


class DispatchFormatter:  # pragma: no cover
    """
    A dispatching formatter that allows modules to install custom formatters
    for their child loggers.
    """
    def __init__(self, default_formatter, formatters=None):
        self._default_formatter = default_formatter
        self._formatters = formatters or {}

    def add_formatter(self, key, formatter):
        self._formatters[key] = formatter

    def format(self, record):
        # Dispatch on the logger name; fall back to the default formatter.
        formatter = self._formatters.get(record.name, self._default_formatter)
        return formatter.format(record)


def add_formatter(name, formatter):  # pragma: no cover
    # Register a per-logger formatter. Must be called after init_logging().
    _dispatch_formatter.add_formatter(name, formatter)


def init_logging(log_base=None, file_level=logging.DEBUG,
                 console_level=logging.NOTSET):
    """
    Configure logging for components (servers, routers, gateways).

    :param log_base: if set, one rotating file per level >= file_level is
        created at "<log_base>.<LEVELNAME>".
    :param console_level: if truthy, also log to the console at that level.
    """
    default_formatter = Rfc3339Formatter(
        "%(asctime)s [%(levelname)s] (%(threadName)s) %(message)s")
    global _dispatch_formatter
    _dispatch_formatter = DispatchFormatter(default_formatter)
    handlers = []
    if log_base:
        for lvl in sorted(logging._levelToName):
            if lvl < file_level:
                continue
            log_file = "%s.%s" % (log_base, logging._levelToName[lvl])
            h = _RotatingErrorHandler(
                log_file, maxBytes=LOG_MAX_SIZE, backupCount=LOG_BACKUP_COUNT,
                encoding="utf-8")
            h.setLevel(lvl)
            handlers.append(h)
    if console_level:
        h = _ConsoleErrorHandler()
        h.setLevel(console_level)
        handlers.append(h)
    for h in handlers:
        h.setFormatter(_dispatch_formatter)
    # Use logging.DEBUG here, so that the handlers themselves can decide what
    # to filter.
    logging.basicConfig(level=logging.DEBUG, handlers=handlers)


def log_exception(msg, *args, level=logging.CRITICAL, **kwargs):
    """
    Properly format an exception before logging. Call from an except block.
    """
    logging.log(level, msg, *args, **kwargs)
    for line in traceback.format_exc().split("\n"):
        logging.log(level, line)


def log_stack(level=logging.DEBUG):
    # Log the current call stack (no exception required).
    logging.log(level, "".join(traceback.format_stack()))
mamikonyana/mamikonyana.github.io
refs/heads/flask
static/ml_afternoon/presentation_data/practical_s1/kmeans.py
1
import numpy as np


def random_initialize(data_array, num_clusters):
    """Initialize cluster centers by sampling `num_clusters` distinct points
    uniformly from `data_array`.

    :param data_array: data, numpy 2-D array (n_points, n_dims)
    :param num_clusters: number of centers to pick
    :return: list of 1-D numpy arrays (copies of the chosen points)
    """
    data_array = np.asarray(data_array)
    idx = np.random.choice(len(data_array), size=num_clusters, replace=False)
    return [data_array[i].copy() for i in idx]


def plus_plus_initialize(data_array, num_clusters):
    """Initialize cluster centers using the k-means++ seeding algorithm
    (Arthur & Vassilvitskii, 2007): each new center is drawn with probability
    proportional to its squared distance from the nearest existing center.

    :param data_array: data, numpy 2-D array (n_points, n_dims)
    :param num_clusters: number of centers to pick
    :return: list of 1-D numpy arrays
    """
    data_array = np.asarray(data_array)
    n = len(data_array)
    centers = [data_array[np.random.randint(n)].copy()]
    while len(centers) < num_clusters:
        # Squared distance from each point to its nearest chosen center.
        d2 = np.min(
            [np.sum((data_array - c) ** 2, axis=1) for c in centers], axis=0)
        total = d2.sum()
        if total == 0:
            # All points coincide with existing centers; fall back to uniform.
            centers.append(data_array[np.random.randint(n)].copy())
            continue
        centers.append(data_array[np.random.choice(n, p=d2 / total)].copy())
    return centers


class KMeans(object):
    """Plain k-means clustering (Lloyd's algorithm)."""

    def __init__(self, num_mixtures):
        # K: number of clusters; means: list of current cluster centers.
        self.K = num_mixtures
        self.means = []

    def initialize(self, data):
        """
        :param data: data, numpy 2-D array
        """
        # k-means++ seeding gives much better starting points than uniform.
        self.means = plus_plus_initialize(data, self.K)

    def predict(self, data):
        """
        Return index of the cluster the point is most likely to belong.
        :param data: data, numpy 2-D array
        :return: labels, numpy 1-D array
        """
        data = np.asarray(data)
        centers = np.asarray(self.means)
        # (n_points, K) matrix of squared distances; label = nearest center.
        d2 = ((data[:, None, :] - centers[None, :, :]) ** 2).sum(axis=2)
        return np.argmin(d2, axis=1)

    def fit(self, data, max_iter=100, tol=1e-8):
        """
        :param data: data to fit, numpy 2-D array
        :param max_iter: hard cap on Lloyd iterations (backward-compatible
            keyword, added with a default)
        :param tol: stop once no center moves more than this distance
        """
        data = np.asarray(data)
        self.initialize(data)
        for _ in range(max_iter):
            labels = self.predict(data)
            new_means = []
            for k in range(self.K):
                members = data[labels == k]
                if len(members):
                    new_means.append(members.mean(axis=0))
                else:
                    # Empty cluster: keep its previous center.
                    new_means.append(np.asarray(self.means[k]))
            shift = max(np.linalg.norm(nm - om)
                        for nm, om in zip(new_means, self.means))
            self.means = new_means
            if shift <= tol:
                break

    def get_centers(self):
        """ Return list of centers of the clusters, i.e. means """
        return self.means
Johnzero/erp
refs/heads/fga
openerp/addons/hr/__init__.py
9
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import hr_department import hr import report import wizard # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
chrisjones-brack3t/python-oauth2
refs/heads/master
oauth2/clients/imap.py
885
""" The MIT License Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import oauth2 import imaplib class IMAP4_SSL(imaplib.IMAP4_SSL): """IMAP wrapper for imaplib.IMAP4_SSL that implements XOAUTH.""" def authenticate(self, url, consumer, token): if consumer is not None and not isinstance(consumer, oauth2.Consumer): raise ValueError("Invalid consumer.") if token is not None and not isinstance(token, oauth2.Token): raise ValueError("Invalid token.") imaplib.IMAP4_SSL.authenticate(self, 'XOAUTH', lambda x: oauth2.build_xoauth_string(url, consumer, token))
hhstore/flask-annotated
refs/heads/master
asyncio/asyncio-3.4.3/examples/fetch/fetch0.py
2
#!/usr/bin/env python # -*- coding: utf-8 -*- """Simplest possible HTTP client.""" import sys from asyncio import * # 协程: @coroutine def fetch(): r, w = yield from open_connection('python.org', 80) request = 'GET / HTTP/1.0\r\n\r\n' print('>', request, file=sys.stderr) w.write(request.encode('latin-1')) while True: line = yield from r.readline() # 异步返回 line = line.decode('latin-1').rstrip() if not line: break print('<', line, file=sys.stderr) print(file=sys.stderr) body = yield from r.read() # 异步返回 return body ######################################### # 主函数 # ######################################### def main(): loop = get_event_loop() # 事件循环 try: body = loop.run_until_complete(fetch()) finally: loop.close() print(body.decode('latin-1'), end='') if __name__ == '__main__': main()
Southpaw-TACTIC/TACTIC
refs/heads/4.7
src/tactic/ui/widget/gallery_wdg.py
1
###########################################################
#
# Copyright (c) 2014, Southpaw Technology
#                     All Rights Reserved
#
# PROPRIETARY INFORMATION.  This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#

__all__ = ['GalleryWdg']

import urllib

from pyasm.biz import Snapshot, File
from pyasm.search import Search
from pyasm.web import HtmlElement, DivWdg, Table
from pyasm.widget import TextWdg, IconWdg

from tactic.ui.common import BaseRefreshWdg


class GalleryWdg(BaseRefreshWdg):
    """Full-screen gallery overlay: a horizontally-sliding strip of embedded
    media (images/videos) with prev/next arrows, keyboard navigation and a
    per-item description bar. Navigation state lives client-side in the
    ``spt.gallery`` JS namespace set up in the load behavior below."""

    def init(self):
        # Path of the item the user clicked on; set while building paths.
        self.curr_path = None

    def get_display(self):
        """Build the overlay DOM plus its JS behaviors and return the top."""
        # path -> sobject, filled by get_paths() for description lookup.
        self.sobject_data = {}

        top = self.top
        # NOTE(review): bare attribute access — missing call parentheses and
        # arguments, so this line is a no-op; probably a leftover.
        top.add_style
        top.add_class("spt_gallery_top")

        inner = DivWdg()
        top.add(inner)
        # make the whole Gallery unselectable
        inner.add_class('unselectable')
        inner.add_style("position: fixed")
        inner.add_style("top: 0px")
        inner.add_style("left: 0px")
        inner.add_style("width: 100%")
        #inner.add_style("height: 100%")
        inner.add_style("bottom: 0px")
        inner.add_style("padding-bottom: 40px")
        #inner.add_style("background: rgba(0,0,0,0.5)")
        inner.add_style("background: rgba(0,0,0,1)")
        inner.add_style("z-index: 2000")

        width = self.kwargs.get("width")
        height = self.kwargs.get("height")
        # default to top.
        align = self.kwargs.get("align")
        if not align:
            align = "top"
        if not width:
            width = 1300
        else:
            width = int(width)

        paths = self.get_paths(file_type='main')
        # icon type may be too small
        thumb_paths = self.get_paths(file_type='web')

        # One description per path (empty string when unknown).
        descriptions = []
        for path in paths:
            sobject = self.sobject_data.get(path)
            if not sobject:
                descriptions.append("")
            else:
                description = sobject.get("description")
                if not description:
                    description = ""
                descriptions.append(description)

        # The strip is all items side by side; sliding is done via margin-left.
        total_width = width * len(paths)

        inner.add_behavior( {
            'type': 'load',
            'width': width,
            'total_width': total_width,
            'descriptions': descriptions,
            'cbjs_action': '''
            spt.gallery = {};

            // 1250 is defined also in the css styles
            spt.gallery.portrait = window.innerWidth < 1250;
            spt.gallery.portrait = false

            spt.gallery.top = bvr.src_el;
            spt.gallery.content = spt.gallery.top.getElement(".spt_gallery_content");
            spt.gallery.content.setStyle('opacity','0.1')
            spt.gallery.desc_el = spt.gallery.top.getElement(".spt_gallery_description");

            //window.addEvent('domready', function() {
            setTimeout(function() {
                // set the img h or w directly
                var items = bvr.src_el.getElements('.spt_gallery_item img');

                // fade in
                spt.gallery.content.set('tween', {duration: 250}).fade('in');

                /*
                for (var k=0; k < items.length; k++) {
                    var sizes = items[k].getSize();
                    var item_h = sizes.y;
                    var item_w = sizes.x;
                    if (item_h >= item_w){
                        //items[k].setStyle('width', 'auto');
                        //items[k].setStyle('height', '100%');
                    }
                    else {
                        //items[k].setStyle('width','auto');
                        //items[k].setStyle('height','100%');
                    }
                }
                */
            }, 50)

            spt.gallery.width = bvr.width;
            spt.gallery.descriptions = bvr.descriptions;
            spt.gallery.index = 0;
            spt.gallery.total = bvr.descriptions.length;
            spt.gallery.left_arrow = bvr.src_el.getElement('.spt_left_arrow');
            spt.gallery.right_arrow = bvr.src_el.getElement('.spt_right_arrow');
            spt.gallery.videos = {};

            spt.gallery.init = function() {
            }

            spt.gallery.stack = [];
            spt.gallery.push_stack = function(key) {
                spt.gallery.stack.push(key);
            }

            spt.gallery.show_next = function(src_el) {
                if (!src_el) src_el = spt.gallery.right_arrow;
                if (spt.gallery.index >= spt.gallery.total-2) {
                    spt.hide(src_el);
                }
                if (spt.gallery.index == spt.gallery.total-1) {
                    return;
                }
                spt.gallery.index += 1;
                spt.gallery.show_index(spt.gallery.index);
            }

            spt.gallery.show_prev = function(src_el) {
                if (!src_el) src_el = spt.gallery.left_arrow;
                if (spt.gallery.index <= 1) {
                    spt.hide(src_el);
                }
                if (spt.gallery.index == 0) {
                    return;
                }
                spt.gallery.index -= 1;
                spt.gallery.show_index(spt.gallery.index);
            }

            spt.gallery.show_index = function(index) {

                // stop all videos
                var videos = spt.gallery.top.getElements(".video-js");
                for (var i = 0; i < videos.length; i++) {
                    try {
                        var video = videos[i];
                        var video_id = video.get("id");
                        var video_obj = videojs(video_id, {"nativeControlsForTouch": false});
                        video_obj.pause();
                    } catch(e) {
                    }
                }

                // can't tween percentage with this library???
                var width = spt.gallery.width;
                var margin = - width * index;
                var content = spt.gallery.content;
                //content.setStyle("margin-left", margin + "px");
                new Fx.Tween(content,{duration: 250}).start("margin-left", margin);
                spt.gallery.index = index;

                var total = spt.gallery.total;
                if (index == 0) {
                    spt.hide(spt.gallery.left_arrow);
                    spt.show(spt.gallery.right_arrow);
                }
                else if (index == total - 1) {
                    spt.show(spt.gallery.left_arrow);
                    spt.hide(spt.gallery.right_arrow);
                }
                else {
                    spt.show(spt.gallery.left_arrow);
                    spt.show(spt.gallery.right_arrow);
                }

                var description = spt.gallery.descriptions[index];
                if (!description) {
                    description = (index+1)+" of "+total;
                }
                else {
                    description = (index+1)+" of "+total+" - " + description;
                }
                spt.gallery.set_description(description);
            }

            spt.gallery.close = function() {
                var content = spt.gallery.content;
                var gallery_top = content.getParent(".spt_gallery_top");
                var top = gallery_top.getParent(".spt_top");
                spt.behavior.destroy_element(gallery_top);

                // header is sometimes not in view after closing, if a header exists
                // make sure it is scrolled into view
                if (top) {
                    var index_header = top.getElement(".spt_index_header");
                    if (index_header) {
                        index_header.scrollIntoView();
                    }
                }
            }

            spt.gallery.set_description = function(desc) {
                var desc_el = spt.gallery.desc_el;
                desc_el.innerHTML = desc;
            }
            '''
        } )

        # Viewport for the sliding strip.
        scroll = DivWdg(css='spt_gallery_scroll')
        inner.add(scroll)
        scroll.set_box_shadow()
        scroll.add_style("width: %s" % width)
        if height:
            scroll.add_style("height: %s" % height)
        scroll.add_style("overflow-x: hidden")
        scroll.add_style("overflow-y: hidden")
        scroll.add_style("background: #000")
        #scroll.add_style("position: absolute")
        scroll.add_style("margin-left: auto")
        scroll.add_style("margin-right: auto")

        content = DivWdg()
        top.add_attr('tabindex','-1')
        scroll.add(content)
        content.add_class("spt_gallery_content")

        # make the items vertically align to bottom (flex-emd)
        # on a regular monitor, align to top (flex-start) is better
        if align == 'bottom':
            align_items = 'flex-end'
        else:
            align_items = 'flex-start'
        content.add_styles("display: flex; flex-flow: row nowrap; align-items: %s; justify-content: center;"%align_items)
        content.add_style("width: %s" % total_width)
        content.add_style("height: 100%")

        # Keep keyboard focus on the gallery while the pointer is over it.
        top.add_behavior( {
            'type': 'load',
            'cbjs_action': '''
            bvr.src_el.focus();
            '''
        } )
        top.add_behavior( {
            'type': 'mouseenter',
            'cbjs_action': '''
            bvr.src_el.focus();
            '''
        } )
        top.add_behavior( {
            'type': 'mouseleave',
            'cbjs_action': '''
            bvr.src_el.blur();
            '''
        } )

        """
        input = TextWdg("keydown")
        content.add(input)
        input.add_style("position: absolute")
        input.add_style("left: -5000px")
        """

        # Keyboard navigation: arrows move, esc/enter close.
        top.add_behavior( {
            'type': 'keydown',
            'cbjs_action': '''
            var key = evt.key;
            if (key == "left") {
                spt.gallery.push_stack(key);
                spt.gallery.show_prev();
            }
            else if (key == "right") {
                spt.gallery.push_stack(key);
                spt.gallery.show_next();
            }
            else if (key == "esc" || key == "enter") {
                spt.gallery.close();
            }
            '''
        } )

        # One embedded item per path; remember which index was clicked.
        curr_index = 0
        for i, path in enumerate(paths):
            path_div = DivWdg(css='spt_gallery_item')
            content.add(path_div)
            #path_div.add_style("float: left")
            path_div.add_style("display: inline-block")
            path_div.add_style("vertical-align: middle")
            if path == self.curr_path:
                curr_index = i
            try:
                thumb_path = thumb_paths[i]
            except IndexError:
                print("Cannot find the thumb_path [%s] "%i )
                thumb_path = ''
            #path_div.add_style("width: %s" % width)
            #if height:
            #    path_div.add_style("height: %s" % height)
            path_div.add_style("width: 100%")
            path_div.add_style("height: 100%")
            path_div.add_style("overflow-x: hidden")
            path_div.add_style("overflow-y: hidden")

            from tactic.ui.widget import EmbedWdg
            embed = EmbedWdg(src=path, click=False, thumb_path=thumb_path, index=i, controls="true", layout="fit")
            path_div.add(embed)

        # Jump to the clicked item once the strip has loaded.
        content.add_behavior({
            'type': 'load',
            'index': curr_index,
            'cbjs_action': '''
            if (!bvr.index) bvr.index = 0;
            spt.gallery.show_index(bvr.index);
            '''
        } )

        #icon = IconWdg(title="Close", icon="/plugins/remington/pos/icons/close.png")
        icon = IconWdg(title="Close", icon="/context/icons/glyphs/close.png", width="40px")
        inner.add(icon)
        icon.add_style("position: absolute")
        icon.add_style("cursor: pointer")
        icon.add_style("top: 30px")
        icon.add_style("right: 38px")
        icon.add_style("opacity: 0.5")
        icon.add_behavior( {
            'type': 'click_up' ,
            'cbjs_action': '''
            spt.gallery.close();
            '''
        } )
        icon.add_style("background", "rgba(48,48,48,0.7)")
        icon.add_style("border-radius", "5px")

        icon = IconWdg(title="Previous", icon="/context/icons/glyphs/chevron_left.png")
        inner.add(icon)
        icon.add_class('spt_left_arrow')
        icon.add_style("cursor: pointer")
        icon.add_style("position: absolute")
        icon.add_style("top: 40%")
        icon.add_style("left: 0px")
        icon.add_style("opacity: 0.5")
        icon.add_behavior( {
            'type': 'click_up' ,
            'cbjs_action': '''
            var arrow = bvr.src_el;
            spt.gallery.show_prev(arrow);
            '''
        } )
        icon.add_style("background", "rgba(48,48,48,0.7)")
        icon.add_style("border-radius", "5px")

        icon = IconWdg(title="Next", icon="/context/icons/glyphs/chevron_right.png")
        inner.add(icon)
        icon.add_class('spt_right_arrow')
        icon.add_style("position: absolute")
        icon.add_style("cursor: pointer")
        icon.add_style("top: 40%")
        icon.add_style("right: 0px")
        icon.add_style("opacity: 0.5")
        icon.add_behavior( {
            'type': 'click_up',
            'cbjs_action': '''
            var arrow = bvr.src_el;
            spt.gallery.show_next(arrow);
            '''
        } )
        icon.add_style("background", "rgba(48,48,48,0.7)")
        icon.add_style("border-radius", "5px")

        # Bottom-centered description bar.
        desc_div = DivWdg()
        desc_div.add_class("spt_gallery_description")
        desc_div.add_style("height: 30px")
        desc_div.add_style("width: %s" % width)
        desc_div.add_style("text-align: center")
        desc_div.add_style("background: rgba(0,0,0,1)")
        desc_div.add_style("color: #bbb")
        desc_div.add_style("font-weight: bold")
        desc_div.add_style("font-size: 16px")
        desc_div.add_style("padding-top: 10px")
        # NOTE(review): under Python 3 width/2 is a float, producing e.g.
        # "margin-left: -650.0" — browsers without units ignore it anyway;
        # confirm intended behavior.
        desc_div.add_style("margin-left: -%s" % (width/2))
        desc_div.add_style("z-index: 1000")
        desc_div.add("")

        desc_outer_div = DivWdg()
        inner.add(desc_outer_div)
        desc_outer_div.add_style("position: fixed")
        desc_outer_div.add(desc_div)
        desc_outer_div.add_style("bottom: 0px")
        desc_outer_div.add_style("left: 50%")

        return top

    def get_paths(self, file_type='main'):
        """Resolve the media paths to display.

        Sources, in priority order: explicit "paths" kwarg additions,
        snapshots of the sobjects in "search_keys", or a hard-coded test
        list. Also fills self.sobject_data and self.curr_path as a side
        effect (for file_type='main').
        """
        # this is the selected one
        search_key = self.kwargs.get("search_key")
        search_keys = self.kwargs.get("search_keys")
        paths = self.kwargs.get("paths")
        if not paths:
            paths = []
        if search_keys:
            sobjects = Search.get_by_search_keys(search_keys, keep_order=True)
            # return_dict=True defaults to return the first of each snapshot list
            # and so works well with is_latest=True
            if sobjects and sobjects[0].get_base_search_type() == "sthpw/snapshot":
                # The sobjects already are snapshots; map them to themselves.
                sobj_snapshot_dict = {}
                for sobject in sobjects:
                    tmp_search_key = sobject.get_search_key()
                    sobj_snapshot_dict[tmp_search_key] = sobject
                snapshots = sobjects
            else:
                sobj_snapshot_dict = Snapshot.get_by_sobjects(sobjects, is_latest=True, return_dict=True)
                snapshots = sobj_snapshot_dict.values()
            file_dict = Snapshot.get_files_dict_by_snapshots(snapshots, file_type=file_type)
            for sobject in sobjects:
                path = ''
                snapshot = sobj_snapshot_dict.get(sobject.get_search_key())
                # it is supposed to get one (latest), just a precaution
                if isinstance(snapshot, list):
                    snapshot = snapshot[0]
                if not snapshot:
                    continue
                file_list = file_dict.get(snapshot.get_code())
                if not file_list:
                    paths.append("")
                    continue
                # NOTE: there should only be one file
                tmp_paths = []
                for file_object in file_list:
                    path = file_object.get_web_path()
                    # If the file type is not supported by web browsers, get the web version
                    # as a fallback.  TODO: Note that this will disable
                    # the retrieval of a sequence of files as in ####.tif case, where
                    # the asset is a sequence of files. So it will only display the web
                    # version of the first file in the asset list.
                    extension = File.get_extension(path)
                    accepted_exts = ['mp4', 'mov', 'jpg', 'png', 'ogg', 'webm']
                    if file_type == 'main' and extension not in accepted_exts:
                        path= snapshot.get_web_path_by_type(type="web")
                    # If the asset is a sequence of files, retrieve all the file paths.
                    # NOTE: In this case, web versions do not exist for all of the files.
                    # The web version is generated only for the first one in the sequence.
                    if path.find("#") != -1:
                        expanded_paths = snapshot.get_expanded_web_paths()
                        path = "|".join(expanded_paths)
                    tmp_paths.append(path)
                path = "|".join(tmp_paths)
                self.sobject_data[path] = sobject
                paths.append(path)
                # set the current path the user clicks on
                if not self.curr_path and sobject.get_search_key() == search_key and file_type=='main':
                    self.curr_path = path
        elif paths:
            return paths
        else:
            # TEST
            paths = [
                '/assets/test/store/The%20Boxter_v001.jpg',
                '/assets/test/store/Another%20one_v001.jpg',
                '/assets/test/store/Whatever_v001.jpg'
            ]
        """
        for index,path in enumerate(paths):
            path = urllib.pathname2url(path)
            paths[index] = path
        """
        return paths
samuel1208/scikit-learn
refs/heads/master
sklearn/covariance/graph_lasso_.py
127
"""GraphLasso: sparse inverse covariance estimation with an l1-penalized estimator. """ # Author: Gael Varoquaux <gael.varoquaux@normalesup.org> # License: BSD 3 clause # Copyright: INRIA import warnings import operator import sys import time import numpy as np from scipy import linalg from .empirical_covariance_ import (empirical_covariance, EmpiricalCovariance, log_likelihood) from ..utils import ConvergenceWarning from ..utils.extmath import pinvh from ..utils.validation import check_random_state, check_array from ..linear_model import lars_path from ..linear_model import cd_fast from ..cross_validation import check_cv, cross_val_score from ..externals.joblib import Parallel, delayed import collections # Helper functions to compute the objective and dual objective functions # of the l1-penalized estimator def _objective(mle, precision_, alpha): """Evaluation of the graph-lasso objective function the objective function is made of a shifted scaled version of the normalized log-likelihood (i.e. its empirical mean over the samples) and a penalisation term to promote sparsity """ p = precision_.shape[0] cost = - 2. * log_likelihood(mle, precision_) + p * np.log(2 * np.pi) cost += alpha * (np.abs(precision_).sum() - np.abs(np.diag(precision_)).sum()) return cost def _dual_gap(emp_cov, precision_, alpha): """Expression of the dual gap convergence criterion The specific definition is given in Duchi "Projected Subgradient Methods for Learning Sparse Gaussians". """ gap = np.sum(emp_cov * precision_) gap -= precision_.shape[0] gap += alpha * (np.abs(precision_).sum() - np.abs(np.diag(precision_)).sum()) return gap def alpha_max(emp_cov): """Find the maximum alpha for which there are some non-zeros off-diagonal. Parameters ---------- emp_cov : 2D array, (n_features, n_features) The sample covariance matrix Notes ----- This results from the bound for the all the Lasso that are solved in GraphLasso: each time, the row of cov corresponds to Xy. 
As the bound for alpha is given by `max(abs(Xy))`, the result follows. """ A = np.copy(emp_cov) A.flat[::A.shape[0] + 1] = 0 return np.max(np.abs(A)) # The g-lasso algorithm def graph_lasso(emp_cov, alpha, cov_init=None, mode='cd', tol=1e-4, enet_tol=1e-4, max_iter=100, verbose=False, return_costs=False, eps=np.finfo(np.float64).eps, return_n_iter=False): """l1-penalized covariance estimator Read more in the :ref:`User Guide <sparse_inverse_covariance>`. Parameters ---------- emp_cov : 2D ndarray, shape (n_features, n_features) Empirical covariance from which to compute the covariance estimate. alpha : positive float The regularization parameter: the higher alpha, the more regularization, the sparser the inverse covariance. cov_init : 2D array (n_features, n_features), optional The initial guess for the covariance. mode : {'cd', 'lars'} The Lasso solver to use: coordinate descent or LARS. Use LARS for very sparse underlying graphs, where p > n. Elsewhere prefer cd which is more numerically stable. tol : positive float, optional The tolerance to declare convergence: if the dual gap goes below this value, iterations are stopped. enet_tol : positive float, optional The tolerance for the elastic net solver used to calculate the descent direction. This parameter controls the accuracy of the search direction for a given column update, not of the overall parameter estimate. Only used for mode='cd'. max_iter : integer, optional The maximum number of iterations. verbose : boolean, optional If verbose is True, the objective function and dual gap are printed at each iteration. return_costs : boolean, optional If return_costs is True, the objective function and dual gap at each iteration are returned. eps : float, optional The machine-precision regularization in the computation of the Cholesky diagonal factors. Increase this for very ill-conditioned systems. return_n_iter : bool, optional Whether or not to return the number of iterations. 
Returns ------- covariance : 2D ndarray, shape (n_features, n_features) The estimated covariance matrix. precision : 2D ndarray, shape (n_features, n_features) The estimated (sparse) precision matrix. costs : list of (objective, dual_gap) pairs The list of values of the objective function and the dual gap at each iteration. Returned only if return_costs is True. n_iter : int Number of iterations. Returned only if `return_n_iter` is set to True. See Also -------- GraphLasso, GraphLassoCV Notes ----- The algorithm employed to solve this problem is the GLasso algorithm, from the Friedman 2008 Biostatistics paper. It is the same algorithm as in the R `glasso` package. One possible difference with the `glasso` R package is that the diagonal coefficients are not penalized. """ _, n_features = emp_cov.shape if alpha == 0: if return_costs: precision_ = linalg.inv(emp_cov) cost = - 2. * log_likelihood(emp_cov, precision_) cost += n_features * np.log(2 * np.pi) d_gap = np.sum(emp_cov * precision_) - n_features if return_n_iter: return emp_cov, precision_, (cost, d_gap), 0 else: return emp_cov, precision_, (cost, d_gap) else: if return_n_iter: return emp_cov, linalg.inv(emp_cov), 0 else: return emp_cov, linalg.inv(emp_cov) if cov_init is None: covariance_ = emp_cov.copy() else: covariance_ = cov_init.copy() # As a trivial regularization (Tikhonov like), we scale down the # off-diagonal coefficients of our starting point: This is needed, as # in the cross-validation the cov_init can easily be # ill-conditioned, and the CV loop blows. Beside, this takes # conservative stand-point on the initial conditions, and it tends to # make the convergence go faster. 
covariance_ *= 0.95 diagonal = emp_cov.flat[::n_features + 1] covariance_.flat[::n_features + 1] = diagonal precision_ = pinvh(covariance_) indices = np.arange(n_features) costs = list() # The different l1 regression solver have different numerical errors if mode == 'cd': errors = dict(over='raise', invalid='ignore') else: errors = dict(invalid='raise') try: # be robust to the max_iter=0 edge case, see: # https://github.com/scikit-learn/scikit-learn/issues/4134 d_gap = np.inf for i in range(max_iter): for idx in range(n_features): sub_covariance = covariance_[indices != idx].T[indices != idx] row = emp_cov[idx, indices != idx] with np.errstate(**errors): if mode == 'cd': # Use coordinate descent coefs = -(precision_[indices != idx, idx] / (precision_[idx, idx] + 1000 * eps)) coefs, _, _, _ = cd_fast.enet_coordinate_descent_gram( coefs, alpha, 0, sub_covariance, row, row, max_iter, enet_tol, check_random_state(None), False) else: # Use LARS _, _, coefs = lars_path( sub_covariance, row, Xy=row, Gram=sub_covariance, alpha_min=alpha / (n_features - 1), copy_Gram=True, method='lars', return_path=False) # Update the precision matrix precision_[idx, idx] = ( 1. 
/ (covariance_[idx, idx] - np.dot(covariance_[indices != idx, idx], coefs))) precision_[indices != idx, idx] = (- precision_[idx, idx] * coefs) precision_[idx, indices != idx] = (- precision_[idx, idx] * coefs) coefs = np.dot(sub_covariance, coefs) covariance_[idx, indices != idx] = coefs covariance_[indices != idx, idx] = coefs d_gap = _dual_gap(emp_cov, precision_, alpha) cost = _objective(emp_cov, precision_, alpha) if verbose: print( '[graph_lasso] Iteration % 3i, cost % 3.2e, dual gap %.3e' % (i, cost, d_gap)) if return_costs: costs.append((cost, d_gap)) if np.abs(d_gap) < tol: break if not np.isfinite(cost) and i > 0: raise FloatingPointError('Non SPD result: the system is ' 'too ill-conditioned for this solver') else: warnings.warn('graph_lasso: did not converge after %i iteration:' ' dual gap: %.3e' % (max_iter, d_gap), ConvergenceWarning) except FloatingPointError as e: e.args = (e.args[0] + '. The system is too ill-conditioned for this solver',) raise e if return_costs: if return_n_iter: return covariance_, precision_, costs, i + 1 else: return covariance_, precision_, costs else: if return_n_iter: return covariance_, precision_, i + 1 else: return covariance_, precision_ class GraphLasso(EmpiricalCovariance): """Sparse inverse covariance estimation with an l1-penalized estimator. Read more in the :ref:`User Guide <sparse_inverse_covariance>`. Parameters ---------- alpha : positive float, default 0.01 The regularization parameter: the higher alpha, the more regularization, the sparser the inverse covariance. mode : {'cd', 'lars'}, default 'cd' The Lasso solver to use: coordinate descent or LARS. Use LARS for very sparse underlying graphs, where p > n. Elsewhere prefer cd which is more numerically stable. tol : positive float, default 1e-4 The tolerance to declare convergence: if the dual gap goes below this value, iterations are stopped. enet_tol : positive float, optional The tolerance for the elastic net solver used to calculate the descent direction. 
This parameter controls the accuracy of the search direction for a given column update, not of the overall parameter estimate. Only used for mode='cd'. max_iter : integer, default 100 The maximum number of iterations. verbose : boolean, default False If verbose is True, the objective function and dual gap are plotted at each iteration. assume_centered : boolean, default False If True, data are not centered before computation. Useful when working with data whose mean is almost, but not exactly zero. If False, data are centered before computation. Attributes ---------- covariance_ : array-like, shape (n_features, n_features) Estimated covariance matrix precision_ : array-like, shape (n_features, n_features) Estimated pseudo inverse matrix. n_iter_ : int Number of iterations run. See Also -------- graph_lasso, GraphLassoCV """ def __init__(self, alpha=.01, mode='cd', tol=1e-4, enet_tol=1e-4, max_iter=100, verbose=False, assume_centered=False): self.alpha = alpha self.mode = mode self.tol = tol self.enet_tol = enet_tol self.max_iter = max_iter self.verbose = verbose self.assume_centered = assume_centered # The base class needs this for the score method self.store_precision = True def fit(self, X, y=None): X = check_array(X) if self.assume_centered: self.location_ = np.zeros(X.shape[1]) else: self.location_ = X.mean(0) emp_cov = empirical_covariance( X, assume_centered=self.assume_centered) self.covariance_, self.precision_, self.n_iter_ = graph_lasso( emp_cov, alpha=self.alpha, mode=self.mode, tol=self.tol, enet_tol=self.enet_tol, max_iter=self.max_iter, verbose=self.verbose, return_n_iter=True) return self # Cross-validation with GraphLasso def graph_lasso_path(X, alphas, cov_init=None, X_test=None, mode='cd', tol=1e-4, enet_tol=1e-4, max_iter=100, verbose=False): """l1-penalized covariance estimator along a path of decreasing alphas Read more in the :ref:`User Guide <sparse_inverse_covariance>`. 
Parameters ---------- X : 2D ndarray, shape (n_samples, n_features) Data from which to compute the covariance estimate. alphas : list of positive floats The list of regularization parameters, decreasing order. X_test : 2D array, shape (n_test_samples, n_features), optional Optional test matrix to measure generalisation error. mode : {'cd', 'lars'} The Lasso solver to use: coordinate descent or LARS. Use LARS for very sparse underlying graphs, where p > n. Elsewhere prefer cd which is more numerically stable. tol : positive float, optional The tolerance to declare convergence: if the dual gap goes below this value, iterations are stopped. enet_tol : positive float, optional The tolerance for the elastic net solver used to calculate the descent direction. This parameter controls the accuracy of the search direction for a given column update, not of the overall parameter estimate. Only used for mode='cd'. max_iter : integer, optional The maximum number of iterations. verbose : integer, optional The higher the verbosity flag, the more information is printed during the fitting. Returns ------- covariances_ : List of 2D ndarray, shape (n_features, n_features) The estimated covariance matrices. precisions_ : List of 2D ndarray, shape (n_features, n_features) The estimated (sparse) precision matrices. scores_ : List of float The generalisation error (log-likelihood) on the test data. Returned only if test data is passed. 
""" inner_verbose = max(0, verbose - 1) emp_cov = empirical_covariance(X) if cov_init is None: covariance_ = emp_cov.copy() else: covariance_ = cov_init covariances_ = list() precisions_ = list() scores_ = list() if X_test is not None: test_emp_cov = empirical_covariance(X_test) for alpha in alphas: try: # Capture the errors, and move on covariance_, precision_ = graph_lasso( emp_cov, alpha=alpha, cov_init=covariance_, mode=mode, tol=tol, enet_tol=enet_tol, max_iter=max_iter, verbose=inner_verbose) covariances_.append(covariance_) precisions_.append(precision_) if X_test is not None: this_score = log_likelihood(test_emp_cov, precision_) except FloatingPointError: this_score = -np.inf covariances_.append(np.nan) precisions_.append(np.nan) if X_test is not None: if not np.isfinite(this_score): this_score = -np.inf scores_.append(this_score) if verbose == 1: sys.stderr.write('.') elif verbose > 1: if X_test is not None: print('[graph_lasso_path] alpha: %.2e, score: %.2e' % (alpha, this_score)) else: print('[graph_lasso_path] alpha: %.2e' % alpha) if X_test is not None: return covariances_, precisions_, scores_ return covariances_, precisions_ class GraphLassoCV(GraphLasso): """Sparse inverse covariance w/ cross-validated choice of the l1 penalty Read more in the :ref:`User Guide <sparse_inverse_covariance>`. Parameters ---------- alphas : integer, or list positive float, optional If an integer is given, it fixes the number of points on the grids of alpha to be used. If a list is given, it gives the grid to be used. See the notes in the class docstring for more details. n_refinements: strictly positive integer The number of times the grid is refined. Not used if explicit values of alphas are passed. cv : cross-validation generator, optional see sklearn.cross_validation module. If None is passed, defaults to a 3-fold strategy tol: positive float, optional The tolerance to declare convergence: if the dual gap goes below this value, iterations are stopped. 
enet_tol : positive float, optional The tolerance for the elastic net solver used to calculate the descent direction. This parameter controls the accuracy of the search direction for a given column update, not of the overall parameter estimate. Only used for mode='cd'. max_iter: integer, optional Maximum number of iterations. mode: {'cd', 'lars'} The Lasso solver to use: coordinate descent or LARS. Use LARS for very sparse underlying graphs, where number of features is greater than number of samples. Elsewhere prefer cd which is more numerically stable. n_jobs: int, optional number of jobs to run in parallel (default 1). verbose: boolean, optional If verbose is True, the objective function and duality gap are printed at each iteration. assume_centered : Boolean If True, data are not centered before computation. Useful when working with data whose mean is almost, but not exactly zero. If False, data are centered before computation. Attributes ---------- covariance_ : numpy.ndarray, shape (n_features, n_features) Estimated covariance matrix. precision_ : numpy.ndarray, shape (n_features, n_features) Estimated precision matrix (inverse covariance). alpha_ : float Penalization parameter selected. cv_alphas_ : list of float All penalization parameters explored. `grid_scores`: 2D numpy.ndarray (n_alphas, n_folds) Log-likelihood score on left-out data across folds. n_iter_ : int Number of iterations run for the optimal alpha. See Also -------- graph_lasso, GraphLasso Notes ----- The search for the optimal penalization parameter (alpha) is done on an iteratively refined grid: first the cross-validated scores on a grid are computed, then a new refined grid is centered around the maximum, and so on. One of the challenges which is faced here is that the solvers can fail to converge to a well-conditioned estimate. The corresponding values of alpha then come out as missing values, but the optimum may be close to these missing values. 
""" def __init__(self, alphas=4, n_refinements=4, cv=None, tol=1e-4, enet_tol=1e-4, max_iter=100, mode='cd', n_jobs=1, verbose=False, assume_centered=False): self.alphas = alphas self.n_refinements = n_refinements self.mode = mode self.tol = tol self.enet_tol = enet_tol self.max_iter = max_iter self.verbose = verbose self.cv = cv self.n_jobs = n_jobs self.assume_centered = assume_centered # The base class needs this for the score method self.store_precision = True def fit(self, X, y=None): """Fits the GraphLasso covariance model to X. Parameters ---------- X : ndarray, shape (n_samples, n_features) Data from which to compute the covariance estimate """ X = check_array(X) if self.assume_centered: self.location_ = np.zeros(X.shape[1]) else: self.location_ = X.mean(0) emp_cov = empirical_covariance( X, assume_centered=self.assume_centered) cv = check_cv(self.cv, X, y, classifier=False) # List of (alpha, scores, covs) path = list() n_alphas = self.alphas inner_verbose = max(0, self.verbose - 1) if isinstance(n_alphas, collections.Sequence): alphas = self.alphas n_refinements = 1 else: n_refinements = self.n_refinements alpha_1 = alpha_max(emp_cov) alpha_0 = 1e-2 * alpha_1 alphas = np.logspace(np.log10(alpha_0), np.log10(alpha_1), n_alphas)[::-1] t0 = time.time() for i in range(n_refinements): with warnings.catch_warnings(): # No need to see the convergence warnings on this grid: # they will always be points that will not converge # during the cross-validation warnings.simplefilter('ignore', ConvergenceWarning) # Compute the cross-validated loss on the current grid # NOTE: Warm-restarting graph_lasso_path has been tried, and # this did not allow to gain anything (same execution time with # or without). 
this_path = Parallel( n_jobs=self.n_jobs, verbose=self.verbose )( delayed(graph_lasso_path)( X[train], alphas=alphas, X_test=X[test], mode=self.mode, tol=self.tol, enet_tol=self.enet_tol, max_iter=int(.1 * self.max_iter), verbose=inner_verbose) for train, test in cv) # Little danse to transform the list in what we need covs, _, scores = zip(*this_path) covs = zip(*covs) scores = zip(*scores) path.extend(zip(alphas, scores, covs)) path = sorted(path, key=operator.itemgetter(0), reverse=True) # Find the maximum (avoid using built in 'max' function to # have a fully-reproducible selection of the smallest alpha # in case of equality) best_score = -np.inf last_finite_idx = 0 for index, (alpha, scores, _) in enumerate(path): this_score = np.mean(scores) if this_score >= .1 / np.finfo(np.float64).eps: this_score = np.nan if np.isfinite(this_score): last_finite_idx = index if this_score >= best_score: best_score = this_score best_index = index # Refine the grid if best_index == 0: # We do not need to go back: we have chosen # the highest value of alpha for which there are # non-zero coefficients alpha_1 = path[0][0] alpha_0 = path[1][0] elif (best_index == last_finite_idx and not best_index == len(path) - 1): # We have non-converged models on the upper bound of the # grid, we need to refine the grid there alpha_1 = path[best_index][0] alpha_0 = path[best_index + 1][0] elif best_index == len(path) - 1: alpha_1 = path[best_index][0] alpha_0 = 0.01 * path[best_index][0] else: alpha_1 = path[best_index - 1][0] alpha_0 = path[best_index + 1][0] if not isinstance(n_alphas, collections.Sequence): alphas = np.logspace(np.log10(alpha_1), np.log10(alpha_0), n_alphas + 2) alphas = alphas[1:-1] if self.verbose and n_refinements > 1: print('[GraphLassoCV] Done refinement % 2i out of %i: % 3is' % (i + 1, n_refinements, time.time() - t0)) path = list(zip(*path)) grid_scores = list(path[1]) alphas = list(path[0]) # Finally, compute the score with alpha = 0 alphas.append(0) 
grid_scores.append(cross_val_score(EmpiricalCovariance(), X, cv=cv, n_jobs=self.n_jobs, verbose=inner_verbose)) self.grid_scores = np.array(grid_scores) best_alpha = alphas[best_index] self.alpha_ = best_alpha self.cv_alphas_ = alphas # Finally fit the model with the selected alpha self.covariance_, self.precision_, self.n_iter_ = graph_lasso( emp_cov, alpha=best_alpha, mode=self.mode, tol=self.tol, enet_tol=self.enet_tol, max_iter=self.max_iter, verbose=inner_verbose, return_n_iter=True) return self
DmitryTsybin/Study
refs/heads/master
Coursera/Using_Python_to_Access_Web_Data/Getting_Started/week_1.py
1
print "hello from Russia!"
mikewiebe-ansible/ansible
refs/heads/devel
lib/ansible/modules/cloud/xenserver/xenserver_guest_powerstate.py
12
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright: (c) 2018, Bojan Vitnik <bvitnik@mainstream.rs> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = r''' --- module: xenserver_guest_powerstate short_description: Manages power states of virtual machines running on Citrix Hypervisor/XenServer host or pool description: > This module can be used to power on, power off, restart or suspend virtual machine and gracefully reboot or shutdown guest OS of virtual machine. version_added: '2.8' author: - Bojan Vitnik (@bvitnik) <bvitnik@mainstream.rs> notes: - Minimal supported version of XenServer is 5.6. - Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0. - 'To acquire XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside Citrix Hypervisor/XenServer SDK (downloadable from Citrix website). Copy the XenAPI.py file from the SDK to your Python site-packages on your Ansible Control Node to use it. Latest version of the library can also be acquired from GitHub: https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI.py' - 'If no scheme is specified in C(hostname), module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are accessing XenServer host in trusted environment or use C(https://) scheme explicitly.' - 'To use C(https://) scheme for C(hostname) you have to either import host certificate to your OS certificate store or use C(validate_certs: no) which requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.' requirements: - python >= 2.6 - XenAPI options: state: description: - Specify the state VM should be in. 
- If C(state) is set to value other than C(present), then VM is transitioned into required state and facts are returned. - If C(state) is set to C(present), then VM is just checked for existence and facts are returned. type: str default: present choices: [ powered-on, powered-off, restarted, shutdown-guest, reboot-guest, suspended, present ] name: description: - Name of the VM to manage. - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with same name are found. - In case of multiple VMs with same name, use C(uuid) to uniquely specify VM to manage. - This parameter is case sensitive. type: str required: yes aliases: [ name_label ] uuid: description: - UUID of the VM to manage if known. This is XenServer's unique identifier. - It is required if name is not unique. type: str wait_for_ip_address: description: - Wait until XenServer detects an IP address for the VM. - This requires XenServer Tools to be preinstalled on the VM to work properly. type: bool default: no state_change_timeout: description: - 'By default, module will wait indefinitely for VM to change state or acquire an IP address if C(wait_for_ip_address: yes).' - If this parameter is set to positive value, the module will instead wait specified number of seconds for the state change. - In case of timeout, module will generate an error message. 
type: int default: 0 extends_documentation_fragment: xenserver.documentation ''' EXAMPLES = r''' - name: Power on VM xenserver_guest_powerstate: hostname: "{{ xenserver_hostname }}" username: "{{ xenserver_username }}" password: "{{ xenserver_password }}" name: testvm_11 state: powered-on delegate_to: localhost register: facts ''' RETURN = r''' instance: description: Metadata about the VM returned: always type: dict sample: { "cdrom": { "type": "none" }, "customization_agent": "native", "disks": [ { "name": "windows-template-testing-0", "name_desc": "", "os_device": "xvda", "size": 42949672960, "sr": "Local storage", "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", "vbd_userdevice": "0" }, { "name": "windows-template-testing-1", "name_desc": "", "os_device": "xvdb", "size": 42949672960, "sr": "Local storage", "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", "vbd_userdevice": "1" } ], "domid": "56", "folder": "", "hardware": { "memory_mb": 8192, "num_cpu_cores_per_socket": 2, "num_cpus": 4 }, "home_server": "", "is_template": false, "name": "windows-template-testing", "name_desc": "", "networks": [ { "gateway": "192.168.0.254", "gateway6": "fc00::fffe", "ip": "192.168.0.200", "ip6": [ "fe80:0000:0000:0000:e9cb:625a:32c5:c291", "fc00:0000:0000:0000:0000:0000:0000:0001" ], "mac": "ba:91:3a:48:20:76", "mtu": "1500", "name": "Pool-wide network associated with eth1", "netmask": "255.255.255.128", "prefix": "25", "prefix6": "64", "vif_device": "0" } ], "other_config": { "base_template_name": "Windows Server 2016 (64-bit)", "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5", "install-methods": "cdrom", "instant": "true", "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e" }, "platform": { "acpi": "1", "apic": "true", "cores-per-socket": "2", "device_id": "0002", "hpet": "true", "nx": "true", "pae": "true", "timeoffset": "-25200", "vga": "std", "videoram": "8", "viridian": "true", "viridian_reference_tsc": "true", "viridian_time_ref_count": "true" }, 
"state": "poweredon", "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda", "xenstore_data": { "vm-data": "" } } ''' import re HAS_XENAPI = False try: import XenAPI HAS_XENAPI = True except ImportError: pass from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.xenserver import (xenserver_common_argument_spec, XAPI, XenServerObject, get_object_ref, gather_vm_params, gather_vm_facts, set_vm_power_state, wait_for_vm_ip_address) class XenServerVM(XenServerObject): """Class for managing XenServer VM. Attributes: vm_ref (str): XAPI reference to VM. vm_params (dict): A dictionary with VM parameters as returned by gather_vm_params() function. """ def __init__(self, module): """Inits XenServerVM using module parameters. Args: module: Reference to Ansible module object. """ super(XenServerVM, self).__init__(module) self.vm_ref = get_object_ref(self.module, self.module.params['name'], self.module.params['uuid'], obj_type="VM", fail=True, msg_prefix="VM search: ") self.gather_params() def gather_params(self): """Gathers all VM parameters available in XAPI database.""" self.vm_params = gather_vm_params(self.module, self.vm_ref) def gather_facts(self): """Gathers and returns VM facts.""" return gather_vm_facts(self.module, self.vm_params) def set_power_state(self, power_state): """Controls VM power state.""" state_changed, current_state = set_vm_power_state(self.module, self.vm_ref, power_state, self.module.params['state_change_timeout']) # If state has changed, update vm_params. 
if state_changed: self.vm_params['power_state'] = current_state.capitalize() return state_changed def wait_for_ip_address(self): """Waits for VM to acquire an IP address.""" self.vm_params['guest_metrics'] = wait_for_vm_ip_address(self.module, self.vm_ref, self.module.params['state_change_timeout']) def main(): argument_spec = xenserver_common_argument_spec() argument_spec.update( state=dict(type='str', default='present', choices=['powered-on', 'powered-off', 'restarted', 'shutdown-guest', 'reboot-guest', 'suspended', 'present']), name=dict(type='str', aliases=['name_label']), uuid=dict(type='str'), wait_for_ip_address=dict(type='bool', default=False), state_change_timeout=dict(type='int', default=0), ) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_one_of=[ ['name', 'uuid'], ], ) result = {'failed': False, 'changed': False} # Module will exit with an error message if no VM is found. vm = XenServerVM(module) # Set VM power state. if module.params['state'] != "present": result['changed'] = vm.set_power_state(module.params['state']) if module.params['wait_for_ip_address']: vm.wait_for_ip_address() result['instance'] = vm.gather_facts() if result['failed']: module.fail_json(**result) else: module.exit_json(**result) if __name__ == '__main__': main()
hmen89/odoo
refs/heads/master
openerp/service/common.py
86
# -*- coding: utf-8 -*-

import logging

import openerp.release
import openerp.tools
# BUG FIX: `_` was used in exp_about() but never imported -> NameError at
# runtime. It comes from the translation machinery of the already-imported
# openerp package.
from openerp.tools.translate import _

import security

_logger = logging.getLogger(__name__)

# Version/protocol information returned by the `version` RPC method.
RPC_VERSION_1 = {
    'server_version': openerp.release.version,
    'server_version_info': openerp.release.version_info,
    'server_serie': openerp.release.serie,
    'protocol_version': 1,
}

def dispatch(method, params):
    """Dispatch an RPC call on the 'common' service to its exp_* handler.

    :param method: name of the RPC method (without the ``exp_`` prefix).
    :param params: positional parameters forwarded to the handler.
    :raises Exception: if *method* is not a known common-service method.
    """
    if method in ['login', 'about', 'timezone_get', 'version', 'authenticate']:
        # Public methods: no super-admin check required.
        pass
    elif method in ['set_loglevel']:
        # First parameter is the super-admin password; strip it before
        # forwarding the remaining arguments to the handler.
        passwd = params[0]
        params = params[1:]
        security.check_super(passwd)
    else:
        raise Exception("Method not found: %s" % method)
    fn = globals()['exp_' + method]
    return fn(*params)

def exp_login(db, login, password):
    # TODO: legacy indirection through 'security', should use directly
    # the res.users model
    res = security.login(db, login, password)
    msg = res and 'successful login' or 'bad login or password'
    _logger.info("%s from '%s' using database '%s'", msg, login, db.lower())
    return res or False

def exp_authenticate(db, login, password, user_agent_env):
    """Authenticate against res.users, returning the uid or False."""
    res_users = openerp.registry(db)['res.users']
    return res_users.authenticate(db, login, password, user_agent_env)

def exp_version():
    """Return the server version/protocol descriptor."""
    return RPC_VERSION_1

def exp_about(extended=False):
    """Return information about the OpenERP Server.

    @param extended: if True then return version info
    @return string if extended is False else tuple
    """
    info = _('See http://openerp.com')

    if extended:
        return info, openerp.release.version
    return info

def exp_timezone_get(db, login, password):
    """Return the server's configured timezone (credentials unused here)."""
    return openerp.tools.misc.get_server_timezone()

def exp_set_loglevel(loglevel, logger=None):
    # TODO Previously, the level was set on the now deprecated
    # `openerp.netsvc.Logger` class.
    return True

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
tomkralidis/QGIS
refs/heads/master
src/plugins/grass/scripts/r.external.all.py
45
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
***************************************************************************
    r.external.all.py
    ---------------------
    Date                 : July 2009
    Copyright            : (C) 2009 by Lorenzo Masini
    Email                : rugginoso at develer dot com
***************************************************************************
*                                                                         *
*   This program is free software; you can redistribute it and/or modify  *
*   it under the terms of the GNU General Public License as published by  *
*   the Free Software Foundation; either version 2 of the License, or     *
*   (at your option) any later version.                                   *
*                                                                         *
***************************************************************************
"""

__author__ = 'Lorenzo Masini'
__date__ = 'July 2009'
__copyright__ = '(C) 2009, Lorenzo Masini'

############################################################################
#
# MODULE:       qgis.r.external.all.py
# AUTHOR(S):    Lorenzo Masini
#
# PURPOSE:      Link all GDAL supported raster files into a directory
#               to binary raster map layers.
# COPYRIGHT:    (C) 2009 by Lorenzo Masini
#
#               This program is free software under the GNU General Public
#               License (>=v2). Read the file COPYING that comes with GRASS
#               for details.
#
#############################################################################

#%Module
#% description: Link all GDAL supported raster files into a directory to binary raster map layers.
#% keywords: raster, import
#%End
#%option
#% key: input
#% type: string
#% gisprompt: input
#% key_desc : name
#% description: Directory containing raster files
#% required : yes
#%end
#%option
#% key: band
#% type: integer
#% description: Band to select
#% answer: 1
#% required : no
#%end
#%flag
#% key: o
#% description: Override projection (use location's projection)
#%end
#%flag
#% key: e
#% description: Extend location extents based on new dataset
#%end
#%flag
#% key: r
#% description: Recursively scan subdirectories
#%end
# BUG FIX: the '#%end' terminator above was missing, which breaks the
# GRASS option/flag parser for the whole header.

import os

try:
    from grass.script import core as grass
except ImportError:
    # Fall back to the legacy module name; if that is also unavailable,
    # report the minimum supported GRASS version.
    try:
        import grass
    except ImportError:
        raise Exception("Cannot find 'grass' Python module. Python is supported by GRASS from version >= 6.4")


def import_directory_of_rasters(directory, recursive):
    """Link every raster file under *directory* with r.external.

    os.walk() already descends into subdirectories, so no explicit
    recursion is needed; when *recursive* is off we stop after the
    top-level directory.  (The previous manual recursion passed the bare
    subdirectory name — resolved against the CWD, not *directory* — and
    imported nested files twice.)
    """
    for root, dirnames, filenames in os.walk(directory):
        for filename in filenames:
            # Map name: the file name without its extension (the old
            # filename[:-4] slicing assumed a 3-character extension).
            mapname = os.path.splitext(filename)[0]
            if grass.run_command('r.external',
                                 flags=flags_string,
                                 input=os.path.join(root, filename),
                                 band=options['band'],
                                 output=mapname,
                                 title=mapname) != 0:
                grass.warning('Cannot import file ' + filename)
        if not recursive:
            break


def main():
    input = options['input']
    recursive = flags['r']
    import_directory_of_rasters(input, recursive)

if __name__ == "__main__":
    options, flags = grass.parser()
    flags_string = "".join([k for k in flags.keys() if flags[k] and k != 'r'])
    main()
easyfmxu/zulip
refs/heads/master
zerver/lib/upload.py
121
from __future__ import absolute_import from django.conf import settings from django.template.defaultfilters import slugify from zerver.lib.avatar import user_avatar_hash from boto.s3.key import Key from boto.s3.connection import S3Connection from mimetypes import guess_type, guess_extension from zerver.models import get_user_profile_by_id import base64 import os from PIL import Image, ImageOps from StringIO import StringIO import random # Performance Note: # # For writing files to S3, the file could either be stored in RAM # (if it is less than 2.5MiB or so) or an actual temporary file on disk. # # Because we set FILE_UPLOAD_MAX_MEMORY_SIZE to 0, only the latter case # should occur in practice. # # This is great, because passing the pseudofile object that Django gives # you to boto would be a pain. # To come up with a s3 key we randomly generate a "directory". The "file # name" is the original filename provided by the user run through Django's # slugify. def sanitize_name(name): split_name = name.split('.') base = ".".join(split_name[:-1]) extension = split_name[-1] return slugify(base) + "." + slugify(extension) def random_name(bytes=60): return base64.urlsafe_b64encode(os.urandom(bytes)) def resize_avatar(image_data): AVATAR_SIZE = 100 im = Image.open(StringIO(image_data)) im = ImageOps.fit(im, (AVATAR_SIZE, AVATAR_SIZE), Image.ANTIALIAS) out = StringIO() im.save(out, format='png') return out.getvalue() ### S3 def get_bucket(conn, bucket_name): # Calling get_bucket() with validate=True can apparently lead # to expensive S3 bills: # http://www.appneta.com/blog/s3-list-get-bucket-default/ # The benefits of validation aren't completely clear to us, and # we want to save on our bills, so we set the validate flag to False. # (We think setting validate to True would cause us to fail faster # in situations where buckets don't exist, but that shouldn't be # an issue for us.) 
bucket = conn.get_bucket(bucket_name, validate=False) return bucket def upload_image_to_s3( bucket_name, file_name, content_type, user_profile, contents, ): conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY) bucket = get_bucket(conn, bucket_name) key = Key(bucket) key.key = file_name key.set_metadata("user_profile_id", str(user_profile.id)) key.set_metadata("realm_id", str(user_profile.realm.id)) if content_type: headers = {'Content-Type': content_type} else: headers = None key.set_contents_from_string(contents, headers=headers) def get_file_info(request, user_file): uploaded_file_name = user_file.name content_type = request.GET.get('mimetype') if content_type is None: content_type = guess_type(uploaded_file_name)[0] else: uploaded_file_name = uploaded_file_name + guess_extension(content_type) return uploaded_file_name, content_type def upload_message_image_s3(uploaded_file_name, content_type, file_data, user_profile, target_realm=None): bucket_name = settings.S3_AUTH_UPLOADS_BUCKET s3_file_name = "/".join([ str(target_realm.id if target_realm is not None else user_profile.realm.id), random_name(18), sanitize_name(uploaded_file_name) ]) url = "/user_uploads/%s" % (s3_file_name) upload_image_to_s3( bucket_name, s3_file_name, content_type, user_profile, file_data ) return url def get_signed_upload_url(path): conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY) return conn.generate_url(15, 'GET', bucket=settings.S3_AUTH_UPLOADS_BUCKET, key=path) def get_realm_for_filename(path): conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY) key = get_bucket(conn, settings.S3_AUTH_UPLOADS_BUCKET).get_key(path) if key is None: # This happens if the key does not exist. 
return None return get_user_profile_by_id(key.metadata["user_profile_id"]).realm.id def upload_avatar_image_s3(user_file, user_profile, email): content_type = guess_type(user_file.name)[0] bucket_name = settings.S3_AVATAR_BUCKET s3_file_name = user_avatar_hash(email) image_data = user_file.read() upload_image_to_s3( bucket_name, s3_file_name + ".original", content_type, user_profile, image_data, ) resized_data = resize_avatar(image_data) upload_image_to_s3( bucket_name, s3_file_name, 'image/png', user_profile, resized_data, ) # See avatar_url in avatar.py for URL. (That code also handles the case # that users use gravatar.) ### Local def mkdirs(path): dirname = os.path.dirname(path) if not os.path.isdir(dirname): os.makedirs(dirname) def write_local_file(type, path, file_data): file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, type, path) mkdirs(file_path) with open(file_path, 'wb') as f: f.write(file_data) def upload_message_image_local(uploaded_file_name, content_type, file_data, user_profile, target_realm=None): # Split into 256 subdirectories to prevent directories from getting too big path = "/".join([ str(user_profile.realm.id), format(random.randint(0, 255), 'x'), random_name(18), sanitize_name(uploaded_file_name) ]) write_local_file('files', path, file_data) return '/user_uploads/' + path def upload_avatar_image_local(user_file, user_profile, email): email_hash = user_avatar_hash(email) image_data = user_file.read() write_local_file('avatars', email_hash+'.original', image_data) resized_data = resize_avatar(image_data) write_local_file('avatars', email_hash+'.png', resized_data) ### Common if settings.LOCAL_UPLOADS_DIR is not None: upload_message_image = upload_message_image_local upload_avatar_image = upload_avatar_image_local else: upload_message_image = upload_message_image_s3 upload_avatar_image = upload_avatar_image_s3 def upload_message_image_through_web_client(request, user_file, user_profile): uploaded_file_name, content_type = 
get_file_info(request, user_file) return upload_message_image(uploaded_file_name, content_type, user_file.read(), user_profile)
Permutatrix/servo
refs/heads/master
tests/wpt/web-platform-tests/tools/webdriver/webdriver/servo.py
11
class ServoExtensionCommands(object):
    """Servo-specific WebDriver extension commands.

    Each command POSTs a JSON payload of preference names (or a mapping
    of name to value) to one of the ``servo/prefs/*`` endpoints via the
    owning WebDriver session.
    """

    def __init__(self, session):
        self.session = session

    @command
    def get_prefs(self, *prefs):
        """Fetch the current values of the named preferences."""
        payload = {"prefs": list(prefs)}
        return self.session.send_command("POST", "servo/prefs/get", payload)

    @command
    def set_prefs(self, prefs):
        """Apply preferences from a mapping of preference name to value."""
        payload = {"prefs": prefs}
        return self.session.send_command("POST", "servo/prefs/set", payload)

    @command
    def reset_prefs(self, *prefs):
        """Reset the named preferences back to their default values."""
        payload = {"prefs": list(prefs)}
        return self.session.send_command("POST", "servo/prefs/reset", payload)
viewdy/phantomjs2
refs/heads/master
src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/commands/queuestest.py
124
# Copyright (C) 2009 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import unittest2 as unittest from webkitpy.common.net.bugzilla import Attachment from webkitpy.common.system.outputcapture import OutputCapture from webkitpy.common.system.executive import ScriptError from webkitpy.thirdparty.mock import Mock from webkitpy.tool.commands.stepsequence import StepSequenceErrorHandler from webkitpy.tool.mocktool import MockTool class MockQueueEngine(object): def __init__(self, name, queue, wakeup_event, seconds_to_sleep): pass def run(self): pass class QueuesTest(unittest.TestCase): # This is _patch1 in mocktool.py mock_work_item = MockTool().bugs.fetch_attachment(10000) def assert_outputs(self, func, func_name, args, expected_stdout, expected_stderr, expected_exceptions, expected_logs): exception = None if expected_exceptions and func_name in expected_exceptions: exception = expected_exceptions[func_name] logs = None if expected_logs and func_name in expected_logs: logs = expected_logs[func_name] OutputCapture().assert_outputs(self, func, args=args, expected_stdout=expected_stdout.get(func_name, ""), expected_stderr=expected_stderr.get(func_name, ""), expected_exception=exception, expected_logs=logs) def _default_begin_work_queue_stderr(self, name): string_replacements = {"name": name} return "MOCK: update_status: %(name)s Starting Queue\n" % string_replacements def _default_begin_work_queue_logs(self, name): checkout_dir = '/mock-checkout' string_replacements = {"name": name, 'checkout_dir': checkout_dir} return "CAUTION: %(name)s will discard all local changes in \"%(checkout_dir)s\"\nRunning WebKit %(name)s.\nMOCK: update_status: %(name)s Starting Queue\n" % string_replacements def assert_queue_outputs(self, queue, args=None, work_item=None, expected_stdout=None, expected_stderr=None, expected_exceptions=None, expected_logs=None, options=None, tool=None): if not tool: tool = MockTool() # This is a hack to make it easy for callers to not have to setup a custom MockFileSystem just to test the commit-queue # the cq tries to read the 
layout test results, and will hit a KeyError in MockFileSystem if we don't do this. tool.filesystem.write_text_file('/mock-results/full_results.json', "") if not expected_stdout: expected_stdout = {} if not expected_stderr: expected_stderr = {} if not args: args = [] if not options: options = Mock() options.port = None if not work_item: work_item = self.mock_work_item tool.user.prompt = lambda message: "yes" queue.execute(options, args, tool, engine=MockQueueEngine) self.assert_outputs(queue.queue_log_path, "queue_log_path", [], expected_stdout, expected_stderr, expected_exceptions, expected_logs) self.assert_outputs(queue.work_item_log_path, "work_item_log_path", [work_item], expected_stdout, expected_stderr, expected_exceptions, expected_logs) self.assert_outputs(queue.begin_work_queue, "begin_work_queue", [], expected_stdout, expected_stderr, expected_exceptions, expected_logs) self.assert_outputs(queue.should_continue_work_queue, "should_continue_work_queue", [], expected_stdout, expected_stderr, expected_exceptions, expected_logs) self.assert_outputs(queue.next_work_item, "next_work_item", [], expected_stdout, expected_stderr, expected_exceptions, expected_logs) self.assert_outputs(queue.process_work_item, "process_work_item", [work_item], expected_stdout, expected_stderr, expected_exceptions, expected_logs) self.assert_outputs(queue.handle_unexpected_error, "handle_unexpected_error", [work_item, "Mock error message"], expected_stdout, expected_stderr, expected_exceptions, expected_logs) # Should we have a different function for testing StepSequenceErrorHandlers? if isinstance(queue, StepSequenceErrorHandler): self.assert_outputs(queue.handle_script_error, "handle_script_error", [tool, {"patch": self.mock_work_item}, ScriptError(message="ScriptError error message", script_args="MockErrorCommand", output="MOCK output")], expected_stdout, expected_stderr, expected_exceptions, expected_logs)
mdietrichc2c/server-tools
refs/heads/8.0
__unported__/super_calendar/super_calendar.py
24
# -*- coding: utf-8 -*- ############################################################################## # # Copyright (C) 2012 Agile Business Group sagl (<http://www.agilebg.com>) # Copyright (C) 2012 Domsense srl (<http://www.domsense.com>) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, orm from openerp.tools.translate import _ import logging from mako.template import Template from datetime import datetime from openerp import tools from openerp.tools.safe_eval import safe_eval def _models_get(self, cr, uid, context=None): obj = self.pool.get('ir.model') ids = obj.search(cr, uid, []) res = obj.read(cr, uid, ids, ['model', 'name'], context) return [(r['model'], r['name']) for r in res] class super_calendar_configurator(orm.Model): _logger = logging.getLogger('super.calendar') _name = 'super.calendar.configurator' _columns = { 'name': fields.char('Name', size=64, required=True), 'line_ids': fields.one2many('super.calendar.configurator.line', 'configurator_id', 'Lines'), } def generate_calendar_records(self, cr, uid, ids, context=None): configurator_ids = self.search(cr, uid, []) super_calendar_pool = self.pool.get('super.calendar') # removing old records super_calendar_ids = super_calendar_pool.search(cr, uid, [], context=context) super_calendar_pool.unlink(cr, uid, 
super_calendar_ids, context=context) for configurator in self.browse(cr, uid, configurator_ids, context): for line in configurator.line_ids: current_pool = self.pool.get(line.name.model) current_record_ids = current_pool.search( cr, uid, line.domain and safe_eval(line.domain) or [], context=context) for current_record_id in current_record_ids: current_record = current_pool.browse(cr, uid, current_record_id, context=context) if (line.user_field_id and current_record[line.user_field_id.name] and current_record[line.user_field_id.name]._table_name != 'res.users'): raise orm.except_orm( _('Error'), _("The 'User' field of record %s (%s) does not refer to res.users") % (current_record[line.description_field_id.name], line.name.model)) if (((line.description_field_id and current_record[line.description_field_id.name]) or line.description_code) and current_record[line.date_start_field_id.name]): duration = False if (not line.duration_field_id and line.date_stop_field_id and current_record[line.date_start_field_id.name] and current_record[line.date_stop_field_id.name]): date_start = datetime.strptime( current_record[line.date_start_field_id.name], tools.DEFAULT_SERVER_DATETIME_FORMAT ) date_stop = datetime.strptime( current_record[line.date_stop_field_id.name], tools.DEFAULT_SERVER_DATETIME_FORMAT ) duration = (date_stop - date_start).total_seconds() / 3600 elif line.duration_field_id: duration = current_record[line.duration_field_id.name] if line.description_type != 'code': name = current_record[line.description_field_id.name] else: parse_dict = {'o': current_record} mytemplate = Template(line.description_code) name = mytemplate.render(**parse_dict) super_calendar_values = { 'name': name, 'model_description': line.description, 'date_start': current_record[line.date_start_field_id.name], 'duration': duration, 'user_id': ( line.user_field_id and current_record[line.user_field_id.name] and current_record[line.user_field_id.name].id or False ), 'configurator_id': 
configurator.id, 'res_id': line.name.model+','+str(current_record['id']), 'model_id': line.name.id, } super_calendar_pool.create(cr, uid, super_calendar_values, context=context) self._logger.info('Calendar generated') return True class super_calendar_configurator_line(orm.Model): _name = 'super.calendar.configurator.line' _columns = { 'name': fields.many2one('ir.model', 'Model', required=True), 'description': fields.char('Description', size=128, required=True), 'domain': fields.char('Domain', size=512), 'configurator_id': fields.many2one('super.calendar.configurator', 'Configurator'), 'description_type': fields.selection([ ('field', 'Field'), ('code', 'Code'), ], string="Description Type"), 'description_field_id': fields.many2one( 'ir.model.fields', 'Description field', domain="[('model_id', '=', name),('ttype', '=', 'char')]"), 'description_code': fields.text( 'Description field', help="Use '${o}' to refer to the involved object. E.g.: '${o.project_id.name}'" ), 'date_start_field_id': fields.many2one( 'ir.model.fields', 'Start date field', domain="['&','|',('ttype', '=', 'datetime'),('ttype', '=', 'date'),('model_id', '=', name)]", required=True), 'date_stop_field_id': fields.many2one( 'ir.model.fields', 'End date field', domain="['&',('ttype', '=', 'datetime'),('model_id', '=', name)]" ), 'duration_field_id': fields.many2one( 'ir.model.fields', 'Duration field', domain="['&',('ttype', '=', 'float'),('model_id', '=', name)]"), 'user_field_id': fields.many2one( 'ir.model.fields', 'User field', domain="['&',('ttype', '=', 'many2one'),('model_id', '=', name)]"), } class super_calendar(orm.Model): _name = 'super.calendar' _columns = { 'name': fields.char('Description', size=512, required=True), 'model_description': fields.char('Model Description', size=128, required=True), 'date_start': fields.datetime('Start date', required=True), 'duration': fields.float('Duration'), 'user_id': fields.many2one('res.users', 'User'), 'configurator_id': 
fields.many2one('super.calendar.configurator', 'Configurator'), 'res_id': fields.reference('Resource', selection=_models_get, size=128), 'model_id': fields.many2one('ir.model', 'Model'), }
ax003d/openerp
refs/heads/master
openerp/addons/web/tests/test_menu.py
65
# -*- coding: utf-8 -*- import collections import mock import unittest2 from ..controllers import main class Placeholder(object): def __init__(self, **kwargs): for k, v in kwargs.iteritems(): setattr(self, k, v) class LoadTest(unittest2.TestCase): def setUp(self): self.menu = main.Menu() self.request = mock.Mock() # Have self.request.session.model() return a different mock object for # each model (but always the same mock for a given model name) models = collections.defaultdict(mock.Mock) model = self.request.session.model.side_effect = \ lambda model_name: models[model_name] self.MockMenus = model('ir.ui.menu') # Mock the absence of custom menu model('res.users').read.return_value = [{ 'menu_id': False }] def tearDown(self): del self.request del self.MockMenus del self.menu def test_empty(self): self.MockMenus.search.return_value = [] self.MockMenus.read.return_value = [] root = self.menu.load(self.request) self.MockMenus.search.assert_called_with( [('parent_id','=', False)], 0, False, False, self.request.context) self.assertEqual(root['all_menu_ids'], []) self.assertListEqual( root['children'], []) def test_applications_sort(self): self.MockMenus.search.return_value = [1, 2, 3] self.MockMenus.read.side_effect = lambda *args: [ {'id': 1, 'sequence': 1, 'parent_id': False}, {'id': 3, 'sequence': 2, 'parent_id': False}, {'id': 2, 'sequence': 3, 'parent_id': False}, ] root = self.menu.load(self.request) self.MockMenus.search.assert_called_with( [('id','child_of', [1, 2, 3])], 0, False, False, self.request.context) self.MockMenus.read.assert_called_with( [1, 2, 3], ['name', 'sequence', 'parent_id', 'action'], self.request.context) self.assertEqual(root['all_menu_ids'], [1, 2, 3]) self.assertEqual( root['children'], [{ 'id': 1, 'sequence': 1, 'parent_id': False, 'children': [] }, { 'id': 3, 'sequence': 2, 'parent_id': False, 'children': [] }, { 'id': 2, 'sequence': 3, 'parent_id': False, 'children': [] }]) def test_deep(self): self.MockMenus.search.side_effect = lambda 
domain, *args: ( [1] if domain == [('parent_id', '=', False)] else [1, 2, 3, 4]) root = {'id': 1, 'sequence': 1, 'parent_id': False} self.MockMenus.read.side_effect = lambda ids, *args: ( [root] if ids == [1] else [ {'id': 1, 'sequence': 1, 'parent_id': False}, {'id': 2, 'sequence': 2, 'parent_id': [1, '']}, {'id': 3, 'sequence': 1, 'parent_id': [2, '']}, {'id': 4, 'sequence': 2, 'parent_id': [2, '']}, ]) root = self.menu.load(self.request) self.MockMenus.search.assert_called_with( [('id','child_of', [1])], 0, False, False, self.request.context) self.assertEqual(root['all_menu_ids'], [1, 2, 3, 4]) self.assertEqual( root['children'], [{ 'id': 1, 'sequence': 1, 'parent_id': False, 'children': [{ 'id': 2, 'sequence': 2, 'parent_id': [1, ''], 'children': [{ 'id': 3, 'sequence': 1, 'parent_id': [2, ''], 'children': [] }, { 'id': 4, 'sequence': 2, 'parent_id': [2, ''], 'children': [] }] }] }] ) class ActionMungerTest(unittest2.TestCase): def setUp(self): self.menu = main.Menu() def test_actual_treeview(self): action = { "views": [[False, "tree"], [False, "form"], [False, "calendar"]], "view_type": "tree", "view_id": False, "view_mode": "tree,form,calendar" } changed = action.copy() del action['view_type'] main.fix_view_modes(changed) self.assertEqual(changed, action) def test_list_view(self): action = { "views": [[False, "tree"], [False, "form"], [False, "calendar"]], "view_type": "form", "view_id": False, "view_mode": "tree,form,calendar" } main.fix_view_modes(action) self.assertEqual(action, { "views": [[False, "list"], [False, "form"], [False, "calendar"]], "view_id": False, "view_mode": "list,form,calendar" }) def test_redundant_views(self): action = { "views": [[False, "tree"], [False, "form"], [False, "calendar"], [42, "tree"]], "view_type": "form", "view_id": False, "view_mode": "tree,form,calendar" } main.fix_view_modes(action) self.assertEqual(action, { "views": [[False, "list"], [False, "form"], [False, "calendar"], [42, "list"]], "view_id": False, "view_mode": 
"list,form,calendar" })
notriddle/servo
refs/heads/master
tests/wpt/web-platform-tests/common/redirect.py
259
def main(request, response):
    """Simple handler that causes redirection.

    The request should typically have two query parameters:
    status - The status to use for the redirection. Defaults to 302.
    location - The resource to redirect to.
    """
    # Default to a temporary redirect; an unparsable "status" query
    # parameter is silently ignored and the default kept.
    status = 302
    if "status" in request.GET:
        raw_status = request.GET.first("status")
        try:
            status = int(raw_status)
        except ValueError:
            pass

    response.status = status
    response.headers.set("Location", request.GET.first("location"))
william-richard/moto
refs/heads/master
tests/test_sts/test_server.py
2
from __future__ import unicode_literals

import sure  # noqa

import moto.server as server

"""
Test the different server responses
"""


def _sts_client():
    # Every test gets its own fresh backend app and Flask test client so
    # state cannot leak between test cases.
    return server.create_backend_app("sts").test_client()


def test_sts_get_session_token():
    res = _sts_client().get("/?Action=GetSessionToken")
    res.status_code.should.equal(200)
    for fragment in (b"SessionToken", b"AccessKeyId"):
        res.data.should.contain(fragment)


def test_sts_get_federation_token():
    res = _sts_client().get("/?Action=GetFederationToken&Name=Bob")
    res.status_code.should.equal(200)
    for fragment in (b"SessionToken", b"AccessKeyId"):
        res.data.should.contain(fragment)


def test_sts_get_caller_identity():
    res = _sts_client().get("/?Action=GetCallerIdentity")
    res.status_code.should.equal(200)
    for fragment in (b"Arn", b"UserId", b"Account"):
        res.data.should.contain(fragment)
DONIKAN/django
refs/heads/master
tests/generic_views/test_list.py
309
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

import datetime

from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, override_settings
from django.utils.encoding import force_str
from django.views.generic.base import View

from .models import Artist, Author, Book, Page


@override_settings(ROOT_URLCONF='generic_views.urls')
class ListViewTests(TestCase):
    """Tests for the generic ListView: context variables, pagination
    behaviour, template selection, and queryset ordering.
    """

    @classmethod
    def setUpTestData(cls):
        # Shared fixture: one artist, two authors, two books (one linked to
        # an author), and one flat page.
        cls.artist1 = Artist.objects.create(name='Rene Magritte')
        cls.author1 = Author.objects.create(name='Roberto Bolaño', slug='roberto-bolano')
        cls.author2 = Author.objects.create(name='Scott Rosenberg', slug='scott-rosenberg')
        cls.book1 = Book.objects.create(name='2066', slug='2066', pages=800, pubdate=datetime.date(2008, 10, 1))
        cls.book1.authors.add(cls.author1)
        cls.book2 = Book.objects.create(
            name='Dreaming in Code', slug='dreaming-in-code', pages=300,
            pubdate=datetime.date(2006, 5, 1)
        )
        cls.page1 = Page.objects.create(
            content='I was once bitten by a moose.',
            template='generic_views/page_template.html'
        )

    def test_items(self):
        # A plain list of dicts (not a queryset) works as the item source.
        res = self.client.get('/list/dict/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/list.html')
        self.assertEqual(res.context['object_list'][0]['first'], 'John')

    def test_queryset(self):
        res = self.client.get('/list/authors/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
        self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
        self.assertIsInstance(res.context['view'], View)
        # The model-named alias must be the very same object as object_list.
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertIsNone(res.context['paginator'])
        self.assertIsNone(res.context['page_obj'])
        self.assertFalse(res.context['is_paginated'])

    def test_paginated_queryset(self):
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
        self.assertEqual(len(res.context['object_list']), 30)
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertTrue(res.context['is_paginated'])
        self.assertEqual(res.context['page_obj'].number, 1)
        self.assertEqual(res.context['paginator'].num_pages, 4)
        self.assertEqual(res.context['author_list'][0].name, 'Author 00')
        self.assertEqual(list(res.context['author_list'])[-1].name, 'Author 29')

    def test_paginated_queryset_shortdata(self):
        # Test that short datasets ALSO result in a paginated view.
        res = self.client.get('/list/authors/paginated/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
        self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertEqual(res.context['page_obj'].number, 1)
        self.assertEqual(res.context['paginator'].num_pages, 1)
        self.assertFalse(res.context['is_paginated'])

    def test_paginated_get_page_by_query_string(self):
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/', {'page': '2'})
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
        self.assertEqual(len(res.context['object_list']), 30)
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertEqual(res.context['author_list'][0].name, 'Author 30')
        self.assertEqual(res.context['page_obj'].number, 2)

    def test_paginated_get_last_page_by_query_string(self):
        # 'page=last' is a special token resolving to the final page.
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/', {'page': 'last'})
        self.assertEqual(res.status_code, 200)
        self.assertEqual(len(res.context['object_list']), 10)
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertEqual(res.context['author_list'][0].name, 'Author 90')
        self.assertEqual(res.context['page_obj'].number, 4)

    def test_paginated_get_page_by_urlvar(self):
        # The page number can also come from the URL pattern itself.
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/3/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
        self.assertEqual(len(res.context['object_list']), 30)
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertEqual(res.context['author_list'][0].name, 'Author 60')
        self.assertEqual(res.context['page_obj'].number, 3)

    def test_paginated_page_out_of_range(self):
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/42/')
        self.assertEqual(res.status_code, 404)

    def test_paginated_invalid_page(self):
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/?page=frog')
        self.assertEqual(res.status_code, 404)

    def test_paginated_custom_paginator_class(self):
        self._make_authors(7)
        res = self.client.get('/list/authors/paginated/custom_class/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['paginator'].num_pages, 1)
        # Custom pagination allows for 2 orphans on a page size of 5
        self.assertEqual(len(res.context['object_list']), 7)

    def test_paginated_custom_page_kwarg(self):
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/custom_page_kwarg/', {'pagina': '2'})
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
        self.assertEqual(len(res.context['object_list']), 30)
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertEqual(res.context['author_list'][0].name, 'Author 30')
        self.assertEqual(res.context['page_obj'].number, 2)

    def test_paginated_custom_paginator_constructor(self):
        self._make_authors(7)
        res = self.client.get('/list/authors/paginated/custom_constructor/')
        self.assertEqual(res.status_code, 200)
        # Custom pagination allows for 2 orphans on a page size of 5
        self.assertEqual(len(res.context['object_list']), 7)

    def test_paginated_orphaned_queryset(self):
        # 92 authors with orphan handling: last short page is merged.
        self._make_authors(92)
        res = self.client.get('/list/authors/paginated-orphaned/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['page_obj'].number, 1)
        res = self.client.get(
            '/list/authors/paginated-orphaned/', {'page': 'last'})
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['page_obj'].number, 3)
        res = self.client.get(
            '/list/authors/paginated-orphaned/', {'page': '3'})
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['page_obj'].number, 3)
        res = self.client.get(
            '/list/authors/paginated-orphaned/', {'page': '4'})
        self.assertEqual(res.status_code, 404)

    def test_paginated_non_queryset(self):
        res = self.client.get('/list/dict/paginated/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(len(res.context['object_list']), 1)

    def test_verbose_name(self):
        # Context alias falls back to the model's verbose_name.
        res = self.client.get('/list/artists/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/list.html')
        self.assertEqual(list(res.context['object_list']), list(Artist.objects.all()))
        self.assertIs(res.context['artist_list'], res.context['object_list'])
        self.assertIsNone(res.context['paginator'])
        self.assertIsNone(res.context['page_obj'])
        self.assertFalse(res.context['is_paginated'])

    def test_allow_empty_false(self):
        # allow_empty=False turns an empty result set into a 404.
        res = self.client.get('/list/authors/notempty/')
        self.assertEqual(res.status_code, 200)
        Author.objects.all().delete()
        res = self.client.get('/list/authors/notempty/')
        self.assertEqual(res.status_code, 404)

    def test_template_name(self):
        res = self.client.get('/list/authors/template_name/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertTemplateUsed(res, 'generic_views/list.html')

    def test_template_name_suffix(self):
        res = self.client.get('/list/authors/template_name_suffix/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertTemplateUsed(res, 'generic_views/author_objects.html')

    def test_context_object_name(self):
        res = self.client.get('/list/authors/context_object_name/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
        self.assertNotIn('authors', res.context)
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertTemplateUsed(res, 'generic_views/author_list.html')

    def test_duplicate_context_object_name(self):
        res = self.client.get('/list/authors/dupe_context_object_name/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
        self.assertNotIn('authors', res.context)
        self.assertNotIn('author_list', res.context)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')

    def test_missing_items(self):
        # A view configured without a queryset/model must fail loudly.
        self.assertRaises(ImproperlyConfigured, self.client.get, '/list/authors/invalid/')

    def test_paginated_list_view_does_not_load_entire_table(self):
        # Regression test for #17535
        self._make_authors(3)
        # 1 query for authors
        with self.assertNumQueries(1):
            self.client.get('/list/authors/notempty/')
        # same as above + 1 query to test if authors exist + 1 query for pagination
        with self.assertNumQueries(3):
            self.client.get('/list/authors/notempty/paginated/')

    def test_explicitly_ordered_list_view(self):
        Book.objects.create(name="Zebras for Dummies", pages=800, pubdate=datetime.date(2006, 9, 1))
        res = self.client.get('/list/books/sorted/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['object_list'][0].name, '2066')
        self.assertEqual(res.context['object_list'][1].name, 'Dreaming in Code')
        self.assertEqual(res.context['object_list'][2].name, 'Zebras for Dummies')

        res = self.client.get('/list/books/sortedbypagesandnamedec/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['object_list'][0].name, 'Dreaming in Code')
        self.assertEqual(res.context['object_list'][1].name, 'Zebras for Dummies')
        self.assertEqual(res.context['object_list'][2].name, '2066')

    @override_settings(DEBUG=True)
    def test_paginated_list_view_returns_useful_message_on_invalid_page(self):
        # test for #19240
        # tests that source exception's message is included in page
        self._make_authors(1)
        res = self.client.get('/list/authors/paginated/2/')
        self.assertEqual(res.status_code, 404)
        self.assertEqual(force_str(res.context.get('reason')),
                         "Invalid page (2): That page contains no results")

    def _make_authors(self, n):
        # Replace the fixture authors with n predictably-named ones
        # ('Author 00' .. 'Author NN') so page contents are deterministic.
        Author.objects.all().delete()
        for i in range(n):
            Author.objects.create(name='Author %02i' % i, slug='a%s' % i)
LumaPictures/rez
refs/heads/pull_requests
src/rez/vendor/unittest2/case.py
9
"""Test case implementation""" import sys import difflib import pprint import re import unittest import warnings from rez.vendor.unittest2 import result from rez.vendor.unittest2.util import ( safe_repr, safe_str, strclass, unorderable_list_difference ) from rez.vendor.unittest2.compatibility import wraps __unittest = True DIFF_OMITTED = ('\nDiff is %s characters long. ' 'Set self.maxDiff to None to see it.') class SkipTest(Exception): """ Raise this exception in a test to skip it. Usually you can use TestResult.skip() or one of the skipping decorators instead of raising this directly. """ class _ExpectedFailure(Exception): """ Raise this when a test is expected to fail. This is an implementation detail. """ def __init__(self, exc_info): # can't use super because Python 2.4 exceptions are old style Exception.__init__(self) self.exc_info = exc_info class _UnexpectedSuccess(Exception): """ The test was supposed to fail, but it didn't! """ def _id(obj): return obj def skip(reason): """ Unconditionally skip a test. """ def decorator(test_item): if not (isinstance(test_item, type) and issubclass(test_item, TestCase)): @wraps(test_item) def skip_wrapper(*args, **kwargs): raise SkipTest(reason) test_item = skip_wrapper test_item.__unittest_skip__ = True test_item.__unittest_skip_why__ = reason return test_item return decorator def skipIf(condition, reason): """ Skip a test if the condition is true. """ if condition: return skip(reason) return _id def skipUnless(condition, reason): """ Skip a test unless the condition is true. 
""" if not condition: return skip(reason) return _id def expectedFailure(func): @wraps(func) def wrapper(*args, **kwargs): try: func(*args, **kwargs) except Exception: raise _ExpectedFailure(sys.exc_info()) raise _UnexpectedSuccess return wrapper class _AssertRaisesContext(object): """A context manager used to implement TestCase.assertRaises* methods.""" def __init__(self, expected, test_case, expected_regexp=None): self.expected = expected self.failureException = test_case.failureException self.expected_regexp = expected_regexp def __enter__(self): return self def __exit__(self, exc_type, exc_value, tb): if exc_type is None: try: exc_name = self.expected.__name__ except AttributeError: exc_name = str(self.expected) raise self.failureException( "%s not raised" % (exc_name,)) if not issubclass(exc_type, self.expected): # let unexpected exceptions pass through return False self.exception = exc_value # store for later retrieval if self.expected_regexp is None: return True expected_regexp = self.expected_regexp if isinstance(expected_regexp, basestring): expected_regexp = re.compile(expected_regexp) if not expected_regexp.search(str(exc_value)): raise self.failureException('"%s" does not match "%s"' % (expected_regexp.pattern, str(exc_value))) return True class _TypeEqualityDict(object): def __init__(self, testcase): self.testcase = testcase self._store = {} def __setitem__(self, key, value): self._store[key] = value def __getitem__(self, key): value = self._store[key] if isinstance(value, basestring): return getattr(self.testcase, value) return value def get(self, key, default=None): if key in self._store: return self[key] return default class TestCase(unittest.TestCase): """A class whose instances are single test cases. By default, the test code itself should be placed in a method named 'runTest'. If the fixture may be used for many test cases, create as many test methods as are needed. 
When instantiating such a TestCase subclass, specify in the constructor arguments the name of the test method that the instance is to execute. Test authors should subclass TestCase for their own tests. Construction and deconstruction of the test's environment ('fixture') can be implemented by overriding the 'setUp' and 'tearDown' methods respectively. If it is necessary to override the __init__ method, the base class __init__ method must always be called. It is important that subclasses should not change the signature of their __init__ method, since instances of the classes are instantiated automatically by parts of the framework in order to be run. """ # This attribute determines which exception will be raised when # the instance's assertion methods fail; test methods raising this # exception will be deemed to have 'failed' rather than 'errored' failureException = AssertionError # This attribute sets the maximum length of a diff in failure messages # by assert methods using difflib. It is looked up as an instance attribute # so can be configured by individual tests if required. maxDiff = 80*8 # This attribute determines whether long messages (including repr of # objects used in assert methods) will be printed on failure in *addition* # to any explicit message passed. longMessage = True # Attribute used by TestSuite for classSetUp _classSetupFailed = False def __init__(self, methodName='runTest'): """Create an instance of the class that will use the named test method when executed. Raises a ValueError if the instance does not have a method with the specified name. 
""" self._testMethodName = methodName self._resultForDoCleanups = None try: testMethod = getattr(self, methodName) except AttributeError: raise ValueError("no such test method in %s: %s" % \ (self.__class__, methodName)) self._testMethodDoc = testMethod.__doc__ self._cleanups = [] # Map types to custom assertEqual functions that will compare # instances of said type in more detail to generate a more useful # error message. self._type_equality_funcs = _TypeEqualityDict(self) self.addTypeEqualityFunc(dict, 'assertDictEqual') self.addTypeEqualityFunc(list, 'assertListEqual') self.addTypeEqualityFunc(tuple, 'assertTupleEqual') self.addTypeEqualityFunc(set, 'assertSetEqual') self.addTypeEqualityFunc(frozenset, 'assertSetEqual') self.addTypeEqualityFunc(unicode, 'assertMultiLineEqual') def addTypeEqualityFunc(self, typeobj, function): """Add a type specific assertEqual style function to compare a type. This method is for use by TestCase subclasses that need to register their own type equality functions to provide nicer error messages. Args: typeobj: The data type to call this function on when both values are of the same type in assertEqual(). function: The callable taking two arguments and an optional msg= argument that raises self.failureException with a useful error message when the two arguments are not equal. """ self._type_equality_funcs[typeobj] = function def addCleanup(self, function, *args, **kwargs): """Add a function, with arguments, to be called when the test is completed. Functions added are called on a LIFO basis and are called after tearDown on test failure or success. Cleanup items are called even if setUp fails (unlike tearDown).""" self._cleanups.append((function, args, kwargs)) def setUp(self): "Hook method for setting up the test fixture before exercising it." @classmethod def setUpClass(cls): "Hook method for setting up class fixture before running tests in the class." 
@classmethod def tearDownClass(cls): "Hook method for deconstructing the class fixture after running all tests in the class." def tearDown(self): "Hook method for deconstructing the test fixture after testing it." def countTestCases(self): return 1 def defaultTestResult(self): return result.TestResult() def shortDescription(self): """Returns a one-line description of the test, or None if no description has been provided. The default implementation of this method returns the first line of the specified test method's docstring. """ doc = self._testMethodDoc return doc and doc.split("\n")[0].strip() or None def id(self): return "%s.%s" % (strclass(self.__class__), self._testMethodName) def __eq__(self, other): if type(self) is not type(other): return NotImplemented return self._testMethodName == other._testMethodName def __ne__(self, other): return not self == other def __hash__(self): return hash((type(self), self._testMethodName)) def __str__(self): return "%s (%s)" % (self._testMethodName, strclass(self.__class__)) def __repr__(self): return "<%s testMethod=%s>" % \ (strclass(self.__class__), self._testMethodName) def _addSkip(self, result, reason): addSkip = getattr(result, 'addSkip', None) if addSkip is not None: addSkip(self, reason) else: warnings.warn("Use of a TestResult without an addSkip method is deprecated", DeprecationWarning, 2) result.addSuccess(self) def run(self, result=None): orig_result = result if result is None: result = self.defaultTestResult() startTestRun = getattr(result, 'startTestRun', None) if startTestRun is not None: startTestRun() self._resultForDoCleanups = result result.startTest(self) testMethod = getattr(self, self._testMethodName) if (getattr(self.__class__, "__unittest_skip__", False) or getattr(testMethod, "__unittest_skip__", False)): # If the class or method was skipped. 
try: skip_why = (getattr(self.__class__, '__unittest_skip_why__', '') or getattr(testMethod, '__unittest_skip_why__', '')) self._addSkip(result, skip_why) finally: result.stopTest(self) return try: success = False try: self.setUp() except SkipTest, e: self._addSkip(result, str(e)) except Exception: result.addError(self, sys.exc_info()) else: try: testMethod() except self.failureException: result.addFailure(self, sys.exc_info()) except _ExpectedFailure, e: addExpectedFailure = getattr(result, 'addExpectedFailure', None) if addExpectedFailure is not None: addExpectedFailure(self, e.exc_info) else: warnings.warn("Use of a TestResult without an addExpectedFailure method is deprecated", DeprecationWarning) result.addSuccess(self) except _UnexpectedSuccess: addUnexpectedSuccess = getattr(result, 'addUnexpectedSuccess', None) if addUnexpectedSuccess is not None: addUnexpectedSuccess(self) else: warnings.warn("Use of a TestResult without an addUnexpectedSuccess method is deprecated", DeprecationWarning) result.addFailure(self, sys.exc_info()) except SkipTest, e: self._addSkip(result, str(e)) except Exception: result.addError(self, sys.exc_info()) else: success = True try: self.tearDown() except Exception: result.addError(self, sys.exc_info()) success = False cleanUpSuccess = self.doCleanups() success = success and cleanUpSuccess if success: result.addSuccess(self) finally: result.stopTest(self) if orig_result is None: stopTestRun = getattr(result, 'stopTestRun', None) if stopTestRun is not None: stopTestRun() def doCleanups(self): """Execute all cleanup functions. 
Normally called for you after tearDown.""" result = self._resultForDoCleanups ok = True while self._cleanups: function, args, kwargs = self._cleanups.pop(-1) try: function(*args, **kwargs) except Exception: ok = False result.addError(self, sys.exc_info()) return ok def __call__(self, *args, **kwds): return self.run(*args, **kwds) def debug(self): """Run the test without collecting errors in a TestResult""" self.setUp() getattr(self, self._testMethodName)() self.tearDown() while self._cleanups: function, args, kwargs = self._cleanups.pop(-1) function(*args, **kwargs) def skipTest(self, reason): """Skip this test.""" raise SkipTest(reason) def fail(self, msg=None): """Fail immediately, with the given message.""" raise self.failureException(msg) def assertFalse(self, expr, msg=None): "Fail the test if the expression is true." if expr: msg = self._formatMessage(msg, "%s is not False" % safe_repr(expr)) raise self.failureException(msg) def assertTrue(self, expr, msg=None): """Fail the test unless the expression is true.""" if not expr: msg = self._formatMessage(msg, "%s is not True" % safe_repr(expr)) raise self.failureException(msg) def _formatMessage(self, msg, standardMsg): """Honour the longMessage attribute when generating failure messages. If longMessage is False this means: * Use only an explicit message if it is provided * Otherwise use the standard message for the assert If longMessage is True: * Use the standard message * If an explicit message is provided, plus ' : ' and the explicit message """ if not self.longMessage: return msg or standardMsg if msg is None: return standardMsg try: return '%s : %s' % (standardMsg, msg) except UnicodeDecodeError: return '%s : %s' % (safe_str(standardMsg), safe_str(msg)) def assertRaises(self, excClass, callableObj=None, *args, **kwargs): """Fail unless an exception of class excClass is thrown by callableObj when invoked with arguments args and keyword arguments kwargs. 
If a different type of exception is thrown, it will not be caught, and the test case will be deemed to have suffered an error, exactly as for an unexpected exception. If called with callableObj omitted or None, will return a context object used like this:: with self.assertRaises(SomeException): do_something() The context manager keeps a reference to the exception as the 'exception' attribute. This allows you to inspect the exception after the assertion:: with self.assertRaises(SomeException) as cm: do_something() the_exception = cm.exception self.assertEqual(the_exception.error_code, 3) """ if callableObj is None: return _AssertRaisesContext(excClass, self) try: callableObj(*args, **kwargs) except excClass: return if hasattr(excClass,'__name__'): excName = excClass.__name__ else: excName = str(excClass) raise self.failureException, "%s not raised" % excName def _getAssertEqualityFunc(self, first, second): """Get a detailed comparison function for the types of the two args. Returns: A callable accepting (first, second, msg=None) that will raise a failure exception if first != second with a useful human readable error message for those types. """ # # NOTE(gregory.p.smith): I considered isinstance(first, type(second)) # and vice versa. I opted for the conservative approach in case # subclasses are not intended to be compared in detail to their super # class instances using a type equality func. This means testing # subtypes won't automagically use the detailed comparison. Callers # should use their type specific assertSpamEqual method to compare # subclasses if the detailed comparison is desired and appropriate. # See the discussion in http://bugs.python.org/issue2578. 
# if type(first) is type(second): asserter = self._type_equality_funcs.get(type(first)) if asserter is not None: return asserter return self._baseAssertEqual def _baseAssertEqual(self, first, second, msg=None): """The default assertEqual implementation, not type specific.""" if not first == second: standardMsg = '%s != %s' % (safe_repr(first), safe_repr(second)) msg = self._formatMessage(msg, standardMsg) raise self.failureException(msg) def assertEqual(self, first, second, msg=None): """Fail if the two objects are unequal as determined by the '==' operator. """ assertion_func = self._getAssertEqualityFunc(first, second) assertion_func(first, second, msg=msg) def assertNotEqual(self, first, second, msg=None): """Fail if the two objects are equal as determined by the '==' operator. """ if not first != second: msg = self._formatMessage(msg, '%s == %s' % (safe_repr(first), safe_repr(second))) raise self.failureException(msg) def assertAlmostEqual(self, first, second, places=None, msg=None, delta=None): """Fail if the two objects are unequal as determined by their difference rounded to the given number of decimal places (default 7) and comparing to zero, or by comparing that the between the two objects is more than the given delta. Note that decimal places (from zero) are usually not the same as significant digits (measured from the most signficant digit). If the two objects compare equal then they will automatically compare almost equal. 
""" if first == second: # shortcut return if delta is not None and places is not None: raise TypeError("specify delta or places not both") if delta is not None: if abs(first - second) <= delta: return standardMsg = '%s != %s within %s delta' % (safe_repr(first), safe_repr(second), safe_repr(delta)) else: if places is None: places = 7 if round(abs(second-first), places) == 0: return standardMsg = '%s != %s within %r places' % (safe_repr(first), safe_repr(second), places) msg = self._formatMessage(msg, standardMsg) raise self.failureException(msg) def assertNotAlmostEqual(self, first, second, places=None, msg=None, delta=None): """Fail if the two objects are equal as determined by their difference rounded to the given number of decimal places (default 7) and comparing to zero, or by comparing that the between the two objects is less than the given delta. Note that decimal places (from zero) are usually not the same as significant digits (measured from the most signficant digit). Objects that are equal automatically fail. """ if delta is not None and places is not None: raise TypeError("specify delta or places not both") if delta is not None: if not (first == second) and abs(first - second) > delta: return standardMsg = '%s == %s within %s delta' % (safe_repr(first), safe_repr(second), safe_repr(delta)) else: if places is None: places = 7 if not (first == second) and round(abs(second-first), places) != 0: return standardMsg = '%s == %s within %r places' % (safe_repr(first), safe_repr(second), places) msg = self._formatMessage(msg, standardMsg) raise self.failureException(msg) # Synonyms for assertion methods # The plurals are undocumented. Keep them that way to discourage use. # Do not add more. Do not remove. # Going through a deprecation cycle on these would annoy many people. 
assertEquals = assertEqual assertNotEquals = assertNotEqual assertAlmostEquals = assertAlmostEqual assertNotAlmostEquals = assertNotAlmostEqual assert_ = assertTrue # These fail* assertion method names are pending deprecation and will # be a DeprecationWarning in 3.2; http://bugs.python.org/issue2578 def _deprecate(original_func): def deprecated_func(*args, **kwargs): warnings.warn( ('Please use %s instead.' % original_func.__name__), PendingDeprecationWarning, 2) return original_func(*args, **kwargs) return deprecated_func failUnlessEqual = _deprecate(assertEqual) failIfEqual = _deprecate(assertNotEqual) failUnlessAlmostEqual = _deprecate(assertAlmostEqual) failIfAlmostEqual = _deprecate(assertNotAlmostEqual) failUnless = _deprecate(assertTrue) failUnlessRaises = _deprecate(assertRaises) failIf = _deprecate(assertFalse) def assertSequenceEqual(self, seq1, seq2, msg=None, seq_type=None, max_diff=80*8): """An equality assertion for ordered sequences (like lists and tuples). For the purposes of this function, a valid ordered sequence type is one which can be indexed, has a length, and has an equality operator. Args: seq1: The first sequence to compare. seq2: The second sequence to compare. seq_type: The expected datatype of the sequences, or None if no datatype should be enforced. msg: Optional message to use on failure instead of a list of differences. max_diff: Maximum size off the diff, larger diffs are not shown """ if seq_type is not None: seq_type_name = seq_type.__name__ if not isinstance(seq1, seq_type): raise self.failureException('First sequence is not a %s: %s' % (seq_type_name, safe_repr(seq1))) if not isinstance(seq2, seq_type): raise self.failureException('Second sequence is not a %s: %s' % (seq_type_name, safe_repr(seq2))) else: seq_type_name = "sequence" differing = None try: len1 = len(seq1) except (TypeError, NotImplementedError): differing = 'First %s has no length. Non-sequence?' 
% ( seq_type_name) if differing is None: try: len2 = len(seq2) except (TypeError, NotImplementedError): differing = 'Second %s has no length. Non-sequence?' % ( seq_type_name) if differing is None: if seq1 == seq2: return seq1_repr = repr(seq1) seq2_repr = repr(seq2) if len(seq1_repr) > 30: seq1_repr = seq1_repr[:30] + '...' if len(seq2_repr) > 30: seq2_repr = seq2_repr[:30] + '...' elements = (seq_type_name.capitalize(), seq1_repr, seq2_repr) differing = '%ss differ: %s != %s\n' % elements for i in xrange(min(len1, len2)): try: item1 = seq1[i] except (TypeError, IndexError, NotImplementedError): differing += ('\nUnable to index element %d of first %s\n' % (i, seq_type_name)) break try: item2 = seq2[i] except (TypeError, IndexError, NotImplementedError): differing += ('\nUnable to index element %d of second %s\n' % (i, seq_type_name)) break if item1 != item2: differing += ('\nFirst differing element %d:\n%s\n%s\n' % (i, item1, item2)) break else: if (len1 == len2 and seq_type is None and type(seq1) != type(seq2)): # The sequences are the same, but have differing types. 
return if len1 > len2: differing += ('\nFirst %s contains %d additional ' 'elements.\n' % (seq_type_name, len1 - len2)) try: differing += ('First extra element %d:\n%s\n' % (len2, seq1[len2])) except (TypeError, IndexError, NotImplementedError): differing += ('Unable to index element %d ' 'of first %s\n' % (len2, seq_type_name)) elif len1 < len2: differing += ('\nSecond %s contains %d additional ' 'elements.\n' % (seq_type_name, len2 - len1)) try: differing += ('First extra element %d:\n%s\n' % (len1, seq2[len1])) except (TypeError, IndexError, NotImplementedError): differing += ('Unable to index element %d ' 'of second %s\n' % (len1, seq_type_name)) standardMsg = differing diffMsg = '\n' + '\n'.join( difflib.ndiff(pprint.pformat(seq1).splitlines(), pprint.pformat(seq2).splitlines())) standardMsg = self._truncateMessage(standardMsg, diffMsg) msg = self._formatMessage(msg, standardMsg) self.fail(msg) def _truncateMessage(self, message, diff): max_diff = self.maxDiff if max_diff is None or len(diff) <= max_diff: return message + diff return message + (DIFF_OMITTED % len(diff)) def assertListEqual(self, list1, list2, msg=None): """A list-specific equality assertion. Args: list1: The first list to compare. list2: The second list to compare. msg: Optional message to use on failure instead of a list of differences. """ self.assertSequenceEqual(list1, list2, msg, seq_type=list) def assertTupleEqual(self, tuple1, tuple2, msg=None): """A tuple-specific equality assertion. Args: tuple1: The first tuple to compare. tuple2: The second tuple to compare. msg: Optional message to use on failure instead of a list of differences. """ self.assertSequenceEqual(tuple1, tuple2, msg, seq_type=tuple) def assertSetEqual(self, set1, set2, msg=None): """A set-specific equality assertion. Args: set1: The first set to compare. set2: The second set to compare. msg: Optional message to use on failure instead of a list of differences. 
assertSetEqual uses ducktyping to support different types of sets, and is optimized for sets specifically (parameters must support a difference method). """ try: difference1 = set1.difference(set2) except TypeError, e: self.fail('invalid type when attempting set difference: %s' % e) except AttributeError, e: self.fail('first argument does not support set difference: %s' % e) try: difference2 = set2.difference(set1) except TypeError, e: self.fail('invalid type when attempting set difference: %s' % e) except AttributeError, e: self.fail('second argument does not support set difference: %s' % e) if not (difference1 or difference2): return lines = [] if difference1: lines.append('Items in the first set but not the second:') for item in difference1: lines.append(repr(item)) if difference2: lines.append('Items in the second set but not the first:') for item in difference2: lines.append(repr(item)) standardMsg = '\n'.join(lines) self.fail(self._formatMessage(msg, standardMsg)) def assertIn(self, member, container, msg=None): """Just like self.assertTrue(a in b), but with a nicer default message.""" if member not in container: standardMsg = '%s not found in %s' % (safe_repr(member), safe_repr(container)) self.fail(self._formatMessage(msg, standardMsg)) def assertNotIn(self, member, container, msg=None): """Just like self.assertTrue(a not in b), but with a nicer default message.""" if member in container: standardMsg = '%s unexpectedly found in %s' % (safe_repr(member), safe_repr(container)) self.fail(self._formatMessage(msg, standardMsg)) def assertIs(self, expr1, expr2, msg=None): """Just like self.assertTrue(a is b), but with a nicer default message.""" if expr1 is not expr2: standardMsg = '%s is not %s' % (safe_repr(expr1), safe_repr(expr2)) self.fail(self._formatMessage(msg, standardMsg)) def assertIsNot(self, expr1, expr2, msg=None): """Just like self.assertTrue(a is not b), but with a nicer default message.""" if expr1 is expr2: standardMsg = 'unexpectedly identical: 
%s' % (safe_repr(expr1),) self.fail(self._formatMessage(msg, standardMsg)) def assertDictEqual(self, d1, d2, msg=None): self.assert_(isinstance(d1, dict), 'First argument is not a dictionary') self.assert_(isinstance(d2, dict), 'Second argument is not a dictionary') if d1 != d2: standardMsg = '%s != %s' % (safe_repr(d1, True), safe_repr(d2, True)) diff = ('\n' + '\n'.join(difflib.ndiff( pprint.pformat(d1).splitlines(), pprint.pformat(d2).splitlines()))) standardMsg = self._truncateMessage(standardMsg, diff) self.fail(self._formatMessage(msg, standardMsg)) def assertDictContainsSubset(self, expected, actual, msg=None): """Checks whether actual is a superset of expected.""" missing = [] mismatched = [] for key, value in expected.iteritems(): if key not in actual: missing.append(key) elif value != actual[key]: mismatched.append('%s, expected: %s, actual: %s' % (safe_repr(key), safe_repr(value), safe_repr(actual[key]))) if not (missing or mismatched): return standardMsg = '' if missing: standardMsg = 'Missing: %s' % ','.join(safe_repr(m) for m in missing) if mismatched: if standardMsg: standardMsg += '; ' standardMsg += 'Mismatched values: %s' % ','.join(mismatched) self.fail(self._formatMessage(msg, standardMsg)) def assertItemsEqual(self, expected_seq, actual_seq, msg=None): """An unordered sequence specific comparison. It asserts that expected_seq and actual_seq contain the same elements. It is the equivalent of:: self.assertEqual(sorted(expected_seq), sorted(actual_seq)) Raises with an error message listing which elements of expected_seq are missing from actual_seq and vice versa if any. Asserts that each element has the same count in both sequences. Example: - [0, 1, 1] and [1, 0, 1] compare equal. - [0, 0, 1] and [0, 1] compare unequal. """ try: expected = sorted(expected_seq) actual = sorted(actual_seq) except TypeError: # Unsortable items (example: set(), complex(), ...) 
expected = list(expected_seq) actual = list(actual_seq) missing, unexpected = unorderable_list_difference( expected, actual, ignore_duplicate=False ) else: return self.assertSequenceEqual(expected, actual, msg=msg) errors = [] if missing: errors.append('Expected, but missing:\n %s' % safe_repr(missing)) if unexpected: errors.append('Unexpected, but present:\n %s' % safe_repr(unexpected)) if errors: standardMsg = '\n'.join(errors) self.fail(self._formatMessage(msg, standardMsg)) def assertMultiLineEqual(self, first, second, msg=None): """Assert that two multi-line strings are equal.""" self.assert_(isinstance(first, basestring), ( 'First argument is not a string')) self.assert_(isinstance(second, basestring), ( 'Second argument is not a string')) if first != second: standardMsg = '%s != %s' % (safe_repr(first, True), safe_repr(second, True)) diff = '\n' + ''.join(difflib.ndiff(first.splitlines(True), second.splitlines(True))) standardMsg = self._truncateMessage(standardMsg, diff) self.fail(self._formatMessage(msg, standardMsg)) def assertLess(self, a, b, msg=None): """Just like self.assertTrue(a < b), but with a nicer default message.""" if not a < b: standardMsg = '%s not less than %s' % (safe_repr(a), safe_repr(b)) self.fail(self._formatMessage(msg, standardMsg)) def assertLessEqual(self, a, b, msg=None): """Just like self.assertTrue(a <= b), but with a nicer default message.""" if not a <= b: standardMsg = '%s not less than or equal to %s' % (safe_repr(a), safe_repr(b)) self.fail(self._formatMessage(msg, standardMsg)) def assertGreater(self, a, b, msg=None): """Just like self.assertTrue(a > b), but with a nicer default message.""" if not a > b: standardMsg = '%s not greater than %s' % (safe_repr(a), safe_repr(b)) self.fail(self._formatMessage(msg, standardMsg)) def assertGreaterEqual(self, a, b, msg=None): """Just like self.assertTrue(a >= b), but with a nicer default message.""" if not a >= b: standardMsg = '%s not greater than or equal to %s' % (safe_repr(a), 
safe_repr(b)) self.fail(self._formatMessage(msg, standardMsg)) def assertIsNone(self, obj, msg=None): """Same as self.assertTrue(obj is None), with a nicer default message.""" if obj is not None: standardMsg = '%s is not None' % (safe_repr(obj),) self.fail(self._formatMessage(msg, standardMsg)) def assertIsNotNone(self, obj, msg=None): """Included for symmetry with assertIsNone.""" if obj is None: standardMsg = 'unexpectedly None' self.fail(self._formatMessage(msg, standardMsg)) def assertIsInstance(self, obj, cls, msg=None): """Same as self.assertTrue(isinstance(obj, cls)), with a nicer default message.""" if not isinstance(obj, cls): standardMsg = '%s is not an instance of %r' % (safe_repr(obj), cls) self.fail(self._formatMessage(msg, standardMsg)) def assertNotIsInstance(self, obj, cls, msg=None): """Included for symmetry with assertIsInstance.""" if isinstance(obj, cls): standardMsg = '%s is an instance of %r' % (safe_repr(obj), cls) self.fail(self._formatMessage(msg, standardMsg)) def assertRaisesRegexp(self, expected_exception, expected_regexp, callable_obj=None, *args, **kwargs): """Asserts that the message in a raised exception matches a regexp. Args: expected_exception: Exception class expected to be raised. expected_regexp: Regexp (re pattern object or string) expected to be found in error message. callable_obj: Function to be called. args: Extra args. kwargs: Extra kwargs. 
""" if callable_obj is None: return _AssertRaisesContext(expected_exception, self, expected_regexp) try: callable_obj(*args, **kwargs) except expected_exception, exc_value: if isinstance(expected_regexp, basestring): expected_regexp = re.compile(expected_regexp) if not expected_regexp.search(str(exc_value)): raise self.failureException('"%s" does not match "%s"' % (expected_regexp.pattern, str(exc_value))) else: if hasattr(expected_exception, '__name__'): excName = expected_exception.__name__ else: excName = str(expected_exception) raise self.failureException, "%s not raised" % excName def assertRegexpMatches(self, text, expected_regexp, msg=None): """Fail the test unless the text matches the regular expression.""" if isinstance(expected_regexp, basestring): expected_regexp = re.compile(expected_regexp) if not expected_regexp.search(text): msg = msg or "Regexp didn't match" msg = '%s: %r not found in %r' % (msg, expected_regexp.pattern, text) raise self.failureException(msg) def assertNotRegexpMatches(self, text, unexpected_regexp, msg=None): """Fail the test if the text matches the regular expression.""" if isinstance(unexpected_regexp, basestring): unexpected_regexp = re.compile(unexpected_regexp) match = unexpected_regexp.search(text) if match: msg = msg or "Regexp matched" msg = '%s: %r matches %r in %r' % (msg, text[match.start():match.end()], unexpected_regexp.pattern, text) raise self.failureException(msg) class FunctionTestCase(TestCase): """A test case that wraps a test function. This is useful for slipping pre-existing test functions into the unittest framework. Optionally, set-up and tidy-up functions can be supplied. As with TestCase, the tidy-up ('tearDown') function will always be called if the set-up ('setUp') function ran successfully. 
""" def __init__(self, testFunc, setUp=None, tearDown=None, description=None): super(FunctionTestCase, self).__init__() self._setUpFunc = setUp self._tearDownFunc = tearDown self._testFunc = testFunc self._description = description def setUp(self): if self._setUpFunc is not None: self._setUpFunc() def tearDown(self): if self._tearDownFunc is not None: self._tearDownFunc() def runTest(self): self._testFunc() def id(self): return self._testFunc.__name__ def __eq__(self, other): if not isinstance(other, self.__class__): return NotImplemented return self._setUpFunc == other._setUpFunc and \ self._tearDownFunc == other._tearDownFunc and \ self._testFunc == other._testFunc and \ self._description == other._description def __ne__(self, other): return not self == other def __hash__(self): return hash((type(self), self._setUpFunc, self._tearDownFunc, self._testFunc, self._description)) def __str__(self): return "%s (%s)" % (strclass(self.__class__), self._testFunc.__name__) def __repr__(self): return "<%s testFunc=%s>" % (strclass(self.__class__), self._testFunc) def shortDescription(self): if self._description is not None: return self._description doc = self._testFunc.__doc__ return doc and doc.split("\n")[0].strip() or None
cthrax/python-rtsp
refs/heads/master
capture-sdp.py
1
from optparse import OptionParser from twisted.internet import reactor from rtsp import RTSPClient, RTSPClientFactory from twisted.python import failure, log import signal import sys # http://blogmag.net/blog/read/38/Print_human_readable_file_size def sizeof_fmt(num): for x in ['bytes','KB','MB','GB','TB']: if num < 1024.0: return "%3.1f%s" % (num, x) num /= 1024.0 def success(result): print "success" reactor.stop() def error(failure): print("Failure: %s" % failure.getErrorMessage()) reactor.stop() def progress(factory): print('Downloaded %s' % sizeof_fmt(factory.data_received)) reactor.callLater(1, progress, factory) class SigHandler(): factory = None def setFactory(self, factory): self.factory = factory def sighandler(self, signum, frame): if (signum == signal.SIGINT): print("Received sigint, terminating stream.") self.factory.client.sendNextMessage() reactor.stop() if __name__ == '__main__': parser = OptionParser() parser.add_option('-u', '', dest='url', help='url to download', metavar='URL') parser.add_option('-f', '', dest='file', help='file to save to', metavar='FILENAME') options, args = parser.parse_args() if options.url is None: print('You must enter a url to download\n') parser.print_help() exit() if not options.file: options.file = re.search('[^/]*$', options.url).group(0) if not options.file or len(options.file) < 1: print('Invalid file name specified\n') parser.print_help() exit() sighandler = SigHandler() signal.signal(signal.SIGHUP | signal.SIGINT, sighandler.sighandler) log.startLogging(sys.stdout) factory = RTSPClientFactory(options.url, options.file) sighandler.setFactory(factory) factory.bandwidth = 99999999999 factory.deferred.addCallback(success).addErrback(error) reactor.connectTCP(factory.host, factory.port, factory) reactor.callLater(1, progress, factory) reactor.run()
raccoongang/edx-platform
refs/heads/ginkgo-rg
lms/djangoapps/django_comment_client/base/views.py
2
import functools import json import logging import random import time import urlparse from django.contrib.auth.decorators import login_required from django.contrib.auth.models import User from django.core import exceptions from django.http import Http404, HttpResponse, HttpResponseServerError from django.utils.translation import ugettext as _ from django.views.decorators import csrf from django.views.decorators.http import require_GET, require_POST from opaque_keys.edx.keys import CourseKey import django_comment_client.settings as cc_settings import lms.lib.comment_client as cc from courseware.access import has_access from courseware.courses import get_course_by_id, get_course_overview_with_access, get_course_with_access from django_comment_client.permissions import check_permissions_by_view, get_team, has_permission from django_comment_client.utils import ( JsonError, JsonResponse, add_courseware_context, discussion_category_id_access, get_ability, get_annotated_content_info, get_cached_discussion_id_map, get_group_id_for_comments_service, is_comment_too_deep, prepare_content ) from django_comment_common.signals import ( comment_created, comment_deleted, comment_edited, comment_endorsed, comment_voted, thread_created, thread_deleted, thread_edited, thread_voted ) from django_comment_common.utils import ThreadContext from eventtracking import tracker from lms.djangoapps.courseware.exceptions import CourseAccessRedirect from util.file import store_uploaded_file log = logging.getLogger(__name__) TRACKING_MAX_FORUM_BODY = 2000 TRACKING_MAX_FORUM_TITLE = 1000 _EVENT_NAME_TEMPLATE = 'edx.forum.{obj_type}.{action_name}' def track_forum_event(request, event_name, course, obj, data, id_map=None): """ Send out an analytics event when a forum event happens. Works for threads, responses to threads, and comments on those responses. 
""" user = request.user data['id'] = obj.id commentable_id = data['commentable_id'] team = get_team(commentable_id) if team is not None: data.update(team_id=team.team_id) if id_map is None: id_map = get_cached_discussion_id_map(course, [commentable_id], user) if commentable_id in id_map: data['category_name'] = id_map[commentable_id]["title"] data['category_id'] = commentable_id data['url'] = request.META.get('HTTP_REFERER', '') data['user_forums_roles'] = [ role.name for role in user.roles.filter(course_id=course.id) ] data['user_course_roles'] = [ role.role for role in user.courseaccessrole_set.filter(course_id=course.id) ] tracker.emit(event_name, data) def track_created_event(request, event_name, course, obj, data): """ Send analytics event for a newly created thread, response or comment. """ if len(obj.body) > TRACKING_MAX_FORUM_BODY: data['truncated'] = True else: data['truncated'] = False data['body'] = obj.body[:TRACKING_MAX_FORUM_BODY] track_forum_event(request, event_name, course, obj, data) def add_truncated_title_to_event_data(event_data, full_title): event_data['title_truncated'] = (len(full_title) > TRACKING_MAX_FORUM_TITLE) event_data['title'] = full_title[:TRACKING_MAX_FORUM_TITLE] def track_thread_created_event(request, course, thread, followed): """ Send analytics event for a newly created thread. """ event_name = _EVENT_NAME_TEMPLATE.format(obj_type='thread', action_name='created') event_data = { 'commentable_id': thread.commentable_id, 'group_id': thread.get("group_id"), 'thread_type': thread.thread_type, 'anonymous': thread.anonymous, 'anonymous_to_peers': thread.anonymous_to_peers, 'options': {'followed': followed}, # There is a stated desire for an 'origin' property that will state # whether this thread was created via courseware or the forum. # However, the view does not contain that data, and including it will # likely require changes elsewhere. 
} add_truncated_title_to_event_data(event_data, thread.title) track_created_event(request, event_name, course, thread, event_data) def track_comment_created_event(request, course, comment, commentable_id, followed): """ Send analytics event for a newly created response or comment. """ obj_type = 'comment' if comment.get("parent_id") else 'response' event_name = _EVENT_NAME_TEMPLATE.format(obj_type=obj_type, action_name='created') event_data = { 'discussion': {'id': comment.thread_id}, 'commentable_id': commentable_id, 'options': {'followed': followed}, } parent_id = comment.get('parent_id') if parent_id: event_data['response'] = {'id': parent_id} track_created_event(request, event_name, course, comment, event_data) def track_voted_event(request, course, obj, vote_value, undo_vote=False): """ Send analytics event for a vote on a thread or response. """ if isinstance(obj, cc.Thread): obj_type = 'thread' else: obj_type = 'response' event_name = _EVENT_NAME_TEMPLATE.format(obj_type=obj_type, action_name='voted') event_data = { 'commentable_id': obj.commentable_id, 'target_username': obj.get('username'), 'undo_vote': undo_vote, 'vote_value': vote_value, } track_forum_event(request, event_name, course, obj, event_data) def permitted(func): """ View decorator to verify the user is authorized to access this endpoint. """ @functools.wraps(func) def wrapper(request, *args, **kwargs): """ Wrapper for the view that only calls the view if the user is authorized. """ def fetch_content(): """ Extract the forum object from the keyword arguments to the view. 
""" if "thread_id" in kwargs: content = cc.Thread.find(kwargs["thread_id"]).to_dict() elif "comment_id" in kwargs: content = cc.Comment.find(kwargs["comment_id"]).to_dict() elif "commentable_id" in kwargs: content = cc.Commentable.find(kwargs["commentable_id"]).to_dict() else: content = None return content course_key = CourseKey.from_string(kwargs['course_id']) if check_permissions_by_view(request.user, course_key, fetch_content(), request.view_name): return func(request, *args, **kwargs) else: return JsonError("unauthorized", status=401) return wrapper def ajax_content_response(request, course_key, content): """ Standard AJAX response returning the content hierarchy of the current thread. """ user_info = cc.User.from_django_user(request.user).to_dict() annotated_content_info = get_annotated_content_info(course_key, content, request.user, user_info) return JsonResponse({ 'content': prepare_content(content, course_key), 'annotated_content_info': annotated_content_info, }) @require_POST @login_required @permitted def create_thread(request, course_id, commentable_id): """ Given a course and commentble ID, create the thread """ log.debug("Creating new thread in %r, id %r", course_id, commentable_id) course_key = CourseKey.from_string(course_id) course = get_course_with_access(request.user, 'load', course_key) post = request.POST user = request.user if course.allow_anonymous: anonymous = post.get('anonymous', 'false').lower() == 'true' else: anonymous = False if course.allow_anonymous_to_peers: anonymous_to_peers = post.get('anonymous_to_peers', 'false').lower() == 'true' else: anonymous_to_peers = False if 'title' not in post or not post['title'].strip(): return JsonError(_("Title can't be empty")) if 'body' not in post or not post['body'].strip(): return JsonError(_("Body can't be empty")) params = { 'anonymous': anonymous, 'anonymous_to_peers': anonymous_to_peers, 'commentable_id': commentable_id, 'course_id': course_key.to_deprecated_string(), 'user_id': user.id, 
'thread_type': post["thread_type"], 'body': post["body"], 'title': post["title"], } # Check for whether this commentable belongs to a team, and add the right context if get_team(commentable_id) is not None: params['context'] = ThreadContext.STANDALONE else: params['context'] = ThreadContext.COURSE thread = cc.Thread(**params) # Divide the thread if required try: group_id = get_group_id_for_comments_service(request, course_key, commentable_id) except ValueError: return HttpResponseServerError("Invalid group id for commentable") if group_id is not None: thread.group_id = group_id thread.save() thread_created.send(sender=None, user=user, post=thread) # patch for backward compatibility to comments service if 'pinned' not in thread.attributes: thread['pinned'] = False follow = post.get('auto_subscribe', 'false').lower() == 'true' if follow: cc_user = cc.User.from_django_user(user) cc_user.follow(thread) data = thread.to_dict() add_courseware_context([data], course, user) track_thread_created_event(request, course, thread, follow) if request.is_ajax(): return ajax_content_response(request, course_key, data) else: return JsonResponse(prepare_content(data, course_key)) @require_POST @login_required @permitted def update_thread(request, course_id, thread_id): """ Given a course id and thread id, update a existing thread, used for both static and ajax submissions """ if 'title' not in request.POST or not request.POST['title'].strip(): return JsonError(_("Title can't be empty")) if 'body' not in request.POST or not request.POST['body'].strip(): return JsonError(_("Body can't be empty")) course_key = CourseKey.from_string(course_id) thread = cc.Thread.find(thread_id) # Get thread context first in order to be safe from reseting the values of thread object later thread_context = getattr(thread, "context", "course") thread.body = request.POST["body"] thread.title = request.POST["title"] user = request.user # The following checks should avoid issues we've seen during deploys, 
where end users are hitting an updated server # while their browser still has the old client code. This will avoid erasing present values in those cases. if "thread_type" in request.POST: thread.thread_type = request.POST["thread_type"] if "commentable_id" in request.POST: commentable_id = request.POST["commentable_id"] course = get_course_with_access(user, 'load', course_key) if thread_context == "course" and not discussion_category_id_access(course, user, commentable_id): return JsonError(_("Topic doesn't exist")) else: thread.commentable_id = commentable_id thread.save() thread_edited.send(sender=None, user=user, post=thread) if request.is_ajax(): return ajax_content_response(request, course_key, thread.to_dict()) else: return JsonResponse(prepare_content(thread.to_dict(), course_key)) def _create_comment(request, course_key, thread_id=None, parent_id=None): """ given a course_key, thread_id, and parent_id, create a comment, called from create_comment to do the actual creation """ assert isinstance(course_key, CourseKey) post = request.POST user = request.user if 'body' not in post or not post['body'].strip(): return JsonError(_("Body can't be empty")) course = get_course_with_access(user, 'load', course_key) if course.allow_anonymous: anonymous = post.get('anonymous', 'false').lower() == 'true' else: anonymous = False if course.allow_anonymous_to_peers: anonymous_to_peers = post.get('anonymous_to_peers', 'false').lower() == 'true' else: anonymous_to_peers = False comment = cc.Comment( anonymous=anonymous, anonymous_to_peers=anonymous_to_peers, user_id=user.id, course_id=course_key.to_deprecated_string(), thread_id=thread_id, parent_id=parent_id, body=post["body"] ) comment.save() comment_created.send(sender=None, user=user, post=comment) followed = post.get('auto_subscribe', 'false').lower() == 'true' if followed: cc_user = cc.User.from_django_user(request.user) cc_user.follow(comment.thread) track_comment_created_event(request, course, comment, 
comment.thread.commentable_id, followed) if request.is_ajax(): return ajax_content_response(request, course_key, comment.to_dict()) else: return JsonResponse(prepare_content(comment.to_dict(), course.id)) @require_POST @login_required @permitted def create_comment(request, course_id, thread_id): """ given a course_id and thread_id, test for comment depth. if not too deep, call _create_comment to create the actual comment. """ if is_comment_too_deep(parent=None): return JsonError(_("Comment level too deep")) return _create_comment(request, CourseKey.from_string(course_id), thread_id=thread_id) @require_POST @login_required @permitted def delete_thread(request, course_id, thread_id): """ given a course_id and thread_id, delete this thread this is ajax only """ course_key = CourseKey.from_string(course_id) thread = cc.Thread.find(thread_id) thread.delete() thread_deleted.send(sender=None, user=request.user, post=thread) return JsonResponse(prepare_content(thread.to_dict(), course_key)) @require_POST @login_required @permitted def update_comment(request, course_id, comment_id): """ given a course_id and comment_id, update the comment with payload attributes handles static and ajax submissions """ course_key = CourseKey.from_string(course_id) comment = cc.Comment.find(comment_id) if 'body' not in request.POST or not request.POST['body'].strip(): return JsonError(_("Body can't be empty")) comment.body = request.POST["body"] comment.save() comment_edited.send(sender=None, user=request.user, post=comment) if request.is_ajax(): return ajax_content_response(request, course_key, comment.to_dict()) else: return JsonResponse(prepare_content(comment.to_dict(), course_key)) @require_POST @login_required @permitted def endorse_comment(request, course_id, comment_id): """ given a course_id and comment_id, toggle the endorsement of this comment, ajax only """ course_key = CourseKey.from_string(course_id) comment = cc.Comment.find(comment_id) user = request.user comment.endorsed = 
request.POST.get('endorsed', 'false').lower() == 'true' comment.endorsement_user_id = user.id comment.save() comment_endorsed.send(sender=None, user=user, post=comment) return JsonResponse(prepare_content(comment.to_dict(), course_key)) @require_POST @login_required @permitted def openclose_thread(request, course_id, thread_id): """ given a course_id and thread_id, toggle the status of this thread ajax only """ course_key = CourseKey.from_string(course_id) thread = cc.Thread.find(thread_id) thread.closed = request.POST.get('closed', 'false').lower() == 'true' thread.save() return JsonResponse({ 'content': prepare_content(thread.to_dict(), course_key), 'ability': get_ability(course_key, thread.to_dict(), request.user), }) @require_POST @login_required @permitted def create_sub_comment(request, course_id, comment_id): """ given a course_id and comment_id, create a response to a comment after checking the max depth allowed, if allowed """ if is_comment_too_deep(parent=cc.Comment(comment_id)): return JsonError(_("Comment level too deep")) return _create_comment(request, CourseKey.from_string(course_id), parent_id=comment_id) @require_POST @login_required @permitted def delete_comment(request, course_id, comment_id): """ given a course_id and comment_id delete this comment ajax only """ course_key = CourseKey.from_string(course_id) comment = cc.Comment.find(comment_id) comment.delete() comment_deleted.send(sender=None, user=request.user, post=comment) return JsonResponse(prepare_content(comment.to_dict(), course_key)) def _vote_or_unvote(request, course_id, obj, value='up', undo_vote=False): """ Vote or unvote for a thread or a response. """ course_key = CourseKey.from_string(course_id) course = get_course_with_access(request.user, 'load', course_key) user = cc.User.from_django_user(request.user) if undo_vote: user.unvote(obj) # TODO(smarnach): Determine the value of the vote that is undone. 
Currently, you can # only cast upvotes in the user interface, so it is assumed that the vote value is 'up'. # (People could theoretically downvote by handcrafting AJAX requests.) else: user.vote(obj, value) track_voted_event(request, course, obj, value, undo_vote) return JsonResponse(prepare_content(obj.to_dict(), course_key)) @require_POST @login_required @permitted def vote_for_comment(request, course_id, comment_id, value): """ Given a course_id and comment_id, vote for this response. AJAX only. """ comment = cc.Comment.find(comment_id) result = _vote_or_unvote(request, course_id, comment, value) comment_voted.send(sender=None, user=request.user, post=comment) return result @require_POST @login_required @permitted def undo_vote_for_comment(request, course_id, comment_id): """ given a course id and comment id, remove vote ajax only """ return _vote_or_unvote(request, course_id, cc.Comment.find(comment_id), undo_vote=True) @require_POST @login_required @permitted def vote_for_thread(request, course_id, thread_id, value): """ given a course id and thread id vote for this thread ajax only """ thread = cc.Thread.find(thread_id) result = _vote_or_unvote(request, course_id, thread, value) thread_voted.send(sender=None, user=request.user, post=thread) return result @require_POST @login_required @permitted def undo_vote_for_thread(request, course_id, thread_id): """ given a course id and thread id, remove users vote for thread ajax only """ return _vote_or_unvote(request, course_id, cc.Thread.find(thread_id), undo_vote=True) @require_POST @login_required @permitted def flag_abuse_for_thread(request, course_id, thread_id): """ given a course_id and thread_id flag this thread for abuse ajax only """ course_key = CourseKey.from_string(course_id) user = cc.User.from_django_user(request.user) thread = cc.Thread.find(thread_id) thread.flagAbuse(user, thread) return JsonResponse(prepare_content(thread.to_dict(), course_key)) @require_POST @login_required @permitted def 
un_flag_abuse_for_thread(request, course_id, thread_id): """ given a course id and thread id, remove abuse flag for this thread ajax only """ user = cc.User.from_django_user(request.user) course_key = CourseKey.from_string(course_id) course = get_course_by_id(course_key) thread = cc.Thread.find(thread_id) remove_all = bool( has_permission(request.user, 'openclose_thread', course_key) or has_access(request.user, 'staff', course) ) thread.unFlagAbuse(user, thread, remove_all) return JsonResponse(prepare_content(thread.to_dict(), course_key)) @require_POST @login_required @permitted def flag_abuse_for_comment(request, course_id, comment_id): """ given a course and comment id, flag comment for abuse ajax only """ course_key = CourseKey.from_string(course_id) user = cc.User.from_django_user(request.user) comment = cc.Comment.find(comment_id) comment.flagAbuse(user, comment) return JsonResponse(prepare_content(comment.to_dict(), course_key)) @require_POST @login_required @permitted def un_flag_abuse_for_comment(request, course_id, comment_id): """ given a course_id and comment id, unflag comment for abuse ajax only """ user = cc.User.from_django_user(request.user) course_key = CourseKey.from_string(course_id) course = get_course_by_id(course_key) remove_all = bool( has_permission(request.user, 'openclose_thread', course_key) or has_access(request.user, 'staff', course) ) comment = cc.Comment.find(comment_id) comment.unFlagAbuse(user, comment, remove_all) return JsonResponse(prepare_content(comment.to_dict(), course_key)) @require_POST @login_required @permitted def pin_thread(request, course_id, thread_id): """ given a course id and thread id, pin this thread ajax only """ course_key = CourseKey.from_string(course_id) user = cc.User.from_django_user(request.user) thread = cc.Thread.find(thread_id) thread.pin(user, thread_id) return JsonResponse(prepare_content(thread.to_dict(), course_key)) @require_POST @login_required @permitted def un_pin_thread(request, course_id, 
thread_id): """ given a course id and thread id, remove pin from this thread ajax only """ course_key = CourseKey.from_string(course_id) user = cc.User.from_django_user(request.user) thread = cc.Thread.find(thread_id) thread.un_pin(user, thread_id) return JsonResponse(prepare_content(thread.to_dict(), course_key)) @require_POST @login_required @permitted def follow_thread(request, course_id, thread_id): user = cc.User.from_django_user(request.user) thread = cc.Thread.find(thread_id) user.follow(thread) return JsonResponse({}) @require_POST @login_required @permitted def follow_commentable(request, course_id, commentable_id): """ given a course_id and commentable id, follow this commentable ajax only """ user = cc.User.from_django_user(request.user) commentable = cc.Commentable.find(commentable_id) user.follow(commentable) return JsonResponse({}) @require_POST @login_required @permitted def unfollow_thread(request, course_id, thread_id): """ given a course id and thread id, stop following this thread ajax only """ user = cc.User.from_django_user(request.user) thread = cc.Thread.find(thread_id) user.unfollow(thread) return JsonResponse({}) @require_POST @login_required @permitted def unfollow_commentable(request, course_id, commentable_id): """ given a course id and commentable id stop following commentable ajax only """ user = cc.User.from_django_user(request.user) commentable = cc.Commentable.find(commentable_id) user.unfollow(commentable) return JsonResponse({}) @require_POST @login_required @csrf.csrf_exempt def upload(request, course_id): # ajax upload file to a question or answer """view that handles file upload via Ajax """ # check upload permission error = '' new_file_name = '' try: # TODO authorization #may raise exceptions.PermissionDenied #if request.user.is_anonymous(): # msg = _('Sorry, anonymous users cannot upload files') # raise exceptions.PermissionDenied(msg) #request.user.assert_can_upload_file() base_file_name = str(time.time()).replace('.', 
str(random.randint(0, 100000))) file_storage, new_file_name = store_uploaded_file( request, 'file-upload', cc_settings.ALLOWED_UPLOAD_FILE_TYPES, base_file_name, max_file_size=cc_settings.MAX_UPLOAD_FILE_SIZE ) except exceptions.PermissionDenied, err: error = unicode(err) except Exception, err: print err logging.critical(unicode(err)) error = _('Error uploading file. Please contact the site administrator. Thank you.') if error == '': result = _('Good') file_url = file_storage.url(new_file_name) parsed_url = urlparse.urlparse(file_url) file_url = urlparse.urlunparse( urlparse.ParseResult( parsed_url.scheme, parsed_url.netloc, parsed_url.path, '', '', '' ) ) else: result = '' file_url = '' # Using content-type of text/plain here instead of JSON because # IE doesn't know how to handle the JSON response and prompts the # user to save the JSON as a file instead of passing it to the callback. return HttpResponse(json.dumps({ 'result': { 'msg': result, 'error': error, 'file_url': file_url, } }), content_type="text/plain") @require_GET @login_required def users(request, course_id): """ Given a `username` query parameter, find matches for users in the forum for this course. Only exact matches are supported here, so the length of the result set will either be 0 or 1. """ course_key = CourseKey.from_string(course_id) try: get_course_overview_with_access(request.user, 'load', course_key, check_if_enrolled=True) except Http404: # course didn't exist, or requesting user does not have access to it. return JsonError(status=404) except CourseAccessRedirect: # user does not have access to the course. 
return JsonError(status=404) try: username = request.GET['username'] except KeyError: # 400 is default status for JsonError return JsonError(["username parameter is required"]) user_objs = [] try: matched_user = User.objects.get(username=username) cc_user = cc.User.from_django_user(matched_user) cc_user.course_id = course_key cc_user.retrieve(complete=False) if (cc_user['threads_count'] + cc_user['comments_count']) > 0: user_objs.append({ 'id': matched_user.id, 'username': matched_user.username, }) except User.DoesNotExist: pass return JsonResponse({"users": user_objs})
Garrett-R/scikit-learn
refs/heads/master
examples/bicluster/plot_spectral_coclustering.py
276
"""
==============================================
A demo of the Spectral Co-Clustering algorithm
==============================================

This example demonstrates how to generate a dataset and bicluster it
using the the Spectral Co-Clustering algorithm.

The dataset is generated using the ``make_biclusters`` function, which
creates a matrix of small values and implants bicluster with large
values. The rows and columns are then shuffled and passed to the
Spectral Co-Clustering algorithm. Rearranging the shuffled matrix to
make biclusters contiguous shows how accurately the algorithm found
the biclusters.

"""
print(__doc__)

# Author: Kemal Eren <kemal@kemaleren.com>
# License: BSD 3 clause

import numpy as np
from matplotlib import pyplot as plt

from sklearn.datasets import make_biclusters
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.metrics import consensus_score

# Build a 300x300 matrix with five implanted biclusters and remember the
# ground-truth row/column indicators for scoring later.
data, rows, columns = make_biclusters(
    shape=(300, 300), n_clusters=5, noise=5,
    shuffle=False, random_state=0)

plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")

# Shuffle rows and columns so the model has to rediscover the structure.
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")

model = SpectralCoclustering(n_clusters=5, random_state=0)
model.fit(data)

# Compare the recovered biclusters against the (shuffled) ground truth.
score = consensus_score(model.biclusters_,
                        (rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.3f}".format(score))

# Reorder rows and then columns by cluster label so that each found
# bicluster appears as a contiguous block.
reordered = data[np.argsort(model.row_labels_)]
reordered = reordered[:, np.argsort(model.column_labels_)]

plt.matshow(reordered, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")

plt.show()
dan1/horizon-x509
refs/heads/master
openstack_dashboard/dashboards/identity/users/urls.py
57
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from django.conf.urls import patterns
from django.conf.urls import url

from openstack_dashboard.dashboards.identity.users import views

VIEWS_MOD = 'openstack_dashboard.dashboards.identity.users.views'

# URL routes for the identity "Users" panel: the user list, the create
# form, and the per-user detail / update / change-password pages.
urlpatterns = patterns(
    VIEWS_MOD,
    url(r'^$', views.IndexView.as_view(), name='index'),
    url(r'^(?P<user_id>[^/]+)/update/$',
        views.UpdateView.as_view(),
        name='update'),
    url(r'^create/$', views.CreateView.as_view(), name='create'),
    url(r'^(?P<user_id>[^/]+)/detail/$',
        views.DetailView.as_view(),
        name='detail'),
    url(r'^(?P<user_id>[^/]+)/change_password/$',
        views.ChangePasswordView.as_view(),
        name='change_password'))
lmprice/ansible
refs/heads/devel
lib/ansible/plugins/callback/actionable.py
8
# (c) 2015, Andrew Gaffney <andrew@agaffney.org>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
    callback: actionable
    type: stdout
    short_description: shows only items that need attention
    description:
      - Use this callback when you dont care about OK nor Skipped.
      - This callback suppresses any non Failed or Changed status.
    version_added: "2.1"
    extends_documentation_fragment:
      - default_callback
    requirements:
      - set as stdout callback in configuration
'''

from ansible import constants as C
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default


class CallbackModule(CallbackModule_default):

    """Stdout callback that only shows failed or changed results.

    Task banners are printed lazily: each task start is only remembered,
    and the banner is emitted the first time one of that task's results
    turns out to be worth displaying.
    """

    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'stdout'
    CALLBACK_NAME = 'actionable'

    def __init__(self):
        self.super_ref = super(CallbackModule, self)
        self.super_ref.__init__()
        # State backing the lazily-printed task banner.
        self.last_task = None
        self.last_task_banner = None
        self.shown_title = False

    def v2_playbook_on_handler_task_start(self, task):
        # Handler tasks are always announced immediately.
        self.super_ref.v2_playbook_on_handler_task_start(task)
        self.shown_title = True

    def v2_playbook_on_task_start(self, task, is_conditional):
        # Remember the task but print nothing yet; see display_task_banner().
        self.last_task = task
        self.last_task_banner = self._get_task_banner(task)
        self.shown_title = False

    def display_task_banner(self):
        # Emit the pending banner exactly once per task.
        if self.shown_title:
            return
        self.super_ref.v2_playbook_on_task_start(self.last_task, None)
        self.shown_title = True

    def _print_task_banner(self, task):
        self._display.banner(self.last_task_banner)
        self._print_task_path(self.last_task)
        self._last_task_banner = self.last_task._uuid

    def _print_task_path(self, task):
        if self._display.verbosity < 2:
            return
        path = task.get_path()
        if path:
            self._display.display(u"task path: %s" % path, color=C.COLOR_DEBUG)

    def _get_task_banner(self, task):
        # Args can be flagged no_log on the task itself or in the module's
        # argument spec.  Only the former is checkable here -- the argument
        # spec runs on the target machine, which has not run yet at this
        # point.  DISPLAY_ARGS_TO_STDOUT therefore gives operators a global
        # switch to suppress args entirely when stdout is not considered
        # secure (shoulder surfing, stdout logged straight to a file, etc).
        args = ''
        if not task.no_log and C.DISPLAY_ARGS_TO_STDOUT:
            args = u' %s' % u', '.join(u'%s=%s' % a for a in task.args.items())
        return u"TASK [%s%s]" % (task.get_name().strip(), args)

    def v2_runner_on_failed(self, result, ignore_errors=False):
        self.display_task_banner()
        self.super_ref.v2_runner_on_failed(result, ignore_errors)

    def v2_runner_on_ok(self, result):
        # Only surface "ok" results that actually changed something.
        if result._result.get('changed', False):
            self.display_task_banner()
            self.super_ref.v2_runner_on_ok(result)

    def v2_runner_on_unreachable(self, result):
        self.display_task_banner()
        self.super_ref.v2_runner_on_unreachable(result)

    def v2_runner_on_skipped(self, result):
        pass

    def v2_playbook_on_include(self, included_file):
        pass

    def v2_runner_item_on_ok(self, result):
        # Same filtering as v2_runner_on_ok, for per-item results.
        if result._result.get('changed', False):
            self.display_task_banner()
            self.super_ref.v2_runner_item_on_ok(result)

    def v2_runner_item_on_skipped(self, result):
        pass

    def v2_runner_item_on_failed(self, result):
        self.display_task_banner()
        self.super_ref.v2_runner_item_on_failed(result)
matuu/simpleai
refs/heads/master
samples/search/hello_world.py
4
# coding=utf-8
from simpleai.search import SearchProblem, astar

GOAL = 'HELLO WORLD'


class HelloProblem(SearchProblem):
    """Toy search problem: build the GOAL string one character at a time."""

    def actions(self, state):
        # Once the string reaches goal length there is nothing left to do;
        # otherwise any uppercase letter or a space may be appended.
        if len(state) >= len(GOAL):
            return []
        return list(' ABCDEFGHIJKLMNOPQRSTUVWXYZ')

    def result(self, state, action):
        # Applying an action simply appends the chosen character.
        return state + action

    def is_goal(self, state):
        return state == GOAL

    def heuristic(self, state):
        # Lower bound on remaining cost: characters already written that
        # disagree with the goal, plus characters not yet written.
        mismatched = sum(1 for ours, target in zip(state, GOAL)
                         if ours != target)
        remaining = len(GOAL) - len(state)
        return mismatched + remaining


problem = HelloProblem(initial_state='')
result = astar(problem)

print(result.state)
print(result.path())
MichaelNedzelsky/intellij-community
refs/heads/master
python/testData/inspections/AddSelfFunction_after.py
83
class A: def get_a(self): pass def foo(self): self.get_a()
pcncadcache/cachesystem
refs/heads/master
extern/redis-binlog/extern/rocksdb/coverage/parse_gcov_output.py
101
import optparse import re import sys from optparse import OptionParser # the gcov report follows certain pattern. Each file will have two lines # of report, from which we can extract the file name, total lines and coverage # percentage. def parse_gcov_report(gcov_input): per_file_coverage = {} total_coverage = None for line in sys.stdin: line = line.strip() # --First line of the coverage report (with file name in it)? match_obj = re.match("^File '(.*)'$", line) if match_obj: # fetch the file name from the first line of the report. current_file = match_obj.group(1) continue # -- Second line of the file report (with coverage percentage) match_obj = re.match("^Lines executed:(.*)% of (.*)", line) if match_obj: coverage = float(match_obj.group(1)) lines = int(match_obj.group(2)) if current_file is not None: per_file_coverage[current_file] = (coverage, lines) current_file = None else: # If current_file is not set, we reach the last line of report, # which contains the summarized coverage percentage. total_coverage = (coverage, lines) continue # If the line's pattern doesn't fall into the above categories. We # can simply ignore them since they're either empty line or doesn't # find executable lines of the given file. current_file = None return per_file_coverage, total_coverage def get_option_parser(): usage = "Parse the gcov output and generate more human-readable code " +\ "coverage report." parser = OptionParser(usage) parser.add_option( "--interested-files", "-i", dest="filenames", help="Comma separated files names. if specified, we will display " + "the coverage report only for interested source files. " + "Otherwise we will display the coverage report for all " + "source files." ) return parser def display_file_coverage(per_file_coverage, total_coverage): # To print out auto-adjustable column, we need to know the longest # length of file names. 
max_file_name_length = max( len(fname) for fname in per_file_coverage.keys() ) # -- Print header # size of separator is determined by 3 column sizes: # file name, coverage percentage and lines. header_template = \ "%" + str(max_file_name_length) + "s\t%s\t%s" separator = "-" * (max_file_name_length + 10 + 20) print header_template % ("Filename", "Coverage", "Lines") print separator # -- Print body # template for printing coverage report for each file. record_template = "%" + str(max_file_name_length) + "s\t%5.2f%%\t%10d" for fname, coverage_info in per_file_coverage.items(): coverage, lines = coverage_info print record_template % (fname, coverage, lines) # -- Print footer if total_coverage: print separator print record_template % ("Total", total_coverage[0], total_coverage[1]) def report_coverage(): parser = get_option_parser() (options, args) = parser.parse_args() interested_files = set() if options.filenames is not None: interested_files = set(f.strip() for f in options.filenames.split(',')) # To make things simple, right now we only read gcov report from the input per_file_coverage, total_coverage = parse_gcov_report(sys.stdin) # Check if we need to display coverage info for interested files. if len(interested_files): per_file_coverage = dict( (fname, per_file_coverage[fname]) for fname in interested_files if fname in per_file_coverage ) # If we only interested in several files, it makes no sense to report # the total_coverage total_coverage = None if not len(per_file_coverage): print >> sys.stderr, "Cannot find coverage info for the given files." return display_file_coverage(per_file_coverage, total_coverage) if __name__ == "__main__": report_coverage()
sxend/FrameworkBenchmarks
refs/heads/master
frameworks/Python/responder/responder_conf.py
20
import multiprocessing import os _is_travis = os.environ.get('TRAVIS') == 'true' workers = multiprocessing.cpu_count() if _is_travis: workers = 2 bind = "0.0.0.0:8080" keepalive = 120 errorlog = '-' pidfile = '/tmp/responder.pid' loglevel = 'error'
andmos/ansible
refs/heads/devel
lib/ansible/module_utils/network/netvisor/pn_nvos.py
18
# Copyright: (c) 2018, Pluribus Networks # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) # from __future__ import absolute_import, division, print_function __metaclass__ = type import shlex def pn_cli(module, switch=None, username=None, password=None, switch_local=None): """ Method to generate the cli portion to launch the Netvisor cli. :param module: The Ansible module to fetch username and password. :return: The cli string for further processing. """ cli = '/usr/bin/cli --quiet -e --no-login-prompt ' if username and password: cli += '--user "%s":"%s" ' % (username, password) if switch: cli += ' switch ' + switch if switch_local: cli += ' switch-local ' return cli def booleanArgs(arg, trueString, falseString): if arg is True: return " %s " % trueString elif arg is False: return " %s " % falseString else: return "" def run_cli(module, cli, state_map): """ This method executes the cli command on the target node(s) and returns the output. The module then exits based on the output. :param cli: the complete cli string to be executed on the target node(s). :param state_map: Provides state of the command. :param module: The Ansible module to fetch command """ state = module.params['state'] command = state_map[state] cmd = shlex.split(cli) result, out, err = module.run_command(cmd) remove_cmd = '/usr/bin/cli --quiet -e --no-login-prompt' results = dict( command=' '.join(cmd).replace(remove_cmd, ''), msg="%s operation completed" % command, changed=True ) # Response in JSON format if result != 0: module.exit_json( command=' '.join(cmd).replace(remove_cmd, ''), stderr=err.strip(), msg="%s operation failed" % command, changed=False ) if out: results['stdout'] = out.strip() module.exit_json(**results)
phobson/bokeh
refs/heads/master
bokeh/server/task.py
17
''' ''' from __future__ import absolute_import # TODO (havocp) this class may be unused class ServerTask(object): ''' ''' pass
Yelp/paasta
refs/heads/master
paasta_tools/firewall_logging.py
1
import argparse import logging import os import signal import socket import socketserver import sys import syslogmp from paasta_tools.firewall import services_running_here from paasta_tools.utils import _log from paasta_tools.utils import configure_log from paasta_tools.utils import load_system_paasta_config DEFAULT_NUM_WORKERS = 5 log = logging.getLogger(__name__) class SyslogUDPHandler(socketserver.BaseRequestHandler): def setup(self): configure_log() self.cluster = load_system_paasta_config().get_cluster() def handle(self): data, socket = self.request syslog_to_paasta_log(data, self.cluster) def syslog_to_paasta_log(data, cluster): iptables_log = parse_syslog(data) if iptables_log is None: return service, instance = lookup_service_instance_by_ip(iptables_log["SRC"]) if service is None or instance is None: return # prepend hostname log_line = iptables_log["hostname"] + ": " + iptables_log["message"] _log( service=service, component="security", level="debug", cluster=cluster, instance=instance, line=log_line, ) def parse_syslog(data): parsed_data = syslogmp.parse(data) try: full_message = parsed_data.message.decode() except UnicodeDecodeError: return None if not full_message.startswith("kernel: ["): # Not a kernel message return None close_bracket = full_message.find("]") if close_bracket == -1: return None iptables_message = full_message[close_bracket + 1 :].strip() parts = iptables_message.split(" ") # parts[0] is the log-prefix # parts[1..] 
is either KEY=VALUE or just KEY if not parts[1].startswith("IN="): # not an iptables message return None fields = {k: v for k, _, v in (field.partition("=") for field in parts[1:])} fields["hostname"] = parsed_data.hostname fields["message"] = iptables_message return fields def lookup_service_instance_by_ip(ip_lookup): for service, instance, mac, ip in services_running_here(): if ip == ip_lookup: return (service, instance) log.info(f"Unable to find container for ip {ip_lookup}") return (None, None) def parse_args(argv=None): parser = argparse.ArgumentParser( description="Adapts iptables syslog messages into scribe" ) parser.add_argument( "-v", "--verbose", action="store_true", dest="verbose", default=False ) parser.add_argument( "-l", "--listen-host", help="Default %(default)s", default="127.0.0.1" ) parser.add_argument( "-p", "--listen-port", type=int, help="Default %(default)s", default=1516 ) parser.add_argument( "-w", "--num-workers", type=int, help="Default %(default)s", default=DEFAULT_NUM_WORKERS, ) args = parser.parse_args(argv) return args def setup_logging(verbose): if verbose: logging.basicConfig(level=logging.DEBUG) else: logging.basicConfig(level=logging.WARNING) class MultiUDPServer(socketserver.UDPServer): # UDPServer with SO_REUSEPORT enabled so that incoming packets are # load-balanced across listeners def server_bind(self): self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) # UDPServer is old-style class so can't use super socketserver.UDPServer.server_bind(self) def run_server(listen_host, listen_port): server = MultiUDPServer((listen_host, listen_port), SyslogUDPHandler) server.serve_forever() def main(argv=None): args = parse_args(argv) setup_logging(args.verbose) assert args.num_workers > 0 # start n-1 separate processes, then run_server() on this one num_forks = args.num_workers - 1 for x in range(num_forks): if os.fork() == 0: run_server(args.listen_host, args.listen_port) # propagate SIGTERM to all my children then exit 
signal.signal( signal.SIGTERM, lambda signum, _: os.killpg(os.getpid(), signum) or sys.exit(1) ) run_server(args.listen_host, args.listen_port)
srisankethu/babel
refs/heads/master
babel/support.py
10
# -*- coding: utf-8 -*- """ babel.support ~~~~~~~~~~~~~ Several classes and functions that help with integrating and using Babel in applications. .. note: the code in this module is not used by Babel itself :copyright: (c) 2013 by the Babel Team. :license: BSD, see LICENSE for more details. """ import gettext import locale from babel.core import Locale from babel.dates import format_date, format_datetime, format_time, \ format_timedelta from babel.numbers import format_number, format_decimal, format_currency, \ format_percent, format_scientific from babel._compat import PY2, text_type, text_to_native class Format(object): """Wrapper class providing the various date and number formatting functions bound to a specific locale and time-zone. >>> from babel.util import UTC >>> from datetime import date >>> fmt = Format('en_US', UTC) >>> fmt.date(date(2007, 4, 1)) u'Apr 1, 2007' >>> fmt.decimal(1.2345) u'1.234' """ def __init__(self, locale, tzinfo=None): """Initialize the formatter. :param locale: the locale identifier or `Locale` instance :param tzinfo: the time-zone info (a `tzinfo` instance or `None`) """ self.locale = Locale.parse(locale) self.tzinfo = tzinfo def date(self, date=None, format='medium'): """Return a date formatted according to the given pattern. >>> from datetime import date >>> fmt = Format('en_US') >>> fmt.date(date(2007, 4, 1)) u'Apr 1, 2007' """ return format_date(date, format, locale=self.locale) def datetime(self, datetime=None, format='medium'): """Return a date and time formatted according to the given pattern. >>> from datetime import datetime >>> from pytz import timezone >>> fmt = Format('en_US', tzinfo=timezone('US/Eastern')) >>> fmt.datetime(datetime(2007, 4, 1, 15, 30)) u'Apr 1, 2007, 11:30:00 AM' """ return format_datetime(datetime, format, tzinfo=self.tzinfo, locale=self.locale) def time(self, time=None, format='medium'): """Return a time formatted according to the given pattern. 
>>> from datetime import datetime >>> from pytz import timezone >>> fmt = Format('en_US', tzinfo=timezone('US/Eastern')) >>> fmt.time(datetime(2007, 4, 1, 15, 30)) u'11:30:00 AM' """ return format_time(time, format, tzinfo=self.tzinfo, locale=self.locale) def timedelta(self, delta, granularity='second', threshold=.85, format='medium', add_direction=False): """Return a time delta according to the rules of the given locale. >>> from datetime import timedelta >>> fmt = Format('en_US') >>> fmt.timedelta(timedelta(weeks=11)) u'3 months' """ return format_timedelta(delta, granularity=granularity, threshold=threshold, format=format, add_direction=add_direction, locale=self.locale) def number(self, number): """Return an integer number formatted for the locale. >>> fmt = Format('en_US') >>> fmt.number(1099) u'1,099' """ return format_number(number, locale=self.locale) def decimal(self, number, format=None): """Return a decimal number formatted for the locale. >>> fmt = Format('en_US') >>> fmt.decimal(1.2345) u'1.234' """ return format_decimal(number, format, locale=self.locale) def currency(self, number, currency): """Return a number in the given currency formatted for the locale. """ return format_currency(number, currency, locale=self.locale) def percent(self, number, format=None): """Return a number formatted as percentage for the locale. >>> fmt = Format('en_US') >>> fmt.percent(0.34) u'34%' """ return format_percent(number, format, locale=self.locale) def scientific(self, number): """Return a number formatted using scientific notation for the locale. """ return format_scientific(number, locale=self.locale) class LazyProxy(object): """Class for proxy objects that delegate to a specified function to evaluate the actual object. >>> def greeting(name='world'): ... return 'Hello, %s!' % name >>> lazy_greeting = LazyProxy(greeting, name='Joe') >>> print(lazy_greeting) Hello, Joe! >>> u' ' + lazy_greeting u' Hello, Joe!' 
>>> u'(%s)' % lazy_greeting u'(Hello, Joe!)' This can be used, for example, to implement lazy translation functions that delay the actual translation until the string is actually used. The rationale for such behavior is that the locale of the user may not always be available. In web applications, you only know the locale when processing a request. The proxy implementation attempts to be as complete as possible, so that the lazy objects should mostly work as expected, for example for sorting: >>> greetings = [ ... LazyProxy(greeting, 'world'), ... LazyProxy(greeting, 'Joe'), ... LazyProxy(greeting, 'universe'), ... ] >>> greetings.sort() >>> for greeting in greetings: ... print(greeting) Hello, Joe! Hello, universe! Hello, world! """ __slots__ = ['_func', '_args', '_kwargs', '_value', '_is_cache_enabled'] def __init__(self, func, *args, **kwargs): is_cache_enabled = kwargs.pop('enable_cache', True) # Avoid triggering our own __setattr__ implementation object.__setattr__(self, '_func', func) object.__setattr__(self, '_args', args) object.__setattr__(self, '_kwargs', kwargs) object.__setattr__(self, '_is_cache_enabled', is_cache_enabled) object.__setattr__(self, '_value', None) @property def value(self): if self._value is None: value = self._func(*self._args, **self._kwargs) if not self._is_cache_enabled: return value object.__setattr__(self, '_value', value) return self._value def __contains__(self, key): return key in self.value def __nonzero__(self): return bool(self.value) def __dir__(self): return dir(self.value) def __iter__(self): return iter(self.value) def __len__(self): return len(self.value) def __str__(self): return str(self.value) def __unicode__(self): return unicode(self.value) def __add__(self, other): return self.value + other def __radd__(self, other): return other + self.value def __mod__(self, other): return self.value % other def __rmod__(self, other): return other % self.value def __mul__(self, other): return self.value * other def __rmul__(self, 
other): return other * self.value def __call__(self, *args, **kwargs): return self.value(*args, **kwargs) def __lt__(self, other): return self.value < other def __le__(self, other): return self.value <= other def __eq__(self, other): return self.value == other def __ne__(self, other): return self.value != other def __gt__(self, other): return self.value > other def __ge__(self, other): return self.value >= other def __delattr__(self, name): delattr(self.value, name) def __getattr__(self, name): return getattr(self.value, name) def __setattr__(self, name, value): setattr(self.value, name, value) def __delitem__(self, key): del self.value[key] def __getitem__(self, key): return self.value[key] def __setitem__(self, key, value): self.value[key] = value def __copy__(self): return LazyProxy( self._func, enable_cache=self._is_cache_enabled, *self._args, **self._kwargs ) def __deepcopy__(self, memo): from copy import deepcopy return LazyProxy( deepcopy(self._func, memo), enable_cache=deepcopy(self._is_cache_enabled, memo), *deepcopy(self._args, memo), **deepcopy(self._kwargs, memo) ) class NullTranslations(gettext.NullTranslations, object): DEFAULT_DOMAIN = None def __init__(self, fp=None): """Initialize a simple translations class which is not backed by a real catalog. Behaves similar to gettext.NullTranslations but also offers Babel's on *gettext methods (e.g. 'dgettext()'). :param fp: a file-like object (ignored in this class) """ # These attributes are set by gettext.NullTranslations when a catalog # is parsed (fp != None). Ensure that they are always present because # some *gettext methods (including '.gettext()') rely on the attributes. self._catalog = {} self.plural = lambda n: int(n != 1) super(NullTranslations, self).__init__(fp=fp) self.files = list(filter(None, [getattr(fp, 'name', None)])) self.domain = self.DEFAULT_DOMAIN self._domains = {} def dgettext(self, domain, message): """Like ``gettext()``, but look the message up in the specified domain. 
""" return self._domains.get(domain, self).gettext(message) def ldgettext(self, domain, message): """Like ``lgettext()``, but look the message up in the specified domain. """ return self._domains.get(domain, self).lgettext(message) def udgettext(self, domain, message): """Like ``ugettext()``, but look the message up in the specified domain. """ return self._domains.get(domain, self).ugettext(message) # backward compatibility with 0.9 dugettext = udgettext def dngettext(self, domain, singular, plural, num): """Like ``ngettext()``, but look the message up in the specified domain. """ return self._domains.get(domain, self).ngettext(singular, plural, num) def ldngettext(self, domain, singular, plural, num): """Like ``lngettext()``, but look the message up in the specified domain. """ return self._domains.get(domain, self).lngettext(singular, plural, num) def udngettext(self, domain, singular, plural, num): """Like ``ungettext()`` but look the message up in the specified domain. """ return self._domains.get(domain, self).ungettext(singular, plural, num) # backward compatibility with 0.9 dungettext = udngettext # Most of the downwards code, until it get's included in stdlib, from: # http://bugs.python.org/file10036/gettext-pgettext.patch # # The encoding of a msgctxt and a msgid in a .mo file is # msgctxt + "\x04" + msgid (gettext version >= 0.15) CONTEXT_ENCODING = '%s\x04%s' def pgettext(self, context, message): """Look up the `context` and `message` id in the catalog and return the corresponding message string, as an 8-bit string encoded with the catalog's charset encoding, if known. If there is no entry in the catalog for the `message` id and `context` , and a fallback has been set, the look up is forwarded to the fallback's ``pgettext()`` method. Otherwise, the `message` id is returned. 
""" ctxt_msg_id = self.CONTEXT_ENCODING % (context, message) missing = object() tmsg = self._catalog.get(ctxt_msg_id, missing) if tmsg is missing: if self._fallback: return self._fallback.pgettext(context, message) return message # Encode the Unicode tmsg back to an 8-bit string, if possible if self._output_charset: return text_to_native(tmsg, self._output_charset) elif self._charset: return text_to_native(tmsg, self._charset) return tmsg def lpgettext(self, context, message): """Equivalent to ``pgettext()``, but the translation is returned in the preferred system encoding, if no other encoding was explicitly set with ``bind_textdomain_codeset()``. """ ctxt_msg_id = self.CONTEXT_ENCODING % (context, message) missing = object() tmsg = self._catalog.get(ctxt_msg_id, missing) if tmsg is missing: if self._fallback: return self._fallback.lpgettext(context, message) return message if self._output_charset: return tmsg.encode(self._output_charset) return tmsg.encode(locale.getpreferredencoding()) def npgettext(self, context, singular, plural, num): """Do a plural-forms lookup of a message id. `singular` is used as the message id for purposes of lookup in the catalog, while `num` is used to determine which plural form to use. The returned message string is an 8-bit string encoded with the catalog's charset encoding, if known. If the message id for `context` is not found in the catalog, and a fallback is specified, the request is forwarded to the fallback's ``npgettext()`` method. Otherwise, when ``num`` is 1 ``singular`` is returned, and ``plural`` is returned in all other cases. 
""" ctxt_msg_id = self.CONTEXT_ENCODING % (context, singular) try: tmsg = self._catalog[(ctxt_msg_id, self.plural(num))] if self._output_charset: return text_to_native(tmsg, self._output_charset) elif self._charset: return text_to_native(tmsg, self._charset) return tmsg except KeyError: if self._fallback: return self._fallback.npgettext(context, singular, plural, num) if num == 1: return singular else: return plural def lnpgettext(self, context, singular, plural, num): """Equivalent to ``npgettext()``, but the translation is returned in the preferred system encoding, if no other encoding was explicitly set with ``bind_textdomain_codeset()``. """ ctxt_msg_id = self.CONTEXT_ENCODING % (context, singular) try: tmsg = self._catalog[(ctxt_msg_id, self.plural(num))] if self._output_charset: return tmsg.encode(self._output_charset) return tmsg.encode(locale.getpreferredencoding()) except KeyError: if self._fallback: return self._fallback.lnpgettext(context, singular, plural, num) if num == 1: return singular else: return plural def upgettext(self, context, message): """Look up the `context` and `message` id in the catalog and return the corresponding message string, as a Unicode string. If there is no entry in the catalog for the `message` id and `context`, and a fallback has been set, the look up is forwarded to the fallback's ``upgettext()`` method. Otherwise, the `message` id is returned. """ ctxt_message_id = self.CONTEXT_ENCODING % (context, message) missing = object() tmsg = self._catalog.get(ctxt_message_id, missing) if tmsg is missing: if self._fallback: return self._fallback.upgettext(context, message) return text_type(message) return tmsg def unpgettext(self, context, singular, plural, num): """Do a plural-forms lookup of a message id. `singular` is used as the message id for purposes of lookup in the catalog, while `num` is used to determine which plural form to use. The returned message string is a Unicode string. 
If the message id for `context` is not found in the catalog, and a fallback is specified, the request is forwarded to the fallback's ``unpgettext()`` method. Otherwise, when `num` is 1 `singular` is returned, and `plural` is returned in all other cases. """ ctxt_message_id = self.CONTEXT_ENCODING % (context, singular) try: tmsg = self._catalog[(ctxt_message_id, self.plural(num))] except KeyError: if self._fallback: return self._fallback.unpgettext(context, singular, plural, num) if num == 1: tmsg = text_type(singular) else: tmsg = text_type(plural) return tmsg def dpgettext(self, domain, context, message): """Like `pgettext()`, but look the message up in the specified `domain`. """ return self._domains.get(domain, self).pgettext(context, message) def udpgettext(self, domain, context, message): """Like `upgettext()`, but look the message up in the specified `domain`. """ return self._domains.get(domain, self).upgettext(context, message) # backward compatibility with 0.9 dupgettext = udpgettext def ldpgettext(self, domain, context, message): """Equivalent to ``dpgettext()``, but the translation is returned in the preferred system encoding, if no other encoding was explicitly set with ``bind_textdomain_codeset()``. """ return self._domains.get(domain, self).lpgettext(context, message) def dnpgettext(self, domain, context, singular, plural, num): """Like ``npgettext``, but look the message up in the specified `domain`. """ return self._domains.get(domain, self).npgettext(context, singular, plural, num) def udnpgettext(self, domain, context, singular, plural, num): """Like ``unpgettext``, but look the message up in the specified `domain`. 
""" return self._domains.get(domain, self).unpgettext(context, singular, plural, num) # backward compatibility with 0.9 dunpgettext = udnpgettext def ldnpgettext(self, domain, context, singular, plural, num): """Equivalent to ``dnpgettext()``, but the translation is returned in the preferred system encoding, if no other encoding was explicitly set with ``bind_textdomain_codeset()``. """ return self._domains.get(domain, self).lnpgettext(context, singular, plural, num) if not PY2: ugettext = gettext.NullTranslations.gettext ungettext = gettext.NullTranslations.ngettext class Translations(NullTranslations, gettext.GNUTranslations): """An extended translation catalog class.""" DEFAULT_DOMAIN = 'messages' def __init__(self, fp=None, domain=None): """Initialize the translations catalog. :param fp: the file-like object the translation should be read from :param domain: the message domain (default: 'messages') """ super(Translations, self).__init__(fp=fp) self.domain = domain or self.DEFAULT_DOMAIN if not PY2: ugettext = gettext.GNUTranslations.gettext ungettext = gettext.GNUTranslations.ngettext @classmethod def load(cls, dirname=None, locales=None, domain=None): """Load translations from the given directory. :param dirname: the directory containing the ``MO`` files :param locales: the list of locales in order of preference (items in this list can be either `Locale` objects or locale strings) :param domain: the message domain (default: 'messages') """ if locales is not None: if not isinstance(locales, (list, tuple)): locales = [locales] locales = [str(locale) for locale in locales] if not domain: domain = cls.DEFAULT_DOMAIN filename = gettext.find(domain, dirname, locales) if not filename: return NullTranslations() with open(filename, 'rb') as fp: return cls(fp=fp, domain=domain) def __repr__(self): return '<%s: "%s">' % (type(self).__name__, self._info.get('project-id-version')) def add(self, translations, merge=True): """Add the given translations to the catalog. 
If the domain of the translations is different than that of the current catalog, they are added as a catalog that is only accessible by the various ``d*gettext`` functions. :param translations: the `Translations` instance with the messages to add :param merge: whether translations for message domains that have already been added should be merged with the existing translations """ domain = getattr(translations, 'domain', self.DEFAULT_DOMAIN) if merge and domain == self.domain: return self.merge(translations) existing = self._domains.get(domain) if merge and existing is not None: existing.merge(translations) else: translations.add_fallback(self) self._domains[domain] = translations return self def merge(self, translations): """Merge the given translations into the catalog. Message translations in the specified catalog override any messages with the same identifier in the existing catalog. :param translations: the `Translations` instance with the messages to merge """ if isinstance(translations, gettext.GNUTranslations): self._catalog.update(translations._catalog) if isinstance(translations, Translations): self.files.extend(translations.files) return self
nkgilley/home-assistant
refs/heads/dev
homeassistant/components/hisense_aehw4a1/climate.py
21
"""Pyaehw4a1 platform to control of Hisense AEH-W4A1 Climate Devices.""" import logging from pyaehw4a1.aehw4a1 import AehW4a1 import pyaehw4a1.exceptions from homeassistant.components.climate import ClimateEntity from homeassistant.components.climate.const import ( FAN_AUTO, FAN_HIGH, FAN_LOW, FAN_MEDIUM, HVAC_MODE_COOL, HVAC_MODE_DRY, HVAC_MODE_FAN_ONLY, HVAC_MODE_HEAT, HVAC_MODE_OFF, PRESET_BOOST, PRESET_ECO, PRESET_NONE, PRESET_SLEEP, SUPPORT_FAN_MODE, SUPPORT_PRESET_MODE, SUPPORT_SWING_MODE, SUPPORT_TARGET_TEMPERATURE, SWING_BOTH, SWING_HORIZONTAL, SWING_OFF, SWING_VERTICAL, ) from homeassistant.const import ( ATTR_TEMPERATURE, PRECISION_WHOLE, TEMP_CELSIUS, TEMP_FAHRENHEIT, ) from . import CONF_IP_ADDRESS, DOMAIN SUPPORT_FLAGS = ( SUPPORT_TARGET_TEMPERATURE | SUPPORT_FAN_MODE | SUPPORT_SWING_MODE | SUPPORT_PRESET_MODE ) MIN_TEMP_C = 16 MAX_TEMP_C = 32 MIN_TEMP_F = 61 MAX_TEMP_F = 90 HVAC_MODES = [ HVAC_MODE_OFF, HVAC_MODE_HEAT, HVAC_MODE_COOL, HVAC_MODE_DRY, HVAC_MODE_FAN_ONLY, ] FAN_MODES = [ "mute", FAN_LOW, FAN_MEDIUM, FAN_HIGH, FAN_AUTO, ] SWING_MODES = [ SWING_OFF, SWING_VERTICAL, SWING_HORIZONTAL, SWING_BOTH, ] PRESET_MODES = [ PRESET_NONE, PRESET_ECO, PRESET_BOOST, PRESET_SLEEP, "sleep_2", "sleep_3", "sleep_4", ] AC_TO_HA_STATE = { "0001": HVAC_MODE_HEAT, "0010": HVAC_MODE_COOL, "0011": HVAC_MODE_DRY, "0000": HVAC_MODE_FAN_ONLY, } HA_STATE_TO_AC = { HVAC_MODE_OFF: "off", HVAC_MODE_HEAT: "mode_heat", HVAC_MODE_COOL: "mode_cool", HVAC_MODE_DRY: "mode_dry", HVAC_MODE_FAN_ONLY: "mode_fan", } AC_TO_HA_FAN_MODES = { "00000000": FAN_AUTO, # fan value for heat mode "00000001": FAN_AUTO, "00000010": "mute", "00000100": FAN_LOW, "00000110": FAN_MEDIUM, "00001000": FAN_HIGH, } HA_FAN_MODES_TO_AC = { "mute": "speed_mute", FAN_LOW: "speed_low", FAN_MEDIUM: "speed_med", FAN_HIGH: "speed_max", FAN_AUTO: "speed_auto", } AC_TO_HA_SWING = { "00": SWING_OFF, "10": SWING_VERTICAL, "01": SWING_HORIZONTAL, "11": SWING_BOTH, } _LOGGER = logging.getLogger(__name__) def 
_build_entity(device): _LOGGER.debug("Found device at %s", device) return ClimateAehW4a1(device) async def async_setup_entry(hass, config_entry, async_add_entities): """Set up the AEH-W4A1 climate platform.""" # Priority 1: manual config if hass.data[DOMAIN].get(CONF_IP_ADDRESS): devices = hass.data[DOMAIN][CONF_IP_ADDRESS] else: # Priority 2: scanned interfaces devices = await AehW4a1().discovery() entities = [_build_entity(device) for device in devices] async_add_entities(entities, True) class ClimateAehW4a1(ClimateEntity): """Representation of a Hisense AEH-W4A1 module for climate device.""" def __init__(self, device): """Initialize the climate device.""" self._unique_id = device self._device = AehW4a1(device) self._hvac_modes = HVAC_MODES self._fan_modes = FAN_MODES self._swing_modes = SWING_MODES self._preset_modes = PRESET_MODES self._available = None self._on = None self._temperature_unit = None self._current_temperature = None self._target_temperature = None self._hvac_mode = None self._fan_mode = None self._swing_mode = None self._preset_mode = None self._previous_state = None async def async_update(self): """Pull state from AEH-W4A1.""" try: status = await self._device.command("status_102_0") except pyaehw4a1.exceptions.ConnectionError as library_error: _LOGGER.warning( "Unexpected error of %s: %s", self._unique_id, library_error ) self._available = False return self._available = True self._on = status["run_status"] if status["temperature_Fahrenheit"] == "0": self._temperature_unit = TEMP_CELSIUS else: self._temperature_unit = TEMP_FAHRENHEIT self._current_temperature = int(status["indoor_temperature_status"], 2) if self._on == "1": device_mode = status["mode_status"] self._hvac_mode = AC_TO_HA_STATE[device_mode] fan_mode = status["wind_status"] self._fan_mode = AC_TO_HA_FAN_MODES[fan_mode] swing_mode = f'{status["up_down"]}{status["left_right"]}' self._swing_mode = AC_TO_HA_SWING[swing_mode] if self._hvac_mode in (HVAC_MODE_COOL, HVAC_MODE_HEAT): 
self._target_temperature = int(status["indoor_temperature_setting"], 2) else: self._target_temperature = None if status["efficient"] == "1": self._preset_mode = PRESET_BOOST elif status["low_electricity"] == "1": self._preset_mode = PRESET_ECO elif status["sleep_status"] == "0000001": self._preset_mode = PRESET_SLEEP elif status["sleep_status"] == "0000010": self._preset_mode = "sleep_2" elif status["sleep_status"] == "0000011": self._preset_mode = "sleep_3" elif status["sleep_status"] == "0000100": self._preset_mode = "sleep_4" else: self._preset_mode = PRESET_NONE else: self._hvac_mode = HVAC_MODE_OFF self._fan_mode = None self._swing_mode = None self._target_temperature = None self._preset_mode = None @property def available(self): """Return True if entity is available.""" return self._available @property def name(self): """Return the name of the climate device.""" return self._unique_id @property def temperature_unit(self): """Return the unit of measurement.""" return self._temperature_unit @property def current_temperature(self): """Return the current temperature.""" return self._current_temperature @property def target_temperature(self): """Return the temperature we are trying to reach.""" return self._target_temperature @property def hvac_mode(self): """Return hvac target hvac state.""" return self._hvac_mode @property def hvac_modes(self): """Return the list of available operation modes.""" return self._hvac_modes @property def fan_mode(self): """Return the fan setting.""" return self._fan_mode @property def fan_modes(self): """Return the list of available fan modes.""" return self._fan_modes @property def preset_mode(self): """Return the preset mode if on.""" return self._preset_mode @property def preset_modes(self): """Return the list of available preset modes.""" return self._preset_modes @property def swing_mode(self): """Return swing operation.""" return self._swing_mode @property def swing_modes(self): """Return the list of available fan modes.""" 
return self._swing_modes @property def min_temp(self): """Return the minimum temperature.""" if self._temperature_unit == TEMP_CELSIUS: return MIN_TEMP_C return MIN_TEMP_F @property def max_temp(self): """Return the maximum temperature.""" if self._temperature_unit == TEMP_CELSIUS: return MAX_TEMP_C return MAX_TEMP_F @property def precision(self): """Return the precision of the system.""" return PRECISION_WHOLE @property def target_temperature_step(self): """Return the supported step of target temperature.""" return 1 @property def supported_features(self): """Return the list of supported features.""" return SUPPORT_FLAGS async def async_set_temperature(self, **kwargs): """Set new target temperatures.""" if self._on != "1": _LOGGER.warning( "AC at %s is off, could not set temperature", self._unique_id ) return temp = kwargs.get(ATTR_TEMPERATURE) if temp is not None: _LOGGER.debug("Setting temp of %s to %s", self._unique_id, temp) if self._preset_mode != PRESET_NONE: await self.async_set_preset_mode(PRESET_NONE) if self._temperature_unit == TEMP_CELSIUS: await self._device.command(f"temp_{int(temp)}_C") else: await self._device.command(f"temp_{int(temp)}_F") async def async_set_fan_mode(self, fan_mode): """Set new fan mode.""" if self._on != "1": _LOGGER.warning("AC at %s is off, could not set fan mode", self._unique_id) return if self._hvac_mode in (HVAC_MODE_COOL, HVAC_MODE_FAN_ONLY) and ( self._hvac_mode != HVAC_MODE_FAN_ONLY or fan_mode != FAN_AUTO ): _LOGGER.debug("Setting fan mode of %s to %s", self._unique_id, fan_mode) await self._device.command(HA_FAN_MODES_TO_AC[fan_mode]) async def async_set_swing_mode(self, swing_mode): """Set new target swing operation.""" if self._on != "1": _LOGGER.warning( "AC at %s is off, could not set swing mode", self._unique_id ) return _LOGGER.debug("Setting swing mode of %s to %s", self._unique_id, swing_mode) swing_act = self._swing_mode if swing_mode == SWING_OFF and swing_act != SWING_OFF: if swing_act in (SWING_HORIZONTAL, 
SWING_BOTH): await self._device.command("hor_dir") if swing_act in (SWING_VERTICAL, SWING_BOTH): await self._device.command("vert_dir") if swing_mode == SWING_BOTH and swing_act != SWING_BOTH: if swing_act in (SWING_OFF, SWING_HORIZONTAL): await self._device.command("vert_swing") if swing_act in (SWING_OFF, SWING_VERTICAL): await self._device.command("hor_swing") if swing_mode == SWING_VERTICAL and swing_act != SWING_VERTICAL: if swing_act in (SWING_OFF, SWING_HORIZONTAL): await self._device.command("vert_swing") if swing_act in (SWING_BOTH, SWING_HORIZONTAL): await self._device.command("hor_dir") if swing_mode == SWING_HORIZONTAL and swing_act != SWING_HORIZONTAL: if swing_act in (SWING_BOTH, SWING_VERTICAL): await self._device.command("vert_dir") if swing_act in (SWING_OFF, SWING_VERTICAL): await self._device.command("hor_swing") async def async_set_preset_mode(self, preset_mode): """Set new preset mode.""" if self._on != "1": if preset_mode == PRESET_NONE: return await self.async_turn_on() _LOGGER.debug("Setting preset mode of %s to %s", self._unique_id, preset_mode) if preset_mode == PRESET_ECO: await self._device.command("energysave_on") self._previous_state = preset_mode elif preset_mode == PRESET_BOOST: await self._device.command("turbo_on") self._previous_state = preset_mode elif preset_mode == PRESET_SLEEP: await self._device.command("sleep_1") self._previous_state = self._hvac_mode elif preset_mode == "sleep_2": await self._device.command("sleep_2") self._previous_state = self._hvac_mode elif preset_mode == "sleep_3": await self._device.command("sleep_3") self._previous_state = self._hvac_mode elif preset_mode == "sleep_4": await self._device.command("sleep_4") self._previous_state = self._hvac_mode elif self._previous_state is not None: if self._previous_state == PRESET_ECO: await self._device.command("energysave_off") elif self._previous_state == PRESET_BOOST: await self._device.command("turbo_off") elif self._previous_state in HA_STATE_TO_AC: await 
self._device.command(HA_STATE_TO_AC[self._previous_state]) self._previous_state = None async def async_set_hvac_mode(self, hvac_mode): """Set new operation mode.""" _LOGGER.debug("Setting operation mode of %s to %s", self._unique_id, hvac_mode) if hvac_mode == HVAC_MODE_OFF: await self.async_turn_off() else: await self._device.command(HA_STATE_TO_AC[hvac_mode]) if self._on != "1": await self.async_turn_on() async def async_turn_on(self): """Turn on.""" _LOGGER.debug("Turning %s on", self._unique_id) await self._device.command("on") async def async_turn_off(self): """Turn off.""" _LOGGER.debug("Turning %s off", self._unique_id) await self._device.command("off")
tear44/Cinnamon
refs/heads/master
docs/search-providers-examples/apt@cinnamon.org/search_provider.py
21
# -*- coding=utf-8 -*-
"""Cinnamon search provider that queries the APT package cache.

Reads the search terms from ``sys.argv[1]`` (space-separated), runs
``apt-cache search`` and prints up to ten results as a JSON list of
``{id, label, description}`` dicts on stdout.

Fixes over the original: the code was Python-2 only (``print`` statement)
and, under Python 3, ``subprocess.check_output`` returns ``bytes`` while the
parsing code operates on ``str``; decoding is now explicit.
"""
import subprocess
import sys
import gettext
import json

# Installs the _() translation function as a builtin.
gettext.install("cinnamon", "/usr/share/locale")


def split_package_line(line):
    """Split one ``apt-cache search`` output line into (name, description).

    Lines look like ``name - description``; only the first ``" - "`` is the
    separator (descriptions may themselves contain ``" - "``).
    Raises ValueError if the separator is absent, as the original did.
    """
    sep = line.index(" - ")
    return line[:sep], line[sep + 3:]


def search_packages(terms):
    """Return up to 10 result dicts for the given list of search terms."""
    output = subprocess.check_output(
        ["apt-cache", "search"] + terms,
        universal_newlines=True,  # decode bytes -> str on Python 3
    )
    results = []
    for line in output.splitlines()[:10]:
        name, description = split_package_line(line)
        results.append({
            'id': name,
            'label': _("Install package : ") + name,
            'description': description,
        })
    return results


if __name__ == "__main__":
    print(json.dumps(search_packages(sys.argv[1].split(" "))))
greyfenrir/taurus
refs/heads/master
scripts/installer/bzt_win.py
1
#!python3.6
"""Bootstrap shell for the Taurus (bzt) Windows installer."""
import os
import sys

# Make the bundled dependency packages importable.
sys.path.insert(0, 'pkgs')

# Put the interpreter's Scripts directory on PATH so `bzt` resolves in cmd.
_scripts_dir = os.path.join(os.path.dirname(sys.executable), 'Scripts')
os.environ["PATH"] += os.pathsep + _scripts_dir


def main():
    """Show bzt usage and leave the user in an interactive cmd shell."""
    sys.exit(os.system("cmd /k bzt --help"))
fitnr/twitter_markov
refs/heads/master
twitter_markov/__main__.py
1
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright 2014-2016 Neil Freeman contact@fakeisthenewreal.org # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from __future__ import unicode_literals, print_function import os import sys from signal import signal, SIGPIPE, SIG_DFL import argparse import twitter_bot_utils as tbu from . import TwitterMarkov from . import checking from . import __version__ as version TWEETER_DESC = 'Post markov chain ("ebooks") tweets to Twitter' LEARNER_DESC = 'Turn a twitter archive into a twitter_markov-ready text file' def main(): parser = argparse.ArgumentParser( 'twittermarkov', description='Tweet with a markov bot, or teach it from a twitter archive.') tbu.args.add_default_args(parser, version, ()) subparsers = parser.add_subparsers() tweeter = subparsers.add_parser('tweet', description=TWEETER_DESC, usage='%(prog)s [options]') tbu.args.add_default_args(tweeter, include=('user', 'config', 'dry-run', 'verbose', 'quiet')) tweeter.add_argument('-r', '--reply', action='store_const', const='reply', dest='action', help='tweet responses to recent mentions') tweeter.add_argument('--corpus', dest='corpus', metavar='corpus', type=str, help='text file, one sentence per line') tweeter.add_argument('--max-len', type=int, default=140, help='maximum output length. default: 140') tweeter.add_argument('--state-size', type=int, help='model state size. 
default: 2') tweeter.add_argument('--no-learn', dest='learn', action='store_false', help='skip learning (by default, recent tweets from the "parent" account are added to corpus)') tweeter.set_defaults(subparser='tweet', func=tweet_func, action='tweet') learner = subparsers.add_parser('corpus', description=LEARNER_DESC, usage="%(prog)s [options] archive corpus") learner.add_argument('-o', type=str, dest='output', metavar='corpus', help='output text file (defaults to stdout)', default='/dev/stdout') learner.add_argument('--no-retweets', action='store_true', help='skip retweets') learner.add_argument('--no-replies', action='store_true', help='filter out replies') learner.add_argument('--no-mentions', action='store_true', help='filter out mentions') learner.add_argument('--no-urls', action='store_true', help='filter out urls') learner.add_argument('--no-media', action='store_true', help='filter out media') learner.add_argument('--no-hashtags', action='store_true', help='filter out hashtags') learner.add_argument('-q', '--quiet', action='store_true', help='run quietly') learner.add_argument('archive', type=str, metavar='archive', default=os.getcwd(), help='archive csv file (e.g. 
tweets.csv found in Twitter archive)') learner.set_defaults(subparser='learn', func=learn_func, action='learn') args = parser.parse_args() try: func = args.func except AttributeError: parser.parse_args(['--help']) argdict = vars(args) del argdict['func'] if args.subparser == 'tweet': func(**argdict) elif args.subparser == 'learn': func(**argdict) def tweet_func(action, max_len=None, **kwargs): tm = TwitterMarkov(**kwargs) try: if action == 'tweet': tm.log.debug('tweeting') tm.tweet(max_len=max_len) elif action == 'reply': tm.log.debug('replying') tm.reply_all(max_len=max_len) except RuntimeError: tm.log.error('model was unable to compose a tweet') return def learn_func(**kwargs): if not kwargs['quiet']: print("Reading " + kwargs['archive'], file=sys.stderr) archive = tbu.archive.read_csv(kwargs.get('archive')) gen = checking.generator(archive, **kwargs) tweets = (tweet.replace(u'\n', u' ') + '\n' for tweet in gen) if kwargs['output'] in ('-', '/dev/stdout'): signal(SIGPIPE, SIG_DFL) sys.stdout.writelines(tweets) else: if not kwargs['quiet']: print("Writing " + kwargs['output'], file=sys.stderr) with open(kwargs.get('output'), 'w') as f: f.writelines(tweets) if __name__ == '__main__': main()
imitrichev/cantera
refs/heads/master
interfaces/cython/cantera/mixmaster/ThermoProp.py
4
import sys
if sys.version_info[0] == 3:
    from tkinter import *
else:
    from Tkinter import *
from .UnitChooser import UnitVar

# Property names grouped by kind: one of each group must be selected.
_tv = ['Temperature', 'Internal Energy', 'Enthalpy']
_pv = ['Pressure', 'Density']


def badpair(a, b):
    """Return 1 when properties *a* and *b* cannot be selected together.

    A thermal property (``_tv``) must be paired with a pressure/density
    property (``_pv``) and vice versa; returns None for a valid pair.
    """
    if a.name in _tv:
        if b.name not in _pv:
            return 1
    else:
        if b.name not in _tv:
            return 1


class ThermoProp:
    """One selectable thermodynamic property row (checkbox + unit entry)."""

    def __init__(self, master, thermoframe, row, name, value, units,
                 defaultunit=0):
        self.value = DoubleVar()
        self.thermoframe = thermoframe
        self.name = name
        # Unit-aware entry widget; starts disabled/greyed until checked.
        self.entry = UnitVar(master, units, defaultunit)
        self.entry.grid(column=1, row=row, sticky=W)
        self.entry.v.config(state=DISABLED, bg='lightgray')
        # Checkbox state: 1 when this property is one of the two selected.
        self.checked = IntVar()
        self.checked.set(0)
        self.c = Checkbutton(master, text=name, variable=self.checked,
                             onvalue=1, offvalue=0, command=self.check)
        self.c.grid(column=0, row=row, sticky=W + N)

    def check(self):
        """Handle a checkbox toggle, keeping exactly two rows selected.

        The parent frame remembers the last two checked rows (last1/last2);
        checking a third row unchecks the older of the two.
        """
        if self == self.thermoframe.last1:
            # Re-checking the most recent selection: keep it checked.
            self.checked.set(1)
            return
        if self == self.thermoframe.last2:
            # Promote the second-most-recent selection to most recent.
            self.checked.set(1)
            self.thermoframe.last2 = self.thermoframe.last1
            self.thermoframe.last1 = self
            return
        # New selection: enable this row, disable the oldest one.
        self._check()
        self.thermoframe.last2.checked.set(0)
        self.thermoframe.last2._check()
        self.thermoframe.last2 = self.thermoframe.last1
        self.thermoframe.last1 = self

    def _check(self):
        """Sync the entry widget's enabled state with the checkbox."""
        if self.isChecked():
            self.entry.v.config(state=NORMAL, bg='white')
        else:
            self.entry.v.config(state=DISABLED, bg='lightgray')

    def isChecked(self):
        return self.checked.get()

    def set(self, value):
        self.entry.set(value)

    def get(self):
        return self.entry.get()
lin-credible/scikit-learn
refs/heads/master
sklearn/manifold/t_sne.py
106
# Author: Alexander Fabisch  -- <afabisch@informatik.uni-bremen.de>
# License: BSD 3 clause (C) 2014

# This is the standard t-SNE implementation. There are faster modifications of
# the algorithm:
# * Barnes-Hut-SNE: reduces the complexity of the gradient computation from
#   N^2 to N log N (http://arxiv.org/abs/1301.3342)
# * Fast Optimization for t-SNE:
#   http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf

import numpy as np
from scipy import linalg
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform

from ..base import BaseEstimator
from ..utils import check_array
from ..utils import check_random_state
from ..utils.extmath import _ravel
from ..decomposition import RandomizedPCA
from ..metrics.pairwise import pairwise_distances
from . import _utils


MACHINE_EPSILON = np.finfo(np.double).eps


def _joint_probabilities(distances, desired_perplexity, verbose):
    """Compute joint probabilities p_ij from distances.

    Parameters
    ----------
    distances : array, shape (n_samples * (n_samples-1) / 2,)
        Distances of samples are stored as condensed matrices, i.e.
        we omit the diagonal and duplicate entries and store everything
        in a one-dimensional array.

    desired_perplexity : float
        Desired perplexity of the joint probability distributions.

    verbose : int
        Verbosity level.

    Returns
    -------
    P : array, shape (n_samples * (n_samples-1) / 2,)
        Condensed joint probability matrix.
    """
    # Compute conditional probabilities such that they approximately match
    # the desired perplexity
    conditional_P = _utils._binary_search_perplexity(
        distances, desired_perplexity, verbose)
    # Symmetrize and normalize; clamp to MACHINE_EPSILON to avoid log(0)
    # and division by zero downstream.
    P = conditional_P + conditional_P.T
    sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
    P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
    return P


def _kl_divergence(params, P, alpha, n_samples, n_components):
    """t-SNE objective function: KL divergence of p_ijs and q_ijs.

    Parameters
    ----------
    params : array, shape (n_params,)
        Unraveled embedding.

    P : array, shape (n_samples * (n_samples-1) / 2,)
        Condensed joint probability matrix.

    alpha : float
        Degrees of freedom of the Student's-t distribution.

    n_samples : int
        Number of samples.

    n_components : int
        Dimension of the embedded space.

    Returns
    -------
    kl_divergence : float
        Kullback-Leibler divergence of p_ij and q_ij.

    grad : array, shape (n_params,)
        Unraveled gradient of the Kullback-Leibler divergence with respect to
        the embedding.
    """
    X_embedded = params.reshape(n_samples, n_components)

    # Q is a heavy-tailed distribution: Student's t-distribution
    n = pdist(X_embedded, "sqeuclidean")
    n += 1.
    n /= alpha
    n **= (alpha + 1.0) / -2.0
    Q = np.maximum(n / (2.0 * np.sum(n)), MACHINE_EPSILON)

    # Optimization trick below: np.dot(x, y) is faster than
    # np.sum(x * y) because it calls BLAS

    # Objective: C (Kullback-Leibler divergence of P and Q)
    kl_divergence = 2.0 * np.dot(P, np.log(P / Q))

    # Gradient: dC/dY
    grad = np.ndarray((n_samples, n_components))
    PQd = squareform((P - Q) * n)
    for i in range(n_samples):
        np.dot(_ravel(PQd[i]), X_embedded[i] - X_embedded, out=grad[i])
    grad = grad.ravel()
    c = 2.0 * (alpha + 1.0) / alpha
    grad *= c

    return kl_divergence, grad


def _gradient_descent(objective, p0, it, n_iter, n_iter_without_progress=30,
                      momentum=0.5, learning_rate=1000.0, min_gain=0.01,
                      min_grad_norm=1e-7, min_error_diff=1e-7, verbose=0,
                      args=None):
    """Batch gradient descent with momentum and individual gains.

    Parameters
    ----------
    objective : function or callable
        Should return a tuple of cost and gradient for a given parameter
        vector.

    p0 : array-like, shape (n_params,)
        Initial parameter vector.

    it : int
        Current number of iterations (this function will be called more than
        once during the optimization).

    n_iter : int
        Maximum number of gradient descent iterations.

    n_iter_without_progress : int, optional (default: 30)
        Maximum number of iterations without progress before we abort the
        optimization.

    momentum : float, within (0.0, 1.0), optional (default: 0.5)
        The momentum generates a weight for previous gradients that decays
        exponentially.

    learning_rate : float, optional (default: 1000.0)
        The learning rate should be extremely high for t-SNE! Values in the
        range [100.0, 1000.0] are common.

    min_gain : float, optional (default: 0.01)
        Minimum individual gain for each parameter.

    min_grad_norm : float, optional (default: 1e-7)
        If the gradient norm is below this threshold, the optimization will
        be aborted.

    min_error_diff : float, optional (default: 1e-7)
        If the absolute difference of two successive cost function values
        is below this threshold, the optimization will be aborted.

    verbose : int, optional (default: 0)
        Verbosity level.

    args : sequence
        Arguments to pass to objective function.

    Returns
    -------
    p : array, shape (n_params,)
        Optimum parameters.

    error : float
        Optimum.

    i : int
        Last iteration.
    """
    if args is None:
        args = []

    p = p0.copy().ravel()
    update = np.zeros_like(p)
    gains = np.ones_like(p)
    error = np.finfo(np.float).max
    best_error = np.finfo(np.float).max
    best_iter = 0

    for i in range(it, n_iter):
        new_error, grad = objective(p, *args)
        error_diff = np.abs(new_error - error)
        error = new_error
        grad_norm = linalg.norm(grad)

        if error < best_error:
            best_error = error
            best_iter = i
        elif i - best_iter > n_iter_without_progress:
            if verbose >= 2:
                print("[t-SNE] Iteration %d: did not make any progress "
                      "during the last %d episodes. Finished."
                      % (i + 1, n_iter_without_progress))
            break
        if min_grad_norm >= grad_norm:
            if verbose >= 2:
                print("[t-SNE] Iteration %d: gradient norm %f. Finished."
                      % (i + 1, grad_norm))
            break
        if min_error_diff >= error_diff:
            if verbose >= 2:
                print("[t-SNE] Iteration %d: error difference %f. Finished."
                      % (i + 1, error_diff))
            break

        # Increase gain for parameters whose update and gradient agree in
        # sign, shrink it otherwise.
        inc = update * grad >= 0.0
        dec = np.invert(inc)
        gains[inc] += 0.05
        gains[dec] *= 0.95
        # BUGFIX: clip in place. The original called np.clip without out=,
        # discarding the result, so gains were never bounded below by
        # min_gain.
        np.clip(gains, min_gain, np.inf, out=gains)
        grad *= gains
        update = momentum * update - learning_rate * grad
        p += update

        if verbose >= 2 and (i + 1) % 10 == 0:
            print("[t-SNE] Iteration %d: error = %.7f, gradient norm = %.7f"
                  % (i + 1, error, grad_norm))

    return p, error, i


def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False):
    r"""Expresses to what extent the local structure is retained.

    The trustworthiness is within [0, 1]. It is defined as

    .. math::

        T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
            \sum_{j \in U^{(k)}_i (r(i, j) - k)}

    where :math:`r(i, j)` is the rank of the embedded datapoint j
    according to the pairwise distances between the embedded datapoints,
    :math:`U^{(k)}_i` is the set of points that are in the k nearest
    neighbors in the embedded space but not in the original space.

    * "Neighborhood Preservation in Nonlinear Projection Methods: An
      Experimental Study"
      J. Venna, S. Kaski
    * "Learning a Parametric Embedding by Preserving Local Structure"
      L.J.P. van der Maaten

    Parameters
    ----------
    X : array, shape (n_samples, n_features) or (n_samples, n_samples)
        If the metric is 'precomputed' X must be a square distance
        matrix. Otherwise it contains a sample per row.

    X_embedded : array, shape (n_samples, n_components)
        Embedding of the training data in low-dimensional space.

    n_neighbors : int, optional (default: 5)
        Number of neighbors k that will be considered.

    precomputed : bool, optional (default: False)
        Set this flag if X is a precomputed square distance matrix.

    Returns
    -------
    trustworthiness : float
        Trustworthiness of the low-dimensional embedding.
    """
    if precomputed:
        dist_X = X
    else:
        dist_X = pairwise_distances(X, squared=True)
    dist_X_embedded = pairwise_distances(X_embedded, squared=True)
    ind_X = np.argsort(dist_X, axis=1)
    ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1]

    n_samples = X.shape[0]
    t = 0.0
    ranks = np.zeros(n_neighbors)
    for i in range(n_samples):
        for j in range(n_neighbors):
            ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0]
        ranks -= n_neighbors
        t += np.sum(ranks[ranks > 0])
    t = 1.0 - t * (2.0 / (n_samples * n_neighbors *
                          (2.0 * n_samples - 3.0 * n_neighbors - 1.0)))
    return t


class TSNE(BaseEstimator):
    """t-distributed Stochastic Neighbor Embedding.

    t-SNE [1] is a tool to visualize high-dimensional data. It converts
    similarities between data points to joint probabilities and tries
    to minimize the Kullback-Leibler divergence between the joint
    probabilities of the low-dimensional embedding and the
    high-dimensional data. t-SNE has a cost function that is not convex,
    i.e. with different initializations we can get different results.

    It is highly recommended to use another dimensionality reduction
    method (e.g. PCA for dense data or TruncatedSVD for sparse data)
    to reduce the number of dimensions to a reasonable amount (e.g. 50)
    if the number of features is very high. This will suppress some
    noise and speed up the computation of pairwise distances between
    samples. For more tips see Laurens van der Maaten's FAQ [2].

    Read more in the :ref:`User Guide <t_sne>`.

    Parameters
    ----------
    n_components : int, optional (default: 2)
        Dimension of the embedded space.

    perplexity : float, optional (default: 30)
        The perplexity is related to the number of nearest neighbors that
        is used in other manifold learning algorithms. Larger datasets
        usually require a larger perplexity. Consider selecting a value
        between 5 and 50. The choice is not extremely critical since t-SNE
        is quite insensitive to this parameter.

    early_exaggeration : float, optional (default: 4.0)
        Controls how tight natural clusters in the original space are in
        the embedded space and how much space will be between them. For
        larger values, the space between natural clusters will be larger
        in the embedded space. Again, the choice of this parameter is not
        very critical. If the cost function increases during initial
        optimization, the early exaggeration factor or the learning rate
        might be too high.

    learning_rate : float, optional (default: 1000)
        The learning rate can be a critical parameter. It should be
        between 100 and 1000. If the cost function increases during initial
        optimization, the early exaggeration factor or the learning rate
        might be too high. If the cost function gets stuck in a bad local
        minimum increasing the learning rate helps sometimes.

    n_iter : int, optional (default: 1000)
        Maximum number of iterations for the optimization. Should be at
        least 200.

    metric : string or callable, optional
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        allowed by scipy.spatial.distance.pdist for its metric parameter, or
        a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
        If metric is "precomputed", X is assumed to be a distance matrix.
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The
        callable should take two arrays from X as input and return a value
        indicating the distance between them. The default is "euclidean"
        which is interpreted as squared euclidean distance.

    init : string, optional (default: "random")
        Initialization of embedding. Possible options are 'random' and 'pca'.
        PCA initialization cannot be used with precomputed distances and is
        usually more globally stable than random initialization.

    verbose : int, optional (default: 0)
        Verbosity level.

    random_state : int or RandomState instance or None (default)
        Pseudo Random Number generator seed control. If None, use the
        numpy.random singleton. Note that different initializations
        might result in different local minima of the cost function.

    Attributes
    ----------
    embedding_ : array-like, shape (n_samples, n_components)
        Stores the embedding vectors.

    training_data_ : array-like, shape (n_samples, n_features)
        Stores the training data.

    Examples
    --------

    >>> import numpy as np
    >>> from sklearn.manifold import TSNE
    >>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
    >>> model = TSNE(n_components=2, random_state=0)
    >>> model.fit_transform(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    array([[  887.28...,   238.61...],
           [ -714.79...,  3243.34...],
           [  957.30..., -2505.78...],
           [-1130.28...,  -974.78...]])

    References
    ----------

    [1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
        Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.

    [2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
        http://homepage.tudelft.nl/19j49/t-SNE.html
    """

    def __init__(self, n_components=2, perplexity=30.0,
                 early_exaggeration=4.0, learning_rate=1000.0, n_iter=1000,
                 metric="euclidean", init="random", verbose=0,
                 random_state=None):
        if init not in ["pca", "random"]:
            raise ValueError("'init' must be either 'pca' or 'random'")
        self.n_components = n_components
        self.perplexity = perplexity
        self.early_exaggeration = early_exaggeration
        self.learning_rate = learning_rate
        self.n_iter = n_iter
        self.metric = metric
        self.init = init
        self.verbose = verbose
        self.random_state = random_state

    def fit(self, X, y=None):
        """Fit the model using X as training data.

        Parameters
        ----------
        X : array, shape (n_samples, n_features) or (n_samples, n_samples)
            If the metric is 'precomputed' X must be a square distance
            matrix. Otherwise it contains a sample per row.
        """
        X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
                        dtype=np.float64)
        random_state = check_random_state(self.random_state)

        if self.early_exaggeration < 1.0:
            raise ValueError("early_exaggeration must be at least 1, but is "
                             "%f" % self.early_exaggeration)

        if self.n_iter < 200:
            raise ValueError("n_iter should be at least 200")

        if self.metric == "precomputed":
            if self.init == 'pca':
                raise ValueError("The parameter init=\"pca\" cannot be used "
                                 "with metric=\"precomputed\".")
            if X.shape[0] != X.shape[1]:
                raise ValueError("X should be a square distance matrix")
            distances = X
        else:
            if self.verbose:
                print("[t-SNE] Computing pairwise distances...")

            if self.metric == "euclidean":
                distances = pairwise_distances(X, metric=self.metric,
                                               squared=True)
            else:
                distances = pairwise_distances(X, metric=self.metric)

        # Degrees of freedom of the Student's t-distribution. The suggestion
        # alpha = n_components - 1 comes from "Learning a Parametric Embedding
        # by Preserving Local Structure" Laurens van der Maaten, 2009.
        alpha = max(self.n_components - 1.0, 1)
        n_samples = X.shape[0]
        self.training_data_ = X

        P = _joint_probabilities(distances, self.perplexity, self.verbose)

        if self.init == 'pca':
            pca = RandomizedPCA(n_components=self.n_components,
                                random_state=random_state)
            X_embedded = pca.fit_transform(X)
        elif self.init == 'random':
            X_embedded = None
        else:
            raise ValueError("Unsupported initialization scheme: %s"
                             % self.init)

        self.embedding_ = self._tsne(P, alpha, n_samples, random_state,
                                     X_embedded=X_embedded)

        return self

    def _tsne(self, P, alpha, n_samples, random_state, X_embedded=None):
        """Runs t-SNE."""
        # t-SNE minimizes the Kullback-Leibler divergence of the Gaussians P
        # and the Student's t-distributions Q. The optimization algorithm that
        # we use is batch gradient descent with three stages:
        # * early exaggeration with momentum 0.5
        # * early exaggeration with momentum 0.8
        # * final optimization with momentum 0.8
        # The embedding is initialized with iid samples from Gaussians with
        # standard deviation 1e-4.

        if X_embedded is None:
            # Initialize embedding randomly
            X_embedded = 1e-4 * random_state.randn(n_samples,
                                                   self.n_components)
        params = X_embedded.ravel()

        # Early exaggeration
        P *= self.early_exaggeration
        params, error, it = _gradient_descent(
            _kl_divergence, params, it=0, n_iter=50, momentum=0.5,
            min_grad_norm=0.0, min_error_diff=0.0,
            learning_rate=self.learning_rate, verbose=self.verbose,
            args=[P, alpha, n_samples, self.n_components])
        params, error, it = _gradient_descent(
            _kl_divergence, params, it=it + 1, n_iter=100, momentum=0.8,
            min_grad_norm=0.0, min_error_diff=0.0,
            learning_rate=self.learning_rate, verbose=self.verbose,
            args=[P, alpha, n_samples, self.n_components])
        if self.verbose:
            print("[t-SNE] Error after %d iterations with early "
                  "exaggeration: %f" % (it + 1, error))

        # Final optimization
        P /= self.early_exaggeration
        params, error, it = _gradient_descent(
            _kl_divergence, params, it=it + 1, n_iter=self.n_iter,
            momentum=0.8, learning_rate=self.learning_rate,
            verbose=self.verbose,
            args=[P, alpha, n_samples, self.n_components])
        if self.verbose:
            print("[t-SNE] Error after %d iterations: %f" % (it + 1, error))

        X_embedded = params.reshape(n_samples, self.n_components)

        return X_embedded

    def fit_transform(self, X, y=None):
        """Transform X to the embedded space.

        Parameters
        ----------
        X : array, shape (n_samples, n_features) or (n_samples, n_samples)
            If the metric is 'precomputed' X must be a square distance
            matrix. Otherwise it contains a sample per row.

        Returns
        -------
        X_new : array, shape (n_samples, n_components)
            Embedding of the training data in low-dimensional space.
        """
        self.fit(X)
        return self.embedding_
SofiaReis/django-cms
refs/heads/develop
menus/menu_pool.py
23
# -*- coding: utf-8 -*- from logging import getLogger from django.conf import settings from django.contrib import messages from django.contrib.sites.models import Site from django.core.cache import cache from django.core.exceptions import ValidationError from django.core.urlresolvers import NoReverseMatch from django.utils.translation import get_language from django.utils.translation import ugettext_lazy as _ from cms.utils import get_cms_setting from cms.utils.django_load import load from menus.base import Menu from menus.exceptions import NamespaceAlreadyRegistered from menus.models import CacheKey import copy logger = getLogger('menus') def _build_nodes_inner_for_one_menu(nodes, menu_class_name): ''' This is an easier to test "inner loop" building the menu tree structure for one menu (one language, one site) ''' done_nodes = {} # Dict of node.id:Node final_nodes = [] # This is to prevent infinite loops - we need to compare the number of # times we see a specific node to "something", and for the time being, # it's the total number of nodes list_total_length = len(nodes) while nodes: # For when the node has a parent_id but we haven't seen it yet. # We must not append it to the final list in this case! should_add_to_final_list = True node = nodes.pop(0) # Increment the "seen" counter for this specific node. node._counter = getattr(node, '_counter', 0) + 1 # Implicit namespacing by menu.__name__ if not node.namespace: node.namespace = menu_class_name if node.namespace not in done_nodes: # We need to create the namespace dict to avoid KeyErrors done_nodes[node.namespace] = {} # If we have seen the parent_id already... if node.parent_id in done_nodes[node.namespace]: # Implicit parent namespace by menu.__name__ if not node.parent_namespace: node.parent_namespace = menu_class_name parent = done_nodes[node.namespace][node.parent_id] parent.children.append(node) node.parent = parent # If it has a parent_id but we haven't seen it yet... 
elif node.parent_id: # We check for infinite loops here, by comparing the number of # times we "saw" this node to the number of nodes in the list if node._counter < list_total_length: nodes.append(node) # Never add this node to the final list until it has a real # parent (node.parent) should_add_to_final_list = False if should_add_to_final_list: final_nodes.append(node) # add it to the "seen" list done_nodes[node.namespace][node.id] = node return final_nodes class MenuPool(object): def __init__(self): self.menus = {} self.modifiers = [] self.discovered = False self._expanded = False def discover_menus(self): if self.discovered: return # FIXME: Remove in 3.4 load('menu') load('cms_menus') from menus.modifiers import register register() self.discovered = True self._expanded = False def _expand_menus(self): """ Expands the menu_pool by converting any found CMSAttachMenu entries to one entry for each instance they are attached to. and instantiates menus from the existing menu classes. """ # Ideally, this would have been done in discover_menus(), but the pages # aren't loaded when that executes. This private method is used to # perform the expansion and instantiate the menus classes into menu- # instances just before any menus are built. if self._expanded: return expanded_menus = {} for menu_class_name, menu_cls in self.menus.items(): # In order to be eligible for "expansion", the menu_cls must, in # fact, be an instantiable class. We are lenient about this here, # though, because the CMS has previously allowed attaching # CMSAttachMenu's as objects rather than classes. if isinstance(menu_cls, Menu): # A Menu **instance** was registered, this is non-standard, but # acceptable. However, it cannot be "expanded", so, just add it # as-is to the list of expanded_menus. menu_cls = menu_cls.__class__ if hasattr(menu_cls, "get_instances"): # It quacks like a CMSAttachMenu, expand away! 
# If a menu exists but has no instances, # it's included in the available menus as is instances = menu_cls.get_instances() if not instances: expanded_menus[menu_class_name] = menu_cls() else: for instance in instances: namespace = "{0}:{1}".format( menu_class_name, instance.pk) menu_inst = menu_cls() menu_inst.instance = instance expanded_menus[namespace] = menu_inst elif hasattr(menu_cls, "get_nodes"): # This is another type of Menu, cannot be expanded, but must be # instantiated, none-the-less. expanded_menus[menu_class_name] = menu_cls() else: raise ValidationError( "Something was registered as a menu, but isn't.") self._expanded = True self.menus = expanded_menus def clear(self, site_id=None, language=None, all=False): ''' This invalidates the cache for a given menu (site_id and language) ''' if all: cache_keys = CacheKey.objects.get_keys() else: cache_keys = CacheKey.objects.get_keys(site_id, language) to_be_deleted = cache_keys.distinct().values_list('key', flat=True) if to_be_deleted: cache.delete_many(to_be_deleted) cache_keys.delete() def register_menu(self, menu_cls): import warnings if menu_cls.__module__.split('.')[-1] == 'menu': warnings.warn('menu.py filename is deprecated, ' 'and it will be removed in version 3.4; ' 'please rename it to cms_menus.py', DeprecationWarning) from menus.base import Menu assert issubclass(menu_cls, Menu) # If we should register a menu after we've already expanded the existing # ones, we need to mark it as such. self._expanded = False if menu_cls.__name__ in self.menus.keys(): raise NamespaceAlreadyRegistered( "[{0}] a menu with this name is already registered".format( menu_cls.__name__)) # Note: menu_cls should still be the menu CLASS at this point. It will # be instantiated in self._expand_menus(). 
self.menus[menu_cls.__name__] = menu_cls def register_modifier(self, modifier_class): import os import inspect import warnings source_file = os.path.basename(inspect.stack()[1][1]) if source_file == 'menu.py': warnings.warn('menu.py filename is deprecated, ' 'and it will be removed in version 3.4; ' 'please rename it to cms_menus.py', DeprecationWarning) from menus.base import Modifier assert issubclass(modifier_class, Modifier) if modifier_class not in self.modifiers: self.modifiers.append(modifier_class) def _build_nodes(self, request, site_id): """ This is slow. Caching must be used. One menu is built per language and per site. Namespaces: they are ID prefixes to avoid node ID clashes when plugging multiple trees together. - We iterate on the list of nodes. - We store encountered nodes in a dict (with namespaces): done_nodes[<namespace>][<node's id>] = node - When a node has a parent defined, we lookup that parent in done_nodes if it's found: set the node as the node's parent's child (re-read this) else: the node is put at the bottom of the list """ # Before we do anything, make sure that the menus are expanded. self._expand_menus() # Cache key management lang = get_language() prefix = getattr(settings, "CMS_CACHE_PREFIX", "menu_cache_") key = "%smenu_nodes_%s_%s" % (prefix, lang, site_id) if request.user.is_authenticated(): key += "_%s_user" % request.user.pk cached_nodes = cache.get(key, None) if cached_nodes: return cached_nodes final_nodes = [] for menu_class_name in self.menus: menu = self.menus[menu_class_name] try: if isinstance(menu, type): menu = menu() nodes = menu.get_nodes(request) except NoReverseMatch: # Apps might raise NoReverseMatch if an apphook does not yet # exist, skip them instead of crashing nodes = [] toolbar = getattr(request, 'toolbar', None) if toolbar and toolbar.is_staff: messages.error(request, _('Menu %s cannot be loaded. 
Please, make sure all ' 'its urls exist and can be resolved.') % menu_class_name) logger.error("Menu %s could not be loaded." % menu_class_name, exc_info=True) # nodes is a list of navigation nodes (page tree in cms + others) final_nodes += _build_nodes_inner_for_one_menu( nodes, menu_class_name) cache.set(key, final_nodes, get_cms_setting('CACHE_DURATIONS')['menus']) # We need to have a list of the cache keys for languages and sites that # span several processes - so we follow the Django way and share through # the database. It's still cheaper than recomputing every time! # This way we can selectively invalidate per-site and per-language, # since the cache shared but the keys aren't CacheKey.objects.get_or_create(key=key, language=lang, site=site_id) return final_nodes def apply_modifiers(self, nodes, request, namespace=None, root_id=None, post_cut=False, breadcrumb=False): if not post_cut: nodes = self._mark_selected(request, nodes) for cls in self.modifiers: inst = cls() nodes = inst.modify( request, nodes, namespace, root_id, post_cut, breadcrumb) return nodes def get_nodes(self, request, namespace=None, root_id=None, site_id=None, breadcrumb=False): self.discover_menus() if not site_id: site_id = Site.objects.get_current().pk nodes = self._build_nodes(request, site_id) nodes = copy.deepcopy(nodes) nodes = self.apply_modifiers(nodes, request, namespace, root_id, post_cut=False, breadcrumb=breadcrumb) return nodes def _mark_selected(self, request, nodes): # There /may/ be two nodes that get marked with selected. A published # and a draft version of the node. We'll mark both, later, the unused # one will be removed anyway. 
sel = [] for node in nodes: node.sibling = False node.ancestor = False node.descendant = False node_abs_url = node.get_absolute_url() if node_abs_url == request.path[:len(node_abs_url)]: if sel: if len(node_abs_url) > len(sel[0].get_absolute_url()): sel = [node] elif len(node_abs_url) == len(sel[0].get_absolute_url()): sel.append(node) else: sel = [node] for node in nodes: node.selected = (node in sel) return nodes def get_menus_by_attribute(self, name, value): """ Returns the list of menus that match the name/value criteria provided. """ # Note that we are limiting the output to only single instances of any # specific menu class. This is to address issue (#4041) which has # cropped-up in 3.0.13/3.0.0. self.discover_menus() self._expand_menus() return sorted(list(set([(menu.__class__.__name__, menu.name) for menu_class_name, menu in self.menus.items() if getattr(menu, name, None) == value]))) def get_nodes_by_attribute(self, nodes, name, value): return [node for node in nodes if node.attr.get(name, None) == value] menu_pool = MenuPool()
unifycore/ryu
refs/heads/master
ryu/lib/of_config/__init__.py
7
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation. # Copyright (C) 2013 Isaku Yamahata <yamahata at private email ne jp> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import glob import os.path import sys SCHEMA_DIR = os.path.dirname(__file__) _PREFIX = 'of-config-' _SUFFIX = '.xsd' _files = glob.glob(os.path.join(SCHEMA_DIR, 'of-config-*.xsd')) OF_CONFIG_XSD_FILES = dict( (os.path.basename(f)[len(_PREFIX):-len(_SUFFIX)], f) for f in _files) # For convinience # OF_CONFIG_1_0_XSD = os.path.join(SCHEMA_DIR, 'of-config-1.0.xsd') # and so on _this_module = sys.modules[__name__] for (version, xsd_file) in OF_CONFIG_XSD_FILES.items(): setattr(_this_module, 'OF_CONFIG_%s_XSD' % version.replace('.', '_'), xsd_file) OFCONFIG_1_1_CONFIG = 'urn:onf:params:xml:ns:onf:of12:config' OFCONFIG_1_1_YANG = 'urn:onf:of12:config:yang' # LINC specific? OFCONFIG_1_1_1_YANG = 'urn:onf:of111:config:yang' OFCONFIG_YANG_NAMESPACES = { '1.1': OFCONFIG_1_1_YANG, '1.1.1': OFCONFIG_1_1_1_YANG, }
cedadev/cis
refs/heads/master
cis/test/integration/base_integration_test.py
2
from netCDF4 import Dataset import os import unittest from hamcrest import assert_that, greater_than_or_equal_to, less_than_or_equal_to, is_ from functools import reduce class BaseIntegrationTest(unittest.TestCase): OUTPUT_FILENAME = "test_integration_out.nc" def setUp(self): # Set force overwrite in case working files are still present os.environ['CIS_FORCE_OVERWRITE'] = "True" self.clean_output() def tearDown(self): # Pop off the environemnt variable os.environ.pop('CIS_FORCE_OVERWRITE') self.clean_output() def clean_output(self): if hasattr(self, 'ds') and self.ds.isopen(): self.ds.close() if os.path.exists(self.OUTPUT_FILENAME): os.remove(self.OUTPUT_FILENAME) def check_output_contains_variables(self, output_path, var_names): self.ds = Dataset(output_path) for var in var_names: try: var = self.ds.variables[var] except KeyError: raise AssertionError("Variable %s not found in output file" % var) self.ds.close() def check_output_file_variable_attribute_contains_string(self, output_path, variable, attribute, string): self.ds = Dataset(output_path) try: var = self.ds.variables[variable] except KeyError: raise AssertionError("Variable %s not found in output file" % variable) try: att_string = getattr(var, attribute) except AttributeError: raise AssertionError("Attribute %s not found in variable" % attribute) assert string in att_string self.ds.close() def check_latlon_subsetting(self, lat_max, lat_min, lon_max, lon_min, lat_var='latitude', lon_var='longitude'): self.ds = Dataset(self.OUTPUT_FILENAME) lat = self.ds.variables[lat_var][:] lon = self.ds.variables[lon_var][:] assert_that(min(lon), greater_than_or_equal_to(lon_min)) assert_that(max(lon), less_than_or_equal_to(lon_max)) assert_that(min(lat), greater_than_or_equal_to(lat_min)) assert_that(max(lat), less_than_or_equal_to(lat_max)) self.ds.close() def check_alt_subsetting(self, alt_max, alt_min): self.ds = Dataset(self.OUTPUT_FILENAME) alt = self.ds.variables['altitude'][:] assert_that(min(alt), 
greater_than_or_equal_to(alt_min)) assert_that(max(alt), less_than_or_equal_to(alt_max)) self.ds.close() def check_pres_subsetting(self, pres_max, pres_min, pres_name='air_pressure'): import numpy as np self.ds = Dataset(self.OUTPUT_FILENAME) pres = self.ds.variables[pres_name][:] assert_that(np.min(pres), greater_than_or_equal_to(pres_min)) assert_that(np.max(pres), less_than_or_equal_to(pres_max)) self.ds.close() @staticmethod def _clean_sample_file_name(sample_file): import re return re.sub(r'([\\]):', r':', sample_file) def check_output_col_grid(self, sample_file, sample_var, output_file, output_vars, expected_shape=None): """ Check that the output grid matches the sample grid in shape. :param sample_file: :param sample_var: :param output_file: :param output_vars: :return: """ from cis import read_data from operator import mul if expected_shape is None: sample_shape = read_data(self._clean_sample_file_name(sample_file), sample_var).data.shape else: sample_shape = expected_shape self.ds = Dataset(self._clean_sample_file_name(output_file)) for output_var in output_vars: output_shape = self.ds.variables[output_var].shape # This copes with dims in different orders, length 1 values being taken out etc assert_that(reduce(mul, sample_shape), is_(reduce(mul, output_shape))) self.ds.close() def check_output_vars_are_different(self, output_file, output_vars): """ Check that the output variables are NOT exactly the same :param output_file: :param output_vars: :return: """ from itertools import combinations import numpy as np self.ds = Dataset(self._clean_sample_file_name(output_file)) # Loop over each possible pair of output var for a, b in combinations(output_vars, 2): a_data = self.ds.variables[a] b_data = self.ds.variables[b] assert not np.allclose(a_data, b_data) self.ds.close()
SilentCircle/sentry
refs/heads/master
src/sentry/migrations/0001_initial.py
12
# encoding: utf-8 import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'GroupedMessage' db.create_table('sentry_groupedmessage', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('logger', self.gf('django.db.models.fields.CharField')(default='root', max_length=64, db_index=True, blank=True)), ('class_name', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=128, null=True, blank=True)), ('level', self.gf('django.db.models.fields.PositiveIntegerField')(default=40, db_index=True, blank=True)), ('message', self.gf('django.db.models.fields.TextField')()), ('traceback', self.gf('django.db.models.fields.TextField')(null=True, blank=True)), ('view', self.gf('django.db.models.fields.CharField')(max_length=200, db_index=True)), ('url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)), ('server_name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), ('checksum', self.gf('django.db.models.fields.CharField')(max_length=32, db_index=True)), ('status', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)), ('times_seen', self.gf('django.db.models.fields.PositiveIntegerField')(default=1)), ('last_seen', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, db_index=True)), ('first_seen', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, db_index=True)), )) db.send_create_signal('sentry', ['GroupedMessage']) # Adding unique constraint on 'GroupedMessage', fields ['logger', 'view', 'checksum'] db.create_unique('sentry_groupedmessage', ['logger', 'view', 'checksum']) # Adding model 'Message' db.create_table('sentry_message', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('logger', self.gf('django.db.models.fields.CharField')(default='root', max_length=64, 
db_index=True, blank=True)), ('class_name', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=128, null=True, blank=True)), ('level', self.gf('django.db.models.fields.PositiveIntegerField')(default=40, db_index=True, blank=True)), ('message', self.gf('django.db.models.fields.TextField')()), ('traceback', self.gf('django.db.models.fields.TextField')(null=True, blank=True)), ('view', self.gf('django.db.models.fields.CharField')(max_length=200, db_index=True)), ('url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)), ('server_name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), ('checksum', self.gf('django.db.models.fields.CharField')(max_length=32, db_index=True)), ('datetime', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, db_index=True)), ('data', self.gf('django.db.models.fields.TextField')(null=True, blank=True)), )) db.send_create_signal('sentry', ['Message']) def backwards(self, orm): # Deleting model 'GroupedMessage' db.delete_table('sentry_groupedmessage') # Removing unique constraint on 'GroupedMessage', fields ['logger', 'view', 'checksum'] db.delete_unique('sentry_groupedmessage', ['logger', 'view', 'checksum']) # Deleting model 'Message' db.delete_table('sentry_message') models = { 'sentry.groupedmessage': { 'Meta': {'unique_together': "(('logger', 'view', 'checksum'),)", 'object_name': 'GroupedMessage'}, 'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}), 'class_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}), 'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}), 'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}), 'message': ('django.db.models.fields.TextField', [], {}), 'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}), 'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'view': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}) }, 'sentry.message': { 'Meta': {'object_name': 'Message'}, 'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}), 'class_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}), 'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}), 'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}), 'message': ('django.db.models.fields.TextField', [], {}), 'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': 
'200', 'null': 'True', 'blank': 'True'}), 'view': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}) } } complete_apps = ['sentry']
epiphany27/NewsBlur
refs/heads/master
vendor/facebook.py
16
#!/usr/bin/env python # # Copyright 2010 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Python client library for the Facebook Platform. This client library is designed to support the Graph API and the official Facebook JavaScript SDK, which is the canonical way to implement Facebook authentication. Read more about the Graph API at http://developers.facebook.com/docs/api. You can download the Facebook JavaScript SDK at http://github.com/facebook/connect-js/. If your application is using Google AppEngine's webapp framework, your usage of this module might look like this: user = facebook.get_user_from_cookie(self.request.cookies, key, secret) if user: graph = facebook.GraphAPI(user["access_token"]) profile = graph.get_object("me") friends = graph.get_connections("me", "friends") """ import cgi import time import urllib import urllib2 import hashlib import hmac import base64 import logging # Find a JSON parser try: import simplejson as json except ImportError: try: from django.utils import simplejson as json except ImportError: import json _parse_json = json.loads # Find a query string parser try: from urlparse import parse_qs except ImportError: from cgi import parse_qs class GraphAPI(object): """A client for the Facebook Graph API. See http://developers.facebook.com/docs/api for complete documentation for the API. 
The Graph API is made up of the objects in Facebook (e.g., people, pages, events, photos) and the connections between them (e.g., friends, photo tags, and event RSVPs). This client provides access to those primitive types in a generic way. For example, given an OAuth access token, this will fetch the profile of the active user and the list of the user's friends: graph = facebook.GraphAPI(access_token) user = graph.get_object("me") friends = graph.get_connections(user["id"], "friends") You can see a list of all of the objects and connections supported by the API at http://developers.facebook.com/docs/reference/api/. You can obtain an access token via OAuth or by using the Facebook JavaScript SDK. See http://developers.facebook.com/docs/authentication/ for details. If you are using the JavaScript SDK, you can use the get_user_from_cookie() method below to get the OAuth access token for the active user from the cookie saved by the SDK. """ def __init__(self, access_token=None): self.access_token = access_token def get_object(self, id, **args): """Fetchs the given object from the graph.""" return self.request(id, args) def get_objects(self, ids, **args): """Fetchs all of the given object from the graph. We return a map from ID to object. If any of the IDs are invalid, we raise an exception. """ args["ids"] = ",".join(ids) return self.request("", args) def get_connections(self, id, connection_name, **args): """Fetchs the connections for given object.""" return self.request(id + "/" + connection_name, args) def put_object(self, parent_object, connection_name, **data): """Writes the given object to the graph, connected to the given parent. For example, graph.put_object("me", "feed", message="Hello, world") writes "Hello, world" to the active user's wall. 
Likewise, this will comment on a the first post of the active user's feed: feed = graph.get_connections("me", "feed") post = feed["data"][0] graph.put_object(post["id"], "comments", message="First!") See http://developers.facebook.com/docs/api#publishing for all of the supported writeable objects. Most write operations require extended permissions. For example, publishing wall posts requires the "publish_stream" permission. See http://developers.facebook.com/docs/authentication/ for details about extended permissions. """ assert self.access_token, "Write operations require an access token" return self.request(parent_object + "/" + connection_name, post_args=data) def put_wall_post(self, message, attachment={}, profile_id="me"): """Writes a wall post to the given profile's wall. We default to writing to the authenticated user's wall if no profile_id is specified. attachment adds a structured attachment to the status message being posted to the Wall. It should be a dictionary of the form: {"name": "Link name" "link": "http://www.example.com/", "caption": "{*actor*} posted a new review", "description": "This is a longer description of the attachment", "picture": "http://www.example.com/thumbnail.jpg"} """ return self.put_object(profile_id, "feed", message=message, **attachment) def put_comment(self, object_id, message): """Writes the given comment on the given post.""" return self.put_object(object_id, "comments", message=message) def put_like(self, object_id): """Likes the given post.""" return self.put_object(object_id, "likes") def delete_object(self, id): """Deletes the object with the given ID from the graph.""" self.request(id, post_args={"method": "delete"}) def put_photo(self, image, message=None, album_id=None, **kwargs): """Uploads an image using multipart/form-data image=File like object for the image message=Caption for your image album_id=None posts to /me/photos which uses or creates and uses an album for your application. 
""" object_id = album_id or "me" #it would have been nice to reuse self.request; but multipart is messy in urllib post_args = { 'access_token': self.access_token, 'source': image, 'message': message } post_args.update(kwargs) content_type, body = self._encode_multipart_form(post_args) req = urllib2.Request("https://graph.facebook.com/%s/photos" % object_id, data=body) req.add_header('Content-Type', content_type) try: data = urllib2.urlopen(req).read() #For Python 3 use this: #except urllib2.HTTPError as e: except urllib2.HTTPError, e: data = e.read() # Facebook sends OAuth errors as 400, and urllib2 throws an exception, we want a GraphAPIError try: response = _parse_json(data) # Raise an error if we got one, but don't freak out if Facebook just gave us a Bool value if response and isinstance(response, dict) and response.get("error"): raise GraphAPIError(response["error"].get("code", 1), response["error"]["message"]) except ValueError: response = data return response # based on: http://code.activestate.com/recipes/146306/ def _encode_multipart_form(self, fields): """Fields are a dict of form name-> value For files, value should be a file object. Other file-like objects might work and a fake name will be chosen. 
Return (content_type, body) ready for httplib.HTTP instance """ BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$' CRLF = '\r\n' L = [] for (key, value) in fields.items(): logging.debug("Encoding %s, (%s)%s" % (key, type(value), value)) if not value: continue L.append('--' + BOUNDARY) if hasattr(value, 'read') and callable(value.read): filename = getattr(value,'name','%s.jpg' % key) L.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename)) L.append('Content-Type: image/jpeg') value = value.read() logging.debug(type(value)) else: L.append('Content-Disposition: form-data; name="%s"' % key) L.append('') if isinstance(value, unicode): logging.debug("Convert to ascii") value = value.encode('ascii') L.append(value) L.append('--' + BOUNDARY + '--') L.append('') body = CRLF.join(L) content_type = 'multipart/form-data; boundary=%s' % BOUNDARY return content_type, body def request(self, path, args=None, post_args=None): """Fetches the given path in the Graph API. We translate args to a valid query string. If post_args is given, we send a POST request to the given path with the given arguments. """ if not args: args = {} if self.access_token: if post_args is not None: post_args["access_token"] = self.access_token else: args["access_token"] = self.access_token post_data = None if post_args is None else urllib.urlencode(post_args) try: file = urllib2.urlopen("https://graph.facebook.com/" + path + "?" 
+ urllib.urlencode(args), post_data) except urllib2.HTTPError, e: response = _parse_json( e.read() ) raise GraphAPIError(response["error"]["type"], response["error"]["message"]) try: fileInfo = file.info() if fileInfo.maintype == 'text': response = _parse_json(file.read()) elif fileInfo.maintype == 'image': mimetype = fileInfo['content-type'] response = { "data": file.read(), "mime-type": mimetype, "url": file.url, } else: raise GraphAPIError('Response Error', 'Maintype was not text or image') finally: file.close() if response and isinstance(response, dict) and response.get("error"): raise GraphAPIError(response["error"]["type"], response["error"]["message"]) return response def api_request(self, path, args=None, post_args=None): """Fetches the given path in the Graph API. We translate args to a valid query string. If post_args is given, we send a POST request to the given path with the given arguments. """ if not args: args = {} if self.access_token: if post_args is not None: post_args["access_token"] = self.access_token else: args["access_token"] = self.access_token if self.api_key: if post_args is not None: post_args["api_key"] = self.api_key else: args["api_key"] = self.api_key if post_args is not None: post_args["format"] = "json-strings" else: args["format"] = "json-strings" post_data = None if post_args is None else urllib.urlencode(post_args) file = urllib.urlopen("https://api.facebook.com/method/" + path + "?" + urllib.urlencode(args), post_data) try: response = _parse_json(file.read()) finally: file.close() if response and response.get("error"): raise GraphAPIError(response["error"]["type"], response["error"]["message"]) return response def fql(self, query, args=None, post_args=None): """FQL query. Two reasons to have this method: 1. Graph api does not expose some info fields of a user, e.g. a user's networks/affiliations, we have to fall back to old api. 2. FQL is a strong tool. 
Example query: "SELECT affiliations FROM user WHERE uid = me()" """ if not args: args = {} if self.access_token: if post_args is not None: post_args["access_token"] = self.access_token else: args["access_token"] = self.access_token post_data = None if post_args is None else urllib.urlencode(post_args) args["query"] = query args["format"]="json" file = urllib2.urlopen("https://api.facebook.com/method/fql.query?" + urllib.urlencode(args), post_data) try: content = file.read() response = _parse_json(content) #Return a list if success, return a dictionary if failed if type(response) is dict and "error_code" in response: raise GraphAPIError(response["error_code"],response["error_msg"]) except Exception, e: raise e finally: file.close() return response class GraphAPIError(Exception): def __init__(self, type, message): Exception.__init__(self, message) self.type = type def get_user_from_cookie(cookies, app_id, app_secret): """Parses the cookie set by the official Facebook JavaScript SDK. cookies should be a dictionary-like object mapping cookie names to cookie values. If the user is logged in via Facebook, we return a dictionary with the keys "uid" and "access_token". The former is the user's Facebook ID, and the latter can be used to make authenticated requests to the Graph API. If the user is not logged in, we return None. Download the official Facebook JavaScript SDK at http://github.com/facebook/connect-js/. Read more about Facebook authentication at http://developers.facebook.com/docs/authentication/. """ cookie = cookies.get("fbsr_" + app_id, "") if not cookie: return None parsed_request = parse_signed_request(cookie, app_secret) result = get_access_token_from_code(parsed_request["code"], "", app_id, app_secret) result["uid"] = parsed_request["user_id"] return result def parse_signed_request(signed_request, app_secret): """ Return dictionary with signed request data. We return a dictionary containing the information in the signed_request. 
This will include a user_id if the user has authorised your application, as well as any information requested in the scope. If the signed_request is malformed or corrupted, False is returned. """ try: l = signed_request.split('.', 2) encoded_sig = str(l[0]) payload = str(l[1]) sig = base64.urlsafe_b64decode(encoded_sig + "=" * ((4 - len(encoded_sig) % 4) % 4)) data = base64.urlsafe_b64decode(payload + "=" * ((4 - len(payload) % 4) % 4)) except IndexError: return False # raise ValueError('signed_request malformed') except TypeError: return False # raise ValueError('signed_request had corrupted payload') data = _parse_json(data) if data.get('algorithm', '').upper() != 'HMAC-SHA256': return False # raise ValueError('signed_request used unknown algorithm') expected_sig = hmac.new(app_secret, msg=payload, digestmod=hashlib.sha256).digest() if sig != expected_sig: return False # raise ValueError('signed_request had signature mismatch') return data def auth_url(app_id, canvas_url, perms = None): url = "https://www.facebook.com/dialog/oauth?" kvps = {'client_id': app_id, 'redirect_uri': canvas_url} if perms: kvps['scope'] = ",".join(perms) return url + urllib.urlencode(kvps) def get_access_token_from_code(code, redirect_uri, app_id, app_secret): """ Get a user-specific access token from the "code" returned from a Facebook OAuth dialog. Returns a dict containing the access token and its expiration date (if applicable). """ args = { "code": code, "redirect_uri": redirect_uri, "client_id": app_id, "client_secret": app_secret, } # We would use GraphAPI.request() here, except for that the fact that the # response is a key-value pair, and not JSON. response = urllib.urlopen("https://graph.facebook.com/oauth/access_token" + "?" 
+ urllib.urlencode(args)).read() query_str = parse_qs(response) if "access_token" in query_str: result = {"access_token":query_str["access_token"][0]} if "expires" in query_str: result["expires"] = query_str["expires"][0] return result else: response = json.loads(response) raise GraphAPIError(response["error"]["type"], response["error"]["message"]) def get_app_access_token(app_id, app_secret): """ Get the access_token for the app that can be used for insights and creating test users app_id = retrieved from the developer page app_secret = retrieved from the developer page returns the application access_token """ # Get an app access token args = {'grant_type':'client_credentials', 'client_id':app_id, 'client_secret':app_secret} file = urllib2.urlopen("https://graph.facebook.com/oauth/access_token?" + urllib.urlencode(args)) try: result = file.read().split("=")[1] finally: file.close() return result
aselle/tensorflow
refs/heads/master
tensorflow/contrib/rnn/python/kernel_tests/core_rnn_test.py
10
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for rnn module.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import itertools import numpy as np from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.contrib import rnn as rnn_lib from tensorflow.core.protobuf import config_pb2 from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops as ops_lib from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gradients_impl from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import rnn from tensorflow.python.ops import rnn_cell from tensorflow.python.ops import state_ops from tensorflow.python.ops import tensor_array_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables as variables_lib from tensorflow.python.platform import test from tensorflow.python.platform import tf_logging from tensorflow.python.util import nest class 
Plus1RNNCell(rnn_lib.RNNCell): """RNN Cell generating (output, new_state) = (input + 1, state + 1).""" @property def output_size(self): return 5 @property def state_size(self): return 5 def __call__(self, input_, state, scope=None): return (input_ + 1, state + 1) class DummyMultiDimensionalLSTM(rnn_lib.RNNCell): """LSTM Cell generating (output, new_state) = (input + 1, state + 1). The input to this cell may have an arbitrary number of dimensions that follow the preceding 'Time' and 'Batch' dimensions. """ def __init__(self, dims): """Initialize the Multi-dimensional LSTM cell. Args: dims: tuple that contains the dimensions of the output of the cell, without including 'Time' or 'Batch' dimensions. """ if not isinstance(dims, tuple): raise TypeError("The dimensions passed to DummyMultiDimensionalLSTM " "should be a tuple of ints.") self._dims = dims self._output_size = tensor_shape.TensorShape(self._dims) self._state_size = (tensor_shape.TensorShape(self._dims), tensor_shape.TensorShape(self._dims)) @property def output_size(self): return self._output_size @property def state_size(self): return self._state_size def __call__(self, input_, state, scope=None): h, c = state return (input_ + 1, (h + 1, c + 1)) class NestedRNNCell(rnn_lib.RNNCell): """RNN Cell generating (output, new_state) = (input + 1, state + 1). The input, output and state of this cell is a tuple of two tensors. 
""" @property def output_size(self): return (5, 5) @property def state_size(self): return (6, 6) def __call__(self, input_, state, scope=None): h, c = state x, y = input_ return ((x + 1, y + 1), (h + 1, c + 1)) class TestStateSaver(object): def __init__(self, batch_size, state_size): self._batch_size = batch_size self._state_size = state_size self.saved_state = {} def state(self, name): if isinstance(self._state_size, dict): state_size = self._state_size[name] else: state_size = self._state_size if isinstance(state_size, int): state_size = (state_size,) elif isinstance(state_size, tuple): pass else: raise TypeError("state_size should either be an int or a tuple") return array_ops.zeros((self._batch_size,) + state_size) def save_state(self, name, state): self.saved_state[name] = state return array_ops.identity(state) @property def batch_size(self): return self._batch_size @property def state_size(self): return self._state_size class TestStateSaverWithCounters(TestStateSaver): """Class wrapper around TestStateSaver. A dummy class used for testing of static_state_saving_rnn. It helps test if save_state and state functions got called same number of time when we evaluate output of rnn cell and state or either of them separately. It inherits from the TestStateSaver and adds the counters for calls of functions. 
""" def __init__(self, batch_size, state_size): super(TestStateSaverWithCounters, self).__init__(batch_size, state_size) self._num_state_calls = variables_lib.Variable(0) self._num_save_state_calls = variables_lib.Variable(0) def state(self, name): with ops_lib.control_dependencies( [state_ops.assign_add(self._num_state_calls, 1)]): return super(TestStateSaverWithCounters, self).state(name) def save_state(self, name, state): with ops_lib.control_dependencies([state_ops.assign_add( self._num_save_state_calls, 1)]): return super(TestStateSaverWithCounters, self).save_state(name, state) @property def num_state_calls(self): return self._num_state_calls @property def num_save_state_calls(self): return self._num_save_state_calls class RNNTest(test.TestCase): def setUp(self): self._seed = 23489 np.random.seed(self._seed) def testInvalidSequenceLengthShape(self): cell = Plus1RNNCell() inputs = [array_ops.placeholder(dtypes.float32, shape=(3, 4))] with self.assertRaisesRegexp(ValueError, "must be a vector"): rnn.static_rnn(cell, inputs, dtype=dtypes.float32, sequence_length=4) def testRNN(self): cell = Plus1RNNCell() batch_size = 2 input_size = 5 max_length = 8 # unrolled up to this length inputs = max_length * [ array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size)) ] outputs, state = rnn.static_rnn(cell, inputs, dtype=dtypes.float32) self.assertEqual(len(outputs), len(inputs)) for out, inp in zip(outputs, inputs): self.assertEqual(out.get_shape(), inp.get_shape()) self.assertEqual(out.dtype, inp.dtype) with self.test_session(use_gpu=True) as sess: input_value = np.random.randn(batch_size, input_size) values = sess.run(outputs + [state], feed_dict={inputs[0]: input_value}) # Outputs for v in values[:-1]: self.assertAllClose(v, input_value + 1.0) # Final state self.assertAllClose(values[-1], max_length * np.ones( (batch_size, input_size), dtype=np.float32)) def testDropout(self): cell = Plus1RNNCell() full_dropout_cell = rnn_cell.DropoutWrapper( cell, 
input_keep_prob=1e-12, seed=0) (name, dep), = full_dropout_cell._checkpoint_dependencies self.assertIs(dep, cell) self.assertEqual("cell", name) batch_size = 2 input_size = 5 max_length = 8 inputs = max_length * [ array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size)) ] with variable_scope.variable_scope("share_scope"): outputs, state = rnn.static_rnn(cell, inputs, dtype=dtypes.float32) with variable_scope.variable_scope("drop_scope"): dropped_outputs, _ = rnn.static_rnn( full_dropout_cell, inputs, dtype=dtypes.float32) self.assertEqual(len(outputs), len(inputs)) for out, inp in zip(outputs, inputs): self.assertEqual(out.get_shape().as_list(), inp.get_shape().as_list()) self.assertEqual(out.dtype, inp.dtype) with self.test_session(use_gpu=True) as sess: input_value = np.random.randn(batch_size, input_size) values = sess.run(outputs + [state], feed_dict={inputs[0]: input_value}) full_dropout_values = sess.run( dropped_outputs, feed_dict={ inputs[0]: input_value }) for v in values[:-1]: self.assertAllClose(v, input_value + 1.0) for d_v in full_dropout_values[:-1]: # Add 1.0 to dropped_out (all zeros) self.assertAllClose(d_v, np.ones_like(input_value)) def testDynamicCalculation(self): cell = Plus1RNNCell() sequence_length = array_ops.placeholder(dtypes.int64) batch_size = 2 input_size = 5 max_length = 8 inputs = max_length * [ array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size)) ] with variable_scope.variable_scope("drop_scope"): dynamic_outputs, dynamic_state = rnn.static_rnn( cell, inputs, sequence_length=sequence_length, dtype=dtypes.float32) self.assertEqual(len(dynamic_outputs), len(inputs)) with self.test_session(use_gpu=True) as sess: input_value = np.random.randn(batch_size, input_size) dynamic_values = sess.run( dynamic_outputs, feed_dict={ inputs[0]: input_value, sequence_length: [2, 3] }) dynamic_state_value = sess.run( [dynamic_state], feed_dict={ inputs[0]: input_value, sequence_length: [2, 3] }) # outputs are fully 
calculated for t = 0, 1 for v in dynamic_values[:2]: self.assertAllClose(v, input_value + 1.0) # outputs at t = 2 are zero for entry 0, calculated for entry 1 self.assertAllClose(dynamic_values[2], np.vstack((np.zeros((input_size)), 1.0 + input_value[1, :]))) # outputs at t = 3+ are zero for v in dynamic_values[3:]: self.assertAllEqual(v, np.zeros_like(input_value)) # the final states are: # entry 0: the values from the calculation at t=1 # entry 1: the values from the calculation at t=2 self.assertAllEqual(dynamic_state_value[0], np.vstack((1.0 * (1 + 1) * np.ones((input_size)), 1.0 * (2 + 1) * np.ones((input_size))))) def _testScope(self, factory, prefix="prefix", use_outer_scope=True): with self.test_session(use_gpu=True, graph=ops_lib.Graph()): if use_outer_scope: with variable_scope.variable_scope(prefix) as scope: factory(scope) else: factory(prefix) # check that all the variables names starts # with the proper scope. variables_lib.global_variables_initializer() all_vars = variables_lib.global_variables() prefix = prefix or "rnn" scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")] tf_logging.info("RNN with scope: %s (%s)" % (prefix, "scope" if use_outer_scope else "str")) for v in scope_vars: tf_logging.info(v.name) self.assertEqual(len(scope_vars), len(all_vars)) def testScope(self): def factory(scope): cell = Plus1RNNCell() batch_size = 2 input_size = 5 max_length = 8 # unrolled up to this length inputs = max_length * [ array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size)) ] return rnn.static_rnn(cell, inputs, dtype=dtypes.float32, scope=scope) self._testScope(factory, use_outer_scope=True) self._testScope(factory, use_outer_scope=False) self._testScope(factory, prefix=None, use_outer_scope=False) class LSTMTest(test.TestCase): def setUp(self): self._seed = 23489 np.random.seed(self._seed) def testDType(self): # Test case for GitHub issue 16228 # Not passing dtype in constructor results in default float32 lstm = 
rnn_cell.LSTMCell(10) input_tensor = array_ops.ones([10, 50]) lstm.build(input_tensor.get_shape()) self.assertEqual(lstm._bias.dtype, dtypes.float32_ref) # Explicitly pass dtype in constructor for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]: lstm = rnn_cell.LSTMCell(10, dtype=dtype) input_tensor = array_ops.ones([10, 50]) lstm.build(input_tensor.get_shape()) self.assertEqual(lstm._bias.dtype, dtype._as_ref) def testNoProjNoSharding(self): num_units = 3 input_size = 5 batch_size = 2 max_length = 8 with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess: initializer = init_ops.random_uniform_initializer( -0.01, 0.01, seed=self._seed) cell = rnn_cell.LSTMCell( num_units, initializer=initializer, state_is_tuple=False) inputs = max_length * [ array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size)) ] outputs, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32) self.assertEqual(len(outputs), len(inputs)) for out in outputs: self.assertEqual(out.get_shape().as_list(), [batch_size, num_units]) variables_lib.global_variables_initializer().run() input_value = np.random.randn(batch_size, input_size) sess.run(outputs, feed_dict={inputs[0]: input_value}) def testCellClipping(self): num_units = 3 input_size = 5 batch_size = 2 max_length = 8 with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess: initializer = init_ops.random_uniform_initializer( -0.01, 0.01, seed=self._seed) cell = rnn_cell.LSTMCell( num_units, use_peepholes=True, cell_clip=0.0, initializer=initializer, state_is_tuple=False) inputs = max_length * [ array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size)) ] outputs, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32) self.assertEqual(len(outputs), len(inputs)) for out in outputs: self.assertEqual(out.get_shape().as_list(), [batch_size, num_units]) variables_lib.global_variables_initializer().run() input_value = np.random.randn(batch_size, input_size) values = sess.run(outputs, 
feed_dict={inputs[0]: input_value}) for value in values: # if cell c is clipped to 0, tanh(c) = 0 => m==0 self.assertAllEqual(value, np.zeros((batch_size, num_units))) def testNoProjNoShardingSimpleStateSaver(self): num_units = 3 input_size = 5 batch_size = 2 max_length = 8 with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess: initializer = init_ops.random_uniform_initializer( -0.01, 0.01, seed=self._seed) state_saver = TestStateSaver(batch_size, 2 * num_units) cell = rnn_cell.LSTMCell( num_units, use_peepholes=False, initializer=initializer, state_is_tuple=False) inputs = max_length * [ array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size)) ] with variable_scope.variable_scope("share_scope"): outputs, state = rnn.static_state_saving_rnn( cell, inputs, state_saver=state_saver, state_name="save_lstm") self.assertEqual(len(outputs), len(inputs)) for out in outputs: self.assertEqual(out.get_shape().as_list(), [batch_size, num_units]) variables_lib.global_variables_initializer().run() input_value = np.random.randn(batch_size, input_size) (last_state_value, saved_state_value) = sess.run( [state, state_saver.saved_state["save_lstm"]], feed_dict={ inputs[0]: input_value }) self.assertAllEqual(last_state_value, saved_state_value) def testNoProjNoShardingTupleStateSaver(self): num_units = 3 input_size = 5 batch_size = 2 max_length = 8 with self.test_session(graph=ops_lib.Graph()) as sess: initializer = init_ops.random_uniform_initializer( -0.01, 0.01, seed=self._seed) state_saver = TestStateSaver(batch_size, num_units) cell = rnn_cell.LSTMCell( num_units, use_peepholes=False, initializer=initializer, state_is_tuple=True) inputs = max_length * [ array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size)) ] with variable_scope.variable_scope("share_scope"): outputs, state = rnn.static_state_saving_rnn( cell, inputs, state_saver=state_saver, state_name=("c", "m")) self.assertEqual(len(outputs), len(inputs)) for out in outputs: 
self.assertEqual(out.get_shape().as_list(), [batch_size, num_units]) variables_lib.global_variables_initializer().run() input_value = np.random.randn(batch_size, input_size) last_and_saved_states = sess.run( state + (state_saver.saved_state["c"], state_saver.saved_state["m"]), feed_dict={ inputs[0]: input_value }) self.assertEqual(4, len(last_and_saved_states)) self.assertAllEqual(last_and_saved_states[:2], last_and_saved_states[2:]) def testNoProjNoShardingNestedTupleStateSaver(self): num_units = 3 input_size = 5 batch_size = 2 max_length = 8 with self.test_session(graph=ops_lib.Graph()) as sess: initializer = init_ops.random_uniform_initializer( -0.01, 0.01, seed=self._seed) state_saver = TestStateSaver( batch_size, { "c0": num_units, "m0": num_units, "c1": num_units + 1, "m1": num_units + 1, "c2": num_units + 2, "m2": num_units + 2, "c3": num_units + 3, "m3": num_units + 3 }) def _cell(i): return rnn_cell.LSTMCell( num_units + i, use_peepholes=False, initializer=initializer, state_is_tuple=True) # This creates a state tuple which has 4 sub-tuples of length 2 each. 
cell = rnn_cell.MultiRNNCell( [_cell(i) for i in range(4)], state_is_tuple=True) self.assertEqual(len(cell.state_size), 4) for i in range(4): self.assertEqual(len(cell.state_size[i]), 2) inputs = max_length * [ array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size)) ] state_names = (("c0", "m0"), ("c1", "m1"), ("c2", "m2"), ("c3", "m3")) with variable_scope.variable_scope("share_scope"): outputs, state = rnn.static_state_saving_rnn( cell, inputs, state_saver=state_saver, state_name=state_names) self.assertEqual(len(outputs), len(inputs)) # Final output comes from _cell(3) which has state size num_units + 3 for out in outputs: self.assertEqual(out.get_shape().as_list(), [batch_size, num_units + 3]) variables_lib.global_variables_initializer().run() input_value = np.random.randn(batch_size, input_size) last_states = sess.run( list(nest.flatten(state)), feed_dict={ inputs[0]: input_value }) saved_states = sess.run( list(state_saver.saved_state.values()), feed_dict={ inputs[0]: input_value }) self.assertEqual(8, len(last_states)) self.assertEqual(8, len(saved_states)) flat_state_names = nest.flatten(state_names) named_saved_states = dict( zip(state_saver.saved_state.keys(), saved_states)) for i in range(8): self.assertAllEqual(last_states[i], named_saved_states[flat_state_names[i]]) def testProjNoSharding(self): num_units = 3 input_size = 5 batch_size = 2 num_proj = 4 max_length = 8 with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess: initializer = init_ops.random_uniform_initializer( -0.01, 0.01, seed=self._seed) inputs = max_length * [ array_ops.placeholder(dtypes.float32, shape=(None, input_size)) ] cell = rnn_cell.LSTMCell( num_units, use_peepholes=True, num_proj=num_proj, initializer=initializer, state_is_tuple=False) outputs, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32) self.assertEqual(len(outputs), len(inputs)) variables_lib.global_variables_initializer().run() input_value = np.random.randn(batch_size, input_size) 
sess.run(outputs, feed_dict={inputs[0]: input_value}) def _testStateTupleWithProjAndSequenceLength(self): num_units = 3 input_size = 5 batch_size = 2 num_proj = 4 max_length = 8 sequence_length = [4, 6] with self.test_session(graph=ops_lib.Graph()) as sess: initializer = init_ops.random_uniform_initializer( -0.01, 0.01, seed=self._seed) inputs = max_length * [ array_ops.placeholder(dtypes.float32, shape=(None, input_size)) ] cell_notuple = rnn_cell.LSTMCell( num_units, use_peepholes=True, num_proj=num_proj, initializer=initializer, state_is_tuple=False) cell_tuple = rnn_cell.LSTMCell( num_units, use_peepholes=True, num_proj=num_proj, initializer=initializer, state_is_tuple=True) with variable_scope.variable_scope("root") as scope: outputs_notuple, state_notuple = rnn.static_rnn( cell_notuple, inputs, dtype=dtypes.float32, sequence_length=sequence_length, scope=scope) scope.reuse_variables() # TODO(ebrevdo): For this test, we ensure values are identical and # therefore the weights here are tied. In the future, we may consider # making the state_is_tuple property mutable so we can avoid # having to do this - especially if users ever need to reuse # the parameters from different RNNCell instances. Right now, # this seems an unrealistic use case except for testing. 
cell_tuple._scope = cell_notuple._scope # pylint: disable=protected-access outputs_tuple, state_tuple = rnn.static_rnn( cell_tuple, inputs, dtype=dtypes.float32, sequence_length=sequence_length, scope=scope) self.assertEqual(len(outputs_notuple), len(inputs)) self.assertEqual(len(outputs_tuple), len(inputs)) self.assertTrue(isinstance(state_tuple, tuple)) self.assertTrue(isinstance(state_notuple, ops_lib.Tensor)) variables_lib.global_variables_initializer().run() input_value = np.random.randn(batch_size, input_size) outputs_notuple_v = sess.run( outputs_notuple, feed_dict={ inputs[0]: input_value }) outputs_tuple_v = sess.run( outputs_tuple, feed_dict={ inputs[0]: input_value }) self.assertAllEqual(outputs_notuple_v, outputs_tuple_v) (state_notuple_v,) = sess.run( (state_notuple,), feed_dict={ inputs[0]: input_value }) state_tuple_v = sess.run(state_tuple, feed_dict={inputs[0]: input_value}) self.assertAllEqual(state_notuple_v, np.hstack(state_tuple_v)) def testProjSharding(self): num_units = 3 input_size = 5 batch_size = 2 num_proj = 4 num_proj_shards = 3 num_unit_shards = 2 max_length = 8 with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess: initializer = init_ops.random_uniform_initializer( -0.01, 0.01, seed=self._seed) inputs = max_length * [ array_ops.placeholder(dtypes.float32, shape=(None, input_size)) ] cell = rnn_cell.LSTMCell( num_units, use_peepholes=True, num_proj=num_proj, num_unit_shards=num_unit_shards, num_proj_shards=num_proj_shards, initializer=initializer, state_is_tuple=False) outputs, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32) self.assertEqual(len(outputs), len(inputs)) variables_lib.global_variables_initializer().run() input_value = np.random.randn(batch_size, input_size) sess.run(outputs, feed_dict={inputs[0]: input_value}) def testDoubleInput(self): num_units = 3 input_size = 5 batch_size = 2 num_proj = 4 num_proj_shards = 3 num_unit_shards = 2 max_length = 8 with self.test_session(use_gpu=True, 
graph=ops_lib.Graph()) as sess: initializer = init_ops.random_uniform_initializer(-1, 1, seed=self._seed) inputs = max_length * [ array_ops.placeholder(dtypes.float64, shape=(None, input_size)) ] cell = rnn_cell.LSTMCell( num_units, use_peepholes=True, num_proj=num_proj, num_unit_shards=num_unit_shards, num_proj_shards=num_proj_shards, initializer=initializer, state_is_tuple=False) outputs, _ = rnn.static_rnn( cell, inputs, initial_state=cell.zero_state(batch_size, dtypes.float64)) self.assertEqual(len(outputs), len(inputs)) variables_lib.global_variables_initializer().run() input_value = np.asarray( np.random.randn(batch_size, input_size), dtype=np.float64) values = sess.run(outputs, feed_dict={inputs[0]: input_value}) self.assertEqual(values[0].dtype, input_value.dtype) def testShardNoShardEquivalentOutput(self): num_units = 3 input_size = 5 batch_size = 2 num_proj = 4 num_proj_shards = 3 num_unit_shards = 2 max_length = 8 with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess: inputs = max_length * [ array_ops.placeholder(dtypes.float32, shape=(None, input_size)) ] initializer = init_ops.constant_initializer(0.001) cell_noshard = rnn_cell.LSTMCell( num_units, num_proj=num_proj, use_peepholes=True, initializer=initializer, num_unit_shards=num_unit_shards, num_proj_shards=num_proj_shards, state_is_tuple=False) cell_shard = rnn_cell.LSTMCell( num_units, use_peepholes=True, initializer=initializer, num_proj=num_proj, state_is_tuple=False) with variable_scope.variable_scope("noshard_scope"): outputs_noshard, state_noshard = rnn.static_rnn( cell_noshard, inputs, dtype=dtypes.float32) with variable_scope.variable_scope("shard_scope"): outputs_shard, state_shard = rnn.static_rnn( cell_shard, inputs, dtype=dtypes.float32) self.assertEqual(len(outputs_noshard), len(inputs)) self.assertEqual(len(outputs_noshard), len(outputs_shard)) variables_lib.global_variables_initializer().run() input_value = np.random.randn(batch_size, input_size) feeds = dict((x, 
input_value) for x in inputs) values_noshard = sess.run(outputs_noshard, feed_dict=feeds) values_shard = sess.run(outputs_shard, feed_dict=feeds) state_values_noshard = sess.run([state_noshard], feed_dict=feeds) state_values_shard = sess.run([state_shard], feed_dict=feeds) self.assertEqual(len(values_noshard), len(values_shard)) self.assertEqual(len(state_values_noshard), len(state_values_shard)) for (v_noshard, v_shard) in zip(values_noshard, values_shard): self.assertAllClose(v_noshard, v_shard, atol=1e-3) for (s_noshard, s_shard) in zip(state_values_noshard, state_values_shard): self.assertAllClose(s_noshard, s_shard, atol=1e-3) def testDoubleInputWithDropoutAndDynamicCalculation(self): """Smoke test for using LSTM with doubles, dropout, dynamic calculation.""" num_units = 3 input_size = 5 batch_size = 2 num_proj = 4 num_proj_shards = 3 num_unit_shards = 2 max_length = 8 with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess: sequence_length = array_ops.placeholder(dtypes.int64) initializer = init_ops.random_uniform_initializer( -0.01, 0.01, seed=self._seed) inputs = max_length * [ array_ops.placeholder(dtypes.float64, shape=(None, input_size)) ] cell = rnn_cell.LSTMCell( num_units, use_peepholes=True, num_proj=num_proj, num_unit_shards=num_unit_shards, num_proj_shards=num_proj_shards, initializer=initializer, state_is_tuple=False) dropout_cell = rnn_cell.DropoutWrapper(cell, 0.5, seed=0) outputs, state = rnn.static_rnn( dropout_cell, inputs, sequence_length=sequence_length, initial_state=cell.zero_state(batch_size, dtypes.float64)) self.assertEqual(len(outputs), len(inputs)) variables_lib.global_variables_initializer().run(feed_dict={ sequence_length: [2, 3] }) input_value = np.asarray( np.random.randn(batch_size, input_size), dtype=np.float64) values = sess.run( outputs, feed_dict={ inputs[0]: input_value, sequence_length: [2, 3] }) state_value = sess.run( [state], feed_dict={ inputs[0]: input_value, sequence_length: [2, 3] }) 
self.assertEqual(values[0].dtype, input_value.dtype) self.assertEqual(state_value[0].dtype, input_value.dtype) def testSharingWeightsWithReuse(self): num_units = 3 input_size = 5 batch_size = 2 num_proj = 4 max_length = 8 with self.test_session(graph=ops_lib.Graph()) as sess: initializer = init_ops.random_uniform_initializer(-1, 1, seed=self._seed) initializer_d = init_ops.random_uniform_initializer( -1, 1, seed=self._seed + 1) inputs = max_length * [ array_ops.placeholder(dtypes.float32, shape=(None, input_size)) ] cell = rnn_cell.LSTMCell( num_units, use_peepholes=True, num_proj=num_proj, initializer=initializer, state_is_tuple=False) cell_d = rnn_cell.LSTMCell( num_units, use_peepholes=True, num_proj=num_proj, initializer=initializer_d, state_is_tuple=False) with variable_scope.variable_scope("share_scope"): outputs0, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32) with variable_scope.variable_scope("share_scope", reuse=True): outputs1, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32) with variable_scope.variable_scope("diff_scope"): outputs2, _ = rnn.static_rnn(cell_d, inputs, dtype=dtypes.float32) variables_lib.global_variables_initializer().run() input_value = np.random.randn(batch_size, input_size) output_values = sess.run( outputs0 + outputs1 + outputs2, feed_dict={ inputs[0]: input_value }) outputs0_values = output_values[:max_length] outputs1_values = output_values[max_length:2 * max_length] outputs2_values = output_values[2 * max_length:] self.assertEqual(len(outputs0_values), len(outputs1_values)) self.assertEqual(len(outputs0_values), len(outputs2_values)) for o1, o2, o3 in zip(outputs0_values, outputs1_values, outputs2_values): # Same weights used by both RNNs so outputs should be the same. self.assertAllEqual(o1, o2) # Different weights used so outputs should be different. 
self.assertTrue(np.linalg.norm(o1 - o3) > 1e-6) def testSharingWeightsWithDifferentNamescope(self): num_units = 3 input_size = 5 batch_size = 2 num_proj = 4 max_length = 8 with self.test_session(graph=ops_lib.Graph()) as sess: initializer = init_ops.random_uniform_initializer(-1, 1, seed=self._seed) inputs = max_length * [ array_ops.placeholder(dtypes.float32, shape=(None, input_size)) ] cell = rnn_cell.LSTMCell( num_units, use_peepholes=True, num_proj=num_proj, initializer=initializer, state_is_tuple=False) with ops_lib.name_scope("scope0"): with variable_scope.variable_scope("share_scope"): outputs0, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32) with ops_lib.name_scope("scope1"): with variable_scope.variable_scope("share_scope", reuse=True): outputs1, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32) variables_lib.global_variables_initializer().run() input_value = np.random.randn(batch_size, input_size) output_values = sess.run( outputs0 + outputs1, feed_dict={ inputs[0]: input_value }) outputs0_values = output_values[:max_length] outputs1_values = output_values[max_length:] self.assertEqual(len(outputs0_values), len(outputs1_values)) for out0, out1 in zip(outputs0_values, outputs1_values): self.assertAllEqual(out0, out1) def testDynamicRNNAllowsUnknownTimeDimension(self): inputs = array_ops.placeholder(dtypes.float32, shape=[1, None, 20]) cell = rnn_cell.GRUCell(30) # Smoke test, this should not raise an error rnn.dynamic_rnn(cell, inputs, dtype=dtypes.float32) @test_util.run_in_graph_and_eager_modes def testDynamicRNNWithTupleStates(self): num_units = 3 input_size = 5 batch_size = 2 num_proj = 4 max_length = 8 sequence_length = [4, 6] in_graph_mode = not context.executing_eagerly() with self.test_session(graph=ops_lib.Graph()) as sess: initializer = init_ops.random_uniform_initializer( -0.01, 0.01, seed=self._seed) if in_graph_mode: inputs = max_length * [ array_ops.placeholder(dtypes.float32, shape=(None, input_size)) ] else: inputs = max_length 
* [
            constant_op.constant(
                np.random.randn(batch_size, input_size).astype(np.float32))
        ]
      inputs_c = array_ops.stack(inputs)
      cell = rnn_cell.LSTMCell(
          num_units,
          use_peepholes=True,
          num_proj=num_proj,
          initializer=initializer,
          state_is_tuple=True)
      # Build static and dynamic RNNs in the same variable scope so both
      # share identical weights.
      with variable_scope.variable_scope("root") as scope:
        outputs_static, state_static = rnn.static_rnn(
            cell,
            inputs,
            dtype=dtypes.float32,
            sequence_length=sequence_length,
            scope=scope)
        scope.reuse_variables()
        outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
            cell,
            inputs_c,
            dtype=dtypes.float32,
            time_major=True,
            sequence_length=sequence_length,
            scope=scope)
      # Both paths must produce an LSTMStateTuple whose c/h components are
      # addressable both by index and by attribute.
      self.assertTrue(isinstance(state_static, rnn_cell.LSTMStateTuple))
      self.assertTrue(isinstance(state_dynamic, rnn_cell.LSTMStateTuple))
      self.assertEqual(state_static[0], state_static.c)
      self.assertEqual(state_static[1], state_static.h)
      self.assertEqual(state_dynamic[0], state_dynamic.c)
      self.assertEqual(state_dynamic[1], state_dynamic.h)

      if in_graph_mode:
        # Graph mode: initialize variables and fetch concrete values.
        variables_lib.global_variables_initializer().run()
        input_value = np.random.randn(batch_size, input_size)
        outputs_static = sess.run(
            outputs_static, feed_dict={
                inputs[0]: input_value
            })
        outputs_dynamic = sess.run(
            outputs_dynamic, feed_dict={
                inputs[0]: input_value
            })
        state_static = sess.run(
            state_static, feed_dict={
                inputs[0]: input_value
            })
        state_dynamic = sess.run(
            state_dynamic, feed_dict={
                inputs[0]: input_value
            })

      if in_graph_mode:
        self.assertAllEqual(outputs_static, outputs_dynamic)
      else:
        self.assertAllEqual(array_ops.stack(outputs_static), outputs_dynamic)
      self.assertAllEqual(np.hstack(state_static), np.hstack(state_dynamic))

  @test_util.run_in_graph_and_eager_modes
  def testDynamicRNNWithNestedTupleStates(self):
    # Checks that static_rnn and dynamic_rnn agree (outputs and final state)
    # when the cell state is a nested tuple (a MultiRNNCell of LSTMCells).
    num_units = 3
    input_size = 5
    batch_size = 2
    num_proj = 4
    max_length = 8
    sequence_length = [4, 6]
    in_graph_mode = not context.executing_eagerly()
    with self.test_session(graph=ops_lib.Graph()) as sess:
      initializer = init_ops.random_uniform_initializer(
          -0.01, 0.01, seed=self._seed)
      if in_graph_mode:
        inputs = max_length * [
            array_ops.placeholder(dtypes.float32, shape=(None, input_size))
        ]
      else:
        inputs = max_length * [
            constant_op.constant(
                np.random.randn(batch_size, input_size).astype(np.float32))
        ]
      inputs_c = array_ops.stack(inputs)

      def _cell(i):
        # Each layer gets distinct unit/projection sizes so that any shape
        # confusion between layers would be caught.
        return rnn_cell.LSTMCell(
            num_units + i,
            use_peepholes=True,
            num_proj=num_proj + i,
            initializer=initializer,
            state_is_tuple=True)

      # This creates a state tuple which has 4 sub-tuples of length 2 each.
      cell = rnn_cell.MultiRNNCell(
          [_cell(i) for i in range(4)], state_is_tuple=True)

      self.assertEqual(len(cell.state_size), 4)
      for i in range(4):
        self.assertEqual(len(cell.state_size[i]), 2)

      test_zero = cell.zero_state(1, dtypes.float32)
      self.assertEqual(len(test_zero), 4)
      for i in range(4):
        self.assertEqual(test_zero[i][0].get_shape()[1], cell.state_size[i][0])
        self.assertEqual(test_zero[i][1].get_shape()[1], cell.state_size[i][1])

      with variable_scope.variable_scope("root") as scope:
        outputs_static, state_static = rnn.static_rnn(
            cell,
            inputs,
            dtype=dtypes.float32,
            sequence_length=sequence_length,
            scope=scope)
        scope.reuse_variables()
        outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
            cell,
            inputs_c,
            dtype=dtypes.float32,
            time_major=True,
            sequence_length=sequence_length,
            scope=scope)

      if in_graph_mode:
        input_value = np.random.randn(batch_size, input_size)
        variables_lib.global_variables_initializer().run()
        outputs_static = sess.run(
            outputs_static, feed_dict={
                inputs[0]: input_value
            })
        outputs_dynamic = sess.run(
            outputs_dynamic, feed_dict={
                inputs[0]: input_value
            })
        # Flatten the nested states before fetching so they can be compared
        # elementwise below.
        state_static = sess.run(
            nest.flatten(state_static), feed_dict={
                inputs[0]: input_value
            })
        state_dynamic = sess.run(
            nest.flatten(state_dynamic), feed_dict={
                inputs[0]: input_value
            })

      if in_graph_mode:
        self.assertAllEqual(outputs_static, outputs_dynamic)
      else:
        self.assertAllEqual(array_ops.stack(outputs_static), outputs_dynamic)
        state_static = nest.flatten(state_static)
        state_dynamic = nest.flatten(state_dynamic)
      self.assertAllEqual(np.hstack(state_static),
                          np.hstack(state_dynamic))

  def _testDynamicEquivalentToStaticRNN(self, use_sequence_length):
    # Builds the same LSTM twice -- once via static_rnn, once via dynamic_rnn
    # with shared weights -- and compares outputs, final states, and
    # gradients between the two (comparison code continues below).
    time_steps = 8
    num_units = 3
    num_proj = 4
    input_size = 5
    batch_size = 2
    input_values = np.random.randn(time_steps, batch_size, input_size).astype(
        np.float32)
    if use_sequence_length:
      sequence_length = np.random.randint(0, time_steps, size=batch_size)
    else:
      sequence_length = None
    in_graph_mode = not context.executing_eagerly()
    # TODO(b/68017812): Eager ignores operation seeds, so we need to create a
    # single cell and reuse it across the static and dynamic RNNs. Remove this
    # special case once is fixed.
    if not in_graph_mode:
      initializer = init_ops.random_uniform_initializer(
          -0.01, 0.01, seed=self._seed)
      cell = rnn_cell.LSTMCell(
          num_units,
          use_peepholes=True,
          initializer=initializer,
          num_proj=num_proj,
          state_is_tuple=False)

    ########### Step 1: Run static graph and generate readouts
    with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
      if in_graph_mode:
        concat_inputs = array_ops.placeholder(
            dtypes.float32, shape=(time_steps, batch_size, input_size))
      else:
        concat_inputs = constant_op.constant(input_values)
      inputs = array_ops.unstack(concat_inputs)
      initializer = init_ops.random_uniform_initializer(
          -0.01, 0.01, seed=self._seed)
      # TODO(akshayka): Remove special case once b/68017812 is fixed.
      if in_graph_mode:
        cell = rnn_cell.LSTMCell(
            num_units,
            use_peepholes=True,
            initializer=initializer,
            num_proj=num_proj,
            state_is_tuple=False)
      with variable_scope.variable_scope("dynamic_scope"):
        outputs_static, state_static = rnn.static_rnn(
            cell, inputs, sequence_length=sequence_length,
            dtype=dtypes.float32)

      if in_graph_mode:
        # Generate gradients and run sessions to obtain outputs
        feeds = {concat_inputs: input_values}
        # Initialize
        variables_lib.global_variables_initializer().run(feed_dict=feeds)
        # Generate gradients of sum of outputs w.r.t. inputs
inputs static_gradients = gradients_impl.gradients( outputs_static + [state_static], [concat_inputs]) # Generate gradients of individual outputs w.r.t. inputs static_individual_gradients = nest.flatten([ gradients_impl.gradients(y, [concat_inputs]) for y in [outputs_static[0], outputs_static[-1], state_static] ]) # Generate gradients of individual variables w.r.t. inputs trainable_variables = ops_lib.get_collection( ops_lib.GraphKeys.TRAINABLE_VARIABLES) assert len(trainable_variables) > 1, ( "Count of trainable variables: %d" % len(trainable_variables)) # pylint: disable=bad-builtin static_individual_variable_gradients = nest.flatten([ gradients_impl.gradients(y, trainable_variables) for y in [outputs_static[0], outputs_static[-1], state_static] ]) # Test forward pass values_static = sess.run(outputs_static, feed_dict=feeds) (state_value_static,) = sess.run((state_static,), feed_dict=feeds) # Test gradients to inputs and variables w.r.t. outputs & final state static_grad_values = sess.run(static_gradients, feed_dict=feeds) static_individual_grad_values = sess.run( static_individual_gradients, feed_dict=feeds) static_individual_var_grad_values = sess.run( static_individual_variable_gradients, feed_dict=feeds) ########## Step 2: Run dynamic graph and generate readouts with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess: if in_graph_mode: concat_inputs = array_ops.placeholder( dtypes.float32, shape=(time_steps, batch_size, input_size)) else: concat_inputs = constant_op.constant(input_values) initializer = init_ops.random_uniform_initializer( -0.01, 0.01, seed=self._seed) # TODO(akshayka): Remove this special case once b/68017812 is # fixed. 
if in_graph_mode: cell = rnn_cell.LSTMCell( num_units, use_peepholes=True, initializer=initializer, num_proj=num_proj, state_is_tuple=False) with variable_scope.variable_scope("dynamic_scope"): outputs_dynamic, state_dynamic = rnn.dynamic_rnn( cell, inputs=concat_inputs, sequence_length=sequence_length, time_major=True, dtype=dtypes.float32) split_outputs_dynamic = array_ops.unstack(outputs_dynamic, time_steps) if in_graph_mode: feeds = {concat_inputs: input_values} # Initialize variables_lib.global_variables_initializer().run(feed_dict=feeds) # Generate gradients of sum of outputs w.r.t. inputs dynamic_gradients = gradients_impl.gradients( split_outputs_dynamic + [state_dynamic], [concat_inputs]) # Generate gradients of several individual outputs w.r.t. inputs dynamic_individual_gradients = nest.flatten([ gradients_impl.gradients(y, [concat_inputs]) for y in [ split_outputs_dynamic[0], split_outputs_dynamic[-1], state_dynamic ] ]) # Generate gradients of individual variables w.r.t. inputs trainable_variables = ops_lib.get_collection( ops_lib.GraphKeys.TRAINABLE_VARIABLES) assert len(trainable_variables) > 1, ( "Count of trainable variables: %d" % len(trainable_variables)) dynamic_individual_variable_gradients = nest.flatten([ gradients_impl.gradients(y, trainable_variables) for y in [ split_outputs_dynamic[0], split_outputs_dynamic[-1], state_dynamic ] ]) # Test forward pass values_dynamic = sess.run(split_outputs_dynamic, feed_dict=feeds) (state_value_dynamic,) = sess.run((state_dynamic,), feed_dict=feeds) # Test gradients to inputs and variables w.r.t. 
outputs & final state dynamic_grad_values = sess.run(dynamic_gradients, feed_dict=feeds) dynamic_individual_grad_values = sess.run( dynamic_individual_gradients, feed_dict=feeds) dynamic_individual_var_grad_values = sess.run( dynamic_individual_variable_gradients, feed_dict=feeds) ######### Step 3: Comparisons if not in_graph_mode: values_static = outputs_static values_dynamic = split_outputs_dynamic state_value_static = state_static state_value_dynamic = state_dynamic self.assertEqual(len(values_static), len(values_dynamic)) for (value_static, value_dynamic) in zip(values_static, values_dynamic): self.assertAllEqual(value_static, value_dynamic) self.assertAllEqual(state_value_static, state_value_dynamic) if in_graph_mode: self.assertAllEqual(static_grad_values, dynamic_grad_values) self.assertEqual( len(static_individual_grad_values), len(dynamic_individual_grad_values)) self.assertEqual( len(static_individual_var_grad_values), len(dynamic_individual_var_grad_values)) for i, (a, b) in enumerate( zip(static_individual_grad_values, dynamic_individual_grad_values)): tf_logging.info("Comparing individual gradients iteration %d" % i) self.assertAllEqual(a, b) for i, (a, b) in enumerate( zip(static_individual_var_grad_values, dynamic_individual_var_grad_values)): tf_logging.info( "Comparing individual variable gradients iteration %d" % i) self.assertAllEqual(a, b) @test_util.run_in_graph_and_eager_modes def testDynamicEquivalentToStaticRNN(self): self._testDynamicEquivalentToStaticRNN(use_sequence_length=False) self._testDynamicEquivalentToStaticRNN(use_sequence_length=False) class BidirectionalRNNTest(test.TestCase): def setUp(self): self._seed = 23489 np.random.seed(self._seed) def _createBidirectionalRNN(self, use_shape, use_sequence_length, scope=None): num_units = 3 input_size = 5 batch_size = 2 max_length = 8 initializer = init_ops.random_uniform_initializer( -0.01, 0.01, seed=self._seed) sequence_length = array_ops.placeholder( dtypes.int64) if 
use_sequence_length else None cell_fw = rnn_cell.LSTMCell( num_units, input_size, initializer=initializer, state_is_tuple=False) cell_bw = rnn_cell.LSTMCell( num_units, input_size, initializer=initializer, state_is_tuple=False) inputs = max_length * [ array_ops.placeholder( dtypes.float32, shape=(batch_size, input_size) if use_shape else (None, input_size)) ] outputs, state_fw, state_bw = rnn.static_bidirectional_rnn( cell_fw, cell_bw, inputs, dtype=dtypes.float32, sequence_length=sequence_length, scope=scope) self.assertEqual(len(outputs), len(inputs)) for out in outputs: self.assertEqual(out.get_shape().as_list(), [batch_size if use_shape else None, 2 * num_units]) input_value = np.random.randn(batch_size, input_size) outputs = array_ops.stack(outputs) return input_value, inputs, outputs, state_fw, state_bw, sequence_length def _testBidirectionalRNN(self, use_shape): with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess: input_value, inputs, outputs, state_fw, state_bw, sequence_length = ( self._createBidirectionalRNN(use_shape, True)) variables_lib.global_variables_initializer().run() # Run with pre-specified sequence length of 2, 3 out, s_fw, s_bw = sess.run( [outputs, state_fw, state_bw], feed_dict={ inputs[0]: input_value, sequence_length: [2, 3] }) # Since the forward and backward LSTM cells were initialized with the # same parameters, the forward and backward output has to be the same, # but reversed in time. 
# (continuation of _testBidirectionalRNN output checks)
      # The format is output[time][batch][depth], and
      # due to depth concatenation (as num_units=3 for both RNNs):
      # - forward output:  out[][][depth] for 0 <= depth < 3
      # - backward output: out[][][depth] for 4 <= depth < 6
      #
      # First sequence in batch is length=2
      # Check that the time=0 forward output is equal to time=1 backward
      # output
      self.assertEqual(out[0][0][0], out[1][0][3])
      self.assertEqual(out[0][0][1], out[1][0][4])
      self.assertEqual(out[0][0][2], out[1][0][5])
      # Check that the time=1 forward output is equal to time=0 backward
      # output
      self.assertEqual(out[1][0][0], out[0][0][3])
      self.assertEqual(out[1][0][1], out[0][0][4])
      self.assertEqual(out[1][0][2], out[0][0][5])

      # Second sequence in batch is length=3
      # Check that the time=0 forward output is equal to time=2 backward
      # output
      self.assertEqual(out[0][1][0], out[2][1][3])
      self.assertEqual(out[0][1][1], out[2][1][4])
      self.assertEqual(out[0][1][2], out[2][1][5])
      # Check that the time=1 forward output is equal to time=1 backward
      # output
      self.assertEqual(out[1][1][0], out[1][1][3])
      self.assertEqual(out[1][1][1], out[1][1][4])
      self.assertEqual(out[1][1][2], out[1][1][5])
      # Check that the time=2 forward output is equal to time=0 backward
      # output
      self.assertEqual(out[2][1][0], out[0][1][3])
      self.assertEqual(out[2][1][1], out[0][1][4])
      self.assertEqual(out[2][1][2], out[0][1][5])
      # Via the reasoning above, the forward and backward final state should
      # be exactly the same
      self.assertAllClose(s_fw, s_bw)

  def _testBidirectionalRNNWithoutSequenceLength(self, use_shape):
    # Same mirror-in-time check as _testBidirectionalRNN, but with
    # sequence_length=None so both batch entries run the full 8 steps.
    with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
      input_value, inputs, outputs, state_fw, state_bw, _ = (
          self._createBidirectionalRNN(use_shape, False))
      variables_lib.global_variables_initializer().run()
      out, s_fw, s_bw = sess.run(
          [outputs, state_fw, state_bw], feed_dict={
              inputs[0]: input_value
          })

      # Since the forward and backward LSTM cells were initialized with the
      # same parameters, the forward and backward output has to be the same,
      # but reversed in time. The format is output[time][batch][depth], and
      # due to depth concatenation (as num_units=3 for both RNNs):
      # - forward output:  out[][][depth] for 0 <= depth < 3
      # - backward output: out[][][depth] for 4 <= depth < 6
      #
      # Both sequences in batch are length=8.  Check that the time=i
      # forward output is equal to time=8-1-i backward output
      for i in xrange(8):
        self.assertEqual(out[i][0][0], out[8 - 1 - i][0][3])
        self.assertEqual(out[i][0][1], out[8 - 1 - i][0][4])
        self.assertEqual(out[i][0][2], out[8 - 1 - i][0][5])
      for i in xrange(8):
        self.assertEqual(out[i][1][0], out[8 - 1 - i][1][3])
        self.assertEqual(out[i][1][1], out[8 - 1 - i][1][4])
        self.assertEqual(out[i][1][2], out[8 - 1 - i][1][5])
      # Via the reasoning above, the forward and backward final state should
      # be exactly the same
      self.assertAllClose(s_fw, s_bw)

  def testBidirectionalRNN(self):
    self._testBidirectionalRNN(use_shape=False)
    self._testBidirectionalRNN(use_shape=True)

  def testBidirectionalRNNWithoutSequenceLength(self):
    self._testBidirectionalRNNWithoutSequenceLength(use_shape=False)
    self._testBidirectionalRNNWithoutSequenceLength(use_shape=True)

  def _createBidirectionalDynamicRNN(self,
                                     use_shape,
                                     use_state_tuple,
                                     use_time_major,
                                     use_sequence_length,
                                     scope=None):
    # Dynamic-RNN analogue of _createBidirectionalRNN.
    # NOTE(review): use_state_tuple toggles the cells' LSTM state
    # representation (tuple vs. concatenated).
    num_units = 3
    input_size = 5
    batch_size = 2
    max_length = 8

    initializer = init_ops.random_uniform_initializer(
        -0.01, 0.01, seed=self._seed)
    sequence_length = (
        array_ops.placeholder(dtypes.int64) if use_sequence_length else None)
    cell_fw = rnn_cell.LSTMCell(
        num_units, initializer=initializer, state_is_tuple=use_state_tuple)
    cell_bw = rnn_cell.LSTMCell(
        num_units, initializer=initializer, state_is_tuple=use_state_tuple)
    inputs = max_length * [
        array_ops.placeholder(
            dtypes.float32,
            shape=(batch_size if use_shape else None, input_size))
    ]
    inputs_c = array_ops.stack(inputs)
    if not use_time_major:
      # dynamic_rnn expects batch-major input when time_major=False.
      inputs_c = array_ops.transpose(inputs_c, [1, 0, 2])
    outputs, states = rnn.bidirectional_dynamic_rnn(
        cell_fw,
        cell_bw,
        inputs_c,
# (continuation of _createBidirectionalDynamicRNN)
        sequence_length,
        dtype=dtypes.float32,
        time_major=use_time_major,
        scope=scope)
    outputs = array_ops.concat(outputs, 2)
    state_fw, state_bw = states
    outputs_shape = [None, max_length, 2 * num_units]
    if use_shape:
      outputs_shape[0] = batch_size
    if use_time_major:
      outputs_shape[0], outputs_shape[1] = outputs_shape[1], outputs_shape[0]
    self.assertEqual(outputs.get_shape().as_list(), outputs_shape)

    input_value = np.random.randn(batch_size, input_size)

    return input_value, inputs, outputs, state_fw, state_bw, sequence_length

  def _testBidirectionalDynamicRNN(self, use_shape, use_state_tuple,
                                   use_time_major, use_sequence_length):
    # Runs a bidirectional dynamic RNN; because forward and backward cells
    # start from identical parameters, the outputs must mirror each other in
    # time and the final states must match.
    with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
      input_value, inputs, outputs, state_fw, state_bw, sequence_length = (
          self._createBidirectionalDynamicRNN(
              use_shape, use_state_tuple, use_time_major,
              use_sequence_length))
      variables_lib.global_variables_initializer().run()
      # Run with pre-specified sequence length of 2, 3
      feed_dict = ({sequence_length: [2, 3]} if use_sequence_length else {})
      feed_dict.update({inputs[0]: input_value})
      if use_state_tuple:
        out, c_fw, m_fw, c_bw, m_bw = sess.run(
            [outputs, state_fw[0], state_fw[1], state_bw[0], state_bw[1]],
            feed_dict=feed_dict)
        s_fw = (c_fw, m_fw)
        s_bw = (c_bw, m_bw)
      else:
        feed_dict.update({inputs[0]: input_value})
        out, s_fw, s_bw = sess.run(
            [outputs, state_fw, state_bw], feed_dict=feed_dict)

      # Since the forward and backward LSTM cells were initialized with the
      # same parameters, the forward and backward output has to be the same,
      # but reversed in time. The format is output[time][batch][depth], and
      # due to depth concatenation (as num_units=3 for both RNNs):
      # - forward output:  out[][][depth] for 0 <= depth < 3
      # - backward output: out[][][depth] for 4 <= depth < 6
      #
      if not use_time_major:
        # Normalize to time-major layout before indexing below.
        out = np.swapaxes(out, 0, 1)

      if use_sequence_length:
        # First sequence in batch is length=2
        # Check that the t=0 forward output is equal to t=1 backward output
        self.assertEqual(out[0][0][0], out[1][0][3])
        self.assertEqual(out[0][0][1], out[1][0][4])
        self.assertEqual(out[0][0][2], out[1][0][5])
        # Check that the t=1 forward output is equal to t=0 backward output
        self.assertEqual(out[1][0][0], out[0][0][3])
        self.assertEqual(out[1][0][1], out[0][0][4])
        self.assertEqual(out[1][0][2], out[0][0][5])

        # Second sequence in batch is length=3
        # Check that the t=0 forward output is equal to t=2 backward output
        self.assertEqual(out[0][1][0], out[2][1][3])
        self.assertEqual(out[0][1][1], out[2][1][4])
        self.assertEqual(out[0][1][2], out[2][1][5])
        # Check that the t=1 forward output is equal to t=1 backward output
        self.assertEqual(out[1][1][0], out[1][1][3])
        self.assertEqual(out[1][1][1], out[1][1][4])
        self.assertEqual(out[1][1][2], out[1][1][5])
        # Check that the t=2 forward output is equal to t=0 backward output
        self.assertEqual(out[2][1][0], out[0][1][3])
        self.assertEqual(out[2][1][1], out[0][1][4])
        self.assertEqual(out[2][1][2], out[0][1][5])
        # Via the reasoning above, the forward and backward final state
        # should be exactly the same
        self.assertAllClose(s_fw, s_bw)
      else:  # not use_sequence_length
        max_length = 8  # from createBidirectionalDynamicRNN
        for t in range(max_length):
          self.assertAllEqual(out[t, :, 0:3], out[max_length - t - 1, :, 3:6])
        self.assertAllClose(s_fw, s_bw)

  def testBidirectionalDynamicRNN(self):
    # Generate 2^4 option values (NOTE: the original comment claimed 2^5,
    # but only four booleans are varied here)
    # from [True, True, True, True] to [False, False, False, False]
    options = itertools.product([True, False], repeat=4)
    for option in options:
      self._testBidirectionalDynamicRNN(
# (continuation of testBidirectionalDynamicRNN)
          use_shape=option[0],
          use_state_tuple=option[1],
          use_time_major=option[2],
          use_sequence_length=option[3])

  def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
    # REMARKS: factory(scope) is a function accepting a scope
    #          as an argument, such scope can be None, a string
    #          or a VariableScope instance.
    with self.test_session(use_gpu=True, graph=ops_lib.Graph()):
      if use_outer_scope:
        with variable_scope.variable_scope(prefix) as scope:
          factory(scope)
      else:
        factory(prefix)

      # check that all the variables names starts
      # with the proper scope.
      variables_lib.global_variables_initializer()
      all_vars = variables_lib.global_variables()
      prefix = prefix or "bidirectional_rnn"
      scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
      tf_logging.info("BiRNN with scope: %s (%s)" %
                      (prefix, "scope" if use_outer_scope else "str"))
      for v in scope_vars:
        tf_logging.info(v.name)
      self.assertEqual(len(scope_vars), len(all_vars))

  def testBidirectionalRNNScope(self):

    def factory(scope):
      return self._createBidirectionalRNN(
          use_shape=True, use_sequence_length=True, scope=scope)

    self._testScope(factory, use_outer_scope=True)
    self._testScope(factory, use_outer_scope=False)
    self._testScope(factory, prefix=None, use_outer_scope=False)

  def testBidirectionalDynamicRNNScope(self):

    def get_factory(use_time_major):

      def factory(scope):
        return self._createBidirectionalDynamicRNN(
            use_shape=True,
            use_state_tuple=True,
            use_sequence_length=True,
            use_time_major=use_time_major,
            scope=scope)

      return factory

    self._testScope(get_factory(True), use_outer_scope=True)
    self._testScope(get_factory(True), use_outer_scope=False)
    self._testScope(get_factory(True), prefix=None, use_outer_scope=False)
    self._testScope(get_factory(False), use_outer_scope=True)
    self._testScope(get_factory(False), use_outer_scope=False)
    self._testScope(get_factory(False), prefix=None, use_outer_scope=False)


class MultiDimensionalLSTMTest(test.TestCase):
  """Checks all RNN containers agree on a multi-dimensional-I/O cell."""

  def setUp(self):
    self._seed = 23489  # Fixed seed keeps random inputs reproducible.
    np.random.seed(self._seed)

  def testMultiDimensionalLSTMAllRNNContainers(self):
    # Runs DummyMultiDimensionalLSTM through static_rnn, dynamic_rnn,
    # static_bidirectional_rnn and static_state_saving_rnn and checks that
    # they all produce identical outputs and final states.
    feature_dims = (3, 4, 5)
    input_size = feature_dims
    batch_size = 2
    max_length = 8
    sequence_length = [4, 6]
    with self.test_session(graph=ops_lib.Graph()) as sess:
      inputs = max_length * [
          array_ops.placeholder(dtypes.float32, shape=(None,) + input_size)
      ]
      inputs_using_dim = max_length * [
          array_ops.placeholder(
              dtypes.float32, shape=(batch_size,) + input_size)
      ]
      inputs_c = array_ops.stack(inputs)
      # Create a cell for the whole test. This is fine because the cell has
      # no variables.
      cell = DummyMultiDimensionalLSTM(feature_dims)
      state_saver = TestStateSaver(batch_size, input_size)
      outputs_static, state_static = rnn.static_rnn(
          cell, inputs, dtype=dtypes.float32, sequence_length=sequence_length)
      outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
          cell,
          inputs_c,
          dtype=dtypes.float32,
          time_major=True,
          sequence_length=sequence_length)
      outputs_bid, state_fw, state_bw = rnn.static_bidirectional_rnn(
          cell,
          cell,
          inputs_using_dim,
          dtype=dtypes.float32,
          sequence_length=sequence_length)
      outputs_sav, state_sav = rnn.static_state_saving_rnn(
          cell,
          inputs_using_dim,
          sequence_length=sequence_length,
          state_saver=state_saver,
          state_name=("h", "c"))

      self.assertEqual(outputs_dynamic.get_shape().as_list(),
                       inputs_c.get_shape().as_list())
      for out, inp in zip(outputs_static, inputs):
        self.assertEqual(out.get_shape().as_list(), inp.get_shape().as_list())
      for out, inp in zip(outputs_bid, inputs_using_dim):
        input_shape_list = inp.get_shape().as_list()
        # fwd and bwd activations are concatenated along the second dim.
        input_shape_list[1] *= 2
        self.assertEqual(out.get_shape().as_list(), input_shape_list)

      variables_lib.global_variables_initializer().run()

      input_total_size = (batch_size,) + input_size
      input_value = np.random.randn(*input_total_size)
      outputs_static_v = sess.run(
          outputs_static, feed_dict={
              inputs[0]: input_value
          })
      outputs_dynamic_v = sess.run(
          outputs_dynamic, feed_dict={
              inputs[0]: input_value
          })
      outputs_bid_v = sess.run(
          outputs_bid, feed_dict={
              inputs_using_dim[0]: input_value
          })
      outputs_sav_v = sess.run(
          outputs_sav, feed_dict={
              inputs_using_dim[0]: input_value
          })

      self.assertAllEqual(outputs_static_v, outputs_dynamic_v)
      self.assertAllEqual(outputs_static_v, outputs_sav_v)
      outputs_static_array = np.array(outputs_static_v)
      outputs_static_array_double = np.concatenate(
          (outputs_static_array, outputs_static_array), axis=2)
      outputs_bid_array = np.array(outputs_bid_v)
      self.assertAllEqual(outputs_static_array_double, outputs_bid_array)

      state_static_v = sess.run(
          state_static, feed_dict={
              inputs[0]: input_value
          })
      state_dynamic_v = sess.run(
          state_dynamic, feed_dict={
              inputs[0]: input_value
          })
      state_bid_fw_v = sess.run(
          state_fw, feed_dict={
              inputs_using_dim[0]: input_value
          })
      state_bid_bw_v = sess.run(
          state_bw, feed_dict={
              inputs_using_dim[0]: input_value
          })
      state_sav_v = sess.run(
          state_sav, feed_dict={
              inputs_using_dim[0]: input_value
          })
      self.assertAllEqual(np.hstack(state_static_v),
                          np.hstack(state_dynamic_v))
      self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_sav_v))
      self.assertAllEqual(np.hstack(state_static_v),
                          np.hstack(state_bid_fw_v))
      self.assertAllEqual(np.hstack(state_static_v),
                          np.hstack(state_bid_bw_v))


class NestedLSTMTest(test.TestCase):
  """Checks all RNN containers agree on a cell with tuple-structured I/O."""

  def setUp(self):
    self._seed = 23489  # Fixed seed keeps random inputs reproducible.
    np.random.seed(self._seed)

  def testNestedIOLSTMAllRNNContainers(self):
    # Runs NestedRNNCell through every RNN container and checks they all
    # agree on outputs and final states (body continues below).
    input_size = 5
    batch_size = 2
    state_size = 6
    max_length = 8
    sequence_length = [4, 6]
    with self.test_session(graph=ops_lib.Graph()) as sess:
      state_saver = TestStateSaver(batch_size, state_size)
# (continuation of testNestedIOLSTMAllRNNContainers)
      single_input = (array_ops.placeholder(
          dtypes.float32, shape=(None, input_size)),
                      array_ops.placeholder(
                          dtypes.float32, shape=(None, input_size)))
      inputs = max_length * [single_input]
      inputs_c = (array_ops.stack([input_[0] for input_ in inputs]),
                  array_ops.stack([input_[1] for input_ in inputs]))
      single_input_using_dim = (array_ops.placeholder(
          dtypes.float32, shape=(batch_size, input_size)),
                                array_ops.placeholder(
                                    dtypes.float32,
                                    shape=(batch_size, input_size)))
      inputs_using_dim = max_length * [single_input_using_dim]

      # Create a cell for the whole test. This is fine because the cell has
      # no variables.
      cell = NestedRNNCell()
      outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
          cell,
          inputs_c,
          dtype=dtypes.float32,
          time_major=True,
          sequence_length=sequence_length)
      outputs_static, state_static = rnn.static_rnn(
          cell, inputs, dtype=dtypes.float32, sequence_length=sequence_length)
      outputs_bid, state_fw, state_bw = rnn.static_bidirectional_rnn(
          cell,
          cell,
          inputs_using_dim,
          dtype=dtypes.float32,
          sequence_length=sequence_length)
      outputs_sav, state_sav = rnn.static_state_saving_rnn(
          cell,
          inputs_using_dim,
          sequence_length=sequence_length,
          state_saver=state_saver,
          state_name=("h", "c"))

      def _assert_same_shape(input1, input2, double=False):
        # Elementwise shape comparison of two nested structures; `double`
        # accounts for fw/bw concatenation along dim 1.
        flat_input1 = nest.flatten(input1)
        flat_input2 = nest.flatten(input2)
        for inp1, inp2 in zip(flat_input1, flat_input2):
          input_shape = inp1.get_shape().as_list()
          if double:
            input_shape[1] *= 2
          self.assertEqual(input_shape, inp2.get_shape().as_list())

      _assert_same_shape(inputs_c, outputs_dynamic)
      _assert_same_shape(inputs, outputs_static)
      _assert_same_shape(inputs_using_dim, outputs_sav)
      _assert_same_shape(inputs_using_dim, outputs_bid, double=True)

      variables_lib.global_variables_initializer().run()

      input_total_size = (batch_size, input_size)
      input_value = (np.random.randn(*input_total_size),
                     np.random.randn(*input_total_size))
      outputs_dynamic_v = sess.run(
          outputs_dynamic, feed_dict={
              single_input: input_value
          })
      outputs_static_v = sess.run(
          outputs_static, feed_dict={
              single_input: input_value
          })
      outputs_sav_v = sess.run(
          outputs_sav, feed_dict={
              single_input_using_dim: input_value
          })
      outputs_bid_v = sess.run(
          outputs_bid, feed_dict={
              single_input_using_dim: input_value
          })

      self.assertAllEqual(outputs_static_v,
                          np.transpose(outputs_dynamic_v, (1, 0, 2, 3)))
      self.assertAllEqual(outputs_static_v, outputs_sav_v)
      outputs_static_array = np.array(outputs_static_v)
      outputs_static_array_double = np.concatenate(
          (outputs_static_array, outputs_static_array), axis=3)
      outputs_bid_array = np.array(outputs_bid_v)
      self.assertAllEqual(outputs_static_array_double, outputs_bid_array)

      state_dynamic_v = sess.run(
          state_dynamic, feed_dict={
              single_input: input_value
          })
      state_static_v = sess.run(
          state_static, feed_dict={
              single_input: input_value
          })
      state_bid_fw_v = sess.run(
          state_fw, feed_dict={
              single_input_using_dim: input_value
          })
      state_bid_bw_v = sess.run(
          state_bw, feed_dict={
              single_input_using_dim: input_value
          })
      state_sav_v = sess.run(
          state_sav, feed_dict={
              single_input_using_dim: input_value
          })
      self.assertAllEqual(np.hstack(state_static_v),
                          np.hstack(state_dynamic_v))
      self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_sav_v))
      self.assertAllEqual(np.hstack(state_static_v),
                          np.hstack(state_bid_fw_v))
      self.assertAllEqual(np.hstack(state_static_v),
                          np.hstack(state_bid_bw_v))


class StateSaverRNNTest(test.TestCase):
  """Tests for static_state_saving_rnn scoping and state-saver interaction."""

  def setUp(self):
    self._seed = 23489  # Fixed seed keeps random inputs reproducible.
    np.random.seed(self._seed)

  def _factory(self, scope, state_saver):
    # Builds a state-saving LSTM driven by all-zero inputs; the state_saver
    # supplies and receives the recurrent state under the name "save_lstm".
    num_units = state_saver.state_size // 2
    batch_size = state_saver.batch_size
    input_size = 5
    max_length = 8
    initializer = init_ops.random_uniform_initializer(
        -0.01, 0.01, seed=self._seed)
    cell = rnn_cell.LSTMCell(
        num_units,
        use_peepholes=False,
        initializer=initializer,
        state_is_tuple=False)
    inputs = max_length * [
        array_ops.zeros(dtype=dtypes.float32, shape=(batch_size, input_size))
    ]
    out, state = rnn.static_state_saving_rnn(
        cell,
        inputs,
        state_saver=state_saver,
# (continuation of StateSaverRNNTest._factory)
        state_name="save_lstm",
        scope=scope)
    return out, state, state_saver

  def _testScope(self, prefix="prefix", use_outer_scope=True):
    # Verifies that every variable created by the state-saving RNN lives
    # under the expected scope prefix.
    num_units = 3
    batch_size = 2
    state_saver = TestStateSaver(batch_size, 2 * num_units)
    with self.test_session(use_gpu=True, graph=ops_lib.Graph()):
      if use_outer_scope:
        with variable_scope.variable_scope(prefix) as scope:
          self._factory(scope=scope, state_saver=state_saver)
      else:
        self._factory(scope=prefix, state_saver=state_saver)
      # NOTE(review): the chunked source makes the indentation of this call
      # ambiguous -- confirm it is intended to run for both branches.
      variables_lib.global_variables_initializer()

      # check that all the variables names starts
      # with the proper scope.
      all_vars = variables_lib.global_variables()
      prefix = prefix or "rnn"
      scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
      tf_logging.info("RNN with scope: %s (%s)" %
                      (prefix, "scope" if use_outer_scope else "str"))
      for v in scope_vars:
        tf_logging.info(v.name)
      self.assertEqual(len(scope_vars), len(all_vars))

  def testStateSaverRNNScope(self):
    self._testScope(use_outer_scope=True)
    self._testScope(use_outer_scope=False)
    self._testScope(prefix=None, use_outer_scope=False)

  def testStateSaverCallsSaveState(self):
    """Test that number of calls to state and save_state is equal.

    Test if the order of actual evaluating or skipping evaluation of out,
    state tensors, which are the output tensors from static_state_saving_rnn,
    have influence on number of calls to save_state and state methods of
    state_saver object (the number of calls should be same.)
    """
    num_units = 3
    batch_size = 2
    state_saver = TestStateSaverWithCounters(batch_size, 2 * num_units)
    out, state, state_saver = self._factory(
        scope=None, state_saver=state_saver)

    with self.test_session() as sess:
      sess.run(variables_lib.global_variables_initializer())
      sess.run(variables_lib.local_variables_initializer())

      # Fetch both outputs: state/save_state call counts must match.
      _, _, num_state_calls, num_save_state_calls = sess.run([
          out, state, state_saver.num_state_calls,
          state_saver.num_save_state_calls
      ])
      self.assertEqual(num_state_calls, num_save_state_calls)

      # Fetch only `out`: counts must still match.
      _, num_state_calls, num_save_state_calls = sess.run([
          out, state_saver.num_state_calls, state_saver.num_save_state_calls
      ])
      self.assertEqual(num_state_calls, num_save_state_calls)

      # Fetch only `state`: counts must still match.
      _, num_state_calls, num_save_state_calls = sess.run([
          state, state_saver.num_state_calls, state_saver.num_save_state_calls
      ])
      self.assertEqual(num_state_calls, num_save_state_calls)


class GRUTest(test.TestCase):
  """Smoke and scoping tests for GRUCell under dynamic_rnn."""

  def setUp(self):
    self._seed = 23489  # Fixed seed keeps random inputs reproducible.
    np.random.seed(self._seed)

  def testDynamic(self):
    # Smoke test: a GRU under dynamic_rnn builds and runs without error.
    time_steps = 8
    num_units = 3
    input_size = 5
    batch_size = 2

    input_values = np.random.randn(time_steps, batch_size, input_size)
    sequence_length = np.random.randint(0, time_steps, size=batch_size)

    with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
      concat_inputs = array_ops.placeholder(
          dtypes.float32, shape=(time_steps, batch_size, input_size))

      cell = rnn_cell.GRUCell(num_units=num_units)

      with variable_scope.variable_scope("dynamic_scope"):
        outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
            cell,
            inputs=concat_inputs,
            sequence_length=sequence_length,
            time_major=True,
            dtype=dtypes.float32)

      feeds = {concat_inputs: input_values}

      # Initialize
      variables_lib.global_variables_initializer().run(feed_dict=feeds)

      sess.run([outputs_dynamic, state_dynamic], feed_dict=feeds)

  def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
    # Verifies variables created by `factory` live under the expected prefix.
    with self.test_session(use_gpu=True, graph=ops_lib.Graph()):
      if use_outer_scope:
        with variable_scope.variable_scope(prefix) as scope:
          factory(scope)
      else:
# (continuation of GRUTest._testScope)
        factory(prefix)
      variables_lib.global_variables_initializer()

      # check that all the variables names starts
      # with the proper scope.
      all_vars = variables_lib.global_variables()
      prefix = prefix or "rnn"
      scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
      tf_logging.info("RNN with scope: %s (%s)" %
                      (prefix, "scope" if use_outer_scope else "str"))
      for v in scope_vars:
        tf_logging.info(v.name)
      self.assertEqual(len(scope_vars), len(all_vars))

  def testDynamicScope(self):
    time_steps = 8
    num_units = 3
    input_size = 5
    batch_size = 2
    sequence_length = np.random.randint(0, time_steps, size=batch_size)

    def factory(scope):
      concat_inputs = array_ops.placeholder(
          dtypes.float32, shape=(time_steps, batch_size, input_size))
      cell = rnn_cell.GRUCell(num_units=num_units)
      return rnn.dynamic_rnn(
          cell,
          inputs=concat_inputs,
          sequence_length=sequence_length,
          time_major=True,
          dtype=dtypes.float32,
          scope=scope)

    self._testScope(factory, use_outer_scope=True)
    self._testScope(factory, use_outer_scope=False)
    self._testScope(factory, prefix=None, use_outer_scope=False)


class RawRNNTest(test.TestCase):
  """Tests for raw_rnn (user-supplied loop_fn) against dynamic_rnn."""

  def setUp(self):
    self._seed = 23489  # Fixed seed keeps random inputs reproducible.
    np.random.seed(self._seed)

  def _testRawRNN(self, max_time):
    # Builds the same LSTM via raw_rnn (explicit loop_fn) and dynamic_rnn
    # with shared weights, then compares outputs, final states, and
    # (in a later chunk) gradients.
    with self.test_session(graph=ops_lib.Graph()) as sess:
      batch_size = 16
      input_depth = 4
      num_units = 3

      inputs = array_ops.placeholder(
          shape=(max_time, batch_size, input_depth), dtype=dtypes.float32)
      sequence_length = array_ops.placeholder(
          shape=(batch_size,), dtype=dtypes.int32)
      inputs_ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32, size=array_ops.shape(inputs)[0])
      inputs_ta = inputs_ta.unstack(inputs)

      cell = rnn_cell.LSTMCell(num_units, state_is_tuple=True)

      def loop_fn(time_, cell_output, cell_state, unused_loop_state):
        emit_output = cell_output  # == None for time == 0
        if cell_output is None:  # time == 0
          next_state = cell.zero_state(batch_size, dtypes.float32)
        else:
          next_state = cell_state  # copy state through
        elements_finished = (time_ >= sequence_length)
        finished = math_ops.reduce_all(elements_finished)
        # For the very final iteration, we must emit a dummy input
        next_input = control_flow_ops.cond(
            finished,
            lambda: array_ops.zeros(
                [batch_size, input_depth], dtype=dtypes.float32),
            lambda: inputs_ta.read(time_))
        return (elements_finished, next_input, next_state, emit_output, None)

      reuse_scope = variable_scope.get_variable_scope()

      outputs_ta, final_state, _ = rnn.raw_rnn(
          cell, loop_fn, scope=reuse_scope)
      outputs = outputs_ta.stack()

      reuse_scope.reuse_variables()

      outputs_dynamic_rnn, final_state_dynamic_rnn = rnn.dynamic_rnn(
          cell,
          inputs,
          time_major=True,
          dtype=dtypes.float32,
          sequence_length=sequence_length,
          scope=reuse_scope)

      variables = variables_lib.trainable_variables()
      gradients = gradients_impl.gradients([outputs, final_state],
                                           [inputs] + variables)
      gradients_dynamic_rnn = gradients_impl.gradients(
          [outputs_dynamic_rnn, final_state_dynamic_rnn],
          [inputs] + variables)

      variables_lib.global_variables_initializer().run()

      rand_input = np.random.randn(max_time, batch_size, input_depth)
      if max_time == 0:
        rand_seq_len = np.zeros(batch_size)
      else:
        rand_seq_len = np.random.randint(max_time, size=batch_size)

      # To ensure same output lengths for dynamic_rnn and raw_rnn
      rand_seq_len[0] = max_time

      (outputs_val, outputs_dynamic_rnn_val, final_state_val,
       final_state_dynamic_rnn_val) = sess.run(
           [outputs, outputs_dynamic_rnn, final_state,
            final_state_dynamic_rnn],
           feed_dict={
               inputs: rand_input,
               sequence_length: rand_seq_len
           })

      self.assertAllClose(outputs_dynamic_rnn_val, outputs_val)
      self.assertAllClose(final_state_dynamic_rnn_val, final_state_val)

      # NOTE: Because with 0 time steps, raw_rnn does not have shape
      # information about the input, it is impossible to perform
      # gradients comparisons as the gradients eval will fail. So
      # this case skips the gradients test.
if max_time > 0: self.assertEqual(len(gradients), len(gradients_dynamic_rnn)) gradients_val = sess.run( gradients, feed_dict={ inputs: rand_input, sequence_length: rand_seq_len }) gradients_dynamic_rnn_val = sess.run( gradients_dynamic_rnn, feed_dict={ inputs: rand_input, sequence_length: rand_seq_len }) self.assertEqual(len(gradients_val), len(gradients_dynamic_rnn_val)) input_gradients_val = gradients_val[0] input_gradients_dynamic_rnn_val = gradients_dynamic_rnn_val[0] self.assertAllClose(input_gradients_val, input_gradients_dynamic_rnn_val) for i in range(1, len(gradients_val)): self.assertAllClose(gradients_dynamic_rnn_val[i], gradients_val[i]) def testRawRNNZeroLength(self): # NOTE: Because with 0 time steps, raw_rnn does not have shape # information about the input, it is impossible to perform # gradients comparisons as the gradients eval will fail. So this # case skips the gradients test. self._testRawRNN(max_time=0) def testRawRNN(self): self._testRawRNN(max_time=10) def testLoopState(self): with self.test_session(graph=ops_lib.Graph()): max_time = 10 batch_size = 16 input_depth = 4 num_units = 3 inputs = np.random.randn(max_time, batch_size, input_depth) inputs_ta = tensor_array_ops.TensorArray( dtype=dtypes.float32, size=array_ops.shape(inputs)[0]) inputs_ta = inputs_ta.unstack(inputs) cell = rnn_cell.LSTMCell(num_units, state_is_tuple=True) def loop_fn(time_, cell_output, cell_state, loop_state): if cell_output is None: loop_state = constant_op.constant([0]) next_state = cell.zero_state(batch_size, dtypes.float32) else: loop_state = array_ops.stack([array_ops.squeeze(loop_state) + 1]) next_state = cell_state emit_output = cell_output # == None for time == 0 elements_finished = array_ops.tile([time_ >= max_time], [batch_size]) finished = math_ops.reduce_all(elements_finished) # For the very final iteration, we must emit a dummy input next_input = control_flow_ops.cond( finished, lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32), 
lambda: inputs_ta.read(time_)) return (elements_finished, next_input, next_state, emit_output, loop_state) r = rnn.raw_rnn(cell, loop_fn) loop_state = r[-1] self.assertEqual([10], loop_state.eval()) def testLoopStateWithTensorArray(self): with self.test_session(graph=ops_lib.Graph()): max_time = 4 batch_size = 16 input_depth = 4 num_units = 3 inputs = np.random.randn(max_time, batch_size, input_depth) inputs_ta = tensor_array_ops.TensorArray( dtype=dtypes.float32, size=array_ops.shape(inputs)[0]) inputs_ta = inputs_ta.unstack(inputs) cell = rnn_cell.LSTMCell(num_units, state_is_tuple=True) def loop_fn(time_, cell_output, cell_state, loop_state): if cell_output is None: loop_state = tensor_array_ops.TensorArray( dynamic_size=True, size=0, dtype=dtypes.int32, clear_after_read=False) loop_state = loop_state.write(0, 1) next_state = cell.zero_state(batch_size, dtypes.float32) else: loop_state = loop_state.write(time_, loop_state.read(time_ - 1) + time_) next_state = cell_state emit_output = cell_output # == None for time == 0 elements_finished = array_ops.tile([time_ >= max_time], [batch_size]) finished = math_ops.reduce_all(elements_finished) # For the very final iteration, we must emit a dummy input next_input = control_flow_ops.cond( finished, lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32), lambda: inputs_ta.read(time_)) return (elements_finished, next_input, next_state, emit_output, loop_state) r = rnn.raw_rnn(cell, loop_fn) loop_state = r[-1] loop_state = loop_state.stack() self.assertAllEqual([1, 2, 2 + 2, 4 + 3, 7 + 4], loop_state.eval()) def testEmitDifferentStructureThanCellOutput(self): with self.test_session(graph=ops_lib.Graph()) as sess: max_time = 10 batch_size = 16 input_depth = 4 num_units = 3 inputs = np.random.randn(max_time, batch_size, input_depth) inputs_ta = tensor_array_ops.TensorArray( dtype=dtypes.float32, size=array_ops.shape(inputs)[0]) inputs_ta = inputs_ta.unstack(inputs) # Verify emit shapes may be unknown by 
feeding a placeholder that # determines an emit shape. unknown_dim = array_ops.placeholder(dtype=dtypes.int32) cell = rnn_cell.LSTMCell(num_units, state_is_tuple=True) def loop_fn(time_, cell_output, cell_state, _): if cell_output is None: emit_output = (array_ops.zeros([2, 3], dtype=dtypes.int32), array_ops.zeros([unknown_dim], dtype=dtypes.int64)) next_state = cell.zero_state(batch_size, dtypes.float32) else: emit_output = (array_ops.ones([batch_size, 2, 3], dtype=dtypes.int32), array_ops.ones( [batch_size, unknown_dim], dtype=dtypes.int64)) next_state = cell_state elements_finished = array_ops.tile([time_ >= max_time], [batch_size]) finished = math_ops.reduce_all(elements_finished) # For the very final iteration, we must emit a dummy input next_input = control_flow_ops.cond( finished, lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32), lambda: inputs_ta.read(time_)) return (elements_finished, next_input, next_state, emit_output, None) r = rnn.raw_rnn(cell, loop_fn) output_ta = r[0] self.assertEqual(2, len(output_ta)) self.assertEqual([dtypes.int32, dtypes.int64], [ta.dtype for ta in output_ta]) output = [ta.stack() for ta in output_ta] output_vals = sess.run(output, feed_dict={unknown_dim: 1}) self.assertAllEqual( np.ones((max_time, batch_size, 2, 3), np.int32), output_vals[0]) self.assertAllEqual( np.ones((max_time, batch_size, 1), np.int64), output_vals[1]) def _testScope(self, factory, prefix="prefix", use_outer_scope=True): with self.test_session(use_gpu=True, graph=ops_lib.Graph()): if use_outer_scope: with variable_scope.variable_scope(prefix) as scope: factory(scope) else: factory(prefix) variables_lib.global_variables_initializer() # check that all the variables names starts # with the proper scope. 
all_vars = variables_lib.global_variables() prefix = prefix or "rnn" scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")] tf_logging.info("RNN with scope: %s (%s)" % (prefix, "scope" if use_outer_scope else "str")) for v in scope_vars: tf_logging.info(v.name) self.assertEqual(len(scope_vars), len(all_vars)) def testRawRNNScope(self): max_time = 10 batch_size = 16 input_depth = 4 num_units = 3 def factory(scope): inputs = array_ops.placeholder( shape=(max_time, batch_size, input_depth), dtype=dtypes.float32) sequence_length = array_ops.placeholder( shape=(batch_size,), dtype=dtypes.int32) inputs_ta = tensor_array_ops.TensorArray( dtype=dtypes.float32, size=array_ops.shape(inputs)[0]) inputs_ta = inputs_ta.unstack(inputs) cell = rnn_cell.LSTMCell(num_units, state_is_tuple=True) def loop_fn(time_, cell_output, cell_state, unused_loop_state): emit_output = cell_output # == None for time == 0 if cell_output is None: # time == 0 next_state = cell.zero_state(batch_size, dtypes.float32) else: next_state = cell_state elements_finished = (time_ >= sequence_length) finished = math_ops.reduce_all(elements_finished) # For the very final iteration, we must emit a dummy input next_input = control_flow_ops.cond( finished, lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32), lambda: inputs_ta.read(time_)) return (elements_finished, next_input, next_state, emit_output, None) return rnn.raw_rnn(cell, loop_fn, scope=scope) self._testScope(factory, use_outer_scope=True) self._testScope(factory, use_outer_scope=False) self._testScope(factory, prefix=None, use_outer_scope=False) class DeviceWrapperCell(rnn_cell.RNNCell): """Class to ensure cell calculation happens on a specific device.""" def __init__(self, cell, device): self._cell = cell self._device = device @property def output_size(self): return self._cell.output_size @property def state_size(self): return self._cell.state_size def __call__(self, input_, state, scope=None): if self._device is 
not None: with ops_lib.device(self._device): return self._cell(input_, state, scope=scope) else: return self._cell(input_, state, scope=scope) class TensorArrayOnCorrectDeviceTest(test.TestCase): def _execute_rnn_on(self, rnn_device=None, cell_device=None, input_device=None): batch_size = 3 time_steps = 7 input_size = 5 num_units = 10 cell = rnn_cell.LSTMCell(num_units, use_peepholes=True) gpu_cell = DeviceWrapperCell(cell, cell_device) inputs = np.random.randn(batch_size, time_steps, input_size).astype( np.float32) sequence_length = np.random.randint(0, time_steps, size=batch_size) if input_device is not None: with ops_lib.device(input_device): inputs = constant_op.constant(inputs) if rnn_device is not None: with ops_lib.device(rnn_device): outputs, _ = rnn.dynamic_rnn( gpu_cell, inputs, sequence_length=sequence_length, dtype=dtypes.float32) else: outputs, _ = rnn.dynamic_rnn( gpu_cell, inputs, sequence_length=sequence_length, dtype=dtypes.float32) with self.test_session(use_gpu=True) as sess: opts = config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE) run_metadata = config_pb2.RunMetadata() variables_lib.global_variables_initializer().run() sess.run(outputs, options=opts, run_metadata=run_metadata) return run_metadata def _retrieve_cpu_gpu_stats(self, run_metadata): cpu_stats = None gpu_stats = None step_stats = run_metadata.step_stats for ds in step_stats.dev_stats: if "cpu:0" in ds.device[-5:].lower(): cpu_stats = ds.node_stats if "gpu:0" == ds.device[-5:].lower(): gpu_stats = ds.node_stats return cpu_stats, gpu_stats def testRNNOnCPUCellOnGPU(self): if not test.is_gpu_available(): return # Test requires access to a GPU gpu_dev = test.gpu_device_name() run_metadata = self._execute_rnn_on( rnn_device="/cpu:0", cell_device=gpu_dev) cpu_stats, gpu_stats = self._retrieve_cpu_gpu_stats(run_metadata) def _assert_in(op_str, in_stats, out_stats): self.assertTrue(any(op_str in s.node_name for s in in_stats)) self.assertFalse(any(op_str in s.node_name for 
s in out_stats)) # Writes happen at output of RNN cell _assert_in("TensorArrayWrite", gpu_stats, cpu_stats) # Gather happens on final TensorArray _assert_in("TensorArrayGather", gpu_stats, cpu_stats) # Reads happen at input to RNN cell _assert_in("TensorArrayRead", cpu_stats, gpu_stats) # Scatters happen to get initial input into TensorArray _assert_in("TensorArrayScatter", cpu_stats, gpu_stats) def testRNNOnCPUCellOnCPU(self): if not test.is_gpu_available(): return # Test requires access to a GPU gpu_dev = test.gpu_device_name() run_metadata = self._execute_rnn_on( rnn_device="/cpu:0", cell_device="/cpu:0", input_device=gpu_dev) cpu_stats, gpu_stats = self._retrieve_cpu_gpu_stats(run_metadata) def _assert_in(op_str, in_stats, out_stats): self.assertTrue(any(op_str in s.node_name for s in in_stats)) self.assertFalse(any(op_str in s.node_name for s in out_stats)) # All TensorArray operations happen on CPU _assert_in("TensorArray", cpu_stats, gpu_stats) def testInputOnGPUCellNotDeclared(self): if not test.is_gpu_available(): return # Test requires access to a GPU gpu_dev = test.gpu_device_name() run_metadata = self._execute_rnn_on(input_device=gpu_dev) cpu_stats, gpu_stats = self._retrieve_cpu_gpu_stats(run_metadata) def _assert_in(op_str, in_stats, out_stats): self.assertTrue(any(op_str in s.node_name for s in in_stats)) self.assertFalse(any(op_str in s.node_name for s in out_stats)) # Everything happens on GPU _assert_in("TensorArray", gpu_stats, cpu_stats) if __name__ == "__main__": test.main()
NL66278/odoo
refs/heads/8.0
addons/point_of_sale/controllers/main.py
43
# -*- coding: utf-8 -*- import logging import simplejson import os import openerp import time import random from openerp import http from openerp.http import request from openerp.addons.web.controllers.main import module_boot, login_redirect _logger = logging.getLogger(__name__) class PosController(http.Controller): @http.route('/pos/web', type='http', auth='user') def a(self, debug=False, **k): cr, uid, context, session = request.cr, request.uid, request.context, request.session if not session.uid: return login_redirect() PosSession = request.registry['pos.session'] pos_session_ids = PosSession.search(cr, uid, [('state','=','opened'),('user_id','=',session.uid)], context=context) PosSession.login(cr,uid,pos_session_ids,context=context) modules = simplejson.dumps(module_boot(request.db)) init = """ var wc = new s.web.WebClient(); wc.show_application = function(){ wc.action_manager.do_action("pos.ui"); }; wc.setElement($(document.body)); wc.start(); """ html = request.registry.get('ir.ui.view').render(cr, session.uid,'point_of_sale.index',{ 'modules': modules, 'init': init, }) return html
hcsturix74/django
refs/heads/master
tests/utils_tests/test_os_utils.py
482
import os import unittest from django.core.exceptions import SuspiciousFileOperation from django.utils._os import safe_join class SafeJoinTests(unittest.TestCase): def test_base_path_ends_with_sep(self): drive, path = os.path.splitdrive(safe_join("/abc/", "abc")) self.assertEqual( path, "{0}abc{0}abc".format(os.path.sep) ) def test_root_path(self): drive, path = os.path.splitdrive(safe_join("/", "path")) self.assertEqual( path, "{}path".format(os.path.sep), ) drive, path = os.path.splitdrive(safe_join("/", "")) self.assertEqual( path, os.path.sep, ) def test_parent_path(self): with self.assertRaises(SuspiciousFileOperation): safe_join("/abc/", "../def")
Rustem/toptal-blog-celery-toy-ex
refs/heads/master
docs/conf.py
1
# Celery Uncovered documentation build configuration file, created by # sphinx-quickstart. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import os import sys # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = 'Celery Uncovered' copyright = """2017, xepa4ep""" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.1' # The full version, including alpha/beta/rc tags. release = '0.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. 
# today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. 
They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'celery_uncovereddoc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # Grouping the document tree into LaTeX files. 
List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'celery_uncovered.tex', 'Celery Uncovered Documentation', """xepa4ep""", 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'celery_uncovered', 'Celery Uncovered Documentation', ["""xepa4ep"""], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'celery_uncovered', 'Celery Uncovered Documentation', """xepa4ep""", 'Celery Uncovered', """A short description of the project.""", 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote'
asimshankar/tensorflow
refs/heads/master
tensorflow/python/eager/tensor_test.py
5
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Unit tests for TensorFlow "Eager" Mode's Tensor class.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy import re import sys import numpy as np from tensorflow.python import pywrap_tensorflow from tensorflow.python.eager import context from tensorflow.python.eager import core from tensorflow.python.eager import test from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import io_ops def _create_tensor(value, device=None, dtype=None): ctx = context.context() if device is None: device = ctx.device_name if dtype is not None: dtype = dtype.as_datatype_enum try: return ops.EagerTensor( value, context=ctx._handle, device=device, dtype=dtype) except core._NotOkStatusException as e: # pylint: disable=protected-access raise core._status_to_exception(e.code, e.message) class TFETensorTest(test_util.TensorFlowTestCase): def testScalarTensor(self): t = _create_tensor(3, dtype=dtypes.int32) self.assertAllEqual(t, _create_tensor(np.array(3))) self.assertEqual(dtypes.int32, t.dtype) self.assertEqual(0, t.shape.ndims) 
self.assertAllEqual([], t.shape.as_list()) self.assertIn("tf.Tensor", str(t)) self.assertIn("tf.Tensor", repr(t)) def testBadConstructorArgs(self): ctx = context.context() handle = ctx._handle device = ctx.device_name # Missing context. with self.assertRaisesRegexp( TypeError, r"Required argument 'context' \(pos 2\) not found"): ops.EagerTensor(1, device=device) # Missing device. with self.assertRaisesRegexp( TypeError, r"Required argument 'device' \(pos 3\) not found"): ops.EagerTensor(1, context=handle) # Bad dtype type. with self.assertRaisesRegexp(TypeError, "Expecting a DataType value for dtype. Got"): ops.EagerTensor(1, context=handle, device=device, dtype="1") # Following errors happen when trying to copy to GPU. if not context.context().num_gpus(): self.skipTest("No GPUs found") with ops.device("/device:GPU:0"): device = ctx.device_name # Bad context. with self.assertRaisesRegexp( TypeError, "Expecting a PyCapsule encoded context handle. Got"): ops.EagerTensor(1.0, context=1, device=device) # Bad device. with self.assertRaisesRegexp( TypeError, "Error parsing device argument to CopyToDevice"): ops.EagerTensor(1.0, context=handle, device=1) def testNumpyValue(self): values = np.array([3.0]) t = _create_tensor(values) self.assertAllEqual(values, t) @test_util.assert_no_new_pyobjects_executing_eagerly def testNumpyDtypeSurvivesThroughTensorConversion(self): scalar_creators = [np.int32, np.int64, np.float32, np.float64] conversion_functions = [ops.convert_to_tensor, constant_op.constant] for scalar_creator in scalar_creators: for conversion_function in conversion_functions: np_val = scalar_creator(3) tensor_val = conversion_function(np_val) self.assertEqual(tensor_val.numpy().dtype, np_val.dtype) self.assertEqual(tensor_val.numpy(), np_val) def testNumpyValueWithCast(self): values = np.array([3.0], dtype=np.float32) t = _create_tensor(values, dtype=dtypes.float64) self.assertAllEqual(values, t) ctx = context.context() # Bad dtype value. 
with self.assertRaisesRegexp(TypeError, "Invalid dtype argument value"): ops.EagerTensor( values, context=ctx._handle, device=ctx.device_name, dtype=12345) def testNumpyOrderHandling(self): n = np.array([[1, 2], [3, 4]], order="F") t = _create_tensor(n) self.assertAllEqual([[1, 2], [3, 4]], t) def testNumpyArrayDtype(self): tensor = constant_op.constant([1.0, 2.0, 3.0]) numpy_tensor = np.asarray(tensor, dtype=np.int32) self.assertAllEqual(numpy_tensor, [1, 2, 3]) def testNdimsAgreesWithNumpy(self): numpy_tensor = np.asarray(1.0) tensor = constant_op.constant(numpy_tensor) self.assertAllEqual(numpy_tensor.ndim, tensor.ndim) numpy_tensor = np.asarray([1.0, 2.0, 3.0]) tensor = constant_op.constant(numpy_tensor) self.assertAllEqual(numpy_tensor.ndim, tensor.ndim) numpy_tensor = np.asarray([[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]]) tensor = constant_op.constant(numpy_tensor) self.assertAllEqual(numpy_tensor.ndim, tensor.ndim) def testLenAgreesWithNumpy(self): numpy_tensor = np.asarray(1.0) tensor = constant_op.constant(numpy_tensor) with self.assertRaises(TypeError): len(numpy_tensor) with self.assertRaisesRegexp( TypeError, r"Scalar tensor has no `len[(][)]`"): len(tensor) numpy_tensor = np.asarray([1.0, 2.0, 3.0]) tensor = constant_op.constant(numpy_tensor) self.assertAllEqual(len(numpy_tensor), len(tensor)) numpy_tensor = np.asarray([[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]]) tensor = constant_op.constant(numpy_tensor) self.assertAllEqual(len(numpy_tensor), len(tensor)) def testCopy(self): t = constant_op.constant(1.0) tt = copy.copy(t) self.assertAllEqual(tt, 1.0) del tt tt = copy.deepcopy(t) self.assertAllEqual(tt, 1.0) del tt self.assertAllEqual(t, 1.0) def testConstantDtype(self): self.assertEqual( constant_op.constant(1, dtype=np.int64).dtype, dtypes.int64) def testTensorAndNumpyMatrix(self): expected = np.array([[1.0, 2.0], [3.0, 4.0]], np.float32) actual = _create_tensor([[1.0, 2.0], [3.0, 4.0]]) self.assertAllEqual(expected, actual) self.assertEqual(np.float32, 
actual.dtype) self.assertEqual(dtypes.float32, actual.dtype) self.assertAllEqual([2, 2], actual.shape.as_list()) def testFloatDowncast(self): # Unless explicitly specified, float64->float32 t = _create_tensor(3.0) self.assertEqual(dtypes.float32, t.dtype) t = _create_tensor(3.0, dtype=dtypes.float64) self.assertEqual(dtypes.float64, t.dtype) def testBool(self): self.assertFalse(bool(_create_tensor(False))) self.assertFalse(bool(_create_tensor([False]))) self.assertFalse(bool(_create_tensor([[False]]))) self.assertFalse(bool(_create_tensor([0]))) self.assertFalse(bool(_create_tensor([0.]))) self.assertTrue(bool(_create_tensor([1]))) self.assertTrue(bool(_create_tensor([1.]))) def testIntDowncast(self): t = _create_tensor(3) self.assertEqual(dtypes.int32, t.dtype) t = _create_tensor(3, dtype=dtypes.int64) self.assertEqual(dtypes.int64, t.dtype) t = _create_tensor(2**33) self.assertEqual(dtypes.int64, t.dtype) def testTensorCreationFailure(self): with self.assertRaises(ValueError): # Should fail because the each row of the Python object has a different # number of columns. 
self.assertEqual(None, _create_tensor([[1], [1, 2]])) def testMultiLineTensorStr(self): t = _create_tensor(np.eye(3)) tensor_str = str(t) self.assertIn("shape=%s, dtype=%s" % (t.shape, t.dtype.name), tensor_str) self.assertIn(str(t), tensor_str) def testMultiLineTensorRepr(self): t = _create_tensor(np.eye(3)) tensor_repr = repr(t) self.assertTrue(tensor_repr.startswith("<")) self.assertTrue(tensor_repr.endswith(">")) self.assertIn("id=%d, shape=%s, dtype=%s, numpy=\n%r" % (t._id, t.shape, t.dtype.name, t.numpy()), tensor_repr) def testTensorStrReprObeyNumpyPrintOptions(self): orig_threshold = np.get_printoptions()["threshold"] orig_edgeitems = np.get_printoptions()["edgeitems"] np.set_printoptions(threshold=2, edgeitems=1) t = _create_tensor(np.arange(10, dtype=np.int32)) self.assertTrue(re.match(r".*\[.*0.*\.\.\..*9.*\]", str(t))) self.assertTrue(re.match(r".*\[.*0.*\.\.\..*9.*\]", repr(t))) # Clean up: reset to previous printoptions. np.set_printoptions(threshold=orig_threshold, edgeitems=orig_edgeitems) def testZeroDimTensorStr(self): t = _create_tensor(42) self.assertIn("42, shape=(), dtype=int32", str(t)) def testZeroDimTensorRepr(self): t = _create_tensor(42) self.assertTrue(repr(t).startswith("<")) self.assertTrue(repr(t).endswith(">")) self.assertIn("id=%d, shape=(), dtype=int32, numpy=42" % t._id, repr(t)) def testZeroSizeTensorStr(self): t = _create_tensor(np.zeros(0, dtype=np.float32)) self.assertIn("[], shape=(0,), dtype=float32", str(t)) def testZeroSizeTensorRepr(self): t = _create_tensor(np.zeros(0, dtype=np.float32)) self.assertTrue(repr(t).startswith("<")) self.assertTrue(repr(t).endswith(">")) self.assertIn("id=%d, shape=(0,), dtype=float32, numpy=%r" % (t._id, t.numpy()), repr(t)) def testStringTensor(self): t_np_orig = np.array([[b"a", b"ab"], [b"abc", b"abcd"]]) t = _create_tensor(t_np_orig) t_np = t.numpy() self.assertTrue(np.all(t_np == t_np_orig), "%s vs %s" % (t_np, t_np_orig)) def testIterateOverTensor(self): l = [[1, 2], [3, 4]] t = 
_create_tensor(l) for list_element, tensor_element in zip(l, t): self.assertAllEqual(list_element, tensor_element.numpy()) def testStringTensorOnGPU(self): if not context.context().num_gpus(): self.skipTest("No GPUs found") with ops.device("/device:GPU:0"): with self.assertRaisesRegexp( RuntimeError, "Can't copy Tensor with type string to device"): _create_tensor("test string") def testInvalidUTF8ProducesReasonableError(self): if sys.version_info[0] < 3: self.skipTest("Test is only valid in python3.") with self.assertRaises(UnicodeDecodeError): io_ops.read_file(b"\xff") @test_util.run_in_graph_and_eager_modes def testConvertToTensorPreferredDtypeIsRespected(self): self.assertEqual( ops.convert_to_tensor(0.5, preferred_dtype=dtypes.int32).dtype, dtypes.float32) self.assertEqual( ops.convert_to_tensor(0.5, preferred_dtype=dtypes.float64).dtype, dtypes.float64) @test_util.run_in_graph_and_eager_modes def testCompatibility(self): integer_types = [dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64, dtypes.uint8, dtypes.uint16, dtypes.uint32, dtypes.uint64] # Floats are not compatible with ints for t in integer_types: with self.assertRaises(TypeError): constant_op.constant(0.5, dtype=t) # Ints compatible with floats self.assertEqual( self.evaluate(constant_op.constant(5, dtype=dtypes.float16)), 5.0) self.assertEqual( self.evaluate(constant_op.constant(5, dtype=dtypes.float32)), 5.0) self.assertEqual( self.evaluate(constant_op.constant(5, dtype=dtypes.float64)), 5.0) self.assertEqual( self.evaluate(constant_op.constant(5, dtype=dtypes.bfloat16)), 5.0) # Ints and floats are compatible with complex types self.assertEqual( constant_op.constant([[1.0]], dtype=dtypes.complex128).dtype, dtypes.complex128) self.assertEqual( constant_op.constant([[1]], dtype=dtypes.complex128).dtype, dtypes.complex128) # Quantized types are not compatible with floats quantized_types = [dtypes.qint16, dtypes.qint32, dtypes.qint8, dtypes.quint16, dtypes.quint8] for t in quantized_types: with 
self.assertRaises(TypeError): constant_op.constant(0.5, dtype=t) # TODO(b/118402529): quantized types are broken in eager. @test_util.run_in_graph_and_eager_modes def testCConvertToTensor(self): with self.assertRaises(TypeError): _ = constant_op.constant(0) < 0.5 @test_util.run_in_graph_and_eager_modes def testConvertToTensorAllowsOverflow(self): _ = ops.convert_to_tensor(123456789, dtype=dtypes.uint8) def testEagerTensorError(self): with self.assertRaisesRegexp( TypeError, "Cannot convert provided value to EagerTensor. " "Provided value.*Requested dtype.*"): _ = ops.convert_to_tensor(1., dtype=dtypes.int32) class TFETensorUtilTest(test_util.TensorFlowTestCase): def testListOfThree(self): t1 = _create_tensor([[1, 2], [3, 4], [5, 6]], dtype=dtypes.int32) t2 = _create_tensor([[1, 2, 5], [3, 4, 5]], dtype=dtypes.int32) t3 = _create_tensor([[1], [3], [5], [6]], dtype=dtypes.int32) r = pywrap_tensorflow.TFE_Py_TensorShapeSlice([t1, t2, t3], 0) self.assertAllEqual(np.array([3, 2, 4]), r.numpy()) r = pywrap_tensorflow.TFE_Py_TensorShapeSlice([t1, t2, t3], 1) self.assertAllEqual(np.array([2, 3, 1]), r.numpy()) def testEmptyTensorList(self): a = pywrap_tensorflow.TFE_Py_TensorShapeSlice([], 0) self.assertTrue(isinstance(a, ops.EagerTensor)) self.assertEqual(0, a.numpy().size) def testTensorListContainsNonTensors(self): t1 = _create_tensor([1, 2], dtype=dtypes.int32) with self.assertRaisesRegexp( TypeError, r"Expected a list of EagerTensors but element 1 has type \"str\""): pywrap_tensorflow.TFE_Py_TensorShapeSlice([t1, "abc"], 0) with self.assertRaisesRegexp( TypeError, r"Expected a list of EagerTensors but element 0 has type \"int\""): pywrap_tensorflow.TFE_Py_TensorShapeSlice([2, t1], 0) def testTensorListNotList(self): t1 = _create_tensor([1, 2], dtype=dtypes.int32) with self.assertRaisesRegexp( TypeError, r"tensors argument must be a list or a tuple. 
Got.*EagerTensor"): pywrap_tensorflow.TFE_Py_TensorShapeSlice(t1, -2) def testNegativeSliceDim(self): t1 = _create_tensor([1, 2], dtype=dtypes.int32) with self.assertRaisesRegexp( ValueError, r"Slice dimension must be non-negative. Got -2"): pywrap_tensorflow.TFE_Py_TensorShapeSlice([t1], -2) def testUnicode(self): self.assertEqual(constant_op.constant(u"asdf").numpy(), b"asdf") def testFloatTensor(self): self.assertEqual(dtypes.float64, _create_tensor(np.float64()).dtype) self.assertEqual(dtypes.float32, _create_tensor(np.float32()).dtype) self.assertEqual(dtypes.float16, _create_tensor(np.float16()).dtype) self.assertEqual(dtypes.float32, _create_tensor(0.0).dtype) def testSliceDimOutOfRange(self): t1 = _create_tensor([[1, 2], [3, 4], [5, 6]], dtype=dtypes.int32) t2 = _create_tensor([1, 2], dtype=dtypes.int32) t3 = _create_tensor(2, dtype=dtypes.int32) with self.assertRaisesRegexp( IndexError, r"Slice dimension \(2\) must be smaller than rank of all tensors, " "but tensor at index 0 has rank 2"): pywrap_tensorflow.TFE_Py_TensorShapeSlice([t1], 2) with self.assertRaisesRegexp( IndexError, r"Slice dimension \(1\) must be smaller than rank of all tensors, " "but tensor at index 0 has rank 1"): pywrap_tensorflow.TFE_Py_TensorShapeSlice([t2], 1) with self.assertRaisesRegexp( IndexError, r"Slice dimension \(1\) must be smaller than rank of all tensors, " "but tensor at index 1 has rank 1"): pywrap_tensorflow.TFE_Py_TensorShapeSlice([t1, t2], 1) with self.assertRaisesRegexp( IndexError, r"Slice dimension \(0\) must be smaller than rank of all tensors, " "but tensor at index 0 has rank 0"): pywrap_tensorflow.TFE_Py_TensorShapeSlice([t3], 0) with self.assertRaisesRegexp( IndexError, r"Slice dimension \(0\) must be smaller than rank of all tensors, " "but tensor at index 2 has rank 0"): pywrap_tensorflow.TFE_Py_TensorShapeSlice([t2, t1, t3], 0) @test_util.assert_no_new_pyobjects_executing_eagerly def testTensorDir(self): t = array_ops.zeros(1) t.test_attr = "Test" 
instance_dir = dir(t) type_dir = dir(ops.EagerTensor) # Monkey patched attributes should show up in dir(t) self.assertIn("test_attr", instance_dir) instance_dir.remove("test_attr") self.assertEqual(instance_dir, type_dir) def testNonRectangularPackAsConstant(self): l = [array_ops.zeros((10, 1)).numpy(), array_ops.zeros(1).numpy()] with self.assertRaisesRegexp( ValueError, "non-rectangular Python sequence"): constant_op.constant(l) if __name__ == "__main__": test.main()
Flexget/Flexget
refs/heads/develop
flexget/tests/api_tests/test_server_api.py
2
import json import os from unittest.mock import patch import pytest from flexget import __version__ from flexget.api.app import __version__ as __api_version__ from flexget.api.app import base_message from flexget.api.core.server import ObjectsContainer as OC from flexget.manager import Manager from flexget.tests.conftest import MockManager from flexget.utils.tools import get_latest_flexget_version_number class TestServerAPI: config = """ tasks: test: rss: url: http://test/rss mock: - title: entry 1 """ def test_pid(self, api_client, schema_match): rsp = api_client.get('/server/pid/', headers={}) assert rsp.status_code == 200 data = json.loads(rsp.get_data(as_text=True)) errors = schema_match(OC.pid_object, data) assert not errors assert data['pid'] == os.getpid() def test_reload(self, api_client, schema_match): with patch.object(MockManager, 'load_config') as mocked_load_config: payload = {'operation': 'reload'} rsp = api_client.json_post('/server/manage/', data=json.dumps(payload)) assert rsp.status_code == 200 data = json.loads(rsp.get_data(as_text=True)) errors = schema_match(base_message, data) assert not errors assert mocked_load_config.called def test_shutdown(self, api_client, schema_match): with patch.object(MockManager, 'shutdown') as mocked_shutdown: payload = {'operation': 'shutdown'} rsp = api_client.json_post('/server/manage/', data=json.dumps(payload)) assert rsp.status_code == 200 data = json.loads(rsp.get_data(as_text=True)) errors = schema_match(base_message, data) assert not errors assert mocked_shutdown.called def test_get_config(self, api_client, schema_match): rsp = api_client.get('/server/config/') assert rsp.status_code == 200 data = json.loads(rsp.get_data(as_text=True)) errors = schema_match({'type': 'object'}, data) assert not errors assert data == { 'tasks': { 'test': { 'mock': [{'title': 'entry 1'}], 'rss': { 'url': 'http://test/rss', 'group_links': False, 'ascii': False, 'escape': False, 'silent': False, 'all_entries': True, }, } } } 
def test_get_raw_config(self, manager, api_client, schema_match): manager.config_path = os.path.join(os.path.dirname(__file__), 'raw_config.yml') rsp = api_client.get('/server/raw_config/') assert rsp.status_code == 200 data = json.loads(rsp.get_data(as_text=True)) errors = schema_match(OC.raw_config_object, data) assert not errors assert ( data['raw_config'] == 'dGFza3M6CiAgdGVzdDoKICAgIHJzczoKICAgICAgdXJsOiBodHRwOi8vdGVzdC9yc3MKICAgIG1' 'vY2s6CiAgICAgIC0gdGl0bGU6IGVudHJ5IDE=' ) @pytest.mark.online def test_version(self, api_client, schema_match): latest = get_latest_flexget_version_number() rsp = api_client.get('/server/version/') assert rsp.status_code == 200 data = json.loads(rsp.get_data(as_text=True)) errors = schema_match(OC.version_object, data) assert not errors assert data == { 'flexget_version': __version__, 'api_version': __api_version__, 'latest_version': latest, } def test_crash_logs_without_crash_log(self, api_client, schema_match): rsp = api_client.get('/server/crash_logs') assert rsp.status_code == 200 data = json.loads(rsp.get_data(as_text=True)) errors = schema_match(OC.crash_logs, data) assert not errors assert not data def test_crash_logs_with_crashes(self, api_client, schema_match, manager): manager.config_base = os.path.join(os.path.dirname(__file__)) rsp = api_client.get('/server/crash_logs') assert rsp.status_code == 200 data = json.loads(rsp.get_data(as_text=True)) errors = schema_match(OC.crash_logs, data) assert not errors assert len(data) == 2
MichaelNedzelsky/intellij-community
refs/heads/master
python/testData/resolve/multiFile/fromQualifiedPackageImportFile/FromQualifiedPackageImportFile.py
83
from mypackage.child import testfile # <ref>
YaningX/open-hackathon
refs/heads/master
open-hackathon-server/src/hackathon/hazure/service_adapter.py
6
# -*- coding: utf-8 -*- """ Copyright (c) Microsoft Open Technologies (Shanghai) Co. Ltd.  All rights reserved. The MIT License (MIT) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ __author__ = "rapidhere" __all__ = ["ServiceAdapter"] from hackathon import Component class ServiceAdapter(Component): """the abstract ServiceAdapter with proxy pattern this adapter delegate the method and properties to the inner proxy, so make the adpater has all functions that adaptee has """ def __init__(self, service): self.service = service def __getattr__(self, name): return getattr(self.service, name)
wallyworld/juju
refs/heads/develop
acceptancetests/repository/trusty/haproxy/hooks/charmhelpers/fetch/giturl.py
6
# Copyright 2014-2015 Canonical Limited. # # This file is part of charm-helpers. # # charm-helpers is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License version 3 as # published by the Free Software Foundation. # # charm-helpers is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. import os from charmhelpers.fetch import ( BaseFetchHandler, UnhandledSource ) from charmhelpers.core.host import mkdir import six if six.PY3: raise ImportError('GitPython does not support Python 3') try: from git import Repo except ImportError: from charmhelpers.fetch import apt_install apt_install("python-git") from git import Repo from git.exc import GitCommandError # noqa E402 class GitUrlFetchHandler(BaseFetchHandler): """Handler for git branches via generic and github URLs""" def can_handle(self, source): url_parts = self.parse_url(source) # TODO (mattyw) no support for ssh git@ yet if url_parts.scheme not in ('http', 'https', 'git'): return False else: return True def clone(self, source, dest, branch): if not self.can_handle(source): raise UnhandledSource("Cannot handle {}".format(source)) repo = Repo.clone_from(source, dest) repo.git.checkout(branch) def install(self, source, branch="master", dest=None): url_parts = self.parse_url(source) branch_name = url_parts.path.strip("/").split("/")[-1] if dest: dest_dir = os.path.join(dest, branch_name) else: dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name) if not os.path.exists(dest_dir): mkdir(dest_dir, perms=0o755) try: self.clone(source, dest_dir, branch) except GitCommandError as e: raise UnhandledSource(e.message) except 
OSError as e: raise UnhandledSource(e.strerror) return dest_dir
staslev/beam
refs/heads/master
sdks/python/apache_beam/io/textio.py
5
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """A source and a sink for reading from and writing to text files.""" from __future__ import absolute_import import logging from functools import partial from apache_beam.coders import coders from apache_beam.io import filebasedsink from apache_beam.io import filebasedsource from apache_beam.io import iobase from apache_beam.io.filebasedsource import ReadAllFiles from apache_beam.io.filesystem import CompressionTypes from apache_beam.io.iobase import Read from apache_beam.io.iobase import Write from apache_beam.transforms import PTransform from apache_beam.transforms.display import DisplayDataItem __all__ = ['ReadFromText', 'ReadAllFromText', 'WriteToText'] class _TextSource(filebasedsource.FileBasedSource): r"""A source for reading text files. Parses a text file as newline-delimited elements. Supports newline delimiters '\n' and '\r\n. This implementation only supports reading text encoded using UTF-8 or ASCII. """ DEFAULT_READ_BUFFER_SIZE = 8192 class ReadBuffer(object): # A buffer that gives the buffered data and next position in the # buffer that should be read. 
def __init__(self, data, position): self._data = data self._position = position @property def data(self): return self._data @data.setter def data(self, value): assert isinstance(value, bytes) self._data = value @property def position(self): return self._position @position.setter def position(self, value): assert isinstance(value, (int, long)) if value > len(self._data): raise ValueError('Cannot set position to %d since it\'s larger than ' 'size of data %d.', value, len(self._data)) self._position = value def __init__(self, file_pattern, min_bundle_size, compression_type, strip_trailing_newlines, coder, buffer_size=DEFAULT_READ_BUFFER_SIZE, validate=True, skip_header_lines=0): super(_TextSource, self).__init__(file_pattern, min_bundle_size, compression_type=compression_type, validate=validate) self._strip_trailing_newlines = strip_trailing_newlines self._compression_type = compression_type self._coder = coder self._buffer_size = buffer_size if skip_header_lines < 0: raise ValueError('Cannot skip negative number of header lines: %d', skip_header_lines) elif skip_header_lines > 10: logging.warning( 'Skipping %d header lines. 
Skipping large number of header ' 'lines might significantly slow down processing.') self._skip_header_lines = skip_header_lines def display_data(self): parent_dd = super(_TextSource, self).display_data() parent_dd['strip_newline'] = DisplayDataItem( self._strip_trailing_newlines, label='Strip Trailing New Lines') parent_dd['buffer_size'] = DisplayDataItem( self._buffer_size, label='Buffer Size') parent_dd['coder'] = DisplayDataItem( self._coder.__class__, label='Coder') return parent_dd def read_records(self, file_name, range_tracker): start_offset = range_tracker.start_position() read_buffer = _TextSource.ReadBuffer('', 0) next_record_start_position = -1 def split_points_unclaimed(stop_position): return (0 if stop_position <= next_record_start_position else iobase.RangeTracker.SPLIT_POINTS_UNKNOWN) range_tracker.set_split_points_unclaimed_callback(split_points_unclaimed) with self.open_file(file_name) as file_to_read: position_after_skipping_header_lines = self._skip_lines( file_to_read, read_buffer, self._skip_header_lines) if self._skip_header_lines else 0 start_offset = max(start_offset, position_after_skipping_header_lines) if start_offset > position_after_skipping_header_lines: # Seeking to one position before the start index and ignoring the # current line. If start_position is at beginning if the line, that line # belongs to the current bundle, hence ignoring that is incorrect. # Seeking to one byte before prevents that. file_to_read.seek(start_offset - 1) read_buffer = _TextSource.ReadBuffer('', 0) sep_bounds = self._find_separator_bounds(file_to_read, read_buffer) if not sep_bounds: # Could not find a separator after (start_offset - 1). This means that # none of the records within the file belongs to the current source. 
return _, sep_end = sep_bounds read_buffer.data = read_buffer.data[sep_end:] next_record_start_position = start_offset - 1 + sep_end else: next_record_start_position = position_after_skipping_header_lines while range_tracker.try_claim(next_record_start_position): record, num_bytes_to_next_record = self._read_record(file_to_read, read_buffer) # For compressed text files that use an unsplittable OffsetRangeTracker # with infinity as the end position, above 'try_claim()' invocation # would pass for an empty record at the end of file that is not # followed by a new line character. Since such a record is at the last # position of a file, it should not be a part of the considered range. # We do this check to ignore such records. if len(record) == 0 and num_bytes_to_next_record < 0: # pylint: disable=len-as-condition break # Record separator must be larger than zero bytes. assert num_bytes_to_next_record != 0 if num_bytes_to_next_record > 0: next_record_start_position += num_bytes_to_next_record yield self._coder.decode(record) if num_bytes_to_next_record < 0: break def _find_separator_bounds(self, file_to_read, read_buffer): # Determines the start and end positions within 'read_buffer.data' of the # next separator starting from position 'read_buffer.position'. # Currently supports following separators. # * '\n' # * '\r\n' # This method may increase the size of buffer but it will not decrease the # size of it. current_pos = read_buffer.position while True: if current_pos >= len(read_buffer.data): # Ensuring that there are enough bytes to determine if there is a '\n' # at current_pos. if not self._try_to_ensure_num_bytes_in_buffer( file_to_read, read_buffer, current_pos + 1): return # Using find() here is more efficient than a linear scan of the byte # array. next_lf = read_buffer.data.find('\n', current_pos) if next_lf >= 0: if next_lf > 0 and read_buffer.data[next_lf - 1] == '\r': # Found a '\r\n'. Accepting that as the next separator. 
return (next_lf - 1, next_lf + 1) else: # Found a '\n'. Accepting that as the next separator. return (next_lf, next_lf + 1) current_pos = len(read_buffer.data) def _try_to_ensure_num_bytes_in_buffer( self, file_to_read, read_buffer, num_bytes): # Tries to ensure that there are at least num_bytes bytes in the buffer. # Returns True if this can be fulfilled, returned False if this cannot be # fulfilled due to reaching EOF. while len(read_buffer.data) < num_bytes: read_data = file_to_read.read(self._buffer_size) if not read_data: return False read_buffer.data += read_data return True def _skip_lines(self, file_to_read, read_buffer, num_lines): """Skip num_lines from file_to_read, return num_lines+1 start position.""" if file_to_read.tell() > 0: file_to_read.seek(0) position = 0 for _ in range(num_lines): _, num_bytes_to_next_record = self._read_record(file_to_read, read_buffer) if num_bytes_to_next_record < 0: # We reached end of file. It is OK to just break here # because subsequent _read_record will return same result. break position += num_bytes_to_next_record return position def _read_record(self, file_to_read, read_buffer): # Returns a tuple containing the current_record and number of bytes to the # next record starting from 'read_buffer.position'. If EOF is # reached, returns a tuple containing the current record and -1. if read_buffer.position > self._buffer_size: # read_buffer is too large. Truncating and adjusting it. read_buffer.data = read_buffer.data[read_buffer.position:] read_buffer.position = 0 record_start_position_in_buffer = read_buffer.position sep_bounds = self._find_separator_bounds(file_to_read, read_buffer) read_buffer.position = sep_bounds[1] if sep_bounds else len( read_buffer.data) if not sep_bounds: # Reached EOF. Bytes up to the EOF is the next record. Returning '-1' for # the starting position of the next record. 
return (read_buffer.data[record_start_position_in_buffer:], -1) if self._strip_trailing_newlines: # Current record should not contain the separator. return (read_buffer.data[record_start_position_in_buffer:sep_bounds[0]], sep_bounds[1] - record_start_position_in_buffer) else: # Current record should contain the separator. return (read_buffer.data[record_start_position_in_buffer:sep_bounds[1]], sep_bounds[1] - record_start_position_in_buffer) class _TextSink(filebasedsink.FileBasedSink): """A sink to a GCS or local text file or files.""" def __init__(self, file_path_prefix, file_name_suffix='', append_trailing_newlines=True, num_shards=0, shard_name_template=None, coder=coders.ToStringCoder(), compression_type=CompressionTypes.AUTO, header=None): """Initialize a _TextSink. Args: file_path_prefix: The file path to write to. The files written will begin with this prefix, followed by a shard identifier (see num_shards), and end in a common extension, if given by file_name_suffix. In most cases, only this argument is specified and num_shards, shard_name_template, and file_name_suffix use default values. file_name_suffix: Suffix for the files written. append_trailing_newlines: indicate whether this sink should write an additional newline char after writing each element. num_shards: The number of files (shards) used for output. If not set, the service will decide on the optimal number of shards. Constraining the number of shards is likely to reduce the performance of a pipeline. Setting this value is not recommended unless you require a specific number of output files. shard_name_template: A template string containing placeholders for the shard number and shard count. When constructing a filename for a particular shard number, the upper-case letters 'S' and 'N' are replaced with the 0-padded shard number and shard count respectively. This argument can be '' in which case it behaves as if num_shards was set to 1 and only one file will be generated. 
The default pattern used is '-SSSSS-of-NNNNN' if None is passed as the shard_name_template. coder: Coder used to encode each line. compression_type: Used to handle compressed output files. Typical value is CompressionTypes.AUTO, in which case the final file path's extension (as determined by file_path_prefix, file_name_suffix, num_shards and shard_name_template) will be used to detect the compression. header: String to write at beginning of file as a header. If not None and append_trailing_newlines is set, '\n' will be added. Returns: A _TextSink object usable for writing. """ super(_TextSink, self).__init__( file_path_prefix, file_name_suffix=file_name_suffix, num_shards=num_shards, shard_name_template=shard_name_template, coder=coder, mime_type='text/plain', compression_type=compression_type) self._append_trailing_newlines = append_trailing_newlines self._header = header def open(self, temp_path): file_handle = super(_TextSink, self).open(temp_path) if self._header is not None: file_handle.write(self._header) if self._append_trailing_newlines: file_handle.write('\n') return file_handle def display_data(self): dd_parent = super(_TextSink, self).display_data() dd_parent['append_newline'] = DisplayDataItem( self._append_trailing_newlines, label='Append Trailing New Lines') return dd_parent def write_encoded_record(self, file_handle, encoded_value): """Writes a single encoded record.""" file_handle.write(encoded_value) if self._append_trailing_newlines: file_handle.write('\n') def _create_text_source( file_pattern=None, min_bundle_size=None, compression_type=None, strip_trailing_newlines=None, coder=None, skip_header_lines=None): return _TextSource( file_pattern=file_pattern, min_bundle_size=min_bundle_size, compression_type=compression_type, strip_trailing_newlines=strip_trailing_newlines, coder=coder, validate=False, skip_header_lines=skip_header_lines) class ReadAllFromText(PTransform): """A ``PTransform`` for reading a ``PCollection`` of text files. 
Reads a ``PCollection`` of text files or file patterns and and produces a ``PCollection`` of strings. Parses a text file as newline-delimited elements, by default assuming UTF-8 encoding. Supports newline delimiters '\\n' and '\\r\\n'. This implementation only supports reading text encoded using UTF-8 or ASCII. This does not support other encodings such as UTF-16 or UTF-32. """ DEFAULT_DESIRED_BUNDLE_SIZE = 64 * 1024 * 1024 # 64MB def __init__( self, min_bundle_size=0, desired_bundle_size=DEFAULT_DESIRED_BUNDLE_SIZE, compression_type=CompressionTypes.AUTO, strip_trailing_newlines=True, coder=coders.StrUtf8Coder(), skip_header_lines=0, **kwargs): """Initialize the ``ReadAllFromText`` transform. Args: min_bundle_size: Minimum size of bundles that should be generated when splitting this source into bundles. See ``FileBasedSource`` for more details. desired_bundle_size: Desired size of bundles that should be generated when splitting this source into bundles. See ``FileBasedSource`` for more details. compression_type: Used to handle compressed input files. Typical value is ``CompressionTypes.AUTO``, in which case the underlying file_path's extension will be used to detect the compression. strip_trailing_newlines: Indicates whether this source should remove the newline char in each line it reads before decoding that line. validate: flag to verify that the files exist during the pipeline creation time. skip_header_lines: Number of header lines to skip. Same number is skipped from each source file. Must be 0 or higher. Large number of skipped lines might impact performance. coder: Coder used to decode each line. 
""" super(ReadAllFromText, self).__init__(**kwargs) source_from_file = partial( _create_text_source, min_bundle_size=min_bundle_size, compression_type=compression_type, strip_trailing_newlines=strip_trailing_newlines, coder=coder, skip_header_lines=skip_header_lines) self._desired_bundle_size = desired_bundle_size self._min_bundle_size = min_bundle_size self._compression_type = compression_type self._read_all_files = ReadAllFiles( True, compression_type, desired_bundle_size, min_bundle_size, source_from_file) def expand(self, pvalue): return pvalue | 'ReadAllFiles' >> self._read_all_files class ReadFromText(PTransform): r"""A :class:`~apache_beam.transforms.ptransform.PTransform` for reading text files. Parses a text file as newline-delimited elements, by default assuming ``UTF-8`` encoding. Supports newline delimiters ``\n`` and ``\r\n``. This implementation only supports reading text encoded using ``UTF-8`` or ``ASCII``. This does not support other encodings such as ``UTF-16`` or ``UTF-32``. """ def __init__( self, file_pattern=None, min_bundle_size=0, compression_type=CompressionTypes.AUTO, strip_trailing_newlines=True, coder=coders.StrUtf8Coder(), validate=True, skip_header_lines=0, **kwargs): """Initialize the :class:`ReadFromText` transform. Args: file_pattern (str): The file path to read from as a local file path or a GCS ``gs://`` path. The path can contain glob characters (``*``, ``?``, and ``[...]`` sets). min_bundle_size (int): Minimum size of bundles that should be generated when splitting this source into bundles. See :class:`~apache_beam.io.filebasedsource.FileBasedSource` for more details. compression_type (str): Used to handle compressed input files. Typical value is :attr:`CompressionTypes.AUTO <apache_beam.io.filesystem.CompressionTypes.AUTO>`, in which case the underlying file_path's extension will be used to detect the compression. 
strip_trailing_newlines (bool): Indicates whether this source should remove the newline char in each line it reads before decoding that line. validate (bool): flag to verify that the files exist during the pipeline creation time. skip_header_lines (int): Number of header lines to skip. Same number is skipped from each source file. Must be 0 or higher. Large number of skipped lines might impact performance. coder (~apache_beam.coders.coders.Coder): Coder used to decode each line. """ super(ReadFromText, self).__init__(**kwargs) self._source = _TextSource( file_pattern, min_bundle_size, compression_type, strip_trailing_newlines, coder, validate=validate, skip_header_lines=skip_header_lines) def expand(self, pvalue): return pvalue.pipeline | Read(self._source) class WriteToText(PTransform): """A :class:`~apache_beam.transforms.ptransform.PTransform` for writing to text files.""" def __init__( self, file_path_prefix, file_name_suffix='', append_trailing_newlines=True, num_shards=0, shard_name_template=None, coder=coders.ToStringCoder(), compression_type=CompressionTypes.AUTO, header=None): r"""Initialize a :class:`WriteToText` transform. Args: file_path_prefix (str): The file path to write to. The files written will begin with this prefix, followed by a shard identifier (see **num_shards**), and end in a common extension, if given by **file_name_suffix**. In most cases, only this argument is specified and **num_shards**, **shard_name_template**, and **file_name_suffix** use default values. file_name_suffix (str): Suffix for the files written. append_trailing_newlines (bool): indicate whether this sink should write an additional newline char after writing each element. num_shards (int): The number of files (shards) used for output. If not set, the service will decide on the optimal number of shards. Constraining the number of shards is likely to reduce the performance of a pipeline. 
Setting this value is not recommended unless you require a specific number of output files. shard_name_template (str): A template string containing placeholders for the shard number and shard count. Currently only ``''`` and ``'-SSSSS-of-NNNNN'`` are patterns accepted by the service. When constructing a filename for a particular shard number, the upper-case letters ``S`` and ``N`` are replaced with the ``0``-padded shard number and shard count respectively. This argument can be ``''`` in which case it behaves as if num_shards was set to 1 and only one file will be generated. The default pattern used is ``'-SSSSS-of-NNNNN'``. coder (~apache_beam.coders.coders.Coder): Coder used to encode each line. compression_type (str): Used to handle compressed output files. Typical value is :class:`CompressionTypes.AUTO <apache_beam.io.filesystem.CompressionTypes.AUTO>`, in which case the final file path's extension (as determined by **file_path_prefix**, **file_name_suffix**, **num_shards** and **shard_name_template**) will be used to detect the compression. header (str): String to write at beginning of file as a header. If not :data:`None` and **append_trailing_newlines** is set, ``\n`` will be added. """ self._sink = _TextSink(file_path_prefix, file_name_suffix, append_trailing_newlines, num_shards, shard_name_template, coder, compression_type, header) def expand(self, pcoll): return pcoll | Write(self._sink)